#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
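# Illustrative usage (placeholder host and credentials): the Miner class below
# drives this wrapper by calling getwork() to fetch new work and getwork(data)
# again to submit a solved share.
#
#     rpc = BitcoinRPC('127.0.0.1', 9143, 'rpcuser', 'rpcpass')
#     work = rpc.getwork()   # dict with 'data' and 'target' hex strings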
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
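# Note: for buffers whose length is a multiple of four, bufreverse() followed
# by wordreverse() reverses the entire byte order. Miner.work() applies the
# pair to each candidate double-SHA256 digest so that the hex string passed to
# long() reflects the byte order Bitcoin uses for the hash-versus-target test.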
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76 bytes of the 80-byte header do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
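# Illustrative sketch (not part of the original script; the function name is
# made up): the same double-SHA256 proof-of-work test that Miner.work()
# performs, written without the getwork byte-swapping. Assumes blk_hdr_80 is
# an 80-byte block header in wire byte order and target is the 256-bit integer
# that Miner.work() derives from the 'target' field.
def check_proof_of_work(blk_hdr_80, nonce, target):
    import binascii
    # splice the 32-bit little-endian nonce into the last 4 header bytes
    hdr = blk_hdr_80[:76] + struct.pack("<I", nonce)
    # Bitcoin hashes the block header twice with SHA-256
    digest = hashlib.sha256(hashlib.sha256(hdr).digest()).digest()
    # interpret the digest as a little-endian 256-bit integer; a valid
    # solution is numerically below the target
    return long(binascii.hexlify(digest[::-1]), 16) < target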
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 9143
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
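# Illustrative configuration file for the parser above (key=value lines, '#'
# starts a comment). Only rpcuser and rpcpass are required; the remaining keys
# fall back to the defaults set above. The credentials are placeholders.
#
#     host=127.0.0.1
#     port=9143
#     rpcuser=someuser
#     rpcpass=somepassword
#     threads=1
#     hashmeter=1
#     scantime=30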
#!/usr/bin/env python
#
# Copyright 2015-2015 breakwa11
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import hashlib
import logging
import binascii
import base64
import time
import datetime
import random
import math
import struct
import hmac
import bisect
import shadowsocks
from shadowsocks import common, lru_cache, encrypt
from shadowsocks.obfsplugin import plain
from shadowsocks.common import to_bytes, to_str, ord, chr
from shadowsocks.crypto import openssl
rand_bytes = openssl.rand_bytes
def create_auth_chain_a(method):
return auth_chain_a(method)
def create_auth_chain_b(method):
return auth_chain_b(method)
def create_auth_chain_c(method):
return auth_chain_c(method)
def create_auth_chain_d(method):
return auth_chain_d(method)
def create_auth_chain_e(method):
return auth_chain_e(method)
def create_auth_chain_f(method):
return auth_chain_f(method)
obfs_map = {
'auth_chain_a': (create_auth_chain_a,),
'auth_chain_b': (create_auth_chain_b,),
'auth_chain_c': (create_auth_chain_c,),
'auth_chain_d': (create_auth_chain_d,),
'auth_chain_e': (create_auth_chain_e,),
'auth_chain_f': (create_auth_chain_f,),
}
class xorshift128plus(object):
max_int = (1 << 64) - 1
mov_mask = (1 << (64 - 23)) - 1
def __init__(self):
self.v0 = 0
self.v1 = 0
def next(self):
x = self.v0
y = self.v1
self.v0 = y
x ^= ((x & xorshift128plus.mov_mask) << 23)
x ^= (y ^ (x >> 17) ^ (y >> 26)) & xorshift128plus.max_int
self.v1 = x
return (x + y) & xorshift128plus.max_int
def init_from_bin(self, bin):
bin += b'\0' * 16
self.v0 = struct.unpack('<Q', bin[:8])[0]
self.v1 = struct.unpack('<Q', bin[8:16])[0]
def init_from_bin_len(self, bin, length):
bin += b'\0' * 16
bin = struct.pack('<H', length) + bin[2:]
self.v0 = struct.unpack('<Q', bin[:8])[0]
self.v1 = struct.unpack('<Q', bin[8:16])[0]
for i in range(4):
self.next()
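# Illustrative sketch (not part of the protocol code; the helper name is made
# up): how the generator above is typically seeded. Both endpoints feed it the
# same last-packet hash (see rnd_data_len below), so client and server derive
# identical pseudo-random padding lengths without ever exchanging them.
def _xorshift128plus_demo(seed16=b'\x01' * 16):
    rng = xorshift128plus()
    rng.init_from_bin(seed16)  # state = two little-endian 64-bit words
    return [rng.next() for _ in range(4)]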
def match_begin(str1, str2):
if len(str1) >= len(str2):
if str1[:len(str2)] == str2:
return True
return False
class auth_base(plain.plain):
def __init__(self, method):
super(auth_base, self).__init__(method)
self.method = method
self.no_compatible_method = ''
self.overhead = 4
def init_data(self):
return ''
def get_overhead(self, direction): # direction: true for c->s, false for s->c
return self.overhead
def set_server_info(self, server_info):
self.server_info = server_info
def client_encode(self, buf):
return buf
def client_decode(self, buf):
return (buf, False)
def server_encode(self, buf):
return buf
def server_decode(self, buf):
return (buf, True, False)
def not_match_return(self, buf):
self.raw_trans = True
self.overhead = 0
if self.method == self.no_compatible_method:
return (b'E'*2048, False)
return (buf, False)
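# client_queue below is a sliding-window replay filter over connection ids:
# ids below self.front are rejected as replays, ids more than 0x4000 ahead of
# the window are rejected as invalid, duplicates are rejected, and the window
# front advances as ids are allocated.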
class client_queue(object):
def __init__(self, begin_id):
self.front = begin_id - 64
self.back = begin_id + 1
self.alloc = {}
self.enable = True
self.last_update = time.time()
self.ref = 0
def update(self):
self.last_update = time.time()
def addref(self):
self.ref += 1
def delref(self):
if self.ref > 0:
self.ref -= 1
def is_active(self):
return (self.ref > 0) and (time.time() - self.last_update < 60 * 10)
def re_enable(self, connection_id):
self.enable = True
self.front = connection_id - 64
self.back = connection_id + 1
self.alloc = {}
def insert(self, connection_id):
if not self.enable:
logging.warn('obfs auth: not enabled')
return False
if not self.is_active():
self.re_enable(connection_id)
self.update()
if connection_id < self.front:
logging.warn('obfs auth: deprecated id, possible replay attack')
return False
if connection_id > self.front + 0x4000:
logging.warn('obfs auth: wrong id')
return False
if connection_id in self.alloc:
logging.warn('obfs auth: duplicate id, possible replay attack')
return False
if self.back <= connection_id:
self.back = connection_id + 1
self.alloc[connection_id] = 1
while (self.front in self.alloc) or self.front + 0x1000 < self.back:
if self.front in self.alloc:
del self.alloc[self.front]
self.front += 1
self.addref()
return True
class obfs_auth_chain_data(object):
def __init__(self, name):
self.name = name
self.user_id = {}
self.local_client_id = b''
self.connection_id = 0
self.set_max_client(64) # max active client count
def update(self, user_id, client_id, connection_id):
if user_id not in self.user_id:
self.user_id[user_id] = lru_cache.LRUCache()
local_client_id = self.user_id[user_id]
if client_id in local_client_id:
local_client_id[client_id].update()
def set_max_client(self, max_client):
self.max_client = max_client
self.max_buffer = max(self.max_client * 2, 1024)
def insert(self, user_id, client_id, connection_id):
if user_id not in self.user_id:
self.user_id[user_id] = lru_cache.LRUCache()
local_client_id = self.user_id[user_id]
if local_client_id.get(client_id, None) is None or not local_client_id[client_id].enable:
if local_client_id.first() is None or len(local_client_id) < self.max_client:
if client_id not in local_client_id:
#TODO: check
local_client_id[client_id] = client_queue(connection_id)
else:
local_client_id[client_id].re_enable(connection_id)
return local_client_id[client_id].insert(connection_id)
if not local_client_id[local_client_id.first()].is_active():
del local_client_id[local_client_id.first()]
if client_id not in local_client_id:
#TODO: check
local_client_id[client_id] = client_queue(connection_id)
else:
local_client_id[client_id].re_enable(connection_id)
return local_client_id[client_id].insert(connection_id)
logging.warn(self.name + ': no inactive client')
return False
else:
return local_client_id[client_id].insert(connection_id)
def remove(self, user_id, client_id):
if user_id in self.user_id:
local_client_id = self.user_id[user_id]
if client_id in local_client_id:
local_client_id[client_id].delref()
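# Frame layout used by auth_chain_a (derived from pack_client_data /
# pack_server_data / pack_auth_data below):
#   [2-byte payload length, XORed with bytes 14..15 of the previous HMAC]
#   [RC4-encrypted payload spliced into random padding at a random offset]
#   [first 2 bytes of an HMAC-MD5 over the length field and body, keyed with
#    user_key concatenated with the 32-bit pack_id]
# The very first client packet is preceded by a 36-byte header: a 4-byte
# random head with an 8-byte HMAC tag, a 4-byte obfuscated uid, a 16-byte
# AES-128-CBC block carrying timestamp/client id/connection id/overhead,
# and a 4-byte HMAC tag.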
class auth_chain_a(auth_base):
def __init__(self, method):
super(auth_chain_a, self).__init__(method)
self.hashfunc = hashlib.md5
self.recv_buf = b''
self.unit_len = 2800
self.raw_trans = False
self.has_sent_header = False
self.has_recv_header = False
self.client_id = 0
self.connection_id = 0
self.max_time_dif = 60 * 60 * 24 # max allowed time difference (seconds)
self.salt = b"auth_chain_a"
self.no_compatible_method = 'auth_chain_a'
self.pack_id = 1
self.recv_id = 1
self.user_id = None
self.user_id_num = 0
self.user_key = None
self.overhead = 4
self.client_over_head = 4
self.last_client_hash = b''
self.last_server_hash = b''
self.random_client = xorshift128plus()
self.random_server = xorshift128plus()
self.encryptor = None
def init_data(self):
return obfs_auth_chain_data(self.method)
def get_overhead(self, direction): # direction: true for c->s, false for s->c
return self.overhead
def set_server_info(self, server_info):
self.server_info = server_info
try:
max_client = int(server_info.protocol_param.split('#')[0])
except:
max_client = 64
self.server_info.data.set_max_client(max_client)
def trapezoid_random_float(self, d):
if d == 0:
return random.random()
s = random.random()
a = 1 - d
return (math.sqrt(a * a + 4 * d * s) - a) / (2 * d)
def trapezoid_random_int(self, max_val, d):
v = self.trapezoid_random_float(d)
return int(v * max_val)
def rnd_data_len(self, buf_size, last_hash, random):
if buf_size > 1440:
return 0
random.init_from_bin_len(last_hash, buf_size)
if buf_size > 1300:
return random.next() % 31
if buf_size > 900:
return random.next() % 127
if buf_size > 400:
return random.next() % 521
return random.next() % 1021
def udp_rnd_data_len(self, last_hash, random):
random.init_from_bin(last_hash)
return random.next() % 127
def rnd_start_pos(self, rand_len, random):
if rand_len > 0:
return random.next() % 8589934609 % rand_len
return 0
def rnd_data(self, buf_size, buf, last_hash, random):
rand_len = self.rnd_data_len(buf_size, last_hash, random)
rnd_data_buf = rand_bytes(rand_len)
if buf_size == 0:
return rnd_data_buf
else:
if rand_len > 0:
start_pos = self.rnd_start_pos(rand_len, random)
return rnd_data_buf[:start_pos] + buf + rnd_data_buf[start_pos:]
else:
return buf
def pack_client_data(self, buf):
buf = self.encryptor.encrypt(buf)
data = self.rnd_data(len(buf), buf, self.last_client_hash, self.random_client)
data_len = len(data) + 8
mac_key = self.user_key + struct.pack('<I', self.pack_id)
length = len(buf) ^ struct.unpack('<H', self.last_client_hash[14:])[0]
data = struct.pack('<H', length) + data
self.last_client_hash = hmac.new(mac_key, data, self.hashfunc).digest()
data += self.last_client_hash[:2]
self.pack_id = (self.pack_id + 1) & 0xFFFFFFFF
return data
def pack_server_data(self, buf):
buf = self.encryptor.encrypt(buf)
data = self.rnd_data(len(buf), buf, self.last_server_hash, self.random_server)
data_len = len(data) + 8
mac_key = self.user_key + struct.pack('<I', self.pack_id)
length = len(buf) ^ struct.unpack('<H', self.last_server_hash[14:])[0]
data = struct.pack('<H', length) + data
self.last_server_hash = hmac.new(mac_key, data, self.hashfunc).digest()
data += self.last_server_hash[:2]
self.pack_id = (self.pack_id + 1) & 0xFFFFFFFF
return data
def pack_auth_data(self, auth_data, buf):
data = auth_data
data_len = 12 + 4 + 16 + 4
data = data + (struct.pack('<H', self.server_info.overhead) + struct.pack('<H', 0))
mac_key = self.server_info.iv + self.server_info.key
check_head = rand_bytes(4)
self.last_client_hash = hmac.new(mac_key, check_head, self.hashfunc).digest()
check_head += self.last_client_hash[:8]
if b':' in to_bytes(self.server_info.protocol_param):
try:
items = to_bytes(self.server_info.protocol_param).split(b':')
self.user_key = items[1]
uid = struct.pack('<I', int(items[0]))
except:
uid = rand_bytes(4)
else:
uid = rand_bytes(4)
if self.user_key is None:
self.user_key = self.server_info.key
encryptor = encrypt.Encryptor(to_bytes(base64.b64encode(self.user_key)) + self.salt, 'aes-128-cbc', b'\x00' * 16)
uid = struct.unpack('<I', uid)[0] ^ struct.unpack('<I', self.last_client_hash[8:12])[0]
uid = struct.pack('<I', uid)
data = uid + encryptor.encrypt(data)[16:]
self.last_server_hash = hmac.new(self.user_key, data, self.hashfunc).digest()
data = check_head + data + self.last_server_hash[:4]
self.encryptor = encrypt.Encryptor(to_bytes(base64.b64encode(self.user_key)) + to_bytes(base64.b64encode(self.last_client_hash)), 'rc4')
return data + self.pack_client_data(buf)
def auth_data(self):
utc_time = int(time.time()) & 0xFFFFFFFF
if self.server_info.data.connection_id > 0xFF000000:
self.server_info.data.local_client_id = b''
if not self.server_info.data.local_client_id:
self.server_info.data.local_client_id = rand_bytes(4)
logging.debug("local_client_id %s" % (binascii.hexlify(self.server_info.data.local_client_id),))
self.server_info.data.connection_id = struct.unpack('<I', rand_bytes(4))[0] & 0xFFFFFF
self.server_info.data.connection_id += 1
return b''.join([struct.pack('<I', utc_time),
self.server_info.data.local_client_id,
struct.pack('<I', self.server_info.data.connection_id)])
def client_pre_encrypt(self, buf):
ret = b''
ogn_data_len = len(buf)
if not self.has_sent_header:
head_size = self.get_head_size(buf, 30)
datalen = min(len(buf), random.randint(0, 31) + head_size)
ret += self.pack_auth_data(self.auth_data(), buf[:datalen])
buf = buf[datalen:]
self.has_sent_header = True
while len(buf) > self.unit_len:
ret += self.pack_client_data(buf[:self.unit_len])
buf = buf[self.unit_len:]
ret += self.pack_client_data(buf)
return ret
def client_post_decrypt(self, buf):
if self.raw_trans:
return buf
self.recv_buf += buf
out_buf = b''
while len(self.recv_buf) > 4:
mac_key = self.user_key + struct.pack('<I', self.recv_id)
data_len = struct.unpack('<H', self.recv_buf[:2])[0] ^ struct.unpack('<H', self.last_server_hash[14:16])[0]
rand_len = self.rnd_data_len(data_len, self.last_server_hash, self.random_server)
length = data_len + rand_len
if length >= 4096:
self.raw_trans = True
self.recv_buf = b''
raise Exception('client_post_decrypt data error')
if length + 4 > len(self.recv_buf):
break
server_hash = hmac.new(mac_key, self.recv_buf[:length + 2], self.hashfunc).digest()
if server_hash[:2] != self.recv_buf[length + 2 : length + 4]:
logging.info('%s: checksum error, data %s' % (self.no_compatible_method, binascii.hexlify(self.recv_buf[:length])))
self.raw_trans = True
self.recv_buf = b''
raise Exception('client_post_decrypt data incorrect checksum')
pos = 2
if data_len > 0 and rand_len > 0:
pos = 2 + self.rnd_start_pos(rand_len, self.random_server)
out_buf += self.encryptor.decrypt(self.recv_buf[pos : data_len + pos])
self.last_server_hash = server_hash
if self.recv_id == 1:
self.server_info.tcp_mss = struct.unpack('<H', out_buf[:2])[0]
out_buf = out_buf[2:]
self.recv_id = (self.recv_id + 1) & 0xFFFFFFFF
self.recv_buf = self.recv_buf[length + 4:]
return out_buf
def server_pre_encrypt(self, buf):
if self.raw_trans:
return buf
ret = b''
if self.pack_id == 1:
tcp_mss = self.server_info.tcp_mss if self.server_info.tcp_mss < 1500 else 1500
self.server_info.tcp_mss = tcp_mss
buf = struct.pack('<H', tcp_mss) + buf
self.unit_len = tcp_mss - self.client_over_head
while len(buf) > self.unit_len:
ret += self.pack_server_data(buf[:self.unit_len])
buf = buf[self.unit_len:]
ret += self.pack_server_data(buf)
return ret
def server_post_decrypt(self, buf):
if self.raw_trans:
return (buf, False)
self.recv_buf += buf
out_buf = b''
sendback = False
if not self.has_recv_header:
if len(self.recv_buf) >= 12 or len(self.recv_buf) in [7, 8]:
recv_len = min(len(self.recv_buf), 12)
mac_key = self.server_info.recv_iv + self.server_info.key
md5data = hmac.new(mac_key, self.recv_buf[:4], self.hashfunc).digest()
if md5data[:recv_len - 4] != self.recv_buf[4:recv_len]:
return self.not_match_return(self.recv_buf)
if len(self.recv_buf) < 12 + 24:
return (b'', False)
self.last_client_hash = md5data
uid = struct.unpack('<I', self.recv_buf[12:16])[0] ^ struct.unpack('<I', md5data[8:12])[0]
self.user_id_num = uid
if uid in self.server_info.users:
self.user_id = uid
self.user_key = self.server_info.users[uid]['passwd'].encode('utf-8')
self.server_info.update_user_func(uid)
else:
self.user_id_num = 0
if not self.server_info.users:
self.user_key = self.server_info.key
else:
self.user_key = self.server_info.recv_iv
md5data = hmac.new(self.user_key, self.recv_buf[12 : 12 + 20], self.hashfunc).digest()
if md5data[:4] != self.recv_buf[32:36]:
logging.error('%s data incorrect auth HMAC-MD5 from %s:%d, data %s' % (self.no_compatible_method, self.server_info.client, self.server_info.client_port, binascii.hexlify(self.recv_buf)))
if len(self.recv_buf) < 36:
return (b'', False)
return self.not_match_return(self.recv_buf)
self.last_server_hash = md5data
encryptor = encrypt.Encryptor(to_bytes(base64.b64encode(self.user_key)) + self.salt, 'aes-128-cbc')
head = encryptor.decrypt(b'\x00' * 16 + self.recv_buf[16:32] + b'\x00') # need an extra byte or recv empty
self.client_over_head = struct.unpack('<H', head[12:14])[0]
utc_time = struct.unpack('<I', head[:4])[0]
client_id = struct.unpack('<I', head[4:8])[0]
connection_id = struct.unpack('<I', head[8:12])[0]
time_dif = common.int32(utc_time - (int(time.time()) & 0xffffffff))
if time_dif < -self.max_time_dif or time_dif > self.max_time_dif:
logging.info('%s: wrong timestamp, time_dif %d, data %s' % (self.no_compatible_method, time_dif, binascii.hexlify(head)))
return self.not_match_return(self.recv_buf)
elif self.server_info.data.insert(self.user_id, client_id, connection_id):
self.has_recv_header = True
self.client_id = client_id
self.connection_id = connection_id
else:
logging.info('%s: auth fail, data %s' % (self.no_compatible_method, binascii.hexlify(out_buf)))
return self.not_match_return(self.recv_buf)
self.encryptor = encrypt.Encryptor(to_bytes(base64.b64encode(self.user_key)) + to_bytes(base64.b64encode(self.last_client_hash)), 'rc4')
self.recv_buf = self.recv_buf[36:]
self.has_recv_header = True
sendback = True
while len(self.recv_buf) > 4:
mac_key = self.user_key + struct.pack('<I', self.recv_id)
data_len = struct.unpack('<H', self.recv_buf[:2])[0] ^ struct.unpack('<H', self.last_client_hash[14:16])[0]
rand_len = self.rnd_data_len(data_len, self.last_client_hash, self.random_client)
length = data_len + rand_len
if length >= 4096:
self.raw_trans = True
self.recv_buf = b''
if self.recv_id == 0:
logging.info(self.no_compatible_method + ': over size')
return (b'E'*2048, False)
else:
raise Exception('server_post_decrypt data error')
if length + 4 > len(self.recv_buf):
break
client_hash = hmac.new(mac_key, self.recv_buf[:length + 2], self.hashfunc).digest()
if client_hash[:2] != self.recv_buf[length + 2 : length + 4]:
logging.info('%s: checksum error, data %s' % (self.no_compatible_method, binascii.hexlify(self.recv_buf[:length])))
self.raw_trans = True
self.recv_buf = b''
if self.recv_id == 0:
return (b'E'*2048, False)
else:
raise Exception('server_post_decrypt data incorrect checksum')
self.recv_id = (self.recv_id + 1) & 0xFFFFFFFF
pos = 2
if data_len > 0 and rand_len > 0:
pos = 2 + self.rnd_start_pos(rand_len, self.random_client)
out_buf += self.encryptor.decrypt(self.recv_buf[pos : data_len + pos])
self.last_client_hash = client_hash
self.recv_buf = self.recv_buf[length + 4:]
if data_len == 0:
sendback = True
if out_buf:
self.server_info.data.update(self.user_id, self.client_id, self.connection_id)
return (out_buf, sendback)
def client_udp_pre_encrypt(self, buf):
if self.user_key is None:
if b':' in to_bytes(self.server_info.protocol_param):
try:
items = to_bytes(self.server_info.protocol_param).split(':')
self.user_key = self.hashfunc(items[1]).digest()
self.user_id = struct.pack('<I', int(items[0]))
except:
pass
if self.user_key is None:
self.user_id = rand_bytes(4)
self.user_key = self.server_info.key
authdata = rand_bytes(3)
mac_key = self.server_info.key
md5data = hmac.new(mac_key, authdata, self.hashfunc).digest()
uid = struct.unpack('<I', self.user_id)[0] ^ struct.unpack('<I', md5data[:4])[0]
uid = struct.pack('<I', uid)
rand_len = self.udp_rnd_data_len(md5data, self.random_client)
encryptor = encrypt.Encryptor(to_bytes(base64.b64encode(self.user_key)) + to_bytes(base64.b64encode(md5data)), 'rc4')
out_buf = encryptor.encrypt(buf)
buf = out_buf + rand_bytes(rand_len) + authdata + uid
return buf + hmac.new(self.user_key, buf, self.hashfunc).digest()[:1]
def client_udp_post_decrypt(self, buf):
if len(buf) <= 8:
return (b'', None)
if hmac.new(self.user_key, buf[:-1], self.hashfunc).digest()[:1] != buf[-1:]:
return (b'', None)
mac_key = self.server_info.key
md5data = hmac.new(mac_key, buf[-8:-1], self.hashfunc).digest()
rand_len = self.udp_rnd_data_len(md5data, self.random_server)
encryptor = encrypt.Encryptor(to_bytes(base64.b64encode(self.user_key)) + to_bytes(base64.b64encode(md5data)), 'rc4')
return encryptor.decrypt(buf[:-8 - rand_len])
def server_udp_pre_encrypt(self, buf, uid):
if uid in self.server_info.users:
user_key = self.server_info.users[uid]['passwd'].encode('utf-8')
else:
uid = None
if not self.server_info.users:
user_key = self.server_info.key
else:
user_key = self.server_info.recv_iv
authdata = rand_bytes(7)
mac_key = self.server_info.key
md5data = hmac.new(mac_key, authdata, self.hashfunc).digest()
rand_len = self.udp_rnd_data_len(md5data, self.random_server)
encryptor = encrypt.Encryptor(to_bytes(base64.b64encode(user_key)) + to_bytes(base64.b64encode(md5data)), 'rc4')
out_buf = encryptor.encrypt(buf)
buf = out_buf + rand_bytes(rand_len) + authdata
return buf + hmac.new(user_key, buf, self.hashfunc).digest()[:1]
def server_udp_post_decrypt(self, buf):
mac_key = self.server_info.key
md5data = hmac.new(mac_key, buf[-8:-5], self.hashfunc).digest()
uid = struct.unpack('<I', buf[-5:-1])[0] ^ struct.unpack('<I', md5data[:4])[0]
if uid in self.server_info.users:
user_key = self.server_info.users[uid]['passwd'].encode('utf-8')
else:
uid = None
if not self.server_info.users:
user_key = self.server_info.key
else:
user_key = self.server_info.recv_iv
if hmac.new(user_key, buf[:-1], self.hashfunc).digest()[:1] != buf[-1:]:
return (b'', None)
rand_len = self.udp_rnd_data_len(md5data, self.random_client)
encryptor = encrypt.Encryptor(to_bytes(base64.b64encode(user_key)) + to_bytes(base64.b64encode(md5data)), 'rc4')
out_buf = encryptor.decrypt(buf[:-8 - rand_len])
return (out_buf, uid)
def dispose(self):
self.server_info.data.remove(self.user_id, self.client_id)
class auth_chain_b(auth_chain_a):
def __init__(self, method):
super(auth_chain_b, self).__init__(method)
self.salt = b"auth_chain_b"
self.no_compatible_method = 'auth_chain_b'
self.data_size_list = []
self.data_size_list2 = []
def init_data_size(self, key):
if self.data_size_list:
self.data_size_list = []
self.data_size_list2 = []
random = xorshift128plus()
random.init_from_bin(key)
list_len = random.next() % 8 + 4
for i in range(0, list_len):
self.data_size_list.append((int)(random.next() % 2340 % 2040 % 1440))
self.data_size_list.sort()
list_len = random.next() % 16 + 8
for i in range(0, list_len):
self.data_size_list2.append((int)(random.next() % 2340 % 2040 % 1440))
self.data_size_list2.sort()
def set_server_info(self, server_info):
self.server_info = server_info
try:
max_client = int(server_info.protocol_param.split('#')[0])
except:
max_client = 64
self.server_info.data.set_max_client(max_client)
self.init_data_size(self.server_info.key)
def rnd_data_len(self, buf_size, last_hash, random):
if buf_size >= 1440:
return 0
random.init_from_bin_len(last_hash, buf_size)
pos = bisect.bisect_left(self.data_size_list, buf_size + self.server_info.overhead)
final_pos = pos + random.next() % (len(self.data_size_list))
if final_pos < len(self.data_size_list):
return self.data_size_list[final_pos] - buf_size - self.server_info.overhead
pos = bisect.bisect_left(self.data_size_list2, buf_size + self.server_info.overhead)
final_pos = pos + random.next() % (len(self.data_size_list2))
if final_pos < len(self.data_size_list2):
return self.data_size_list2[final_pos] - buf_size - self.server_info.overhead
if final_pos < pos + len(self.data_size_list2) - 1:
return 0
if buf_size > 1300:
return random.next() % 31
if buf_size > 900:
return random.next() % 127
if buf_size > 400:
return random.next() % 521
return random.next() % 1021
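# auth_chain_b, _c and _d pad each frame up to one of a set of target sizes
# (data_size_list / data_size_list0) generated deterministically from the
# server key, so observed packet lengths cluster around key-dependent values
# instead of tracking the plaintext length directly.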
class auth_chain_c(auth_chain_b):
def __init__(self, method):
super(auth_chain_c, self).__init__(method)
self.salt = b"auth_chain_c"
self.no_compatible_method = 'auth_chain_c'
self.data_size_list0 = []
def init_data_size(self, key):
if self.data_size_list0:
self.data_size_list0 = []
random = xorshift128plus()
random.init_from_bin(key)
list_len = random.next() % (8 + 16) + (4 + 8)
for i in range(0, list_len):
self.data_size_list0.append((int)(random.next() % 2340 % 2040 % 1440))
self.data_size_list0.sort()
def set_server_info(self, server_info):
self.server_info = server_info
try:
max_client = int(server_info.protocol_param.split('#')[0])
except:
max_client = 64
self.server_info.data.set_max_client(max_client)
self.init_data_size(self.server_info.key)
def rnd_data_len(self, buf_size, last_hash, random):
other_data_size = buf_size + self.server_info.overhead
random.init_from_bin_len(last_hash, buf_size)
if other_data_size >= self.data_size_list0[-1]:
if other_data_size >= 1440:
return 0
if other_data_size > 1300:
return random.next() % 31
if other_data_size > 900:
return random.next() % 127
if other_data_size > 400:
return random.next() % 521
return random.next() % 1021
pos = bisect.bisect_left(self.data_size_list0, other_data_size)
final_pos = pos + random.next() % (len(self.data_size_list0) - pos)
return self.data_size_list0[final_pos] - other_data_size
class auth_chain_d(auth_chain_b):
def __init__(self, method):
super(auth_chain_d, self).__init__(method)
self.salt = b"auth_chain_d"
self.no_compatible_method = 'auth_chain_d'
self.data_size_list0 = []
def check_and_patch_data_size(self, random):
if self.data_size_list0[-1] < 1300 and len(self.data_size_list0) < 64:
self.data_size_list0.append((int)(random.next() % 2340 % 2040 % 1440))
self.check_and_patch_data_size(random)
def init_data_size(self, key):
if self.data_size_list0:
self.data_size_list0 = []
random = xorshift128plus()
random.init_from_bin(key)
list_len = random.next() % (8 + 16) + (4 + 8)
for i in range(0, list_len):
self.data_size_list0.append((int)(random.next() % 2340 % 2040 % 1440))
self.data_size_list0.sort()
old_len = len(self.data_size_list0)
self.check_and_patch_data_size(random)
if old_len != len(self.data_size_list0):
self.data_size_list0.sort()
def set_server_info(self, server_info):
self.server_info = server_info
try:
max_client = int(server_info.protocol_param.split('#')[0])
except:
max_client = 64
self.server_info.data.set_max_client(max_client)
self.init_data_size(self.server_info.key)
def rnd_data_len(self, buf_size, last_hash, random):
other_data_size = buf_size + self.server_info.overhead
if other_data_size >= self.data_size_list0[-1]:
return 0
random.init_from_bin_len(last_hash, buf_size)
pos = bisect.bisect_left(self.data_size_list0, other_data_size)
final_pos = pos + random.next() % (len(self.data_size_list0) - pos)
return self.data_size_list0[final_pos] - other_data_size
class auth_chain_e(auth_chain_d):
def __init__(self, method):
super(auth_chain_e, self).__init__(method)
self.salt = b"auth_chain_e"
self.no_compatible_method = 'auth_chain_e'
def rnd_data_len(self, buf_size, last_hash, random):
random.init_from_bin_len(last_hash, buf_size)
other_data_size = buf_size + self.server_info.overhead
if other_data_size >= self.data_size_list0[-1]:
return 0
pos = bisect.bisect_left(self.data_size_list0, other_data_size)
return self.data_size_list0[pos] - other_data_size
class auth_chain_f(auth_chain_e):
def __init__(self, method):
super(auth_chain_f, self).__init__(method)
self.salt = b"auth_chain_f"
self.no_compatible_method = 'auth_chain_f'
def set_server_info(self, server_info):
self.server_info = server_info
try:
max_client = int(server_info.protocol_param.split('#')[0])
except:
max_client = 64
try:
self.key_change_interval = int(server_info.protocol_param.split('#')[1])
except:
self.key_change_interval = 60 * 60 * 24
self.key_change_datetime_key = int(int(time.time()) / self.key_change_interval)
self.key_change_datetime_key_bytes = []
for i in range(7, -1, -1):
self.key_change_datetime_key_bytes.append((self.key_change_datetime_key >> (8 * i)) & 0xFF)
self.server_info.data.set_max_client(max_client)
self.init_data_size(self.server_info.key)
def init_data_size(self, key):
if self.data_size_list0:
self.data_size_list0 = []
random = xorshift128plus()
new_key = bytearray(key)
for i in range(0, 8):
new_key[i] ^= self.key_change_datetime_key_bytes[i]
random.init_from_bin(new_key)
list_len = random.next() % (8 + 16) + (4 + 8)
for i in range(0, list_len):
self.data_size_list0.append(int(random.next() % 2340 % 2040 % 1440))
self.data_size_list0.sort()
old_len = len(self.data_size_list0)
self.check_and_patch_data_size(random)
if old_len != len(self.data_size_list0):
self.data_size_list0.sort()
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections, struct
from functools import partial
from . import ir
from .. import flags, dalvik
from .jvmops import *
from . import arraytypes as arrays
from . import scalartypes as scalars
from . import typeinference, mathops
from .optimization import stack
from .. import util
# Code for converting Dalvik bytecode to an intermediate representation.
# Effectively, this is just Java bytecode instructions with some abstractions
# for later optimization.
_ilfdaOrd = [scalars.INT, scalars.LONG, scalars.FLOAT, scalars.DOUBLE, scalars.OBJ].index
_newArrayCodes = {('['+t).encode(): v for t, v in zip('ZCFDBSIJ', range(4, 12))}
_arrStoreOps = {t.encode(): v for t, v in zip('IJFD BCS', range(IASTORE, SASTORE+1))}
_arrLoadOps = {t.encode(): v for t, v in zip('IJFD BCS', range(IALOAD, SALOAD+1))}
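# _newArrayCodes maps primitive array descriptors to the JVM newarray "atype"
# codes (4=boolean .. 11=long). _arrStoreOps/_arrLoadOps map element types to
# the typed xASTORE/xALOAD opcodes; the space in 'IJFD BCS' skips the object
# slot, so object arrays fall back to AASTORE/AALOAD via .get().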
# For generating IR instructions corresponding to a single Dalvik instruction
class IRBlock:
def __init__(self, parent, pos):
self.type_data = parent.types[pos]
self.pool = parent.pool
self.delay_consts = parent.opts.delay_consts
self.pos = pos
self.instructions = [ir.Label(pos)]
def add(self, jvm_instr):
self.instructions.append(jvm_instr)
def _other(self, bytecode):
self.add(ir.Other(bytecode=bytecode))
def u8(self, op): self._other(struct.pack('>B', op))
def u8u8(self, op, x): self._other(struct.pack('>BB', op, x))
def u8u16(self, op, x): self._other(struct.pack('>BH', op, x))
# wide non iinc
def u8u8u16(self, op, op2, x): self._other(struct.pack('>BBH', op, op2, x))
# invokeinterface
def u8u16u8u8(self, op, x, y, z): self._other(struct.pack('>BHBB', op, x, y, z))
def ldc(self, index):
if index < 256:
self.add(ir.OtherConstant(bytecode=bytes([LDC, index])))
else:
self.add(ir.OtherConstant(bytecode=struct.pack('>BH', LDC_W, index)))
def load(self, reg, stype, desc=None, clsname=None):
# if we know the register to be 0/null, don't bother loading
if self.type_data.arrs[reg] == arrays.NULL:
self.const(0, stype)
else:
self.add(ir.RegAccess(reg, stype, store=False))
# cast to appropriate type if tainted
if stype == scalars.OBJ and self.type_data.tainted[reg]:
assert(desc is None or clsname is None)
if clsname is None:
# remember to handle arrays - also fallthrough if desc is None
clsname = desc[1:-1] if (desc and desc.startswith(b'L')) else desc
if clsname is not None and clsname != b'java/lang/Object':
self.u8u16(CHECKCAST, self.pool.class_(clsname))
def loadAsArray(self, reg):
at = self.type_data.arrs[reg]
if at == arrays.NULL:
self.const_null()
else:
self.add(ir.RegAccess(reg, scalars.OBJ, store=False))
if self.type_data.tainted[reg]:
if at == arrays.INVALID:
# needs to be some type of object array, so just cast to Object[]
self.u8u16(CHECKCAST, self.pool.class_(b'[Ljava/lang/Object;'))
else:
# note - will throw if actual type is boolean[] but there's not
# much we can do in this case
self.u8u16(CHECKCAST, self.pool.class_(at))
def store(self, reg, stype):
self.add(ir.RegAccess(reg, stype, store=True))
def return_(self, stype=None):
if stype is None:
self.u8(RETURN)
else:
self.u8(IRETURN + _ilfdaOrd(stype))
def const(self, val, stype):
assert((1<<64) > val >= 0)
if stype == scalars.OBJ:
assert(val == 0)
self.const_null()
else:
# If constant pool is simple, assume we're in non-opt mode and only use
# the constant pool for generating constants instead of calculating
# bytecode sequences for them. If we're in opt mode, pass None for pool
# to generate bytecode instead
pool = None if self.delay_consts else self.pool
self.add(ir.PrimConstant(stype, val, pool=pool))
def const_null(self):
self.add(ir.OtherConstant(bytecode=bytes([ACONST_NULL])))
def fillarraysub(self, op, cbs, pop=True):
gen = stack.genDups(len(cbs), 0 if pop else 1)
for i, cb in enumerate(cbs):
for bytecode in next(gen):
self._other(bytecode)
self.const(i, scalars.INT)
cb()
self.u8(op)
# may need to pop at end
for bytecode in next(gen):
self._other(bytecode)
def newarray(self, desc):
if desc in _newArrayCodes:
self.u8u8(NEWARRAY, _newArrayCodes[desc])
else:
# can be either multidim array or object array descriptor
desc = desc[1:]
if desc.startswith(b'L'):
desc = desc[1:-1]
self.u8u16(ANEWARRAY, self.pool.class_(desc))
def fillarraydata(self, op, stype, vals):
self.fillarraysub(op, [partial(self.const, val, stype) for val in vals])
def cast(self, dex, reg, index):
self.load(reg, scalars.OBJ)
self.u8u16(CHECKCAST, self.pool.class_(dex.clsType(index)))
self.store(reg, scalars.OBJ)
def goto(self, target):
self.add(ir.Goto(target))
def if_(self, op, target):
self.add(ir.If(op, target))
def switch(self, default, jumps):
jumps = {util.s32(k):v for k,v in jumps.items() if v != default}
if jumps:
self.add(ir.Switch(default, jumps))
else:
self.goto(default)
def generateExceptLabels(self):
s_ind = 0
e_ind = len(self.instructions)
# assume only Other instructions can throw
while s_ind < e_ind and not isinstance(self.instructions[s_ind], ir.Other):
s_ind += 1
while s_ind < e_ind and not isinstance(self.instructions[e_ind-1], ir.Other):
e_ind -= 1
assert(s_ind < e_ind)
start_lbl, end_lbl = ir.Label(), ir.Label()
self.instructions.insert(s_ind, start_lbl)
self.instructions.insert(e_ind+1, end_lbl)
return start_lbl, end_lbl
class IRWriter:
def __init__(self, pool, method, types, opts):
self.pool = pool
self.method = method
self.types = types
self.opts = opts
self.iblocks = {}
self.flat_instructions = None
self.excepts = []
self.labels = {}
self.initial_args = None
self.exception_redirects = {}
self.except_starts = set()
self.except_ends = set()
self.jump_targets = set()
# used to detect jump targets with a unique predecessor
self.target_pred_counts = collections.defaultdict(int)
self.numregs = None # will be set once registers are allocated (see registers.py)
self.upper_bound = None # upper bound on code length
def calcInitialArgs(self, nregs, scalar_ptypes):
self.initial_args = args = []
regoff = nregs - len(scalar_ptypes)
for i, st in enumerate(scalar_ptypes):
if st == scalars.INVALID:
args.append(None)
else:
args.append((i + regoff, st))
def addExceptionRedirect(self, target):
return self.exception_redirects.setdefault(target, ir.Label())
def createBlock(self, instr):
block = IRBlock(self, instr.pos)
self.iblocks[block.pos] = block
self.labels[block.pos] = block.instructions[0]
return block
def flatten(self):
instructions = []
for pos in sorted(self.iblocks):
if pos in self.exception_redirects:
# check if we can put handler pop in front of block
if instructions and not instructions[-1].fallsthrough():
instructions.append(self.exception_redirects.pop(pos))
instructions.append(ir.Other(bytecode=bytes([POP])))
# if not, leave it in dict to be redirected later
# now add instructions for actual block
instructions += self.iblocks[pos].instructions
# exception handler pops that couldn't be placed inline
# in this case, just put them at the end with a goto back to the handler
for target in sorted(self.exception_redirects):
instructions.append(self.exception_redirects[target])
instructions.append(ir.Other(bytecode=bytes([POP])))
instructions.append(ir.Goto(target))
self.flat_instructions = instructions
self.iblocks = self.exception_redirects = None
def replaceInstrs(self, replace):
if replace:
instructions = []
for instr in self.flat_instructions:
instructions.extend(replace.get(instr, [instr]))
self.flat_instructions = instructions
assert(len(set(instructions)) == len(instructions))
def calcUpperBound(self):
# Get an upper bound on the size of the bytecode
size = 0
for ins in self.flat_instructions:
if ins.bytecode is None:
size += ins.max
else:
size += len(ins.bytecode)
self.upper_bound = size
return size
################################################################################
def visitNop(method, dex, instr_d, type_data, block, instr):
pass
def visitMove(method, dex, instr_d, type_data, block, instr):
for st in (scalars.INT, scalars.OBJ, scalars.FLOAT):
if st & type_data.prims[instr.args[1]]:
block.load(instr.args[1], st)
block.store(instr.args[0], st)
def visitMoveWide(method, dex, instr_d, type_data, block, instr):
for st in (scalars.LONG, scalars.DOUBLE):
if st & type_data.prims[instr.args[1]]:
block.load(instr.args[1], st)
block.store(instr.args[0], st)
def visitMoveResult(method, dex, instr_d, type_data, block, instr):
st = scalars.fromDesc(instr.prev_result)
block.store(instr.args[0], st)
def visitReturn(method, dex, instr_d, type_data, block, instr):
if method.id.return_type == b'V':
block.return_()
else:
st = scalars.fromDesc(method.id.return_type)
block.load(instr.args[0], st, desc=method.id.return_type)
block.return_(st)
def visitConst32(method, dex, instr_d, type_data, block, instr):
val = instr.args[1] % (1<<32)
block.const(val, scalars.INT)
block.store(instr.args[0], scalars.INT)
block.const(val, scalars.FLOAT)
block.store(instr.args[0], scalars.FLOAT)
if not val:
block.const_null()
block.store(instr.args[0], scalars.OBJ)
def visitConst64(method, dex, instr_d, type_data, block, instr):
val = instr.args[1] % (1<<64)
block.const(val, scalars.LONG)
block.store(instr.args[0], scalars.LONG)
block.const(val, scalars.DOUBLE)
block.store(instr.args[0], scalars.DOUBLE)
def visitConstString(method, dex, instr_d, type_data, block, instr):
val = dex.string(instr.args[1])
block.ldc(block.pool.string(val))
block.store(instr.args[0], scalars.OBJ)
def visitConstClass(method, dex, instr_d, type_data, block, instr):
# Could use dex.type here since the JVM doesn't care, but this is cleaner
val = dex.clsType(instr.args[1])
block.ldc(block.pool.class_(val))
block.store(instr.args[0], scalars.OBJ)
def visitMonitorEnter(method, dex, instr_d, type_data, block, instr):
block.load(instr.args[0], scalars.OBJ)
block.u8(MONITORENTER)
def visitMonitorExit(method, dex, instr_d, type_data, block, instr):
block.load(instr.args[0], scalars.OBJ)
block.u8(MONITOREXIT)
def visitCheckCast(method, dex, instr_d, type_data, block, instr):
block.cast(dex, instr.args[0], instr.args[1])
def visitInstanceOf(method, dex, instr_d, type_data, block, instr):
block.load(instr.args[1], scalars.OBJ)
block.u8u16(INSTANCEOF, block.pool.class_(dex.clsType(instr.args[2])))
block.store(instr.args[0], scalars.INT)
def visitArrayLen(method, dex, instr_d, type_data, block, instr):
block.loadAsArray(instr.args[1])
block.u8(ARRAYLENGTH)
block.store(instr.args[0], scalars.INT)
def visitNewInstance(method, dex, instr_d, type_data, block, instr):
block.u8u16(NEW, block.pool.class_(dex.clsType(instr.args[1])))
block.store(instr.args[0], scalars.OBJ)
def visitNewArray(method, dex, instr_d, type_data, block, instr):
block.load(instr.args[1], scalars.INT)
block.newarray(dex.type(instr.args[2]))
block.store(instr.args[0], scalars.OBJ)
def visitFilledNewArray(method, dex, instr_d, type_data, block, instr):
regs = instr.args[1]
block.const(len(regs), scalars.INT)
block.newarray(dex.type(instr.args[0]))
st, elet = arrays.eletPair(arrays.fromDesc(dex.type(instr.args[0])))
op = _arrStoreOps.get(elet, AASTORE)
cbs = [partial(block.load, reg, st) for reg in regs]
# if not followed by move-result, don't leave it on the stack
mustpop = instr_d.get(instr.pos2).type != dalvik.MoveResult
block.fillarraysub(op, cbs, pop=mustpop)
def visitFillArrayData(method, dex, instr_d, type_data, block, instr):
width, arrdata = instr_d[instr.args[1]].fillarrdata
at = type_data.arrs[instr.args[0]]
block.loadAsArray(instr.args[0])
if at is arrays.NULL:
block.u8(ATHROW)
else:
if len(arrdata) == 0:
# fill-array-data throws a NPE if array is null even when
# there is 0 data, so we need to add an instruction that
# throws a NPE in this case
block.u8(ARRAYLENGTH)
block.u8(POP)
else:
st, elet = arrays.eletPair(at)
# check if we need to sign extend
if elet == b'B':
arrdata = [util.signExtend(x, 8) & 0xFFFFFFFF for x in arrdata]
elif elet == b'S':
arrdata = [util.signExtend(x, 16) & 0xFFFFFFFF for x in arrdata]
block.fillarraydata(_arrStoreOps.get(elet, AASTORE), st, arrdata)
def visitThrow(method, dex, instr_d, type_data, block, instr):
block.load(instr.args[0], scalars.OBJ, clsname=b'java/lang/Throwable')
block.u8(ATHROW)
def visitGoto(method, dex, instr_d, type_data, block, instr):
block.goto(instr.args[0])
def visitSwitch(method, dex, instr_d, type_data, block, instr):
block.load(instr.args[0], scalars.INT)
switchdata = instr_d[instr.args[1]].switchdata
default = instr.pos2
jumps = {k:(offset + instr.pos) % (1<<32) for k, offset in switchdata.items()}
block.switch(default, jumps)
def visitCmp(method, dex, instr_d, type_data, block, instr):
op = [FCMPL, FCMPG, DCMPL, DCMPG, LCMP][instr.opcode - 0x2d]
st = [scalars.FLOAT, scalars.FLOAT, scalars.DOUBLE, scalars.DOUBLE, scalars.LONG][instr.opcode - 0x2d]
block.load(instr.args[1], st)
block.load(instr.args[2], st)
block.u8(op)
block.store(instr.args[0], scalars.INT)
def visitIf(method, dex, instr_d, type_data, block, instr):
st = type_data.prims[instr.args[0]] & type_data.prims[instr.args[1]]
if st & scalars.INT:
block.load(instr.args[0], scalars.INT)
block.load(instr.args[1], scalars.INT)
op = [IF_ICMPEQ, IF_ICMPNE, IF_ICMPLT, IF_ICMPGE, IF_ICMPGT, IF_ICMPLE][instr.opcode - 0x32]
else:
block.load(instr.args[0], scalars.OBJ)
block.load(instr.args[1], scalars.OBJ)
op = [IF_ACMPEQ, IF_ACMPNE][instr.opcode - 0x32]
block.if_(op, instr.args[2])
def visitIfZ(method, dex, instr_d, type_data, block, instr):
if type_data.prims[instr.args[0]] & scalars.INT:
block.load(instr.args[0], scalars.INT)
op = [IFEQ, IFNE, IFLT, IFGE, IFGT, IFLE][instr.opcode - 0x38]
else:
block.load(instr.args[0], scalars.OBJ)
op = [IFNULL, IFNONNULL][instr.opcode - 0x38]
block.if_(op, instr.args[1])
def visitArrayGet(method, dex, instr_d, type_data, block, instr):
at = type_data.arrs[instr.args[1]]
if at is arrays.NULL:
block.const_null()
block.u8(ATHROW)
else:
block.loadAsArray(instr.args[1])
block.load(instr.args[2], scalars.INT)
st, elet = arrays.eletPair(at)
block.u8(_arrLoadOps.get(elet, AALOAD))
block.store(instr.args[0], st)
def visitArrayPut(method, dex, instr_d, type_data, block, instr):
at = type_data.arrs[instr.args[1]]
if at is arrays.NULL:
block.const_null()
block.u8(ATHROW)
else:
block.loadAsArray(instr.args[1])
block.load(instr.args[2], scalars.INT)
st, elet = arrays.eletPair(at)
block.load(instr.args[0], st)
block.u8(_arrStoreOps.get(elet, AASTORE))
def visitInstanceGet(method, dex, instr_d, type_data, block, instr):
field_id = dex.field_id(instr.args[2])
st = scalars.fromDesc(field_id.desc)
block.load(instr.args[1], scalars.OBJ, clsname=field_id.cname)
block.u8u16(GETFIELD, block.pool.field(field_id.triple()))
block.store(instr.args[0], st)
def visitInstancePut(method, dex, instr_d, type_data, block, instr):
field_id = dex.field_id(instr.args[2])
st = scalars.fromDesc(field_id.desc)
block.load(instr.args[1], scalars.OBJ, clsname=field_id.cname)
block.load(instr.args[0], st, desc=field_id.desc)
block.u8u16(PUTFIELD, block.pool.field(field_id.triple()))
def visitStaticGet(method, dex, instr_d, type_data, block, instr):
field_id = dex.field_id(instr.args[1])
st = scalars.fromDesc(field_id.desc)
block.u8u16(GETSTATIC, block.pool.field(field_id.triple()))
block.store(instr.args[0], st)
def visitStaticPut(method, dex, instr_d, type_data, block, instr):
field_id = dex.field_id(instr.args[1])
st = scalars.fromDesc(field_id.desc)
block.load(instr.args[0], st, desc=field_id.desc)
block.u8u16(PUTSTATIC, block.pool.field(field_id.triple()))
def visitInvoke(method, dex, instr_d, type_data, block, instr):
isstatic = instr.type == dalvik.InvokeStatic
called_id = dex.method_id(instr.args[0])
sts = scalars.paramTypes(called_id, static=isstatic)
descs = called_id.getSpacedParamTypes(isstatic=isstatic)
assert(len(sts) == len(instr.args[1]) == len(descs))
for st, desc, reg in zip(sts, descs, instr.args[1]):
if st != scalars.INVALID: # skip long/double tops
block.load(reg, st, desc=desc)
op = {
dalvik.InvokeVirtual: INVOKEVIRTUAL,
dalvik.InvokeSuper: INVOKESPECIAL,
dalvik.InvokeDirect: INVOKESPECIAL,
dalvik.InvokeStatic: INVOKESTATIC,
dalvik.InvokeInterface: INVOKEINTERFACE,
}[instr.type]
if instr.type == dalvik.InvokeInterface:
block.u8u16u8u8(op, block.pool.imethod(called_id.triple()), len(descs), 0)
else:
block.u8u16(op, block.pool.method(called_id.triple()))
# check if we need to pop result instead of leaving on stack
if instr_d.get(instr.pos2).type != dalvik.MoveResult:
if called_id.return_type != b'V':
st = scalars.fromDesc(called_id.return_type)
block.u8(POP2 if scalars.iswide(st) else POP)
def visitUnaryOp(method, dex, instr_d, type_data, block, instr):
op, srct, destt = mathops.UNARY[instr.opcode]
block.load(instr.args[1], srct)
# Dalvik's not-int/not-long have no direct Java bytecode equivalent, so they require special handling: we emit x ^ -1 instead
if op == IXOR:
block.u8(ICONST_M1)
elif op == LXOR:
block.u8(ICONST_M1)
block.u8(I2L)
block.u8(op)
block.store(instr.args[0], destt)
def visitBinaryOp(method, dex, instr_d, type_data, block, instr):
op, st, st2 = mathops.BINARY[instr.opcode]
# index arguments as negative so it works for regular and 2addr forms
block.load(instr.args[-2], st)
block.load(instr.args[-1], st2)
block.u8(op)
block.store(instr.args[0], st)
def visitBinaryOpConst(method, dex, instr_d, type_data, block, instr):
op = mathops.BINARY_LIT[instr.opcode]
if op == ISUB: # rsub
block.const(instr.args[2] % (1<<32), scalars.INT)
block.load(instr.args[1], scalars.INT)
else:
block.load(instr.args[1], scalars.INT)
block.const(instr.args[2] % (1<<32), scalars.INT)
block.u8(op)
block.store(instr.args[0], scalars.INT)
################################################################################
VISIT_FUNCS = {
dalvik.Nop: visitNop,
dalvik.Move: visitMove,
dalvik.MoveWide: visitMoveWide,
dalvik.MoveResult: visitMoveResult,
dalvik.Return: visitReturn,
dalvik.Const32: visitConst32,
dalvik.Const64: visitConst64,
dalvik.ConstString: visitConstString,
dalvik.ConstClass: visitConstClass,
dalvik.MonitorEnter: visitMonitorEnter,
dalvik.MonitorExit: visitMonitorExit,
dalvik.CheckCast: visitCheckCast,
dalvik.InstanceOf: visitInstanceOf,
dalvik.ArrayLen: visitArrayLen,
dalvik.NewInstance: visitNewInstance,
dalvik.NewArray: visitNewArray,
dalvik.FilledNewArray: visitFilledNewArray,
dalvik.FillArrayData: visitFillArrayData,
dalvik.Throw: visitThrow,
dalvik.Goto: visitGoto,
dalvik.Switch: visitSwitch,
dalvik.Cmp: visitCmp,
dalvik.If: visitIf,
dalvik.IfZ: visitIfZ,
dalvik.ArrayGet: visitArrayGet,
dalvik.ArrayPut: visitArrayPut,
dalvik.InstanceGet: visitInstanceGet,
dalvik.InstancePut: visitInstancePut,
dalvik.StaticGet: visitStaticGet,
dalvik.StaticPut: visitStaticPut,
dalvik.InvokeVirtual: visitInvoke,
dalvik.InvokeSuper: visitInvoke,
dalvik.InvokeDirect: visitInvoke,
dalvik.InvokeStatic: visitInvoke,
dalvik.InvokeInterface: visitInvoke,
dalvik.UnaryOp: visitUnaryOp,
dalvik.BinaryOp: visitBinaryOp,
dalvik.BinaryOpConst: visitBinaryOpConst,
}
def writeBytecode(pool, method, opts):
dex = method.dex
code = method.code
instr_d = {instr.pos: instr for instr in code.bytecode}
types, all_handlers = typeinference.doInference(dex, method, code, code.bytecode, instr_d)
scalar_ptypes = scalars.paramTypes(method.id, static=(method.access & flags.ACC_STATIC))
writer = IRWriter(pool, method, types, opts)
writer.calcInitialArgs(code.nregs, scalar_ptypes)
for instr in code.bytecode:
if instr.pos not in types: # skip unreachable instructions
continue
type_data = types[instr.pos]
block = writer.createBlock(instr)
VISIT_FUNCS[instr.type](method, dex, instr_d, type_data, block, instr)
for instr in sorted(all_handlers, key=lambda instr: instr.pos):
assert(all_handlers[instr])
if instr.pos not in types: # skip unreachable instructions
continue
start, end = writer.iblocks[instr.pos].generateExceptLabels()
writer.except_starts.add(start)
writer.except_ends.add(end)
for ctype, handler_pos in all_handlers[instr]:
# If handler doesn't use the caught exception, we need to redirect to a pop instead
if instr_d.get(handler_pos).type != dalvik.MoveResult:
target = writer.addExceptionRedirect(handler_pos)
else:
target = writer.labels[handler_pos]
writer.jump_targets.add(target)
writer.target_pred_counts[target] += 1
# When catching Throwable, we can use the special index 0 instead,
# potentially saving a constant pool entry or two
jctype = 0 if ctype == b'java/lang/Throwable' else pool.class_(ctype)
writer.excepts.append((start, end, target, jctype))
writer.flatten()
# find jump targets (in addition to exception handler targets)
for instr in writer.flat_instructions:
for target in instr.targets():
label = writer.labels[target]
writer.jump_targets.add(label)
writer.target_pred_counts[label] += 1
return writer
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import tempfile
import time
import media.audio_tools as audio_tools
# This little construct ensures we can run even if we have a bad version of
# psutil installed. If so, we'll just skip the test that needs it.
_HAS_CORRECT_PSUTIL_VERSION = False
try:
import psutil
if 'version_info' in dir(psutil):
# If psutil has any version info at all, it's recent enough.
_HAS_CORRECT_PSUTIL_VERSION = True
except ImportError, e:
pass
# Note: pyauto_functional must come before pyauto.
import pyauto_functional
import pyauto
import pyauto_utils
import webrtc_test_base
class WebrtcCallTest(webrtc_test_base.WebrtcTestBase):
"""Test we can set up a WebRTC call and disconnect it.
Prerequisites: This test case must run on a machine with a webcam, either
fake or real, and with some kind of audio device. You must make the
peerconnection_server target before you run.
The test case will launch a custom binary
(peerconnection_server) which will allow two WebRTC clients to find each
other. For more details, see the source code which is available at the site
http://code.google.com/p/libjingle/source/browse/ (make sure to browse to
trunk/talk/examples/peerconnection/server).
"""
def setUp(self):
pyauto.PyUITest.setUp(self)
self.StartPeerConnectionServer()
def tearDown(self):
self.StopPeerConnectionServer()
pyauto.PyUITest.tearDown(self)
self.assertEquals('', self.CheckErrorsAndCrashes())
def _SimpleWebrtcCall(self, request_video, request_audio, duration_seconds=0):
"""Tests we can call and hang up with WebRTC.
This test exercises pretty much the whole happy-case for the WebRTC
JavaScript API. Currently, it exercises a normal call setup using the API
defined at http://dev.w3.org/2011/webrtc/editor/webrtc.html. The API is
still evolving.
The test will load the supplied HTML file, which in turn will load different
javascript files depending on which version of the signaling protocol
we are running.
The supplied HTML file will be loaded in two tabs and tell the web
pages to start up WebRTC, which will acquire video and audio devices on the
system. This will launch a dialog in Chrome which we click past using the
automation controller. Then, we will order both tabs to connect to the server,
which will make the two tabs aware of each other. Once that is done we order
one tab to call the other.
We make sure that the javascript tells us that the call succeeded, lets it
run for a while and try to hang up the call after that. We verify video is
playing by using the video detector.
Args:
request_video: Whether to request video.
request_audio: Whether to request audio.
duration_seconds: The number of seconds to keep the call up before
shutting it down.
"""
self._SetupCall(request_video=request_video, request_audio=request_audio)
if duration_seconds:
print 'Call up: sleeping %d seconds...' % duration_seconds
      time.sleep(duration_seconds)
# The hang-up will automatically propagate to the second tab.
self.HangUp(from_tab_with_index=0)
self.WaitUntilHangUpVerified(tab_index=1)
self.Disconnect(tab_index=0)
self.Disconnect(tab_index=1)
# Ensure we didn't miss any errors.
self.AssertNoFailures(tab_index=0)
self.AssertNoFailures(tab_index=1)
def testWebrtcJsep01Call(self):
"""Uses a draft of the PeerConnection API, using JSEP01."""
self._LoadPageInTwoTabs('webrtc_jsep01_test.html')
self._SimpleWebrtcCall(request_video=True, request_audio=True)
def testWebrtcVideoOnlyJsep01Call(self):
self._LoadPageInTwoTabs('webrtc_jsep01_test.html')
self._SimpleWebrtcCall(request_video=True, request_audio=False)
def testWebrtcAudioOnlyJsep01Call(self):
self._LoadPageInTwoTabs('webrtc_jsep01_test.html')
self._SimpleWebrtcCall(request_video=False, request_audio=True)
def testWebrtcJsep01CallAndVerifyAudioIsPlaying(self):
"""Test that WebRTC is capable of transmitting at least some audio.
This test has some nontrivial prerequisites:
1. The target system must have an active microphone, it must be selected
as default input for the user that runs the test, and it must record a
certain minimum level of ambient noise (for instance server fans).
Verify that you are getting ambient noise in the microphone by either
recording it directly or checking your OS' microphone settings. Amplify
the microphone if the background noise is too low. The microphone should
capture noise consistently above 5% of its total range.
2. The target system must be configured to record its own input*.
* On Linux:
1. # sudo apt-get install pavucontrol
2. For the user who will run the test: # pavucontrol
3. In a separate terminal, # arecord dummy
4. In pavucontrol, go to the recording tab.
5. For the ALSA plug-in [aplay]: ALSA Capture from, change from <x> to
<Monitor of x>, where x is whatever your primary sound device is called.
6. Try launching chrome as the target user on the target machine, try
playing, say, a YouTube video, and record with # arecord -f dat mine.dat.
Verify the recording with aplay (should have recorded what you played
from chrome).
"""
if not self.IsLinux():
print 'This test is only available on Linux for now.'
return
self._LoadPageInTwoTabs('webrtc_jsep01_test.html')
def AudioCall():
self._SimpleWebrtcCall(request_video=False, request_audio=True,
duration_seconds=5)
self._RecordAudioAndEnsureNotSilent(record_duration_seconds=10,
sound_producing_function=AudioCall)
def testJsep01AndMeasureCpu20Seconds(self):
if not _HAS_CORRECT_PSUTIL_VERSION:
      print ('WARNING: Cannot run cpu/mem measurements with this version of '
'psutil. You must have at least psutil 0.4.1 installed for the '
'version of python you are running this test with.')
return
self._LoadPageInTwoTabs('webrtc_jsep01_test.html')
# Prepare CPU measurements.
renderer_process = self._GetChromeRendererProcess(tab_index=0)
renderer_process.get_cpu_percent()
self._SimpleWebrtcCall(request_video=True,
request_audio=True,
duration_seconds=20)
cpu_usage = renderer_process.get_cpu_percent(interval=0)
mem_usage_bytes = renderer_process.get_memory_info()[0]
mem_usage_kb = float(mem_usage_bytes) / 1024
pyauto_utils.PrintPerfResult('cpu', 'jsep01_call', cpu_usage, '%')
pyauto_utils.PrintPerfResult('memory', 'jsep01_call', mem_usage_kb, 'KiB')
def testLocalPreview(self):
"""Brings up a local preview and ensures video is playing.
This test will launch a window with a single tab and run a getUserMedia call
which will give us access to the webcam and microphone. Then the javascript
code will hook up the webcam data to the local-view video tag. We will
detect video in that tag using the video detector, and if we see video
moving the test passes.
"""
url = self.GetFileURLForDataPath('webrtc', 'webrtc_jsep01_test.html')
self.NavigateToURL(url)
self.assertEquals('ok-got-stream', self.GetUserMedia(tab_index=0))
self._StartDetectingVideo(tab_index=0, video_element='local-view')
self._WaitForVideo(tab_index=0, expect_playing=True)
def testHandlesNewGetUserMediaRequestSeparately(self):
"""Ensures WebRTC doesn't allow new requests to piggy-back on old ones."""
url = self.GetFileURLForDataPath('webrtc', 'webrtc_jsep01_test.html')
self.NavigateToURL(url)
self.AppendTab(pyauto.GURL(url))
self.GetUserMedia(tab_index=0)
self.GetUserMedia(tab_index=1)
self.Connect("user_1", tab_index=0)
self.Connect("user_2", tab_index=1)
self.EstablishCall(from_tab_with_index=0, to_tab_with_index=1)
self.assertEquals('failed-with-error-1',
self.GetUserMedia(tab_index=0, action='cancel'))
self.assertEquals('failed-with-error-1',
self.GetUserMedia(tab_index=0, action='dismiss'))
def testMediaStreamTrackEnable(self):
"""Tests MediaStreamTrack.enable on tracks connected to a PeerConnection.
    This test will check that if a local track is muted, the remote end doesn't
    get video. It also tests that if a remote track is disabled, the video is
    not updated in the video tag."""
# TODO(perkj): Also verify that the local preview is muted when the
# feature is implemented.
# TODO(perkj): Verify that audio is muted.
self._LoadPageInTwoTabs('webrtc_jsep01_test.html')
self._SetupCall(request_video=True, request_audio=True)
select_video_function = \
'function(local) { return local.getVideoTracks()[0]; }'
self.assertEquals('ok-video-toggled-to-false', self.ExecuteJavascript(
'toggleLocalStream(' + select_video_function + ', "video")',
tab_index=0))
self._WaitForVideo(tab_index=1, expect_playing=False)
self.assertEquals('ok-video-toggled-to-true', self.ExecuteJavascript(
'toggleLocalStream(' + select_video_function + ', "video")',
tab_index=0))
self._WaitForVideo(tab_index=1, expect_playing=True)
    # Test disabling a remote stream. The remote video is not played.
self.assertEquals('ok-video-toggled-to-false', self.ExecuteJavascript(
'toggleRemoteStream(' + select_video_function + ', "video")',
tab_index=1))
self._WaitForVideo(tab_index=1, expect_playing=False)
self.assertEquals('ok-video-toggled-to-true', self.ExecuteJavascript(
'toggleRemoteStream(' + select_video_function + ', "video")',
tab_index=1))
self._WaitForVideo(tab_index=1, expect_playing=True)
def _LoadPageInTwoTabs(self, test_page):
url = self.GetFileURLForDataPath('webrtc', test_page)
self.NavigateToURL(url)
self.AppendTab(pyauto.GURL(url))
def _SetupCall(self, request_video, request_audio):
"""Gets user media and establishes a call.
Assumes that two tabs are already opened with a suitable test page.
Args:
request_video: Whether to request video.
request_audio: Whether to request audio.
"""
self.assertEquals('ok-got-stream', self.GetUserMedia(
tab_index=0, request_video=request_video, request_audio=request_audio))
self.assertEquals('ok-got-stream', self.GetUserMedia(
tab_index=1, request_video=request_video, request_audio=request_audio))
self.Connect('user_1', tab_index=0)
self.Connect('user_2', tab_index=1)
self.EstablishCall(from_tab_with_index=0, to_tab_with_index=1)
if request_video:
self._StartDetectingVideo(tab_index=0, video_element='remote-view')
self._StartDetectingVideo(tab_index=1, video_element='remote-view')
self._WaitForVideo(tab_index=0, expect_playing=True)
self._WaitForVideo(tab_index=1, expect_playing=True)
def _StartDetectingVideo(self, tab_index, video_element):
self.assertEquals('ok-started', self.ExecuteJavascript(
'startDetection("%s", "frame-buffer", 320, 240)' % video_element,
        tab_index=tab_index))
def _WaitForVideo(self, tab_index, expect_playing):
# TODO(phoglund): Remove this hack if we manage to get a more stable Linux
# bot to run these tests.
if self.IsLinux():
print "Linux; pretending to wait for video..."
time.sleep(1)
return
expect_retval='video-playing' if expect_playing else 'video-not-playing'
video_playing = self.WaitUntil(
function=lambda: self.ExecuteJavascript('isVideoPlaying()',
tab_index=tab_index),
expect_retval=expect_retval)
self.assertTrue(video_playing,
msg= 'Timed out while waiting for isVideoPlaying to ' +
'return ' + expect_retval + '.')
def _GetChromeRendererProcess(self, tab_index):
"""Returns the Chrome renderer process as a psutil process wrapper."""
tab_info = self.GetBrowserInfo()['windows'][0]['tabs'][tab_index]
renderer_id = tab_info['renderer_pid']
if not renderer_id:
self.fail('Can not find the tab renderer process.')
return psutil.Process(renderer_id)
def _RecordAudioAndEnsureNotSilent(self, record_duration_seconds,
sound_producing_function):
_SIZE_OF_EMPTY_DAT_FILE_BYTES = 44
# The two temp files that will be potentially used in the test.
temp_file = None
file_no_silence = None
try:
temp_file = self._CreateTempFile()
      record_thread = audio_tools.AudioRecorderThread(
          record_duration_seconds, temp_file)
record_thread.start()
sound_producing_function()
record_thread.join()
if record_thread.error:
self.fail(record_thread.error)
file_no_silence = self._CreateTempFile()
audio_tools.RemoveSilence(temp_file, file_no_silence)
self.assertTrue(os.path.getsize(file_no_silence) >
_SIZE_OF_EMPTY_DAT_FILE_BYTES,
msg=('The test recorded only silence. Ensure your '
'machine is correctly configured for this test.'))
finally:
# Delete the temporary files used by the test.
if temp_file:
os.remove(temp_file)
if file_no_silence:
os.remove(file_no_silence)
def _CreateTempFile(self):
"""Returns an absolute path to an empty temp file."""
file_handle, path = tempfile.mkstemp(suffix='_webrtc.dat')
os.close(file_handle)
return path
if __name__ == '__main__':
pyauto_functional.Main()
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Board.status'
db.alter_column(u'boards_board', 'status_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['boards.Status'], null=True))
# Changing field 'Board.city'
db.alter_column(u'boards_board', 'city_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['boards.City'], null=True))
# Changing field 'Board.direction'
db.alter_column(u'boards_board', 'direction_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['boards.Direction'], null=True))
# Changing field 'Board.grp'
db.alter_column(u'boards_board', 'grp', self.gf('django.db.models.fields.FloatField')(null=True))
# Changing field 'Board.light'
db.alter_column(u'boards_board', 'light', self.gf('django.db.models.fields.NullBooleanField')(null=True))
# Changing field 'Board.price'
db.alter_column(u'boards_board', 'price', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=8, decimal_places=2))
# Changing field 'Board.material'
db.alter_column(u'boards_board', 'material', self.gf('django.db.models.fields.CharField')(max_length=100, null=True))
# Changing field 'Board.tax'
db.alter_column(u'boards_board', 'tax', self.gf('django.db.models.fields.CharField')(max_length=10, null=True))
# Changing field 'Board.montage_price'
db.alter_column(u'boards_board', 'montage_price', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=8, decimal_places=2))
# Changing field 'Board.height'
db.alter_column(u'boards_board', 'height', self.gf('django.db.models.fields.FloatField')(null=True))
# Changing field 'Board.width'
db.alter_column(u'boards_board', 'width', self.gf('django.db.models.fields.FloatField')(null=True))
# Changing field 'Board.coord'
db.alter_column(u'boards_board', 'coord', self.gf('django.contrib.gis.db.models.fields.PointField')(null=True))
# Changing field 'Board.address'
db.alter_column(u'boards_board', 'address', self.gf('django.db.models.fields.CharField')(max_length=200, null=True))
# Changing field 'Board.owner'
db.alter_column(u'boards_board', 'owner_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['boards.Company'], null=True))
# Changing field 'Board.ots'
db.alter_column(u'boards_board', 'ots', self.gf('django.db.models.fields.FloatField')(null=True))
# Changing field 'Board.cpt'
db.alter_column(u'boards_board', 'cpt', self.gf('django.db.models.fields.IntegerField')(max_length=6, null=True))
# Changing field 'Board.comment'
db.alter_column(u'boards_board', 'comment', self.gf('django.db.models.fields.CharField')(max_length=300, null=True))
# Changing field 'Board.side'
db.alter_column(u'boards_board', 'side', self.gf('django.db.models.fields.CharField')(max_length=2, null=True))
def backwards(self, orm):
# Changing field 'Board.status'
db.alter_column(u'boards_board', 'status_id', self.gf('django.db.models.fields.related.ForeignKey')(default=0, to=orm['boards.Status']))
# Changing field 'Board.city'
db.alter_column(u'boards_board', 'city_id', self.gf('django.db.models.fields.related.ForeignKey')(default=0, to=orm['boards.City']))
# Changing field 'Board.direction'
db.alter_column(u'boards_board', 'direction_id', self.gf('django.db.models.fields.related.ForeignKey')(default=0, to=orm['boards.Direction']))
# Changing field 'Board.grp'
db.alter_column(u'boards_board', 'grp', self.gf('django.db.models.fields.FloatField')(default=0))
# Changing field 'Board.light'
db.alter_column(u'boards_board', 'light', self.gf('django.db.models.fields.BooleanField')(default=False))
# Changing field 'Board.price'
db.alter_column(u'boards_board', 'price', self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=8, decimal_places=2))
# Changing field 'Board.material'
db.alter_column(u'boards_board', 'material', self.gf('django.db.models.fields.CharField')(default=0, max_length=100))
# Changing field 'Board.tax'
db.alter_column(u'boards_board', 'tax', self.gf('django.db.models.fields.CharField')(default=0, max_length=10))
# Changing field 'Board.montage_price'
db.alter_column(u'boards_board', 'montage_price', self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=8, decimal_places=2))
# Changing field 'Board.height'
db.alter_column(u'boards_board', 'height', self.gf('django.db.models.fields.FloatField')(default=0))
# Changing field 'Board.width'
db.alter_column(u'boards_board', 'width', self.gf('django.db.models.fields.FloatField')(default=0))
# Changing field 'Board.coord'
db.alter_column(u'boards_board', 'coord', self.gf('django.contrib.gis.db.models.fields.PointField')(default=0))
# Changing field 'Board.address'
db.alter_column(u'boards_board', 'address', self.gf('django.db.models.fields.CharField')(default=0, max_length=200))
# Changing field 'Board.owner'
db.alter_column(u'boards_board', 'owner_id', self.gf('django.db.models.fields.related.ForeignKey')(default=0, to=orm['boards.Company']))
# Changing field 'Board.ots'
db.alter_column(u'boards_board', 'ots', self.gf('django.db.models.fields.FloatField')(default=0))
# Changing field 'Board.cpt'
db.alter_column(u'boards_board', 'cpt', self.gf('django.db.models.fields.IntegerField')(default=0, max_length=6))
# Changing field 'Board.comment'
db.alter_column(u'boards_board', 'comment', self.gf('django.db.models.fields.CharField')(default=0, max_length=300))
# Changing field 'Board.side'
db.alter_column(u'boards_board', 'side', self.gf('django.db.models.fields.CharField')(default=0, max_length=2))
models = {
u'boards.board': {
'Meta': {'object_name': 'Board'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['boards.City']", 'null': 'True'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True'}),
'coord': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True'}),
'cpt': ('django.db.models.fields.IntegerField', [], {'max_length': '6', 'null': 'True'}),
'direction': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['boards.Direction']", 'null': 'True'}),
'grp': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'height': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'light': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'material': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'montage_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2'}),
'ots': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['boards.Company']", 'null': 'True'}),
'price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2'}),
'side': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['boards.Status']", 'null': 'True'}),
'tax': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'width': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
u'boards.calendar': {
'Meta': {'object_name': 'Calendar'},
'board': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['boards.Board']"}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'contract': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['boards.Contract']"}),
'end_date': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['boards.Status']"})
},
u'boards.city': {
'Meta': {'object_name': 'City'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['boards.Region']"})
},
u'boards.company': {
'Meta': {'object_name': 'Company'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'business_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['boards.City']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inn': ('django.db.models.fields.IntegerField', [], {'max_length': '10'}),
'kpp': ('django.db.models.fields.IntegerField', [], {'max_length': '9'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'okofp': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'payment_account': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['boards.Region']"}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '5'})
},
u'boards.contract': {
'Meta': {'object_name': 'Contract'},
'agent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'agent'", 'to': u"orm['boards.Company']"}),
'board': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['boards.Board']"}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['boards.Company']"}),
'sign_date': ('django.db.models.fields.DateTimeField', [], {}),
'start_date': ('django.db.models.fields.DateTimeField', [], {}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'boards.direction': {
'Meta': {'object_name': 'Direction'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '70'}),
'short_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3'})
},
u'boards.image': {
'Meta': {'object_name': 'Image'},
'board': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['boards.Board']"}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'})
},
u'boards.region': {
'Meta': {'object_name': 'Region'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
u'boards.status': {
'Meta': {'object_name': 'Status'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '70'})
}
}
complete_apps = ['boards']
|
|
'''
Input file parser for the LEMON parser generator.
'''
from ccruft import *
from error import *
from struct import *
from table import *
from sys import exit
MAXRHS = 1000
(
INITIALIZE,
WAITING_FOR_DECL_OR_RULE,
WAITING_FOR_DECL_KEYWORD,
WAITING_FOR_DECL_ARG,
WAITING_FOR_PRECEDENCE_SYMBOL,
WAITING_FOR_ARROW,
IN_RHS,
LHS_ALIAS_1,
LHS_ALIAS_2,
LHS_ALIAS_3,
RHS_ALIAS_1,
RHS_ALIAS_2,
PRECEDENCE_MARK_1,
PRECEDENCE_MARK_2,
RESYNC_AFTER_RULE_ERROR,
RESYNC_AFTER_DECL_ERROR,
WAITING_FOR_FALLBACK_ID,
WAITING_FOR_WILDCARD_ID,
) = range(18)
pstate = struct(
'pstate',
(
'filename', # Name of the input file
'tokenlineno', # Linenumber at which current token starts
'errorcnt', # Number of errors so far
'tokenstart', # Text of current token
'gp', # Global state vector
'state', # The state of the parser
'fallback', # The fallback token
'lhs', # Left-hand side of current rule
'lhsalias', # Alias for the LHS
'nrhs', # Number of right-hand side symbols seen
'rhs', # RHS symbols
'alias', # Aliases for each RHS symbol (or NULL)
'prevrule', # Previous rule parsed
'declkeyword', # Keyword of a declaration
'declargslot', # Where the declaration argument should be put
'insertLineMacro', # Add #line before declaration insert
'declassoc', # Assign this association to decl arguments
'preccounter', # Assign this precedence to decl arguments
'firstrule', # Pointer to first rule in the grammar
'lastrule', # Pointer to the most recently parsed rule
)
)
def parseonetoken(psp, x):
'''Parse a single token.'''
x = Strsafe(x) # Save the token permanently
if psp.state == INITIALIZE:
psp.prevrule = None
psp.preccounter = 0
psp.firstrule = psp.lastrule = None
psp.gp.nrule = 0
psp.state = WAITING_FOR_DECL_OR_RULE
if psp.state == WAITING_FOR_DECL_OR_RULE:
if x[0] == '%':
psp.state = WAITING_FOR_DECL_KEYWORD
elif x[0].islower():
psp.lhs = Symbol_new(x)
psp.nrhs = 0
psp.lhsalias = None
psp.state = WAITING_FOR_ARROW
elif x[0] == '{':
if True:
ErrorMsg(psp.filename, psp.tokenlineno,
"Code fragment actions are not supported.")
psp.errorcnt += 1
elif psp.prevrule is None:
ErrorMsg(psp.filename, psp.tokenlineno,
"There is not prior rule upon which to attach the code "
"fragment which begins on this line.")
psp.errorcnt += 1
elif psp.prevrule.code is not None:
ErrorMsg(psp.filename, psp.tokenlineno,
"Code fragment beginning on this line is not the first "
"to follow the previous rule.")
psp.errorcnt += 1
else:
psp.prevrule.line = psp.tokenlineno
psp.prevrule.code = x[1:]
elif x[0] == '[':
psp.state = PRECEDENCE_MARK_1
else:
ErrorMsg(psp.filename, psp.tokenlineno,
'Token "%s" should be either "%%" or a nonterminal name.',
x)
psp.errorcnt += 1
elif psp.state == PRECEDENCE_MARK_1:
if not x[0].isupper():
ErrorMsg(psp.filename, psp.tokenlineno,
"The precedence symbol must be a terminal.")
psp.errorcnt += 1
elif psp.prevrule is None:
ErrorMsg(psp.filename, psp.tokenlineno,
'There is no prior rule to assign precedence "[%s]".', x)
psp.errorcnt += 1
elif psp.prevrule.precsym is not None:
ErrorMsg(psp.filename, psp.tokenlineno,
"Precedence mark on this line is not the first to follow the previous rule.")
psp.errorcnt += 1
else:
psp.prevrule.precsym = Symbol_new(x)
psp.state = PRECEDENCE_MARK_2
elif psp.state == PRECEDENCE_MARK_2:
if x[0] != ']':
ErrorMsg(psp.filename, psp.tokenlineno,
'Missing "]" on precedence mark.')
psp.errorcnt += 1
psp.state = WAITING_FOR_DECL_OR_RULE
elif psp.state == WAITING_FOR_ARROW:
if x[:3] == '::=':
psp.state = IN_RHS
elif x[0] == '(':
psp.state = LHS_ALIAS_1
else:
ErrorMsg(psp.filename, psp.tokenlineno,
'Expected to see a ":" following the LHS symbol "%s".',
psp.lhs.name)
psp.errorcnt += 1
psp.state = RESYNC_AFTER_RULE_ERROR
elif psp.state == LHS_ALIAS_1:
if x[0].isalpha():
psp.lhsalias = x
psp.state = LHS_ALIAS_2
else:
ErrorMsg(psp.filename, psp.tokenlineno,
'"%s" is not a valid alias for the LHS "%s"\n',
x, psp.lhs.name)
psp.errorcnt += 1
psp.state = RESYNC_AFTER_RULE_ERROR
elif psp.state == LHS_ALIAS_2:
if x[0] == ')':
psp.state = LHS_ALIAS_3
else:
ErrorMsg(psp.filename, psp.tokenlineno,
'Missing ")" following LHS alias name "%s".',
psp.lhsalias)
psp.errorcnt += 1
psp.state = RESYNC_AFTER_RULE_ERROR
elif psp.state == LHS_ALIAS_3:
if x[:3] == '::=':
psp.state = IN_RHS
else:
ErrorMsg(psp.filename, psp.tokenlineno,
'Missing "->" following: "%s(%s)".',
psp.lhs.name, psp.lhsalias)
psp.errorcnt += 1
psp.state = RESYNC_AFTER_RULE_ERROR
elif psp.state == IN_RHS:
if x[0] == '.':
rp = rule(
ruleline = psp.tokenlineno,
rhs = psp.rhs[:psp.nrhs],
rhsalias = psp.alias[:psp.nrhs],
lhs = psp.lhs,
lhsalias = psp.lhsalias,
nrhs = psp.nrhs,
code = None,
precsym = None,
index = psp.gp.nrule,
lhsStart = False,
line = 0,
canReduce = False,
nextlhs = None,
next = None,
)
psp.gp.nrule += 1
rp.nextlhs = rp.lhs.rule
rp.lhs.rule = rp
if psp.firstrule is None:
psp.firstrule = psp.lastrule = rp
else:
psp.lastrule.next = rp
psp.lastrule = rp
psp.prevrule = rp
psp.state = WAITING_FOR_DECL_OR_RULE
elif x[0].isalpha():
if psp.nrhs >= MAXRHS:
ErrorMsg(psp.filename, psp.tokenlineno,
'Too many symbols on RHS of rule beginning at "%s".',
x)
psp.errorcnt += 1
psp.state = RESYNC_AFTER_RULE_ERROR
else:
psp.rhs[psp.nrhs] = Symbol_new(x)
psp.alias[psp.nrhs] = None
psp.nrhs += 1
elif x[0] in ('|', '/') and psp.nrhs > 0:
msp = psp.rhs[psp.nrhs - 1]
if msp.type != MULTITERMINAL:
origsp = msp
msp = symbol(
type = MULTITERMINAL,
nsubsym = 1,
subsym = [origsp],
name = origsp.name,
index = 0,
rule = None,
fallback = None,
prec = 0,
assoc = 0,
firstset = None,
_lambda = False,
useCnt = 0,
)
psp.rhs[psp.nrhs - 1] = msp
msp.nsubsym += 1
msp.subsym.append(Symbol_new(x[1:]))
if x[1].islower() or msp.subsym[0].name[0].islower():
ErrorMsg(psp.filename, psp.tokenlineno,
"Cannot form a compound containing a non-terminal")
psp.errorcnt += 1
elif x[0] == '(' and psp.nrhs > 0:
psp.state = RHS_ALIAS_1
else:
ErrorMsg(psp.filename, psp.tokenlineno,
'Illegal character on RHS of rule: "%s".', x)
psp.errorcnt += 1
psp.state = RESYNC_AFTER_RULE_ERROR
elif psp.state == RHS_ALIAS_1:
if x[0].isalpha():
psp.alias[psp.nrhs - 1] = x
psp.state = RHS_ALIAS_2
else:
ErrorMsg(psp.filename, psp.tokenlineno,
'"%s" is not a valid alias for the RHS symbol "%s"\n',
x, psp.rhs[psp.nrhs - 1].name)
psp.errorcnt += 1
psp.state = RESYNC_AFTER_RULE_ERROR
elif psp.state == RHS_ALIAS_2:
if x[0] == ')':
psp.state = IN_RHS
else:
ErrorMsg(psp.filename, psp.tokenlineno,
'Missing ")" following LHS alias name "%s".',
psp.lhsalias)
psp.errorcnt += 1
psp.state = RESYNC_AFTER_RULE_ERROR
elif psp.state == WAITING_FOR_DECL_KEYWORD:
if x[0].isalpha():
psp.declkeyword = x
psp.declargslot = None
psp.insertLineMacro = True
psp.state = WAITING_FOR_DECL_ARG
if strcmp(x, "name") == 0:
psp.declargslot = 'name'
psp.insertLineMacro = False
elif strcmp(x, "token_prefix") == 0:
psp.declargslot = 'tokenprefix'
psp.insertLineMacro = False
elif strcmp(x, "start_symbol") == 0:
psp.declargslot = 'start'
psp.insertLineMacro = False
elif strcmp(x, "left") == 0:
psp.preccounter += 1
psp.declassoc = LEFT
psp.state = WAITING_FOR_PRECEDENCE_SYMBOL
elif strcmp(x, "right") == 0:
psp.preccounter += 1
psp.declassoc = RIGHT
psp.state = WAITING_FOR_PRECEDENCE_SYMBOL
elif strcmp(x, "nonassoc") == 0:
psp.preccounter += 1
psp.declassoc = NONE
psp.state = WAITING_FOR_PRECEDENCE_SYMBOL
elif strcmp(x, "fallback") == 0:
psp.fallback = None
psp.state = WAITING_FOR_FALLBACK_ID
elif strcmp(x, "wildcard") == 0:
psp.state = WAITING_FOR_WILDCARD_ID
else:
ErrorMsg(psp.filename, psp.tokenlineno,
'Unknown declaration keyword: "%%%s".', x)
psp.errorcnt += 1
psp.state = RESYNC_AFTER_DECL_ERROR
else:
ErrorMsg(psp.filename, psp.tokenlineno,
'Illegal declaration keyword: "%s".', x)
psp.errorcnt += 1
psp.state = RESYNC_AFTER_DECL_ERROR
elif psp.state == WAITING_FOR_PRECEDENCE_SYMBOL:
if x[0] == '.':
psp.state = WAITING_FOR_DECL_OR_RULE
elif x[0].isupper():
sp = Symbol_new(x)
if sp.prec >= 0:
ErrorMsg(psp.filename, psp.tokenlineno,
'Symbol "%s" has already be given a precedence.',
x)
psp.errorcnt += 1
else:
sp.prec = psp.preccounter
sp.assoc = psp.declassoc
else:
ErrorMsg(psp.filename, psp.tokenlineno,
"""Can't assign a precedence to "%s".""", x)
psp.errorcnt += 1
elif psp.state == WAITING_FOR_DECL_ARG:
if x[0] in ('{', '"') or x[0].isalnum():
zNew = x
if zNew[0] in ('"', '{'):
zNew = zNew[1:]
zOld = getattr(psp.gp, psp.declargslot)
if not zOld:
zOld = ""
zBuf = zOld
if psp.insertLineMacro:
if zBuf and zBuf[-1] != '\n':
zBuf += '\n'
zBuf += "#line %d " % psp.tokenlineno
zBuf += '"'
zBuf += psp.filename.replace('\\', '\\\\')
zBuf += '"'
zBuf += '\n'
zBuf += zNew
setattr(psp.gp, psp.declargslot, zBuf)
psp.state = WAITING_FOR_DECL_OR_RULE
else:
ErrorMsg(psp.filename, psp.tokenlineno,
"Illegal argument to %%%s: %s",
psp.declkeyword, x)
psp.errorcnt += 1
psp.state = RESYNC_AFTER_DECL_ERROR
elif psp.state == WAITING_FOR_FALLBACK_ID:
if x[0] == '.':
psp.state = WAITING_FOR_DECL_OR_RULE
elif not x[0].isupper():
ErrorMsg(psp.filename, psp.tokenlineno,
'%%fallback argument "%s" should be a token', x)
psp.errorcnt += 1
else:
sp = Symbol_new(x)
if psp.fallback is None:
psp.fallback = sp
elif sp.fallback:
ErrorMsg(psp.filename, psp.tokenlineno,
"More than one fallback assigned to token %s", x)
psp.errorcnt += 1
else:
sp.fallback = psp.fallback
psp.gp.has_fallback = 1
elif psp.state == WAITING_FOR_WILDCARD_ID:
if x[0] == '.':
psp.state = WAITING_FOR_DECL_OR_RULE
elif not x[0].isupper():
ErrorMsg(psp.filename, psp.tokenlineno,
'%%wildcard argument "%s" should be a token', x)
psp.errorcnt += 1
else:
sp = Symbol_new(x)
if psp.gp.wildcard is None:
psp.gp.wildcard = sp
else:
ErrorMsg(psp.filename, psp.tokenlineno, "Extra wildcard to token: %s", x)
psp.errorcnt += 1
elif psp.state in (RESYNC_AFTER_RULE_ERROR, RESYNC_AFTER_DECL_ERROR):
if x[0] == '.':
psp.state = WAITING_FOR_DECL_OR_RULE
elif x[0] == '%':
psp.state = WAITING_FOR_DECL_KEYWORD
return
# In spite of its name, this function is really a scanner. It reads in
# the entire input file (all at once), then tokenizes it. Each token
# is passed to the function "parseonetoken" which builds all the
# appropriate data structures in the global state vector "gp".
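# As an illustration (not part of the original file), a grammar line such as
#
#     expr(A) ::= expr(B) PLUS term(C). [PLUS]
#
# is split by the scanner below into the token stream
#
#     expr ( A ) ::= expr ( B ) PLUS term ( C ) . [ PLUS ]
#
# and each token is fed to parseonetoken(), driving it through the IN_RHS,
# *_ALIAS_* and PRECEDENCE_MARK_* states defined above.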
def Parse(gp):
startline = 0
ps = pstate(
gp = gp,
filename = gp.filename,
errorcnt = 0,
state = INITIALIZE,
tokenlineno = 0,
tokenstart = None,
fallback = None,
lhs = None,
lhsalias = None,
nrhs = 0,
rhs = [None] * MAXRHS,
alias = [None] * MAXRHS,
prevrule = None,
declkeyword = None,
declargslot = None,
insertLineMacro = False,
declassoc = 0,
preccounter = 0,
firstrule = None,
lastrule = None,
)
# Begin by reading the input file
try:
fp = open(ps.filename, "rb")
except IOError:
ErrorMsg(ps.filename, 0, "Can't open this file for reading.")
gp.errorcnt += 1
return
filebuf = fp.read()
fp.close()
lineno = 1
# Now scan the text of the input file
cp = 0
while cp < len(filebuf):
c = filebuf[cp]
# Keep track of the line number
if c == '\n':
lineno += 1
# Skip all white space
if c.isspace():
cp += 1
continue
# Skip C++ style comments
if filebuf[cp:cp+2] == "//":
cp += 2
while cp < len(filebuf):
if filebuf[cp] == '\n':
break
cp += 1
continue
# Skip C style comments
if filebuf[cp:cp+2] == "/*":
cp += 2
while cp < len(filebuf):
if filebuf[cp] == '\n':
lineno += 1
if filebuf[cp-1:cp+1] == '*/':
cp += 1
break
cp += 1
continue
ps.tokenstart = cp # Mark the beginning of the token
ps.tokenlineno = lineno # Linenumber on which token begins
if c == '"':
# String literals
cp += 1
while cp < len(filebuf):
c = filebuf[cp]
if c == '"':
nextcp = cp + 1
break
if c == '\n':
lineno += 1
cp += 1
else:
                ErrorMsg(ps.filename, ps.tokenlineno,
"String starting on this line is not terminated "
"before the end of the file.")
ps.errorcnt += 1
nextcp = cp
elif c == '{':
# A block of C code
cp += 1
level = 1
while cp < len(filebuf) and (level > 1 or filebuf[cp] != '}'):
c = filebuf[cp]
if c == '\n':
lineno += 1
elif c == '{':
level += 1
elif c == '}':
level -= 1
elif filebuf[cp:cp+2] == "/*":
# Skip comments
cp += 2
while cp < len(filebuf):
c = filebuf[cp]
if filebuf[cp] == '\n':
lineno += 1
if filebuf[cp-1:cp+1] == '*/':
cp += 1
break
cp += 1
elif filebuf[cp:cp+2] == "//":
# Skip C++ style comments too
cp += 2
while cp < len(filebuf):
if filebuf[cp] == '\n':
lineno += 1
break
cp += 1
elif c == "'" or c == '"':
# String and character literals
startchar = c
prevc = 0
cp += 1
while (cp < len(filebuf) and
(filebuf[cp] != startchar or prevc == '\\')
):
c = filebuf[cp]
if c == '\n':
lineno += 1
if prevc == '\\':
prevc = 0
else:
prevc = c
cp += 1
cp += 1
if cp == len(filebuf):
ErrorMsg(ps.filename, ps.tokenlineno,
"C code starting on this line is not terminated "
"before the end of the file.")
ps.errorcnt += 1
nextcp = cp
else:
nextcp = cp + 1
elif c.isalnum():
# Identifiers
while c.isalnum() or c == '_':
cp += 1
                if cp >= len(filebuf):
break
c = filebuf[cp]
nextcp = cp
elif filebuf[cp:cp+3] == "::=":
# The operator "::="
cp += 3
nextcp = cp
elif (c in ('/', '|')) and cp+1 < len(filebuf) and filebuf[cp+1].isalpha():
cp += 2
while cp < len(filebuf):
c = filebuf[cp]
if not (c.isalnum() or c == '_'):
break
cp += 1
nextcp = cp
else:
# All other (one character) operators
cp += 1
nextcp = cp
# Parse the token
token = filebuf[ps.tokenstart:cp]
parseonetoken(ps, token)
cp = nextcp
gp.rule = ps.firstrule
gp.errorcnt = ps.errorcnt
return
|
|
import torch
import warnings
from torch.optim.optimizer import Optimizer
import math
import itertools as it
import torch.optim as optim
warnings.filterwarnings("once")
class Ranger(Optimizer):
# https://github.com/lessw2020/Ranger-Deep-Learning-Optimizer/blob/master/ranger.py
def __init__(
self,
params,
lr=1e-3,
alpha=0.5,
k=6,
N_sma_threshhold=5,
betas=(0.95, 0.999),
eps=1e-5,
weight_decay=0,
):
# parameter checks
if not 0.0 <= alpha <= 1.0:
raise ValueError(f"Invalid slow update rate: {alpha}")
if not 1 <= k:
raise ValueError(f"Invalid lookahead steps: {k}")
if not lr > 0:
raise ValueError(f"Invalid Learning Rate: {lr}")
if not eps > 0:
raise ValueError(f"Invalid eps: {eps}")
# parameter comments:
# beta1 (momentum) of .95 seems to work better than .90...
# N_sma_threshold of 5 seems better in testing than 4.
# In both cases, worth testing on your dataset (.90 vs .95, 4 vs 5) to make sure which works best for you.
# prep defaults and init torch.optim base
defaults = dict(
lr=lr,
alpha=alpha,
k=k,
step_counter=0,
betas=betas,
N_sma_threshhold=N_sma_threshhold,
eps=eps,
weight_decay=weight_decay,
)
super().__init__(params, defaults)
# adjustable threshold
self.N_sma_threshhold = N_sma_threshhold
# look ahead params
self.alpha = alpha
self.k = k
# radam buffer for state
self.radam_buffer = [[None, None, None] for ind in range(10)]
def __setstate__(self, state):
print("set state called")
super(Ranger, self).__setstate__(state)
def step(self, closure=None):
loss = None
# Evaluate averages and grad, update param tensors
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError(
"Ranger optimizer does not support sparse gradients"
)
p_data_fp32 = p.data.float()
state = self.state[p] # get state dict for this param
if (
len(state) == 0
): # if first time to run...init dictionary with our desired entries
# if self.first_run_check==0:
# self.first_run_check=1
# print("Initializing slow buffer...should not see this at load from saved model!")
state["step"] = 0
state["exp_avg"] = torch.zeros_like(p_data_fp32)
state["exp_avg_sq"] = torch.zeros_like(p_data_fp32)
# look ahead weight storage now in state dict
state["slow_buffer"] = torch.empty_like(p.data)
state["slow_buffer"].copy_(p.data)
else:
state["exp_avg"] = state["exp_avg"].type_as(p_data_fp32)
state["exp_avg_sq"] = state["exp_avg_sq"].type_as(p_data_fp32)
# begin computations
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
beta1, beta2 = group["betas"]
# compute variance mov avg
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
# compute mean moving avg
exp_avg.mul_(beta1).add_(1 - beta1, grad)
state["step"] += 1
buffered = self.radam_buffer[int(state["step"] % 10)]
if state["step"] == buffered[0]:
N_sma, step_size = buffered[1], buffered[2]
else:
buffered[0] = state["step"]
beta2_t = beta2 ** state["step"]
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state["step"] * beta2_t / (1 - beta2_t)
buffered[1] = N_sma
if N_sma > self.N_sma_threshhold:
step_size = math.sqrt(
(1 - beta2_t)
* (N_sma - 4)
/ (N_sma_max - 4)
* (N_sma - 2)
/ N_sma
* N_sma_max
/ (N_sma_max - 2)
) / (1 - beta1 ** state["step"])
else:
step_size = 1.0 / (1 - beta1 ** state["step"])
buffered[2] = step_size
if group["weight_decay"] != 0:
p_data_fp32.add_(-group["weight_decay"] * group["lr"], p_data_fp32)
if N_sma > self.N_sma_threshhold:
denom = exp_avg_sq.sqrt().add_(group["eps"])
p_data_fp32.addcdiv_(-step_size * group["lr"], exp_avg, denom)
else:
p_data_fp32.add_(-step_size * group["lr"], exp_avg)
p.data.copy_(p_data_fp32)
# integrated look ahead...
# we do it at the param level instead of group level
if state["step"] % group["k"] == 0:
slow_p = state["slow_buffer"] # get access to slow param tensor
slow_p.add_(
self.alpha, p.data - slow_p
) # (fast weights - slow weights) * alpha
p.data.copy_(
slow_p
) # copy interpolated weights to RAdam param tensor
return loss
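# Note on the N_sma branch above: it implements the RAdam variance
# rectification (Liu et al., 2019). With rho_inf = 2 / (1 - beta2) - 1
# (N_sma_max) and rho_t = rho_inf - 2*t*beta2^t / (1 - beta2^t) (N_sma),
# the adaptive update is scaled by
#
#     r_t = sqrt( (rho_t - 4) * (rho_t - 2) * rho_inf /
#                 ((rho_inf - 4) * (rho_inf - 2) * rho_t) )
#
# once rho_t exceeds the threshold; step_size folds r_t together with the
# usual Adam bias corrections sqrt(1 - beta2^t) and 1 / (1 - beta1^t).
# Below the threshold the second-moment denominator is skipped and the
# update falls back to bias-corrected momentum SGD.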
class RAdam(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
self.buffer = [[None, None, None] for ind in range(10)]
super(RAdam, self).__init__(params, defaults)
def __setstate__(self, state):
super(RAdam, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError("RAdam does not support sparse gradients")
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state["step"] = 0
state["exp_avg"] = torch.zeros_like(p_data_fp32)
state["exp_avg_sq"] = torch.zeros_like(p_data_fp32)
else:
state["exp_avg"] = state["exp_avg"].type_as(p_data_fp32)
state["exp_avg_sq"] = state["exp_avg_sq"].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
beta1, beta2 = group["betas"]
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
exp_avg.mul_(beta1).add_(1 - beta1, grad)
state["step"] += 1
buffered = self.buffer[int(state["step"] % 10)]
if state["step"] == buffered[0]:
N_sma, step_size = buffered[1], buffered[2]
else:
buffered[0] = state["step"]
beta2_t = beta2 ** state["step"]
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state["step"] * beta2_t / (1 - beta2_t)
buffered[1] = N_sma
if N_sma >= 5:
step_size = math.sqrt(
(1 - beta2_t)
* (N_sma - 4)
/ (N_sma_max - 4)
* (N_sma - 2)
/ N_sma
* N_sma_max
/ (N_sma_max - 2)
) / (1 - beta1 ** state["step"])
else:
step_size = 1.0 / (1 - beta1 ** state["step"])
buffered[2] = step_size
if group["weight_decay"] != 0:
p_data_fp32.add_(-group["weight_decay"] * group["lr"], p_data_fp32)
# more conservative since it's an approximated value
if N_sma >= 5:
denom = exp_avg_sq.sqrt().add_(group["eps"])
p_data_fp32.addcdiv_(-step_size * group["lr"], exp_avg, denom)
else:
p_data_fp32.add_(-step_size * group["lr"], exp_avg)
p.data.copy_(p_data_fp32)
return loss
# https://github.com/lonePatient/lookahead_pytorch/blob/master/optimizer.py
class Lookahead(Optimizer):
def __init__(self, base_optimizer, alpha=0.5, k=6):
if not 0.0 <= alpha <= 1.0:
raise ValueError(f"Invalid slow update rate: {alpha}")
if not 1 <= k:
raise ValueError(f"Invalid lookahead steps: {k}")
self.optimizer = base_optimizer
self.param_groups = self.optimizer.param_groups
self.alpha = alpha
self.k = k
for group in self.param_groups:
group["step_counter"] = 0
self.slow_weights = [
[p.clone().detach() for p in group["params"]] for group in self.param_groups
]
for w in it.chain(*self.slow_weights):
w.requires_grad = False
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
        self.optimizer.step()  # closure (if supplied) has already been evaluated above
for group, slow_weights in zip(self.param_groups, self.slow_weights):
group["step_counter"] += 1
if group["step_counter"] % self.k != 0:
continue
for p, q in zip(group["params"], slow_weights):
if p.grad is None:
continue
q.data.add_(self.alpha, p.data - q.data)
p.data.copy_(q.data)
return loss
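# The k-step sync above is the Lookahead rule (Zhang et al., 2019): every k
# steps of the inner (fast) optimizer, slow <- slow + alpha * (fast - slow),
# and the fast weights are then reset to the updated slow weights.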
class Ralamb(Optimizer):
"""
Ralamb optimizer (RAdam + LARS trick)
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
self.buffer = [[None, None, None] for ind in range(10)]
super(Ralamb, self).__init__(params, defaults)
def __setstate__(self, state):
super(Ralamb, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError("Ralamb does not support sparse gradients")
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state["step"] = 0
state["exp_avg"] = torch.zeros_like(p_data_fp32)
state["exp_avg_sq"] = torch.zeros_like(p_data_fp32)
else:
state["exp_avg"] = state["exp_avg"].type_as(p_data_fp32)
state["exp_avg_sq"] = state["exp_avg_sq"].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
beta1, beta2 = group["betas"]
# Decay the first and second moment running average coefficient
# m_t
exp_avg.mul_(beta1).add_(1 - beta1, grad)
# v_t
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
state["step"] += 1
buffered = self.buffer[int(state["step"] % 10)]
if state["step"] == buffered[0]:
N_sma, radam_step = buffered[1], buffered[2]
else:
buffered[0] = state["step"]
beta2_t = beta2 ** state["step"]
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state["step"] * beta2_t / (1 - beta2_t)
buffered[1] = N_sma
# more conservative since it's an approximated value
if N_sma >= 5:
radam_step = (
group["lr"]
* math.sqrt(
(1 - beta2_t)
* (N_sma - 4)
/ (N_sma_max - 4)
* (N_sma - 2)
/ N_sma
* N_sma_max
/ (N_sma_max - 2)
)
/ (1 - beta1 ** state["step"])
)
else:
radam_step = group["lr"] / (1 - beta1 ** state["step"])
buffered[2] = radam_step
if group["weight_decay"] != 0:
p_data_fp32.add_(-group["weight_decay"] * group["lr"], p_data_fp32)
weight_norm = p.data.pow(2).sum().sqrt().clamp(0, 10)
radam_norm = p_data_fp32.pow(2).sum().sqrt()
if weight_norm == 0 or radam_norm == 0:
trust_ratio = 1
else:
trust_ratio = weight_norm / radam_norm
state["weight_norm"] = weight_norm
state["adam_norm"] = radam_norm
state["trust_ratio"] = trust_ratio
# more conservative since it's an approximated value
if N_sma >= 5:
denom = exp_avg_sq.sqrt().add_(group["eps"])
p_data_fp32.addcdiv_(-radam_step * trust_ratio, exp_avg, denom)
else:
p_data_fp32.add_(-radam_step * trust_ratio, exp_avg)
p.data.copy_(p_data_fp32)
return loss
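# Note on the LARS/LAMB-style scaling above: trust_ratio rescales each
# tensor's RAdam step by clamp(||w||, 0, 10) / ||w'||, where w is the raw
# parameter and w' is the (possibly weight-decayed) working copy. With zero
# weight decay this reduces to min(1, 10 / ||w||), i.e. the update is damped
# for tensors whose norm exceeds the clamp.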
def get_optimizer(
optimizer: str = "Adam",
lookahead: bool = False,
model=None,
separate_decoder: bool = True,
lr: float = 1e-3,
lr_e: float = 1e-3,
):
"""
# https://github.com/lonePatient/lookahead_pytorch/blob/master/run.py
:param optimizer:
:param lookahead:
:param model:
:param separate_decoder:
:param lr:
:param lr_e:
:return:
"""
if separate_decoder:
params = [
{"params": model.decoder.parameters(), "lr": lr},
{"params": model.encoder.parameters(), "lr": lr_e},
]
else:
params = [{"params": model.parameters(), "lr": lr}]
if optimizer == "Adam":
optimizer = optim.Adam(params, lr=lr)
elif optimizer == "RAdam":
optimizer = RAdam(params, lr=lr)
elif optimizer == "Ralamb":
optimizer = Ralamb(params, lr=lr)
else:
raise ValueError("unknown base optimizer type")
if lookahead:
optimizer = Lookahead(base_optimizer=optimizer, k=5, alpha=0.5)
return optimizer
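# Usage sketch (illustration only, not part of the upstream helpers). The toy
# model and training constants below are assumptions chosen to show how
# get_optimizer() is wired up; it also assumes a PyTorch version that still
# accepts the deprecated add_/addcmul_(scalar, tensor, ...) overloads used by
# the optimizers above.
if __name__ == "__main__":
    import torch.nn as nn

    model = nn.Sequential(nn.Linear(10, 16), nn.ReLU(), nn.Linear(16, 1))
    # separate_decoder=False: the toy model has no encoder/decoder split.
    opt = get_optimizer(
        optimizer="RAdam",
        lookahead=True,
        model=model,
        separate_decoder=False,
        lr=1e-3,
    )
    x, y = torch.randn(32, 10), torch.randn(32, 1)
    for _ in range(3):
        model.zero_grad()
        loss = nn.functional.mse_loss(model(x), y)
        loss.backward()
        opt.step()  # one RAdam step; Lookahead syncs slow weights every k steps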
|
|
from __future__ import print_function
import matplotlib.delaunay as delaunay
import matplotlib._tri as _tri
import numpy as np
class Triangulation(object):
"""
An unstructured triangular grid consisting of npoints points and
ntri triangles. The triangles can either be specified by the user
or automatically generated using a Delaunay triangulation.
Read-only attributes:
*x*: array of shape (npoints).
x-coordinates of grid points.
*y*: array of shape (npoints).
y-coordinates of grid points.
*triangles*: integer array of shape (ntri,3).
For each triangle, the indices of the three points that make
up the triangle, ordered in an anticlockwise manner.
*mask*: optional boolean array of shape (ntri).
Which triangles are masked out.
*edges*: integer array of shape (?,2).
All edges of non-masked triangles. Each edge is the start
point index and end point index. Each edge (start,end and
end,start) appears only once.
*neighbors*: integer array of shape (ntri,3).
For each triangle, the indices of the three triangles that
share the same edges, or -1 if there is no such neighboring
triangle. neighbors[i,j] is the triangle that is the neighbor
to the edge from point index triangles[i,j] to point index
triangles[i,(j+1)%3].
For a Triangulation to be valid it must not have duplicate points,
triangles formed from colinear points, or overlapping triangles.
"""
def __init__(self, x, y, triangles=None, mask=None):
self.x = np.asarray(x, dtype=np.float64)
self.y = np.asarray(y, dtype=np.float64)
if self.x.shape != self.y.shape or len(self.x.shape) != 1:
raise ValueError("x and y must be equal-length 1-D arrays")
self.mask = None
self._edges = None
self._neighbors = None
if triangles is None:
# No triangulation specified, so use matplotlib.delaunay.
dt = delaunay.Triangulation(self.x, self.y)
self.triangles = np.asarray(
dt.to_client_point_indices(dt.triangle_nodes),
dtype=np.int32)
if mask is None:
self._edges = np.asarray(
dt.to_client_point_indices(dt.edge_db),
dtype=np.int32)
# Delaunay triangle_neighbors uses different edge indexing,
# so convert.
neighbors = np.asarray(dt.triangle_neighbors, dtype=np.int32)
self._neighbors = np.roll(neighbors, 1, axis=1)
else:
# Triangulation specified. Copy, since we may correct triangle
# orientation.
self.triangles = np.array(triangles, dtype=np.int32)
if self.triangles.ndim != 2 or self.triangles.shape[1] != 3:
raise ValueError('triangles must be a (?,3) array')
if self.triangles.max() >= len(self.x):
raise ValueError('triangles max element is out of bounds')
if self.triangles.min() < 0:
raise ValueError('triangles min element is out of bounds')
if mask is not None:
self.mask = np.asarray(mask, dtype=np.bool)
if len(self.mask.shape) != 1 or \
self.mask.shape[0] != self.triangles.shape[0]:
raise ValueError('mask array must have same length as '
'triangles array')
# Underlying C++ object is not created until first needed.
self._cpp_triangulation = None
# Default TriFinder not created until needed.
self._trifinder = None
def calculate_plane_coefficients(self, z):
"""
Calculate plane equation coefficients for all unmasked triangles from
the point (x,y) coordinates and specified z-array of shape (npoints).
Returned array has shape (npoints,3) and allows z-value at (x,y)
position in triangle tri to be calculated using
z = array[tri,0]*x + array[tri,1]*y + array[tri,2].
"""
return self.get_cpp_triangulation().calculate_plane_coefficients(z)
@property
def edges(self):
if self._edges is None:
self._edges = self.get_cpp_triangulation().get_edges()
return self._edges
def get_cpp_triangulation(self):
# Return the underlying C++ Triangulation object, creating it
# if necessary.
if self._cpp_triangulation is None:
self._cpp_triangulation = _tri.Triangulation(
self.x, self.y, self.triangles, self.mask, self._edges,
self._neighbors)
return self._cpp_triangulation
def get_masked_triangles(self):
"""
Return an array of triangles that are not masked.
"""
if self.mask is not None:
return self.triangles.compress(1 - self.mask, axis=0)
else:
return self.triangles
@staticmethod
def get_from_args_and_kwargs(*args, **kwargs):
"""
Return a Triangulation object from the args and kwargs, and
the remaining args and kwargs with the consumed values removed.
There are two alternatives: either the first argument is a
Triangulation object, in which case it is returned, or the args
and kwargs are sufficient to create a new Triangulation to
return. In the latter case, see Triangulation.__init__ for
the possible args and kwargs.
"""
if isinstance(args[0], Triangulation):
triangulation = args[0]
args = args[1:]
else:
x = args[0]
y = args[1]
args = args[2:] # Consumed first two args.
# Check triangles in kwargs then args.
triangles = kwargs.pop('triangles', None)
from_args = False
if triangles is None and len(args) > 0:
triangles = args[0]
from_args = True
if triangles is not None:
try:
triangles = np.asarray(triangles, dtype=np.int32)
except ValueError:
triangles = None
if triangles is not None and (triangles.ndim != 2 or
triangles.shape[1] != 3):
triangles = None
if triangles is not None and from_args:
args = args[1:] # Consumed first item in args.
# Check for mask in kwargs.
mask = kwargs.pop('mask', None)
triangulation = Triangulation(x, y, triangles, mask)
return triangulation, args, kwargs
def get_trifinder(self):
"""
Return the default :class:`matplotlib.tri.TriFinder` of this
triangulation, creating it if necessary. This allows the same
TriFinder object to be easily shared.
"""
if self._trifinder is None:
# Default TriFinder class.
from matplotlib.tri.trifinder import TrapezoidMapTriFinder
self._trifinder = TrapezoidMapTriFinder(self)
return self._trifinder
@property
def neighbors(self):
if self._neighbors is None:
self._neighbors = self.get_cpp_triangulation().get_neighbors()
return self._neighbors
def set_mask(self, mask):
"""
Set or clear the mask array. This is either None, or a boolean
array of shape (ntri).
"""
if mask is None:
self.mask = None
else:
self.mask = np.asarray(mask, dtype=np.bool)
if len(self.mask.shape) != 1 or \
self.mask.shape[0] != self.triangles.shape[0]:
raise ValueError('mask array must have same length as '
'triangles array')
# Set mask in C++ Triangulation.
if self._cpp_triangulation is not None:
self._cpp_triangulation.set_mask(self.mask)
# Clear derived fields so they are recalculated when needed.
self._edges = None
self._neighbors = None
# Recalculate TriFinder if it exists.
if self._trifinder is not None:
self._trifinder._initialize()
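# Usage sketch (illustration only, not part of the original module). The
# unit-square points, triangles and mask below are assumptions chosen to show
# the data layout described in the Triangulation docstring; it assumes the old
# numpy/matplotlib versions this module targets (np.bool, matplotlib.delaunay).
if __name__ == '__main__':
    x = [0.0, 1.0, 1.0, 0.0]
    y = [0.0, 0.0, 1.0, 1.0]
    triangles = [[0, 1, 2], [0, 2, 3]]   # both wound anticlockwise
    tri = Triangulation(x, y, triangles, mask=[False, True])
    print(tri.triangles)                 # the (2, 3) int32 array given above
    print(tri.get_masked_triangles())    # only [[0, 1, 2]] survives the mask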
|
|
"""A rather messy way of parsing commands."""
### This is better than what I made but I still probably want to redo it
from pag import words as pag_words
class Token:
T_VERB = 'Verb'
T_NOUN = 'Noun'
T_EXTRA = 'Extra'
T_DIRECTION = 'Direction'
def __init__(self, tvalue, ttype=T_VERB):
"""
tvalue : Token literal value.
ttype : Token type.
"""
self._ttype = ttype
self._tvalue = tvalue
def __str__(self):
return self._tvalue
def __repr__(self):
return "{0}<{1}>".format(self._ttype, self._tvalue)
def __eq__(self, other):
return other._ttype == self._ttype and other._tvalue == self._tvalue
class Preprocessor:
def __init__(self):
self._directions = pag_words.directions
self._extras = pag_words.extras
def supplement_words(self, words=None):
"""
"""
if words is not None:
if 'extras' in words:
self._extras = {**self._extras, **words['extras']}
if 'directions' in words:
                self._directions = {**self._directions, **words['directions']}
def prep(self, command):
"""
Pre-process a command.
Returns a sequence of string words
"""
# Normalise whitespaces
toreturn = command.lower().strip()
if len(toreturn) == 0:
return ""
word_seq = toreturn.split()
word_seq = [w for w in word_seq if len(w) > 0]
# See if command is only a direction
for i in self._directions:
            if toreturn == i:
# Return Verb, Noun
word_seq = ["go", i]
else:
for syn in self._directions[i]:
                    if toreturn == syn:
word_seq = ["go", i]
# remove extra words
removing = [word for word in word_seq if word in self._extras]
for word in removing:
word_seq.remove(word)
return word_seq
class Parser:
def __init__(self):
self._words = None
self._verbs = pag_words.verbs
self._nouns = pag_words.nouns
self._extras = pag_words.extras
self._directions = pag_words.directions
def supplement_words(self, words=None):
"""
"""
self._words = words
if words is not None:
if 'verbs' in words:
self._verbs = {**self._verbs, **words['verbs']}
if 'nouns' in words:
self._nouns = {**self._nouns, **words['nouns']}
if 'extras' in words:
self._extras = {**self._extras, **words['extras']}
if 'directions' in words:
                self._directions = {**self._directions, **words['directions']}
def eat_verb(self, word_seq):
"""
Try to consume a verb from a word sequence.
On success:
- Returns a new token of type T_VERB
- Consumed word removed from word_seq.
On failure:
- Returns None
- word_seq unchanged.
"""
if len(word_seq) == 0:
return None
word = word_seq[0]
for i in self._verbs:
if word.strip() == i:
word_seq.pop(0)
return Token(i)
else:
for syn in self._verbs[i]:
if (word.strip() == syn):
word_seq.pop(0)
return Token(i)
return None
def eat_noun(self, word_seq):
"""
Try to consume a noun from a word sequence.
On success:
- Returns a new token of type T_NOUN
- Consumed word removed from word_seq.
On failure:
- Returns None
- word_seq unchanged.
"""
if len(word_seq) == 0:
return None
# Attempt a greedy eat.
# I.e. attempt to eat 'toilet paper roll'
# even if we would succeed at 'toilet paper'
greedy_seq = self.merge_first_words(word_seq)
if len(greedy_seq) != len(word_seq):
greedy_res = self.eat_noun(greedy_seq)
if greedy_res is not None:
while len(greedy_seq) < len(word_seq):
word_seq.pop(0)
return greedy_res
word = word_seq[0]
for i in {**self._nouns, **self._directions}:
if word == i:
word_seq.pop(0)
return Token(i, Token.T_NOUN)
else:
for syn in {**self._nouns, **self._directions}[i]:
if word == syn:
word_seq.pop(0)
return Token(i, Token.T_NOUN)
def merge_first_words(self, word_seq):
"""
Merge first two words in a word sequence.
Needed for multi-word words, i.e. 'look at', 'toilet paper'
"""
if len(word_seq) > 1:
return [word_seq[0] + " " + word_seq[1]] + word_seq[2:]
return word_seq[:]
def parse(self, command):
prep = Preprocessor()
prep.supplement_words(self._words)
word_seq = prep.prep(command)
parsed_command = []
# command must start with a verb
verb = self.eat_verb(word_seq)
if verb is None and len(word_seq) > 1:
# Try again, but with multi-word commands. I.e. 'pick up'
word_seq = self.merge_first_words(word_seq)
verb = self.eat_verb(word_seq)
if verb is not None:
parsed_command.append(verb)
else:
print('What?')
return
# Next is a noun. Maybe.
if len(word_seq) > 0:
noun_result = self.eat_noun(word_seq)
if noun_result is not None:
parsed_command.append(noun_result)
else:
rest_of_command = " ".join(word_seq)
print(f'I don\'t understand the noun "{rest_of_command}".')
return
if len(word_seq) > 0:
rest_of_command = " ".join(word_seq)
print(f'I don\'t understand the extra word "{rest_of_command}".')
return
return parsed_command
def parse_command(command, words=None):
parser = Parser()
parser.supplement_words(words)
tokens = parser.parse(command)
if tokens is None:
return None
else:
return [t._tvalue for t in tokens]
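# Usage sketch (illustration only, not part of the original module). The word
# lists below are assumptions supplied via `words` so the example does not
# depend on the exact contents of pag.words; with different built-in lists the
# canonical verb returned for 'yoink' could differ.
if __name__ == '__main__':
    extra_words = {
        'verbs': {'take': ['yoink', 'pick up']},
        'nouns': {'toilet paper': ['tp']},
    }
    # Expected to print ['take', 'toilet paper'] with the lists above.
    print(parse_command('yoink tp', words=extra_words))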
|
|
import ssl
from typing import Any, Dict, Optional, Union
from unittest import mock
import pytest
from urllib3.exceptions import ProxySchemeUnsupported, SNIMissingWarning, SSLError
from urllib3.util import ssl_
class TestSSL:
@pytest.mark.parametrize(
"addr",
[
# IPv6
"::1",
"::",
"FE80::8939:7684:D84b:a5A4%251",
# IPv4
"127.0.0.1",
"8.8.8.8",
b"127.0.0.1",
# IPv6 w/ Zone IDs
"FE80::8939:7684:D84b:a5A4%251",
b"FE80::8939:7684:D84b:a5A4%251",
"FE80::8939:7684:D84b:a5A4%19",
b"FE80::8939:7684:D84b:a5A4%19",
],
)
def test_is_ipaddress_true(self, addr: Union[bytes, str]) -> None:
assert ssl_.is_ipaddress(addr)
@pytest.mark.parametrize(
"addr",
[
"www.python.org",
b"www.python.org",
"v2.sg.media-imdb.com",
b"v2.sg.media-imdb.com",
],
)
def test_is_ipaddress_false(self, addr: Union[bytes, str]) -> None:
assert not ssl_.is_ipaddress(addr)
@pytest.mark.parametrize(
["has_sni", "server_hostname", "should_warn"],
[
(True, "www.google.com", False),
(True, "127.0.0.1", False),
(False, "127.0.0.1", False),
(False, "www.google.com", True),
(True, None, False),
(False, None, False),
],
)
def test_sni_missing_warning_with_ip_addresses(
self,
monkeypatch: pytest.MonkeyPatch,
has_sni: bool,
server_hostname: Optional[str],
should_warn: bool,
) -> None:
monkeypatch.setattr(ssl_, "HAS_SNI", has_sni)
sock = mock.Mock()
context = mock.create_autospec(ssl_.SSLContext)
with mock.patch("warnings.warn") as warn:
ssl_.ssl_wrap_socket(
sock, server_hostname=server_hostname, ssl_context=context
)
if should_warn:
assert warn.call_count >= 1
warnings = [call[0][1] for call in warn.call_args_list]
assert SNIMissingWarning in warnings
else:
assert warn.call_count == 0
@pytest.mark.parametrize(
["ciphers", "expected_ciphers"],
[
(None, ssl_.DEFAULT_CIPHERS),
("ECDH+AESGCM:ECDH+CHACHA20", "ECDH+AESGCM:ECDH+CHACHA20"),
],
)
def test_create_urllib3_context_set_ciphers(
self,
monkeypatch: pytest.MonkeyPatch,
ciphers: Optional[str],
expected_ciphers: str,
) -> None:
context = mock.create_autospec(ssl_.SSLContext)
context.set_ciphers = mock.Mock()
context.options = 0
monkeypatch.setattr(ssl_, "SSLContext", lambda *_, **__: context)
assert ssl_.create_urllib3_context(ciphers=ciphers) is context
if ciphers is None and ssl_.USE_DEFAULT_SSLCONTEXT_CIPHERS:
assert context.set_ciphers.call_count == 0
else:
assert context.set_ciphers.call_count == 1
assert context.set_ciphers.call_args == mock.call(expected_ciphers)
def test_create_urllib3_no_context(self) -> None:
with mock.patch("urllib3.util.ssl_.SSLContext", None):
with pytest.raises(TypeError):
ssl_.create_urllib3_context()
def test_wrap_socket_given_context_no_load_default_certs(self) -> None:
context = mock.create_autospec(ssl_.SSLContext)
context.load_default_certs = mock.Mock()
sock = mock.Mock()
ssl_.ssl_wrap_socket(sock, ssl_context=context)
context.load_default_certs.assert_not_called()
def test_wrap_socket_given_ca_certs_no_load_default_certs(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
context = mock.create_autospec(ssl_.SSLContext)
context.load_default_certs = mock.Mock()
context.options = 0
monkeypatch.setattr(ssl_, "SSLContext", lambda *_, **__: context)
sock = mock.Mock()
ssl_.ssl_wrap_socket(sock, ca_certs="/tmp/fake-file")
context.load_default_certs.assert_not_called()
context.load_verify_locations.assert_called_with("/tmp/fake-file", None, None)
def test_wrap_socket_default_loads_default_certs(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
context = mock.create_autospec(ssl_.SSLContext)
context.load_default_certs = mock.Mock()
context.options = 0
monkeypatch.setattr(ssl_, "SSLContext", lambda *_, **__: context)
sock = mock.Mock()
ssl_.ssl_wrap_socket(sock)
context.load_default_certs.assert_called_with()
def test_wrap_socket_no_ssltransport(self) -> None:
with mock.patch("urllib3.util.ssl_.SSLTransport", None):
with pytest.raises(ProxySchemeUnsupported):
sock = mock.Mock()
ssl_.ssl_wrap_socket(sock, tls_in_tls=True)
@pytest.mark.parametrize(
["pha", "expected_pha"], [(None, None), (False, True), (True, True)]
)
def test_create_urllib3_context_pha(
self,
monkeypatch: pytest.MonkeyPatch,
pha: Optional[bool],
expected_pha: Optional[bool],
) -> None:
context = mock.create_autospec(ssl_.SSLContext)
context.set_ciphers = mock.Mock()
context.options = 0
context.post_handshake_auth = pha
monkeypatch.setattr(ssl_, "SSLContext", lambda *_, **__: context)
assert ssl_.create_urllib3_context() is context
assert context.post_handshake_auth == expected_pha
@pytest.mark.parametrize("use_default_sslcontext_ciphers", [True, False])
def test_create_urllib3_context_default_ciphers(
self, monkeypatch: pytest.MonkeyPatch, use_default_sslcontext_ciphers: bool
) -> None:
context = mock.create_autospec(ssl_.SSLContext)
context.set_ciphers = mock.Mock()
context.options = 0
monkeypatch.setattr(ssl_, "SSLContext", lambda *_, **__: context)
monkeypatch.setattr(
ssl_, "USE_DEFAULT_SSLCONTEXT_CIPHERS", use_default_sslcontext_ciphers
)
ssl_.create_urllib3_context()
if use_default_sslcontext_ciphers:
context.set_ciphers.assert_not_called()
else:
context.set_ciphers.assert_called_with(ssl_.DEFAULT_CIPHERS)
@pytest.mark.parametrize(
"kwargs",
[
{
"ssl_version": ssl.PROTOCOL_TLSv1,
"ssl_minimum_version": ssl.TLSVersion.MINIMUM_SUPPORTED,
},
{
"ssl_version": ssl.PROTOCOL_TLSv1,
"ssl_maximum_version": ssl.TLSVersion.TLSv1,
},
{
"ssl_version": ssl.PROTOCOL_TLSv1,
"ssl_minimum_version": ssl.TLSVersion.MINIMUM_SUPPORTED,
"ssl_maximum_version": ssl.TLSVersion.MAXIMUM_SUPPORTED,
},
],
)
def test_create_urllib3_context_ssl_version_and_ssl_min_max_version_errors(
self, kwargs: Dict[str, Any]
) -> None:
with pytest.raises(ValueError) as e:
ssl_.create_urllib3_context(**kwargs)
assert str(e.value) == (
"Can't specify both 'ssl_version' and either 'ssl_minimum_version' or 'ssl_maximum_version'"
)
@pytest.mark.parametrize(
"kwargs",
[
{
"ssl_version": ssl.PROTOCOL_TLS,
"ssl_minimum_version": ssl.TLSVersion.MINIMUM_SUPPORTED,
},
{
"ssl_version": ssl.PROTOCOL_TLS_CLIENT,
"ssl_minimum_version": ssl.TLSVersion.MINIMUM_SUPPORTED,
},
{
"ssl_version": None,
"ssl_minimum_version": ssl.TLSVersion.MINIMUM_SUPPORTED,
},
{"ssl_version": ssl.PROTOCOL_TLSv1, "ssl_minimum_version": None},
{"ssl_version": ssl.PROTOCOL_TLSv1, "ssl_maximum_version": None},
{
"ssl_version": ssl.PROTOCOL_TLSv1,
"ssl_minimum_version": None,
"ssl_maximum_version": None,
},
],
)
def test_create_urllib3_context_ssl_version_and_ssl_min_max_version_no_error(
self, kwargs: Dict[str, Any]
) -> None:
ssl_.create_urllib3_context(**kwargs)
def test_assert_fingerprint_raises_exception_on_none_cert(self) -> None:
with pytest.raises(SSLError):
ssl_.assert_fingerprint(
cert=None, fingerprint="55:39:BF:70:05:12:43:FA:1F:D1:BF:4E:E8:1B:07:1D"
)
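# Hedged illustration (not part of the upstream test module): the helper
# exercised above can also be called directly; the values mirror the
# parametrized cases.
def _example_is_ipaddress_direct_use() -> None:
    assert ssl_.is_ipaddress("127.0.0.1")
    assert ssl_.is_ipaddress(b"127.0.0.1")
    assert not ssl_.is_ipaddress(b"www.python.org")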
|
|
# -*- test-case-name: admin.test.test_release -*-
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Helper utilities for the Flocker release process.
XXX This script is not automatically checked by buildbot. See
https://clusterhq.atlassian.net/browse/FLOC-397
"""
import json
import os
import sys
import tempfile
from setuptools import __version__ as setuptools_version
from subprocess import check_call
from effect import (
Effect, sync_perform, ComposedDispatcher)
from effect.do import do
from characteristic import attributes
from git import GitCommandError, Repo
import requests
from twisted.python.filepath import FilePath
from twisted.python.usage import Options, UsageError
from twisted.python.constants import Names, NamedConstant
from twisted.web import template
import flocker
from flocker.common.version import get_package_key_suffix
from flocker.provision._effect import sequence, dispatcher as base_dispatcher
from flocker.common.version import (
get_doc_version,
get_pre_release,
is_pre_release,
is_release,
is_weekly_release,
target_release,
UnparseableVersion,
)
from flocker.provision._install import ARCHIVE_BUCKET
from .aws import (
boto_dispatcher,
UpdateS3RoutingRule,
UpdateS3ErrorPage,
ListS3Keys,
DeleteS3Keys,
CopyS3Keys,
DownloadS3KeyRecursively,
UploadToS3,
UploadToS3Recursively,
CreateCloudFrontInvalidation,
)
from .yum import (
yum_dispatcher,
CreateRepo,
DownloadPackagesFromRepository,
)
from .vagrant import vagrant_version
from .homebrew import make_recipe
from .packaging import available_distributions, DISTRIBUTION_NAME_MAP
DEV_ARCHIVE_BUCKET = 'clusterhq-dev-archive'
class NotTagged(Exception):
"""
    Raised if publishing to production and the version being published
    isn't tagged.
"""
class NotARelease(Exception):
"""
    Raised if trying to publish documentation to, or packages for, a version
    that isn't a release.
"""
class DocumentationRelease(Exception):
"""
Raised if trying to upload packages for a documentation release.
"""
class Environments(Names):
"""
The environments that documentation can be published to.
"""
PRODUCTION = NamedConstant()
STAGING = NamedConstant()
class TagExists(Exception):
"""
Raised if trying to release a version for which a tag already exists.
"""
class BranchExists(Exception):
"""
Raised if trying to release a version for which a branch already exists.
"""
class BaseBranchDoesNotExist(Exception):
"""
Raised if trying to release a version for which the expected base branch
does not exist.
"""
class MissingPreRelease(Exception):
"""
Raised if trying to release a pre-release for which the previous expected
pre-release does not exist.
"""
class NoPreRelease(Exception):
"""
Raised if trying to release a marketing release if no pre-release exists.
"""
class PushFailed(Exception):
"""
Raised if pushing to Git fails.
"""
class IncorrectSetuptoolsVersion(Exception):
"""
Raised if trying to create packages which require a specific version of
setuptools to be installed.
"""
@attributes([
'documentation_bucket',
'cloudfront_cname',
'dev_bucket',
])
class DocumentationConfiguration(object):
"""
The configuration for publishing documentation.
:ivar bytes documentation_bucket: The bucket to publish documentation to.
    :ivar bytes cloudfront_cname: A CNAME associated with the CloudFront
        distribution pointing at the documentation bucket.
:ivar bytes dev_bucket: The bucket buildbot uploads documentation to.
"""
DOCUMENTATION_CONFIGURATIONS = {
Environments.PRODUCTION:
DocumentationConfiguration(
documentation_bucket="clusterhq-docs",
cloudfront_cname="docs.clusterhq.com",
dev_bucket="clusterhq-dev-docs"),
Environments.STAGING:
DocumentationConfiguration(
documentation_bucket="clusterhq-staging-docs",
cloudfront_cname="docs.staging.clusterhq.com",
dev_bucket="clusterhq-dev-docs"),
}
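# Hedged illustration (not part of the original module): selecting the
# configuration for an environment and deriving the public documentation URL
# from its CloudFront CNAME, as test_redirects_main() does further below.
def _example_documentation_url(environment=Environments.STAGING):
    configuration = DOCUMENTATION_CONFIGURATIONS[environment]
    return 'https://' + configuration.cloudfront_cname + '/'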
@do
def publish_docs(flocker_version, doc_version, environment):
"""
Publish the Flocker documentation. The documentation for each version of
Flocker is uploaded to a development bucket on S3 by the build server and
this copies the documentation for a particular ``flocker_version`` and
    publishes it as ``doc_version``. Publishing a documentation version (for
    example ``0.3.0+doc1``) to a staging environment publishes it as the
    version being updated (``0.3.0``).
:param bytes flocker_version: The version of Flocker to publish the
documentation for.
:param bytes doc_version: The version to publish the documentation as.
:param Environments environment: The environment to publish the
documentation to.
:raises NotARelease: Raised if trying to publish to a version that isn't a
release.
    :raises NotTagged: Raised if publishing to production and the version
        being published isn't tagged.
"""
if not (is_release(doc_version)
or is_weekly_release(doc_version)
or is_pre_release(doc_version)):
raise NotARelease()
if environment == Environments.PRODUCTION:
if get_doc_version(flocker_version) != doc_version:
raise NotTagged()
configuration = DOCUMENTATION_CONFIGURATIONS[environment]
dev_prefix = '%s/' % (flocker_version,)
version_prefix = 'en/%s/' % (get_doc_version(doc_version),)
is_dev = not is_release(doc_version)
if is_dev:
stable_prefix = "en/devel/"
else:
stable_prefix = "en/latest/"
# Get the list of keys in the new documentation.
new_version_keys = yield Effect(
ListS3Keys(bucket=configuration.dev_bucket,
prefix=dev_prefix))
# Get the list of keys already existing for the given version.
# This should only be non-empty for documentation releases.
existing_version_keys = yield Effect(
ListS3Keys(bucket=configuration.documentation_bucket,
prefix=version_prefix))
# Copy the new documentation to the documentation bucket.
yield Effect(
CopyS3Keys(source_bucket=configuration.dev_bucket,
source_prefix=dev_prefix,
destination_bucket=configuration.documentation_bucket,
destination_prefix=version_prefix,
keys=new_version_keys))
# Delete any keys that aren't in the new documentation.
yield Effect(
DeleteS3Keys(bucket=configuration.documentation_bucket,
prefix=version_prefix,
keys=existing_version_keys - new_version_keys))
# Update the key used for error pages if we're publishing to staging or if
# we're publishing a marketing release to production.
if ((environment is Environments.STAGING) or
(environment is Environments.PRODUCTION and not is_dev)):
yield Effect(
UpdateS3ErrorPage(bucket=configuration.documentation_bucket,
target_prefix=version_prefix))
# Update the redirect for the stable URL (en/latest/ or en/devel/)
# to point to the new version. Returns the old target.
old_prefix = yield Effect(
UpdateS3RoutingRule(bucket=configuration.documentation_bucket,
prefix=stable_prefix,
target_prefix=version_prefix))
# If we have changed versions, get all the keys from the old version
if old_prefix:
previous_version_keys = yield Effect(
ListS3Keys(bucket=configuration.documentation_bucket,
prefix=old_prefix))
else:
previous_version_keys = set()
# The changed keys are the new keys, the keys that were deleted from this
# version, and the keys for the previous version.
changed_keys = (new_version_keys |
existing_version_keys |
previous_version_keys)
# S3 serves /index.html when given /, so any changed /index.html means
# that / changed as well.
# Note that we check for '/index.html' but remove 'index.html'
changed_keys |= {key_name[:-len('index.html')]
for key_name in changed_keys
if key_name.endswith('/index.html')}
# Always update the root.
changed_keys |= {''}
# The full paths are all the changed keys under the stable prefix, and
# the new version prefix. This set is slightly bigger than necessary.
changed_paths = {prefix + key_name
for key_name in changed_keys
for prefix in [stable_prefix, version_prefix]}
# Invalidate all the changed paths in cloudfront.
yield Effect(
CreateCloudFrontInvalidation(cname=configuration.cloudfront_cname,
paths=changed_paths))
class PublishDocsOptions(Options):
"""
Arguments for ``publish-docs`` script.
"""
    optParameters = [
        ["flocker-version", None, flocker.__version__,
         "The version of flocker from which the documentation was built."],
        ["doc-version", None, None,
         "The version to publish the documentation as.\n"
         "This will differ from \"flocker-version\" for staging uploads.\n"
         "Attempting to publish documentation as a documentation version "
         "publishes it as the version being updated.\n"
         "If ``doc-version`` is set to 0.3.0+doc1 the documentation will be "
         "published as 0.3.0.\n"],
]
optFlags = [
["production", None, "Publish documentation to production."],
]
environment = Environments.STAGING
def parseArgs(self):
if self['doc-version'] is None:
self['doc-version'] = get_doc_version(self['flocker-version'])
if self['production']:
self.environment = Environments.PRODUCTION
def publish_docs_main(args, base_path, top_level):
"""
:param list args: The arguments passed to the script.
:param FilePath base_path: The executable being run.
:param FilePath top_level: The top-level of the flocker repository.
"""
options = PublishDocsOptions()
try:
options.parseOptions(args)
except UsageError as e:
sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
raise SystemExit(1)
try:
sync_perform(
dispatcher=ComposedDispatcher([boto_dispatcher, base_dispatcher]),
effect=publish_docs(
flocker_version=options['flocker-version'],
doc_version=options['doc-version'],
environment=options.environment,
))
except NotARelease:
sys.stderr.write("%s: Can't publish non-release.\n"
% (base_path.basename(),))
raise SystemExit(1)
except NotTagged:
sys.stderr.write(
"%s: Can't publish non-tagged version to production.\n"
% (base_path.basename(),))
raise SystemExit(1)
class UploadOptions(Options):
"""
Options for uploading artifacts.
"""
    optParameters = [
        ["flocker-version", None, flocker.__version__,
         "The version of Flocker to upload artifacts for.\n"
         "Python packages for " + flocker.__version__ + " will be uploaded.\n"],
["target", None, ARCHIVE_BUCKET,
"The bucket to upload artifacts to.\n"],
["build-server", None,
b'http://build.clusterhq.com',
"The URL of the build-server.\n"],
["homebrew-tap", None, "[email protected]:ClusterHQ/homebrew-tap.git",
"The Git repository to add a Homebrew recipe to.\n"],
]
def parseArgs(self):
version = self['flocker-version']
if not (is_release(version)
or is_weekly_release(version)
or is_pre_release(version)):
raise NotARelease()
if get_doc_version(version) != version:
raise DocumentationRelease()
FLOCKER_PACKAGES = [
b'clusterhq-python-flocker',
b'clusterhq-flocker-cli',
b'clusterhq-flocker-node',
]
def publish_homebrew_recipe(homebrew_repo_url, version, source_bucket,
scratch_directory):
"""
Publish a Homebrew recipe to a Git repository.
    :param bytes homebrew_repo_url: URL of the Homebrew tap Git repository.
        This should be an SSH URL so as not to require a username and password.
:param bytes version: Version of Flocker to publish a recipe for.
:param bytes source_bucket: S3 bucket to get source distribution from.
:param FilePath scratch_directory: Temporary directory to create a recipe
in.
"""
url_template = 'https://{bucket}.s3.amazonaws.com/python/Flocker-{version}.tar.gz' # noqa
sdist_url = url_template.format(bucket=source_bucket, version=version)
content = make_recipe(
version=version,
sdist_url=sdist_url)
homebrew_repo = Repo.clone_from(
url=homebrew_repo_url,
to_path=scratch_directory.path)
recipe = 'flocker-{version}.rb'.format(version=version)
FilePath(homebrew_repo.working_dir).child(recipe).setContent(content)
homebrew_repo.index.add([recipe])
homebrew_repo.index.commit('Add recipe for Flocker version ' + version)
# Sometimes this raises an index error, and it seems to be a race
# condition. There should probably be a loop until push succeeds or
# whatever condition is necessary for it to succeed is met. FLOC-2043.
push_info = homebrew_repo.remotes.origin.push(homebrew_repo.head)[0]
if (push_info.flags & push_info.ERROR) != 0:
raise PushFailed()
@do
def publish_vagrant_metadata(version, box_url, scratch_directory, box_name,
target_bucket):
"""
Publish Vagrant metadata for a given version of a given box.
:param bytes version: The version of the Vagrant box to publish metadata
for.
:param bytes box_url: The URL of the Vagrant box.
:param FilePath scratch_directory: A directory to create Vagrant metadata
files in before uploading.
:param bytes box_name: The name of the Vagrant box to publish metadata for.
:param bytes target_bucket: S3 bucket to upload metadata to.
"""
metadata_filename = '{box_name}.json'.format(box_name=box_name)
# Download recursively because there may not be a metadata file
yield Effect(DownloadS3KeyRecursively(
source_bucket=target_bucket,
source_prefix='vagrant',
target_path=scratch_directory,
filter_extensions=(metadata_filename,)))
metadata = {
"description": "clusterhq/{box_name} box.".format(box_name=box_name),
"name": "clusterhq/{box_name}".format(box_name=box_name),
"versions": [],
}
try:
existing_metadata_file = scratch_directory.children()[0]
except IndexError:
pass
else:
existing_metadata = json.loads(existing_metadata_file.getContent())
for version_metadata in existing_metadata['versions']:
# In the future we may want to have multiple providers for the
# same version but for now we discard any current providers for
# the version being added.
if version_metadata['version'] != vagrant_version(version):
metadata['versions'].append(version_metadata)
metadata['versions'].append({
"version": vagrant_version(version),
"providers": [
{
"url": box_url,
"name": "virtualbox",
},
],
})
# If there is an existing file, overwrite it. Else create a new file.
new_metadata_file = scratch_directory.child(metadata_filename)
new_metadata_file.setContent(json.dumps(metadata))
yield Effect(UploadToS3(
source_path=scratch_directory,
target_bucket=target_bucket,
target_key='vagrant/' + metadata_filename,
file=new_metadata_file,
content_type='application/json',
))
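# Hedged illustration (hypothetical values, not part of the original module):
# for a box named 'flocker-tutorial' the metadata produced above looks roughly
# like
#     {"description": "clusterhq/flocker-tutorial box.",
#      "name": "clusterhq/flocker-tutorial",
#      "versions": [{"version": "<vagrant_version(version)>",
#                    "providers": [{"url": "<box_url>",
#                                   "name": "virtualbox"}]}]}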
@do
def update_repo(package_directory, target_bucket, target_key, source_repo,
packages, flocker_version, distribution):
"""
Update ``target_bucket`` yum repository with ``packages`` from
``source_repo`` repository.
:param FilePath package_directory: Temporary directory to download
repository to.
:param bytes target_bucket: S3 bucket to upload repository to.
:param bytes target_key: Path within S3 bucket to upload repository to.
:param bytes source_repo: Repository on the build server to get packages
from.
:param list packages: List of bytes, each specifying the name of a package
to upload to the repository.
:param bytes flocker_version: The version of flocker to upload packages
for.
:param Distribution distribution: The distribution to upload packages for.
"""
package_directory.createDirectory()
package_type = distribution.package_type()
yield Effect(DownloadS3KeyRecursively(
source_bucket=target_bucket,
source_prefix=target_key,
target_path=package_directory,
filter_extensions=('.' + package_type.value,)))
downloaded_packages = yield Effect(DownloadPackagesFromRepository(
source_repo=source_repo,
target_path=package_directory,
packages=packages,
flocker_version=flocker_version,
distribution=distribution,
))
new_metadata = yield Effect(CreateRepo(
repository_path=package_directory,
distribution=distribution,
))
yield Effect(UploadToS3Recursively(
source_path=package_directory,
target_bucket=target_bucket,
target_key=target_key,
files=downloaded_packages | new_metadata,
))
@do
def upload_packages(scratch_directory, target_bucket, version, build_server,
top_level):
"""
The ClusterHQ yum and deb repositories contain packages for Flocker, as
well as the dependencies which aren't available in CentOS 7. It is
currently hosted on Amazon S3. When doing a release, we want to add the
new Flocker packages, while preserving the existing packages in the
repository. To do this, we download the current repository, add the new
package, update the metadata, and then upload the repository.
:param FilePath scratch_directory: Temporary directory to download
repository to.
:param bytes target_bucket: S3 bucket to upload repository to.
:param bytes version: Version to download packages for.
:param bytes build_server: Server to download new packages from.
:param FilePath top_level: The top-level of the flocker repository.
"""
distribution_names = available_distributions(
flocker_source_path=top_level,
)
for distribution_name in distribution_names:
distribution = DISTRIBUTION_NAME_MAP[distribution_name]
architecture = distribution.native_package_architecture()
yield update_repo(
package_directory=scratch_directory.child(
b'{}-{}-{}'.format(
distribution.name,
distribution.version,
architecture)),
target_bucket=target_bucket,
target_key=os.path.join(
distribution.name + get_package_key_suffix(version),
distribution.version,
architecture),
source_repo=os.path.join(
build_server, b'results/omnibus',
version,
b'{}-{}'.format(distribution.name, distribution.version)),
packages=FLOCKER_PACKAGES,
flocker_version=version,
distribution=distribution,
)
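# Hedged illustration (hypothetical values, not taken from a real run): for a
# distribution named 'centos' at version '7' with architecture 'x86_64', the
# loop above uploads to a target key such as
#     centos<suffix>/7/x86_64
# where <suffix> comes from get_package_key_suffix(version), and pulls new
# packages from
#     <build-server>/results/omnibus/<version>/centos-7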
packages_template = (
'<html xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1">\n'
'This is an index for pip\n'
'<div t:render="packages"><a>'
'<t:attr name="href"><t:slot name="package_name" /></t:attr>'
'<t:slot name="package_name" />'
'</a><br />\n</div>'
'</html>'
)
class PackagesElement(template.Element):
"""A Twisted Web template element to render the Pip index file."""
def __init__(self, packages):
template.Element.__init__(self, template.XMLString(packages_template))
self._packages = packages
@template.renderer
def packages(self, request, tag):
for package in self._packages:
if package != 'index.html':
yield tag.clone().fillSlots(package_name=package)
def create_pip_index(scratch_directory, packages):
"""
Create an index file for pip.
:param FilePath scratch_directory: Temporary directory to create index in.
:param list packages: List of bytes, filenames of packages to be in the
index.
"""
index_file = scratch_directory.child('index.html')
with index_file.open('w') as f:
# Although this returns a Deferred, it works without the reactor
# because there are no Deferreds in the template evaluation.
# See this cheat described at
# https://twistedmatrix.com/documents/15.0.0/web/howto/twisted-templates.html
template.flatten(None, PackagesElement(packages), f.write)
return index_file
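# Hedged usage sketch (not part of the original module): rendering an index
# for two hypothetical package file names into a temporary directory.
def _example_create_pip_index():
    scratch = FilePath(tempfile.mkdtemp())
    try:
        index = create_pip_index(
            scratch_directory=scratch,
            packages=['Flocker-0.3.0.tar.gz',
                      'Flocker-0.3.0-py2-none-any.whl'])
        # index points at scratch/index.html and links to both files.
        return index.getContent()
    finally:
        scratch.remove()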
@do
def upload_pip_index(scratch_directory, target_bucket):
"""
Upload an index file for pip to S3.
:param FilePath scratch_directory: Temporary directory to create index in.
:param bytes target_bucket: S3 bucket to upload index to.
"""
packages = yield Effect(
ListS3Keys(bucket=target_bucket,
prefix='python/'))
index_path = create_pip_index(
scratch_directory=scratch_directory,
packages=packages)
yield Effect(
UploadToS3(
source_path=scratch_directory,
target_bucket=target_bucket,
target_key='python/index.html',
file=index_path,
))
@do
def upload_python_packages(scratch_directory, target_bucket, top_level,
output, error):
"""
The repository contains source distributions and binary distributions
(wheels) for Flocker. It is currently hosted on Amazon S3.
:param FilePath scratch_directory: Temporary directory to create packages
in.
:param bytes target_bucket: S3 bucket to upload packages to.
    :param FilePath top_level: The top-level of the flocker repository.
    :param output: A file-like object to which the packaging subprocess's
        stdout is written.
    :param error: A file-like object to which the packaging subprocess's
        stderr is written.
    """
if setuptools_version != '3.6':
# XXX Use PEP440 version system so new setuptools can be used.
# https://clusterhq.atlassian.net/browse/FLOC-1331.
raise IncorrectSetuptoolsVersion()
# XXX This has a side effect so it should be an Effect
# https://clusterhq.atlassian.net/browse/FLOC-1731
check_call([
'python', 'setup.py',
'sdist', '--dist-dir={}'.format(scratch_directory.path),
'bdist_wheel', '--dist-dir={}'.format(scratch_directory.path)],
cwd=top_level.path, stdout=output, stderr=error)
files = set([file.basename() for file in scratch_directory.children()])
yield Effect(UploadToS3Recursively(
source_path=scratch_directory,
target_bucket=target_bucket,
target_key='python',
files=files,
))
def publish_artifacts_main(args, base_path, top_level):
"""
Publish release artifacts.
:param list args: The arguments passed to the scripts.
:param FilePath base_path: The executable being run.
:param FilePath top_level: The top-level of the flocker repository.
"""
options = UploadOptions()
try:
options.parseOptions(args)
except UsageError as e:
sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
raise SystemExit(1)
except NotARelease:
sys.stderr.write("%s: Can't publish artifacts for a non-release.\n"
% (base_path.basename(),))
raise SystemExit(1)
except DocumentationRelease:
sys.stderr.write("%s: Can't publish artifacts for a documentation "
"release.\n" % (base_path.basename(),))
raise SystemExit(1)
dispatcher = ComposedDispatcher([boto_dispatcher, yum_dispatcher,
base_dispatcher])
scratch_directory = FilePath(tempfile.mkdtemp(
prefix=b'flocker-upload-'))
scratch_directory.child('packages').createDirectory()
scratch_directory.child('python').createDirectory()
scratch_directory.child('pip').createDirectory()
scratch_directory.child('vagrant').createDirectory()
scratch_directory.child('homebrew').createDirectory()
box_type = "flocker-tutorial"
vagrant_prefix = 'vagrant/tutorial/'
box_name = "{box_type}-{version}.box".format(
box_type=box_type,
version=options['flocker-version'],
)
box_url = "https://{bucket}.s3.amazonaws.com/{key}".format(
bucket=options['target'],
key=vagrant_prefix + box_name,
)
try:
sync_perform(
dispatcher=dispatcher,
effect=sequence([
upload_packages(
scratch_directory=scratch_directory.child('packages'),
target_bucket=options['target'],
version=options['flocker-version'],
build_server=options['build-server'],
top_level=top_level,
),
upload_python_packages(
scratch_directory=scratch_directory.child('python'),
target_bucket=options['target'],
top_level=top_level,
output=sys.stdout,
error=sys.stderr,
),
upload_pip_index(
scratch_directory=scratch_directory.child('pip'),
target_bucket=options['target'],
),
Effect(
CopyS3Keys(
source_bucket=DEV_ARCHIVE_BUCKET,
source_prefix=vagrant_prefix,
destination_bucket=options['target'],
destination_prefix=vagrant_prefix,
keys=[box_name],
)
),
publish_vagrant_metadata(
version=options['flocker-version'],
box_url=box_url,
scratch_directory=scratch_directory.child('vagrant'),
box_name=box_type,
target_bucket=options['target'],
),
]),
)
publish_homebrew_recipe(
homebrew_repo_url=options['homebrew-tap'],
version=options['flocker-version'],
source_bucket=options['target'],
scratch_directory=scratch_directory.child('homebrew'),
)
except IncorrectSetuptoolsVersion:
sys.stderr.write("%s: setuptools version must be 3.6.\n"
% (base_path.basename(),))
raise SystemExit(1)
finally:
scratch_directory.remove()
def calculate_base_branch(version, path):
"""
    The branch a release branch is created from depends on the release
    type and sometimes on which pre-releases have preceded it.
:param bytes version: The version of Flocker to get a base branch for.
:param bytes path: See :func:`git.Repo.init`.
:returns: The base branch from which the new release branch was created.
"""
if not (is_release(version)
or is_weekly_release(version)
or is_pre_release(version)):
raise NotARelease()
repo = Repo(path=path, search_parent_directories=True)
existing_tags = [tag for tag in repo.tags if tag.name == version]
if existing_tags:
raise TagExists()
release_branch_prefix = 'release/flocker-'
if is_weekly_release(version):
base_branch_name = 'master'
elif is_pre_release(version) and get_pre_release(version) == 1:
base_branch_name = 'master'
elif get_doc_version(version) != version:
base_branch_name = release_branch_prefix + get_doc_version(version)
else:
if is_pre_release(version):
target_version = target_release(version)
else:
target_version = version
pre_releases = []
for tag in repo.tags:
try:
if (is_pre_release(tag.name) and
target_version == target_release(tag.name)):
pre_releases.append(tag.name)
except UnparseableVersion:
# The Flocker repository contains versions which are not
# currently considered valid versions.
pass
if not pre_releases:
raise NoPreRelease()
latest_pre_release = sorted(
pre_releases,
key=lambda pre_release: get_pre_release(pre_release))[-1]
if (is_pre_release(version) and get_pre_release(version) >
get_pre_release(latest_pre_release) + 1):
raise MissingPreRelease()
base_branch_name = release_branch_prefix + latest_pre_release
# We create a new branch from a branch, not a tag, because a maintenance
# or documentation change may have been applied to the branch and not the
# tag.
try:
base_branch = [
branch for branch in repo.branches if
branch.name == base_branch_name][0]
except IndexError:
raise BaseBranchDoesNotExist()
return base_branch
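# Hedged illustration of the branch-selection rules above (example outcomes
# only; the real result depends on the tags present in the repository):
#   weekly release                -> 'master'
#   first pre-release (pre1)      -> 'master'
#   documentation release         -> 'release/flocker-' + version being updated
#   later pre-release / marketing -> 'release/flocker-' + latest pre-release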
def create_release_branch(version, base_branch):
"""
    Check out a new Git branch to make changes on and later tag as a release.
:param bytes version: The version of Flocker to create a release branch
for.
:param base_branch: See :func:`git.Head`. The branch to create the release
branch from.
"""
try:
base_branch.checkout(b='release/flocker-' + version)
except GitCommandError:
raise BranchExists()
class CreateReleaseBranchOptions(Options):
"""
Arguments for ``create-release-branch`` script.
"""
optParameters = [
["flocker-version", None, None,
"The version of Flocker to create a release branch for."],
]
def parseArgs(self):
if self['flocker-version'] is None:
raise UsageError("`--flocker-version` must be specified.")
def create_release_branch_main(args, base_path, top_level):
"""
Create a release branch.
:param list args: The arguments passed to the script.
:param FilePath base_path: The executable being run.
:param FilePath top_level: The top-level of the flocker repository.
"""
options = CreateReleaseBranchOptions()
try:
options.parseOptions(args)
except UsageError as e:
sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
raise SystemExit(1)
version = options['flocker-version']
path = FilePath(__file__).path
try:
base_branch = calculate_base_branch(version=version, path=path)
create_release_branch(version=version, base_branch=base_branch)
except NotARelease:
sys.stderr.write("%s: Can't create a release branch for non-release.\n"
% (base_path.basename(),))
raise SystemExit(1)
except TagExists:
sys.stderr.write("%s: Tag already exists for this release.\n"
% (base_path.basename(),))
raise SystemExit(1)
except NoPreRelease:
sys.stderr.write("%s: No (previous) pre-release exists for this "
"release.\n" % (base_path.basename(),))
raise SystemExit(1)
except BaseBranchDoesNotExist:
sys.stderr.write("%s: The expected base branch does not exist.\n"
% (base_path.basename(),))
raise SystemExit(1)
except BranchExists:
sys.stderr.write("%s: The release branch already exists.\n"
% (base_path.basename(),))
raise SystemExit(1)
class TestRedirectsOptions(Options):
"""
Arguments for ``test-redirects`` script.
"""
optParameters = [
["doc-version", None, None,
"The version which the documentation sites are expected to redirect "
"to.\n"
],
]
optFlags = [
["production", None, "Check the production documentation site."],
]
environment = Environments.STAGING
def parseArgs(self):
if self['doc-version'] is None:
self['doc-version'] = get_doc_version(flocker.__version__)
if self['production']:
self.environment = Environments.PRODUCTION
def test_redirects_main(args, base_path, top_level):
"""
Tests redirects to Flocker documentation.
:param list args: The arguments passed to the script.
:param FilePath base_path: The executable being run.
:param FilePath top_level: The top-level of the flocker repository.
"""
options = TestRedirectsOptions()
try:
options.parseOptions(args)
except UsageError as e:
sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
raise SystemExit(1)
doc_version = options['doc-version']
document_configuration = DOCUMENTATION_CONFIGURATIONS[options.environment]
base_url = 'https://' + document_configuration.cloudfront_cname
is_dev = not is_release(doc_version)
if is_dev:
expected_redirects = {
'/en/devel': '/en/' + doc_version + '/',
'/en/devel/faq/index.html':
'/en/' + doc_version + '/faq/index.html',
}
else:
expected_redirects = {
'/': '/en/' + doc_version + '/',
'/en/': '/en/' + doc_version + '/',
'/en/latest': '/en/' + doc_version + '/',
'/en/latest/faq/index.html':
'/en/' + doc_version + '/faq/index.html',
}
failed_redirects = []
for path in expected_redirects:
original_url = base_url + path
expected_url = base_url + expected_redirects[path]
final_url = requests.get(original_url).url
if expected_url != final_url:
failed_redirects.append(original_url)
message = (
"'{original_url}' expected to redirect to '{expected_url}', "
"instead redirects to '{final_url}'.\n").format(
original_url=original_url,
expected_url=expected_url,
final_url=final_url,
)
sys.stderr.write(message)
if len(failed_redirects):
raise SystemExit(1)
else:
print 'All tested redirects work correctly.'
class PublishDevBoxOptions(Options):
"""
Options for publishing a Vagrant development box.
"""
optParameters = [
["flocker-version", None, flocker.__version__,
"The version of Flocker to upload a development box for.\n"],
["target", None, ARCHIVE_BUCKET,
"The bucket to upload a development box to.\n"],
]
def publish_dev_box_main(args, base_path, top_level):
"""
Publish a development Vagrant box.
:param list args: The arguments passed to the script.
:param FilePath base_path: The executable being run.
:param FilePath top_level: The top-level of the flocker repository.
"""
options = PublishDevBoxOptions()
try:
options.parseOptions(args)
except UsageError as e:
sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
raise SystemExit(1)
scratch_directory = FilePath(tempfile.mkdtemp(
prefix=b'flocker-upload-'))
scratch_directory.child('vagrant').createDirectory()
box_type = "flocker-dev"
prefix = 'vagrant/dev/'
box_name = "{box_type}-{version}.box".format(
box_type=box_type,
version=options['flocker-version'],
)
box_url = "https://{bucket}.s3.amazonaws.com/{key}".format(
bucket=options['target'],
key=prefix + box_name,
)
sync_perform(
dispatcher=ComposedDispatcher([boto_dispatcher, base_dispatcher]),
effect=sequence([
Effect(
CopyS3Keys(
source_bucket=DEV_ARCHIVE_BUCKET,
source_prefix=prefix,
destination_bucket=options['target'],
destination_prefix=prefix,
keys=[box_name],
)
),
publish_vagrant_metadata(
version=options['flocker-version'],
box_url=box_url,
scratch_directory=scratch_directory.child('vagrant'),
box_name=box_type,
target_bucket=options['target'],
),
]),
)
|
|
from common_fixtures import * # NOQA
test_network_policy = os.environ.get(
'TEST_NETWORK_POLICY', "False")
np_reason = \
    'Network policy test not enabled for this TEST_NETWORK_POLICY setting'
if_network_policy = pytest.mark.skipif(test_network_policy != "ALL",
reason=np_reason)
if_network_policy_none = pytest.mark.skipif(
test_network_policy != "NONE",
reason=np_reason)
if_network_policy_within_stack = pytest.mark.skipif(
test_network_policy != "WITHIN_STACK",
reason=np_reason)
if_network_policy_within_service = pytest.mark.skipif(
test_network_policy != "WITHIN_SERVICE",
reason=np_reason)
if_network_policy_within_linked = pytest.mark.skipif(
test_network_policy != "WITHIN_LINKED",
reason=np_reason)
if_network_policy_groupby = pytest.mark.skipif(
test_network_policy != "WITHIN_GROUPBY",
reason=np_reason)
NETWORKPOLICY_SUBDIR = \
os.path.join(os.path.dirname(os.path.realpath(__file__)),
'resources/networkpolicy')
policy_within_stack = {"within": "stack", "action": "allow"}
policy_groupby = {"between": {"groupBy": "com.rancher.stack.location"},
"action": "allow"}
policy_within_service = {"within": "service", "action": "allow"}
policy_within_linked = {"within": "linked", "action": "allow"}
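# Hedged note (not part of the original suite): each test below first applies
# a default network action together with one of the policies above, e.g.
#     set_network_policy(client, "deny", policy_within_stack)
# and then checks connectivity with the validate_* helpers defined in this
# module.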
shared_environment = {"env": []}
@pytest.fixture(scope='session', autouse=True)
def create_env_for_network_policy(request, client, socat_containers):
assert check_for_network_policy_manager(client)
env2 = create_stack_with_service(client, "test2", NETWORKPOLICY_SUBDIR,
"stack2.yml", "stack2-rc.yml")
assert len(env2.services()) == 6
env1 = create_stack_with_service(client, "test1", NETWORKPOLICY_SUBDIR,
"stack1.yml", "stack1-rc.yml")
assert len(env1.services()) == 11
create_standalone_containers(client)
time.sleep(sleep_interval)
populate_env_details(client)
def fin():
to_delete = [env1, env2]
delete_all(client, to_delete)
delete_all(client, shared_environment["containers"])
delete_all(client, shared_environment["containers_with_label"])
request.addfinalizer(fin)
def populate_env_details(client):
env = client.list_stack(name="test1")
assert len(env) == 1
env1 = env[0]
env = client.list_stack(name="test2")
assert len(env) == 1
env2 = env[0]
shared_environment["env"].append(env1)
shared_environment["env"].append(env2)
shared_environment["stack1_test1allow"] = \
get_service_by_name(client, env1, "test1allow")
shared_environment["stack1_test2allow"] = \
get_service_by_name(client, env1, "test2allow")
shared_environment["stack1_test3deny"] = \
get_service_by_name(client, env1, "test3deny")
shared_environment["stack1_test4deny"] = \
get_service_by_name(client, env1, "test4deny")
shared_environment["stack1_lbwithinstack"] = \
get_service_by_name(client, env1, "lbwithininstack")
shared_environment["stack1_lbcrossstack"] = \
get_service_by_name(client, env1, "lbcrossstack")
shared_environment["stack1_servicewithlinks"] = \
get_service_by_name(client, env1, "servicewithlinks")
shared_environment["stack1_servicecrosslinks"] = \
get_service_by_name(client, env1, "servicecrosslinks")
shared_environment["stack1_servicelinktosidekick"] = \
get_service_by_name(client, env1, "servicelinktosidekick")
shared_environment["stack1_linktowebservice"] = \
get_service_by_name(client, env1, "linktowebservice")
shared_environment["stack2_test1allow"] = \
get_service_by_name(client, env2, "test1allow")
shared_environment["stack2_test2allow"] = \
get_service_by_name(client, env2, "test2allow")
shared_environment["stack2_test3deny"] = \
get_service_by_name(client, env2, "test3deny")
shared_environment["stack2_test4deny"] = \
get_service_by_name(client, env2, "test4deny")
service_with_sidekick = {}
service_with_sidekick["p_con1"] = \
get_container_by_name(client, "test2-testp1-1")
service_with_sidekick["p_con2"] = \
get_container_by_name(client, "test2-testp1-2")
service_with_sidekick["s1_con1"] = \
get_container_by_name(client, "test2-testp1-tests1-1")
service_with_sidekick["s1_con2"] = \
get_container_by_name(client, "test2-testp1-tests1-2")
service_with_sidekick["s2_con1"] = \
get_container_by_name(client, "test2-testp1-tests2-1")
service_with_sidekick["s2_con2"] = \
get_container_by_name(client, "test2-testp1-tests2-2")
shared_environment["stack2_sidekick"] = service_with_sidekick
time.sleep(sleep_interval)
def validate_default_network_action_deny_networkpolicy_allow_within_stacks(
client):
    # Validate that standalone containers are not able to reach any
# service containers
for container in shared_environment["containers"]:
validate_connectivity_between_con_to_services(
client, container,
[shared_environment["stack1_test2allow"],
shared_environment["stack2_test4deny"]],
connection="deny")
    # Validate that connectivity between containers of different
# services within the same stack is allowed
validate_connectivity_between_services(
client, shared_environment["stack1_test1allow"],
[shared_environment["stack1_test2allow"],
shared_environment["stack1_test3deny"],
shared_environment["stack1_test4deny"]],
connection="allow")
# Validate that there is no connectivity between containers of different
# services across stacks
validate_connectivity_between_services(
client, shared_environment["stack1_test1allow"],
[shared_environment["stack2_test1allow"],
shared_environment["stack2_test2allow"],
shared_environment["stack2_test3deny"],
shared_environment["stack2_test4deny"]],
connection="deny")
    # Validate that the LB is able to reach all targets which are in the
    # same stack as the LB
validate_lb_service(client,
shared_environment["stack1_lbwithinstack"],
"9091",
[shared_environment["stack1_test1allow"]])
    # Validate that the linked service is able to reach its link target,
    # which is in the same stack
validate_linked_service(client,
shared_environment["stack1_servicewithlinks"],
[shared_environment["stack1_test1allow"]],
"99")
    # Cross-stack access for links should be denied
validate_linked_service(client,
shared_environment["stack1_servicecrosslinks"],
[shared_environment["stack2_test2allow"]],
"98", linkName="test2allow.test2",
not_reachable=True)
    # Cross-stack access for LBs should be denied
validate_lb_service_for_no_access(
client, shared_environment["stack1_lbcrossstack"], "9090")
def validate_default_network_action_deny_networkpolicy_none(
client):
    # Validate that standalone containers are not able to reach any
# service containers
for container in shared_environment["containers"]:
validate_connectivity_between_con_to_services(
client, container,
[shared_environment["stack1_test2allow"],
shared_environment["stack2_test4deny"]],
connection="deny")
# Validate that there is no connectivity between containers of different
# services across stacks and within stacks
validate_connectivity_between_services(
client, shared_environment["stack1_test1allow"],
[shared_environment["stack1_test2allow"],
shared_environment["stack1_test3deny"],
shared_environment["stack1_test4deny"],
shared_environment["stack2_test1allow"],
shared_environment["stack2_test2allow"],
shared_environment["stack2_test3deny"],
shared_environment["stack2_test4deny"]],
connection="deny")
    # Validate that the LB service is not able to reach targets within the
    # same stack or across stacks
validate_lb_service_for_no_access(
client, shared_environment["stack1_lbwithinstack"], "9091")
validate_lb_service_for_no_access(
client, shared_environment["stack1_lbcrossstack"], "9090")
    # Validate that connectivity between linked services is denied within the
    # same stack and across stacks
validate_linked_service(client,
shared_environment["stack1_servicewithlinks"],
[shared_environment["stack1_test1allow"]],
"99", not_reachable=True)
validate_linked_service(client,
shared_environment["stack1_servicecrosslinks"],
[shared_environment["stack2_test2allow"]],
"98", linkName="test2allow.test2",
not_reachable=True)
def validate_default_network_action_deny_networkpolicy_groupby(
client):
# Validate that containers that do not have the labels defined
# in group by policy are not allowed to communicate with other
# service containers
for container in shared_environment["containers"]:
validate_connectivity_between_con_to_services(
client, container,
[shared_environment["stack1_test2allow"],
shared_environment["stack2_test4deny"]],
connection="deny")
    # Validate that standalone containers that have the labels defined
# in group by policy are allowed to communicate with service containers
# having the same labels
for container in shared_environment["containers_with_label"]:
validate_connectivity_between_con_to_services(
client, container,
[shared_environment["stack1_test2allow"],
shared_environment["stack2_test1allow"],
shared_environment["stack2_test2allow"]],
connection="allow")
# Validate that service containers that have matching labels defined
# in group by policy are allowed to communicate with each other
validate_connectivity_between_services(
client, shared_environment["stack1_test1allow"],
[shared_environment["stack1_test2allow"],
shared_environment["stack2_test1allow"],
shared_environment["stack2_test2allow"]],
connection="allow")
# Validate that all service containers within the same service that has
# group by labels are able to communicate with each other
validate_connectivity_between_services(
client, shared_environment["stack1_test3deny"],
[shared_environment["stack2_test3deny"]],
connection="allow")
# Validate that service containers that do not have matching labels defined
# in group by policy are not allowed to communicate with each other
validate_connectivity_between_services(
client, shared_environment["stack1_test1allow"],
[shared_environment["stack1_test3deny"],
shared_environment["stack1_test4deny"],
shared_environment["stack2_test3deny"],
shared_environment["stack2_test4deny"]],
connection="deny")
validate_connectivity_between_services(
client, shared_environment["stack1_test3deny"],
[shared_environment["stack1_test1allow"],
shared_environment["stack1_test2allow"],
shared_environment["stack1_test4deny"],
shared_environment["stack2_test1allow"],
shared_environment["stack2_test2allow"],
shared_environment["stack2_test4deny"]],
connection="deny")
def validate_default_network_action_deny_networkpolicy_within_service(
client):
    # Validate that standalone containers are not able to reach any
# service containers
for container in shared_environment["containers"]:
validate_connectivity_between_con_to_services(
client, container,
[shared_environment["stack1_test1allow"],
shared_environment["stack2_test4deny"]],
connection="deny")
# Validate that containers belonging to the same service are able to
# communicate with each other
validate_connectivity_between_services(
client, shared_environment["stack1_test1allow"],
[shared_environment["stack1_test1allow"]],
connection="allow")
    # Validate that containers belonging to different services within the
    # same stack or across stacks are not able to communicate with each other
validate_connectivity_between_services(
client, shared_environment["stack1_test1allow"],
[shared_environment["stack1_test2allow"],
shared_environment["stack2_test1allow"],
shared_environment["stack2_test2allow"]],
connection="deny")
    # Validate that LB services have no access to targets within the
    # same stack or across stacks
validate_lb_service_for_no_access(
client, shared_environment["stack1_lbcrossstack"], "9090")
validate_lb_service_for_no_access(
client, shared_environment["stack1_lbwithinstack"], "9091")
    # Validate that connectivity between linked services is denied within the
    # same stack and across stacks
validate_linked_service(
client, shared_environment["stack1_servicewithlinks"],
[shared_environment["stack1_test1allow"]], "99", not_reachable=True)
validate_linked_service(client,
shared_environment["stack1_servicecrosslinks"],
[shared_environment["stack2_test2allow"]],
"98", linkName="test2allow.test2",
not_reachable=True)
def validate_default_network_action_deny_networkpolicy_within_service_for_sk(
client):
# Validate that containers of primary services are able to connect with
# other containers in the same service and containers in other sidekick
# services
validate_connectivity_between_container_list(
client,
shared_environment["stack2_sidekick"]["p_con1"],
[shared_environment["stack2_sidekick"]["p_con2"],
shared_environment["stack2_sidekick"]["s1_con1"],
shared_environment["stack2_sidekick"]["s1_con2"],
shared_environment["stack2_sidekick"]["s2_con1"],
shared_environment["stack2_sidekick"]["s2_con2"]],
"allow")
# Validate that containers of sidekick services are able to connect with
# other containers in the same service and containers in other sidekick
# services and primary service
validate_connectivity_between_container_list(
client,
shared_environment["stack2_sidekick"]["s1_con1"],
[shared_environment["stack2_sidekick"]["p_con1"],
shared_environment["stack2_sidekick"]["p_con2"],
shared_environment["stack2_sidekick"]["s1_con2"],
shared_environment["stack2_sidekick"]["s2_con1"],
shared_environment["stack2_sidekick"]["s2_con2"]],
"allow")
validate_connectivity_between_container_list(
client,
shared_environment["stack2_sidekick"]["s2_con1"],
[shared_environment["stack2_sidekick"]["p_con1"],
shared_environment["stack2_sidekick"]["p_con2"],
shared_environment["stack2_sidekick"]["s1_con1"],
         shared_environment["stack2_sidekick"]["s1_con2"],
shared_environment["stack2_sidekick"]["s2_con2"]],
"allow")
def validate_default_network_action_deny_networkpolicy_within_linked(
client):
    # Validate that standalone containers are not able to reach any
# service containers
for container in shared_environment["containers"]:
validate_connectivity_between_con_to_services(
client, container,
[shared_environment["stack1_test2allow"],
shared_environment["stack2_test4deny"]],
connection="deny")
# Validate that containers belonging to a service are not able to
# communicate with other containers in the same service or different
# service
validate_connectivity_between_services(
client, shared_environment["stack1_test1allow"],
[shared_environment["stack1_test1allow"],
shared_environment["stack1_test2allow"],
shared_environment["stack2_test1allow"],
shared_environment["stack2_test2allow"]],
connection="deny")
    # Validate that LB services have access to targets within the
    # same stack
validate_lb_service(client,
shared_environment["stack1_lbwithinstack"],
"9091",
[shared_environment["stack1_test1allow"]])
    # Validate that LB services have access to targets across stacks
validate_lb_service(client,
shared_environment["stack1_lbcrossstack"],
"9090",
[shared_environment["stack2_test1allow"]])
service_with_links = shared_environment["stack1_servicewithlinks"]
linked_service = [shared_environment["stack1_test1allow"]]
validate_dna_deny_np_within_linked_for_linked_service(
client, service_with_links, linked_service, "99")
service_with_links = shared_environment["stack1_servicecrosslinks"]
linked_service = [shared_environment["stack2_test1allow"]]
validate_dna_deny_np_within_linked_for_linked_service(
client, service_with_links, linked_service, "98", "mylink")
def validate_dna_deny_np_within_linked_for_linked_service(
client, service_with_links, linked_service, port, linkName=None):
# Validate that all containers of a service with link has access to
# the containers of the service that it is linked to
validate_connectivity_between_services(
client,
service_with_links,
linked_service,
connection="allow")
    # Validate that all containers of a service that is linked to by another
    # service have no access to the containers of the service that links to it:
    # for (s1 -> s2), containers of s2 have no access to s1
for l_service in linked_service:
validate_connectivity_between_services(
client,
l_service,
[service_with_links],
connection="deny")
# Validate that containers are reachable using their link name
validate_linked_service(client,
service_with_links,
linked_service,
port,
linkName=linkName)
def validate_default_network_action_deny_networkpolicy_within_linked_for_sk(
client):
containers = get_service_container_list(
client, shared_environment["stack1_servicelinktosidekick"])
# Validate connectivity between containers of linked services to linked
# service with sidekick
for con in containers:
validate_connectivity_between_container_list(
client,
con,
shared_environment["stack2_sidekick"].values(),
"allow")
for linked_con in shared_environment["stack2_sidekick"].values():
for con in containers:
validate_connectivity_between_containers(
client, linked_con, con, "deny")
def validate_dna_deny_np_within_linked_for_servicealias(
client):
# Validate connectivity between containers of linked services to services
# linked to webservice
validate_connectivity_between_services(
client, shared_environment["stack1_linktowebservice"],
[shared_environment["stack1_test4deny"],
shared_environment["stack2_test3deny"]],
connection="allow")
validate_connectivity_between_services(
client, shared_environment["stack1_test4deny"],
[shared_environment["stack1_linktowebservice"]],
connection="deny")
validate_connectivity_between_services(
        client, shared_environment["stack2_test3deny"],
[shared_environment["stack1_linktowebservice"]],
connection="deny")
@if_network_policy
def test_default_network_action_deny_networkpolicy_allow_within_stacks(
client):
set_network_policy(client, "deny", policy_within_stack)
validate_default_network_action_deny_networkpolicy_allow_within_stacks(
client)
@if_network_policy_within_stack
def test_dna_deny_np_allow_within_stacks_stop_service(
client, socat_containers):
set_network_policy(client, "deny", policy_within_stack)
stop_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
validate_default_network_action_deny_networkpolicy_allow_within_stacks(
client)
@if_network_policy_within_stack
def test_dna_deny_np_allow_within_stacks_delete_service(
client, socat_containers):
set_network_policy(client, "deny", policy_within_stack)
delete_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
validate_default_network_action_deny_networkpolicy_allow_within_stacks(
client)
@if_network_policy_within_stack
def test_dna_deny_np_allow_within_stacks_restart_service(
client, socat_containers):
set_network_policy(client, "deny", policy_within_stack)
restart_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
validate_default_network_action_deny_networkpolicy_allow_within_stacks(
client)
@if_network_policy
def test_default_network_action_deny_networkpolicy_none(client):
set_network_policy(client, "deny")
validate_default_network_action_deny_networkpolicy_none(
client)
@if_network_policy_none
def test_dna_deny_np_none_stop_service(
client, socat_containers):
set_network_policy(client, "deny")
stop_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
validate_default_network_action_deny_networkpolicy_none(
client)
@if_network_policy_none
def test_dna_deny_np_none_delete_service(
client, socat_containers):
set_network_policy(client, "deny")
delete_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
validate_default_network_action_deny_networkpolicy_none(
client)
@if_network_policy_none
def test_dna_deny_np_none_restart_service(
client, socat_containers):
set_network_policy(client, "deny")
restart_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
validate_default_network_action_deny_networkpolicy_none(
client)
@if_network_policy
def test_default_network_action_deny_networkpolicy_groupby(
client):
set_network_policy(client, "deny", policy_groupby)
validate_default_network_action_deny_networkpolicy_groupby(
client)
@if_network_policy_groupby
def test_dna_deny_np_groupby_stop_service(
client, socat_containers):
set_network_policy(client, "deny", policy_groupby)
stop_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
validate_default_network_action_deny_networkpolicy_groupby(
client)
@if_network_policy_groupby
def test_dna_deny_np_groupby_delete_service(
client, socat_containers):
set_network_policy(client, "deny", policy_groupby)
delete_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
validate_default_network_action_deny_networkpolicy_groupby(
client)
@if_network_policy_groupby
def test_dna_deny_np_groupby_restart_service(
client, socat_containers):
set_network_policy(client, "deny", policy_groupby)
restart_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
validate_default_network_action_deny_networkpolicy_groupby(
client)
@if_network_policy
def test_default_network_action_deny_networkpolicy_allow_within_service(
client):
set_network_policy(client, "deny", policy_within_service)
validate_default_network_action_deny_networkpolicy_within_service(
client)
@if_network_policy_within_service
def test_dna_deny_np_allow_within_service_delete_service(
client):
set_network_policy(client, "deny", policy_within_service)
delete_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
delete_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_lbcrossstack"], [1])
delete_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_lbwithinstack"], [1])
delete_service_instances(
client, shared_environment["env"][0],
shared_environment["stack1_servicewithlinks"], [1])
validate_default_network_action_deny_networkpolicy_within_service(
client)
@if_network_policy_within_service
def test_dna_deny_np_allow_within_service_scale_service(
client):
set_network_policy(client, "deny", policy_within_service)
scale_service(shared_environment["stack1_test1allow"], client, 3)
scale_service(shared_environment["stack1_lbcrossstack"], client, 3)
scale_service(shared_environment["stack1_lbwithinstack"], client, 3)
scale_service(shared_environment["stack1_servicewithlinks"], client, 3)
populate_env_details(client)
validate_default_network_action_deny_networkpolicy_within_service(
client)
scale_service(shared_environment["stack1_test1allow"], client, 2)
scale_service(shared_environment["stack1_lbcrossstack"], client, 2)
scale_service(shared_environment["stack1_lbwithinstack"], client, 2)
scale_service(shared_environment["stack1_servicewithlinks"], client, 2)
@if_network_policy_within_service
def test_dna_deny_np_allow_within_service_stop_service(
client):
set_network_policy(client, "deny", policy_within_service)
validate_default_network_action_deny_networkpolicy_within_service(
client)
stop_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
stop_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_lbcrossstack"], [1])
stop_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_lbwithinstack"], [1])
stop_service_instances(
client, shared_environment["env"][0],
shared_environment["stack1_servicewithlinks"], [1])
validate_default_network_action_deny_networkpolicy_within_service(
client)
@if_network_policy
def test_dna_deny_np_allow_within_service_check_sidekicks(
client):
set_network_policy(client, "deny", policy_within_service)
validate_default_network_action_deny_networkpolicy_within_service_for_sk(
client)
@if_network_policy
def test_default_network_action_deny_networkpolicy_allow_within_linked(
client):
set_network_policy(client, "deny", policy_within_linked)
validate_default_network_action_deny_networkpolicy_within_linked(
client)
@if_network_policy
def test_dna_deny_np_allow_within_linked_for_sk(
client):
set_network_policy(client, "deny", policy_within_linked)
validate_default_network_action_deny_networkpolicy_within_linked_for_sk(
client)
@if_network_policy
def test_dna_deny_np_allow_within_linked_for_sa(
client):
set_network_policy(client, "deny", policy_within_linked)
validate_dna_deny_np_within_linked_for_servicealias(
client)
@if_network_policy_within_linked
def test_dna_deny_np_allow_within_linked_after_scaleup(
client):
set_network_policy(client, "deny", policy_within_linked)
service_with_links = shared_environment["stack1_servicewithlinks"]
linked_service = shared_environment["stack1_test1allow"]
validate_dna_deny_np_within_linked_for_linked_service(
client, service_with_links, [linked_service], "99")
scale_service(linked_service, client, 3)
shared_environment["stack1_test1allow"] = \
get_service_by_name(client,
shared_environment["env"][0],
"test1allow")
linked_service = shared_environment["stack1_test1allow"]
validate_dna_deny_np_within_linked_for_linked_service(
client, service_with_links, [linked_service], "99")
scale_service(linked_service, client, 2)
shared_environment["stack1_test1allow"] = \
get_service_by_name(client,
shared_environment["env"][0],
"test1allow")
linked_service = shared_environment["stack1_test1allow"]
scale_service(service_with_links, client, 3)
shared_environment["stack1_servicewithlinks"] = \
get_service_by_name(client,
shared_environment["env"][0],
"servicewithlinks")
service_with_links = shared_environment["stack1_servicewithlinks"]
validate_dna_deny_np_within_linked_for_linked_service(
client, service_with_links, [linked_service], "99")
scale_service(service_with_links, client, 2)
shared_environment["stack1_servicewithlinks"] = \
get_service_by_name(client,
shared_environment["env"][0],
"servicewithlinks")
@if_network_policy_within_linked
def test_dna_deny_np_allow_within_linked_after_adding_removing_links(
client):
set_network_policy(client, "deny", policy_within_linked)
service_with_links = shared_environment["stack1_servicewithlinks"]
linked_service = [shared_environment["stack1_test1allow"]]
validate_dna_deny_np_within_linked_for_linked_service(
client, service_with_links, linked_service, "99")
# Add another service link
service_with_links.setservicelinks(
serviceLinks=[
{"serviceId": shared_environment["stack1_test1allow"].id},
{"serviceId": shared_environment["stack1_test2allow"].id}])
validate_dna_deny_np_within_linked_for_linked_service(
client, service_with_links,
[shared_environment["stack1_test1allow"]], "99")
validate_dna_deny_np_within_linked_for_linked_service(
client, service_with_links,
[shared_environment["stack1_test2allow"]], "99")
# Remove existing service link
service_with_links.setservicelinks(
serviceLinks=[
{"serviceId": shared_environment["stack1_test1allow"].id}])
linked_service = [shared_environment["stack1_test1allow"]]
validate_dna_deny_np_within_linked_for_linked_service(
client, service_with_links, linked_service, "99")
validate_connectivity_between_services(
client, service_with_links,
[shared_environment["stack1_test2allow"]],
connection="deny")
validate_connectivity_between_services(
client, shared_environment["stack1_test2allow"],
[service_with_links],
connection="deny")
def scale_service(service, client, final_scale):
service = client.update(service, name=service.name, scale=final_scale)
service = client.wait_success(service, 300)
assert service.state == "active"
assert service.scale == final_scale
check_container_in_service(client, service)
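# set_network_policy flips the defaultPolicyAction on the 'ipsec' network,
# optionally attaching a policy document, waits for the update to settle and
# then refreshes the cached environment details used by the validators above.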
def set_network_policy(client, defaultPolicyAction="allow", policy=None):
networks = client.list_network(name='ipsec')
assert len(networks) == 1
network = networks[0]
network = client.update(
network, defaultPolicyAction=defaultPolicyAction, policy=policy)
network = wait_success(client, network)
assert network.defaultPolicyAction == defaultPolicyAction
populate_env_details(client)
def check_for_network_policy_manager(client):
np_manager = False
env = client.list_stack(name="network-policy-manager")
if len(env) == 1:
service = get_service_by_name(client, env[0],
"network-policy-manager")
if service.state == "active":
np_manager = True
return np_manager
def create_standalone_containers(client):
hosts = client.list_host(kind='docker', removed_null=True)
cons = []
cons_with_label = []
for host in hosts:
con_name = random_str()
con = client.create_container(
name=con_name,
ports=['3001:22'],
image=HEALTH_CHECK_IMAGE_UUID,
networkMode=MANAGED_NETWORK,
requestedHostId=host.id)
con = client.wait_success(con)
assert con.state == "running"
cons.append(con)
shared_environment["containers"] = cons
for host in hosts:
con_name = random_str()
con = client.create_container(
name=con_name,
ports=['3002:22'],
image=HEALTH_CHECK_IMAGE_UUID,
networkMode=MANAGED_NETWORK,
requestedHostId=host.id,
labels={"com.rancher.stack.location": "east"})
con = client.wait_success(con)
assert con.state == "running"
cons_with_label.append(con)
shared_environment["containers_with_label"] = cons_with_label
|
|
from __future__ import absolute_import
from django.contrib.auth import authenticate
from django.contrib.sites.models import Site
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import PermissionDenied
from django.db import models
from django.utils.crypto import get_random_string
from django.utils.encoding import force_str
from django.utils.translation import gettext_lazy as _
import allauth.app_settings
from allauth.account.models import EmailAddress
from allauth.account.utils import get_next_redirect_url, setup_user_email
from allauth.utils import get_user_model
from ..utils import get_request_param
from . import app_settings, providers
from .adapter import get_adapter
from .fields import JSONField
class SocialAppManager(models.Manager):
def get_current(self, provider, request=None):
cache = {}
if request:
cache = getattr(request, '_socialapp_cache', {})
request._socialapp_cache = cache
app = cache.get(provider)
if not app:
site = get_current_site(request)
app = self.get(
sites__id=site.id,
provider=provider)
cache[provider] = app
return app
class SocialApp(models.Model):
objects = SocialAppManager()
provider = models.CharField(verbose_name=_('provider'),
max_length=30,
choices=providers.registry.as_choices())
name = models.CharField(verbose_name=_('name'),
max_length=40)
client_id = models.CharField(verbose_name=_('client id'),
max_length=191,
help_text=_('App ID, or consumer key'))
secret = models.CharField(verbose_name=_('secret key'),
max_length=191,
blank=True,
help_text=_('API secret, client secret, or'
' consumer secret'))
key = models.CharField(verbose_name=_('key'),
max_length=191,
blank=True,
help_text=_('Key'))
# Most apps can be used across multiple domains, therefore we use
# a ManyToManyField. Note that Facebook requires an app per domain
# (unless the domains share a common base name).
# blank=True allows for disabling apps without removing them
sites = models.ManyToManyField(Site, blank=True)
class Meta:
verbose_name = _('social application')
verbose_name_plural = _('social applications')
def __str__(self):
return self.name
class SocialAccount(models.Model):
user = models.ForeignKey(allauth.app_settings.USER_MODEL,
on_delete=models.CASCADE)
provider = models.CharField(verbose_name=_('provider'),
max_length=30,
choices=providers.registry.as_choices())
# Just in case you're wondering if an OpenID identity URL is going
# to fit in a 'uid':
#
# Ideally, URLField(max_length=1024, unique=True) would be used
# for identity. However, MySQL has a max_length limitation of 191
# for URLField (in case of utf8mb4). How about
# models.TextField(unique=True) then? Well, that won't work
# either for MySQL due to another bug[1]. So the only way out
# would be to drop the unique constraint, or switch to shorter
# identity URLs. Opted for the latter, as [2] suggests that
# identity URLs are supposed to be short anyway, at least for the
# old spec.
#
# [1] http://code.djangoproject.com/ticket/2495.
# [2] http://openid.net/specs/openid-authentication-1_1.html#limits
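    #
    # A minimal sketch of the arithmetic behind such a limit (assuming MySQL's
    # default 767-byte unique-index key limit and utf8mb4's 4 bytes per
    # character):
    #
    #     767 // 4 == 191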
uid = models.CharField(verbose_name=_('uid'),
max_length=app_settings.UID_MAX_LENGTH)
last_login = models.DateTimeField(verbose_name=_('last login'),
auto_now=True)
date_joined = models.DateTimeField(verbose_name=_('date joined'),
auto_now_add=True)
extra_data = JSONField(verbose_name=_('extra data'), default=dict)
class Meta:
unique_together = ('provider', 'uid')
verbose_name = _('social account')
verbose_name_plural = _('social accounts')
def authenticate(self):
return authenticate(account=self)
def __str__(self):
return force_str(self.user)
def get_profile_url(self):
return self.get_provider_account().get_profile_url()
def get_avatar_url(self):
return self.get_provider_account().get_avatar_url()
def get_provider(self):
return providers.registry.by_id(self.provider)
def get_provider_account(self):
return self.get_provider().wrap_account(self)
class SocialToken(models.Model):
app = models.ForeignKey(SocialApp, on_delete=models.CASCADE)
account = models.ForeignKey(SocialAccount, on_delete=models.CASCADE)
token = models.TextField(
verbose_name=_('token'),
help_text=_(
'"oauth_token" (OAuth1) or access token (OAuth2)'))
token_secret = models.TextField(
blank=True,
verbose_name=_('token secret'),
help_text=_(
'"oauth_token_secret" (OAuth1) or refresh token (OAuth2)'))
expires_at = models.DateTimeField(blank=True, null=True,
verbose_name=_('expires at'))
class Meta:
unique_together = ('app', 'account')
verbose_name = _('social application token')
verbose_name_plural = _('social application tokens')
def __str__(self):
return self.token
class SocialLogin(object):
"""
Represents a social user that is in the process of being logged
in. This consists of the following information:
`account` (`SocialAccount` instance): The social account being
logged in. Providers are not responsible for checking whether or
not an account already exists or not. Therefore, a provider
typically creates a new (unsaved) `SocialAccount` instance. The
`User` instance pointed to by the account (`account.user`) may be
prefilled by the provider for use as a starting point later on
during the signup process.
`token` (`SocialToken` instance): An optional access token token
that results from performing a successful authentication
handshake.
`state` (`dict`): The state to be preserved during the
authentication handshake. Note that this state may end up in the
url -- do not put any secrets in here. It currently only contains
the url to redirect to after login.
`email_addresses` (list of `EmailAddress`): Optional list of
e-mail addresses retrieved from the provider.
"""
def __init__(self, user=None, account=None, token=None,
email_addresses=[]):
if token:
assert token.account is None or token.account == account
self.token = token
self.user = user
self.account = account
self.email_addresses = email_addresses
self.state = {}
def connect(self, request, user):
self.user = user
self.save(request, connect=True)
def serialize(self):
serialize_instance = get_adapter().serialize_instance
ret = dict(account=serialize_instance(self.account),
user=serialize_instance(self.user),
state=self.state,
email_addresses=[serialize_instance(ea)
for ea in self.email_addresses])
if self.token:
ret['token'] = serialize_instance(self.token)
return ret
@classmethod
def deserialize(cls, data):
deserialize_instance = get_adapter().deserialize_instance
account = deserialize_instance(SocialAccount, data['account'])
user = deserialize_instance(get_user_model(), data['user'])
if 'token' in data:
token = deserialize_instance(SocialToken, data['token'])
else:
token = None
email_addresses = []
for ea in data['email_addresses']:
email_address = deserialize_instance(EmailAddress, ea)
email_addresses.append(email_address)
ret = cls()
ret.token = token
ret.account = account
ret.user = user
ret.email_addresses = email_addresses
ret.state = data['state']
return ret
def save(self, request, connect=False):
"""
Saves a new account. Note that while the account is new,
the user may be an existing one (when connecting accounts)
"""
assert not self.is_existing
user = self.user
user.save()
self.account.user = user
self.account.save()
if app_settings.STORE_TOKENS and self.token and self.token.app.pk:
self.token.account = self.account
self.token.save()
if connect:
# TODO: Add any new email addresses automatically?
pass
else:
setup_user_email(request, user, self.email_addresses)
@property
def is_existing(self):
"""
Account is temporary, not yet backed by a database record.
"""
return self.account.pk is not None
def lookup(self):
"""
Lookup existing account, if any.
"""
assert not self.is_existing
try:
a = SocialAccount.objects.get(provider=self.account.provider,
uid=self.account.uid)
# Update account
a.extra_data = self.account.extra_data
self.account = a
self.user = self.account.user
a.save()
# Update token
if app_settings.STORE_TOKENS and self.token and self.token.app.pk:
assert not self.token.pk
try:
t = SocialToken.objects.get(account=self.account,
app=self.token.app)
t.token = self.token.token
if self.token.token_secret:
# only update the refresh token if we got one
# many oauth2 providers do not resend the refresh token
t.token_secret = self.token.token_secret
t.expires_at = self.token.expires_at
t.save()
self.token = t
except SocialToken.DoesNotExist:
self.token.account = a
self.token.save()
except SocialAccount.DoesNotExist:
pass
def get_redirect_url(self, request):
url = self.state.get('next')
return url
@classmethod
def state_from_request(cls, request):
state = {}
next_url = get_next_redirect_url(request)
if next_url:
state['next'] = next_url
state['process'] = get_request_param(request, 'process', 'login')
state['scope'] = get_request_param(request, 'scope', '')
state['auth_params'] = get_request_param(request, 'auth_params', '')
return state
@classmethod
def stash_state(cls, request):
state = cls.state_from_request(request)
verifier = get_random_string()
request.session['socialaccount_state'] = (state, verifier)
return verifier
@classmethod
def unstash_state(cls, request):
if 'socialaccount_state' not in request.session:
raise PermissionDenied()
state, verifier = request.session.pop('socialaccount_state')
return state
@classmethod
def verify_and_unstash_state(cls, request, verifier):
if 'socialaccount_state' not in request.session:
raise PermissionDenied()
state, verifier2 = request.session.pop('socialaccount_state')
if verifier != verifier2:
raise PermissionDenied()
return state
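# A minimal sketch of the state round trip above (hypothetical calling code,
# not part of this module): the view redirecting to the provider stashes the
# state, and the callback view verifies and restores it.
#
#     verifier = SocialLogin.stash_state(request)   # before redirecting out
#     ...
#     state = SocialLogin.verify_and_unstash_state(request, verifier)
#     next_url = state.get('next')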
|
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from json import dumps, loads
from sys import exc_info
from time import sleep
from os import remove
from os.path import join
import traceback
import warnings
import qiita_db as qdb
from qiita_core.qiita_settings import r_client, qiita_config
from qiita_ware.commands import (download_remote, list_remote,
submit_VAMPS, submit_EBI)
from qiita_ware.metadata_pipeline import (
create_templates_from_qiime_mapping_file)
from qiita_ware.exceptions import EBISubmissionError
def build_analysis_files(job):
"""Builds the files for an analysis
Parameters
----------
job : qiita_db.processing_job.ProcessingJob
The processing job with the information for building the files
"""
with qdb.sql_connection.TRN:
params = job.parameters.values
analysis_id = params['analysis']
merge_duplicated_sample_ids = params['merge_dup_sample_ids']
analysis = qdb.analysis.Analysis(analysis_id)
biom_files = analysis.build_files(merge_duplicated_sample_ids)
cmd = qdb.software.Command.get_validator('BIOM')
val_jobs = []
for dtype, biom_fp, archive_artifact_fp in biom_files:
if archive_artifact_fp is not None:
files = dumps({'biom': [biom_fp],
'plain_text': [archive_artifact_fp]})
else:
files = dumps({'biom': [biom_fp]})
validate_params = qdb.software.Parameters.load(
cmd, values_dict={'files': files,
'artifact_type': 'BIOM',
'provenance': dumps({'job': job.id,
'data_type': dtype}),
'analysis': analysis_id,
'template': None})
val_jobs.append(qdb.processing_job.ProcessingJob.create(
analysis.owner, validate_params, True))
job._set_validator_jobs(val_jobs)
for j in val_jobs:
j.submit()
sleep(1)
# The validator jobs no longer finish the job automatically so we need
# to release the validators here
job.release_validators()
def release_validators(job):
"""Waits until all the validators of a job are completed
Parameters
----------
job : qiita_db.processing_job.ProcessingJob
The processing job with the information of the parent job
"""
qdb.processing_job.ProcessingJob(
job.parameters.values['job']).release_validators()
job._set_status('success')
def submit_to_VAMPS(job):
"""Submits an artifact to VAMPS
Parameters
----------
job : qiita_db.processing_job.ProcessingJob
The processing job performing the task
"""
with qdb.sql_connection.TRN:
submit_VAMPS(job.parameters.values['artifact'])
job._set_status('success')
def submit_to_EBI(job):
"""Submit a study to EBI
Parameters
----------
job : qiita_db.processing_job.ProcessingJob
The processing job performing the task
"""
with qdb.sql_connection.TRN:
param_vals = job.parameters.values
artifact_id = int(param_vals['artifact'])
submission_type = param_vals['submission_type']
artifact = qdb.artifact.Artifact(artifact_id)
for info in artifact.study._ebi_submission_jobs():
jid, aid, js, cbste, era = info
if js in ('running', 'queued') and jid != job.id:
error_msg = ("Cannot perform parallel EBI submission for "
"the same study. Current job running: %s" % js)
raise EBISubmissionError(error_msg)
submit_EBI(artifact_id, submission_type, True)
job._set_status('success')
def copy_artifact(job):
"""Creates a copy of an artifact
Parameters
----------
job : qiita_db.processing_job.ProcessingJob
The processing job performing the task
"""
with qdb.sql_connection.TRN:
param_vals = job.parameters.values
orig_artifact = qdb.artifact.Artifact(param_vals['artifact'])
prep_template = qdb.metadata_template.prep_template.PrepTemplate(
param_vals['prep_template'])
qdb.artifact.Artifact.copy(orig_artifact, prep_template)
job._set_status('success')
def delete_artifact(job):
"""Deletes an artifact from the system
Parameters
----------
job : qiita_db.processing_job.ProcessingJob
The processing job performing the task
"""
with qdb.sql_connection.TRN:
artifact_id = job.parameters.values['artifact']
qdb.artifact.Artifact.delete(artifact_id)
job._set_status('success')
def create_sample_template(job):
"""Creates a sample template
Parameters
----------
job : qiita_db.processing_job.ProcessingJob
The processing job performing the task
"""
with qdb.sql_connection.TRN:
params = job.parameters.values
fp = params['fp']
study = qdb.study.Study(int(params['study_id']))
is_mapping_file = params['is_mapping_file']
data_type = params['data_type']
with warnings.catch_warnings(record=True) as warns:
if is_mapping_file:
create_templates_from_qiime_mapping_file(fp, study, data_type)
else:
qdb.metadata_template.sample_template.SampleTemplate.create(
qdb.metadata_template.util.load_template_to_dataframe(fp),
study)
remove(fp)
if warns:
msg = '\n'.join(set(str(w.message) for w in warns))
r_client.set("sample_template_%s" % study.id,
dumps({'job_id': job.id, 'alert_type': 'warning',
'alert_msg': msg}))
job._set_status('success')
def update_sample_template(job):
"""Updates a sample template
Parameters
----------
job : qiita_db.processing_job.ProcessingJob
The processing job performing the task
"""
with qdb.sql_connection.TRN:
param_vals = job.parameters.values
study_id = param_vals['study']
fp = param_vals['template_fp']
with warnings.catch_warnings(record=True) as warns:
st = qdb.metadata_template.sample_template.SampleTemplate(study_id)
df = qdb.metadata_template.util.load_template_to_dataframe(fp)
st.extend_and_update(df)
remove(fp)
# Join all the warning messages into one. Note that this info
# will be ignored if an exception is raised
if warns:
msg = '\n'.join(set(str(w.message) for w in warns))
r_client.set("sample_template_%s" % study_id,
dumps({'job_id': job.id, 'alert_type': 'warning',
'alert_msg': msg}))
job._set_status('success')
def delete_sample_template(job):
"""Deletes a sample template
Parameters
----------
job : qiita_db.processing_job.ProcessingJob
The processing job performing the task
"""
with qdb.sql_connection.TRN:
qdb.metadata_template.sample_template.SampleTemplate.delete(
job.parameters.values['study'])
job._set_status('success')
def update_prep_template(job):
"""Updates a prep template
Parameters
----------
job : qiita_db.processing_job.ProcessingJob
The processing job performing the task
"""
with qdb.sql_connection.TRN:
param_vals = job.parameters.values
prep_id = param_vals['prep_template']
fp = param_vals['template_fp']
prep = qdb.metadata_template.prep_template.PrepTemplate(prep_id)
with warnings.catch_warnings(record=True) as warns:
df = qdb.metadata_template.util.load_template_to_dataframe(fp)
prep.extend_and_update(df)
remove(fp)
# Join all the warning messages into one. Note that this info
# will be ignored if an exception is raised
if warns:
msg = '\n'.join(set(str(w.message) for w in warns))
r_client.set("prep_template_%s" % prep_id,
dumps({'job_id': job.id, 'alert_type': 'warning',
'alert_msg': msg}))
job._set_status('success')
def delete_sample_or_column(job):
"""Deletes a sample or a column from the metadata
Parameters
----------
job : qiita_db.processing_job.ProcessingJob
The processing job performing the task
"""
with qdb.sql_connection.TRN:
param_vals = job.parameters.values
obj_class = param_vals['obj_class']
obj_id = param_vals['obj_id']
sample_or_col = param_vals['sample_or_col']
name = param_vals['name'].split(',')
if obj_class == 'SampleTemplate':
constructor = qdb.metadata_template.sample_template.SampleTemplate
elif obj_class == 'PrepTemplate':
constructor = qdb.metadata_template.prep_template.PrepTemplate
else:
raise ValueError('Unknown value "%s". Choose between '
'"SampleTemplate" and "PrepTemplate"' % obj_class)
if sample_or_col == 'columns':
del_func = constructor(obj_id).delete_column
name = name[0]
elif sample_or_col == 'samples':
del_func = constructor(obj_id).delete_samples
else:
raise ValueError('Unknown value "%s". Choose between "samples" '
'and "columns"' % sample_or_col)
del_func(name)
job._set_status('success')
def _delete_analysis_artifacts(analysis):
aids = [a.id for a in analysis.artifacts]
aids.sort(reverse=True)
for aid in aids:
qdb.artifact.Artifact.delete(aid)
qdb.analysis.Analysis.delete(analysis.id)
def delete_study(job):
"""Deletes a full study
Parameters
----------
job : qiita_db.processing_job.ProcessingJob
The processing job performing the task
"""
MT = qdb.metadata_template
with qdb.sql_connection.TRN:
study_id = job.parameters.values['study']
study = qdb.study.Study(study_id)
# deleting analyses
for analysis in study.analyses():
_delete_analysis_artifacts(analysis)
for pt in study.prep_templates():
to_delete = list(pt.artifact.descendants.nodes())
to_delete.reverse()
for td in to_delete:
qdb.artifact.Artifact.delete(td.id)
MT.prep_template.PrepTemplate.delete(pt.id)
if MT.sample_template.SampleTemplate.exists(study_id):
MT.sample_template.SampleTemplate.delete(study_id)
qdb.study.Study.delete(study_id)
job._set_status('success')
def complete_job(job):
"""Completes a job
Parameters
----------
job : qiita_db.processing_job.ProcessingJob
The processing job performing the task
"""
with qdb.sql_connection.TRN:
param_vals = job.parameters.values
payload = loads(param_vals['payload'])
if payload['success']:
artifacts = payload['artifacts']
error = None
else:
artifacts = None
error = payload['error']
c_job = qdb.processing_job.ProcessingJob(param_vals['job_id'])
try:
c_job.complete(payload['success'], artifacts, error)
except Exception:
c_job._set_error(traceback.format_exception(*exc_info()))
job._set_status('success')
if 'archive' in payload:
pass
# ToDo: Archive
# features = payload['archive']
# here we should call the method from the command to archive
def delete_analysis(job):
"""Deletes a full analysis
Parameters
----------
job : qiita_db.processing_job.ProcessingJob
The processing job performing the task
"""
with qdb.sql_connection.TRN:
analysis_id = job.parameters.values['analysis_id']
analysis = qdb.analysis.Analysis(analysis_id)
_delete_analysis_artifacts(analysis)
r_client.delete('analysis_delete_%d' % analysis_id)
job._set_status('success')
def list_remote_files(job):
"""Lists valid study files on a remote server
Parameters
----------
job : qiita_db.processing_job.ProcessingJob
The processing job performing the task
"""
with qdb.sql_connection.TRN:
url = job.parameters.values['url']
private_key = job.parameters.values['private_key']
study_id = job.parameters.values['study_id']
try:
files = list_remote(url, private_key)
r_client.set("upload_study_%s" % study_id,
dumps({'job_id': job.id, 'url': url, 'files': files}))
except Exception:
job._set_error(traceback.format_exception(*exc_info()))
else:
job._set_status('success')
finally:
# making sure to always delete the key so Qiita never keeps it
remove(private_key)
def download_remote_files(job):
"""Downloads valid study files from a remote server
Parameters
----------
job : qiita_db.processing_job.ProcessingJob
The processing job performing the task
"""
with qdb.sql_connection.TRN:
url = job.parameters.values['url']
destination = job.parameters.values['destination']
private_key = job.parameters.values['private_key']
try:
download_remote(url, private_key, destination)
except Exception:
job._set_error(traceback.format_exception(*exc_info()))
else:
job._set_status('success')
def INSDC_download(job):
"""Download an accession from INSDC
Parameters
----------
job : qiita_db.processing_job.ProcessingJob
The processing job performing the task
"""
with qdb.sql_connection.TRN:
param_vals = job.parameters.values
download_source = param_vals['download_source']
accession = param_vals['accession']
        if job.user.level != 'admin':
            job._set_error('INSDC_download is only for administrators')
            return
job_dir = join(qiita_config.working_dir, job.id)
qdb.util.create_nested_path(job_dir)
# code doing something
print(download_source, accession)
job._set_status('success')
TASK_DICT = {'build_analysis_files': build_analysis_files,
'release_validators': release_validators,
'submit_to_VAMPS': submit_to_VAMPS,
'submit_to_EBI': submit_to_EBI,
'copy_artifact': copy_artifact,
'delete_artifact': delete_artifact,
'create_sample_template': create_sample_template,
'update_sample_template': update_sample_template,
'delete_sample_template': delete_sample_template,
'update_prep_template': update_prep_template,
'delete_sample_or_column': delete_sample_or_column,
'delete_study': delete_study,
'complete_job': complete_job,
'delete_analysis': delete_analysis,
'list_remote_files': list_remote_files,
'download_remote_files': download_remote_files,
'INSDC_download': INSDC_download}
def private_task(job_id):
"""Completes a Qiita private task
Parameters
----------
job_id : str
The job id
"""
if job_id == 'register':
# We don't need to do anything here if Qiita is registering plugins
return
job = qdb.processing_job.ProcessingJob(job_id)
job.update_heartbeat_state()
task_name = job.command.name
try:
TASK_DICT[task_name](job)
except Exception as e:
log_msg = "Error on job %s: %s" % (
job.id, ''.join(traceback.format_exception(*exc_info())))
le = qdb.logger.LogEntry.create('Runtime', log_msg)
job.complete(False, error="Error (log id: %d): %s" % (le.id, e))
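# A minimal sketch of how the dispatcher is driven (the job id below is purely
# hypothetical; in production this function is invoked by the Qiita plugin
# runner):
#
#     private_task('00000000-0000-0000-0000-000000000000')
#
# which resolves job.command.name against TASK_DICT and runs the matching
# handler.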
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SoftmaxOp and LogSoftmaxOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class SoftmaxTest(test.TestCase):
def _npSoftmax(self, features, dim=-1, log=False):
    if dim == -1:
dim = len(features.shape) - 1
one_only_on_dim = list(features.shape)
one_only_on_dim[dim] = 1
is_fp16 = features.dtype == np.float16
if is_fp16:
      # Do the compute in fp32 and cast the result back to fp16.
features = features.astype(np.float32)
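    # Subtract the per-row max before exponentiating so the exponentials cannot
    # overflow; the shift does not change the softmax result.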
e = np.exp(features - np.reshape(
np.amax(
features, axis=dim), one_only_on_dim))
softmax = e / np.reshape(np.sum(e, axis=dim), one_only_on_dim)
if log:
res = np.log(softmax)
else:
res = softmax
if is_fp16:
res = res.astype(np.float16)
return res
def _testSoftmax(self, np_features, dim=-1, log=False, use_gpu=False):
# A previous version of the code checked the op name rather than the op type
# to distinguish between log and non-log. Use an arbitrary name to catch
# this bug in future.
name = "arbitrary"
np_softmax = self._npSoftmax(np_features, dim=dim, log=log)
with self.cached_session(use_gpu=use_gpu):
if log:
tf_softmax = nn_ops.log_softmax(np_features, axis=dim, name=name)
else:
tf_softmax = nn_ops.softmax(np_features, axis=dim, name=name)
out = tf_softmax.eval()
self.assertAllCloseAccordingToType(np_softmax, out)
self.assertShapeEqual(np_softmax, tf_softmax)
if not log:
# Bonus check: the softmaxes should add to one in dimension dim.
sum_along_dim = np.sum(out, axis=dim)
self.assertAllCloseAccordingToType(
np.ones(sum_along_dim.shape), sum_along_dim)
def _testAll(self, features):
self._testSoftmax(features, use_gpu=True)
self._testSoftmax(features, log=True, use_gpu=True)
self._testOverflow(use_gpu=True)
def testNpSoftmax(self):
features = [[1., 1., 1., 1.], [1., 2., 3., 4.]]
# Batch 0: All exps are 1. The expected result is
# Softmaxes = [0.25, 0.25, 0.25, 0.25]
# LogSoftmaxes = [-1.386294, -1.386294, -1.386294, -1.386294]
#
# Batch 1:
# exps = [1., 2.718, 7.389, 20.085]
# sum = 31.192
# Softmaxes = exps / sum = [0.0320586, 0.08714432, 0.23688282, 0.64391426]
# LogSoftmaxes = [-3.44019 , -2.44019 , -1.44019 , -0.44019]
np_sm = self._npSoftmax(np.array(features))
self.assertAllClose(
np.array([[0.25, 0.25, 0.25, 0.25],
[0.0320586, 0.08714432, 0.23688282, 0.64391426]]),
np_sm,
rtol=1.e-5,
atol=1.e-5)
np_lsm = self._npSoftmax(np.array(features), log=True)
self.assertAllClose(
np.array([[-1.386294, -1.386294, -1.386294, -1.386294],
[-3.4401897, -2.4401897, -1.4401897, -0.4401897]]),
np_lsm,
rtol=1.e-5,
atol=1.e-5)
def _testOverflow(self, use_gpu=False):
if use_gpu:
type = np.float32 # pylint: disable=redefined-builtin
else:
type = np.float64 # pylint: disable=redefined-builtin
max = np.finfo(type).max # pylint: disable=redefined-builtin
features = np.array([[1., 1., 1., 1.], [max, 1., 2., 3.]]).astype(type)
with self.cached_session(use_gpu=use_gpu):
tf_log_softmax = nn_ops.log_softmax(features)
out = tf_log_softmax.eval()
self.assertAllClose(
np.array([[-1.386294, -1.386294, -1.386294, -1.386294],
[0, -max, -max, -max]]),
out,
rtol=1.e-5,
atol=1.e-5)
def testFloat(self):
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float32))
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testFloatGPU(self):
if test.is_gpu_available(cuda_only=True):
rows = [2**x + np.random.randint(0, 16) for x in range(1, 4)]
cols = [2**x + np.random.randint(0, 16) for x in range(1, 4)]
for row, col in zip(rows, cols):
logging.info("Testing softmax float dtype in shape [%d, %d]", row, col)
data = np.random.rand(row, col)
self._testAll(data.astype(np.float32))
def testHalf(self):
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float16))
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testHalfGPU(self):
if test.is_gpu_available(cuda_only=True):
rows = [2**x + np.random.randint(0, 16) for x in range(1, 4)]
cols = [2**x + np.random.randint(0, 16) for x in range(1, 4)]
for row, col in zip(rows, cols):
logging.info("Testing softmax half dtype in shape [%d, %d]", row, col)
data = np.random.rand(row, col)
self._testAll(data.astype(np.float16))
def testDouble(self):
self._testSoftmax(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float64))
self._testOverflow()
def test1DTensorAsInput(self):
self._testSoftmax(
np.array([3., 2., 3., 9.]).astype(np.float64), use_gpu=False)
self._testOverflow(use_gpu=False)
def test1DTensorAsInputNoReshape(self):
self._testSoftmax(
np.array([3., 2., 3., 9.]).astype(np.float64), use_gpu=False)
self._testOverflow(use_gpu=False)
def test3DTensorAsInput(self):
self._testSoftmax(
np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32),
use_gpu=False)
self._testOverflow(use_gpu=False)
def test3DTensorAsInputNoReshape(self):
self._testSoftmax(
np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32),
use_gpu=False)
self._testOverflow(use_gpu=False)
def testAlongFirstDimension(self):
self._testSoftmax(
np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32),
dim=0,
use_gpu=False)
self._testOverflow(use_gpu=False)
def testAlongSecondDimension(self):
self._testSoftmax(
np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32),
dim=1,
use_gpu=False)
self._testOverflow(use_gpu=False)
def testShapeInference(self):
op = nn_ops.softmax([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]])
self.assertEqual([3, 2, 4], op.get_shape())
def testEmptyInput(self):
with self.cached_session():
x = array_ops.placeholder(dtypes.float32, shape=[0, 3])
self.assertEqual(0, array_ops.size(x).eval())
# reshape would raise if logits is empty
with self.assertRaises(errors_impl.InvalidArgumentError):
nn_ops.softmax(x, axis=0).eval()
def testDimTooLarge(self):
with self.cached_session():
# Use placeholder to make sure we get runtime error instead of shape
# inference error.
dim = array_ops.placeholder_with_default(100, shape=[])
with self.assertRaises(errors_impl.InvalidArgumentError):
nn_ops.softmax([1., 2., 3., 4.], axis=dim).eval()
def testLargeDims(self):
# Make sure that we properly handle large inputs. See
# https://github.com/tensorflow/tensorflow/issues/4425 for details
for dims in [129, 256]:
ones = np.random.rand(dims, dims).astype(np.float32)
np_softmax = self._npSoftmax(ones)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu) as sess:
x = array_ops.placeholder(dtypes.float32)
y = nn_ops.softmax(x)
tf_softmax = sess.run(y, feed_dict={x: ones})
self.assertAllClose(tf_softmax, np_softmax)
if __name__ == "__main__":
test.main()
|
|
from peewee import *
from .base import ModelTestCase
from .base import TestModel
from .base import get_in_memory_db
from .base import requires_models
class User(TestModel):
username = TextField(unique=True)
class Note(TestModel):
text = TextField()
users = ManyToManyField(User)
NoteUserThrough = Note.users.get_through_model()
AltThroughDeferred = DeferredThroughModel()
class AltNote(TestModel):
text = TextField()
users = ManyToManyField(User, through_model=AltThroughDeferred)
class AltThroughModel(TestModel):
user = ForeignKeyField(User, backref='_xx_rel')
note = ForeignKeyField(AltNote, backref='_xx_rel')
class Meta:
primary_key = CompositeKey('user', 'note')
AltThroughDeferred.set_model(AltThroughModel)
class Student(TestModel):
name = TextField()
CourseStudentDeferred = DeferredThroughModel()
class Course(TestModel):
name = TextField()
students = ManyToManyField(Student, backref='+')
students2 = ManyToManyField(Student, through_model=CourseStudentDeferred)
CourseStudent = Course.students.get_through_model()
class CourseStudent2(TestModel):
course = ForeignKeyField(Course, backref='+')
student = ForeignKeyField(Student, backref='+')
CourseStudentDeferred.set_model(CourseStudent2)
class Color(TestModel):
name = TextField(unique=True)
LogoColorDeferred = DeferredThroughModel()
class Logo(TestModel):
name = TextField(unique=True)
colors = ManyToManyField(Color, through_model=LogoColorDeferred)
class LogoColor(TestModel):
logo = ForeignKeyField(Logo, field=Logo.name)
color = ForeignKeyField(Color, field=Color.name) # FK to non-PK column.
LogoColorDeferred.set_model(LogoColor)
class TestManyToManyFKtoNonPK(ModelTestCase):
database = get_in_memory_db()
requires = [Color, Logo, LogoColor]
def test_manytomany_fk_to_non_pk(self):
red = Color.create(name='red')
green = Color.create(name='green')
blue = Color.create(name='blue')
lrg = Logo.create(name='logo-rg')
lrb = Logo.create(name='logo-rb')
lrgb = Logo.create(name='logo-rgb')
lrg.colors.add([red, green])
lrb.colors.add([red, blue])
lrgb.colors.add([red, green, blue])
def assertColors(logo, expected):
colors = [c.name for c in logo.colors.order_by(Color.name)]
self.assertEqual(colors, expected)
assertColors(lrg, ['green', 'red'])
assertColors(lrb, ['blue', 'red'])
assertColors(lrgb, ['blue', 'green', 'red'])
def assertLogos(color, expected):
logos = [l.name for l in color.logos.order_by(Logo.name)]
self.assertEqual(logos, expected)
assertLogos(red, ['logo-rb', 'logo-rg', 'logo-rgb'])
assertLogos(green, ['logo-rg', 'logo-rgb'])
assertLogos(blue, ['logo-rb', 'logo-rgb'])
# Verify we can delete data as well.
lrg.colors.remove(red)
self.assertEqual([c.name for c in lrg.colors], ['green'])
blue.logos.remove(lrb)
self.assertEqual([c.name for c in lrb.colors], ['red'])
# Verify we can insert using a SELECT query.
lrg.colors.add(Color.select().where(Color.name != 'blue'), True)
assertColors(lrg, ['green', 'red'])
lrb.colors.add(Color.select().where(Color.name == 'blue'))
assertColors(lrb, ['blue', 'red'])
# Verify we can insert logos using a SELECT query.
black = Color.create(name='black')
black.logos.add(Logo.select().where(Logo.name != 'logo-rgb'))
assertLogos(black, ['logo-rb', 'logo-rg'])
assertColors(lrb, ['black', 'blue', 'red'])
assertColors(lrg, ['black', 'green', 'red'])
assertColors(lrgb, ['blue', 'green', 'red'])
# Verify we can delete using a SELECT query.
lrg.colors.remove(Color.select().where(Color.name == 'red'))
assertColors(lrg, ['black', 'green'])
black.logos.remove(Logo.select().where(Logo.name == 'logo-rg'))
assertLogos(black, ['logo-rb'])
# Verify we can clear.
lrg.colors.clear()
assertColors(lrg, [])
assertColors(lrb, ['black', 'blue', 'red']) # Not affected.
black.logos.clear()
assertLogos(black, [])
assertLogos(red, ['logo-rb', 'logo-rgb'])
class TestManyToManyBackrefBehavior(ModelTestCase):
database = get_in_memory_db()
requires = [Student, Course, CourseStudent, CourseStudent2]
def setUp(self):
super(TestManyToManyBackrefBehavior, self).setUp()
math = Course.create(name='math')
engl = Course.create(name='engl')
huey, mickey, zaizee = [Student.create(name=name)
for name in ('huey', 'mickey', 'zaizee')]
# Set up relationships.
math.students.add([huey, zaizee])
engl.students.add([mickey])
math.students2.add([mickey])
engl.students2.add([huey, zaizee])
def test_manytomanyfield_disabled_backref(self):
math = Course.get(name='math')
query = math.students.order_by(Student.name)
self.assertEqual([s.name for s in query], ['huey', 'zaizee'])
huey = Student.get(name='huey')
math.students.remove(huey)
self.assertEqual([s.name for s in math.students], ['zaizee'])
# The backref is via the CourseStudent2 through-model.
self.assertEqual([c.name for c in huey.courses], ['engl'])
def test_through_model_disabled_backrefs(self):
# Here we're testing the case where the many-to-many field does not
# explicitly disable back-references, but the foreign-keys on the
# through model have disabled back-references.
engl = Course.get(name='engl')
query = engl.students2.order_by(Student.name)
self.assertEqual([s.name for s in query], ['huey', 'zaizee'])
zaizee = Student.get(Student.name == 'zaizee')
engl.students2.remove(zaizee)
self.assertEqual([s.name for s in engl.students2], ['huey'])
math = Course.get(name='math')
self.assertEqual([s.name for s in math.students2], ['mickey'])
class TestManyToManyInheritance(ModelTestCase):
def test_manytomany_inheritance(self):
class BaseModel(TestModel):
class Meta:
database = self.database
class User(BaseModel):
username = TextField()
class Project(BaseModel):
name = TextField()
users = ManyToManyField(User, backref='projects')
def subclass_project():
class VProject(Project):
pass
# We cannot subclass Project, because the many-to-many field "users"
# will be inherited, but the through-model does not contain a
# foreign-key to VProject. The through-model in this case is
# ProjectUsers, which has foreign-keys to project and user.
self.assertRaises(ValueError, subclass_project)
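        # For reference, the implicitly created through model (ProjectUsers,
        # per the comment above) is roughly equivalent to this sketch:
        #
        #     class ProjectUsers(BaseModel):
        #         project = ForeignKeyField(Project)
        #         user = ForeignKeyField(User)
        #
        # which the assertions below check via rel_model.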
PThrough = Project.users.through_model
self.assertTrue(PThrough.project.rel_model is Project)
self.assertTrue(PThrough.user.rel_model is User)
class TestManyToMany(ModelTestCase):
database = get_in_memory_db()
requires = [User, Note, NoteUserThrough, AltNote, AltThroughModel]
user_to_note = {
'gargie': [1, 2],
'huey': [2, 3],
'mickey': [3, 4],
'zaizee': [4, 5],
}
def setUp(self):
super(TestManyToMany, self).setUp()
for username in sorted(self.user_to_note):
User.create(username=username)
for i in range(5):
Note.create(text='note-%s' % (i + 1))
def test_through_model(self):
self.assertEqual(len(NoteUserThrough._meta.fields), 3)
fields = NoteUserThrough._meta.fields
self.assertEqual(sorted(fields), ['id', 'note', 'user'])
note_field = fields['note']
self.assertEqual(note_field.rel_model, Note)
self.assertFalse(note_field.null)
user_field = fields['user']
self.assertEqual(user_field.rel_model, User)
self.assertFalse(user_field.null)
def _set_data(self):
for username, notes in self.user_to_note.items():
user = User.get(User.username == username)
for note in notes:
NoteUserThrough.create(
note=Note.get(Note.text == 'note-%s' % note),
user=user)
def assertNotes(self, query, expected):
notes = [note.text for note in query]
self.assertEqual(sorted(notes),
['note-%s' % i for i in sorted(expected)])
def assertUsers(self, query, expected):
usernames = [user.username for user in query]
self.assertEqual(sorted(usernames), sorted(expected))
def test_accessor_query(self):
self._set_data()
gargie, huey, mickey, zaizee = User.select().order_by(User.username)
with self.assertQueryCount(1):
self.assertNotes(gargie.notes, [1, 2])
with self.assertQueryCount(1):
self.assertNotes(zaizee.notes, [4, 5])
with self.assertQueryCount(2):
self.assertNotes(User.create(username='x').notes, [])
n1, n2, n3, n4, n5 = Note.select().order_by(Note.text)
with self.assertQueryCount(1):
self.assertUsers(n1.users, ['gargie'])
with self.assertQueryCount(1):
self.assertUsers(n2.users, ['gargie', 'huey'])
with self.assertQueryCount(1):
self.assertUsers(n5.users, ['zaizee'])
with self.assertQueryCount(2):
self.assertUsers(Note.create(text='x').users, [])
def test_prefetch_notes(self):
self._set_data()
with self.assertQueryCount(3):
gargie, huey, mickey, zaizee = prefetch(
User.select().order_by(User.username),
NoteUserThrough,
Note)
with self.assertQueryCount(0):
self.assertNotes(gargie.notes, [1, 2])
with self.assertQueryCount(0):
self.assertNotes(zaizee.notes, [4, 5])
with self.assertQueryCount(2):
self.assertNotes(User.create(username='x').notes, [])
def test_prefetch_users(self):
self._set_data()
with self.assertQueryCount(3):
n1, n2, n3, n4, n5 = prefetch(
Note.select().order_by(Note.text),
NoteUserThrough,
User)
with self.assertQueryCount(0):
self.assertUsers(n1.users, ['gargie'])
with self.assertQueryCount(0):
self.assertUsers(n2.users, ['gargie', 'huey'])
with self.assertQueryCount(0):
self.assertUsers(n5.users, ['zaizee'])
with self.assertQueryCount(2):
self.assertUsers(Note.create(text='x').users, [])
def test_query_filtering(self):
self._set_data()
gargie, huey, mickey, zaizee = User.select().order_by(User.username)
with self.assertQueryCount(1):
notes = gargie.notes.where(Note.text != 'note-2')
self.assertNotes(notes, [1])
def test_set_value(self):
self._set_data()
gargie = User.get(User.username == 'gargie')
huey = User.get(User.username == 'huey')
n1, n2, n3, n4, n5 = Note.select().order_by(Note.text)
with self.assertQueryCount(2):
gargie.notes = n3
self.assertNotes(gargie.notes, [3])
self.assertUsers(n3.users, ['gargie', 'huey', 'mickey'])
self.assertUsers(n1.users, [])
gargie.notes = [n3, n4]
self.assertNotes(gargie.notes, [3, 4])
self.assertUsers(n3.users, ['gargie', 'huey', 'mickey'])
self.assertUsers(n4.users, ['gargie', 'mickey', 'zaizee'])
def test_set_query(self):
huey = User.get(User.username == 'huey')
with self.assertQueryCount(2):
huey.notes = Note.select().where(~Note.text.endswith('4'))
self.assertNotes(huey.notes, [1, 2, 3, 5])
def test_add(self):
gargie = User.get(User.username == 'gargie')
huey = User.get(User.username == 'huey')
n1, n2, n3, n4, n5 = Note.select().order_by(Note.text)
gargie.notes.add([n1, n2])
self.assertNotes(gargie.notes, [1, 2])
self.assertUsers(n1.users, ['gargie'])
self.assertUsers(n2.users, ['gargie'])
for note in [n3, n4, n5]:
self.assertUsers(note.users, [])
with self.assertQueryCount(1):
huey.notes.add(Note.select().where(
fn.substr(Note.text, 6, 1) << ['1', '3', '5']))
self.assertNotes(huey.notes, [1, 3, 5])
self.assertUsers(n1.users, ['gargie', 'huey'])
self.assertUsers(n2.users, ['gargie'])
self.assertUsers(n3.users, ['huey'])
self.assertUsers(n4.users, [])
self.assertUsers(n5.users, ['huey'])
with self.assertQueryCount(1):
gargie.notes.add(n4)
self.assertNotes(gargie.notes, [1, 2, 4])
with self.assertQueryCount(2):
n3.users.add(
User.select().where(User.username != 'gargie'),
clear_existing=True)
self.assertUsers(n3.users, ['huey', 'mickey', 'zaizee'])
def test_add_by_pk(self):
huey = User.get(User.username == 'huey')
n1, n2, n3 = Note.select().order_by(Note.text).limit(3)
huey.notes.add([n1.id, n2.id])
self.assertNotes(huey.notes, [1, 2])
self.assertUsers(n1.users, ['huey'])
self.assertUsers(n2.users, ['huey'])
self.assertUsers(n3.users, [])
def test_unique(self):
n1 = Note.get(Note.text == 'note-1')
huey = User.get(User.username == 'huey')
def add_user(note, user):
with self.assertQueryCount(1):
note.users.add(user)
add_user(n1, huey)
self.assertRaises(IntegrityError, add_user, n1, huey)
add_user(n1, User.get(User.username == 'zaizee'))
self.assertUsers(n1.users, ['huey', 'zaizee'])
def test_remove(self):
self._set_data()
gargie, huey, mickey, zaizee = User.select().order_by(User.username)
n1, n2, n3, n4, n5 = Note.select().order_by(Note.text)
with self.assertQueryCount(1):
gargie.notes.remove([n1, n2, n3])
self.assertNotes(gargie.notes, [])
self.assertNotes(huey.notes, [2, 3])
with self.assertQueryCount(1):
huey.notes.remove(Note.select().where(
Note.text << ['note-2', 'note-4', 'note-5']))
self.assertNotes(huey.notes, [3])
self.assertNotes(mickey.notes, [3, 4])
self.assertNotes(zaizee.notes, [4, 5])
with self.assertQueryCount(1):
n4.users.remove([gargie, mickey])
self.assertUsers(n4.users, ['zaizee'])
with self.assertQueryCount(1):
n5.users.remove(User.select())
self.assertUsers(n5.users, [])
def test_remove_by_id(self):
self._set_data()
gargie, huey = User.select().order_by(User.username).limit(2)
n1, n2, n3, n4 = Note.select().order_by(Note.text).limit(4)
gargie.notes.add([n3, n4])
with self.assertQueryCount(1):
gargie.notes.remove([n1.id, n3.id])
self.assertNotes(gargie.notes, [2, 4])
self.assertNotes(huey.notes, [2, 3])
def test_clear(self):
gargie = User.get(User.username == 'gargie')
huey = User.get(User.username == 'huey')
gargie.notes = Note.select()
huey.notes = Note.select()
self.assertEqual(gargie.notes.count(), 5)
self.assertEqual(huey.notes.count(), 5)
gargie.notes.clear()
self.assertEqual(gargie.notes.count(), 0)
self.assertEqual(huey.notes.count(), 5)
n1 = Note.get(Note.text == 'note-1')
n2 = Note.get(Note.text == 'note-2')
n1.users = User.select()
n2.users = User.select()
self.assertEqual(n1.users.count(), 4)
self.assertEqual(n2.users.count(), 4)
n1.users.clear()
self.assertEqual(n1.users.count(), 0)
self.assertEqual(n2.users.count(), 4)
def test_manual_through(self):
gargie, huey, mickey, zaizee = User.select().order_by(User.username)
alt_notes = []
for i in range(5):
alt_notes.append(AltNote.create(text='note-%s' % (i + 1)))
self.assertNotes(gargie.altnotes, [])
for alt_note in alt_notes:
self.assertUsers(alt_note.users, [])
n1, n2, n3, n4, n5 = alt_notes
# Test adding relationships by setting the descriptor.
gargie.altnotes = [n1, n2]
with self.assertQueryCount(2):
huey.altnotes = AltNote.select().where(
fn.substr(AltNote.text, 6, 1) << ['1', '3', '5'])
mickey.altnotes.add([n1, n4])
with self.assertQueryCount(2):
zaizee.altnotes = AltNote.select()
# Test that the notes were added correctly.
with self.assertQueryCount(1):
self.assertNotes(gargie.altnotes, [1, 2])
with self.assertQueryCount(1):
self.assertNotes(huey.altnotes, [1, 3, 5])
with self.assertQueryCount(1):
self.assertNotes(mickey.altnotes, [1, 4])
with self.assertQueryCount(1):
self.assertNotes(zaizee.altnotes, [1, 2, 3, 4, 5])
# Test removing notes.
with self.assertQueryCount(1):
gargie.altnotes.remove(n1)
self.assertNotes(gargie.altnotes, [2])
with self.assertQueryCount(1):
huey.altnotes.remove([n1, n2, n3])
self.assertNotes(huey.altnotes, [5])
with self.assertQueryCount(1):
sq = (AltNote
.select()
.where(fn.SUBSTR(AltNote.text, 6, 1) << ['1', '2', '4']))
zaizee.altnotes.remove(sq)
self.assertNotes(zaizee.altnotes, [3, 5])
# Test the backside of the relationship.
n1.users = User.select().where(User.username != 'gargie')
with self.assertQueryCount(1):
self.assertUsers(n1.users, ['huey', 'mickey', 'zaizee'])
with self.assertQueryCount(1):
self.assertUsers(n2.users, ['gargie'])
with self.assertQueryCount(1):
self.assertUsers(n3.users, ['zaizee'])
with self.assertQueryCount(1):
self.assertUsers(n4.users, ['mickey'])
with self.assertQueryCount(1):
self.assertUsers(n5.users, ['huey', 'zaizee'])
with self.assertQueryCount(1):
n1.users.remove(User.select())
with self.assertQueryCount(1):
n5.users.remove([gargie, huey])
with self.assertQueryCount(1):
self.assertUsers(n1.users, [])
with self.assertQueryCount(1):
self.assertUsers(n5.users, ['zaizee'])
class Person(TestModel):
name = CharField()
class Account(TestModel):
person = ForeignKeyField(Person, primary_key=True)
class AccountList(TestModel):
name = CharField()
accounts = ManyToManyField(Account, backref='lists')
AccountListThrough = AccountList.accounts.get_through_model()
class TestForeignKeyPrimaryKeyManyToMany(ModelTestCase):
database = get_in_memory_db()
requires = [Person, Account, AccountList, AccountListThrough]
test_data = (
('huey', ('cats', 'evil')),
('zaizee', ('cats', 'good')),
('mickey', ('dogs', 'good')),
('zombie', ()),
)
def setUp(self):
super(TestForeignKeyPrimaryKeyManyToMany, self).setUp()
name2list = {}
for name, lists in self.test_data:
p = Person.create(name=name)
a = Account.create(person=p)
for l in lists:
if l not in name2list:
name2list[l] = AccountList.create(name=l)
name2list[l].accounts.add(a)
def account_for(self, name):
return Account.select().join(Person).where(Person.name == name).get()
def assertLists(self, l1, l2):
self.assertEqual(sorted(list(l1)), sorted(list(l2)))
def test_pk_is_fk(self):
list2names = {}
for name, lists in self.test_data:
account = self.account_for(name)
self.assertLists([l.name for l in account.lists],
lists)
for l in lists:
list2names.setdefault(l, [])
list2names[l].append(name)
for list_name, names in list2names.items():
account_list = AccountList.get(AccountList.name == list_name)
self.assertLists([s.person.name for s in account_list.accounts],
names)
def test_empty(self):
al = AccountList.create(name='empty')
self.assertEqual(list(al.accounts), [])
|
|
from dateutil import parser
from hs_core.hydroshare.utils import get_resource_file_name_and_extension
from hs_file_types.models import GeoRasterLogicalFile, GeoRasterFileMetaData, GenericLogicalFile, \
NetCDFLogicalFile
def assert_raster_file_type_metadata(self):
# test the resource now has 2 files (vrt file added as part of metadata extraction)
self.assertEqual(self.composite_resource.files.all().count(), 2)
# check that the 2 resource files are now associated with GeoRasterLogicalFile
for res_file in self.composite_resource.files.all():
self.assertEqual(res_file.logical_file_type_name, "GeoRasterLogicalFile")
self.assertEqual(res_file.has_logical_file, True)
self.assertTrue(isinstance(res_file.logical_file, GeoRasterLogicalFile))
# check that we put the 2 files in a new folder (small_logan)
for res_file in self.composite_resource.files.all():
file_path, base_file_name, _ = get_resource_file_name_and_extension(res_file)
expected_file_path = "{}/data/contents/small_logan/{}"
expected_file_path = expected_file_path.format(self.composite_resource.root_path,
base_file_name)
self.assertEqual(file_path, expected_file_path)
# check that there is no GenericLogicalFile object
self.assertEqual(GenericLogicalFile.objects.count(), 0)
# check that there is one GeoRasterLogicalFile object
self.assertEqual(GeoRasterLogicalFile.objects.count(), 1)
res_file = self.composite_resource.files.first()
# check that the logicalfile is associated with 2 files
logical_file = res_file.logical_file
self.assertEqual(logical_file.dataset_name, 'small_logan')
self.assertEqual(logical_file.has_metadata, True)
self.assertEqual(logical_file.files.all().count(), 2)
self.assertEqual(set(self.composite_resource.files.all()),
set(logical_file.files.all()))
# test that size property of the logical file is equal to the sum of the sizes of all files
# that are part of the logical file
self.assertEqual(logical_file.size, sum([f.size for f in logical_file.files.all()]))
# test that there should be 1 object of type GeoRasterFileMetaData
self.assertEqual(GeoRasterFileMetaData.objects.count(), 1)
# test that the metadata associated with the logical file is of type GeoRasterFileMetaData
self.assertTrue(isinstance(logical_file.metadata, GeoRasterFileMetaData))
# there should be 2 format elements associated with the resource
self.assertEqual(self.composite_resource.metadata.formats.all().count(), 2)
self.assertEqual(
self.composite_resource.metadata.formats.all().filter(value='application/vrt').count(),
1)
self.assertEqual(self.composite_resource.metadata.formats.all().filter(
value='image/tiff').count(), 1)
# test extracted metadata for the file type
# geo raster file type should have all the metadata elements
self.assertEqual(logical_file.metadata.has_all_required_elements(), True)
# there should be 1 coverage element - box type
self.assertNotEqual(logical_file.metadata.spatial_coverage, None)
self.assertEqual(logical_file.metadata.spatial_coverage.type, 'box')
box_coverage = logical_file.metadata.spatial_coverage
self.assertEqual(box_coverage.value['projection'], 'WGS 84 EPSG:4326')
self.assertEqual(box_coverage.value['units'], 'Decimal degrees')
self.assertEqual(box_coverage.value['northlimit'], 42.0500269597691)
self.assertEqual(box_coverage.value['eastlimit'], -111.57773718106195)
self.assertEqual(box_coverage.value['southlimit'], 41.98722286029891)
self.assertEqual(box_coverage.value['westlimit'], -111.69756293084055)
# testing extended metadata element: original coverage
ori_coverage = logical_file.metadata.originalCoverage
self.assertNotEqual(ori_coverage, None)
self.assertEqual(ori_coverage.value['northlimit'], 4655492.446916306)
self.assertEqual(ori_coverage.value['eastlimit'], 452144.01909127034)
self.assertEqual(ori_coverage.value['southlimit'], 4648592.446916306)
self.assertEqual(ori_coverage.value['westlimit'], 442274.01909127034)
self.assertEqual(ori_coverage.value['units'], 'meter')
self.assertEqual(ori_coverage.value['projection'],
'NAD83 / UTM zone 12N')
# testing extended metadata element: cell information
cell_info = logical_file.metadata.cellInformation
self.assertEqual(cell_info.rows, 230)
self.assertEqual(cell_info.columns, 329)
self.assertEqual(cell_info.cellSizeXValue, 30.0)
self.assertEqual(cell_info.cellSizeYValue, 30.0)
self.assertEqual(cell_info.cellDataType, 'Float32')
# testing extended metadata element: band information
self.assertEqual(logical_file.metadata.bandInformations.count(), 1)
band_info = logical_file.metadata.bandInformations.first()
self.assertEqual(band_info.noDataValue, '-3.40282346639e+38')
self.assertEqual(band_info.maximumValue, '2880.00708008')
self.assertEqual(band_info.minimumValue, '1870.63659668')
def assert_netcdf_file_type_metadata(self, title):
# check that there is one NetCDFLogicalFile object
self.assertEqual(NetCDFLogicalFile.objects.count(), 1)
# check that there is no GenericLogicalFile object
self.assertEqual(GenericLogicalFile.objects.count(), 0)
# There should now be 2 files
self.assertEqual(self.composite_resource.files.count(), 2)
# check that we put the 2 files in a new folder (netcdf_valid)
for res_file in self.composite_resource.files.all():
file_path, base_file_name = res_file.full_path, res_file.file_name
expected_file_path = u"{}/data/contents/netcdf_valid/{}"
expected_file_path = expected_file_path.format(self.composite_resource.root_path,
base_file_name)
self.assertEqual(file_path, expected_file_path)
res_file = self.composite_resource.files.first()
logical_file = res_file.logical_file
# logical file should be associated with 2 files
self.assertEqual(logical_file.files.all().count(), 2)
file_extensions = set([f.extension for f in logical_file.files.all()])
self.assertIn('.nc', file_extensions)
self.assertIn('.txt', file_extensions)
# test extracted netcdf file type metadata
# there should be 2 content files
self.assertEqual(self.composite_resource.files.all().count(), 2)
# test core metadata after metadata extraction
# title = "Test NetCDF File Type Metadata"
self.assertEqual(self.composite_resource.metadata.title.value, title)
# there should be an abstract element
self.assertNotEqual(self.composite_resource.metadata.description, None)
extracted_abstract = "This netCDF data is the simulation output from Utah Energy " \
"Balance (UEB) model.It includes the simulation result " \
"of snow water equivalent during the period " \
"Oct. 2009 to June 2010 for TWDEF site in Utah."
self.assertEqual(self.composite_resource.metadata.description.abstract, extracted_abstract)
# there should be no source element
self.assertEqual(self.composite_resource.metadata.sources.all().count(), 0)
# there should be one license element:
self.assertNotEquals(self.composite_resource.metadata.rights.statement, 1)
# there should be no relation element
self.assertEqual(self.composite_resource.metadata.relations.all().count(), 0)
# there should be 2 creators
self.assertEqual(self.composite_resource.metadata.creators.all().count(), 2)
# there should be one contributor
self.assertEqual(self.composite_resource.metadata.contributors.all().count(), 1)
# there should be 2 coverage element - box type and period type
self.assertEqual(self.composite_resource.metadata.coverages.all().count(), 2)
self.assertEqual(self.composite_resource.metadata.coverages.all().filter(type='box').
count(), 1)
self.assertEqual(self.composite_resource.metadata.coverages.all().filter(type='period').
count(), 1)
box_coverage = self.composite_resource.metadata.coverages.all().filter(type='box').first()
self.assertEqual(box_coverage.value['projection'], 'WGS 84 EPSG:4326')
self.assertEqual(box_coverage.value['units'], 'Decimal degrees')
self.assertEqual(box_coverage.value['northlimit'], 41.867126409)
self.assertEqual(box_coverage.value['eastlimit'], -111.505940368)
self.assertEqual(box_coverage.value['southlimit'], 41.8639080745)
self.assertEqual(box_coverage.value['westlimit'], -111.51138808)
temporal_coverage = self.composite_resource.metadata.coverages.all().filter(
type='period').first()
self.assertEqual(parser.parse(temporal_coverage.value['start']).date(),
parser.parse('10/01/2009').date())
self.assertEqual(parser.parse(temporal_coverage.value['end']).date(),
parser.parse('05/30/2010').date())
# there should be 2 format elements
self.assertEqual(self.composite_resource.metadata.formats.all().count(), 2)
self.assertEqual(self.composite_resource.metadata.formats.all().
filter(value='text/plain').count(), 1)
self.assertEqual(self.composite_resource.metadata.formats.all().
filter(value='application/x-netcdf').count(), 1)
# test file type metadata
res_file = self.composite_resource.files.first()
logical_file = res_file.logical_file
# there should be one keyword element
self.assertEqual(len(logical_file.metadata.keywords), 1)
self.assertIn('Snow water equivalent', logical_file.metadata.keywords)
# test dataset_name attribute of the logical file which should have the extracted value
dataset_title = "Snow water equivalent estimation at TWDEF site from Oct 2009 to June 2010"
self.assertEqual(logical_file.dataset_name, dataset_title)
# testing extended metadata element: original coverage
ori_coverage = logical_file.metadata.originalCoverage
self.assertNotEquals(ori_coverage, None)
self.assertEqual(ori_coverage.projection_string_type, 'Proj4 String')
proj_text = u'+proj=tmerc +y_0=0.0 +k_0=0.9996 +x_0=500000.0 +lat_0=0.0 +lon_0=-111.0'
self.assertEqual(ori_coverage.projection_string_text, proj_text)
self.assertEqual(ori_coverage.value['northlimit'], '4.63515e+06')
self.assertEqual(ori_coverage.value['eastlimit'], '458010.0')
self.assertEqual(ori_coverage.value['southlimit'], '4.63479e+06')
self.assertEqual(ori_coverage.value['westlimit'], '457560.0')
self.assertEqual(ori_coverage.value['units'], 'Meter')
self.assertEqual(ori_coverage.value['projection'], 'transverse_mercator')
# testing extended metadata element: variables
self.assertEqual(logical_file.metadata.variables.all().count(), 5)
# test time variable
var_time = logical_file.metadata.variables.all().filter(name='time').first()
self.assertNotEquals(var_time, None)
self.assertEqual(var_time.unit, 'hours since 2009-10-1 0:0:00 UTC')
self.assertEqual(var_time.type, 'Float')
self.assertEqual(var_time.shape, 'time')
self.assertEqual(var_time.descriptive_name, 'time')
# test x variable
var_x = logical_file.metadata.variables.all().filter(name='x').first()
self.assertNotEquals(var_x, None)
self.assertEqual(var_x.unit, 'Meter')
self.assertEqual(var_x.type, 'Float')
self.assertEqual(var_x.shape, 'x')
self.assertEqual(var_x.descriptive_name, 'x coordinate of projection')
# test y variable
var_y = logical_file.metadata.variables.all().filter(name='y').first()
self.assertNotEquals(var_y, None)
self.assertEqual(var_y.unit, 'Meter')
self.assertEqual(var_y.type, 'Float')
self.assertEqual(var_y.shape, 'y')
self.assertEqual(var_y.descriptive_name, 'y coordinate of projection')
# test SWE variable
var_swe = logical_file.metadata.variables.all().filter(name='SWE').first()
self.assertNotEquals(var_swe, None)
self.assertEqual(var_swe.unit, 'm')
self.assertEqual(var_swe.type, 'Float')
self.assertEqual(var_swe.shape, 'y,x,time')
self.assertEqual(var_swe.descriptive_name, 'Snow water equivalent')
self.assertEqual(var_swe.method, 'model simulation of UEB model')
self.assertEqual(var_swe.missing_value, '-9999')
# test grid mapping variable
var_grid = logical_file.metadata.variables.all(). \
filter(name='transverse_mercator').first()
self.assertNotEquals(var_grid, None)
self.assertEqual(var_grid.unit, 'Unknown')
self.assertEqual(var_grid.type, 'Unknown')
self.assertEqual(var_grid.shape, 'Not defined')
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for :class:`HadoopFileSystem`."""
# pytype: skip-file
import io
import logging
import posixpath
import unittest
from parameterized import parameterized_class
from apache_beam.io import hadoopfilesystem as hdfs
from apache_beam.io.filesystem import BeamIOError
from apache_beam.options.pipeline_options import HadoopFileSystemOptions
from apache_beam.options.pipeline_options import PipelineOptions
class FakeFile(io.BytesIO):
"""File object for FakeHdfs"""
__hash__ = None # type: ignore[assignment]
def __init__(self, path, mode='', type='FILE'):
io.BytesIO.__init__(self)
self.stat = {
'path': path,
'mode': mode,
'type': type,
}
self.saved_data = None
def __eq__(self, other):
return self.stat == other.stat and self.getvalue() == other.getvalue()
def close(self):
self.saved_data = self.getvalue()
io.BytesIO.close(self)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
@property
def size(self):
if self.closed: # pylint: disable=using-constant-test
if self.saved_data is None:
return 0
return len(self.saved_data)
return len(self.getvalue())
def get_file_status(self):
"""Returns a partial WebHDFS FileStatus object."""
return {
hdfs._FILE_STATUS_PATH_SUFFIX: posixpath.basename(self.stat['path']),
hdfs._FILE_STATUS_LENGTH: self.size,
hdfs._FILE_STATUS_TYPE: self.stat['type'],
}
def get_file_checksum(self):
"""Returns a WebHDFS FileChecksum object."""
return {
hdfs._FILE_CHECKSUM_ALGORITHM: 'fake_algo',
hdfs._FILE_CHECKSUM_BYTES: 'checksum_byte_sequence',
hdfs._FILE_CHECKSUM_LENGTH: 5,
}
class FakeHdfsError(Exception):
"""Generic error for FakeHdfs methods."""
class FakeHdfs(object):
"""Fake implementation of ``hdfs.Client``."""
def __init__(self):
self.files = {}
def write(self, path):
if self.status(path, strict=False) is not None:
raise FakeHdfsError('Path already exists: %s' % path)
new_file = FakeFile(path, 'wb')
self.files[path] = new_file
return new_file
def read(self, path, offset=0, length=None):
old_file = self.files.get(path, None)
if old_file is None:
raise FakeHdfsError('Path not found: %s' % path)
if old_file.stat['type'] == 'DIRECTORY':
raise FakeHdfsError('Cannot open a directory: %s' % path)
if not old_file.closed:
raise FakeHdfsError('File already opened: %s' % path)
# old_file is closed and can't be operated upon. Return a copy instead.
new_file = FakeFile(path, 'rb')
if old_file.saved_data:
new_file.write(old_file.saved_data)
new_file.seek(0)
return new_file
def list(self, path, status=False):
if not status:
raise ValueError('status must be True')
fs = self.status(path, strict=False)
if (fs is not None and
fs[hdfs._FILE_STATUS_TYPE] == hdfs._FILE_STATUS_TYPE_FILE):
raise ValueError(
'list must be called on a directory, got file: %s' % path)
result = []
for file in self.files.values():
if file.stat['path'].startswith(path):
fs = file.get_file_status()
result.append((fs[hdfs._FILE_STATUS_PATH_SUFFIX], fs))
return result
def makedirs(self, path):
self.files[path] = FakeFile(path, type='DIRECTORY')
def status(self, path, strict=True):
f = self.files.get(path)
if f is None:
if strict:
raise FakeHdfsError('Path not found: %s' % path)
else:
return f
return f.get_file_status()
def delete(self, path, recursive=True):
if not recursive:
raise FakeHdfsError('Non-recursive mode not implemented')
_ = self.status(path)
for filepath in list(self.files):
if filepath.startswith(path):
del self.files[filepath]
def walk(self, path):
paths = [path]
while paths:
path = paths.pop()
files = []
dirs = []
for full_path in self.files:
if not full_path.startswith(path):
continue
short_path = posixpath.relpath(full_path, path)
if '/' not in short_path:
if self.status(full_path)[hdfs._FILE_STATUS_TYPE] == 'DIRECTORY':
if short_path != '.':
dirs.append(short_path)
else:
files.append(short_path)
yield path, dirs, files
paths = [posixpath.join(path, dir) for dir in dirs]
def rename(self, path1, path2):
if self.status(path1, strict=False) is None:
raise FakeHdfsError('Path1 not found: %s' % path1)
files_to_rename = [
path for path in self.files
if path == path1 or path.startswith(path1 + '/')
]
for fullpath in files_to_rename:
f = self.files.pop(fullpath)
newpath = path2 + fullpath[len(path1):]
f.stat['path'] = newpath
self.files[newpath] = f
def checksum(self, path):
f = self.files.get(path, None)
if f is None:
raise FakeHdfsError('Path not found: %s' % path)
return f.get_file_checksum()
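# Illustrative sketch (not part of the original Beam suite): a minimal
# round-trip through FakeHdfs itself, documenting the contract the tests
# below rely on -- written bytes only become readable after close(), and
# reading an unknown path raises FakeHdfsError.
class FakeHdfsSmokeTest(unittest.TestCase):
  def test_write_close_read_roundtrip(self):
    fake = FakeHdfs()
    with fake.write('/demo/file') as f:
      f.write(b'hello fake hdfs')
    # read() returns a fresh FakeFile seeded with the saved bytes.
    with fake.read('/demo/file') as f:
      self.assertEqual(b'hello fake hdfs', f.read())
    with self.assertRaises(FakeHdfsError):
      fake.read('/demo/missing')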
@parameterized_class(('full_urls', ), [(False, ), (True, )])
class HadoopFileSystemTest(unittest.TestCase):
def setUp(self):
self._fake_hdfs = FakeHdfs()
hdfs.hdfs.InsecureClient = (lambda *args, **kwargs: self._fake_hdfs)
pipeline_options = PipelineOptions()
hdfs_options = pipeline_options.view_as(HadoopFileSystemOptions)
hdfs_options.hdfs_host = ''
hdfs_options.hdfs_port = 0
hdfs_options.hdfs_user = ''
self.fs = hdfs.HadoopFileSystem(pipeline_options)
self.fs._full_urls = self.full_urls
if self.full_urls:
self.tmpdir = 'hdfs://test_dir'
else:
self.tmpdir = 'hdfs://server/test_dir'
for filename in ['old_file1', 'old_file2']:
url = self.fs.join(self.tmpdir, filename)
self.fs.create(url).close()
def test_scheme(self):
self.assertEqual(self.fs.scheme(), 'hdfs')
self.assertEqual(hdfs.HadoopFileSystem.scheme(), 'hdfs')
def test_parse_url(self):
cases = [
('hdfs://', ('', '/'), False),
('hdfs://', None, True),
('hdfs://a', ('', '/a'), False),
('hdfs://a', ('a', '/'), True),
('hdfs://a/', ('', '/a/'), False),
('hdfs://a/', ('a', '/'), True),
('hdfs://a/b', ('', '/a/b'), False),
('hdfs://a/b', ('a', '/b'), True),
('hdfs://a/b/', ('', '/a/b/'), False),
('hdfs://a/b/', ('a', '/b/'), True),
('hdfs:/a/b', None, False),
('hdfs:/a/b', None, True),
('invalid', None, False),
('invalid', None, True),
]
for url, expected, full_urls in cases:
if self.full_urls != full_urls:
continue
try:
result = self.fs._parse_url(url)
except ValueError:
self.assertIsNone(expected, msg=(url, expected, full_urls))
continue
self.assertEqual(expected, result, msg=(url, expected, full_urls))
def test_url_join(self):
self.assertEqual(
'hdfs://tmp/path/to/file',
self.fs.join('hdfs://tmp/path', 'to', 'file'))
self.assertEqual(
'hdfs://tmp/path/to/file', self.fs.join('hdfs://tmp/path', 'to/file'))
self.assertEqual('hdfs://tmp/path/', self.fs.join('hdfs://tmp/path/', ''))
if not self.full_urls:
self.assertEqual('hdfs://bar', self.fs.join('hdfs://foo', '/bar'))
self.assertEqual('hdfs://bar', self.fs.join('hdfs://foo/', '/bar'))
with self.assertRaises(ValueError):
self.fs.join('/no/scheme', 'file')
else:
self.assertEqual('hdfs://foo/bar', self.fs.join('hdfs://foo', '/bar'))
self.assertEqual('hdfs://foo/bar', self.fs.join('hdfs://foo/', '/bar'))
def test_url_split(self):
self.assertEqual(('hdfs://tmp/path/to', 'file'),
self.fs.split('hdfs://tmp/path/to/file'))
if not self.full_urls:
self.assertEqual(('hdfs://', 'tmp'), self.fs.split('hdfs://tmp'))
self.assertEqual(('hdfs://tmp', ''), self.fs.split('hdfs://tmp/'))
self.assertEqual(('hdfs://tmp', 'a'), self.fs.split('hdfs://tmp/a'))
else:
self.assertEqual(('hdfs://tmp/', ''), self.fs.split('hdfs://tmp'))
self.assertEqual(('hdfs://tmp/', ''), self.fs.split('hdfs://tmp/'))
self.assertEqual(('hdfs://tmp/', 'a'), self.fs.split('hdfs://tmp/a'))
self.assertEqual(('hdfs://tmp/a', ''), self.fs.split('hdfs://tmp/a/'))
with self.assertRaisesRegex(ValueError, r'parse'):
self.fs.split('tmp')
def test_mkdirs(self):
url = self.fs.join(self.tmpdir, 't1/t2')
self.fs.mkdirs(url)
self.assertTrue(self.fs.exists(url))
def test_mkdirs_failed(self):
url = self.fs.join(self.tmpdir, 't1/t2')
self.fs.mkdirs(url)
with self.assertRaises(IOError):
self.fs.mkdirs(url)
def test_match_file(self):
expected_files = [
self.fs.join(self.tmpdir, filename)
for filename in ['old_file1', 'old_file2']
]
match_patterns = expected_files
result = self.fs.match(match_patterns)
returned_files = [
f.path for match_result in result for f in match_result.metadata_list
]
self.assertCountEqual(expected_files, returned_files)
def test_match_file_with_limits(self):
expected_files = [
self.fs.join(self.tmpdir, filename)
for filename in ['old_file1', 'old_file2']
]
result = self.fs.match([self.tmpdir + '/'], [1])[0]
files = [f.path for f in result.metadata_list]
self.assertEqual(len(files), 1)
self.assertIn(files[0], expected_files)
def test_match_file_with_zero_limit(self):
result = self.fs.match([self.tmpdir + '/'], [0])[0]
self.assertEqual(len(result.metadata_list), 0)
def test_match_file_empty(self):
url = self.fs.join(self.tmpdir, 'nonexistent_file')
result = self.fs.match([url])[0]
files = [f.path for f in result.metadata_list]
self.assertEqual(files, [])
def test_match_file_error(self):
url = self.fs.join(self.tmpdir, 'old_file1')
bad_url = 'bad_url'
with self.assertRaisesRegex(BeamIOError,
r'^Match operation failed .* %s' % bad_url):
result = self.fs.match([bad_url, url])[0]
files = [f.path for f in result.metadata_list]
self.assertEqual(files, [self.fs._parse_url(url)])
def test_match_directory(self):
expected_files = [
self.fs.join(self.tmpdir, filename)
for filename in ['old_file1', 'old_file2']
]
# Listing without a trailing '/' should return the directory itself and not
# its contents. The fake HDFS client here has a "sparse" directory
# structure, so listing without a '/' will return no results.
result = self.fs.match([self.tmpdir + '/'])[0]
files = [f.path for f in result.metadata_list]
self.assertCountEqual(files, expected_files)
def test_match_directory_trailing_slash(self):
expected_files = [
self.fs.join(self.tmpdir, filename)
for filename in ['old_file1', 'old_file2']
]
result = self.fs.match([self.tmpdir + '/'])[0]
files = [f.path for f in result.metadata_list]
self.assertCountEqual(files, expected_files)
def test_create_success(self):
url = self.fs.join(self.tmpdir, 'new_file')
handle = self.fs.create(url)
self.assertIsNotNone(handle)
_, url = self.fs._parse_url(url)
expected_file = FakeFile(url, 'wb')
self.assertEqual(self._fake_hdfs.files[url], expected_file)
def test_create_write_read_compressed(self):
url = self.fs.join(self.tmpdir, 'new_file.gz')
handle = self.fs.create(url)
self.assertIsNotNone(handle)
_, path = self.fs._parse_url(url)
expected_file = FakeFile(path, 'wb')
self.assertEqual(self._fake_hdfs.files[path], expected_file)
data = b'abc' * 10
handle.write(data)
# Compressed data != original data
self.assertNotEqual(data, self._fake_hdfs.files[path].getvalue())
handle.close()
handle = self.fs.open(url)
read_data = handle.read(len(data))
self.assertEqual(data, read_data)
handle.close()
def test_open(self):
url = self.fs.join(self.tmpdir, 'old_file1')
handle = self.fs.open(url)
expected_data = b''
data = handle.read()
self.assertEqual(data, expected_data)
def test_open_bad_path(self):
with self.assertRaises(FakeHdfsError):
self.fs.open(self.fs.join(self.tmpdir, 'nonexistent/path'))
def _cmpfiles(self, url1, url2):
with self.fs.open(url1) as f1:
with self.fs.open(url2) as f2:
data1 = f1.read()
data2 = f2.read()
return data1 == data2
def test_copy_file(self):
url1 = self.fs.join(self.tmpdir, 'new_file1')
url2 = self.fs.join(self.tmpdir, 'new_file2')
url3 = self.fs.join(self.tmpdir, 'new_file3')
with self.fs.create(url1) as f1:
f1.write(b'Hello')
self.fs.copy([url1, url1], [url2, url3])
self.assertTrue(self._cmpfiles(url1, url2))
self.assertTrue(self._cmpfiles(url1, url3))
def test_copy_file_overwrite_error(self):
url1 = self.fs.join(self.tmpdir, 'new_file1')
url2 = self.fs.join(self.tmpdir, 'new_file2')
with self.fs.create(url1) as f1:
f1.write(b'Hello')
with self.fs.create(url2) as f2:
f2.write(b'nope')
with self.assertRaisesRegex(BeamIOError,
r'already exists.*%s' %
posixpath.basename(url2)):
self.fs.copy([url1], [url2])
def test_copy_file_error(self):
url1 = self.fs.join(self.tmpdir, 'new_file1')
url2 = self.fs.join(self.tmpdir, 'new_file2')
url3 = self.fs.join(self.tmpdir, 'new_file3')
url4 = self.fs.join(self.tmpdir, 'new_file4')
with self.fs.create(url3) as f:
f.write(b'Hello')
with self.assertRaisesRegex(BeamIOError,
r'^Copy operation failed .*%s.*%s.* not found' %
(url1, url2)):
self.fs.copy([url1, url3], [url2, url4])
self.assertTrue(self._cmpfiles(url3, url4))
def test_copy_directory(self):
url_t1 = self.fs.join(self.tmpdir, 't1')
url_t1_inner = self.fs.join(self.tmpdir, 't1/inner')
url_t2 = self.fs.join(self.tmpdir, 't2')
url_t2_inner = self.fs.join(self.tmpdir, 't2/inner')
self.fs.mkdirs(url_t1)
self.fs.mkdirs(url_t1_inner)
self.fs.mkdirs(url_t2)
url1 = self.fs.join(url_t1_inner, 'f1')
url2 = self.fs.join(url_t2_inner, 'f1')
with self.fs.create(url1) as f:
f.write(b'Hello')
self.fs.copy([url_t1], [url_t2])
self.assertTrue(self._cmpfiles(url1, url2))
def test_copy_directory_overwrite_error(self):
url_t1 = self.fs.join(self.tmpdir, 't1')
url_t1_inner = self.fs.join(self.tmpdir, 't1/inner')
url_t2 = self.fs.join(self.tmpdir, 't2')
url_t2_inner = self.fs.join(self.tmpdir, 't2/inner')
self.fs.mkdirs(url_t1)
self.fs.mkdirs(url_t1_inner)
self.fs.mkdirs(url_t2)
self.fs.mkdirs(url_t2_inner)
url1 = self.fs.join(url_t1, 'f1')
url1_inner = self.fs.join(url_t1_inner, 'f2')
url2 = self.fs.join(url_t2, 'f1')
unused_url2_inner = self.fs.join(url_t2_inner, 'f2')
url3_inner = self.fs.join(url_t2_inner, 'f3')
for url in [url1, url1_inner, url3_inner]:
with self.fs.create(url) as f:
f.write(b'Hello')
with self.fs.create(url2) as f:
f.write(b'nope')
with self.assertRaisesRegex(BeamIOError, r'already exists'):
self.fs.copy([url_t1], [url_t2])
def test_rename_file(self):
url1 = self.fs.join(self.tmpdir, 'f1')
url2 = self.fs.join(self.tmpdir, 'f2')
with self.fs.create(url1) as f:
f.write(b'Hello')
self.fs.rename([url1], [url2])
self.assertFalse(self.fs.exists(url1))
self.assertTrue(self.fs.exists(url2))
def test_rename_file_error(self):
url1 = self.fs.join(self.tmpdir, 'f1')
url2 = self.fs.join(self.tmpdir, 'f2')
url3 = self.fs.join(self.tmpdir, 'f3')
url4 = self.fs.join(self.tmpdir, 'f4')
with self.fs.create(url3) as f:
f.write(b'Hello')
with self.assertRaisesRegex(BeamIOError,
r'^Rename operation failed .*%s.*%s' %
(url1, url2)):
self.fs.rename([url1, url3], [url2, url4])
self.assertFalse(self.fs.exists(url3))
self.assertTrue(self.fs.exists(url4))
def test_rename_directory(self):
url_t1 = self.fs.join(self.tmpdir, 't1')
url_t2 = self.fs.join(self.tmpdir, 't2')
self.fs.mkdirs(url_t1)
url1 = self.fs.join(url_t1, 'f1')
url2 = self.fs.join(url_t2, 'f1')
with self.fs.create(url1) as f:
f.write(b'Hello')
self.fs.rename([url_t1], [url_t2])
self.assertFalse(self.fs.exists(url_t1))
self.assertTrue(self.fs.exists(url_t2))
self.assertFalse(self.fs.exists(url1))
self.assertTrue(self.fs.exists(url2))
def test_exists(self):
url1 = self.fs.join(self.tmpdir, 'old_file1')
url2 = self.fs.join(self.tmpdir, 'nonexistent')
self.assertTrue(self.fs.exists(url1))
self.assertFalse(self.fs.exists(url2))
def test_size(self):
url = self.fs.join(self.tmpdir, 'f1')
with self.fs.create(url) as f:
f.write(b'Hello')
self.assertEqual(5, self.fs.size(url))
def test_checksum(self):
url = self.fs.join(self.tmpdir, 'f1')
with self.fs.create(url) as f:
f.write(b'Hello')
self.assertEqual(
'fake_algo-5-checksum_byte_sequence', self.fs.checksum(url))
def test_delete_file(self):
url = self.fs.join(self.tmpdir, 'old_file1')
self.assertTrue(self.fs.exists(url))
self.fs.delete([url])
self.assertFalse(self.fs.exists(url))
def test_delete_dir(self):
url_t1 = self.fs.join(self.tmpdir, 'new_dir1')
url_t2 = self.fs.join(url_t1, 'new_dir2')
url1 = self.fs.join(url_t2, 'new_file1')
url2 = self.fs.join(url_t2, 'new_file2')
self.fs.mkdirs(url_t1)
self.fs.mkdirs(url_t2)
self.fs.create(url1).close()
self.fs.create(url2).close()
self.assertTrue(self.fs.exists(url1))
self.assertTrue(self.fs.exists(url2))
self.fs.delete([url_t1])
self.assertFalse(self.fs.exists(url_t1))
self.assertFalse(self.fs.exists(url_t2))
self.assertFalse(self.fs.exists(url2))
self.assertFalse(self.fs.exists(url1))
def test_delete_error(self):
url1 = self.fs.join(self.tmpdir, 'nonexistent')
url2 = self.fs.join(self.tmpdir, 'old_file1')
self.assertTrue(self.fs.exists(url2))
_, path1 = self.fs._parse_url(url1)
with self.assertRaisesRegex(BeamIOError,
r'^Delete operation failed .* %s' % path1):
self.fs.delete([url1, url2])
self.assertFalse(self.fs.exists(url2))
class HadoopFileSystemRuntimeValueProviderTest(unittest.TestCase):
"""Tests pipeline_options, in the form of a
RuntimeValueProvider.runtime_options object."""
def setUp(self):
self._fake_hdfs = FakeHdfs()
hdfs.hdfs.InsecureClient = (lambda *args, **kwargs: self._fake_hdfs)
def test_dict_options(self):
pipeline_options = {
'hdfs_host': '',
'hdfs_port': 0,
'hdfs_user': '',
}
self.fs = hdfs.HadoopFileSystem(pipeline_options=pipeline_options)
self.assertFalse(self.fs._full_urls)
def test_dict_options_missing(self):
with self.assertRaisesRegex(ValueError, r'hdfs_host'):
self.fs = hdfs.HadoopFileSystem(
pipeline_options={
'hdfs_port': 0,
'hdfs_user': '',
})
with self.assertRaisesRegex(ValueError, r'hdfs_port'):
self.fs = hdfs.HadoopFileSystem(
pipeline_options={
'hdfs_host': '',
'hdfs_user': '',
})
with self.assertRaisesRegex(ValueError, r'hdfs_user'):
self.fs = hdfs.HadoopFileSystem(
pipeline_options={
'hdfs_host': '',
'hdfs_port': 0,
})
def test_dict_options_full_urls(self):
pipeline_options = {
'hdfs_host': '',
'hdfs_port': 0,
'hdfs_user': '',
'hdfs_full_urls': 'invalid',
}
with self.assertRaisesRegex(ValueError, r'hdfs_full_urls'):
self.fs = hdfs.HadoopFileSystem(pipeline_options=pipeline_options)
pipeline_options['hdfs_full_urls'] = True
self.fs = hdfs.HadoopFileSystem(pipeline_options=pipeline_options)
self.assertTrue(self.fs._full_urls)
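# Illustrative sketch (not part of the original Beam suite): end-to-end use of
# HadoopFileSystem against the FakeHdfs client, mirroring the setUp() wiring
# of HadoopFileSystemTest above (default, non-full-URL mode).
class HadoopFileSystemUsageSketch(unittest.TestCase):
  def test_create_then_match(self):
    fake_hdfs = FakeHdfs()
    hdfs.hdfs.InsecureClient = (lambda *args, **kwargs: fake_hdfs)
    pipeline_options = PipelineOptions()
    hdfs_options = pipeline_options.view_as(HadoopFileSystemOptions)
    hdfs_options.hdfs_host = ''
    hdfs_options.hdfs_port = 0
    hdfs_options.hdfs_user = ''
    fs = hdfs.HadoopFileSystem(pipeline_options)
    url = fs.join('hdfs://server/demo_dir', 'part-00000')
    with fs.create(url) as handle:
      handle.write(b'payload')
    self.assertTrue(fs.exists(url))
    match_result = fs.match([url])[0]
    self.assertEqual([url], [m.path for m in match_result.metadata_list])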
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import socket
import stat
import subprocess
from django.utils.translation import ugettext_lazy as _
from desktop.redaction.engine import parse_redaction_policy_from_file
from desktop.lib.conf import Config, ConfigSection, UnspecifiedConfigSection,\
coerce_bool, coerce_csv, coerce_json_dict,\
validate_path, list_of_compiled_res, coerce_str_lowercase
from desktop.lib.i18n import force_unicode
from desktop.lib.paths import get_desktop_root
def coerce_database(database):
if database == 'mysql':
return 'django.db.backends.mysql'
elif database == 'postgres' or database == 'postgresql_psycopg2':
return 'django.db.backends.postgresql_psycopg2'
elif database == 'oracle':
return 'django.db.backends.oracle'
elif database in ('sqlite', 'sqlite3'):
return 'django.db.backends.sqlite3'
else:
return str(database)
def coerce_port(port):
port = int(port)
if port == 0:
return ''
else:
return port
def coerce_password_from_script(script):
p = subprocess.Popen(script, shell=True, stdout=subprocess.PIPE)
password = p.communicate()[0]
if p.returncode != 0:
raise subprocess.CalledProcessError(p.returncode, script)
# whitespace may be significant in the password, but most files have a
# trailing newline.
return password.strip('\n')
HTTP_HOST = Config(
key="http_host",
help=_("HTTP host to bind to."),
type=str,
default="0.0.0.0")
HTTP_PORT = Config(
key="http_port",
help=_("HTTP port to bind to."),
type=int,
default=8888)
HTTP_ALLOWED_METHODS = Config(
key="http_allowed_methods",
help=_("HTTP methods the server will be allowed to service."),
type=coerce_csv,
private=True,
default=['OPTIONS', 'GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'CONNECT'])
SSL_CERTIFICATE = Config(
key="ssl_certificate",
help=_("Filename of SSL Certificate"),
default=None)
SSL_PRIVATE_KEY = Config(
key="ssl_private_key",
help=_("Filename of SSL RSA Private Key"),
default=None)
SSL_CIPHER_LIST = Config(
key="ssl_cipher_list",
help=_("List of allowed and disallowed ciphers"),
default="DEFAULT:!aNULL:!eNULL:!LOW:!EXPORT:!SSLv2")
SSL_PASSWORD = Config(
key="ssl_password",
help=_("SSL password of the the certificate"),
default=None)
SSL_PASSWORD_SCRIPT = Config(
key="ssl_password_script",
help=_("Execute this script to produce the SSL password. This will be used when `ssl_password` is not set."),
type=coerce_password_from_script,
default=None)
LDAP_PASSWORD = Config(
key="ldap_password",
help=_("LDAP password of the hue user used for LDAP authentications. For example for LDAP Authentication with HiveServer2/Impala."),
private=True,
default=None)
LDAP_PASSWORD_SCRIPT = Config(
key="ldap_password_script",
help=_("Execute this script to produce the LDAP password. This will be used when `ldap_password` is not set."),
private=True,
type=coerce_password_from_script,
default=None)
LDAP_USERNAME = Config(
key="ldap_username",
help=_("LDAP username of the hue user used for LDAP authentications. For example for LDAP Authentication with HiveServer2/Impala."),
private=True,
default="hue")
ENABLE_SERVER = Config(
key="enable_server",
help=_("If set to false, runcpserver will not actually start the web server. Used if Apache is being used as a WSGI container."),
type=coerce_bool,
default=True)
CHERRYPY_SERVER_THREADS = Config(
key="cherrypy_server_threads",
help=_("Number of threads used by the CherryPy web server."),
type=int,
default=40)
SECRET_KEY = Config(
key="secret_key",
help=_("Used in hashing algorithms for sessions."),
default="")
SECRET_KEY_SCRIPT = Config(
key="secret_key_script",
help=_("Execute this script to produce the Django secret key. This will be used when `secret_key` is not set."),
type=coerce_password_from_script,
private=True,
default=None)
USER_ACCESS_HISTORY_SIZE = Config(
key="user_access_history_size",
help=_("Number of user access to remember per view per user."),
type=int,
default=10)
COLLECT_USAGE = Config(
key="collect_usage",
help=_("Help improve Hue with anonymous usage analytics."
"Use Google Analytics to see how many times an application or specific section of an application is used, nothing more."),
type=coerce_bool,
default=True)
POLL_ENABLED = Config(
key="poll_enabled",
help=_("Use poll(2) in Hue thrift pool."),
type=coerce_bool,
private=True,
default=True)
MIDDLEWARE = Config(
key="middleware",
help=_("Comma-separated list of Django middleware classes to use. " +
"See https://docs.djangoproject.com/en/1.4/ref/middleware/ for " +
"more details on middlewares in Django."),
type=coerce_csv,
default=[])
REDIRECT_WHITELIST = Config(
key="redirect_whitelist",
help=_("Comma-separated list of regular expressions, which match the redirect URL."
"For example, to restrict to your local domain and FQDN, the following value can be used:"
" ^\/.*$,^http:\/\/www.mydomain.com\/.*$"),
type=list_of_compiled_res(skip_empty=True),
default='^\/.*$')
SECURE_PROXY_SSL_HEADER = Config(
key="secure_proxy_ssl_header",
help=_("Support for HTTPS termination at the load-balancer level with SECURE_PROXY_SSL_HEADER."),
type=coerce_bool,
default=False)
APP_BLACKLIST = Config(
key='app_blacklist',
default='',
type=coerce_csv,
help=_('Comma separated list of apps to not load at server startup.')
)
DEMO_ENABLED = Config( # Internal and Temporary
key="demo_enabled",
help=_("To set to true in combination when using Hue demo backend."),
type=coerce_bool,
private=True,
default=False)
LOG_REDACTION_FILE = Config(
key="log_redaction_file",
help=_("Use this file to parse and redact log message."),
type=parse_redaction_policy_from_file,
default=None)
ALLOWED_HOSTS = Config(
key='allowed_hosts',
default=['*'],
type=coerce_csv,
help=_('Comma separated list of strings representing the host/domain names that the Hue server can serve.')
)
def is_https_enabled():
return bool(SSL_CERTIFICATE.get() and SSL_PRIVATE_KEY.get())
#
# Email (SMTP) settings
#
_default_from_email = None
def default_from_email():
"""Email for hue@<host-fqdn>"""
global _default_from_email
if _default_from_email is None:
try:
fqdn = socket.getfqdn()
except:
fqdn = 'localhost'
_default_from_email = "hue@" + fqdn
return _default_from_email
def default_database_options():
"""Database type dependent options"""
if DATABASE.ENGINE.get().endswith('oracle'):
return {'threaded': True}
elif DATABASE.ENGINE.get().endswith('sqlite3'):
return {'timeout': 30}
else:
return {}
SMTP = ConfigSection(
key='smtp',
help=_('Configuration options for connecting to an external SMTP server.'),
members=dict(
HOST = Config(
key="host",
help=_("The SMTP server for email notification delivery."),
type=str,
default="localhost"
),
PORT = Config(
key="port",
help=_("The SMTP server port."),
type=int,
default=25
),
USER = Config(
key="user",
help=_("The username for the SMTP host."),
type=str,
default=""
),
PASSWORD = Config(
key="password",
help=_("The password for the SMTP user."),
type=str,
private=True,
default=""
),
PASSWORD_SCRIPT = Config(
key="password_script",
help=_("Execute this script to produce the SMTP user password. This will be used when the SMTP `password` is not set."),
type=coerce_password_from_script,
private=True,
default=None,
),
USE_TLS = Config(
key="tls",
help=_("Whether to use a TLS (secure) connection when talking to the SMTP server."),
type=coerce_bool,
default=False
),
DEFAULT_FROM= Config(
key="default_from_email",
help=_("Default email address to use for various automated notifications from Hue."),
type=str,
dynamic_default=default_from_email
),
)
)
DATABASE = ConfigSection(
key='database',
help=_("""Configuration options for specifying the Desktop Database.
For more info, see http://docs.djangoproject.com/en/1.4/ref/settings/#database-engine"""),
members=dict(
ENGINE=Config(
key='engine',
help=_('Database engine, such as postgresql_psycopg2, mysql, or sqlite3.'),
type=coerce_database,
default='django.db.backends.sqlite3',
),
NAME=Config(
key='name',
help=_('Database name, or path to DB if using sqlite3.'),
type=str,
default=get_desktop_root('desktop.db'),
),
USER=Config(
key='user',
help=_('Database username.'),
type=str,
default='',
),
PASSWORD=Config(
key='password',
help=_('Database password.'),
private=True,
type=str,
default='',
),
PASSWORD_SCRIPT=Config(
key='password_script',
help=_('Execute this script to produce the database password. This will be used when `password` is not set.'),
private=True,
type=coerce_password_from_script,
default='',
),
HOST=Config(
key='host',
help=_('Database host.'),
type=str,
default='',
),
PORT=Config(
key='port',
help=_('Database port.'),
type=coerce_port,
default='0',
),
OPTIONS=Config(
key='options',
help=_('Database options to send to the server when connecting.'),
type=coerce_json_dict,
dynamic_default=default_database_options
)
)
)
SESSION = ConfigSection(
key='session',
help=_("""Configuration options for specifying the Desktop session.
For more info, see https://docs.djangoproject.com/en/1.4/topics/http/sessions/"""),
members=dict(
TTL=Config(
key='ttl',
help=_("The cookie containing the users' session ID will expire after this amount of time in seconds."),
type=int,
default=60*60*24*14,
),
SECURE=Config(
key='secure',
help=_("The cookie containing the users' session ID will be secure. This should only be enabled with HTTPS."),
type=coerce_bool,
default=False,
),
HTTP_ONLY=Config(
key='http_only',
help=_("The cookie containing the users' session ID will use the HTTP only flag."),
type=coerce_bool,
default=False
),
EXPIRE_AT_BROWSER_CLOSE=Config(
key='expire_at_browser_close',
help=_("Use session-length cookies. Logs out the user when she closes the browser window."),
type=coerce_bool,
default=False
)
)
)
KERBEROS = ConfigSection(
key="kerberos",
help=_("""Configuration options for specifying Hue's Kerberos integration for
secured Hadoop clusters."""),
members=dict(
HUE_KEYTAB=Config(
key='hue_keytab',
help=_("Path to a Kerberos keytab file containing Hue's service credentials."),
type=str,
default=None),
HUE_PRINCIPAL=Config(
key='hue_principal',
help=_("Kerberos principal name for Hue. Typically 'hue/hostname.foo.com'."),
type=str,
default="hue/%s" % socket.getfqdn()),
KEYTAB_REINIT_FREQUENCY=Config(
key='reinit_frequency',
help=_("Frequency in seconds with which Hue will renew its keytab."),
type=int,
default=60*60), #1h
CCACHE_PATH=Config(
key='ccache_path',
help=_("Path to keep Kerberos credentials cached."),
private=True,
type=str,
default="/tmp/hue_krb5_ccache",
),
KINIT_PATH=Config(
key='kinit_path',
help=_("Path to Kerberos 'kinit' command."),
type=str,
default="kinit", # use PATH!
)
)
)
# See python's documentation for time.tzset for valid values.
TIME_ZONE = Config(
key="time_zone",
help=_("Time zone name."),
type=str,
default=os.environ.get("TZ", "America/Los_Angeles")
)
DEFAULT_SITE_ENCODING = Config(
key='default_site_encoding',
help=_('Default system-wide unicode encoding.'),
type=str,
default='utf-8'
)
SERVER_USER = Config(
key="server_user",
help=_("Username to run servers as."),
type=str,
default="hue")
SERVER_GROUP = Config(
key="server_group",
help=_("Group to run servers as."),
type=str,
default="hue")
DEFAULT_USER = Config(
key="default_user",
help=_("This should be the user running hue webserver"),
type=str,
default="hue")
DEFAULT_HDFS_SUPERUSER = Config(
key="default_hdfs_superuser",
help=_("This should be the hdfs super user"),
type=str,
default="hdfs")
CUSTOM = ConfigSection(
key="custom",
help=_("Customizations to the UI."),
members=dict(
BANNER_TOP_HTML=Config("banner_top_html",
default="",
help=_("Top banner HTML code. This code will be placed in the navigation bar "
"so that it will reside at the top of the page in a fixed position. " +
"One common value is `<img src=\"http://www.example.com/example.gif\" />`")),
))
AUTH = ConfigSection(
key="auth",
help=_("Configuration options for user authentication into the web application."),
members=dict(
BACKEND=Config("backend",
default="desktop.auth.backend.AllowFirstUserDjangoBackend",
help=_("Authentication backend. Common settings are "
"django.contrib.auth.backends.ModelBackend (fully Django backend), " +
"desktop.auth.backend.AllowAllBackend (allows everyone), " +
"desktop.auth.backend.AllowFirstUserDjangoBackend (relies on Django and user manager, after the first login). ")),
USER_AUGMENTOR=Config("user_augmentor",
default="desktop.auth.backend.DefaultUserAugmentor",
help=_("Class which defines extra accessor methods for User objects.")),
PAM_SERVICE=Config("pam_service",
default="login",
help=_("The service to use when querying PAM. "
"The service usually corresponds to a single filename in /etc/pam.d")),
REMOTE_USER_HEADER=Config("remote_user_header",
default="HTTP_REMOTE_USER",
help=_("When using the desktop.auth.backend.RemoteUserDjangoBackend, this sets "
"the normalized name of the header that contains the remote user. "
"The HTTP header in the request is converted to a key by converting "
"all characters to uppercase, replacing any hyphens with underscores "
"and adding an HTTP_ prefix to the name. So, for example, if the header "
"is called Remote-User that would be configured as HTTP_REMOTE_USER")),
IGNORE_USERNAME_CASE = Config("ignore_username_case",
help=_("Ignore the case of usernames when searching for existing users in Hue."),
type=coerce_bool,
default=True),
FORCE_USERNAME_LOWERCASE = Config("force_username_lowercase",
help=_("Force usernames to lowercase when creating new users from LDAP."),
type=coerce_bool,
default=True),
EXPIRES_AFTER = Config("expires_after",
help=_("Users will expire after they have not logged in for 'n' amount of seconds."
"A negative number means that users will never expire."),
type=int,
default=-1),
EXPIRE_SUPERUSERS = Config("expire_superusers",
help=_("Apply 'expires_after' to superusers."),
type=coerce_bool,
default=True)
))
LDAP = ConfigSection(
key="ldap",
help=_("Configuration options for LDAP connectivity."),
members=dict(
CREATE_USERS_ON_LOGIN = Config("create_users_on_login",
help=_("Create users when they login with their LDAP credentials."),
type=coerce_bool,
default=True),
SYNC_GROUPS_ON_LOGIN = Config("sync_groups_on_login",
help=_("Synchronize a users groups when they login."),
type=coerce_bool,
default=False),
IGNORE_USERNAME_CASE = Config("ignore_username_case",
help=_("Ignore the case of usernames when searching for existing users in Hue."),
type=coerce_bool,
default=False),
FORCE_USERNAME_LOWERCASE = Config("force_username_lowercase",
help=_("Force usernames to lowercase when creating new users from LDAP."),
type=coerce_bool,
private=True,
default=False),
SUBGROUPS = Config("subgroups",
help=_("Choose which kind of subgrouping to use: nested or suboordinate (deprecated)."),
type=coerce_str_lowercase,
default="suboordinate"),
NESTED_MEMBERS_SEARCH_DEPTH = Config("nested_members_search_depth",
help=_("Define the number of levels to search for nested members."),
type=int,
default=10),
FOLLOW_REFERRALS = Config("follow_referrals",
help=_("Whether or not to follow referrals."),
type=coerce_bool,
default=False),
DEBUG = Config("debug",
type=coerce_bool,
default=False,
help=_("Set to a value to enable python-ldap debugging.")),
DEBUG_LEVEL = Config("debug_level",
default=255,
type=int,
help=_("Sets the debug level within the underlying LDAP C lib.")),
TRACE_LEVEL = Config("trace_level",
default=0,
type=int,
help=_("Possible values for trace_level are 0 for no logging, 1 for only logging the method calls with arguments,"
"2 for logging the method calls with arguments and the complete results and 9 for also logging the traceback of method calls.")),
LDAP_SERVERS = UnspecifiedConfigSection(
key="ldap_servers",
help=_("LDAP server record."),
each=ConfigSection(
members=dict(
BASE_DN=Config("base_dn",
default=None,
help=_("The base LDAP distinguished name to use for LDAP search.")),
NT_DOMAIN=Config("nt_domain",
default=None,
help=_("The NT domain used for LDAP authentication.")),
LDAP_URL=Config("ldap_url",
default=None,
help=_("The LDAP URL to connect to.")),
USE_START_TLS=Config("use_start_tls",
default=True,
type=coerce_bool,
help=_("Use StartTLS when communicating with LDAP server.")),
LDAP_CERT=Config("ldap_cert",
default=None,
help=_("A PEM-format file containing certificates for the CA's that Hue will trust for authentication over TLS. The certificate for the CA that signed the LDAP server certificate must be included among these certificates. See more here http://www.openldap.org/doc/admin24/tls.html.")),
LDAP_USERNAME_PATTERN=Config("ldap_username_pattern",
default=None,
help=_("A pattern to use for constructing LDAP usernames.")),
BIND_DN=Config("bind_dn",
default=None,
help=_("The distinguished name to bind as, when importing from LDAP.")),
BIND_PASSWORD=Config("bind_password",
default=None,
private=True,
help=_("The password for the bind user.")),
BIND_PASSWORD_SCRIPT=Config("bind_password_script",
default=None,
private=True,
type=coerce_password_from_script,
help=_("Execute this script to produce the LDAP bind user password. This will be used when `bind_password` is not set.")),
SEARCH_BIND_AUTHENTICATION=Config("search_bind_authentication",
default=True,
type=coerce_bool,
help=_("Use search bind authentication.")),
FOLLOW_REFERRALS = Config("follow_referrals",
help=_("Whether or not to follow referrals."),
type=coerce_bool,
default=False),
DEBUG = Config("debug",
type=coerce_bool,
default=False,
help=_("Set to a value to enable python-ldap debugging.")),
DEBUG_LEVEL = Config("debug_level",
default=255,
type=int,
help=_("Sets the debug level within the underlying LDAP C lib.")),
TRACE_LEVEL = Config("trace_level",
default=0,
type=int,
help=_("Possible values for trace_level are 0 for no logging, 1 for only logging the method calls with arguments,"
"2 for logging the method calls with arguments and the complete results and 9 for also logging the traceback of method calls.")),
USERS = ConfigSection(
key="users",
help=_("Configuration for LDAP user schema and search."),
members=dict(
USER_FILTER=Config("user_filter",
default="objectclass=*",
help=_("A base filter for use when searching for users.")),
USER_NAME_ATTR=Config("user_name_attr",
default="sAMAccountName",
help=_("The username attribute in the LDAP schema. "
"Typically, this is 'sAMAccountName' for AD and 'uid' "
"for other LDAP systems.")),
)
),
GROUPS = ConfigSection(
key="groups",
help=_("Configuration for LDAP group schema and search."),
members=dict(
GROUP_FILTER=Config("group_filter",
default="objectclass=*",
help=_("A base filter for use when searching for groups.")),
GROUP_NAME_ATTR=Config("group_name_attr",
default="cn",
help=_("The group name attribute in the LDAP schema. "
"Typically, this is 'cn'.")),
GROUP_MEMBER_ATTR=Config("group_member_attr",
default="member",
help=_("The LDAP attribute which specifies the "
"members of a group.")),
))))),
# Everything below here is deprecated and should be removed in an upcoming major release.
BASE_DN=Config("base_dn",
default=None,
help=_("The base LDAP distinguished name to use for LDAP search.")),
NT_DOMAIN=Config("nt_domain",
default=None,
help=_("The NT domain used for LDAP authentication.")),
LDAP_URL=Config("ldap_url",
default=None,
help=_("The LDAP URL to connect to.")),
USE_START_TLS=Config("use_start_tls",
default=True,
type=coerce_bool,
help=_("Use StartTLS when communicating with LDAP server.")),
LDAP_CERT=Config("ldap_cert",
default=None,
help=_("A PEM-format file containing certificates for the CA's that Hue will trust for authentication over TLS. The certificate for the CA that signed the LDAP server certificate must be included among these certificates. See more here http://www.openldap.org/doc/admin24/tls.html.")),
LDAP_USERNAME_PATTERN=Config("ldap_username_pattern",
default=None,
help=_("A pattern to use for constructing LDAP usernames.")),
BIND_DN=Config("bind_dn",
default=None,
help=_("The distinguished name to bind as, when importing from LDAP.")),
BIND_PASSWORD=Config("bind_password",
default=None,
private=True,
help=_("The password for the bind user.")),
BIND_PASSWORD_SCRIPT=Config("bind_password_script",
default=None,
private=True,
type=coerce_password_from_script,
help=_("Execute this script to produce the LDAP bind user password. This will be used when `bind_password` is not set.")),
SEARCH_BIND_AUTHENTICATION=Config("search_bind_authentication",
default=True,
type=coerce_bool,
help=_("Use search bind authentication.")),
USERS = ConfigSection(
key="users",
help=_("Configuration for LDAP user schema and search."),
members=dict(
USER_FILTER=Config("user_filter",
default="objectclass=*",
help=_("A base filter for use when searching for users.")),
USER_NAME_ATTR=Config("user_name_attr",
default="sAMAccountName",
help=_("The username attribute in the LDAP schema. "
"Typically, this is 'sAMAccountName' for AD and 'uid' "
"for other LDAP systems.")),
)),
GROUPS = ConfigSection(
key="groups",
help=_("Configuration for LDAP group schema and search."),
members=dict(
GROUP_FILTER=Config("group_filter",
default="objectclass=*",
help=_("A base filter for use when searching for groups.")),
GROUP_NAME_ATTR=Config("group_name_attr",
default="cn",
help=_("The group name attribute in the LDAP schema. "
"Typically, this is 'cn'.")),
GROUP_MEMBER_ATTR=Config("group_member_attr",
default="member",
help=_("The LDAP attribute which specifies the "
"members of a group.")),
))))
OAUTH = ConfigSection(
key='oauth',
help=_('Configuration options for Oauth 1.0 authentication'),
members=dict(
CONSUMER_KEY = Config(
key="consumer_key",
help=_("The Consumer key of the application."),
type=str,
default="XXXXXXXXXXXXXXXXXXXXX"
),
CONSUMER_SECRET = Config(
key="consumer_secret",
help=_("The Consumer secret of the application."),
type=str,
default="XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
),
REQUEST_TOKEN_URL = Config(
key="request_token_url",
help=_("The Request token URL."),
type=str,
default="https://api.twitter.com/oauth/request_token"
),
ACCESS_TOKEN_URL = Config(
key="access_token_url",
help=_("The Access token URL."),
type=str,
default="https://api.twitter.com/oauth/access_token"
),
AUTHENTICATE_URL = Config(
key="authenticate_url",
help=_("The Authorize URL."),
type=str,
default="https://api.twitter.com/oauth/authorize"
),
)
)
LOCAL_FILESYSTEMS = UnspecifiedConfigSection(
key="local_filesystems",
help=_("Paths on the local file system that users should be able to browse."),
each=ConfigSection(
members=dict(
PATH=Config("path",
required=True,
help=_("The path on the local filesystem.")))))
def default_feedback_url():
"""A version-specific URL."""
return "http://groups.google.com/a/cloudera.org/group/hue-user"
FEEDBACK_URL = Config(
key="feedback_url",
help=_("Link for 'feedback' tab."),
type=str,
dynamic_default=default_feedback_url
)
SEND_DBUG_MESSAGES = Config(
key="send_dbug_messages",
help=_("Whether to send debug messages from JavaScript to the server logs."),
type=coerce_bool,
default=False
)
DATABASE_LOGGING = Config(
key="database_logging",
help=_("Enable or disable database debug mode."),
type=coerce_bool,
default=False
)
DJANGO_ADMINS = UnspecifiedConfigSection(
key="django_admins",
help=_("Administrators that should receive error emails."),
each=ConfigSection(
members=dict(
NAME=Config("name",
required=True,
help=_("The full name of the admin.")),
EMAIL=Config("email",
required=True,
help=_("The email address of the admin.")))))
DJANGO_DEBUG_MODE = Config(
key="django_debug_mode",
help=_("Enable or disable Django debug mode."),
type=coerce_bool,
default=True
)
HTTP_500_DEBUG_MODE = Config(
key='http_500_debug_mode',
help=_('Enable or disable debugging information in the 500 internal server error response. '
'Note that the debugging information may contain sensitive data. '
'If django_debug_mode is True, this is automatically enabled.'),
type=coerce_bool,
default=True
)
MEMORY_PROFILER = Config(
key='memory_profiler',
help=_('Enable or disable memory profiling.'),
type=coerce_bool,
default=False)
AUDIT_EVENT_LOG_DIR = Config(
key="audit_event_log_dir",
help=_("The directory where to store the auditing logs. Auditing is disable if the value is empty."),
type=str,
default=""
)
AUDIT_LOG_MAX_FILE_SIZE = Config(
key="audit_log_max_file_size",
help=_("Size in KB/MB/GB for audit log to rollover."),
type=str,
default="100MB"
)
DJANGO_SERVER_EMAIL = Config(
key='django_server_email',
help=_('Email address that internal error messages should send as.'),
default='[email protected]'
)
DJANGO_EMAIL_BACKEND = Config(
key="django_email_backend",
help=_("The email backend to use."),
type=str,
default="django.core.mail.backends.smtp.EmailBackend"
)
def validate_ldap(user, config):
res = []
if config.SEARCH_BIND_AUTHENTICATION.get():
if config.LDAP_URL.get() is not None:
bind_dn = config.BIND_DN.get()
bind_password = config.BIND_PASSWORD.get() or config.BIND_PASSWORD_SCRIPT.get()
if bool(bind_dn) != bool(bind_password):
if bind_dn == None:
res.append((LDAP.BIND_DN,
unicode(_("If you set bind_password, then you must set bind_dn."))))
else:
res.append((LDAP.BIND_PASSWORD,
unicode(_("If you set bind_dn, then you must set bind_password."))))
else:
if config.NT_DOMAIN.get() is not None or \
config.LDAP_USERNAME_PATTERN.get() is not None:
if config.LDAP_URL.get() is None:
res.append((config.LDAP_URL,
unicode(_("LDAP is only partially configured. An LDAP URL must be provided."))))
if config.LDAP_URL.get() is not None:
if config.NT_DOMAIN.get() is None and \
config.LDAP_USERNAME_PATTERN.get() is None:
res.append((config.LDAP_URL,
unicode(_("LDAP is only partially configured. An NT Domain or username "
"search pattern must be provided."))))
if config.LDAP_USERNAME_PATTERN.get() is not None and \
'<username>' not in config.LDAP_USERNAME_PATTERN.get():
res.append((config.LDAP_USERNAME_PATTERN,
unicode(_("The LDAP username pattern should contain the special"
"<username> replacement string for authentication."))))
return res
def validate_mysql_storage():
from django.db import connection
LOG = logging.getLogger(__name__)
res = []
if connection.vendor == 'mysql':
cursor = connection.cursor()
try:
innodb_table_count = cursor.execute('''
SELECT *
FROM information_schema.tables
WHERE table_schema=DATABASE() AND engine = "innodb"''')
total_table_count = cursor.execute('''
SELECT *
FROM information_schema.tables
WHERE table_schema=DATABASE()''')
if innodb_table_count != 0 and innodb_table_count != total_table_count:
res.append(('MYSQL_STORAGE_ENGINE', unicode(_('''All tables in the database must be of the same
storage engine type (preferably InnoDB).'''))))
except Exception, ex:
LOG.exception("Error in config validation of MYSQL_STORAGE_ENGINE: %s", ex)
return res
def config_validator(user):
"""
config_validator() -> [ (config_variable, error_message) ]
Called by core check_config() view.
"""
from desktop.lib import i18n
res = []
if not SECRET_KEY.get():
res.append((SECRET_KEY, unicode(_("Secret key should be configured as a random string. All sessions will be lost on restart"))))
# Validate SSL setup
if SSL_CERTIFICATE.get():
res.extend(validate_path(SSL_CERTIFICATE, is_dir=False))
if not SSL_PRIVATE_KEY.get():
res.append((SSL_PRIVATE_KEY, unicode(_("SSL private key file should be set to enable HTTPS."))))
else:
res.extend(validate_path(SSL_PRIVATE_KEY, is_dir=False))
# Validate encoding
if not i18n.validate_encoding(DEFAULT_SITE_ENCODING.get()):
res.append((DEFAULT_SITE_ENCODING, unicode(_("Encoding not supported."))))
# Validate kerberos
if KERBEROS.HUE_KEYTAB.get() is not None:
res.extend(validate_path(KERBEROS.HUE_KEYTAB, is_dir=False))
# Keytab should not be world or group accessible
kt_stat = os.stat(KERBEROS.HUE_KEYTAB.get())
if stat.S_IMODE(kt_stat.st_mode) & 0077:
res.append((KERBEROS.HUE_KEYTAB,
force_unicode(_("Keytab should have 0600 permissions (has %o).") %
stat.S_IMODE(kt_stat.st_mode))))
res.extend(validate_path(KERBEROS.KINIT_PATH, is_dir=False))
res.extend(validate_path(KERBEROS.CCACHE_PATH, is_dir=False))
if LDAP.LDAP_SERVERS.get():
for ldap_record_key in LDAP.LDAP_SERVERS.get():
res.extend(validate_ldap(user, LDAP.LDAP_SERVERS.get()[ldap_record_key]))
else:
res.extend(validate_ldap(user, LDAP))
# Validate MYSQL storage engine of all tables
res.extend(validate_mysql_storage())
return res
def get_redaction_policy():
"""
Return the configured redaction policy.
"""
return LOG_REDACTION_FILE.get()
def get_secret_key():
secret_key = SECRET_KEY.get()
if secret_key is None:
secret_key = SECRET_KEY_SCRIPT.get()
return secret_key
def get_ssl_password():
password = SSL_PASSWORD.get()
if password is None:
password = SSL_PASSWORD_SCRIPT.get()
return password
def get_database_password():
password = DATABASE.PASSWORD.get()
if password is None:
password = DATABASE.PASSWORD_SCRIPT.get()
return password
def get_smtp_password():
password = SMTP.PASSWORD.get()
if password is None:
password = SMTP.PASSWORD_SCRIPT.get()
return password
def get_ldap_password():
password = LDAP_PASSWORD.get()
if password is None:
password = LDAP_PASSWORD_SCRIPT.get()
return password
def get_ldap_bind_password(ldap_config):
password = ldap_config.BIND_PASSWORD.get()
if password is None:
password = ldap_config.BIND_PASSWORD_SCRIPT.get()
return password
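# --- Illustrative sketch (not part of this module) ---------------------------
# The five getters above share one pattern: prefer the inline config value and
# fall back to the matching *_SCRIPT config. A generic helper (hypothetical
# name) would look like this; e.g. _get_secret(SSL_PASSWORD, SSL_PASSWORD_SCRIPT)
# mirrors get_ssl_password().
def _get_secret(conf, script_conf):
  value = conf.get()
  if value is None:
    value = script_conf.get()
  return value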
|
|
# -*- coding: utf-8 -*-
"""
sphinxcontrib.github
~~~~~~~~~~~~~~~~~~~~
    A Sphinx-based resume builder aimed at programmers on GitHub, helping them
    turn their repositories and pull requests into a solid resume.
:copyright: (c) 2013 by Tony Narlock.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
from sphinx.util.compat import Directive
from docutils.parsers.rst import directives
from docutils import nodes, utils
from docutils.parsers.rst.roles import set_classes
from pprint import pprint
import os
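# NOTE: `gh` (used below via gh.repository(...)) is assumed to be a
# module-level GitHub API client (e.g. an authenticated github3.py session)
# configured elsewhere; it is not defined in this file.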
gh_repo_tpl = """\
{name} watch {watchers} forks {forks}
"""
gh_pr_tpl = """\
+{{additions}} -{{deletions}} {{created_at}}
"""
class GitHubRepoDirective(Directive):
"""Directive for Github Repositories."""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
has_content = False
option_spec = {
"travis": directives.uri,
"docs": directives.uri,
"api": directives.uri,
"pypi": directives.uri,
"homepage": directives.uri,
"use_gh_description": directives.flag,
}
def run(self):
repo = self.arguments[0]
env = self.state.document.settings.env
try:
repo_user, repo_name = repo.split('/')
repo = gh.repository(repo_user, repo_name)
except Exception as e:
raise self.error("GitHub API error: %s" % e.message)
tpl = gh_repo_tpl
html = tpl.format(**repo.__dict__)
if not hasattr(env, 'github_repo_all_repos'):
env.github_repo_all_repos = []
env.github_repo_all_repos.append({
'docname': env.docname,
'lineno': self.lineno,
'repo': repo,
})
repo_link = nodes.reference('', 'github', refuri=repo.html_url)
title = nodes.paragraph()
title += repo_link,
if 'travis' in self.options:
title += nodes.inline('', ' - ')
title += nodes.reference(
'', 'travis', refuri=self.options.get('travis'))
if 'docs' in self.options:
title += nodes.inline('', ' - ')
title += nodes.reference(
'', 'docs', refuri=self.options.get('docs'))
if 'api' in self.options:
title += nodes.inline('', ' - ')
title += nodes.reference(
'', 'api', refuri=self.options.get('api'))
if 'pypi' in self.options:
title += nodes.inline('', ' - ')
title += nodes.reference(
'', 'pypi', refuri=self.options.get('pypi'))
if 'homepage' in self.options:
title += nodes.inline('', ' - ')
title += nodes.reference(
'', 'homepage', refuri=self.options.get('homepage'))
if repo.watchers > 10:
title += nodes.inline('', ' - %s watchers' % str(repo.watchers))
if repo.forks > 10:
title += nodes.inline('', ' - %s forks' % str(repo.forks))
new_nodes = [title]
if 'use_gh_description' in self.options:
new_nodes.append(nodes.paragraph('', repo.description))
return new_nodes
def purge_repos(app, env, docname):
if not hasattr(env, 'github_repo_all_repos'):
return
env.github_repo_all_repos = [
repo for repo in env.github_repo_all_repos if repo['docname'] != docname
]
def github_repo_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
"""github repo role."""
try:
repo_user, repo_name = text.split('/')
repo = gh.repository(repo_user, repo_name)
except Exception as e:
msg = inliner.reporter.error(
            'GitHub API error: %s for "%s"' % (e, text), line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
tpl = gh_repo_tpl
html = tpl.format(**repo.__dict__)
title = nodes.paragraph()
title += nodes.inline('', repo_name + ': ')
title += nodes.reference('', 'github', refuri=repo.html_url)
return [title], []
def github_pr_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
"""Here are some docs.
:param rawtext: Text being replaced with link node.
:param app: Sphinx application context
:param type: Link type (issue, changeset, etc.)
:param slug: ID of the thing to link to
:param options: Options dictionary passed to role func.
"""
try:
pr = text
if not pr or len(pr) <= 0 or not isinstance(text, basestring):
raise ValueError
except ValueError:
msg = inliner.reporter.error(
            'pull request should be in the format of :user/:repo/pull/:pull_id; '
            '"%s" is invalid.' % text, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
set_classes(options)
repo_user, repo_name, pull, pull_id = pr.split('/')
repo = gh.repository(repo_user, repo_name)
pull = repo.pull_request(pull_id)
tpl = gh_pr_tpl
attributes = pull.__dict__
attributes['repo_name'] = pull.repository[1]
    pr_details = tpl.format(additions=pull.additions, deletions=pull.deletions,
                            created_at=pull.created_at)
# <a href={{repo.html_url}}>repo_name</a>
repo_link = nodes.reference(
rawtext, repo_name, refuri=repo.html_url, **options)
# <em>pull.title</em>
pr_title_emphasized = nodes.emphasis(rawtext, pull.title, **options)
# ./tpl/gh_pr.rst
pr_details_node = nodes.emphasis(rawtext, pr_details, **options)
pr_number_link = nodes.reference(rawtext, '#' + str(
pull.number), refuri=pull.html_url, **options)
pr_additions = nodes.inline(rawtext, str(pull.additions) + ' additions(+)')
pr_deletions = nodes.inline(rawtext, str(pull.deletions) + ' deletions(-)')
pr_created_at = nodes.inline(rawtext, pull.created_at.strftime('%Y-%m-%d'))
title = nodes.paragraph()
title += repo_link,
title += nodes.inline(rawtext, ' ')
title += nodes.inline(rawtext, ' (')
title += pr_number_link
title += nodes.inline(rawtext, ') ')
title += nodes.inline(rawtext, ' '),
title += pr_title_emphasized,
details = nodes.paragraph()
details += pr_additions
details += nodes.inline(rawtext, ', ')
details += pr_deletions
details += nodes.inline(rawtext, ' '),
details += pr_created_at
return [title, details], []
def visit_github_pr_node(self, node):
pass
def depart_github_pr_node(self, node):
pass
def setup(app):
app.add_directive('github-repo', GitHubRepoDirective)
app.add_role('github-repo', github_repo_role)
app.add_role('github-pr', github_pr_role)
app.connect('env-purge-doc', purge_repos)
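# Typical usage of this extension (illustrative; repository and pull request
# names below are placeholders), assuming the module is importable as
# `sphinxcontrib.github` and listed in `extensions` in conf.py:
#
#   .. github-repo:: someuser/somerepo
#      :docs: https://example.org/docs/
#
#   :github-repo:`someuser/somerepo`
#   :github-pr:`someuser/somerepo/pull/42`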
|
|
# Copyright 2020 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
import pickle
import re
import os
import sys
import typing as T
from ..backend.ninjabackend import TargetDependencyScannerInfo, ninja_quote
from ..compilers.compilers import lang_suffixes
CPP_IMPORT_RE = re.compile(r'\w*import ([a-zA-Z0-9]+);')
CPP_EXPORT_RE = re.compile(r'\w*export module ([a-zA-Z0-9]+);')
FORTRAN_INCLUDE_PAT = r"^\s*include\s*['\"](\w+\.\w+)['\"]"
FORTRAN_MODULE_PAT = r"^\s*\bmodule\b\s+(\w+)\s*(?:!+.*)*$"
FORTRAN_SUBMOD_PAT = r"^\s*\bsubmodule\b\s*\((\w+:?\w+)\)\s*(\w+)"
FORTRAN_USE_PAT = r"^\s*use,?\s*(?:non_intrinsic)?\s*(?:::)?\s*(\w+)"
FORTRAN_MODULE_RE = re.compile(FORTRAN_MODULE_PAT, re.IGNORECASE)
FORTRAN_SUBMOD_RE = re.compile(FORTRAN_SUBMOD_PAT, re.IGNORECASE)
FORTRAN_USE_RE = re.compile(FORTRAN_USE_PAT, re.IGNORECASE)
class DependencyScanner:
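    """Scan Fortran and C++ sources for module exports and imports and write a
    Ninja dyndep file describing which object files produce and consume which
    module files."""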
def __init__(self, pickle_file: str, outfile: str, sources: T.List[str]):
with open(pickle_file, 'rb') as pf:
self.target_data = pickle.load(pf) # type: TargetDependencyScannerInfo
self.outfile = outfile
self.sources = sources
self.provided_by = {} # type: T.Dict[str, str]
self.exports = {} # type: T.Dict[str, str]
self.needs = {} # type: T.Dict[str, T.List[str]]
self.sources_with_exports = [] # type: T.List[str]
def scan_file(self, fname: str) -> None:
suffix = os.path.splitext(fname)[1][1:].lower()
if suffix in lang_suffixes['fortran']:
self.scan_fortran_file(fname)
elif suffix in lang_suffixes['cpp']:
self.scan_cpp_file(fname)
else:
sys.exit(f'Can not scan files with suffix .{suffix}.')
def scan_fortran_file(self, fname: str) -> None:
fpath = pathlib.Path(fname)
modules_in_this_file = set()
for line in fpath.read_text().split('\n'):
import_match = FORTRAN_USE_RE.match(line)
export_match = FORTRAN_MODULE_RE.match(line)
submodule_export_match = FORTRAN_SUBMOD_RE.match(line)
if import_match:
needed = import_match.group(1).lower()
                # In Fortran a file may also `use` the module it defines itself.
                # Prevent circular self-dependencies.
if needed not in modules_in_this_file:
if fname in self.needs:
self.needs[fname].append(needed)
else:
self.needs[fname] = [needed]
if export_match:
exported_module = export_match.group(1).lower()
assert(exported_module not in modules_in_this_file)
modules_in_this_file.add(exported_module)
if exported_module in self.provided_by:
raise RuntimeError(f'Multiple files provide module {exported_module}.')
self.sources_with_exports.append(fname)
self.provided_by[exported_module] = fname
self.exports[fname] = exported_module
if submodule_export_match:
# Store submodule "Foo" "Bar" as "foo:bar".
# A submodule declaration can be both an import and an export declaration:
#
# submodule (a1:a2) a3
# - requires [email protected]
# - produces [email protected]
parent_module_name_full = submodule_export_match.group(1).lower()
parent_module_name = parent_module_name_full.split(':')[0]
submodule_name = submodule_export_match.group(2).lower()
concat_name = f'{parent_module_name}:{submodule_name}'
self.sources_with_exports.append(fname)
self.provided_by[concat_name] = fname
self.exports[fname] = concat_name
# Fortran requires that the immediate parent module must be built
# before the current one. Thus:
#
# submodule (parent) parent <- requires parent.mod (really parent.smod, but they are created at the same time)
# submodule (a1:a2) a3 <- requires [email protected]
#
# a3 does not depend on the a1 parent module directly, only transitively.
if fname in self.needs:
self.needs[fname].append(parent_module_name_full)
else:
self.needs[fname] = [parent_module_name_full]
def scan_cpp_file(self, fname: str) -> None:
fpath = pathlib.Path(fname)
for line in fpath.read_text().split('\n'):
import_match = CPP_IMPORT_RE.match(line)
export_match = CPP_EXPORT_RE.match(line)
if import_match:
needed = import_match.group(1)
if fname in self.needs:
self.needs[fname].append(needed)
else:
self.needs[fname] = [needed]
if export_match:
exported_module = export_match.group(1)
if exported_module in self.provided_by:
raise RuntimeError(f'Multiple files provide module {exported_module}.')
self.sources_with_exports.append(fname)
self.provided_by[exported_module] = fname
self.exports[fname] = exported_module
def objname_for(self, src: str) -> str:
objname = self.target_data.source2object[src]
assert(isinstance(objname, str))
return objname
def module_name_for(self, src: str) -> str:
suffix = os.path.splitext(src)[1][1:].lower()
if suffix in lang_suffixes['fortran']:
exported = self.exports[src]
# Module foo:bar goes to a file name [email protected]
# Module Foo goes to a file name foo.mod
namebase = exported.replace(':', '@')
if ':' in exported:
extension = 'smod'
else:
extension = 'mod'
return os.path.join(self.target_data.private_dir, f'{namebase}.{extension}')
elif suffix in lang_suffixes['cpp']:
return '{}.ifc'.format(self.exports[src])
else:
raise RuntimeError('Unreachable code.')
def scan(self) -> int:
for s in self.sources:
self.scan_file(s)
with open(self.outfile, 'w') as ofile:
ofile.write('ninja_dyndep_version = 1\n')
for src in self.sources:
objfilename = self.objname_for(src)
mods_and_submods_needed = []
module_files_generated = []
module_files_needed = []
if src in self.sources_with_exports:
module_files_generated.append(self.module_name_for(src))
if src in self.needs:
for modname in self.needs[src]:
if modname not in self.provided_by:
# Nothing provides this module, we assume that it
# comes from a dependency library somewhere and is
# already built by the time this compilation starts.
pass
else:
mods_and_submods_needed.append(modname)
for modname in mods_and_submods_needed:
provider_src = self.provided_by[modname]
provider_modfile = self.module_name_for(provider_src)
# Prune self-dependencies
if provider_src != src:
module_files_needed.append(provider_modfile)
quoted_objfilename = ninja_quote(objfilename, True)
quoted_module_files_generated = [ninja_quote(x, True) for x in module_files_generated]
quoted_module_files_needed = [ninja_quote(x, True) for x in module_files_needed]
if quoted_module_files_generated:
mod_gen = '| ' + ' '.join(quoted_module_files_generated)
else:
mod_gen = ''
if quoted_module_files_needed:
mod_dep = '| ' + ' '.join(quoted_module_files_needed)
else:
mod_dep = ''
build_line = 'build {} {}: dyndep {}'.format(quoted_objfilename,
mod_gen,
mod_dep)
ofile.write(build_line + '\n')
return 0
def run(args: T.List[str]) -> int:
pickle_file = args[0]
outfile = args[1]
sources = args[2:]
scanner = DependencyScanner(pickle_file, outfile, sources)
return scanner.scan()
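# --- Illustrative sketch (not part of Meson) ---------------------------------
# A quick demonstration of how the Fortran regexes above classify source lines
# and which module names the scanner derives from them. The sample lines and
# names (a1, a2, a3) are made up for illustration only.
def _demo_fortran_patterns() -> None:
    samples = [
        'module a1',             # provides "a1"             -> a1.mod
        'submodule (a1:a2) a3',  # provides "a1:a3" ([email protected]), needs "a1:a2"
        'use a1',                # needs "a1"
    ]
    for line in samples:
        provided = FORTRAN_MODULE_RE.match(line)
        submod = FORTRAN_SUBMOD_RE.match(line)
        used = FORTRAN_USE_RE.match(line)
        print(line, '->',
              'module:', provided.group(1) if provided else None,
              'submodule:', submod.groups() if submod else None,
              'use:', used.group(1) if used else None)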
|
|
"""Manages the v6 recipe diff schema"""
from __future__ import unicode_literals
from jsonschema import validate
from jsonschema.exceptions import ValidationError
from recipe.diff.exceptions import InvalidDiff
SCHEMA_VERSION = '7'
SCHEMA_VERSIONS = ['6', '7']
RECIPE_DIFF_SCHEMA = {
'type': 'object',
'required': ['version', 'can_be_reprocessed', 'reasons', 'nodes'],
'additionalProperties': False,
'properties': {
'version': {
'description': 'Version of the recipe diff schema',
'type': 'string',
},
'can_be_reprocessed': {
'description': 'Whether this recipe type can be re-processed',
'type': 'boolean',
},
'reasons': {
'description': 'The reasons why the recipe type cannot be re-processed',
'type': 'array',
'items': {
'$ref': '#/definitions/reason',
},
},
'nodes': {
'description': 'The diff for each node in the recipe graph',
'type': 'object',
'additionalProperties': {
'$ref': '#/definitions/node'
},
},
},
'definitions': {
'change': {
'description': 'A change that occurred for this recipe node from previous revision to current revision',
'type': 'object',
'required': ['name', 'description'],
'additionalProperties': False,
'properties': {
'name': {
'description': 'The unique name (key) of the change',
'type': 'string',
},
'description': {
'description': 'The human-readable description of the change',
'type': 'string',
},
},
},
'dependency': {
'description': 'A dependency on another recipe node',
'type': 'object',
'required': ['name'],
'additionalProperties': False,
'properties': {
'name': {
'description': 'The name of the recipe node',
'type': 'string',
},
},
},
'node': {
'description': 'The diff for a node in the recipe graph',
'type': 'object',
'required': ['status', 'changes', 'reprocess_new_node', 'force_reprocess', 'dependencies', 'node_type'],
'additionalProperties': False,
'properties': {
'status': {
'description': 'The diff status for this recipe node compared to the previous revision',
'enum': ['DELETED', 'UNCHANGED', 'CHANGED', 'NEW'],
},
'changes': {
'description': 'The changes for this recipe node from previous revision to current revision',
'type': 'array',
'items': {
'$ref': '#/definitions/change',
},
},
'reprocess_new_node': {
'description': 'Whether this node will be re-processed',
'type': 'boolean',
},
'force_reprocess': {
'description': 'If true, this node will be re-processed even if its status is UNCHANGED',
'type': 'boolean',
},
'dependencies': {
'description': 'The other recipe nodes upon which this node is dependent',
'type': 'array',
'items': {
'$ref': '#/definitions/dependency',
},
},
'prev_node_type': {
'description': 'The type of this node in the previous revision',
'enum': ['job', 'recipe'],
},
'node_type': {
'description': 'The type of the node',
'oneOf': [
{'$ref': '#/definitions/condition_node'},
{'$ref': '#/definitions/job_node'},
{'$ref': '#/definitions/recipe_node'},
],
},
},
},
'condition_node': {
'description': 'The diff details for a condition node in the recipe graph',
'type': 'object',
'required': ['node_type'],
'additionalProperties': False,
'properties': {
'node_type': {
'description': 'The name of the node type',
'enum': ['condition'],
},
},
},
'job_node': {
'description': 'The diff details for a job node in the recipe graph',
'type': 'object',
'required': ['node_type', 'job_type_name', 'job_type_version', 'job_type_revision'],
'additionalProperties': False,
'properties': {
'node_type': {
'description': 'The name of the node type',
'enum': ['job'],
},
'job_type_name': {
'description': 'The name of the job type',
'type': 'string',
},
'job_type_version': {
'description': 'The version of the job type',
'type': 'string',
},
'job_type_revision': {
'description': 'The revision of the job type',
'type': 'integer',
},
'prev_job_type_name': {
'description': 'The name of the job type in the previous revision',
'type': 'string',
},
'prev_job_type_version': {
'description': 'The version of the job type in the previous revision',
'type': 'string',
},
'prev_job_type_revision': {
'description': 'The revision of the job type in the previous revision',
'type': 'integer',
},
},
},
'recipe_node': {
'description': 'The diff details for a recipe node in the recipe graph',
'type': 'object',
'required': ['node_type', 'recipe_type_name', 'recipe_type_revision'],
'additionalProperties': False,
'properties': {
'node_type': {
'description': 'The name of the node type',
'enum': ['recipe'],
},
'recipe_type_name': {
'description': 'The name of the recipe type',
'type': 'string',
},
'recipe_type_revision': {
'description': 'The revision of the recipe type',
'type': 'integer',
},
'prev_recipe_type_name': {
'description': 'The name of the recipe type in the previous revision',
'type': 'string',
},
'prev_recipe_type_revision': {
'description': 'The revision of the recipe type in the previous revision',
'type': 'integer',
},
},
},
'reason': {
'description': 'Explanation for why the recipe type cannot be reprocessed due to the diff changes',
'type': 'object',
'required': ['name', 'description'],
'additionalProperties': False,
'properties': {
'name': {
'description': 'The unique name (key) of the reason',
'type': 'string',
},
'description': {
'description': 'The human-readable description of the reason',
'type': 'string',
},
},
},
},
}
# TODO: remove this once old recipe definitions are removed
def convert_diff_to_v6(graph_diff):
"""Returns the v6 recipe graph diff JSON for the given graph diff
:param graph_diff: The recipe graph diff
:type graph_diff: :class:`recipe.handlers.graph_delta.RecipeGraphDelta`
:returns: The v6 recipe graph diff JSON
:rtype: :class:`recipe.diff.json.diff_v6.RecipeDiffV6`
"""
    # Must grab job type revision numbers from the database
from job.models import JobType
revision_lookup = {}
for job_type in JobType.objects.all():
revision_lookup[job_type.name + ' ' + job_type.version] = job_type.revision_num
reasons = []
nodes = {}
json_dict = {'version': SCHEMA_VERSION, 'can_be_reprocessed': graph_diff.can_be_reprocessed, 'reasons': reasons,
'nodes': nodes}
if not graph_diff.can_be_reprocessed:
reasons.extend([{'name': r.name, 'description': r.description} for r in graph_diff.reasons])
for recipe_node in graph_diff._graph_b._nodes.values():
name = recipe_node.node_name
force_reprocess = name in graph_diff._force_reprocess
if name in graph_diff._new_nodes:
status = 'NEW'
elif name in graph_diff._identical_nodes:
status = 'UNCHANGED'
elif name in graph_diff._changed_nodes:
status = 'CHANGED'
else:
continue
reprocess_new_node = (status in ['NEW', 'CHANGED'] or force_reprocess) and graph_diff.can_be_reprocessed
changes = []
if status == 'CHANGED' and name in graph_diff._changes:
changes.extend([{'name': c.name, 'description': c.description} for c in graph_diff._changes[name]])
job_type_name = recipe_node.job_type_name
job_type_version = recipe_node.job_type_version
job_type = {'node_type': 'job', 'job_type_name': job_type_name, 'job_type_version': job_type_version,
'job_type_revision': revision_lookup[job_type_name + ' ' + job_type_version]}
if status == 'CHANGED' and name in graph_diff._graph_a._nodes:
prev_node = graph_diff._graph_a._nodes[name]
if recipe_node.job_type_name != prev_node.job_type_name:
job_type['prev_job_type_name'] = prev_node.job_type_name
if recipe_node.job_type_version != prev_node.job_type_version:
job_type['prev_job_type_version'] = prev_node.job_type_version
dependencies = [{'name': p.node_name} for p in recipe_node.parents]
job_node = {'reprocess_new_node': reprocess_new_node, 'force_reprocess': force_reprocess, 'status': status,
'changes': changes, 'node_type': job_type, 'dependencies': dependencies}
nodes[name] = job_node
for recipe_node in graph_diff._graph_a._nodes.values():
name = recipe_node.node_name
if name not in graph_diff._deleted_nodes:
continue
job_type_name = recipe_node.job_type_name
job_type_version = recipe_node.job_type_version
job_type = {'node_type': 'job', 'job_type_name': job_type_name, 'job_type_version': job_type_version,
'job_type_revision': revision_lookup[job_type_name + ' ' + job_type_version]}
dependencies = [{'name': p.node_name} for p in recipe_node.parents]
job_node = {'reprocess_new_node': False, 'force_reprocess': False, 'status': 'DELETED', 'changes': [],
'node_type': job_type, 'dependencies': dependencies}
nodes[name] = job_node
return RecipeDiffV6(diff=json_dict, do_validate=False)
def convert_recipe_diff_to_v6_json(recipe_diff):
"""Returns the v6 recipe diff JSON for the given recipe diff
:param recipe_diff: The recipe diff
:type recipe_diff: :class:`recipe.diff.diff.RecipeDiff`
:returns: The v6 recipe diff JSON
:rtype: :class:`recipe.diff.json.diff_v6.RecipeDiffV6`
"""
reasons = [{'name': r.name, 'description': r.description} for r in recipe_diff.reasons]
nodes_dict = {n.name: convert_node_diff_to_v6_json(n) for n in recipe_diff.graph.values()}
json_dict = {'can_be_reprocessed': recipe_diff.can_be_reprocessed, 'reasons': reasons, 'nodes': nodes_dict}
return RecipeDiffV6(diff=json_dict, do_validate=False)
def convert_node_diff_to_v6_json(node_diff):
"""Returns the v6 diff JSON dict for the given node diff
:param node_diff: The node diff
:type node_diff: :class:`recipe.diff.node.NodeDiff`
:returns: The v6 diff JSON dict for the node
:rtype: dict
"""
changes = [{'name': c.name, 'description': c.description} for c in node_diff.changes]
dependencies = [{'name': name} for name in node_diff.parents.keys()]
node_dict = {'status': node_diff.status, 'changes': changes, 'reprocess_new_node': node_diff.reprocess_new_node,
'force_reprocess': node_diff.force_reprocess, 'dependencies': dependencies,
'node_type': node_diff.get_node_type_dict()}
if node_diff.prev_node_type is not None:
node_dict['prev_node_type'] = node_diff.prev_node_type
return node_dict
class RecipeDiffV6(object):
"""Represents a v6 recipe graph diff JSON for the difference between two recipe graphs"""
def __init__(self, diff=None, do_validate=False):
"""Creates a v6 recipe graph diff JSON object from the given dictionary
:param diff: The recipe graph diff JSON dict
:type diff: dict
:param do_validate: Whether to perform validation on the JSON schema
:type do_validate: bool
:raises :class:`recipe.diff.exceptions.InvalidDiff`: If the given diff is invalid
"""
if not diff:
diff = {}
self._diff = diff
if 'version' not in self._diff:
self._diff['version'] = SCHEMA_VERSION
if self._diff['version'] not in SCHEMA_VERSIONS:
raise InvalidDiff('%s is an unsupported version number' % self._diff['version'])
self._populate_default_values()
try:
if do_validate:
validate(self._diff, RECIPE_DIFF_SCHEMA)
except ValidationError as ex:
raise InvalidDiff('Invalid recipe graph diff: %s' % unicode(ex))
def get_dict(self):
"""Returns the internal dictionary
:returns: The internal dictionary
:rtype: dict
"""
return self._diff
def _populate_default_values(self):
"""Populates any missing required values with defaults
"""
if 'can_be_reprocessed' not in self._diff:
self._diff['can_be_reprocessed'] = True
if 'reasons' not in self._diff:
self._diff['reasons'] = []
if 'nodes' not in self._diff:
self._diff['nodes'] = {}
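# --- Illustrative sketch (not part of this module) ----------------------------
# A minimal round trip: an empty diff is populated with defaults and passes
# schema validation, while an unknown version is rejected with InvalidDiff.
def _demo_recipe_diff_v6():
    diff = RecipeDiffV6(do_validate=True)  # defaults: version '7', no nodes
    assert diff.get_dict()['can_be_reprocessed'] is True
    try:
        RecipeDiffV6(diff={'version': '5'}, do_validate=True)
    except InvalidDiff:
        pass  # '5' is not in SCHEMA_VERSIONS, so the diff is rejected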
|
|
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import bt_export_tessellated_body3398
except ImportError:
bt_export_tessellated_body3398 = sys.modules[
"onshape_client.oas.models.bt_export_tessellated_body3398"
]
try:
from onshape_client.oas.models import bt_export_tessellated_faces_body1321_all_of
except ImportError:
bt_export_tessellated_faces_body1321_all_of = sys.modules[
"onshape_client.oas.models.bt_export_tessellated_faces_body1321_all_of"
]
try:
from onshape_client.oas.models import bt_export_tessellated_faces_face1192
except ImportError:
bt_export_tessellated_faces_face1192 = sys.modules[
"onshape_client.oas.models.bt_export_tessellated_faces_face1192"
]
try:
from onshape_client.oas.models import bt_graphics_appearance1152
except ImportError:
bt_graphics_appearance1152 = sys.modules[
"onshape_client.oas.models.bt_graphics_appearance1152"
]
try:
from onshape_client.oas.models import bt_vector3d389
except ImportError:
bt_vector3d389 = sys.modules["onshape_client.oas.models.bt_vector3d389"]
class BTExportTessellatedFacesBody1321(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
          and, for var_name, this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
          and, for var_name, this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
("body_type",): {
"SOLID": "SOLID",
"SHEET": "SHEET",
"WIRE": "WIRE",
"POINT": "POINT",
"MATE_CONNECTOR": "MATE_CONNECTOR",
"COMPOSITE": "COMPOSITE",
"UNKNOWN": "UNKNOWN",
},
}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
        of type self; this ensures that we don't create a cyclic import.
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"appearance": (
bt_graphics_appearance1152.BTGraphicsAppearance1152,
), # noqa: E501
"body_type": (str,), # noqa: E501
"bt_type": (str,), # noqa: E501
"faces": (
[bt_export_tessellated_faces_face1192.BTExportTessellatedFacesFace1192],
), # noqa: E501
"facet_points": ([bt_vector3d389.BTVector3d389],), # noqa: E501
"constituents": ([str],), # noqa: E501
"id": (str,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"appearance": "appearance", # noqa: E501
"body_type": "bodyType", # noqa: E501
"bt_type": "btType", # noqa: E501
"faces": "faces", # noqa: E501
"facet_points": "facetPoints", # noqa: E501
"constituents": "constituents", # noqa: E501
"id": "id", # noqa: E501
}
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
"_composed_instances",
"_var_name_to_model_instances",
"_additional_properties_model_instances",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""bt_export_tessellated_faces_body1321.BTExportTessellatedFacesBody1321 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
appearance (bt_graphics_appearance1152.BTGraphicsAppearance1152): [optional] # noqa: E501
body_type (str): [optional] # noqa: E501
bt_type (str): [optional] # noqa: E501
faces ([bt_export_tessellated_faces_face1192.BTExportTessellatedFacesFace1192]): [optional] # noqa: E501
facet_points ([bt_vector3d389.BTVector3d389]): [optional] # noqa: E501
constituents ([str]): [optional] # noqa: E501
id (str): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
constant_args = {
"_check_type": _check_type,
"_path_to_item": _path_to_item,
"_from_server": _from_server,
"_configuration": _configuration,
}
required_args = {}
# remove args whose value is Null because they are unset
required_arg_names = list(required_args.keys())
for required_arg_name in required_arg_names:
if required_args[required_arg_name] is nulltype.Null:
del required_args[required_arg_name]
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in six.iteritems(kwargs):
if (
var_name in unused_args
and self._configuration is not None
and self._configuration.discard_unknown_keys
and not self._additional_properties_model_instances
):
# discard variable.
continue
setattr(self, var_name, var_value)
@staticmethod
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
return {
"anyOf": [],
"allOf": [
bt_export_tessellated_body3398.BTExportTessellatedBody3398,
bt_export_tessellated_faces_body1321_all_of.BTExportTessellatedFacesBody1321AllOf,
],
"oneOf": [],
}
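# --- Illustrative sketch (not generated) --------------------------------------
# Minimal construction of the composed model using two of the optional keyword
# arguments documented in __init__ above; the values are placeholders and the
# sketch assumes the generated model_utils machinery accepts them as documented.
def _demo_bt_export_tessellated_faces_body():
    body = BTExportTessellatedFacesBody1321(body_type="SOLID", id="example-id")
    return body.body_type, body.id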
|
|
import mock
import unittest
from pyramid import testing
from kinto.core.utils import sqlalchemy
from kinto.core.storage import exceptions
from kinto.core.permission import (PermissionBase,
memory as memory_backend,
postgresql as postgresql_backend, heartbeat)
from kinto.core.testing import skip_if_no_postgresql, DummyRequest
class PermissionBaseTest(unittest.TestCase):
def setUp(self):
self.permission = PermissionBase()
def test_mandatory_overrides(self):
calls = [
(self.permission.initialize_schema,),
(self.permission.flush,),
(self.permission.add_user_principal, '', ''),
(self.permission.remove_user_principal, '', ''),
(self.permission.remove_principal, ''),
(self.permission.get_user_principals, ''),
(self.permission.add_principal_to_ace, '', '', ''),
(self.permission.remove_principal_from_ace, '', '', ''),
(self.permission.get_object_permission_principals, '', ''),
(self.permission.get_objects_permissions, ''),
(self.permission.replace_object_permissions, '', {}),
(self.permission.delete_object_permissions, ''),
(self.permission.get_accessible_objects, [], ''),
(self.permission.get_authorized_principals, []),
]
for call in calls:
self.assertRaises(NotImplementedError, *call)
class BaseTestPermission(object):
backend = None
settings = {}
def setUp(self):
super(BaseTestPermission, self).setUp()
self.permission = self.backend.load_from_config(self._get_config())
self.permission.initialize_schema()
self.request = DummyRequest()
self.client_error_patcher = []
def _get_config(self, settings=None):
"""Mock Pyramid config object.
"""
if settings is None:
settings = self.settings
config = testing.setUp()
config.add_settings(settings)
return config
def tearDown(self):
mock.patch.stopall()
super(BaseTestPermission, self).tearDown()
self.permission.flush()
def test_backend_error_is_raised_anywhere(self):
for patch in self.client_error_patcher:
patch.start()
calls = [
(self.permission.flush,),
(self.permission.add_user_principal, '', ''),
(self.permission.remove_user_principal, '', ''),
(self.permission.get_user_principals, ''),
(self.permission.add_principal_to_ace, '', '', ''),
(self.permission.remove_principal_from_ace, '', '', ''),
(self.permission.get_object_permission_principals, '', ''),
(self.permission.get_object_permissions, ''),
(self.permission.replace_object_permissions, '', {'write': []}),
(self.permission.delete_object_permissions, ''),
(self.permission.get_accessible_objects, []),
(self.permission.get_authorized_principals, [('*', 'read')]),
]
for call in calls:
self.assertRaises(exceptions.BackendError, *call)
def test_initialize_schema_is_idempotent(self):
self.permission.initialize_schema()
self.permission.initialize_schema() # not raising.
def test_ping_returns_false_if_unavailable(self):
ping = heartbeat(self.permission)
for patch in self.client_error_patcher:
patch.start()
self.assertFalse(ping(self.request))
def test_ping_returns_true_if_available(self):
ping = heartbeat(self.permission)
self.assertTrue(ping(self.request))
def test_ping_returns_false_if_unavailable_in_readonly_mode(self):
self.request.registry.settings['readonly'] = 'true'
ping = heartbeat(self.permission)
with mock.patch.object(self.permission, 'get_user_principals',
side_effect=exceptions.BackendError("Boom!")):
self.assertFalse(ping(self.request))
def test_ping_returns_true_if_available_in_readonly_mode(self):
self.request.registry.settings['readonly'] = 'true'
ping = heartbeat(self.permission)
self.assertTrue(ping(self.request))
def test_ping_logs_error_if_unavailable(self):
for patch in self.client_error_patcher:
patch.start()
ping = heartbeat(self.permission)
with mock.patch('kinto.core.permission.logger.exception') as \
exc_handler:
self.assertFalse(ping(self.request))
self.assertTrue(exc_handler.called)
def test_can_add_a_principal_to_a_user(self):
user_id = 'foo'
principal = 'bar'
self.permission.add_user_principal(user_id, principal)
retrieved = self.permission.get_user_principals(user_id)
self.assertEquals(retrieved, {principal})
def test_add_twice_a_principal_to_a_user_add_it_once(self):
user_id = 'foo'
principal = 'bar'
self.permission.add_user_principal(user_id, principal)
self.permission.add_user_principal(user_id, principal)
retrieved = self.permission.get_user_principals(user_id)
self.assertEquals(retrieved, {principal})
def test_can_remove_a_principal_for_a_user(self):
user_id = 'foo'
principal = 'bar'
principal2 = 'foobar'
self.permission.add_user_principal(user_id, principal)
self.permission.add_user_principal(user_id, principal2)
self.permission.remove_user_principal(user_id, principal)
retrieved = self.permission.get_user_principals(user_id)
self.assertEquals(retrieved, {principal2})
def test_can_remove_a_unexisting_principal_to_a_user(self):
user_id = 'foo'
principal = 'bar'
principal2 = 'foobar'
self.permission.add_user_principal(user_id, principal2)
self.permission.remove_user_principal(user_id, principal)
self.permission.remove_user_principal(user_id, principal2)
retrieved = self.permission.get_user_principals(user_id)
self.assertEquals(retrieved, set())
def test_can_remove_principal_from_every_users(self):
user_id1 = 'foo1'
user_id2 = 'foo2'
principal1 = 'bar'
principal2 = 'foobar'
self.permission.add_user_principal(user_id1, principal1)
self.permission.add_user_principal(user_id2, principal1)
self.permission.add_user_principal(user_id2, principal2)
self.permission.remove_principal(principal1)
self.permission.remove_principal('unknown')
retrieved = self.permission.get_user_principals(user_id1)
self.assertEquals(retrieved, set())
retrieved = self.permission.get_user_principals(user_id2)
self.assertEquals(retrieved, {principal2})
#
# get_object_permission_principals()
#
def test_can_add_a_principal_to_an_object_permission(self):
object_id = 'foo'
permission = 'write'
principal = 'bar'
self.permission.add_principal_to_ace(object_id, permission, principal)
retrieved = self.permission.get_object_permission_principals(
object_id, permission)
self.assertEquals(retrieved, {principal})
def test_add_twice_a_principal_to_an_object_permission_add_it_once(self):
object_id = 'foo'
permission = 'write'
principal = 'bar'
self.permission.add_principal_to_ace(object_id, permission, principal)
self.permission.add_principal_to_ace(object_id, permission, principal)
retrieved = self.permission.get_object_permission_principals(
object_id, permission)
self.assertEquals(retrieved, {principal})
def test_can_remove_a_principal_from_an_object_permission(self):
object_id = 'foo'
permission = 'write'
principal = 'bar'
principal2 = 'foobar'
self.permission.add_principal_to_ace(object_id, permission, principal)
self.permission.add_principal_to_ace(object_id, permission, principal2)
self.permission.remove_principal_from_ace(object_id, permission,
principal)
retrieved = self.permission.get_object_permission_principals(
object_id, permission)
self.assertEquals(retrieved, {principal2})
def test_principals_is_empty_if_no_permission(self):
object_id = 'foo'
permission = 'write'
principal = 'bar'
self.permission.add_principal_to_ace(object_id, permission, principal)
self.permission.remove_principal_from_ace(object_id, permission,
principal)
retrieved = self.permission.get_object_permission_principals(
object_id, permission)
self.assertEquals(retrieved, set())
def test_can_remove_an_unexisting_principal_to_an_object_permission(self):
object_id = 'foo'
permission = 'write'
principal = 'bar'
principal2 = 'foobar'
self.permission.add_principal_to_ace(object_id, permission, principal2)
self.permission.remove_principal_from_ace(object_id, permission,
principal)
retrieved = self.permission.get_object_permission_principals(
object_id, permission)
self.assertEquals(retrieved, {principal2})
#
# check_permission()
#
def test_check_permission_returns_true_for_userid(self):
object_id = 'foo'
permission = 'write'
principal = 'bar'
self.permission.add_principal_to_ace(object_id, permission, principal)
check_permission = self.permission.check_permission(
{principal},
[(object_id, permission)])
self.assertTrue(check_permission)
def test_check_permission_returns_true_for_userid_group(self):
object_id = 'foo'
permission = 'write'
group_id = 'bar'
user_id = 'foobar'
self.permission.add_user_principal(user_id, group_id)
self.permission.add_principal_to_ace(object_id, permission, group_id)
check_permission = self.permission.check_permission(
{user_id, group_id},
[(object_id, permission)])
self.assertTrue(check_permission)
def test_check_permission_returns_true_for_object_inherited(self):
object_id = 'foo'
user_id = 'bar'
self.permission.add_principal_to_ace(object_id, 'write', user_id)
check_permission = self.permission.check_permission(
{user_id},
[(object_id, 'write'), (object_id, 'read')])
self.assertTrue(check_permission)
def test_check_permissions_handles_empty_set(self):
principal = 'bar'
permits = self.permission.check_permission({principal}, [])
self.assertFalse(permits)
def test_check_permission_return_false_for_unknown_principal(self):
object_id = 'foo'
permission = 'write'
principal = 'bar'
check_permission = self.permission.check_permission(
{principal},
[(object_id, permission)])
self.assertFalse(check_permission)
#
# get_authorized_principals()
#
def test_get_authorized_principals_inherit_principals(self):
object_id = 'foo'
user_id = 'bar'
self.permission.add_principal_to_ace(object_id, 'write', user_id)
principals = self.permission.get_authorized_principals(
[(object_id, 'write'), (object_id, 'read')])
self.assertEquals(principals, {user_id})
def test_get_authorized_principals_handles_empty_set(self):
principals = self.permission.get_authorized_principals([])
self.assertEquals(principals, set())
#
# get_accessible_objects()
#
def test_accessible_objects(self):
self.permission.add_principal_to_ace('id1', 'write', 'user1')
self.permission.add_principal_to_ace('id1', 'record:create', 'group')
self.permission.add_principal_to_ace('id2', 'read', 'user1')
self.permission.add_principal_to_ace('id2', 'read', 'user2')
self.permission.add_principal_to_ace('id3', 'write', 'user2')
per_object_ids = self.permission.get_accessible_objects(
['user1', 'group'])
self.assertEquals(sorted(per_object_ids.keys()), ['id1', 'id2'])
self.assertEquals(per_object_ids['id1'],
set(['write', 'record:create']))
self.assertEquals(per_object_ids['id2'], set(['read']))
def test_accessible_objects_from_permission(self):
self.permission.add_principal_to_ace('id1', 'write', 'user1')
self.permission.add_principal_to_ace('id1', 'read', 'user1')
self.permission.add_principal_to_ace('id1', 'read', 'group')
self.permission.add_principal_to_ace('id2', 'write', 'user1')
self.permission.add_principal_to_ace('id2', 'read', 'user2')
self.permission.add_principal_to_ace('id2', 'read', 'group')
self.permission.add_principal_to_ace('id3', 'read', 'user2')
per_object_ids = self.permission.get_accessible_objects(
['user1', 'group'],
[('*', 'read')])
self.assertEquals(sorted(per_object_ids.keys()), ['id1', 'id2'])
def test_accessible_objects_with_pattern(self):
self.permission.add_principal_to_ace('/url1/id', 'write', 'user1')
self.permission.add_principal_to_ace('/url2/id', 'write', 'user1')
per_object_ids = self.permission.get_accessible_objects(
['user1'],
[('*url1*', 'write')])
self.assertEquals(sorted(per_object_ids.keys()), ['/url1/id'])
def test_accessible_objects_several_bound_permissions(self):
self.permission.add_principal_to_ace('/url/a/id/1', 'write', 'user1')
self.permission.add_principal_to_ace('/url/a/id/2', 'read', 'user1')
self.permission.add_principal_to_ace('/url/_/id/2', 'read', 'user1')
per_object_ids = self.permission.get_accessible_objects(
['user1'],
[('/url/a/id/*', 'read'),
('/url/a/id/*', 'write')])
self.assertEquals(sorted(per_object_ids.keys()),
['/url/a/id/1', '/url/a/id/2'])
def test_accessible_objects_without_match(self):
self.permission.add_principal_to_ace('/url/a', 'write', 'user1')
self.permission.add_principal_to_ace('/url/a/id/1', 'write', 'user1')
self.permission.add_principal_to_ace('/url/b/id/1', 'write', 'user1')
self.permission.add_principal_to_ace('/url/a/id/2', 'read', 'user1')
self.permission.add_principal_to_ace('/url/b/id/2', 'read', 'user1')
per_object_ids = self.permission.get_accessible_objects(
['user1'],
[('/url/a', 'write'),
('/url/a', 'read'),
('/url/a/id/*', 'write'),
('/url/a/id/*', 'read')])
self.assertEquals(sorted(per_object_ids.keys()),
['/url/a', '/url/a/id/1', '/url/a/id/2'])
#
# get_object_permissions()
#
def test_object_permissions_return_all_object_acls(self):
self.permission.add_principal_to_ace('/url/a/id/1', 'write', 'user1')
self.permission.add_principal_to_ace('/url/a/id/1', 'write', 'user2')
self.permission.add_principal_to_ace('/url/a/id/1', 'read', 'user3')
self.permission.add_principal_to_ace('/url/a/id/1', 'obj:del', 'user1')
self.permission.add_principal_to_ace('/url/a/id/1/sub', 'create', 'me')
permissions = self.permission.get_object_permissions('/url/a/id/1')
self.assertDictEqual(permissions, {
"write": {"user1", "user2"},
"read": {"user3"},
"obj:del": {"user1"}
})
def test_object_permissions_return_listed_object_acls(self):
self.permission.add_principal_to_ace('/url/a/id/1', 'write', 'user1')
self.permission.add_principal_to_ace('/url/a/id/1', 'write', 'user2')
self.permission.add_principal_to_ace('/url/a/id/1', 'read', 'user3')
self.permission.add_principal_to_ace('/url/a/id/1', 'create', 'user1')
object_permissions = self.permission.get_object_permissions(
'/url/a/id/1', ['write', 'read'])
self.assertDictEqual(object_permissions, {
"write": {"user1", "user2"},
"read": {"user3"}
})
def test_object_permissions_return_empty_dict(self):
self.assertDictEqual(self.permission.get_object_permissions('abc'), {})
def test_replace_object_permission_replace_all_given_sets(self):
self.permission.add_principal_to_ace('/url/a/id/1', 'write', 'user1')
self.permission.add_principal_to_ace('/url/a/id/1', 'write', 'user2')
self.permission.add_principal_to_ace('/url/a/id/1', 'read', 'user3')
self.permission.add_principal_to_ace('/url/a/id/1', 'update', 'user1')
self.permission.add_principal_to_ace('/url/a/id/1', 'obj:del', 'user1')
self.permission.replace_object_permissions('/url/a/id/1', {
"write": ["user1"],
"read": ["user2"],
"update": [],
"obj:del": ["user1"],
"new": ["user3"]
})
permissions = self.permission.get_object_permissions('/url/a/id/1')
self.assertDictEqual(permissions, {
"write": {"user1"},
"read": {"user2"},
"obj:del": {"user1"},
"new": {"user3"}
})
def test_replace_object_permission_only_replace_given_sets(self):
self.permission.add_principal_to_ace('/url/a/id/1', 'write', 'user1')
self.permission.add_principal_to_ace('/url/a/id/1', 'write', 'user2')
self.permission.add_principal_to_ace('/url/a/id/1', 'read', 'user3')
self.permission.add_principal_to_ace('/url/a/id/1', 'obj:del', 'user1')
self.permission.replace_object_permissions('/url/a/id/1', {
"write": ["user1"],
"new": set(["user2"])
})
permissions = self.permission.get_object_permissions('/url/a/id/1')
self.assertDictEqual(permissions, {
"write": {"user1"},
"read": {"user3"},
"new": {"user2"},
"obj:del": {"user1"}
})
def test_replace_object_permission_supports_empty_input(self):
self.permission.add_principal_to_ace('/url/a/id/1', 'write', 'user1')
self.permission.replace_object_permissions('/url/a/id/1', {})
permissions = self.permission.get_object_permissions('/url/a/id/1')
self.assertDictEqual(permissions, {
"write": {"user1"}
})
def test_replace_object_permission_supports_duplicated_entries(self):
self.permission.add_principal_to_ace('/url/a/id/1', 'write', 'user1')
self.permission.replace_object_permissions('/url/a/id/1', {
"write": ["user1", "user1"]
})
permissions = self.permission.get_object_permissions('/url/a/id/1')
self.assertDictEqual(permissions, {
"write": {"user1"}
})
def test_replace_object_permission_supports_empty_list(self):
self.permission.add_principal_to_ace('/url/a/id/1', 'write', 'user1')
self.permission.replace_object_permissions('/url/a/id/1', {
"write": set()
})
permissions = self.permission.get_object_permissions('/url/a/id/1')
self.assertEqual(len(permissions), 0)
def test_delete_object_permissions_remove_all_given_objects_acls(self):
self.permission.add_principal_to_ace('/url/a/id/1', 'write', 'user1')
self.permission.add_principal_to_ace('/url/a/id/1', 'write', 'user2')
self.permission.add_principal_to_ace('/url/a/id/1', 'read', 'user3')
self.permission.add_principal_to_ace('/url/a/id/1', 'create', 'user1')
self.permission.add_principal_to_ace('/url/a/id/2', 'create', 'user3')
self.permission.add_principal_to_ace('/url/a/id/3', 'create', 'user4')
self.permission.delete_object_permissions('/url/a/id/1',
'/url/a/id/2')
self.assertDictEqual(self.permission.get_object_permissions(
'/url/a/id/1'), {})
self.assertDictEqual(self.permission.get_object_permissions(
'/url/a/id/2'), {})
self.assertDictEqual(self.permission.get_object_permissions(
'/url/a/id/3'), {"create": {"user4"}})
def test_delete_object_permissions_supports_empty_list(self):
self.permission.delete_object_permissions() # Not failing
class MemoryPermissionTest(BaseTestPermission, unittest.TestCase):
backend = memory_backend
def test_backend_error_is_raised_anywhere(self):
pass
def test_ping_returns_false_if_unavailable(self):
pass
def test_ping_logs_error_if_unavailable(self):
pass
@skip_if_no_postgresql
class PostgreSQLPermissionTest(BaseTestPermission, unittest.TestCase):
backend = postgresql_backend
settings = {
'permission_backend': 'kinto.core.permission.postgresql',
'permission_pool_size': 10,
'permission_url': 'postgres://postgres:postgres@localhost:5432/testdb'
}
def setUp(self):
super(PostgreSQLPermissionTest, self).setUp()
self.client_error_patcher = [mock.patch.object(
self.permission.client,
'session_factory',
side_effect=sqlalchemy.exc.SQLAlchemyError)]
|
|
import md5
import os
from twisted.python import util
from twisted.trial.test import packages
from twisted.trial import runner, reporter, unittest
from twisted.python.modules import getModule
# XXX - this is used in test_script, perhaps it should be in a utility module
def testNames(test):
"""
Return the id of each test within the given test suite or case.
"""
testIDs = []
def visit(test):
testIDs.append(test.id())
test.visit(visit)
return testIDs
class FinderTest(packages.PackageTest):
def setUp(self):
packages.PackageTest.setUp(self)
self.loader = runner.TestLoader()
def tearDown(self):
packages.PackageTest.tearDown(self)
def test_findPackage(self):
sample1 = self.loader.findByName('twisted')
import twisted as sample2
self.failUnlessEqual(sample1, sample2)
def test_findModule(self):
sample1 = self.loader.findByName('twisted.trial.test.sample')
import sample as sample2
self.failUnlessEqual(sample1, sample2)
def test_findFile(self):
path = util.sibpath(__file__, 'sample.py')
sample1 = self.loader.findByName(path)
import sample as sample2
self.failUnlessEqual(sample1, sample2)
def test_findObject(self):
sample1 = self.loader.findByName('twisted.trial.test.sample.FooTest')
import sample
self.failUnlessEqual(sample.FooTest, sample1)
def test_findNonModule(self):
self.failUnlessRaises(AttributeError,
self.loader.findByName,
'twisted.trial.test.nonexistent')
def test_findNonPackage(self):
self.failUnlessRaises(ValueError,
self.loader.findByName,
'nonextant')
def test_findNonFile(self):
path = util.sibpath(__file__, 'nonexistent.py')
self.failUnlessRaises(ValueError, self.loader.findByName, path)
class FileTest(packages.SysPathManglingTest):
def test_notFile(self):
self.failUnlessRaises(ValueError,
runner.filenameToModule, 'doesntexist')
def test_moduleInPath(self):
sample1 = runner.filenameToModule(util.sibpath(__file__, 'sample.py'))
import sample as sample2
self.failUnlessEqual(sample2, sample1)
def test_moduleNotInPath(self):
self.mangleSysPath(self.oldPath)
sample1 = runner.filenameToModule(os.path.join(self.parent,
'goodpackage',
'test_sample.py'))
self.mangleSysPath(self.newPath)
from goodpackage import test_sample as sample2
self.failUnlessEqual(os.path.splitext(sample2.__file__)[0],
os.path.splitext(sample1.__file__)[0])
def test_packageInPath(self):
package1 = runner.filenameToModule(os.path.join(self.parent,
'goodpackage'))
import goodpackage
self.failUnlessEqual(goodpackage, package1)
def test_packageNotInPath(self):
self.mangleSysPath(self.oldPath)
package1 = runner.filenameToModule(os.path.join(self.parent,
'goodpackage'))
self.mangleSysPath(self.newPath)
import goodpackage
self.failUnlessEqual(os.path.splitext(goodpackage.__file__)[0],
os.path.splitext(package1.__file__)[0])
def test_directoryNotPackage(self):
self.failUnlessRaises(ValueError, runner.filenameToModule,
util.sibpath(__file__, 'directory'))
def test_filenameNotPython(self):
self.failUnlessRaises(ValueError, runner.filenameToModule,
util.sibpath(__file__, 'notpython.py'))
def test_filenameMatchesPackage(self):
filename = os.path.join(self.parent, 'goodpackage.py')
fd = open(filename, 'w')
fd.write(packages.testModule)
fd.close()
try:
module = runner.filenameToModule(filename)
self.failUnlessEqual(filename, module.__file__)
finally:
os.remove(filename)
class LoaderTest(packages.SysPathManglingTest):
def setUp(self):
self.loader = runner.TestLoader()
packages.SysPathManglingTest.setUp(self)
def test_sortCases(self):
import sample
suite = self.loader.loadClass(sample.AlphabetTest)
self.failUnlessEqual(['test_a', 'test_b', 'test_c'],
[test._testMethodName for test in suite._tests])
newOrder = ['test_b', 'test_c', 'test_a']
sortDict = dict(zip(newOrder, range(3)))
self.loader.sorter = lambda x : sortDict.get(x.shortDescription(), -1)
suite = self.loader.loadClass(sample.AlphabetTest)
self.failUnlessEqual(newOrder,
[test._testMethodName for test in suite._tests])
def test_loadMethod(self):
import sample
suite = self.loader.loadMethod(sample.FooTest.test_foo)
self.failUnlessEqual(1, suite.countTestCases())
self.failUnlessEqual('test_foo', suite._testMethodName)
def test_loadFailingMethod(self):
# test added for issue1353
from twisted.trial import reporter
import erroneous
suite = self.loader.loadMethod(erroneous.TestRegularFail.test_fail)
result = reporter.TestResult()
suite.run(result)
self.failUnlessEqual(result.testsRun, 1)
self.failUnlessEqual(len(result.failures), 1)
def test_loadNonMethod(self):
import sample
self.failUnlessRaises(TypeError, self.loader.loadMethod, sample)
self.failUnlessRaises(TypeError,
self.loader.loadMethod, sample.FooTest)
self.failUnlessRaises(TypeError, self.loader.loadMethod, "string")
self.failUnlessRaises(TypeError,
self.loader.loadMethod, ('foo', 'bar'))
def test_loadClass(self):
import sample
suite = self.loader.loadClass(sample.FooTest)
self.failUnlessEqual(2, suite.countTestCases())
self.failUnlessEqual(['test_bar', 'test_foo'],
[test._testMethodName for test in suite._tests])
def test_loadWithoutForcedGarbageCollection(self):
"""
Tests loaded by default should not be set to force garbage collection.
This test checks 'loadMethod'.
"""
import sample
test = self.loader.loadMethod(sample.FooTest.test_foo)
self.assertEqual(test.forceGarbageCollection, False)
def test_loadWithForcedGarbageCollection(self):
"""
If the loader is set to force garbage collection, any tests it loads
should also be set to force garbage collection. This test checks
'loadMethod'.
"""
import sample
self.loader.forceGarbageCollection = True
test = self.loader.loadMethod(sample.FooTest.test_foo)
self.assertEqual(test.forceGarbageCollection, True)
def test_loadWithoutForcedGarbageCollectionClass(self):
"""
Tests loaded by default should not be set to force garbage collection.
This test checks 'loadClass'.
"""
import sample
suite = self.loader.loadClass(sample.FooTest)
def visitor(case):
self.assertEqual(case.forceGarbageCollection, False)
suite.visit(visitor)
def test_loadWithForcedGarbageCollectionClass(self):
"""
If the loader is set to force garbage collection, any tests it loads
should also be set to force garbage collection. This test checks
'loadClass'.
"""
import sample
self.loader.forceGarbageCollection = True
suite = self.loader.loadClass(sample.FooTest)
def visitor(case):
self.assertEqual(case.forceGarbageCollection, True)
suite.visit(visitor)
def test_loadNonClass(self):
import sample
self.failUnlessRaises(TypeError, self.loader.loadClass, sample)
self.failUnlessRaises(TypeError,
self.loader.loadClass, sample.FooTest.test_foo)
self.failUnlessRaises(TypeError, self.loader.loadClass, "string")
self.failUnlessRaises(TypeError,
self.loader.loadClass, ('foo', 'bar'))
def test_loadNonTestCase(self):
import sample
self.failUnlessRaises(ValueError, self.loader.loadClass,
sample.NotATest)
def test_loadModule(self):
import sample
suite = self.loader.loadModule(sample)
self.failUnlessEqual(7, suite.countTestCases())
def test_loadNonModule(self):
import sample
self.failUnlessRaises(TypeError,
self.loader.loadModule, sample.FooTest)
self.failUnlessRaises(TypeError,
self.loader.loadModule, sample.FooTest.test_foo)
self.failUnlessRaises(TypeError, self.loader.loadModule, "string")
self.failUnlessRaises(TypeError,
self.loader.loadModule, ('foo', 'bar'))
def test_loadPackage(self):
import goodpackage
suite = self.loader.loadPackage(goodpackage)
self.failUnlessEqual(7, suite.countTestCases())
def test_loadNonPackage(self):
import sample
self.failUnlessRaises(TypeError,
self.loader.loadPackage, sample.FooTest)
self.failUnlessRaises(TypeError,
self.loader.loadPackage, sample.FooTest.test_foo)
self.failUnlessRaises(TypeError, self.loader.loadPackage, "string")
self.failUnlessRaises(TypeError,
self.loader.loadPackage, ('foo', 'bar'))
def test_loadModuleAsPackage(self):
import sample
## XXX -- should this instead raise a ValueError? -- jml
self.failUnlessRaises(TypeError, self.loader.loadPackage, sample)
def test_loadPackageRecursive(self):
import goodpackage
suite = self.loader.loadPackage(goodpackage, recurse=True)
self.failUnlessEqual(14, suite.countTestCases())
def test_loadAnythingOnModule(self):
import sample
suite = self.loader.loadAnything(sample)
self.failUnlessEqual(sample.__name__,
suite._tests[0]._tests[0].__class__.__module__)
def test_loadAnythingOnClass(self):
import sample
suite = self.loader.loadAnything(sample.FooTest)
self.failUnlessEqual(2, suite.countTestCases())
def test_loadAnythingOnMethod(self):
import sample
suite = self.loader.loadAnything(sample.FooTest.test_foo)
self.failUnlessEqual(1, suite.countTestCases())
def test_loadAnythingOnPackage(self):
import goodpackage
suite = self.loader.loadAnything(goodpackage)
self.failUnless(isinstance(suite, self.loader.suiteFactory))
self.failUnlessEqual(7, suite.countTestCases())
def test_loadAnythingOnPackageRecursive(self):
import goodpackage
suite = self.loader.loadAnything(goodpackage, recurse=True)
self.failUnless(isinstance(suite, self.loader.suiteFactory))
self.failUnlessEqual(14, suite.countTestCases())
def test_loadAnythingOnString(self):
# the important thing about this test is not the string-iness
# but the non-handledness.
self.failUnlessRaises(TypeError,
self.loader.loadAnything, "goodpackage")
def test_importErrors(self):
import package
suite = self.loader.loadPackage(package, recurse=True)
result = reporter.Reporter()
suite.run(result)
self.failUnlessEqual(False, result.wasSuccessful())
self.failUnlessEqual(2, len(result.errors))
errors = [test.id() for test, error in result.errors]
errors.sort()
self.failUnlessEqual(errors, ['package.test_bad_module',
'package.test_import_module'])
def test_loadModuleWith_test_suite(self):
"""
Check that C{test_suite} is used when present and other L{TestCase}s are
not included.
"""
from twisted.trial.test import mockcustomsuite
suite = self.loader.loadModule(mockcustomsuite)
self.failUnlessEqual(0, suite.countTestCases())
self.failUnlessEqual("MyCustomSuite", getattr(suite, 'name', None))
def test_loadModuleWith_testSuite(self):
"""
Check that C{testSuite} is used when present and other L{TestCase}s are
not included.
"""
from twisted.trial.test import mockcustomsuite2
suite = self.loader.loadModule(mockcustomsuite2)
self.assertEqual(0, suite.countTestCases())
self.assertEqual("MyCustomSuite", getattr(suite, 'name', None))
def test_loadModuleWithBothCustom(self):
"""
Check that if C{testSuite} and C{test_suite} are both present in a
module then C{testSuite} gets priority.
"""
from twisted.trial.test import mockcustomsuite3
suite = self.loader.loadModule(mockcustomsuite3)
self.assertEqual('testSuite', getattr(suite, 'name', None))
def test_customLoadRaisesAttributeError(self):
"""
Make sure that any C{AttributeError}s raised by C{testSuite} are not
swallowed by L{TestLoader}.
"""
def testSuite():
raise AttributeError('should be reraised')
from twisted.trial.test import mockcustomsuite2
mockcustomsuite2.testSuite, original = (testSuite,
mockcustomsuite2.testSuite)
try:
self.assertRaises(AttributeError, self.loader.loadModule,
mockcustomsuite2)
finally:
mockcustomsuite2.testSuite = original
# XXX - duplicated and modified from test_script
def assertSuitesEqual(self, test1, test2):
loader = runner.TestLoader()
names1 = testNames(test1)
names2 = testNames(test2)
names1.sort()
names2.sort()
self.assertEqual(names1, names2)
def test_loadByNamesDuplicate(self):
"""
Check that loadByNames ignores duplicate names
"""
module = 'twisted.trial.test.test_test_visitor'
suite1 = self.loader.loadByNames([module, module], True)
suite2 = self.loader.loadByName(module, True)
self.assertSuitesEqual(suite1, suite2)
def test_loadDifferentNames(self):
"""
Check that loadByNames loads all the names that it is given
"""
modules = ['goodpackage', 'package.test_module']
suite1 = self.loader.loadByNames(modules)
suite2 = runner.TestSuite(map(self.loader.loadByName, modules))
self.assertSuitesEqual(suite1, suite2)
class ZipLoadingTest(LoaderTest):
def setUp(self):
from twisted.test.test_paths import zipit
LoaderTest.setUp(self)
zipit(self.parent, self.parent+'.zip')
self.parent += '.zip'
self.mangleSysPath(self.oldPath+[self.parent])
class PackageOrderingTest(packages.SysPathManglingTest):
def setUp(self):
self.resultingTests = []
self.loader = runner.TestLoader()
self.topDir = self.mktemp()
parent = os.path.join(self.topDir, "uberpackage")
os.makedirs(parent)
file(os.path.join(parent, "__init__.py"), "wb").close()
packages.SysPathManglingTest.setUp(self, parent)
self.mangleSysPath(self.oldPath + [self.topDir])
def visitCase(self, case):
self.resultingTests.append(case)
def _trialSortAlgorithm(self, sorter):
"""
Right now, halfway by accident, trial sorts like this:
1. all modules are grouped together in one list and sorted.
2. within each module, the classes are grouped together in one list
and sorted.
3. finally within each class, each test method is grouped together
in a list and sorted.
This attempts to return a sorted list of testable thingies following
those rules, so that we can compare the behavior of loadPackage.
The things that show as 'cases' are errors from modules which failed to
import, and test methods. Let's gather all those together.
"""
pkg = getModule('uberpackage')
testModules = []
for testModule in pkg.walkModules():
if testModule.name.split(".")[-1].startswith("test_"):
testModules.append(testModule)
sortedModules = util.dsu(testModules, sorter) # ONE
for modinfo in sortedModules:
# Now let's find all the classes.
module = modinfo.load(None)
if module is None:
yield modinfo
else:
testClasses = []
for attrib in modinfo.iterAttributes():
if runner.isTestCase(attrib.load()):
testClasses.append(attrib)
sortedClasses = util.dsu(testClasses, sorter) # TWO
for clsinfo in sortedClasses:
testMethods = []
for attr in clsinfo.iterAttributes():
if attr.name.split(".")[-1].startswith('test'):
testMethods.append(attr)
sortedMethods = util.dsu(testMethods, sorter) # THREE
for methinfo in sortedMethods:
yield methinfo
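    # Illustrative note only (kept as comments so this test module is not
    # changed): util.dsu above is assumed to be a decorate-sort-undecorate
    # helper, roughly equivalent to sorted(items, key=sorter). The manifest is
    # therefore built by sorting modules first (ONE), then classes within each
    # module (TWO), then test methods within each class (THREE), matching the
    # docstring above. For example:
    #     util.dsu(['test_b', 'test_c', 'test_a'], lambda name: name)
    #         # assumed to give ['test_a', 'test_b', 'test_c']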
def loadSortedPackages(self, sorter=runner.name):
"""
Verify that packages are loaded in the correct order.
"""
import uberpackage
self.loader.sorter = sorter
suite = self.loader.loadPackage(uberpackage, recurse=True)
suite.visit(self.visitCase)
manifest = list(self._trialSortAlgorithm(sorter))
for number, (manifestTest, actualTest) in enumerate(
zip(manifest, self.resultingTests)):
self.assertEqual(
manifestTest.name, actualTest.id(),
"#%d: %s != %s" %
(number, manifestTest.name, actualTest.id()))
self.assertEqual(len(manifest), len(self.resultingTests))
def test_sortPackagesDefaultOrder(self):
self.loadSortedPackages()
def test_sortPackagesSillyOrder(self):
def sillySorter(s):
# This has to work on fully-qualified class names and class
# objects, which is silly, but it's the "spec", such as it is.
# if isinstance(s, type) or isinstance(s, types.ClassType):
# return s.__module__+'.'+s.__name__
n = runner.name(s)
d = md5.new(n).hexdigest()
return d
self.loadSortedPackages(sillySorter)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor Handle Operations."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import resource_handle_pb2
from tensorflow.python import pywrap_tensorflow_internal
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
def encode_resource_handle(resource_handle):
"""Encode a ResourceHandle proto as custom numpy struct type."""
return np.asarray(bytearray(resource_handle.SerializeToString()),
dtype=dtypes.np_resource)
class TensorHandle(object):
"""Represents a handle for a live tensor in a session."""
def __init__(self, handle, dtype, session):
"""Constructs a new tensor handle.
A tensor handle for a persistent tensor is a python string
that has the form of "tensor_name;unique_id;device_name".
Args:
handle: A tensor handle.
dtype: The data type of the tensor represented by `handle`.
session: The session in which the tensor is produced.
"""
self._handle = compat.as_str_any(handle)
self._resource_handle = None
self._dtype = dtype
self._session = session
self._auto_gc_enabled = True
def __del__(self):
if self._auto_gc_enabled:
self._session._register_dead_handle(self.handle)
def __str__(self):
return self._handle
def _get_resource_handle(self):
"""The ResourceHandle representation of this handle."""
if not self._resource_handle:
self._resource_handle = resource_handle_pb2.ResourceHandleProto()
self._resource_handle.device = self._handle.split(";")[-1]
self._resource_handle.container = (
pywrap_tensorflow_internal.TENSOR_HANDLE_KEY)
self._resource_handle.name = self._handle
return self._resource_handle
def to_numpy_array(self):
"""Convert a TensorHandle object to a feedable numpy value.
Returns:
A numpy array of a custom struct type that can be used as a feed value
to run().
"""
return encode_resource_handle(self._get_resource_handle())
@property
def handle(self):
"""The string representation of this handle."""
return self._handle
def eval(self):
"""Return the value of the tensor represented by this handle."""
if not self._auto_gc_enabled:
raise TypeError("Persistent tensor %s may have already been deleted."
% self.handle)
holder, reader = _get_handle_reader(self._session.graph, self._handle,
self._dtype)
return self._session.run(reader, feed_dict={holder: self._handle})
def delete(self):
"""Force the deletion of this persistent tensor."""
if not self._auto_gc_enabled:
raise TypeError("Persistent tensor %s may have already been deleted."
% self.handle)
self._auto_gc_enabled = False
holder, deleter = _get_handle_deleter(self._session.graph, 0, self._handle)
self._session.run(deleter, feed_dict={holder: self.handle})
def get_raw_handle(self):
"""Return the raw handle of the tensor.
Note that the method disables the automatic garbage collection of this
    persistent tensor. The caller is now responsible for managing the
    lifetime of the tensor.
"""
self._auto_gc_enabled = False
return self._handle
@staticmethod
def _get_device_name(handle):
"""The device name encoded in the handle."""
handle_str = compat.as_str_any(handle)
return pydev.canonical_name(handle_str.split(";")[-1])
@staticmethod
def _get_reader_key(handle):
"""The graph key for reader."""
handle_parts = str(handle).split(";")
return handle_parts[0] + ";" + handle_parts[-1]
@staticmethod
def _get_mover_key(feeder, handle):
"""The graph key for mover."""
return feeder.op.name + ";" + TensorHandle._get_reader_key(handle)
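# Illustrative sketch of the helpers above (comments only; the handle string is
# hypothetical, but it follows the documented "tensor_name;unique_id;device_name"
# form):
#
#   handle = "Const:0;42;/job:localhost/replica:0/task:0/device:CPU:0"
#   TensorHandle._get_device_name(handle)
#       # -> the canonicalized last component, i.e. the device string
#   TensorHandle._get_reader_key(handle)
#       # -> "Const:0;/job:localhost/replica:0/task:0/device:CPU:0"
#       #    (tensor name + device, dropping the unique id)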
@tf_export(v1=["get_session_handle"])
def get_session_handle(data, name=None):
"""Return the handle of `data`.
This is EXPERIMENTAL and subject to change.
Keep `data` "in-place" in the runtime and create a handle that can be
used to retrieve `data` in a subsequent run().
Combined with `get_session_tensor`, we can keep a tensor produced in
one run call in place, and use it as the input in a future run call.
Args:
data: A tensor to be stored in the session.
name: Optional name prefix for the return tensor.
Returns:
A scalar string tensor representing a unique handle for `data`.
Raises:
TypeError: if `data` is not a Tensor.
Example:
```python
c = tf.multiply(a, b)
h = tf.get_session_handle(c)
h = sess.run(h)
p, a = tf.get_session_tensor(h.handle, tf.float32)
b = tf.multiply(a, 10)
c = sess.run(b, feed_dict={p: h.handle})
```
"""
if not isinstance(data, ops.Tensor):
raise TypeError("`data` must be of type Tensor.")
# Colocate this operation with data.
with ops.colocate_with(data):
return gen_data_flow_ops.get_session_handle(data, name=name)
@tf_export(v1=["get_session_tensor"])
def get_session_tensor(handle, dtype, name=None):
"""Get the tensor of type `dtype` by feeding a tensor handle.
This is EXPERIMENTAL and subject to change.
Get the value of the tensor from a tensor handle. The tensor
is produced in a previous run() and stored in the state of the
session.
Args:
handle: The string representation of a persistent tensor handle.
dtype: The type of the output tensor.
name: Optional name prefix for the return tensor.
Returns:
A pair of tensors. The first is a placeholder for feeding a
tensor handle and the second is the tensor in the session state
keyed by the tensor handle.
Example:
```python
c = tf.multiply(a, b)
h = tf.get_session_handle(c)
h = sess.run(h)
p, a = tf.get_session_tensor(h.handle, tf.float32)
b = tf.multiply(a, 10)
c = sess.run(b, feed_dict={p: h.handle})
```
"""
handle_device = TensorHandle._get_device_name(handle)
with ops.device(handle_device):
holder = array_ops.placeholder(dtypes.string)
_register_handle_feeder(holder.graph, holder, dtype)
tensor = gen_data_flow_ops.get_session_tensor(holder, dtype, name=name)
return (holder, tensor)
@tf_export(v1=["delete_session_tensor"])
def delete_session_tensor(handle, name=None):
"""Delete the tensor for the given tensor handle.
This is EXPERIMENTAL and subject to change.
Delete the tensor of a given tensor handle. The tensor is produced
in a previous run() and stored in the state of the session.
Args:
handle: The string representation of a persistent tensor handle.
name: Optional name prefix for the return tensor.
Returns:
A pair of graph elements. The first is a placeholder for feeding a
tensor handle and the second is a deletion operation.
"""
handle_device = TensorHandle._get_device_name(handle)
with ops.device(handle_device):
holder = array_ops.placeholder(dtypes.string)
deleter = gen_data_flow_ops.delete_session_tensor(holder, name=name)
return (holder, deleter)
def _register_handle_feeder(graph, feeder, dtype):
graph._handle_feeders[feeder.op.name] = dtype
def _get_handle_feeder(graph, feeder):
return graph._handle_feeders.get(feeder.op.name)
def _get_handle_reader(graph, handle, dtype):
"""Return a read subgraph for this handle."""
graph_key = TensorHandle._get_reader_key(handle)
result = graph._handle_readers.get(graph_key)
if result is None:
# Create reader if we haven't done it.
handle_device = TensorHandle._get_device_name(handle)
with graph.as_default(), graph.device(handle_device):
holder = array_ops.placeholder(dtypes.string)
_register_handle_feeder(holder.graph, holder, dtype)
reader = gen_data_flow_ops.get_session_tensor(holder, dtype)
result = (holder, reader)
graph._handle_readers[graph_key] = result
return result
def _get_handle_mover(graph, feeder, handle):
"""Return a move subgraph for this pair of feeder and handle."""
dtype = _get_handle_feeder(graph, feeder)
if dtype is None:
return None
handle_device = TensorHandle._get_device_name(handle)
if feeder.op.device == handle_device:
return None
# Now we know we have to move the tensor.
graph_key = TensorHandle._get_mover_key(feeder, handle)
result = graph._handle_movers.get(graph_key)
if result is None:
# Create mover if we haven't done it.
holder, reader = _get_handle_reader(graph, handle, dtype)
with graph.as_default(), graph.device(feeder.op.device):
mover = gen_data_flow_ops.get_session_handle(reader)
result = (holder, mover)
graph._handle_movers[graph_key] = result
return result
def _get_handle_deleter(graph, deleter_key, handle):
"""Return a deletion subgraph for this handle."""
result = graph._handle_deleters.get(deleter_key)
if result is None:
# Create deleter if we haven't done it.
handle_device = TensorHandle._get_device_name(handle)
with graph.as_default(), graph.device(handle_device):
holder = array_ops.placeholder(dtypes.string)
deleter = gen_data_flow_ops.delete_session_tensor(holder)
result = (holder, deleter)
graph._handle_deleters[deleter_key] = result
return result
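# A minimal end-to-end sketch of this module's public API, kept in comments so
# that importing this file stays side-effect free. It assumes the TensorFlow
# 1.x tf.Session API and mirrors the docstring examples above:
#
#   import tensorflow as tf
#   a = tf.constant(3.0)
#   b = tf.constant(4.0)
#   c = tf.multiply(a, b)
#   h_op = tf.get_session_handle(c)
#   with tf.Session() as sess:
#     h = sess.run(h_op)                                   # a TensorHandle
#     p, x = tf.get_session_tensor(h.handle, tf.float32)   # placeholder + value
#     print(sess.run(x, feed_dict={p: h.handle}))          # 12.0
#     q, deleter = tf.delete_session_tensor(h.handle)
#     sess.run(deleter, feed_dict={q: h.handle})           # free the stored tensor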
|
|
# Using Linear Regression to predict
# family home sale prices in Ames, Iowa
# Packages
import pandas as pd
import numpy as np
import statsmodels.formula.api as smf
import matplotlib.pyplot as plt
import seaborn as sns
from tabulate import tabulate
from statsmodels.iolib.summary2 import summary_col
# Set some options for the output
pd.set_option('display.notebook_repr_html', False)
pd.set_option('display.max_columns', 40)
pd.set_option('display.max_rows', 10)
pd.set_option('display.width', 120)
# Read in the data
path = 'C:/Users/sgran/Desktop/northwestern/predict_410/assignment_1/'
train = pd.read_csv(path + 'ames_train.csv')
test = pd.read_csv(path + 'ames_test.csv')
# Convert all variable names to lower case
train.columns = [col.lower() for col in train.columns]
test.columns = [col.lower() for col in test.columns]
# EDA
print('\n----- Summary of Train Data -----\n')
print('Object type: ', type(train))
print('Number of observations & variables: ', train.shape)
# Variable names and information
print(train.info())
print(train.dtypes.value_counts())
# Descriptive statistics
print(train.describe())
print(tabulate(
train[[
'saleprice',
'yrsold',
'yearbuilt',
'overallqual',
'grlivarea',
'garagecars'
]].describe().round(1),
headers='keys',
tablefmt='psql'
))
# show a portion of the beginning of the DataFrame
print(train.head(10))
print(train.shape)
train.loc[:, train.isnull().any()].isnull().sum().sort_values(ascending=False)
train[train == 0].count().sort_values(ascending=False)
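# Stacked bar per column: missing values (red) at the bottom, zeros (yellow)
# in the middle, remaining populated values (green) on top -- a quick visual
# check of data completeness.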
t_null = train.isnull().sum()
t_zero = train[train == 0].count()
t_good = train.shape[0] - (t_null + t_zero)
xx = range(train.shape[1])
plt.figure(figsize=(8,8))
plt.bar(xx, t_good, color='g', width=1,
bottom=t_null+t_zero)
plt.bar(xx, t_zero, color='y', width=1,
bottom=t_null)
plt.bar(xx, t_null, color='r', width=1)
plt.show()
print(t_null[t_null > 1000].sort_values(ascending=False))
print(t_zero[t_zero > 1900].sort_values(ascending=False))
drop_cols = (t_null > 1000) | (t_zero > 1900)
train = train.loc[:, ~drop_cols]  # keep only the columns not flagged for dropping
# Some quick plots of the data
train.hist(figsize=(18,14))
train.plot(
kind='box',
subplots=True,
layout=(5,9),
sharex=False,
sharey=False,
figsize=(18,14)
)
train.plot.scatter(x='grlivarea', y='saleprice')
train.boxplot(column='saleprice', by='yrsold')
train.plot.scatter(x='subclass', y='saleprice')
train.boxplot(column='saleprice', by='overallqual')
train.boxplot(column='saleprice', by='overallcond')
train.plot.scatter(x='overallcond', y='saleprice')
train.plot.scatter(x='lotarea', y='saleprice')
# Replace NaN values in the train data: numeric columns get their median,
# any remaining (non-numeric) columns get their most frequent value
train = train.fillna(train.median())
train = train.apply(lambda col: col.fillna(col.value_counts().index[0]))
train.head()
t_null = train.isnull().sum()
t_zero = train[train == 0].count()
t_good = train.shape[0] - (t_null + t_zero)
xx = range(train.shape[1])
plt.figure(figsize=(14,14))
plt.bar(xx, t_good, color='g', width=.8,
bottom=t_null+t_zero)
plt.bar(xx, t_zero, color='y', width=.8,
bottom=t_null)
plt.bar(xx, t_null, color='r', width=.8)
plt.show()
train.bldgtype.unique()
train.housestyle.unique()
# Goal is typical family home
# Drop observations too far from typical
iqr = np.percentile(train.saleprice, 75) - np.percentile(train.saleprice, 25)
drop_rows = train.saleprice > iqr * 1.5 + np.percentile(train.saleprice, 75)
train = train.loc[~drop_rows, :]
iqr = np.percentile(train.grlivarea, 75) - np.percentile(train.grlivarea, 25)
drop_rows = train.grlivarea > iqr * 1.5 + np.percentile(train.grlivarea, 75)
train = train.loc[~drop_rows, :]
iqr = np.percentile(train.lotarea, 75) - np.percentile(train.lotarea, 25)
drop_rows = train.lotarea > iqr * 1.5 + np.percentile(train.lotarea, 75)
train = train.loc[~drop_rows, :]
iqr = np.percentile(train.totalbsmtsf, 75) - np.percentile(train.totalbsmtsf, 25)
drop_rows = train.totalbsmtsf > iqr * 1.5 + np.percentile(train.totalbsmtsf, 75)
train = train.loc[~drop_rows, :]
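# The four blocks above all apply the same upper-fence rule (Q3 + 1.5 * IQR).
# A small helper, shown only as a sketch (defined but not called, so the
# explicit steps above remain the ones actually run), could factor this out:
def drop_upper_outliers(df, col):
    """Drop rows whose `col` value exceeds Q3 + 1.5 * IQR (upper fence only)."""
    q1, q3 = np.percentile(df[col], [25, 75])
    upper_fence = q3 + 1.5 * (q3 - q1)
    return df.loc[df[col] <= upper_fence, :]
# e.g. train = drop_upper_outliers(train, 'saleprice')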
# Replace 0 values in living area with the median of the non-zero values in train data
m = np.median(train.grlivarea[train.grlivarea > 0])
train = train.replace({'grlivarea': {0: m}})
# Discrete variables
plt.figure()
g = sns.PairGrid(train,
x_vars=["bldgtype",
"exterqual",
"centralair",
"kitchenqual",
"salecondition"],
y_vars=["saleprice"],
aspect=.75, size=3.5)
g.map(sns.violinplot, palette="pastel");
# Print correlations
corr_matrix = train.corr()
print(corr_matrix["saleprice"].sort_values(ascending=False).head(10))
print(corr_matrix["saleprice"].sort_values(ascending=True).head(10))
## Pick 10 variables to focus on
pick_10 = [
'saleprice',
'grlivarea',
'overallqual',
'garagecars',
'yearbuilt',
'totalbsmtsf',
'salecondition',
'bldgtype',
'kitchenqual',
'exterqual',
'centralair'
]
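# Correlation heatmap of the selected variables; the upper triangle is masked
# so each pairwise correlation is drawn only once.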
corr = train[pick_10].corr()
blank = np.zeros_like(corr, dtype=bool)
blank[np.triu_indices_from(blank)] = True
fig, ax = plt.subplots(figsize=(10, 10))
corr_map = sns.diverging_palette(255, 133, l=60, n=7,
center="dark", as_cmap=True)
sns.heatmap(corr, mask=blank, cmap=corr_map, square=True,
vmax=.3, linewidths=0.25, cbar_kws={"shrink": .5})
# Quick plots
for variable in pick_10[1:]:
if train[variable].dtype.name == 'object':
plt.figure()
sns.stripplot(y="saleprice", x=variable, data=train, jitter=True)
plt.show()
plt.figure()
sns.factorplot(y="saleprice", x=variable, data=train, kind="box")
plt.show()
else:
fig, ax = plt.subplots()
ax.set_ylabel('Sale Price')
ax.set_xlabel(variable)
scatter_plot = ax.scatter(
y=train['saleprice'],
x=train[variable],
facecolors = 'none',
edgecolors = 'blue'
)
plt.show()
plt.figure()
sns.factorplot(x="bldgtype", y="saleprice", col="exterqual", row="kitchenqual",
hue="overallqual", data=train, kind="swarm")
plt.figure()
sns.countplot(y="overallqual", hue="exterqual", data=train, palette="Greens_d")
# Run simple models
model1 = smf.ols(formula='saleprice ~ grlivarea', data=train).fit()
model2 = smf.ols(formula='saleprice ~ grlivarea + overallqual', data=train).fit()
model3 = smf.ols(formula='saleprice ~ grlivarea + overallqual + garagecars', data=train).fit()
model4 = smf.ols(formula='saleprice ~ grlivarea + overallqual + garagecars + yearbuilt', data=train).fit()
model5 = smf.ols(formula='saleprice ~ grlivarea + overallqual + garagecars + yearbuilt + totalbsmtsf + kitchenqual + exterqual + centralair', data=train).fit()
print('\n\nmodel 1----------\n', model1.summary())
print('\n\nmodel 2----------\n', model2.summary())
print('\n\nmodel 3----------\n', model3.summary())
print('\n\nmodel 4----------\n', model4.summary())
print('\n\nmodel 5----------\n', model5.summary())
out = [model1,
model2,
model3,
model4,
model5]
out_df = pd.DataFrame()
out_df['labels'] = ['rsquared', 'rsquared_adj', 'fstatistic', 'aic']
i = 0
for model in out:
train['pred'] = model.fittedvalues
plt.figure()
train.plot.scatter(x='saleprice', y='pred', title='model' + str(i+1))
plt.show()
out_df['model' + str(i+1)] = [
model.rsquared.round(3),
model.rsquared_adj.round(3),
model.fvalue.round(3),
model.aic.round(3)
]
i += 1
print(tabulate(out_df, headers=out_df.columns, tablefmt='psql'))
print(summary_col(out, stars=True))
train['predictions'] = model5.fittedvalues
print(train['predictions'])
# Clean test data
test.info()
test[3:] = test[3:].fillna(test[3:].median())
test["kitchenqual"] = test["kitchenqual"].fillna(test["kitchenqual"].value_counts().index[0])
test["exterqual"] = test["exterqual"].fillna(test["exterqual"].value_counts().index[0])
m = np.median(test.grlivarea[test.grlivarea > 0])
test = test.replace({'grlivarea': {0: m}})
print(test)
# Convert the prediction array to a DataFrame, then merge it with the test data index
test_predictions = model5.predict(test)
test_predictions[test_predictions < 0] = train['saleprice'].min()
print(test_predictions)
dat = {'p_saleprice': test_predictions}
df1 = test[['index']]
df2 = pd.DataFrame(data=dat)
# join_axes was removed from pd.concat in newer pandas; reindex on df1.index instead
submission = pd.concat([df1, df2], axis=1).reindex(df1.index)
print(submission)
|
|
from test import support
import unittest
import urllib.parse
RFC1808_BASE = "http://a/b/c/d;p?q#f"
RFC2396_BASE = "http://a/b/c/d;p?q"
RFC3986_BASE = 'http://a/b/c/d;p?q'
SIMPLE_BASE = 'http://a/b/c/d'
# A list of test cases. Each test case is a two-tuple that contains
# a query string and a list of (name, value) pairs with the expected result.
parse_qsl_test_cases = [
("", []),
("&", []),
("&&", []),
("=", [('', '')]),
("=a", [('', 'a')]),
("a", [('a', '')]),
("a=", [('a', '')]),
("a=", [('a', '')]),
("&a=b", [('a', 'b')]),
("a=a+b&b=b+c", [('a', 'a b'), ('b', 'b c')]),
("a=1&a=2", [('a', '1'), ('a', '2')]),
(b"", []),
(b"&", []),
(b"&&", []),
(b"=", [(b'', b'')]),
(b"=a", [(b'', b'a')]),
(b"a", [(b'a', b'')]),
(b"a=", [(b'a', b'')]),
(b"a=", [(b'a', b'')]),
(b"&a=b", [(b'a', b'b')]),
(b"a=a+b&b=b+c", [(b'a', b'a b'), (b'b', b'b c')]),
(b"a=1&a=2", [(b'a', b'1'), (b'a', b'2')]),
]
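# For reference (behaviour being exercised, not extra test data):
#   urllib.parse.parse_qsl("a=1&a=2")  ->  [('a', '1'), ('a', '2')]
#   urllib.parse.parse_qs("a=1&a=2")   ->  {'a': ['1', '2']}
# parse_qsl preserves order and repeated keys; parse_qs groups values per key.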
class UrlParseTestCase(unittest.TestCase):
def checkRoundtrips(self, url, parsed, split):
result = urllib.parse.urlparse(url)
self.assertEqual(result, parsed)
t = (result.scheme, result.netloc, result.path,
result.params, result.query, result.fragment)
self.assertEqual(t, parsed)
# put it back together and it should be the same
result2 = urllib.parse.urlunparse(result)
self.assertEqual(result2, url)
self.assertEqual(result2, result.geturl())
# the result of geturl() is a fixpoint; we can always parse it
# again to get the same result:
result3 = urllib.parse.urlparse(result.geturl())
self.assertEqual(result3.geturl(), result.geturl())
self.assertEqual(result3, result)
self.assertEqual(result3.scheme, result.scheme)
self.assertEqual(result3.netloc, result.netloc)
self.assertEqual(result3.path, result.path)
self.assertEqual(result3.params, result.params)
self.assertEqual(result3.query, result.query)
self.assertEqual(result3.fragment, result.fragment)
self.assertEqual(result3.username, result.username)
self.assertEqual(result3.password, result.password)
self.assertEqual(result3.hostname, result.hostname)
self.assertEqual(result3.port, result.port)
# check the roundtrip using urlsplit() as well
result = urllib.parse.urlsplit(url)
self.assertEqual(result, split)
t = (result.scheme, result.netloc, result.path,
result.query, result.fragment)
self.assertEqual(t, split)
result2 = urllib.parse.urlunsplit(result)
self.assertEqual(result2, url)
self.assertEqual(result2, result.geturl())
# check the fixpoint property of re-parsing the result of geturl()
result3 = urllib.parse.urlsplit(result.geturl())
self.assertEqual(result3.geturl(), result.geturl())
self.assertEqual(result3, result)
self.assertEqual(result3.scheme, result.scheme)
self.assertEqual(result3.netloc, result.netloc)
self.assertEqual(result3.path, result.path)
self.assertEqual(result3.query, result.query)
self.assertEqual(result3.fragment, result.fragment)
self.assertEqual(result3.username, result.username)
self.assertEqual(result3.password, result.password)
self.assertEqual(result3.hostname, result.hostname)
self.assertEqual(result3.port, result.port)
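    # In other words (a sketch, not an additional assertion): for any result r
    # from urlsplit() or urlparse(), urlsplit(r.geturl()).geturl() == r.geturl(),
    # so geturl() output can be re-parsed without further change.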
def test_qsl(self):
for orig, expect in parse_qsl_test_cases:
result = urllib.parse.parse_qsl(orig, keep_blank_values=True)
self.assertEqual(result, expect, "Error parsing %r" % orig)
expect_without_blanks = [v for v in expect if len(v[1])]
result = urllib.parse.parse_qsl(orig, keep_blank_values=False)
self.assertEqual(result, expect_without_blanks,
"Error parsing %r" % orig)
def test_roundtrips(self):
str_cases = [
('file:///tmp/junk.txt',
('file', '', '/tmp/junk.txt', '', '', ''),
('file', '', '/tmp/junk.txt', '', '')),
('imap://mail.python.org/mbox1',
('imap', 'mail.python.org', '/mbox1', '', '', ''),
('imap', 'mail.python.org', '/mbox1', '', '')),
('mms://wms.sys.hinet.net/cts/Drama/09006251100.asf',
('mms', 'wms.sys.hinet.net', '/cts/Drama/09006251100.asf',
'', '', ''),
('mms', 'wms.sys.hinet.net', '/cts/Drama/09006251100.asf',
'', '')),
('nfs://server/path/to/file.txt',
('nfs', 'server', '/path/to/file.txt', '', '', ''),
('nfs', 'server', '/path/to/file.txt', '', '')),
('svn+ssh://svn.zope.org/repos/main/ZConfig/trunk/',
('svn+ssh', 'svn.zope.org', '/repos/main/ZConfig/trunk/',
'', '', ''),
('svn+ssh', 'svn.zope.org', '/repos/main/ZConfig/trunk/',
'', '')),
('git+ssh://[email protected]/user/project.git',
('git+ssh', '[email protected]','/user/project.git',
'','',''),
('git+ssh', '[email protected]','/user/project.git',
'', '')),
]
def _encode(t):
return (t[0].encode('ascii'),
tuple(x.encode('ascii') for x in t[1]),
tuple(x.encode('ascii') for x in t[2]))
bytes_cases = [_encode(x) for x in str_cases]
for url, parsed, split in str_cases + bytes_cases:
self.checkRoundtrips(url, parsed, split)
def test_http_roundtrips(self):
# urllib.parse.urlsplit treats 'http:' as an optimized special case,
# so we test both 'http:' and 'https:' in all the following.
# Three cheers for white box knowledge!
str_cases = [
('://www.python.org',
('www.python.org', '', '', '', ''),
('www.python.org', '', '', '')),
('://www.python.org#abc',
('www.python.org', '', '', '', 'abc'),
('www.python.org', '', '', 'abc')),
('://www.python.org?q=abc',
('www.python.org', '', '', 'q=abc', ''),
('www.python.org', '', 'q=abc', '')),
('://www.python.org/#abc',
('www.python.org', '/', '', '', 'abc'),
('www.python.org', '/', '', 'abc')),
('://a/b/c/d;p?q#f',
('a', '/b/c/d', 'p', 'q', 'f'),
('a', '/b/c/d;p', 'q', 'f')),
]
def _encode(t):
return (t[0].encode('ascii'),
tuple(x.encode('ascii') for x in t[1]),
tuple(x.encode('ascii') for x in t[2]))
bytes_cases = [_encode(x) for x in str_cases]
str_schemes = ('http', 'https')
bytes_schemes = (b'http', b'https')
str_tests = str_schemes, str_cases
bytes_tests = bytes_schemes, bytes_cases
for schemes, test_cases in (str_tests, bytes_tests):
for scheme in schemes:
for url, parsed, split in test_cases:
url = scheme + url
parsed = (scheme,) + parsed
split = (scheme,) + split
self.checkRoundtrips(url, parsed, split)
def checkJoin(self, base, relurl, expected):
str_components = (base, relurl, expected)
self.assertEqual(urllib.parse.urljoin(base, relurl), expected)
bytes_components = baseb, relurlb, expectedb = [
x.encode('ascii') for x in str_components]
self.assertEqual(urllib.parse.urljoin(baseb, relurlb), expectedb)
def test_unparse_parse(self):
str_cases = ['Python', './Python','x-newscheme://foo.com/stuff','x://y','x:/y','x:/','/',]
bytes_cases = [x.encode('ascii') for x in str_cases]
for u in str_cases + bytes_cases:
self.assertEqual(urllib.parse.urlunsplit(urllib.parse.urlsplit(u)), u)
self.assertEqual(urllib.parse.urlunparse(urllib.parse.urlparse(u)), u)
def test_RFC1808(self):
# "normal" cases from RFC 1808:
self.checkJoin(RFC1808_BASE, 'g:h', 'g:h')
self.checkJoin(RFC1808_BASE, 'g', 'http://a/b/c/g')
self.checkJoin(RFC1808_BASE, './g', 'http://a/b/c/g')
self.checkJoin(RFC1808_BASE, 'g/', 'http://a/b/c/g/')
self.checkJoin(RFC1808_BASE, '/g', 'http://a/g')
self.checkJoin(RFC1808_BASE, '//g', 'http://g')
self.checkJoin(RFC1808_BASE, 'g?y', 'http://a/b/c/g?y')
self.checkJoin(RFC1808_BASE, 'g?y/./x', 'http://a/b/c/g?y/./x')
self.checkJoin(RFC1808_BASE, '#s', 'http://a/b/c/d;p?q#s')
self.checkJoin(RFC1808_BASE, 'g#s', 'http://a/b/c/g#s')
self.checkJoin(RFC1808_BASE, 'g#s/./x', 'http://a/b/c/g#s/./x')
self.checkJoin(RFC1808_BASE, 'g?y#s', 'http://a/b/c/g?y#s')
self.checkJoin(RFC1808_BASE, 'g;x', 'http://a/b/c/g;x')
self.checkJoin(RFC1808_BASE, 'g;x?y#s', 'http://a/b/c/g;x?y#s')
self.checkJoin(RFC1808_BASE, '.', 'http://a/b/c/')
self.checkJoin(RFC1808_BASE, './', 'http://a/b/c/')
self.checkJoin(RFC1808_BASE, '..', 'http://a/b/')
self.checkJoin(RFC1808_BASE, '../', 'http://a/b/')
self.checkJoin(RFC1808_BASE, '../g', 'http://a/b/g')
self.checkJoin(RFC1808_BASE, '../..', 'http://a/')
self.checkJoin(RFC1808_BASE, '../../', 'http://a/')
self.checkJoin(RFC1808_BASE, '../../g', 'http://a/g')
# "abnormal" cases from RFC 1808:
self.checkJoin(RFC1808_BASE, '', 'http://a/b/c/d;p?q#f')
self.checkJoin(RFC1808_BASE, '../../../g', 'http://a/../g')
self.checkJoin(RFC1808_BASE, '../../../../g', 'http://a/../../g')
self.checkJoin(RFC1808_BASE, '/./g', 'http://a/./g')
self.checkJoin(RFC1808_BASE, '/../g', 'http://a/../g')
self.checkJoin(RFC1808_BASE, 'g.', 'http://a/b/c/g.')
self.checkJoin(RFC1808_BASE, '.g', 'http://a/b/c/.g')
self.checkJoin(RFC1808_BASE, 'g..', 'http://a/b/c/g..')
self.checkJoin(RFC1808_BASE, '..g', 'http://a/b/c/..g')
self.checkJoin(RFC1808_BASE, './../g', 'http://a/b/g')
self.checkJoin(RFC1808_BASE, './g/.', 'http://a/b/c/g/')
self.checkJoin(RFC1808_BASE, 'g/./h', 'http://a/b/c/g/h')
self.checkJoin(RFC1808_BASE, 'g/../h', 'http://a/b/c/h')
# RFC 1808 and RFC 1630 disagree on these (according to RFC 1808),
# so we'll not actually run these tests (which expect 1808 behavior).
#self.checkJoin(RFC1808_BASE, 'http:g', 'http:g')
#self.checkJoin(RFC1808_BASE, 'http:', 'http:')
def test_RFC2368(self):
# Issue 11467: path that starts with a number is not parsed correctly
self.assertEqual(urllib.parse.urlparse('mailto:[email protected]'),
('mailto', '', '[email protected]', '', '', ''))
def test_RFC2396(self):
# cases from RFC 2396
self.checkJoin(RFC2396_BASE, 'g:h', 'g:h')
self.checkJoin(RFC2396_BASE, 'g', 'http://a/b/c/g')
self.checkJoin(RFC2396_BASE, './g', 'http://a/b/c/g')
self.checkJoin(RFC2396_BASE, 'g/', 'http://a/b/c/g/')
self.checkJoin(RFC2396_BASE, '/g', 'http://a/g')
self.checkJoin(RFC2396_BASE, '//g', 'http://g')
self.checkJoin(RFC2396_BASE, 'g?y', 'http://a/b/c/g?y')
self.checkJoin(RFC2396_BASE, '#s', 'http://a/b/c/d;p?q#s')
self.checkJoin(RFC2396_BASE, 'g#s', 'http://a/b/c/g#s')
self.checkJoin(RFC2396_BASE, 'g?y#s', 'http://a/b/c/g?y#s')
self.checkJoin(RFC2396_BASE, 'g;x', 'http://a/b/c/g;x')
self.checkJoin(RFC2396_BASE, 'g;x?y#s', 'http://a/b/c/g;x?y#s')
self.checkJoin(RFC2396_BASE, '.', 'http://a/b/c/')
self.checkJoin(RFC2396_BASE, './', 'http://a/b/c/')
self.checkJoin(RFC2396_BASE, '..', 'http://a/b/')
self.checkJoin(RFC2396_BASE, '../', 'http://a/b/')
self.checkJoin(RFC2396_BASE, '../g', 'http://a/b/g')
self.checkJoin(RFC2396_BASE, '../..', 'http://a/')
self.checkJoin(RFC2396_BASE, '../../', 'http://a/')
self.checkJoin(RFC2396_BASE, '../../g', 'http://a/g')
self.checkJoin(RFC2396_BASE, '', RFC2396_BASE)
self.checkJoin(RFC2396_BASE, '../../../g', 'http://a/../g')
self.checkJoin(RFC2396_BASE, '../../../../g', 'http://a/../../g')
self.checkJoin(RFC2396_BASE, '/./g', 'http://a/./g')
self.checkJoin(RFC2396_BASE, '/../g', 'http://a/../g')
self.checkJoin(RFC2396_BASE, 'g.', 'http://a/b/c/g.')
self.checkJoin(RFC2396_BASE, '.g', 'http://a/b/c/.g')
self.checkJoin(RFC2396_BASE, 'g..', 'http://a/b/c/g..')
self.checkJoin(RFC2396_BASE, '..g', 'http://a/b/c/..g')
self.checkJoin(RFC2396_BASE, './../g', 'http://a/b/g')
self.checkJoin(RFC2396_BASE, './g/.', 'http://a/b/c/g/')
self.checkJoin(RFC2396_BASE, 'g/./h', 'http://a/b/c/g/h')
self.checkJoin(RFC2396_BASE, 'g/../h', 'http://a/b/c/h')
self.checkJoin(RFC2396_BASE, 'g;x=1/./y', 'http://a/b/c/g;x=1/y')
self.checkJoin(RFC2396_BASE, 'g;x=1/../y', 'http://a/b/c/y')
self.checkJoin(RFC2396_BASE, 'g?y/./x', 'http://a/b/c/g?y/./x')
self.checkJoin(RFC2396_BASE, 'g?y/../x', 'http://a/b/c/g?y/../x')
self.checkJoin(RFC2396_BASE, 'g#s/./x', 'http://a/b/c/g#s/./x')
self.checkJoin(RFC2396_BASE, 'g#s/../x', 'http://a/b/c/g#s/../x')
def test_RFC3986(self):
# Test cases from RFC3986
self.checkJoin(RFC3986_BASE, '?y','http://a/b/c/d;p?y')
self.checkJoin(RFC2396_BASE, ';x', 'http://a/b/c/;x')
self.checkJoin(RFC3986_BASE, 'g:h','g:h')
self.checkJoin(RFC3986_BASE, 'g','http://a/b/c/g')
self.checkJoin(RFC3986_BASE, './g','http://a/b/c/g')
self.checkJoin(RFC3986_BASE, 'g/','http://a/b/c/g/')
self.checkJoin(RFC3986_BASE, '/g','http://a/g')
self.checkJoin(RFC3986_BASE, '//g','http://g')
self.checkJoin(RFC3986_BASE, '?y','http://a/b/c/d;p?y')
self.checkJoin(RFC3986_BASE, 'g?y','http://a/b/c/g?y')
self.checkJoin(RFC3986_BASE, '#s','http://a/b/c/d;p?q#s')
self.checkJoin(RFC3986_BASE, 'g#s','http://a/b/c/g#s')
self.checkJoin(RFC3986_BASE, 'g?y#s','http://a/b/c/g?y#s')
self.checkJoin(RFC3986_BASE, ';x','http://a/b/c/;x')
self.checkJoin(RFC3986_BASE, 'g;x','http://a/b/c/g;x')
self.checkJoin(RFC3986_BASE, 'g;x?y#s','http://a/b/c/g;x?y#s')
self.checkJoin(RFC3986_BASE, '','http://a/b/c/d;p?q')
self.checkJoin(RFC3986_BASE, '.','http://a/b/c/')
self.checkJoin(RFC3986_BASE, './','http://a/b/c/')
self.checkJoin(RFC3986_BASE, '..','http://a/b/')
self.checkJoin(RFC3986_BASE, '../','http://a/b/')
self.checkJoin(RFC3986_BASE, '../g','http://a/b/g')
self.checkJoin(RFC3986_BASE, '../..','http://a/')
self.checkJoin(RFC3986_BASE, '../../','http://a/')
self.checkJoin(RFC3986_BASE, '../../g','http://a/g')
        # Abnormal Examples
        # The 'abnormal scenarios' are incompatible with RFC 3986 parsing;
        # the tests are kept here for reference.
#self.checkJoin(RFC3986_BASE, '../../../g','http://a/g')
#self.checkJoin(RFC3986_BASE, '../../../../g','http://a/g')
#self.checkJoin(RFC3986_BASE, '/./g','http://a/g')
#self.checkJoin(RFC3986_BASE, '/../g','http://a/g')
self.checkJoin(RFC3986_BASE, 'g.','http://a/b/c/g.')
self.checkJoin(RFC3986_BASE, '.g','http://a/b/c/.g')
self.checkJoin(RFC3986_BASE, 'g..','http://a/b/c/g..')
self.checkJoin(RFC3986_BASE, '..g','http://a/b/c/..g')
self.checkJoin(RFC3986_BASE, './../g','http://a/b/g')
self.checkJoin(RFC3986_BASE, './g/.','http://a/b/c/g/')
self.checkJoin(RFC3986_BASE, 'g/./h','http://a/b/c/g/h')
self.checkJoin(RFC3986_BASE, 'g/../h','http://a/b/c/h')
self.checkJoin(RFC3986_BASE, 'g;x=1/./y','http://a/b/c/g;x=1/y')
self.checkJoin(RFC3986_BASE, 'g;x=1/../y','http://a/b/c/y')
self.checkJoin(RFC3986_BASE, 'g?y/./x','http://a/b/c/g?y/./x')
self.checkJoin(RFC3986_BASE, 'g?y/../x','http://a/b/c/g?y/../x')
self.checkJoin(RFC3986_BASE, 'g#s/./x','http://a/b/c/g#s/./x')
self.checkJoin(RFC3986_BASE, 'g#s/../x','http://a/b/c/g#s/../x')
#self.checkJoin(RFC3986_BASE, 'http:g','http:g') # strict parser
self.checkJoin(RFC3986_BASE, 'http:g','http://a/b/c/g') #relaxed parser
# Test for issue9721
self.checkJoin('http://a/b/c/de', ';x','http://a/b/c/;x')
def test_urljoins(self):
self.checkJoin(SIMPLE_BASE, 'g:h','g:h')
self.checkJoin(SIMPLE_BASE, 'http:g','http://a/b/c/g')
self.checkJoin(SIMPLE_BASE, 'http:','http://a/b/c/d')
self.checkJoin(SIMPLE_BASE, 'g','http://a/b/c/g')
self.checkJoin(SIMPLE_BASE, './g','http://a/b/c/g')
self.checkJoin(SIMPLE_BASE, 'g/','http://a/b/c/g/')
self.checkJoin(SIMPLE_BASE, '/g','http://a/g')
self.checkJoin(SIMPLE_BASE, '//g','http://g')
self.checkJoin(SIMPLE_BASE, '?y','http://a/b/c/d?y')
self.checkJoin(SIMPLE_BASE, 'g?y','http://a/b/c/g?y')
self.checkJoin(SIMPLE_BASE, 'g?y/./x','http://a/b/c/g?y/./x')
self.checkJoin(SIMPLE_BASE, '.','http://a/b/c/')
self.checkJoin(SIMPLE_BASE, './','http://a/b/c/')
self.checkJoin(SIMPLE_BASE, '..','http://a/b/')
self.checkJoin(SIMPLE_BASE, '../','http://a/b/')
self.checkJoin(SIMPLE_BASE, '../g','http://a/b/g')
self.checkJoin(SIMPLE_BASE, '../..','http://a/')
self.checkJoin(SIMPLE_BASE, '../../g','http://a/g')
self.checkJoin(SIMPLE_BASE, '../../../g','http://a/../g')
self.checkJoin(SIMPLE_BASE, './../g','http://a/b/g')
self.checkJoin(SIMPLE_BASE, './g/.','http://a/b/c/g/')
self.checkJoin(SIMPLE_BASE, '/./g','http://a/./g')
self.checkJoin(SIMPLE_BASE, 'g/./h','http://a/b/c/g/h')
self.checkJoin(SIMPLE_BASE, 'g/../h','http://a/b/c/h')
self.checkJoin(SIMPLE_BASE, 'http:g','http://a/b/c/g')
self.checkJoin(SIMPLE_BASE, 'http:','http://a/b/c/d')
self.checkJoin(SIMPLE_BASE, 'http:?y','http://a/b/c/d?y')
self.checkJoin(SIMPLE_BASE, 'http:g?y','http://a/b/c/g?y')
self.checkJoin(SIMPLE_BASE, 'http:g?y/./x','http://a/b/c/g?y/./x')
self.checkJoin('http:///', '..','http:///')
self.checkJoin('', 'http://a/b/c/g?y/./x','http://a/b/c/g?y/./x')
self.checkJoin('', 'http://a/./g', 'http://a/./g')
self.checkJoin('svn://pathtorepo/dir1', 'dir2', 'svn://pathtorepo/dir2')
self.checkJoin('svn+ssh://pathtorepo/dir1', 'dir2', 'svn+ssh://pathtorepo/dir2')
def test_RFC2732(self):
str_cases = [
('http://Test.python.org:5432/foo/', 'test.python.org', 5432),
('http://12.34.56.78:5432/foo/', '12.34.56.78', 5432),
('http://[::1]:5432/foo/', '::1', 5432),
('http://[dead:beef::1]:5432/foo/', 'dead:beef::1', 5432),
('http://[dead:beef::]:5432/foo/', 'dead:beef::', 5432),
('http://[dead:beef:cafe:5417:affe:8FA3:deaf:feed]:5432/foo/',
'dead:beef:cafe:5417:affe:8fa3:deaf:feed', 5432),
('http://[::12.34.56.78]:5432/foo/', '::12.34.56.78', 5432),
('http://[::ffff:12.34.56.78]:5432/foo/',
'::ffff:12.34.56.78', 5432),
('http://Test.python.org/foo/', 'test.python.org', None),
('http://12.34.56.78/foo/', '12.34.56.78', None),
('http://[::1]/foo/', '::1', None),
('http://[dead:beef::1]/foo/', 'dead:beef::1', None),
('http://[dead:beef::]/foo/', 'dead:beef::', None),
('http://[dead:beef:cafe:5417:affe:8FA3:deaf:feed]/foo/',
'dead:beef:cafe:5417:affe:8fa3:deaf:feed', None),
('http://[::12.34.56.78]/foo/', '::12.34.56.78', None),
('http://[::ffff:12.34.56.78]/foo/',
'::ffff:12.34.56.78', None),
('http://Test.python.org:/foo/', 'test.python.org', None),
('http://12.34.56.78:/foo/', '12.34.56.78', None),
('http://[::1]:/foo/', '::1', None),
('http://[dead:beef::1]:/foo/', 'dead:beef::1', None),
('http://[dead:beef::]:/foo/', 'dead:beef::', None),
('http://[dead:beef:cafe:5417:affe:8FA3:deaf:feed]:/foo/',
'dead:beef:cafe:5417:affe:8fa3:deaf:feed', None),
('http://[::12.34.56.78]:/foo/', '::12.34.56.78', None),
('http://[::ffff:12.34.56.78]:/foo/',
'::ffff:12.34.56.78', None),
]
def _encode(t):
return t[0].encode('ascii'), t[1].encode('ascii'), t[2]
bytes_cases = [_encode(x) for x in str_cases]
for url, hostname, port in str_cases + bytes_cases:
urlparsed = urllib.parse.urlparse(url)
self.assertEqual((urlparsed.hostname, urlparsed.port) , (hostname, port))
str_cases = [
'http://::12.34.56.78]/',
'http://[::1/foo/',
'ftp://[::1/foo/bad]/bad',
'http://[::1/foo/bad]/bad',
'http://[::ffff:12.34.56.78']
bytes_cases = [x.encode('ascii') for x in str_cases]
for invalid_url in str_cases + bytes_cases:
self.assertRaises(ValueError, urllib.parse.urlparse, invalid_url)
def test_urldefrag(self):
str_cases = [
('http://python.org#frag', 'http://python.org', 'frag'),
('http://python.org', 'http://python.org', ''),
('http://python.org/#frag', 'http://python.org/', 'frag'),
('http://python.org/', 'http://python.org/', ''),
('http://python.org/?q#frag', 'http://python.org/?q', 'frag'),
('http://python.org/?q', 'http://python.org/?q', ''),
('http://python.org/p#frag', 'http://python.org/p', 'frag'),
('http://python.org/p?q', 'http://python.org/p?q', ''),
(RFC1808_BASE, 'http://a/b/c/d;p?q', 'f'),
(RFC2396_BASE, 'http://a/b/c/d;p?q', ''),
]
def _encode(t):
return type(t)(x.encode('ascii') for x in t)
bytes_cases = [_encode(x) for x in str_cases]
for url, defrag, frag in str_cases + bytes_cases:
result = urllib.parse.urldefrag(url)
self.assertEqual(result.geturl(), url)
self.assertEqual(result, (defrag, frag))
self.assertEqual(result.url, defrag)
self.assertEqual(result.fragment, frag)
def test_urlsplit_attributes(self):
url = "HTTP://WWW.PYTHON.ORG/doc/#frag"
p = urllib.parse.urlsplit(url)
self.assertEqual(p.scheme, "http")
self.assertEqual(p.netloc, "WWW.PYTHON.ORG")
self.assertEqual(p.path, "/doc/")
self.assertEqual(p.query, "")
self.assertEqual(p.fragment, "frag")
self.assertEqual(p.username, None)
self.assertEqual(p.password, None)
self.assertEqual(p.hostname, "www.python.org")
self.assertEqual(p.port, None)
# geturl() won't return exactly the original URL in this case
# since the scheme is always case-normalized
# We handle this by ignoring the first 4 characters of the URL
self.assertEqual(p.geturl()[4:], url[4:])
url = "http://User:[email protected]:080/doc/?query=yes#frag"
p = urllib.parse.urlsplit(url)
self.assertEqual(p.scheme, "http")
self.assertEqual(p.netloc, "User:[email protected]:080")
self.assertEqual(p.path, "/doc/")
self.assertEqual(p.query, "query=yes")
self.assertEqual(p.fragment, "frag")
self.assertEqual(p.username, "User")
self.assertEqual(p.password, "Pass")
self.assertEqual(p.hostname, "www.python.org")
self.assertEqual(p.port, 80)
self.assertEqual(p.geturl(), url)
# Addressing issue1698, which suggests Username can contain
# "@" characters. Though not RFC compliant, many ftp sites allow
# and request email addresses as usernames.
url = "http://[email protected]:[email protected]:080/doc/?query=yes#frag"
p = urllib.parse.urlsplit(url)
self.assertEqual(p.scheme, "http")
self.assertEqual(p.netloc, "[email protected]:[email protected]:080")
self.assertEqual(p.path, "/doc/")
self.assertEqual(p.query, "query=yes")
self.assertEqual(p.fragment, "frag")
self.assertEqual(p.username, "[email protected]")
self.assertEqual(p.password, "Pass")
self.assertEqual(p.hostname, "www.python.org")
self.assertEqual(p.port, 80)
self.assertEqual(p.geturl(), url)
# And check them all again, only with bytes this time
url = b"HTTP://WWW.PYTHON.ORG/doc/#frag"
p = urllib.parse.urlsplit(url)
self.assertEqual(p.scheme, b"http")
self.assertEqual(p.netloc, b"WWW.PYTHON.ORG")
self.assertEqual(p.path, b"/doc/")
self.assertEqual(p.query, b"")
self.assertEqual(p.fragment, b"frag")
self.assertEqual(p.username, None)
self.assertEqual(p.password, None)
self.assertEqual(p.hostname, b"www.python.org")
self.assertEqual(p.port, None)
self.assertEqual(p.geturl()[4:], url[4:])
url = b"http://User:[email protected]:080/doc/?query=yes#frag"
p = urllib.parse.urlsplit(url)
self.assertEqual(p.scheme, b"http")
self.assertEqual(p.netloc, b"User:[email protected]:080")
self.assertEqual(p.path, b"/doc/")
self.assertEqual(p.query, b"query=yes")
self.assertEqual(p.fragment, b"frag")
self.assertEqual(p.username, b"User")
self.assertEqual(p.password, b"Pass")
self.assertEqual(p.hostname, b"www.python.org")
self.assertEqual(p.port, 80)
self.assertEqual(p.geturl(), url)
url = b"http://[email protected]:[email protected]:080/doc/?query=yes#frag"
p = urllib.parse.urlsplit(url)
self.assertEqual(p.scheme, b"http")
self.assertEqual(p.netloc, b"[email protected]:[email protected]:080")
self.assertEqual(p.path, b"/doc/")
self.assertEqual(p.query, b"query=yes")
self.assertEqual(p.fragment, b"frag")
self.assertEqual(p.username, b"[email protected]")
self.assertEqual(p.password, b"Pass")
self.assertEqual(p.hostname, b"www.python.org")
self.assertEqual(p.port, 80)
self.assertEqual(p.geturl(), url)
# Verify an illegal port is returned as None
url = b"HTTP://WWW.PYTHON.ORG:65536/doc/#frag"
p = urllib.parse.urlsplit(url)
self.assertEqual(p.port, None)
def test_attributes_bad_port(self):
"""Check handling of non-integer ports."""
p = urllib.parse.urlsplit("http://www.example.net:foo")
self.assertEqual(p.netloc, "www.example.net:foo")
self.assertRaises(ValueError, lambda: p.port)
p = urllib.parse.urlparse("http://www.example.net:foo")
self.assertEqual(p.netloc, "www.example.net:foo")
self.assertRaises(ValueError, lambda: p.port)
# Once again, repeat ourselves to test bytes
p = urllib.parse.urlsplit(b"http://www.example.net:foo")
self.assertEqual(p.netloc, b"www.example.net:foo")
self.assertRaises(ValueError, lambda: p.port)
p = urllib.parse.urlparse(b"http://www.example.net:foo")
self.assertEqual(p.netloc, b"www.example.net:foo")
self.assertRaises(ValueError, lambda: p.port)
def test_attributes_without_netloc(self):
# This example is straight from RFC 3261. It looks like it
# should allow the username, hostname, and port to be filled
# in, but doesn't. Since it's a URI and doesn't use the
# scheme://netloc syntax, the netloc and related attributes
# should be left empty.
uri = "sip:[email protected];maddr=239.255.255.1;ttl=15"
p = urllib.parse.urlsplit(uri)
self.assertEqual(p.netloc, "")
self.assertEqual(p.username, None)
self.assertEqual(p.password, None)
self.assertEqual(p.hostname, None)
self.assertEqual(p.port, None)
self.assertEqual(p.geturl(), uri)
p = urllib.parse.urlparse(uri)
self.assertEqual(p.netloc, "")
self.assertEqual(p.username, None)
self.assertEqual(p.password, None)
self.assertEqual(p.hostname, None)
self.assertEqual(p.port, None)
self.assertEqual(p.geturl(), uri)
# You guessed it, repeating the test with bytes input
uri = b"sip:[email protected];maddr=239.255.255.1;ttl=15"
p = urllib.parse.urlsplit(uri)
self.assertEqual(p.netloc, b"")
self.assertEqual(p.username, None)
self.assertEqual(p.password, None)
self.assertEqual(p.hostname, None)
self.assertEqual(p.port, None)
self.assertEqual(p.geturl(), uri)
p = urllib.parse.urlparse(uri)
self.assertEqual(p.netloc, b"")
self.assertEqual(p.username, None)
self.assertEqual(p.password, None)
self.assertEqual(p.hostname, None)
self.assertEqual(p.port, None)
self.assertEqual(p.geturl(), uri)
def test_noslash(self):
# Issue 1637: http://foo.com?query is legal
self.assertEqual(urllib.parse.urlparse("http://example.com?blahblah=/foo"),
('http', 'example.com', '', '', 'blahblah=/foo', ''))
self.assertEqual(urllib.parse.urlparse(b"http://example.com?blahblah=/foo"),
(b'http', b'example.com', b'', b'', b'blahblah=/foo', b''))
def test_withoutscheme(self):
# Test urlparse without scheme
# Issue 754016: urlparse goes wrong with IP:port without scheme
        # RFC 1808 specifies that netloc should start with //; urlparse expects
        # the same, otherwise it classifies that portion of the URL as the path.
self.assertEqual(urllib.parse.urlparse("path"),
('','','path','','',''))
self.assertEqual(urllib.parse.urlparse("//www.python.org:80"),
('','www.python.org:80','','','',''))
self.assertEqual(urllib.parse.urlparse("http://www.python.org:80"),
('http','www.python.org:80','','','',''))
# Repeat for bytes input
self.assertEqual(urllib.parse.urlparse(b"path"),
(b'',b'',b'path',b'',b'',b''))
self.assertEqual(urllib.parse.urlparse(b"//www.python.org:80"),
(b'',b'www.python.org:80',b'',b'',b'',b''))
self.assertEqual(urllib.parse.urlparse(b"http://www.python.org:80"),
(b'http',b'www.python.org:80',b'',b'',b'',b''))
def test_portseparator(self):
        # Issue 754016: distinguish the port separator ':' from the scheme separator
self.assertEqual(urllib.parse.urlparse("path:80"),
('','','path:80','','',''))
self.assertEqual(urllib.parse.urlparse("http:"),('http','','','','',''))
self.assertEqual(urllib.parse.urlparse("https:"),('https','','','','',''))
self.assertEqual(urllib.parse.urlparse("http://www.python.org:80"),
('http','www.python.org:80','','','',''))
# As usual, need to check bytes input as well
self.assertEqual(urllib.parse.urlparse(b"path:80"),
(b'',b'',b'path:80',b'',b'',b''))
self.assertEqual(urllib.parse.urlparse(b"http:"),(b'http',b'',b'',b'',b'',b''))
self.assertEqual(urllib.parse.urlparse(b"https:"),(b'https',b'',b'',b'',b'',b''))
self.assertEqual(urllib.parse.urlparse(b"http://www.python.org:80"),
(b'http',b'www.python.org:80',b'',b'',b'',b''))
def test_usingsys(self):
# Issue 3314: sys module is used in the error
self.assertRaises(TypeError, urllib.parse.urlencode, "foo")
def test_anyscheme(self):
# Issue 7904: s3://foo.com/stuff has netloc "foo.com".
self.assertEqual(urllib.parse.urlparse("s3://foo.com/stuff"),
('s3', 'foo.com', '/stuff', '', '', ''))
self.assertEqual(urllib.parse.urlparse("x-newscheme://foo.com/stuff"),
('x-newscheme', 'foo.com', '/stuff', '', '', ''))
self.assertEqual(urllib.parse.urlparse("x-newscheme://foo.com/stuff?query#fragment"),
('x-newscheme', 'foo.com', '/stuff', '', 'query', 'fragment'))
self.assertEqual(urllib.parse.urlparse("x-newscheme://foo.com/stuff?query"),
('x-newscheme', 'foo.com', '/stuff', '', 'query', ''))
# And for bytes...
self.assertEqual(urllib.parse.urlparse(b"s3://foo.com/stuff"),
(b's3', b'foo.com', b'/stuff', b'', b'', b''))
self.assertEqual(urllib.parse.urlparse(b"x-newscheme://foo.com/stuff"),
(b'x-newscheme', b'foo.com', b'/stuff', b'', b'', b''))
self.assertEqual(urllib.parse.urlparse(b"x-newscheme://foo.com/stuff?query#fragment"),
(b'x-newscheme', b'foo.com', b'/stuff', b'', b'query', b'fragment'))
self.assertEqual(urllib.parse.urlparse(b"x-newscheme://foo.com/stuff?query"),
(b'x-newscheme', b'foo.com', b'/stuff', b'', b'query', b''))
def test_mixed_types_rejected(self):
# Several functions that process either strings or ASCII encoded bytes
# accept multiple arguments. Check they reject mixed type input
with self.assertRaisesRegex(TypeError, "Cannot mix str"):
urllib.parse.urlparse("www.python.org", b"http")
with self.assertRaisesRegex(TypeError, "Cannot mix str"):
urllib.parse.urlparse(b"www.python.org", "http")
with self.assertRaisesRegex(TypeError, "Cannot mix str"):
urllib.parse.urlsplit("www.python.org", b"http")
with self.assertRaisesRegex(TypeError, "Cannot mix str"):
urllib.parse.urlsplit(b"www.python.org", "http")
with self.assertRaisesRegex(TypeError, "Cannot mix str"):
urllib.parse.urlunparse(( b"http", "www.python.org","","","",""))
with self.assertRaisesRegex(TypeError, "Cannot mix str"):
urllib.parse.urlunparse(("http", b"www.python.org","","","",""))
with self.assertRaisesRegex(TypeError, "Cannot mix str"):
urllib.parse.urlunsplit((b"http", "www.python.org","","",""))
with self.assertRaisesRegex(TypeError, "Cannot mix str"):
urllib.parse.urlunsplit(("http", b"www.python.org","","",""))
with self.assertRaisesRegex(TypeError, "Cannot mix str"):
urllib.parse.urljoin("http://python.org", b"http://python.org")
with self.assertRaisesRegex(TypeError, "Cannot mix str"):
urllib.parse.urljoin(b"http://python.org", "http://python.org")
def _check_result_type(self, str_type):
num_args = len(str_type._fields)
bytes_type = str_type._encoded_counterpart
self.assertIs(bytes_type._decoded_counterpart, str_type)
str_args = ('',) * num_args
bytes_args = (b'',) * num_args
str_result = str_type(*str_args)
bytes_result = bytes_type(*bytes_args)
encoding = 'ascii'
errors = 'strict'
self.assertEqual(str_result, str_args)
self.assertEqual(bytes_result.decode(), str_args)
self.assertEqual(bytes_result.decode(), str_result)
self.assertEqual(bytes_result.decode(encoding), str_args)
self.assertEqual(bytes_result.decode(encoding), str_result)
self.assertEqual(bytes_result.decode(encoding, errors), str_args)
self.assertEqual(bytes_result.decode(encoding, errors), str_result)
self.assertEqual(bytes_result, bytes_args)
self.assertEqual(str_result.encode(), bytes_args)
self.assertEqual(str_result.encode(), bytes_result)
self.assertEqual(str_result.encode(encoding), bytes_args)
self.assertEqual(str_result.encode(encoding), bytes_result)
self.assertEqual(str_result.encode(encoding, errors), bytes_args)
self.assertEqual(str_result.encode(encoding, errors), bytes_result)
def test_result_pairs(self):
# Check encoding and decoding between result pairs
result_types = [
urllib.parse.DefragResult,
urllib.parse.SplitResult,
urllib.parse.ParseResult,
]
for result_type in result_types:
self._check_result_type(result_type)
def test_parse_qs_encoding(self):
result = urllib.parse.parse_qs("key=\u0141%E9", encoding="latin-1")
self.assertEqual(result, {'key': ['\u0141\xE9']})
result = urllib.parse.parse_qs("key=\u0141%C3%A9", encoding="utf-8")
self.assertEqual(result, {'key': ['\u0141\xE9']})
result = urllib.parse.parse_qs("key=\u0141%C3%A9", encoding="ascii")
self.assertEqual(result, {'key': ['\u0141\ufffd\ufffd']})
result = urllib.parse.parse_qs("key=\u0141%E9-", encoding="ascii")
self.assertEqual(result, {'key': ['\u0141\ufffd-']})
result = urllib.parse.parse_qs("key=\u0141%E9-", encoding="ascii",
errors="ignore")
self.assertEqual(result, {'key': ['\u0141-']})
def test_parse_qsl_encoding(self):
result = urllib.parse.parse_qsl("key=\u0141%E9", encoding="latin-1")
self.assertEqual(result, [('key', '\u0141\xE9')])
result = urllib.parse.parse_qsl("key=\u0141%C3%A9", encoding="utf-8")
self.assertEqual(result, [('key', '\u0141\xE9')])
result = urllib.parse.parse_qsl("key=\u0141%C3%A9", encoding="ascii")
self.assertEqual(result, [('key', '\u0141\ufffd\ufffd')])
result = urllib.parse.parse_qsl("key=\u0141%E9-", encoding="ascii")
self.assertEqual(result, [('key', '\u0141\ufffd-')])
result = urllib.parse.parse_qsl("key=\u0141%E9-", encoding="ascii",
errors="ignore")
self.assertEqual(result, [('key', '\u0141-')])
def test_splitport(self):
splitport = urllib.parse.splitport
self.assertEqual(splitport('parrot:88'), ('parrot', '88'))
self.assertEqual(splitport('parrot'), ('parrot', None))
self.assertEqual(splitport('parrot:'), ('parrot', None))
self.assertEqual(splitport('127.0.0.1'), ('127.0.0.1', None))
self.assertEqual(splitport('parrot:cheese'), ('parrot:cheese', None))
def test_splitnport(self):
splitnport = urllib.parse.splitnport
self.assertEqual(splitnport('parrot:88'), ('parrot', 88))
self.assertEqual(splitnport('parrot'), ('parrot', -1))
self.assertEqual(splitnport('parrot', 55), ('parrot', 55))
self.assertEqual(splitnport('parrot:'), ('parrot', -1))
self.assertEqual(splitnport('parrot:', 55), ('parrot', 55))
self.assertEqual(splitnport('127.0.0.1'), ('127.0.0.1', -1))
self.assertEqual(splitnport('127.0.0.1', 55), ('127.0.0.1', 55))
self.assertEqual(splitnport('parrot:cheese'), ('parrot', None))
self.assertEqual(splitnport('parrot:cheese', 55), ('parrot', None))
def test_splitquery(self):
# Normal cases are exercised by other tests; ensure that we also
# catch cases with no query part (testcase ensuring coverage)
result = urllib.parse.splitquery('http://python.org/fake?foo=bar')
self.assertEqual(result, ('http://python.org/fake', 'foo=bar'))
result = urllib.parse.splitquery('http://python.org/fake?foo=bar?')
self.assertEqual(result, ('http://python.org/fake?foo=bar', ''))
result = urllib.parse.splitquery('http://python.org/fake')
self.assertEqual(result, ('http://python.org/fake', None))
def test_splitvalue(self):
# Normal cases are exercised by other tests; test pathological cases
# with no key/value pairs. (testcase ensuring coverage)
result = urllib.parse.splitvalue('foo=bar')
self.assertEqual(result, ('foo', 'bar'))
result = urllib.parse.splitvalue('foo=')
self.assertEqual(result, ('foo', ''))
result = urllib.parse.splitvalue('foobar')
self.assertEqual(result, ('foobar', None))
def test_to_bytes(self):
result = urllib.parse.to_bytes('http://www.python.org')
self.assertEqual(result, 'http://www.python.org')
self.assertRaises(UnicodeError, urllib.parse.to_bytes,
'http://www.python.org/medi\u00e6val')
def test_urlencode_sequences(self):
# Other tests incidentally urlencode things; test non-covered cases:
# Sequence and object values.
result = urllib.parse.urlencode({'a': [1, 2], 'b': (3, 4, 5)}, True)
# we cannot rely on ordering here
self.assertEqual(set(result.split('&')), {'a=1', 'a=2', 'b=3', 'b=4', 'b=5'})
class Trivial:
def __str__(self):
return 'trivial'
result = urllib.parse.urlencode({'a': Trivial()}, True)
self.assertEqual(result, 'a=trivial')
def test_quote_from_bytes(self):
self.assertRaises(TypeError, urllib.parse.quote_from_bytes, 'foo')
result = urllib.parse.quote_from_bytes(b'archaeological arcana')
self.assertEqual(result, 'archaeological%20arcana')
result = urllib.parse.quote_from_bytes(b'')
self.assertEqual(result, '')
def test_unquote_to_bytes(self):
result = urllib.parse.unquote_to_bytes('abc%20def')
self.assertEqual(result, b'abc def')
result = urllib.parse.unquote_to_bytes('')
self.assertEqual(result, b'')
def test_quote_errors(self):
self.assertRaises(TypeError, urllib.parse.quote, b'foo',
encoding='utf-8')
self.assertRaises(TypeError, urllib.parse.quote, b'foo', errors='strict')
def test_issue14072(self):
p1 = urllib.parse.urlsplit('tel:+31-641044153')
self.assertEqual(p1.scheme, 'tel')
self.assertEqual(p1.path, '+31-641044153')
p2 = urllib.parse.urlsplit('tel:+31641044153')
self.assertEqual(p2.scheme, 'tel')
self.assertEqual(p2.path, '+31641044153')
# assert the behavior for urlparse
p1 = urllib.parse.urlparse('tel:+31-641044153')
self.assertEqual(p1.scheme, 'tel')
self.assertEqual(p1.path, '+31-641044153')
p2 = urllib.parse.urlparse('tel:+31641044153')
self.assertEqual(p2.scheme, 'tel')
self.assertEqual(p2.path, '+31641044153')
def test_telurl_params(self):
p1 = urllib.parse.urlparse('tel:123-4;phone-context=+1-650-516')
self.assertEqual(p1.scheme, 'tel')
self.assertEqual(p1.path, '123-4')
self.assertEqual(p1.params, 'phone-context=+1-650-516')
p1 = urllib.parse.urlparse('tel:+1-201-555-0123')
self.assertEqual(p1.scheme, 'tel')
self.assertEqual(p1.path, '+1-201-555-0123')
self.assertEqual(p1.params, '')
p1 = urllib.parse.urlparse('tel:7042;phone-context=example.com')
self.assertEqual(p1.scheme, 'tel')
self.assertEqual(p1.path, '7042')
self.assertEqual(p1.params, 'phone-context=example.com')
p1 = urllib.parse.urlparse('tel:863-1234;phone-context=+1-914-555')
self.assertEqual(p1.scheme, 'tel')
self.assertEqual(p1.path, '863-1234')
self.assertEqual(p1.params, 'phone-context=+1-914-555')
def test_main():
support.run_unittest(UrlParseTestCase)
if __name__ == "__main__":
test_main()
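# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): the str/bytes
# rule exercised above, in practice. All-str or all-bytes inputs round-trip
# cleanly, while mixing the two types raises TypeError.
# ---------------------------------------------------------------------------
def _demo_str_bytes_rule():
    # all-str input -> str result
    joined = urllib.parse.urljoin("http://python.org", "/about")
    assert joined == "http://python.org/about"
    # all-bytes input -> bytes result
    joined_b = urllib.parse.urljoin(b"http://python.org", b"/about")
    assert joined_b == b"http://python.org/about"
    # mixed input -> TypeError ("Cannot mix str and non-str arguments")
    try:
        urllib.parse.urljoin("http://python.org", b"/about")
    except TypeError:
        pass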
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._agent_pools_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_available_agent_pool_versions_request, build_get_request, build_get_upgrade_profile_request, build_list_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AgentPoolsOperations:
"""AgentPoolsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2019_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> AsyncIterable["_models.AgentPoolListResult"]:
"""Gets a list of agent pools in the specified managed cluster.
Gets a list of agent pools in the specified managed cluster. The operation returns properties
of each agent pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AgentPoolListResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2019_11_01.models.AgentPoolListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPoolListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("AgentPoolListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
**kwargs: Any
) -> "_models.AgentPool":
"""Gets the agent pool.
Gets the details of the agent pool by managed cluster and resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AgentPool, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2019_11_01.models.AgentPool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AgentPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
parameters: "_models.AgentPool",
**kwargs: Any
) -> "_models.AgentPool":
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'AgentPool')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AgentPool', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('AgentPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
parameters: "_models.AgentPool",
**kwargs: Any
) -> AsyncLROPoller["_models.AgentPool"]:
"""Creates or updates an agent pool.
Creates or updates an agent pool in the specified managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:param parameters: Parameters supplied to the Create or Update an agent pool operation.
:type parameters: ~azure.mgmt.containerservice.v2019_11_01.models.AgentPool
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AgentPool or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2019_11_01.models.AgentPool]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('AgentPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes an agent pool.
Deletes the agent pool in the specified managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore
@distributed_trace_async
async def get_upgrade_profile(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
**kwargs: Any
) -> "_models.AgentPoolUpgradeProfile":
"""Gets upgrade profile for an agent pool.
Gets the details of the upgrade profile for an agent pool with a specified resource group and
managed cluster name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AgentPoolUpgradeProfile, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2019_11_01.models.AgentPoolUpgradeProfile
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPoolUpgradeProfile"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_upgrade_profile_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
template_url=self.get_upgrade_profile.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AgentPoolUpgradeProfile', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_upgrade_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeProfiles/default'} # type: ignore
@distributed_trace_async
async def get_available_agent_pool_versions(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.AgentPoolAvailableVersions":
"""Gets a list of supported versions for the specified agent pool.
Gets a list of supported versions for the specified agent pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AgentPoolAvailableVersions, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2019_11_01.models.AgentPoolAvailableVersions
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPoolAvailableVersions"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_available_agent_pool_versions_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.get_available_agent_pool_versions.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AgentPoolAvailableVersions', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_available_agent_pool_versions.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/availableAgentPoolVersions'} # type: ignore
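# ---------------------------------------------------------------------------
# Illustrative usage sketch (not generated code). It assumes azure-identity is
# installed and that the installed azure-mgmt-containerservice package exposes
# an async ContainerServiceClient whose ``agent_pools`` attribute is an
# instance of the operations class above; adjust names to your environment.
# ---------------------------------------------------------------------------
async def _example_agent_pools_usage(subscription_id, resource_group, cluster_name):
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.containerservice.aio import ContainerServiceClient  # assumed entry point

    credential = DefaultAzureCredential()
    client = ContainerServiceClient(credential, subscription_id)
    try:
        # list() returns an AsyncItemPaged; iterate it with ``async for``.
        async for pool in client.agent_pools.list(resource_group, cluster_name):
            print(pool.name, pool.count)

        # begin_delete() returns an AsyncLROPoller; awaiting result() blocks
        # until the long-running operation completes.
        poller = await client.agent_pools.begin_delete(resource_group, cluster_name, "nodepool1")
        await poller.result()
    finally:
        await client.close()
        await credential.close()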
|
|
'''
Script: FitsToCSV_AndLS
Version: v09
-- This script takes FITS files (for the available quarters), removes 3.5-sigma
outliers, joins quarters using a linear fit, and calculates the fast Lomb-Scargle periodogram.
The script returns the LS periodogram for each light curve, up to the 100th peak, and a summary
table with periodogram info up to the 4th peak for all analyzed LCs.
It also returns a list of the KICs that were given as input but were not found in the target folder.
This script is similar to the FitsToCSV.py script in this repo, but adds the Lomb-Scargle step.
Python version: 2.7
If you use/modify it, please credit Francisco Paz-Chinchon, francisco at dfte.ufrn.br,
UFRN, Brazil. 2014.
Have fun & play your game.
'''
#_______________________________________________________________________________
# for further PYFITS resources, see: http://pythonhosted.org/pyfits/users_guide/users_tutorial.html#opening-a-fits-file
#
import os # walk through directories
import pyfits as pyf # handle fits
import pandas # dataframe use
from pandas import *
import numpy as np # numpy
import time # to see time
import gc # to free memory
from sys import exit # use exit(0) to exit programm
#...........
#......................
#.................................
#.......BEGIN OF LS ROUTINES.........
# """ Fast algorithm for spectral analysis of unevenly sampled data
# The Lomb-Scargle method performs spectral analysis on unevenly sampled
# data and is known to be a powerful way to find, and test the
# significance of, weak periodic signals. The method has previously been
# thought to be 'slow', requiring of order 10^2 N^2 operations to analyze
# N data points. We show that Fast Fourier Transforms (FFTs) can be used
# in a novel way to make the computation of order 10^2 N log N. Despite
# its use of the FFT, the algorithm is in no way equivalent to
# conventional FFT periodogram analysis.
# Keywords:
# DATA SAMPLING, FAST FOURIER TRANSFORMATIONS,
# SPECTRUM ANALYSIS, SIGNAL PROCESSING
# Example:
# > import numpy
# > import lomb
# > x = numpy.arange(10)
# > y = numpy.sin(x)
# > fx,fy, nout, jmax, prob = lomb.fasper(x,y, 6., 6.)
# Reference:
# Press, W. H. & Rybicki, G. B. 1989
# ApJ vol. 338, p. 277-280.
# Fast algorithm for spectral analysis of unevenly sampled data
# bib code: 1989ApJ...338..277P
# """
from numpy import *
from numpy.fft import *
def __spread__(y, yy, n, x, m):
# """
# Given an array yy(0:n-1), extirpolate (spread) a value y into
# m actual array elements that best approximate the "fictional"
# (i.e., possible noninteger) array element number x. The weights
# used are coefficients of the Lagrange interpolating polynomial
# Arguments:
# y :
# yy :
# n :
# x :
# m :
# Returns:
# """
nfac=[0,1,1,2,6,24,120,720,5040,40320,362880]
if m > 10. :
print 'factorial table too small in spread'
return
ix=long(x)
if x == float(ix):
yy[ix]=yy[ix]+y
else:
ilo = long(x-0.5*float(m)+1.0)
ilo = min( max( ilo , 1 ), n-m+1 )
ihi = ilo+m-1
nden = nfac[m]
fac=x-ilo
for j in range(ilo+1,ihi+1): fac = fac*(x-j)
yy[ihi] = yy[ihi] + y*fac/(nden*(x-ihi))
for j in range(ihi-1,ilo-1,-1):
nden=(nden/(j+1-ilo))*(j-ihi)
yy[j] = yy[j] + y*fac/(nden*(x-j))
def fasper(x,y,ofac,hifac, MACC=4):
# """ function fasper
# Given abscissas x (which need not be equally spaced) and ordinates
# y, and given a desired oversampling factor ofac (a typical value
# being 4 or larger). this routine creates an array wk1 with a
# sequence of nout increasing frequencies (not angular frequencies)
# up to hifac times the "average" Nyquist frequency, and creates
# an array wk2 with the values of the Lomb normalized periodogram at
# those frequencies. The arrays x and y are not altered. This
# routine also returns jmax such that wk2(jmax) is the maximum
# element in wk2, and prob, an estimate of the significance of that
# maximum against the hypothesis of random noise. A small value of prob
# indicates that a significant periodic signal is present.
# Reference:
# Press, W. H. & Rybicki, G. B. 1989
# ApJ vol. 338, p. 277-280.
# Fast algorithm for spectral analysis of unevenly sampled data
# (1989ApJ...338..277P)
# Arguments:
# X : Abscissas array, (e.g. an array of times).
# Y : Ordinates array, (e.g. corresponding counts).
# Ofac : Oversampling factor.
# Hifac : Hifac * "average" Nyquist frequency = highest frequency
# for which values of the Lomb normalized periodogram will
# be calculated.
# Returns:
# Wk1 : An array of Lomb periodogram frequencies.
# Wk2 : An array of corresponding values of the Lomb periodogram.
# Nout : Wk1 & Wk2 dimensions (number of calculated frequencies)
# Jmax : The array index corresponding to the MAX( Wk2 ).
# Prob : False Alarm Probability of the largest Periodogram value
# MACC : Number of interpolation points per 1/4 cycle
# of highest frequency
# History:
# 02/23/2009, v1.0, MF
# Translation of IDL code (orig. Numerical Recipes)
# """
#Check dimensions of input arrays
n = long(len(x))
if n != len(y):
print 'Incompatible arrays.'
return
nout = long(0.5*ofac*hifac*n)  # number of output frequencies (integer; used below for slicing)
nfreqt = long(ofac*hifac*n*MACC) #Size the FFT as next power
nfreq = 64L # of 2 above nfreqt.
while nfreq < nfreqt:
nfreq = 2*nfreq
ndim = long(2*nfreq)
#Compute the mean, variance
ave = y.mean()
##sample variance because the divisor is N-1
var = ((y-y.mean())**2).sum()/(len(y)-1)
# and range of the data.
xmin = x.min()
xmax = x.max()
xdif = xmax-xmin
#extirpolate the data into the workspaces
wk1 = zeros(ndim, dtype='complex')
wk2 = zeros(ndim, dtype='complex')
fac = ndim/(xdif*ofac)
fndim = ndim
ck = ((x-xmin)*fac) % fndim
ckk = (2.0*ck) % fndim
for j in range(0L, n):
__spread__(y[j]-ave,wk1,ndim,ck[j],MACC)
__spread__(1.0,wk2,ndim,ckk[j],MACC)
#Take the Fast Fourier Transforms
wk1 = ifft( wk1 )*len(wk1)
wk2 = ifft( wk2 )*len(wk1)
wk1 = wk1[1:nout+1]
wk2 = wk2[1:nout+1]
rwk1 = wk1.real
iwk1 = wk1.imag
rwk2 = wk2.real
iwk2 = wk2.imag
df = 1.0/(xdif*ofac)
#Compute the Lomb value for each frequency
hypo2 = 2.0 * abs( wk2 )
hc2wt = rwk2/hypo2
hs2wt = iwk2/hypo2
cwt = sqrt(0.5+hc2wt)
swt = sign(hs2wt)*(sqrt(0.5-hc2wt))
den = 0.5*n+hc2wt*rwk2+hs2wt*iwk2
cterm = (cwt*rwk1+swt*iwk1)**2./den
sterm = (cwt*iwk1-swt*rwk1)**2./(n-den)
wk1 = df*(arange(nout, dtype='float')+1.)
wk2 = (cterm+sterm)/(2.0*var)
pmax = wk2.max()
jmax = wk2.argmax()
#Significance estimation
#expy = exp(-wk2)
#effm = 2.0*(nout)/ofac
#sig = effm*expy
#ind = (sig > 0.01).nonzero()
#sig[ind] = 1.0-(1.0-expy[ind])**effm
#Estimate significance of largest peak value
expy = exp(-pmax)
effm = 2.0*(nout)/ofac
prob = effm*expy
if prob > 0.01:
prob = 1.0-(1.0-expy)**effm
return wk1,wk2,nout,jmax,prob
def getSignificance(wk1, wk2, nout, ofac):
# """ returns the peak false alarm probabilities
# Hence, the lower the probability, the more significant the peak
# """
expy = exp(-wk2)
effm = 2.0*(nout)/ofac
sig = effm*expy
ind = (sig > 0.01).nonzero()
sig[ind] = 1.0-(1.0-expy[ind])**effm
return sig
#...........
#......................
#.................................
#............END OF LS ROUTINES........
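#
# Minimal self-check of the LS routines above (illustrative only, not part of
# the processing pipeline): an unevenly sampled sine of period 5 d should give
# a principal peak near frequency 0.2 1/d. Mirrors the usage shown in the
# fasper() docstring (oversampling 6, hifac 6).
def _selftest_fasper():
    t = np.sort( 100.0*np.random.rand(400) )   # ~400 uneven samples over 100 days
    flux = np.sin( 2.0*np.pi*t/5.0 )           # known period: 5 days
    fx, fy, n_out, j_max, fap = fasper(t, flux, 6., 6.)
    print 'recovered period: {0:.3f} d (FAP: {1:.3e})'.format(1.0/fx[j_max], fap)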
#
# FUNCTIONS
#
#)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))
#)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))
#)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))
# Read LCs
#
def Read_lc(fileLC):
hdu_lc = pyf.open(fileLC, memmap=True) # hdu is a Python like list
KIC = int( hdu_lc[0].header['KEPLERID'] )
Q = int( hdu_lc[0].header['QUARTER'] )
S = int( hdu_lc[0].header['SEASON'] )
additional = [KIC, Q, S]
timeJD = hdu_lc[1].data.field(0)
pdc_initial = hdu_lc[1].data.field(7)
# to remove NaN :
timeJD = timeJD[~np.isnan(pdc_initial)]
pdc = pdc_initial[~np.isnan(pdc_initial)]
pdc = pdc / pdc.mean() # normalize by the mean (NaNs were already removed above)
# pdc = pdc / nanmean(pdc)
# nanmean calculates the mean of an array without
# considering NaN values. That function comes from scipy.stats
hdu_lc.close() # closes the hdu
# time and pdc are numpy.arrays
# I'm gonna construct a data frame for the signal, normalized by its
# average
# O u t l i e r s removal
# In each quarter, flux data deviating more than 3.5 sigma from a cubic fit will be erased
sigma_f = pdc.std()
deg3_fit = np.poly1d( np.polyfit(timeJD, pdc, 3) )
# evaluate in all the flux array. Abs value
pdc_diff = np.abs( deg3_fit(timeJD) - pdc )
pdc = pdc[pdc_diff < 3.5*sigma_f]
timeJD = timeJD[pdc_diff < 3.5*sigma_f]
# Add quarter as a third column, to make the linear fit
# print Q,fileLC
Q_arr = np.linspace(int(Q),int(Q),len(pdc)) # number of elements is the same as the time series
# To fix a big-endian/little-endian error that can appear due to
# the computer architecture
#timeJD = timeJD.byteswap().newbyteorder()
#pdc = pdc.byteswap().newbyteorder()
df_tserie = DataFrame({'time':timeJD, 'nflux':pdc, 'Q':Q_arr})
# note that the flux is already normalized, so
# the quarters can be simply joined into one
return df_tserie, additional
# Fit a 1st-order polynomial and adjust the offsets between quarters
#
def StepFit(df_timeser):
# receives the time series as a dataframe with 3 columns: time, nflux, Q
# NaN must already be dropped
# df must be already sorted by time
# An array of quarters, non duplicate items.
# nondup = df_timeser.drop_duplicates(cols=['Q'] )['Q'].values # only 'Q' column
nondup = np.unique(df_timeser['Q'].values)
print nondup
#print nondup, len(nondup)
# Go through time series, quarter by quarter
if len(nondup) > 1:
# Save the first quarter of the LC
df_Fit = df_timeser[ df_timeser.Q == nondup[0] ]
# Iterate up to the n-1 element
for index,q_item in enumerate( nondup[:len(nondup)-1] ): # indexing is OK!!!
df_tmp1 = df_timeser[ df_timeser.Q == q_item ]
df_tmp2 = df_timeser[ df_timeser.Q == nondup[index+1] ]
# fit the 2 linear fits using: np.polyfit, np.polyval, p = np.poly1d(z)
p1 = np.poly1d( np.polyfit(df_tmp1['time'].values, df_tmp1['nflux'].values,1) )
p2 = np.poly1d( np.polyfit(df_tmp2['time'].values, df_tmp2['nflux'].values,1) )
# then evaluate the borders of each piece, in the corresponding fit
# and determine the offset.
Offset = p1(df_tmp1['time'].values[-1]) - p2(df_tmp2['time'].values[0]) # evaluate both fits at the quarter boundary
# now add the offset to the second piece
df_tmp2['nflux'] += Offset
# and concatenate the 2nd piece with the previous
df_Fit = concat( [df_Fit, df_tmp2] )
else:
df_Fit = df_timeser
print 'no fit made, only ONE quarter in LC'
return df_Fit
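# Tiny illustrative check of StepFit() (not part of the pipeline): two fake
# quarters with a constant flux offset of 0.3 should come out as a single,
# continuous series after stitching.
def _selftest_stepfit():
    t1 = np.linspace(0., 10., 50)
    t2 = np.linspace(10.2, 20., 50)
    df_toy = DataFrame({'time': np.concatenate([t1, t2]),
                        'nflux': np.concatenate([np.ones(50), np.ones(50) + 0.3]),
                        'Q': np.concatenate([np.ones(50), 2.0*np.ones(50)])})
    df_joined = StepFit(df_toy)
    spread = df_joined['nflux'].max() - df_joined['nflux'].min()
    print 'flux spread after stitching: {0:.4f} (expected ~0)'.format(spread)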
# Phase plot
#
def PhaseD(df_lightC,Period1):
# function is called by a DataFrame and a float
# for time: df_lc['time'].astype('float64').values
# for flux: df_lc['nflux'].astype('float64').values
# for period: P1
Period1 = float(Period1)
LONG_CAD = 29.4 # minutes, long cadence
bin_days = (LONG_CAD / 60.) / 24. # days, 0.02042
# Define the number of windows to make the phase plot
N_window = abs( df_lightC['time'].values[-1] - \
df_lightC['time'].values[0] ) / Period1
# if NaN values were not drop:
# N_window = abs( df_lightC['time'].astype('float64').values[-1] - \
# df_lightC['time'].astype('float64').values[0] ) / Period1
# Unique time axis for the phase diagram:
# make a REAL (continuous) axis and superpose the discrete (binned) one.
#
# Make the CONTINUOUS axis and, over this axis, place the DISCRETE one:
# having the real axis, cut and paste the discrete pieces onto it.
# Dataframe to harbor the phase plot
df_phase = None
# Sort to avoid errors. Reset index.
# df_lightC = df_lightC.sort(['time'], ascending=[True])
idx_sort3 = df_lightC['time'].values[:].argsort()
df_lightC['time'] = df_lightC['time'].values[idx_sort3]
df_lightC['nflux'] = df_lightC['nflux'].values[idx_sort3]
df_lightC['Q'] = df_lightC['Q'].values[idx_sort3]
df_lightC = df_lightC.reset_index(drop=True)
# Lets set zero the first element of time in DF
df_lightC['time'] = df_lightC['time'] - df_lightC['time'].values[0]
# Borders of the continuous axis:
NN = 0
# Counter of how many phase-plot windows are made
counter = 0
# Change of approach: iterate over the continuous axis until the maximum of the
# time series array is reached, and along the way assign the pieces to a phase plot
while 1:
Cont_up = (NN+1) * Period1
Cont_dw = NN * Period1
#print Cont_up, Cont_dw, Cont_up-Cont_dw
if (Cont_up - df_lightC['time'].values[-1]) > Period1:
break
# value in LC of the nearest value to lower window border
# this method returns the DF index
D_dw_idx = (np.abs(df_lightC['time'].values - Cont_dw)).argmin()
Discrete_dw = df_lightC['time'][ D_dw_idx ]
if (Discrete_dw >= Cont_dw) and (Discrete_dw < Cont_up):
D_up_idx = (np.abs(df_lightC['time'].values - Cont_up)).argmin()
Discrete_up = df_lightC['time'][ D_up_idx ]
if Discrete_up > Cont_up:
restar_ind = 1
while 1:
aux_index = (np.abs(df_lightC['time'].values - Cont_up)).argmin() - restar_ind
aux_index -= 1
if df_lightC['time'].values[aux_index] <= Cont_up:
D_up_idx = aux_index
Discrete_up = df_lightC['time'][D_up_idx]
break
restar_ind += 1
# Continue: offset and save in df
# offset_up = Cont_up - df_lightC['time'].values[discrete_up] # d2 <= c2
offset_dw = df_lightC['time'].values[D_dw_idx] - Cont_dw # d1 >= c1
df_tmp = df_lightC[D_dw_idx : D_up_idx+1]
df_tmp['time'] = (df_tmp['time'] - df_tmp['time'].values[0]) + offset_dw
df_phase = concat( [ df_phase, df_tmp ] )
df_tmp = None
counter += 1
else:
#aaa=1
print 'no data in the range: {0} -- {1} d'.format(Cont_dw,Cont_up)
NN = NN + 1
print 'Number of phase plots: {0}'.format(counter)
# DataFrame.sort() was replaced by numpy functions ONLY to keep the
# script uniform; pandas.sort() worked well inside this function
#df_phase = df_phase.sort(['time'], ascending=[True])
#
idx_sort2 = df_phase['time'].values[:].argsort()
df_phase['time'] = df_phase['time'].values[idx_sort2]
df_phase['nflux'] = df_phase['nflux'].values[idx_sort2]
df_phase['Q'] = df_phase['Q'].values[idx_sort2]
#
df_phase = df_phase.reset_index(drop=True)
# IF Normalize time:
#time_arr = time_arr/Per1
# Set DoPhasePlot to "True" in order to see phase plot
#
DoPhasePlot = False
# if DoPhasePlot:
# plt.plot(df_phase['time'].values, df_phase['nflux'].values, 'b,')
# plt.show()
return df_phase, counter
#
# NOTE: time output isn't normalized [0,1] to reach desired
# precision in the SIGMA calculation
#
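#
# For intuition (illustrative only, NOT the method used above): the classic
# one-line phase fold is (t mod P)/P. PhaseD() instead builds the fold window
# by window so that the original cadence inside each window is preserved.
def _simple_fold(df_lightC, Period1):
    t0 = df_lightC['time'].values[0]
    phase = np.mod(df_lightC['time'].values - t0, Period1) / Period1
    return DataFrame({'phase': phase, 'nflux': df_lightC['nflux'].values})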
# <><><><><><><><><><><><><><><><><><><><>
#
# C O R P U S
#
# <><><><><><><><><><><><><><><><><><><><>
if __name__ == "__main__":
general_time = time.time()
# Top-level path to the FITS files
# path_test = '/home/fj/Dropbox/2014/aux'
path_fits = '/dados/home/fcoj/Work/KOI/fits_Dec2013'
# Filename of the IDs list
df_IDs = read_table('kicPl_all.csv',sep=',') #list of type kplr00xxxxxxx
print '\n\tworking...\n'
# Walk through folders and subfolders, ID by ID
info_aux = [[],[],[]]
summary_ls = [] # summary list; will record the LCs that aren't available in the cluster folders
# list to save the KIC and the ppal period
# periods_tab = [['KICname'],['Period_(d)'],['LinearFreq'],['Power'], ['Prob'],['Peak']]
periods_tab = [[],[],[],[],[],[]]
# To change every run:
OVERSAMPL = 15
HIFREQ = 2.0
MAINSAMPL = 'd13'
RUN_i = 'r05'
for index1,kplr_id in enumerate( df_IDs['kic_name'].values ):
# for kplr_id in ['kplr000757450']:
# free memory
# gc.collect()
counter_Qs = 0 # in each KIC it resets
for (path, dirs, files) in os.walk(path_fits):
for index2,FILE in enumerate(files): #file is a string
if ("_llc.fits" in FILE) and ( str(kplr_id) in FILE ) :
# flow control: process the file only if its name contains
# the KIC id and it is a FITS file. Only LONG cadence
# print path_test,'/',FILE
if counter_Qs == 0: # for the first
print '\t first LC of \t {0}. time:\t {1} minutes'.format(kplr_id,(time.time()-general_time)/60.)
df_lc, aux_1 = Read_lc(path_fits+'/'+FILE) # read the FITS once and unpack both returns
info_aux[0].append( aux_1[0] )
info_aux[1].append( aux_1[1] )
info_aux[2].append( aux_1[2] )
counter_Qs += 1
elif counter_Qs > 0:
df_piece, aux_2 = Read_lc(path_fits+'/'+FILE) # read the FITS once and unpack both returns
df_lc = concat( [ df_lc, df_piece ] )
info_aux[0].append( aux_2[0] )
info_aux[1].append( aux_2[1] )
info_aux[2].append( aux_2[2] )
counter_Qs += 1
# each LC is concatenated with the previous
# this way, a single LC is constructed for
# each ID
#### NOTE: partial LCs are not loaded chronologically, so I MUST SORT
# Up to here, the dataframe of the entire Light Curve is in: df_lc
# Remember it has 3 columns: flux, time, quarter
#
# I must discriminate between LCs with one quarter and LCs with more than one quarter,
# in order to perform (or not): sort, Fit, reset of indices
# If KIC have no fits in folder
if (counter_Qs == 0) or (len(df_lc['nflux'].values) == 0):
summary_ls.append(kplr_id)
# To know how time passed...
if (index1+1)%100 == 0:
print '\t\t\t>>> LC number: {0} \t elapsed time: {1} h'.format(index1+1, (time.time()-general_time)/3600.)
#
# CASE 1) O N L Y O N E QUARTER
#
if (counter_Qs == 1) and (len(df_lc['nflux'].values) > 4):
print '\n\t\t>>> there is ONLY ONE quarter for {0}'.format(kplr_id)
# MAKE UP
# ------------
# 1.- Erase NaN
# To erase the NaN values of the Light Curve that was read, we exclude
# these rows, using np.isfinite()
# These dataframes will be used to call LombScargle below...
# mask the NaN:
#df_lc = df_lc[np.isfinite(df_lc['time'])]
#df_lc = df_lc[np.isfinite(df_lc['nflux'])] # ok!, 2 columns
# or equivalent, erase the rows with any NaN column:
# df_lc = df_lc.dropna(how='any')
# We erased NaN in the function Read_lc(), with a numpy method
# 2.- Sort LC using time
# No need to sort: we assume the time series is already sorted within the quarter
# 3.- Reset index
# As no sort is performed, we do not need to reset the indices
# 4.- Fit a line (a*x+b) Q by Q
# As only one quarter is present, no Fit can be done between quarters
# Note that:
# When using np.isfinite()...
# In order to recover data from dataframe, in format float, we must use:
#
# Time = df_lcNaN['time_BKJD'].astype('float64').values
# Flux = df_lcNaN['norm_flux'].astype('float64').values
#...................................................................................................................
# FAST LOMB SCARGLE
# ------------------------------
# (we're inside the loop of kepler IDs...)
#
# calculates NOT ANGULAR frequency
# px, py: periodogram axis
# nout: number of calculated frequencies
# jmax: array index corresponding to the ppal peak: argmax(py)
# prob: an estimate of the significance of that
# maximum against the hypothesis of random noise. A small value of prob
# indicates that a significant periodic signal is present.
oversampl = OVERSAMPL # 4 or more, v08:20
hifreq = HIFREQ # 1, v08:2.0
px,py,nout,jmax,prob = fasper(df_lc['time'].values, \
df_lc['nflux'].values, oversampl, hifreq)
f1 = px[jmax] # PPAL PEAK FREQUENCY
P1 = 1.0/f1 # PPAL PEAK PERIOD
#
#
DO_info_LS = False
if DO_info_LS:
print '\n Fast LombScargle'
print 'Frequency of the ppal peak (not angular freq): {}'.format(px[jmax])
print 'Freq power, peak A: {0}'.format(py[jmax])
print 'Period of peak A: {0} days'.format(1/px[jmax])
print 'Probability (lower means a more significant signal): {0}'.format(prob)
print 'time elapsed up to LS... {0} sec'.format(time.time()-general_time)
print
# Save periodogram
# I use pandas.sort() because the DataFrame contains the output np.array from LS
df_LS = DataFrame({'1_Period(d)':1.0/px,'2_LinearFreq':px,'3_Power':py}) # all arrays must have the same length
df_LS = df_LS.sort(['3_Power'], ascending=[False]) # sort by power of each period
df_LS = df_LS.reset_index(drop=True) # re-index data frame after sort
N_freqs = nout
print '\n\tnumber of calculated frequencies: {0}\n'.format(N_freqs)
# To save up to 4th peak
for pp in [0,1,2,3]:
periods_tab[0].append(kplr_id[ kplr_id.find('kplr')+4: ]) # save the KIC
periods_tab[1].append(df_LS['1_Period(d)'].values[pp])
periods_tab[2].append(df_LS['2_LinearFreq'].values[pp])
periods_tab[3].append(df_LS['3_Power'].values[pp]) # peak power (table sorted by descending power, so row 0 is the principal peak)
periods_tab[4].append(prob) # v08
periods_tab[5].append(pp+1) # v09
# XX.- erase variables
px,py,nout,jmax,prob = None,None,None,None,None
#...................................................................................................................
# W R I T E I N F O T O F I L E S (I)
# ----------------------------------
#
# KIC by KIC: To Save periodogram first 100 frequencies
out_name = 'tables/LS/'+ MAINSAMPL +'.' + RUN_i + '_LS_' + kplr_id + '.csv'
df_LS[0:100].to_csv(out_name, sep=',', index=False, header=True)
#...................................................................................................................
#
# CASE 2) M O R E THAN O N E QUARTER
#
elif (counter_Qs > 1) and (len(df_lc['nflux'].values) > 4): # by Kolmogorov
# MAKE UP
# ------------
# 1.- Erase NaN
# To erase the NaN values of the Light Curve that was read, we exclude
# these rows, using np.isfinite()
# These dataframes will be used to call LombScargle below...
# mask the NaN:
#df_lc = df_lc[np.isfinite(df_lc['time'])]
#df_lc = df_lc[np.isfinite(df_lc['nflux'])] # ok!, 2 columns
# or equivalent, erase the rows with any NaN column:
# df_lc = df_lc.dropna(how='any')
# 2.- Sort LC using time
# Because the LC is a merge of quarters in NO CHRONOLOGICAL order, we must sort
# using time.
idx_sort = df_lc['time'].values[:].argsort()
df_lc['time'] = df_lc['time'].values[idx_sort]
df_lc['nflux'] = df_lc['nflux'].values[idx_sort]
df_lc['Q'] = df_lc['Q'].values[idx_sort]
# I used this way instead of pandas.sort to avoid the ugly
# problems with big-endian / little-endian
# df_lc = df_lc.sort(['time'], ascending=[True])
# 3.- Reset index
# After sort, indices must be re-written
# VERY IMPORTANT POINT!
df_lc = df_lc.reset_index(drop=True)
# 4.- Fit a line (a*x+b) Q by Q
# A linear fit is performed quarter by quarter (in pairs), and the offset is applied
# to the 2nd
df_lc = StepFit( df_lc )
# Note that:
# When using np.isfinite()...
# In order to recover data from dataframe, in format float, we must use:
#
# Time = df_lcNaN['time_BKJD'].astype('float64').values
# Flux = df_lcNaN['norm_flux'].astype('float64').values
#...................................................................................................................
# FAST LOMB SCARGLE
# ------------------------------
# (we're inside the loop of kepler IDs...)
#
# calculates NOT ANGULAR frequency
# px, py: periodogram axis
# nout: number of calculated frequencies
# jmax: array index corresponding to the ppal peak: argmax(py)
# prob: an estimate of the significance of that
# maximum against the hypothesis of random noise. A small value of prob
# indicates that a significant periodic signal is present.
oversampl = OVERSAMPL # 4 or more, v08: now 20
hifreq = HIFREQ # 1 , v08: now 2
px,py,nout,jmax,prob = fasper(df_lc['time'].values, \
df_lc['nflux'].values, oversampl, hifreq)
f1 = px[jmax] # PPAL PEAK FREQUENCY
P1 = 1.0/f1 # PPAL PEAK PERIOD
#
#
DO_info_LS = False
if DO_info_LS:
print '\n Fast LombScargle'
print 'Frequency of the ppal peak (not angular freq): {}'.format(px[jmax])
print 'Freq power, peak A: {0}'.format(py[jmax])
print 'Period of peak A: {0} days'.format(1/px[jmax])
print 'Probability (lower means a more significant signal): {0}'.format(prob)
print 'time elapsed up to LS... {0} sec'.format(time.time()-general_time)
print
# Save periodogram
# I use pandas.sort() because the DataFrame contains the output np.array from LS
df_LS = DataFrame({'1_Period(d)':1.0/px,'2_LinearFreq':px,'3_Power':py}) # all arrays must have the same length
df_LS = df_LS.sort(['3_Power'], ascending=[False]) # sort by power of each period
df_LS = df_LS.reset_index(drop=True) # re-index data frame after sort
N_freqs = nout
print '\n\tnumber of calculated frequencies: {0}\n'.format(N_freqs)
# To save up to 4th peak
for pp in [0,1,2,3]:
periods_tab[0].append(kplr_id[ kplr_id.find('kplr')+4: ]) # save the KIC
periods_tab[1].append(df_LS['1_Period(d)'].values[pp])
periods_tab[2].append(df_LS['2_LinearFreq'].values[pp])
periods_tab[3].append(df_LS['3_Power'].values[pp]) # peak power (table sorted by descending power, so row 0 is the principal peak)
periods_tab[4].append(prob) # v08
periods_tab[5].append(pp+1) # v09
# XX.- erase variables
px,py,nout,jmax,prob = None,None,None,None,None
#...................................................................................................................
# W R I T E I N F O T O F I L E S (I)
# ----------------------------------
#
# KIC by KIC: To Save periodogram first 100 frequencies
out_name = 'tables/LS/'+ MAINSAMPL +'.' + RUN_i + '_LS_' + kplr_id + '.csv'
df_LS[0:100].to_csv(out_name, sep=',', index=False, header=True)
#...................................................................................................................
# PHASE PLOT
# -----------------
#
# Some things to remember before calling the phase plot:
# period of ppal peak: P1
# time serie w/o NaN: TimeSer = df_lc['time_BKJD'].astype('float64').values
# flux serie w/o NaN: FluxSer = df_lc['norm_flux'].astype('float64').values
# PhaseD() : returns a Dataframe with normalized flux and norm time
# Phase, Nphases = PhaseD(df_lc, P1)
# print 'elapsed time up to 1st phase plot: {0} sec'.format(abs(general_time - time.time()))
#...................................................................................................................
# closing the FOR...
# OUT OF FOR THAT WALK THROUGH KICs...
# W R I T E I N F O T O F I L E S (II)
#
# GENERAL: To save first peak info and KIC
df_Ppal = DataFrame({'1_KIC':periods_tab[0][:], '2_Period(d)':periods_tab[1][:], '3_LinearFreq':periods_tab[2][:], \
'4_Power':periods_tab[3][:],'5_FAP_Prob':periods_tab[4][:], '6_Peak':periods_tab[5][:]})
fname_Ppal = 'tables/'+ MAINSAMPL +'.' + RUN_i + '_4PeakInfo.csv'
df_Ppal.to_csv(fname_Ppal, sep=',', index=False, header=True)
# GENERAL: To Save the additional info of each light curve
df_AddInfo = DataFrame({'1_KIC':info_aux[0][:],'2_Quarter':info_aux[1][:],'3_Season':info_aux[2][:]})
fname_AddInfo = 'tables/'+ MAINSAMPL +'.' + RUN_i + '_LCsInfo.csv'
df_AddInfo.to_csv(fname_AddInfo, sep=',', index=False, header=True)
# GENERAL: missed KIC
if len(summary_ls) > 0:
fn_miss = 'tables/' + MAINSAMPL +'.' + RUN_i + '_noKIC.csv'
DataFrame({'Missed_KIC':summary_ls}).to_csv(fn_miss,index=False, header=True)
#...................................................................................................................
print 'Thanks... The End!'
print 'Total elapsed time: {0} hours'.format( (time.time()-general_time)/3600. )
else:
print '\n\t\t NOTE: this script was imported from another script/program'
print '\t\t -------------------------------------------------------------------------------------'
|
|
import pytz
import json
from django.core.exceptions import ValidationError
from rest_framework import serializers as ser
from rest_framework import exceptions
from api.base.exceptions import Conflict
from api.base.utils import absolute_reverse, get_user_auth
from website.project.metadata.utils import is_prereg_admin_not_project_admin
from website.exceptions import NodeStateError
from website.project.model import NodeUpdateError
from api.files.serializers import OsfStorageFileSerializer
from api.nodes.serializers import NodeSerializer, NodeProviderSerializer
from api.nodes.serializers import NodeLinksSerializer, NodeLicenseSerializer
from api.nodes.serializers import NodeContributorsSerializer, NodeTagField
from api.base.serializers import (IDField, RelationshipField, LinksField, HideIfWithdrawal,
FileCommentRelationshipField, NodeFileHyperLinkField, HideIfRegistration,
JSONAPIListField, ShowIfVersion, VersionedDateTimeField,)
from framework.auth.core import Auth
from osf.exceptions import ValidationValueError
class BaseRegistrationSerializer(NodeSerializer):
title = ser.CharField(read_only=True)
description = ser.CharField(read_only=True)
category_choices = NodeSerializer.category_choices
category_choices_string = NodeSerializer.category_choices_string
category = HideIfWithdrawal(ser.ChoiceField(read_only=True, choices=category_choices, help_text='Choices: ' + category_choices_string))
date_modified = VersionedDateTimeField(source='last_logged', read_only=True)
fork = HideIfWithdrawal(ser.BooleanField(read_only=True, source='is_fork'))
collection = HideIfWithdrawal(ser.BooleanField(read_only=True, source='is_collection'))
node_license = HideIfWithdrawal(NodeLicenseSerializer(read_only=True))
tags = HideIfWithdrawal(JSONAPIListField(child=NodeTagField(), required=False))
public = HideIfWithdrawal(ser.BooleanField(source='is_public', required=False,
help_text='Nodes that are made public will give read-only access '
'to everyone. Private nodes require explicit read '
'permission. Write and admin access are the same for '
'public and private nodes. Administrators on a parent '
'node have implicit read permissions for all child nodes'))
current_user_permissions = HideIfWithdrawal(ser.SerializerMethodField(help_text='List of strings representing the permissions '
'for the current user on this node.'))
pending_embargo_approval = HideIfWithdrawal(ser.BooleanField(read_only=True, source='is_pending_embargo',
help_text='The associated Embargo is awaiting approval by project admins.'))
pending_registration_approval = HideIfWithdrawal(ser.BooleanField(source='is_pending_registration', read_only=True,
help_text='The associated RegistrationApproval is awaiting approval by project admins.'))
pending_withdrawal = HideIfWithdrawal(ser.BooleanField(source='is_pending_retraction', read_only=True,
help_text='The registration is awaiting withdrawal approval by project admins.'))
withdrawn = ser.BooleanField(source='is_retracted', read_only=True,
help_text='The registration has been withdrawn.')
date_registered = VersionedDateTimeField(source='registered_date', read_only=True, help_text='Date time of registration.')
date_withdrawn = VersionedDateTimeField(source='retraction.date_retracted', read_only=True, help_text='Date time of when this registration was retracted.')
embargo_end_date = HideIfWithdrawal(ser.SerializerMethodField(help_text='When the embargo on this registration will be lifted.'))
withdrawal_justification = ser.CharField(source='retraction.justification', read_only=True)
template_from = HideIfWithdrawal(ser.CharField(read_only=True, allow_blank=False, allow_null=False,
help_text='Specify a node id for a node you would like to use as a template for the '
'new node. Templating is like forking, except that you do not copy the '
'files, only the project structure. Some information is changed on the top '
'level project by submitting the appropriate fields in the request body, '
'and some information will not change. By default, the description will '
'be cleared and the project will be made private.'))
registration_supplement = ser.SerializerMethodField()
registered_meta = HideIfWithdrawal(ser.SerializerMethodField(
help_text='A dictionary with supplemental registration questions and responses.'))
registered_by = HideIfWithdrawal(RelationshipField(
related_view='users:user-detail',
related_view_kwargs={'user_id': '<registered_user._id>'}
))
registered_from = HideIfWithdrawal(RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<registered_from._id>'}
))
children = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-children',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_node_count'},
))
comments = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-comments',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'unread': 'get_unread_comments_count'},
filter={'target': '<_id>'}
))
contributors = RelationshipField(
related_view='registrations:registration-contributors',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_contrib_count'}
)
files = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-providers',
related_view_kwargs={'node_id': '<_id>'}
))
wikis = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-wikis',
related_view_kwargs={'node_id': '<_id>'},
))
forked_from = HideIfWithdrawal(RelationshipField(
related_view=lambda n: 'registrations:registration-detail' if getattr(n, 'is_registration', False) else 'nodes:node-detail',
related_view_kwargs={'node_id': '<forked_from_id>'}
))
template_node = HideIfWithdrawal(RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<template_node._id>'}
))
license = HideIfWithdrawal(RelationshipField(
related_view='licenses:license-detail',
related_view_kwargs={'license_id': '<node_license.node_license._id>'},
))
logs = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-logs',
related_view_kwargs={'node_id': '<_id>'},
))
forks = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-forks',
related_view_kwargs={'node_id': '<_id>'}
))
node_links = ShowIfVersion(HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-pointers',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_pointers_count'},
help_text='This feature is deprecated as of version 2.1. Use linked_nodes instead.'
)), min_version='2.0', max_version='2.0')
parent = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-detail',
related_view_kwargs={'node_id': '<parent_node._id>'},
filter_key='parent_node'
))
root = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-detail',
related_view_kwargs={'node_id': '<root._id>'}
))
affiliated_institutions = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-institutions',
related_view_kwargs={'node_id': '<_id>'}
))
registration_schema = RelationshipField(
related_view='metaschemas:metaschema-detail',
related_view_kwargs={'metaschema_id': '<registered_schema_id>'}
)
registrations = HideIfRegistration(RelationshipField(
related_view='nodes:node-registrations',
related_view_kwargs={'node_id': '<_id>'}
))
draft_registrations = HideIfRegistration(RelationshipField(
related_view='nodes:node-draft-registrations',
related_view_kwargs={'node_id': '<_id>'}
))
preprints = HideIfWithdrawal(HideIfRegistration(RelationshipField(
related_view='nodes:node-preprints',
related_view_kwargs={'node_id': '<_id>'}
)))
identifiers = HideIfWithdrawal(RelationshipField(
related_view='registrations:identifier-list',
related_view_kwargs={'node_id': '<_id>'}
))
linked_nodes = HideIfWithdrawal(RelationshipField(
related_view='registrations:linked-nodes',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_node_links_count'},
self_view='registrations:node-pointer-relationship',
self_view_kwargs={'node_id': '<_id>'}
))
linked_registrations = HideIfWithdrawal(RelationshipField(
related_view='registrations:linked-registrations',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_registration_links_count'},
self_view='registrations:node-registration-pointer-relationship',
self_view_kwargs={'node_id': '<_id>'}
))
view_only_links = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-view-only-links',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_view_only_links_count'},
))
citation = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-citation',
related_view_kwargs={'node_id': '<_id>'}
))
links = LinksField({'self': 'get_registration_url', 'html': 'get_absolute_html_url'})
def get_registration_url(self, obj):
return absolute_reverse('registrations:registration-detail', kwargs={
'node_id': obj._id,
'version': self.context['request'].parser_context['kwargs']['version']
})
def get_absolute_url(self, obj):
return self.get_registration_url(obj)
def create(self, validated_data):
auth = get_user_auth(self.context['request'])
draft = validated_data.pop('draft')
registration_choice = validated_data.pop('registration_choice', 'immediate')
embargo_lifted = validated_data.pop('lift_embargo', None)
reviewer = is_prereg_admin_not_project_admin(self.context['request'], draft)
try:
draft.validate_metadata(metadata=draft.registration_metadata, reviewer=reviewer, required_fields=True)
except ValidationValueError as e:
raise exceptions.ValidationError(e.message)
registration = draft.register(auth, save=True)
if registration_choice == 'embargo':
if not embargo_lifted:
raise exceptions.ValidationError('lift_embargo must be specified.')
embargo_end_date = embargo_lifted.replace(tzinfo=pytz.utc)
try:
registration.embargo_registration(auth.user, embargo_end_date)
except ValidationError as err:
raise exceptions.ValidationError(err.message)
else:
try:
registration.require_approval(auth.user)
except NodeStateError as err:
raise exceptions.ValidationError(err)
registration.save()
return registration
def get_registered_meta(self, obj):
if obj.registered_meta:
meta_values = obj.registered_meta.values()[0]
try:
return json.loads(meta_values)
except TypeError:
return meta_values
except ValueError:
return meta_values
return None
def get_embargo_end_date(self, obj):
if obj.embargo_end_date:
return obj.embargo_end_date
return None
def get_registration_supplement(self, obj):
if obj.registered_schema:
schema = obj.registered_schema.first()
if schema is None:
return None
return schema.name
return None
def get_current_user_permissions(self, obj):
return NodeSerializer.get_current_user_permissions(self, obj)
def update(self, registration, validated_data):
auth = Auth(self.context['request'].user)
# Update tags
if 'tags' in validated_data:
new_tags = validated_data.pop('tags', [])
try:
registration.update_tags(new_tags, auth=auth)
except NodeStateError as err:
raise Conflict(err.message)
is_public = validated_data.get('is_public', None)
if is_public is not None:
if is_public:
try:
registration.update(validated_data, auth=auth)
except NodeUpdateError as err:
raise exceptions.ValidationError(err.reason)
except NodeStateError as err:
raise exceptions.ValidationError(err.message)
else:
raise exceptions.ValidationError('Registrations can only be turned from private to public.')
return registration
class Meta:
type_ = 'registrations'
class RegistrationSerializer(BaseRegistrationSerializer):
"""
Overrides BaseRegistrationSerializer to add draft_registration, registration_choice, and lift_embargo fields
"""
draft_registration = ser.CharField(write_only=True)
registration_choice = ser.ChoiceField(write_only=True, choices=['immediate', 'embargo'])
lift_embargo = VersionedDateTimeField(write_only=True, default=None, input_formats=['%Y-%m-%dT%H:%M:%S'])
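# Illustrative request body for creating a registration from a draft through this serializer.
# This is a sketch, not part of the codebase: the draft id and dates are placeholders, and the
# exact payload wrapping follows the JSON-API conventions used by the rest of this API.
#
# {
#     "data": {
#         "type": "registrations",
#         "attributes": {
#             "draft_registration": "abc12",           # id of the draft registration to register
#             "registration_choice": "embargo",        # or "immediate"
#             "lift_embargo": "2018-01-01T00:00:00"    # required when registration_choice is "embargo"
#         }
#     }
# }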
class RegistrationDetailSerializer(BaseRegistrationSerializer):
"""
Overrides BaseRegistrationSerializer to make id required.
"""
id = IDField(source='_id', required=True)
class RegistrationNodeLinksSerializer(NodeLinksSerializer):
def get_absolute_url(self, obj):
return absolute_reverse(
'registrations:registration-pointer-detail',
kwargs={
'node_link_id': obj._id,
'node_id': self.context['request'].parser_context['kwargs']['node_id'],
'version': self.context['request'].parser_context['kwargs']['version']
}
)
class RegistrationContributorsSerializer(NodeContributorsSerializer):
def get_absolute_url(self, obj):
return absolute_reverse(
'registrations:registration-contributor-detail',
kwargs={
'user_id': obj.user._id,
'node_id': self.context['request'].parser_context['kwargs']['node_id'],
'version': self.context['request'].parser_context['kwargs']['version']
}
)
class RegistrationFileSerializer(OsfStorageFileSerializer):
files = NodeFileHyperLinkField(
related_view='registrations:registration-files',
related_view_kwargs={'node_id': '<node._id>', 'path': '<path>', 'provider': '<provider>'},
kind='folder'
)
comments = FileCommentRelationshipField(related_view='registrations:registration-comments',
related_view_kwargs={'node_id': '<node._id>'},
related_meta={'unread': 'get_unread_comments_count'},
filter={'target': 'get_file_guid'}
)
node = RelationshipField(related_view='registrations:registration-detail',
related_view_kwargs={'node_id': '<node._id>'},
help_text='The registration that this file belongs to'
)
class RegistrationProviderSerializer(NodeProviderSerializer):
"""
Overrides NodeProviderSerializer so that file links point to the correct registration endpoints
"""
files = NodeFileHyperLinkField(
related_view='registrations:registration-files',
related_view_kwargs={'node_id': '<node._id>', 'path': '<path>', 'provider': '<provider>'},
kind='folder',
never_embed=True
)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCrossConnectionsOperations:
"""ExpressRouteCrossConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.ExpressRouteCrossConnectionListResult"]:
"""Retrieves all the ExpressRouteCrossConnections in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteCrossConnectionListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_06_01.models.ExpressRouteCrossConnectionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteCrossConnectionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteCrossConnections'} # type: ignore
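# Illustrative usage sketch for the paging operation above (kept as comments; not part of the
# generated client). It assumes the operations group is reached through the async
# NetworkManagementClient and that `subscription_id` is supplied by the caller.
#
# from azure.identity.aio import DefaultAzureCredential
# from azure.mgmt.network.aio import NetworkManagementClient
#
# async def print_cross_connections(subscription_id):
#     async with DefaultAzureCredential() as credential:
#         async with NetworkManagementClient(credential, subscription_id) as client:
#             # list() returns an AsyncItemPaged; consume it with `async for`.
#             async for cross_connection in client.express_route_cross_connections.list():
#                 print(cross_connection.name)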
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ExpressRouteCrossConnectionListResult"]:
"""Retrieves all the ExpressRouteCrossConnections in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteCrossConnectionListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_06_01.models.ExpressRouteCrossConnectionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteCrossConnectionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections'} # type: ignore
async def get(
self,
resource_group_name: str,
cross_connection_name: str,
**kwargs: Any
) -> "_models.ExpressRouteCrossConnection":
"""Gets details about the specified ExpressRouteCrossConnection.
:param resource_group_name: The name of the resource group (peering location of the circuit).
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection (service key of the
circuit).
:type cross_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCrossConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_06_01.models.ExpressRouteCrossConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCrossConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}'} # type: ignore
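# Hypothetical call sketch for the coroutine above (client constructed as in the list()
# example earlier; the resource group name and service key are placeholders):
#
# cross_connection = await client.express_route_cross_connections.get(
#     resource_group_name="example-rg",
#     cross_connection_name="example-service-key",
# )
# print(cross_connection.id)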
async def _create_or_update_initial(
self,
resource_group_name: str,
cross_connection_name: str,
parameters: "_models.ExpressRouteCrossConnection",
**kwargs: Any
) -> "_models.ExpressRouteCrossConnection":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ExpressRouteCrossConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCrossConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
cross_connection_name: str,
parameters: "_models.ExpressRouteCrossConnection",
**kwargs: Any
) -> AsyncLROPoller["_models.ExpressRouteCrossConnection"]:
"""Update the specified ExpressRouteCrossConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:param parameters: Parameters supplied to the update express route crossConnection operation.
:type parameters: ~azure.mgmt.network.v2020_06_01.models.ExpressRouteCrossConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExpressRouteCrossConnection or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_06_01.models.ExpressRouteCrossConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
cross_connection_name=cross_connection_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCrossConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}'} # type: ignore
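# Hypothetical call sketch for the long-running operation above: begin_create_or_update
# returns an AsyncLROPoller, and awaiting poller.result() yields the final
# ExpressRouteCrossConnection. Resource names are placeholders; a typical flow reads the
# resource, modifies allowed fields, then submits it back.
#
# cross_connection = await client.express_route_cross_connections.get(
#     "example-rg", "example-service-key")
# poller = await client.express_route_cross_connections.begin_create_or_update(
#     "example-rg", "example-service-key", cross_connection)
# updated = await poller.result()  # result() waits for the LRO to finish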
async def update_tags(
self,
resource_group_name: str,
cross_connection_name: str,
cross_connection_parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.ExpressRouteCrossConnection":
"""Updates an express route cross connection tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the cross connection.
:type cross_connection_name: str
:param cross_connection_parameters: Parameters supplied to update express route cross
connection tags.
:type cross_connection_parameters: ~azure.mgmt.network.v2020_06_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCrossConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_06_01.models.ExpressRouteCrossConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(cross_connection_parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCrossConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}'} # type: ignore
async def _list_arp_table_initial(
self,
resource_group_name: str,
cross_connection_name: str,
peering_name: str,
device_path: str,
**kwargs: Any
) -> Optional["_models.ExpressRouteCircuitsArpTableListResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ExpressRouteCircuitsArpTableListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self._list_arp_table_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitsArpTableListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_arp_table_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}/arpTables/{devicePath}'} # type: ignore
async def begin_list_arp_table(
self,
resource_group_name: str,
cross_connection_name: str,
peering_name: str,
device_path: str,
**kwargs: Any
) -> AsyncLROPoller["_models.ExpressRouteCircuitsArpTableListResult"]:
"""Gets the currently advertised ARP table associated with the express route cross connection in a
resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param device_path: The path of the device.
:type device_path: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExpressRouteCircuitsArpTableListResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_06_01.models.ExpressRouteCircuitsArpTableListResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitsArpTableListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._list_arp_table_initial(
resource_group_name=resource_group_name,
cross_connection_name=cross_connection_name,
peering_name=peering_name,
device_path=device_path,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitsArpTableListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_arp_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}/arpTables/{devicePath}'} # type: ignore
async def _list_routes_table_summary_initial(
self,
resource_group_name: str,
cross_connection_name: str,
peering_name: str,
device_path: str,
**kwargs: Any
) -> Optional["_models.ExpressRouteCrossConnectionsRoutesTableSummaryListResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ExpressRouteCrossConnectionsRoutesTableSummaryListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self._list_routes_table_summary_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCrossConnectionsRoutesTableSummaryListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_routes_table_summary_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}/routeTablesSummary/{devicePath}'} # type: ignore
async def begin_list_routes_table_summary(
self,
resource_group_name: str,
cross_connection_name: str,
peering_name: str,
device_path: str,
**kwargs: Any
) -> AsyncLROPoller["_models.ExpressRouteCrossConnectionsRoutesTableSummaryListResult"]:
"""Gets the route table summary associated with the express route cross connection in a resource
group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param device_path: The path of the device.
:type device_path: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExpressRouteCrossConnectionsRoutesTableSummaryListResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_06_01.models.ExpressRouteCrossConnectionsRoutesTableSummaryListResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnectionsRoutesTableSummaryListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._list_routes_table_summary_initial(
resource_group_name=resource_group_name,
cross_connection_name=cross_connection_name,
peering_name=peering_name,
device_path=device_path,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCrossConnectionsRoutesTableSummaryListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_routes_table_summary.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}/routeTablesSummary/{devicePath}'} # type: ignore
async def _list_routes_table_initial(
self,
resource_group_name: str,
cross_connection_name: str,
peering_name: str,
device_path: str,
**kwargs: Any
) -> Optional["_models.ExpressRouteCircuitsRoutesTableListResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ExpressRouteCircuitsRoutesTableListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self._list_routes_table_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_routes_table_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}/routeTables/{devicePath}'} # type: ignore
async def begin_list_routes_table(
self,
resource_group_name: str,
cross_connection_name: str,
peering_name: str,
device_path: str,
**kwargs: Any
) -> AsyncLROPoller["_models.ExpressRouteCircuitsRoutesTableListResult"]:
"""Gets the currently advertised routes table associated with the express route cross connection
in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param device_path: The path of the device.
:type device_path: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExpressRouteCircuitsRoutesTableListResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_06_01.models.ExpressRouteCircuitsRoutesTableListResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitsRoutesTableListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._list_routes_table_initial(
resource_group_name=resource_group_name,
cross_connection_name=cross_connection_name,
peering_name=peering_name,
device_path=device_path,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_routes_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}/routeTables/{devicePath}'} # type: ignore
|
|
# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from fabric import colors
from fabric.utils import indent
from itertools import chain
import netaddr
from lib.base import (
BGPContainer,
OSPFContainer,
CmdBuffer,
BGP_FSM_IDLE,
BGP_FSM_ACTIVE,
BGP_FSM_ESTABLISHED,
BGP_ATTR_TYPE_MULTI_EXIT_DISC,
BGP_ATTR_TYPE_LOCAL_PREF,
)
class QuaggaBGPContainer(BGPContainer):
WAIT_FOR_BOOT = 1
SHARED_VOLUME = '/etc/quagga'
def __init__(self, name, asn, router_id, ctn_image_name='osrg/quagga', zebra=False):
super(QuaggaBGPContainer, self).__init__(name, asn, router_id,
ctn_image_name)
self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
self.zebra = zebra
def run(self):
super(QuaggaBGPContainer, self).run()
return self.WAIT_FOR_BOOT
def get_global_rib(self, prefix='', rf='ipv4'):
rib = []
if prefix != '':
return self.get_global_rib_with_prefix(prefix, rf)
out = self.vtysh('show bgp {0} unicast'.format(rf), config=False)
if out.startswith('No BGP network exists'):
return rib
read_next = False
for line in out.split('\n'):
ibgp = False
if line[:2] == '*>':
line = line[2:]
ibgp = False
if line[0] == 'i':
line = line[1:]
ibgp = True
elif not read_next:
continue
elems = line.split()
if len(elems) == 1:
read_next = True
prefix = elems[0]
continue
elif read_next:
nexthop = elems[0]
else:
prefix = elems[0]
nexthop = elems[1]
read_next = False
rib.append({'prefix': prefix, 'nexthop': nexthop,
'ibgp': ibgp})
return rib
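# Illustrative shape of the value returned above (addresses are placeholders); each entry is a
# dict built from one line of 'show bgp <rf> unicast' output:
#
# [
#     {'prefix': '10.0.1.0/24', 'nexthop': '192.168.10.2', 'ibgp': False},
#     {'prefix': '10.0.2.0/24', 'nexthop': '192.168.10.3', 'ibgp': True},
# ]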
def get_global_rib_with_prefix(self, prefix, rf):
rib = []
lines = [line.strip() for line in self.vtysh('show bgp {0} unicast {1}'.format(rf, prefix), config=False).split('\n')]
if lines[0] == '% Network not in table':
return rib
lines = lines[2:]
if lines[0].startswith('Not advertised'):
lines.pop(0) # another useless line
elif lines[0].startswith('Advertised to non peer-group peers:'):
lines = lines[2:] # other useless lines
else:
raise Exception('unknown output format {0}'.format(lines))
if lines[0] == 'Local':
aspath = []
else:
aspath = [int(asn) for asn in lines[0].split()]
nexthop = lines[1].split()[0].strip()
info = [s.strip(',') for s in lines[2].split()]
attrs = []
if 'metric' in info:
med = info[info.index('metric') + 1]
attrs.append({'type': BGP_ATTR_TYPE_MULTI_EXIT_DISC, 'metric': int(med)})
if 'localpref' in info:
localpref = info[info.index('localpref') + 1]
attrs.append({'type': BGP_ATTR_TYPE_LOCAL_PREF, 'value': int(localpref)})
rib.append({'prefix': prefix, 'nexthop': nexthop,
'aspath': aspath, 'attrs': attrs})
return rib
def get_neighbor_state(self, peer):
if peer not in self.peers:
raise Exception('not found peer {0}'.format(peer.router_id))
neigh_addr = self.peers[peer]['neigh_addr'].split('/')[0]
info = [l.strip() for l in self.vtysh('show bgp neighbors {0}'.format(neigh_addr), config=False).split('\n')]
if not info[0].startswith('BGP neighbor is'):
raise Exception('unknown format')
idx1 = info[0].index('BGP neighbor is ')
idx2 = info[0].index(',')
n_addr = info[0][idx1 + len('BGP neighbor is '):idx2]
if n_addr == neigh_addr:
idx1 = info[2].index('= ')
state = info[2][idx1 + len('= '):]
if state.startswith('Idle'):
return BGP_FSM_IDLE
elif state.startswith('Active'):
return BGP_FSM_ACTIVE
elif state.startswith('Established'):
return BGP_FSM_ESTABLISHED
else:
return state
raise Exception('not found peer {0}'.format(peer.router_id))
def send_route_refresh(self):
self.vtysh('clear ip bgp * soft', config=False)
def create_config(self):
self._create_config_bgp()
if self.zebra:
self._create_config_zebra()
def _create_config_bgp(self):
c = CmdBuffer()
c << 'hostname bgpd'
c << 'password zebra'
c << 'router bgp {0}'.format(self.asn)
c << 'bgp router-id {0}'.format(self.router_id)
if any(info['graceful_restart'] for info in self.peers.itervalues()):
c << 'bgp graceful-restart'
version = 4
for peer, info in self.peers.iteritems():
version = netaddr.IPNetwork(info['neigh_addr']).version
n_addr = info['neigh_addr'].split('/')[0]
if version == 6:
c << 'no bgp default ipv4-unicast'
c << 'neighbor {0} remote-as {1}'.format(n_addr, peer.asn)
if info['is_rs_client']:
c << 'neighbor {0} route-server-client'.format(n_addr)
for typ, p in info['policies'].iteritems():
c << 'neighbor {0} route-map {1} {2}'.format(n_addr, p['name'],
typ)
if info['passwd']:
c << 'neighbor {0} password {1}'.format(n_addr, info['passwd'])
if info['passive']:
c << 'neighbor {0} passive'.format(n_addr)
if version == 6:
c << 'address-family ipv6 unicast'
c << 'neighbor {0} activate'.format(n_addr)
c << 'exit-address-family'
for route in chain.from_iterable(self.routes.itervalues()):
if route['rf'] == 'ipv4':
c << 'network {0}'.format(route['prefix'])
elif route['rf'] == 'ipv6':
c << 'address-family ipv6 unicast'
c << 'network {0}'.format(route['prefix'])
c << 'exit-address-family'
else:
raise Exception('unsupported route family: {0}'.format(route['rf']))
if self.zebra:
if version == 6:
c << 'address-family ipv6 unicast'
c << 'redistribute connected'
c << 'exit-address-family'
else:
c << 'redistribute connected'
for name, policy in self.policies.iteritems():
c << 'access-list {0} {1} {2}'.format(name, policy['type'],
policy['match'])
c << 'route-map {0} permit 10'.format(name)
c << 'match ip address {0}'.format(name)
c << 'set metric {0}'.format(policy['med'])
c << 'debug bgp as4'
c << 'debug bgp fsm'
c << 'debug bgp updates'
c << 'debug bgp events'
c << 'log file {0}/bgpd.log'.format(self.SHARED_VOLUME)
with open('{0}/bgpd.conf'.format(self.config_dir), 'w') as f:
print colors.yellow('[{0}\'s new bgpd.conf]'.format(self.name))
print colors.yellow(indent(str(c)))
f.writelines(str(c))
def _create_config_zebra(self):
c = CmdBuffer()
c << 'hostname zebra'
c << 'password zebra'
c << 'log file {0}/zebra.log'.format(self.SHARED_VOLUME)
c << 'debug zebra packet'
c << 'debug zebra kernel'
c << 'debug zebra rib'
c << ''
with open('{0}/zebra.conf'.format(self.config_dir), 'w') as f:
print colors.yellow('[{0}\'s new zebra.conf]'.format(self.name))
print colors.yellow(indent(str(c)))
f.writelines(str(c))
def vtysh(self, cmd, config=True):
if not isinstance(cmd, list):
cmd = [cmd]
cmd = ' '.join("-c '{0}'".format(c) for c in cmd)
if config:
return self.local("vtysh -d bgpd -c 'enable' -c 'conf t' -c 'router bgp {0}' {1}".format(self.asn, cmd), capture=True)
else:
return self.local("vtysh -d bgpd {0}".format(cmd), capture=True)
def reload_config(self):
daemon = ['bgpd']
if self.zebra:
daemon.append('zebra')
for d in daemon:
cmd = '/usr/bin/pkill {0} -SIGHUP'.format(d)
self.local(cmd, capture=True)
class RawQuaggaBGPContainer(QuaggaBGPContainer):
def __init__(self, name, config, ctn_image_name='osrg/quagga', zebra=False):
asn = None
router_id = None
for line in config.split('\n'):
line = line.strip()
if line.startswith('router bgp'):
asn = int(line[len('router bgp'):].strip())
if line.startswith('bgp router-id'):
router_id = line[len('bgp router-id'):].strip()
if not asn:
raise Exception('asn not in quagga config')
if not router_id:
raise Exception('router-id not in quagga config')
self.config = config
super(RawQuaggaBGPContainer, self).__init__(name, asn, router_id,
ctn_image_name, zebra)
def create_config(self):
with open('{0}/bgpd.conf'.format(self.config_dir), 'w') as f:
print colors.yellow('[{0}\'s new bgpd.conf]'.format(self.name))
print colors.yellow(indent(self.config))
f.writelines(self.config)
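# Illustrative construction of a RawQuaggaBGPContainer (a sketch; names and addresses are
# placeholders). The config string must contain 'router bgp <asn>' and 'bgp router-id <id>'
# lines, which __init__ parses out:
#
# config = """
# hostname bgpd
# password zebra
# router bgp 65001
#  bgp router-id 192.168.0.1
# """
# ctn = RawQuaggaBGPContainer('q1', config)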
class QuaggaOSPFContainer(OSPFContainer):
SHARED_VOLUME = '/etc/quagga'
ZAPI_V2_IMAGE = 'osrg/quagga'
ZAPI_V3_IMAGE = 'osrg/quagga:v1.0'
def __init__(self, name, image=ZAPI_V2_IMAGE, zapi_version=2,
zebra_config=None, ospfd_config=None):
if zapi_version != 2:
image = self.ZAPI_V3_IMAGE
super(QuaggaOSPFContainer, self).__init__(name, image)
self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
self.zapi_version = zapi_version
# Example:
# zebra_config = {
# 'interfaces': { # interface settings
# 'eth0': [
# 'ip address 192.168.0.1/24',
# ],
# },
# 'routes': [ # static route settings
# 'ip route 172.16.0.0/16 172.16.0.1',
# ],
# }
self.zebra_config = zebra_config or {}
# Example:
# ospfd_config = {
# 'redistributes': [
# 'connected',
# ],
# 'networks': {
# '192.168.1.0/24': '0.0.0.0', # <network>: <area>
# },
# }
self.ospfd_config = ospfd_config or {}
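# Illustrative instantiation (a sketch; interface names, prefixes and areas are placeholders),
# following the config dictionaries documented above:
#
# ospfd = QuaggaOSPFContainer(
#     'o1',
#     zebra_config={
#         'interfaces': {'eth0': ['ip address 192.168.0.1/24']},
#         'routes': ['ip route 172.16.0.0/16 172.16.0.1'],
#     },
#     ospfd_config={
#         'redistributes': ['connected'],
#         'networks': {'192.168.1.0/24': '0.0.0.0'},
#     },
# )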
def run(self):
super(QuaggaOSPFContainer, self).run()
# self.create_config() is called in super(...).run()
self._start_zebra()
self._start_ospfd()
return self.WAIT_FOR_BOOT
def create_config(self):
self._create_config_zebra()
self._create_config_ospfd()
def _create_config_zebra(self):
c = CmdBuffer()
c << 'hostname zebra'
c << 'password zebra'
for name, settings in self.zebra_config.get('interfaces', {}).items():
c << 'interface {0}'.format(name)
for setting in settings:
c << str(setting)
for route in self.zebra_config.get('routes', []):
c << str(route)
c << 'log file {0}/zebra.log'.format(self.SHARED_VOLUME)
c << 'debug zebra packet'
c << 'debug zebra kernel'
c << 'debug zebra rib'
c << ''
with open('{0}/zebra.conf'.format(self.config_dir), 'w') as f:
print colors.yellow('[{0}\'s new zebra.conf]'.format(self.name))
print colors.yellow(indent(str(c)))
f.writelines(str(c))
def _create_config_ospfd(self):
c = CmdBuffer()
c << 'hostname ospfd'
c << 'password zebra'
c << 'router ospf'
for redistribute in self.ospfd_config.get('redistributes', []):
c << ' redistribute {0}'.format(redistribute)
for network, area in self.ospfd_config.get('networks', {}).items():
self.networks[network] = area # for superclass
c << ' network {0} area {1}'.format(network, area)
c << 'log file {0}/ospfd.log'.format(self.SHARED_VOLUME)
c << ''
with open('{0}/ospfd.conf'.format(self.config_dir), 'w') as f:
print colors.yellow('[{0}\'s new ospfd.conf]'.format(self.name))
print colors.yellow(indent(str(c)))
f.writelines(str(c))
def _start_zebra(self):
# Do nothing. supervisord will automatically start Zebra daemon.
return
def _start_ospfd(self):
if self.zapi_version == 2:
ospfd_cmd = '/usr/lib/quagga/ospfd'
else:
ospfd_cmd = 'ospfd'
self.local(
'{0} -f {1}/ospfd.conf'.format(ospfd_cmd, self.SHARED_VOLUME),
detach=True)
|
|
# -*- coding: utf-8 -*-
#
# django-codenerix
#
# Copyright 2017 Centrologic Computational Logistic Center S.L.
#
# Project URL : http://www.codenerix.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Debugging helpers: the Debugger mix-in class for configurable, colorized debug/warning/error output, plus the lineno() utility
'''
__version__ = "2017082500"
__all__ = ['Debugger', 'lineno']
import time
import datetime
import inspect
from codenerix.lib.colors import colors
def lineno():
'''
Returns the current line number in our program.
'''
return inspect.currentframe().f_back.f_lineno
class Debugger(object):
__indebug = {}
__inname = None
def __autoconfig(self):
# Define debug configuration
import sys
debugger = {}
debugger['screen'] = (sys.stdout, ['*'])
# debugger['log'] = (open("log/debug.log","a"), ['*'] )
self.set_debug(debugger)
def set_debug(self, debug=None):
if debug is None:
self.__autoconfig()
else:
if type(debug) is dict:
# Set the deepness system
idebug = debug.copy()
if 'deepness' in debug:
if debug['deepness']:
idebug['deepness'] -= 1
else:
idebug = {}
# Save internal debugger
self.__indebug = idebug
else:
raise IOError("Argument is not a dictionary")
def get_debug(self):
return self.__indebug
def set_name(self, name):
self.__inname = name
def get_name(self):
return self.__inname
def color(self, color):
        # Colors
if color in colors:
(darkbit, subcolor) = colors[color]
return u"\033[%1d;%02dm" % (darkbit, subcolor)
else:
if color:
self.debug(u"\033[1;31mColor '%s' unknown\033[1;00m\n" % (color))
return ''
def debug(self, msg=None, header=None, color=None, tail=None, head=None, footer=None):
# Allow better names for debug calls
if header is None:
if head is None:
header = True
else:
header = head
if tail is None:
if footer is None:
tail = True
else:
tail = footer
# Retrieve the name of the class
clname = self.__class__.__name__
# Retrieve tabular
if 'tabular' in self.__indebug:
tabular = self.__indebug['tabular']
else:
tabular = ''
# For each element inside indebug
for name in self.__indebug:
            # Process every handler entry, skipping the 'deepness' and 'tabular' keys
if name not in ['deepness', 'tabular']:
                # Get color (only the 'screen' handler gets colored output); keep
                # the caller's color argument intact across handlers
                color_ini = self.color(color if name == 'screen' else None)
                color_end = self.color('close')
# Get file output handler and indebug list
(handler, indebug) = self.__indebug[name]
if msg and type(handler) == str:
# Open handler buffer
handlerbuf = open(handler, "a")
else:
handlerbuf = handler
# Look up if the name of the class is inside indebug
if (clname in indebug) or (('*' in indebug) and ('-%s' % (clname) not in indebug)):
# Set line head name
if self.__inname:
headname = self.__inname
else:
headname = clname
# Build the message
message = color_ini
if header:
now = datetime.datetime.fromtimestamp(time.time())
message += "%02d/%02d/%d %02d:%02d:%02d %-15s - %s" % (now.day, now.month, now.year, now.hour, now.minute, now.second, headname, tabular)
if msg:
try:
message += str(msg)
except UnicodeEncodeError:
message += str(msg.encode('ascii', 'ignore'))
message += color_end
if tail:
message += '\n'
# Print it on the buffer handler
if msg:
handlerbuf.write(message)
handlerbuf.flush()
else:
                        # No message was given: report that output for this class would be shown
return True
# Autoclose handler when done
if msg and type(handler) == str:
handlerbuf.close()
        # No message was given and no handler matched this class
        if not msg:
            # Report to the caller that nothing would be shown
            return False
def warning(self, msg, header=True, tail=True):
self.warningerror(msg, header, 'WARNING', 'yellow', tail)
def error(self, msg, header=True, tail=True):
self.warningerror(msg, header, 'ERROR', 'red', tail)
def warningerror(self, msg, header, prefix, color, tail):
# Retrieve the name of the class
clname = self.__class__.__name__
# Retrieve tabular
if 'tabular' in self.__indebug:
tabular = self.__indebug['tabular']
else:
tabular = ''
# For each element inside indebug
for name in self.__indebug:
            # Process every handler entry, skipping the 'deepness' and 'tabular' keys
if name not in ['deepness', 'tabular']:
# Get file output handler and indebug list
(handler, indebug) = self.__indebug[name]
if type(handler) == str:
# Open handler buffer
handlerbuf = open(handler, "a")
else:
handlerbuf = handler
                # Get color (only the 'screen' handler gets colored output)
                color_ini = self.color(color if name == 'screen' else None)
                color_end = self.color('close')
# Build the message
message = color_ini
if header:
# Set line head name
if self.__inname:
headname = self.__inname
else:
headname = clname
now = datetime.datetime.fromtimestamp(time.time())
message += "\n%s - %02d/%02d/%d %02d:%02d:%02d %-15s - %s" % (prefix, now.day, now.month, now.year, now.hour, now.minute, now.second, headname, tabular)
if msg:
try:
message += str(msg)
except UnicodeEncodeError:
message += str(msg.encode('ascii', 'ignore'))
message += color_end
if tail:
message += '\n'
# Print it on the buffer handler
handlerbuf.write(message)
handlerbuf.flush()
# Autoclose handler when done
if type(handler) == str:
handlerbuf.close()
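# A minimal usage sketch (not part of the original module). The class name
# `Worker` and the messages are illustrative; running this file directly prints
# them to stdout via the default 'screen' handler set up by set_debug().
if __name__ == '__main__':
    class Worker(Debugger):
        def __init__(self):
            self.set_debug()         # autoconfig: everything goes to sys.stdout
            self.set_name('worker')

    w = Worker()
    w.debug('debug message')
    w.warning('something looks odd')
    w.error('something went wrong')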
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for generating random numbers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum # pylint: disable=g-bad-import-order
import six
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import values_util
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_stateful_random_ops
from tensorflow.python.ops import gen_stateless_random_ops_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# A seed for random ops (stateful and stateless) will always be 1024
# bits, all of which will be sent to the C++ code. The actual C++
# implementation of some algorithms may only use a lower part of the bits.
UINT64_HALF_SPAN = 2**63
MAX_INT64 = UINT64_HALF_SPAN - 1
MIN_INT64 = -UINT64_HALF_SPAN
UINT64_SPAN = UINT64_HALF_SPAN * 2
# 'Variable' doesn't support uint32 or uint64 yet (due to reasons explained in
# b/111604096 and cl/171681867), so I use signed int here. I choose int64
# instead of int32 here because `VarHandleOp` doesn't support int32 on GPU.
SEED_TYPE = "int64"
SEED_MIN = MIN_INT64
SEED_MAX = MAX_INT64
SEED_UINT_SPAN = UINT64_SPAN
SEED_TYPE_BITS = 64
SEED_BIT_MASK = 0xFFFFFFFFFFFFFFFF
SEED_SIZE = 16 # in units of SEED_TYPE
STATE_TYPE = SEED_TYPE
ALGORITHM_TYPE = STATE_TYPE
PHILOX_STATE_SIZE = 3
THREEFRY_STATE_SIZE = 2
@tf_export("random.Algorithm", "random.experimental.Algorithm")
class Algorithm(enum.Enum):
PHILOX = 1
THREEFRY = 2
RNG_ALG_PHILOX = Algorithm.PHILOX.value
RNG_ALG_THREEFRY = Algorithm.THREEFRY.value
DEFAULT_ALGORITHM = RNG_ALG_PHILOX
def non_deterministic_ints(shape, dtype=dtypes.int64):
"""Non-deterministically generates some integers.
This op may use some OS-provided source of non-determinism (e.g. an RNG), so
each execution will give different results.
Args:
shape: the shape of the result.
dtype: (optional) the dtype of the result.
Returns:
a tensor whose element values are non-deterministically chosen.
"""
return gen_stateful_random_ops.non_deterministic_ints(
shape=shape, dtype=dtype)
def _uint_to_int(n):
if isinstance(n, int) and n > SEED_MAX:
n = n - SEED_UINT_SPAN
return n
def _make_1d_state(state_size, seed):
"""Makes a 1-D RNG state.
Args:
state_size: an integer.
seed: an integer or 1-D tensor.
Returns:
a 1-D tensor of shape [state_size] and dtype STATE_TYPE.
"""
if isinstance(seed, six.integer_types):
# chop the Python integer (infinite precision) into chunks of SEED_TYPE
ls = []
for _ in range(state_size):
ls.append(seed & SEED_BIT_MASK)
seed >>= SEED_TYPE_BITS
seed = ls
# to avoid overflow error from ops.convert_to_tensor
seed = nest.map_structure(_uint_to_int, seed)
seed = math_ops.cast(seed, STATE_TYPE)
seed = array_ops.reshape(seed, [-1])
seed = seed[0:state_size]
# Padding with zeros on the *left* if too short. Padding on the right would
# cause a small seed to be used as the "counter" while the "key" is always
# zero (for counter-based RNG algorithms), because in the current memory
# layout counter is stored before key. In such a situation two RNGs with
# two different small seeds may generate overlapping outputs.
seed_size = seed.shape[0]
if seed_size is None:
seed_size = array_ops.shape(seed)[0]
padding_size = math_ops.maximum(state_size - seed_size, 0)
padding = array_ops.zeros([padding_size], seed.dtype)
# can't use `pad` because it doesn't support integer dtypes on GPU
seed = array_ops.concat([padding, seed], axis=0)
seed.set_shape([state_size])
return seed
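# Illustrative examples (not part of the original file), assuming the Philox
# state size of 3: an integer seed is chopped into 64-bit words starting from
# the low end, while a short vector seed is left-padded with zeros:
#   _make_1d_state(3, 1234)     -> [1234, 0, 0]
#   _make_1d_state(3, [12, 34]) -> [0, 12, 34]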
def _get_counter_size(alg):
if alg == RNG_ALG_PHILOX:
return 2
elif alg == RNG_ALG_THREEFRY:
return 1
else:
raise ValueError("Unsupported algorithm id: %s" % alg)
def _get_state_size(alg):
if alg == RNG_ALG_PHILOX:
return PHILOX_STATE_SIZE
elif alg == RNG_ALG_THREEFRY:
return THREEFRY_STATE_SIZE
else:
raise ValueError("Unsupported algorithm id: %s" % alg)
def _check_state_shape(shape, alg):
if isinstance(alg, ops.Tensor) and not context.executing_eagerly():
return
shape.assert_is_compatible_with([_get_state_size(int(alg))])
def _make_state_from_seed(seed, alg):
return _make_1d_state(_get_state_size(alg), seed)
def _convert_alg_to_int(alg):
"""Converts algorithm to an integer.
Args:
alg: can be one of these types: integer, Algorithm, Tensor, string. Allowed
strings are "philox" and "threefry".
Returns:
An integer, unless the input is a Tensor in which case a Tensor is returned.
"""
if isinstance(alg, six.integer_types):
return alg
if isinstance(alg, Algorithm):
return alg.value
if isinstance(alg, ops.Tensor):
return alg
if isinstance(alg, str):
if alg == "philox":
return RNG_ALG_PHILOX
elif alg == "threefry":
return RNG_ALG_THREEFRY
else:
raise ValueError("Unknown algorithm name: %s" % alg)
else:
raise TypeError("Can't convert algorithm %s of type %s to int" %
(alg, type(alg)))
@tf_export("random.create_rng_state", "random.experimental.create_rng_state")
def create_rng_state(seed, alg):
"""Creates a RNG state from an integer or a vector.
Example:
>>> tf.random.create_rng_state(
... 1234, "philox")
<tf.Tensor: shape=(3,), dtype=int64, numpy=array([1234, 0, 0])>
>>> tf.random.create_rng_state(
... [12, 34], "threefry")
<tf.Tensor: shape=(2,), dtype=int64, numpy=array([12, 34])>
Args:
seed: an integer or 1-D numpy array.
alg: the RNG algorithm. Can be a string, an `Algorithm` or an integer.
Returns:
a 1-D numpy array whose size depends on the algorithm.
"""
alg = _convert_alg_to_int(alg)
return _make_state_from_seed(seed, alg)
def _shape_tensor(shape):
"""Convert to an int32 or int64 tensor, defaulting to int64 if empty."""
if isinstance(shape, (tuple, list)) and not shape:
dtype = dtypes.int64
else:
dtype = None
return ops.convert_to_tensor(shape, dtype=dtype, name="shape")
def _convert_to_state_tensor(t):
# to avoid out-of-range error from ops.convert_to_tensor
t = nest.map_structure(_uint_to_int, t)
return math_ops.cast(t, STATE_TYPE)
def get_replica_id():
rctx = ds_context.get_replica_context()
if rctx is None:
return None
return rctx.replica_id_in_sync_group
@tf_export("random.Generator", "random.experimental.Generator")
class Generator(tracking.AutoTrackable):
"""Random-number generator.
Example:
Creating a generator from a seed:
>>> g = tf.random.Generator.from_seed(1234)
>>> g.normal(shape=(2, 3))
<tf.Tensor: shape=(2, 3), dtype=float32, numpy=
array([[ 0.9356609 , 1.0854305 , -0.93788373],
[-0.5061547 , 1.3169702 , 0.7137579 ]], dtype=float32)>
Creating a generator from a non-deterministic state:
>>> g = tf.random.Generator.from_non_deterministic_state()
>>> g.normal(shape=(2, 3))
<tf.Tensor: shape=(2, 3), dtype=float32, numpy=...>
  All the constructors allow explicitly choosing a Random-Number-Generation
(RNG) algorithm. Supported algorithms are `"philox"` and `"threefry"`. For
example:
>>> g = tf.random.Generator.from_seed(123, alg="philox")
>>> g.normal(shape=(2, 3))
<tf.Tensor: shape=(2, 3), dtype=float32, numpy=
array([[ 0.8673864 , -0.29899067, -0.9310337 ],
[-1.5828488 , 1.2481191 , -0.6770643 ]], dtype=float32)>
CPU, GPU and TPU with the same algorithm and seed will generate the same
  integer random numbers. Floating-point results (such as the output of `normal`)
may have small numerical discrepancies between different devices.
This class uses a `tf.Variable` to manage its internal state. Every time
random numbers are generated, the state of the generator will change. For
example:
>>> g = tf.random.Generator.from_seed(1234)
>>> g.state
<tf.Variable ... numpy=array([1234, 0, 0])>
>>> g.normal(shape=(2, 3))
<...>
>>> g.state
<tf.Variable ... numpy=array([2770, 0, 0])>
The shape of the state is algorithm-specific.
There is also a global generator:
>>> g = tf.random.get_global_generator()
>>> g.normal(shape=(2, 3))
<tf.Tensor: shape=(2, 3), dtype=float32, numpy=...>
When creating a generator inside a `tf.distribute.Strategy` scope, each
replica will get a different stream of random numbers.
Note: `tf.distribute.experimental.CentralStorageStrategy` and
`tf.distribute.experimental.ParameterServerStrategy` are not supported yet.
For example, in this code:
```
strat = tf.distribute.MirroredStrategy(devices=["cpu:0", "cpu:1"])
with strat.scope():
g = tf.random.Generator.from_seed(1)
def f():
return g.normal([])
results = strat.run(f).values
```
`results[0]` and `results[1]` will have different values.
If the generator is seeded (e.g. created via `Generator.from_seed`), the
random numbers will be determined by the seed, even though different replicas
get different numbers. One can think of a random number generated on a
replica as a hash of the replica ID and a "master" random number that may be
common to all replicas. Hence, the whole system is still deterministic.
(Note that the random numbers on different replicas are not correlated, even
if they are deterministically determined by the same seed. They are not
correlated in the sense that no matter what statistics one calculates on them,
there won't be any discernable correlation.)
Generators can be freely saved and restored using `tf.train.Checkpoint`. The
checkpoint can be restored in a distribution strategy with a different number
of replicas than the original strategy. If a replica ID is present in both the
original and the new distribution strategy, its state will be properly
restored (i.e. the random-number stream from the restored point will be the
same as that from the saving point) unless the replicas have already diverged
in their RNG call traces before saving (e.g. one replica has made one RNG call
  while another has made two RNG calls). We don't have such a guarantee if the
generator is saved in a strategy scope and restored outside of any strategy
scope, or vice versa.
"""
@classmethod
def from_state(cls, state, alg):
"""Creates a generator from a state.
See `__init__` for description of `state` and `alg`.
Args:
state: the new state.
alg: the RNG algorithm.
Returns:
The new generator.
"""
return cls(alg=alg, state=state)
@classmethod
def from_seed(cls, seed, alg=None):
"""Creates a generator from a seed.
A seed is a 1024-bit unsigned integer represented either as a Python
integer or a vector of integers. Seeds shorter than 1024-bit will be
padded. The padding, the internal structure of a seed and the way a seed
is converted to a state are all opaque (unspecified). The only semantics
specification of seeds is that two different seeds are likely to produce
two independent generators (but no guarantee).
Args:
seed: the seed for the RNG.
alg: (optional) the RNG algorithm. If None, it will be auto-selected. See
`__init__` for its possible values.
Returns:
The new generator.
"""
if alg is None:
# TODO(b/170668986): more sophisticated algorithm selection
alg = DEFAULT_ALGORITHM
alg = _convert_alg_to_int(alg)
state = create_rng_state(seed, alg)
return cls(state=state, alg=alg)
@classmethod
def from_non_deterministic_state(cls, alg=None):
"""Creates a generator by non-deterministically initializing its state.
The source of the non-determinism will be platform- and time-dependent.
Args:
alg: (optional) the RNG algorithm. If None, it will be auto-selected. See
`__init__` for its possible values.
Returns:
The new generator.
"""
if alg is None:
# TODO(b/170668986): more sophisticated algorithm selection
alg = DEFAULT_ALGORITHM
alg = _convert_alg_to_int(alg)
state = non_deterministic_ints(shape=[_get_state_size(alg)],
dtype=SEED_TYPE)
return cls(state=state, alg=alg)
@classmethod
def from_key_counter(cls, key, counter, alg):
"""Creates a generator from a key and a counter.
This constructor only applies if the algorithm is a counter-based algorithm.
See method `key` for the meaning of "key" and "counter".
Args:
key: the key for the RNG, a scalar of type STATE_TYPE.
      counter: a vector of dtype STATE_TYPE representing the initial counter for
        the RNG, whose length is algorithm-specific.
      alg: the RNG algorithm. See `__init__` for its possible values.
Returns:
The new generator.
"""
counter = _convert_to_state_tensor(counter)
key = _convert_to_state_tensor(key)
alg = _convert_alg_to_int(alg)
counter.shape.assert_is_compatible_with([_get_state_size(alg) - 1])
key.shape.assert_is_compatible_with([])
key = array_ops.reshape(key, [1])
state = array_ops.concat([counter, key], 0)
return cls(state=state, alg=alg)
def __init__(self, copy_from=None, state=None, alg=None):
"""Creates a generator.
The new generator will be initialized by one of the following ways, with
decreasing precedence:
(1) If `copy_from` is not None, the new generator is initialized by copying
information from another generator.
(2) If `state` and `alg` are not None (they must be set together), the new
generator is initialized by a state.
Args:
copy_from: a generator to be copied from.
state: a vector of dtype STATE_TYPE representing the initial state of the
RNG, whose length and semantics are algorithm-specific. If it's a
variable, the generator will reuse it instead of creating a new
variable.
alg: the RNG algorithm. Possible values are
`tf.random.Algorithm.PHILOX` for the Philox algorithm and
`tf.random.Algorithm.THREEFRY` for the ThreeFry algorithm
(see paper 'Parallel Random Numbers: As Easy as 1, 2, 3'
[https://www.thesalmons.org/john/random123/papers/random123sc11.pdf]).
The string names `"philox"` and `"threefry"` can also be used.
Note `PHILOX` guarantees the same numbers are produced (given
the same random state) across all architectures (CPU, GPU, XLA etc).
"""
# TODO(b/175072242): Remove distribution-strategy dependencies in this file.
if ds_context.has_strategy():
self._distribution_strategy = ds_context.get_strategy()
else:
self._distribution_strategy = None
if copy_from is not None:
# All other arguments should be None
assert (alg or state) is None
self._state_var = self._create_variable(copy_from.state, dtype=STATE_TYPE,
trainable=False)
self._alg = copy_from.algorithm
else:
assert alg is not None and state is not None
if ds_context.has_strategy():
strat_name = type(ds_context.get_strategy()).__name__
# TODO(b/174610856): Support CentralStorageStrategy and
# ParameterServerStrategy.
if "CentralStorage" in strat_name or "ParameterServer" in strat_name:
raise ValueError("%s is not supported yet" % strat_name)
alg = _convert_alg_to_int(alg)
if isinstance(state, variables.Variable):
_check_state_shape(state.shape, alg)
self._state_var = state
else:
state = _convert_to_state_tensor(state)
_check_state_shape(state.shape, alg)
self._state_var = self._create_variable(state, dtype=STATE_TYPE,
trainable=False)
self._alg = alg
def _create_variable(self, *args, **kwargs):
"""Creates a variable.
Args:
      *args: positional arguments passed along to `variables.Variable`.
      **kwargs: keyword arguments passed along to `variables.Variable`.
Returns:
The created variable.
"""
return variables.Variable(*args, **kwargs)
def reset(self, state):
"""Resets the generator by a new state.
See `__init__` for the meaning of "state".
Args:
state: the new state.
"""
state = _convert_to_state_tensor(state)
state.shape.assert_is_compatible_with([_get_state_size(self.algorithm)])
self._state_var.assign(state)
def reset_from_seed(self, seed):
"""Resets the generator by a new seed.
See `from_seed` for the meaning of "seed".
Args:
seed: the new seed.
"""
state = create_rng_state(seed, self.algorithm)
self._state_var.assign(state)
def reset_from_key_counter(self, key, counter):
"""Resets the generator by a new key-counter pair.
See `from_key_counter` for the meaning of "key" and "counter".
Args:
key: the new key.
counter: the new counter.
"""
counter = _convert_to_state_tensor(counter)
key = _convert_to_state_tensor(key)
counter.shape.assert_is_compatible_with(
[_get_state_size(self.algorithm) - 1])
key.shape.assert_is_compatible_with([])
key = array_ops.reshape(key, [1])
state = array_ops.concat([counter, key], 0)
self._state_var.assign(state)
@property
def state(self):
"""The internal state of the RNG."""
return self._state_var
@property
def algorithm(self):
"""The RNG algorithm id (a Python integer or scalar integer Tensor)."""
return self._alg
def _standard_normal(self, shape, dtype):
key, counter = self._prepare_key_counter(shape)
return gen_stateless_random_ops_v2.stateless_random_normal_v2(
shape, key=key, counter=counter, dtype=dtype, alg=self.algorithm)
@property
def key(self):
"""The 'key' part of the state of a counter-based RNG.
    For a counter-based RNG algorithm such as Philox and ThreeFry (as
described in paper 'Parallel Random Numbers: As Easy as 1, 2, 3'
[https://www.thesalmons.org/john/random123/papers/random123sc11.pdf]),
the RNG state consists of two parts: counter and key. The output is
generated via the formula: output=hash(key, counter), i.e. a hashing of
the counter parametrized by the key. Two RNGs with two different keys can
be thought as generating two independent random-number streams (a stream
is formed by increasing the counter).
Returns:
A scalar which is the 'key' part of the state, if the RNG algorithm is
counter-based; otherwise it raises a ValueError.
"""
alg = self.algorithm
if alg == RNG_ALG_PHILOX or alg == RNG_ALG_THREEFRY:
return self._state_var[-1]
else:
raise ValueError("Unsupported algorithm id: %s" % alg)
def _skip_single_var(self, var, delta):
    # TODO(wangpeng): Cache the cast algorithm instead of casting every time.
return gen_stateful_random_ops.rng_read_and_skip(
var.handle,
alg=math_ops.cast(self.algorithm, dtypes.int32),
delta=math_ops.cast(delta, dtypes.uint64))
def skip(self, delta):
"""Advance the counter of a counter-based RNG.
Args:
delta: the amount of advancement. The state of the RNG after
`skip(n)` will be the same as that after `normal([n])`
(or any other distribution). The actual increment added to the
counter is an unspecified implementation detail.
Returns:
A `Tensor` of type `int64`.
"""
def update_fn(v):
return self._skip_single_var(v, delta)
# TODO(b/170515001): Always call strategy.extended.update after calling it
# from both replica context and cross-replica context is supported.
if values_util.is_saving_non_distributed():
# Assumes replica context with replica_id=0, since we only save the first
# replica.
return update_fn(self.state)
if self._distribution_strategy is not None:
with ds_context.enter_or_assert_strategy(self._distribution_strategy):
if ds_context.in_cross_replica_context():
# Code that operates on all replicas of a variable cannot be saved
# without retracing.
values_util.mark_as_unsaveable()
# In cross-replica context we need to use strategy.extended.update.
return ds_context.get_strategy().extended.update(
self.state, update_fn)
return update_fn(self.state)
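  # Illustrative sketch (not part of the original file): per the docstring above,
  # skipping advances the counter exactly as drawing would, so for two fresh
  # generators built from the same seed:
  #   g1 = Generator.from_seed(1234); g2 = Generator.from_seed(1234)
  #   _ = g1.normal([3])
  #   _ = g2.skip(3)
  #   # g1.state and g2.state now hold the same values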
def _preprocess_key(self, key):
if self._distribution_strategy is None:
return key
with ds_context.enter_or_assert_strategy(self._distribution_strategy):
replica_id = get_replica_id()
if replica_id is not None:
replica_id = array_ops.stack([replica_id, 0], axis=0)
replica_id = math_ops.cast(replica_id, dtypes.uint64)
# Conceptually: key = hash(key, replica_id)
key = gen_stateless_random_ops_v2.stateless_random_uniform_full_int_v2(
shape=[1], key=key, counter=replica_id, dtype=dtypes.uint64,
alg=self.algorithm)
return key
def _prepare_key_counter(self, shape):
delta = math_ops.reduce_prod(shape)
counter_key = self.skip(delta)
counter_size = _get_counter_size(self.algorithm)
counter = array_ops.bitcast(counter_key[:counter_size], dtypes.uint64)
key = array_ops.bitcast(counter_key[counter_size:counter_size + 1],
dtypes.uint64)
key = self._preprocess_key(key)
return key, counter
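  # Illustrative note (not part of the original file): for Philox the 3-word
  # value returned by skip() is split as counter = words [0:2] and key = word
  # [2] (matching _get_counter_size above); the key may then be re-hashed with
  # the replica id by _preprocess_key() under a distribution strategy.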
# The following functions return a tensor and as a side effect update
# self._state_var.
def normal(self, shape, mean=0.0, stddev=1.0, dtype=dtypes.float32,
name=None):
"""Outputs random values from a normal distribution.
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output
tensor.
mean: A 0-D Tensor or Python value of type `dtype`. The mean of the normal
distribution.
stddev: A 0-D Tensor or Python value of type `dtype`. The standard
deviation of the normal distribution.
dtype: The type of the output.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random normal values.
"""
with ops.name_scope(name, "stateful_normal", [shape, mean, stddev]) as name:
shape = _shape_tensor(shape)
mean = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
stddev = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
rnd = self._standard_normal(shape, dtype=dtype)
return math_ops.add(rnd * stddev, mean, name=name)
def _truncated_normal(self, shape, dtype):
key, counter = self._prepare_key_counter(shape)
return gen_stateless_random_ops_v2.stateless_truncated_normal_v2(
shape=shape, key=key, counter=counter, dtype=dtype, alg=self.algorithm)
def truncated_normal(self, shape,
mean=0.0,
stddev=1.0,
dtype=dtypes.float32,
name=None):
"""Outputs random values from a truncated normal distribution.
The generated values follow a normal distribution with specified mean and
standard deviation, except that values whose magnitude is more than
2 standard deviations from the mean are dropped and re-picked.
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output
tensor.
mean: A 0-D Tensor or Python value of type `dtype`. The mean of the
truncated normal distribution.
stddev: A 0-D Tensor or Python value of type `dtype`. The standard
deviation of the normal distribution, before truncation.
dtype: The type of the output.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random truncated normal
values.
"""
with ops.name_scope(
name, "truncated_normal", [shape, mean, stddev]) as name:
shape_tensor = _shape_tensor(shape)
mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
stddev_tensor = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
rnd = self._truncated_normal(shape_tensor, dtype=dtype)
mul = rnd * stddev_tensor
return math_ops.add(mul, mean_tensor, name=name)
def _uniform(self, shape, dtype):
key, counter = self._prepare_key_counter(shape)
return gen_stateless_random_ops_v2.stateless_random_uniform_v2(
shape=shape, key=key, counter=counter, dtype=dtype, alg=self.algorithm)
def _uniform_full_int(self, shape, dtype, name=None):
key, counter = self._prepare_key_counter(shape)
return gen_stateless_random_ops_v2.stateless_random_uniform_full_int_v2(
shape=shape,
key=key,
counter=counter,
dtype=dtype,
alg=self.algorithm,
name=name)
def uniform(self, shape, minval=0, maxval=None,
dtype=dtypes.float32, name=None):
"""Outputs random values from a uniform distribution.
The generated values follow a uniform distribution in the range
`[minval, maxval)`. The lower bound `minval` is included in the range, while
    the upper bound `maxval` is excluded. (For floating-point numbers, especially
    low-precision types like bfloat16, the result may sometimes include `maxval`
    because of rounding.)
For floats, the default range is `[0, 1)`. For ints, at least `maxval` must
be specified explicitly.
In the integer case, the random integers are slightly biased unless
`maxval - minval` is an exact power of two. The bias is small for values of
`maxval - minval` significantly smaller than the range of the output (either
`2**32` or `2**64`).
For full-range random integers, pass `minval=None` and `maxval=None` with an
integer `dtype` (for integer dtypes, `minval` and `maxval` must be both
`None` or both not `None`).
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output
tensor.
minval: A Tensor or Python value of type `dtype`, broadcastable with
`shape` (for integer types, broadcasting is not supported, so it needs
to be a scalar). The lower bound (included) on the range of random
values to generate. Pass `None` for full-range integers. Defaults to 0.
maxval: A Tensor or Python value of type `dtype`, broadcastable with
`shape` (for integer types, broadcasting is not supported, so it needs
to be a scalar). The upper bound (excluded) on the range of random
values to generate. Pass `None` for full-range integers. Defaults to 1
if `dtype` is floating point.
dtype: The type of the output.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random uniform values.
Raises:
ValueError: If `dtype` is integral and `maxval` is not specified.
"""
dtype = dtypes.as_dtype(dtype)
if dtype.is_integer:
if (minval is None) != (maxval is None):
raise ValueError("For integer dtype {}, minval and maxval must be both "
"`None` or both non-`None`; got minval={} and "
"maxval={}".format(dtype, minval, maxval))
elif maxval is None:
maxval = 1
with ops.name_scope(name, "stateful_uniform",
[shape, minval, maxval]) as name:
shape = _shape_tensor(shape)
if dtype.is_integer and minval is None:
return self._uniform_full_int(shape=shape, dtype=dtype, name=name)
minval = ops.convert_to_tensor(minval, dtype=dtype, name="min")
maxval = ops.convert_to_tensor(maxval, dtype=dtype, name="max")
if dtype.is_integer:
key, counter = self._prepare_key_counter(shape)
return gen_stateless_random_ops_v2.stateless_random_uniform_int_v2(
shape=shape,
key=key,
counter=counter,
minval=minval,
maxval=maxval,
alg=self.algorithm,
name=name)
else:
rnd = self._uniform(shape=shape, dtype=dtype)
return math_ops.add(rnd * (maxval - minval), minval, name=name)
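  # Illustrative sketch (not part of the original file): for integer dtypes the
  # caller must either bound the range or request the full range explicitly.
  #   g = Generator.from_seed(1)
  #   g.uniform([2], minval=0, maxval=10, dtype=dtypes.int32)      # bounded ints
  #   g.uniform([2], minval=None, maxval=None, dtype=dtypes.int64) # full range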
def uniform_full_int(self, shape, dtype=dtypes.uint64, name=None):
"""Uniform distribution on an integer type's entire range.
This method is the same as setting `minval` and `maxval` to `None` in the
`uniform` method.
Args:
shape: the shape of the output.
dtype: (optional) the integer type, default to uint64.
name: (optional) the name of the node.
Returns:
A tensor of random numbers of the required shape.
"""
dtype = dtypes.as_dtype(dtype)
with ops.name_scope(name, "stateful_uniform_full_int",
[shape]) as name:
shape = _shape_tensor(shape)
return self._uniform_full_int(shape=shape, dtype=dtype, name=name)
def binomial(self, shape, counts, probs, dtype=dtypes.int32, name=None):
"""Outputs random values from a binomial distribution.
The generated values follow a binomial distribution with specified count and
probability of success parameters.
Example:
```python
counts = [10., 20.]
# Probability of success.
probs = [0.8]
rng = tf.random.Generator.from_seed(seed=234)
binomial_samples = rng.binomial(shape=[2], counts=counts, probs=probs)
counts = ... # Shape [3, 1, 2]
probs = ... # Shape [1, 4, 2]
shape = [3, 4, 3, 4, 2]
rng = tf.random.Generator.from_seed(seed=1717)
# Sample shape will be [3, 4, 3, 4, 2]
binomial_samples = rng.binomial(shape=shape, counts=counts, probs=probs)
```
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output
tensor.
counts: Tensor. The counts of the binomial distribution. Must be
broadcastable with `probs`, and broadcastable with the rightmost
dimensions of `shape`.
probs: Tensor. The probability of success for the
binomial distribution. Must be broadcastable with `counts` and
broadcastable with the rightmost dimensions of `shape`.
dtype: The type of the output. Default: tf.int32
name: A name for the operation (optional).
Returns:
samples: A Tensor of the specified shape filled with random binomial
values. For each i, each samples[i, ...] is an independent draw from
the binomial distribution on counts[i] trials with probability of
success probs[i].
"""
dtype = dtypes.as_dtype(dtype)
with ops.name_scope(name, "binomial", [shape, counts, probs]) as name:
counts = ops.convert_to_tensor(counts, name="counts")
probs = ops.convert_to_tensor(probs, name="probs")
shape_tensor = _shape_tensor(shape)
return gen_stateful_random_ops.stateful_random_binomial(
self.state.handle,
self.algorithm,
shape=shape_tensor,
counts=counts,
probs=probs,
dtype=dtype,
name=name)
# TODO(wangpeng): implement other distributions
def _make_int64_keys(self, shape=()):
# New independent keys are generated via
# `new_key[i] = hash(old_key, counter+i)`, which is exactly what
# `uniform_full_int(dtype=int64)` does for PhiloxRandom_64_128_128 and
# ThreeFry_64_64_64.
return self.uniform_full_int(shape=shape, dtype=dtypes.int64)
def make_seeds(self, count=1):
"""Generates seeds for stateless random ops.
For example:
```python
seeds = get_global_generator().make_seeds(count=10)
for i in range(10):
seed = seeds[:, i]
numbers = stateless_random_normal(shape=[2, 3], seed=seed)
...
```
Args:
count: the number of seed pairs (note that stateless random ops need a
pair of seeds to invoke).
Returns:
A tensor of shape [2, count] and dtype int64.
"""
alg = self.algorithm
if alg == RNG_ALG_PHILOX or alg == RNG_ALG_THREEFRY:
keys = self._make_int64_keys(shape=[count])
# The two seeds for stateless random ops don't have individual semantics
# and are scrambled together, so setting one to zero is fine.
zeros = array_ops.zeros_like(keys)
return array_ops.stack([keys, zeros])
else:
raise ValueError("Unsupported algorithm id: %s" % alg)
def split(self, count=1):
"""Returns a list of independent `Generator` objects.
Two generators are independent of each other in the sense that the
random-number streams they generate don't have statistically detectable
correlations. The new generators are also independent of the old one.
The old generator's state will be changed (like other random-number
generating methods), so two calls of `split` will return different
new generators.
For example:
```python
gens = get_global_generator().split(count=10)
for gen in gens:
numbers = gen.normal(shape=[2, 3])
# ...
gens2 = get_global_generator().split(count=10)
# gens2 will be different from gens
```
    The new generators will be put on the current device (possibly different
from the old generator's), for example:
```python
with tf.device("/device:CPU:0"):
      gen = Generator.from_seed(1234)  # gen is on CPU
with tf.device("/device:GPU:0"):
gens = gen.split(count=10) # gens are on GPU
```
Args:
count: the number of generators to return.
Returns:
A list (length `count`) of `Generator` objects independent of each other.
The new generators have the same RNG algorithm as the old one.
"""
def _key_to_state(alg, key):
# Padding with zeros on the left. The zeros will be the counter.
return [0] * (_get_state_size(alg) - 1) + [key]
alg = self.algorithm
if alg == RNG_ALG_PHILOX or alg == RNG_ALG_THREEFRY:
keys = self._make_int64_keys(shape=[count])
return [Generator(state=_key_to_state(alg, key), alg=alg)
for key in array_ops.unstack(keys, num=count)]
else:
raise ValueError("Unsupported algorithm id: %s" % alg)
# It's not safe to create TF ops before `init_google` is called, so this is
# initialized to None and get a value the first time `get_global_generator` is
# called.
global_generator = None
@tf_export("random.get_global_generator",
"random.experimental.get_global_generator")
def get_global_generator():
"""Retrieves the global generator.
This function will create the global generator the first time it is called,
and the generator will be placed at the default device at that time, so one
needs to be careful when this function is first called. Using a generator
placed on a less-ideal device will incur performance regression.
Returns:
The global `tf.random.Generator` object.
"""
global global_generator
if global_generator is None:
with ops.init_scope():
global_generator = Generator.from_non_deterministic_state()
return global_generator
@tf_export("random.set_global_generator",
"random.experimental.set_global_generator")
def set_global_generator(generator):
"""Replaces the global generator with another `Generator` object.
  Replacing the global generator (creating a new `Generator` object and the
  `Variable` within it) does not work well with tf.function because (1) tf.function puts
restrictions on Variable creation thus reset_global_generator can't be freely
used inside tf.function; (2) redirecting a global variable to
a new object is problematic with tf.function because the old object may be
captured by a 'tf.function'ed function and still be used by it.
A 'tf.function'ed function only keeps weak references to variables,
so deleting a variable and then calling that function again may raise an
error, as demonstrated by
  random_test.py/RandomTest.testResetGlobalGeneratorBadWithDefun.
Args:
generator: the new `Generator` object.
"""
global global_generator
global_generator = generator
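# Illustrative sketch (not part of the original file): swapping in a
# deterministically seeded global generator, then drawing from it.
#   set_global_generator(Generator.from_seed(42))
#   get_global_generator().normal(shape=(2, 3))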
|
|
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutronclient._i18n import _
from neutronclient.common import utils
from neutronclient.common import validators
from neutronclient.neutron import v2_0 as neutronv20
from neutronclient.neutron.v2_0.bgp import peer as bgp_peer
# Allowed BGP Autonomous System (AS) number range
MIN_AS_NUM = 1
MAX_AS_NUM = 4294967295
def get_network_id(client, id_or_name):
return neutronv20.find_resourceid_by_name_or_id(client,
'network',
id_or_name)
def get_bgp_speaker_id(client, id_or_name):
return neutronv20.find_resourceid_by_name_or_id(client,
'bgp_speaker',
id_or_name)
def validate_speaker_attributes(parsed_args):
# Validate AS number
validators.validate_int_range(parsed_args, 'local_as',
MIN_AS_NUM, MAX_AS_NUM)
def add_common_arguments(parser):
utils.add_boolean_argument(
parser, '--advertise-floating-ip-host-routes',
help=_('Whether to enable or disable the advertisement '
'of floating-ip host routes by the BGP speaker. '
'By default floating ip host routes will be '
'advertised by the BGP speaker.'))
utils.add_boolean_argument(
parser, '--advertise-tenant-networks',
help=_('Whether to enable or disable the advertisement '
'of tenant network routes by the BGP speaker. '
'By default tenant network routes will be '
'advertised by the BGP speaker.'))
def args2body_common_arguments(body, parsed_args):
neutronv20.update_dict(parsed_args, body,
['name',
'advertise_floating_ip_host_routes',
'advertise_tenant_networks'])
class ListSpeakers(neutronv20.ListCommand):
"""List BGP speakers."""
resource = 'bgp_speaker'
list_columns = ['id', 'name', 'local_as', 'ip_version']
pagination_support = True
sorting_support = True
class ShowSpeaker(neutronv20.ShowCommand):
"""Show information of a given BGP speaker."""
resource = 'bgp_speaker'
class CreateSpeaker(neutronv20.CreateCommand):
"""Create a BGP Speaker."""
resource = 'bgp_speaker'
def add_known_arguments(self, parser):
parser.add_argument(
'name',
metavar='NAME',
help=_('Name of the BGP speaker to create.'))
parser.add_argument(
'--local-as',
metavar='LOCAL_AS',
required=True,
help=_('Local AS number. (Integer in [%(min_val)s, %(max_val)s] '
'is allowed.)') % {'min_val': MIN_AS_NUM,
'max_val': MAX_AS_NUM})
parser.add_argument(
'--ip-version',
type=int, choices=[4, 6],
default=4,
help=_('IP version for the BGP speaker (default is 4).'))
add_common_arguments(parser)
def args2body(self, parsed_args):
body = {}
validate_speaker_attributes(parsed_args)
body['local_as'] = parsed_args.local_as
body['ip_version'] = parsed_args.ip_version
args2body_common_arguments(body, parsed_args)
return {self.resource: body}
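    # Illustrative note (not part of the original command): with the hypothetical
    # arguments `--local-as 65001 --ip-version 4 speaker1`, args2body() produces
    # a request body shaped like:
    #   {'bgp_speaker': {'name': 'speaker1', 'local_as': '65001', 'ip_version': 4}}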
class UpdateSpeaker(neutronv20.UpdateCommand):
"""Update BGP Speaker's information."""
resource = 'bgp_speaker'
def add_known_arguments(self, parser):
parser.add_argument(
'--name',
help=_('Name of the BGP speaker to update.'))
add_common_arguments(parser)
def args2body(self, parsed_args):
body = {}
args2body_common_arguments(body, parsed_args)
return {self.resource: body}
class DeleteSpeaker(neutronv20.DeleteCommand):
"""Delete a BGP speaker."""
resource = 'bgp_speaker'
class AddPeerToSpeaker(neutronv20.NeutronCommand):
"""Add a peer to the BGP speaker."""
def get_parser(self, prog_name):
parser = super(AddPeerToSpeaker, self).get_parser(prog_name)
parser.add_argument(
'bgp_speaker',
metavar='BGP_SPEAKER',
help=_('ID or name of the BGP speaker.'))
parser.add_argument(
'bgp_peer',
metavar='BGP_PEER',
help=_('ID or name of the BGP peer to add.'))
return parser
def take_action(self, parsed_args):
neutron_client = self.get_client()
_speaker_id = get_bgp_speaker_id(neutron_client,
parsed_args.bgp_speaker)
_peer_id = bgp_peer.get_bgp_peer_id(neutron_client,
parsed_args.bgp_peer)
neutron_client.add_peer_to_bgp_speaker(_speaker_id,
{'bgp_peer_id': _peer_id})
print(_('Added BGP peer %(peer)s to BGP speaker %(speaker)s.') %
{'peer': parsed_args.bgp_peer,
'speaker': parsed_args.bgp_speaker},
file=self.app.stdout)
class RemovePeerFromSpeaker(neutronv20.NeutronCommand):
"""Remove a peer from the BGP speaker."""
def get_parser(self, prog_name):
parser = super(RemovePeerFromSpeaker, self).get_parser(prog_name)
parser.add_argument(
'bgp_speaker',
metavar='BGP_SPEAKER',
help=_('ID or name of the BGP speaker.'))
parser.add_argument(
'bgp_peer',
metavar='BGP_PEER',
help=_('ID or name of the BGP peer to remove.'))
return parser
def take_action(self, parsed_args):
neutron_client = self.get_client()
_speaker_id = get_bgp_speaker_id(neutron_client,
parsed_args.bgp_speaker)
_peer_id = bgp_peer.get_bgp_peer_id(neutron_client,
parsed_args.bgp_peer)
neutron_client.remove_peer_from_bgp_speaker(_speaker_id,
{'bgp_peer_id': _peer_id})
print(_('Removed BGP peer %(peer)s from BGP speaker %(speaker)s.') %
{'peer': parsed_args.bgp_peer,
'speaker': parsed_args.bgp_speaker},
file=self.app.stdout)
class AddNetworkToSpeaker(neutronv20.NeutronCommand):
"""Add a network to the BGP speaker."""
def get_parser(self, prog_name):
parser = super(AddNetworkToSpeaker, self).get_parser(prog_name)
parser.add_argument(
'bgp_speaker',
metavar='BGP_SPEAKER',
help=_('ID or name of the BGP speaker.'))
parser.add_argument(
'network',
metavar='NETWORK',
help=_('ID or name of the network to add.'))
return parser
def take_action(self, parsed_args):
neutron_client = self.get_client()
_speaker_id = get_bgp_speaker_id(neutron_client,
parsed_args.bgp_speaker)
_net_id = get_network_id(neutron_client,
parsed_args.network)
neutron_client.add_network_to_bgp_speaker(_speaker_id,
{'network_id': _net_id})
print(_('Added network %(net)s to BGP speaker %(speaker)s.') %
{'net': parsed_args.network, 'speaker': parsed_args.bgp_speaker},
file=self.app.stdout)
class RemoveNetworkFromSpeaker(neutronv20.NeutronCommand):
"""Remove a network from the BGP speaker."""
def get_parser(self, prog_name):
parser = super(RemoveNetworkFromSpeaker, self).get_parser(prog_name)
parser.add_argument(
'bgp_speaker',
metavar='BGP_SPEAKER',
help=_('ID or name of the BGP speaker.'))
parser.add_argument(
'network',
metavar='NETWORK',
help=_('ID or name of the network to remove.'))
return parser
def take_action(self, parsed_args):
neutron_client = self.get_client()
_speaker_id = get_bgp_speaker_id(neutron_client,
parsed_args.bgp_speaker)
_net_id = get_network_id(neutron_client,
parsed_args.network)
neutron_client.remove_network_from_bgp_speaker(_speaker_id,
{'network_id': _net_id})
print(_('Removed network %(net)s from BGP speaker %(speaker)s.') %
{'net': parsed_args.network, 'speaker': parsed_args.bgp_speaker},
file=self.app.stdout)
class ListRoutesAdvertisedBySpeaker(neutronv20.ListCommand):
"""List routes advertised by a given BGP speaker."""
list_columns = ['id', 'destination', 'next_hop']
resource = 'advertised_route'
pagination_support = True
sorting_support = True
def get_parser(self, prog_name):
parser = super(ListRoutesAdvertisedBySpeaker,
self).get_parser(prog_name)
parser.add_argument(
'bgp_speaker',
metavar='BGP_SPEAKER',
help=_('ID or name of the BGP speaker.'))
return parser
def call_server(self, neutron_client, search_opts, parsed_args):
_speaker_id = get_bgp_speaker_id(neutron_client,
parsed_args.bgp_speaker)
data = neutron_client.list_route_advertised_from_bgp_speaker(
_speaker_id, **search_opts)
return data
|
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import absolute_import, division, print_function, unicode_literals
from logging import getLogger
from os.path import basename, dirname
import re
import sys
from .._vendor.auxlib.ish import dals
from ..base.constants import ROOT_ENV_NAME
from ..base.context import context
from ..common.constants import NULL
from ..common.io import swallow_broken_pipe
from ..common.path import paths_equal
from ..common.serialize import json_dump
from ..models.match_spec import MatchSpec
def confirm(message="Proceed", choices=('yes', 'no'), default='yes'):
assert default in choices, default
if context.dry_run:
from ..exceptions import DryRunExit
raise DryRunExit()
options = []
for option in choices:
if option == default:
options.append('[%s]' % option[0])
else:
options.append(option[0])
message = "%s (%s)? " % (message, '/'.join(options))
choices = {alt: choice
for choice in choices
for alt in [choice, choice[0]]}
choices[''] = default
while True:
# raw_input has a bug and prints to stderr, not desirable
sys.stdout.write(message)
sys.stdout.flush()
user_choice = sys.stdin.readline().strip().lower()
if user_choice not in choices:
print("Invalid choice: %s" % user_choice)
else:
sys.stdout.write("\n")
sys.stdout.flush()
return choices[user_choice]
def confirm_yn(message="Proceed", default='yes', dry_run=NULL):
dry_run = context.dry_run if dry_run is NULL else dry_run
if dry_run:
from ..exceptions import DryRunExit
raise DryRunExit()
if context.always_yes:
return True
try:
choice = confirm(message=message, choices=('yes', 'no'), default=default)
except KeyboardInterrupt: # pragma: no cover
from ..exceptions import CondaSystemExit
raise CondaSystemExit("\nOperation aborted. Exiting.")
if choice == 'no':
from ..exceptions import CondaSystemExit
raise CondaSystemExit("Exiting.")
return True
def ensure_name_or_prefix(args, command):
if not (args.name or args.prefix):
from ..exceptions import CondaValueError
raise CondaValueError('either -n NAME or -p PREFIX option required,\n'
'try "conda %s -h" for more details' % command)
def arg2spec(arg, json=False, update=False):
try:
spec = MatchSpec(arg)
except:
from ..exceptions import CondaValueError
raise CondaValueError('invalid package specification: %s' % arg)
name = spec.name
if not spec._is_simple() and update:
from ..exceptions import CondaValueError
raise CondaValueError("""version specifications not allowed with 'update'; use
conda update %s%s or
conda install %s""" % (name, ' ' * (len(arg) - len(name)), arg))
return str(spec)
def specs_from_args(args, json=False):
return [arg2spec(arg, json=json) for arg in args]
spec_pat = re.compile(r'(?P<name>[^=<>!\s]+)' # package name # lgtm [py/regex/unmatchable-dollar]
r'\s*' # ignore spaces
r'('
r'(?P<cc>=[^=]+(=[^=]+)?)' # conda constraint
r'|'
r'(?P<pc>(?:[=!]=|[><]=?).+)' # new (pip-style) constraint(s)
r')?$',
re.VERBOSE) # lgtm [py/regex/unmatchable-dollar]
def strip_comment(line):
return line.split('#')[0].rstrip()
def spec_from_line(line):
m = spec_pat.match(strip_comment(line))
if m is None:
return None
name, cc, pc = (m.group('name').lower(), m.group('cc'), m.group('pc'))
if cc:
return name + cc.replace('=', ' ')
elif pc:
return name + ' ' + pc.replace(' ', '')
else:
return name
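# Illustrative examples (not part of the original file) of what spec_from_line()
# returns for a few common requirement lines:
#   spec_from_line('numpy')                   -> 'numpy'
#   spec_from_line('numpy=1.11')              -> 'numpy 1.11'
#   spec_from_line('numpy=1.11=py36_0')       -> 'numpy 1.11 py36_0'
#   spec_from_line('numpy >=1.11  # pinned')  -> 'numpy >=1.11'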
def specs_from_url(url, json=False):
from ..gateways.connection.download import TmpDownload
explicit = False
with TmpDownload(url, verbose=False) as path:
specs = []
try:
for line in open(path):
line = line.strip()
if not line or line.startswith('#'):
continue
if line == '@EXPLICIT':
explicit = True
if explicit:
specs.append(line)
continue
spec = spec_from_line(line)
if spec is None:
from ..exceptions import CondaValueError
raise CondaValueError("could not parse '%s' in: %s" %
(line, url))
specs.append(spec)
except IOError as e:
from ..exceptions import CondaFileIOError
raise CondaFileIOError(path, e)
return specs
def names_in_specs(names, specs):
return any(spec.split()[0] in names for spec in specs)
def disp_features(features):
if features:
return '[%s]' % ' '.join(features)
else:
return ''
@swallow_broken_pipe
def stdout_json(d):
getLogger("conda.stdout").info(json_dump(d))
def stdout_json_success(success=True, **kwargs):
result = {'success': success}
actions = kwargs.pop('actions', None)
if actions:
if 'LINK' in actions:
actions['LINK'] = [prec.dist_fields_dump() for prec in actions['LINK']]
if 'UNLINK' in actions:
actions['UNLINK'] = [prec.dist_fields_dump() for prec in actions['UNLINK']]
result['actions'] = actions
result.update(kwargs)
stdout_json(result)
def print_envs_list(known_conda_prefixes, output=True):
if output:
print("# conda environments:")
print("#")
def disp_env(prefix):
fmt = '%-20s %s %s'
default = '*' if prefix == context.default_prefix else ' '
if prefix == context.root_prefix:
name = ROOT_ENV_NAME
elif any(paths_equal(envs_dir, dirname(prefix)) for envs_dir in context.envs_dirs):
name = basename(prefix)
else:
name = ''
if output:
print(fmt % (name, default, prefix))
for prefix in known_conda_prefixes:
disp_env(prefix)
if output:
print('')
def check_non_admin():
from ..common._os import is_admin
if not context.non_admin_enabled and not is_admin():
from ..exceptions import OperationNotAllowed
raise OperationNotAllowed(dals("""
The create, install, update, and remove operations have been disabled
on your system for non-privileged users.
"""))
|
|
"""Test different accessory types: Lights."""
from pyhap.const import HAP_REPR_AID, HAP_REPR_CHARS, HAP_REPR_IID, HAP_REPR_VALUE
import pytest
from homeassistant.components.homekit.const import ATTR_VALUE
from homeassistant.components.homekit.type_lights import Light
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_BRIGHTNESS_PCT,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
ATTR_SUPPORTED_COLOR_MODES,
DOMAIN,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
EVENT_HOMEASSISTANT_START,
PERCENTAGE,
STATE_OFF,
STATE_ON,
STATE_UNKNOWN,
)
from homeassistant.core import CoreState
from homeassistant.helpers import entity_registry as er
from tests.common import async_mock_service
async def test_light_basic(hass, hk_driver, events):
"""Test light with char state."""
entity_id = "light.demo"
hass.states.async_set(entity_id, STATE_ON, {ATTR_SUPPORTED_FEATURES: 0})
await hass.async_block_till_done()
acc = Light(hass, hk_driver, "Light", entity_id, 1, None)
hk_driver.add_accessory(acc)
assert acc.aid == 1
assert acc.category == 5 # Lightbulb
assert acc.char_on.value
await acc.run()
await hass.async_block_till_done()
assert acc.char_on.value == 1
hass.states.async_set(entity_id, STATE_OFF, {ATTR_SUPPORTED_FEATURES: 0})
await hass.async_block_till_done()
assert acc.char_on.value == 0
hass.states.async_set(entity_id, STATE_UNKNOWN)
await hass.async_block_till_done()
assert acc.char_on.value == 0
hass.states.async_remove(entity_id)
await hass.async_block_till_done()
assert acc.char_on.value == 0
# Set from HomeKit
call_turn_on = async_mock_service(hass, DOMAIN, "turn_on")
call_turn_off = async_mock_service(hass, DOMAIN, "turn_off")
char_on_iid = acc.char_on.to_HAP()[HAP_REPR_IID]
hk_driver.set_characteristics(
{
HAP_REPR_CHARS: [
{HAP_REPR_AID: acc.aid, HAP_REPR_IID: char_on_iid, HAP_REPR_VALUE: 1}
]
},
"mock_addr",
)
await hass.async_add_executor_job(acc.char_on.client_update_value, 1)
await hass.async_block_till_done()
assert call_turn_on
assert call_turn_on[0].data[ATTR_ENTITY_ID] == entity_id
assert len(events) == 1
assert events[-1].data[ATTR_VALUE] == "Set state to 1"
hass.states.async_set(entity_id, STATE_ON)
await hass.async_block_till_done()
hk_driver.set_characteristics(
{
HAP_REPR_CHARS: [
{HAP_REPR_AID: acc.aid, HAP_REPR_IID: char_on_iid, HAP_REPR_VALUE: 0}
]
},
"mock_addr",
)
await hass.async_block_till_done()
assert call_turn_off
assert call_turn_off[0].data[ATTR_ENTITY_ID] == entity_id
assert len(events) == 2
assert events[-1].data[ATTR_VALUE] == "Set state to 0"
@pytest.mark.parametrize(
"supported_color_modes", [["brightness"], ["hs"], ["color_temp"]]
)
async def test_light_brightness(hass, hk_driver, events, supported_color_modes):
"""Test light with brightness."""
entity_id = "light.demo"
hass.states.async_set(
entity_id,
STATE_ON,
{ATTR_SUPPORTED_COLOR_MODES: supported_color_modes, ATTR_BRIGHTNESS: 255},
)
await hass.async_block_till_done()
acc = Light(hass, hk_driver, "Light", entity_id, 1, None)
hk_driver.add_accessory(acc)
# Initial value can be anything but 0. If it is 0, it might cause HomeKit to set the
# brightness to 100 when turning on a light on a freshly booted up server.
assert acc.char_brightness.value != 0
char_on_iid = acc.char_on.to_HAP()[HAP_REPR_IID]
char_brightness_iid = acc.char_brightness.to_HAP()[HAP_REPR_IID]
await acc.run()
await hass.async_block_till_done()
assert acc.char_brightness.value == 100
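    # Home Assistant reports brightness on a 0-255 scale while the HomeKit
    # characteristic is a percentage, so a state of 102 maps to 40 here
    # (102 / 255 * 100 == 40).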
hass.states.async_set(entity_id, STATE_ON, {ATTR_BRIGHTNESS: 102})
await hass.async_block_till_done()
assert acc.char_brightness.value == 40
# Set from HomeKit
call_turn_on = async_mock_service(hass, DOMAIN, "turn_on")
call_turn_off = async_mock_service(hass, DOMAIN, "turn_off")
hk_driver.set_characteristics(
{
HAP_REPR_CHARS: [
{HAP_REPR_AID: acc.aid, HAP_REPR_IID: char_on_iid, HAP_REPR_VALUE: 1},
{
HAP_REPR_AID: acc.aid,
HAP_REPR_IID: char_brightness_iid,
HAP_REPR_VALUE: 20,
},
]
},
"mock_addr",
)
await hass.async_block_till_done()
assert call_turn_on[0]
assert call_turn_on[0].data[ATTR_ENTITY_ID] == entity_id
assert call_turn_on[0].data[ATTR_BRIGHTNESS_PCT] == 20
assert len(events) == 1
assert (
events[-1].data[ATTR_VALUE] == f"Set state to 1, brightness at 20{PERCENTAGE}"
)
hk_driver.set_characteristics(
{
HAP_REPR_CHARS: [
{HAP_REPR_AID: acc.aid, HAP_REPR_IID: char_on_iid, HAP_REPR_VALUE: 1},
{
HAP_REPR_AID: acc.aid,
HAP_REPR_IID: char_brightness_iid,
HAP_REPR_VALUE: 40,
},
]
},
"mock_addr",
)
await hass.async_block_till_done()
assert call_turn_on[1]
assert call_turn_on[1].data[ATTR_ENTITY_ID] == entity_id
assert call_turn_on[1].data[ATTR_BRIGHTNESS_PCT] == 40
assert len(events) == 2
assert (
events[-1].data[ATTR_VALUE] == f"Set state to 1, brightness at 40{PERCENTAGE}"
)
hk_driver.set_characteristics(
{
HAP_REPR_CHARS: [
{HAP_REPR_AID: acc.aid, HAP_REPR_IID: char_on_iid, HAP_REPR_VALUE: 1},
{
HAP_REPR_AID: acc.aid,
HAP_REPR_IID: char_brightness_iid,
HAP_REPR_VALUE: 0,
},
]
},
"mock_addr",
)
await hass.async_block_till_done()
assert call_turn_off
assert call_turn_off[0].data[ATTR_ENTITY_ID] == entity_id
assert len(events) == 3
assert events[-1].data[ATTR_VALUE] == f"Set state to 0, brightness at 0{PERCENTAGE}"
# 0 is a special case for homekit, see "Handle Brightness"
# in update_state
hass.states.async_set(entity_id, STATE_ON, {ATTR_BRIGHTNESS: 0})
await hass.async_block_till_done()
assert acc.char_brightness.value == 1
hass.states.async_set(entity_id, STATE_ON, {ATTR_BRIGHTNESS: 255})
await hass.async_block_till_done()
assert acc.char_brightness.value == 100
hass.states.async_set(entity_id, STATE_ON, {ATTR_BRIGHTNESS: 0})
await hass.async_block_till_done()
assert acc.char_brightness.value == 1
# Ensure floats are handled
hass.states.async_set(entity_id, STATE_ON, {ATTR_BRIGHTNESS: 55.66})
await hass.async_block_till_done()
assert acc.char_brightness.value == 22
hass.states.async_set(entity_id, STATE_ON, {ATTR_BRIGHTNESS: 108.4})
await hass.async_block_till_done()
assert acc.char_brightness.value == 43
hass.states.async_set(entity_id, STATE_ON, {ATTR_BRIGHTNESS: 0.0})
await hass.async_block_till_done()
assert acc.char_brightness.value == 1
async def test_light_color_temperature(hass, hk_driver, events):
"""Test light with color temperature."""
entity_id = "light.demo"
hass.states.async_set(
entity_id,
STATE_ON,
{ATTR_SUPPORTED_COLOR_MODES: ["color_temp"], ATTR_COLOR_TEMP: 190},
)
await hass.async_block_till_done()
acc = Light(hass, hk_driver, "Light", entity_id, 1, None)
hk_driver.add_accessory(acc)
assert acc.char_color_temperature.value == 190
await acc.run()
await hass.async_block_till_done()
assert acc.char_color_temperature.value == 190
# Set from HomeKit
call_turn_on = async_mock_service(hass, DOMAIN, "turn_on")
char_color_temperature_iid = acc.char_color_temperature.to_HAP()[HAP_REPR_IID]
hk_driver.set_characteristics(
{
HAP_REPR_CHARS: [
{
HAP_REPR_AID: acc.aid,
HAP_REPR_IID: char_color_temperature_iid,
HAP_REPR_VALUE: 250,
}
]
},
"mock_addr",
)
await hass.async_add_executor_job(
acc.char_color_temperature.client_update_value, 250
)
await hass.async_block_till_done()
assert call_turn_on
assert call_turn_on[0].data[ATTR_ENTITY_ID] == entity_id
assert call_turn_on[0].data[ATTR_COLOR_TEMP] == 250
assert len(events) == 1
assert events[-1].data[ATTR_VALUE] == "color temperature at 250"
@pytest.mark.parametrize(
"supported_color_modes", [["ct", "hs"], ["ct", "rgb"], ["ct", "xy"]]
)
async def test_light_color_temperature_and_rgb_color(
hass, hk_driver, events, supported_color_modes
):
"""Test light with color temperature and rgb color not exposing temperature."""
entity_id = "light.demo"
hass.states.async_set(
entity_id,
STATE_ON,
{
ATTR_SUPPORTED_COLOR_MODES: supported_color_modes,
ATTR_COLOR_TEMP: 190,
ATTR_HS_COLOR: (260, 90),
},
)
await hass.async_block_till_done()
acc = Light(hass, hk_driver, "Light", entity_id, 2, None)
assert acc.char_hue.value == 260
assert acc.char_saturation.value == 90
assert not hasattr(acc, "char_color_temperature")
hass.states.async_set(entity_id, STATE_ON, {ATTR_COLOR_TEMP: 224})
await hass.async_block_till_done()
await acc.run()
await hass.async_block_till_done()
assert acc.char_hue.value == 27
assert acc.char_saturation.value == 27
hass.states.async_set(entity_id, STATE_ON, {ATTR_COLOR_TEMP: 352})
await hass.async_block_till_done()
await acc.run()
await hass.async_block_till_done()
assert acc.char_hue.value == 28
assert acc.char_saturation.value == 61
@pytest.mark.parametrize("supported_color_modes", [["hs"], ["rgb"], ["xy"]])
async def test_light_rgb_color(hass, hk_driver, events, supported_color_modes):
"""Test light with rgb_color."""
entity_id = "light.demo"
hass.states.async_set(
entity_id,
STATE_ON,
{ATTR_SUPPORTED_COLOR_MODES: supported_color_modes, ATTR_HS_COLOR: (260, 90)},
)
await hass.async_block_till_done()
acc = Light(hass, hk_driver, "Light", entity_id, 1, None)
hk_driver.add_accessory(acc)
assert acc.char_hue.value == 260
assert acc.char_saturation.value == 90
await acc.run()
await hass.async_block_till_done()
assert acc.char_hue.value == 260
assert acc.char_saturation.value == 90
# Set from HomeKit
call_turn_on = async_mock_service(hass, DOMAIN, "turn_on")
char_hue_iid = acc.char_hue.to_HAP()[HAP_REPR_IID]
char_saturation_iid = acc.char_saturation.to_HAP()[HAP_REPR_IID]
hk_driver.set_characteristics(
{
HAP_REPR_CHARS: [
{
HAP_REPR_AID: acc.aid,
HAP_REPR_IID: char_hue_iid,
HAP_REPR_VALUE: 145,
},
{
HAP_REPR_AID: acc.aid,
HAP_REPR_IID: char_saturation_iid,
HAP_REPR_VALUE: 75,
},
]
},
"mock_addr",
)
await hass.async_block_till_done()
assert call_turn_on
assert call_turn_on[0].data[ATTR_ENTITY_ID] == entity_id
assert call_turn_on[0].data[ATTR_HS_COLOR] == (145, 75)
assert len(events) == 1
assert events[-1].data[ATTR_VALUE] == "set color at (145, 75)"
async def test_light_restore(hass, hk_driver, events):
"""Test setting up an entity from state in the event registry."""
hass.state = CoreState.not_running
registry = er.async_get(hass)
registry.async_get_or_create("light", "hue", "1234", suggested_object_id="simple")
registry.async_get_or_create(
"light",
"hue",
"9012",
suggested_object_id="all_info_set",
capabilities={"supported_color_modes": ["brightness"], "max": 100},
supported_features=5,
device_class="mock-device-class",
)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START, {})
await hass.async_block_till_done()
acc = Light(hass, hk_driver, "Light", "light.simple", 1, None)
hk_driver.add_accessory(acc)
assert acc.category == 5 # Lightbulb
assert acc.chars == []
assert acc.char_on.value == 0
acc = Light(hass, hk_driver, "Light", "light.all_info_set", 2, None)
assert acc.category == 5 # Lightbulb
assert acc.chars == ["Brightness"]
assert acc.char_on.value == 0
async def test_light_set_brightness_and_color(hass, hk_driver, events):
"""Test light with all chars in one go."""
entity_id = "light.demo"
hass.states.async_set(
entity_id,
STATE_ON,
{
ATTR_SUPPORTED_COLOR_MODES: ["hs"],
ATTR_BRIGHTNESS: 255,
},
)
await hass.async_block_till_done()
acc = Light(hass, hk_driver, "Light", entity_id, 1, None)
hk_driver.add_accessory(acc)
# Initial value can be anything but 0. If it is 0, it might cause HomeKit to set the
# brightness to 100 when turning on a light on a freshly booted up server.
assert acc.char_brightness.value != 0
char_on_iid = acc.char_on.to_HAP()[HAP_REPR_IID]
char_brightness_iid = acc.char_brightness.to_HAP()[HAP_REPR_IID]
char_hue_iid = acc.char_hue.to_HAP()[HAP_REPR_IID]
char_saturation_iid = acc.char_saturation.to_HAP()[HAP_REPR_IID]
await acc.run()
await hass.async_block_till_done()
assert acc.char_brightness.value == 100
hass.states.async_set(entity_id, STATE_ON, {ATTR_BRIGHTNESS: 102})
await hass.async_block_till_done()
assert acc.char_brightness.value == 40
hass.states.async_set(entity_id, STATE_ON, {ATTR_HS_COLOR: (4.5, 9.2)})
await hass.async_block_till_done()
assert acc.char_hue.value == 4
assert acc.char_saturation.value == 9
# Set from HomeKit
call_turn_on = async_mock_service(hass, DOMAIN, "turn_on")
hk_driver.set_characteristics(
{
HAP_REPR_CHARS: [
{HAP_REPR_AID: acc.aid, HAP_REPR_IID: char_on_iid, HAP_REPR_VALUE: 1},
{
HAP_REPR_AID: acc.aid,
HAP_REPR_IID: char_brightness_iid,
HAP_REPR_VALUE: 20,
},
{
HAP_REPR_AID: acc.aid,
HAP_REPR_IID: char_hue_iid,
HAP_REPR_VALUE: 145,
},
{
HAP_REPR_AID: acc.aid,
HAP_REPR_IID: char_saturation_iid,
HAP_REPR_VALUE: 75,
},
]
},
"mock_addr",
)
await hass.async_block_till_done()
assert call_turn_on[0]
assert call_turn_on[0].data[ATTR_ENTITY_ID] == entity_id
assert call_turn_on[0].data[ATTR_BRIGHTNESS_PCT] == 20
assert call_turn_on[0].data[ATTR_HS_COLOR] == (145, 75)
assert len(events) == 1
assert (
events[-1].data[ATTR_VALUE]
== f"Set state to 1, brightness at 20{PERCENTAGE}, set color at (145, 75)"
)
async def test_light_set_brightness_and_color_temp(hass, hk_driver, events):
"""Test light with all chars in one go."""
entity_id = "light.demo"
hass.states.async_set(
entity_id,
STATE_ON,
{
ATTR_SUPPORTED_COLOR_MODES: ["color_temp"],
ATTR_BRIGHTNESS: 255,
},
)
await hass.async_block_till_done()
acc = Light(hass, hk_driver, "Light", entity_id, 1, None)
hk_driver.add_accessory(acc)
# Initial value can be anything but 0. If it is 0, it might cause HomeKit to set the
# brightness to 100 when turning on a light on a freshly booted up server.
assert acc.char_brightness.value != 0
char_on_iid = acc.char_on.to_HAP()[HAP_REPR_IID]
char_brightness_iid = acc.char_brightness.to_HAP()[HAP_REPR_IID]
char_color_temperature_iid = acc.char_color_temperature.to_HAP()[HAP_REPR_IID]
await acc.run()
await hass.async_block_till_done()
assert acc.char_brightness.value == 100
hass.states.async_set(entity_id, STATE_ON, {ATTR_BRIGHTNESS: 102})
await hass.async_block_till_done()
assert acc.char_brightness.value == 40
hass.states.async_set(entity_id, STATE_ON, {ATTR_COLOR_TEMP: 224.14})
await hass.async_block_till_done()
assert acc.char_color_temperature.value == 224
# Set from HomeKit
call_turn_on = async_mock_service(hass, DOMAIN, "turn_on")
hk_driver.set_characteristics(
{
HAP_REPR_CHARS: [
{HAP_REPR_AID: acc.aid, HAP_REPR_IID: char_on_iid, HAP_REPR_VALUE: 1},
{
HAP_REPR_AID: acc.aid,
HAP_REPR_IID: char_brightness_iid,
HAP_REPR_VALUE: 20,
},
{
HAP_REPR_AID: acc.aid,
HAP_REPR_IID: char_color_temperature_iid,
HAP_REPR_VALUE: 250,
},
]
},
"mock_addr",
)
await hass.async_block_till_done()
assert call_turn_on[0]
assert call_turn_on[0].data[ATTR_ENTITY_ID] == entity_id
assert call_turn_on[0].data[ATTR_BRIGHTNESS_PCT] == 20
assert call_turn_on[0].data[ATTR_COLOR_TEMP] == 250
assert len(events) == 1
assert (
events[-1].data[ATTR_VALUE]
== f"Set state to 1, brightness at 20{PERCENTAGE}, color temperature at 250"
)
|
|
import importlib
import json
import os
import gettext as gettext_module
from django import http
from django.conf import settings
from django.template import Context, Template
from django.utils.translation import check_for_language, to_locale, get_language
from django.utils.encoding import smart_text
from django.utils.formats import get_format_modules, get_format
from django.utils._os import upath
from django.utils.http import is_safe_url
from django.utils import six
def set_language(request):
"""
Redirect to a given url while setting the chosen language in the
session or cookie. The url and the language code need to be
specified in the request parameters.
Since this view changes how the user will see the rest of the site, it must
only be accessed as a POST request. If called as a GET request, it will
redirect to the page in the request (the 'next' parameter) without changing
any state.
"""
next = request.POST.get('next', request.GET.get('next'))
if not is_safe_url(url=next, host=request.get_host()):
next = request.META.get('HTTP_REFERER')
if not is_safe_url(url=next, host=request.get_host()):
next = '/'
response = http.HttpResponseRedirect(next)
if request.method == 'POST':
lang_code = request.POST.get('language', None)
if lang_code and check_for_language(lang_code):
if hasattr(request, 'session'):
request.session['_language'] = lang_code
else:
response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code)
return response
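# A minimal usage sketch (not part of this module): the view only changes state on
# POST, so a client submits 'language' and 'next' together. The URL name
# 'set_language' is an assumption here; it is typically provided by including
# django.conf.urls.i18n in the URLconf.
#
#   from django.core.urlresolvers import reverse
#   from django.test import Client
#   Client().post(reverse('set_language'), {'language': 'de', 'next': '/docs/'})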
def get_formats():
"""
Returns all format strings required for i18n to work
"""
FORMAT_SETTINGS = (
'DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT',
'YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT', 'SHORT_DATE_FORMAT',
'SHORT_DATETIME_FORMAT', 'FIRST_DAY_OF_WEEK', 'DECIMAL_SEPARATOR',
'THOUSAND_SEPARATOR', 'NUMBER_GROUPING',
'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS'
)
result = {}
for module in [settings] + get_format_modules(reverse=True):
for attr in FORMAT_SETTINGS:
result[attr] = get_format(attr)
formats = {}
for k, v in result.items():
if isinstance(v, (six.string_types, int)):
formats[k] = smart_text(v)
elif isinstance(v, (tuple, list)):
formats[k] = [smart_text(value) for value in v]
return formats
js_catalog_template = r"""
{% autoescape off %}
(function (globals) {
var django = globals.django || (globals.django = {});
{% if plural %}
django.pluralidx = function (n) {
var v={{ plural }};
if (typeof(v) == 'boolean') {
return v ? 1 : 0;
} else {
return v;
}
};
{% else %}
django.pluralidx = function (count) { return (count == 1) ? 0 : 1; };
{% endif %}
{% if catalog_str %}
/* gettext library */
django.catalog = {{ catalog_str }};
django.gettext = function (msgid) {
var value = django.catalog[msgid];
if (typeof(value) == 'undefined') {
return msgid;
} else {
return (typeof(value) == 'string') ? value : value[0];
}
};
django.ngettext = function (singular, plural, count) {
var value = django.catalog[singular];
if (typeof(value) == 'undefined') {
return (count == 1) ? singular : plural;
} else {
return value[django.pluralidx(count)];
}
};
django.gettext_noop = function (msgid) { return msgid; };
django.pgettext = function (context, msgid) {
var value = django.gettext(context + '\x04' + msgid);
if (value.indexOf('\x04') != -1) {
value = msgid;
}
return value;
};
django.npgettext = function (context, singular, plural, count) {
var value = django.ngettext(context + '\x04' + singular, context + '\x04' + plural, count);
if (value.indexOf('\x04') != -1) {
value = django.ngettext(singular, plural, count);
}
return value;
};
{% else %}
/* gettext identity library */
django.gettext = function (msgid) { return msgid; };
django.ngettext = function (singular, plural, count) { return (count == 1) ? singular : plural; };
django.gettext_noop = function (msgid) { return msgid; };
django.pgettext = function (context, msgid) { return msgid; };
django.npgettext = function (context, singular, plural, count) { return (count == 1) ? singular : plural; };
{% endif %}
django.interpolate = function (fmt, obj, named) {
if (named) {
return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])});
} else {
return fmt.replace(/%s/g, function(match){return String(obj.shift())});
}
};
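/* For example (illustrative only):
interpolate('%s of %s', [2, 5]) evaluates to "2 of 5", and
interpolate('%(done)s done', {done: 2}, true) evaluates to "2 done". */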
/* formatting library */
django.formats = {{ formats_str }};
django.get_format = function (format_type) {
var value = django.formats[format_type];
if (typeof(value) == 'undefined') {
return format_type;
} else {
return value;
}
};
/* add to global namespace */
globals.pluralidx = django.pluralidx;
globals.gettext = django.gettext;
globals.ngettext = django.ngettext;
globals.gettext_noop = django.gettext_noop;
globals.pgettext = django.pgettext;
globals.npgettext = django.npgettext;
globals.interpolate = django.interpolate;
globals.get_format = django.get_format;
}(this));
{% endautoescape %}
"""
def render_javascript_catalog(catalog=None, plural=None):
template = Template(js_catalog_template)
indent = lambda s: s.replace('\n', '\n ')
context = Context({
'catalog_str': indent(json.dumps(
catalog, sort_keys=True, indent=2)) if catalog else None,
'formats_str': indent(json.dumps(
get_formats(), sort_keys=True, indent=2)),
'plural': plural,
})
return http.HttpResponse(template.render(context), 'text/javascript')
def get_javascript_catalog(locale, domain, packages):
default_locale = to_locale(settings.LANGUAGE_CODE)
packages = [p for p in packages if p == 'django.conf' or p in settings.INSTALLED_APPS]
t = {}
paths = []
en_selected = locale.startswith('en')
en_catalog_missing = True
# paths of requested packages
for package in packages:
p = importlib.import_module(package)
path = os.path.join(os.path.dirname(upath(p.__file__)), 'locale')
paths.append(path)
# add the filesystem paths listed in the LOCALE_PATHS setting
paths.extend(list(reversed(settings.LOCALE_PATHS)))
# first load all English language files for defaults
for path in paths:
try:
catalog = gettext_module.translation(domain, path, ['en'])
t.update(catalog._catalog)
except IOError:
pass
else:
# 'en' is the selected language and at least one of the packages
# listed in `packages` has an 'en' catalog
if en_selected:
en_catalog_missing = False
# next load the settings.LANGUAGE_CODE translations if it isn't english
if default_locale != 'en':
for path in paths:
try:
catalog = gettext_module.translation(domain, path, [default_locale])
except IOError:
catalog = None
if catalog is not None:
t.update(catalog._catalog)
# last load the currently selected language, if it isn't identical to the default.
if locale != default_locale:
# If the currently selected language is English but it doesn't have a
# translation catalog (presumably due to being the language translated
# from) then a wrong language catalog might have been loaded in the
# previous step. It needs to be discarded.
if en_selected and en_catalog_missing:
t = {}
else:
locale_t = {}
for path in paths:
try:
catalog = gettext_module.translation(domain, path, [locale])
except IOError:
catalog = None
if catalog is not None:
locale_t.update(catalog._catalog)
if locale_t:
t = locale_t
plural = None
if '' in t:
for l in t[''].split('\n'):
if l.startswith('Plural-Forms:'):
plural = l.split(':', 1)[1].strip()
if plural is not None:
# this should actually be a compiled function of a typical plural-form:
# Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;
plural = [el.strip() for el in plural.split(';') if el.strip().startswith('plural=')][0].split('=', 1)[1]
pdict = {}
maxcnts = {}
catalog = {}
for k, v in t.items():
if k == '':
continue
if isinstance(k, six.string_types):
catalog[k] = v
elif isinstance(k, tuple):
msgid = k[0]
cnt = k[1]
maxcnts[msgid] = max(cnt, maxcnts.get(msgid, 0))
pdict.setdefault(msgid, {})[cnt] = v
else:
raise TypeError(k)
for k, v in pdict.items():
catalog[k] = [v.get(i, '') for i in range(maxcnts[k] + 1)]
return catalog, plural
def null_javascript_catalog(request, domain=None, packages=None):
"""
Returns "identity" versions of the JavaScript i18n functions -- i.e.,
versions that don't actually do anything.
"""
return render_javascript_catalog()
def javascript_catalog(request, domain='djangojs', packages=None):
"""
Returns the selected language catalog as a javascript library.
Receives the list of packages to check for translations in the
packages parameter either from an infodict or as a +-delimited
string from the request. Default is 'django.conf'.
Additionally you can override the gettext domain for this view,
but usually you don't want to do that, as JavaScript messages
go to the djangojs domain. But this might be needed if you
deliver your JavaScript source from Django templates.
"""
locale = to_locale(get_language())
if request.GET and 'language' in request.GET:
if check_for_language(request.GET['language']):
locale = to_locale(request.GET['language'])
if packages is None:
packages = ['django.conf']
if isinstance(packages, six.string_types):
packages = packages.split('+')
catalog, plural = get_javascript_catalog(locale, domain, packages)
return render_javascript_catalog(catalog, plural)
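# A minimal wiring sketch for this view (illustrative, matching the URLconf style
# of this Django era; the package name is a placeholder):
#
#   js_info_dict = {'packages': ('your.app.package',)}
#   urlpatterns = patterns('',
#       (r'^jsi18n/$', 'django.views.i18n.javascript_catalog', js_info_dict),
#   )
#
# A template would then load the generated catalog with a plain <script> tag
# pointing at that URL before calling gettext()/interpolate() in JavaScript.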
|
|
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2014 TrilioData, Inc
# Copyright (c) 2015 EMC Corporation
# Copyright (C) 2015 Kevin Fox <[email protected]>
# Copyright (C) 2015 Tom Barron <[email protected]>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of a backup service that uses Swift as the backend
**Related Flags**
:backup_swift_url: The URL of the Swift endpoint (default: None, use catalog).
:backup_swift_auth_url: The URL of the Keystone endpoint for authentication
(default: None, use catalog).
:swift_catalog_info: Info to match when looking for swift in the service
catalog.
:keystone_catalog_info: Info to match when looking for keystone in the service
catalog.
:backup_swift_object_size: The size in bytes of the Swift objects used
for volume backups (default: 52428800).
:backup_swift_retry_attempts: The number of retries to make for Swift
operations (default: 10).
:backup_swift_retry_backoff: The backoff time in seconds between retrying
failed Swift operations (default: 10).
:backup_compression_algorithm: Compression algorithm to use for volume
backups. Supported options are:
None (to disable), zlib and bz2 (default: zlib)
:backup_swift_ca_cert_file: The location of the CA certificate file to use
for swift client requests (default: None)
:backup_swift_auth_insecure: If true, bypass verification of server's
certificate for SSL connections (default: False)
"""
import hashlib
import socket
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
import six
from swiftclient import client as swift
from cinder.backup import chunkeddriver
from cinder import exception
from cinder.i18n import _
from cinder.i18n import _LE
from cinder import interface
LOG = logging.getLogger(__name__)
swiftbackup_service_opts = [
cfg.URIOpt('backup_swift_url',
help='The URL of the Swift endpoint'),
cfg.URIOpt('backup_swift_auth_url',
help='The URL of the Keystone endpoint'),
cfg.StrOpt('swift_catalog_info',
default='object-store:swift:publicURL',
help='Info to match when looking for swift in the service '
'catalog. Format is: separated values of the form: '
'<service_type>:<service_name>:<endpoint_type> - '
'Only used if backup_swift_url is unset'),
cfg.StrOpt('keystone_catalog_info',
default='identity:Identity Service:publicURL',
help='Info to match when looking for keystone in the service '
'catalog. Format is: separated values of the form: '
'<service_type>:<service_name>:<endpoint_type> - '
'Only used if backup_swift_auth_url is unset'),
cfg.StrOpt('backup_swift_auth',
default='per_user',
help='Swift authentication mechanism'),
cfg.StrOpt('backup_swift_auth_version',
default='1',
help='Swift authentication version. Specify "1" for auth 1.0'
', or "2" for auth 2.0 or "3" for auth 3.0'),
cfg.StrOpt('backup_swift_tenant',
help='Swift tenant/account name. Required when connecting'
' to an auth 2.0 system'),
cfg.StrOpt('backup_swift_user_domain',
default=None,
help='Swift user domain name. Required when connecting'
' to an auth 3.0 system'),
cfg.StrOpt('backup_swift_project_domain',
default=None,
help='Swift project domain name. Required when connecting'
' to an auth 3.0 system'),
cfg.StrOpt('backup_swift_project',
default=None,
help='Swift project/account name. Required when connecting'
' to an auth 3.0 system'),
cfg.StrOpt('backup_swift_user',
help='Swift user name'),
cfg.StrOpt('backup_swift_key',
secret=True,
help='Swift key for authentication'),
cfg.StrOpt('backup_swift_container',
default='volumebackups',
help='The default Swift container to use'),
cfg.IntOpt('backup_swift_object_size',
default=52428800,
help='The size in bytes of Swift backup objects'),
cfg.IntOpt('backup_swift_block_size',
default=32768,
help='The size in bytes that changes are tracked '
'for incremental backups. backup_swift_object_size '
'has to be multiple of backup_swift_block_size.'),
cfg.IntOpt('backup_swift_retry_attempts',
default=3,
help='The number of retries to make for Swift operations'),
cfg.IntOpt('backup_swift_retry_backoff',
default=2,
help='The backoff time in seconds between Swift retries'),
cfg.BoolOpt('backup_swift_enable_progress_timer',
default=True,
help='Enable or Disable the timer to send the periodic '
'progress notifications to Ceilometer when backing '
'up the volume to the Swift backend storage. The '
'default value is True to enable the timer.'),
cfg.StrOpt('backup_swift_ca_cert_file',
help='Location of the CA certificate file to use for swift '
'client requests.'),
cfg.BoolOpt('backup_swift_auth_insecure',
default=False,
help='Bypass verification of server certificate when '
'making SSL connection to Swift.'),
]
CONF = cfg.CONF
CONF.register_opts(swiftbackup_service_opts)
@interface.backupdriver
class SwiftBackupDriver(chunkeddriver.ChunkedBackupDriver):
"""Provides backup, restore and delete of backup objects within Swift."""
def __init__(self, context, db_driver=None):
chunk_size_bytes = CONF.backup_swift_object_size
sha_block_size_bytes = CONF.backup_swift_block_size
backup_default_container = CONF.backup_swift_container
enable_progress_timer = CONF.backup_swift_enable_progress_timer
super(SwiftBackupDriver, self).__init__(context, chunk_size_bytes,
sha_block_size_bytes,
backup_default_container,
enable_progress_timer,
db_driver)
if CONF.backup_swift_url is None:
self.swift_url = None
info = CONF.swift_catalog_info
try:
service_type, service_name, endpoint_type = info.split(':')
except ValueError:
raise exception.BackupDriverException(_(
"Failed to parse the configuration option "
"'swift_catalog_info', must be in the form "
"<service_type>:<service_name>:<endpoint_type>"))
for entry in context.service_catalog:
if entry.get('type') == service_type:
# It is assumed that service_types are unique within
# the service catalog, so once the correct one is found
# it is safe to break out of the loop
self.swift_url = entry.get(
'endpoints')[0].get(endpoint_type)
break
else:
self.swift_url = '%s%s' % (CONF.backup_swift_url,
context.project_id)
if self.swift_url is None:
raise exception.BackupDriverException(_(
"Could not determine which Swift endpoint to use. This can "
"either be set in the service catalog or with the "
"cinder.conf config option 'backup_swift_url'."))
if CONF.backup_swift_auth_url is None:
self.auth_url = None
info = CONF.keystone_catalog_info
try:
service_type, service_name, endpoint_type = info.split(':')
except ValueError:
raise exception.BackupDriverException(_(
"Failed to parse the configuration option "
"'keystone_catalog_info', must be in the form "
"<service_type>:<service_name>:<endpoint_type>"))
for entry in context.service_catalog:
if entry.get('type') == service_type:
# It is assumed that service_types are unique within
# the service catalog, so once the correct one is found
# it is safe to break out of the loop
self.auth_url = entry.get(
'endpoints')[0].get(endpoint_type)
break
else:
self.auth_url = CONF.backup_swift_auth_url
if self.auth_url is None:
raise exception.BackupDriverException(_(
"Could not determine which Keystone endpoint to use. This can "
"either be set in the service catalog or with the "
"cinder.conf config option 'backup_swift_auth_url'."))
LOG.debug("Using swift URL %s", self.swift_url)
LOG.debug("Using auth URL %s", self.auth_url)
self.swift_attempts = CONF.backup_swift_retry_attempts
self.swift_backoff = CONF.backup_swift_retry_backoff
LOG.debug('Connect to %s in "%s" mode', CONF.backup_swift_url,
CONF.backup_swift_auth)
self.backup_swift_auth_insecure = CONF.backup_swift_auth_insecure
if CONF.backup_swift_auth == 'single_user':
if CONF.backup_swift_user is None:
LOG.error(_LE("single_user auth mode enabled, "
"but %(param)s not set"),
{'param': 'backup_swift_user'})
raise exception.ParameterNotFound(param='backup_swift_user')
os_options = {}
if CONF.backup_swift_user_domain is not None:
os_options['user_domain_name'] = CONF.backup_swift_user_domain
if CONF.backup_swift_project_domain is not None:
os_options['project_domain_name'] = (
CONF.backup_swift_project_domain
)
if CONF.backup_swift_project is not None:
os_options['project_name'] = CONF.backup_swift_project
self.conn = swift.Connection(
authurl=self.auth_url,
auth_version=CONF.backup_swift_auth_version,
tenant_name=CONF.backup_swift_tenant,
user=CONF.backup_swift_user,
key=CONF.backup_swift_key,
os_options=os_options,
retries=self.swift_attempts,
starting_backoff=self.swift_backoff,
insecure=self.backup_swift_auth_insecure,
cacert=CONF.backup_swift_ca_cert_file)
else:
self.conn = swift.Connection(retries=self.swift_attempts,
preauthurl=self.swift_url,
preauthtoken=self.context.auth_token,
starting_backoff=self.swift_backoff,
insecure=self.backup_swift_auth_insecure,
cacert=CONF.backup_swift_ca_cert_file)
class SwiftObjectWriter(object):
def __init__(self, container, object_name, conn):
self.container = container
self.object_name = object_name
self.conn = conn
self.data = bytearray()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def write(self, data):
self.data += data
def close(self):
reader = six.BytesIO(self.data)
try:
etag = self.conn.put_object(self.container, self.object_name,
reader,
content_length=len(self.data))
except socket.error as err:
raise exception.SwiftConnectionFailed(reason=err)
LOG.debug('swift MD5 for %(object_name)s: %(etag)s',
{'object_name': self.object_name, 'etag': etag, })
md5 = hashlib.md5(self.data).hexdigest()
LOG.debug('backup MD5 for %(object_name)s: %(md5)s',
{'object_name': self.object_name, 'md5': md5})
if etag != md5:
err = _('error writing object to swift, MD5 of object in '
'swift %(etag)s is not the same as MD5 of object sent '
'to swift %(md5)s') % {'etag': etag, 'md5': md5}
raise exception.InvalidBackup(reason=err)
return md5
class SwiftObjectReader(object):
def __init__(self, container, object_name, conn):
self.container = container
self.object_name = object_name
self.conn = conn
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def read(self):
try:
(_resp, body) = self.conn.get_object(self.container,
self.object_name)
except socket.error as err:
raise exception.SwiftConnectionFailed(reason=err)
return body
def put_container(self, container):
"""Create the container if needed. No failure if it pre-exists."""
try:
self.conn.put_container(container)
except socket.error as err:
raise exception.SwiftConnectionFailed(reason=err)
return
def get_container_entries(self, container, prefix):
"""Get container entry names"""
try:
swift_objects = self.conn.get_container(container,
prefix=prefix,
full_listing=True)[1]
except socket.error as err:
raise exception.SwiftConnectionFailed(reason=err)
swift_object_names = [swift_obj['name'] for swift_obj in swift_objects]
return swift_object_names
def get_object_writer(self, container, object_name, extra_metadata=None):
"""Return a writer object.
Returns a writer object that stores a chunk of volume data in a
Swift object store.
"""
return self.SwiftObjectWriter(container, object_name, self.conn)
def get_object_reader(self, container, object_name, extra_metadata=None):
"""Return reader object.
Returns a reader object that retrieves a chunk of backed-up volume data
from a Swift object store.
"""
return self.SwiftObjectReader(container, object_name, self.conn)
def delete_object(self, container, object_name):
"""Deletes a backup object from a Swift object store."""
try:
self.conn.delete_object(container, object_name)
except socket.error as err:
raise exception.SwiftConnectionFailed(reason=err)
def _generate_object_name_prefix(self, backup):
"""Generates a Swift backup object name prefix."""
az = 'az_%s' % self.az
backup_name = '%s_backup_%s' % (az, backup['id'])
volume = 'volume_%s' % (backup['volume_id'])
timestamp = timeutils.utcnow().strftime("%Y%m%d%H%M%S")
prefix = volume + '/' + timestamp + '/' + backup_name
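# e.g. volume_<volume_id>/20150115143025/az_<availability_zone>_backup_<backup_id>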
LOG.debug('generate_object_name_prefix: %s', prefix)
return prefix
def update_container_name(self, backup, container):
"""Use the container name as provided - don't update."""
return container
def get_extra_metadata(self, backup, volume):
"""Swift driver does not use any extra metadata."""
return None
def get_backup_driver(context):
return SwiftBackupDriver(context)
|
|
import numpy as np
import scipy.sparse as sp
from scipy.sparse import csr_matrix
from itertools import chain, izip
"""
authors: Clement Gehring
contact: [email protected]
date: May 2015
"""
################## VARIOUS INTERFACES #######################################
""" Generic map from a state (as an array), to a feature vector (as an array)
"""
class Projector(object):
def __init__(self):
pass
""" Project a vector (or matrix of row vectors) to a corresponding
feature vector. It should handle 1-D arrays and 2-D arrays.
"""
def __call__(self, state):
raise NotImplementedError("Subclasses should implement this!")
@property
def size(self):
raise NotImplementedError("Subclasses should implement this!")
""" Generic map from a state-action pair (as two arrays), to a feature
vector (as an array)
"""
class StateActionProjector(object):
def __init__(self):
pass
""" Project two vectors (or two matrices of row vectors) to a corresponding
feature vector. It should handle 1-D arrays and 2-D arrays for both
arguments and any combination of these cases, i.e., one state but
several actions and vice-versa.
"""
def __call__(self, state, action):
raise NotImplementedError("Subclasses should implement this!")
@property
def size(self):
raise NotImplementedError("Subclasses should implement this!")
""" Hashing an array of indices to a single index.
Mostly used for tile coding.
"""
class Hashing(object):
def __init__(self, **kargs):
pass
""" Hash several indices (typically, one per dimension) onto
one index (typically, index of a tile). This could be a simple
cartesian-product, i.e., unique index for every combination, or
some sort of randomized hash function, e.g., UNH.
Must be able to deal with 2D arrays of indices.
"""
def __call__(self, indices):
raise NotImplementedError("Subclasses should implement this!")
################## HELPER CLASSES FOR STATE PROJECTOR TO #####################
################## STATE-ACTION PROJECTOR CONVERSION #####################
""" Simple method that ensures both arrays are 2D and that they share the same
number of rows (for when only one row was given for one argument but not
the other).
"""
def tile_and_adjust_state_action(state, action):
# make everything 2-D
if state.ndim == 1:
state = state.reshape((1,-1))
if action.ndim == 1:
action = action.reshape((1,-1))
# if one is 2-D but not the other, tile such that the dimensions
# match.
if state.shape[0] == 1 and action.shape[0]> 1:
state = np.tile(state, (action.shape[0], 1))
elif action.shape[0] == 1 and state.shape[0]> 1:
action = np.tile(action, (state.shape[0], 1))
return state, action
""" Simple state-action projector that simply treats the actions as
extra dimensions and feeds it to a projector.
"""
class ConcatStateAction(StateActionProjector):
def __init__(self, projector):
self.projector = projector
def __call__(self, state, action):
state, action = tile_and_adjust_state_action(state, action)
sa = np.hstack((state, action))
return self.projector(sa)
@property
def size(self):
return self.projector.size
""" Simple state-action projector that simply ignores the action. Mainly
to be used when only a value function is needed.
"""
class RemoveAction(StateActionProjector):
def __init__(self, projector):
self.projector = projector
def __call__(self, state, action):
state, action = tile_and_adjust_state_action(state, action)
return self.projector(state)
@property
def size(self):
return self.projector.size
""" Create a tabular actions representaton with a state projector. The output
vectors are padded with zeros such that the total dimension is
n*num_actions, where n is the output dimension of the projector.
If action i is given, then the whole vector is zero with the exception
of columns n*i to n*(i+1), where the projected state will be encoded.
The resulting output can be either dense or sparse.
"""
class TabularAction(StateActionProjector):
def __init__(self, projector, num_actions, sparse = True):
self.phi = projector
self.__size = self.phi.size * num_actions
self.num_actions = num_actions
self.sparse = sparse
def __call__(self, state, action):
state, action = tile_and_adjust_state_action(state, action)
phi_s = self.phi(state)
phi_s = csr_matrix(phi_s)
# this step assumes that, if sparse, each row has the same number of
# non zero elements.
action = np.tile(action, (1, phi_s.indptr[1] - phi_s.indptr[0])).reshape(-1).astype('int')
phi_sa = csr_matrix((phi_s.data,
phi_s.indices + action*self.phi.size,
phi_s.indptr),
shape = (phi_s.shape[0], self.size))
if not self.sparse:
phi_sa = phi_sa.toarray()
return phi_sa
@property
def size(self):
return self.__size
################## HELPER CLASS FOR INDEX TO VECTOR CONVERSION ################
""" Projector converting a projector, which generate indices, to a
projector generating sparse vectors (as a csr matrix)
"""
class IndexToBinarySparse(Projector):
""" Constructor to go from a projector generating indices to a
projector generating sparse vectors (as a csr matrix)
index_projector: the projector generating indices, it needs to
be able to handle 2-D arrays
"""
def __init__(self, index_projector, normalize = False):
super(IndexToBinarySparse, self).__init__()
self.index_projector = index_projector
self.normalize = normalize
if normalize:
self.entry_value = 1.0/np.sqrt(self.index_projector.nonzeros)
else:
self.entry_value = 1.0
def __call__(self, state):
# generate indices for a single (or several) binary sparse vector(s).
indices = self.index_projector(state)
if indices.ndim == 1:
indices = indices.reshape((1,-1))
# set value of all active features
vals = np.empty(indices.size)
vals[:] = self.entry_value
# create row pointers, this assumes each row has the same number
# of non-zero entries
row_ptr = np.arange(0, indices.size+1, indices.shape[1])
# flatten the column indices generated earlier
col_ind = indices.flatten()
return csr_matrix((vals, col_ind, row_ptr),
shape = (indices.shape[0], self.index_projector.size))
@property
def size(self):
return self.index_projector.size
@property
def xTxNorm(self):
return 1.0 if self.normalize else self.index_projector.nonzeros
""" Projector converting a projector, which generate indices, to a
projector generating dense vectors (as an array)
"""
class IndexToDense(Projector):
""" Constructor to go from a projector generating indices to a
projector generating dense vectors (as an array)
index_projector: the projector generating indices, it needs to
be able to handle 2-D arrays
"""
def __init__(self, index_projector, normalize = False):
super(IndexToDense, self).__init__()
self.index_projector = index_projector
self.normalize = normalize
if normalize:
self.entry_value = 1.0/np.sqrt(self.index_projector.nonzeros)
else:
self.entry_value = 1.0
def __call__(self, state):
# generate indices for a single (or several) binary vectors
indices = self.index_projector(state)
if indices.ndim == 1:
indices = indices.reshape((1,-1))
# allocate dense array
output = np.zeros((indices.shape[0], self.size))
# create row indices
row_ind = np.tile(np.arange(indices.shape[0]).reshape((-1,1)),
(1, indices.shape[1])).flatten()
# set values for all active features
output[row_ind, indices.flatten()] = self.entry_value
# squeeze out useless dimensions, if any
return output.squeeze()
@property
def size(self):
return self.index_projector.size
@property
def xTxNorm(self):
return 1.0 if self.normalize else self.index_projector.nonzeros
""" Projector concatenating several projectors into the same representation
by concatenating their outputs.
"""
class ConcatProjector(Projector):
def __init__(self, projectors):
super(ConcatProjector, self).__init__()
self.phis = projectors
def __call__(self, state):
return np.hstack([phi(state) for phi in self.phis])
@property
def size(self):
return sum([phi.size for phi in self.phis])
class SparseRPTilecoding(IndexToBinarySparse):
def __init__(self, index_projector, random_proj, normalize = False, output_dense = True):
super(SparseRPTilecoding, self).__init__(index_projector, normalize = normalize)
self.random_proj = random_proj
self.output_dense = output_dense
def __call__(self, X):
phi = super(self.__class__, self).__call__(X)
pphi = self.random_proj.dot(phi.T).T
if self.output_dense:
pphi = pphi.todense()
return pphi
@property
def size(self):
return self.random_proj.shape[0]
################## TILE CODING KERNEL FUNCTION ###############################
class DenseKernel(IndexToDense):
def __call__(self, X1, X2=None):
phi1 = super(self.__class__, self).__call__(X1)
if X2 is None:
return phi1.dot(phi1.T)
else:
phi2 = super(self.__class__, self).__call__(X2)
return phi1.dot(phi2.T)
class SparseKernel(IndexToBinarySparse):
def __call__(self, X1, X2=None):
phi1 = super(self.__class__, self).__call__(X1)
if X2 is None:
return phi1.dot(phi1.T)
else:
phi2 = super(self.__class__, self).__call__(X2)
return phi1.dot(phi2.T)
################## TILE CODING IMPLEMENTATION ################################
""" Represents a series of layer of tile coding. This is equivalent to a single
discretization of the input space.
"""
class Tiling(object):
""" Constructor for a set of tilings.
input_index: array (or list) of the input indices to be considered. This allows
specifying which inputs the tilings are defined on.
ntiles: integer, or array of integers, specifying how many uniform
divisions for each dimension (or all dimensions if a single
integer is given). Each layer in this set will have the same
number of divisions in each dimension.
ntilings: The number of individual layers in this set of tilings.
state_range: range of each dimension
offset: (optional) the offsets between each layer in this set of
tilings. By default, each layer is uniformly offset from
each other. Shape: (#dimensions, ntilings), i.e. for each
dimension you have to specify the offset of each tiling. For
dimension d, the offset should be negative and > -1/ntiles[d].
So if you want random offsets for one dimension, you could use
something like this:
-1.0/ntiles[d] * np.random.random_sample(size=ntilings)
hashing: (optional) map from the individual bin index for each
dimension to a tile index. By default, this is assumed
to be a cartesian product, i.e., each combination is
mapped to a unique index. This is equivalent to laying a
grid over all input dimensions at once. Alternatively,
this map could be defined by a random hash function
(e.g., UNH).
"""
def __init__(self,
input_index,
ntiles,
ntilings,
state_range,
rnd_stream,
offset = None,
hashing = None):
self.hashing = hashing
if isinstance(ntiles, int):
ntiles = np.array([ntiles]*len(input_index), dtype='int')
else:
ntiles = np.array(ntiles)
self.state_range = [state_range[0][input_index].copy().astype(float)[None,:,None],
state_range[1][input_index].copy().astype(float)[None,:,None]]
if ntiles.ndim > 1:
ntiles = ntiles[None,:,:]
else:
ntiles = ntiles[None,:,None]
self.state_range[0] = self.state_range[0] - (self.state_range[1]-self.state_range[0])/(ntiles-1)
self.offset = offset
if offset is None:
self.offset = np.empty((ntiles.shape[1], ntilings))
for i in xrange(ntiles.shape[1]):
self.offset[i,:] = -rnd_stream.random_sample(ntilings)/ntiles[0,i]
if self.hashing is None:
self.hashing = IdentityHash(ntiles)
self.input_index = np.array(input_index, dtype='int')
self.size = ntilings*(self.hashing.memory)
self.index_offset = (self.hashing.memory * np.arange(ntilings)).astype('int')
self.ntiles = ntiles
def __call__(self, state):
return self.getIndices(state)
def getIndices(self, state):
if state.ndim == 1:
state = state.reshape((1,-1))[:,:,None]
else:
state = state[:,:,None]
nstate = (state[:, self.input_index, :] - self.state_range[0])/(self.state_range[1]-self.state_range[0])
indices = ((self.offset[None,:,:] + nstate)*self.ntiles).astype(np.int)
return self.hashing(indices) + self.index_offset[None,:]
@property
def ntilings(self):
return self.offset.shape[1]
""" Full tile coding implementation. This represents a projector, from states
to features.
"""
class TileCoding(Projector):
""" Constructor for a tile coding projector. The constructor builds
several sets of individual tilings based on the input arguments.
input_indicies: a list of arrays of indices. Each array of indicies
specifies which input dimensions are considered by
each set of tilings. There will be as many sets of
tilings as there are array of indices.
e.g., input_indices = [ [0,1], [1,2] ] will generate
two sets of tilings, one defined on the first and
second dimension, and the other on the second and
third dimension.
ntiles: list of a mix of integers or arrays of integers. This specifies
how fine the discretization is in each set of tilings. There
should be an element (either an integer or an array of integers) for
each set of tilings. If a set of tilings is given an integer,
each dimension is discretized in that many bins. If a set of
tilings is given an array, it should be of the same size as
the number of input dimensions it uses. In this case, it will
discretize each dimension in as many bins as the corresponding
integer in the given array.
e.g., ntiles = [ 4, [2,6] ] will generate two sets of tilings
where the first discretizes all its input dimensions in 4 bins,
and the second discretizes its first input dimension in 2 bins
and its second, in 6 bins.
ntilings: array (or list) of integers corresponding to how many
individual layers are in each set of tilings. In this
implementation, individual layers in the same set are
uniformly offset from each other.
hashing: either None, or list of hashing functions. This specifies
the hashing function to be used by each set of tilings. It
is assumed that each individual layer within the same set
shares the same hash function. If None is given, then each
set of tilings will use a cartesian product, i.e., each
combination of indices is mapped to a unique tile. This is
equivalent to laying a n-d grid on the input dimensions.
state_range: range of each dimension
offsets: (optional) the offsets between the layers for each set of
tilings. By default, all layers are uniformly offset from
each other. If you provide a list of lists of offsets (which
is recommended), this must hold: len(offsets) ==
len(input_indices). Each item in offsets is passed to the
constructor of Tiling, so see there for further
documentation.
bias_term: (optional) boolean specifying whether to add an extra bias term which
is always on. By default, a bias_term is added.
"""
def __init__(self,
input_indices,
ntiles,
ntilings,
hashing,
state_range,
rnd_stream = None,
offsets = None,
bias_term = True):
super(TileCoding, self).__init__()
if hashing is None:
hashing = [None]*len(ntilings)
if offsets is None and rnd_stream is None:
raise Exception('Either offsets for each tiling or a random stream (numpy) needs to be given in the constructor')
if offsets is None:
offsets = [None] * len(input_indices)
self.state_range = np.array(state_range)
self.tilings = [Tiling(in_index, nt, t, self.state_range, rnd_stream, offset=o, hashing = h)
for in_index, nt, t, h, o
in zip(input_indices, ntiles, ntilings, hashing, offsets)]
self.__size = sum(map(lambda x: x.size, self.tilings))
self.bias_term = bias_term
self.index_offset = np.zeros(len(ntilings), dtype = 'int')
self.index_offset[1:] = np.cumsum(map(lambda x: x.size, self.tilings[:-1]))
self.index_offset = np.hstack( [np.array([off]*t, dtype='int')
for off, t in zip(self.index_offset, ntilings)])
if bias_term:
self.index_offset = np.hstack((self.index_offset, np.array(self.__size, dtype='int')))
self.__size += 1
self.__size = int(self.__size)
""" Map a state vector, or several state vectors, to its corresponding
tile indices.
"""
def __call__(self, state):
if state.ndim == 1:
state = state.reshape((1,-1))
# add bias term if needed, concatenate set of indices of all
# the sets of tilings.
if self.bias_term:
indices = np.hstack(chain((t(state) for t in self.tilings),
[np.zeros((state.shape[0], 1), dtype='int')])) \
+ self.index_offset
else:
indices = np.hstack((t(state) for t in self.tilings)) \
+ self.index_offset
return indices.squeeze()
@property
def size(self):
return self.__size
@property
def nonzeros(self):
return np.sum([t.ntilings for t in self.tilings]) + (1 if self.bias_term else 0)
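# Illustrative usage sketch (the numbers are arbitrary; assumes a 2-D state in
# [0, 1]^2): one set of 5 layered 10x10 tilings over both dimensions, wrapped so
# it emits dense feature vectors.
#
#   state_range = [np.zeros(2), np.ones(2)]
#   rnd = np.random.RandomState(0)
#   tc = TileCoding(input_indices=[np.arange(2)], ntiles=[10], ntilings=[5],
#                   hashing=None, state_range=state_range, rnd_stream=rnd)
#   phi = IndexToDense(tc)
#   features = phi(np.array([0.3, 0.7]))    # dense vector of length tc.size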
class UNH(Hashing):
# constants were taken from rlpark's implementation.
increment = 470
def __init__(self, memory, rnd_stream):
super(UNH, self).__init__()
self.rndseq = np.zeros(16384, dtype='int')
self.memory = int(memory)
for i in range(4):
self.rndseq = self.rndseq << 8 | rnd_stream.random_integers(np.iinfo('int16').min,
np.iinfo('int16').max,
16384) & 0xff
def __call__(self, indices):
rnd_seq = self.rndseq
a = self.increment*np.arange(indices.shape[1])
index = indices + a[None,:,None]
index = index - (index.astype(np.int)/rnd_seq.size)*rnd_seq.size
hashed_index = (np.sum(rnd_seq[index], axis=1)).astype(np.int)
return (hashed_index - (hashed_index/self.memory).astype(np.int)*self.memory).astype('int')
class IdentityHash(Hashing):
def __init__(self, dims, wrap = False):
super(IdentityHash, self).__init__()
self.memory = np.prod(dims)
self.dims = dims.astype('int')
self.wrap = wrap
self.dim_offset = np.cumprod(np.vstack((np.ones((self.dims.shape[2],1)), self.dims[0,:0:-1,:])),
axis = 0).astype('int')[None,::-1,:]
def __call__(self, indices):
if self.wrap:
indices = np.remainder(indices, self.dims)
else:
indices = np.clip(indices, 0, self.dims-1)
return np.sum(indices*self.dim_offset, axis=1)
################## END OF TILE CODING IMPLEMENTATION #########################
################## RBF IMPLEMENTATION ########################################
class RBFCoding(Projector):
""" Constructor for an RBF encoding.
widths: scaling of the dimensions when computing the distance. Each
dimension needs a scale. If only a 1-D array is given, all
RBFs are assumed to have the same scaling, otherwise, it is
assumed that there is a row specifying the scale for each
RBF.
centers: centers of the RBFs. The number of rows corresponds to the number
of RBFs. The number of columns should be equal to the input
dimension. Each row is a center for an RBF.
normalized: Boolean to decide whether the RBFs should be normalized.
bias_term: Boolean to decide whether the output should be augmented
with a constant 1.0 bias term.
"""
def __init__(self,
widths,
centers,
normalized = False,
bias_term = True,
**params):
super(RBFCoding, self).__init__()
# the centers of the rbfs
self.c = centers.T[None,:,:]
# the widths of the rbfs, each rbf can have different widths
if widths.ndim == 1:
self.w = widths[None,:,None]
else:
self.w = widths.T[None,:,:]
# should the output of the rbfs sum to one
self.normalized = normalized
# include a bias term (always equal to one)
self.bias_term = bias_term
# size of the encoded vectors
self.__size = centers.shape[0]
# if bias term is included, increment the size
if bias_term:
self.__size += 1
def __call__(self, state):
# if only one 1-D array is given, reshape to a compatible shape
if state.ndim == 1:
state = state.reshape((1,-1))
# allocate and set bias term if needed
last_index = self.size
output = np.empty((state.shape[0], self.size))
if self.bias_term:
last_index -= 1
output[:,-1] = 1.0
# compute the squared weighted distances
dsqr = -(((state[:,:,None] - self.c)/self.w)**2).sum(axis=1)
if self.normalized:
# compute the normalized rbfs from the distances
e_x = np.exp(dsqr - dsqr.min(axis=1)[:,None])
output[:,:last_index] = e_x/ e_x.sum(axis=1)[:,None]
else:
# compute the rbfs from the distances
output[:,:last_index] = np.exp(dsqr)
# return encoded input, squeeze out extra dimensions (in the case
# only one input row was given)
return output.squeeze()
@property
def size(self):
return self.__size
""" Method to generate grids of points (typically for RBF coding).
state_range: range of each dimension
num_centers: An integer or an array (or list) of integers which
corresponds the number of points to distribute on each
dimensions. If a single integer is given, all dimensions
will have the same number of points.
"""
def grid_of_points(state_range, num_centers):
if isinstance(num_centers, int):
num_centers = [num_centers] * state_range[0].shape[0]
points = [ np.linspace(start, stop, num, endpoint = True)
for start, stop, num in izip(state_range[0],
state_range[1],
num_centers)]
points = np.meshgrid(*points)
points = np.concatenate([ p.reshape((-1,1)) for p in points], axis=1)
return points
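# Illustrative usage sketch (arbitrary numbers; 2-D input in [0, 1]^2): place RBF
# centers on a 5x5 grid and encode a state with normalized RBFs.
#
#   centers = grid_of_points([np.zeros(2), np.ones(2)], 5)    # 25 centers
#   widths = 0.25 * np.ones(2)                                # one width per input dimension
#   rbf = RBFCoding(widths, centers, normalized=True)
#   features = rbf(np.array([0.4, 0.6]))    # length 26 (25 RBFs + bias term)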
################## END OF RBF IMPLEMENTATION ##################################
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._pipeline_runs_operations import build_cancel_request, build_get_request, build_query_by_factory_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PipelineRunsOperations:
"""PipelineRunsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.datafactory.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def query_by_factory(
self,
resource_group_name: str,
factory_name: str,
filter_parameters: "_models.RunFilterParameters",
**kwargs: Any
) -> "_models.PipelineRunsQueryResponse":
"""Query pipeline runs in the factory based on input filter conditions.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param factory_name: The factory name.
:type factory_name: str
:param filter_parameters: Parameters to filter the pipeline run.
:type filter_parameters: ~azure.mgmt.datafactory.models.RunFilterParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PipelineRunsQueryResponse, or the result of cls(response)
:rtype: ~azure.mgmt.datafactory.models.PipelineRunsQueryResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PipelineRunsQueryResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(filter_parameters, 'RunFilterParameters')
request = build_query_by_factory_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
factory_name=factory_name,
content_type=content_type,
json=_json,
template_url=self.query_by_factory.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PipelineRunsQueryResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
query_by_factory.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/queryPipelineRuns'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
factory_name: str,
run_id: str,
**kwargs: Any
) -> "_models.PipelineRun":
"""Get a pipeline run by its run ID.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param factory_name: The factory name.
:type factory_name: str
:param run_id: The pipeline run identifier.
:type run_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PipelineRun, or the result of cls(response)
:rtype: ~azure.mgmt.datafactory.models.PipelineRun
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PipelineRun"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
factory_name=factory_name,
run_id=run_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PipelineRun', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/pipelineruns/{runId}'} # type: ignore
@distributed_trace_async
async def cancel(
self,
resource_group_name: str,
factory_name: str,
run_id: str,
is_recursive: Optional[bool] = None,
**kwargs: Any
) -> None:
"""Cancel a pipeline run by its run ID.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param factory_name: The factory name.
:type factory_name: str
:param run_id: The pipeline run identifier.
:type run_id: str
:param is_recursive: If true, cancel all the Child pipelines that are triggered by the current
pipeline.
:type is_recursive: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_cancel_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
factory_name=factory_name,
run_id=run_id,
is_recursive=is_recursive,
template_url=self.cancel.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
cancel.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/pipelineruns/{runId}/cancel'} # type: ignore
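# --- A hedged usage sketch (not part of the generated code) -------------------
# It shows how the async operations defined above are typically reached through
# the public aio client; the subscription id, resource group, factory name and
# run id below are placeholders, not values taken from this file.
async def _example_get_pipeline_run():
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.datafactory.aio import DataFactoryManagementClient
    async with DefaultAzureCredential() as credential:
        async with DataFactoryManagementClient(credential, "<subscription-id>") as client:
            # PipelineRunsOperations.get, as defined above
            run = await client.pipeline_runs.get("<resource-group>", "<factory-name>", "<run-id>")
            return run.status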
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
=================================================
Internal debugging support - debug output logging
=================================================
Provides a way to generate debugging (logging) output on standard output that
can be filtered to just what is needed at the time.
* Some Axon classes create/write debug output using an instance of debug()
* debug uses debugConfigFile.readConfig() to read a configuration for what
should and should not be output
What debugging output actually gets output (and what is filtered out) is
controlled by two things: what *section* the debugging output is under and the
*level* of detail of a given piece of output.
Each call to output debugging information specifies what section it belongs to
and the level of detail it represents.
The filtering of this is configured from a configuration file (see
Axon.debugConfigFile for information on the format) which lists each expected
section and the maximum level of detail that will be output for that section.
How to use it
-------------
Create a debug object::
debugger = Axon.debug.debug()
Specify the configuration to use, either specifying a file, or letting the
debugger choose its own defaults::
debugger.useConfig(filename="my_debug_config_file")
Any subsequent debug objects you create will use the same configuration when
you call their useConfig() method - irrespective of whether you specify a
filename or not!
Call the note() method whenever you potentially want debugging output;
specifying the "section name" and minimum debug level under which it should be
reported::
while 1:
...
assert self.debugger.note("MyObject.main", 10, "loop begins")
...
if something_happened():
...
assert self.debugger.note("MyObject.main", 5, "received ", msg)
...
* Using different section names for different parts of your debugging output
allows you to select which bits you are interested in.
* Use the 'level' number to indicate the level of detail of any given piece of
debugging output.
The note() method always returns True, meaning you can wrap it in an
assert statement. If you then use python's "-O" command line flag, assert
statements will be ignored, completely removing any performance overhead
due to the debugging output.
Adjusting the configuration of individual debugger objects
----------------------------------------------------------
All debug objects share the same initial configuration - when you call their
useConfig() method, all pick up the same configuration file that was specified
on the first call.
However, after the useConfig() call you can customise the configuration of
individual debug objects.
You can increase or decrease the maximum level of detail that will be output
for a given section::
debugger.increaseDebug("MyObject.main")
debugger.decreaseDebug("MyObject.main")
You can add (or replace) the configuration for individual debugging sections -
i.e. (re)specify what the maximum level of detail will be for a given section::
    debugger.addDebugSection("MyObject.main", 10)
Or you can replace the entire set::
replacementSections = { "MyObject.main" : 10,
"MyObject.init" : 5,
...
}
    debugger.setDebugSections(**replacementSections)
"""
import time
import random
import debugConfigFile
import debugConfigDefaults
from AxonExceptions import AxonException   # used by debug.debug() below when an unknown section is hit (assumed to live alongside this module)
class debug(object):
"""\
debug([assertBadDebug]) -> new debug object.
Object for outputting debugging output, filtered as required. Only outputs
debugging data for section names it recognises - as specified in a debug
config file.
Keyword arguments:
    - assertBadDebug -- Optional. If it evaluates to true, any debug output for a section not defined in the configuration causes an exception (default=1)
"""
configs = None
noConfig = True
def __init__(self, assertBadDebug=1):
self.assertBadDebug = assertBadDebug
self.debugOn = True
def readConfig(self,configFile="debug.conf"):
"""\
**INTERNAL**
Reads specified debug configuration file.
Uses Axon.debugConfigFile
"""
result = debugConfigFile.readConfig(configFile)
debug.noConfig = False
return result
def useConfig(self, filename="debug.conf"):
"""\
Instruct this object to set up its debugging configuration.
If this, or another debug object has previously set it up, then that is
applied for this object; otherwise it is loaded from the specified file.
However, if no file is specified or the file could not be read, then
alternative defaults are used. This configuration is then used for all
future debug objects.
"""
if (not debug.configs):
try:
debug.configs = self.readConfig(filename)
except IOError:
# Can't read the debug config file.
#
# Use defaults instead.
#
debug.configs = debugConfigDefaults.defaultConfig()
# debug.noConfig = True
if debug.configs:
try:
for section in debug.configs.keys():
level,location = debug.configs[section]
self.addDebugSection(section, level)
except KeyError:
pass # XXXX No debug information requested by user for the
# requested module - not an error
def addDebugSection(self, section, level):
"""\
Add a section name for which debug output can be generated, specifying
a maximum debug level for which there will be output.
This does not affect the configuration of other debug objects.
"""
try:
self.debugSections[section] = level
except AttributeError:
self.debugSections = dict()
self.debugSections[section] = level
def addDebug(self, **debugSections):
"""\
Add several debug sections. Each argument's name corresponds to a section
        name for which debug output can be generated. The value is the maximum
debug level for which there will be output.
This does not affect the configuration of other debug objects.
"""
sections = debugSections.keys()
for section in sections:
self.addDebugSection(section, debugSections[section])
def increaseDebug(self, section):
"""\
Increases the maximum debug level for which output will be generated for
the specified section.
This does not affect the configuration of other debug objects.
"""
try:
self.debugSections[section] = self.debugSections[section] + 5
except KeyError:
self.addDebugSection(section,5)
def decreaseDebug(self, section):
"""\
Decreases the maximum debug level for which output will be generated for
the specified section.
This does not affect the configuration of other debug objects.
"""
try:
self.debugSections[section] = self.debugSections[section] - 5
if self.debugSections[section] < 0:
self.debugSections[section] = 0
except KeyError:
pass
def setDebugSections(self,**debugSections):
"""\
Set the debug sections. Replaces any existing ones.
        Each argument's name corresponds to a section name for which debug output
can be generated. The value is the maximum debug level for which there
will be output.
This does not affect the configuration of other debug objects.
"""
self.debugSections = debugSections
def areDebugging(self,section,level):
"""\
Returns true if we are debugging this level, doesn't try to enforce
correctness
"""
try:
if self.debugSections[section] >= level:
return True
except KeyError, key:
pass
except AttributeError, error:
pass
return False
def debugmessage(self, section, *message):
"""\
        Output a debug message (never filtered)
Keyword arguments:
        - section -- the section name to label this output with
- \*message -- object(s) to print as the debugging output
"""
print time.asctime(), "|", section, "|",
for arg in message:
print arg,
print # Force new line
def debug(self,section, level, *message):
"""\
Output a debug message.
Specify the 'section' the debug message should come under. The user will
have specified the maximum 'level' to be outputted for that section.
* Use higher level numbers for more detailed debugging output.
* Use different section names for different parts of your code to allow
the user to select which sections they want output for
Always returns True, so can be used as argument to an assert statement.
This means you can then disable debugging output (and any associated
performance overhead) by using python's "-O" command line flag.
Keyword arguments:
- section -- the section you want this debugging output classified under
- level -- the level of detail of this debugging output (number)
- \*message -- object(s) to print as the debugging output
"""
try:
if self.debugSections[section] >= level:
print time.asctime(), "|", section, "|",
for arg in message:
print arg,
print # Force new line
except KeyError, key:
if not debug.noConfig:
print "OI! YOU TRIED TO USE A NON-DEFINED DEBUG SECTION", key
print "This may be due to the following:"
print " * You haven't added the debug section to the debug.conf file"
print " * You have misspelt (typo?) the debug section"
print " * You have trailling or leading spaces in your use of the debug section"
if self.assertBadDebug:
m=""
for arg in message:
print arg,
m=m+str(arg)
raise AxonException("BadDebug Undefined section: "+section+", Message: "+m)
except AttributeError, error:
try:
                self.debugSections # we expect this to be the reason we got
# here, so this should fail. If it doesn't
# our expectations are wrong. Our
# expectation is that we are running
# interactively in a directory with no
# debug.conf file.
except AttributeError:
if not debug.noConfig:
raise error
return True
note = debug
if __name__=="__main__":
class debugTestClass:
def __init__(self):
self.debugger = debug()
self.debugger.useConfig()#("debugTestClass")
self.debugger.note("debugTestClass.__init__",1,"Initialised")
#
def run(self,counter):
self.debugger.note("debugTestClass.run",1, "START")
self.counter=counter
while self.counter > 0:
self.debugger.note("debugTestClass.run",5, "LOOP")
if self.counter % 2 == 0:
self.debugger.note("debugTestClass.run",10, "DOEVEN")
self.even(self.counter)
else:
if self.counter % 3 == 0:
self.debugger.note("debugTestClass.run",10, "DOTRIPLE")
self.triple(self.counter)
else:
self.debugger.note("debugTestClass.run",10, "DORANDOM")
self.randomChange(self.counter)
self.counter = self.counter - 1
#
def even(self,counter):
self.debugger.note("debugTestClass.even",1, "EVEN",self.counter)
#
def triple(self,counter):
self.debugger.note("debugTestClass.triple",1, "TRIPLE",self.counter)
#
def randomChange(self,counter):
self.debugger.note("debugTestClass.randomChange", 1, "START")
action = random.randrange(10)
if action < 4:
self.counter = self.counter + 1
self.debugger.note("debugTestClass.randomChange", 5, "Increment",self.counter)
else:
if action > 4:
self.counter = self.counter - 1
self.debugger.note("debugTestClass.randomChange", 5, "Decrement",self.counter)
else:
self.counter = self.counter * 2
self.debugger.note("debugTestClass.randomChange", 5, "Double",self.counter)
debugTestClass().run(10)
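# A minimal sketch (not part of the original Axon API surface) of the per-instance
# configuration described in the module docstring; "MyObject.main" is a
# hypothetical section name.
def _example_configure_debugger():
    d = debug()
    d.useConfig()                            # shared config, or the built-in defaults
    d.addDebugSection("MyObject.main", 5)    # output up to level 5 for this section
    d.increaseDebug("MyObject.main")         # now up to level 10
    assert d.note("MyObject.main", 10, "detailed message now appears")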
|
|
# Copyright 2016 Christoph Groth (INAC / CEA Grenoble).
#
# This file is subject to the 2-clause BSD license as found at
# http://kwant-project.org/license.
"""Replace symmetries of Kwant builders with momentum parameters to the
system."""
import sys
import itertools
import collections
import cmath
import numpy as np
import tinyarray as ta
import kwant
from kwant.builder import herm_conj
if sys.version_info >= (3, 0):
def _hashable(obj):
return isinstance(obj, collections.Hashable)
else:
def _hashable(obj):
return (isinstance(obj, collections.Hashable)
and not isinstance(obj, np.ndarray))
def _memoize(f):
"""Decorator to memoize a function that works even with unhashable args.
This decorator will even work with functions whose args are not hashable.
The cache key is made up by the hashable arguments and the ids of the
non-hashable args. It is up to the user to make sure that non-hashable
args do not change during the lifetime of the decorator.
This decorator will keep reevaluating functions that return None.
"""
def lookup(*args):
key = tuple(arg if _hashable(arg) else id(arg) for arg in args)
result = cache.get(key)
if result is None:
cache[key] = result = f(*args)
return result
cache = {}
return lookup
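# A small illustration (hypothetical function, not used elsewhere) of the memoization
# above: hashable arguments are cached by value, while numpy arrays are keyed by id(),
# so repeated calls with the *same* array object hit the cache.
@_memoize
def _example_costly(arr, scale):
    return scale * np.sum(arr)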
def wraparound(builder, keep=None):
"""Replace translational symmetries by momentum parameters.
A new Builder instance is returned. By default, each symmetry is replaced
by one scalar momentum parameter that is appended to the already existing
arguments of the system. Optionally, one symmetry may be kept by using the
`keep` argument.
"""
@_memoize
def bind_site(val):
assert callable(val)
return lambda a, *args: val(a, *args[:mnp])
@_memoize
def bind_hopping_as_site(elem, val):
def f(a, *args):
phase = cmath.exp(1j * ta.dot(elem, args[mnp:]))
v = val(a, sym.act(elem, a), *args[:mnp]) if callable(val) else val
pv = phase * v
return pv + herm_conj(pv)
return f
@_memoize
def bind_hopping(elem, val):
def f(a, b, *args):
phase = cmath.exp(1j * ta.dot(elem, args[mnp:]))
v = val(a, sym.act(elem, b), *args[:mnp]) if callable(val) else val
return phase * v
return f
@_memoize
def bind_sum(*vals):
return lambda *args: sum((val(*args) if callable(val) else val)
for val in vals)
if keep is None:
ret = kwant.Builder()
sym = builder.symmetry
else:
periods = list(builder.symmetry.periods)
ret = kwant.Builder(kwant.TranslationalSymmetry(periods.pop(keep)))
sym = kwant.TranslationalSymmetry(*periods)
mnp = -len(sym.periods) # Used by the bound functions above.
# Store lists of values, so that multiple values can be assigned to the
# same site or hopping.
for site, val in builder.site_value_pairs():
ret[site] = [bind_site(val) if callable(val) else val]
for hop, val in builder.hopping_value_pairs():
a, b = hop
b_dom = sym.which(b)
b_wa = sym.act(-b_dom, b)
if a == b_wa:
# The hopping gets wrapped-around into an onsite Hamiltonian.
# Since site `a` already exists in the system, we can simply append.
ret[a].append(bind_hopping_as_site(b_dom, val))
else:
# The hopping remains a hopping.
if b != b_wa or callable(val):
# The hopping got wrapped-around or is a function.
val = bind_hopping(b_dom, val)
if (a, b_wa) in ret:
ret[a, b_wa].append(val)
else:
ret[a, b_wa] = [val]
# Convert lists of more than one element into summing functions.
summed_vals = {}
for site_or_hop, vals in itertools.chain(ret.site_value_pairs(),
ret.hopping_value_pairs()):
ret[site_or_hop] = vals[0] if len(vals) == 1 else bind_sum(*vals)
return ret
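# A hedged usage sketch (assumes a working Kwant installation): build the same kind of
# doubly periodic system as _simple_syst() below, then wrap one symmetry into a momentum
# parameter while keeping the other as a real translational symmetry via `keep`.
def _example_wraparound_keep():
    lat = kwant.lattice.square()
    sym = kwant.TranslationalSymmetry(lat.vec((1, 0)), lat.vec((0, 1)))
    syst = kwant.Builder(sym)
    syst[lat.shape(lambda p: True, (0, 0))] = 0
    syst[lat.neighbors(1)] = 1
    # The kept symmetry stays a translational symmetry; the other one becomes a
    # momentum parameter appended to the system's argument list.
    return wraparound(syst, keep=0).finalized()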
def plot_bands_2d(syst, args=(), momenta=(31, 31)):
"""Plot the bands of a system with two wrapped-around symmetries."""
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot
if not isinstance(syst, kwant.system.FiniteSystem):
raise TypeError("Need a system without symmetries.")
fig = pyplot.figure()
ax = fig.gca(projection='3d')
kxs = np.linspace(-np.pi, np.pi, momenta[0])
kys = np.linspace(-np.pi, np.pi, momenta[1])
energies = [[np.sort(np.linalg.eigvalsh(syst.hamiltonian_submatrix(
args + (kx, ky), sparse=False)).real)
for ky in kys] for kx in kxs]
energies = np.array(energies)
mesh_x, mesh_y = np.meshgrid(kxs, kys)
for i in range(energies.shape[-1]):
ax.plot_wireframe(mesh_x, mesh_y, energies[:, :, i],
rstride=1, cstride=1)
pyplot.show()
def _simple_syst(lat, E=0, t=1):
"""Create a builder for a simple infinite system."""
sym = kwant.TranslationalSymmetry(lat.vec((1, 0)), lat.vec((0, 1)))
# Build system with 2d periodic BCs. This system cannot be finalized in
# Kwant <= 1.2.
syst = kwant.Builder(sym)
syst[lat.shape(lambda p: True, (0, 0))] = E
syst[lat.neighbors(1)] = t
return syst
def test_consistence_with_bands(kx=1.9, nkys=31):
kys = np.linspace(-np.pi, np.pi, nkys)
for lat in [kwant.lattice.honeycomb(), kwant.lattice.square()]:
syst = _simple_syst(lat)
wa_keep_1 = wraparound(syst, keep=1).finalized()
wa_keep_none = wraparound(syst).finalized()
bands = kwant.physics.Bands(wa_keep_1, (kx,))
energies_a = [bands(ky) for ky in
(kys if kwant.__version__ > "1.0" else reversed(kys))]
energies_b = []
for ky in kys:
H = wa_keep_none.hamiltonian_submatrix((kx, ky), sparse=False)
evs = np.sort(np.linalg.eigvalsh(H).real)
energies_b.append(evs)
np.testing.assert_almost_equal(energies_a, energies_b)
def test_value_types(k=(-1.1, 0.5), E=0, t=1):
for lat in [kwant.lattice.honeycomb(), kwant.lattice.square()]:
syst = wraparound(_simple_syst(lat, E, t)).finalized()
H = syst.hamiltonian_submatrix(k, sparse=False)
for E1, t1 in [(float(E), float(t)),
(np.array([[E]], float), np.array([[1]], float)),
(ta.array([[E]], float), ta.array([[1]], float))]:
for E2 in [E1, lambda a: E1]:
for t2 in [t1, lambda a, b: t1]:
syst = wraparound(_simple_syst(lat, E2, t2)).finalized()
H_alt = syst.hamiltonian_submatrix(k, sparse=False)
np.testing.assert_equal(H_alt, H)
def test():
test_consistence_with_bands()
test_value_types()
def demo():
"""Calculate and plot the band structure of graphene."""
lat = kwant.lattice.honeycomb()
syst = wraparound(_simple_syst(lat)).finalized()
plot_bands_2d(syst)
if __name__ == '__main__':
test()
demo()
|
|
import collections
import copy
import pickle
import unittest
class DictSetTest(unittest.TestCase):
def test_constructors_not_callable(self):
kt = type({}.keys())
self.assertRaises(TypeError, kt, {})
self.assertRaises(TypeError, kt)
it = type({}.items())
self.assertRaises(TypeError, it, {})
self.assertRaises(TypeError, it)
vt = type({}.values())
self.assertRaises(TypeError, vt, {})
self.assertRaises(TypeError, vt)
def test_dict_keys(self):
d = {1: 10, "a": "ABC"}
keys = d.keys()
self.assertEqual(len(keys), 2)
self.assertEqual(set(keys), {1, "a"})
self.assertEqual(keys, {1, "a"})
self.assertNotEqual(keys, {1, "a", "b"})
self.assertNotEqual(keys, {1, "b"})
self.assertNotEqual(keys, {1})
self.assertNotEqual(keys, 42)
self.assertIn(1, keys)
self.assertIn("a", keys)
self.assertNotIn(10, keys)
self.assertNotIn("Z", keys)
self.assertEqual(d.keys(), d.keys())
e = {1: 11, "a": "def"}
self.assertEqual(d.keys(), e.keys())
del e["a"]
self.assertNotEqual(d.keys(), e.keys())
def test_dict_items(self):
d = {1: 10, "a": "ABC"}
items = d.items()
self.assertEqual(len(items), 2)
self.assertEqual(set(items), {(1, 10), ("a", "ABC")})
self.assertEqual(items, {(1, 10), ("a", "ABC")})
self.assertNotEqual(items, {(1, 10), ("a", "ABC"), "junk"})
self.assertNotEqual(items, {(1, 10), ("a", "def")})
self.assertNotEqual(items, {(1, 10)})
self.assertNotEqual(items, 42)
self.assertIn((1, 10), items)
self.assertIn(("a", "ABC"), items)
self.assertNotIn((1, 11), items)
self.assertNotIn(1, items)
self.assertNotIn((), items)
self.assertNotIn((1,), items)
self.assertNotIn((1, 2, 3), items)
self.assertEqual(d.items(), d.items())
e = d.copy()
self.assertEqual(d.items(), e.items())
e["a"] = "def"
self.assertNotEqual(d.items(), e.items())
def test_dict_mixed_keys_items(self):
d = {(1, 1): 11, (2, 2): 22}
e = {1: 1, 2: 2}
self.assertEqual(d.keys(), e.items())
self.assertNotEqual(d.items(), e.keys())
def test_dict_values(self):
d = {1: 10, "a": "ABC"}
values = d.values()
self.assertEqual(set(values), {10, "ABC"})
self.assertEqual(len(values), 2)
def test_dict_repr(self):
d = {1: 10, "a": "ABC"}
self.assertIsInstance(repr(d), str)
r = repr(d.items())
self.assertIsInstance(r, str)
self.assertTrue(r == "dict_items([('a', 'ABC'), (1, 10)])" or
r == "dict_items([(1, 10), ('a', 'ABC')])")
r = repr(d.keys())
self.assertIsInstance(r, str)
self.assertTrue(r == "dict_keys(['a', 1])" or
r == "dict_keys([1, 'a'])")
r = repr(d.values())
self.assertIsInstance(r, str)
self.assertTrue(r == "dict_values(['ABC', 10])" or
r == "dict_values([10, 'ABC'])")
def test_keys_set_operations(self):
d1 = {'a': 1, 'b': 2}
d2 = {'b': 3, 'c': 2}
d3 = {'d': 4, 'e': 5}
self.assertEqual(d1.keys() & d1.keys(), {'a', 'b'})
self.assertEqual(d1.keys() & d2.keys(), {'b'})
self.assertEqual(d1.keys() & d3.keys(), set())
self.assertEqual(d1.keys() & set(d1.keys()), {'a', 'b'})
self.assertEqual(d1.keys() & set(d2.keys()), {'b'})
self.assertEqual(d1.keys() & set(d3.keys()), set())
self.assertEqual(d1.keys() & tuple(d1.keys()), {'a', 'b'})
self.assertEqual(d1.keys() | d1.keys(), {'a', 'b'})
self.assertEqual(d1.keys() | d2.keys(), {'a', 'b', 'c'})
self.assertEqual(d1.keys() | d3.keys(), {'a', 'b', 'd', 'e'})
self.assertEqual(d1.keys() | set(d1.keys()), {'a', 'b'})
self.assertEqual(d1.keys() | set(d2.keys()), {'a', 'b', 'c'})
self.assertEqual(d1.keys() | set(d3.keys()),
{'a', 'b', 'd', 'e'})
self.assertEqual(d1.keys() | (1, 2), {'a', 'b', 1, 2})
self.assertEqual(d1.keys() ^ d1.keys(), set())
self.assertEqual(d1.keys() ^ d2.keys(), {'a', 'c'})
self.assertEqual(d1.keys() ^ d3.keys(), {'a', 'b', 'd', 'e'})
self.assertEqual(d1.keys() ^ set(d1.keys()), set())
self.assertEqual(d1.keys() ^ set(d2.keys()), {'a', 'c'})
self.assertEqual(d1.keys() ^ set(d3.keys()),
{'a', 'b', 'd', 'e'})
self.assertEqual(d1.keys() ^ tuple(d2.keys()), {'a', 'c'})
self.assertEqual(d1.keys() - d1.keys(), set())
self.assertEqual(d1.keys() - d2.keys(), {'a'})
self.assertEqual(d1.keys() - d3.keys(), {'a', 'b'})
self.assertEqual(d1.keys() - set(d1.keys()), set())
self.assertEqual(d1.keys() - set(d2.keys()), {'a'})
self.assertEqual(d1.keys() - set(d3.keys()), {'a', 'b'})
self.assertEqual(d1.keys() - (0, 1), {'a', 'b'})
self.assertFalse(d1.keys().isdisjoint(d1.keys()))
self.assertFalse(d1.keys().isdisjoint(d2.keys()))
self.assertFalse(d1.keys().isdisjoint(list(d2.keys())))
self.assertFalse(d1.keys().isdisjoint(set(d2.keys())))
self.assertTrue(d1.keys().isdisjoint({'x', 'y', 'z'}))
self.assertTrue(d1.keys().isdisjoint(['x', 'y', 'z']))
self.assertTrue(d1.keys().isdisjoint(set(['x', 'y', 'z'])))
self.assertTrue(d1.keys().isdisjoint(set(['x', 'y'])))
self.assertTrue(d1.keys().isdisjoint(['x', 'y']))
self.assertTrue(d1.keys().isdisjoint({}))
self.assertTrue(d1.keys().isdisjoint(d3.keys()))
de = {}
self.assertTrue(de.keys().isdisjoint(set()))
self.assertTrue(de.keys().isdisjoint([]))
self.assertTrue(de.keys().isdisjoint(de.keys()))
self.assertTrue(de.keys().isdisjoint([1]))
def test_items_set_operations(self):
d1 = {'a': 1, 'b': 2}
d2 = {'a': 2, 'b': 2}
d3 = {'d': 4, 'e': 5}
self.assertEqual(
d1.items() & d1.items(), {('a', 1), ('b', 2)})
self.assertEqual(d1.items() & d2.items(), {('b', 2)})
self.assertEqual(d1.items() & d3.items(), set())
self.assertEqual(d1.items() & set(d1.items()),
{('a', 1), ('b', 2)})
self.assertEqual(d1.items() & set(d2.items()), {('b', 2)})
self.assertEqual(d1.items() & set(d3.items()), set())
self.assertEqual(d1.items() | d1.items(),
{('a', 1), ('b', 2)})
self.assertEqual(d1.items() | d2.items(),
{('a', 1), ('a', 2), ('b', 2)})
self.assertEqual(d1.items() | d3.items(),
{('a', 1), ('b', 2), ('d', 4), ('e', 5)})
self.assertEqual(d1.items() | set(d1.items()),
{('a', 1), ('b', 2)})
self.assertEqual(d1.items() | set(d2.items()),
{('a', 1), ('a', 2), ('b', 2)})
self.assertEqual(d1.items() | set(d3.items()),
{('a', 1), ('b', 2), ('d', 4), ('e', 5)})
self.assertEqual(d1.items() ^ d1.items(), set())
self.assertEqual(d1.items() ^ d2.items(),
{('a', 1), ('a', 2)})
self.assertEqual(d1.items() ^ d3.items(),
{('a', 1), ('b', 2), ('d', 4), ('e', 5)})
self.assertEqual(d1.items() - d1.items(), set())
self.assertEqual(d1.items() - d2.items(), {('a', 1)})
self.assertEqual(d1.items() - d3.items(), {('a', 1), ('b', 2)})
self.assertEqual(d1.items() - set(d1.items()), set())
self.assertEqual(d1.items() - set(d2.items()), {('a', 1)})
self.assertEqual(d1.items() - set(d3.items()), {('a', 1), ('b', 2)})
self.assertFalse(d1.items().isdisjoint(d1.items()))
self.assertFalse(d1.items().isdisjoint(d2.items()))
self.assertFalse(d1.items().isdisjoint(list(d2.items())))
self.assertFalse(d1.items().isdisjoint(set(d2.items())))
self.assertTrue(d1.items().isdisjoint({'x', 'y', 'z'}))
self.assertTrue(d1.items().isdisjoint(['x', 'y', 'z']))
self.assertTrue(d1.items().isdisjoint(set(['x', 'y', 'z'])))
self.assertTrue(d1.items().isdisjoint(set(['x', 'y'])))
self.assertTrue(d1.items().isdisjoint({}))
self.assertTrue(d1.items().isdisjoint(d3.items()))
de = {}
self.assertTrue(de.items().isdisjoint(set()))
self.assertTrue(de.items().isdisjoint([]))
self.assertTrue(de.items().isdisjoint(de.items()))
self.assertTrue(de.items().isdisjoint([1]))
def test_recursive_repr(self):
d = {}
d[42] = d.values()
self.assertRaises(RecursionError, repr, d)
def test_copy(self):
d = {1: 10, "a": "ABC"}
self.assertRaises(TypeError, copy.copy, d.keys())
self.assertRaises(TypeError, copy.copy, d.values())
self.assertRaises(TypeError, copy.copy, d.items())
def test_pickle(self):
d = {1: 10, "a": "ABC"}
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises((TypeError, pickle.PicklingError),
pickle.dumps, d.keys(), proto)
self.assertRaises((TypeError, pickle.PicklingError),
pickle.dumps, d.values(), proto)
self.assertRaises((TypeError, pickle.PicklingError),
pickle.dumps, d.items(), proto)
def test_abc_registry(self):
d = dict(a=1)
self.assertIsInstance(d.keys(), collections.KeysView)
self.assertIsInstance(d.keys(), collections.MappingView)
self.assertIsInstance(d.keys(), collections.Set)
self.assertIsInstance(d.keys(), collections.Sized)
self.assertIsInstance(d.keys(), collections.Iterable)
self.assertIsInstance(d.keys(), collections.Container)
self.assertIsInstance(d.values(), collections.ValuesView)
self.assertIsInstance(d.values(), collections.MappingView)
self.assertIsInstance(d.values(), collections.Sized)
self.assertIsInstance(d.items(), collections.ItemsView)
self.assertIsInstance(d.items(), collections.MappingView)
self.assertIsInstance(d.items(), collections.Set)
self.assertIsInstance(d.items(), collections.Sized)
self.assertIsInstance(d.items(), collections.Iterable)
self.assertIsInstance(d.items(), collections.Container)
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/env python
# Copyright (C) 2015 Thomas Huang
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" The `natume.paser` provider the dsl test file paser, it parses and builds the dsl to Test Case Class
the line rules::
test case class name::
firstly it builds a test case class name based the file prefix name like 'test.smoke' , it named TestTest
the method section::
the section like '[index]' is a test method section , builds to def test_index(self):
the send command::
all the line start with a '>' is send request command.
rule::
> $METHOD $PATH $PAYLOAD
Example::
> POST /login user='test_user' pass=@pass
> GET /index
the assert line:
the line inclue (':' , '<-', '=~')
the variable rule::
the variable:
the key start with '@' is the test case instance attribute
the set header:
the line include '=>' will build in headers dict
the regex assert s:
when the value start and end with '/' will build to a regex check assert
"""
import re
import codecs
import os.path
class BaseParser(object):
COMMAND_TOKEN = '>'
SET_HEADER_TOKEN = '=>'
ASSERT_TOKENS = (':', '<-', '=~', '~~')
SET_VAR_TOKEN = '='
VAR_TOKEN = '@'
VAR_REGEX = re.compile(r'%s[a-zA-Z_][a-zA-Z0-9_]+' % (VAR_TOKEN))
SETCTION_REGEX = re.compile(r'\[\s*?(.+)\s*?\]')
SET_HEADER_REGEX = re.compile(r'([a-zA-Z0-9_]+)\s*%s\s*(.*)' % (SET_HEADER_TOKEN))
ASSERT_REGEX = re.compile(r'([a-zA-Z0-9_]+)\s*(%s)\s*(.*)' % ('|'.join(ASSERT_TOKENS)))
ASSIGN_REGEX = re.compile(r'%s([a-zA-Z_][a-zA-Z0-9_]+)\s*%s\s*(.*)' % (VAR_TOKEN, SET_VAR_TOKEN))
def complie(self):
self.scope.write(self.writer)
@property
def code(self):
return self.writer.code
def parse(self, *args, **kw):
raise NotImplementedError('Must implement in subclass')
def parse_line(self, line, lineno):
line = line.strip()
if not line or line.startswith('#'):
return
# smoke section handle
m = self.SETCTION_REGEX.match(line)
if m:
if self.current_section:
self.scope.add_node(self.current_section)
name = '_'.join([i for i in m.group(1).split(' ') if i.strip()])
self.current_section = MethodScope(name)
return
if line.startswith(self.COMMAND_TOKEN):
chunk = re.split(' +', line, 3)
method, path = chunk[1], chunk[2]
payload = {}
if len(chunk) == 4:
data = chunk[3]
data = re.sub(r'=\s*%s([a-zA-Z_][a-zA-Z0-9_]+)' % (self.VAR_TOKEN), r'=self.\1', data)
data = data.split()
for i in data:
k, v = i.split('=')
payload[k] = v
self.current_section.add_node(CommandScope(method, path, payload))
return
m = self.SET_HEADER_REGEX.match(line)
if m:
head, value = m.group(1), m.group(2)
self.current_section.nodes[-1].headers.append((head, value))
return
m = self.ASSERT_REGEX.match(line)
if m:
key, op, value = m.group(1), m.group(2), m.group(3)
assert_command = AssertCommand(key, op, value, line, lineno)
self.current_section.nodes[-1].asserts.append(assert_command)
return
m = self.ASSIGN_REGEX.match(line)
if m:
key, value = m.group(1), m.group(2)
key = 'self.' + key
self.current_section.var_nodes.append((key, value))
class DSLParser(BaseParser):
def __init__(self):
self.current_section = None
self.current_command = None
self.writer = Writer()
self.class_name = None
self.scope = None
def parse(self, path, filename):
filepath = os.path.join(path, filename)
fp = codecs.open(filepath, "r", "utf-8")
lineno = 0
class_name = filename.split('.', 1)[0]
self.class_name = self._class_name(class_name)
self.scope = ClassScope(self.class_name)
while True:
line = fp.readline()
if not line:
break
lineno += 1
self.parse_line(line, lineno)
if self.current_section:
self.scope.add_node(self.current_section)
fp.close()
def _class_name(self, name):
parts = re.split(r'[_-]', name.lower())
parts = [_.capitalize() for _ in parts]
return ''.join(parts) + 'Test'
class DocDSLParser(BaseParser):
def __init__(self):
self.current_section = None
self.current_command = None
self.methods = []
self.writer = Writer()
self.scope = DocMethodsScpoe()
def parse(self, string, lineno=0):
lines = string.split('\n')
for line in lines:
self.parse_line(line, lineno)
lineno += 1
if self.current_section:
self.scope.add_node(self.current_section)
for method_scope in self.scope.nodes:
self.methods.append('test_' + method_scope.name)
class CommandDslParser(BaseParser):
def __init__(self):
self.current_command = None
self.writer = Writer()
self.current_section = CommandsScope()
def parse(self, string, lineno=0):
lines = string.split('\n')
for line in lines:
self.parse_line(line, lineno)
lineno += 1
def parse_line(self, line, lineno):
line = line.strip()
if not line or line.startswith('#'):
return
if line.startswith(self.COMMAND_TOKEN):
chunk = re.split(' +', line, 3)
method, path = chunk[1], chunk[2]
payload = {}
if len(chunk) == 4:
data = chunk[3]
data = re.sub(r'=\s*%s([a-zA-Z_][a-zA-Z0-9_]+)' % (self.VAR_TOKEN), r'=self.\1', data)
data = data.split()
for i in data:
k, v = i.split('=')
payload[k] = v
self.current_section.add_node(CommandScope(method, path, payload))
return
m = self.SET_HEADER_REGEX.match(line)
if m:
head, value = m.group(1), m.group(2)
self.current_section.nodes[-1].headers.append((head, value))
return
m = self.ASSERT_REGEX.match(line)
if m:
key, op, value = m.group(1), m.group(2), m.group(3)
assert_command = AssertCommand(key, op, value, line, lineno)
self.current_section.nodes[-1].asserts.append(assert_command)
return
m = self.ASSIGN_REGEX.match(line)
if m:
key, value = m.group(1), m.group(2)
key = 'self.' + key
self.current_section.var_nodes.append((key, value))
def complie(self):
self.current_section.write(self.writer)
@property
def code(self):
return self.writer.code
class BaseScope(object):
def __init__(self, indent=0, node=None):
self.indent = indent
self.nodes = [node] if node else []
def add_node(self, node):
self.nodes.append(node)
class ClassScope(BaseScope):
def __init__(self, name, base_class='WebTestCase', indent=0, node=None):
BaseScope.__init__(self, indent, node)
self.name = name
self.base_class = base_class
def write(self, writer):
writer.puts('class %s(%s):\n' % (self.name, self.base_class))
indent = self.indent + 1
self.write_setup_method(writer, indent)
for node in self.nodes:
node.write(writer, indent)
def write_setup_method(self, writer, indent):
writer.puts('def setUp(self):', indent)
writer.puts('self.client = client', indent + 1)
writer.puts('self.initialize()', indent + 1)
class DocMethodsScpoe(BaseScope):
def write(self, writer, indent=0):
indent = indent or self.indent
for node in self.nodes:
node.write(writer, indent)
class CommandsScope(BaseScope):
def __init__(self, indent=0, node=None):
BaseScope.__init__(self, indent, node)
self.var_nodes = []
def write(self, writer, indent=None):
indent = indent or self.indent
writer.puts('')
for node in self.nodes:
node.write(writer, indent)
if not self.nodes:
writer.puts('pass', indent)
class MethodScope(BaseScope):
def __init__(self, name, indent=0, node=None):
BaseScope.__init__(self, indent, node)
self.name = name
self.var_nodes = []
def write(self, writer, indent=None):
indent = indent or self.indent
writer.puts('')
if self.name == 'initialize':
writer.puts('def initialize(self):', indent)
indent += 1
for key, value in self.var_nodes:
writer.puts('%s=%s' % (key, value), indent)
return
writer.puts('def test_%s(self):' % (self.name), indent)
indent += 1
for node in self.nodes:
node.write(writer, indent)
if not self.nodes:
writer.puts('pass', indent)
class CommandScope(BaseScope):
def __init__(self, method, path, data=None, indent=None, node=None):
BaseScope.__init__(self, indent, node)
self.asserts = []
self.headers = []
self.sets = []
self.path = path
self.data = data or {}
self.method = method
def write(self, writer, indent):
writer.puts('headers = {}', indent)
for k, v in self.headers:
k = self.format_header_key(k)
writer.puts('headers[%r] = %r' % (k, v), indent)
writer.puts('data = {}', indent)
for k, v in self.data.items():
writer.puts('data[%r] = %s' % (k, v), indent)
writer.puts('self.client.do_request(%r, %r, data, headers)\n' % (self.method, self.path), indent)
for assert_command in self.asserts:
assert_command.write(writer, indent)
def format_header_key(self, key):
parts = re.split(r'[_-]', key.lower())
parts = [_.capitalize() for _ in parts]
return '-'.join(parts)
def __str__(self):
return '<CommandScope method : %s, nodes:%d>' % (self.method, len(self.nodes))
class BaseCommand(object):
def __init__(self, indent=0):
self.indent = indent
def write(self, writer, indent):
pass
class AssertCommand(BaseCommand):
def __init__(self, key, op, value, line, lineno, indent=0):
BaseCommand.__init__(self, indent)
self.key = key
self.op = op
self.value = value
self.line = line
self.lineno = lineno
def write(self, writer, indent=None):
indent = indent if indent is not None else self.indent
assert_key = self._key(self.key)
if assert_key in ('Status', 'Code', 'ContentType', 'Charset'):
self._line(writer, 'self.assert%s(%r)' % (assert_key, self.value), indent)
elif assert_key == 'Json':
key, value = self.value.split("=")
key = key.strip()
value = value.strip()
writer.puts("json = self.client.json", indent)
if key:
writer.puts("json = json%s" % (key), indent)
self._line(writer, 'self.assertJson(json, %r, %s)' % (self.op, value), indent)
elif assert_key == 'Content':
self._line(writer, 'self.assertContent(%r, %r)' % (self.op, self.value), indent)
elif self.op == ':':
if assert_key not in ('Content', 'Json'):
key = self._key(self.key)
self._line(writer, 'self.assertHeader(%r, %r)' % (key, self.value), indent)
else:
key = self._key(self.key)
self._line(writer, 'self.assertHeader(%r, %r, %r)' % (key, self.op, self.value), indent)
def _line(self, writer, line, indent):
writer.puts(line + " # lineno " + str(self.lineno) + ": " + self.line, indent)
def _key(self, k):
parts = re.split(r'[_-]', k.lower())
parts = [_.capitalize() for _ in parts]
return ''.join(parts)
class Writer(object):
def __init__(self):
self.code = ''
self.indent = 0
def puts(self, line, indent=None):
indent = indent or self.indent
self.write('\t' * indent + line + '\n')
def write(self, text):
self.code += text
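# A hedged end-to-end sketch (not part of the original module): feed a small DSL
# fragment through CommandDslParser and read back the generated assertion code.
# The request path and header names are illustrative only.
def _example_parse_commands():
    dsl = (
        "> GET /index\n"
        "status: 200\n"
        "content_type: text/html\n"
    )
    parser = CommandDslParser()
    parser.parse(dsl)
    parser.complie()       # sic: the module spells "compile" this way
    return parser.code     # generated self.client.do_request(...) / assert code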
|
|
#! /usr/bin/env python
'''
Andrew Till
Winter 2015
Python-based PDT cross section reader / writer
'''
#STDLIB
from datetime import datetime
#TPL
import numpy as np
#########################################################################################
def read_PDT_xs_generally(filePath):
    '''Read a PDT cross-section file and store it in a PDT_XS object. Assumes exactly 1 temperature and 1 density.'''
with open(filePath, 'r') as fid:
# >>>>>> HEADER <<<<<<
# Skip initial lines
for i in range(2):
fid.readline()
# Read xs type (MG/MB)
t = fid.readline().split()
xsType = t[4]
# Read number of groups. Assume 1 temperature and 1 density
t = fid.readline().split()
numGroups = int(t[5])
# Read number of 1D cross sections and transfer matrices
fid.readline()
t = fid.readline().split()
num1D = int(t[0])
numXfer = int(t[4])
# Read number of Legendre moments
t = fid.readline().split()
numMoments = int(t[2]) + 1
# Read xs units (micro/macro)
fid.readline()
xsUnits = fid.readline().strip()
# Read temperature
for i in range(2):
fid.readline()
t = fid.readline().split()
temperature = float(t[0])
for i in range(5):
fid.readline()
# Initialize number of delayed neutron groups as 0
numDNGs = 0
# >>>>>> GROUP BOUNDARIES <<<<<<
# From now on, must deal with fixed-length rows (5 entries per row)
loc = 0
entriesPerLine = 5
groupBdrs = np.zeros(numGroups+1)
read_fixed_line(groupBdrs, numGroups+1, entriesPerLine, fid)
groupWidths = - np.diff(groupBdrs)
# >>>>>> XS <<<<<<
# Get all 0D, 1D, and 3D xs
for i in range(3):
fid.readline()
xsDict = {}
read1D, readXfer = 0, 0
while read1D < num1D or readXfer < numXfer:
t = fid.readline().split()
if not t:
print 'File said it contained {0} cross sections and {1} transfer matrices, but only contained {2} and {3}, respectively.'.format(num1D, numXfer, read1D, readXfer)
break
MT = int(t[1].strip(','))
if MT in [457, 458]:
# Read a 0D value (halflife or energy per fission)
read1D += 1
t = fid.readline().split()
xsDict[MT] = float(t[0])
elif MT == 1054:
                # Read delayed neutron decay constant. 1D value, size = numDNGs
read1D += 1
line = fid.readline().split()
numDNGs = int(line[5])
xsDict[MT] = np.zeros(numDNGs)
read_fixed_line(xsDict[MT], numDNGs, entriesPerLine, fid)
elif MT == 2055:
                # Read delayed neutron spectra. numDNGs 1D vectors
read1D += 1
line = fid.readline().split()
numDNGs = int(line[5])
xsDict[MT] = np.zeros((numDNGs, numGroups))
for iDNGs in range(numDNGs):
line = fid.readline().split()
assert iDNGs == int(line[1])
sliceChi = xsDict[MT][iDNGs, :]
read_fixed_line(sliceChi, numGroups, entriesPerLine, fid)
elif MT < 2500:
# Read a 1D cross section
read1D += 1
xsDict[MT] = np.zeros(numGroups)
read_fixed_line(xsDict[MT], numGroups, entriesPerLine, fid)
elif MT == 2518:
# Read total fission matrix which only has 0-th moment
readXfer += 1
xsDict[MT] = np.zeros((numGroups, numGroups))
fissionXfer = xsDict[MT]
for g2 in range(numGroups):
t = fid.readline().split()
sink, first, last = int(t[3]), int(t[4]), int(t[5])
if last < first:
fid.readline()
else:
sliceFission = fissionXfer[sink, first:last+1]
read_fixed_line(sliceFission, last-first+1, entriesPerLine, fid)
else:
# Read a 3D transfer matrix
readXfer += 1
xsDict[MT] = np.zeros((numMoments, numGroups, numGroups))
# Index scatXfer by [moment, group to, group from]. Uses aliasing
scatXfer = xsDict[MT]
for m in range(numMoments):
for g2 in range(numGroups):
t = fid.readline().split()
sink, first, last = int(t[3]), int(t[4]), int(t[5])
if last < first:
# No data for this row of the matrix
fid.readline()
else:
sliceScat = scatXfer[m, sink, first:last+1]
read_fixed_line(sliceScat, last-first+1, entriesPerLine, fid)
if m < (numMoments-1):
fid.readline()
return PDT_XS(numGroups, numMoments, numDNGs, temperature, xsType, xsUnits, groupBdrs, groupWidths, xsDict)
def write_PDT_xs_generally(filePath, xsDat, fromStr='barnfire'):
temperatureList = [xsDat.T]
write_PDT_xs_header(filePath, xsDat, temperatureList, fromStr)
write_PDT_xs_body(filePath, xsDat)
def write_PDT_xs_header(filePath, xsDat, temperatureList=[], fromStr='barnfire'):
'''Write a PDT XS from a PDT_XS object'''
# Get XS meta-information
timeStr = datetime.strftime(datetime.now(), '%c')
numGroups = xsDat.G
numMoments = xsDat.M
typeStr = xsDat.typeStr
microStr = xsDat.microStr
groupBoundaries = xsDat.Eg
numTemperatures = len(temperatureList)
# Print all reactions in xsDat, but print the weight first, if it's included
mtWgt = 1099
oneDMTOrder = sorted([key for key in xsDat.xs.keys() if (key != mtWgt and key < 2500)])
xferMTOrder = sorted([key for key in xsDat.xs.keys() if key >= 2500])
if mtWgt in xsDat.xs.keys():
oneDMTOrder.insert(0, 1099)
num1D = len(oneDMTOrder)
numXfer = len(xferMTOrder)
oneDStr = '{0} neutron process'.format(num1D)
xferStr = '{0} transfer process'.format(numXfer)
if num1D > 1:
oneDStr += 'es'
if numXfer > 1:
xferStr += 'es'
# Write XS in PDT format
with open(filePath, 'w') as fid:
fid.write('PDT Format Material Data File created {0}\n'.format(timeStr))
fid.write('\n')
fid.write('This file is a {0} neutron library generated from {1}.\n'.format(typeStr, fromStr))
fid.write('{0} temperatures, 1 densities, and {1} groups.\n'.format(numTemperatures, numGroups))
fid.write('\n')
fid.write('{0} and {1}.\n'.format(oneDStr, xferStr))
fid.write('Scattering order {0}\n'.format(numMoments-1))
fid.write('\n')
fid.write('{0}\n'.format(microStr))
fid.write('\n')
fid.write('Temperatures in Kelvin:\n')
fid.write(multiline_string(temperatureList, 15, 5, 7))
fid.write('\n')
fid.write('Densities in g/cc:\n')
fid.write('{0:>15}\n'.format(0))
fid.write('\n')
fid.write('Group boundaries in eV:\n')
fid.write(multiline_string(groupBoundaries, 15, 5, 7))
fid.write('\n')
def write_PDT_xs_body(filePath, xsDat):
'''Write a PDT XS from a PDT_XS object'''
# Get XS meta-information
timeStr = datetime.strftime(datetime.now(), '%c')
numGroups = xsDat.G
numDNGs = xsDat.D
numMoments = xsDat.M
temperature = xsDat.T
# Define special reaction (MT) numbers; zeroDMT are MT numbers that have only 1 value
zeroDMTList = [457, 458]
# Print all reactions in xsDat, but print the weight first, if it's included
mtWgt = 1099
mtDecayConst = 1054
mtDelayedChi = 2055
mtFissionMatrix = 2518
oneDMTOrder = sorted([key for key in xsDat.xs.keys() if (key not in [mtWgt, mtDecayConst, mtDelayedChi] and key < 2500)])
xferMTOrder = sorted([key for key in xsDat.xs.keys() if key >= 2500])
if mtWgt in xsDat.xs.keys():
oneDMTOrder.insert(0, 1099)
num1D = len(oneDMTOrder)
numXfer = len(xferMTOrder)
# Write XS in PDT format
with open(filePath, 'a') as fid:
fid.write('T = {0:g} density = 0\n'.format(temperature))
fid.write('---------------------------------------------------\n')
for MT in oneDMTOrder:
fid.write('MT {0}\n'.format(MT))
vectorAlias = xsDat.xs[MT]
if not hasattr(vectorAlias, '__iter__'):
print MT
# If MT number corresponds to a 0D quantity, print it as a length-1 vector
vectorAlias = np.array([vectorAlias])
print vectorAlias
fid.write(multiline_string(vectorAlias, 20, 5, 12))
# write decay constants for delayed neutron groups
if mtDecayConst in xsDat.xs.keys():
MT = mtDecayConst
fid.write('MT {0}\n'.format(MT))
vectorAlias = xsDat.xs[MT]
fid.write(' Number of delayed neutron groups: {0}\n'.format(numDNGs))
fid.write(multiline_string(vectorAlias, 20, 5, 12))
# write delayed neutron spectra
if mtDelayedChi in xsDat.xs.keys():
MT = mtDelayedChi
fid.write('MT {0}\n'.format(MT))
vectorAlias = xsDat.xs[MT]
fid.write(' Number of delayed neutron groups: {0}\n'.format(numDNGs))
for iDNG in range(numDNGs):
fid.write(' DNG {0}\n'.format(iDNG))
fid.write(multiline_string(vectorAlias[iDNG,:], 20, 5, 12))
# write fission matrix
if mtFissionMatrix in xferMTOrder:
MT = mtFissionMatrix
fissionMatrix = xsDat.xs[MT]
fid.write('MT {0}, Moment {1}\n'.format(MT, 0))
for g in range(numGroups):
fid.write(' Sink, first, last: ')
first = 0
last = numGroups - 1
vec = [g, first, last]
fid.write(multiline_string(vec, 5, 3, 10))
fid.write(multiline_string(fissionMatrix[g, :], 20, 5, 12))
# write transfer matrices except for fission matrix
for MT in [MT for MT in xferMTOrder if MT != mtFissionMatrix]:
scatMatrix = xsDat.xs[MT]
for m in range(numMoments):
fid.write('MT {0}, Moment {1}\n'.format(MT, m))
for gTo in range(numGroups):
scatSlice = scatMatrix[m,gTo,:]
nonzeroLeft = np.argmin(scatSlice==0)
nonzeroRight = numGroups - np.argmin(scatSlice[::-1]==0)
fid.write(' Sink, first, last: ')
if all(scatSlice==0):
vec = [gTo, -1, -2]
fid.write(multiline_string(vec, 5, 3, 10))
fid.write('\n')
else:
vec = [gTo, nonzeroLeft, nonzeroRight-1]
fid.write(multiline_string(vec, 5, 3, 10))
fid.write(multiline_string(scatSlice[nonzeroLeft:nonzeroRight], 20, 5, 12))
fid.write('\n')
#########################################################################################
def read_fixed_line(obj, objSize, numPerLine, fid):
"""
    Reads objSize values into obj from a file handle using readline(), where the
    file has at most numPerLine values per line. obj is filled in place and is
    expected to behave like a numpy array.
"""
loc = 0
    requiredLines = objSize // numPerLine  # integer division: number of full lines
    lastLineSize = objSize % numPerLine
    for L in range(requiredLines):
        t = fid.readline().split()
        for i in range(numPerLine):
            obj[loc] = t[i]
            loc += 1
if lastLineSize > 0:
t = fid.readline().split()
# print t
for i in range(lastLineSize):
# print i, t[i]
obj[loc] = t[i]
loc += 1
def multiline_string(vector, spacing, numberPerLine, decimals):
outStr = ''
N = int(np.ceil(len(vector)/float(numberPerLine)))*numberPerLine
for i in range(numberPerLine, N+1, numberPerLine):
strs = ['{0:>{1}.{2}g}'.format(vi, spacing, decimals) for vi in vector[i-numberPerLine:i]]
outStr += ''.join(strs) + '\n'
return outStr
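# A small self-check sketch (not part of the original reader/writer): it exercises the
# 5-entries-per-line layout that read_fixed_line() and multiline_string() agree on,
# using an in-memory buffer in place of a real PDT file handle.
def _example_fixed_line_roundtrip():
    try:
        from StringIO import StringIO   # Python 2, matching the print statements in this file
    except ImportError:
        from io import StringIO         # Python 3 fallback
    values = np.arange(7, dtype=float)           # one full line of 5 plus 2 leftovers
    text = multiline_string(values, 15, 5, 7)    # width 15, 5 per line, 7 significant digits
    out = np.zeros(7)
    read_fixed_line(out, 7, 5, StringIO(text))
    return np.allclose(out, values)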
#########################################################################################
class PDT_XS():
def __init__(self, numGroups, numMoments, numDNGs, temperature, typeStr, microStr, groupBdrs, groupWidths, xsDict):
self.G = numGroups
self.M = numMoments
self.D = numDNGs
self.T = temperature
self.typeStr = typeStr
self.microStr = microStr
self.Eg = groupBdrs
self.dE = groupWidths
self.xs = xsDict
def print_stats(self):
        print 'numGroups numMoments numDNGs temperature type(MG/MB) type(micro/macro)'
print self.G, self.M, self.D, self.T, self.typeStr.lower(), self.microStr.lower().split()[0]
print 'MT list'
print sorted(self.xs.keys())
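# A hedged usage sketch (the file names are hypothetical): read a PDT cross-section
# library with the generic reader above, inspect it, and write it back out through
# the generic writer.
def _example_roundtrip_pdt(inPath='xs_in.data', outPath='xs_out.data'):
    xsDat = read_PDT_xs_generally(inPath)
    xsDat.print_stats()
    write_PDT_xs_generally(outPath, xsDat, fromStr='barnfire')
    return xsDat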
#########################################################################################
def print_PDT_MT_enum():
print '''
These are the MT numbers used in PDT. The rule to move from PDT_MT to (MF,MT) used in ENDF is:
if PDT_MT < 1000:
MF = 3
MT = PDT_MT
elif PDT_MT >= 2502:
MF = 6
MT = PDT_MT - 2500
else:
    PDT_MT is a derived quantity and does not have an ENDF (MF,MT) classification
// ===========================================================================
// Scalar neutron processes
// ===========================================================================
MT_half_life , // MT = 457, half life of the nuclide
MT_E_per_fission , // MT = 458, total energy per fission (eV) minus neutrinos
MT_N_SCALAR_COUNT , // All neutron scalar values above here
// ===========================================================================
// Single group (1D) neutron processes
// ===========================================================================
// common cross sections
// =====================
MT_total , // MT = 1, total cross section
MT_elastic , // MT = 2, elastic scattering
MT_nonelastic , // MT = 3, nonelastic, sig_t - sig_el
MT_inelastic , // MT = 4, inelastic scattering
MT_transfer , // MT = 5, transfer (sum over final group)
MT_loss , // MT = 15, total - transfer
MT_absorption , // MT = 27, absorption
MT_n2n , // MT = 16, (n, 2n)
MT_n3n , // MT = 17, (n, 3n)
MT_n4n , // MT = 37, (n, 4n)
MT_n_nalpha , // MT = 22, (n, n+alpha)
MT_n_np , // MT = 28, (n, n+p)
MT_n_gamma , // MT = 102, (n, gamma)
MT_n_p , // MT = 103, (n, proton)
MT_n_alpha , // MT = 107, (n, alpha)
MT_n_disappear , // MT = 101, disappearance (no exit neutron)
MT_inv_velocity , // MT = 259, flux-weighted inverse velocity
MT_weight_func , // MT = 1099, group-averaged weight function
// less-common cross sections
// ==========================
//MT_n_n3alpha , // MT = 23, (n, n+3alpha)
//MT_n_2nalpha , // MT = 24, (n, 2n+alpha)
//MT_n_3nalpha , // MT = 25, (n, 3n+alpha)
//MT_n_n2alpha , // MT = 23, (n, n+2alpha)
//MT_n_n2alpha , // MT = 29, (n, n+2alpha)
//MT_n_2n2alpha , // MT = 30, (n, 2n+2alpha)
//MT_n_ndeuteron , // MT = 32, (n, n+deuteron)
//MT_n_ntriton , // MT = 33, (n, n+triton)
//MT_n_nhe3 , // MT = 34, (n, n+3He)
//MT_n_ndeuteron2alpha , // MT = 35, (n, n+deuteron+2alpha)
//MT_n_ntriton2alpha , // MT = 36, (n, n+triton+2alpha)
MT_n_deuteron , // MT = 104, (n, deuteron)
MT_n_triton , // MT = 105, (n, triton)
//MT_n_he3 , // MT = 106, (n, 3He)
// To add more MT numbers, see
// www.nndc.bnl.gov/exfor/help7.jsp
// fission related cross sections
// ==============================
MT_nu_sig_f , // MT = 1452,
MT_fission , // MT = 18, total fission
MT_nubar , // MT = 452, total nubar, average # n0 per fission
MT_chi , // MT = 1018, total fission spectrum
MT_lambda_del , // MT = 1054, decay constants of delayed neutron precursor
MT_nubar_del , // MT = 455, nubar, delayed neutrons
MT_chi_del , // MT = 1055, delayed neutron spectrum
MT_chis_del , // MT = 2055, delayed neutron spectra for all delayed neutron groups
MT_nubar_prompt , // MT = 456, nubar, prompt neutrons
MT_chi_prompt , // MT = 1056, prompt neutron spectrum
MT_f_first , // MT = 19, first chance fission (n, f)
MT_nubar_p1 , // MT = 4561, prompt nubar, first chance fission
MT_chi_1 , // MT = 1019, first chance fission spectrum
MT_f_second , // MT = 20, second chance fission (n, n'f)
MT_nubar_p2 , // MT = 4562, prompt nubar, second chance fission
MT_chi_2 , // MT = 1020, second chance fission spectrum
MT_f_third , // MT = 21, third chance fission (n, 2n'f)
MT_nubar_p3 , // MT = 4563, prompt nubar, third chance fission
MT_chi_3 , // MT = 1021, third chance fission spectrum
MT_f_fourth , // MT = 38, fourth chance fission (n, 3n'f)
MT_nubar_p4 , // MT = 4564, prompt nubar, fourth chance fission
MT_chi_4 , // MT = 1038, fourth chance fission spectrum
// inelastic scattering by discrete post-interaction nuclear state
// ===============================================================
MT_in_1 , // MT = 51, inelastic, 1st level
MT_in_2 , // MT = 52, inelastic, 2nd level
MT_in_3 , // MT = 53, inelastic, 3rd ...
MT_in_4 , // MT = 54, inelastic, 4th ...
MT_in_5 , // MT = 55, inelastic, 5th ...
MT_in_6 , MT_in_7 , MT_in_8 , MT_in_9 , MT_in_10, // MT = level + 50
MT_in_11, MT_in_12, MT_in_13, MT_in_14, MT_in_15,
MT_in_16, MT_in_17, MT_in_18, MT_in_19, MT_in_20,
MT_in_21, MT_in_22, MT_in_23, MT_in_24, MT_in_25,
MT_in_26, MT_in_27, MT_in_28, MT_in_29, MT_in_30,
MT_in_31, MT_in_32, MT_in_33, MT_in_34, MT_in_35,
MT_in_36, MT_in_37, MT_in_38, MT_in_39, MT_in_40,
MT_in_cont , // MT = 91, inelastic continuum not covered above
// 1D thermal scattering xs
// =====================================
MT_th_free , // MT = 221, free gas model
MT_th_h2o , // MT = 222, H in H2O
MT_th_poly_incoh , // MT = 223, H in polyethylene (CH2) incoherent
MT_th_poly_coh , // MT = 224, H in polyethylene (CH2) coherent
MT_th_zrhyd_h_incoh , // MT = 225, H in ZrH incoherent
MT_th_zrhyd_h_coh , // MT = 226, H in ZrH coherent
MT_th_benz_incoh , // MT = 227, benzene incoherent
MT_th_d2o , // MT = 228, D in D2O
MT_th_graph_incoh , // MT = 229, C in graphite incoherent
MT_th_graph_coh , // MT = 230, C in graphite coherent
MT_th_be_incoh , // MT = 231, Be metal incoherent
MT_th_be_coh , // MT = 232, Be metal coherent
MT_th_beo_incoh , // MT = 233, BeO incoherent
MT_th_beo_coh , // MT = 234, BeO coherent
MT_th_zrhyd_zr_incoh , // MT = 235, Zr in ZrH incoherent
MT_th_zrhyd_zr_coh , // MT = 236, Zr in ZrH coherent
// ===========================================================================
// Transfer (group-to-group) processes - Neutron AND Gamma combined
// ===========================================================================
MT_x_transfer , // MT = 2500, total transfer (group to group)
MT_x_scatter , // MT = 2501, total scattering transfer
MT_x_not_fission , // MT = 2519, all transfer except fission
// ===========================================================================
// Transfer (group-to-group) processes - Neutron
// ===========================================================================
MT_x_elastic , // MT = 2502, elastic scattering
MT_x_inelastic , // MT = 2504, inelastic scattering
MT_x_n2n , // MT = 2516, (n, 2n)
MT_x_n3n , // MT = 2517, (n, 3n)
MT_x_fission , // MT = 2518, total fission transfer matrix (chi and nusigf)
// inelastic scattering by discrete post-interaction nuclear state
// ===============================================================
MT_x_1 , // MT = 2551, inelastic, 1st level
MT_x_2 , // MT = 2552, inelastic, 2nd level
MT_x_3 , // MT = 2553, inelastic, 3rd ...
MT_x_4 , // MT = 2554, inelastic, 4th ...
MT_x_5 , // MT = 2555, inelastic, 5th ...
MT_x_6 , MT_x_7 , MT_x_8 , MT_x_9 , MT_x_10, // MT = level + 2550
MT_x_11, MT_x_12, MT_x_13, MT_x_14, MT_x_15,
MT_x_16, MT_x_17, MT_x_18, MT_x_19, MT_x_20,
MT_x_21, MT_x_22, MT_x_23, MT_x_24, MT_x_25,
MT_x_26, MT_x_27, MT_x_28, MT_x_29, MT_x_30,
MT_x_31, MT_x_32, MT_x_33, MT_x_34, MT_x_35,
MT_x_36, MT_x_37, MT_x_38, MT_x_39, MT_x_40,
MT_x_cont , // MT = 2591, inelastic continuum not covered above
// transfer thermal scattering processes
// =====================================
MT_x_th_free , // MT = 2721, free gas model
MT_x_th_h2o , // MT = 2722, H in H2O
MT_x_th_poly_incoh , // MT = 2723, H in polyethylene (CH2) incoherent
MT_x_th_poly_coh , // MT = 2724, H in polyethylene (CH2) coherent
MT_x_th_zrhyd_h_incoh , // MT = 2725, H in ZrH incoherent
MT_x_th_zrhyd_h_coh , // MT = 2726, H in ZrH coherent
MT_x_th_benz_incoh , // MT = 2727, benzene incoherent
MT_x_th_d2o , // MT = 2728, D in D2O
MT_x_th_graph_incoh , // MT = 2729, C in graphite incoherent
MT_x_th_graph_coh , // MT = 2730, C in graphite coherent
MT_x_th_be_incoh , // MT = 2731, Be metal incoherent
MT_x_th_be_coh , // MT = 2732, Be metal coherent
MT_x_th_beo_incoh , // MT = 2733, BeO incoherent
MT_x_th_beo_coh , // MT = 2734, BeO coherent
MT_x_th_zrhyd_zr_incoh, // MT = 2735, Zr in ZrH incoherent
MT_x_th_zrhyd_zr_coh , // MT = 2736, Zr in ZrH coherent
'''
|
|
# This test module covers support in various parts of the standard library
# for working with modules located inside zipfiles
# The tests are centralised in this fashion to make it easy to drop them
# if a platform doesn't support zipimport
import test.support
import os
import os.path
import sys
import textwrap
import zipfile
import zipimport
import doctest
import inspect
import linecache
import unittest
from test.support.script_helper import (spawn_python, kill_python, assert_python_ok,
make_script, make_zip_script)
verbose = test.support.verbose
# Library modules covered by this test set
# pdb (Issue 4201)
# inspect (Issue 4223)
# doctest (Issue 4197)
# Other test modules with zipimport related tests
# test_zipimport (of course!)
# test_cmd_line_script (covers the zipimport support in runpy)
# Retrieve some helpers from other test cases
from test import (test_doctest, sample_doctest, sample_doctest_no_doctests,
sample_doctest_no_docstrings)
def _run_object_doctest(obj, module):
finder = doctest.DocTestFinder(verbose=verbose, recurse=False)
runner = doctest.DocTestRunner(verbose=verbose)
# Use the object's fully qualified name if it has one
# Otherwise, use the module's name
try:
name = "%s.%s" % (obj.__module__, obj.__qualname__)
except AttributeError:
name = module.__name__
for example in finder.find(obj, name, module):
runner.run(example)
f, t = runner.failures, runner.tries
if f:
raise test.support.TestFailed("%d of %d doctests failed" % (f, t))
if verbose:
print ('doctest (%s) ... %d tests with zero failures' % (module.__name__, t))
return f, t
class ZipSupportTests(unittest.TestCase):
# This used to use the ImportHooksBaseTestCase to restore
# the state of the import related information
# in the sys module after each test. However, that restores
# *too much* information and breaks for the invocation
# of test_doctest. So we do our own thing and leave
# sys.modules alone.
# We also clear the linecache and zipimport cache
# just to avoid any bogus errors due to name reuse in the tests
def setUp(self):
linecache.clearcache()
zipimport._zip_directory_cache.clear()
self.path = sys.path[:]
self.meta_path = sys.meta_path[:]
self.path_hooks = sys.path_hooks[:]
sys.path_importer_cache.clear()
def tearDown(self):
sys.path[:] = self.path
sys.meta_path[:] = self.meta_path
sys.path_hooks[:] = self.path_hooks
sys.path_importer_cache.clear()
def test_inspect_getsource_issue4223(self):
test_src = "def foo(): pass\n"
with test.support.temp_dir() as d:
init_name = make_script(d, '__init__', test_src)
name_in_zip = os.path.join('zip_pkg',
os.path.basename(init_name))
zip_name, run_name = make_zip_script(d, 'test_zip',
init_name, name_in_zip)
os.remove(init_name)
sys.path.insert(0, zip_name)
import zip_pkg
try:
self.assertEqual(inspect.getsource(zip_pkg.foo), test_src)
finally:
del sys.modules["zip_pkg"]
def test_doctest_issue4197(self):
# To avoid having to keep two copies of the doctest module's
# unit tests in sync, this test works by taking the source of
# test_doctest itself, rewriting it a bit to cope with a new
# location, and then throwing it in a zip file to make sure
# everything still works correctly
test_src = inspect.getsource(test_doctest)
test_src = test_src.replace(
"from test import test_doctest",
"import test_zipped_doctest as test_doctest")
test_src = test_src.replace("test.test_doctest",
"test_zipped_doctest")
test_src = test_src.replace("test.sample_doctest",
"sample_zipped_doctest")
# The sample doctest files, rewritten so they can be included in the zipped version.
sample_sources = {}
for mod in [sample_doctest, sample_doctest_no_doctests,
sample_doctest_no_docstrings]:
src = inspect.getsource(mod)
src = src.replace("test.test_doctest", "test_zipped_doctest")
# Rewrite the module name so that, for example,
# "test.sample_doctest" becomes "sample_zipped_doctest".
mod_name = mod.__name__.split(".")[-1]
mod_name = mod_name.replace("sample_", "sample_zipped_")
sample_sources[mod_name] = src
with test.support.temp_dir() as d:
script_name = make_script(d, 'test_zipped_doctest',
test_src)
zip_name, run_name = make_zip_script(d, 'test_zip',
script_name)
with zipfile.ZipFile(zip_name, 'a') as z:
for mod_name, src in sample_sources.items():
z.writestr(mod_name + ".py", src)
if verbose:
with zipfile.ZipFile(zip_name, 'r') as zip_file:
print ('Contents of %r:' % zip_name)
zip_file.printdir()
os.remove(script_name)
sys.path.insert(0, zip_name)
import test_zipped_doctest
try:
# Some of the doc tests depend on the colocated text files
# which aren't available to the zipped version (the doctest
# module currently requires real filenames for non-embedded
# tests). So we're forced to be selective about which tests
# to run.
# doctest could really use some APIs which take a text
# string or a file object instead of a filename...
known_good_tests = [
test_zipped_doctest.SampleClass,
test_zipped_doctest.SampleClass.NestedClass,
test_zipped_doctest.SampleClass.NestedClass.__init__,
test_zipped_doctest.SampleClass.__init__,
test_zipped_doctest.SampleClass.a_classmethod,
test_zipped_doctest.SampleClass.a_property,
test_zipped_doctest.SampleClass.a_staticmethod,
test_zipped_doctest.SampleClass.double,
test_zipped_doctest.SampleClass.get,
test_zipped_doctest.SampleNewStyleClass,
test_zipped_doctest.SampleNewStyleClass.__init__,
test_zipped_doctest.SampleNewStyleClass.double,
test_zipped_doctest.SampleNewStyleClass.get,
test_zipped_doctest.sample_func,
test_zipped_doctest.test_DocTest,
test_zipped_doctest.test_DocTestParser,
test_zipped_doctest.test_DocTestRunner.basics,
test_zipped_doctest.test_DocTestRunner.exceptions,
test_zipped_doctest.test_DocTestRunner.option_directives,
test_zipped_doctest.test_DocTestRunner.optionflags,
test_zipped_doctest.test_DocTestRunner.verbose_flag,
test_zipped_doctest.test_Example,
test_zipped_doctest.test_debug,
test_zipped_doctest.test_testsource,
test_zipped_doctest.test_trailing_space_in_test,
test_zipped_doctest.test_DocTestSuite,
test_zipped_doctest.test_DocTestFinder,
]
# These tests are the ones which need access
# to the data files, so we don't run them
fail_due_to_missing_data_files = [
test_zipped_doctest.test_DocFileSuite,
test_zipped_doctest.test_testfile,
test_zipped_doctest.test_unittest_reportflags,
]
for obj in known_good_tests:
_run_object_doctest(obj, test_zipped_doctest)
finally:
del sys.modules["test_zipped_doctest"]
def test_doctest_main_issue4197(self):
test_src = textwrap.dedent("""\
class Test:
">>> 'line 2'"
pass
import doctest
doctest.testmod()
""")
pattern = 'File "%s", line 2, in %s'
with test.support.temp_dir() as d:
script_name = make_script(d, 'script', test_src)
rc, out, err = assert_python_ok(script_name)
expected = pattern % (script_name, "__main__.Test")
if verbose:
print ("Expected line", expected)
print ("Got stdout:")
print (ascii(out))
self.assertIn(expected.encode('utf-8'), out)
zip_name, run_name = make_zip_script(d, "test_zip",
script_name, '__main__.py')
rc, out, err = assert_python_ok(zip_name)
expected = pattern % (run_name, "__main__.Test")
if verbose:
print ("Expected line", expected)
print ("Got stdout:")
print (ascii(out))
self.assertIn(expected.encode('utf-8'), out)
def test_pdb_issue4201(self):
test_src = textwrap.dedent("""\
def f():
pass
import pdb
pdb.Pdb(nosigint=True).runcall(f)
""")
with test.support.temp_dir() as d:
script_name = make_script(d, 'script', test_src)
p = spawn_python(script_name)
p.stdin.write(b'l\n')
data = kill_python(p)
# bdb/pdb applies normcase to its filename before displaying
self.assertIn(os.path.normcase(script_name.encode('utf-8')), data)
zip_name, run_name = make_zip_script(d, "test_zip",
script_name, '__main__.py')
p = spawn_python(zip_name)
p.stdin.write(b'l\n')
data = kill_python(p)
# bdb/pdb applies normcase to its filename before displaying
self.assertIn(os.path.normcase(run_name.encode('utf-8')), data)
def tearDownModule():
test.support.reap_children()
if __name__ == '__main__':
unittest.main()
|
|
import pytest
import pytz
import dateutil
import numpy as np
from datetime import datetime
from dateutil.tz import tzlocal
import pandas as pd
import pandas.util.testing as tm
from pandas import (DatetimeIndex, date_range, Series, NaT, Index, Timestamp,
Int64Index, Period)
class TestDatetimeIndex(object):
def test_astype(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype(object)
expected = Index([Timestamp('2016-05-16')] + [NaT] * 3, dtype=object)
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([1463356800000000000] +
[-9223372036854775808] * 3, dtype=np.int64)
tm.assert_index_equal(result, expected)
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
tm.assert_index_equal(result, Index(rng.asi8))
tm.assert_numpy_array_equal(result.values, rng.asi8)
def test_astype_with_tz(self):
# with tz
rng = date_range('1/1/2000', periods=10, tz='US/Eastern')
result = rng.astype('datetime64[ns]')
expected = (date_range('1/1/2000', periods=10,
tz='US/Eastern')
.tz_convert('UTC').tz_localize(None))
tm.assert_index_equal(result, expected)
# BUG#10442 : testing astype(str) is correct for Series/DatetimeIndex
result = pd.Series(pd.date_range('2012-01-01', periods=3)).astype(str)
expected = pd.Series(
['2012-01-01', '2012-01-02', '2012-01-03'], dtype=object)
tm.assert_series_equal(result, expected)
result = Series(pd.date_range('2012-01-01', periods=3,
tz='US/Eastern')).astype(str)
expected = Series(['2012-01-01 00:00:00-05:00',
'2012-01-02 00:00:00-05:00',
'2012-01-03 00:00:00-05:00'],
dtype=object)
tm.assert_series_equal(result, expected)
def test_astype_str_compat(self):
# GH 13149, GH 13209
# verify that we are returning NaT as a string (and not unicode)
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype(str)
expected = Index(['2016-05-16', 'NaT', 'NaT', 'NaT'], dtype=object)
tm.assert_index_equal(result, expected)
def test_astype_str(self):
# test astype string - #10442
result = date_range('2012-01-01', periods=4,
name='test_name').astype(str)
expected = Index(['2012-01-01', '2012-01-02', '2012-01-03',
'2012-01-04'], name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with tz and name
result = date_range('2012-01-01', periods=3, name='test_name',
tz='US/Eastern').astype(str)
expected = Index(['2012-01-01 00:00:00-05:00',
'2012-01-02 00:00:00-05:00',
'2012-01-03 00:00:00-05:00'],
name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with freq='H' and name
result = date_range('1/1/2011', periods=3, freq='H',
name='test_name').astype(str)
expected = Index(['2011-01-01 00:00:00', '2011-01-01 01:00:00',
'2011-01-01 02:00:00'],
name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with freq='H' and timezone
result = date_range('3/6/2012 00:00', periods=2, freq='H',
tz='Europe/London', name='test_name').astype(str)
expected = Index(['2012-03-06 00:00:00+00:00',
'2012-03-06 01:00:00+00:00'],
dtype=object, name='test_name')
tm.assert_index_equal(result, expected)
def test_astype_datetime64(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype('datetime64[ns]')
tm.assert_index_equal(result, idx)
assert result is not idx
result = idx.astype('datetime64[ns]', copy=False)
tm.assert_index_equal(result, idx)
assert result is idx
idx_tz = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN], tz='EST')
result = idx_tz.astype('datetime64[ns]')
expected = DatetimeIndex(['2016-05-16 05:00:00', 'NaT', 'NaT', 'NaT'],
dtype='datetime64[ns]')
tm.assert_index_equal(result, expected)
def test_astype_raises(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
pytest.raises(ValueError, idx.astype, float)
pytest.raises(ValueError, idx.astype, 'timedelta64')
pytest.raises(ValueError, idx.astype, 'timedelta64[ns]')
pytest.raises(ValueError, idx.astype, 'datetime64')
pytest.raises(ValueError, idx.astype, 'datetime64[D]')
def test_index_convert_to_datetime_array(self):
def _check_rng(rng):
converted = rng.to_pydatetime()
assert isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
assert isinstance(x, datetime)
assert x == stamp.to_pydatetime()
assert x.tzinfo == stamp.tzinfo
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_index_convert_to_datetime_array_explicit_pytz(self):
def _check_rng(rng):
converted = rng.to_pydatetime()
assert isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
assert isinstance(x, datetime)
assert x == stamp.to_pydatetime()
assert x.tzinfo == stamp.tzinfo
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519',
tz=pytz.timezone('US/Eastern'))
rng_utc = date_range('20090415', '20090519', tz=pytz.utc)
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_index_convert_to_datetime_array_dateutil(self):
def _check_rng(rng):
converted = rng.to_pydatetime()
assert isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
assert isinstance(x, datetime)
assert x == stamp.to_pydatetime()
assert x.tzinfo == stamp.tzinfo
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519',
tz='dateutil/US/Eastern')
rng_utc = date_range('20090415', '20090519', tz=dateutil.tz.tzutc())
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
class TestToPeriod(object):
def setup_method(self, method):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
self.index = DatetimeIndex(data)
def test_to_period_millisecond(self):
index = self.index
period = index.to_period(freq='L')
assert 2 == len(period)
assert period[0] == Period('2007-01-01 10:11:12.123Z', 'L')
assert period[1] == Period('2007-01-01 10:11:13.789Z', 'L')
def test_to_period_microsecond(self):
index = self.index
period = index.to_period(freq='U')
assert 2 == len(period)
assert period[0] == Period('2007-01-01 10:11:12.123456Z', 'U')
assert period[1] == Period('2007-01-01 10:11:13.789123Z', 'U')
def test_to_period_tz_pytz(self):
from pytz import utc as UTC
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
assert result == expected
tm.assert_index_equal(ts.to_period(), xp)
ts = date_range('1/1/2000', '4/1/2000', tz=UTC)
result = ts.to_period()[0]
expected = ts[0].to_period()
assert result == expected
tm.assert_index_equal(ts.to_period(), xp)
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
assert result == expected
tm.assert_index_equal(ts.to_period(), xp)
def test_to_period_tz_explicit_pytz(self):
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz=pytz.timezone('US/Eastern'))
result = ts.to_period()[0]
expected = ts[0].to_period()
assert result == expected
tm.assert_index_equal(ts.to_period(), xp)
ts = date_range('1/1/2000', '4/1/2000', tz=pytz.utc)
result = ts.to_period()[0]
expected = ts[0].to_period()
assert result == expected
tm.assert_index_equal(ts.to_period(), xp)
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
assert result == expected
tm.assert_index_equal(ts.to_period(), xp)
def test_to_period_tz_dateutil(self):
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='dateutil/US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
assert result == expected
tm.assert_index_equal(ts.to_period(), xp)
ts = date_range('1/1/2000', '4/1/2000', tz=dateutil.tz.tzutc())
result = ts.to_period()[0]
expected = ts[0].to_period()
assert result == expected
tm.assert_index_equal(ts.to_period(), xp)
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
assert result == expected
tm.assert_index_equal(ts.to_period(), xp)
def test_astype_object(self):
# NumPy 1.6.1 weak ns support
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
tm.assert_index_equal(casted, Index(exp_values, dtype=np.object_))
assert casted.tolist() == exp_values
|
|
# Library Kerasimo
# you might have to set "export LANG=en_US.UTF-8"
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras.models import Model
import numpy as np
import math
import collections
maxheight = 0
class Neuron:
def __init__(self, x, y, a):
self.x = x
self.y = y
self.a = a
class DenseLayer:
def __init__(self, layer, columns, activity):
global maxheight
self.layer = layer
self.activity = activity
self.columns = columns
self.n = len(activity)
maxheight = max(maxheight, self.GetHeight())
def GetWidth(self):
return self.columns*25
def GetHeight(self):
return (self.n//self.columns)*25
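# Sizing example (hypothetical numbers): 12 activations with columns=4 form a
# 4 x 3 grid, so GetWidth() = 4*25 = 100 px and GetHeight() = (12//4)*25 = 75 px.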
def GetCoordinates(self):
points = list()
maxa = -1e99
mina = 1e99
for i in range(0, self.n):
a = self.activity[i]
maxa = max(maxa, a)
mina = min(mina, a)
for i in range(0, self.n):
a = self.activity[i]
if self.layer and self.layer.get_config() and 'activation' in self.layer.get_config():
if self.layer.get_config()['activation'] == 'relu':
if (maxa != 0): a = a/(maxa*0.5)
points.append(Neuron(
(i % self.columns)*25,
(i // self.columns)*25,
a))
return points
class ConvolutedLayer:
def __init__(self, layer, columns, activity):
global maxheight
self.layer = layer
self.activity = activity
#self.activity = np.transpose(activity, (1,2,0))
self.nx = self.activity.shape[0]
self.ny = self.activity.shape[1]
self.nz = self.activity.shape[2]
self.columns = columns
self.n = len(self.activity)
maxyn = self.ny*self.nz + 2*self.nz
maxheight = max(maxheight, self.GetHeight())
def GetWidth(self):
return self.nx*self.columns*25 + self.columns * 50
def GetHeight(self):
rows = self.nz // self.columns
return self.ny*25*rows + rows*50
def GetCoordinates(self):
points = list()
for ky in range(0, self.nz // self.columns):
for kx in range(0, self.columns):
maxa = -1e99
mina = 1e99
for j in range(0, self.ny):
for i in range(0, self.nx):
a = self.activity[i][j][kx+ky*self.columns]
maxa = max(maxa, a)
mina = min(mina, a)
for j in range(0, self.ny):
for i in range(0, self.nx):
a = self.activity[i][j][kx+ky*self.columns]
if self.layer and self.layer.get_config() and 'activation' in self.layer.get_config():
if self.layer.get_config()['activation'] == 'relu':
if (maxa != 0): a = a/(maxa*0.5)
points.append(Neuron(
i * 25 + self.nx*kx*25 + kx*50,
j * 25 + ky*self.ny*25 + ky*50,
a
))
return points
def AddLine(strlist, p1, p2):
dx = p2.x - p1.x
dy = p2.y - p1.y
r = math.sqrt(dx*dx + dy*dy)
dx = dx / r
dy = dy / r
strlist.append('<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="#888" stroke-width="1" marker-end="url(#arrow)" />\n'
% (p2.x-dx*10, p2.y-dy*10, p1.x+dx*18, p1.y+dy*18))
def AddCircle(strlist, p):
colorr = 0
colorb = 0
if (p.a>0):
colorr = int(min(p.a, 1.)*255)
else:
colorb = int(min(-p.a, 1.)*255)
strlist.append('<circle cx="%d" cy="%d" r="10" stroke="black" stroke-width="1" fill="rgb(%d,0,%d)" />\n'
% (p.x, p.y, colorr, colorb))
def CalcNeuronCoordinates(layeridx, layers):
global maxheight
width = 70
points = layers[layeridx].GetCoordinates()
x1 = 10
for i in range(0, layeridx): x1 = x1 + width + layers[i].GetWidth()
y1 = 10 + (maxheight-layers[layeridx].GetHeight()) / 2.
for p in points:
p.x = p.x + x1
p.y = p.y + y1
return points
def GetSize(layers):
width = 20
height = 20
for l in layers:
width = width + l.GetWidth() + 70
height = max(height, l.GetHeight())
return (width, height)
def WriteSVG(f, layers, showarrows):
global maxheight
xrect = 0
layeridx = 0
circlelist = list()
linelist = list()
for l in layers:
neurons1 = CalcNeuronCoordinates(layeridx, layers)
for n in neurons1: AddCircle(circlelist, n)
if (layeridx != 0) and (showarrows):
neurons2 = CalcNeuronCoordinates(layeridx-1, layers)
for n1 in neurons1:
for n2 in neurons2:
AddLine(linelist, n1, n2)
circlelist.append("\n")
linelist.append("\n")
#--------
#rectcolor = 220
#if (layeridx&1) == 0: rectcolor = 255
#f.write('<rect x="%d" y="%d" width="%d" height="%d" fill="rgb(%d,%d,%d)"/>\n'
# % (xrect, 0, l.GetWidth()+70, maxheight, rectcolor, rectcolor, rectcolor))
#xrect = xrect + l.GetWidth() + 70
#-------
layeridx = layeridx + 1
for lstr in linelist: f.write(lstr)
for cstr in circlelist: f.write(cstr)
def ToSVG(name, model, X, **kwargs):
columns = kwargs.get('columns', [1 for i in range(len(model.layers)+1)])
showarrows = kwargs.get('showarrows', True)
batch_size = kwargs.get('batch_size', 32)
showreshape = kwargs.get('showreshape', False)
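# kwargs used above: columns - neurons per row for each plotted layer (input plus
# one entry per model layer), showarrows - draw connections between consecutive
# layers, batch_size - forwarded to model.predict, showreshape - also plot
# Flatten/Reshape layers.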
print('Kerasimo')
print(' class: ', model.__class__.__name__)
print(' layers: ', len(model.layers))
print(' columns: ', columns)
print(' training data: ', X.shape)
for m in model.layers:
print("====================================================================")
print(m.__class__.__name__)
#if (m.__class__.__name__ == 'Lambda'): continue
if "get_config" in dir(m):
print(m.get_config())
if m.get_weights():
print('weights list len: ', len(m.get_weights()))
for w in m.get_weights():
print('weights shape: ', w.shape, ' total: ', w.size)
print('input shape: ', m.input_shape)
print('output shape: ', m.output_shape)
print("====================================================================")
samples = list()
for x in X:
if model.layers[0].__class__.__name__ == 'InputLayer':
samples.append(list([ConvolutedLayer(model.layers[0], columns[0], x)]))
if model.layers[0].__class__.__name__ == 'Dense':
samples.append(list([DenseLayer(model.layers[0], columns[0], x)]))
if model.layers[0].__class__.__name__ == 'Conv2D':
samples.append(list([ConvolutedLayer(model.layers[0], columns[0], x)]))
if model.layers[0].__class__.__name__ == 'ZeroPadding2D':
samples.append(list([ConvolutedLayer(model.layers[0], columns[0], x)]))
if model.layers[0].__class__.__name__ == 'MaxPooling2D':
samples.append(list([ConvolutedLayer(model.layers[0], columns[0], x)]))
print('generated list for ', len(samples), ' samples')
if (len(samples) == 0): return
i = 1
for l in model.layers:
intermediate_model = Model(inputs=model.input, outputs=l.output)
result = intermediate_model.predict(X, batch_size=batch_size)
print('train to layer: ', i, ' with result len: ', result.shape)
for j in range(0, len(result)):
if l.__class__.__name__ == 'Dense':
samples[j].append(DenseLayer(l, columns[i], result[j]))
if l.__class__.__name__ == 'Flatten' and showreshape:
samples[j].append(DenseLayer(l, columns[i], result[j]))
if l.__class__.__name__ == 'Conv2D':
samples[j].append(ConvolutedLayer(l, columns[i], result[j]))
if l.__class__.__name__ == 'Reshape' and showreshape:
samples[j].append(ConvolutedLayer(l, columns[i], result[j]))
if l.__class__.__name__ == 'Conv2DTranspose':
samples[j].append(ConvolutedLayer(l, columns[i], result[j]))
#if l.__class__.__name__ == 'ZeroPadding2D':
# samples[j].append(ConvolutedLayer(l, l.output_shape, columns[i], result[j]))
if l.__class__.__name__ == 'MaxPooling2D':
samples[j].append(ConvolutedLayer(l, columns[i], result[j]))
i = i + 1
print('Plotted layers + input: %d' % len(samples[0]))
(width, height) = GetSize(samples[0])
print('width: %d, height: %d' % (width, height))
for i in range(0, len(samples)):
filename = '%s%02d.svg' % (name, i)
print('Store file %s' % filename)
f = open(filename, 'w')
f.write('<?xml version="1.0" encoding="UTF-8"?>\n')
f.write('<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" baseProfile="full" width="%dpx" height="%dpx">\n' % (width, height))
f.write('<defs>\n')
f.write('<marker id="arrow" markerWidth="10" markerHeight="10" refX="0" refY="3" orient="auto" markerUnits="strokeWidth">\n')
f.write('<path d="M0,0 L0,6 L9,3 z" fill="#888" />\n')
f.write('</marker>\n')
f.write('</defs>\n')
WriteSVG(f, samples[i], showarrows)
f.write("</svg>\n")
f.close()
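# -----------------------------------------------------------------------------
# Minimal usage sketch (an assumption, not part of the library above): build a
# tiny dense network and render one SVG per input sample. The model topology and
# the file prefix 'xor' are made up for illustration.
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    model = Sequential()
    model.add(Dense(4, activation='relu', input_dim=2))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer=SGD(), loss='binary_crossentropy')
    X = np.array([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
    # one 'columns' entry per plotted layer (input layer + each model layer)
    ToSVG('xor', model, X, columns=[1, 1, 1], showarrows=True)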
|
|
from __future__ import absolute_import, division, print_function
import sys
import platform
import _pytest._code
import pytest
def runpdb_and_get_report(testdir, source):
p = testdir.makepyfile(source)
result = testdir.runpytest_inprocess("--pdb", p)
reports = result.reprec.getreports("pytest_runtest_logreport")
assert len(reports) == 3, reports # setup/call/teardown
return reports[1]
@pytest.fixture
def custom_pdb_calls():
called = []
# install dummy debugger class and track which methods were called on it
class _CustomPdb(object):
def __init__(self, *args, **kwargs):
called.append("init")
def reset(self):
called.append("reset")
def interaction(self, *args):
called.append("interaction")
_pytest._CustomPdb = _CustomPdb
return called
class TestPDB(object):
@pytest.fixture
def pdblist(self, request):
monkeypatch = request.getfixturevalue("monkeypatch")
pdblist = []
def mypdb(*args):
pdblist.append(args)
plugin = request.config.pluginmanager.getplugin('debugging')
monkeypatch.setattr(plugin, 'post_mortem', mypdb)
return pdblist
def test_pdb_on_fail(self, testdir, pdblist):
rep = runpdb_and_get_report(testdir, """
def test_func():
assert 0
""")
assert rep.failed
assert len(pdblist) == 1
tb = _pytest._code.Traceback(pdblist[0][0])
assert tb[-1].name == "test_func"
def test_pdb_on_xfail(self, testdir, pdblist):
rep = runpdb_and_get_report(testdir, """
import pytest
@pytest.mark.xfail
def test_func():
assert 0
""")
assert "xfail" in rep.keywords
assert not pdblist
def test_pdb_on_skip(self, testdir, pdblist):
rep = runpdb_and_get_report(testdir, """
import pytest
def test_func():
pytest.skip("hello")
""")
assert rep.skipped
assert len(pdblist) == 0
def test_pdb_on_BdbQuit(self, testdir, pdblist):
rep = runpdb_and_get_report(testdir, """
import bdb
def test_func():
raise bdb.BdbQuit
""")
assert rep.failed
assert len(pdblist) == 0
def test_pdb_interaction(self, testdir):
p1 = testdir.makepyfile("""
def test_1():
i = 0
assert i == 1
""")
child = testdir.spawn_pytest("--pdb %s" % p1)
child.expect(".*def test_1")
child.expect(".*i = 0")
child.expect("(Pdb)")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
assert "def test_1" not in rest
self.flush(child)
@staticmethod
def flush(child):
if platform.system() == 'Darwin':
return
if child.isalive():
child.wait()
def test_pdb_unittest_postmortem(self, testdir):
p1 = testdir.makepyfile("""
import unittest
class Blub(unittest.TestCase):
def tearDown(self):
self.filename = None
def test_false(self):
self.filename = 'debug' + '.me'
assert 0
""")
child = testdir.spawn_pytest("--pdb %s" % p1)
child.expect('(Pdb)')
child.sendline('p self.filename')
child.sendeof()
rest = child.read().decode("utf8")
assert 'debug.me' in rest
self.flush(child)
def test_pdb_unittest_skip(self, testdir):
"""Test for issue #2137"""
p1 = testdir.makepyfile("""
import unittest
@unittest.skipIf(True, 'Skipping also with pdb active')
class MyTestCase(unittest.TestCase):
def test_one(self):
assert 0
""")
child = testdir.spawn_pytest("-rs --pdb %s" % p1)
child.expect('Skipping also with pdb active')
child.expect('1 skipped in')
child.sendeof()
self.flush(child)
def test_pdb_print_captured_stdout(self, testdir):
p1 = testdir.makepyfile("""
def test_1():
print("get\\x20rekt")
assert False
""")
child = testdir.spawn_pytest("--pdb %s" % p1)
child.expect("captured stdout")
child.expect("get rekt")
child.expect("(Pdb)")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
assert "get rekt" not in rest
self.flush(child)
def test_pdb_print_captured_stderr(self, testdir):
p1 = testdir.makepyfile("""
def test_1():
import sys
sys.stderr.write("get\\x20rekt")
assert False
""")
child = testdir.spawn_pytest("--pdb %s" % p1)
child.expect("captured stderr")
child.expect("get rekt")
child.expect("(Pdb)")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
assert "get rekt" not in rest
self.flush(child)
def test_pdb_dont_print_empty_captured_stdout_and_stderr(self, testdir):
p1 = testdir.makepyfile("""
def test_1():
assert False
""")
child = testdir.spawn_pytest("--pdb %s" % p1)
child.expect("(Pdb)")
output = child.before.decode("utf8")
child.sendeof()
assert "captured stdout" not in output
assert "captured stderr" not in output
self.flush(child)
def test_pdb_interaction_exception(self, testdir):
p1 = testdir.makepyfile("""
import pytest
def globalfunc():
pass
def test_1():
pytest.raises(ValueError, globalfunc)
""")
child = testdir.spawn_pytest("--pdb %s" % p1)
child.expect(".*def test_1")
child.expect(".*pytest.raises.*globalfunc")
child.expect("(Pdb)")
child.sendline("globalfunc")
child.expect(".*function")
child.sendeof()
child.expect("1 failed")
self.flush(child)
def test_pdb_interaction_on_collection_issue181(self, testdir):
p1 = testdir.makepyfile("""
import pytest
xxx
""")
child = testdir.spawn_pytest("--pdb %s" % p1)
# child.expect(".*import pytest.*")
child.expect("(Pdb)")
child.sendeof()
child.expect("1 error")
self.flush(child)
def test_pdb_interaction_on_internal_error(self, testdir):
testdir.makeconftest("""
def pytest_runtest_protocol():
0/0
""")
p1 = testdir.makepyfile("def test_func(): pass")
child = testdir.spawn_pytest("--pdb %s" % p1)
# child.expect(".*import pytest.*")
child.expect("(Pdb)")
child.sendeof()
self.flush(child)
def test_pdb_interaction_capturing_simple(self, testdir):
p1 = testdir.makepyfile("""
import pytest
def test_1():
i = 0
print ("hello17")
pytest.set_trace()
x = 3
""")
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.expect("x = 3")
child.expect("(Pdb)")
child.sendeof()
rest = child.read().decode("utf-8")
assert "1 failed" in rest
assert "def test_1" in rest
assert "hello17" in rest # out is captured
self.flush(child)
def test_pdb_set_trace_interception(self, testdir):
p1 = testdir.makepyfile("""
import pdb
def test_1():
pdb.set_trace()
""")
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.expect("(Pdb)")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
assert "reading from stdin while output" not in rest
self.flush(child)
def test_pdb_and_capsys(self, testdir):
p1 = testdir.makepyfile("""
import pytest
def test_1(capsys):
print ("hello1")
pytest.set_trace()
""")
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.send("capsys.readouterr()\n")
child.expect("hello1")
child.sendeof()
child.read()
self.flush(child)
def test_set_trace_capturing_afterwards(self, testdir):
p1 = testdir.makepyfile("""
import pdb
def test_1():
pdb.set_trace()
def test_2():
print ("hello")
assert 0
""")
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.send("c\n")
child.expect("test_2")
child.expect("Captured")
child.expect("hello")
child.sendeof()
child.read()
self.flush(child)
def test_pdb_interaction_doctest(self, testdir):
p1 = testdir.makepyfile("""
import pytest
def function_1():
'''
>>> i = 0
>>> assert i == 1
'''
""")
child = testdir.spawn_pytest("--doctest-modules --pdb %s" % p1)
child.expect("(Pdb)")
child.sendline('i')
child.expect("0")
child.expect("(Pdb)")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
self.flush(child)
def test_pdb_interaction_capturing_twice(self, testdir):
p1 = testdir.makepyfile("""
import pytest
def test_1():
i = 0
print ("hello17")
pytest.set_trace()
x = 3
print ("hello18")
pytest.set_trace()
x = 4
""")
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.expect("x = 3")
child.expect("(Pdb)")
child.sendline('c')
child.expect("x = 4")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
assert "def test_1" in rest
assert "hello17" in rest # out is captured
assert "hello18" in rest # out is captured
self.flush(child)
def test_pdb_used_outside_test(self, testdir):
p1 = testdir.makepyfile("""
import pytest
pytest.set_trace()
x = 5
""")
child = testdir.spawn("%s %s" % (sys.executable, p1))
child.expect("x = 5")
child.sendeof()
self.flush(child)
def test_pdb_used_in_generate_tests(self, testdir):
p1 = testdir.makepyfile("""
import pytest
def pytest_generate_tests(metafunc):
pytest.set_trace()
x = 5
def test_foo(a):
pass
""")
child = testdir.spawn_pytest(str(p1))
child.expect("x = 5")
child.sendeof()
self.flush(child)
def test_pdb_collection_failure_is_shown(self, testdir):
p1 = testdir.makepyfile("xxx")
result = testdir.runpytest_subprocess("--pdb", p1)
result.stdout.fnmatch_lines([
"*NameError*xxx*",
"*1 error*",
])
def test_enter_pdb_hook_is_called(self, testdir):
testdir.makeconftest("""
def pytest_enter_pdb(config):
assert config.testing_verification == 'configured'
print('enter_pdb_hook')
def pytest_configure(config):
config.testing_verification = 'configured'
""")
p1 = testdir.makepyfile("""
import pytest
def test_foo():
pytest.set_trace()
""")
child = testdir.spawn_pytest(str(p1))
child.expect("enter_pdb_hook")
child.send('c\n')
child.sendeof()
self.flush(child)
def test_pdb_custom_cls(self, testdir, custom_pdb_calls):
p1 = testdir.makepyfile("""xxx """)
result = testdir.runpytest_inprocess(
"--pdb", "--pdbcls=_pytest:_CustomPdb", p1)
result.stdout.fnmatch_lines([
"*NameError*xxx*",
"*1 error*",
])
assert custom_pdb_calls == ["init", "reset", "interaction"]
def test_pdb_custom_cls_without_pdb(self, testdir, custom_pdb_calls):
p1 = testdir.makepyfile("""xxx """)
result = testdir.runpytest_inprocess(
"--pdbcls=_pytest:_CustomPdb", p1)
result.stdout.fnmatch_lines([
"*NameError*xxx*",
"*1 error*",
])
assert custom_pdb_calls == []
def test_pdb_custom_cls_with_settrace(self, testdir, monkeypatch):
testdir.makepyfile(custom_pdb="""
class CustomPdb(object):
def set_trace(*args, **kwargs):
print('custom set_trace>')
""")
p1 = testdir.makepyfile("""
import pytest
def test_foo():
pytest.set_trace()
""")
monkeypatch.setenv('PYTHONPATH', str(testdir.tmpdir))
child = testdir.spawn_pytest("--pdbcls=custom_pdb:CustomPdb %s" % str(p1))
child.expect('custom set_trace>')
self.flush(child)
|
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import socket
import sys
import tempfile
import time
from rmake.build import buildjob, buildtrove
from rmake.subscribers import xmlrpc
def _getUri(client):
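"""Pick a URI for the event listener: a throwaway unix-domain socket path when
the client itself talks over unix:// (or has a non-string uri), otherwise an
http URI on this host. The temp path is returned so callers can clean it up."""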
if not isinstance(client.uri, str) or client.uri.startswith('unix://'):
fd, tmpPath = tempfile.mkstemp()
os.close(fd)
uri = 'unix://' + tmpPath
else:
host = socket.gethostname()
uri = 'http://%s' % host
tmpPath = None
return uri, tmpPath
def monitorJob(client, jobId, showTroveDetails=False, showBuildLogs=False,
exitOnFinish=None, uri=None, serve=True, out=None,
displayClass=None):
if not uri:
uri, tmpPath = _getUri(client)
else:
tmpPath = None
if not displayClass:
displayClass = JobLogDisplay
try:
display = displayClass(client, showBuildLogs=showBuildLogs, out=out,
exitOnFinish=exitOnFinish)
client = client.listenToEvents(uri, jobId, display,
showTroveDetails=showTroveDetails,
serve=serve)
return client
finally:
if serve and tmpPath:
os.remove(tmpPath)
def waitForJob(client, jobId, uri=None, serve=True):
if not uri:
uri, tmpPath = _getUri(client)
else:
tmpPath = None
try:
display = SilentDisplay(client)
display._primeOutput(jobId)
return client.listenToEvents(uri, jobId, display, serve=serve)
finally:
if tmpPath:
os.remove(tmpPath)
class _AbstractDisplay(xmlrpc.BasicXMLRPCStatusSubscriber):
def __init__(self, client, showBuildLogs=True, out=None,
exitOnFinish=True):
self.client = client
self.finished = False
self.exitOnFinish = True # always exit when finished, ignoring the exitOnFinish argument
self.showBuildLogs = showBuildLogs
if not out:
out = sys.stdout
self.out = out
def close(self):
pass
def _serveLoopHook(self):
pass
def _msg(self, msg, *args):
self.out.write('[%s] %s\n' % (time.strftime('%X'), msg))
self.out.flush()
def _jobStateUpdated(self, jobId, state, status):
isFinished = (state in (buildjob.JOB_STATE_FAILED,
buildjob.JOB_STATE_BUILT))
if isFinished:
self._setFinished()
def _setFinished(self):
self.finished = True
def _isFinished(self):
return self.finished
def _shouldExit(self):
return self._isFinished() and self.exitOnFinish
def _primeOutput(self, jobId):
job = self.client.getJob(jobId, withTroves=False)
if job.isFinished():
self._setFinished()
class SilentDisplay(_AbstractDisplay):
pass
class JobLogDisplay(_AbstractDisplay):
def __init__(self, client, showBuildLogs=True, out=None,
exitOnFinish=None):
_AbstractDisplay.__init__(self, client, out=out,
showBuildLogs=showBuildLogs,
exitOnFinish=exitOnFinish)
self.buildingTroves = {}
def _tailBuildLog(self, jobId, troveTuple):
mark = self.buildingTroves.get((jobId, troveTuple), [0])[0]
self.buildingTroves[jobId, troveTuple] = [mark, True]
self.out.write('Tailing %s build log:\n\n' % troveTuple[0])
def _stopTailing(self, jobId, troveTuple):
mark = self.buildingTroves.get((jobId, troveTuple), [0])[0]
self.buildingTroves[jobId, troveTuple] = [ mark, False ]
def _serveLoopHook(self):
if not self.buildingTroves:
return
for (jobId, troveTuple), (mark, tail) in self.buildingTroves.items():
if not tail:
continue
try:
moreData, data, mark = self.client.getTroveBuildLog(jobId,
troveTuple,
mark)
except:
moreData = True
data = ''
self.out.write(data)
if not moreData:
del self.buildingTroves[jobId, troveTuple]
else:
self.buildingTroves[jobId, troveTuple][0] = mark
def _jobTrovesSet(self, jobId, troveData):
self._msg('[%d] - job troves set' % jobId)
def _jobStateUpdated(self, jobId, state, status):
_AbstractDisplay._jobStateUpdated(self, jobId, state, status)
state = buildjob._getStateName(state)
if self._isFinished():
self._serveLoopHook()
self._msg('[%d] - State: %s' % (jobId, state))
if status:
self._msg('[%d] - %s' % (jobId, status))
def _jobLogUpdated(self, jobId, state, status):
self._msg('[%d] %s' % (jobId, status))
def _troveStateUpdated(self, (jobId, troveTuple), state, status):
isBuilding = (state in (buildtrove.TROVE_STATE_BUILDING,
buildtrove.TROVE_STATE_RESOLVING))
state = buildtrove._getStateName(state)
self._msg('[%d] - %s - State: %s' % (jobId, troveTuple[0], state))
if status:
self._msg('[%d] - %s - %s' % (jobId, troveTuple[0], status))
if isBuilding and self.showBuildLogs:
self._tailBuildLog(jobId, troveTuple)
else:
self._stopTailing(jobId, troveTuple)
def _troveLogUpdated(self, (jobId, troveTuple), state, status):
state = buildtrove._getStateName(state)
self._msg('[%d] - %s - %s' % (jobId, troveTuple[0], status))
def _trovePreparingChroot(self, (jobId, troveTuple), host, path):
if host == '_local_':
msg = 'Chroot at %s' % path
else:
msg = 'Chroot at Node %s:%s' % (host, path)
self._msg('[%d] - %s - %s' % (jobId, troveTuple[0], msg))
def _primeOutput(self, jobId):
logMark = 0
while True:
newLogs = self.client.getJobLogs(jobId, logMark)
if not newLogs:
break
logMark += len(newLogs)
for (timeStamp, message, args) in newLogs:
print '[%s] [%s] - %s' % (timeStamp, jobId, message)
BUILDING = buildtrove.TROVE_STATE_BUILDING
troveTups = self.client.listTrovesByState(jobId, BUILDING).get(BUILDING, [])
for troveTuple in troveTups:
self._tailBuildLog(jobId, troveTuple)
_AbstractDisplay._primeOutput(self, jobId)
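# A minimal sketch (not part of the original module): any class with the
# _AbstractDisplay interface can be handed to monitorJob() via displayClass,
# e.g. to change how status lines are rendered.
class TimestampedJobLogDisplay(JobLogDisplay):
    def _msg(self, msg, *args):
        self.out.write('%s | %s\n' % (time.asctime(), msg))
        self.out.flush()
# usage (hypothetical client/jobId): monitorJob(client, jobId, displayClass=TimestampedJobLogDisplay)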
|
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorx as tx
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import SimpleRNNCell, LSTMCell, GRUCell, Attention
def test_rnn_cell():
n_inputs = 3
n_units = 4
batch_size = 2
inputs = tx.Input(n_units=n_inputs)
rnn0 = tx.RNNCell(inputs, n_units)
# Keras RNN cell
rnn1 = SimpleRNNCell(n_units)
state = rnn1.get_initial_state(inputs, batch_size=1)
assert tx.tensor_equal(state, rnn0.previous_state[0]())
inputs.value = tf.ones([batch_size, n_inputs])
res1 = rnn1(inputs, (state,))
rnn1.kernel = rnn0.layer_state.w.weights
rnn1.bias = rnn0.layer_state.w.bias
rnn1.recurrent_kernel = rnn0.layer_state.u.weights
res2 = rnn1(inputs, (state,))
assert not tx.tensor_equal(res1[0], res2[0])
assert not tx.tensor_equal(res1[1], res2[1])
res0 = rnn0()
assert tx.tensor_equal(res2[0], res0)
def test_lstm_cell():
n_inputs = 3
n_units = 4
batch_size = 1
inputs = tx.Input(n_units=n_inputs)
lstm0 = tx.LSTMCell(inputs, n_units,
activation=tf.tanh,
gate_activation=tf.sigmoid,
forget_bias_init=tf.initializers.ones(),
)
lstm1 = LSTMCell(n_units,
activation='tanh',
recurrent_activation='sigmoid',
unit_forget_bias=True,
implementation=2)
state0 = [s() for s in lstm0.previous_state]
# get_initial_state from keras returns either a tuple or a single state
# (see `test_rnn_cell`), but the __call__ API requires an iterable
state1 = lstm1.get_initial_state(inputs, batch_size=1)
assert tx.tensor_equal(state1, state0)
inputs.value = tf.ones([batch_size, n_inputs])
res1 = lstm1(inputs, state0)
res1_ = lstm1(inputs, state0)
for r1, r2 in zip(res1, res1_):
assert tx.tensor_equal(r1, r2)
# the only difference is that keras kernels are fused together
kernel = tf.concat([w.weights.value() for w in lstm0.layer_state.w], axis=-1)
w_i, _, _, _ = tf.split(kernel, 4, axis=1)
assert tx.tensor_equal(w_i, lstm0.w[0].weights.value())
recurrent_kernel = tf.concat([u.weights for u in lstm0.layer_state.u], axis=-1)
bias = tf.concat([w.bias for w in lstm0.layer_state.w], axis=-1)
assert tx.tensor_equal(tf.shape(kernel), tf.shape(lstm1.kernel))
assert tx.tensor_equal(tf.shape(recurrent_kernel), tf.shape(lstm1.recurrent_kernel))
assert tx.tensor_equal(tf.shape(bias), tf.shape(lstm1.bias))
lstm1.kernel = kernel
lstm1.recurrent_kernel = recurrent_kernel
lstm1.bias = bias
res2 = lstm1(inputs, state0)
for i in range(len(res1)):
assert not tx.tensor_equal(res1[i], res2[i])
res0 = lstm0()
assert tx.tensor_equal(res0, res2[0])
def test_lstm_rnn_stateful():
n_units = 4
batch_size = 12
seq_size = 3
n_features = 16
embed_size = 6
feature_indices = np.random.randint(0, high=n_features, size=[batch_size, seq_size])
inputs = tx.Input(init_value=feature_indices,
n_units=seq_size,
dtype=tf.int32)
lookup = tx.Lookup(inputs, seq_size=seq_size, embedding_shape=[n_features, embed_size])
seq = lookup.permute_batch_time()
# (N, T, M)
# print(np.shape(seq()))
lstm_cell = tx.LSTMCell.config(n_units=n_units,
activation=tf.tanh,
gate_activation=tf.sigmoid,
forget_bias_init=tf.initializers.ones()
)
# state0 = [s() for s in lstm0.previous_state]
# inputs.value = tf.ones([batch_size, n_features])
# res1 = lstm1(inputs, state0)
# res1_ = lstm1(inputs, state0)
lstm_layer = tx.RNN(input_seq=seq, cell_config=lstm_cell, stateful=True, return_state=True)
state0 = [s() for s in lstm_layer.previous_state]
lstm_layer()
state1 = [s() for s in lstm_layer.previous_state]
for i in range(len(state0)):
assert not tx.tensor_equal(state0[i], state1[i])
assert np.shape(state1[0]) == (batch_size, n_units)
tx_cell = lstm_layer.cell
kernel = tf.concat([w.weights.value() for w in tx_cell.w], axis=-1)
recurrent_kernel = tf.concat([u.weights.value() for u in tx_cell.u], axis=-1)
bias = tf.concat([w.bias.value() for w in tx_cell.w], axis=-1)
# create a keras LSTM and update it with the same cell state;
# since LSTM initializes the cell state internally, this was
# the only way to initialize that state from the tensorx state
class FromOther(tf.keras.initializers.Initializer):
def __init__(self, value):
self.value = value
def __call__(self, shape, dtype=None):
if not tf.TensorShape(shape).is_compatible_with(tf.shape(self.value)):
raise Exception(f"init called with shape {shape} != value shape {tf.shape(self.value)}")
else:
return self.value
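# FromOther is used below as the kernel/recurrent/bias initializer so that the
# keras LSTM layer starts from exactly the weights already held by the tensorx cell.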
# seq = lookup()
# seq = tf.transpose(seq, [1, 0, 2])
# lstm_cell = tf.compat.v1.nn.rnn_cell.LSTMCell(num_units=n_units)
# lstm_cell.build(np.shape(seq[0]))
# full_kernel = tf.concat([kernel, recurrent_kernel], axis=0)
# lstm_cell = (full_kernel, bias)
# lstm_cell.weights[0] = full_kernel
# lstm_cell.weights[1] = bias
# print(type())
# print(lstm_cell(seq[0],state=tuple(state1)))
# rnn = tf.keras.layers.RNN(cell=lstm_cell,
# dtype=tf.float32,
# return_sequences=True,
# time_major=True,
# unroll=False)
# print(rnn(seq))
# print(lstm_layer())
# tf_lstm_output = rnn(seq, tuple(state1))
# tx_lstm_output = lstm_layer()
keras_lstm = tf.keras.layers.LSTM(units=n_units,
activation=tf.tanh,
kernel_initializer=FromOther(kernel.numpy()),
recurrent_initializer=FromOther(recurrent_kernel.numpy()),
bias_initializer=FromOther(bias.numpy()),
recurrent_activation=tf.sigmoid,
unit_forget_bias=False,
implementation=2,
time_major=True,
unroll=True,
return_sequences=True,
stateful=False)
# Lookup output is batch-major [batch x seq_size x embed_size]; keras_lstm is
# time_major, so it is fed the permuted time-major seq instead of lookup directly.
keras_lstm_output = keras_lstm(seq(), initial_state=tuple(state1))
assert tx.tensor_equal(keras_lstm.cell.kernel.value(), kernel)
assert tx.tensor_equal(keras_lstm.cell.recurrent_kernel.value(), recurrent_kernel)
assert tx.tensor_equal(keras_lstm.cell.bias.value(), bias)
tx_lstm_output = lstm_layer()[0]
assert tx.tensor_all_close(keras_lstm_output, tx_lstm_output)
def test_gru_cell():
n_inputs = 3
n_units = 4
batch_size = 1
inputs = tx.Input(n_units=n_inputs)
gru0 = tx.GRUCell(inputs, n_units,
activation=tf.tanh,
gate_activation=tf.sigmoid)
# reset_after=True would apply the reset gate after the matrix multiplication and
# use recurrent biases (the cuDNN-compatible variant); it is disabled below so the
# keras cell matches the tensorx GRUCell
gru1 = GRUCell(n_units,
activation='tanh',
recurrent_activation='sigmoid',
reset_after=False,
implementation=1,
use_bias=True)
assert not hasattr(gru1, "kernel")
state0 = [s() for s in gru0.previous_state]
# get_initial_state from keras returns either a tuple or a single state
# (see test_rnn_cell), but the __call__ API requires an iterable
state1 = gru1.get_initial_state(inputs, batch_size=1)
assert tx.tensor_equal(state1, state0[0])
inputs.value = tf.ones([batch_size, n_inputs])
res1 = gru1(inputs, state0)
res1_ = gru1(inputs, state0)
for r1, r2 in zip(res1, res1_):
assert tx.tensor_equal(r1, r2)
# the only difference is that keras kernels are fused together
kernel = tf.concat([w.weights.value() for w in gru0.layer_state.w], axis=-1)
recurrent_kernel = tf.concat([u.weights for u in gru0.layer_state.u], axis=-1)
bias = tf.concat([w.bias for w in gru0.layer_state.w], axis=-1)
assert tx.same_shape(kernel, gru1.kernel)
assert tx.same_shape(recurrent_kernel, gru1.recurrent_kernel)
assert tx.same_shape(bias, gru1.bias)
gru1.kernel = kernel
gru1.recurrent_kernel = recurrent_kernel
gru1.bias = bias
res2 = gru1(inputs, state0)
for i in range(len(res1)):
assert not tx.tensor_equal(res1[i], res2[i])
res0 = gru0()
# res0_ = gru0.state[0]()
assert tx.tensor_equal(res0, res2[0])
def test_conv1d():
n_features = 3
embed_size = 128
seq_size = 3
batch_size = 2
inputs = tx.Constant(np.random.random([batch_size, seq_size]), n_units=seq_size, dtype=tf.int32)
emb = tx.Lookup(inputs, seq_size=seq_size, embedding_shape=[n_features, embed_size])
seq = emb()
n_units = 100
filter_size = 4
cnn = tf.keras.layers.Conv1D(
filters=n_units,
kernel_size=filter_size,
padding='same')
res = cnn(seq)
cnn2 = tx.Conv1D(emb, n_units=100, filter_size=filter_size)
res2 = cnn2(seq)
assert len(cnn.variables) == len(cnn2.variables)
cnn.kernel = cnn2.filters
cnn.bias = cnn2.bias
res3 = cnn(seq)
assert not tx.tensor_equal(res, res2)
assert tx.tensor_equal(res2, res3)
def test_attention():
n_features = 3
embed_size = 8
seq_size = 3
batch_size = 2
inputs = tx.Constant(np.random.random([batch_size, seq_size]), n_units=seq_size, dtype=tf.int32)
emb = tx.Lookup(inputs, seq_size=seq_size, embedding_shape=[n_features, embed_size])
seq = emb()
# keras attention doesn't have multiple heads
attention = Attention(use_scale=False)
res = attention([seq, seq, seq])
attention2 = tx.MHAttention(emb, emb, emb, n_units=embed_size, n_heads=1)
assert len(attention2.variables) == 3
attention2.wq = tx.Linear(emb, n_units=None,
weights=tf.linalg.eye(embed_size, embed_size),
add_bias=False)
attention2.wk = tx.Linear(emb, n_units=None,
weights=tf.linalg.eye(embed_size, embed_size),
add_bias=False)
attention2.wv = tx.Linear(emb, n_units=None,
weights=tf.linalg.eye(embed_size, embed_size),
add_bias=False)
assert tx.tensor_equal(attention2.wq(seq), seq)
res2 = attention2()
g = tx.Graph.build(inputs=emb, outputs=attention2)
g = g.as_function(ord_inputs=emb, ord_outputs=attention2)
res3 = g(seq)
assert tx.tensor_equal(res, res2)
assert tx.tensor_equal(res, res3)
|
|
#!/usr/bin/python
import json
import sys
import os
import time
import random
import getpass
import re
import atexit
import paramiko
import md5
import errno
import PerfUtils
import requests
class H2OUseCloudNode:
"""
A class representing one node in an H2O cloud which was specified by the user.
Don't try to build or tear down this kind of node.
use_ip: The given ip of the cloud.
use_port: The given port of the cloud.
"""
def __init__(self, use_ip, use_port):
self.use_ip = use_ip
self.use_port = use_port
def start(self):
pass
def stop(self):
pass
def terminate(self):
pass
def get_ip(self):
return self.use_ip
def get_port(self):
return self.use_port
class H2OUseCloud:
"""
A class representing an H2O cloud which was specified by the user.
Don't try to build or tear down this kind of cloud.
"""
def __init__(self, cloud_num, use_ip, use_port):
self.cloud_num = cloud_num
self.use_ip = use_ip
self.use_port = use_port
self.nodes = []
node = H2OUseCloudNode(self.use_ip, self.use_port)
self.nodes.append(node)
def start(self):
pass
def wait_for_cloud_to_be_up(self):
pass
def stop(self):
pass
def terminate(self):
pass
def get_ip(self):
node = self.nodes[0]
return node.get_ip()
def get_port(self):
node = self.nodes[0]
return node.get_port()
def all_ips(self):
res = []
for node in self.nodes:
res += [node.get_ip()]
return ','.join(res)
def all_pids(self):
res = []
for node in self.nodes:
res += [node.request_pid()]
return ','.join(res)
class H2OCloudNode:
"""
A class representing one node in an H2O cloud.
Note that the base_port is only a request for H2O.
H2O may choose to ignore our request and pick any port it likes.
So we have to scrape the real port number from stdout as part of cloud startup.
port: The actual port chosen at run time.
pid: The process id of the node.
output_file_name: Where stdout and stderr go. They are merged.
child: subprocess.Popen object.
terminated: Only from a signal. Not normal shutdown.
"""
def __init__(self, cloud_num, nodes_per_cloud, node_num, cloud_name, h2o_jar, ip, base_port,
xmx, output_dir, isEC2):
"""
Create a node in a cloud.
@param cloud_num: Dense 0-based cloud index number.
@param nodes_per_cloud: How many H2O java instances are in a cloud. Clouds are symmetric.
@param node_num: This node's dense 0-based node index number.
@param cloud_name: The H2O -name command-line argument.
@param h2o_jar: Path to H2O jar file.
@param base_port: The starting port number we are trying to get our nodes to listen on.
@param xmx: Java memory parameter.
@param output_dir: The directory where we can create an output file for this process.
@param isEC2: Whether or not this node is an EC2 node.
@return: The node object.
"""
self.cloud_num = cloud_num
self.nodes_per_cloud = nodes_per_cloud
self.node_num = node_num
self.cloud_name = cloud_name
self.h2o_jar = h2o_jar
self.ip = ip
self.base_port = base_port
self.xmx = xmx
self.output_dir = output_dir
self.isEC2 = isEC2
self.addr = self.ip
self.http_addr = self.ip
self.username = getpass.getuser()
self.password = getpass.getuser()
if self.isEC2:
self.username = 'ubuntu'
self.password = None
self.ssh = paramiko.SSHClient()
policy = paramiko.AutoAddPolicy()
self.ssh.set_missing_host_key_policy(policy)
self.ssh.load_system_host_keys()
if self.password is None:
self.ssh.connect(self.addr, username=self.username)
else:
self.ssh.connect(self.addr, username=self.username, password=self.password)
# keep the connection alive - send a keepalive packet every 5 minutes
self.ssh.get_transport().set_keepalive(300)
self.uploaded = {}
self.port = -1
self.output_file_name = ""
self.error_file_name = ""
self.terminated = False
# Choose my base port number here. All math is done here. Every node has the same
# base_port and calculates its own my_base_port.
ports_per_node = 2
self.my_base_port = \
int(self.base_port) + \
int(self.cloud_num * self.nodes_per_cloud * ports_per_node) + \
int(self.node_num * ports_per_node)
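# Worked example (hypothetical values): base_port=54321, cloud_num=1,
# nodes_per_cloud=4, node_num=2 -> my_base_port = 54321 + 1*4*2 + 2*2 = 54333.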
def open_channel(self):
ch = self.ssh.get_transport().open_session()
ch.get_pty() # force the process to die without the connection
return ch
def get_ticks(self):
"""
Get process_total_ticks, system_total_ticks, sys_idle_ticks.
"""
# poll on the url until we get a valid http response
max_retries = 5
m = 0
got_url_sys = False
got_url_proc = False
while m < max_retries:
if m != 0:
print "DEBUG: Restarting serve_proc!"
print "Stopping service"
cmd_serve = "ps -efww | grep 0xdiag | awk '{print $2}' | xargs kill"
tryKill = self.open_channel()
tryKill.exec_command(cmd_serve)
print "Starting service"
cmd_serve = ["python", "/home/0xdiag/serve_proc.py"]
self.channelServe = self.open_channel()
self.channelServe.exec_command(' '.join(cmd_serve))
r_sys = ""
r_proc = ""
print "Performing try : " + str(m) + " out of total tries = " + str(max_retries)
url_sys = "http://{}:{}/stat".format(self.ip, 8000)
url_proc = "http://{}:{}/{}/stat".format(self.ip, 8000, self.pid)
try:
r_sys = requests.get(url_sys, timeout=10).text.split('\n')[0]
r_proc = requests.get(url_proc, timeout=10).text.strip().split()
except:
m += 1
continue # usually timeout, but just catch all and continue, error out downstream.
if r_sys == "" or r_proc == "":
m += 1
continue
if not got_url_sys:
if "404" not in r_sys:
got_url_sys = True
if not got_url_proc:
if "404" not in ' '.join(r_proc):
got_url_proc = True
if got_url_proc and got_url_sys:
break
m += 1
time.sleep(1)
if not (got_url_proc and got_url_sys):
print "Max retries on /proc scrape exceeded! Did the JVM properly start?"
return -1
#raise Exception("Max retries on /proc scrape exceeded! Did the JVM properly start?")
url_sys = "http://{}:{}/stat".format(self.ip, 8000)
url_proc = "http://{}:{}/{}/stat".format(self.ip, 8000, self.pid)
r_sys = requests.get(url_sys, timeout=120).text.split('\n')[0]
r_proc = requests.get(url_proc, timeout=120).text.strip().split()
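# /proc/stat's first line is "cpu user nice system idle ..." (all in clock ticks);
# /proc/<pid>/stat fields 14 and 15 (1-based) are the process utime and stime,
# i.e. indices 13 and 14 after split().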
sys_user = int(r_sys.split()[1])
sys_nice = int(r_sys.split()[2])
sys_syst = int(r_sys.split()[3])
sys_idle = int(r_sys.split()[4])
sys_total_ticks = sys_user + sys_nice + sys_syst + sys_idle
try:
print "DEBUGGING /proc scraped values served up: "
print r_proc
print " End of try 1."
proc_utime = int(r_proc[13])
proc_stime = int(r_proc[14])
process_total_ticks = proc_utime + proc_stime
except:
print "DEBUGGING /proc/<pid>/"
print "This is try 2... Try 1 failed!"
print "Did H2O shut down before this scrape occurred?"
print r_proc
print "End of try 2...."
r_proc = requests.get(url_proc).text.strip().split()
proc_utime = int(r_proc[13])
proc_stime = int(r_proc[14])
process_total_ticks = proc_utime + proc_stime
return {"process_total_ticks": process_total_ticks, "system_total_ticks": sys_total_ticks,
"system_idle_ticks": sys_idle}
def is_contaminated(self):
"""
Checks for contamination.
@return: 1 for contamination, 0 for _no_ contamination
"""
cur_ticks = self.get_ticks()
first_ticks = self.first_ticks
if cur_ticks != -1 and first_ticks != -1:
proc_delta = cur_ticks["process_total_ticks"] - first_ticks["process_total_ticks"]
sys_delta = cur_ticks["system_total_ticks"] - first_ticks["system_total_ticks"]
idle_delta = cur_ticks["system_idle_ticks"] - first_ticks["system_idle_ticks"]
sys_frac = 100 * (1 - idle_delta * 1. / sys_delta)
proc_frac = 100 * (proc_delta * 1. / sys_delta)
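# e.g. (hypothetical ticks) proc_delta=40, sys_delta=100, idle_delta=50
# -> proc_frac = 40%, sys_frac = 100*(1 - 50/100) = 50%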
print "DEBUG: sys_frac, proc_frac"
print sys_frac, proc_frac
print ""
print ""
# flag contamination when system-wide CPU usage exceeds this process's share by 5 or more percentage points
if proc_frac + 5 <= sys_frac:
self.contaminated = True
return 1
return 0
return 0
def start_remote(self):
"""
Start one node of H2O.
(Stash away the self.child and self.pid internally here.)
@return: none
"""
#upload flat_file
#upload aws_creds
#upload hdfs_config
cmd = ["java",
"-Xmx" + self.xmx,
#"-ea",
"-jar", self.uploaded[self.h2o_jar],
"-name", self.cloud_name,
"-baseport", str(self.my_base_port)]
# Add S3N credentials to cmd if they exist.
ec2_hdfs_config_file_name = os.path.expanduser("/home/spencer/.ec2/core-site.xml")
if os.path.exists(ec2_hdfs_config_file_name):
cmd.append("-hdfs_config")
cmd.append(ec2_hdfs_config_file_name)
self.output_file_name = "java_" + str(self.cloud_num) + "_" + str(self.node_num)
self.error_file_name = "java_" + str(self.cloud_num) + "_" + str(self.node_num)
cmd = ' '.join(cmd)
self.channel = self.open_channel()
self.stdouterr = "" # somehow cat outfile & errorfile?
outfd, self.output_file_name = PerfUtils.tmp_file(prefix="remoteH2O-" + self.output_file_name, suffix=".out",
directory=self.output_dir)
errfd, self.error_file_name = PerfUtils.tmp_file(prefix="remoteH2O-" + self.error_file_name, suffix=".err",
directory=self.output_dir)
PerfUtils.drain(self.channel.makefile(), outfd)
PerfUtils.drain(self.channel.makefile_stderr(), errfd)
self.channel.exec_command(cmd)
cmd_serve = ["python", "/home/0xdiag/serve_proc.py"]
self.channelServe = self.open_channel()
self.channelServe.exec_command(' '.join(cmd_serve))
@atexit.register
def kill_process():
try:
try:
self.stop_remote()
self.channel.exec_command('exit')
self.ssh.close()
except:
pass
try:
self.stop_local()
except:
pass
except OSError:
pass
print "+ CMD: " + cmd
def request_pid(self):
"""
Use a request for /Cloud.json and look for pid.
"""
name = self.ip + ":" + self.port
time.sleep(3)
r = requests.get("http://" + name + "/Cloud.json")
name = "/" + name
j = json.loads(r.text)
for node in j["nodes"]:
if node["name"] == name:
return node["PID"]
def scrape_port_from_stdout_remote(self):
"""
Look at the stdout log and figure out which port the JVM chose.
Write this to self.port.
This call is blocking.
Exit if this fails.
@return: none
"""
retries = 30
while retries > 0:
if self.terminated:
return
f = open(self.output_file_name, "r")
s = f.readline()
while len(s) > 0:
if self.terminated:
return
match_groups = re.search(r"Listening for HTTP and REST traffic on http://(\S+):(\d+)", s)
if match_groups is not None:
port = match_groups.group(2)
if port is not None:
self.port = port
self.pid = self.request_pid()
f.close()
print("H2O Cloud {} Node {} started with output file {}".format(self.cloud_num,
self.node_num,
self.output_file_name))
time.sleep(1)
self.first_ticks = self.get_ticks()
return
s = f.readline()
f.close()
retries -= 1
if self.terminated:
return
time.sleep(1)
print("")
print("ERROR: Too many retries starting cloud.")
print("")
sys.exit(1)
def stop_remote(self):
"""
Normal node shutdown.
Ignore failures for now.
@return: none
"""
try:
requests.get("http://" + self.ip + ":" + self.port + "/Shutdown.html", timeout=1)
try:
r2 = requests.get("http://" + self.ip + ":" + self.port + "/Cloud.html", timeout=2)
except Exception, e:
pass
except Exception, e:
pass
try:
try:
self.channel.exec_command('exit')
self.ssh.close()
except:
pass
try:
self.stop_local()
except:
pass
except OSError:
pass
try:
requests.get("http://" + self.ip + ":" + self.port + "/Shutdown.html", timeout=1)
except Exception, e:
print "Got Exception trying to shutdown H2O:"
print e
pass
print "Successfully shutdown h2o!"
self.pid = -1
def stop_local(self):
"""
Normal node shutdown.
Ignore failures for now.
@return: none
"""
#TODO: terminate self.child
try:
requests.get("http://" + self.ip + ":" + self.port + "/Shutdown.html")
except Exception, e:
pass
self.pid = -1
def terminate_remote(self):
"""
Terminate a running node. (Due to a signal.)
@return: none
"""
self.terminated = True
self.stop_remote()
def terminate_local(self):
"""
Terminate a running node. (Due to a signal.)
@return: none
"""
self.terminated = True
self.stop_local()
def get_ip(self):
""" Return the ip address this node is really listening on. """
return self.ip
def get_output_file_name(self):
""" Return the directory to the output file name. """
return self.output_file_name
def get_error_file_name(self):
""" Return the directory to the error file name. """
return self.error_file_name
def get_port(self):
""" Return the port this node is really listening on. """
return self.port
def __str__(self):
s = ""
s += " node {}\n".format(self.node_num)
s += " xmx: {}\n".format(self.xmx)
s += " my_base_port: {}\n".format(self.my_base_port)
s += " port: {}\n".format(self.port)
s += " pid: {}\n".format(self.pid)
return s
class H2OCloud:
"""
A class representing one of the H2O clouds.
"""
def __init__(self, cloud_num, hosts_in_cloud, nodes_per_cloud, h2o_jar, base_port, output_dir, isEC2, remote_hosts):
"""
Create a cloud.
See node definition above for argument descriptions.
@return: The cloud object.
"""
self.cloud_num = cloud_num
self.nodes_per_cloud = nodes_per_cloud
self.h2o_jar = h2o_jar
self.base_port = base_port
self.output_dir = output_dir
self.isEC2 = isEC2
self.remote_hosts = remote_hosts
self.hosts_in_cloud = hosts_in_cloud
# Randomly choose a five digit cloud number.
n = random.randint(10000, 99999)
user = getpass.getuser()
user = ''.join(user.split())
self.cloud_name = "H2O_perfTest_{}_{}".format(user, n)
self.nodes = []
self.jobs_run = 0
for node_num, node_ in enumerate(self.hosts_in_cloud):
node = H2OCloudNode(self.cloud_num, self.nodes_per_cloud,
node_num, self.cloud_name, self.h2o_jar,
node_['ip'],
node_['port'],
#self.base_port,
node_['memory_bytes'],
self.output_dir, isEC2)
self.nodes.append(node)
self.distribute_h2o()
def distribute_h2o(self):
"""
Distribute the H2O to the remote hosts.
@return: none.
"""
f = self.h2o_jar
def prog(sofar, total):
# Progress output clutters the Jenkins console log, so skip it for the jenkins user.
username = getpass.getuser()
if username != 'jenkins':
p = int(10.0 * sofar / total)
sys.stdout.write('\rUploading jar [%s%s] %02d%%' % ('#' * p, ' ' * (10 - p), 100 * sofar / total))
sys.stdout.flush()
for node in self.nodes:
m = md5.new()
m.update(open(f).read())
m.update(getpass.getuser())
dest = '/tmp/' + m.hexdigest() + "-" + os.path.basename(f)
print "Uploading h2o jar to: " + dest + "on " + node.ip
sftp = node.ssh.open_sftp()
try:
sftp.stat(dest)
print "Skipping upload of file {0}. File {1} exists on remote side!".format(f, dest)
except IOError, e:
if e.errno == errno.ENOENT:
sftp.put(f, dest, callback=prog)
finally:
sftp.close()
node.uploaded[f] = dest
# sys.stdout.flush()
def check_contaminated(self):
"""
Each node checks itself for contamination.
@return: [1, reason] if any node is contaminated, [0, " "] otherwise
"""
for node in self.nodes:
if node.is_contaminated():
return [1, "Node " + node.ip + " was contaminated."]
return [0, " "]
def start_remote(self):
"""
Start an H2O cloud remotely.
@return: none
"""
for node in self.nodes:
node.start_remote()
def start_local(self):
"""
Start H2O cloud.
The cloud is not up until wait_for_cloud_to_be_up() is called and returns.
@return: none
"""
if self.nodes_per_cloud > 1:
print("")
print("ERROR: Unimplemented: wait for cloud size > 1.")
print("")
sys.exit(1)
for node in self.nodes:
node.start_local()
def wait_for_cloud_to_be_up(self):
"""
Blocking call ensuring the cloud is available.
@return: none
"""
if self.remote_hosts:
self._scrape_port_from_stdout_remote()
else:
self._scrape_port_from_stdout()
def stop_remote(self):
"""
Normal cloud shutdown.
@return: none
"""
for node in self.nodes:
node.stop_remote()
def stop_local(self):
"""
Normal cloud shutdown.
@return: none
"""
for node in self.nodes:
node.stop_local()
def all_ips(self):
res = []
for node in self.nodes:
res += [node.get_ip()]
return ','.join(res)
def all_pids(self):
res = []
for node in self.nodes:
res += [node.request_pid()]
return ','.join(res)
def terminate_remote(self):
"""
Terminate a running cloud. (Due to a signal.)
@return: none
"""
for node in self.nodes:
node.terminate_remote()
def terminate_local(self):
"""
Terminate a running cloud. (Due to a signal.)
@return: none
"""
for node in self.nodes:
node.terminate_local()
def get_ip(self):
""" Return an ip to use to talk to this cloud. """
node = self.nodes[0]
return node.get_ip()
def get_port(self):
""" Return a port to use to talk to this cloud. """
node = self.nodes[0]
return node.get_port()
def _scrape_port_from_stdout(self):
for node in self.nodes:
node.scrape_port_from_stdout()
def _scrape_port_from_stdout_remote(self):
for node in self.nodes:
node.scrape_port_from_stdout_remote()
def __str__(self):
s = ""
s += "cloud {}\n".format(self.cloud_num)
s += " name: {}\n".format(self.cloud_name)
s += " jobs_run: {}\n".format(self.jobs_run)
for node in self.nodes:
s += str(node)
return s
|
|
# -*- coding: utf-8 -*-
# Copyright (C) 2016 Grace Cox (University of Liverpool)
#
# Released under the MIT license, a copy of which is located at the root of
# this project.
"""Module containing functions to parse World Data Centre (WDC) files.
Part of the MagPySV package for geomagnetic data analysis. This module provides
various functions to read, parse and manipulate the contents of World Data
Centre (WDC) formatted files containing geomagnetic data and output data to
comma separated values (CSV) files. Also contains functions to read output of
code used for the COV-OBS magnetic field model series by Gillet et al. (links
below).
"""
import datetime as dt
import glob
import os
import pandas as pd
import numpy as np
def wdc_parsefile(fname):
"""Load a WDC datafile and place the contents into a dataframe.
Load a datafile of WDC hourly geomagnetic data for a single observatory and
extract the contents. Parses the current WDC file format, but not the
previous format containing international quiet (Q) or disturbed (D) day
designation in place of the century field - only the newer format is
downloaded from the BGS servers. Detailed file format description
can be found at http://www.wdc.bgs.ac.uk/catalog/format.html
Args:
fname (str): path to a WDC datafile.
Returns:
data (pandas.DataFrame):
dataframe containing hourly geomagnetic data. First column is a
series of datetime objects (in the format yyyy-mm-dd hh:30:00) and
subsequent columns are the X, Y and Z components of the magnetic
field at the specified times.
"""
# New WDC file format
cols = [(0, 3), (3, 5), (5, 7), (7, 8), (8, 10), (14, 16),
(16, 20), (20, 116)]
col_names = [
'code', 'yr', 'month', 'component', 'day', 'century',
'base', 'hourly_values']
types = {
'code': str, 'yr': int, 'month': int, 'component': str,
'day': int, 'century': int, 'base': int, 'hourly_values': str}
data = pd.read_fwf(fname, colspecs=cols, names=col_names,
converters=types, header=None)
data['hourly_values'] = data['hourly_values'].apply(
separate_hourly_vals)
data = data.set_index(['code', 'yr', 'month', 'component', 'day',
'century', 'base'])['hourly_values'].apply(
pd.Series).stack()
data = data.reset_index()
data.columns = ['code', 'yr', 'month', 'component', 'day', 'century',
'base', 'hour', 'hourly_mean_temp']
data['hourly_mean_temp'] = data['hourly_mean_temp'].astype(float)
return data
def separate_hourly_vals(hourstring):
"""Separate individual hourly field means from the string containing all
24 values in the WDC file. Called by wdc_parsefile.
Args:
hourstring (str): string containing the hourly magnetic field means
parsed from a WDC file for a single day.
Returns:
hourly_vals_list (list):
list containing the hourly field values.
"""
n = 4
hourly_vals_list = [hourstring[i:i+n] for i in range(0, len(hourstring),
n)]
return hourly_vals_list
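# A minimal sketch of the chunking above, using a made-up 16-character hour string
# (four 4-character fields: 100, 101, the 9999 missing-value flag, 103).
def _example_separate_hourly_vals():
    hourstring = " 100 1019999 103"
    return separate_hourly_vals(hourstring)  # [' 100', ' 101', '9999', ' 103']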
def wdc_datetimes(data):
"""Create datetime objects from the fields extracted from a WDC datafile.
Args:
data (pandas.DataFrame): needs columns for century, year (yy format),
month, day and hour. Called by wdc_parsefile.
Returns:
data (pandas.DataFrame):
the same dataframe with a series of datetime objects (in the format
yyyy-mm-dd hh:30:00) in the first column.
"""
# Convert the century/yr columns to a year
data['year'] = 100 * data['century'] + data['yr']
# Create datetime objects from the century, year, month and day columns of
# the WDC format data file. The hourly mean is given at half past the hour
dates = data.apply(lambda x: dt.datetime.strptime(
"{0} {1} {2} {3} {4}".format(x['year'], x['month'], x['day'],
x['hour'], 30), "%Y %m %d %H %M"), axis=1)
data.insert(0, 'date', dates)
data.drop(['year', 'yr', 'century', 'code', 'day', 'month', 'hour'],
axis=1, inplace=True)
return data
def wdc_xyz(data):
"""Convert extracted WDC data to hourly X, Y and Z components in nT.
Missing values (indicated by 9999 in the datafiles) are replaced with NaNs.
Args:
data (pandas.DataFrame): dataframe containing columns for datetime
objects, magnetic field component (D, I, F, H, X, Y or Z), the
tabular base and hourly mean.
Returns:
data (pandas.DataFrame):
the same dataframe with datetime objects in the first column and
columns for X, Y and Z components of magnetic field (in nT).
"""
# Replace missing values with NaNs
data.replace(9999, np.nan, inplace=True)
# Group the data by field component, calculate the hourly means and form
# a dataframe with separate columns for each field component
data = data.groupby('component').apply(hourly_mean_conversion)
data.reset_index(drop=True, inplace=True)
data.drop(['base', 'hourly_mean_temp'], axis=1, inplace=True)
"""In older versions pd.pivot_table() kept NaNs by default, but we
test for the NaN being present so must force them to be kept."""
data = data.pivot_table(index='date', columns='component',
values='hourly_mean', dropna=False)
data.reset_index(inplace=True)
# Call helper function to convert D and H components to X and Y
if 'D' in data.columns and 'H' in data.columns:
data = angles_to_geographic(data)
# Make sure that the dataframe contains columns for X, Y and Z components,
# and create a column of NaN values if a component is missing
if 'X' not in data.columns:
data['X'] = np.NaN
if 'Y' not in data.columns:
data['Y'] = np.NaN
if 'Z' not in data.columns:
data['Z'] = np.NaN
data = data[['date', 'X', 'Y', 'Z']]
return data
def hourly_mean_conversion(data):
"""Use the tabular base to calculate hourly means in nT or degrees (D, I).
Uses the tabular base and hourly value from the WDC file to calculate the
hourly means of magnetic field components. Value is in nT for H, F, X, Y or
Z components and in degrees for D or I components. Called by wdc_xyz.
hourly_mean = tabular_base*100 + wdc_hourly_value (for components in nT)
hourly_mean = tabular_base + wdc_hourly_value/600 (for D and I components)
Args:
data (pandas.DataFrame): dataframe containing columns for datetime
objects, magnetic field component (D, I, F, H, X, Y or Z), the
tabular base and hourly mean.
Returns:
obs_data (pandas.DataFrame):
dataframe with datetime objects in the first column and hourly
means of the field components in either nT or degrees (depending on
the component).
"""
obs_data = pd.DataFrame()
for group in data.groupby('component'):
if group[0] == 'D' or group[0] == 'I':
group[1]['hourly_mean'] = group[1]['base'] + \
(1 / 600.0) * group[1]['hourly_mean_temp']
obs_data = obs_data.append(group[1], ignore_index=True)
else:
group[1]['hourly_mean'] = 100.0 * group[1]['base'] + \
group[1]['hourly_mean_temp']
obs_data = obs_data.append(group[1], ignore_index=True)
return obs_data
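# A worked sketch of the two conversion rules above, using made-up tabular bases
# and WDC hourly values.
def _example_tabular_base_conversion():
    z_mean = 100.0 * 485 + 123  # Z component in nT: 100 * base + hourly value = 48623.0
    d_mean = 2 + 300 / 600.0    # D component in degrees: base + hourly value / 600 = 2.5
    return z_mean, d_mean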
def angles_to_geographic(data):
"""Use D and H values to calculate the X and Y field components.
The declination (D) and horizontal intensity (H) relate to the north (X)
and east (Y) components as follows:
X = H*cos(D)
Y = H*sin(D)
Args:
data (pandas.DataFrame): dataframe containing columns for datetime
objects and hourly means of the magnetic field components (D, I, F,
H, X, Y or Z).
Returns:
data (pandas.DataFrame):
the same dataframe with datetime objects in the first column and
hourly means of the field components in either nT or degrees
(depending on the component).
"""
data.loc[(~np.isnan(data['D']) & ~np.isnan(data['H'])), 'X'] = data.loc[(
~np.isnan(data['D']) & ~np.isnan(data['H'])), 'H'] * np.cos(np.deg2rad(
data.loc[(~np.isnan(data['D']) & ~np.isnan(data['H'])), 'D']))
data.loc[(~np.isnan(data['D']) & ~np.isnan(data['H'])), 'Y'] = data.loc[(
~np.isnan(data['D']) & ~np.isnan(data['H'])), 'H'] * np.sin(np.deg2rad(
data.loc[(~np.isnan(data['D']) & ~np.isnan(data['H'])), 'D']))
return data
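# A worked sketch of the D/H to X/Y conversion above for a single made-up hourly
# mean with H = 20000 nT and D = 30 degrees.
def _example_angles_to_geographic():
    h, d = 20000.0, 30.0
    x = h * np.cos(np.deg2rad(d))  # north component, approximately 17320.5 nT
    y = h * np.sin(np.deg2rad(d))  # east component, 10000.0 nT
    return x, y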
def wdc_readfile(fname):
"""Wrapper function to call wdc_parsefile, wdc_datetimes and wdc_xyz.
Args:
fname (str): path to a WDC datafile.
Returns:
data (pandas.DataFrame):
dataframe containing the data read from the WDC file. First column
is a series of datetime objects (in the format yyyy-mm-dd hh:30:00)
and subsequent columns are the X, Y and Z components of the
magnetic field at the specified times (hourly means).
"""
rawdata = wdc_parsefile(fname)
rawdata = wdc_datetimes(rawdata)
data = wdc_xyz(rawdata)
return data
def append_wdc_data(*, obs_name, path=None):
"""Append all WDC data for an observatory into a single dataframe.
Args:
obs_name (str): observatory name (as 3-digit IAGA code).
path (str): path to directory containing WDC datafiles. All files for
the observatory should be located in the same directory.
Returns:
data (pandas.DataFrame):
dataframe containing all available hourly geomagnetic data at a
single observatory. First column is a series of datetime objects
(in the format yyyy-mm-dd hh:30:00) and subsequent columns are the
X, Y and Z components of the magnetic field at the specified times.
"""
data = pd.DataFrame()
data_path = os.path.join(path, obs_name.lower() + '*.wdc')
# Obtain a list of all files containing the observatory name and ending
# .wdc in the specified directory
filenames = sorted(glob.glob(data_path))
# Iterate over the files and append them to previous files
for file in filenames:
print(file)
try:
frame = wdc_readfile(file)
data = data.append(frame, ignore_index=True)
except StopIteration:
pass
return data
def covobs_parsefile(*, fname, data_type):
"""Loads MF and SV predictions from the COV-OBS geomagnetic field model.
Load a datafile containing SV/MF predictions from the COV-OBS magnetic
field model series by Gillet et al. (2013, Geochem. Geophys. Geosyst.,
https://doi.org/10.1002/ggge.20041;
2015, Earth, Planets and Space, https://doi.org/10.1186/s40623-015-0225-z).
Args:
fname (str): path to a COV-OBS datafile.
data_type (str): specify whether the file contains magnetic field data
('mf') or secular variation data ('sv').
Returns:
model_data (pandas.DataFrame):
dataframe containing hourly geomagnetic data. First column is a
series of datetime objects (in the format yyyy-mm-dd) and
subsequent columns are the X, Y and Z components of the SV/MF at
the specified times.
"""
model_data = pd.read_csv(fname, sep=r'\s+', header=None,
usecols=[0, 1, 2, 3])
if data_type == 'mf':
model_data.columns = ["year_decimal", "X", "Y", "Z"]
else:
model_data.columns = ["year_decimal", "dX", "dY", "dZ"]
return model_data
def covobs_datetimes(data):
"""Create datetime objects from COV-OBS field model output file.
The format output by the field model is year.decimalmonth e.g. 1960.08 is
Jan 1960.
Args:
data (pandas.DataFrame): needs a column for decimal year (in yyyy.mm
format).
Returns:
data (pandas.DataFrame):
the same dataframe with the decimal year column replaced with a
series of datetime objects in the format yyyy-mm-dd.
"""
year_temp = np.floor(data.year_decimal.values.astype(
'float64')).astype('int')
months = (12 * (data.year_decimal - year_temp) + 1).round().astype('int')
data.insert(0, 'year', year_temp)
data.insert(1, 'month', months)
date = data.apply(lambda x: dt.datetime.strptime(
"{0} {1}".format(int(x['year']), int(x['month'])), "%Y %m"),
axis=1)
data.insert(0, 'date', date)
data.drop(['year', 'year_decimal', 'month'], axis=1, inplace=True)
return data
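# A worked sketch of the conversion above: a made-up decimal date of 1990.5 gives
# year floor(1990.5) = 1990 and month round(12 * 0.5 + 1) = 7, i.e. 1990-07-01.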
def covobs_readfile(*, fname, data_type):
"""Wrapper function to call covobs_parsefile and covobs_datetimes.
The COV-OBS code (publicly available) can be used to produce synthetic
observatory time series for other field models if the appropriate spline
file is used. The output will be of the same format as COV-OBS output and
can be read using MagPySV.
Args:
fname (str): path to a COV-OBS format datafile.
data_type (str): specify whether the file contains magnetic field data
('mf') or secular variation data ('sv').
Returns:
data (pandas.DataFrame):
dataframe containing the data read from the file. First column is a
series of datetime objects (in the format yyyy-mm-dd) and
subsequent columns are the X, Y and Z components of the SV/MF at
the specified times.
"""
rawdata = covobs_parsefile(fname=fname, data_type=data_type)
data = covobs_datetimes(rawdata)
return data
def wdc_to_hourly_csv(*, wdc_path=None, write_dir, obs_list,
print_obs=True):
"""Convert WDC file to X, Y and Z hourly means and save to CSV file.
Finds WDC hourly data files for all observatories in a directory path
(assumes data for all observatories are located inside the same directory).
The BGS downloading app distributes data inside a single directory
with the naming convention obsyear.wdc where obs is a three digit
observatory name in lowercase and year is a four digit year,
e.g. ngk1990.wdc or clf2013.wdc. This function converts the hourly WDC
format data to hourly X, Y and Z means, appends all years of data for a
single observatory into a single dataframe and saves the dataframe to a
CSV file.
Args:
wdc_path (str): path to the directory containing datafiles.
write_dir (str): directory path to which the output CSV files are
written.
obs_list (list): list of observatory names (as 3-digit IAGA codes).
print_obs (bool): choose whether to print each observatory name as the
function goes through the directories. Useful for checking progress
as it can take a while to read the whole WDC dataset. Defaults to
True.
"""
# Create the output directory if it does not exist
if not os.path.exists(write_dir):
os.makedirs(write_dir)
# Iterate over each given observatory and call append_wdc_data
for observatory in obs_list:
if print_obs is True:
print(observatory)
wdc_data = append_wdc_data(
obs_name=observatory,
path=wdc_path)
write_csv_data(data=wdc_data, write_dir=write_dir,
obs_name=observatory)
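# A minimal usage sketch of wdc_to_hourly_csv; the directory paths and observatory
# codes below are hypothetical.
def _example_wdc_to_hourly_csv():
    wdc_to_hourly_csv(wdc_path='./data/wdc/', write_dir='./data/hourly_csv/',
                      obs_list=['ngk', 'clf'], print_obs=True)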
def write_csv_data(*, data, write_dir, obs_name, file_prefix=None,
decimal_dates=False, header=True):
"""Write dataframe to a CSV file.
Args:
data (pandas.DataFrame): data to be written to file.
write_dir (str): directory path to which the output CSV file is
written.
obs_name (str): name of observatory at which the data were obtained.
file_prefix (str): optional string to prefix the output CSV filenames
(useful for specifying parameters used to create the dataset etc).
decimal_dates (bool): optional argument to specify that dates should be
written in decimal format rather than datetime objects. Defaults to
False.
header (bool): option to include header in file. Defaults to True.
"""
# Create the output directory if it does not exist
if not os.path.exists(write_dir):
os.makedirs(write_dir)
# Convert datetime objects to decimal dates if required
if decimal_dates is True:
data.date = data.date.apply(datetime_to_decimal)
if file_prefix is not None:
fpath = os.path.join(write_dir, file_prefix + obs_name + '.csv')
else:
fpath = os.path.join(write_dir, obs_name + '.csv')
data.to_csv(fpath, sep=',', na_rep='NA', header=header, index=False)
def read_csv_data(*, fname, data_type):
"""Read dataframe from a CSV file.
Args:
fname (str): path to a CSV datafile.
data_type (str): specify whether the file contains magnetic field data
('mf') or secular variation data ('sv').
Returns:
data (pandas.DataFrame):
dataframe containing the data read from the CSV file.
"""
if data_type == 'mf':
col_names = ['date', 'X', 'Y', 'Z']
else:
col_names = ['date', 'dX', 'dY', 'dZ']
data = pd.read_csv(fname, sep=',', header=0, names=col_names,
parse_dates=[0])
return data
def combine_csv_data(*, start_date, end_date, sampling_rate='MS',
obs_list, data_path, model_path, day_of_month=1):
"""Read and combine observatory and model SV data for several locations.
Calls read_csv_data to read observatory data and field model predictions
for each observatory in a list. The data and predictions for individual
observatories are combined into their respective large dataframes. The
first column contains datetime objects and subsequent columns contain X, Y
and Z secular variation/field components (in groups of three) for all
observatories.
Args:
start_date (datetime.datetime): the start date of the data analysis.
end_date (datetime.datetime): the end date of the analysis.
sampling_rate (str): the sampling rate for the period of interest. The
default is 'MS', which creates a range of dates between the
specified values at monthly intervals with the day fixed as the
first of each month. Use 'M' for the final day of each month. Other
useful options are 'AS' (a series of dates at annual intervals,
with the day and month fixed at 01 and January respectively) and
'A' (as for 'AS' but with the day/month fixed as 31 December.)
obs_list (list): list of observatory names (as 3-digit IAGA codes).
data_path (str): path to the CSV files containing observatory data.
model_path (str): path to the CSV files containing model SV data.
day_of_month (int): For SV data, first differences of
monthly means have dates at the start of the month (i.e. MF of
mid-Feb minus MF of mid-Jan should give SV at Feb 1st). For annual
differences of monthly means the MF of mid-Jan year 2 minus MF of
mid-Jan year 1 gives SV at mid-July year 1. The dates of COV-OBS
output default to the first day of the month (compatible with dates
of monthly first differences SV data, but not with those of
annual differences). This option is used to set the day part of the
dates column if required. Default to 1 (all output dataframes
will have dates set at the first day of the month.)
Returns:
(tuple): tuple containing:
- obs_data (*pandas.DataFrame*):
dataframe containing SV data for all observatories in obs_list.
- model_sv_data (*pandas.DataFrame*):
dataframe containing SV predictions for all observatories in
obs_list.
- model_mf_data (*pandas.DataFrame*):
dataframe containing magnetic field predictions for all
observatories in obs_list.
"""
# Initialise the dataframe with the appropriate date range
dates = pd.date_range(start_date, end_date, freq=sampling_rate)
obs_data = pd.DataFrame({'date': dates})
model_sv_data = pd.DataFrame({'date': dates})
model_mf_data = pd.DataFrame({'date': dates})
for observatory in obs_list:
obs_file = observatory + '.csv'
model_sv_file = 'sv_' + observatory + '.dat'
model_mf_file = 'mf_' + observatory + '.dat'
obs_data_temp = read_csv_data(fname=os.path.join(data_path, obs_file),
data_type='sv')
model_sv_data_temp = covobs_readfile(fname=os.path.join(model_path,
model_sv_file), data_type='sv')
model_mf_data_temp = covobs_readfile(fname=os.path.join(model_path,
model_mf_file), data_type='mf')
model_sv_data_temp['date'] = model_sv_data_temp['date'].apply(
lambda dt: dt.replace(day=1))
obs_data_temp.rename(
columns={'dX': 'dX' + '_' + observatory,
'dY': 'dY' + '_' + observatory,
'dZ': 'dZ' + '_' + observatory}, inplace=True)
obs_data_temp['date'] = obs_data_temp['date'].apply(
lambda dt: dt.replace(day=1))
model_sv_data_temp.rename(
columns={'dX': 'dX' + '_' + observatory,
'dY': 'dY' + '_' + observatory,
'dZ': 'dZ' + '_' + observatory}, inplace=True)
model_mf_data_temp.rename(
columns={'X': 'X' + '_' + observatory,
'Y': 'Y' + '_' + observatory,
'Z': 'Z' + '_' + observatory}, inplace=True)
# Combine the current observatory data with those of other
# observatories
if observatory == obs_list[0]:
obs_data = pd.merge(
left=obs_data, right=obs_data_temp,
how='left', on='date')
model_sv_data = pd.merge(
left=model_sv_data, right=model_sv_data_temp,
how='left', on='date')
model_mf_data = pd.merge(
left=model_mf_data, right=model_mf_data_temp,
how='left', on='date')
else:
obs_data = pd.merge(
left=obs_data, right=obs_data_temp,
how='left', on='date')
model_sv_data = pd.merge(
left=model_sv_data, right=model_sv_data_temp,
how='left', on='date')
model_mf_data = pd.merge(
left=model_mf_data, right=model_mf_data_temp,
how='left', on='date')
if day_of_month != 1:
model_sv_data['date'] = model_sv_data['date'].apply(
lambda dt: dt.replace(day=day_of_month))
model_mf_data['date'] = model_sv_data['date']
obs_data['date'] = model_sv_data['date']
return obs_data, model_sv_data, model_mf_data
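# A minimal usage sketch of combine_csv_data; the date range, observatory list and
# directory paths below are hypothetical.
def _example_combine_csv_data():
    obs, model_sv, model_mf = combine_csv_data(
        start_date=dt.datetime(1960, 1, 1), end_date=dt.datetime(2010, 12, 31),
        sampling_rate='MS', obs_list=['ngk', 'clf'],
        data_path='./data/hourly_csv/', model_path='./data/covobs/')
    return obs, model_sv, model_mf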
def datetime_to_decimal(date):
"""Convert a datetime object to a decimal year.
Args:
date (datetime.datetime): datetime object representing an observation
time.
Returns:
date (float): the same date represented as a decimal year.
"""
year_start = dt.datetime(date.year, 1, 1)
year_end = year_start.replace(year=date.year + 1)
decimal_year = date.year + (date - year_start) / (year_end - year_start)
return decimal_year
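# A worked sketch of the conversion above: 1 July 2000 falls 182 days into a
# 366-day leap year, so datetime_to_decimal(dt.datetime(2000, 7, 1)) returns
# approximately 2000 + 182 / 366 = 2000.497.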
def ae_parsefile(fname):
"""Load a WDC-like format AE file and place contents into a dataframe.
Load a file of AE (Auroral Electroject)
index hourly data in the format distributed by the Kyoto WDC at
http://wdc.kugi.kyoto-u.ac.jp/dstae/index.html and extract the contents.
Args:
fname (str): path to a WDC-like formatted AE file.
Returns:
data (pandas.DataFrame):
dataframe containing hourly AE data. First column is a
series of datetime objects (in the format yyyy-mm-dd hh:30:00) and
second column contains the AE values at the specified times.
"""
# AE WDC file format
cols = [(0, 2), (3, 5), (5, 7), (8, 10), (14, 16),
(16, 20), (20, 116)]
col_names = [
'code', 'yr', 'month', 'day', 'century',
'base', 'hourly_values']
types = {
'code': str, 'yr': int, 'month': int,
'day': int, 'century': int, 'base': int, 'hourly_values': str}
data = pd.read_fwf(fname, colspecs=cols, names=col_names,
converters=types, header=None)
data = data.loc[data['code'] == "AE"]
# Separate the hourly values
try:
data['hourly_values'] = data['hourly_values'].apply(
separate_hourly_vals)
except ValueError:
data['hourly_values'] = data['hourly_values'].apply(
separate_hourly_vals_ae)
data = data.set_index(['code', 'yr', 'month', 'day',
'century', 'base'])['hourly_values'].apply(
pd.Series).stack()
data = data.reset_index()
data.columns = ['code', 'yr', 'month', 'day', 'century',
'base', 'hour', 'hourly_mean_temp']
data['hourly_mean_temp'] = data['hourly_mean_temp'].astype(float)
return data
def separate_hourly_vals_ae(hourstring):
"""Separate individual hourly field means from the string containing all
24 values in the AE file. Called by ae_parsefile.
Args:
hourstring (str): string containing the hourly AE means parsed from
a Kyoto WDC-like file for a single day.
Returns:
hourly_vals_list (list):
list containing the hourly AE values.
"""
n = 4
if hourstring[0] != '-' and hourstring[0] != ' ':
hourstring = ' ' + hourstring
hourly_vals_list = [hourstring[i:i+n] for i in range(0, len(hourstring),
n)]
return hourly_vals_list
def ae_readfile(fname):
"""Wrapper function to call ae_parsefile and wdc_datetimes.
Args:
fname (str): path to a AE file in Kyoto WDC-like format. Assumes data
for all years are contained within this file.
Returns:
data (pandas.DataFrame):
dataframe containing the data read from the WDC file. First column
is a series of datetime objects (in the format yyyy-mm-dd hh:30:00)
and second column contains AE values at the specified times
(hourly means).
"""
data = ae_parsefile(fname)
data = wdc_datetimes(data)
data['hourly_mean'] = 100.0 * data['base'] + \
data['hourly_mean_temp']
data.drop(['hourly_mean_temp', 'base'], axis=1, inplace=True)
return data
def append_ae_data(ae_data_path):
"""Append AE data into a single dataframe containing all years.
Data downloaded from
ftp://ftp.ngdc.noaa.gov/STP/GEOMAGNETIC_DATA/INDICES/AURORAL_ELECTROJET/HOURLY/
come in WDC-like format files with one file per year named aeyyyy.wdc (data
provided by the WDC at Kyoto. Can be downloaded directly from
http://wdc.kugi.kyoto-u.ac.jp/dstae/index.html)
Args:
ae_data_path (str): path to directory containing WDC-like format AE
datafiles. All AE files should be located in the same directory.
Returns:
data (pandas.DataFrame):
dataframe containing all available hourly AE data. First column is
a series of datetime objects (in the format yyyy-mm-dd hh:30:00)
and second column contains AE values at the specified times.
"""
data = pd.DataFrame()
# Obtain a list of all files whose names start with 'ae' and end in .txt in
# the specified directory
filenames = sorted(glob.glob(ae_data_path + 'ae*.txt'))
# Iterate over the files and append them to previous files
for file in filenames:
print(file)
try:
frame = ae_readfile(file)
data = data.append(frame, ignore_index=True)
except StopIteration:
pass
return data
def ap_readfile(fname):
"""Load an kp/ap file and place the hourly ap values into a dataframe.
Load a datafile of 3-hourly ap data and extract the contents. Each of the
3-hourly values for a given day is repeated three times to give an hourly
mean for all 24 hours of the day. This function is designed to read files
downloaded from the GFZ, Potsdam server at
ftp://ftp.gfz-potsdam.de/pub/home/obs/kp-ap/.
Args:
fname (str): path to an ap datafile.
Returns:
data (pandas.DataFrame):
dataframe containing hourly ap data. First column is a
series of datetime objects (in the format yyyy-mm-dd hh:30:00) and
second column contains ap values at the specified times.
"""
col_names = ['full_string']
types = {'full_string': str}
# Parse the file
if fname[-8] == '2':
cols = [(1, 55)]
data = pd.read_fwf(fname, colspecs=cols, names=col_names,
converters=types, header=None)
data['month'] = data.full_string.str[1:3]
data['day'] = data.full_string.str[3:5]
data['hourly_values'] = data.full_string.str[30:]
else:
cols = [(0, 55)]
data = pd.read_fwf(fname, colspecs=cols, names=col_names,
converters=types, header=None)
data['month'] = data.full_string.str[2:4]
data['day'] = data.full_string.str[4:6]
data['hourly_values'] = data.full_string.str[32:]
data.drop(['full_string'], axis=1, inplace=True)
data['hourly_values'] = data['hourly_values'].apply(
separate_three_hourly_vals)
data = data.set_index(['month', 'day'])['hourly_values'].apply(
pd.Series).stack()
data = data.reset_index()
data.columns = ['month', 'day', 'hour', 'hourly_mean']
data['hourly_mean'] = data['hourly_mean'].astype(float)
data['year'] = int(fname[-8:-4])
dates = data.apply(lambda x: dt.datetime.strptime(
"{0} {1} {2} {3} {4}".format(x['year'], x['month'], x['day'],
x['hour'], 30), "%Y %m %d %H %M"), axis=1)
data.insert(0, 'date', dates)
data.drop(['year', 'day', 'month', 'hour'],
axis=1, inplace=True)
return data
def separate_three_hourly_vals(hourstring):
"""Separate 3-hourly ap means from the string containing all 8 values.
Separate the 8 individual 3-hourly ap means from the string containing all
values for the day. Each value is repeated 3 times to give a value for each
hour. Called by ap_readfile.
Args:
hourstring (str): string containing the 3-hourly ap means parsed from
a Kyoto WDC-like file for a single day.
Returns:
hourly_vals_list (list):
list containing the hourly ap values.
"""
n = 3
hourly_vals_list = [hourstring[i:i+n] for i in range(0, len(hourstring),
n)]
hourly_vals_list = np.repeat(hourly_vals_list, n)
return hourly_vals_list
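# A minimal sketch of the 3-hourly expansion above, using a made-up 24-character
# string holding eight 3-character ap fields (4, 7, 12, 5, 3, 9, 15, 6).
def _example_separate_three_hourly_vals():
    daystring = "  4  7 12  5  3  9 15  6"
    values = separate_three_hourly_vals(daystring)
    return len(values)  # 24, i.e. one value per hour of the day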
def append_ap_data(ap_data_path):
"""Append ap data into a single dataframe containing all years.
Data downloaded from ftp://ftp.gfz-potsdam.de/pub/home/obs/kp-ap/wdc/
come in WDC-like format files with one file per year named kpyyyy.wdc. This
function concatenates all years into a single dataframe.
Args:
ap_data_path (str): path to directory containing WDC-like format ap
datafiles. All ap files should be located in the same directory.
Returns:
data (pandas.DataFrame):
dataframe containing all available hourly ap data. First column is
a series of datetime objects (in the format yyyy-mm-dd hh:30:00)
and second column contains ap values at the specified times.
"""
data = pd.DataFrame()
# Obtain a list of all files whose names start with 'kp' and end in .wdc in
# the specified directory
filenames = sorted(glob.glob(ap_data_path + 'kp*.wdc'))
# Iterate over all files and append data
for file in filenames:
print(file)
try:
frame = ap_readfile(file)
data = data.append(frame, ignore_index=True)
except StopIteration:
pass
return data
|
|
"""
Utility methods to deal with "names" of relations.
To be safe, we always delimit names in queries but would prefer not to during logging.
See TableName.
There are additional methods and classes here to support the feature of choosing relations
by a pattern from the command line.
"""
import fnmatch
import re
import uuid
from typing import List, Optional, Tuple
import etl.config
from etl.errors import ETLSystemError
from etl.text import join_with_single_quotes
def as_staging_name(name):
"""Transform the schema name to its staging position."""
return "$".join(("etl_staging", name))
def as_backup_name(name):
"""Transform the schema name to its backup position."""
return "$".join(("etl_backup", name))
class TableName:
"""
Class to automatically create delimited identifiers for tables.
Given a table s.t, then the cautious identifier for SQL code is: "s"."t"
But the more readable name is still: s.t
Note that the preference for logging is to always use single-quotes, 's.t' (see {:x} below).
Another, more curious use for instances is to store shell patterns for the schema name
and table name so that we can match against other instances.
Comparisons (for schema and table names) are case-insensitive.
TableNames have a notion of known "managed" schemas, which include both
sources and transformations listed in configuration files. A TableName
is considered unmanaged if its schema does not belong to the list of
managed schemas, and in that case its schema property is never translated
into a staging version.
>>> from etl.config.dw import DataWarehouseSchema
>>> orders = TableName.from_identifier("www.orders")
>>> str(orders)
'"www"."orders"'
>>> orders.identifier
'www.orders'
>>> same_orders = TableName.from_identifier("WWW.Orders")
>>> orders == same_orders
True
>>> id(orders) == id(same_orders)
False
>>> hash(orders) == hash(same_orders)
True
>>> w3 = TableName.from_identifier("w3.orders")
>>> orders == w3
False
>>> purchases = TableName.from_identifier("www.purchases")
>>> orders < purchases
True
>>> purchases.managed_schemas = ['www']
>>> staging_purchases = purchases.as_staging_table_name()
>>> staging_purchases.managed_schemas = ['www']
>>> # Now the table names are the same but they are in different schemas (staging vs. not)
>>> staging_purchases.table == purchases.table
True
>>> staging_purchases.schema == purchases.schema
False
"""
__slots__ = ("_schema", "_table", "_is_staging", "_managed_schemas", "_external_schemas")
def __init__(self, schema: Optional[str], table: str, is_staging=False) -> None:
# Concession to subclasses ... schema is optional
self._schema = schema.lower() if schema else None
self._table = table.lower()
self._is_staging = is_staging
self._managed_schemas: Optional[frozenset] = None
self._external_schemas: Optional[frozenset] = None
@property
def schema(self):
if self.is_staging and self.is_managed:
return as_staging_name(self._schema)
else:
return self._schema
@property
def table(self):
return self._table
@property
def is_staging(self):
return self._is_staging
@property
def managed_schemas(self) -> frozenset:
"""
List of schemas that are managed by Arthur.
This contains all schemas not just the schema of this relation.
"""
if self._managed_schemas is None:
try:
schemas = etl.config.get_dw_config().schemas
except AttributeError:
raise ETLSystemError("dw_config has not been set!")
self._managed_schemas = frozenset(schema.name for schema in schemas)
return self._managed_schemas
@managed_schemas.setter
def managed_schemas(self, schema_names: List) -> None:
# This setter only exists for tests.
self._managed_schemas = frozenset(schema_names)
@property
def external_schemas(self) -> frozenset:
"""List external schemas that are not managed by us and may not exist during validation."""
if self._external_schemas is None:
try:
schemas = etl.config.get_dw_config().external_schemas
except AttributeError:
raise ETLSystemError("dw_config has not been set!")
self._external_schemas = frozenset(schema.name for schema in schemas)
return self._external_schemas
def to_tuple(self):
"""
Return schema name and table name as a handy tuple.
>>> tn = TableName("weather", "temp")
>>> schema_name, table_name = tn.to_tuple()
>>> schema_name, table_name
('weather', 'temp')
"""
return self.schema, self.table
@property
def identifier(self) -> str:
"""
Return simple identifier, like one would use on the command line.
>>> tn = TableName("hello", "world")
>>> tn.identifier
'hello.world'
"""
return f"{self.schema}.{self.table}"
@property
def identifier_as_re(self) -> str:
r"""
Return a regular expression that would look for the (unquoted) identifier.
>>> tn = TableName("dw", "fact")
>>> tn.identifier_as_re
'\\bdw\\.fact\\b'
>>> import re
>>> re.match(tn.identifier_as_re, "dw.fact") is not None
True
>>> re.match(tn.identifier_as_re, "dw_fact") is None
True
"""
return r"\b{}\b".format(re.escape(self.identifier))
@property
def is_managed(self) -> bool:
return self._schema in self.managed_schemas
@property
def is_external(self) -> bool:
return self._schema in self.external_schemas
@classmethod
def from_identifier(cls, identifier: str):
"""
Split identifier into schema and table before creating a new TableName instance.
>>> identifier = "ford.mustang"
>>> tn = TableName.from_identifier(identifier)
>>> identifier == tn.identifier
True
"""
schema, table = identifier.split(".", 1)
return cls(schema, table)
def __str__(self):
"""
Return delimited table identifier with quotes around schema and table name.
This safeguards against unscrupulous users who use "default" as table name.
>>> import etl.config
>>> from collections import namedtuple
>>> MockDWConfig = namedtuple('MockDWConfig', ['schemas'])
>>> MockSchema = namedtuple('MockSchema', ['name'])
>>> etl.config._dw_config = MockDWConfig(schemas=[MockSchema(name='hello')])
>>> tn = TableName("hello", "world")
>>> str(tn)
'"hello"."world"'
>>> str(tn.as_staging_table_name())
'"etl_staging$hello"."world"'
"""
return f'"{self.schema}"."{self.table}"'
def __format__(self, code):
"""
Format name as delimited identifier (with quotes) or just as an identifier.
With the default or ':s', it's a delimited identifier with quotes.
With ':x", the name is left bare but single quotes are around it.
>>> pu = TableName("public", "users")
>>> format(pu)
'"public"."users"'
>>> format(pu, 'x')
"'public.users'"
>>> "SELECT * FROM {:s}".format(pu)
'SELECT * FROM "public"."users"'
>>> "Table {:x} contains users".format(pu) # new style with using formatting code
"Table 'public.users' contains users"
>>> "Table '{}' contains users".format(pu.identifier) # old style by accessing property
"Table 'public.users' contains users"
>>> "Oops: {:y}".format(pu)
Traceback (most recent call last):
ValueError: unknown format code 'y' for TableName
"""
if (not code) or (code == "s"):
return str(self)
elif code == "x":
return "'{:s}'".format(self.identifier)
else:
raise ValueError("unknown format code '{}' for {}".format(code, self.__class__.__name__))
def __eq__(self, other: object):
if not isinstance(other, TableName):
return False
return self.to_tuple() == other.to_tuple()
def __hash__(self):
return hash(self.to_tuple())
def __lt__(self, other: "TableName"):
"""
Order two table names, case-insensitive.
>>> ta = TableName("Iowa", "Cedar Rapids")
>>> tb = TableName("Iowa", "Davenport")
>>> ta < tb
True
"""
return self.identifier < other.identifier
def match(self, other: "TableName") -> bool:
"""
Treat yo'self as a tuple of patterns and match against the other table.
>>> tp = TableName("w*", "o*")
>>> tn = TableName("www", "orders")
>>> tp.match(tn)
True
>>> tn = TableName("worldwide", "octopus")
>>> tp.match(tn)
True
>>> tn = TableName("sales", "orders")
>>> tp.match(tn)
False
"""
other_schema = other.schema
other_table = other.table
return fnmatch.fnmatch(other_schema, self.schema) and fnmatch.fnmatch(other_table, self.table)
def match_pattern(self, pattern: str) -> bool:
"""
Test whether this table matches the given pattern.
>>> tn = TableName("www", "orders")
>>> tn.match_pattern("w*.o*")
True
>>> tn.match_pattern("o*.w*")
False
"""
return fnmatch.fnmatch(self.identifier, pattern)
def as_staging_table_name(self):
return TableName(*self.to_tuple(), is_staging=True)
class TempTableName(TableName):
r"""
Class to deal with names of temporary relations.
Note that temporary views or tables do not have a schema (*) and have a name starting with '#'.
(* = strictly speaking, their schema is one of the pg_temp% schemas. But who's looking.)
>>> temp = TempTableName("#hello")
>>> str(temp)
'"#hello"'
>>> temp.identifier
'#hello'
>>> "For SQL: {:s}, for logging: {:x}".format(temp, temp)
'For SQL: "#hello", for logging: \'#hello\''
Schema and name comparison in SQL continues to work if you use LIKE for schema names:
>>> temp.schema
'pg_temp%'
"""
def __init__(self, table) -> None:
if not table.startswith("#"):
raise ValueError("name of temporary table must start with '#'")
super().__init__(None, table)
# Enable remembering whether this is a temporary view with late schema binding.
self.is_late_binding_view = False
@property
def schema(self):
return "pg_temp%"
@property
def identifier(self):
return self.table
def __str__(self):
return '"{}"'.format(self.table)
@staticmethod
def for_table(table: TableName):
"""
Return a valid name for a temporary table that's derived from the given table name.
Leaks Redshift spec in that we make sure that names are less than 127 characters long.
>>> example = "public.speakeasy"
>>> tn = TableName.from_identifier(example)
>>> temp = TempTableName.for_table(tn)
>>> temp.identifier
'#public$speakeasy'
>>> str(temp)
'"#public$speakeasy"'
>>> too_long = "public." + "long" * 32
>>> tt = TempTableName.for_table(TableName.from_identifier(too_long))
>>> len(tt.identifier)
127
"""
temp_name = "#{0.schema}${0.table}".format(table)
if len(temp_name) > 127:
temp_name = temp_name[:119] + uuid.uuid4().hex[:8]
return TempTableName(temp_name)
class TableSelector:
"""
Class to hold patterns to filter table names.
Patterns that are supported are based on "glob" matches, which use *, ?, and [] -- just
like the shell does. But note that all matching is done case-insensitive.
There is a concept of "base schemas." This list should be based on the configuration and
defines the set of usable schemas. ("Schemas" here refers to either upstream sources or
schemas storing transformations.) So when base schemas are defined then there is an implied
additional match against them before a table name is tried to be matched against stored
patterns. If no base schemas are set, then we default simply to a list of schemas from the
patterns.
"""
__slots__ = ("_patterns", "_base_schemas")
def __init__(self, patterns=None, base_schemas=None):
"""
Build pattern instance from list of glob patterns.
The list may be empty (or omitted). This is equivalent to a list of ["*.*"].
Note that each pattern is split on the first '.' to separate out
matches against schema names and table names.
To facilitate case-insensitive matching, patterns are stored in their
lower-case form.
The base schemas (if present) basically limit what a '*' means as schema name.
They are stored in their initial order.
>>> ts = TableSelector()
>>> str(ts)
"['*.*']"
>>> len(ts)
0
>>> ts = TableSelector(["finance", "www"])
>>> str(ts)
"['finance.*', 'www.*']"
>>> len(ts)
2
>>> ts = TableSelector(["www.orders*"])
>>> str(ts)
"['www.orders*']"
>>> ts = TableSelector(["www.Users", "www.Products"])
>>> str(ts)
"['www.products', 'www.users']"
>>> ts = TableSelector(["*.orders", "finance.budget"])
>>> str(ts)
"['*.orders', 'finance.budget']"
>>> ts = TableSelector("www.orders")
Traceback (most recent call last):
ValueError: patterns must be a list
>>> ts = TableSelector(["www.*", "finance"], ["www", "finance", "operations"])
>>> ts.base_schemas
('www', 'finance', 'operations')
>>> ts.base_schemas = ["www", "marketing"]
Traceback (most recent call last):
ValueError: bad pattern (no match against base schemas): finance.*
>>> ts.base_schemas = ["www", "finance", "marketing"]
>>> ts = TableSelector(base_schemas=["www"])
>>> ts.match(TableName.from_identifier("www.orders"))
True
>>> ts.match(TableName.from_identifier("operations.shipments"))
False
"""
if patterns is None:
patterns = [] # avoid having a modifiable parameter but still have a for loop
if not isinstance(patterns, list):
raise ValueError("patterns must be a list")
split_patterns = []
for pattern in patterns:
if "." in pattern:
schema, table = pattern.split(".", 1)
split_patterns.append(TableName(schema, table))
else:
split_patterns.append(TableName(pattern, "*"))
self._patterns = tuple(sorted(split_patterns))
self._base_schemas: Tuple[str, ...] = ()
if base_schemas is not None:
self.base_schemas = base_schemas
@property
def base_schemas(self):
return self._base_schemas
@base_schemas.setter
def base_schemas(self, schemas):
"""
Add base schemas (names, not patterns) to match against.
It is an error to have a pattern that does not match against the base schemas.
(So you cannot retroactively reject a pattern by changing the base schemas.)
"""
# Fun fact: you can't have doctests in docstrings for properties
self._base_schemas = tuple(name.lower() for name in schemas)
# Make sure that each pattern matches against at least one base schema
for pattern in self._patterns:
found = fnmatch.filter(self._base_schemas, pattern.schema)
if not found:
raise ValueError(
"bad pattern (no match against base schemas): {}".format(pattern.identifier)
)
def __len__(self) -> int:
return len(self._patterns)
def __str__(self) -> str:
# See __init__ for tests
if len(self._patterns) == 0:
return "['*.*']"
else:
return "[{}]".format(join_with_single_quotes(p.identifier for p in self._patterns))
def match_schema(self, schema) -> bool:
"""
Match this schema name against the patterns.
This returns true if any pattern matches the schema name and the schema is part of the
base schemas (if defined).
>>> tnp = TableSelector(["www.orders", "factory.products"])
>>> tnp.match_schema("www")
True
>>> tnp.match_schema("finance")
False
"""
name = schema.lower()
if not self._patterns:
if not self._base_schemas:
return True
else:
return name in self._base_schemas
else:
for pattern in self._patterns:
if fnmatch.fnmatch(name, pattern.schema):
return True
return False
def selected_schemas(self) -> Tuple[str, ...]:
"""
Return tuple of schemas from base schemas that match the selection.
It is an error if a pattern tries to select a specific table instead of a schema.
This method can thus be called for the side-effect of raising an exception
if you want to test whether the pattern only selects schemas.
>>> ts = TableSelector(["www.*", "marketing"], ["factory", "marketing", "www"])
>>> ts.selected_schemas()
('marketing', 'www')
>>> tx = TableSelector(["www.orders"], ["www"])
>>> tx.selected_schemas()
Traceback (most recent call last):
ValueError: pattern selects table, not schema: '"www"."orders"'
"""
for pattern in self._patterns:
if pattern.table != "*":
raise ValueError("pattern selects table, not schema: '%s'" % pattern)
return tuple(str(schema) for schema in self._base_schemas if self.match_schema(schema))
def match(self, table_name):
"""
Match relation on schema and table patterns (possibly limited to base schemas).
This returns true if any pattern matches and the schema is part of the base schemas
(if defined).
>>> ts = TableSelector(["www.orders", "www.prod*"])
>>> name = TableName("www", "products")
>>> ts.match(name)
True
>>> name = TableName("WWW", "Products")
>>> ts.match(name)
True
>>> name = TableName("finance", "products")
>>> ts.match(name)
False
>>> name = TableName("www", "users")
>>> ts.match(name)
False
"""
schema = table_name.schema
if self._base_schemas and schema not in self._base_schemas:
return False
if not self._patterns:
return True
for pattern in self._patterns:
if pattern.match(table_name):
return True
return False
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Clustering Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_clustering_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.embedding_ops import embedding_lookup
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_clustering_ops import *
# pylint: enable=wildcard-import
# Euclidean distance between vectors U and V is defined as \\(||U - V||_F\\)
# which is the square root of the sum of the absolute squares of the elements
# difference.
SQUARED_EUCLIDEAN_DISTANCE = 'squared_euclidean'
# Cosine distance between vectors U and V is defined as
# \\(1 - (U \dot V) / (||U||_F ||V||_F)\\)
COSINE_DISTANCE = 'cosine'
RANDOM_INIT = 'random'
KMEANS_PLUS_PLUS_INIT = 'kmeans_plus_plus'
KMC2_INIT = 'kmc2'
# The name of the variable holding the cluster centers. Used by the Estimator.
CLUSTERS_VAR_NAME = 'clusters'
class KMeans(object):
"""Creates the graph for k-means clustering."""
def __init__(self,
inputs,
num_clusters,
initial_clusters=RANDOM_INIT,
distance_metric=SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=False,
mini_batch_steps_per_iteration=1,
random_seed=0,
kmeans_plus_plus_num_retries=2,
kmc2_chain_length=200):
"""Creates an object for generating KMeans clustering graph.
This class implements the following variants of K-means algorithm:
If use_mini_batch is False, it runs standard full batch K-means. Each step
runs a single iteration of K-Means. This step can be run sharded across
multiple workers by passing a list of sharded inputs to this class. Note
however that a single step needs to process the full input at once.
If use_mini_batch is True, it runs a generalization of the mini-batch
K-means algorithm. It runs multiple iterations, where each iteration is
composed of mini_batch_steps_per_iteration steps. Two copies of cluster
centers are maintained: one that is updated at the end of each iteration,
and one that is updated every step. The first copy is used to compute
cluster allocations for each step, and for inference, while the second copy
is the one updated each step using the mini-batch update rule. After each
iteration is complete, this second copy is copied back to the first copy.
Note that for use_mini_batch=True, when mini_batch_steps_per_iteration=1,
the algorithm reduces to the standard mini-batch algorithm. Also by setting
mini_batch_steps_per_iteration = num_inputs / batch_size, the algorithm
becomes an asynchronous version of the full-batch algorithm. Note however
that there is no guarantee by this implementation that each input is seen
exactly once per iteration. Also, different updates are applied
asynchronously without locking. So this asynchronous version may not behave
exactly like a full-batch version.
Args:
inputs: An input tensor or list of input tensors. It is assumed that the
data points have been previously randomly permuted.
num_clusters: An integer tensor specifying the number of clusters. This
argument is ignored if initial_clusters is a tensor or numpy array.
initial_clusters: Specifies the clusters used during initialization. One
of the following:
- a tensor or numpy array with the initial cluster centers.
- a function f(inputs, k) that returns up to k centers from `inputs`.
- "random": Choose centers randomly from `inputs`.
- "kmeans_plus_plus": Use kmeans++ to choose centers from `inputs`.
- "kmc2": Use the fast k-MC2 algorithm to choose centers from `inputs`.
In the last three cases, one batch of `inputs` may not yield
`num_clusters` centers, in which case initialization will require
multiple batches until enough centers are chosen. In the case of
"random" or "kmeans_plus_plus", if the input size is <= `num_clusters`
then the entire batch is chosen to be cluster centers.
distance_metric: Distance metric used for clustering. Supported options:
"squared_euclidean", "cosine".
use_mini_batch: If true, use the mini-batch k-means algorithm. Else assume
full batch.
mini_batch_steps_per_iteration: Number of steps after which the updated
cluster centers are synced back to a master copy.
random_seed: Seed for PRNG used to initialize seeds.
kmeans_plus_plus_num_retries: For each point that is sampled during
kmeans++ initialization, this parameter specifies the number of
additional points to draw from the current distribution before selecting
the best. If a negative value is specified, a heuristic is used to
sample O(log(num_to_sample)) additional points.
kmc2_chain_length: Determines how many candidate points are used by the
k-MC2 algorithm to produce one new cluster center. If a (mini-)batch
contains fewer points, one new cluster center is generated from the
(mini-)batch.
Raises:
ValueError: An invalid argument was passed to initial_clusters or
distance_metric.
"""
if isinstance(initial_clusters, str) and initial_clusters not in [
RANDOM_INIT, KMEANS_PLUS_PLUS_INIT, KMC2_INIT
]:
raise ValueError(
"Unsupported initialization algorithm '%s'" % initial_clusters)
if distance_metric not in [SQUARED_EUCLIDEAN_DISTANCE, COSINE_DISTANCE]:
raise ValueError("Unsupported distance metric '%s'" % distance_metric)
self._inputs = inputs if isinstance(inputs, list) else [inputs]
self._num_clusters = num_clusters
self._initial_clusters = initial_clusters
self._distance_metric = distance_metric
self._use_mini_batch = use_mini_batch
self._mini_batch_steps_per_iteration = int(mini_batch_steps_per_iteration)
self._random_seed = random_seed
self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries
self._kmc2_chain_length = kmc2_chain_length
@classmethod
def _distance_graph(cls, inputs, clusters, distance_metric):
"""Computes distance between each input and each cluster center.
Args:
inputs: list of input Tensors.
clusters: cluster Tensor.
distance_metric: distance metric used for clustering
Returns:
list of Tensors, where each element corresponds to each element in inputs.
The value is the distance of each row to all the cluster centers.
Currently only Euclidean distance and cosine distance are supported.
"""
assert isinstance(inputs, list)
if distance_metric == SQUARED_EUCLIDEAN_DISTANCE:
return cls._compute_euclidean_distance(inputs, clusters)
elif distance_metric == COSINE_DISTANCE:
return cls._compute_cosine_distance(
inputs, clusters, inputs_normalized=True)
else:
assert False, str(distance_metric)
@classmethod
def _compute_euclidean_distance(cls, inputs, clusters):
"""Computes Euclidean distance between each input and each cluster center.
Args:
inputs: list of input Tensors.
clusters: cluster Tensor.
Returns:
list of Tensors, where each element corresponds to each element in inputs.
The value is the distance of each row to all the cluster centers.
"""
output = []
for inp in inputs:
with ops.colocate_with(inp, ignore_existing=True):
# Computes Euclidean distance. Note the first and third terms are
# broadcast additions.
squared_distance = (
math_ops.reduce_sum(math_ops.square(inp), 1, keepdims=True) -
2 * math_ops.matmul(inp, clusters, transpose_b=True) +
array_ops.transpose(
math_ops.reduce_sum(
math_ops.square(clusters), 1, keepdims=True)))
output.append(squared_distance)
return output
@classmethod
def _compute_cosine_distance(cls, inputs, clusters, inputs_normalized=True):
"""Computes cosine distance between each input and each cluster center.
Args:
inputs: list of input Tensor.
clusters: cluster Tensor
inputs_normalized: if True, it assumes that inp and clusters are
normalized and computes the dot product (cosine similarity), from which
the cosine distance is obtained as 1 - similarity. Else it L2 normalizes
the inputs first.
Returns:
list of Tensors, where each element corresponds to each element in inp.
The value is the distance of each row to all the cluster centers.
"""
output = []
if not inputs_normalized:
with ops.colocate_with(clusters, ignore_existing=True):
clusters = nn_impl.l2_normalize(clusters, dim=1)
for inp in inputs:
with ops.colocate_with(inp, ignore_existing=True):
if not inputs_normalized:
inp = nn_impl.l2_normalize(inp, dim=1)
output.append(1 - math_ops.matmul(inp, clusters, transpose_b=True))
return output
def _infer_graph(self, inputs, clusters):
"""Maps input to closest cluster and the score.
Args:
inputs: list of input Tensors.
clusters: Tensor of cluster centers.
Returns:
List of tuples, where each value in a tuple corresponds to a value in inp.
The tuples have the following three elements:
all_scores: distance of each input to each cluster center.
score: distance of each input to closest cluster center.
cluster_idx: index of cluster center closest to the corresponding input.
"""
assert isinstance(inputs, list)
# Pairwise distances are used only by transform(). In all other cases, this
# sub-graph is not evaluated.
scores = self._distance_graph(inputs, clusters, self._distance_metric)
output = []
if (self._distance_metric == COSINE_DISTANCE and
not self._clusters_l2_normalized()):
# For normalized vectors x and y, the squared Euclidean distance equals
# 2 * (1 - cos(x, y)), i.e. twice the cosine distance. We use this fact to
# reuse the nearest_neighbors op and halve its distances below.
# TODO(ands): Support COSINE distance in nearest_neighbors and remove
# this.
with ops.colocate_with(clusters, ignore_existing=True):
clusters = nn_impl.l2_normalize(clusters, dim=1)
for inp, score in zip(inputs, scores):
with ops.colocate_with(inp, ignore_existing=True):
(indices, distances) = gen_clustering_ops.nearest_neighbors(
inp, clusters, 1)
if self._distance_metric == COSINE_DISTANCE:
distances *= 0.5
output.append((score, array_ops.squeeze(distances, [-1]),
array_ops.squeeze(indices, [-1])))
return zip(*output)
def _clusters_l2_normalized(self):
"""Returns True if clusters centers are kept normalized."""
return (self._distance_metric == COSINE_DISTANCE and
(not self._use_mini_batch or
self._mini_batch_steps_per_iteration > 1))
def _create_variables(self, num_clusters):
"""Creates variables.
Args:
num_clusters: an integer Tensor providing the number of clusters.
Returns:
Tuple with following elements:
- cluster_centers: a Tensor for storing cluster centers
- cluster_centers_initialized: bool Variable indicating whether clusters
are initialized.
- cluster_counts: a Tensor for storing counts of points assigned to this
cluster. This is used by mini-batch training.
- cluster_centers_updated: Tensor representing copy of cluster centers
that are updated every step.
- update_in_steps: number of steps left before we sync
cluster_centers_updated back to cluster_centers.
"""
init_value = array_ops.constant([], dtype=dtypes.float32)
cluster_centers = variable_scope.variable(
init_value, name=CLUSTERS_VAR_NAME, validate_shape=False)
cluster_centers_initialized = variable_scope.variable(
False, dtype=dtypes.bool, name='initialized')
if self._use_mini_batch and self._mini_batch_steps_per_iteration > 1:
# Copy of cluster centers actively updated each step according to
# mini-batch update rule.
cluster_centers_updated = variable_scope.variable(
init_value, name='clusters_updated', validate_shape=False)
# How many steps till we copy the updated clusters to cluster_centers.
update_in_steps = variable_scope.variable(
self._mini_batch_steps_per_iteration,
dtype=dtypes.int64,
name='update_in_steps')
# Count of points assigned to cluster_centers_updated.
cluster_counts = variable_scope.variable(
array_ops.zeros([num_clusters], dtype=dtypes.int64))
else:
cluster_centers_updated = cluster_centers
update_in_steps = None
cluster_counts = (
variable_scope.variable(
array_ops.ones([num_clusters], dtype=dtypes.int64))
if self._use_mini_batch else None)
return (cluster_centers, cluster_centers_initialized, cluster_counts,
cluster_centers_updated, update_in_steps)
@classmethod
def _l2_normalize_data(cls, inputs):
"""Normalized the input data."""
output = []
for inp in inputs:
with ops.colocate_with(inp, ignore_existing=True):
output.append(nn_impl.l2_normalize(inp, dim=1))
return output
def training_graph(self):
"""Generate a training graph for kmeans algorithm.
This returns, among other things, an op that chooses initial centers
(init_op), a boolean variable that is set to True when the initial centers
are chosen (cluster_centers_initialized), and an op to perform either an
entire Lloyd iteration or a mini-batch of a Lloyd iteration (training_op).
The caller should use these components as follows. A single worker should
execute init_op multiple times until cluster_centers_initialized becomes
True. Then multiple workers may execute training_op any number of times.
Returns:
A tuple consisting of:
all_scores: A matrix (or list of matrices) of dimensions (num_input,
num_clusters) where the value is the distance of an input vector and a
cluster center.
cluster_idx: A vector (or list of vectors). Each element in the vector
corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
scores: Similar to cluster_idx but specifies the distance to the
assigned cluster instead.
cluster_centers_initialized: scalar indicating whether clusters have been
initialized.
init_op: an op to initialize the clusters.
training_op: an op that runs an iteration of training.
"""
# Implementation of kmeans.
if (isinstance(self._initial_clusters, str) or
callable(self._initial_clusters)):
initial_clusters = self._initial_clusters
num_clusters = ops.convert_to_tensor(self._num_clusters)
else:
initial_clusters = ops.convert_to_tensor(self._initial_clusters)
num_clusters = array_ops.shape(initial_clusters)[0]
inputs = self._inputs
(cluster_centers_var, cluster_centers_initialized, total_counts,
cluster_centers_updated,
update_in_steps) = self._create_variables(num_clusters)
init_op = _InitializeClustersOpFactory(
self._inputs, num_clusters, initial_clusters, self._distance_metric,
self._random_seed, self._kmeans_plus_plus_num_retries,
self._kmc2_chain_length, cluster_centers_var, cluster_centers_updated,
cluster_centers_initialized).op()
cluster_centers = cluster_centers_var
if self._distance_metric == COSINE_DISTANCE:
inputs = self._l2_normalize_data(inputs)
if not self._clusters_l2_normalized():
cluster_centers = nn_impl.l2_normalize(cluster_centers, dim=1)
all_scores, scores, cluster_idx = self._infer_graph(inputs, cluster_centers)
if self._use_mini_batch:
sync_updates_op = self._mini_batch_sync_updates_op(
update_in_steps, cluster_centers_var, cluster_centers_updated,
total_counts)
assert sync_updates_op is not None
with ops.control_dependencies([sync_updates_op]):
training_op = self._mini_batch_training_op(
inputs, cluster_idx, cluster_centers_updated, total_counts)
else:
assert cluster_centers == cluster_centers_var
training_op = self._full_batch_training_op(
inputs, num_clusters, cluster_idx, cluster_centers_var)
return (all_scores, cluster_idx, scores, cluster_centers_initialized,
init_op, training_op)
def _mini_batch_sync_updates_op(self, update_in_steps, cluster_centers_var,
cluster_centers_updated, total_counts):
if self._use_mini_batch and self._mini_batch_steps_per_iteration > 1:
assert update_in_steps is not None
with ops.colocate_with(update_in_steps, ignore_existing=True):
def _f():
# Note that there is a race condition here, so we only do best-effort
# updates here. We reset update_in_steps first so that other workers
# don't duplicate the updates. Also we update cluster_center_vars
# before resetting total_counts to avoid large updates to
# cluster_centers_updated based on partially updated
# cluster_center_vars.
with ops.control_dependencies([
state_ops.assign(update_in_steps,
self._mini_batch_steps_per_iteration - 1)
]):
with ops.colocate_with(
cluster_centers_updated, ignore_existing=True):
if self._distance_metric == COSINE_DISTANCE:
cluster_centers = nn_impl.l2_normalize(
cluster_centers_updated, dim=1)
else:
cluster_centers = cluster_centers_updated
with ops.colocate_with(cluster_centers_var, ignore_existing=True):
with ops.control_dependencies(
[state_ops.assign(cluster_centers_var, cluster_centers)]):
with ops.colocate_with(None, ignore_existing=True):
with ops.control_dependencies([
state_ops.assign(total_counts,
array_ops.zeros_like(total_counts))
]):
return array_ops.identity(update_in_steps)
return control_flow_ops.cond(
update_in_steps <= 0, _f,
lambda: state_ops.assign_sub(update_in_steps, 1))
else:
return control_flow_ops.no_op()
def _mini_batch_training_op(self, inputs, cluster_idx_list, cluster_centers,
total_counts):
"""Creates an op for training for mini batch case.
Args:
inputs: list of input Tensors.
cluster_idx_list: A vector (or list of vectors). Each element in the
vector corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
cluster_centers: Tensor Ref of cluster centers.
total_counts: Tensor Ref of cluster counts.
Returns:
An op for doing an update of mini-batch k-means.
"""
update_ops = []
for inp, cluster_idx in zip(inputs, cluster_idx_list):
with ops.colocate_with(inp, ignore_existing=True):
assert total_counts is not None
cluster_idx = array_ops.reshape(cluster_idx, [-1])
# Dedupe the unique ids of cluster_centers being updated so that updates
# can be locally aggregated.
unique_ids, unique_idx = array_ops.unique(cluster_idx)
num_unique_cluster_idx = array_ops.size(unique_ids)
# Fetch the old values of counts and cluster_centers.
with ops.colocate_with(total_counts, ignore_existing=True):
old_counts = array_ops.gather(total_counts, unique_ids)
# TODO(agarwal): This colocation seems to run into problems. Fix it.
with ops.colocate_with(cluster_centers, ignore_existing=True):
old_cluster_centers = array_ops.gather(cluster_centers, unique_ids)
# Locally aggregate the increment to counts.
count_updates = math_ops.unsorted_segment_sum(
array_ops.ones_like(unique_idx, dtype=total_counts.dtype),
unique_idx, num_unique_cluster_idx)
# Locally compute the sum of inputs mapped to each id.
# For a cluster with old cluster value x, old count n, and with data
# d_1,...d_k newly assigned to it, we recompute the new value as
# \\(x += (sum_i(d_i) - k * x) / (n + k)\\).
# Compute \\(sum_i(d_i)\\), see comment above.
cluster_center_updates = math_ops.unsorted_segment_sum(
inp, unique_idx, num_unique_cluster_idx)
# Shape to enable broadcasting count_updates and learning_rate to inp.
# It extends the shape with 1's to match the rank of inp.
broadcast_shape = array_ops.concat([
array_ops.reshape(num_unique_cluster_idx, [1]),
array_ops.ones(
array_ops.reshape(array_ops.rank(inp) - 1, [1]),
dtype=dtypes.int32)
], 0)
# Subtract k * x, see comment above.
cluster_center_updates -= math_ops.cast(
array_ops.reshape(count_updates, broadcast_shape),
inp.dtype) * old_cluster_centers
learning_rate = math_ops.reciprocal(
math_ops.cast(old_counts + count_updates, inp.dtype))
learning_rate = array_ops.reshape(learning_rate, broadcast_shape)
# scale by 1 / (n + k), see comment above.
cluster_center_updates *= learning_rate
# Apply the updates.
update_counts = state_ops.scatter_add(total_counts, unique_ids,
count_updates)
update_cluster_centers = state_ops.scatter_add(
cluster_centers, unique_ids, cluster_center_updates)
update_ops.extend([update_counts, update_cluster_centers])
return control_flow_ops.group(*update_ops)
def _full_batch_training_op(self, inputs, num_clusters, cluster_idx_list,
cluster_centers):
"""Creates an op for training for full batch case.
Args:
inputs: list of input Tensors.
num_clusters: an integer Tensor providing the number of clusters.
cluster_idx_list: A vector (or list of vectors). Each element in the
vector corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
cluster_centers: Tensor Ref of cluster centers.
Returns:
An op for doing an update of full-batch k-means.
"""
cluster_sums = []
cluster_counts = []
epsilon = constant_op.constant(1e-6, dtype=inputs[0].dtype)
for inp, cluster_idx in zip(inputs, cluster_idx_list):
with ops.colocate_with(inp, ignore_existing=True):
cluster_sums.append(
math_ops.unsorted_segment_sum(inp, cluster_idx, num_clusters))
cluster_counts.append(
math_ops.unsorted_segment_sum(
array_ops.reshape(
array_ops.ones(
array_ops.reshape(array_ops.shape(inp)[0], [-1])),
[-1, 1]), cluster_idx, num_clusters))
with ops.colocate_with(cluster_centers, ignore_existing=True):
new_clusters_centers = math_ops.add_n(cluster_sums) / (
math_ops.cast(math_ops.add_n(cluster_counts), cluster_sums[0].dtype) +
epsilon)
if self._clusters_l2_normalized():
new_clusters_centers = nn_impl.l2_normalize(new_clusters_centers, dim=1)
return state_ops.assign(cluster_centers, new_clusters_centers)
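# --- Hedged usage sketch (not part of the original library) ----------------
# The training_graph() docstring above prescribes a driver loop: execute
# init_op until cluster_centers_initialized becomes True, then execute
# training_op repeatedly. The helper below is a minimal sketch of that loop,
# assuming TF 1.x graph mode, a caller-supplied tf.Session, and that the
# graph's variables have already been initialized. The helper name and its
# defaults are hypothetical and not part of this library.
def _kmeans_usage_example(session, points, num_clusters=3, num_steps=10):
  """Illustrative only: drives KMeans as described by training_graph()."""
  kmeans = KMeans(
      inputs=points,
      num_clusters=num_clusters,
      initial_clusters=RANDOM_INIT,
      distance_metric=SQUARED_EUCLIDEAN_DISTANCE,
      use_mini_batch=False)
  (_, cluster_idx, _, initialized, init_op, training_op) = (
      kmeans.training_graph())
  # A single worker runs init_op until all initial centers are chosen.
  while not session.run(initialized):
    session.run(init_op)
  # Each run of training_op performs one full Lloyd iteration here.
  for _ in range(num_steps):
    session.run(training_op)
  return session.run(cluster_idx)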
class _InitializeClustersOpFactory(object):
"""Internal class to create the op to initialize the clusters.
The op performs this algorithm (see constructor args):
num_remaining = num_clusters - length(cluster_centers)
if num_remaining == 0:
assert that cluster_centers_initialized is true
else:
assert that num_remaining > 0
new_centers = choose up to num_remaining initial centers
l2-normalize new_centers if using cosine distance
all_centers = concat(cluster_centers, new_centers)
cluster_centers := all_centers
if there is a cluster_centers_updated variable:
cluster_centers_updated := cluster_centers
num_now_remaining = num_clusters - length(cluster_centers)
if num_now_remaining == 0:
cluster_centers_initialized := true
"""
# TODO(ccolby): Refactor this class so that kmc2 isn't so much a special case.
def __init__(self, inputs, num_clusters, initial_clusters, distance_metric,
random_seed, kmeans_plus_plus_num_retries, kmc2_chain_length,
cluster_centers, cluster_centers_updated,
cluster_centers_initialized):
"""Creates an op factory.
Args:
inputs: See KMeans constructor.
num_clusters: An integer Tensor providing the number of clusters.
initial_clusters: See KMeans constructor.
distance_metric: See KMeans constructor.
random_seed: See KMeans constructor.
kmeans_plus_plus_num_retries: See KMeans constructor.
kmc2_chain_length: See KMeans constructor.
cluster_centers: The TF variable holding the initial centers. It may
already contain some centers when the op is executed.
cluster_centers_updated: A second TF variable to hold a copy of the
initial centers, used for full-batch mode. In mini-batch mode,
cluster_centers_updated is the same variable as cluster_centers.
cluster_centers_initialized: A boolean TF variable that will be set
to true when all the initial centers have been chosen.
"""
# All of these instance variables are constants.
self._inputs = inputs
self._num_clusters = num_clusters
self._initial_clusters = initial_clusters
self._distance_metric = distance_metric
self._random_seed = random_seed
self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries
self._kmc2_chain_length = kmc2_chain_length
self._cluster_centers = cluster_centers
self._cluster_centers_updated = cluster_centers_updated
self._cluster_centers_initialized = cluster_centers_initialized
self._num_selected = array_ops.shape(self._cluster_centers)[0]
self._num_remaining = self._num_clusters - self._num_selected
self._num_data = math_ops.add_n(
[array_ops.shape(i)[0] for i in self._inputs])
def _random(self):
indices = random_ops.random_uniform(
array_ops.reshape(self._num_remaining, [-1]),
minval=0,
maxval=math_ops.cast(self._num_data, dtypes.int64),
seed=self._random_seed,
dtype=dtypes.int64)
return embedding_lookup(self._inputs, indices, partition_strategy='div')
def _kmeans_plus_plus(self):
# Points from only the first shard are used for initializing centers.
# TODO(ands): Use all points.
inp = self._inputs[0]
if self._distance_metric == COSINE_DISTANCE:
inp = nn_impl.l2_normalize(inp, dim=1)
return gen_clustering_ops.kmeans_plus_plus_initialization(
inp,
math_ops.to_int64(self._num_remaining), self._random_seed,
self._kmeans_plus_plus_num_retries)
def _kmc2_multiple_centers(self):
"""Adds new initial cluster centers using the k-MC2 algorithm.
In each call to the op, the provided batch is split into subsets based on
the specified `kmc2_chain_length`. On each subset, a single Markov chain of
the k-MC2 algorithm is used to add *one* new cluster center. If there
are fewer than `kmc2_chain_length` points in the subset, a single center is
added using one Markov chain on the full input. It is assumed that the
provided batch has previously been randomly permuted. Otherwise, k-MC2 may
return suboptimal centers.
Returns:
An op that adds new cluster centers.
"""
# The op only operates on the first shard of data.
first_shard = self._inputs[0]
# Number of points in the input that can be used.
batch_size = array_ops.shape(first_shard)[0]
# Maximum number of subsets such that the size of each subset is at least
# `kmc2_chain_length`. Final subsets may be larger.
max_to_sample = math_ops.cast(
batch_size / self._kmc2_chain_length, dtype=dtypes.int32)
# We sample at least one new center and at most all remaining centers.
num_to_sample = math_ops.maximum(
math_ops.minimum(self._num_remaining, max_to_sample), 1)
def _cond(i, _):
"""Stopping condition for the while loop."""
return math_ops.less(i, num_to_sample)
def _body(i, _):
"""Body that adds a single new center based on a subset."""
def _sample_random():
"""Returns a random point as a cluster center."""
# By assumption the batch is reshuffled and _sample_random is always
# called for i=0. Hence, we simply return the first point.
new_center = array_ops.reshape(first_shard[0], [1, -1])
if self._distance_metric == COSINE_DISTANCE:
new_center = nn_impl.l2_normalize(new_center, dim=1)
return new_center
def _sample_kmc2_chain():
"""Returns previous centers as well as a new center sampled using k-MC2.
"""
# Extract the subset from the underlying batch.
start = i * self._kmc2_chain_length
end = start + self._kmc2_chain_length
subset = first_shard[start:end]
# Compute the distances from points in the subset to previous centers.
_, distances = gen_clustering_ops.nearest_neighbors(
subset, self._cluster_centers, 1)
# Sample index of new center using k-MC2 Markov chain.
new_center_index = gen_clustering_ops.kmc2_chain_initialization(
array_ops.squeeze(distances), self._random_seed)
# Extract actual new center.
newly_sampled_center = array_ops.reshape(subset[new_center_index],
[1, -1])
# Return concatenation with previously sampled centers.
if self._distance_metric == COSINE_DISTANCE:
newly_sampled_center = nn_impl.l2_normalize(
newly_sampled_center, dim=1)
return array_ops.concat([self._cluster_centers, newly_sampled_center],
0)
# Obtain a random point if there are no previously sampled centers.
# Otherwise, construct a k-MC2 Markov chain.
new_centers = control_flow_ops.cond(
math_ops.equal(self._num_selected, 0), _sample_random,
_sample_kmc2_chain)
# Assign new cluster centers to underlying variable.
assigned_centers = state_ops.assign(
self._cluster_centers, new_centers, validate_shape=False)
if self._cluster_centers_updated is not self._cluster_centers:
assigned_centers = state_ops.assign(
self._cluster_centers_updated,
assigned_centers,
validate_shape=False)
return i + 1, self._num_clusters - array_ops.shape(assigned_centers)[0]
# Add num_to_sample new data points.
_, num_remaining = control_flow_ops.while_loop(_cond, _body, [0, 0])
return num_remaining
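  # Worked example of the subset arithmetic above (illustrative numbers only):
  # with a 1000-point batch and kmc2_chain_length=200, max_to_sample is
  # 1000 / 200 = 5, so at most 5 new centers can be added per call. If only 3
  # centers remain to be chosen, num_to_sample = max(min(3, 5), 1) = 3 and the
  # loop body runs 3 times, drawing candidates from subsets [0:200), [200:400)
  # and [400:600) (the very first center, when none exist yet, is simply the
  # first point of the batch).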
def _greedy_batch_sampler(self, sampler):
# If the input dataset size is smaller than the number of centers
# remaining, choose the entire input dataset as centers. This can happen
# with mini-batch. Otherwise, sample the batch according to the provided
# sampler.
return control_flow_ops.cond(self._num_data <= self._num_remaining,
lambda: array_ops.concat(self._inputs, 0),
sampler)
def _single_batch_sampler(self, sampler):
# Enforce that there are at least as many data points as centers
# remaining. This gives the provided sampler the chance to select all
# remaining centers from a single batch.
with ops.control_dependencies(
[check_ops.assert_greater_equal(self._num_data, self._num_remaining)]):
return sampler()
def _choose_initial_centers(self):
if isinstance(self._initial_clusters, str):
if self._initial_clusters == RANDOM_INIT:
return self._greedy_batch_sampler(self._random)
else: # self._initial_clusters == KMEANS_PLUS_PLUS_INIT
return self._single_batch_sampler(self._kmeans_plus_plus)
elif callable(self._initial_clusters):
return self._initial_clusters(self._inputs, self._num_remaining)
else:
with ops.control_dependencies([
check_ops.assert_equal(self._num_remaining,
array_ops.shape(self._initial_clusters)[0])
]):
return self._initial_clusters
def _add_new_centers(self):
"""Adds some centers and returns the number of centers remaining."""
new_centers = self._choose_initial_centers()
if self._distance_metric == COSINE_DISTANCE:
new_centers = nn_impl.l2_normalize(new_centers, dim=1)
# If cluster_centers is empty, it doesn't have the right shape for concat.
all_centers = control_flow_ops.cond(
math_ops.equal(self._num_selected, 0), lambda: new_centers,
lambda: array_ops.concat([self._cluster_centers, new_centers], 0))
# TODO(ccolby): De-dupe all_centers?
a = state_ops.assign(
self._cluster_centers, all_centers, validate_shape=False)
if self._cluster_centers_updated is not self._cluster_centers:
a = state_ops.assign(
self._cluster_centers_updated, a, validate_shape=False)
return self._num_clusters - array_ops.shape(a)[0]
def _initialize(self):
with ops.control_dependencies([
check_ops.assert_positive(self._num_remaining),
]):
if self._initial_clusters == KMC2_INIT:
num_now_remaining = self._kmc2_multiple_centers()
else:
num_now_remaining = self._add_new_centers()
return control_flow_ops.cond(
math_ops.equal(num_now_remaining, 0),
lambda: state_ops.assign(self._cluster_centers_initialized, True),
control_flow_ops.no_op)
def op(self):
"""Returns the cluster initializer op."""
return control_flow_ops.cond(
math_ops.equal(self._num_remaining, 0),
lambda: check_ops.assert_equal(self._cluster_centers_initialized, True),
self._initialize)
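# --- Hedged sketch of the mini-batch update rule ----------------------------
# _mini_batch_training_op above applies, per cluster, the update
#     x += (sum_i(d_i) - k * x) / (n + k)
# where x is the old center, n its old count and d_1..d_k the points newly
# assigned to it. The pure-Python check below (illustrative only, 1-D case)
# shows that this is exactly the running mean of all points seen so far; the
# function name and numbers are hypothetical.
def _mini_batch_update_rule_example():
  """Illustrative only: the incremental form equals the pooled mean."""
  x, n = 2.0, 3              # old center value and old count
  new_points = [4.0, 6.0]    # k = 2 points newly assigned to this cluster
  k = len(new_points)
  incremental = x + (sum(new_points) - k * x) / (n + k)
  pooled_mean = (n * x + sum(new_points)) / (n + k)
  assert abs(incremental - pooled_mean) < 1e-12
  return incremental         # 3.2 for these numbers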
|
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from datetime import datetime, timedelta
from pymql.log import LOG
from pymql.mql.error import MQLAccessError
from pymql.mql.error import MQLWriteQuotaError
# This object is very very special. If you give it to me as the user
# field, you get to bypass all the access control checks.
# This means (at least) that you need to be in the same python
# environment as me (or you need to be using eval() -- snicker)
Privileged = object()
# this object when passed as the $privileged field
# enables you to pass another user id as the $authority field
# the write will still be attributed to $user, but $authority's
# permissions will be checked in addition to $user's.
Authority = object()
# this is a very special case because user creation cannot be escalated as a
# privilege to anybody else. We use a list because there might be other
# catch-22 situations.
MAX_WRITE_EXCEPTED_USERS = [
'#9202a8c04000641f80000000000000aa', # /user/user_administrator
]
class Permission(object):
"""
A little wrapper around a permission guid that allows us to break
out the actual permission queries from the rest of lojson.
This object is more or less temporary - don't keep it anywhere
"""
def __init__(self, querier, guid):
self.querier = querier
self.guid = guid
def user_has_permission_permission(self, userguid, varenv):
"""
Can the user administer objects with this permission?
"""
# do this in one query.
# don't cache the results at the moment.
# (if non-caching is a problem we can always cache later.)
query = {
'@guid': self.guid,
'is_instance_of': {
'@id': '/type/permission'
},
'has_write_group': [{
'is_instance_of': {
'@id': '/type/usergroup'
},
# should check /type/user as the user loader does
# not.
'has_member': {
'@guid': userguid,
'is_instance_of': {
'@id': '/type/user'
}
},
':optional': False,
}],
'has_permission': {
'@guid':
None,
'is_instance_of': {
'@id': '/type/permission'
},
'has_write_group': [{
'is_instance_of': {
'@id': '/type/usergroup'
},
# should check /type/user as the user loader
# does not.
'has_member': {
'@guid': userguid,
'is_instance_of': {
'@id': '/type/user'
}
},
':optional': False,
}]
}
}
result = self.querier.read(query, varenv)
# slight paranoia - result is not None should be enough; but
# optional=false might break in the future.
if (result is not None and
result['has_write_group'][0]['has_member']['@guid'] == userguid and
result['has_permission']['has_write_group'][0]['has_member']['@guid'] ==
userguid):
return True
return False
# XXX revocation should work properly...
def user_has_write_permission(self, userguid, varenv):
"""
Can the user write to objects with this permission?
"""
# this is the only query ever run to verify permissions, so it
# should be properly paranoid.
#
# Currently it checks the following.
#
# 0 - the user actually is a member of a group with permission!
# 1 - the permission is of /type/permission
# 2 - the attached group is of /type/usergroup
# 3 - the attached user is of /type/user
#
# It is possible we will put further restrictions on what it means to
# be a valid user, valid permission and/or valid group in the
# future. Perhaps you need to be in the /user namespace as a
# user. Perhaps groups and permissions also have namespaces.
query = {
'@guid':
self.guid,
'is_instance_of': {
'@id': '/type/permission'
},
'has_write_group': [{
'is_instance_of': {
'@id': '/type/usergroup'
},
# should check /type/user as the user loader does not.
'has_member': {
'@guid': userguid,
'is_instance_of': {
'@id': '/type/user'
}
},
':optional': False,
}]
}
result = self.querier.read(query, varenv)
# slight paranoia - result is not None should be enough; but
# optional=false might break in the future.
if (result is not None and
result['has_write_group'][0]['has_member']['@guid'] == userguid):
return True
return False
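# --- Hedged sketch (not part of the original module) -------------------------
# Shows how user_has_write_permission() interprets the query result above: a
# non-None result only grants access when the returned write group really
# contains the requesting user. The stub querier, the guids and the helper
# name below are all hypothetical.
class _StubQuerier(object):
  """Illustrative only: returns a canned result shaped like a real read."""
  def __init__(self, member_guid):
    self._member_guid = member_guid
  def read(self, query, varenv):
    return {'has_write_group': [{'has_member': {'@guid': self._member_guid}}]}
def _permission_check_example():
  user = '#9202a8c04000641f8000000000000001'   # hypothetical user guid
  perm = Permission(_StubQuerier(user), '#9202a8c04000641f8000000000000002')
  return perm.user_has_write_permission(user, varenv={})  # -> True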
#
# we're a bit more restrictive with user-ids than with regular ids:
# we insist on lower-case only, and a max length of 38 (32 significant characters)
#
__userid_re = re.compile('^/user/[a-z](?:_?[a-z0-9])*$')
def valid_user_id(userid):
return __userid_re.match(userid) and len(userid) <= 38
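# --- Hedged sketch: ids accepted and rejected by valid_user_id ---------------
# (illustrative ids only; the helper below is not part of the original module)
def _valid_user_id_examples():
  assert valid_user_id('/user/alice')              # lower-case: accepted
  assert valid_user_id('/user/a_b2')               # single underscores are ok
  assert not valid_user_id('/user/Alice')          # upper-case is rejected
  assert not valid_user_id('/user/_alice')         # must start with [a-z]
  assert not valid_user_id('/user/' + 'a' * 33)    # longer than 38 characters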
def check_attribution_to_user(querier, varenv, attributionguid):
query = {
'@guid': attributionguid,
'@scope': varenv.get_user_guid(),
'is_instance_of': {
'@id': '/type/attribution'
}
}
result = querier.read(query, varenv)
if result is None:
return False
else:
return True
def check_write_defaults(querier, varenv):
"""
It is painful to deal with $user, $permission, $authority and $attribution
all the time, so this function verifies them and then sets them as member
variables.
"""
if not varenv.get_user_guid():
raise MQLAccessError(
None, 'You must specify a valid user to write', user=None)
# must check authority before permission as authority affects the check_permission() call later
if varenv.get('$authority'):
if not varenv.get('$privileged') is Authority:
# *****************************************************************************************************************
raise MQLAccessError(
None,
'user %(user)s cannot use authority %(authority)s without scope.Authority',
user=varenv.get_user_id(),
authority=varenv.get('$authority'))
# *****************************************************************************************************************
varenv.authority_guid = querier.lookup.lookup_guid(
varenv.get('$authority'), varenv)
else:
varenv.authority_guid = None
if varenv.get('$permission'):
permission_guid = querier.lookup.lookup_guid(
varenv.get('$permission'), varenv)
if not check_permission(querier, varenv, permissionguid=permission_guid):
# *****************************************************************************************************************
raise MQLAccessError(
None,
'User %(user)s cannot create with permission %(permission)s',
user=varenv.get_user_id(),
permission=permission_guid)
# *****************************************************************************************************************
# permission checks out OK (this implies the user checked out OK too)
varenv.default_permission_guid = permission_guid
else:
# *****************************************************************************************************************
raise MQLAccessError(
None,
'You must specify a default permission to write with',
permission=None)
# *****************************************************************************************************************
if varenv.get('$attribution'):
attribution_guid = querier.lookup.lookup_guid(
varenv.get('$attribution'), varenv)
if not check_attribution_to_user(querier, varenv, attribution_guid):
# *****************************************************************************************************************
raise MQLAccessError(
None,
'User %(user)s cannot attribute to a node %(attribution)s that they did not create, or is not of type /type/attribution',
user=varenv.get_user_id(),
attribution=varenv.get('$attribution'))
# *****************************************************************************************************************
# attribution checks out OK
varenv.attribution_guid = attribution_guid
else:
varenv.attribution_guid = varenv.get_user_guid()
def check_permission(querier, varenv, permissionguid):
"""
Check if the user can write to objects permitted by permission_guid
"""
write_permission = varenv.setdefault('write_permission', {})
if permissionguid not in write_permission:
userguid = varenv.get_user_guid()
authorityguid = varenv.authority_guid
permission = Permission(querier, permissionguid)
has_access = permission.user_has_write_permission(userguid, varenv)
if not has_access and authorityguid:
has_access = permission.user_has_write_permission(authorityguid, varenv)
if has_access:
LOG.notice(
'access.authority', 'for user %s, permission %s and authority %s' %
(userguid, permissionguid, authorityguid))
if not has_access and varenv.get('$privileged') is Privileged:
LOG.notice('access.privileged',
'for user %s and permission %s' % (userguid, permissionguid))
has_access = True
write_permission[permissionguid] = has_access
return write_permission[permissionguid]
def check_change_permission_by_user(querier, varenv, old_permission_guid,
new_permission_guid):
has_old_permission = \
check_permission_permission(querier, varenv, old_permission_guid)
has_new_permission = \
check_permission_permission(querier, varenv, new_permission_guid)
# privileged access bypass
if varenv.get('$privileged') is Privileged:
LOG.notice(
'access.privileged', 'for user %s changing permission %s to %s' %
(varenv.get_user_guid(), old_permission_guid, new_permission_guid))
return True
# otherwise the user must hold permission-permission on both the old
# and new permissions (the privileged bypass above short-circuits
# this check).
return has_old_permission and has_new_permission
def check_permission_permission(querier, varenv, permission_guid):
"""
Check if the user has permission to administer the given permission
"""
permission_permission = varenv.setdefault('permission_permission', {})
if permission_guid not in permission_permission:
userguid = varenv.get_user_guid()
authorityguid = varenv.authority_guid
permission = Permission(querier, permission_guid)
has_access = permission.user_has_permission_permission(userguid, varenv)
if not has_access and authorityguid:
has_access = permission.user_has_permission_permission(
authorityguid, varenv)
if has_access:
LOG.notice(
'access.authority', 'for user %s, permission %s and authority %s' %
(userguid, permission_guid, authorityguid))
permission_permission[permission_guid] = has_access
return permission_permission[permission_guid]
def check_write_throttle(querier, varenv):
userguid = varenv.get_user_guid()
max_writes = varenv.get('max_writes', None)
if max_writes is None or userguid in MAX_WRITE_EXCEPTED_USERS:
LOG.error('write.throttle.skipped',
'user=%s skipped write throttle' % userguid)
return True
# userguid starts with a '#' while max_writes['guid'] does not.
# We need to strip the '#' in order for the comparison to succeed.
if userguid[0] == '#':
userguid = userguid[1:]
if max_writes['guid'] != userguid:
LOG.notice(
'write.throttle.different_users',
'Logged in user: %s different from mqlwrite user: %s' %
(max_writes['guid'], userguid))
# 1 day
tdelta = timedelta(1)
yesterday = (datetime.utcnow() - tdelta).isoformat()
# MQL attribution models documented at:
# https://wiki.metaweb.com/index.php/MQL_Attribution_for_OAuth%2C_Acre%2C_etc
# normal attribution query
# need the optional to suppress EMPTY on count=0
graphq = ('(scope=%s timestamp>%s live=dontcare newest>=0 result=(count) '
'optional)') % (
max_writes['guid'], yesterday)
gresult = querier.gc.read_varenv(graphq, varenv)
count = int(gresult[0])
# oauth/open social attribution query
graphq = ('(scope->(scope=%s) timestamp>%s live=dontcare newest>=0 '
'result=(count) optional)') % (
max_writes['guid'], yesterday)
gresult = querier.gc.read_varenv(graphq, varenv)
count += int(gresult[0])
if count > max_writes['limit']:
LOG.alert(
'write.throttle.exceeded', 'user=%s count=%s max=%d delta=%s' %
(max_writes['guid'], count, max_writes['limit'], str(tdelta)))
msg = 'Daily write limit of %s was exceeded.' % max_writes['limit']
raise MQLWriteQuotaError(
None,
msg,
user='/guid/' + max_writes['guid'],
count=count,
max_writes=max_writes['limit'],
period=str(tdelta))
else:
LOG.notice(
'write.throttle.ok', 'user=%s count=%s max=%s' %
(max_writes['guid'], count, max_writes['limit']))
return True
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import config
from . import state
class transport(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/global/transport. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines ISIS transport.
"""
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "transport"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"global",
"transport",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/transport/config (container)
YANG Description: This container defines ISIS transport related configuration.
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/transport/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: This container defines ISIS transport related configuration.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/transport/state (container)
YANG Description: This container defines state information for ISIS transport
parameters.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/transport/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: This container defines state information for ISIS transport
parameters.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])
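# --- Hedged usage sketch (not part of the generated bindings) ----------------
# A minimal illustration of how this generated container might be used,
# assuming the sibling generated `config`/`state` modules import cleanly; the
# helper name is hypothetical.
def _transport_usage_example():
    t = transport()
    cfg = t.config   # `config` property generated above wraps config.config
    path = t._path() # ['network-instances', ..., 'global', 'transport']
    return cfg, path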
from . import config
from . import state
class transport(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/global/transport. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines ISIS transport.
"""
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "transport"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"global",
"transport",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/transport/config (container)
YANG Description: This container defines ISIS transport related configuration.
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/transport/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: This container defines ISIS transport related configuration.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/transport/state (container)
YANG Description: This container defines state information for ISIS transport
parameters.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/transport/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: This container defines state information for ISIS transport
parameters.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])
|
|
"""
Support for Z-Wave.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/zwave/
"""
import asyncio
import copy
import logging
from pprint import pprint
import voluptuous as vol
from homeassistant.core import callback, CoreState
from homeassistant.loader import get_platform
from homeassistant.helpers import discovery
from homeassistant.helpers.entity import generate_entity_id
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.entity_registry import async_get_registry
from homeassistant.const import (
ATTR_ENTITY_ID, EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP)
from homeassistant.helpers.entity_values import EntityValues
from homeassistant.helpers.event import async_track_time_change
from homeassistant.util import convert
import homeassistant.util.dt as dt_util
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect, async_dispatcher_send)
from . import const
from .const import DOMAIN, DATA_DEVICES, DATA_NETWORK, DATA_ENTITY_VALUES
from .node_entity import ZWaveBaseEntity, ZWaveNodeEntity
from . import workaround
from .discovery_schemas import DISCOVERY_SCHEMAS
from .util import (check_node_schema, check_value_schema, node_name,
check_has_unique_id, is_node_parsed)
REQUIREMENTS = ['pydispatcher==2.0.5', 'python_openzwave==0.4.9']
_LOGGER = logging.getLogger(__name__)
CLASS_ID = 'class_id'
CONF_AUTOHEAL = 'autoheal'
CONF_DEBUG = 'debug'
CONF_POLLING_INTENSITY = 'polling_intensity'
CONF_POLLING_INTERVAL = 'polling_interval'
CONF_USB_STICK_PATH = 'usb_path'
CONF_CONFIG_PATH = 'config_path'
CONF_IGNORED = 'ignored'
CONF_INVERT_OPENCLOSE_BUTTONS = 'invert_openclose_buttons'
CONF_REFRESH_VALUE = 'refresh_value'
CONF_REFRESH_DELAY = 'delay'
CONF_DEVICE_CONFIG = 'device_config'
CONF_DEVICE_CONFIG_GLOB = 'device_config_glob'
CONF_DEVICE_CONFIG_DOMAIN = 'device_config_domain'
CONF_NETWORK_KEY = 'network_key'
ATTR_POWER = 'power_consumption'
DEFAULT_CONF_AUTOHEAL = True
DEFAULT_CONF_USB_STICK_PATH = '/zwaveusbstick'
DEFAULT_POLLING_INTERVAL = 60000
DEFAULT_DEBUG = False
DEFAULT_CONF_IGNORED = False
DEFAULT_CONF_INVERT_OPENCLOSE_BUTTONS = False
DEFAULT_CONF_REFRESH_VALUE = False
DEFAULT_CONF_REFRESH_DELAY = 5
RENAME_NODE_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_NAME): cv.string,
})
RENAME_VALUE_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_VALUE_ID): vol.Coerce(int),
vol.Required(const.ATTR_NAME): cv.string,
})
SET_CONFIG_PARAMETER_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_CONFIG_PARAMETER): vol.Coerce(int),
vol.Required(const.ATTR_CONFIG_VALUE): vol.Any(vol.Coerce(int), cv.string),
vol.Optional(const.ATTR_CONFIG_SIZE, default=2): vol.Coerce(int)
})
SET_POLL_INTENSITY_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_VALUE_ID): vol.Coerce(int),
vol.Required(const.ATTR_POLL_INTENSITY): vol.Coerce(int),
})
PRINT_CONFIG_PARAMETER_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_CONFIG_PARAMETER): vol.Coerce(int),
})
NODE_SERVICE_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
})
REFRESH_ENTITY_SCHEMA = vol.Schema({
vol.Required(ATTR_ENTITY_ID): cv.entity_id,
})
RESET_NODE_METERS_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Optional(const.ATTR_INSTANCE, default=1): vol.Coerce(int)
})
CHANGE_ASSOCIATION_SCHEMA = vol.Schema({
vol.Required(const.ATTR_ASSOCIATION): cv.string,
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_TARGET_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_GROUP): vol.Coerce(int),
vol.Optional(const.ATTR_INSTANCE, default=0x00): vol.Coerce(int)
})
SET_WAKEUP_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_CONFIG_VALUE):
vol.All(vol.Coerce(int), cv.positive_int),
})
HEAL_NODE_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Optional(const.ATTR_RETURN_ROUTES, default=False): cv.boolean,
})
TEST_NODE_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Optional(const.ATTR_MESSAGES, default=1): cv.positive_int,
})
DEVICE_CONFIG_SCHEMA_ENTRY = vol.Schema({
vol.Optional(CONF_POLLING_INTENSITY): cv.positive_int,
vol.Optional(CONF_IGNORED, default=DEFAULT_CONF_IGNORED): cv.boolean,
vol.Optional(CONF_INVERT_OPENCLOSE_BUTTONS,
default=DEFAULT_CONF_INVERT_OPENCLOSE_BUTTONS): cv.boolean,
vol.Optional(CONF_REFRESH_VALUE, default=DEFAULT_CONF_REFRESH_VALUE):
cv.boolean,
vol.Optional(CONF_REFRESH_DELAY, default=DEFAULT_CONF_REFRESH_DELAY):
cv.positive_int
})
SIGNAL_REFRESH_ENTITY_FORMAT = 'zwave_refresh_entity_{}'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_AUTOHEAL, default=DEFAULT_CONF_AUTOHEAL): cv.boolean,
vol.Optional(CONF_CONFIG_PATH): cv.string,
vol.Optional(CONF_NETWORK_KEY): cv.string,
vol.Optional(CONF_DEVICE_CONFIG, default={}):
vol.Schema({cv.entity_id: DEVICE_CONFIG_SCHEMA_ENTRY}),
vol.Optional(CONF_DEVICE_CONFIG_GLOB, default={}):
vol.Schema({cv.string: DEVICE_CONFIG_SCHEMA_ENTRY}),
vol.Optional(CONF_DEVICE_CONFIG_DOMAIN, default={}):
vol.Schema({cv.string: DEVICE_CONFIG_SCHEMA_ENTRY}),
vol.Optional(CONF_DEBUG, default=DEFAULT_DEBUG): cv.boolean,
vol.Optional(CONF_POLLING_INTERVAL, default=DEFAULT_POLLING_INTERVAL):
cv.positive_int,
vol.Optional(CONF_USB_STICK_PATH, default=DEFAULT_CONF_USB_STICK_PATH):
cv.string,
}),
}, extra=vol.ALLOW_EXTRA)
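# --- Hedged sketch (not part of the original component) ----------------------
# An example of the mapping CONFIG_SCHEMA above accepts, i.e. what a user's
# `zwave:` YAML section deserializes to before validation. The concrete
# values (device path, entity id, polling numbers) are hypothetical.
_EXAMPLE_ZWAVE_CONFIG = {
    DOMAIN: {
        CONF_USB_STICK_PATH: '/dev/ttyACM0',
        CONF_POLLING_INTERVAL: 30000,
        CONF_DEVICE_CONFIG: {
            'light.kitchen': {CONF_POLLING_INTENSITY: 1},
        },
    },
}
# CONFIG_SCHEMA(_EXAMPLE_ZWAVE_CONFIG) would validate this and fill in the
# remaining defaults (autoheal, debug, device_config_glob, ...).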
def _obj_to_dict(obj):
"""Convert an object into a hash for debug."""
return {key: getattr(obj, key) for key
in dir(obj)
if key[0] != '_' and not callable(getattr(obj, key))}
def _value_name(value):
"""Return the name of the value."""
return '{} {}'.format(node_name(value.node), value.label).strip()
def nice_print_node(node):
"""Print a nice formatted node to the output (debug method)."""
node_dict = _obj_to_dict(node)
node_dict['values'] = {value_id: _obj_to_dict(value)
for value_id, value in node.values.items()}
_LOGGER.info("FOUND NODE %s \n"
"%s", node.product_name, node_dict)
def get_config_value(node, value_index, tries=5):
"""Return the current configuration value for a specific index."""
try:
for value in node.values.values():
if (value.command_class == const.COMMAND_CLASS_CONFIGURATION
and value.index == value_index):
return value.data
except RuntimeError:
        # If we get a runtime error, the dict changed while
        # we were looking for a value; just try again
return None if tries <= 0 else get_config_value(
node, value_index, tries=tries - 1)
return None
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up the Z-Wave platform (generic part)."""
if discovery_info is None or DATA_NETWORK not in hass.data:
return False
device = hass.data[DATA_DEVICES].pop(
discovery_info[const.DISCOVERY_DEVICE], None)
if device is None:
return False
async_add_entities([device])
return True
async def async_setup(hass, config):
"""Set up Z-Wave.
Will automatically load components to support devices found on the network.
"""
from pydispatch import dispatcher
# pylint: disable=import-error
from openzwave.option import ZWaveOption
from openzwave.network import ZWaveNetwork
from openzwave.group import ZWaveGroup
# Load configuration
use_debug = config[DOMAIN].get(CONF_DEBUG)
autoheal = config[DOMAIN].get(CONF_AUTOHEAL)
device_config = EntityValues(
config[DOMAIN][CONF_DEVICE_CONFIG],
config[DOMAIN][CONF_DEVICE_CONFIG_DOMAIN],
config[DOMAIN][CONF_DEVICE_CONFIG_GLOB])
# Setup options
options = ZWaveOption(
config[DOMAIN].get(CONF_USB_STICK_PATH),
user_path=hass.config.config_dir,
config_path=config[DOMAIN].get(CONF_CONFIG_PATH))
options.set_console_output(use_debug)
if CONF_NETWORK_KEY in config[DOMAIN]:
options.addOption("NetworkKey", config[DOMAIN][CONF_NETWORK_KEY])
options.lock()
network = hass.data[DATA_NETWORK] = ZWaveNetwork(options, autostart=False)
hass.data[DATA_DEVICES] = {}
hass.data[DATA_ENTITY_VALUES] = []
if use_debug: # pragma: no cover
def log_all(signal, value=None):
"""Log all the signals."""
print("")
print("SIGNAL *****", signal)
if value and signal in (ZWaveNetwork.SIGNAL_VALUE_CHANGED,
ZWaveNetwork.SIGNAL_VALUE_ADDED,
ZWaveNetwork.SIGNAL_SCENE_EVENT,
ZWaveNetwork.SIGNAL_NODE_EVENT,
ZWaveNetwork.SIGNAL_AWAKE_NODES_QUERIED,
ZWaveNetwork.SIGNAL_ALL_NODES_QUERIED):
pprint(_obj_to_dict(value))
print("")
dispatcher.connect(log_all, weak=False)
def value_added(node, value):
"""Handle new added value to a node on the network."""
# Check if this value should be tracked by an existing entity
for values in hass.data[DATA_ENTITY_VALUES]:
values.check_value(value)
for schema in DISCOVERY_SCHEMAS:
if not check_node_schema(node, schema):
continue
if not check_value_schema(
value,
schema[const.DISC_VALUES][const.DISC_PRIMARY]):
continue
values = ZWaveDeviceEntityValues(
hass, schema, value, config, device_config, registry)
# We create a new list and update the reference here so that
# the list can be safely iterated over in the main thread
new_values = hass.data[DATA_ENTITY_VALUES] + [values]
hass.data[DATA_ENTITY_VALUES] = new_values
component = EntityComponent(_LOGGER, DOMAIN, hass)
registry = await async_get_registry(hass)
def node_added(node):
"""Handle a new node on the network."""
entity = ZWaveNodeEntity(node, network)
def _add_node_to_component():
name = node_name(node)
generated_id = generate_entity_id(DOMAIN + '.{}', name, [])
node_config = device_config.get(generated_id)
if node_config.get(CONF_IGNORED):
_LOGGER.info(
"Ignoring node entity %s due to device settings",
generated_id)
return
component.add_entities([entity])
if entity.unique_id:
_add_node_to_component()
return
@callback
def _on_ready(sec):
_LOGGER.info("Z-Wave node %d ready after %d seconds",
entity.node_id, sec)
hass.async_add_job(_add_node_to_component)
@callback
def _on_timeout(sec):
_LOGGER.warning(
"Z-Wave node %d not ready after %d seconds, "
"continuing anyway",
entity.node_id, sec)
hass.async_add_job(_add_node_to_component)
hass.add_job(check_has_unique_id, entity, _on_ready, _on_timeout,
hass.loop)
def network_ready():
"""Handle the query of all awake nodes."""
_LOGGER.info("Zwave network is ready for use. All awake nodes "
"have been queried. Sleeping nodes will be "
"queried when they awake.")
hass.bus.fire(const.EVENT_NETWORK_READY)
def network_complete():
"""Handle the querying of all nodes on network."""
_LOGGER.info("Z-Wave network is complete. All nodes on the network "
"have been queried")
hass.bus.fire(const.EVENT_NETWORK_COMPLETE)
dispatcher.connect(
value_added, ZWaveNetwork.SIGNAL_VALUE_ADDED, weak=False)
dispatcher.connect(
node_added, ZWaveNetwork.SIGNAL_NODE_ADDED, weak=False)
dispatcher.connect(
network_ready, ZWaveNetwork.SIGNAL_AWAKE_NODES_QUERIED, weak=False)
dispatcher.connect(
network_complete, ZWaveNetwork.SIGNAL_ALL_NODES_QUERIED, weak=False)
def add_node(service):
"""Switch into inclusion mode."""
_LOGGER.info("Z-Wave add_node have been initialized")
network.controller.add_node()
def add_node_secure(service):
"""Switch into secure inclusion mode."""
_LOGGER.info("Z-Wave add_node_secure have been initialized")
network.controller.add_node(True)
def remove_node(service):
"""Switch into exclusion mode."""
_LOGGER.info("Z-Wwave remove_node have been initialized")
network.controller.remove_node()
def cancel_command(service):
"""Cancel a running controller command."""
_LOGGER.info("Cancel running Z-Wave command")
network.controller.cancel_command()
def heal_network(service):
"""Heal the network."""
_LOGGER.info("Z-Wave heal running")
network.heal()
def soft_reset(service):
"""Soft reset the controller."""
_LOGGER.info("Z-Wave soft_reset have been initialized")
network.controller.soft_reset()
def update_config(service):
"""Update the config from git."""
_LOGGER.info("Configuration update has been initialized")
network.controller.update_ozw_config()
def test_network(service):
"""Test the network by sending commands to all the nodes."""
_LOGGER.info("Z-Wave test_network have been initialized")
network.test()
def stop_network(_service_or_event):
"""Stop Z-Wave network."""
_LOGGER.info("Stopping Z-Wave network")
network.stop()
if hass.state == CoreState.running:
hass.bus.fire(const.EVENT_NETWORK_STOP)
def rename_node(service):
"""Rename a node."""
node_id = service.data.get(const.ATTR_NODE_ID)
node = network.nodes[node_id]
name = service.data.get(const.ATTR_NAME)
node.name = name
_LOGGER.info(
"Renamed Z-Wave node %d to %s", node_id, name)
def rename_value(service):
"""Rename a node value."""
node_id = service.data.get(const.ATTR_NODE_ID)
value_id = service.data.get(const.ATTR_VALUE_ID)
node = network.nodes[node_id]
value = node.values[value_id]
name = service.data.get(const.ATTR_NAME)
value.label = name
_LOGGER.info(
"Renamed Z-Wave value (Node %d Value %d) to %s",
node_id, value_id, name)
def set_poll_intensity(service):
"""Set the polling intensity of a node value."""
node_id = service.data.get(const.ATTR_NODE_ID)
value_id = service.data.get(const.ATTR_VALUE_ID)
node = network.nodes[node_id]
value = node.values[value_id]
intensity = service.data.get(const.ATTR_POLL_INTENSITY)
if intensity == 0:
if value.disable_poll():
_LOGGER.info("Polling disabled (Node %d Value %d)",
node_id, value_id)
return
_LOGGER.info("Polling disabled failed (Node %d Value %d)",
node_id, value_id)
else:
if value.enable_poll(intensity):
_LOGGER.info(
"Set polling intensity (Node %d Value %d) to %s",
node_id, value_id, intensity)
return
_LOGGER.info("Set polling intensity failed (Node %d Value %d)",
node_id, value_id)
def remove_failed_node(service):
"""Remove failed node."""
node_id = service.data.get(const.ATTR_NODE_ID)
_LOGGER.info("Trying to remove zwave node %d", node_id)
network.controller.remove_failed_node(node_id)
def replace_failed_node(service):
"""Replace failed node."""
node_id = service.data.get(const.ATTR_NODE_ID)
_LOGGER.info("Trying to replace zwave node %d", node_id)
network.controller.replace_failed_node(node_id)
def set_config_parameter(service):
"""Set a config parameter to a node."""
node_id = service.data.get(const.ATTR_NODE_ID)
node = network.nodes[node_id]
param = service.data.get(const.ATTR_CONFIG_PARAMETER)
selection = service.data.get(const.ATTR_CONFIG_VALUE)
size = service.data.get(const.ATTR_CONFIG_SIZE)
for value in (
node.get_values(class_id=const.COMMAND_CLASS_CONFIGURATION)
.values()):
if value.index != param:
continue
if value.type in [const.TYPE_LIST, const.TYPE_BOOL]:
value.data = str(selection)
_LOGGER.info("Setting config parameter %s on Node %s "
"with list/bool selection %s", param, node_id,
str(selection))
return
if value.type == const.TYPE_BUTTON:
network.manager.pressButton(value.value_id)
network.manager.releaseButton(value.value_id)
_LOGGER.info("Setting config parameter %s on Node %s "
"with button selection %s", param, node_id,
selection)
return
value.data = int(selection)
_LOGGER.info("Setting config parameter %s on Node %s "
"with selection %s", param, node_id,
selection)
return
node.set_config_param(param, selection, size)
_LOGGER.info("Setting unknown config parameter %s on Node %s "
"with selection %s", param, node_id,
selection)
def print_config_parameter(service):
"""Print a config parameter from a node."""
node_id = service.data.get(const.ATTR_NODE_ID)
node = network.nodes[node_id]
param = service.data.get(const.ATTR_CONFIG_PARAMETER)
_LOGGER.info("Config parameter %s on Node %s: %s",
param, node_id, get_config_value(node, param))
def print_node(service):
"""Print all information about z-wave node."""
node_id = service.data.get(const.ATTR_NODE_ID)
node = network.nodes[node_id]
nice_print_node(node)
def set_wakeup(service):
"""Set wake-up interval of a node."""
node_id = service.data.get(const.ATTR_NODE_ID)
node = network.nodes[node_id]
value = service.data.get(const.ATTR_CONFIG_VALUE)
if node.can_wake_up():
for value_id in node.get_values(
class_id=const.COMMAND_CLASS_WAKE_UP):
node.values[value_id].data = value
_LOGGER.info("Node %s wake-up set to %d", node_id, value)
else:
_LOGGER.info("Node %s is not wakeable", node_id)
def change_association(service):
"""Change an association in the zwave network."""
association_type = service.data.get(const.ATTR_ASSOCIATION)
node_id = service.data.get(const.ATTR_NODE_ID)
target_node_id = service.data.get(const.ATTR_TARGET_NODE_ID)
group = service.data.get(const.ATTR_GROUP)
instance = service.data.get(const.ATTR_INSTANCE)
node = ZWaveGroup(group, network, node_id)
if association_type == 'add':
node.add_association(target_node_id, instance)
_LOGGER.info("Adding association for node:%s in group:%s "
"target node:%s, instance=%s", node_id, group,
target_node_id, instance)
if association_type == 'remove':
node.remove_association(target_node_id, instance)
_LOGGER.info("Removing association for node:%s in group:%s "
"target node:%s, instance=%s", node_id, group,
target_node_id, instance)
async def async_refresh_entity(service):
"""Refresh values that specific entity depends on."""
entity_id = service.data.get(ATTR_ENTITY_ID)
async_dispatcher_send(
hass, SIGNAL_REFRESH_ENTITY_FORMAT.format(entity_id))
def refresh_node(service):
"""Refresh all node info."""
node_id = service.data.get(const.ATTR_NODE_ID)
node = network.nodes[node_id]
node.refresh_info()
def reset_node_meters(service):
"""Reset meter counters of a node."""
node_id = service.data.get(const.ATTR_NODE_ID)
instance = service.data.get(const.ATTR_INSTANCE)
node = network.nodes[node_id]
for value in (
node.get_values(class_id=const.COMMAND_CLASS_METER)
.values()):
if value.index != const.INDEX_METER_RESET:
continue
if value.instance != instance:
continue
network.manager.pressButton(value.value_id)
network.manager.releaseButton(value.value_id)
_LOGGER.info("Resetting meters on node %s instance %s....",
node_id, instance)
return
_LOGGER.info("Node %s on instance %s does not have resettable "
"meters.", node_id, instance)
def heal_node(service):
"""Heal a node on the network."""
node_id = service.data.get(const.ATTR_NODE_ID)
update_return_routes = service.data.get(const.ATTR_RETURN_ROUTES)
node = network.nodes[node_id]
_LOGGER.info("Z-Wave node heal running for node %s", node_id)
node.heal(update_return_routes)
def test_node(service):
"""Send test messages to a node on the network."""
node_id = service.data.get(const.ATTR_NODE_ID)
messages = service.data.get(const.ATTR_MESSAGES)
node = network.nodes[node_id]
_LOGGER.info("Sending %s test-messages to node %s.", messages, node_id)
node.test(messages)
def start_zwave(_service_or_event):
"""Startup Z-Wave network."""
_LOGGER.info("Starting Z-Wave network...")
network.start()
hass.bus.fire(const.EVENT_NETWORK_START)
async def _check_awaked():
"""Wait for Z-wave awaked state (or timeout) and finalize start."""
_LOGGER.debug(
"network state: %d %s", network.state,
network.state_str)
start_time = dt_util.utcnow()
while True:
waited = int((dt_util.utcnow()-start_time).total_seconds())
if network.state >= network.STATE_AWAKED:
# Need to be in STATE_AWAKED before talking to nodes.
_LOGGER.info("Z-Wave ready after %d seconds", waited)
break
elif waited >= const.NETWORK_READY_WAIT_SECS:
# Wait up to NETWORK_READY_WAIT_SECS seconds for the Z-Wave
# network to be ready.
_LOGGER.warning(
"Z-Wave not ready after %d seconds, continuing anyway",
waited)
_LOGGER.info(
"final network state: %d %s", network.state,
network.state_str)
break
else:
await asyncio.sleep(1, loop=hass.loop)
hass.async_add_job(_finalize_start)
hass.add_job(_check_awaked)
def _finalize_start():
"""Perform final initializations after Z-Wave network is awaked."""
polling_interval = convert(
config[DOMAIN].get(CONF_POLLING_INTERVAL), int)
if polling_interval is not None:
network.set_poll_interval(polling_interval, False)
poll_interval = network.get_poll_interval()
_LOGGER.info("Z-Wave polling interval set to %d ms", poll_interval)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_network)
# Register node services for Z-Wave network
hass.services.register(DOMAIN, const.SERVICE_ADD_NODE, add_node)
hass.services.register(DOMAIN, const.SERVICE_ADD_NODE_SECURE,
add_node_secure)
hass.services.register(DOMAIN, const.SERVICE_REMOVE_NODE, remove_node)
hass.services.register(DOMAIN, const.SERVICE_CANCEL_COMMAND,
cancel_command)
hass.services.register(DOMAIN, const.SERVICE_HEAL_NETWORK,
heal_network)
hass.services.register(DOMAIN, const.SERVICE_SOFT_RESET, soft_reset)
hass.services.register(DOMAIN, const.SERVICE_UPDATE_CONFIG,
update_config)
hass.services.register(DOMAIN, const.SERVICE_TEST_NETWORK,
test_network)
hass.services.register(DOMAIN, const.SERVICE_STOP_NETWORK,
stop_network)
hass.services.register(DOMAIN, const.SERVICE_START_NETWORK,
start_zwave)
hass.services.register(DOMAIN, const.SERVICE_RENAME_NODE, rename_node,
schema=RENAME_NODE_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_RENAME_VALUE,
rename_value,
schema=RENAME_VALUE_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_SET_CONFIG_PARAMETER,
set_config_parameter,
schema=SET_CONFIG_PARAMETER_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_PRINT_CONFIG_PARAMETER,
print_config_parameter,
schema=PRINT_CONFIG_PARAMETER_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_REMOVE_FAILED_NODE,
remove_failed_node,
schema=NODE_SERVICE_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_REPLACE_FAILED_NODE,
replace_failed_node,
schema=NODE_SERVICE_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_CHANGE_ASSOCIATION,
change_association,
schema=CHANGE_ASSOCIATION_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_SET_WAKEUP,
set_wakeup,
schema=SET_WAKEUP_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_PRINT_NODE,
print_node,
schema=NODE_SERVICE_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_REFRESH_ENTITY,
async_refresh_entity,
schema=REFRESH_ENTITY_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_REFRESH_NODE,
refresh_node,
schema=NODE_SERVICE_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_RESET_NODE_METERS,
reset_node_meters,
schema=RESET_NODE_METERS_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_SET_POLL_INTENSITY,
set_poll_intensity,
schema=SET_POLL_INTENSITY_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_HEAL_NODE,
heal_node,
schema=HEAL_NODE_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_TEST_NODE,
test_node,
schema=TEST_NODE_SCHEMA)
# Setup autoheal
if autoheal:
_LOGGER.info("Z-Wave network autoheal is enabled")
async_track_time_change(hass, heal_network, hour=0, minute=0, second=0)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, start_zwave)
return True
class ZWaveDeviceEntityValues():
"""Manages entity access to the underlying zwave value objects."""
def __init__(self, hass, schema, primary_value, zwave_config,
device_config, registry):
"""Initialize the values object with the passed entity schema."""
self._hass = hass
self._zwave_config = zwave_config
self._device_config = device_config
self._schema = copy.deepcopy(schema)
self._values = {}
self._entity = None
self._workaround_ignore = False
self._registry = registry
for name in self._schema[const.DISC_VALUES].keys():
self._values[name] = None
self._schema[const.DISC_VALUES][name][const.DISC_INSTANCE] = \
[primary_value.instance]
self._values[const.DISC_PRIMARY] = primary_value
self._node = primary_value.node
self._schema[const.DISC_NODE_ID] = [self._node.node_id]
# Check values that have already been discovered for node
for value in self._node.values.values():
self.check_value(value)
self._check_entity_ready()
def __getattr__(self, name):
"""Get the specified value for this entity."""
return self._values[name]
def __iter__(self):
"""Allow iteration over all values."""
return iter(self._values.values())
def check_value(self, value):
"""Check if the new value matches a missing value for this entity.
If a match is found, it is added to the values mapping.
"""
if not check_node_schema(value.node, self._schema):
return
for name in self._values:
if self._values[name] is not None:
continue
if not check_value_schema(
value, self._schema[const.DISC_VALUES][name]):
continue
self._values[name] = value
if self._entity:
self._entity.value_added()
self._entity.value_changed()
self._check_entity_ready()
def _check_entity_ready(self):
"""Check if all required values are discovered and create entity."""
if self._workaround_ignore:
return
if self._entity is not None:
return
for name in self._schema[const.DISC_VALUES]:
if self._values[name] is None and \
not self._schema[const.DISC_VALUES][name].get(
const.DISC_OPTIONAL):
return
component = self._schema[const.DISC_COMPONENT]
workaround_component = workaround.get_device_component_mapping(
self.primary)
if workaround_component and workaround_component != component:
if workaround_component == workaround.WORKAROUND_IGNORE:
_LOGGER.info("Ignoring Node %d Value %d due to workaround.",
self.primary.node.node_id, self.primary.value_id)
# No entity will be created for this value
self._workaround_ignore = True
return
_LOGGER.debug("Using %s instead of %s",
workaround_component, component)
component = workaround_component
entity_id = self._registry.async_get_entity_id(
component, DOMAIN,
compute_value_unique_id(self._node, self.primary))
if entity_id is None:
value_name = _value_name(self.primary)
entity_id = generate_entity_id(component + '.{}', value_name, [])
node_config = self._device_config.get(entity_id)
# Configure node
_LOGGER.debug("Adding Node_id=%s Generic_command_class=%s, "
"Specific_command_class=%s, "
"Command_class=%s, Value type=%s, "
"Genre=%s as %s", self._node.node_id,
self._node.generic, self._node.specific,
self.primary.command_class, self.primary.type,
self.primary.genre, component)
if node_config.get(CONF_IGNORED):
_LOGGER.info(
"Ignoring entity %s due to device settings", entity_id)
# No entity will be created for this value
self._workaround_ignore = True
return
polling_intensity = convert(
node_config.get(CONF_POLLING_INTENSITY), int)
if polling_intensity:
self.primary.enable_poll(polling_intensity)
platform = get_platform(self._hass, component, DOMAIN)
device = platform.get_device(
node=self._node, values=self,
node_config=node_config, hass=self._hass)
if device is None:
# No entity will be created for this value
self._workaround_ignore = True
return
self._entity = device
dict_id = id(self)
@callback
def _on_ready(sec):
_LOGGER.info(
"Z-Wave entity %s (node_id: %d) ready after %d seconds",
device.name, self._node.node_id, sec)
self._hass.async_add_job(discover_device, component, device,
dict_id)
@callback
def _on_timeout(sec):
_LOGGER.warning(
"Z-Wave entity %s (node_id: %d) not ready after %d seconds, "
"continuing anyway",
device.name, self._node.node_id, sec)
self._hass.async_add_job(discover_device, component, device,
dict_id)
async def discover_device(component, device, dict_id):
"""Put device in a dictionary and call discovery on it."""
self._hass.data[DATA_DEVICES][dict_id] = device
await discovery.async_load_platform(
self._hass, component, DOMAIN,
{const.DISCOVERY_DEVICE: dict_id}, self._zwave_config)
if device.unique_id:
self._hass.add_job(discover_device, component, device, dict_id)
else:
self._hass.add_job(check_has_unique_id, device, _on_ready,
_on_timeout, self._hass.loop)
class ZWaveDeviceEntity(ZWaveBaseEntity):
"""Representation of a Z-Wave node entity."""
def __init__(self, values, domain):
"""Initialize the z-Wave device."""
# pylint: disable=import-error
super().__init__()
from openzwave.network import ZWaveNetwork
from pydispatch import dispatcher
self.values = values
self.node = values.primary.node
self.values.primary.set_change_verified(False)
self._name = _value_name(self.values.primary)
self._unique_id = self._compute_unique_id()
self._update_attributes()
dispatcher.connect(
self.network_value_changed, ZWaveNetwork.SIGNAL_VALUE_CHANGED)
def network_value_changed(self, value):
"""Handle a value change on the network."""
if value.value_id in [v.value_id for v in self.values if v]:
return self.value_changed()
def value_added(self):
"""Handle a new value of this entity."""
pass
def value_changed(self):
"""Handle a changed value for this entity's node."""
self._update_attributes()
self.update_properties()
self.maybe_schedule_update()
async def async_added_to_hass(self):
"""Add device to dict."""
async_dispatcher_connect(
self.hass,
SIGNAL_REFRESH_ENTITY_FORMAT.format(self.entity_id),
self.refresh_from_network)
def _update_attributes(self):
"""Update the node attributes. May only be used inside callback."""
self.node_id = self.node.node_id
self._name = _value_name(self.values.primary)
if not self._unique_id:
self._unique_id = self._compute_unique_id()
if self._unique_id:
self.try_remove_and_add()
if self.values.power:
self.power_consumption = round(
self.values.power.data, self.values.power.precision)
else:
self.power_consumption = None
def update_properties(self):
"""Update on data changes for node values."""
pass
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
attrs = {
const.ATTR_NODE_ID: self.node_id,
const.ATTR_VALUE_INDEX: self.values.primary.index,
const.ATTR_VALUE_INSTANCE: self.values.primary.instance,
const.ATTR_VALUE_ID: str(self.values.primary.value_id),
}
if self.power_consumption is not None:
attrs[ATTR_POWER] = self.power_consumption
return attrs
def refresh_from_network(self):
"""Refresh all dependent values from zwave network."""
for value in self.values:
if value is not None:
self.node.refresh_value(value.value_id)
def _compute_unique_id(self):
if (is_node_parsed(self.node) and
self.values.primary.label != "Unknown") or \
self.node.is_ready:
return compute_value_unique_id(self.node, self.values.primary)
return None
def compute_value_unique_id(node, value):
"""Compute unique_id a value would get if it were to get one."""
return "{}-{}".format(node.node_id, value.object_id)
|
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from oslo.config import cfg
import psutil
import six
import yaml
from stackalytics.openstack.common import log as logging
from stackalytics.openstack.common.py3kcompat import urlutils
from stackalytics.processor import config
from stackalytics.processor import default_data_processor
from stackalytics.processor import lp
from stackalytics.processor import mls
from stackalytics.processor import rcs
from stackalytics.processor import record_processor
from stackalytics.processor import runtime_storage
from stackalytics.processor import utils
from stackalytics.processor import vcs
LOG = logging.getLogger(__name__)
def get_pids():
uwsgi_dict = {}
for pid in psutil.get_pid_list():
try:
p = psutil.Process(pid)
            if p.cmdline and p.cmdline[0].find('/uwsgi') != -1:
if p.parent:
uwsgi_dict[p.pid] = p.parent.pid
except Exception as e:
            LOG.debug('Exception while iterating process list: %s', e)
result = set()
for pid in uwsgi_dict:
if uwsgi_dict[pid] in uwsgi_dict:
result.add(pid)
return result
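# Sketch of what get_pids() computes, with made-up pids for illustration:
# if uwsgi_dict ends up as {100: 1, 101: 100, 102: 100} (pid -> parent pid),
# only 101 and 102 are returned, because their parent (100) is itself a
# uwsgi process, i.e. they are workers of a uwsgi master.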
def update_pids(runtime_storage):
pids = get_pids()
if not pids:
return
runtime_storage.active_pids(pids)
def _merge_commits(original, new):
if new['branches'] < original['branches']:
return False
else:
original['branches'] |= new['branches']
return True
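# _merge_commits() relies on set comparison: new['branches'] < original['branches']
# is True only when the new branch set is a strict subset of the original one.
# Illustrative values:
#   original = {'branches': set(['master'])}
#   new = {'branches': set(['master', 'stable/icehouse'])}
#   _merge_commits(original, new)  # True; original['branches'] now holds both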
def _record_typer(record_iterator, record_type):
for record in record_iterator:
record['record_type'] = record_type
yield record
def process_repo(repo, runtime_storage_inst, record_processor_inst):
uri = repo['uri']
    LOG.debug('Processing repo uri %s', uri)
bp_iterator = lp.log(repo)
bp_iterator_typed = _record_typer(bp_iterator, 'bp')
processed_bp_iterator = record_processor_inst.process(
bp_iterator_typed)
runtime_storage_inst.set_records(processed_bp_iterator,
utils.merge_records)
vcs_inst = vcs.get_vcs(repo, cfg.CONF.sources_root)
vcs_inst.fetch()
rcs_inst = rcs.get_rcs(repo, cfg.CONF.review_uri)
rcs_inst.setup(key_filename=cfg.CONF.ssh_key_filename,
username=cfg.CONF.ssh_username)
branches = set(['master'])
for release in repo.get('releases'):
if 'branch' in release:
branches.add(release['branch'])
for branch in branches:
LOG.debug('Processing repo %s, branch %s', uri, branch)
vcs_key = 'vcs:' + str(urlutils.quote_plus(uri) + ':' + branch)
last_id = runtime_storage_inst.get_by_key(vcs_key)
commit_iterator = vcs_inst.log(branch, last_id)
commit_iterator_typed = _record_typer(commit_iterator, 'commit')
processed_commit_iterator = record_processor_inst.process(
commit_iterator_typed)
runtime_storage_inst.set_records(
processed_commit_iterator, _merge_commits)
last_id = vcs_inst.get_last_id(branch)
runtime_storage_inst.set_by_key(vcs_key, last_id)
LOG.debug('Processing reviews for repo %s, branch %s', uri, branch)
rcs_key = 'rcs:' + str(urlutils.quote_plus(uri) + ':' + branch)
last_id = runtime_storage_inst.get_by_key(rcs_key)
review_iterator = rcs_inst.log(branch, last_id)
review_iterator_typed = _record_typer(review_iterator, 'review')
processed_review_iterator = record_processor_inst.process(
review_iterator_typed)
runtime_storage_inst.set_records(processed_review_iterator,
utils.merge_records)
last_id = rcs_inst.get_last_id(branch)
runtime_storage_inst.set_by_key(rcs_key, last_id)
def process_mail_list(uri, runtime_storage_inst, record_processor_inst):
mail_iterator = mls.log(uri, runtime_storage_inst)
mail_iterator_typed = _record_typer(mail_iterator, 'email')
processed_mail_iterator = record_processor_inst.process(
mail_iterator_typed)
runtime_storage_inst.set_records(processed_mail_iterator)
def update_records(runtime_storage_inst):
repos = utils.load_repos(runtime_storage_inst)
record_processor_inst = record_processor.RecordProcessor(
runtime_storage_inst)
for repo in repos:
process_repo(repo, runtime_storage_inst, record_processor_inst)
mail_lists = runtime_storage_inst.get_by_key('mail_lists') or []
for mail_list in mail_lists:
process_mail_list(mail_list, runtime_storage_inst,
record_processor_inst)
record_processor_inst.update()
def apply_corrections(uri, runtime_storage_inst):
LOG.info('Applying corrections from uri %s', uri)
corrections = utils.read_json_from_uri(uri)
if not corrections:
LOG.error('Unable to read corrections from uri: %s', uri)
return
valid_corrections = []
for c in corrections['corrections']:
if 'primary_key' in c:
valid_corrections.append(c)
else:
            LOG.warn('Correction is missing a primary key: %s', c)
runtime_storage_inst.apply_corrections(valid_corrections)
def _make_module_group(group_id, name, modules, tag=None):
module_group = {'id': group_id, 'module_group_name': name,
'modules': modules, 'tag': tag}
LOG.debug('New module group: %s', module_group)
return module_group
def _read_module_groups(program_list_uri):
LOG.debug('Process list of programs from uri: %s', program_list_uri)
content = yaml.safe_load(utils.read_uri(program_list_uri))
module_groups = []
modules_by_types = collections.defaultdict(list)
for name, info in six.iteritems(content):
group_id = name.lower()
if 'codename' in info:
name = '%s (%s)' % (info['codename'], name)
group_id = '%s-group' % info['codename'].lower()
all_modules = []
for project_type, project_list in six.iteritems(info['projects']):
module_list = [s.split('/')[1] for s in project_list]
modules_by_types[project_type] += module_list
all_modules += module_list
module_groups.append(_make_module_group(
group_id, name, all_modules, 'program'))
all_modules = []
for project_type, modules_list in six.iteritems(modules_by_types):
all_modules += modules_list
module_groups.append(
_make_module_group(
'official-%s' % project_type, project_type.capitalize(),
modules_list, 'project_type'))
module_groups.append(_make_module_group(
'official-all', 'OpenStack', all_modules, 'project_type'))
return module_groups
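# _read_module_groups() expects YAML shaped roughly like the sketch below
# (illustrative only, not the canonical programs list):
#
#   Compute:
#     codename: nova
#     projects:
#       integrated:
#         - openstack/nova
#         - openstack/python-novaclient
#
# This entry yields a 'nova-group' module group named 'nova (Compute)', plus
# per-project-type groups such as 'official-integrated' and the aggregate
# 'official-all'.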
def process_program_list(runtime_storage_inst, program_list_uri):
module_groups = runtime_storage_inst.get_by_key('module_groups') or {}
for mg in _read_module_groups(program_list_uri):
module_groups[mg['module_group_name']] = mg
runtime_storage_inst.set_by_key('module_groups', module_groups)
def main():
# init conf and logging
conf = cfg.CONF
conf.register_cli_opts(config.OPTS)
conf.register_opts(config.OPTS)
conf()
logging.setup('stackalytics')
LOG.info('Logging enabled')
runtime_storage_inst = runtime_storage.get_runtime_storage(
cfg.CONF.runtime_storage_uri)
default_data = utils.read_json_from_uri(cfg.CONF.default_data_uri)
if not default_data:
LOG.critical('Unable to load default data')
return not 0
default_data_processor.process(runtime_storage_inst,
default_data,
cfg.CONF.sources_root,
cfg.CONF.force_update)
process_program_list(runtime_storage_inst, cfg.CONF.program_list_uri)
update_pids(runtime_storage_inst)
update_records(runtime_storage_inst)
apply_corrections(cfg.CONF.corrections_uri, runtime_storage_inst)
if __name__ == '__main__':
main()
|
|
from raggregate.models.submission import Submission
from raggregate.models.comment import Comment
from raggregate.models.vote import Vote
from raggregate.models.subscribe import Subscribe
from raggregate.models.section import Section
from raggregate.queries import hotness
from raggregate.queries import subscribe as sub_queries
from raggregate.queries import section as section_queries
from raggregate.queries import general
import sqlahelper
from sqlalchemy.orm import joinedload
dbsession = sqlahelper.get_session()
#stories
def get_story_list(page_num = 1, per_page = 30, sort = 'new', request = None, self_only = False, section = None):
    if request is not None and 'users.id' in request.session and request.session['users.id'] is not None:
user_id = request.session['users.id']
else:
user_id = None
stories = dbsession.query(Submission).options(joinedload('submitter')).filter(Submission.deleted == False).filter(Submission.render_type == 'story_md')
if section and section.__class__ == Section:
stories = stories.filter(Submission.section == section.id)
elif section and section == 'all':
pass
else:
# show default user sections
if user_id is not None:
# Get a list of sections that this user is subscribed to
subscribed_to_list = sub_queries.get_subscribed_by_user_id(user_id)
            # Filter sections by the list we just retrieved
if len(subscribed_to_list) > 0:
stories = stories.filter(Submission.section.in_(subscribed_to_list))
if self_only:
stories = stories.filter(Submission.self_post == True)
if sort == 'top':
stories = stories.order_by(Submission.points.desc())
if sort == 'hot':
if request and 'sort.hot_point_window' in request.registry.settings:
sets = request.registry.settings
hotness.recentize_hots(hot_point_window = general.realize_timedelta_constructor(sets['sort.hot_point_window']),
hot_eligible_age = general.realize_timedelta_constructor(sets['sort.hot_eligible_age']),
hot_recalc_threshold = general.realize_timedelta_constructor(sets['sort.hot_recalc_threshold']))
stories = hotness.get_hot_stories(hot_eligible_age = general.realize_timedelta_constructor(sets['sort.hot_eligible_age']))
else:
hotness.recentize_hots()
stories = hotness.get_hot_stories()
if sort == 'new':
stories = stories.order_by(Submission.added_on.desc())
if sort == 'contro':
hotness.recentize_contro()
stories = hotness.get_controversial_stories()
max_stories = general.count_sa_obj(stories)
endpoints = get_endpoints_from_page_num(page_num, per_page)
return {'stories': stories[endpoints['start']:endpoints['end']], 'max_stories': max_stories}
def get_story_by_id(id):
return dbsession.query(Submission).options(joinedload('submitter')).filter(Submission.id == id).one()
def get_story_by_url_oldest(url):
"""
Return the oldest instance of a post that matches the passed URL if there is such a post.
@param url: url to match
@return: matching raggregate.models.Submission object if found, otherwise False
"""
q = dbsession.query(Submission).filter(Submission.url == url).order_by(Submission.added_on.asc()).limit(1)
res = q.all()
if len(res) > 0:
return res[0]
else:
return False
#def get_all_stories_with_user_votes(user_id):
# stories = get_all_stories()
# vote_dict = {}
# for s in stories:
# vote_dict[s.id] = []
# vs = dbsession.query(s.votes).filter(Vote.user_id == user_id).all()
# [vote_dict[s.id].append(v.direction) for v in vs]
# print(vote_dict)
# return {'stories': stories, 'vote_dict': vote_dict}
def update_story_vote_tally(story_id):
if type(story_id) is list:
for sid in story_id:
get_story_by_id(sid).tally_votes()
#@TODO: implement the single str/UUID form here too
#@TODO: implement caching
def get_endpoints_from_page_num(page_num, per_page):
if type(page_num) != int:
try:
page_num = int(page_num)
except:
page_num = 0
if type(per_page) != int:
try:
per_page = int(per_page)
except:
per_page = 30
if page_num > 0:
start = (page_num - 1) * per_page
end = page_num * per_page
else:
start = 0
end = per_page
return {'start': start, 'end': end}
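# Examples of the endpoints produced above:
#   get_endpoints_from_page_num(1, 30)      # {'start': 0, 'end': 30}
#   get_endpoints_from_page_num(2, 30)      # {'start': 30, 'end': 60}
#   get_endpoints_from_page_num('bad', 30)  # {'start': 0, 'end': 30}; page falls back to 0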
def get_comments(id, organize_parentage = False, page_num = 1, per_page = 30, sort = 'new', target = 'story', target_id = None):
if not organize_parentage:
return dbsession.query(Comment).filter(Comment.submission_id == id).all()
else:
#@TODO: this will probably be slow in practice and would be better off as a hand-rolled SQL query
# not implementing that at the moment because I want database agnosticism, but perhaps I will include
# a statement for PostgreSQL soon. It could be used on Pg installs and as an example for others.
tree = {}
tree[id] = []
dex = {}
all_comments = dbsession.query(Comment).filter(Comment.submission_id == id).all()
if target == 'story':
roots = dbsession.query(Comment).filter(Comment.submission_id == id).filter(Comment.submission_id == Comment.parent_id)
elif target == 'comment':
roots = dbsession.query(Comment).filter(Comment.submission_id == id).filter(target_id == Comment.id)
max_roots = general.count_sa_obj(roots)
if sort == 'top':
roots = roots.order_by(Comment.points.desc())
else:
# use "new" as default sort option
roots = roots.order_by(Comment.added_on.desc())
endpoints = get_endpoints_from_page_num(page_num, per_page)
        allowed_roots = []
        for root in roots[endpoints['start']:endpoints['end']]:
            allowed_roots.append(str(root.id))
trees = _build_comment_trees(all_comments, allowed_roots)
tree = trees['tree']
dex = trees['dex']
allowed_roots = trees['allowed_roots']
return {'tree': tree, 'dex': dex, 'comments': all_comments, 'max_comments': max_roots, 'allowed_roots': allowed_roots}
def _build_comment_trees(all_comments, allowed_roots):
tree = {}
dex = {}
for c in all_comments:
# make c.parent_id a string; this function receives UUIDs as strings
# @todo: we really need to unfungle the str/UUID conversion thing,
# it is inconsistent throughout the application
c.parent_id = str(c.parent_id)
# add comment to index for template lookup
dex[str(c.id)] = c
# do not compile deleted comments with no children, and remove them from allowed_roots if they exist
if c.deleted:
if count_comment_children(c.id) < 1:
if str(c.id) in allowed_roots:
allowed_roots.remove(str(c.id))
continue
# do not compile roots in this tree; use allowed_roots
if str(c.submission_id) == c.parent_id:
continue
# add parent id to tree if it doesn't exist
if c.parent_id not in tree:
tree[c.parent_id] = []
# add this comment as a child of its parent
tree[c.parent_id].append(str(c.id))
return {'tree': tree, 'dex': dex, 'allowed_roots': allowed_roots}
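# Sketch of the structures returned above, with shortened ids: given root
# comment 'a' on a story, replies 'b' and 'c' to 'a', and 'd' a reply to 'b':
#   tree = {'a': ['b', 'c'], 'b': ['d']}   # parent id -> list of child ids
#   dex = {'a': <Comment a>, 'b': <Comment b>, 'c': <Comment c>, 'd': <Comment d>}
#   allowed_roots = ['a']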
def count_comment_children(comment_id):
"""
Counts *only* direct children of a given comment id.
@param comment_id: the id whose children we should count
@return: the number of immediate children
"""
heritage = dbsession.query(Comment).filter(Comment.parent_id == comment_id).filter(Comment.deleted == False).all()
return len(heritage)
def get_comment_parent_story(id):
try:
return dbsession.query(Comment.submission_id).filter(Comment.id == id).one()
except:
return None
def get_comment_by_id(id):
return dbsession.query(Comment).filter(Comment.id == id).one()
def get_recent_comments(num):
"""
Get the last num comments.
@param num: number of comments to list
@return: list with num most recent comments as sa objects.
"""
return dbsession.query(Comment).filter(Comment.deleted == False).order_by(Comment.added_on.desc()).limit(num).all()
def get_story_id_from_slug(slug):
try_slug = True
# if our "slug" is the same length as a uuid
# try the uuid first, since it's more likely
# a uuid and NOT a slug.
#
# this breaks badly if it actually runs on a slug.
# because pgsql throws an error, we must explicitly
# roll back the current transaction, or everything
# else will also die.
if len(unicode(slug)) == 36:
try:
s = get_story_by_id(slug)
try_slug = False
except:
from pyramid_tm import transaction
transaction.abort()
transaction.begin()
if try_slug:
try:
s = dbsession.query(Submission).filter(Submission.slug == slug).one()
except:
s = get_story_by_id(slug)
return str(s.id)
|
|
#!/usr/bin/env python
"""
Copyright 2013 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
MS authentication framework
"""
import storage.storagetypes as storagetypes
import storage.storage as storage
import logging
import random
import os
import json
import base64
import types
import errno
import time
import datetime
import collections
import inspect
from volume import Volume #, VolumeAccessRequest
from user import SyndicateUser
from gateway import Gateway
from common.msconfig import *
import common.jsonrpc as jsonrpc
# ----------------------------------
def is_user( caller_user_or_object ):
if caller_user_or_object.__class__ == SyndicateUser:
return True
else:
return False
# ----------------------------------
def is_admin( caller_user ):
if caller_user != None and is_user( caller_user ) and caller_user.is_admin:
return True
return False
# ----------------------------------
def assert_admin_or( caller_user, predicate ):
assert caller_user != None, "Authentication required"
if not is_admin(caller_user) or not predicate:
raise Exception("User '%s' is not sufficiently privileged" % caller_user.email)
# ----------------------------------
def assert_admin( caller_user ):
assert_admin_or( caller_user, True )
# ----------------------------------
def __object_equivalent( obj1, obj2 ):
return obj1.key == obj2.key
# ----------------------------------
def __get_readable_attrs( caller_user_or_object, target_object, object_cls ):
read_attrs = []
caller_user = None
caller_object = None
if is_user( caller_user_or_object ):
caller_user = caller_user_or_object
else:
caller_object = caller_user_or_object
if is_admin( caller_user ):
# admin called us
read_attrs = object_cls.get_admin_read_attrs()
elif caller_user != None and target_object != None and target_object.owned_by( caller_user ):
# the user that owns the read object called us
read_attrs = object_cls.get_api_read_attrs()
elif caller_object != None and target_object != None and __object_equivalent( caller_object, target_object ):
# the object is reading itself
read_attrs = object_cls.get_api_read_attrs()
else:
# someone (possibly anonymous) read this object
read_attrs = object_cls.get_public_read_attrs()
return read_attrs
# ----------------------------------
def __to_dict( obj, attr_list ):
"""
Turn an object into a dictionary of its readable attributes.
"""
ret = {}
if len(attr_list) > 0:
for attr in attr_list:
ret[attr] = getattr( obj, attr )
return ret
# ----------------------------------
def filter_result( caller_user_or_object, object_cls, result_raw ):
"""
Ensure that we return a dict of readable attributes for an object
"""
if isinstance( result_raw, list ):
# returned value is a list of objects
result = []
for ret_raw in result_raw:
ret = None
if isinstance( ret_raw, storagetypes.Object ):
attr_list = __get_readable_attrs( caller_user_or_object, ret_raw, object_cls )
ret = __to_dict( ret_raw, attr_list )
if not ret:
# nothing to return
continue
else:
ret = ret_raw
result.append( ret )
elif isinstance( result_raw, storagetypes.Object ):
# returned value is an object
attr_list = __get_readable_attrs( caller_user_or_object, result_raw, object_cls )
result = __to_dict( result_raw, attr_list )
else:
# returned value is an atom
result = result_raw
return result
# ----------------------------------
def object_id_from_name( object_name, func, args, kw ):
argspec = inspect.getargspec( func )
# is it a positional arg?
for i in xrange(0, len(argspec.args)):
if object_name == argspec.args[i]:
return args[i]
# is it a keyword arg?
for (key, value) in kw.items():
if object_name == key:
return value
return None
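# For example, with hypothetical API functions:
#   def read_volume( volume_name, caller_user=None ): ...
#   object_id_from_name( "volume_name", read_volume, ("myvolume",), {} )  # "myvolume"
#
#   def update_volume( **attrs ): ...
#   object_id_from_name( "volume_name", update_volume, (), {"volume_name": "myvolume"} )  # "myvolume"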
# ----------------------------------
def assert_public_method( method ):
    if method == None:
        # does not exist
        raise Exception("No such method")
    if type(method) != types.FunctionType:
        # not a function
        raise Exception("'%s' is not a method" % method)
    if not getattr(method, "is_public", False):
        # not a function decorated by Authenticate (i.e. not part of the API)
        raise Exception("Method '%s' is not public" % method)
return True
# ----------------------------------
class CreateAPIGuard:
# creating an object requires a suitably capable user
def __init__(self, object_cls, admin_only=False, caller_user=None, **kw ):
self.object_cls = object_cls
self.admin_only = admin_only
self.pass_caller_user = caller_user
def __call__(self, func):
def inner( caller_user, *args, **kw ):
if caller_user is None:
raise Exception("Caller has insufficient privileges")
if not is_user( caller_user ):
# not a user
raise Exception("Caller is not a user")
if self.admin_only:
assert_admin( caller_user )
if self.pass_caller_user:
kw[self.pass_caller_user] = caller_user
ret = func(*args, **kw)
return filter_result( caller_user, self.object_cls, ret )
inner.__name__ = func.__name__
inner.object_id_attrs = self.object_cls.key_attrs
inner.mutable = True
return inner
# ----------------------------------
class ReadAPIGuard:
# reading an object requires one of three things: user is an admin, user owns the object, or the object is trying to read itself.
def __init__(self, object_cls, admin_only=False, **kw ):
self.object_cls = object_cls
self.admin_only = admin_only
def __call__(self, func):
def inner(caller_user, *args, **kw):
if caller_user == None:
# authentication failed
raise Exception("Caller has insufficient privileges")
if self.admin_only:
assert_admin( caller_user )
ret = func( *args, **kw )
return filter_result( caller_user, self.object_cls, ret )
inner.__name__ = func.__name__
inner.object_id_attrs = self.object_cls.key_attrs
return inner
# ----------------------------------
class UpdateAPIGuard:
"""
Decorator for an API method that will update an object. The decorator ensures certain
security invariants are met before allowing the update to happen.
"""
def __init__(self, target_object_cls, admin_only=False, caller_user=None, target_object_name=None, **kw ):
"""
* target_object_cls: Class of the object to be updated
* admin_only: if True, then only a user with the 'admin' flag set can call this method.
        * caller_user: if not None, then pass the SyndicateUser that called this method as a keyword argument with the name given in this variable.
* target_object_name: if not None, then this is the name of the argument in the API call whose value identifies the object (i.e. it can be fed into the object's Read() method).
* parse_args (in **kw): if set to a function, use that function to evaluate the API method's arguments before calling it (used client-side for pre-RPC processing)
"""
self.target_object_cls = target_object_cls
self.admin_only = admin_only
self.pass_caller_user = caller_user
self.target_object_name = target_object_name
def __call__(self, func):
def inner( caller_user, *args, **kw ):
if caller_user == None:
# authentication failed
raise Exception("Caller has insufficient privileges")
if not is_user( caller_user ):
# not a user
raise Exception("Caller is not a user")
if self.admin_only:
assert_admin( caller_user )
# find the target object ID
target_object_id = object_id_from_name( self.target_object_name, func, args, kw )
if target_object_id is None:
# invalid argument
raise Exception("No %s ID given" % (self.target_object_cls.__name__))
target_object = self.target_object_cls.Read( target_object_id )
if target_object == None:
raise Exception("No such %s: %s" % (self.target_object_cls.__name__, target_object_id))
if not is_admin( caller_user ) and not target_object.owned_by( caller_user ):
raise Exception("Object '%s: %s' is not owned by '%s'" % (self.target_object_cls.__name__, target_object_id, caller_user.email))
# only filter keywords that are writable in the object
method_kw = {}
write_kw = {}
for attr in kw.keys():
if attr not in self.target_object_cls.write_attrs:
method_kw[attr] = kw[attr]
else:
write_kw[attr] = kw[attr]
method_kw.update( write_kw )
if self.pass_caller_user:
method_kw[self.pass_caller_user] = caller_user
ret = func( *args, **method_kw)
return ret
inner.__name__ = func.__name__
inner.object_id_attrs = self.target_object_cls.key_attrs
inner.mutable = True
return inner
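# Hypothetical usage sketch of UpdateAPIGuard (the names are illustrative, not
# part of the real API):
#
#   @UpdateAPIGuard( Volume, caller_user="caller_user", target_object_name="volume_name" )
#   def update_volume( volume_name, caller_user=None, **attrs ):
#       ...
#
# The wrapped function is then called as update_volume( caller_user, "myvolume", ... );
# the guard reads the Volume named "myvolume", verifies the caller owns it (or
# is an admin), and forwards the keyword arguments to the undecorated function.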
# ----------------------------------
class DeleteAPIGuard:
# Deleting an object requires a suitably capable user.
# NOTE: the decorated function must take an object's ID as its first argument!
def __init__(self, target_object_cls, caller_user=None, admin_only=False, target_object_name=None, **kw ):
self.admin_only = admin_only
self.target_object_cls = target_object_cls
self.target_object_name = target_object_name
self.pass_caller_user = caller_user
def __call__(self, func):
def inner( caller_user, *args, **kw ):
if caller_user == None:
# authentication failed
raise Exception("Caller has insufficient privileges")
if not is_user( caller_user ):
# not a user
raise Exception("Caller is not a user")
if self.admin_only:
assert_admin( caller_user )
# get the target object ID
target_object_id = object_id_from_name( self.target_object_name, func, args, kw )
if target_object_id is None:
raise Exception("No %s ID given" % self.target_object_cls.__name__)
target_object = self.target_object_cls.Read( target_object_id )
if target_object == None:
# done!
return True
if not is_admin( caller_user ) and not target_object.owned_by( caller_user ):
raise Exception("Object '%s: %s' is not owned by '%s'" % (self.target_object_cls.__name__, target_object_id, caller_user.email))
if self.pass_caller_user:
kw[self.pass_caller_user] = caller_user
ret = func( *args, **kw)
return ret
inner.__name__ = func.__name__
inner.object_id_attrs = self.target_object_cls.key_attrs
inner.mutable = True
return inner
# ----------------------------------
class ListAPIGuard:
# listing objects requires a suitably capable user. An unprivileged user can only list API-level attributes of objects it owns, and only public attributes of objects it does not own.
def __init__(self, object_cls, admin_only=False, caller_user=None, **kw ):
self.object_cls = object_cls
self.admin_only = admin_only
self.pass_caller_user = caller_user
def __call__(self, func):
def inner(caller_user, *args, **kw):
if caller_user == None:
raise Exception("Caller has insufficient privileges")
if not is_user( caller_user ):
# not a user
raise Exception("Caller is not a user")
if self.admin_only:
assert_admin( caller_user )
if self.pass_caller_user != None:
kw[self.pass_caller_user] = caller_user
list_ret = func(*args, **kw)
return filter_result( caller_user, self.object_cls, list_ret )
inner.__name__ = func.__name__
inner.object_id_attrs = self.object_cls.key_attrs
return inner
# ----------------------------------
class BindAPIGuard:
# caller user is attempting to bind/unbind a source and target object. Verify that the caller user owns it first, or is admin.
# NOTE: the decorated function must take a source object ID as its first argument, and a target object ID as its second argument!
def __init__(self, source_object_cls, target_object_cls, caller_owns_source=True, caller_owns_target=True, admin_only=False, caller_user=None, source_object_name=None, target_object_name=None, **kw ):
self.source_object_cls = source_object_cls
self.target_object_cls = target_object_cls
self.admin_only = admin_only
self.caller_owns_source = caller_owns_source
self.caller_owns_target = caller_owns_target
self.pass_caller_user = caller_user
self.source_object_name = source_object_name
self.target_object_name = target_object_name
def __call__(self, func):
def inner(caller_user, *args, **kw):
if caller_user == None:
# authentication failed
raise Exception("Caller has insufficient privileges")
if not is_user( caller_user ):
# not a user
raise Exception("Caller is not a user")
if self.admin_only:
assert_admin( caller_user )
source_object_fut = None
target_object_fut = None
futs = []
# get the source object
source_object_id = object_id_from_name( self.source_object_name, func, args, kw )
source_object_fut = self.source_object_cls.Read( source_object_id, async=True )
futs.append( source_object_fut )
# get the target object
target_object_id = object_id_from_name( self.target_object_name, func, args, kw )
target_object_fut = self.target_object_cls.Read( target_object_id, async=True )
futs.append( target_object_fut )
storagetypes.wait_futures( futs )
source_object = None
target_object = None
if source_object_fut != None:
source_object = source_object_fut.get_result()
if target_object_fut != None:
target_object = target_object_fut.get_result()
# check the source object...
source_object_id = object_id_from_name( self.source_object_name, func, args, kw )
if source_object_id is None:
raise Exception("BUG: No %s ID given" % self.source_object_cls.__name__)
if source_object == None:
raise Exception("Source object '%s' does not exist" % source_object_id )
if self.caller_owns_source and not source_object.owned_by( caller_user ) and not caller_user.is_admin:
raise Exception("Source object '%s' is not owned by '%s'" % (source_object_id, caller_user.email) )
# check the target object...
target_object_id = object_id_from_name( self.target_object_name, func, args, kw )
if target_object_id is None:
raise Exception("No %s ID given" % self.target_object_cls.__name__)
if target_object == None:
raise Exception("Target object '%s' does not exist" % target_object_id )
if self.caller_owns_target and not target_object.owned_by( caller_user ) and not caller_user.is_admin:
raise Exception("Target object '%s' is not owned by '%s'" % (target_object_id, caller_user.email))
if self.pass_caller_user:
kw[self.pass_caller_user] = caller_user
            # all checks pass...
result = func( *args, **kw )
assert isinstance( result, bool ), "Internal Bind error"
return result
inner.__name__ = func.__name__
inner.mutable = True
return inner
# ----------------------------------
class Authenticate:
    def __init__(self, need_authentication=True):
        # assume authentication is required unless explicitly disabled
        self.need_authentication = need_authentication
    def __call__(self, func):
def inner( authenticated_user, *args, **kw ):
if authenticated_user is None and self.need_authentication:
raise Exception("Unauthorized user")
return func( authenticated_user, *args, **kw )
inner.__name__ = func.__name__
inner.object_id_attrs = getattr( func, "object_id_attrs", None )
inner.target_object_name = getattr( func, "target_object_name", None )
inner.source_object_name = getattr( func, "source_object_name", None )
inner.mutable = getattr( func, "mutable", False )
inner.is_public = True
return inner
# ----------------------------------
class AuthMethod( object ):
def __init__(self, method_func, authenticated_user ):
# make sure this is decorated with Authenticate
assert_public_method( method_func )
self.authenticated_user = authenticated_user
self.method_func = method_func
def __call__(self, *args, **kw ):
ret = self.method_func( self.authenticated_user, *args, **kw )
return ret
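# ----------------------------------
# Illustrative use of AuthMethod (the method object here is hypothetical):
#   method = AuthMethod( read_volume, authenticated_user )
#   volume_attrs = method( "myvolume" )
# AuthMethod only checks that the function was decorated with Authenticate and
# then prepends the authenticated user to the call.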
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Construct Hamiltonians in plan wave basis and its dual in 3D."""
from typing import List, Tuple, Optional, Union
import numpy as np
from openfermion.hamiltonians.jellium import (jellium_model,
jordan_wigner_dual_basis_jellium)
from openfermion.ops.operators import FermionOperator, QubitOperator
from openfermion.transforms.repconversions import inverse_fourier_transform
from openfermion.utils.grid import Grid
import openfermion.chem.molecular_data as md
def dual_basis_external_potential(
grid: Grid,
geometry: List[Tuple[str, Tuple[Union[int, float], Union[int, float],
Union[int, float]]]],
spinless: bool,
non_periodic: bool = False,
period_cutoff: Optional[float] = None) -> FermionOperator:
"""Return the external potential in the dual basis of arXiv:1706.00023.
The external potential resulting from electrons interacting with nuclei
in the plane wave dual basis. Note that a cos term is used which is
strictly only equivalent under aliasing in odd grids, and amounts
to the addition of an extra term to make the diagonals real on even
grids. This approximation is not expected to be significant and allows
for use of even and odd grids on an even footing.
Args:
grid (Grid): The discretization to use.
geometry: A list of tuples giving the coordinates of each atom.
example is [('H', (0, 0, 0)), ('H', (0, 0, 0.7414))].
Distances in atomic units. Use atomic symbols to specify atoms.
spinless (bool): Whether to use the spinless model or not.
non_periodic (bool): If the system is non-periodic, default to False.
period_cutoff (float): Period cutoff, default to
grid.volume_scale() ** (1. / grid.dimensions)
Returns:
FermionOperator: The dual basis operator.
"""
prefactor = -4.0 * np.pi / grid.volume_scale()
if non_periodic and period_cutoff is None:
period_cutoff = grid.volume_scale()**(1. / grid.dimensions)
operator = None
if spinless:
spins = [None]
else:
spins = [0, 1]
for pos_indices in grid.all_points_indices():
coordinate_p = grid.position_vector(pos_indices)
for nuclear_term in geometry:
coordinate_j = np.array(nuclear_term[1], float)
for momenta_indices in grid.all_points_indices():
momenta = grid.momentum_vector(momenta_indices)
momenta_squared = momenta.dot(momenta)
if momenta_squared == 0:
continue
cos_index = momenta.dot(coordinate_j - coordinate_p)
coefficient = (prefactor / momenta_squared *
md.periodic_hash_table[nuclear_term[0]] *
np.cos(cos_index))
for spin_p in spins:
orbital_p = grid.orbital_id(pos_indices, spin_p)
operators = ((orbital_p, 1), (orbital_p, 0))
if operator is None:
operator = FermionOperator(operators, coefficient)
else:
operator += FermionOperator(operators, coefficient)
return operator
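# Illustrative usage sketch (not part of the library): build the dual basis
# external potential for molecular hydrogen. The grid size and cell scale
# below are arbitrary example values.
def _example_dual_basis_external_potential() -> FermionOperator:
    # A 3D grid with 3 points per dimension in a cubic cell of side 5.0 a.u.
    grid = Grid(dimensions=3, length=3, scale=5.0)
    # H2 geometry in atomic units; the atomic symbol selects the nuclear
    # charge via md.periodic_hash_table.
    geometry = [('H', (0., 0., 0.)), ('H', (0., 0., 0.7414))]
    return dual_basis_external_potential(grid, geometry, spinless=True)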
def plane_wave_external_potential(
grid: Grid,
geometry: List[Tuple[str, Tuple[Union[int, float], Union[int, float],
Union[int, float]]]],
spinless: bool,
e_cutoff: Optional[float] = None,
non_periodic: bool = False,
period_cutoff: Optional[float] = None) -> FermionOperator:
"""Return the external potential operator in plane wave basis.
The external potential resulting from electrons interacting with nuclei.
    It is defined here as the Fourier transform of the dual basis
    Hamiltonian so that it is spectrally equivalent on both even and odd
    grids; defined directly in the plane wave basis, the two would differ
    on even grids.
Args:
grid (Grid): The discretization to use.
geometry: A list of tuples giving the coordinates of each atom.
            An example is [('H', (0, 0, 0)), ('H', (0, 0, 0.7414))].
Distances in atomic units. Use atomic symbols to specify atoms.
        spinless (bool): Whether to use the spinless model or not.
        e_cutoff (float): Energy cutoff.
        non_periodic (bool): Whether the system is non-periodic. Defaults to
            False.
        period_cutoff (float): Period cutoff. Defaults to
            grid.volume_scale() ** (1. / grid.dimensions).
Returns:
FermionOperator: The plane wave operator.
"""
dual_basis_operator = dual_basis_external_potential(grid, geometry,
spinless, non_periodic,
period_cutoff)
operator = inverse_fourier_transform(dual_basis_operator, grid, spinless)
return operator
def plane_wave_hamiltonian(
grid: Grid,
geometry: Optional[List[Tuple[str, Tuple[
Union[int, float], Union[int, float], Union[int, float]]]]] = None,
spinless: bool = False,
plane_wave: bool = True,
include_constant: bool = False,
e_cutoff: Optional[float] = None,
non_periodic: bool = False,
period_cutoff: Optional[float] = None) -> FermionOperator:
"""Returns Hamiltonian as FermionOperator class.
Args:
grid (Grid): The discretization to use.
geometry: A list of tuples giving the coordinates of each atom.
            An example is [('H', (0, 0, 0)), ('H', (0, 0, 0.7414))].
Distances in atomic units. Use atomic symbols to specify atoms.
spinless (bool): Whether to use the spinless model or not.
plane_wave (bool): Whether to return in plane wave basis (True)
or plane wave dual basis (False).
include_constant (bool): Whether to include the Madelung constant.
e_cutoff (float): Energy cutoff.
        non_periodic (bool): Whether the system is non-periodic. Defaults to
            False.
        period_cutoff (float): Period cutoff. Defaults to
            grid.volume_scale() ** (1. / grid.dimensions).
Returns:
        FermionOperator: The Hamiltonian.
"""
if (geometry is not None) and (include_constant is True):
raise ValueError('Constant term unsupported for non-uniform systems')
jellium_op = jellium_model(grid, spinless, plane_wave, include_constant,
e_cutoff, non_periodic, period_cutoff)
if geometry is None:
return jellium_op
for item in geometry:
if len(item[1]) != grid.dimensions:
raise ValueError("Invalid geometry coordinate.")
if item[0] not in md.periodic_hash_table:
raise ValueError("Invalid nuclear element.")
if plane_wave:
external_potential = plane_wave_external_potential(
grid, geometry, spinless, e_cutoff, non_periodic, period_cutoff)
else:
external_potential = dual_basis_external_potential(
grid, geometry, spinless, non_periodic, period_cutoff)
return jellium_op + external_potential
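# Illustrative usage sketch (not part of the library): the same hydrogen
# geometry in the plane wave basis and in its dual basis. Grid parameters are
# arbitrary example values.
def _example_plane_wave_hamiltonian() -> Tuple[FermionOperator, FermionOperator]:
    grid = Grid(dimensions=3, length=3, scale=5.0)
    geometry = [('H', (0., 0., 0.)), ('H', (0., 0., 0.7414))]
    pw_ham = plane_wave_hamiltonian(grid, geometry, spinless=True,
                                    plane_wave=True)
    dual_ham = plane_wave_hamiltonian(grid, geometry, spinless=True,
                                      plane_wave=False)
    return pw_ham, dual_ham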
def jordan_wigner_dual_basis_hamiltonian(
grid: Grid,
geometry: Optional[List[Tuple[str, Tuple[
Union[int, float], Union[int, float], Union[int, float]]]]] = None,
spinless: bool = False,
include_constant: bool = False) -> QubitOperator:
"""Return the dual basis Hamiltonian as QubitOperator.
Args:
grid (Grid): The discretization to use.
geometry: A list of tuples giving the coordinates of each atom.
            An example is [('H', (0, 0, 0)), ('H', (0, 0, 0.7414))].
Distances in atomic units. Use atomic symbols to specify atoms.
spinless (bool): Whether to use the spinless model or not.
include_constant (bool): Whether to include the Madelung constant.
Returns:
hamiltonian (QubitOperator)
"""
if (geometry is not None) and (include_constant is True):
raise ValueError('Constant term unsupported for non-uniform systems')
jellium_op = jordan_wigner_dual_basis_jellium(grid, spinless,
include_constant)
if geometry is None:
return jellium_op
for item in geometry:
if len(item[1]) != grid.dimensions:
raise ValueError("Invalid geometry coordinate.")
if item[0] not in md.periodic_hash_table:
raise ValueError("Invalid nuclear element.")
n_orbitals = grid.num_points
volume = grid.volume_scale()
if spinless:
n_qubits = n_orbitals
else:
n_qubits = 2 * n_orbitals
prefactor = -2 * np.pi / volume
external_potential = QubitOperator()
for k_indices in grid.all_points_indices():
momenta = grid.momentum_vector(k_indices)
momenta_squared = momenta.dot(momenta)
if momenta_squared == 0:
continue
for p in range(n_qubits):
index_p = grid.grid_indices(p, spinless)
coordinate_p = grid.position_vector(index_p)
for nuclear_term in geometry:
coordinate_j = np.array(nuclear_term[1], float)
cos_index = momenta.dot(coordinate_j - coordinate_p)
coefficient = (prefactor / momenta_squared *
md.periodic_hash_table[nuclear_term[0]] *
np.cos(cos_index))
external_potential += (QubitOperator(
(), coefficient) - QubitOperator(((p, 'Z'),), coefficient))
return jellium_op + external_potential
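# Illustrative usage sketch (not part of the library): the jellium-only dual
# basis Hamiltonian as a QubitOperator under the Jordan-Wigner transform, on
# an arbitrary example grid.
def _example_jordan_wigner_dual_basis_hamiltonian() -> QubitOperator:
    grid = Grid(dimensions=3, length=3, scale=5.0)
    return jordan_wigner_dual_basis_hamiltonian(grid, geometry=None,
                                                spinless=True,
                                                include_constant=True)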
|
|
"""Send data to Graphite metrics server (synchronously or on a background thread).
For example usage, see README.rst.
This code is licensed under a permissive MIT license -- see LICENSE.txt.
The graphyte project lives on GitHub here:
https://github.com/benhoyt/graphyte
"""
import atexit
import logging
try:
import queue
except ImportError:
import Queue as queue # Python 2.x compatibility
import socket
import threading
import time
__all__ = ['Sender', 'init', 'send']
__version__ = '1.7.0'
default_sender = None
logger = logging.getLogger(__name__)
def _has_whitespace(value):
return not value or value.split(None, 1)[0] != value
class Sender:
def __init__(self, host, port=2003, prefix=None, timeout=5, interval=None,
queue_size=None, log_sends=False, protocol='tcp',
batch_size=1000, tags={}, raise_send_errors=False):
"""Initialize a Sender instance, starting the background thread to
send messages at given interval (in seconds) if "interval" is not
None. Send at most "batch_size" messages per socket send operation.
Default protocol is TCP; use protocol='udp' for UDP.
Use "tags" to specify common or default tags for this Sender, which
are sent with each metric along with any tags passed to send().
"""
self.host = host
self.port = port
self.prefix = prefix
self.timeout = timeout
self.interval = interval
self.log_sends = log_sends
self.protocol = protocol
self.batch_size = batch_size
self.tags = tags
self.raise_send_errors = raise_send_errors
if self.interval is not None:
if raise_send_errors:
raise ValueError('raise_send_errors must be disabled when interval is set')
if queue_size is None:
queue_size = int(round(interval)) * 100
self._queue = queue.Queue(maxsize=queue_size)
self._thread = threading.Thread(target=self._thread_loop)
self._thread.daemon = True
self._thread.start()
atexit.register(self.stop)
def __del__(self):
self.stop()
def stop(self):
"""Tell the sender thread to finish and wait for it to stop sending
(should be at most "timeout" seconds).
"""
if self.interval is not None:
self._queue.put_nowait(None)
self._thread.join()
self.interval = None
def build_message(self, metric, value, timestamp, tags={}):
"""Build a Graphite message to send and return it as a byte string."""
if _has_whitespace(metric):
raise ValueError('"metric" must not have whitespace in it')
if not isinstance(value, (int, float)):
raise TypeError('"value" must be an int or a float, not a {}'.format(
type(value).__name__))
all_tags = self.tags.copy()
all_tags.update(tags)
tags_strs = [u';{}={}'.format(k, v) for k, v in sorted(all_tags.items())]
if any(_has_whitespace(t) for t in tags_strs):
raise ValueError('"tags" keys and values must not have whitespace in them')
tags_suffix = ''.join(tags_strs)
message = u'{}{}{} {} {}\n'.format(
self.prefix + '.' if self.prefix else '',
metric,
tags_suffix,
value,
int(round(timestamp))
)
message = message.encode('utf-8')
return message
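    # For example, with the defaults (prefix=None, tags={}), something like
    #   build_message('foo.bar', 42, 1408233409, tags={'env': 'prod'})
    # would produce the plaintext-protocol line
    #   b'foo.bar;env=prod 42 1408233409\n'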
def send(self, metric, value, timestamp=None, tags={}):
"""Send given metric and (int or float) value to Graphite host.
Performs send on background thread if "interval" was specified when
creating this Sender.
If a "tags" dict is specified, send the tags to the Graphite host along
with the metric, in addition to any default tags passed to Sender() --
the tags argument here overrides any default tags.
"""
if timestamp is None:
timestamp = time.time()
message = self.build_message(metric, value, timestamp, tags=tags)
if self.interval is None:
self.send_socket(message)
else:
try:
self._queue.put_nowait(message)
except queue.Full:
logger.error('queue full when sending {!r}'.format(message))
def send_message(self, message):
if self.protocol == 'tcp':
sock = socket.create_connection((self.host, self.port), self.timeout)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
sock.sendall(message)
finally: # sockets don't support "with" statement on Python 2.x
sock.close()
elif self.protocol == 'udp':
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
sock.sendto(message, (self.host, self.port))
finally:
sock.close()
else:
raise ValueError('"protocol" must be \'tcp\' or \'udp\', not {!r}'.format(self.protocol))
def send_socket(self, message):
"""Low-level function to send message bytes to this Sender's socket.
You should usually call send() instead of this function (unless you're
subclassing or writing unit tests).
"""
if self.log_sends:
start_time = time.time()
try:
self.send_message(message)
except Exception as error:
if self.raise_send_errors:
raise
logger.error('error sending message {!r}: {}'.format(message, error))
else:
if self.log_sends:
elapsed_time = time.time() - start_time
logger.info('sent message {!r} to {}:{} in {:.03f} seconds'.format(
message, self.host, self.port, elapsed_time))
def _thread_loop(self):
"""Background thread used when Sender is in asynchronous/interval mode."""
last_check_time = time.time()
messages = []
while True:
# Get first message from queue, blocking until the next time we
# should be sending
time_since_last_check = time.time() - last_check_time
time_till_next_check = max(0, self.interval - time_since_last_check)
try:
message = self._queue.get(timeout=time_till_next_check)
except queue.Empty:
pass
else:
if message is None:
# None is the signal to stop this background thread
break
messages.append(message)
# Get any other messages currently on queue without blocking,
# paying attention to None ("stop thread" signal)
should_stop = False
while True:
try:
message = self._queue.get_nowait()
except queue.Empty:
break
if message is None:
should_stop = True
break
messages.append(message)
if should_stop:
break
# If it's time to send, send what we've collected
current_time = time.time()
if current_time - last_check_time >= self.interval:
last_check_time = current_time
for i in range(0, len(messages), self.batch_size):
batch = messages[i:i + self.batch_size]
self.send_socket(b''.join(batch))
messages = []
# Send any final messages before exiting thread
for i in range(0, len(messages), self.batch_size):
batch = messages[i:i + self.batch_size]
self.send_socket(b''.join(batch))
def init(*args, **kwargs):
"""Initialize default Sender instance with given args."""
global default_sender
default_sender = Sender(*args, **kwargs)
def send(*args, **kwargs):
"""Send message using default Sender instance."""
default_sender.send(*args, **kwargs)
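# Illustrative usage sketch (mirrors the README; the host below is a
# placeholder for a real Graphite server):
#
#   import graphyte
#   graphyte.init('graphite.example.com', prefix='system.sync')
#   graphyte.send('foo.bar', 42)       # sends "system.sync.foo.bar 42 <now>"
#
#   # Or create a Sender that batches sends on a background thread every
#   # 10 seconds:
#   sender = graphyte.Sender('graphite.example.com', interval=10)
#   sender.send('foo.bar', 42, tags={'ding': 'dong'})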
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('metric',
help='name of metric to send')
parser.add_argument('value', type=float,
help='numeric value to send')
parser.add_argument('-s', '--server', default='localhost',
help='hostname of Graphite server to send to, default %(default)s')
parser.add_argument('-p', '--port', type=int, default=2003,
help='port to send message to, default %(default)d')
parser.add_argument('-u', '--udp', action='store_true',
help='send via UDP instead of TCP')
parser.add_argument('-t', '--timestamp', type=int,
help='Unix timestamp for message (defaults to current time)')
parser.add_argument('-q', '--quiet', action='store_true',
help="quiet mode (don't log send to stdout)")
args = parser.parse_args()
if not args.quiet:
logging.basicConfig(level=logging.INFO, format='%(message)s')
sender = Sender(args.server, port=args.port, log_sends=not args.quiet,
protocol='udp' if args.udp else 'tcp')
sender.send(args.metric, args.value, timestamp=args.timestamp)
|
|
from __future__ import unicode_literals
import time
from datetime import datetime
import boto
import pytz
from boto.emr.bootstrap_action import BootstrapAction
from boto.emr.instance_group import InstanceGroup
from boto.emr.step import StreamingStep
import six
import sure # noqa
from moto import mock_emr_deprecated
from tests.helpers import requires_boto_gte
run_jobflow_args = dict(
job_flow_role='EMR_EC2_DefaultRole',
keep_alive=True,
log_uri='s3://some_bucket/jobflow_logs',
master_instance_type='c1.medium',
name='My jobflow',
num_instances=2,
service_role='EMR_DefaultRole',
slave_instance_type='c1.medium',
)
input_instance_groups = [
InstanceGroup(1, 'MASTER', 'c1.medium', 'ON_DEMAND', 'master'),
InstanceGroup(3, 'CORE', 'c1.medium', 'ON_DEMAND', 'core'),
InstanceGroup(6, 'TASK', 'c1.large', 'SPOT', 'task-1', '0.07'),
InstanceGroup(10, 'TASK', 'c1.xlarge', 'SPOT', 'task-2', '0.05'),
]
@mock_emr_deprecated
def test_describe_cluster():
conn = boto.connect_emr()
args = run_jobflow_args.copy()
args.update(dict(
api_params={
'Applications.member.1.Name': 'Spark',
'Applications.member.1.Version': '2.4.2',
'Configurations.member.1.Classification': 'yarn-site',
'Configurations.member.1.Properties.entry.1.key': 'someproperty',
'Configurations.member.1.Properties.entry.1.value': 'somevalue',
'Configurations.member.1.Properties.entry.2.key': 'someotherproperty',
'Configurations.member.1.Properties.entry.2.value': 'someothervalue',
'Instances.EmrManagedMasterSecurityGroup': 'master-security-group',
'Instances.Ec2SubnetId': 'subnet-8be41cec',
},
availability_zone='us-east-2b',
ec2_keyname='mykey',
job_flow_role='EMR_EC2_DefaultRole',
keep_alive=False,
log_uri='s3://some_bucket/jobflow_logs',
name='My jobflow',
service_role='EMR_DefaultRole',
visible_to_all_users=True,
))
cluster_id = conn.run_jobflow(**args)
input_tags = {'tag1': 'val1', 'tag2': 'val2'}
conn.add_tags(cluster_id, input_tags)
cluster = conn.describe_cluster(cluster_id)
cluster.applications[0].name.should.equal('Spark')
cluster.applications[0].version.should.equal('2.4.2')
cluster.autoterminate.should.equal('true')
    # configurations appear not to be supplied as attributes?
attrs = cluster.ec2instanceattributes
# AdditionalMasterSecurityGroups
# AdditionalSlaveSecurityGroups
attrs.ec2availabilityzone.should.equal(args['availability_zone'])
attrs.ec2keyname.should.equal(args['ec2_keyname'])
attrs.ec2subnetid.should.equal(args['api_params']['Instances.Ec2SubnetId'])
# EmrManagedMasterSecurityGroups
# EmrManagedSlaveSecurityGroups
attrs.iaminstanceprofile.should.equal(args['job_flow_role'])
# ServiceAccessSecurityGroup
cluster.id.should.equal(cluster_id)
cluster.loguri.should.equal(args['log_uri'])
cluster.masterpublicdnsname.should.be.a(six.string_types)
cluster.name.should.equal(args['name'])
int(cluster.normalizedinstancehours).should.equal(0)
# cluster.release_label
cluster.shouldnt.have.property('requestedamiversion')
cluster.runningamiversion.should.equal('1.0.0')
# cluster.securityconfiguration
cluster.servicerole.should.equal(args['service_role'])
cluster.status.state.should.equal('TERMINATED')
cluster.status.statechangereason.message.should.be.a(six.string_types)
cluster.status.statechangereason.code.should.be.a(six.string_types)
cluster.status.timeline.creationdatetime.should.be.a(six.string_types)
# cluster.status.timeline.enddatetime.should.be.a(six.string_types)
# cluster.status.timeline.readydatetime.should.be.a(six.string_types)
dict((item.key, item.value)
for item in cluster.tags).should.equal(input_tags)
cluster.terminationprotected.should.equal('false')
cluster.visibletoallusers.should.equal('true')
@mock_emr_deprecated
def test_describe_jobflows():
conn = boto.connect_emr()
args = run_jobflow_args.copy()
expected = {}
for idx in range(4):
cluster_name = 'cluster' + str(idx)
args['name'] = cluster_name
cluster_id = conn.run_jobflow(**args)
expected[cluster_id] = {
'id': cluster_id,
'name': cluster_name,
'state': 'WAITING'
}
# need sleep since it appears the timestamp is always rounded to
# the nearest second internally
time.sleep(1)
timestamp = datetime.now(pytz.utc)
time.sleep(1)
for idx in range(4, 6):
cluster_name = 'cluster' + str(idx)
args['name'] = cluster_name
cluster_id = conn.run_jobflow(**args)
conn.terminate_jobflow(cluster_id)
expected[cluster_id] = {
'id': cluster_id,
'name': cluster_name,
'state': 'TERMINATED'
}
jobs = conn.describe_jobflows()
jobs.should.have.length_of(6)
for cluster_id, y in expected.items():
resp = conn.describe_jobflows(jobflow_ids=[cluster_id])
resp.should.have.length_of(1)
resp[0].jobflowid.should.equal(cluster_id)
resp = conn.describe_jobflows(states=['WAITING'])
resp.should.have.length_of(4)
for x in resp:
x.state.should.equal('WAITING')
resp = conn.describe_jobflows(created_before=timestamp)
resp.should.have.length_of(4)
resp = conn.describe_jobflows(created_after=timestamp)
resp.should.have.length_of(2)
@mock_emr_deprecated
def test_describe_jobflow():
conn = boto.connect_emr()
args = run_jobflow_args.copy()
args.update(dict(
ami_version='3.8.1',
api_params={
#'Applications.member.1.Name': 'Spark',
#'Applications.member.1.Version': '2.4.2',
#'Configurations.member.1.Classification': 'yarn-site',
#'Configurations.member.1.Properties.entry.1.key': 'someproperty',
#'Configurations.member.1.Properties.entry.1.value': 'somevalue',
#'Instances.EmrManagedMasterSecurityGroup': 'master-security-group',
'Instances.Ec2SubnetId': 'subnet-8be41cec',
},
ec2_keyname='mykey',
hadoop_version='2.4.0',
name='My jobflow',
log_uri='s3://some_bucket/jobflow_logs',
keep_alive=True,
master_instance_type='c1.medium',
slave_instance_type='c1.medium',
num_instances=2,
availability_zone='us-west-2b',
job_flow_role='EMR_EC2_DefaultRole',
service_role='EMR_DefaultRole',
visible_to_all_users=True,
))
cluster_id = conn.run_jobflow(**args)
jf = conn.describe_jobflow(cluster_id)
jf.amiversion.should.equal(args['ami_version'])
jf.bootstrapactions.should.equal(None)
jf.creationdatetime.should.be.a(six.string_types)
jf.should.have.property('laststatechangereason')
jf.readydatetime.should.be.a(six.string_types)
jf.startdatetime.should.be.a(six.string_types)
jf.state.should.equal('WAITING')
jf.ec2keyname.should.equal(args['ec2_keyname'])
# Ec2SubnetId
jf.hadoopversion.should.equal(args['hadoop_version'])
int(jf.instancecount).should.equal(2)
for ig in jf.instancegroups:
ig.creationdatetime.should.be.a(six.string_types)
# ig.enddatetime.should.be.a(six.string_types)
ig.should.have.property('instancegroupid').being.a(six.string_types)
int(ig.instancerequestcount).should.equal(1)
ig.instancerole.should.be.within(['MASTER', 'CORE'])
int(ig.instancerunningcount).should.equal(1)
ig.instancetype.should.equal('c1.medium')
ig.laststatechangereason.should.be.a(six.string_types)
ig.market.should.equal('ON_DEMAND')
ig.name.should.be.a(six.string_types)
ig.readydatetime.should.be.a(six.string_types)
ig.startdatetime.should.be.a(six.string_types)
ig.state.should.equal('RUNNING')
jf.keepjobflowalivewhennosteps.should.equal('true')
jf.masterinstanceid.should.be.a(six.string_types)
jf.masterinstancetype.should.equal(args['master_instance_type'])
jf.masterpublicdnsname.should.be.a(six.string_types)
int(jf.normalizedinstancehours).should.equal(0)
jf.availabilityzone.should.equal(args['availability_zone'])
jf.slaveinstancetype.should.equal(args['slave_instance_type'])
jf.terminationprotected.should.equal('false')
jf.jobflowid.should.equal(cluster_id)
# jf.jobflowrole.should.equal(args['job_flow_role'])
jf.loguri.should.equal(args['log_uri'])
jf.name.should.equal(args['name'])
# jf.servicerole.should.equal(args['service_role'])
jf.steps.should.have.length_of(0)
list(i.value for i in jf.supported_products).should.equal([])
jf.visibletoallusers.should.equal('true')
@mock_emr_deprecated
def test_list_clusters():
conn = boto.connect_emr()
args = run_jobflow_args.copy()
expected = {}
for idx in range(40):
cluster_name = 'jobflow' + str(idx)
args['name'] = cluster_name
cluster_id = conn.run_jobflow(**args)
expected[cluster_id] = {
'id': cluster_id,
'name': cluster_name,
'normalizedinstancehours': '0',
'state': 'WAITING'
}
# need sleep since it appears the timestamp is always rounded to
# the nearest second internally
time.sleep(1)
timestamp = datetime.now(pytz.utc)
time.sleep(1)
for idx in range(40, 70):
cluster_name = 'jobflow' + str(idx)
args['name'] = cluster_name
cluster_id = conn.run_jobflow(**args)
conn.terminate_jobflow(cluster_id)
expected[cluster_id] = {
'id': cluster_id,
'name': cluster_name,
'normalizedinstancehours': '0',
'state': 'TERMINATED'
}
args = {}
while 1:
resp = conn.list_clusters(**args)
clusters = resp.clusters
len(clusters).should.be.lower_than_or_equal_to(50)
for x in clusters:
y = expected[x.id]
x.id.should.equal(y['id'])
x.name.should.equal(y['name'])
x.normalizedinstancehours.should.equal(
y['normalizedinstancehours'])
x.status.state.should.equal(y['state'])
x.status.timeline.creationdatetime.should.be.a(six.string_types)
if y['state'] == 'TERMINATED':
x.status.timeline.enddatetime.should.be.a(six.string_types)
else:
x.status.timeline.shouldnt.have.property('enddatetime')
x.status.timeline.readydatetime.should.be.a(six.string_types)
if not hasattr(resp, 'marker'):
break
args = {'marker': resp.marker}
resp = conn.list_clusters(cluster_states=['TERMINATED'])
resp.clusters.should.have.length_of(30)
for x in resp.clusters:
x.status.state.should.equal('TERMINATED')
resp = conn.list_clusters(created_before=timestamp)
resp.clusters.should.have.length_of(40)
resp = conn.list_clusters(created_after=timestamp)
resp.clusters.should.have.length_of(30)
@mock_emr_deprecated
def test_run_jobflow():
conn = boto.connect_emr()
args = run_jobflow_args.copy()
job_id = conn.run_jobflow(**args)
job_flow = conn.describe_jobflow(job_id)
job_flow.state.should.equal('WAITING')
job_flow.jobflowid.should.equal(job_id)
job_flow.name.should.equal(args['name'])
job_flow.masterinstancetype.should.equal(args['master_instance_type'])
job_flow.slaveinstancetype.should.equal(args['slave_instance_type'])
job_flow.loguri.should.equal(args['log_uri'])
job_flow.visibletoallusers.should.equal('false')
int(job_flow.normalizedinstancehours).should.equal(0)
job_flow.steps.should.have.length_of(0)
@mock_emr_deprecated
def test_run_jobflow_in_multiple_regions():
regions = {}
for region in ['us-east-1', 'eu-west-1']:
conn = boto.emr.connect_to_region(region)
args = run_jobflow_args.copy()
args['name'] = region
cluster_id = conn.run_jobflow(**args)
regions[region] = {'conn': conn, 'cluster_id': cluster_id}
for region in regions.keys():
conn = regions[region]['conn']
jf = conn.describe_jobflow(regions[region]['cluster_id'])
jf.name.should.equal(region)
@requires_boto_gte("2.8")
@mock_emr_deprecated
def test_run_jobflow_with_new_params():
# Test that run_jobflow works with newer params
conn = boto.connect_emr()
conn.run_jobflow(**run_jobflow_args)
@requires_boto_gte("2.8")
@mock_emr_deprecated
def test_run_jobflow_with_visible_to_all_users():
conn = boto.connect_emr()
for expected in (True, False):
job_id = conn.run_jobflow(
visible_to_all_users=expected,
**run_jobflow_args
)
job_flow = conn.describe_jobflow(job_id)
job_flow.visibletoallusers.should.equal(str(expected).lower())
@requires_boto_gte("2.8")
@mock_emr_deprecated
def test_run_jobflow_with_instance_groups():
input_groups = dict((g.name, g) for g in input_instance_groups)
conn = boto.connect_emr()
job_id = conn.run_jobflow(instance_groups=input_instance_groups,
**run_jobflow_args)
job_flow = conn.describe_jobflow(job_id)
int(job_flow.instancecount).should.equal(
sum(g.num_instances for g in input_instance_groups))
for instance_group in job_flow.instancegroups:
expected = input_groups[instance_group.name]
instance_group.should.have.property('instancegroupid')
int(instance_group.instancerunningcount).should.equal(
expected.num_instances)
instance_group.instancerole.should.equal(expected.role)
instance_group.instancetype.should.equal(expected.type)
instance_group.market.should.equal(expected.market)
if hasattr(expected, 'bidprice'):
instance_group.bidprice.should.equal(expected.bidprice)
@requires_boto_gte("2.8")
@mock_emr_deprecated
def test_set_termination_protection():
conn = boto.connect_emr()
job_id = conn.run_jobflow(**run_jobflow_args)
job_flow = conn.describe_jobflow(job_id)
job_flow.terminationprotected.should.equal('false')
conn.set_termination_protection(job_id, True)
job_flow = conn.describe_jobflow(job_id)
job_flow.terminationprotected.should.equal('true')
conn.set_termination_protection(job_id, False)
job_flow = conn.describe_jobflow(job_id)
job_flow.terminationprotected.should.equal('false')
@requires_boto_gte("2.8")
@mock_emr_deprecated
def test_set_visible_to_all_users():
conn = boto.connect_emr()
args = run_jobflow_args.copy()
args['visible_to_all_users'] = False
job_id = conn.run_jobflow(**args)
job_flow = conn.describe_jobflow(job_id)
job_flow.visibletoallusers.should.equal('false')
conn.set_visible_to_all_users(job_id, True)
job_flow = conn.describe_jobflow(job_id)
job_flow.visibletoallusers.should.equal('true')
conn.set_visible_to_all_users(job_id, False)
job_flow = conn.describe_jobflow(job_id)
job_flow.visibletoallusers.should.equal('false')
@mock_emr_deprecated
def test_terminate_jobflow():
conn = boto.connect_emr()
job_id = conn.run_jobflow(**run_jobflow_args)
flow = conn.describe_jobflows()[0]
flow.state.should.equal('WAITING')
conn.terminate_jobflow(job_id)
flow = conn.describe_jobflows()[0]
flow.state.should.equal('TERMINATED')
# testing multiple endpoints for each feature
@mock_emr_deprecated
def test_bootstrap_actions():
bootstrap_actions = [
BootstrapAction(
name='bs1',
path='path/to/script',
bootstrap_action_args=['arg1', 'arg2&arg3']),
BootstrapAction(
name='bs2',
path='path/to/anotherscript',
bootstrap_action_args=[])
]
conn = boto.connect_emr()
cluster_id = conn.run_jobflow(
bootstrap_actions=bootstrap_actions,
**run_jobflow_args
)
jf = conn.describe_jobflow(cluster_id)
for x, y in zip(jf.bootstrapactions, bootstrap_actions):
x.name.should.equal(y.name)
x.path.should.equal(y.path)
list(o.value for o in x.args).should.equal(y.args())
resp = conn.list_bootstrap_actions(cluster_id)
for i, y in enumerate(bootstrap_actions):
x = resp.actions[i]
x.name.should.equal(y.name)
x.scriptpath.should.equal(y.path)
list(arg.value for arg in x.args).should.equal(y.args())
@mock_emr_deprecated
def test_instance_groups():
input_groups = dict((g.name, g) for g in input_instance_groups)
conn = boto.connect_emr()
args = run_jobflow_args.copy()
for key in ['master_instance_type', 'slave_instance_type', 'num_instances']:
del args[key]
args['instance_groups'] = input_instance_groups[:2]
job_id = conn.run_jobflow(**args)
jf = conn.describe_jobflow(job_id)
base_instance_count = int(jf.instancecount)
conn.add_instance_groups(job_id, input_instance_groups[2:])
jf = conn.describe_jobflow(job_id)
int(jf.instancecount).should.equal(
sum(g.num_instances for g in input_instance_groups))
for x in jf.instancegroups:
y = input_groups[x.name]
if hasattr(y, 'bidprice'):
x.bidprice.should.equal(y.bidprice)
x.creationdatetime.should.be.a(six.string_types)
# x.enddatetime.should.be.a(six.string_types)
x.should.have.property('instancegroupid')
int(x.instancerequestcount).should.equal(y.num_instances)
x.instancerole.should.equal(y.role)
int(x.instancerunningcount).should.equal(y.num_instances)
x.instancetype.should.equal(y.type)
x.laststatechangereason.should.be.a(six.string_types)
x.market.should.equal(y.market)
x.name.should.be.a(six.string_types)
x.readydatetime.should.be.a(six.string_types)
x.startdatetime.should.be.a(six.string_types)
x.state.should.equal('RUNNING')
for x in conn.list_instance_groups(job_id).instancegroups:
y = input_groups[x.name]
if hasattr(y, 'bidprice'):
x.bidprice.should.equal(y.bidprice)
# Configurations
# EbsBlockDevices
# EbsOptimized
x.should.have.property('id')
x.instancegrouptype.should.equal(y.role)
x.instancetype.should.equal(y.type)
x.market.should.equal(y.market)
x.name.should.equal(y.name)
int(x.requestedinstancecount).should.equal(y.num_instances)
int(x.runninginstancecount).should.equal(y.num_instances)
# ShrinkPolicy
x.status.state.should.equal('RUNNING')
x.status.statechangereason.code.should.be.a(six.string_types)
x.status.statechangereason.message.should.be.a(six.string_types)
x.status.timeline.creationdatetime.should.be.a(six.string_types)
# x.status.timeline.enddatetime.should.be.a(six.string_types)
x.status.timeline.readydatetime.should.be.a(six.string_types)
igs = dict((g.name, g) for g in jf.instancegroups)
conn.modify_instance_groups(
[igs['task-1'].instancegroupid, igs['task-2'].instancegroupid],
[2, 3])
jf = conn.describe_jobflow(job_id)
int(jf.instancecount).should.equal(base_instance_count + 5)
igs = dict((g.name, g) for g in jf.instancegroups)
int(igs['task-1'].instancerunningcount).should.equal(2)
int(igs['task-2'].instancerunningcount).should.equal(3)
@mock_emr_deprecated
def test_steps():
input_steps = [
StreamingStep(
name='My wordcount example',
mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter.py',
reducer='aggregate',
input='s3n://elasticmapreduce/samples/wordcount/input',
output='s3n://output_bucket/output/wordcount_output'),
StreamingStep(
name='My wordcount example & co.',
mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter2.py',
reducer='aggregate',
input='s3n://elasticmapreduce/samples/wordcount/input2',
output='s3n://output_bucket/output/wordcount_output2')
]
# TODO: implementation and test for cancel_steps
conn = boto.connect_emr()
cluster_id = conn.run_jobflow(
steps=[input_steps[0]],
**run_jobflow_args)
jf = conn.describe_jobflow(cluster_id)
jf.steps.should.have.length_of(1)
conn.add_jobflow_steps(cluster_id, [input_steps[1]])
jf = conn.describe_jobflow(cluster_id)
jf.steps.should.have.length_of(2)
for step in jf.steps:
step.actiononfailure.should.equal('TERMINATE_JOB_FLOW')
list(arg.value for arg in step.args).should.have.length_of(8)
step.creationdatetime.should.be.a(six.string_types)
# step.enddatetime.should.be.a(six.string_types)
step.jar.should.equal(
'/home/hadoop/contrib/streaming/hadoop-streaming.jar')
step.laststatechangereason.should.be.a(six.string_types)
step.mainclass.should.equal('')
step.name.should.be.a(six.string_types)
# step.readydatetime.should.be.a(six.string_types)
# step.startdatetime.should.be.a(six.string_types)
step.state.should.be.within(['STARTING', 'PENDING'])
expected = dict((s.name, s) for s in input_steps)
steps = conn.list_steps(cluster_id).steps
for x in steps:
y = expected[x.name]
# actiononfailure
list(arg.value for arg in x.config.args).should.equal([
'-mapper', y.mapper,
'-reducer', y.reducer,
'-input', y.input,
'-output', y.output,
])
x.config.jar.should.equal(
'/home/hadoop/contrib/streaming/hadoop-streaming.jar')
x.config.mainclass.should.equal('')
# properties
x.should.have.property('id').should.be.a(six.string_types)
x.name.should.equal(y.name)
x.status.state.should.be.within(['STARTING', 'PENDING'])
# x.status.statechangereason
x.status.timeline.creationdatetime.should.be.a(six.string_types)
# x.status.timeline.enddatetime.should.be.a(six.string_types)
# x.status.timeline.startdatetime.should.be.a(six.string_types)
x = conn.describe_step(cluster_id, x.id)
list(arg.value for arg in x.config.args).should.equal([
'-mapper', y.mapper,
'-reducer', y.reducer,
'-input', y.input,
'-output', y.output,
])
x.config.jar.should.equal(
'/home/hadoop/contrib/streaming/hadoop-streaming.jar')
x.config.mainclass.should.equal('')
# properties
x.should.have.property('id').should.be.a(six.string_types)
x.name.should.equal(y.name)
x.status.state.should.be.within(['STARTING', 'PENDING'])
# x.status.statechangereason
x.status.timeline.creationdatetime.should.be.a(six.string_types)
# x.status.timeline.enddatetime.should.be.a(six.string_types)
# x.status.timeline.startdatetime.should.be.a(six.string_types)
@requires_boto_gte('2.39')
def test_list_steps_with_states():
# boto's list_steps prior to 2.39 has a bug that ignores
# step_states argument.
steps = conn.list_steps(cluster_id).steps
step_id = steps[0].id
steps = conn.list_steps(cluster_id, step_states=['STARTING']).steps
steps.should.have.length_of(1)
steps[0].id.should.equal(step_id)
test_list_steps_with_states()
@mock_emr_deprecated
def test_tags():
input_tags = {"tag1": "val1", "tag2": "val2"}
conn = boto.connect_emr()
cluster_id = conn.run_jobflow(**run_jobflow_args)
conn.add_tags(cluster_id, input_tags)
cluster = conn.describe_cluster(cluster_id)
cluster.tags.should.have.length_of(2)
dict((t.key, t.value) for t in cluster.tags).should.equal(input_tags)
conn.remove_tags(cluster_id, list(input_tags.keys()))
cluster = conn.describe_cluster(cluster_id)
cluster.tags.should.have.length_of(0)
|
|
from datetime import date
from random import randint
from django.core.exceptions import ValidationError
from django.test import TestCase
from rapidsms.contrib.locations.models import Location, LocationType
from survey.models import BatchQuestionOrder, GroupCondition, HouseholdHead, QuestionModule, Indicator, Formula, Survey, EnumerationArea
from django.db import IntegrityError
from survey.models.batch import Batch
from survey.models.backend import Backend
from survey.models.households import Household, HouseholdMember
from survey.models.investigator import Investigator
from survey.models.question import Question, QuestionOption
from survey.models.householdgroups import HouseholdMemberGroup
from survey.tests.base_test import BaseTest
class QuestionTest(TestCase):
def setUp(self):
self.batch = Batch.objects.create(order=1)
def test_unicode_representation_of_question(self):
question = Question.objects.create(text="This is a question", answer_type=Question.NUMBER, identifier="QN123")
question_unicode = "%s - %s: (%s)" % (question.identifier, question.text, question.answer_type.upper())
self.assertEqual(question_unicode, str(question))
def test_numerical_question(self):
question = Question.objects.create(text="This is a question", answer_type=Question.NUMBER)
self.failUnless(question.id)
def test_text_question(self):
question = Question.objects.create(text="This is a question", answer_type=Question.TEXT)
self.failUnless(question.id)
def test_variable_name_should_be_unique(self):
question = Question.objects.create(text="This is a question", answer_type=Question.TEXT, identifier="Q1haha")
duplicate_question = Question(text="haha", answer_type=Question.TEXT, identifier="Q1haha")
self.assertRaises(IntegrityError, duplicate_question.save)
def test_multichoice_question(self):
question = Question.objects.create(text="This is a question",
answer_type=Question.MULTICHOICE)
question.batches.add(self.batch)
self.failUnless(question.id)
def test_order(self):
question_2 = Question.objects.create(text="This is a question", answer_type="number", order=2)
question_1 = Question.objects.create(text="This is another question", answer_type="number",
order=1)
question_1.batches.add(self.batch)
question_2.batches.add(self.batch)
questions = self.batch.questions.order_by('order').all()
self.assertEqual(questions[0], question_1)
self.assertEqual(questions[1], question_2)
def test_get_next_question_in_batch(self):
kampala = Location.objects.create(name="Kampala")
question_2 = Question.objects.create(text="This is a question", answer_type="number", order=2)
question_1 = Question.objects.create(text="This is another question", answer_type="number",
order=1)
self.batch.open_for_location(kampala)
question_1.batches.add(self.batch)
question_2.batches.add(self.batch)
self.assertEqual(question_2, question_1.next_question(kampala))
def test_get_next_question_in_batch_if_sub_question_is_provided(self):
kampala = Location.objects.create(name="Kampala")
question_2 = Question.objects.create(text="This is a question", answer_type="number", order=2)
question_1 = Question.objects.create(text="This is another question", answer_type="number",
order=1)
sub_question_1 = Question.objects.create(text="This is another question", answer_type="number",
parent=question_1, subquestion=True)
self.batch.open_for_location(kampala)
question_1.batches.add(self.batch)
question_2.batches.add(self.batch)
self.assertEqual(question_2, sub_question_1.next_question(kampala))
def test_cannot_save_subquestion_if_order_given(self):
question_2 = Question.objects.create(text="This is a question", answer_type="number", order=2)
subquestion = Question(text="Specify others", answer_type=Question.TEXT, subquestion=True, parent=question_2,
order=1)
self.assertRaises(ValidationError, subquestion.save)
def test_cannot_save_subquestion_if_parent_not_given(self):
subquestion = Question(text="Specify others", answer_type=Question.TEXT, subquestion=True)
self.assertRaises(ValidationError, subquestion.save)
def test_question_is_subquestion_if_parent_is_given(self):
question_2 = Question.objects.create(text="This is a question", answer_type="number", order=2)
subquestion = Question(text="Specify others", answer_type=Question.TEXT, parent=question_2)
subquestion.save()
self.assertEqual(False, question_2.subquestion)
self.assertEqual(True, subquestion.subquestion)
def test_question_should_know_it_all_question_fields(self):
question = Question()
fields = [str(item.attname) for item in question._meta.fields]
for field in ['id', 'identifier', 'group_id', 'text', 'answer_type', 'order', 'subquestion',
'parent_id', 'created', 'modified', 'module_id']:
self.assertIn(field, fields)
self.assertEqual(len(fields), 11)
def test_knows_what_group_question_belongs_to_when_successfully_created(self):
household_member_group = HouseholdMemberGroup.objects.create(name='Age 4-5', order=1)
data = {'identifier': 'question',
'text': "This is a question",
'answer_type': 'number',
'order': 1,
'subquestion': False,
'group': household_member_group}
question = Question.objects.create(**data)
self.assertEqual(household_member_group, question.group)
def test_knows_has_been_answered_by_member(self):
backend = Backend.objects.create(name='something')
kampala = Location.objects.create(name="Kampala")
ea = EnumerationArea.objects.create(name="EA2")
ea.locations.add(kampala)
investigator = Investigator.objects.create(name="", mobile_number="123456789", ea=ea, backend=backend)
household_member_group = HouseholdMemberGroup.objects.create(name='Age 4-5', order=1)
household = Household.objects.create(investigator=investigator, uid=0, ea=investigator.ea)
household_member = HouseholdMember.objects.create(surname="Member",
date_of_birth=date(1980, 2, 2), male=False,
household=household)
household_member_1 = HouseholdMember.objects.create(surname="Member",
date_of_birth=date(1980, 2, 2), male=False,
household=household)
question_1 = Question.objects.create(identifier="identifier1",
text="Question 1", answer_type='number',
order=1, subquestion=False, group=household_member_group)
self.batch.questions.add(question_1)
self.assertFalse(question_1.has_been_answered(household_member, self.batch))
investigator.member_answered(question_1, household_member, answer=1, batch=self.batch)
self.assertTrue(question_1.has_been_answered(household_member, self.batch))
self.assertFalse(question_1.has_been_answered(household_member_1, self.batch))
def test_knows_subquestions_for_a_question(self):
question_1 = Question.objects.create(text="question1", answer_type="number", order=1)
sub_question1 = Question.objects.create(text='sub1', answer_type=Question.NUMBER,
subquestion=True, parent=question_1)
sub_question2 = Question.objects.create(text='sub2', answer_type=Question.NUMBER,
subquestion=True, parent=question_1)
self.batch.questions.add(question_1)
self.batch.questions.add(sub_question1)
self.batch.questions.add(sub_question2)
subquestions = question_1.get_subquestions()
self.assertIn(sub_question1, subquestions)
self.assertIn(sub_question2, subquestions)
def test_question_knows_de_associate_self_from_batch(self):
batch = Batch.objects.create(order=1, name="Test")
batch_question = Question.objects.create(text="This is a test question", answer_type="multichoice")
another_batch_question = Question.objects.create(text="This is another test question",
answer_type=Question.MULTICHOICE)
batch_question.batches.add(batch)
another_batch_question.batches.add(batch)
BatchQuestionOrder.objects.create(question=batch_question, batch=batch, order=1)
BatchQuestionOrder.objects.create(question=another_batch_question, batch=batch, order=2)
batch_question.de_associate_from(batch)
batch_question_order = BatchQuestionOrder.objects.filter(batch=batch, question=batch_question)
self.failIf(batch_question_order)
updated_question = Question.objects.filter(text="This is a test question", answer_type="multichoice",
batches=batch)
self.failIf(updated_question)
remaining_batch_order_questions = BatchQuestionOrder.objects.filter(batch=batch,
question=another_batch_question)
self.failUnless(remaining_batch_order_questions)
self.assertEqual(1, remaining_batch_order_questions[0].order)
def test_question_knows_it_belongs_to_agroup(self):
another_group = HouseholdMemberGroup.objects.create(name="GENERAL", order=1)
general_group = HouseholdMemberGroup.objects.create(name="GENERAL", order=2)
general_condition = GroupCondition.objects.create(attribute="GENERAL", value="HEAD", condition='EQUALS')
general_group.conditions.add(general_condition)
general_question = Question.objects.create(group=general_group, text="General Question 1",
answer_type=Question.NUMBER,
order=4, identifier='Q3')
another_group_question = Question.objects.create(group=another_group, text="General Question 2",
answer_type=Question.NUMBER,
order=5, identifier='Q4')
self.assertTrue(general_question.belongs_to(general_group))
self.assertFalse(general_question.belongs_to(another_group))
self.assertTrue(another_group_question.belongs_to(another_group))
self.assertFalse(another_group_question.belongs_to(general_group))
class SimpleIndicatorQuestionCount(BaseTest):
def create_household_head(self, uid, investigator):
self.household = Household.objects.create(investigator=investigator, ea=investigator.ea,
uid=uid, survey=self.survey)
return HouseholdHead.objects.create(household=self.household, surname="Name " + str(randint(1, 9999)),
date_of_birth="1990-02-09")
def setUp(self):
self.survey = Survey.objects.create(name="haha")
self.batch = Batch.objects.create(order=1, survey=self.survey)
self.country = LocationType.objects.create(name="Country", slug="country")
self.region = LocationType.objects.create(name="Region", slug="region")
self.district = LocationType.objects.create(name="District", slug='district')
self.uganda = Location.objects.create(name="Uganda", type=self.country)
self.west = Location.objects.create(name="WEST", type=self.region, tree_parent=self.uganda)
self.central = Location.objects.create(name="CENTRAL", type=self.region, tree_parent=self.uganda)
self.kampala = Location.objects.create(name="Kampala", tree_parent=self.central, type=self.district)
self.mbarara = Location.objects.create(name="Mbarara", tree_parent=self.west, type=self.district)
ea = EnumerationArea.objects.create(name="EA2", survey=self.survey)
ea.locations.add(self.kampala)
mbarara_ea = EnumerationArea.objects.create(name="EA2", survey=self.survey)
mbarara_ea.locations.add(self.mbarara)
backend = Backend.objects.create(name='something')
self.investigator = Investigator.objects.create(name="Investigator 1", mobile_number="1", ea=ea,
backend=backend)
self.investigator_2 = Investigator.objects.create(name="Investigator 1", mobile_number="33331", ea=mbarara_ea,
backend=backend)
health_module = QuestionModule.objects.create(name="Health")
member_group = HouseholdMemberGroup.objects.create(name="Greater than 2 years", order=1)
self.question_3 = Question.objects.create(text="This is a question",
answer_type=Question.MULTICHOICE, order=3,
module=health_module, group=member_group)
self.yes_option = QuestionOption.objects.create(question=self.question_3, text="Yes", order=1)
self.no_option = QuestionOption.objects.create(question=self.question_3, text="No", order=2)
self.question_3.batches.add(self.batch)
self.household_head_1 = self.create_household_head(0, self.investigator)
self.household_head_2 = self.create_household_head(1, self.investigator)
self.household_head_3 = self.create_household_head(2, self.investigator)
self.household_head_4 = self.create_household_head(3, self.investigator)
self.household_head_5 = self.create_household_head(4, self.investigator)
self.household_head_6 = self.create_household_head(5, self.investigator_2)
self.household_head_7 = self.create_household_head(6, self.investigator_2)
self.household_head_8 = self.create_household_head(7, self.investigator_2)
self.household_head_9 = self.create_household_head(8, self.investigator_2)
def test_returns_options_counts_given_list_of_locations(self):
self.investigator.member_answered(self.question_3, self.household_head_1, self.yes_option.order, self.batch)
self.investigator.member_answered(self.question_3, self.household_head_2, self.yes_option.order, self.batch)
self.investigator.member_answered(self.question_3, self.household_head_3, self.yes_option.order, self.batch)
self.investigator.member_answered(self.question_3, self.household_head_4, self.no_option.order, self.batch)
self.investigator.member_answered(self.question_3, self.household_head_5, self.no_option.order, self.batch)
self.investigator_2.member_answered(self.question_3, self.household_head_6, self.yes_option.order, self.batch)
self.investigator_2.member_answered(self.question_3, self.household_head_7, self.yes_option.order, self.batch)
self.investigator_2.member_answered(self.question_3, self.household_head_8, self.no_option.order, self.batch)
self.investigator_2.member_answered(self.question_3, self.household_head_9, self.no_option.order, self.batch)
region_responses = {self.central: {self.yes_option.text: 3, self.no_option.text: 2},
self.west: {self.yes_option.text: 2, self.no_option.text: 2}}
self.assertEquals(self.question_3.hierarchical_result_for(self.uganda, self.survey), region_responses)
central_region_responses = {self.kampala: {self.yes_option.text: 3, self.no_option.text: 2}}
self.assertEquals(self.question_3.hierarchical_result_for(self.central, self.survey), central_region_responses)
west_region_responses = {self.mbarara: {self.yes_option.text: 2, self.no_option.text: 2}}
self.assertEquals(self.question_3.hierarchical_result_for(self.west, self.survey), west_region_responses)
class QuestionOptionTest(TestCase):
def setUp(self):
batch = Batch.objects.create(order=1)
self.question = Question.objects.create(text="This is a question", answer_type="multichoice")
batch.questions.add(self.question)
def test_store(self):
option_2 = QuestionOption.objects.create(question=self.question, text="OPTION 1", order=2)
option_1 = QuestionOption.objects.create(question=self.question, text="OPTION 2", order=1)
options = self.question.options.order_by('order').all()
self.assertEqual(len(options), 2)
options_in_text = "1: %s\n2: %s" % (option_1.text, option_2.text)
self.assertEqual(self.question.options_in_text(), options_in_text)
def test_question_text(self):
option_2 = QuestionOption.objects.create(question=self.question, text="OPTION 1", order=2)
option_1 = QuestionOption.objects.create(question=self.question, text="OPTION 2", order=1)
question_in_text = "%s\n%s" % (self.question.text, self.question.options_in_text())
self.assertEqual(self.question.to_ussd(), question_in_text)
|
|
import pytest
from aioresponses import aioresponses
import rasa.core
from rasa.core.actions import action
from rasa.core.actions.action import (
ACTION_DEACTIVATE_FORM_NAME, ACTION_DEFAULT_ASK_AFFIRMATION_NAME,
ACTION_DEFAULT_ASK_REPHRASE_NAME, ACTION_DEFAULT_FALLBACK_NAME,
ACTION_LISTEN_NAME, ACTION_RESTART_NAME, ACTION_REVERT_FALLBACK_EVENTS_NAME,
ActionExecutionRejection, ActionListen, ActionRestart, RemoteAction,
UtterAction, ACTION_BACK_NAME)
from rasa.core.domain import Domain
from rasa.core.events import Restarted, SlotSet, UserUtteranceReverted
from rasa.core.trackers import DialogueStateTracker
from rasa.core.utils import ClientResponseError, EndpointConfig
from tests.core.utilities import json_of_latest_request, latest_request
async def test_restart(default_dispatcher_collecting, default_domain):
tracker = DialogueStateTracker("default",
default_domain.slots)
events = await ActionRestart().run(default_dispatcher_collecting, tracker,
default_domain)
assert events == [Restarted()]
def test_text_format():
assert "{}".format(ActionListen()) == \
"Action('action_listen')"
assert "{}".format(UtterAction("my_action_name")) == \
"UtterAction('my_action_name')"
def test_action_instantiation_from_names():
instantiated_actions = action.actions_from_names(
["random_name", "utter_test"], None, ["random_name", "utter_test"])
assert len(instantiated_actions) == 2
assert isinstance(instantiated_actions[0], RemoteAction)
assert instantiated_actions[0].name() == "random_name"
assert isinstance(instantiated_actions[1], UtterAction)
assert instantiated_actions[1].name() == "utter_test"
def test_domain_action_instantiation():
domain = Domain(
intent_properties={},
entities=[],
slots=[],
templates={},
action_names=["my_module.ActionTest", "utter_test"],
form_names=[])
instantiated_actions = domain.actions(None)
assert len(instantiated_actions) == 10
assert instantiated_actions[0].name() == ACTION_LISTEN_NAME
assert instantiated_actions[1].name() == ACTION_RESTART_NAME
assert instantiated_actions[2].name() == ACTION_DEFAULT_FALLBACK_NAME
assert instantiated_actions[3].name() == ACTION_DEACTIVATE_FORM_NAME
assert instantiated_actions[4].name() == ACTION_REVERT_FALLBACK_EVENTS_NAME
assert instantiated_actions[5].name() == (
ACTION_DEFAULT_ASK_AFFIRMATION_NAME)
assert instantiated_actions[6].name() == (
ACTION_DEFAULT_ASK_REPHRASE_NAME)
assert instantiated_actions[7].name() == ACTION_BACK_NAME
assert instantiated_actions[8].name() == "my_module.ActionTest"
assert instantiated_actions[9].name() == "utter_test"
def test_domain_fails_on_duplicated_actions():
with pytest.raises(ValueError):
Domain(intent_properties={},
entities=[],
slots=[],
templates={},
action_names=["random_name", "random_name"],
form_names=[])
async def test_remote_action_runs(default_dispatcher_collecting,
default_domain):
tracker = DialogueStateTracker("default",
default_domain.slots)
endpoint = EndpointConfig("https://example.com/webhooks/actions")
remote_action = action.RemoteAction("my_action",
endpoint)
with aioresponses() as mocked:
mocked.post(
'https://example.com/webhooks/actions',
payload={"events": [], "responses": []})
await remote_action.run(default_dispatcher_collecting,
tracker,
default_domain)
r = latest_request(mocked, 'post',
"https://example.com/webhooks/actions")
assert r
assert json_of_latest_request(r) == {
'domain': default_domain.as_dict(),
'next_action': 'my_action',
'sender_id': 'default',
'version': rasa.__version__,
'tracker': {
'latest_message': {
'entities': [],
'intent': {},
'text': None
},
'active_form': {},
'latest_action_name': None,
'sender_id': 'default',
'paused': False,
'latest_event_time': None,
'followup_action': 'action_listen',
'slots': {'name': None},
'events': [],
'latest_input_channel': None
}
}
async def test_remote_action_logs_events(default_dispatcher_collecting,
default_domain):
tracker = DialogueStateTracker("default",
default_domain.slots)
endpoint = EndpointConfig("https://example.com/webhooks/actions")
remote_action = action.RemoteAction("my_action",
endpoint)
response = {
"events": [
{"event": "slot", "value": "rasa", "name": "name"}],
"responses": [{"text": "test text",
"buttons": [{"title": "cheap", "payload": "cheap"}]},
{"template": "utter_greet"}]}
with aioresponses() as mocked:
mocked.post('https://example.com/webhooks/actions', payload=response)
events = await remote_action.run(default_dispatcher_collecting,
tracker,
default_domain)
r = latest_request(mocked, 'post',
"https://example.com/webhooks/actions")
assert r
assert json_of_latest_request(r) == {
'domain': default_domain.as_dict(),
'next_action': 'my_action',
'sender_id': 'default',
'version': rasa.__version__,
'tracker': {
'latest_message': {
'entities': [],
'intent': {},
'text': None
},
'active_form': {},
'latest_action_name': None,
'sender_id': 'default',
'paused': False,
'followup_action': 'action_listen',
'latest_event_time': None,
'slots': {'name': None},
'events': [],
'latest_input_channel': None
}
}
assert events == [SlotSet("name", "rasa")]
channel = default_dispatcher_collecting.output_channel
assert channel.messages == [
{"text": "test text", "recipient_id": "my-sender",
"buttons": [{"title": "cheap", "payload": "cheap"}]},
{"text": "hey there None!", "recipient_id": "my-sender"}]
async def test_remote_action_without_endpoint(default_dispatcher_collecting,
default_domain):
tracker = DialogueStateTracker("default",
default_domain.slots)
remote_action = action.RemoteAction("my_action", None)
with pytest.raises(Exception) as execinfo:
await remote_action.run(default_dispatcher_collecting,
tracker,
default_domain)
assert "you didn't configure an endpoint" in str(execinfo.value)
async def test_remote_action_endpoint_not_running(
default_dispatcher_collecting,
default_domain):
tracker = DialogueStateTracker("default",
default_domain.slots)
endpoint = EndpointConfig("https://example.com/webhooks/actions")
remote_action = action.RemoteAction("my_action", endpoint)
with pytest.raises(Exception) as execinfo:
await remote_action.run(default_dispatcher_collecting,
tracker,
default_domain)
assert "Failed to execute custom action." in str(execinfo.value)
async def test_remote_action_endpoint_responds_500(
default_dispatcher_collecting,
default_domain):
tracker = DialogueStateTracker("default",
default_domain.slots)
endpoint = EndpointConfig("https://example.com/webhooks/actions")
remote_action = action.RemoteAction("my_action", endpoint)
with aioresponses() as mocked:
mocked.post('https://example.com/webhooks/actions', status=500)
with pytest.raises(Exception) as execinfo:
await remote_action.run(default_dispatcher_collecting,
tracker,
default_domain)
assert "Failed to execute custom action." in str(execinfo.value)
async def test_remote_action_endpoint_responds_400(
default_dispatcher_collecting,
default_domain):
tracker = DialogueStateTracker("default",
default_domain.slots)
endpoint = EndpointConfig("https://example.com/webhooks/actions")
remote_action = action.RemoteAction("my_action", endpoint)
with aioresponses() as mocked:
# noinspection PyTypeChecker
mocked.post(
'https://example.com/webhooks/actions',
exception=ClientResponseError(
400, None, '{"action_name": "my_action"}'))
with pytest.raises(Exception) as execinfo:
await remote_action.run(default_dispatcher_collecting,
tracker,
default_domain)
assert execinfo.type == ActionExecutionRejection
assert "Custom action 'my_action' rejected to run" in str(execinfo.value)
async def test_default_action(
default_dispatcher_collecting,
default_domain):
tracker = DialogueStateTracker("default",
default_domain.slots)
fallback_action = action.ActionDefaultFallback()
events = await fallback_action.run(default_dispatcher_collecting,
tracker,
default_domain)
channel = default_dispatcher_collecting.output_channel
assert channel.messages == [
{u'text': u'sorry, I didn\'t get that, can you rephrase it?',
u'recipient_id': u'my-sender'}]
assert events == [UserUtteranceReverted()]
|
|
import time
import datetime
import pymongo
import pyes
import redis
import celery
import mongoengine as mongo
from pyes.query import MatchQuery
from django.conf import settings
from django.contrib.auth.models import User
from apps.search.tasks import IndexSubscriptionsForSearch
from apps.search.tasks import IndexSubscriptionsChunkForSearch
from apps.search.tasks import IndexFeedsForSearch
from utils import log as logging
from utils.feed_functions import chunks
class MUserSearch(mongo.Document):
'''Search index state of a user's subscriptions.'''
user_id = mongo.IntField(unique=True)
last_search_date = mongo.DateTimeField()
subscriptions_indexed = mongo.BooleanField()
subscriptions_indexing = mongo.BooleanField()
meta = {
'collection': 'user_search',
'indexes': ['user_id'],
'index_drop_dups': True,
'allow_inheritance': False,
}
@classmethod
def get_user(cls, user_id, create=True):
try:
user_search = cls.objects.read_preference(pymongo.ReadPreference.PRIMARY)\
.get(user_id=user_id)
except cls.DoesNotExist:
if create:
user_search = cls.objects.create(user_id=user_id)
else:
user_search = None
return user_search
def touch_search_date(self):
if not self.subscriptions_indexed and not self.subscriptions_indexing:
self.schedule_index_subscriptions_for_search()
self.subscriptions_indexing = True
self.last_search_date = datetime.datetime.now()
self.save()
def schedule_index_subscriptions_for_search(self):
IndexSubscriptionsForSearch.apply_async(kwargs=dict(user_id=self.user_id),
queue='search_indexer_tasker')
# Should be run as a background task
def index_subscriptions_for_search(self):
from apps.rss_feeds.models import Feed
from apps.reader.models import UserSubscription
SearchStory.create_elasticsearch_mapping()
start = time.time()
user = User.objects.get(pk=self.user_id)
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(user.username, 'search_index_complete:start')
subscriptions = UserSubscription.objects.filter(user=user).only('feed')
total = subscriptions.count()
feed_ids = []
for sub in subscriptions:
try:
feed_ids.append(sub.feed.pk)
except Feed.DoesNotExist:
continue
feed_id_chunks = [c for c in chunks(feed_ids, 6)]
logging.user(user, "~FCIndexing ~SB%s feeds~SN in %s chunks..." %
(total, len(feed_id_chunks)))
tasks = [IndexSubscriptionsChunkForSearch().s(feed_ids=feed_id_chunk,
user_id=self.user_id
).set(queue='search_indexer')
for feed_id_chunk in feed_id_chunks]
group = celery.group(*tasks)
res = group.apply_async(queue='search_indexer')
res.join_native()
duration = time.time() - start
logging.user(user, "~FCIndexed ~SB%s feeds~SN in ~FM~SB%s~FC~SN sec." %
(total, round(duration, 2)))
r.publish(user.username, 'search_index_complete:done')
self.subscriptions_indexed = True
self.subscriptions_indexing = False
self.save()
def index_subscriptions_chunk_for_search(self, feed_ids):
from apps.rss_feeds.models import Feed
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
user = User.objects.get(pk=self.user_id)
logging.user(user, "~FCIndexing %s feeds..." % len(feed_ids))
for feed_id in feed_ids:
feed = Feed.get_by_id(feed_id)
if not feed: continue
feed.index_stories_for_search()
r.publish(user.username, 'search_index_complete:feeds:%s' %
','.join([str(f) for f in feed_ids]))
@classmethod
def schedule_index_feeds_for_search(cls, feed_ids, user_id):
user_search = cls.get_user(user_id, create=False)
if (not user_search or
not user_search.subscriptions_indexed or
user_search.subscriptions_indexing):
# User hasn't searched before.
return
if not isinstance(feed_ids, list):
feed_ids = [feed_ids]
IndexFeedsForSearch.apply_async(kwargs=dict(feed_ids=feed_ids, user_id=user_id),
queue='search_indexer')
@classmethod
def index_feeds_for_search(cls, feed_ids, user_id):
from apps.rss_feeds.models import Feed
user = User.objects.get(pk=user_id)
logging.user(user, "~SB~FCIndexing %s~FC by request..." % feed_ids)
for feed_id in feed_ids:
feed = Feed.get_by_id(feed_id)
if not feed: continue
feed.index_stories_for_search()
@classmethod
def remove_all(cls, drop_index=False):
user_searches = cls.objects.all()
logging.info(" ---> ~SN~FRRemoving ~SB%s~SN user searches..." % user_searches.count())
for user_search in user_searches:
try:
user_search.remove()
except Exception, e:
print " ****> Error on search removal: %s" % e
# You only need to drop the index if there is data you want to clear.
# A new search server won't need this, as there isn't anything to drop.
if drop_index:
logging.info(" ---> ~FRRemoving stories search index...")
SearchStory.drop()
def remove(self):
from apps.rss_feeds.models import Feed
from apps.reader.models import UserSubscription
user = User.objects.get(pk=self.user_id)
subscriptions = UserSubscription.objects.filter(user=self.user_id,
feed__search_indexed=True)
total = subscriptions.count()
removed = 0
for sub in subscriptions:
try:
feed = sub.feed
except Feed.DoesNotExist:
continue
feed.search_indexed = False
feed.save()
removed += 1
logging.user(user, "~FCRemoved ~SB%s/%s feed's search indexes~SN for ~SB~FB%s~FC~SN." %
(removed, total, user.username))
self.delete()
class SearchStory:
ES = pyes.ES(settings.ELASTICSEARCH_STORY_HOSTS)
name = "stories"
@classmethod
def index_name(cls):
return "%s-index" % cls.name
@classmethod
def type_name(cls):
return "%s-type" % cls.name
@classmethod
def create_elasticsearch_mapping(cls, delete=False):
if delete:
cls.ES.indices.delete_index_if_exists("%s-index" % cls.name)
cls.ES.indices.create_index_if_missing("%s-index" % cls.name)
mapping = {
'title': {
'boost': 3.0,
'index': 'analyzed',
'store': 'no',
'type': 'string',
'analyzer': 'snowball',
},
'content': {
'boost': 1.0,
'index': 'analyzed',
'store': 'no',
'type': 'string',
'analyzer': 'snowball',
},
'tags': {
'boost': 2.0,
'index': 'analyzed',
'store': 'no',
'type': 'string',
'analyzer': 'snowball',
},
'author': {
'boost': 1.0,
'index': 'analyzed',
'store': 'no',
'type': 'string',
'analyzer': 'keyword',
},
'feed_id': {
'store': 'no',
'type': 'integer'
},
'date': {
'store': 'no',
'type': 'date',
}
}
cls.ES.indices.put_mapping("%s-type" % cls.name, {
'properties': mapping,
'_source': {'enabled': False},
}, ["%s-index" % cls.name])
@classmethod
def index(cls, story_hash, story_title, story_content, story_tags, story_author, story_feed_id,
story_date):
doc = {
"content" : story_content,
"title" : story_title,
"tags" : ', '.join(story_tags),
"author" : story_author,
"feed_id" : story_feed_id,
"date" : story_date,
}
try:
cls.ES.index(doc, "%s-index" % cls.name, "%s-type" % cls.name, story_hash)
except pyes.exceptions.NoServerAvailable:
logging.debug(" ***> ~FRNo search server available.")
@classmethod
def remove(cls, story_hash):
try:
cls.ES.delete("%s-index" % cls.name, "%s-type" % cls.name, story_hash)
except pyes.exceptions.NoServerAvailable:
logging.debug(" ***> ~FRNo search server available.")
@classmethod
def drop(cls):
cls.ES.indices.delete_index_if_exists("%s-index" % cls.name)
@classmethod
def query(cls, feed_ids, query, order, offset, limit):
cls.create_elasticsearch_mapping()
cls.ES.indices.refresh()
sort = "date:desc" if order == "newest" else "date:asc"
string_q = pyes.query.StringQuery(query, default_operator="AND")
feed_q = pyes.query.TermsQuery('feed_id', feed_ids[:1000])
q = pyes.query.BoolQuery(must=[string_q, feed_q])
try:
results = cls.ES.search(q, indices=cls.index_name(), doc_types=[cls.type_name()],
partial_fields={}, sort=sort, start=offset, size=limit)
except pyes.exceptions.NoServerAvailable:
logging.debug(" ***> ~FRNo search server available.")
return []
logging.info(" ---> ~FG~SNSearch ~FCstories~FG for: ~SB%s~SN (across %s feed%s)" %
(query, len(feed_ids), 's' if len(feed_ids) != 1 else ''))
return [r.get_id() for r in results]
class SearchFeed:
ES = pyes.ES(settings.ELASTICSEARCH_FEED_HOSTS)
name = "feeds"
@classmethod
def index_name(cls):
return "%s-index" % cls.name
@classmethod
def type_name(cls):
return "%s-type" % cls.name
@classmethod
def create_elasticsearch_mapping(cls, delete=False):
if delete:
cls.ES.indices.delete_index_if_exists("%s-index" % cls.name)
settings = {
"index" : {
"analysis": {
"analyzer": {
"edgengram_analyzer": {
"filter": ["edgengram"],
"tokenizer": "lowercase",
"type": "custom"
},
"ngram_analyzer": {
"filter": ["ngram"],
"tokenizer": "lowercase",
"type": "custom"
}
},
"filter": {
"edgengram": {
"max_gram": "15",
"min_gram": "2",
"type": "edgeNGram"
},
"ngram": {
"max_gram": "15",
"min_gram": "3",
"type": "nGram"
}
},
"tokenizer": {
"edgengram_tokenizer": {
"max_gram": "15",
"min_gram": "2",
"side": "front",
"type": "edgeNGram"
},
"ngram_tokenizer": {
"max_gram": "15",
"min_gram": "3",
"type": "nGram"
}
}
}
}
}
cls.ES.indices.create_index_if_missing("%s-index" % cls.name, settings)
mapping = {
"address": {
"analyzer": "edgengram_analyzer",
"store": True,
"term_vector": "with_positions_offsets",
"type": "string"
},
"feed_id": {
"store": True,
"type": "string"
},
"num_subscribers": {
"index": "analyzed",
"store": True,
"type": "long"
},
"title": {
"analyzer": "edgengram_analyzer",
"store": True,
"term_vector": "with_positions_offsets",
"type": "string"
}
}
cls.ES.indices.put_mapping("%s-type" % cls.name, {
'properties': mapping,
}, ["%s-index" % cls.name])
@classmethod
def index(cls, feed_id, title, address, link, num_subscribers):
doc = {
"feed_id" : feed_id,
"title" : title,
"address" : address,
"link" : link,
"num_subscribers" : num_subscribers,
}
try:
cls.ES.index(doc, "%s-index" % cls.name, "%s-type" % cls.name, feed_id)
except pyes.exceptions.NoServerAvailable:
logging.debug(" ***> ~FRNo search server available.")
@classmethod
def query(cls, text):
cls.create_elasticsearch_mapping()
try:
cls.ES.default_indices = cls.index_name()
cls.ES.indices.refresh()
except pyes.exceptions.NoServerAvailable:
logging.debug(" ***> ~FRNo search server available.")
return []
logging.info("~FGSearch ~FCfeeds~FG by address: ~SB%s" % text)
q = MatchQuery('address', text, operator="and", type="phrase")
results = cls.ES.search(query=q, sort="num_subscribers:desc", size=5,
doc_types=[cls.type_name()])
if not results.total:
logging.info("~FGSearch ~FCfeeds~FG by title: ~SB%s" % text)
q = MatchQuery('title', text, operator="and")
results = cls.ES.search(query=q, sort="num_subscribers:desc", size=5,
doc_types=[cls.type_name()])
if not results.total:
logging.info("~FGSearch ~FCfeeds~FG by link: ~SB%s" % text)
q = MatchQuery('link', text, operator="and")
results = cls.ES.search(query=q, sort="num_subscribers:desc", size=5,
doc_types=[cls.type_name()])
return results
@classmethod
def export_csv(cls):
import djqscsv
from apps.rss_feeds.models import Feed  # Feed is not imported at module level
qs = Feed.objects.filter(num_subscribers__gte=20).values('id', 'feed_title', 'feed_address', 'feed_link', 'num_subscribers')
csv = djqscsv.render_to_csv_response(qs).content
f = open('feeds.csv', 'w+')
f.write(csv)
f.close()
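# Usage sketch (hedged): roughly how the pieces above fit together when a user
# runs a search. The feed ids, query text and paging values are illustrative
# only; real callers derive feed_ids from the user's subscriptions.
def _example_story_search(user_id, text):
    user_search = MUserSearch.get_user(user_id)
    user_search.touch_search_date()   # kicks off subscription indexing on first search
    feed_ids = [101, 102, 103]        # hypothetical subscribed feed ids
    story_hashes = SearchStory.query(feed_ids=feed_ids, query=text,
                                     order="newest", offset=0, limit=20)
    matching_feeds = SearchFeed.query(text)
    return story_hashes, matching_feeds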
|
|
import math
import cPickle
import cocos
from cocos import euclid
import pyglet
from pyglet.gl import *
import copy
class Skin(cocos.cocosnode.CocosNode):
def __init__(self, skeleton):
super(Skin, self).__init__()
self.skeleton = skeleton
class ColorSkin(Skin):
def __init__(self, skeleton, color):
super(ColorSkin, self).__init__(skeleton)
self.color = color
def draw(self):
self.skeleton.propagate_matrix()
glPushMatrix()
self.transform()
self.skeleton.visit_children( lambda bone: self.draw_bone( bone ) )
bones = self.skeleton.visit_children(
lambda bone: (bone.label, bone.parent_matrix*bone.matrix))
bones = dict(bones)
glPopMatrix()
def draw_bone(self, bone):
p1 = bone.get_start()
p2 = bone.get_end()
glColor4ub(*self.color)
glLineWidth(5)
glBegin(GL_LINES)
glVertex2f(*p1)
glVertex2f(*p2)
glEnd()
class BitmapSkin(Skin):
skin_parts = []
def __init__(self, skeleton, skin_def, alpha=255):
super(BitmapSkin, self).__init__(skeleton)
self.alpha = alpha
self.skin_parts = skin_def
self.regenerate()
def move(self, idx, dx, dy):
sp = self.skin_parts
pos = sp[idx][1]
sp[idx] = sp[idx][0], (pos[0]+dx, pos[1]+dy), sp[idx][2], \
sp[idx][3], sp[idx][4], sp[idx][5]
self.regenerate()
def get_control_points(self):
return [ (i, p[0]) for i,p in enumerate(self.skin_parts) ]
def regenerate(self):
# print self.skin_parts
self.parts = [ (name, position, scale,\
pyglet.resource.image(image,flip_y=flip_y, flip_x=flip_x)) \
for name, position, image, flip_x, flip_y, scale
in self.skin_parts ]
def draw(self):
self.skeleton.propagate_matrix()
glPushMatrix()
self.transform()
bones = self.skeleton.visit_children(
lambda bone: (bone.label, bone.parent_matrix*bone.matrix))
bones = dict(bones)
for bname, position, scale, image in self.parts:
matrix = bones[bname]
self.blit_image(matrix, position, scale, image)
glPopMatrix()
def blit_image(self, matrix, position, scale, image):
x, y = image.width*scale, image.height*scale
#dx = self.x + position[0]
#dy = self.y + position[1]
dx, dy = position
glEnable(image.target)
glBindTexture(image.target, image.id)
glPushAttrib(GL_COLOR_BUFFER_BIT)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
# blit img
points = [
(-dx, -dy),
(x-dx, -dy),
(x-dx, y-dy),
(-dx, y-dy)
]
a,b,_,c,d,_,e,f,_,g,h,_ = image.texture.tex_coords
textures = [ a,b,c,d,e,f,g,h ]
np = [ matrix*euclid.Point2(*p) for p in points ]
glColor4ub(255,255,255,self.alpha)
glBegin(GL_QUADS)
glTexCoord2f(a,b)
glVertex2f(*np[0])
glTexCoord2f(c,d)
glVertex2f(*np[1])
glTexCoord2f(e,f)
glVertex2f(*np[2])
glTexCoord2f(g,h)
glVertex2f(*np[3])
glEnd()
glColor4ub(255,255,255,255)
#pyglet.graphics.draw(4, GL_QUADS,
# ("v2f", new_points),
# ("t2f", textures),
# ("c4B", [255,255,255,self.alpha]*4),
# )
glPopAttrib()
glDisable(image.target)
def flip(self):
nsp = []
for name, position, image, flip_x, flip_y, scale in self.skin_parts:
im = pyglet.resource.image(image,flip_y=flip_y, flip_x=flip_x)
x = im.width*scale - position[0]
y = position[1]
nsp.append( (name, (x,y), image, not flip_x, flip_y, scale))
self.skin_parts = nsp
self.regenerate()
self.skeleton = self.skeleton.flipped()
class Animate(cocos.actions.IntervalAction):
def init(self, animation, recenter=False, recenter_x=False, recenter_y=False):
if recenter:
recenter_x = recenter_y = True
self.recenter_x = recenter_x
self.recenter_y = recenter_y
self.duration = animation.get_duration()
self.animation = animation
def start(self):
nsk = copy.deepcopy(self.target.skeleton)
if self.recenter_x:
self.target.x += nsk.translation.x
nsk.translation.x = 0
if self.recenter_y:
self.target.y += nsk.translation.y
nsk.translation.y = 0
self.start_skeleton = nsk
def update(self, t):
self.animation.pose(self.target.skeleton, t, self.start_skeleton)
def __reversed__(self):
raise NotImplementedError("gimme some time")
class Skeleton(object):
def __init__(self, bone):
super(Skeleton, self).__init__()
self.bone = bone
self.matrix = euclid.Matrix3.new_identity()
self.translation = euclid.Vector2(0,0)
def flipped(self):
sk = Skeleton(self.bone.flipped())
sk.translation.x = -self.translation.x
sk.translation.y = self.translation.y
sk.matrix = euclid.Matrix3.new_translate( *sk.translation )
return sk
def save(self, name):
f = open(name, "w")
cPickle.dump(self, f)
f.close()
def move(self, dx, dy):
self.matrix.translate(dx, dy)
self.translation.x += dx
self.translation.y += dy
def propagate_matrix(self):
def visit(matrix, child):
child.parent_matrix = matrix
matrix = matrix * child.matrix
for c in child.children:
visit(matrix, c)
visit(self.matrix, self.bone)
def visit_children(self, func):
result = []
def inner(bone):
result.append( func( bone ) )
for b in bone.children:
inner(b)
inner(self.bone)
return result
def get_control_points(self):
points = [self]
self.propagate_matrix()
points += self.visit_children( lambda bone: bone )
return points
def interpolated_to(self, next, delta):
sk = Skeleton(self.bone.interpolated_to(next.bone, delta))
sk.translation = (next.translation-self.translation) * delta + self.translation
sk.matrix = euclid.Matrix3.new_translate( *sk.translation )
return sk
def pose_from(self, other):
self.matrix = other.matrix
self.translation = other.translation
self.bone = copy.deepcopy(other.bone)
class Bone(object):
def __init__(self, label, size, rotation, translation):
self.size = size
self.label = label
self.children = []
self.matrix = euclid.Matrix3.new_translate(*translation) * \
euclid.Matrix3.new_rotate( math.radians(rotation) )
self.parent_matrix = euclid.Matrix3.new_identity()
self.translation = euclid.Point2(*translation)
self.rotation = math.radians(rotation)
def move(self, dx, dy):
self.translation.x += dx
self.translation.y += dy
self.matrix = euclid.Matrix3.new_translate(*self.translation) * \
euclid.Matrix3.new_rotate( self.rotation)
def flipped(self):
bone = Bone(self.label, self.size, -math.degrees(self.rotation),
(-self.translation[0], self.translation[1]))
for b in self.children:
bone.add( b.flipped() )
return bone
def rotate(self, angle):
self.rotation += angle
self.matrix.rotate( angle )
def add(self, bone):
self.children.append(bone)
return self
def get_end(self):
return self.parent_matrix * self.matrix * euclid.Point2(0, -self.size)
def get_start(self):
return self.parent_matrix * self.matrix * euclid.Point2(0, 0)
def interpolated_to(self, next, delta):
ea = next.rotation%(math.pi*2)
sa = self.rotation %(math.pi*2)
angle = ((ea%(math.pi*2)) - (sa%(math.pi*2)))
if angle > math.pi:
angle = -math.pi*2+angle
if angle < -math.pi:
angle = math.pi*2+angle
nr = ( sa + angle * delta ) % (math.pi*2)
nr = math.degrees( nr )
bone = Bone(self.label, self.size, nr, self.translation)
for i, c in enumerate(self.children):
nc = c.interpolated_to(next.children[i], delta)
bone.add( nc )
return bone
def dump(self, depth=0):
print "-"*depth, self
for c in self.children:
c.dump(depth+1)
def repr(self, depth=0):
repr = " "*depth*4 + "Bone('%s', %s, %s, %s)"%(
self.label, self.size, math.degrees(self.rotation), self.translation
)
for c in self.children:
repr += " "*depth*4 +".add(\n" + c.repr(depth+1) + ")"
repr += "\n"
return repr
class Animation(object):
def __init__(self, skeleton):
self.frames = {}
self.position = 0
self.skeleton = skeleton
def flipped(self):
c = copy.deepcopy(self)
for t, sk in c.frames.items():
c.frames[t] = sk.flipped()
return c
def pose(self, who, t, start):
dt = t * self.get_duration()
self.position = dt
ct, curr = self.get_keyframe()
#print who.translation
# if we are in a keyframe, pose that
if curr:
who.pose_from( curr )
return
# find previous, if not, use start
pt, prev = self.get_keyframe(-1)
if not prev:
prev = start
pt = 0
# find next, if not, pose at prev
nt, next = self.get_keyframe(1)
if not next:
who.pose_from( prev )
return
# we find the dt between prev and next and pose from it
ft = (nt-dt)/(nt-pt)
who.pose_from( next.interpolated_to( prev, ft ) )
def get_duration(self):
if self.frames:
return max(max( self.frames ), self.position )
else:
return self.position
def get_markers(self):
return self.frames.keys()
def get_position(self):
return self.position
def get_keyframe(self, offset=0):
if offset == 0:
if self.position in self.frames:
return self.position, self.frames[self.position]
else:
return None, None
elif offset < 0:
prevs = [ t for t in self.frames if t < self.position ]
prevs.sort()
if abs(offset) <= len(prevs):
return prevs[offset], self.frames[prevs[offset]]
else:
return None, None
elif offset > 0:
next = [ t for t in self.frames if t > self.position ]
next.sort()
if abs(offset) <= len(next):
return next[offset-1], self.frames[next[offset-1]]
else:
return None, None
def next_keyframe(self):
next = [ t for t in self.frames if t > self.position ]
if not next:
return False
self.position = min(next)
return True
def prev_keyframe(self):
prevs = [ t for t in self.frames if t < self.position ]
if not prevs:
return False
self.position = max(prevs)
return True
def move_position(self, delta):
self.position = max(self.position+delta, 0)
return True
def move_start(self):
self.position = 0
return True
def move_end(self):
if self.frames:
self.position = max( self.frames )
else:
self.position = 0
return True
def insert_keyframe(self):
if self.position not in self.frames:
t, sk = self.get_keyframe(-1)
if not sk:
sk = self.skeleton
self.frames[ self.position ] = copy.deepcopy(sk)
return True
return False
def remove_keyframe(self):
if self.position in self.frames:
del self.frames[ self.position ]
return True
return False
def insert_time(self, delta):
new_frames = {}
for t, sk in sorted(self.frames.items()):
if t >= self.position:
t += delta
new_frames[ t ] = sk
self.frames = new_frames
def delete_time(self, delta):
for t in self.frames:
if self.position <= t < self.position + delta:
return False
new_frames = {}
for t, sk in sorted(self.frames.items()):
if t > self.position:
t -= delta
new_frames[ t ] = sk
self.frames = new_frames
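# Usage sketch (hedged, assuming cocos2d's usual CocosNode/Action conventions):
# build a two-bone skeleton, record two keyframes and play them back on a
# simple colour skin. Bone sizes, angles and the colour are made-up values.
def _example_animated_skin():
    root = Bone('torso', 60, 0, (0, 0)).add(Bone('arm', 40, 45, (0, -60)))
    skeleton = Skeleton(root)
    skin = ColorSkin(skeleton, (255, 0, 0, 255))
    anim = Animation(skeleton)
    anim.insert_keyframe()                          # keyframe at t=0: copy of the current pose
    anim.move_position(1.0)
    anim.insert_keyframe()                          # keyframe at t=1.0, copied from t=0
    anim.frames[1.0].bone.rotate(math.radians(45))  # then edit that keyframe's pose
    skin.do(Animate(anim))                          # plays back, interpolating between keyframes
    return skin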
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Visualization API."""
import sys
import tensorflow as tf
def _is_colab():
return "google.colab" in sys.modules
if _is_colab():
from witwidget.notebook.colab.wit import * # pylint: disable=wildcard-import,g-import-not-at-top
else:
from witwidget.notebook.jupyter.wit import * # pylint: disable=wildcard-import,g-import-not-at-top
class WitConfigBuilder(object):
"""Configuration builder for WitWidget settings."""
def __init__(self, examples, feature_names=None):
"""Constructs the WitConfigBuilder object.
Args:
examples: A list of tf.Example or tf.SequenceExample proto objects, or
raw JSON objects. JSON is allowed only for AI Platform-hosted models (see
'set_ai_platform_model' and 'set_compare_ai_platform_model' methods).
These are the examples that will be displayed in WIT. If no model to
infer these examples with is specified through the methods on this class,
then WIT will display the examples for exploration, but no model inference
will be performed by the tool.
feature_names: Optional, defaults to None. If examples are provided as
JSON lists of numbers (not as feature dictionaries), then this array
maps indices in the feature value lists to human-readable names of those
features, used for display purposes.
"""
self.config = {}
self.set_model_type('classification')
self.set_label_vocab([])
self.set_examples(examples, feature_names)
def build(self):
"""Returns the configuration set through use of this builder object.
Used by WitWidget to set the settings on an instance of the What-If Tool.
"""
return self.config
def store(self, key, value):
self.config[key] = value
def delete(self, key):
if key in self.config:
del self.config[key]
def set_examples(self, examples, feature_names=None):
"""Sets the examples to be displayed in WIT.
Args:
examples: List of example protos or JSON objects.
feature_names: Optional, defaults to None. If examples are provided as
JSON lists of numbers (not as feature dictionaries), then this array
maps indices in the feature value lists to human-readable names of those
features, used just for display purposes.
Returns:
self, in order to enable method chaining.
"""
self.store('examples', examples)
if feature_names:
self.store('feature_names', feature_names)
if len(examples) > 0 and not (
isinstance(examples[0], tf.train.Example) or
isinstance(examples[0], tf.train.SequenceExample)):
self._set_uses_json_input(True)
if isinstance(examples[0], list):
self._set_uses_json_list(True)
elif len(examples) > 0:
self.store('are_sequence_examples',
isinstance(examples[0], tf.train.SequenceExample))
return self
def set_model_type(self, model):
"""Sets the type of the model being used for inference.
Args:
model: The model type, such as "classification" or "regression".
The model type defaults to "classification".
Returns:
self, in order to enable method chaining.
"""
self.store('model_type', model)
return self
def set_inference_address(self, address):
"""Sets the inference address for model inference through TF Serving.
Args:
address: The address of the served model, including port, such as
"localhost:8888".
Returns:
self, in order to enable method chaining.
"""
self.store('inference_address', address)
return self
def set_model_name(self, name):
"""Sets the model name for model inference through TF Serving.
Setting a model name is required if inferring through a model hosted by
TF Serving.
Args:
name: The name of the model to be queried through TF Serving at the
address provided by set_inference_address.
Returns:
self, in order to enable method chaining.
"""
self.store('model_name', name)
return self
def has_model_name(self):
return 'model_name' in self.config
def set_model_version(self, version):
"""Sets the optional model version for model inference through TF Serving.
Args:
version: The string version number of the model to be queried through TF
Serving. This is optional, as TF Serving will use the latest model version
if none is provided.
Returns:
self, in order to enable method chaining.
"""
self.store('model_version', version)
return self
def set_model_signature(self, signature):
"""Sets the optional model signature for model inference through TF Serving.
Args:
signature: The string signature of the model to be queried through TF
Serving. This is optional, as TF Serving will use the default model
signature if none is provided.
Returns:
self, in order to enable method chaining.
"""
self.store('model_signature', signature)
return self
def set_compare_inference_address(self, address):
"""Sets the inference address for model inference for a second model hosted
by TF Serving.
If you wish to compare the results of two models in WIT, use this method
to set up the details of the second model.
Args:
address: The address of the served model, including port, such as
"localhost:8888".
Returns:
self, in order to enable method chaining.
"""
self.store('inference_address_2', address)
return self
def set_compare_model_name(self, name):
"""Sets the model name for a second model hosted by TF Serving.
If you wish to compare the results of two models in WIT, use this method
to set up the details of the second model.
Setting a model name is required if inferring through a model hosted by
TF Serving.
Args:
name: The name of the model to be queried through TF Serving at the
address provided by set_compare_inference_address.
Returns:
self, in order to enable method chaining.
"""
self.store('model_name_2', name)
return self
def has_compare_model_name(self):
return 'model_name_2' in self.config
def set_compare_model_version(self, version):
"""Sets the optional model version for a second model hosted by TF Serving.
If you wish to compare the results of two models in WIT, use this method
to set up the details of the second model.
Args:
version: The string version number of the model to be queried through TF
Serving. This is optional, as TF Serving will use the latest model version
if none is provided.
Returns:
self, in order to enable method chaining.
"""
self.store('model_version_2', version)
return self
def set_compare_model_signature(self, signature):
"""Sets the optional model signature for a second model hosted by TF
Serving.
If you wish to compare the results of two models in WIT, use this method
to set up the details of the second model.
Args:
signature: The string signature of the model to be queried through TF
Serving. This is optional, as TF Serving will use the default model
signature if none is provided.
Returns:
self, in order to enable method chaining.
"""
self.store('model_signature_2', signature)
return self
def set_uses_predict_api(self, predict):
"""Indicates that the model uses the Predict API, as opposed to the
Classification or Regression API.
If the model doesn't use the standard Classification or Regression APIs
provided through TF Serving, but instead uses the more flexible Predict API,
then use this method to indicate that. If this is true, then use the
set_predict_input_tensor and set_predict_output_tensor methods to indicate
the names of the tensors that are used as the input and output for the
models provided in order to perform the appropriate inference request.
Args:
predict: True if the model or models use the Predict API.
Returns:
self, in order to enable method chaining.
"""
self.store('uses_predict_api', predict)
return self
def set_max_classes_to_display(self, max_classes):
"""Sets the maximum number of class results to display for multiclass
classification models.
When using WIT with a multiclass model with a large number of possible
classes, it can be helpful to restrict WIT to only display some smaller
number of the highest-scoring classes as inference results for any given
example. This method sets that limit.
Args:
max_classes: The maximum number of classes to display for inference
results for multiclass classification models.
Returns:
self, in order to enable method chaining.
"""
self.store('max_classes', max_classes)
return self
def set_multi_class(self, multiclass):
"""Sets if the model(s) to query are mutliclass classification models.
Args:
multiclass: True if the model or models are multiclass classification
models. Defaults to false.
Returns:
self, in order to enable method chaining.
"""
self.store('multiclass', multiclass)
return self
def set_predict_input_tensor(self, tensor):
"""Sets the name of the input tensor for models that use the Predict API.
If using WIT with set_uses_predict_api(True), then call this to specify
the name of the input tensor of the model or models that accepts the
example proto for inference.
Args:
tensor: The name of the input tensor.
Returns:
self, in order to enable method chaining.
"""
self.store('predict_input_tensor', tensor)
return self
def set_predict_output_tensor(self, tensor):
"""Sets the name of the output tensor for models that need output parsing.
If using WIT with set_uses_predict_api(True), then call this to specify
the name of the output tensor of the model or models that returns the
inference results to be explored by WIT.
If using an AI Platform model which returns multiple prediction
results in a dictionary, this method specifies the key corresponding to
the inference results to be explored by WIT.
Args:
tensor: The name of the output tensor.
Returns:
self, in order to enable method chaining.
"""
self.store('predict_output_tensor', tensor)
return self
def set_label_vocab(self, vocab):
"""Sets the string value of numeric labels for classification models.
For classification models, the model returns scores for each class ID
number (classes 0 and 1 for binary classification models). In order for
WIT to visually display the results in a more-readable way, you can specify
string labels for each class ID.
Args:
vocab: A list of strings, where the string at each index corresponds to
the label for that class ID. For example ['<=50K', '>50K'] for the UCI
census binary classification task.
Returns:
self, in order to enable method chaining.
"""
self.store('label_vocab', vocab)
return self
def set_estimator_and_feature_spec(self, estimator, feature_spec):
"""Sets the model for inference as a TF Estimator.
Instead of using TF Serving to host a model for WIT to query, WIT can
directly use a TF Estimator object as the model to query. In order to
accomplish this, a feature_spec must also be provided to parse the
example protos for input into the estimator.
Args:
estimator: The TF Estimator which will be used for model inference.
feature_spec: The feature_spec object which will be used for example
parsing.
Returns:
self, in order to enable method chaining.
"""
# If custom function is set, remove it before setting estimator
self.delete('custom_predict_fn')
self.store('estimator_and_spec', {
'estimator': estimator, 'feature_spec': feature_spec})
self.set_inference_address('estimator')
# If no model name has been set, give a default
if not self.has_model_name():
self.set_model_name('1')
return self
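# Shape sketch (illustrative, not taken from this file): a feature_spec is the
# same dict you would pass to tf.io.parse_example, for example
#   {'age': tf.io.FixedLenFeature([], tf.float32),
#    'occupation': tf.io.VarLenFeature(tf.string)}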
def set_compare_estimator_and_feature_spec(self, estimator, feature_spec):
"""Sets a second model for inference as a TF Estimator.
If you wish to compare the results of two models in WIT, use this method
to set up the details of the second model.
Instead of using TF Serving to host a model for WIT to query, WIT can
directly use a TF Estimator object as the model to query. In order to
accomplish this, a feature_spec must also be provided to parse the
example protos for input into the estimator.
Args:
estimator: The TF Estimator which will be used for model inference.
feature_spec: The feature_spec object which will be used for example
parsing.
Returns:
self, in order to enable method chaining.
"""
# If custom function is set, remove it before setting estimator
self.delete('compare_custom_predict_fn')
self.store('compare_estimator_and_spec', {
'estimator': estimator, 'feature_spec': feature_spec})
self.set_compare_inference_address('estimator')
# If no model name has been set, give a default
if not self.has_compare_model_name():
self.set_compare_model_name('2')
return self
def set_custom_predict_fn(self, predict_fn):
"""Sets a custom function for inference.
Instead of using TF Serving to host a model for WIT to query, WIT can
directly use a custom function as the model to query. In this case, the
provided function should accept example protos and return:
- For classification: A 2D list of numbers. The first dimension is for
each example being predicted. The second dimension are the probabilities
for each class ID in the prediction.
- For regression: A 1D list of numbers, with a regression score for each
example being predicted.
Optionally, if attributions or other prediction-time information
can be returned by the model with each prediction, then this method
can return a dict with the key 'predictions' containing the predictions
result list described above, and with the key 'attributions' containing
a list of attributions for each example that was predicted.
For each example, the attributions list should contain a dict mapping
input feature names to attribution values for that feature on that example.
The attribution value can be one of these things:
- A single number representing the attribution for the entire feature
- A list of numbers representing the attribution to each value in the
feature for multivalent features - such as attributions to individual
pixels in an image or numbers in a list of numbers.
This dict can contain any other keys, with their values being a list of
prediction-time strings or numbers for each example being predicted. These
values will be displayed in WIT as extra information for each example,
usable in the same ways by WIT as normal input features (such as for
creating plots and slicing performance data).
Args:
predict_fn: The custom python function which will be used for model
inference.
Returns:
self, in order to enable method chaining.
"""
# If estimator is set, remove it before setting predict_fn
self.delete('estimator_and_spec')
self.store('custom_predict_fn', predict_fn)
self.set_inference_address('custom_predict_fn')
# If no model name has been set, give a default
if not self.has_model_name():
self.set_model_name('1')
return self
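# Shape sketch (illustrative only, feature names are made up): for a binary
# classifier and two example protos, a conforming predict_fn could return
#   [[0.9, 0.1], [0.2, 0.8]]
# or, when attributions are available,
#   {'predictions': [[0.9, 0.1], [0.2, 0.8]],
#    'attributions': [{'age': 0.3, 'hours-per-week': -0.1},
#                     {'age': 0.05, 'hours-per-week': 0.2}]}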
def set_compare_custom_predict_fn(self, predict_fn):
"""Sets a second custom function for inference.
If you wish to compare the results of two models in WIT, use this method
to set up the details of the second model.
Instead of using TF Serving to host a model for WIT to query, WIT can
directly use a custom function as the model to query. In this case, the
provided function should accept example protos and return:
- For classification: A 2D list of numbers. The first dimension is for
each example being predicted. The second dimension are the probabilities
for each class ID in the prediction.
- For regression: A 1D list of numbers, with a regression score for each
example being predicted.
Optionally, if attributions or other prediction-time information
can be returned by the model with each prediction, then this method
can return a dict with the key 'predictions' containing the predictions
result list described above, and with the key 'attributions' containing
a list of attributions for each example that was predicted.
For each example, the attributions list should contain a dict mapping
input feature names to attribution values for that feature on that example.
The attribution value can be one of these things:
- A single number representing the attribution for the entire feature
- A list of numbers representing the attribution to each value in the
feature for multivalent features - such as attributions to individual
pixels in an image or numbers in a list of numbers.
- A 2D list for sparse feature attribution. Index 0 contains a list of
feature values that there are attribution scores for. Index 1 contains
a list of attribution values for the corresponding feature values in
the first list.
This dict can contain any other keys, with their values being a list of
prediction-time strings or numbers for each example being predicted. These
values will be displayed in WIT as extra information for each example,
usable in the same ways by WIT as normal input features (such as for
creating plots and slicing performance data).
Args:
predict_fn: The custom python function which will be used for model
inference.
Returns:
self, in order to enable method chaining.
"""
# If estimator is set, remove it before setting predict_fn
self.delete('compare_estimator_and_spec')
self.store('compare_custom_predict_fn', predict_fn)
self.set_compare_inference_address('custom_predict_fn')
# If no model name has been set, give a default
if not self.has_compare_model_name():
self.set_compare_model_name('2')
return self
def set_custom_distance_fn(self, distance_fn):
"""Sets a custom function for distance computation.
WIT can directly use a custom function for all distance computations within
the tool. In this case, the provided function should accept a query example
proto and a list of example protos to compute the distance against and
return a 1D list of numbers containing the distances.
Args:
distance_fn: The python function which will be used for distance
computation.
Returns:
self, in order to enable method chaining.
"""
if distance_fn is None:
self.delete('custom_distance_fn')
else:
self.store('custom_distance_fn', distance_fn)
return self
def set_ai_platform_model(
self, project, model, version=None, force_json_input=None,
adjust_prediction=None, adjust_example=None, adjust_attribution=None,
service_name='ml', service_version='v1', get_explanations=True,
batch_size=500, api_key=None):
"""Sets the model information for a model served by AI Platform.
AI Platform Prediction is a Google Cloud serving platform.
Args:
project: The name of the AI Platform Prediction project.
model: The name of the AI Platform Prediction model.
version: Optional, the version of the AI Platform Prediction model.
force_json_input: Optional. If True and examples are provided as
tf.Example protos, convert them to raw JSON objects before sending them
for inference to this model.
adjust_prediction: Optional. If not None then this function takes the
prediction output from the model for a single example and converts it to
the appropriate format - a regression score or a list of class scores. Only
necessary if the model doesn't already abide by this format.
adjust_example: Optional. If not None then this function takes an example
to run prediction on and converts it to the format expected by the model.
Necessary for example if the served model expects a single data value to
run inference on instead of a list or dict of values.
adjust_attribution: Optional. If not None and the model returns attribution
information, then this function takes the attribution information for an
example and converts it to the format expected by the tool, which is a
dictionary of input feature names to attribution scores. Usually necessary
if making use of adjust_example and the model returns attribution results.
service_name: Optional. Name of the AI Platform Prediction service. Defaults
to 'ml'.
service_version: Optional. Version of the AI Platform Prediction service. Defaults
to 'v1'.
get_explanations: Optional. If a model is deployed with explanations,
then this specifies if explanations will be calculated and displayed.
Defaults to True.
batch_size: Optional. Sets the individual batch size to send for
prediction. Defaults to 500.
api_key: Optional. A generated API key to send with the requests to AI
Platform.
Returns:
self, in order to enable method chaining.
"""
self.set_inference_address(project)
self.set_model_name(model)
self.store('use_aip', True)
self.store('aip_service_name', service_name)
self.store('aip_service_version', service_version)
self.store('aip_batch_size', batch_size)
self.store('get_explanations', get_explanations)
if version is not None:
self.set_model_signature(version)
if force_json_input:
self.store('force_json_input', True)
if adjust_prediction:
self.store('adjust_prediction', adjust_prediction)
if adjust_example:
self.store('adjust_example', adjust_example)
if adjust_attribution:
self.store('adjust_attribution', adjust_attribution)
if api_key:
self.store('aip_api_key', api_key)
return self
def set_compare_ai_platform_model(
self, project, model, version=None, force_json_input=None,
adjust_prediction=None, adjust_example=None, adjust_attribution=None,
service_name='ml', service_version='v1', get_explanations=True,
batch_size=500, api_key=None):
"""Sets the model information for a second model served by AI Platform.
AI Platform Prediction is a Google Cloud serving platform.
Args:
project: The name of the AI Platform Prediction project.
model: The name of the AI Platform Prediction model.
version: Optional, the version of the AI Platform Prediction model.
force_json_input: Optional. If True and examples are provided as
tf.Example protos, convert them to raw JSON objects before sending them
for inference to this model.
adjust_prediction: Optional. If not None then this function takes the
prediction output from the model for a single example and converts it to
the appropriate format - a regression score or a list of class scores. Only
necessary if the model doesn't already abide by this format.
adjust_example: Optional. If not None then this function takes an example
to run prediction on and converts it to the format expected by the model.
Necessary for example if the served model expects a single data value to
run inference on instead of a list or dict of values.
adjust_attribution: Optional. If not None and the model returns attribution
information, then this function takes the attribution information for an
example and converts it to the format expected by the tool, which is a
dictionary of input feature names to attribution scores. Usually necessary
if making use of adjust_example and the model returns attribution results.
service_name: Optional. Name of the AI Platform Prediction service. Defaults
to 'ml'.
service_version: Optional. Version of the AI Platform Prediction service. Defaults
to 'v1'.
get_explanations: Optional. If a model is deployed with explanations,
then this specifies if explanations will be calculated and displayed.
Defaults to True.
batch_size: Optional. Sets the individual batch size to send for
prediction. Defaults to 500.
api_key: Optional. A generated API key to send with the requests to AI
Platform.
Returns:
self, in order to enable method chaining.
"""
self.set_compare_inference_address(project)
self.set_compare_model_name(model)
self.store('compare_use_aip', True)
self.store('compare_aip_service_name', service_name)
self.store('compare_aip_service_version', service_version)
self.store('compare_aip_batch_size', batch_size)
self.store('compare_get_explanations', get_explanations)
if version is not None:
self.set_compare_model_signature(version)
if force_json_input:
self.store('compare_force_json_input', True)
if adjust_prediction:
self.store('compare_adjust_prediction', adjust_prediction)
if adjust_example:
self.store('compare_adjust_example', adjust_example)
if adjust_attribution:
self.store('compare_adjust_attribution', adjust_attribution)
if api_key:
self.store('compare_aip_api_key', api_key)
return self
def set_target_feature(self, target):
"""Sets the name of the target feature in the provided examples.
If the provided examples contain a feature that represents the target
that the model is trying to predict, it can be specified by this method.
This is necessary for AI Platform models so that the target feature isn't
sent to the model for prediction, which can cause model inference errors.
Args:
target: The name of the feature in the examples that represents the value
that the model is trying to predict.
Returns:
self, in order to enable method chaining.
"""
self.store('target_feature', target)
return self
def _set_uses_json_input(self, is_json):
self.store('uses_json_input', is_json)
return self
def _set_uses_json_list(self, is_list):
self.store('uses_json_list', is_list)
return self
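# Usage sketch (hedged): typical chaining of the builder before handing the
# result to WitWidget. The serving address, model name and label vocabulary
# below are placeholders, not values taken from this module.
def _example_wit_config(examples):
  return (WitConfigBuilder(examples)
          .set_inference_address('localhost:8500')   # hypothetical TF Serving host
          .set_model_name('my_model')                # hypothetical model name
          .set_model_type('classification')
          .set_label_vocab(['negative', 'positive'])
          .build())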
|
|
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Policy engine for neutron. Largely copied from nova.
"""
import collections
import logging as std_logging
import re
from oslo_config import cfg
from oslo_log import log as logging
from oslo_policy import policy
from oslo_utils import excutils
from oslo_utils import importutils
import six
from neutron.api.v2 import attributes
from neutron.common import constants as const
from neutron.common import exceptions
from neutron.i18n import _LE, _LW
LOG = logging.getLogger(__name__)
_ENFORCER = None
ADMIN_CTX_POLICY = 'context_is_admin'
ADVSVC_CTX_POLICY = 'context_is_advsvc'
def reset():
global _ENFORCER
if _ENFORCER:
_ENFORCER.clear()
_ENFORCER = None
def init(conf=cfg.CONF, policy_file=None):
"""Init an instance of the Enforcer class."""
global _ENFORCER
if not _ENFORCER:
_ENFORCER = policy.Enforcer(conf, policy_file=policy_file)
_ENFORCER.load_rules(True)
def refresh(policy_file=None):
"""Reset policy and init a new instance of Enforcer."""
reset()
init(policy_file=policy_file)
def get_resource_and_action(action, pluralized=None):
"""Extract resource and action (write, read) from api operation."""
data = action.split(':', 1)[0].split('_', 1)
resource = pluralized or ("%ss" % data[-1])
return (resource, data[0] != 'get')
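# Illustrative behaviour of the helper above:
#   get_resource_and_action("create_network") -> ("networks", True)
#   get_resource_and_action("get_port")       -> ("ports", False)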
def set_rules(policies, overwrite=True):
"""Set rules based on the provided dict of rules.
:param policies: New policies to use. It should be an instance of dict.
:param overwrite: Whether to overwrite current rules or update them
with the new rules.
"""
LOG.debug("Loading policies from file: %s", _ENFORCER.policy_path)
init()
_ENFORCER.set_rules(policies, overwrite)
def _is_attribute_explicitly_set(attribute_name, resource, target, action):
"""Verify that an attribute is present and is explicitly set."""
if 'update' in action:
# In the case of update, the function should not pay attention to a
# default value of an attribute, but check whether it was explicitly
# marked as being updated instead.
return (attribute_name in target[const.ATTRIBUTES_TO_UPDATE] and
target[attribute_name] is not attributes.ATTR_NOT_SPECIFIED)
return ('default' in resource[attribute_name] and
attribute_name in target and
target[attribute_name] is not attributes.ATTR_NOT_SPECIFIED and
target[attribute_name] != resource[attribute_name]['default'])
def _should_validate_sub_attributes(attribute, sub_attr):
"""Verify that sub-attributes are iterable and should be validated."""
validate = attribute.get('validate')
return (validate and isinstance(sub_attr, collections.Iterable) and
any([k.startswith('type:dict') and
v for (k, v) in six.iteritems(validate)]))
def _build_subattr_match_rule(attr_name, attr, action, target):
"""Create the rule to match for sub-attribute policy checks."""
# TODO(salv-orlando): Instead of relying on validator info, introduce
# typing for API attributes
# Expect a dict as type descriptor
validate = attr['validate']
key = filter(lambda k: k.startswith('type:dict'), validate.keys())
if not key:
LOG.warn(_LW("Unable to find data type descriptor for attribute %s"),
attr_name)
return
data = validate[key[0]]
if not isinstance(data, dict):
LOG.debug("Attribute type descriptor is not a dict. Unable to "
"generate any sub-attr policy rule for %s.",
attr_name)
return
sub_attr_rules = [policy.RuleCheck('rule', '%s:%s:%s' %
(action, attr_name,
sub_attr_name)) for
sub_attr_name in data if sub_attr_name in
target[attr_name]]
return policy.AndCheck(sub_attr_rules)
def _process_rules_list(rules, match_rule):
"""Recursively walk a policy rule to extract a list of match entries."""
if isinstance(match_rule, policy.RuleCheck):
rules.append(match_rule.match)
elif isinstance(match_rule, policy.AndCheck):
for rule in match_rule.rules:
_process_rules_list(rules, rule)
return rules
def _build_match_rule(action, target, pluralized):
"""Create the rule to match for a given action.
The policy rule to be matched is built in the following way:
1) add entries for matching permission on objects
2) add an entry for the specific action (e.g.: create_network)
3) add an entry for attributes of a resource for which the action
is being executed (e.g.: create_network:shared)
4) add an entry for sub-attributes of a resource for which the
action is being executed
(e.g.: create_router:external_gateway_info:network_id)
"""
match_rule = policy.RuleCheck('rule', action)
resource, is_write = get_resource_and_action(action, pluralized)
# Attribute-based checks shall not be enforced on GETs
if is_write:
# assigning to variable with short name for improving readability
res_map = attributes.RESOURCE_ATTRIBUTE_MAP
if resource in res_map:
for attribute_name in res_map[resource]:
if _is_attribute_explicitly_set(attribute_name,
res_map[resource],
target, action):
attribute = res_map[resource][attribute_name]
if 'enforce_policy' in attribute:
attr_rule = policy.RuleCheck('rule', '%s:%s' %
(action, attribute_name))
# Build match entries for sub-attributes
if _should_validate_sub_attributes(
attribute, target[attribute_name]):
attr_rule = policy.AndCheck(
[attr_rule, _build_subattr_match_rule(
attribute_name, attribute,
action, target)])
match_rule = policy.AndCheck([match_rule, attr_rule])
return match_rule
# This check is registered as 'tenant_id' so that it can override
# GenericCheck which was used for validating parent resource ownership.
# This will prevent us from having to handle backward compatibility
# for policy.json
# TODO(salv-orlando): Reinstate GenericCheck for simple tenant_id checks
@policy.register('tenant_id')
class OwnerCheck(policy.Check):
"""Resource ownership check.
This check verifies the owner of the current resource, or of another
resource referenced by the one under analysis.
In the former case it falls back to a regular GenericCheck, whereas
in the latter case it leverages the plugin to load the referenced
resource and perform the check.
"""
def __init__(self, kind, match):
# Process the match
try:
self.target_field = re.findall(r'^\%\((.*)\)s$',
match)[0]
except IndexError:
err_reason = (_("Unable to identify a target field from:%s. "
"Match should be in the form %%(<field_name>)s") %
match)
LOG.exception(err_reason)
raise exceptions.PolicyInitError(
policy="%s:%s" % (kind, match),
reason=err_reason)
super(OwnerCheck, self).__init__(kind, match)
def __call__(self, target, creds, enforcer):
if self.target_field not in target:
# policy needs a plugin check
# target field is in the form resource:field
# however if they're not separated by a colon, use an underscore
# as a separator for backward compatibility
def do_split(separator):
parent_res, parent_field = self.target_field.split(
separator, 1)
return parent_res, parent_field
for separator in (':', '_'):
try:
parent_res, parent_field = do_split(separator)
break
except ValueError:
LOG.debug("Unable to find ':' as separator in %s.",
self.target_field)
else:
# If we are here split failed with both separators
err_reason = (_("Unable to find resource name in %s") %
self.target_field)
LOG.exception(err_reason)
raise exceptions.PolicyCheckError(
policy="%s:%s" % (self.kind, self.match),
reason=err_reason)
parent_foreign_key = attributes.RESOURCE_FOREIGN_KEYS.get(
"%ss" % parent_res, None)
if not parent_foreign_key:
err_reason = (_("Unable to verify match:%(match)s as the "
"parent resource: %(res)s was not found") %
{'match': self.match, 'res': parent_res})
LOG.exception(err_reason)
raise exceptions.PolicyCheckError(
policy="%s:%s" % (self.kind, self.match),
reason=err_reason)
# NOTE(salv-orlando): This check currently assumes the parent
# resource is handled by the core plugin. It might be worth
# having a way to map resources to plugins so to make this
# check more general
# NOTE(ihrachys): if import is put in global, circular
# import failure occurs
manager = importutils.import_module('neutron.manager')
f = getattr(manager.NeutronManager.get_instance().plugin,
'get_%s' % parent_res)
# f *must* exist, if not found it is better to let neutron
# explode. Check will be performed with admin context
context = importutils.import_module('neutron.context')
try:
data = f(context.get_admin_context(),
target[parent_foreign_key],
fields=[parent_field])
target[self.target_field] = data[parent_field]
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Policy check error while calling %s!'),
f)
match = self.match % target
if self.kind in creds:
return match == unicode(creds[self.kind])
return False
@policy.register('field')
class FieldCheck(policy.Check):
def __init__(self, kind, match):
# Process the match
resource, field_value = match.split(':', 1)
field, value = field_value.split('=', 1)
super(FieldCheck, self).__init__(kind, '%s:%s:%s' %
(resource, field, value))
# Value might need conversion - we need help from the attribute map
try:
attr = attributes.RESOURCE_ATTRIBUTE_MAP[resource][field]
conv_func = attr['convert_to']
except KeyError:
conv_func = lambda x: x
self.field = field
self.value = conv_func(value)
def __call__(self, target_dict, cred_dict, enforcer):
target_value = target_dict.get(self.field)
# target_value might be a boolean, explicitly compare with None
if target_value is None:
LOG.debug("Unable to find requested field: %(field)s in target: "
"%(target_dict)s",
{'field': self.field, 'target_dict': target_dict})
return False
return target_value == self.value
def _prepare_check(context, action, target, pluralized):
"""Prepare rule, target, and credentials for the policy engine."""
# Compare with None to distinguish case in which target is {}
if target is None:
target = {}
match_rule = _build_match_rule(action, target, pluralized)
credentials = context.to_dict()
return match_rule, target, credentials
def log_rule_list(match_rule):
if LOG.isEnabledFor(std_logging.DEBUG):
rules = _process_rules_list([], match_rule)
LOG.debug("Enforcing rules: %s", rules)
def check(context, action, target, plugin=None, might_not_exist=False,
pluralized=None):
"""Verifies that the action is valid on the target in this context.
:param context: neutron context
:param action: string representing the action to be checked
this should be colon separated for clarity.
:param target: dictionary representing the object of the action
for object creation this should be a dictionary representing the
location of the object e.g. ``{'project_id': context.project_id}``
:param plugin: currently unused and deprecated.
Kept for backward compatibility.
:param might_not_exist: If True the policy check is skipped (and the
function returns True) if the specified policy does not exist.
Defaults to false.
:param pluralized: pluralized case of resource
e.g. firewall_policy -> pluralized = "firewall_policies"
:return: Returns True if access is permitted else False.
"""
# If we already know the context has admin rights do not perform an
# additional check and authorize the operation
if context.is_admin:
return True
if might_not_exist and not (_ENFORCER.rules and action in _ENFORCER.rules):
return True
match_rule, target, credentials = _prepare_check(context,
action,
target,
pluralized)
result = _ENFORCER.enforce(match_rule,
target,
credentials,
pluralized=pluralized)
# logging applied rules in case of failure
if not result:
log_rule_list(match_rule)
return result
def enforce(context, action, target, plugin=None, pluralized=None):
"""Verifies that the action is valid on the target in this context.
:param context: neutron context
:param action: string representing the action to be checked
this should be colon separated for clarity.
:param target: dictionary representing the object of the action
for object creation this should be a dictionary representing the
location of the object e.g. ``{'project_id': context.project_id}``
:param plugin: currently unused and deprecated.
Kept for backward compatibility.
:param pluralized: pluralized case of resource
e.g. firewall_policy -> pluralized = "firewall_policies"
:raises oslo_policy.policy.PolicyNotAuthorized:
if verification fails.
"""
# If we already know the context has admin rights do not perform an
# additional check and authorize the operation
if context.is_admin:
return True
rule, target, credentials = _prepare_check(context,
action,
target,
pluralized)
try:
result = _ENFORCER.enforce(rule, target, credentials, action=action,
do_raise=True)
except policy.PolicyNotAuthorized:
with excutils.save_and_reraise_exception():
log_rule_list(rule)
LOG.debug("Failed policy check for '%s'", action)
return result
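# Illustrative usage sketch (the action names and target dict below are
# hypothetical examples, not prescribed by this module):
#
#     if check(context, 'get_network', {'tenant_id': net['tenant_id']}):
#         ...  # caller decides how to expose the resource
#     enforce(context, 'create_network', network)  # raises PolicyNotAuthorized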
def check_is_admin(context):
"""Verify context has admin rights according to policy settings."""
init()
# the target is user-self
credentials = context.to_dict()
if ADMIN_CTX_POLICY not in _ENFORCER.rules:
return False
return _ENFORCER.enforce(ADMIN_CTX_POLICY, credentials, credentials)
def check_is_advsvc(context):
"""Verify context has advsvc rights according to policy settings."""
init()
# the target is user-self
credentials = context.to_dict()
if ADVSVC_CTX_POLICY not in _ENFORCER.rules:
return False
return _ENFORCER.enforce(ADVSVC_CTX_POLICY, credentials, credentials)
def _extract_roles(rule, roles):
if isinstance(rule, policy.RoleCheck):
roles.append(rule.match.lower())
elif isinstance(rule, policy.RuleCheck):
_extract_roles(_ENFORCER.rules[rule.match], roles)
elif hasattr(rule, 'rules'):
for rule in rule.rules:
_extract_roles(rule, roles)
|
|
#! /usr/bin/env python
import os
import subprocess
import re
import sys
import fnmatch
import commands
from collections import defaultdict
from sourcefile import SourceFile
from optparse import OptionParser
lint_root = os.path.dirname(os.path.abspath(__file__))
repo_root = os.path.dirname(os.path.dirname(lint_root))
def git(command, *args):
args = list(args)
proc_kwargs = {"cwd": repo_root}
command_line = ["git", command] + args
try:
return subprocess.check_output(command_line, **proc_kwargs)
except subprocess.CalledProcessError:
raise
def iter_files(flag=False, folder=""):
    if folder != "" and folder is not None:
        os.chdir(repo_root)
        for pardir, subdir, files in os.walk(folder):
for item in subdir + files:
if not os.path.isdir(os.path.join(pardir, item)):
yield os.path.join(pardir, item)
os.chdir(lint_root)
else:
if not flag:
os.chdir(repo_root)
for pardir, subdir, files in os.walk(repo_root):
for item in subdir + files:
if not os.path.isdir(os.path.join(pardir, item).split(repo_root)[1]):
yield os.path.join(pardir, item).split(repo_root + "/")[1]
os.chdir(lint_root)
else:
for item in git("diff", "--name-status", "HEAD~1").strip().split("\n"):
status = item.split("\t")
if status[0].strip() != "D":
yield status[1]
def check_path_length(path):
if len(path) + 1 > 150:
return [("PATH LENGTH", "%s longer than maximum path length (%d > 150)" % (path, len(path) + 1), None)]
return []
def check_filename_space(path):
bname = os.path.basename(path)
if re.compile(" ").search(bname):
        return [("FILENAME WHITESPACE", "Filename of %s contains whitespace" % path, None)]
return []
def check_format(path):
bname = os.path.basename(path)
lints = {#"python": ["pylint", "-r no --disable=all --enable=E *.py"],
"json": ["jsonlint", "-v *.json"],
"xml": ["xmllint", "--noout *.xml"]}
for key in lints:
if fnmatch.fnmatch(bname, lints[key][1][lints[key][1].index("*"):]):
returncode = commands.getstatusoutput("%s " % lints[key][0] + lints[key][1][:lints[key][1].index("*")] + os.path.join(repo_root, path))
if returncode[0] != 0:
                return [("INVALID %s FORMAT" % key.upper(), "%s contains invalid %s format" % (path, key), None)]
return []
def check_permission(path):
bname = os.path.basename(path)
if not re.compile('\.py$|\.sh$').search(bname):
if os.access(os.path.join(repo_root, path), os.X_OK):
            return [("UNNECESSARY EXECUTABLE PERMISSION", "%s has unnecessary executable permission" % path, None)]
return []
def parse_whitelist_file(filename):
data = defaultdict(lambda:defaultdict(set))
with open(filename) as f:
for line in f:
line = line.strip()
if not line or line.startswith("#"):
continue
parts = [item.strip() for item in line.split(":")]
if len(parts) == 2:
parts.append(None)
else:
parts[-1] = int(parts[-1])
error_type, file_match, line_number = parts
data[file_match][error_type].add(line_number)
def inner(path, errors):
whitelisted = [False for item in xrange(len(errors))]
for file_match, whitelist_errors in data.iteritems():
if fnmatch.fnmatch(path, file_match):
for i, (error_type, msg, line) in enumerate(errors):
if "*" in whitelist_errors:
whitelisted[i] = True
elif error_type in whitelist_errors:
allowed_lines = whitelist_errors[error_type]
if None in allowed_lines or line in allowed_lines:
whitelisted[i] = True
return [item for i, item in enumerate(errors) if not whitelisted[i]]
return inner
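# Illustrative whitelist layout (assumed from the parser above): one entry per
# line as "ERROR TYPE: file glob" or "ERROR TYPE: file glob: line number",
# lines starting with '#' are comments, and '*' as the error type matches
# every error for that glob:
#
#     TRAILING WHITESPACE: tools/*.py: 10
#     INDENT TABS: */Makefile
#     *: thirdparty/*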
_whitelist_fn = None
def whitelist_errors(path, errors):
global _whitelist_fn
if _whitelist_fn is None:
_whitelist_fn = parse_whitelist_file(os.path.join(lint_root, "lint.whitelist"))
return _whitelist_fn(path, errors)
class Regexp(object):
pattern = None
file_extensions = None
error = None
_re = None
def __init__(self):
self._re = re.compile(self.pattern)
def applies(self, path):
return (self.file_extensions is None or
os.path.splitext(path)[1] in self.file_extensions)
def search(self, line):
return self._re.search(line)
class TrailingWhitespaceRegexp(Regexp):
pattern = " $"
error = "TRAILING WHITESPACE"
class TabsRegexp(Regexp):
pattern = "^\t"
error = "INDENT TABS"
class CRRegexp(Regexp):
pattern = "\r$"
error = "CR AT EOL"
regexps = [item() for item in
[TrailingWhitespaceRegexp,
TabsRegexp,
CRRegexp]]
def check_regexp_line(path, f):
errors = []
applicable_regexps = [regexp for regexp in regexps if regexp.applies(path)]
for i, line in enumerate(f):
for regexp in applicable_regexps:
if regexp.search(line):
errors.append((regexp.error, "%s line %i" % (path, i+1), i+1))
return errors
def check_parsed(path, f):
source_file = SourceFile(repo_root, path, "/")
errors = []
if source_file.root is None:
return [("PARSE-FAILED", "Unable to parse file %s" % path, None)]
if source_file.testharness_nodes:
if len(source_file.testharness_nodes) > 1:
errors.append(("MULTIPLE-TESTHARNESS",
"%s more than one <script src='/resources/testharness.js'>" % path, None))
if not source_file.testharnessreport_nodes:
errors.append(("MISSING-TESTHARNESSREPORT",
"%s missing <script src='/resources/testharnessreport.js'>" % path, None))
if source_file.testharnessreport_nodes:
if len(source_file.testharnessreport_nodes) > 1:
errors.append(("MULTIPLE-TESTHARNESSREPORT",
"%s more than one <script src='/resources/testharnessreport.js'>" % path, None))
if not source_file.testharness_nodes:
            errors.append(("MISSING-TESTHARNESS",
                           "%s missing <script src='/resources/testharness.js'>" % path, None))
return errors
def output_errors(errors):
for error_type, error, line_number in errors:
print "%s: %s" % (error_type, error)
def output_error_count(error_count):
if not error_count:
return
by_type = " ".join("%s: %d" % item for item in error_count.iteritems())
count = sum(error_count.values())
if count == 1:
print "There was 1 error (%s)" % (by_type,)
else:
print "There were %d errors (%s)" % (count, by_type)
def main():
global repo_root
error_count = defaultdict(int)
parser = OptionParser()
parser.add_option('-p', '--pull', dest="pull_request", action='store_true', default=False)
parser.add_option("-d", '--dir', dest="dir", help="specify the checking dir, e.g. tools")
parser.add_option("-r", '--repo', dest="repo", help="specify the repo, e.g. crosswalk-test-suite")
options, args = parser.parse_args()
    if options.repo == "" or options.repo is None:
options.repo = "crosswalk-test-suite"
repo_root = repo_root.replace("crosswalk-test-suite", options.repo)
def run_lint(path, fn, *args):
errors = whitelist_errors(path, fn(path, *args))
output_errors(errors)
for error_type, error, line in errors:
error_count[error_type] += 1
for path in iter_files(options.pull_request, options.dir):
abs_path = os.path.join(repo_root, path)
if not os.path.exists(abs_path):
continue
for path_fn in file_path_lints:
run_lint(path, path_fn)
for format_fn in file_format_lints:
run_lint(path, format_fn)
for state_fn in file_state_lints:
run_lint(path, state_fn)
if not os.path.isdir(abs_path):
if re.compile('\.html$|\.htm$|\.xhtml$|\.xhtm$').search(abs_path):
with open(abs_path) as f:
for file_fn in file_content_lints:
run_lint(path, file_fn, f)
f.seek(0)
output_error_count(error_count)
return sum(error_count.itervalues())
#file_path_lints = [check_path_length, check_filename_space]
file_path_lints = [check_filename_space]
file_content_lints = [check_regexp_line, check_parsed]
file_format_lints = [check_format]
file_state_lints = [check_permission]
if __name__ == "__main__":
error_count = main()
if error_count > 0:
sys.exit(1)
|
|
# Copyright (c) 2006-2012 Filip Wasilewski <http://en.ig.ma/>
# Copyright (c) 2012-2016 The PyWavelets Developers
# <https://github.com/PyWavelets/pywt>
# See COPYING for license details.
"""
The thresholding helper module implements the most popular signal thresholding
functions.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
__all__ = ['threshold', 'threshold_firm']
def soft(data, value, substitute=0):
data = np.asarray(data)
magnitude = np.absolute(data)
with np.errstate(divide='ignore'):
# divide by zero okay as np.inf values get clipped, so ignore warning.
thresholded = (1 - value/magnitude)
thresholded.clip(min=0, max=None, out=thresholded)
thresholded = data * thresholded
if substitute == 0:
return thresholded
else:
cond = np.less(magnitude, value)
return np.where(cond, substitute, thresholded)
def nn_garrote(data, value, substitute=0):
"""Non-negative Garrote."""
data = np.asarray(data)
magnitude = np.absolute(data)
with np.errstate(divide='ignore'):
# divide by zero okay as np.inf values get clipped, so ignore warning.
thresholded = (1 - value**2/magnitude**2)
thresholded.clip(min=0, max=None, out=thresholded)
thresholded = data * thresholded
if substitute == 0:
return thresholded
else:
cond = np.less(magnitude, value)
return np.where(cond, substitute, thresholded)
def hard(data, value, substitute=0):
data = np.asarray(data)
cond = np.less(np.absolute(data), value)
return np.where(cond, substitute, data)
def greater(data, value, substitute=0):
data = np.asarray(data)
if np.iscomplexobj(data):
raise ValueError("greater thresholding only supports real data")
return np.where(np.less(data, value), substitute, data)
def less(data, value, substitute=0):
data = np.asarray(data)
if np.iscomplexobj(data):
raise ValueError("less thresholding only supports real data")
return np.where(np.greater(data, value), substitute, data)
thresholding_options = {'soft': soft,
'hard': hard,
'greater': greater,
'less': less,
'garrote': nn_garrote,
# misspelled garrote for backwards compatibility
'garotte': nn_garrote,
}
def threshold(data, value, mode='soft', substitute=0):
"""
Thresholds the input data depending on the mode argument.
    In ``soft`` thresholding [1]_, data values with absolute value less than
    `value` are replaced with `substitute`. Data values with absolute value
    greater than or equal to the thresholding value are shrunk toward zero
    by `value`. In other words, the new value is
    ``data/np.abs(data) * np.maximum(np.abs(data) - value, 0)``.
    In ``hard`` thresholding, data values with absolute value less than
    `value` are replaced with `substitute`. Data values with absolute value
    greater than or equal to the thresholding value stay untouched.
``garrote`` corresponds to the Non-negative garrote threshold [2]_, [3]_.
It is intermediate between ``hard`` and ``soft`` thresholding. It behaves
like soft thresholding for small data values and approaches hard
thresholding for large data values.
In ``greater`` thresholding, the data is replaced with `substitute` where
data is below the thresholding value. Greater data values pass untouched.
In ``less`` thresholding, the data is replaced with `substitute` where data
is above the thresholding value. Lesser data values pass untouched.
Both ``hard`` and ``soft`` thresholding also support complex-valued data.
Parameters
----------
data : array_like
Numeric data.
value : scalar
Thresholding value.
mode : {'soft', 'hard', 'garrote', 'greater', 'less'}
Decides the type of thresholding to be applied on input data. Default
is 'soft'.
substitute : float, optional
Substitute value (default: 0).
Returns
-------
output : array
Thresholded array.
See Also
--------
threshold_firm
References
----------
.. [1] D.L. Donoho and I.M. Johnstone. Ideal Spatial Adaptation via
Wavelet Shrinkage. Biometrika. Vol. 81, No. 3, pp.425-455, 1994.
DOI:10.1093/biomet/81.3.425
.. [2] L. Breiman. Better Subset Regression Using the Nonnegative Garrote.
Technometrics, Vol. 37, pp. 373-384, 1995.
DOI:10.2307/1269730
.. [3] H-Y. Gao. Wavelet Shrinkage Denoising Using the Non-Negative
Garrote. Journal of Computational and Graphical Statistics Vol. 7,
No. 4, pp.469-488. 1998.
DOI:10.1080/10618600.1998.10474789
Examples
--------
>>> import numpy as np
>>> import pywt
>>> data = np.linspace(1, 4, 7)
>>> data
array([ 1. , 1.5, 2. , 2.5, 3. , 3.5, 4. ])
>>> pywt.threshold(data, 2, 'soft')
array([ 0. , 0. , 0. , 0.5, 1. , 1.5, 2. ])
>>> pywt.threshold(data, 2, 'hard')
array([ 0. , 0. , 2. , 2.5, 3. , 3.5, 4. ])
>>> pywt.threshold(data, 2, 'garrote')
array([ 0. , 0. , 0. , 0.9 , 1.66666667,
2.35714286, 3. ])
>>> pywt.threshold(data, 2, 'greater')
array([ 0. , 0. , 2. , 2.5, 3. , 3.5, 4. ])
>>> pywt.threshold(data, 2, 'less')
array([ 1. , 1.5, 2. , 0. , 0. , 0. , 0. ])
"""
try:
return thresholding_options[mode](data, value, substitute)
except KeyError:
# Make sure error is always identical by sorting keys
keys = ("'{0}'".format(key) for key in
sorted(thresholding_options.keys()))
raise ValueError("The mode parameter only takes values from: {0}."
.format(', '.join(keys)))
def threshold_firm(data, value_low, value_high):
"""Firm threshold.
The approach is intermediate between soft and hard thresholding [1]_. It
behaves the same as soft-thresholding for values below `value_low` and
    the same as hard-thresholding for values above `value_high`. For
intermediate values, the thresholded value is in between that corresponding
to soft or hard thresholding.
Parameters
----------
data : array-like
The data to threshold. This can be either real or complex-valued.
value_low : float
        Any values smaller than `value_low` will be set to zero.
value_high : float
Any values larger than `value_high` will not be modified.
Notes
-----
This thresholding technique is also known as semi-soft thresholding [2]_.
    For each value ``x`` in `data`, this function computes::
if np.abs(x) <= value_low:
return 0
elif np.abs(x) > value_high:
return x
elif value_low < np.abs(x) and np.abs(x) <= value_high:
return x * value_high * (1 - value_low/x)/(value_high - value_low)
``firm`` is a continuous function (like soft thresholding), but is
unbiased for large values (like hard thresholding).
If ``value_high == value_low`` this function becomes hard-thresholding.
If ``value_high`` is infinity, this function becomes soft-thresholding.
Returns
-------
val_new : array-like
The values after firm thresholding at the specified thresholds.
See Also
--------
threshold
References
----------
.. [1] H.-Y. Gao and A.G. Bruce. Waveshrink with firm shrinkage.
Statistica Sinica, Vol. 7, pp. 855-874, 1997.
.. [2] A. Bruce and H-Y. Gao. WaveShrink: Shrinkage Functions and
Thresholds. Proc. SPIE 2569, Wavelet Applications in Signal and
Image Processing III, 1995.
DOI:10.1117/12.217582
"""
if value_low < 0:
raise ValueError("value_low must be non-negative.")
if value_high < value_low:
raise ValueError(
"value_high must be greater than or equal to value_low.")
data = np.asarray(data)
magnitude = np.absolute(data)
with np.errstate(divide='ignore'):
# divide by zero okay as np.inf values get clipped, so ignore warning.
vdiff = value_high - value_low
thresholded = value_high * (1 - value_low/magnitude) / vdiff
thresholded.clip(min=0, max=None, out=thresholded)
thresholded = data * thresholded
# restore hard-thresholding behavior for values > value_high
large_vals = np.where(magnitude > value_high)
if np.any(large_vals[0]):
thresholded[large_vals] = data[large_vals]
return thresholded
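# Minimal runnable sketch (not part of the upstream module): compare the
# thresholding modes on the same ramp used in the ``threshold`` docstring above.
if __name__ == '__main__':
    x = np.linspace(1, 4, 7)            # 1, 1.5, 2, 2.5, 3, 3.5, 4
    print(threshold(x, 2, 'soft'))      # 0, 0, 0, 0.5, 1, 1.5, 2
    print(threshold(x, 2, 'hard'))      # 0, 0, 2, 2.5, 3, 3.5, 4
    # firm thresholding with value_low=1, value_high=2: zero below 1, identity
    # above 2, and the linear interpolation 2*(x - 1) in between
    print(threshold_firm(x, 1, 2))      # 0, 1, 2, 2.5, 3, 3.5, 4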
|
|
# The content of this file was contributed by leppton
# (http://mail.python.org/pipermail/patches/2006-November/020942.html) to
# ctypes project, under MIT License.
# This example shows how to use ctypes module to read all
# function names from dll export directory
import os
if os.name != "nt":
raise Exception("Wrong OS")
import ctypes as ctypes # nopep8
import ctypes.wintypes as wintypes # nopep8
def convert_cdef_to_pydef(line):
"""convert_cdef_to_pydef(line_from_c_header_file) -> python_tuple_string
'DWORD var_name[LENGTH];' -> '("var_name", DWORD*LENGTH)'
doesn't work for all valid c/c++ declarations"""
l = line[:line.find(';')].split()
if len(l) != 2:
return None
type_ = l[0]
name = l[1]
i = name.find('[')
if i != -1:
name, brac = name[:i], name[i:][1:-1]
return '("%s", %s*%s)' % (name, type_, brac)
else:
return '("%s", %s)' % (name, type_)
def convert_cdef_to_structure(cdef, name, data_dict=ctypes.__dict__):
"""convert_cdef_to_structure(struct_definition_from_c_header_file)
-> python class derived from ctypes.Structure
limited support for c/c++ syntax"""
py_str = '[\n'
for line in cdef.split('\n'):
field = convert_cdef_to_pydef(line)
if field is not None:
py_str += ' ' * 4 + field + ',\n'
py_str += ']\n'
pyarr = eval(py_str, data_dict)
class ret_val(ctypes.Structure):
_fields_ = pyarr
ret_val.__name__ = name
ret_val.__module__ = None
return ret_val
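# Illustrative sketch (a hypothetical struct, not part of the original code):
#
#     POINT = convert_cdef_to_structure("LONG x;\nLONG y;\n", 'POINT',
#                                       dict(wintypes.__dict__))
#     # POINT._fields_  ->  [('x', wintypes.LONG), ('y', wintypes.LONG)]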
# struct definitions we need to read dll file export table
winnt = (
('IMAGE_DOS_HEADER', """\
WORD e_magic;
WORD e_cblp;
WORD e_cp;
WORD e_crlc;
WORD e_cparhdr;
WORD e_minalloc;
WORD e_maxalloc;
WORD e_ss;
WORD e_sp;
WORD e_csum;
WORD e_ip;
WORD e_cs;
WORD e_lfarlc;
WORD e_ovno;
WORD e_res[4];
WORD e_oemid;
WORD e_oeminfo;
WORD e_res2[10];
LONG e_lfanew;
"""),
('IMAGE_FILE_HEADER', """\
WORD Machine;
WORD NumberOfSections;
DWORD TimeDateStamp;
DWORD PointerToSymbolTable;
DWORD NumberOfSymbols;
WORD SizeOfOptionalHeader;
WORD Characteristics;
"""),
('IMAGE_DATA_DIRECTORY', """\
DWORD VirtualAddress;
DWORD Size;
"""),
('IMAGE_OPTIONAL_HEADER32', """\
WORD Magic;
BYTE MajorLinkerVersion;
BYTE MinorLinkerVersion;
DWORD SizeOfCode;
DWORD SizeOfInitializedData;
DWORD SizeOfUninitializedData;
DWORD AddressOfEntryPoint;
DWORD BaseOfCode;
DWORD BaseOfData;
DWORD ImageBase;
DWORD SectionAlignment;
DWORD FileAlignment;
WORD MajorOperatingSystemVersion;
WORD MinorOperatingSystemVersion;
WORD MajorImageVersion;
WORD MinorImageVersion;
WORD MajorSubsystemVersion;
WORD MinorSubsystemVersion;
DWORD Win32VersionValue;
DWORD SizeOfImage;
DWORD SizeOfHeaders;
DWORD CheckSum;
WORD Subsystem;
WORD DllCharacteristics;
DWORD SizeOfStackReserve;
DWORD SizeOfStackCommit;
DWORD SizeOfHeapReserve;
DWORD SizeOfHeapCommit;
DWORD LoaderFlags;
DWORD NumberOfRvaAndSizes;
IMAGE_DATA_DIRECTORY DataDirectory[IMAGE_NUMBEROF_DIRECTORY_ENTRIES];
""",
{'IMAGE_NUMBEROF_DIRECTORY_ENTRIES': 16}),
('IMAGE_NT_HEADERS', """\
DWORD Signature;
IMAGE_FILE_HEADER FileHeader;
IMAGE_OPTIONAL_HEADER32 OptionalHeader;
"""),
('IMAGE_EXPORT_DIRECTORY', """\
DWORD Characteristics;
DWORD TimeDateStamp;
WORD MajorVersion;
WORD MinorVersion;
DWORD Name;
DWORD Base;
DWORD NumberOfFunctions;
DWORD NumberOfNames;
DWORD AddressOfFunctions;
DWORD AddressOfNames;
DWORD AddressOfNameOrdinals;
"""),
)
# Construct python ctypes.Structures from above definitions
data_dict = dict(wintypes.__dict__)
for definition in winnt:
name = definition[0]
def_str = definition[1]
if len(definition) == 3:
data_dict.update(definition[2])
type_ = convert_cdef_to_structure(def_str, name, data_dict)
data_dict[name] = type_
globals()[name] = type_
ptype = ctypes.POINTER(type_)
pname = 'P' + name
data_dict[pname] = ptype
globals()[pname] = ptype
del data_dict
del winnt
class DllException(Exception):
pass
def read_export_table(dll_name, mmap=False, use_kernel=False):
"""
read_export_table(dll_name [,mmap=False [,use_kernel=False]]])
-> list of exported names
default is to load dll into memory: dll sections are aligned to
    page boundaries, the dll entry point is called, etc...
with mmap=True dll file image is mapped to memory, Relative Virtual
Addresses (RVAs) must be mapped to real addresses manually
with use_kernel=True direct kernel32.dll calls are used,
instead of python mmap module
see http://www.windowsitlibrary.com/Content/356/11/1.html
for details on Portable Executable (PE) file format
"""
if not mmap:
dll = ctypes.cdll.LoadLibrary(dll_name)
if dll is None:
raise DllException("Cant load dll")
base_addr = dll._handle
else:
if not use_kernel:
fileH = open(dll_name)
if fileH is None:
raise DllException("Cant load dll")
import mmap
m = mmap.mmap(fileH.fileno(), 0, None, mmap.ACCESS_READ)
# id(m)+8 sucks, is there better way?
base_addr = ctypes.cast(id(m) + 8, ctypes.POINTER(ctypes.c_int))[0]
else:
kernel32 = ctypes.windll.kernel32
if kernel32 is None:
raise DllException("cant load kernel")
fileH = kernel32.CreateFileA(dll_name, 0x00120089, 1, 0, 3, 0, 0)
if fileH == 0:
raise DllException(
"Cant open, errcode = %d" %
kernel32.GetLastError())
mapH = kernel32.CreateFileMappingW(fileH, 0, 0x8000002, 0, 0, 0)
if mapH == 0:
                raise DllException(
                    "Cant mmap, errcode = %d" %
kernel32.GetLastError())
base_addr = ctypes.windll.kernel32.MapViewOfFile(
mapH, 0x4, 0, 0, 0)
if base_addr == 0:
                raise DllException(
                    "Cant mmap(2), errcode = %d" %
kernel32.GetLastError())
dbghelp = ctypes.windll.dbghelp
if dbghelp is None:
raise DllException("dbghelp.dll not installed")
pimage_nt_header = dbghelp.ImageNtHeader(base_addr)
if pimage_nt_header == 0:
raise DllException("Cant find IMAGE_NT_HEADER")
# Functions like dbghelp.ImageNtHeader above have no type information
# let's make one prototype for extra buzz
# PVOID ImageRvaToVa(PIMAGE_NT_HEADERS NtHeaders, PVOID Base,
# ULONG Rva, PIMAGE_SECTION_HEADER* LastRvaSection)
    # we use integers instead of pointers, because integers are better
# for pointer arithmetic
prototype = ctypes.WINFUNCTYPE(
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int)
paramflags = (
(1, "NtHeaders", pimage_nt_header),
(1, "Base", base_addr),
(1, "Rva"),
(1, "LastRvaSection", 0))
ImageRvaToVa = prototype(('ImageRvaToVa', dbghelp), paramflags)
def cast_rva(rva, type_):
va = base_addr + rva
if mmap and va > pimage_nt_header:
va = ImageRvaToVa(Rva=rva)
if va == 0:
raise DllException("ImageRvaToVa failed")
return ctypes.cast(va, type_)
if not mmap:
dos_header = cast_rva(0, PIMAGE_DOS_HEADER)[0]
if dos_header.e_magic != 0x5A4D:
raise DllException("IMAGE_DOS_HEADER.e_magic error")
nt_header = cast_rva(dos_header.e_lfanew, PIMAGE_NT_HEADERS)[0]
else:
nt_header = ctypes.cast(pimage_nt_header, PIMAGE_NT_HEADERS)[0]
if nt_header.Signature != 0x00004550:
raise DllException("IMAGE_NT_HEADERS.Signature error")
opt_header = nt_header.OptionalHeader
if opt_header.Magic != 0x010b:
raise DllException("IMAGE_OPTIONAL_HEADERS32.Magic error")
ret_val = []
exports_dd = opt_header.DataDirectory[0]
if opt_header.NumberOfRvaAndSizes > 0 or exports_dd != 0:
export_dir = cast_rva(
exports_dd.VirtualAddress,
PIMAGE_EXPORT_DIRECTORY)[0]
nNames = export_dir.NumberOfNames
if nNames > 0:
PNamesType = ctypes.POINTER(ctypes.c_int * nNames)
names = cast_rva(export_dir.AddressOfNames, PNamesType)[0]
for rva in names:
name = cast_rva(rva, ctypes.c_char_p).value
ret_val.append(name)
if mmap:
if use_kernel:
kernel32.UnmapViewOfFile(base_addr)
kernel32.CloseHandle(mapH)
kernel32.CloseHandle(fileH)
else:
m.close()
fileH.close()
return ret_val
if __name__ == '__main__':
import sys
if len(sys.argv) != 2:
print('usage: %s dll_file_name' % sys.argv[0])
sys.exit()
# names = read_export_table(sys.argv[1], mmap=False, use_kernel=False)
names = read_export_table(sys.argv[1], mmap=False, use_kernel=False)
for name in names:
print(name)
|
|
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import threading
import logging
import os
import re
import gc
import traceback
import tensorflow_io # pylint: disable=unused-import
from tensorflow.compat.v1 import gfile
from cityhash import CityHash32 # pylint: disable=no-name-in-module
from fedlearner.common.db_client import DBClient
from fedlearner.data_join.output_writer_impl import create_output_writer
from fedlearner.data_join.item_batch_seq_processor import \
ItemBatch, ItemBatchSeqProcessor
from fedlearner.data_join.routine_worker import RoutineWorker
from fedlearner.data_join.raw_data_iter_impl.metric_stats import MetricStats
from fedlearner.data_join.raw_data_visitor import FileBasedMockRawDataVisitor
from fedlearner.data_join import common
class RawDataBatch(ItemBatch):
def __init__(self, begin_index):
self._begin_index = begin_index
self._raw_datas = []
@property
def begin_index(self):
return self._begin_index
def __len__(self):
return len(self._raw_datas)
def __lt__(self, other):
assert isinstance(other, RawDataBatch)
return self.begin_index < other.begin_index
def __iter__(self):
return iter(self._raw_datas)
def append(self, item):
self._raw_datas.append(item)
class RawDataBatchFetcher(ItemBatchSeqProcessor):
def __init__(self, kvstore, options):
super(RawDataBatchFetcher, self).__init__(
options.batch_processor_options.max_flying_item,
)
self._raw_data_visitor = FileBasedMockRawDataVisitor(
kvstore, options.raw_data_options,
'{}-partitioner-mock-data-source-{:04}'.format(
options.partitioner_name,
options.partitioner_rank_id
),
options.input_file_paths
)
self._batch_size = options.batch_processor_options.batch_size
self._metrics_tags = {
'partition_name': options.partitioner_name,
'partition': options.partitioner_rank_id
}
self._metric_stats = MetricStats(options.raw_data_options,
self._metrics_tags)
self.set_input_finished()
@classmethod
def name(cls):
return 'RawDataBatchFetcher'
def _make_item_batch(self, begin_index):
return RawDataBatch(begin_index)
def _make_inner_generator(self, next_index):
assert next_index is not None
if next_index == 0:
self._raw_data_visitor.reset()
else:
self._raw_data_visitor.seek(next_index - 1)
while not self._raw_data_visitor.finished() and \
not self._fly_item_full():
next_batch = self._make_item_batch(next_index)
for (index, item) in self._raw_data_visitor:
if index != next_index:
logging.fatal("batch raw data visitor is not consecutive, "\
"%d != %d", index, next_index)
traceback.print_stack()
os._exit(-1) # pylint: disable=protected-access
self._metric_stats.emit_metric(item)
next_batch.append(item)
next_index += 1
if len(next_batch) >= self._batch_size:
break
yield next_batch, self._raw_data_visitor.finished()
yield self._make_item_batch(next_index), \
self._raw_data_visitor.finished()
def cleanup_visitor_meta_data(self):
self._raw_data_visitor.cleanup_meta_data()
class RawDataPartitioner(object):
class FileMeta(object):
def __init__(self, rank_id, process_index, begin_index, end_index):
self._rank_id = rank_id
self._process_index = process_index
self._begin_index = begin_index
self._end_index = end_index
@property
def process_index(self):
return self._process_index
@property
def rank_id(self):
return self._rank_id
@property
def begin_index(self):
return self._begin_index
@property
def end_index(self):
return self._end_index
def __lt__(self, other):
assert isinstance(other, RawDataPartitioner.FileMeta)
assert self.rank_id == other.rank_id
return self.process_index < other.process_index
def encode_meta_to_fname(self):
return '{:04}.{:08}.{:010}-{:010}{}'.format(
self.rank_id, self.process_index, self.begin_index,
self.end_index, common.RawDataFileSuffix
)
@classmethod
def decode_meta_from_fname(cls, fname):
assert fname.endswith(common.RawDataFileSuffix)
segs = re.split('\.|-', fname[:-len(common.RawDataFileSuffix)]) # pylint: disable=anomalous-backslash-in-string
assert len(segs) == 4
return RawDataPartitioner.FileMeta(int(segs[0]), int(segs[1]),
int(segs[2]), int(segs[3]))
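    # Illustrative filename round-trip (the suffix value comes from
    # common.RawDataFileSuffix and is not spelled out here): rank_id=1,
    # process_index=2, begin_index=0, end_index=99 encode to
    # '0001.00000002.0000000000-0000000099' + RawDataFileSuffix, and
    # decode_meta_from_fname recovers the same four integers by splitting
    # the stem on '.' and '-'.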
class OutputFileWriter(object):
def __init__(self, options, partition_id, process_index):
self._options = options
self._partition_id = partition_id
self._process_index = process_index
self._begin_index = None
self._end_index = None
self._writer = None
self._tmp_fpath = common.gen_tmp_fpath(
os.path.join(self._options.output_dir,
common.partition_repr(self._partition_id))
)
def append_item(self, index, item):
self._get_output_writer().write_item(item)
if self._begin_index is None:
self._begin_index = index
self._end_index = index
def finish(self):
meta = None
if self._writer is not None:
self._writer.close()
self._writer = None
meta = RawDataPartitioner.FileMeta(
self._options.partitioner_rank_id,
self._process_index,
self._begin_index,
self._end_index
)
fpath = os.path.join(self._options.output_dir,
common.partition_repr(self._partition_id),
meta.encode_meta_to_fname())
gfile.Rename(self.get_tmp_fpath(), fpath, True)
return meta
def get_tmp_fpath(self):
return self._tmp_fpath
def destroy(self):
if self._writer is not None:
self._writer.close()
self._writer = None
if gfile.Exists(self._tmp_fpath):
gfile.Remove(self._tmp_fpath)
def __del__(self):
self.destroy()
def _get_output_writer(self):
if self._writer is None:
self._writer = create_output_writer(
self._options.writer_options,
self._tmp_fpath
)
return self._writer
def __init__(self, options, part_field,
kvstore_type, use_mock_etcd=False):
self._options = options
self._part_field = part_field
kvstore = DBClient(kvstore_type, use_mock_etcd)
self._raw_data_batch_fetcher = RawDataBatchFetcher(kvstore, options)
self._next_part_index = None
self._dumped_process_index = None
self._flying_writers = []
self._dumped_file_metas = {}
self._worker_map = {}
self._started = False
self._part_finished = False
self._cond = threading.Condition()
def start_process(self):
with self._cond:
if not self._started:
self._worker_map = {
'raw_data_batch_fetcher': RoutineWorker(
'raw_data_batch_fetcher',
self._raw_data_batch_fetch_fn,
self._raw_data_batch_fetch_cond, 5),
'raw_data_partitioner': RoutineWorker(
'raw_data_partitioner',
self._raw_data_part_fn,
self._raw_data_part_cond, 5)
}
for _, w in self._worker_map.items():
w.start_routine()
self._started = True
def stop_process(self):
wait_join = True
with self._cond:
if self._started:
wait_join = True
self._started = False
if wait_join:
for w in self._worker_map.values():
w.stop_routine()
def wait_for_finished(self):
while not self._is_part_finished():
with self._cond:
self._cond.wait()
self.stop_process()
self._raw_data_batch_fetcher.cleanup_visitor_meta_data()
def _raw_data_part_fn(self):
if self._check_finished_tag():
            logging.warning("raw data has been partitioned for rank id of "\
                            "partitioner %d", self._options.partitioner_rank_id)
self._notify_part_finished()
return
self._sync_partitioner_state()
assert self._dumped_process_index is not None
assert len(self._flying_writers) == 0
fetcher = self._raw_data_batch_fetcher
fetch_finished = False
next_index = self._get_next_part_index()
hint_index = None
bp_options = self._options.batch_processor_options
round_dumped_item = 0
while not fetch_finished:
fetch_finished, batch, hint_index = \
fetcher.fetch_item_batch_by_index(next_index, hint_index)
if batch is not None:
for index, item in enumerate(batch):
raw_id = getattr(item, self._part_field)
partition_id = CityHash32(raw_id) % \
self._options.output_partition_num
writer = self._get_file_writer(partition_id)
writer.append_item(batch.begin_index+index, item)
next_index += len(batch)
round_dumped_item += len(batch)
fly_item_cnt = fetcher.get_flying_item_count()
if round_dumped_item // self._options.output_partition_num \
> (1<<21) or \
common.get_heap_mem_stats(None).CheckOomRisk(
fly_item_cnt,
self._options.memory_limit_ratio-0.05):
self._finish_file_writers()
self._set_next_part_index(next_index)
hint_index = self._evict_staless_batch(hint_index,
next_index-1)
logging.info("consumed %d items", next_index-1)
gc_cnt = gc.collect()
                    logging.warning("finished file writers for this round; "\
                                    "explicit gc collected %d objects", gc_cnt)
round_dumped_item = 0
self._wakeup_raw_data_fetcher()
elif not fetch_finished:
with self._cond:
self._cond.wait(1)
self._finish_file_writers()
self._dump_finished_tag()
for partition_id, metas in self._dumped_file_metas.items():
logging.info("part %d output %d files by partitioner",
partition_id, len(metas))
for meta in metas:
logging.info("%s", meta.encode_meta_to_fname())
logging.info("-----------------------------------")
self._notify_part_finished()
def _raw_data_part_cond(self):
if self._is_part_finished():
self._notify_part_finished()
return False
return True
def _notify_part_finished(self):
with self._cond:
self._part_finished = True
self._cond.notify_all()
def _is_part_finished(self):
with self._cond:
return self._part_finished
def _get_file_writer(self, partition_id):
if len(self._flying_writers) == 0:
self._flying_writers = \
[RawDataPartitioner.OutputFileWriter(
self._options, pid,
self._dumped_process_index+1
)
for pid in range(self._options.output_partition_num)]
assert partition_id < len(self._flying_writers)
return self._flying_writers[partition_id]
def _finish_file_writers(self):
for partition_id, writer in enumerate(self._flying_writers):
meta = writer.finish()
if meta is not None:
self._dumped_file_metas[partition_id].append(meta)
logging.info("dump %s for partition %d",
meta.encode_meta_to_fname(), partition_id)
self._flying_writers = []
self._dumped_process_index += 1
def _evict_staless_batch(self, hint_index, staless_index):
evict_cnt = self._raw_data_batch_fetcher.evict_staless_item_batch(
staless_index
)
if hint_index is not None:
if hint_index <= evict_cnt:
return 0
return hint_index-evict_cnt
return None
def _set_next_part_index(self, next_part_index):
with self._cond:
self._next_part_index = next_part_index
def _get_next_part_index(self):
with self._cond:
return self._next_part_index
def _sync_partitioner_state(self):
for writer in self._flying_writers:
writer.destroy()
self._flying_writers = []
if self._dumped_process_index is None:
max_process_index = None
min_process_index = None
for partition_id in range(self._options.output_partition_num):
metas = self._list_file_metas(partition_id)
metas.sort()
self._dumped_file_metas[partition_id] = metas
end_meta = None if len(metas) == 0 else metas[-1]
if end_meta is None:
continue
if min_process_index is None or \
min_process_index > end_meta.process_index:
min_process_index = end_meta.process_index
if max_process_index is None or \
max_process_index < end_meta.process_index:
max_process_index = end_meta.process_index
if max_process_index is None or min_process_index is None:
self._dumped_process_index = -1
elif max_process_index == min_process_index:
self._dumped_process_index = max_process_index
else:
self._dumped_process_index = max_process_index - 1
max_dumped_index = -1
for partition_id, metas in self._dumped_file_metas.items():
for meta in metas[::-1]:
if meta.process_index > self._dumped_process_index:
fpath = os.path.join(self._options.output_dir,
common.partition_repr(partition_id),
meta.encode_meta_to_fname())
if gfile.Exists(fpath):
gfile.Remove(fpath)
else:
break
metas = metas[:self._dumped_process_index+1]
self._dumped_file_metas[partition_id] = metas
if len(metas) > 0 and metas[-1].end_index > max_dumped_index:
max_dumped_index = metas[-1].end_index
self._set_next_part_index(max_dumped_index+1)
def _list_file_metas(self, partition_id):
dumped_dir = os.path.join(self._options.output_dir,
common.partition_repr(partition_id))
if not gfile.Exists(dumped_dir):
gfile.MakeDirs(dumped_dir)
assert gfile.IsDirectory(dumped_dir)
fnames = [os.path.basename(f) for f in gfile.ListDirectory(dumped_dir)
if f.endswith(common.RawDataFileSuffix)]
metas = [RawDataPartitioner.FileMeta.decode_meta_from_fname(f)
for f in fnames]
return [meta for meta in metas \
if meta.rank_id == self._options.partitioner_rank_id]
def _raw_data_batch_fetch_fn(self):
next_part_index = self._get_next_part_index()
fetcher = self._raw_data_batch_fetcher
for batch in fetcher.make_processor(next_part_index):
logging.debug("fetch batch begin at %d, len %d. wakeup "\
"partitioner", batch.begin_index, len(batch))
self._wakeup_partitioner()
fly_item_cnt = fetcher.get_flying_item_count()
if common.get_heap_mem_stats(None).CheckOomRisk(
fly_item_cnt, self._options.memory_limit_ratio):
                logging.warning('stopping the raw data fetch early '\
                                'due to the oom risk')
break
def _raw_data_batch_fetch_cond(self):
next_part_index = self._get_next_part_index()
fetcher = self._raw_data_batch_fetcher
fly_item_cnt = fetcher.get_flying_item_count()
return self._raw_data_batch_fetcher.need_process(next_part_index) and \
not common.get_heap_mem_stats(None).CheckOomRisk(
fly_item_cnt, self._options.memory_limit_ratio)
def _wakeup_partitioner(self):
self._worker_map['raw_data_partitioner'].wakeup()
def _wakeup_raw_data_fetcher(self):
self._worker_map['raw_data_batch_fetcher'].wakeup()
def _dump_finished_tag(self):
finished_tag_fpath = self._get_finished_tag_fpath()
with gfile.GFile(finished_tag_fpath, 'w') as fh:
fh.write('finished')
def _check_finished_tag(self):
return gfile.Exists(self._get_finished_tag_fpath())
def _get_finished_tag_fpath(self):
return os.path.join(
self._options.output_dir,
'_SUCCESS.{:08}'.format(self._options.partitioner_rank_id)
)
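# Illustrative sketch of the finished-tag layout (the output_dir value is
# hypothetical): with output_dir='/data/out' and partitioner_rank_id=3 the tag
# file is '/data/out/_SUCCESS.00000003'; _check_finished_tag only tests its
# existence, so a restarted partitioner skips work it has already completed.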
|
|
# -*- coding: utf-8 -*-
from django import template
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse, NoReverseMatch
from django.utils.encoding import force_text
from django.utils.six.moves.urllib.parse import unquote
from django.utils.translation import get_language, ugettext
from classytags.arguments import IntegerArgument, Argument, StringArgument
from classytags.core import Options
from classytags.helpers import InclusionTag
from cms.utils.i18n import (
force_language,
get_language_list,
get_language_object,
get_public_languages,
)
from menus.menu_pool import menu_pool
from menus.utils import DefaultLanguageChanger
register = template.Library()
class NOT_PROVIDED:
pass
def cut_after(node, levels, removed):
"""
given a tree of nodes cuts after N levels
"""
if levels == 0:
removed.extend(node.children)
node.children = []
else:
removed_local = []
for child in node.children:
if child.visible:
cut_after(child, levels - 1, removed)
else:
removed_local.append(child)
for removed_child in removed_local:
node.children.remove(removed_child)
removed.extend(removed_local)
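# Illustrative sketch of cut_after: for a tree root -> [a -> [a1, a2], b -> [b1]]
# where b is not visible, cut_after(root, 1, removed) keeps the visible child a
# but strips its children and detaches b entirely, leaving root.children == [a],
# a.children == [] and removed == [a1, a2, b].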
def remove(node, removed):
removed.append(node)
if node.parent:
if node in node.parent.children:
node.parent.children.remove(node)
def cut_levels(nodes, from_level, to_level, extra_inactive, extra_active):
"""
cutting nodes away from menus
"""
final = []
removed = []
selected = None
for node in nodes:
if not hasattr(node, 'level'):
# remove and ignore nodes that don't have level information
remove(node, removed)
continue
if node.level == from_level:
# turn nodes that are on from_level into root nodes
final.append(node)
node.parent = None
if not node.ancestor and not node.selected and not node.descendant:
# cut inactive nodes to extra_inactive, but not of descendants of
# the selected node
cut_after(node, extra_inactive, removed)
if node.level > to_level and node.parent:
# remove nodes that are too deep, but not nodes that are on
# from_level (local root nodes)
remove(node, removed)
if node.selected:
selected = node
if not node.visible:
remove(node, removed)
if selected:
cut_after(selected, extra_active, removed)
if removed:
for node in removed:
if node in final:
final.remove(node)
return final
def flatten(nodes):
flat = []
for node in nodes:
flat.append(node)
flat.extend(flatten(node.children))
return flat
class ShowMenu(InclusionTag):
"""
render a nested list of all children of the pages
- from_level: starting level
- to_level: max level
    - extra_inactive: how many levels of the inactive tree should be rendered?
- extra_active: how deep should the children of the active node be rendered?
- namespace: the namespace of the menu. if empty will use all namespaces
- root_id: the id of the root node
- template: template used to render the menu
"""
name = 'show_menu'
template = 'menu/dummy.html'
options = Options(
IntegerArgument('from_level', default=0, required=False),
IntegerArgument('to_level', default=100, required=False),
IntegerArgument('extra_inactive', default=0, required=False),
IntegerArgument('extra_active', default=1000, required=False),
StringArgument('template', default='menu/menu.html', required=False),
StringArgument('namespace', default=None, required=False),
StringArgument('root_id', default=None, required=False),
Argument('next_page', default=None, required=False),
)
def get_context(self, context, from_level, to_level, extra_inactive,
extra_active, template, namespace, root_id, next_page):
try:
# If there's an exception (500), default context_processors may not be called.
request = context['request']
except KeyError:
return {'template': 'menu/empty.html'}
if next_page:
children = next_page.children
else:
# new menu... get all the data so we can save a lot of queries
menu_renderer = context.get('cms_menu_renderer')
if not menu_renderer:
menu_renderer = menu_pool.get_renderer(request)
nodes = menu_renderer.get_nodes(namespace, root_id)
if root_id: # find the root id and cut the nodes
id_nodes = menu_pool.get_nodes_by_attribute(nodes, "reverse_id", root_id)
if id_nodes:
node = id_nodes[0]
nodes = node.children
for remove_parent in nodes:
remove_parent.parent = None
from_level += node.level + 1
to_level += node.level + 1
nodes = flatten(nodes)
else:
nodes = []
children = cut_levels(nodes, from_level, to_level, extra_inactive, extra_active)
children = menu_renderer.apply_modifiers(children, namespace, root_id, post_cut=True)
try:
context['children'] = children
context['template'] = template
context['from_level'] = from_level
context['to_level'] = to_level
context['extra_inactive'] = extra_inactive
context['extra_active'] = extra_active
context['namespace'] = namespace
except:
context = {"template": template}
return context
register.tag(ShowMenu)
class ShowMenuBelowId(ShowMenu):
name = 'show_menu_below_id'
options = Options(
Argument('root_id', default=None, required=False),
IntegerArgument('from_level', default=0, required=False),
IntegerArgument('to_level', default=100, required=False),
IntegerArgument('extra_inactive', default=0, required=False),
IntegerArgument('extra_active', default=1000, required=False),
Argument('template', default='menu/menu.html', required=False),
Argument('namespace', default=None, required=False),
Argument('next_page', default=None, required=False),
)
register.tag(ShowMenuBelowId)
class ShowSubMenu(InclusionTag):
"""
show the sub menu of the current nav-node.
- levels: how many levels deep
- root_level: the level to start the menu at
- nephews: the level of descendants of siblings (nephews) to show
- template: template used to render the navigation
"""
name = 'show_sub_menu'
template = 'menu/dummy.html'
options = Options(
IntegerArgument('levels', default=100, required=False),
Argument('root_level', default=None, required=False),
IntegerArgument('nephews', default=100, required=False),
Argument('template', default='menu/sub_menu.html', required=False),
)
def get_context(self, context, levels, root_level, nephews, template):
        # Django 1.4 doesn't accept 'None' as a tag value and resolves it to ''
# So we need to force it to None again
if not root_level and root_level != 0:
root_level = None
try:
# If there's an exception (500), default context_processors may not be called.
request = context['request']
except KeyError:
return {'template': 'menu/empty.html'}
menu_renderer = context.get('cms_menu_renderer')
if not menu_renderer:
menu_renderer = menu_pool.get_renderer(request)
nodes = menu_renderer.get_nodes()
children = []
# adjust root_level so we cut before the specified level, not after
include_root = False
if root_level is not None and root_level > 0:
root_level -= 1
elif root_level is not None and root_level == 0:
include_root = True
for node in nodes:
if root_level is None:
if node.selected:
# if no root_level specified, set it to the selected nodes level
root_level = node.level
# is this the ancestor of current selected node at the root level?
is_root_ancestor = (node.ancestor and node.level == root_level)
# is a node selected on the root_level specified
root_selected = (node.selected and node.level == root_level)
if is_root_ancestor or root_selected:
cut_after(node, levels, [])
children = node.children
for child in children:
if child.sibling:
cut_after(child, nephews, [])
# if root_level was 0 we need to give the menu the entire tree
# not just the children
if include_root:
children = menu_renderer.apply_modifiers([node], post_cut=True)
else:
children = menu_renderer.apply_modifiers(children, post_cut=True)
context['children'] = children
context['template'] = template
context['from_level'] = 0
context['to_level'] = 0
context['extra_inactive'] = 0
context['extra_active'] = 0
return context
register.tag(ShowSubMenu)
class ShowBreadcrumb(InclusionTag):
"""
Shows the breadcrumb from the node that has the same url as the current request
- start level: after which level should the breadcrumb start? 0=home
- template: template used to render the breadcrumb
"""
name = 'show_breadcrumb'
template = 'menu/dummy.html'
options = Options(
Argument('start_level', default=0, required=False),
Argument('template', default='menu/breadcrumb.html', required=False),
Argument('only_visible', default=True, required=False),
)
def get_context(self, context, start_level, template, only_visible):
try:
# If there's an exception (500), default context_processors may not be called.
request = context['request']
except KeyError:
return {'template': 'cms/content.html'}
if not (isinstance(start_level, int) or start_level.isdigit()):
only_visible = template
template = start_level
start_level = 0
try:
only_visible = bool(int(only_visible))
except:
only_visible = bool(only_visible)
ancestors = []
menu_renderer = context.get('cms_menu_renderer')
if not menu_renderer:
menu_renderer = menu_pool.get_renderer(request)
nodes = menu_renderer.get_nodes(breadcrumb=True)
# Find home
home = None
root_url = unquote(reverse("pages-root"))
home = next((node for node in nodes if node.get_absolute_url() == root_url), None)
# Find selected
selected = None
selected = next((node for node in nodes if node.selected), None)
if selected and selected != home:
node = selected
while node:
if node.visible or not only_visible:
ancestors.append(node)
node = node.parent
if not ancestors or (ancestors and ancestors[-1] != home) and home:
ancestors.append(home)
ancestors.reverse()
if len(ancestors) >= start_level:
ancestors = ancestors[start_level:]
else:
ancestors = []
context['ancestors'] = ancestors
context['template'] = template
return context
register.tag(ShowBreadcrumb)
def _raw_language_marker(language, lang_code):
return language
def _native_language_marker(language, lang_code):
with force_language(lang_code):
return force_text(ugettext(language))
def _current_language_marker(language, lang_code):
return force_text(ugettext(language))
def _short_language_marker(language, lang_code):
return lang_code
MARKERS = {
'raw': _raw_language_marker,
'native': _native_language_marker,
'current': _current_language_marker,
'short': _short_language_marker,
}
class LanguageChooser(InclusionTag):
"""
Displays a language chooser
- template: template used to render the language chooser
"""
name = 'language_chooser'
template = 'menu/dummy.html'
options = Options(
Argument('template', default=NOT_PROVIDED, required=False),
Argument('i18n_mode', default='raw', required=False),
)
def get_context(self, context, template, i18n_mode):
if template in MARKERS:
_tmp = template
if i18n_mode not in MARKERS:
template = i18n_mode
else:
template = NOT_PROVIDED
i18n_mode = _tmp
if template is NOT_PROVIDED:
template = "menu/language_chooser.html"
if not i18n_mode in MARKERS:
i18n_mode = 'raw'
if 'request' not in context:
# If there's an exception (500), default context_processors may not be called.
return {'template': 'cms/content.html'}
marker = MARKERS[i18n_mode]
current_lang = get_language()
site = Site.objects.get_current()
request = context['request']
if request.user.is_staff:
languages = get_language_list(site_id=site.pk)
else:
languages = get_public_languages(site_id=site.pk)
languages_info = []
for language in languages:
obj = get_language_object(language, site_id=site.pk)
languages_info.append((obj['code'], marker(obj['name'], obj['code'])))
context['languages'] = languages_info
context['current_language'] = current_lang
context['template'] = template
return context
register.tag(LanguageChooser)
class PageLanguageUrl(InclusionTag):
"""
Displays the url of the current page in the defined language.
You can set a language_changer function with the set_language_changer function in the utils.py if there is no page.
This is needed if you have slugs in more than one language.
"""
name = 'page_language_url'
template = 'cms/content.html'
options = Options(
Argument('lang'),
)
def get_context(self, context, lang):
try:
# If there's an exception (500), default context_processors may not be called.
request = context['request']
except KeyError:
return {'template': 'cms/content.html'}
if hasattr(request, "_language_changer"):
try:
url = request._language_changer(lang)
except NoReverseMatch:
url = DefaultLanguageChanger(request)(lang)
else:
# use the default language changer
url = DefaultLanguageChanger(request)(lang)
return {'content': url}
register.tag(PageLanguageUrl)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Midokura Japan K.K.
# Copyright (C) 2013 Midokura PTE LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Ryu Ishimoto, Midokura Japan KK
# @author: Tomoe Sugihara, Midokura Japan KK
import uuid
import mock
from quantum.plugins.midonet import midonet_lib
from quantum.tests import base
class MidonetLibTestCase(base.BaseTestCase):
def setUp(self):
super(MidonetLibTestCase, self).setUp()
self.mock_api = mock.Mock()
def _create_mock_chains(self, sg_id, sg_name):
mock_in_chain = mock.Mock()
mock_in_chain.get_name.return_value = "OS_SG_%s_%s_IN" % (sg_id,
sg_name)
mock_out_chain = mock.Mock()
mock_out_chain.get_name.return_value = "OS_SG_%s_%s_OUT" % (sg_id,
sg_name)
return (mock_in_chain, mock_out_chain)
def _create_mock_router_chains(self, router_id):
mock_in_chain = mock.Mock()
mock_in_chain.get_name.return_value = "OS_ROUTER_IN_%s" % (router_id)
mock_out_chain = mock.Mock()
mock_out_chain.get_name.return_value = "OS_ROUTER_OUT_%s" % (router_id)
return (mock_in_chain, mock_out_chain)
def _create_mock_port_group(self, sg_id, sg_name):
mock_pg = mock.Mock()
mock_pg.get_name.return_value = "OS_SG_%s_%s" % (sg_id, sg_name)
return mock_pg
def _create_mock_rule(self, rule_id):
mock_rule = mock.Mock()
mock_rule.get_properties.return_value = {"os_sg_rule_id": rule_id}
return mock_rule
class MidonetChainManagerTestCase(MidonetLibTestCase):
def setUp(self):
super(MidonetChainManagerTestCase, self).setUp()
self.mgr = midonet_lib.ChainManager(self.mock_api)
def test_create_for_sg(self):
tenant_id = 'test_tenant'
sg_id = str(uuid.uuid4())
sg_name = 'test_sg_name'
calls = [mock.call.add_chain().tenant_id(tenant_id)]
self.mgr.create_for_sg(tenant_id, sg_id, sg_name)
self.mock_api.assert_has_calls(calls)
def test_delete_for_sg(self):
tenant_id = 'test_tenant'
sg_id = str(uuid.uuid4())
sg_name = 'test_sg_name'
in_chain, out_chain = self._create_mock_chains(sg_id, sg_name)
# Mock get_chains returned values
self.mock_api.get_chains.return_value = [in_chain, out_chain]
self.mgr.delete_for_sg(tenant_id, sg_id, sg_name)
self.mock_api.assert_has_calls(mock.call.get_chains(
{"tenant_id": tenant_id}))
in_chain.delete.assert_called_once_with()
out_chain.delete.assert_called_once_with()
def test_get_router_chains(self):
tenant_id = 'test_tenant'
router_id = str(uuid.uuid4())
in_chain, out_chain = self._create_mock_router_chains(router_id)
# Mock get_chains returned values
self.mock_api.get_chains.return_value = [in_chain, out_chain]
chains = self.mgr.get_router_chains(tenant_id, router_id)
self.mock_api.assert_has_calls(mock.call.get_chains(
{"tenant_id": tenant_id}))
self.assertEqual(len(chains), 2)
self.assertEqual(chains['in'], in_chain)
self.assertEqual(chains['out'], out_chain)
def test_create_router_chains(self):
tenant_id = 'test_tenant'
router_id = str(uuid.uuid4())
calls = [mock.call.add_chain().tenant_id(tenant_id)]
self.mgr.create_router_chains(tenant_id, router_id)
self.mock_api.assert_has_calls(calls)
def test_get_sg_chains(self):
tenant_id = 'test_tenant'
sg_id = str(uuid.uuid4())
in_chain, out_chain = self._create_mock_chains(sg_id, 'foo')
# Mock get_chains returned values
self.mock_api.get_chains.return_value = [in_chain, out_chain]
chains = self.mgr.get_sg_chains(tenant_id, sg_id)
self.mock_api.assert_has_calls(mock.call.get_chains(
{"tenant_id": tenant_id}))
self.assertEqual(len(chains), 2)
self.assertEqual(chains['in'], in_chain)
self.assertEqual(chains['out'], out_chain)
class MidonetPortGroupManagerTestCase(MidonetLibTestCase):
def setUp(self):
super(MidonetPortGroupManagerTestCase, self).setUp()
self.mgr = midonet_lib.PortGroupManager(self.mock_api)
def test_create(self):
tenant_id = 'test_tenant'
sg_id = str(uuid.uuid4())
sg_name = 'test_sg'
pg_mock = self._create_mock_port_group(sg_id, sg_name)
rv = self.mock_api.add_port_group.return_value.tenant_id.return_value
rv.name.return_value = pg_mock
self.mgr.create(tenant_id, sg_id, sg_name)
pg_mock.create.assert_called_once_with()
def test_delete(self):
tenant_id = 'test_tenant'
sg_id = str(uuid.uuid4())
sg_name = 'test_sg'
pg_mock1 = self._create_mock_port_group(sg_id, sg_name)
pg_mock2 = self._create_mock_port_group(sg_id, sg_name)
self.mock_api.get_port_groups.return_value = [pg_mock1, pg_mock2]
self.mgr.delete(tenant_id, sg_id, sg_name)
self.mock_api.assert_has_calls(mock.call.get_port_groups(
{"tenant_id": tenant_id}))
pg_mock1.delete.assert_called_once_with()
pg_mock2.delete.assert_called_once_with()
def test_get_for_sg(self):
tenant_id = 'test_tenant'
sg_id = str(uuid.uuid4())
pg_mock = self._create_mock_port_group(sg_id, 'foo')
self.mock_api.get_port_groups.return_value = [pg_mock]
pg = self.mgr.get_for_sg(tenant_id, sg_id)
self.assertEqual(pg, pg_mock)
class MidonetRuleManagerTestCase(MidonetLibTestCase):
def setUp(self):
super(MidonetRuleManagerTestCase, self).setUp()
self.mgr = midonet_lib.RuleManager(self.mock_api)
self.mgr.chain_manager = mock.Mock()
self.mgr.pg_manager = mock.Mock()
def _create_test_rule(self, tenant_id, sg_id, rule_id, direction="egress",
protocol="tcp", port_min=1, port_max=65535,
src_ip='192.168.1.0/24', src_group_id=None,
ethertype=0x0800):
return {"tenant_id": tenant_id, "security_group_id": sg_id,
"rule_id": rule_id, "direction": direction,
"protocol": protocol,
"remote_ip_prefix": src_ip, "remote_group_id": src_group_id,
"port_range_min": port_min, "port_range_max": port_max,
"ethertype": ethertype, "id": rule_id, "external_id": None}
def test_create_for_sg_rule(self):
tenant_id = 'test_tenant'
sg_id = str(uuid.uuid4())
rule_id = str(uuid.uuid4())
in_chain, out_chain = self._create_mock_chains(sg_id, 'foo')
self.mgr.chain_manager.get_sg_chains.return_value = {"in": in_chain,
"out": out_chain}
props = {"os_sg_rule_id": rule_id}
rule = self._create_test_rule(tenant_id, sg_id, rule_id)
calls = [mock.call.add_rule().port_group(None).type(
'accept').nw_proto(6).nw_src_address(
'192.168.1.0').nw_src_length(24).tp_src_start(
None).tp_src_end(None).tp_dst_start(1).tp_dst_end(
65535).properties(props).create()]
self.mgr.create_for_sg_rule(rule)
in_chain.assert_has_calls(calls)
def test_delete_for_sg_rule(self):
tenant_id = 'test_tenant'
sg_id = str(uuid.uuid4())
rule_id = str(uuid.uuid4())
in_chain, out_chain = self._create_mock_chains(sg_id, 'foo')
self.mgr.chain_manager.get_sg_chains.return_value = {"in": in_chain,
"out": out_chain}
# Mock the rules returned for each chain
mock_rule_in = self._create_mock_rule(rule_id)
mock_rule_out = self._create_mock_rule(rule_id)
in_chain.get_rules.return_value = [mock_rule_in]
out_chain.get_rules.return_value = [mock_rule_out]
rule = self._create_test_rule(tenant_id, sg_id, rule_id)
self.mgr.delete_for_sg_rule(rule)
mock_rule_in.delete.assert_called_once_with()
mock_rule_out.delete.assert_called_once_with()
|
|
from __future__ import unicode_literals
from django.contrib import admin
from django.contrib.auth import logout
from django.contrib.messages import error
from django.contrib.redirects.models import Redirect
from django.core.exceptions import MiddlewareNotUsed
from django.core.urlresolvers import reverse, resolve
from django.http import (HttpResponse, HttpResponseRedirect,
HttpResponsePermanentRedirect, HttpResponseGone)
from django.middleware.csrf import CsrfViewMiddleware, get_token
from django.template import Template, RequestContext
from django.utils.cache import get_max_age
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from mezzanine.conf import settings
from mezzanine.core.models import SitePermission
from mezzanine.core.management.commands.createdb import (DEFAULT_USERNAME,
DEFAULT_PASSWORD)
from mezzanine.utils.cache import (cache_key_prefix, nevercache_token,
cache_get, cache_set, cache_installed)
from mezzanine.utils.device import templates_for_device
from mezzanine.utils.deprecation import MiddlewareMixin, get_middleware_setting
from mezzanine.utils.sites import current_site_id, templates_for_host
from mezzanine.utils.urls import next_url
class AdminLoginInterfaceSelectorMiddleware(MiddlewareMixin):
"""
Checks for a POST from the admin login view and if authentication is
successful and the "site" interface is selected, redirect to the site.
"""
def process_view(self, request, view_func, view_args, view_kwargs):
login_type = request.POST.get("mezzanine_login_interface")
if login_type and not request.user.is_authenticated():
response = view_func(request, *view_args, **view_kwargs)
if request.user.is_authenticated():
if login_type == "admin":
next = next_url(request) or request.get_full_path()
username = request.user.get_username()
if (username == DEFAULT_USERNAME and
request.user.check_password(DEFAULT_PASSWORD)):
error(request, mark_safe(_(
"Your account is using the default password, "
"please <a href='%s'>change it</a> immediately.")
% reverse("user_change_password",
args=(request.user.id,))))
else:
next = "/"
return HttpResponseRedirect(next)
else:
return response
return None
class SitePermissionMiddleware(MiddlewareMixin):
"""
Marks the current user with a ``has_site_permission`` which is
used in place of ``user.is_staff`` to achieve per-site staff
access.
"""
def process_view(self, request, view_func, view_args, view_kwargs):
has_site_permission = False
if request.user.is_superuser:
has_site_permission = True
elif request.user.is_staff:
lookup = {"user": request.user, "sites": current_site_id()}
try:
SitePermission.objects.get(**lookup)
except SitePermission.DoesNotExist:
admin_index = reverse("admin:index")
if request.path.startswith(admin_index):
logout(request)
view_func = admin.site.login
extra_context = {"no_site_permission": True}
return view_func(request, extra_context=extra_context)
else:
has_site_permission = True
request.user.has_site_permission = has_site_permission
class TemplateForDeviceMiddleware(MiddlewareMixin):
"""
Inserts device-specific templates to the template list.
"""
def process_template_response(self, request, response):
if hasattr(response, "template_name"):
if not isinstance(response.template_name, Template):
templates = templates_for_device(request,
response.template_name)
response.template_name = templates
return response
class TemplateForHostMiddleware(MiddlewareMixin):
"""
Inserts host-specific templates to the template list.
"""
def process_template_response(self, request, response):
if hasattr(response, "template_name"):
if not isinstance(response.template_name, Template):
response.template_name = templates_for_host(
response.template_name)
return response
class UpdateCacheMiddleware(MiddlewareMixin):
"""
Response phase for Mezzanine's cache middleware. Handles caching
the response, and then performing the second phase of rendering,
for content enclosed by the ``nevercache`` tag.
"""
def process_response(self, request, response):
# Caching is only applicable for text-based, non-streaming
# responses. We also skip it for non-200 statuses during
# development, so that stack traces are correctly rendered.
is_text = response.get("content-type", "").startswith("text")
valid_status = response.status_code == 200
streaming = getattr(response, "streaming", False)
if not is_text or streaming or (settings.DEBUG and not valid_status):
return response
# Cache the response if all the required conditions are met.
# Response must be marked for updating by the
# ``FetchFromCacheMiddleware`` having a cache get miss, the
# user must not be authenticated, the HTTP status must be OK
        # and the response must not have an expiry age of zero, which
        # indicates it shouldn't be cached.
marked_for_update = getattr(request, "_update_cache", False)
anon = hasattr(request, "user") and not request.user.is_authenticated()
timeout = get_max_age(response)
if timeout is None:
timeout = settings.CACHE_MIDDLEWARE_SECONDS
if anon and valid_status and marked_for_update and timeout:
cache_key = cache_key_prefix(request) + request.get_full_path()
_cache_set = lambda r: cache_set(cache_key, r.content, timeout)
if callable(getattr(response, "render", None)):
response.add_post_render_callback(_cache_set)
else:
_cache_set(response)
# Second phase rendering for non-cached template code and
# content. Split on the delimiter the ``nevercache`` tag
# wrapped its contents in, and render only the content
# enclosed by it, to avoid possible template code injection.
token = nevercache_token()
try:
token = token.encode('utf-8')
except AttributeError:
pass
parts = response.content.split(token)
# Restore csrf token from cookie - check the response
# first as it may be being set for the first time.
csrf_token = None
try:
csrf_token = response.cookies[settings.CSRF_COOKIE_NAME].value
except KeyError:
try:
csrf_token = request.COOKIES[settings.CSRF_COOKIE_NAME]
except KeyError:
pass
if csrf_token:
request.META["CSRF_COOKIE"] = csrf_token
context = RequestContext(request)
for i, part in enumerate(parts):
if i % 2:
part = Template(part).render(context).encode("utf-8")
parts[i] = part
response.content = b"".join(parts)
response["Content-Length"] = len(response.content)
if hasattr(request, '_messages'):
# Required to clear out user messages.
request._messages.update(response)
        # Response needs to be run through the CSRF middleware again so
        # that if there was a {% csrf_token %} inside of the nevercache
        # the cookie will be correctly set for the response.
csrf_mw_name = "django.middleware.csrf.CsrfViewMiddleware"
if csrf_mw_name in get_middleware_setting():
response.csrf_processing_done = False
csrf_mw = CsrfViewMiddleware()
csrf_mw.process_response(request, response)
return response
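# A minimal, illustrative sketch of the second-phase "nevercache" rendering
# performed above: the content is split on the delimiter token that the
# ``nevercache`` tag wrapped its output in, and only the odd-indexed parts
# (those enclosed by the tag) are re-rendered per request. The function name
# is hypothetical and not part of Mezzanine's API.
def _render_nevercache_parts(content, token, context):
    parts = content.split(token)
    for i, part in enumerate(parts):
        if i % 2:
            parts[i] = Template(part).render(context).encode("utf-8")
    return b"".join(parts)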
class FetchFromCacheMiddleware(MiddlewareMixin):
"""
Request phase for Mezzanine cache middleware. Return a response
    from cache if found, otherwise mark the request for updating
the cache in ``UpdateCacheMiddleware``.
"""
def process_request(self, request):
if (cache_installed() and request.method == "GET" and
not request.user.is_authenticated()):
cache_key = cache_key_prefix(request) + request.get_full_path()
response = cache_get(cache_key)
# We need to force a csrf token here, as new sessions
            # won't receive one on their first request, with cache
# middleware running.
csrf_mw_name = "django.middleware.csrf.CsrfViewMiddleware"
if csrf_mw_name in get_middleware_setting():
csrf_mw = CsrfViewMiddleware()
csrf_mw.process_view(request, lambda x: None, None, None)
get_token(request)
if response is None:
request._update_cache = True
else:
return HttpResponse(response)
class SSLRedirectMiddleware(MiddlewareMixin):
"""
Handles redirections required for SSL when ``SSL_ENABLED`` is ``True``.
If ``SSL_FORCE_HOST`` is ``True``, and is not the current host,
redirect to it.
    Also ensure URLs defined by ``SSL_FORCE_URL_PREFIXES`` are redirected
to HTTPS, and redirect all other URLs to HTTP if on HTTPS.
"""
def languages(self):
if not hasattr(self, "_languages"):
self._languages = dict(settings.LANGUAGES).keys()
return self._languages
def process_request(self, request):
force_host = settings.SSL_FORCE_HOST
response = None
if force_host and request.get_host().split(":")[0] != force_host:
url = "http://%s%s" % (force_host, request.get_full_path())
response = HttpResponsePermanentRedirect(url)
elif settings.SSL_ENABLED and not settings.DEV_SERVER:
url = "%s%s" % (request.get_host(), request.get_full_path())
path = request.path
if settings.USE_I18N and path[1:3] in self.languages():
path = path[3:]
if path.startswith(settings.SSL_FORCE_URL_PREFIXES):
if not request.is_secure():
response = HttpResponseRedirect("https://%s" % url)
elif request.is_secure() and settings.SSL_FORCED_PREFIXES_ONLY:
response = HttpResponseRedirect("http://%s" % url)
if response and request.method == "POST":
if resolve(request.get_full_path()).url_name == "fb_do_upload":
# The handler for the flash file uploader in filebrowser
# doesn't have access to the http headers Django will use
# to determine whether the request is secure or not, so
# in this case we don't attempt a redirect - note that
# when /admin is restricted to SSL using Mezzanine's SSL
# setup, the flash uploader will post over SSL, so
                # someone would need to explicitly go out of their way to
# trigger this.
return
# Tell the client they need to re-POST.
response.status_code = 307
return response
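# For reference, a hedged sketch of the settings this middleware reads; the
# values below are hypothetical examples rather than Mezzanine's defaults:
#
#     SSL_ENABLED = True
#     SSL_FORCE_HOST = "www.example.com"
#     SSL_FORCE_URL_PREFIXES = ("/admin", "/account")
#     SSL_FORCED_PREFIXES_ONLY = True
#
# With these values, requests to /admin or /account are redirected to HTTPS,
# other secure requests are redirected back to HTTP, and any other host is
# permanently redirected to www.example.com.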
class RedirectFallbackMiddleware(MiddlewareMixin):
"""
Port of Django's ``RedirectFallbackMiddleware`` that uses
Mezzanine's approach for determining the current site.
"""
def __init__(self, *args, **kwargs):
super(RedirectFallbackMiddleware, self).__init__(*args, **kwargs)
if "django.contrib.redirects" not in settings.INSTALLED_APPS:
raise MiddlewareNotUsed
def process_response(self, request, response):
if response.status_code == 404:
lookup = {
"site_id": current_site_id(),
"old_path": request.get_full_path(),
}
try:
redirect = Redirect.objects.get(**lookup)
except Redirect.DoesNotExist:
pass
else:
if not redirect.new_path:
response = HttpResponseGone()
else:
response = HttpResponsePermanentRedirect(redirect.new_path)
return response
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals, absolute_import
"""
This module defines utility classes and functions.
"""
import six
from io import open
import os
import tempfile
from subprocess import Popen, PIPE
import numpy as np
try:
import pybel as pb
except ImportError:
pb = None
from pymatgen import Molecule
from pymatgen.core.operations import SymmOp
from pymatgen.util.coord_utils import get_angle
from pymatgen.io.babel import BabelMolAdaptor
from monty.os.path import which
from monty.dev import requires
from monty.tempfile import ScratchDir
__author__ = 'Kiran Mathew, Brandon Wood, Michael Humbert'
__email__ = '[email protected]'
class Polymer(object):
"""
    Generate a polymer chain via random walk. At each position there are
    a total of 5 possible moves (excluding the reverse of the previous move).
"""
def __init__(self, start_monomer, s_head, s_tail,
monomer, head, tail,
end_monomer, e_head, e_tail,
n_units, link_distance=1.0, linear_chain=False):
"""
Args:
start_monomer (Molecule): Starting molecule
s_head (int): starting atom index of the start_monomer molecule
s_tail (int): tail atom index of the start_monomer
monomer (Molecule): The monomer
head (int): index of the atom in the monomer that forms the head
tail (int): tail atom index. monomers will be connected from
tail to head
end_monomer (Molecule): Terminal molecule
e_head (int): starting atom index of the end_monomer molecule
e_tail (int): tail atom index of the end_monomer
n_units (int): number of monomer units excluding the start and
terminal molecules
link_distance (float): distance between consecutive monomers
linear_chain (bool): linear or random walk polymer chain
"""
self.start = s_head
self.end = s_tail
self.monomer = monomer
self.n_units = n_units
self.link_distance = link_distance
self.linear_chain = linear_chain
        # translate each monomer so that its own head atom is at the origin
        start_monomer.translate_sites(range(len(start_monomer)),
                                      - start_monomer.cart_coords[s_head])
        monomer.translate_sites(range(len(monomer)),
                                - monomer.cart_coords[head])
        end_monomer.translate_sites(range(len(end_monomer)),
                                    - end_monomer.cart_coords[e_head])
self.mon_vector = monomer.cart_coords[tail] - monomer.cart_coords[head]
self.moves = {1: [1, 0, 0],
2: [0, 1, 0],
3: [0, 0, 1],
4: [-1, 0, 0],
5: [0, -1, 0],
6: [0, 0, -1]}
self.prev_move = 1
# places the start monomer at the beginning of the chain
self.molecule = start_monomer.copy()
self.length = 1
# create the chain
self._create(self.monomer, self.mon_vector)
# terminate the chain with the end_monomer
self.n_units += 1
end_mon_vector = end_monomer.cart_coords[e_tail] - \
end_monomer.cart_coords[e_head]
self._create(end_monomer, end_mon_vector)
def _create(self, monomer, mon_vector):
"""
create the polymer from the monomer
Args:
monomer (Molecule)
mon_vector (numpy.array): molecule vector that starts from the
start atom index to the end atom index
"""
while self.length != (self.n_units-1):
if self.linear_chain:
move_direction = np.array(mon_vector) / np.linalg.norm(mon_vector)
else:
move_direction = self._next_move_direction()
self._add_monomer(monomer.copy(), mon_vector, move_direction)
def _next_move_direction(self):
"""
pick a move at random from the list of moves
"""
nmoves = len(self.moves)
move = np.random.randint(1, nmoves+1)
        # reject the candidate if it is the reverse of the previous move
        # (moves 1-3 and 4-6 are opposite pairs, so reverse(m) == (m + 2) % 6 + 1)
        while self.prev_move == (move + 2) % nmoves + 1:
move = np.random.randint(1, nmoves+1)
self.prev_move = move
return np.array(self.moves[move])
def _align_monomer(self, monomer, mon_vector, move_direction):
"""
rotate the monomer so that it is aligned along the move direction
Args:
monomer (Molecule)
mon_vector (numpy.array): molecule vector that starts from the
start atom index to the end atom index
move_direction (numpy.array): the direction of the polymer chain
extension
"""
axis = np.cross(mon_vector, move_direction)
origin = monomer[self.start].coords
angle = get_angle(mon_vector, move_direction)
op = SymmOp.from_origin_axis_angle(origin, axis, angle)
monomer.apply_operation(op)
def _add_monomer(self, monomer, mon_vector, move_direction):
"""
extend the polymer molecule by adding a monomer along mon_vector direction
Args:
monomer (Molecule): monomer molecule
mon_vector (numpy.array): monomer vector that points from head to tail.
move_direction (numpy.array): direction along which the monomer
will be positioned
"""
translate_by = self.molecule.cart_coords[self.end] + \
self.link_distance * move_direction
monomer.translate_sites(range(len(monomer)), translate_by)
if not self.linear_chain:
self._align_monomer(monomer, mon_vector, move_direction)
# add monomer if there are no crossings
does_cross = False
for i, site in enumerate(monomer):
try:
self.molecule.append(site.specie, site.coords,
properties=site.properties)
            except Exception:
does_cross = True
polymer_length = len(self.molecule)
self.molecule.remove_sites(
range(polymer_length - i, polymer_length))
break
if not does_cross:
self.length += 1
self.end += len(self.monomer)
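# A minimal usage sketch for Polymer. The Molecule arguments and the
# head/tail atom indices below are hypothetical placeholders; in practice
# they must point at the actual connection atoms of each monomer.
def _example_polymer_chain(start, repeat, end):
    """Illustrative only: assemble a short linear chain of 5 repeat units."""
    chain = Polymer(start_monomer=start, s_head=0, s_tail=1,
                    monomer=repeat, head=0, tail=1,
                    end_monomer=end, e_head=0, e_tail=1,
                    n_units=5, link_distance=1.5, linear_chain=True)
    return chain.molecule  # the assembled pymatgen Molecule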
class PackmolRunner(object):
"""
Wrapper for the Packmol software that can be used to pack various types of
    molecules into a single unit.
"""
@requires(which('packmol'),
"PackmolRunner requires the executable 'packmol' to be in "
"the path. Please download packmol from "
"https://github.com/leandromartinez98/packmol "
"and follow the instructions in the README to compile. "
"Don't forget to add the packmol binary to your path")
def __init__(self, mols, param_list, input_file="pack.inp",
tolerance=2.0, filetype="xyz",
control_params={"maxit": 20, "nloop": 600},
auto_box=True, output_file="packed.xyz"):
"""
Args:
mols:
list of Molecules to pack
input_file:
name of the packmol input file
tolerance:
min distance between the atoms
filetype:
input/output structure file type
control_params:
packmol control parameters dictionary. Basically
all parameters other than structure/atoms
param_list:
list of parameters containing dicts for each molecule
auto_box:
put the molecule assembly in a box
output_file:
output file name. The extension will be adjusted
according to the filetype
"""
self.mols = mols
self.param_list = param_list
self.input_file = input_file
self.boxit = auto_box
self.control_params = control_params
if not self.control_params.get("tolerance"):
self.control_params["tolerance"] = tolerance
if not self.control_params.get("filetype"):
self.control_params["filetype"] = filetype
if not self.control_params.get("output"):
self.control_params["output"] = "{}.{}".format(
output_file.split(".")[0], self.control_params["filetype"])
if self.boxit:
self._set_box()
def _format_param_val(self, param_val):
"""
Internal method to format values in the packmol parameter dictionaries
Args:
param_val:
Some object to turn into String
Returns:
string representation of the object
"""
if isinstance(param_val, list):
return ' '.join(str(x) for x in param_val)
else:
return str(param_val)
def _set_box(self):
"""
Set the box size for the molecular assembly
"""
net_volume = 0.0
for idx, mol in enumerate(self.mols):
length = max([np.max(mol.cart_coords[:, i])-np.min(mol.cart_coords[:, i])
for i in range(3)]) + 2.0
net_volume += (length**3.0) * float(self.param_list[idx]['number'])
length = net_volume**(1.0/3.0)
for idx, mol in enumerate(self.mols):
self.param_list[idx]['inside box'] = '0.0 0.0 0.0 {} {} {}'.format(
length, length, length)
def _write_input(self, input_dir="."):
"""
Write the packmol input file to the input directory.
Args:
input_dir (string): path to the input directory
"""
with open(os.path.join(input_dir, self.input_file), 'wt', encoding="utf-8") as inp:
for k, v in six.iteritems(self.control_params):
inp.write('{} {}\n'.format(k, self._format_param_val(v)))
# write the structures of the constituent molecules to file and set
# the molecule id and the corresponding filename in the packmol
# input file.
for idx, mol in enumerate(self.mols):
a = BabelMolAdaptor(mol)
pm = pb.Molecule(a.openbabel_mol)
filename = os.path.join(
input_dir, '{}.{}'.format(
idx, self.control_params["filetype"])).encode("ascii")
pm.write(self.control_params["filetype"], filename=filename,
overwrite=True)
inp.write("\n")
inp.write(
"structure {}.{}\n".format(
os.path.join(input_dir, str(idx)),
self.control_params["filetype"]))
for k, v in six.iteritems(self.param_list[idx]):
inp.write(' {} {}\n'.format(k, self._format_param_val(v)))
inp.write('end structure\n')
def run(self, copy_to_current_on_exit=False):
"""
Write the input file to the scratch directory, run packmol and return
the packed molecule.
Args:
copy_to_current_on_exit (bool): Whether or not to copy the packmol
input/output files from the scratch directory to the current
directory.
Returns:
Molecule object
"""
scratch = tempfile.gettempdir()
with ScratchDir(scratch, copy_to_current_on_exit=copy_to_current_on_exit) as scratch_dir:
self._write_input(input_dir=scratch_dir)
packmol_bin = ['packmol']
packmol_input = open(os.path.join(scratch_dir, self.input_file), 'r')
p = Popen(packmol_bin, stdin=packmol_input, stdout=PIPE, stderr=PIPE)
p.wait()
(stdout, stderr) = p.communicate()
output_file = os.path.join(scratch_dir, self.control_params["output"])
if os.path.isfile(output_file):
packed_mol = BabelMolAdaptor.from_file(output_file)
print("packed molecule written to {}".format(
self.control_params["output"]))
return packed_mol.pymatgen_mol
else:
print("Packmol execution failed")
print(stdout, stderr)
return None
class LammpsRunner(object):
def __init__(self, dict_input, input_filename="lammps.in", bin="lammps"):
"""
LAMMPS wrapper
Args:
dict_input (DictLammpsInput): lammps input object
input_filename (string): input file name
bin (string): command to run, excluding the input file name
"""
self.lammps_bin = bin.split()
if not which(self.lammps_bin[-1]):
raise RuntimeError(
"LammpsRunner requires the executable {} to be in the path. "
"Please download and install LAMMPS from " \
"http://lammps.sandia.gov. "
"Don't forget to add the binary to your path".format(self.lammps_bin[-1]))
self.dict_input = dict_input
self.input_filename = input_filename
def run(self):
"""
Write the input/data files and run LAMMPS.
"""
self.dict_input.write_input(self.input_filename)
print("Input file: {}".format(self.input_filename))
lammps_cmd = self.lammps_bin + ['-in', self.input_filename]
print("Running: {}".format(" ".join(lammps_cmd)))
p = Popen(lammps_cmd, stdout=PIPE, stderr=PIPE)
p.wait()
(stdout, stderr) = p.communicate()
print("Done")
print(stdout, stderr)
if __name__ == '__main__':
ethanol_coords = [[0.00720, -0.56870, 0.00000],
[-1.28540, 0.24990, 0.00000],
[1.13040, 0.31470, 0.00000],
[0.03920, -1.19720, 0.89000],
[0.03920, -1.19720, -0.89000],
[-1.31750, 0.87840, 0.89000],
[-1.31750, 0.87840, -0.89000],
[-2.14220, -0.42390, -0.00000],
[1.98570, -0.13650, -0.00000]]
ethanol = Molecule(["C", "C", "O", "H", "H", "H", "H", "H", "H"],
ethanol_coords)
water_coords = [[9.626, 6.787, 12.673],
[9.626, 8.420, 12.673],
[10.203, 7.604, 12.673]]
water = Molecule(["H", "H", "O"], water_coords)
pmr = PackmolRunner([ethanol, water],
[{"number": 1, "fixed": [0, 0, 0, 0, 0, 0],
"centerofmass": ""},
{"number": 15, "inside sphere": [0, 0, 0, 5]}],
input_file="packmol_input.inp", tolerance=2.0,
filetype="xyz",
control_params={"nloop": 1000},
auto_box=False, output_file="cocktail.xyz")
s = pmr.run()
|
|
import logging
from collections import namedtuple
from typing import Union
import urllib
from sqlalchemy import create_engine, MetaData, Column, Table, select, asc, desc, and_
from sqlalchemy import engine
from sqlalchemy.sql import Select
from sqlalchemy.sql.functions import Function
from sqlalchemy.sql.expression import BinaryExpression
from sqlalchemy.engine import reflection
from grice.complex_filter import ComplexFilter, get_column
from grice.errors import ConfigurationError, NotFoundError, JoinError
log = logging.getLogger(__name__) # pylint: disable=invalid-name
DEFAULT_PAGE = 0
DEFAULT_PER_PAGE = 50
SORT_DIRECTIONS = ['asc', 'desc']
SUPPORTED_FUNCS = ['avg', 'count', 'min', 'max', 'sum', 'stddev_pop']
ColumnSort = namedtuple('ColumnSort', ['table_name', 'column_name', 'direction'])
ColumnPair = namedtuple('ColumnPair', ['from_column', 'to_column'])
TableJoin = namedtuple('TableJoin', ['table_name', 'column_pairs', 'outer_join'])
QueryArguments = namedtuple('QueryArguments', ['column_names', 'page', 'per_page', 'filters',
                                               'sorts', 'join', 'group_by', 'format_as_list'])
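# A hedged example of how these structures compose into a query request; the
# table and column names ("orders", "customers", "customer_id", "id") are
# hypothetical and only illustrate the shapes DBService.query_table expects,
# e.g. DBService(db_config).query_table('orders', _example_query_arguments()).
def _example_query_arguments():
    join = TableJoin(table_name='customers',
                     column_pairs=[ColumnPair(from_column='customer_id', to_column='id')],
                     outer_join=False)
    sorts = [ColumnSort(table_name='orders', column_name='id', direction='desc')]
    return QueryArguments(column_names=None, page=DEFAULT_PAGE, per_page=DEFAULT_PER_PAGE,
                          filters=None, sorts=sorts, join=join, group_by=None,
                          format_as_list=False)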
def init_database(db_config):
"""
Creates a SqlAlchemy engine object from a config file.
:param db_config:
:return: SqlAlchemy engine object.
"""
driver = db_config.get('driver', 'postgresql')
try:
db_args = {
'username': db_config['username'],
'password': db_config['password'],
'host': db_config['host'],
'port': db_config['port'],
'database': db_config['database']
}
if 'query' in db_config:
db_args['query'] = dict(urllib.parse.parse_qsl(db_config['query'], keep_blank_values=True))
except KeyError:
msg = '"username", "password", "host", "port", and "database" are required fields of database config'
raise ConfigurationError(msg)
eng_url = engine.url.URL(driver, **db_args)
return create_engine(eng_url)
def computed_column_to_dict(column: Union[Function, BinaryExpression]):
"""
Converts a SqlAlchemy object for a column that contains a computed value to a dict so we can return JSON.
:param column: a SqlAlchemy Function or a SqlAlchemy BinaryExpression
:return: dict
"""
if isinstance(column, Function):
data = {
'name': str(column),
'primary_key': column.primary_key,
'table': '<Function {}>'.format(column.name),
'type': column.type.__class__.__name__,
}
elif isinstance(column, BinaryExpression):
data = {
'name': str(column),
'primary_key': column.primary_key,
'table': '<BinaryExpression {}>'.format(column),
'type': column.type.__class__.__name__,
}
return data
def _column_to_dict(column: Column):
"""
Converts a SqlAlchemy Column object to a dict so we can return JSON.
:param column: a SqlAlchemy Column
:return: dict
"""
foreign_keys = []
for fk in column.foreign_keys:
fk_column = fk.column
foreign_keys.append({'name': fk_column.name, 'table_name': fk_column.table.name})
data = {
'name': column.name,
'primary_key': column.primary_key,
'nullable': column.nullable,
'type': column.type.__class__.__name__,
'foreign_keys': foreign_keys,
'table': column.table.name
}
return data
def column_to_dict(column):
"""
Converts a SqlAlchemy Column, or column-like object to a dict so we can return JSON.
:param column: a column
:return: dict
"""
if isinstance(column, Column):
return _column_to_dict(column)
return computed_column_to_dict(column)
def table_to_dict(table: Table):
"""
Converts a SqlAlchemy Table object to a dict so we can return JSON.
:param table: a SqlAlchemy Table
:return: dict
"""
return {
'name': table.name,
'schema': table.schema,
'columns': [column_to_dict(column) for column in table.columns]
}
def names_to_columns(column_names, table: Table, join_table: Table):
"""
Converts column names to columns. If column_names is None then we assume all columns are wanted.
:param column_names: list of column_name strings, can be None.
:param table: The main table.
:param join_table: The table we are joining on, can be None.
:return: list of SqlAlchemy column objects.
"""
if not column_names:
columns = table.columns.values()
if join_table is not None:
columns = columns + join_table.columns.values()
return columns
columns = []
for column_name in column_names:
column = get_column(column_name, [table, join_table])
if column is not None:
columns.append(column)
return columns
def apply_column_filters(query, table: Table, join_table: Table, filters: ComplexFilter):
"""
Apply the ColumnFilters from the filters object to the query.
    - The goal is to be smart when applying filters.
- multiple filters on a column should probably be OR'ed.
- if lt value is smaller than gt value then we probably want to OR (i.e. lt 60 OR gt 120)
- if lt value is bigger than gt value then we probably want to AND (i.e. lt 120 AND gt 60)
- alternatively allow BETWEEN and NOT BETWEEN, and if multiples just OR those.
- Filter sets between columns should be AND'ed.
:param query: SQLAlchemy Select object.
:param table: SQLAlchemy Table object.
:param join_table: SQLAlchemy Table object.
:param filters: The filters dict from db_controller.parse_filters: in form of column_name -> filters list
:return: A SQLAlchemy select object with filters applied.
"""
expression = filters.get_expression([table, join_table])
if expression is not None:
query = query.where(expression)
return query
def apply_column_sorts(query, table: Table, join_table: Table, sorts: dict):
"""
Adds sorts to a query object.
:param query: A SQLAlchemy select object.
:param table: The Table we are joining from.
:param join_table: The Table we are joining to.
:param sorts: List of ColumnSort objects.
    :return: A SQLAlchemy select object with the sorts applied.
"""
    for sort in sorts:
        column = None
        if sort.table_name == table.name:
            column = table.columns.get(sort.column_name, None)
        elif join_table is not None and sort.table_name == join_table.name:
            column = join_table.columns.get(sort.column_name, None)
if column is not None:
if sort.direction == 'asc':
query = query.order_by(asc(column))
if sort.direction == 'desc':
query = query.order_by(desc(column))
return query
def apply_group_by(query, table: Table, join_table: Table, group_by: list):
"""
    Adds GROUP BY clauses to a query object.
    :param query: A SQLAlchemy select object.
    :param table: The Table we are joining from.
    :param join_table: The Table we are joining to.
    :param group_by: List of column names to group by.
    :return: A SQLAlchemy select object with the group by clauses applied.
"""
for group in group_by:
column = get_column(group, [table, join_table])
if column is not None:
query = query.group_by(column)
return query
def apply_join(query: Select, table: Table, join_table: Table, join: TableJoin):
"""
    Performs an inner or outer join between two tables on a given query object.
TODO: enable multiple joins
:param query: A SQLAlchemy select object.
:param table: The Table we are joining from.
:param join_table: The Table we are joining to.
:param join: The Join object describing how to join the tables.
:return: A SQLAlchemy select object modified to join two tables.
"""
error_msg = 'Invalid join, "{}" is not a column on table "{}"'
join_conditions = []
for column_pair in join.column_pairs:
from_col = table.columns.get(column_pair.from_column)
to_col = join_table.columns.get(column_pair.to_column)
if from_col is None:
raise ValueError(error_msg.format(column_pair.from_column, table.name))
if to_col is None:
raise ValueError(error_msg.format(column_pair.to_column, join_table.name))
join_conditions.append(from_col == to_col)
return query.select_from(table.join(join_table, onclause=and_(*join_conditions), isouter=join.outer_join))
class DBService:
"""
TODO:
- Add methods for saving table queries
"""
def __init__(self, db_config):
self.meta = MetaData()
self.db = init_database(db_config)
self._reflect_database()
def _reflect_database(self):
"""
This method reflects the database and also instantiates an Inspector.
:return:
"""
self.meta.reflect(bind=self.db)
self.inspector = reflection.Inspector.from_engine(self.db)
def get_tables(self):
schemas = {}
for table in self.meta.sorted_tables:
schema = table.schema
if schema not in schemas:
schemas[schema] = {}
schemas[schema][table.name] = table_to_dict(table)
return schemas
def get_table(self, table_name):
table = self.meta.tables.get(table_name, None)
if table is None:
            raise NotFoundError('Table "{}" does not exist'.format(table_name))
return table_to_dict(table)
def query_table(self, table_name: str, quargs: QueryArguments): # pylint: disable=too-many-branches, too-many-statements, too-many-locals
table = self.meta.tables.get(table_name, None)
join_table = None
if quargs.join is not None:
join_table = self.meta.tables.get(quargs.join.table_name, None)
rows = []
if table is None:
            raise NotFoundError('Table "{}" does not exist'.format(table_name))
if quargs.join is not None and join_table is None:
raise JoinError('Invalid join. Table with name "{}" does not exist.'.format(quargs.join.table_name))
columns = names_to_columns(quargs.column_names, table, join_table)
if len(columns) == 0:
return [], []
query = select(columns).apply_labels()
if quargs.per_page > -1:
query = query.limit(quargs.per_page).offset(quargs.page * quargs.per_page)
if quargs.filters is not None:
query = apply_column_filters(query, table, join_table, quargs.filters)
if quargs.sorts is not None:
query = apply_column_sorts(query, table, join_table, quargs.sorts)
if quargs.join is not None:
query = apply_join(query, table, join_table, quargs.join)
if quargs.group_by is not None:
query = apply_group_by(query, table, join_table, quargs.group_by)
with self.db.connect() as conn:
log.debug("Query %s", query)
result = conn.execute(query)
if quargs.format_as_list:
# SQLalchemy is giving us the data in the correct format
rows = result
else:
column_name_map = {}
first_row = True
for row in result:
# Make friendlier names if possible
if first_row:
for column, column_label in zip(columns, row.keys()):
if isinstance(column, Column):
full_column_name = column.table.name + '.' + column.name
column_name_map[column_label] = full_column_name
first_row = False
data = {column_name_map.get(key, key): val for key, val in row.items()}
rows.append(data)
column_data = [column_to_dict(column) for column in columns]
return rows, column_data
if __name__ == '__main__':
import configparser
config = configparser.ConfigParser()
config.read('../config.ini')
s = DBService(config['database'])
|
|
"""
Tests for django test runner
"""
from __future__ import absolute_import, unicode_literals
import sys
from optparse import make_option
from django.core.exceptions import ImproperlyConfigured
from django.core.management import call_command
from django import db
from django.test import simple, TransactionTestCase, skipUnlessDBFeature
from django.test.simple import DjangoTestSuiteRunner, get_tests
from django.test.testcases import connections_support_transactions
from django.utils import unittest
from django.utils.importlib import import_module
from admin_scripts.tests import AdminScriptTestCase
from .models import Person
TEST_APP_OK = 'test_runner.valid_app.models'
TEST_APP_ERROR = 'test_runner.invalid_app.models'
class DependencyOrderingTests(unittest.TestCase):
def test_simple_dependencies(self):
raw = [
('s1', ('s1_db', ['alpha'])),
('s2', ('s2_db', ['bravo'])),
('s3', ('s3_db', ['charlie'])),
]
dependencies = {
'alpha': ['charlie'],
'bravo': ['charlie'],
}
ordered = simple.dependency_ordered(raw, dependencies=dependencies)
ordered_sigs = [sig for sig,value in ordered]
self.assertIn('s1', ordered_sigs)
self.assertIn('s2', ordered_sigs)
self.assertIn('s3', ordered_sigs)
self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s1'))
self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s2'))
def test_chained_dependencies(self):
raw = [
('s1', ('s1_db', ['alpha'])),
('s2', ('s2_db', ['bravo'])),
('s3', ('s3_db', ['charlie'])),
]
dependencies = {
'alpha': ['bravo'],
'bravo': ['charlie'],
}
ordered = simple.dependency_ordered(raw, dependencies=dependencies)
ordered_sigs = [sig for sig,value in ordered]
self.assertIn('s1', ordered_sigs)
self.assertIn('s2', ordered_sigs)
self.assertIn('s3', ordered_sigs)
# Explicit dependencies
self.assertLess(ordered_sigs.index('s2'), ordered_sigs.index('s1'))
self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s2'))
# Implied dependencies
self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s1'))
def test_multiple_dependencies(self):
raw = [
('s1', ('s1_db', ['alpha'])),
('s2', ('s2_db', ['bravo'])),
('s3', ('s3_db', ['charlie'])),
('s4', ('s4_db', ['delta'])),
]
dependencies = {
'alpha': ['bravo','delta'],
'bravo': ['charlie'],
'delta': ['charlie'],
}
ordered = simple.dependency_ordered(raw, dependencies=dependencies)
ordered_sigs = [sig for sig,aliases in ordered]
self.assertIn('s1', ordered_sigs)
self.assertIn('s2', ordered_sigs)
self.assertIn('s3', ordered_sigs)
self.assertIn('s4', ordered_sigs)
# Explicit dependencies
self.assertLess(ordered_sigs.index('s2'), ordered_sigs.index('s1'))
self.assertLess(ordered_sigs.index('s4'), ordered_sigs.index('s1'))
self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s2'))
self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s4'))
# Implicit dependencies
self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s1'))
def test_circular_dependencies(self):
raw = [
('s1', ('s1_db', ['alpha'])),
('s2', ('s2_db', ['bravo'])),
]
dependencies = {
'bravo': ['alpha'],
'alpha': ['bravo'],
}
self.assertRaises(ImproperlyConfigured, simple.dependency_ordered, raw, dependencies=dependencies)
def test_own_alias_dependency(self):
raw = [
('s1', ('s1_db', ['alpha', 'bravo']))
]
dependencies = {
'alpha': ['bravo']
}
with self.assertRaises(ImproperlyConfigured):
simple.dependency_ordered(raw, dependencies=dependencies)
# reordering aliases shouldn't matter
raw = [
('s1', ('s1_db', ['bravo', 'alpha']))
]
with self.assertRaises(ImproperlyConfigured):
simple.dependency_ordered(raw, dependencies=dependencies)
class MockTestRunner(object):
invoked = False
def __init__(self, *args, **kwargs):
pass
def run_tests(self, test_labels, extra_tests=None, **kwargs):
MockTestRunner.invoked = True
class ManageCommandTests(unittest.TestCase):
def test_custom_test_runner(self):
call_command('test', 'sites',
testrunner='test_runner.tests.MockTestRunner')
self.assertTrue(MockTestRunner.invoked,
"The custom test runner has not been invoked")
def test_bad_test_runner(self):
with self.assertRaises(AttributeError):
call_command('test', 'sites',
testrunner='test_runner.NonExistentRunner')
class CustomOptionsTestRunner(simple.DjangoTestSuiteRunner):
option_list = (
make_option('--option_a','-a', action='store', dest='option_a', default='1'),
make_option('--option_b','-b', action='store', dest='option_b', default='2'),
make_option('--option_c','-c', action='store', dest='option_c', default='3'),
)
def __init__(self, verbosity=1, interactive=True, failfast=True, option_a=None, option_b=None, option_c=None, **kwargs):
super(CustomOptionsTestRunner, self).__init__(verbosity=verbosity, interactive=interactive,
failfast=failfast)
self.option_a = option_a
self.option_b = option_b
self.option_c = option_c
def run_tests(self, test_labels, extra_tests=None, **kwargs):
print("%s:%s:%s" % (self.option_a, self.option_b, self.option_c))
class CustomTestRunnerOptionsTests(AdminScriptTestCase):
def setUp(self):
settings = {
'TEST_RUNNER': '\'test_runner.tests.CustomOptionsTestRunner\'',
}
self.write_settings('settings.py', sdict=settings)
def tearDown(self):
self.remove_settings('settings.py')
def test_default_options(self):
args = ['test', '--settings=test_project.settings']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, '1:2:3')
def test_default_and_given_options(self):
args = ['test', '--settings=test_project.settings', '--option_b=foo']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, '1:foo:3')
def test_option_name_and_value_separated(self):
args = ['test', '--settings=test_project.settings', '--option_b', 'foo']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, '1:foo:3')
def test_all_options_given(self):
args = ['test', '--settings=test_project.settings', '--option_a=bar',
'--option_b=foo', '--option_c=31337']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, 'bar:foo:31337')
class Ticket17477RegressionTests(AdminScriptTestCase):
def setUp(self):
self.write_settings('settings.py')
def tearDown(self):
self.remove_settings('settings.py')
def test_ticket_17477(self):
"""'manage.py help test' works after r16352."""
args = ['help', 'test']
out, err = self.run_manage(args)
self.assertNoOutput(err)
class ModulesTestsPackages(unittest.TestCase):
def test_get_tests(self):
"Check that the get_tests helper function can find tests in a directory"
module = import_module(TEST_APP_OK)
tests = get_tests(module)
self.assertIsInstance(tests, type(module))
def test_import_error(self):
"Test for #12658 - Tests with ImportError's shouldn't fail silently"
module = import_module(TEST_APP_ERROR)
self.assertRaises(ImportError, get_tests, module)
class Sqlite3InMemoryTestDbs(unittest.TestCase):
@unittest.skipUnless(all(db.connections[conn].vendor == 'sqlite' for conn in db.connections),
"This is a sqlite-specific issue")
def test_transaction_support(self):
"""Ticket #16329: sqlite3 in-memory test databases"""
old_db_connections = db.connections
for option in ('NAME', 'TEST_NAME'):
try:
db.connections = db.ConnectionHandler({
'default': {
'ENGINE': 'django.db.backends.sqlite3',
option: ':memory:',
},
'other': {
'ENGINE': 'django.db.backends.sqlite3',
option: ':memory:',
},
})
other = db.connections['other']
DjangoTestSuiteRunner(verbosity=0).setup_databases()
msg = "DATABASES setting '%s' option set to sqlite3's ':memory:' value shouldn't interfere with transaction support detection." % option
# Transaction support should be properly initialised for the 'other' DB
self.assertTrue(other.features.supports_transactions, msg)
# And all the DBs should report that they support transactions
self.assertTrue(connections_support_transactions(), msg)
finally:
db.connections = old_db_connections
class DummyBackendTest(unittest.TestCase):
def test_setup_databases(self):
"""
Test that setup_databases() doesn't fail with dummy database backend.
"""
runner = DjangoTestSuiteRunner(verbosity=0)
old_db_connections = db.connections
try:
db.connections = db.ConnectionHandler({})
old_config = runner.setup_databases()
runner.teardown_databases(old_config)
except Exception as e:
self.fail("setup_databases/teardown_databases unexpectedly raised "
"an error: %s" % e)
finally:
db.connections = old_db_connections
class DeprecationDisplayTest(AdminScriptTestCase):
# tests for 19546
def setUp(self):
settings = {'INSTALLED_APPS': '("test_runner.deprecation_app",)',
'DATABASES': '{"default": {"ENGINE":"django.db.backends.sqlite3", "NAME":":memory:"}}' }
self.write_settings('settings.py', sdict=settings)
def tearDown(self):
self.remove_settings('settings.py')
def test_runner_deprecation_verbosity_default(self):
args = ['test', '--settings=test_project.settings']
out, err = self.run_django_admin(args)
self.assertIn("DeprecationWarning: warning from test", err)
self.assertIn("DeprecationWarning: module-level warning from deprecation_app", err)
@unittest.skipIf(sys.version_info[:2] == (2, 6),
"On Python 2.6, DeprecationWarnings are visible anyway")
def test_runner_deprecation_verbosity_zero(self):
args = ['test', '--settings=settings', '--verbosity=0']
out, err = self.run_django_admin(args)
self.assertFalse("DeprecationWarning: warning from test" in err)
class AutoIncrementResetTest(TransactionTestCase):
"""
Here we test creating the same model two times in different test methods,
and check that both times they get "1" as their PK value. That is, we test
that AutoField values start from 1 for each transactional test case.
"""
reset_sequences = True
@skipUnlessDBFeature('supports_sequence_reset')
def test_autoincrement_reset1(self):
p = Person.objects.create(first_name='Jack', last_name='Smith')
self.assertEqual(p.pk, 1)
@skipUnlessDBFeature('supports_sequence_reset')
def test_autoincrement_reset2(self):
p = Person.objects.create(first_name='Jack', last_name='Smith')
self.assertEqual(p.pk, 1)
|
|
import pygame
import pygame.camera
from pygame.locals import *
#import wx
import time
from datetime import date
import numpy
import Image
import zbar
from pgu import gui
pygame.init()
pygame.camera.init()
automatic = False
pause = 0.1
size = ( 640 , 480 )
window_size = ( 640 , 480 )
shrunken = size
camlist = pygame.camera.list_cameras()
camera = pygame.camera.Camera(camlist[0], size, "RGBA")
camera.start()
b = (0, 0, 0xFF)
r = (0xFF, 0, 0)
t = (0x5A, 0xAA, 0xAA)
yellow = (255, 255, 0)
red = (255, 0, 0)
class CamControl:
def __init__(self):
self.going = True
self.window = pygame.display.set_mode( window_size, 0 )
pygame.display.set_caption("ArchiPhen - Root Phenotyping")
self.snapshot_raw = pygame.surface.Surface( size, 0, self.window)
self.last_array = None
self.diffs = None
self.ccolor = (0x5A, 0xAA, 0xAA)
self.scanner = zbar.ImageScanner()
# configure the reader
self.scanner.parse_config('enable')
#self.scanner.set_config(0, zbar.Config.ENABLE, 0)
#self.scanner.set_config(zbar.Symbol.QRCODE, zbar.Config.ENABLE, 1)
self.label = ""
self.date = date.today()
def readQRCODE(self, MASK):
self.label = ""
        pilImage = self.pygame_to_pil_img(MASK).convert('L')
        #pilImage = Image.open("qr.bmp").convert('L')
width, height = pilImage.size
raw = pilImage.tostring()
# wrap image data
image = zbar.Image(width, height, 'Y800', raw)
# scan the image for barcodes
self.scanner.scan(image)
# extract results
for symbol in image:
self.label = symbol.data
# clean up
del(image)
def pygame_to_pil_img(self, pg_surface):
imgstr = pygame.image.tostring(pg_surface, 'RGB')
return Image.fromstring('RGB', pg_surface.get_size(), imgstr)
def pil_to_pygame_img(self,pil_img):
imgstr = pil_img.tostring()
return pygame.image.fromstring(imgstr, pil_img.size, 'RGB')
def capture_image(self, date, id):
self.label = ""
def run(self):
        self.going = True
        # no detection rectangle exists until the first frame is processed
        max_rect = None
        while (self.going):
pygame.event.pump()
time.sleep(pause)
# Update video output
self.snapshot_raw = camera.get_image(self.snapshot_raw)
# Listen for keyboard
events = pygame.event.get()
for e in events:
if e.type == QUIT or (e.type == KEYDOWN and e.key == K_ESCAPE):
self.going = False
                if e.type == KEYDOWN and e.key == K_SPACE and max_rect is not None:
mask_size = (max_rect.right-max_rect.left,max_rect.bottom-max_rect.top)
self.snapshot_mask = pygame.surface.Surface( mask_size, 0, self.window)
s2d = pygame.surfarray.array2d( self.snapshot_raw)
s2d = s2d[max_rect.left:max_rect.right, max_rect.top:max_rect.bottom]
pygame.surfarray.blit_array(self.snapshot_mask, s2d)
self.readQRCODE(self.snapshot_mask)
if e.type == KEYDOWN and e.key == K_TAB:
app = gui.App()
app = gui.Desktop()
app.connect(gui.QUIT,app.quit,None)
##The table code is entered much like HTML.
##::
c = gui.Table()
c.tr()
c.td(gui.Label("Sample ID"))
c.tr()
c.td(gui.Label(""))
c.tr()
w = gui.Input(value='',size=8)
c.td(w,colspan=1)
def cb():
self.label = w.value
app.quit()
c.tr()
c.td(gui.Label(""))
c.tr()
btn = gui.Button("Manual Save")
btn.connect(gui.CLICK, cb)
c.td(btn,colspan=1)
app.run(c)
if e.type == KEYDOWN and e.key == K_RETURN and self.label != "" and self.date !="":
self.capture_image(self.date, self.label)
elif e.type == KEYDOWN and e.key == K_RETURN and (self.label == "" or self.date == ""):
app = gui.App()
app = gui.Desktop()
app.connect(gui.QUIT,app.quit,None)
c = gui.Table()
c.tr()
c.td(gui.Label("DATA NOT SAVED!"))
c.tr()
c.td(gui.Label(""))
c.tr()
def cb():
app.quit()
c.tr()
c.td(gui.Label(""))
c.tr()
btn = gui.Button("OK")
btn.connect(gui.CLICK, cb)
c.td(btn,colspan=1)
app.run(c)
m = pygame.mask.from_threshold(self.snapshot_raw, self.ccolor, (50, 50, 50))
max_rect = None
max_area = 0
for rect in m.get_bounding_rects():
if rect[2]*rect[3] > max_area:
max_area = rect[2]*rect[3]
max_rect = rect
if max_area>0:
pygame.draw.rect(self.snapshot_raw, (250,0,0), max_rect, 5)
# pick a font you have and set its size
# apply it to text on a label
if self.label == "":
myfont = pygame.font.SysFont("Comic Sans MS", 16)
label = myfont.render("No Genotype detected - press space bar decode QRcode ", 1, red)
else:
myfont = pygame.font.SysFont("Comic Sans MS", 16)
label = myfont.render("GENOTYPE: " + str(self.label), 1, yellow)
self.window.blit(self.snapshot_raw, (0,0))
self.window.blit(label, (2, 2))
pygame.display.flip()
def calibrate(self):
self.going = True
while (self.going):
# capture the image
self.snapshot_raw = camera.get_image(self.snapshot_raw)
# blit it to the display surface
#self.window.blit(self.snapshot_raw, (0,0))
# make a rect in the middle of the screen
crect = pygame.draw.rect(self.snapshot_raw, (255,0,0), (size[0]/2-20,size[1]/2-20,40,40), 1)
# get the average color of the area inside the rect
self.ccolor = pygame.transform.average_color(self.snapshot_raw, crect)
# pick a font you have and set its size
myfont = pygame.font.SysFont("Comic Sans MS", 16)
# apply it to text on a label
label = myfont.render("CALIBRATION: target color then pres Esc", 1, yellow)
self.window.blit(self.snapshot_raw, (0,0))
self.window.blit(label, (2, 2))
pygame.display.flip()
# Listen for keyboard
events = pygame.event.get()
for e in events:
if e.type == QUIT or (e.type == KEYDOWN and e.key == K_ESCAPE):
self.going = False
#
cam = CamControl()
cam.calibrate()
cam.run()
|
|
#!/usr/bin/env python3
import os
import sys
import argparse
import glob
import shutil
ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(ZULIP_PATH)
from scripts.lib.zulip_tools import run, run_as_root, OKBLUE, ENDC, \
get_dev_uuid_var_path, file_or_package_hash_updated
from version import PROVISION_VERSION
from tools.setup.generate_zulip_bots_static_files import generate_zulip_bots_static_files
VENV_PATH = "/srv/zulip-py3-venv"
VAR_DIR_PATH = os.path.join(ZULIP_PATH, 'var')
LOG_DIR_PATH = os.path.join(VAR_DIR_PATH, 'log')
UPLOAD_DIR_PATH = os.path.join(VAR_DIR_PATH, 'uploads')
TEST_UPLOAD_DIR_PATH = os.path.join(VAR_DIR_PATH, 'test_uploads')
COVERAGE_DIR_PATH = os.path.join(VAR_DIR_PATH, 'coverage')
NODE_TEST_COVERAGE_DIR_PATH = os.path.join(VAR_DIR_PATH, 'node-coverage')
XUNIT_XML_TEST_RESULTS_DIR_PATH = os.path.join(VAR_DIR_PATH, 'xunit-test-results')
is_travis = 'TRAVIS' in os.environ
# TODO: De-duplicate this with emoji_dump.py
EMOJI_CACHE_PATH = "/srv/zulip-emoji-cache"
if is_travis:
# In Travis CI, we don't have root access
EMOJI_CACHE_PATH = "/home/travis/zulip-emoji-cache"
UUID_VAR_PATH = get_dev_uuid_var_path()
user_id = os.getuid()
def setup_shell_profile(shell_profile):
# type: (str) -> None
shell_profile_path = os.path.expanduser(shell_profile)
def write_command(command):
# type: (str) -> None
if os.path.exists(shell_profile_path):
with open(shell_profile_path, 'r') as shell_profile_file:
lines = [line.strip() for line in shell_profile_file.readlines()]
if command not in lines:
with open(shell_profile_path, 'a+') as shell_profile_file:
shell_profile_file.writelines(command + '\n')
else:
with open(shell_profile_path, 'w') as shell_profile_file:
shell_profile_file.writelines(command + '\n')
source_activate_command = "source " + os.path.join(VENV_PATH, "bin", "activate")
write_command(source_activate_command)
if os.path.exists('/srv/zulip'):
write_command('cd /srv/zulip')
def main(options: argparse.Namespace) -> int:
setup_shell_profile('~/.bash_profile')
setup_shell_profile('~/.zprofile')
# This needs to happen before anything that imports zproject.settings.
run(["scripts/setup/generate_secrets.py", "--development"])
# create log directory `zulip/var/log`
os.makedirs(LOG_DIR_PATH, exist_ok=True)
# create upload directory `var/uploads`
os.makedirs(UPLOAD_DIR_PATH, exist_ok=True)
    # create test upload directory `var/test_uploads`
os.makedirs(TEST_UPLOAD_DIR_PATH, exist_ok=True)
# create coverage directory `var/coverage`
os.makedirs(COVERAGE_DIR_PATH, exist_ok=True)
# create linecoverage directory `var/node-coverage`
os.makedirs(NODE_TEST_COVERAGE_DIR_PATH, exist_ok=True)
# create XUnit XML test results directory`var/xunit-test-results`
os.makedirs(XUNIT_XML_TEST_RESULTS_DIR_PATH, exist_ok=True)
# The `build_emoji` script requires `emoji-datasource` package
# which we install via npm; thus this step is after installing npm
# packages.
if not os.path.isdir(EMOJI_CACHE_PATH):
run_as_root(["mkdir", EMOJI_CACHE_PATH])
run_as_root(["chown", "%s:%s" % (user_id, user_id), EMOJI_CACHE_PATH])
run(["tools/setup/emoji/build_emoji"])
# copy over static files from the zulip_bots package
generate_zulip_bots_static_files()
build_pygments_data_paths = ["tools/setup/build_pygments_data", "tools/setup/lang.json"]
from pygments import __version__ as pygments_version
if file_or_package_hash_updated(build_pygments_data_paths, "build_pygments_data_hash", options.is_force,
[pygments_version]):
run(["tools/setup/build_pygments_data"])
else:
print("No need to run `tools/setup/build_pygments_data`.")
update_authors_json_paths = ["tools/update-authors-json", "zerver/tests/fixtures/authors.json"]
if file_or_package_hash_updated(update_authors_json_paths, "update_authors_json_hash", options.is_force):
run(["tools/update-authors-json", "--use-fixture"])
else:
print("No need to run `tools/update-authors-json`.")
email_source_paths = ["tools/inline-email-css", "templates/zerver/emails/email.css"]
email_source_paths += glob.glob('templates/zerver/emails/*.source.html')
if file_or_package_hash_updated(email_source_paths, "last_email_source_files_hash", options.is_force):
run(["tools/inline-email-css"])
else:
print("No need to run `tools/inline-email-css`.")
if not options.is_production_travis:
# The following block is skipped for the production Travis
# suite, because that suite doesn't make use of these elements
# of the development environment (it just uses the development
# environment to build a release tarball).
# Need to set up Django before using template_database_status
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zproject.settings")
import django
django.setup()
from zerver.lib.test_fixtures import template_database_status, run_db_migrations, \
destroy_leaked_test_databases
try:
from zerver.lib.queue import SimpleQueueClient
SimpleQueueClient()
rabbitmq_is_configured = True
except Exception:
rabbitmq_is_configured = False
if options.is_force or not rabbitmq_is_configured:
run(["scripts/setup/configure-rabbitmq"])
else:
print("RabbitMQ is already configured.")
migration_status_path = os.path.join(UUID_VAR_PATH, "migration_status_dev")
dev_template_db_status = template_database_status(
migration_status=migration_status_path,
settings="zproject.settings",
database_name="zulip",
)
if options.is_force or dev_template_db_status == 'needs_rebuild':
run(["tools/setup/postgres-init-dev-db"])
run(["tools/do-destroy-rebuild-database"])
elif dev_template_db_status == 'run_migrations':
run_db_migrations('dev')
elif dev_template_db_status == 'current':
print("No need to regenerate the dev DB.")
test_template_db_status = template_database_status()
if options.is_force or test_template_db_status == 'needs_rebuild':
run(["tools/setup/postgres-init-test-db"])
run(["tools/do-destroy-rebuild-test-database"])
elif test_template_db_status == 'run_migrations':
run_db_migrations('test')
elif test_template_db_status == 'current':
print("No need to regenerate the test DB.")
# Consider updating generated translations data: both `.mo`
# files and `language-options.json`.
paths = ['zerver/management/commands/compilemessages.py']
paths += glob.glob('locale/*/LC_MESSAGES/*.po')
paths += glob.glob('locale/*/translations.json')
if file_or_package_hash_updated(paths, "last_compilemessages_hash", options.is_force):
run(["./manage.py", "compilemessages"])
else:
print("No need to run `manage.py compilemessages`.")
destroyed = destroy_leaked_test_databases()
if destroyed:
print("Dropped %s stale test databases!" % (destroyed,))
run(["scripts/lib/clean-unused-caches", "--threshold=6"])
# Keeping this cache file around can cause eslint to throw
# random TypeErrors when new/updated dependencies are added
if os.path.isfile('.eslintcache'):
# Remove this block when
# https://github.com/eslint/eslint/issues/11639 is fixed
# upstream.
os.remove('.eslintcache')
# Clean up the root of the `var/` directory for various
# testing-related files that we have migrated to
# `var/<uuid>/test-backend`.
print("Cleaning var/ directory files...")
var_paths = glob.glob('var/test*')
var_paths.append('var/bot_avatar')
for path in var_paths:
try:
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
except FileNotFoundError:
pass
version_file = os.path.join(UUID_VAR_PATH, 'provision_version')
print('writing to %s\n' % (version_file,))
    with open(version_file, 'w') as version_fh:
        version_fh.write(PROVISION_VERSION + '\n')
print()
print(OKBLUE + "Zulip development environment setup succeeded!" + ENDC)
return 0
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--force', action='store_true', dest='is_force',
default=False,
help="Ignore all provisioning optimizations.")
parser.add_argument('--production-travis', action='store_true',
dest='is_production_travis',
default=False,
help="Provision for Travis with production settings.")
options = parser.parse_args()
sys.exit(main(options))
|
|
# Copyright (c) 2008, Aldo Cortesi. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import copy
from .. import command, manager
class Layout(command.CommandObject):
"""
This class defines the API that should be exposed by all layouts.
"""
defaults = manager.Defaults()
def __init__(self, **config):
command.CommandObject.__init__(self)
self.defaults.load(self, config)
def layout(self, windows, screen):
assert windows, "let's eliminate unnecessary calls"
for i in windows:
self.configure(i, screen)
def clone(self, group):
"""
Make a copy of this layout. This is done to provide each group with
a unique instance of every layout.
:param group: Group to attach the new layout instance to.
"""
c = copy.copy(self)
c.group = group
return c
def focus(self, c):
"""
Called whenever the focus changes.
"""
pass
def blur(self):
"""
Called whenever focus is gone from this layout.
"""
pass
def add(self, c):
"""
Called whenever a window is added to the group, whether the layout
is current or not. The layout should just add the window to its
internal data structures, without mapping or configuring.
"""
pass
def remove(self, c):
"""
Called whenever a window is removed from the group, whether the
layout is current or not. The layout should just de-register the
window from its data structures, without unmapping the window.
Returns the "next" window that should gain focus or None.
"""
pass
def configure(self, c, screen):
"""
This method should:
- Configure the dimensions and borders of a window using the
.place() method.
- Call either .hide or .unhide on the window.
"""
raise NotImplementedError
def info(self):
"""
Returns a dictionary of layout information.
"""
return dict(
name = self.name,
group = self.group.name
)
def _items(self, name):
if name == "screen":
return True, None
elif name == "group":
return True, None
def _select(self, name, sel):
if name == "screen":
return self.group.screen
elif name == "group":
return self.group
def cmd_info(self):
"""
Return a dictionary of info for this object.
"""
return self.info()
def show(self, screen):
"""
Called when layout is being shown
"""
def hide(self):
"""
Called when layout is being hidden
"""
class SingleWindow(Layout):
"""Base for layouts with single visible window"""
def _get_window(self):
"""Should return either visible window or None"""
raise NotImplementedError("abstract method")
def configure(self, win, screen):
if win is self._get_window():
win.place(
screen.x, screen.y,
screen.width, screen.height,
0,
None,
)
win.unhide()
else:
win.hide()
def focus_first(self):
return self._get_window()
def focus_next(self, win):
return None
def focus_last(self):
return self._get_window()
def focus_prev(self, win):
return None
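# A minimal, hypothetical subclass (example only, not part of the original
# module) illustrating the SingleWindow contract described above: the layout
# only has to report which window should be visible via _get_window(); the
# inherited configure() then places that window full-screen and hides the rest.
class ExampleTopWindow(SingleWindow):
    def __init__(self, **config):
        SingleWindow.__init__(self, **config)
        self.clients = []
    def clone(self, group):
        c = Layout.clone(self, group)
        c.clients = []
        return c
    def _get_window(self):
        # Show the most recently added client, if any.
        return self.clients[-1] if self.clients else None
    def add(self, c):
        self.clients.append(c)
    def remove(self, c):
        if c in self.clients:
            self.clients.remove(c)
        return self._get_window()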
class Delegate(Layout):
"""Base for all delegation layouts"""
def __init__(self, **config):
self.layouts = {}
Layout.__init__(self, **config)
def clone(self, group):
c = Layout.clone(self, group)
c.layouts = {}
return c
def _get_layouts(self):
"""Returns all children layouts"""
raise NotImplementedError("abstact method")
def _get_active_layout(self):
"""Returns layout to which delegate commands to"""
raise NotImplementedError("abstrac method")
def delegate_layout(self, windows, mapping):
"""Delegates layouting actual windows
:param windows: windows to layout
:param mapping: mapping from layout to ScreenRect for each layout
"""
grouped = {}
for w in windows:
lay = self.layouts[w]
if lay in grouped:
grouped[lay].append(w)
else:
grouped[lay] = [w]
for lay, wins in grouped.iteritems():
lay.layout(wins, mapping[lay])
def remove(self, win):
lay = self.layouts.pop(win)
focus = lay.remove(win)
if not focus:
layouts = self._get_layouts()
idx = layouts.index(lay)
while idx < len(layouts)-1 and not focus:
idx += 1
focus = layouts[idx].focus_first()
return focus
def focus_first(self):
layouts = self._get_layouts()
for lay in layouts:
win = lay.focus_first()
if win:
return win
def focus_last(self):
layouts = self._get_layouts()
for lay in reversed(layouts):
win = lay.focus_last()
if win:
return win
def focus_next(self, win):
layouts = self._get_layouts()
cur = self.layouts[win]
focus = cur.focus_next(win)
if not focus:
idx = layouts.index(cur)
while idx < len(layouts)-1 and not focus:
idx += 1
focus = layouts[idx].focus_first()
return focus
def focus_prev(self, win):
layouts = self._get_layouts()
cur = self.layouts[win]
focus = cur.focus_prev(win)
if not focus:
idx = layouts.index(cur)
while idx > 0 and not focus:
idx -= 1
focus = layouts[idx].focus_last()
return focus
def cmd_up(self):
self._get_active_layout().cmd_up()
def cmd_down(self):
self._get_active_layout().cmd_down()
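# A hypothetical sketch (example only, not part of the original module) showing
# how a Delegate subclass wires the pieces together: _get_layouts() enumerates
# the child layouts, _get_active_layout() picks the one that receives commands,
# and layout() hands each child its share of the screen via delegate_layout().
# The _Rect namedtuple stands in for the real screen rectangle type, which only
# needs x/y/width/height attributes here; `top` and `bottom` are assumed to be
# Layout instances supplied by the caller.
import collections
_Rect = collections.namedtuple('_Rect', 'x y width height')
class ExampleHalfAndHalf(Delegate):
    def __init__(self, top, bottom, **config):
        self.top = top
        self.bottom = bottom
        Delegate.__init__(self, **config)
    def _get_layouts(self):
        return [self.top, self.bottom]
    def _get_active_layout(self):
        # For this sketch, commands always go to the top child.
        return self.top
    def add(self, c):
        # Alternate new windows between the two children and remember which
        # child owns which window, as the base class bookkeeping expects.
        child = self.top if len(self.layouts) % 2 == 0 else self.bottom
        self.layouts[c] = child
        child.add(c)
    def layout(self, windows, screen):
        half = screen.height // 2
        mapping = {
            self.top: _Rect(screen.x, screen.y, screen.width, half),
            self.bottom: _Rect(screen.x, screen.y + half, screen.width,
                               screen.height - half),
        }
        self.delegate_layout(windows, mapping)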
|
|
"""
.. _tut_stats_cluster_source_rANOVA:
======================================================================
Repeated measures ANOVA on source data with spatio-temporal clustering
======================================================================
This example illustrates how to make use of the clustering functions
for arbitrary, self-defined contrasts beyond standard t-tests. In this
case we will test whether the differences in evoked responses between
stimulation modality (visual vs. auditory) depend on the stimulus
location (left vs. right) for a group of subjects (simulated here
using one subject's data). For this purpose we will compute an
interaction effect using a repeated measures ANOVA. The multiple
comparisons problem is addressed with a cluster-level permutation test
across space and time.
"""
# Authors: Alexandre Gramfort <[email protected]>
# Eric Larson <[email protected]>
# Denis Engemannn <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
from numpy.random import randn
import matplotlib.pyplot as plt
import mne
from mne import (io, spatial_tris_connectivity, compute_morph_matrix,
grade_to_tris)
from mne.stats import (spatio_temporal_cluster_test, f_threshold_mway_rm,
f_mway_rm, summarize_clusters_stc)
from mne.minimum_norm import apply_inverse, read_inverse_operator
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
# --------------
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
subjects_dir = data_path + '/subjects'
tmin = -0.2
tmax = 0.3 # Use a lower tmax to reduce multiple comparisons
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
###############################################################################
# Read epochs for all channels, removing a bad one
# ------------------------------------------------
raw.info['bads'] += ['MEG 2443']
picks = mne.pick_types(raw.info, meg=True, eog=True, exclude='bads')
# we'll load all four conditions that make up the 'two ways' of our ANOVA
event_id = dict(l_aud=1, r_aud=2, l_vis=3, r_vis=4)
reject = dict(grad=1000e-13, mag=4000e-15, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, preload=True)
# Equalize trial counts to eliminate bias (which would otherwise be
# introduced by the abs() performed below)
epochs.equalize_event_counts(event_id, copy=False)
###############################################################################
# Transform to source space
# -------------------------
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
inverse_operator = read_inverse_operator(fname_inv)
# we'll only use one hemisphere to speed up this example
# instead of a second vertex array we'll pass an empty array
sample_vertices = [inverse_operator['src'][0]['vertno'], np.array([], int)]
# Let's average and compute inverse, then resample to speed things up
conditions = []
for cond in ['l_aud', 'r_aud', 'l_vis', 'r_vis']: # order is important
evoked = epochs[cond].average()
evoked.resample(50, npad='auto')
condition = apply_inverse(evoked, inverse_operator, lambda2, method)
# Let's only deal with t > 0, cropping to reduce multiple comparisons
condition.crop(0, None)
conditions.append(condition)
tmin = conditions[0].tmin
tstep = conditions[0].tstep
###############################################################################
# Transform to common cortical space
# ----------------------------------
#
# Normally you would read in estimates across several subjects and morph them
# to the same cortical space (e.g. fsaverage). For example purposes, we will
# simulate this by just having each "subject" have the same response (just
# noisy in source space) here.
#
# We'll only consider the left hemisphere in this tutorial.
n_vertices_sample, n_times = conditions[0].lh_data.shape
n_subjects = 7
print('Simulating data for %d subjects.' % n_subjects)
# Let's make sure our results replicate, so set the seed.
np.random.seed(0)
X = randn(n_vertices_sample, n_times, n_subjects, 4) * 10
for ii, condition in enumerate(conditions):
X[:, :, :, ii] += condition.lh_data[:, :, np.newaxis]
###############################################################################
# It's a good idea to spatially smooth the data, and for visualization
# purposes, let's morph these to fsaverage, which is a grade 5 source space
# with vertices 0:10242 for each hemisphere. Usually you'd have to morph
# each subject's data separately (and you might want to use morph_data
# instead), but here since all estimates are on 'sample' we can use one
# morph matrix for all the heavy lifting.
fsave_vertices = [np.arange(10242), np.array([], int)] # right hemi is empty
morph_mat = compute_morph_matrix('sample', 'fsaverage', sample_vertices,
fsave_vertices, 20, subjects_dir)
n_vertices_fsave = morph_mat.shape[0]
# We have to change the shape for the dot() to work properly
X = X.reshape(n_vertices_sample, n_times * n_subjects * 4)
print('Morphing data.')
X = morph_mat.dot(X) # morph_mat is a sparse matrix
X = X.reshape(n_vertices_fsave, n_times, n_subjects, 4)
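# In a real group analysis each subject has its own source estimates, so you
# would compute and apply a separate morph matrix per subject instead of
# reusing a single one as we do above. The function below is a hedged sketch of
# that loop; `stcs_by_subject` and `vertices_by_subject` are hypothetical
# containers (subject name -> SourceEstimate / vertex arrays), and the function
# is only defined here for illustration -- it is not called by this example.
def morph_each_subject(stcs_by_subject, vertices_by_subject, fsave_vertices,
                       subjects_dir):
    morphed = dict()
    for subject, stc in stcs_by_subject.items():
        morph_mat_subj = compute_morph_matrix(subject, 'fsaverage',
                                              vertices_by_subject[subject],
                                              fsave_vertices, 20, subjects_dir)
        # morph_mat_subj is sparse; applying it maps this subject's data onto
        # the fsaverage vertices.
        morphed[subject] = morph_mat_subj.dot(stc.data)
    return morphed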
###############################################################################
# Now we need to prepare the group matrix for the ANOVA statistic. To make the
# clustering function work correctly with the ANOVA function, X needs to be a
# list of multi-dimensional arrays (one per condition) of shape: samples
# (subjects) x time x space.
#
# First we permute dimensions, then split the array into a list of conditions
# and discard the empty dimension resulting from the split using numpy squeeze.
X = np.transpose(X, [2, 1, 0, 3])
X = [np.squeeze(x) for x in np.split(X, 4, axis=-1)]
###############################################################################
# Prepare function for arbitrary contrast
# ---------------------------------------
# As our ANOVA function is a multi-purpose tool we need to apply a few
# modifications to integrate it with the clustering function. This
# includes reshaping data, setting default arguments and processing
# the return values. For this reason we'll write a tiny dummy function.
#
# We will tell the ANOVA how to interpret the data matrix in terms of
# factors. This is done via the factor levels argument, which is a list
# of the number of factor levels for each factor.
factor_levels = [2, 2]
###############################################################################
# Finally we will pick the interaction effect by passing 'A:B'
# (this notation is borrowed from the R formula language). Without this,
# the main effects would also be returned.
effects = 'A:B'
# Tell the ANOVA not to compute p-values which we don't need for clustering
return_pvals = False
# a few more convenient bindings
n_times = X[0].shape[1]
n_conditions = 4
###############################################################################
# A stat_fun must deal with a variable number of input arguments.
#
# Inside the clustering function each condition will be passed as a flattened
# array, as required by the clustering procedure. The ANOVA however expects an
# input array of dimensions: subjects X conditions X observations (optional).
#
# The following function catches the list input, swaps the first and second
# dimensions, and finally calls the ANOVA.
#
# Note. for further details on this ANOVA function consider the
# corresponding
# :ref:`time-frequency tutorial <tut_stats_cluster_sensor_rANOVA_tfr>`.
def stat_fun(*args):
return f_mway_rm(np.swapaxes(args, 1, 0), factor_levels=factor_levels,
effects=effects, return_pvals=return_pvals)[0]
# get f-values only.
###############################################################################
# Compute clustering statistic
# ----------------------------
#
# To use an algorithm optimized for spatio-temporal clustering, we
# just pass the spatial connectivity matrix (instead of spatio-temporal).
source_space = grade_to_tris(5)
# as we only have one hemisphere we only need half the connectivity
lh_source_space = source_space[source_space[:, 0] < 10242]
print('Computing connectivity.')
connectivity = spatial_tris_connectivity(lh_source_space)
# Now let's actually do the clustering. Please relax: on a small
# notebook with a single thread this will take a couple of minutes ...
pthresh = 0.0005
f_thresh = f_threshold_mway_rm(n_subjects, factor_levels, effects, pthresh)
# To speed things up a bit we will ...
n_permutations = 128 # ... run fewer permutations (reduces sensitivity)
print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu = \
spatio_temporal_cluster_test(X, connectivity=connectivity, n_jobs=1,
threshold=f_thresh, stat_fun=stat_fun,
n_permutations=n_permutations,
buffer_size=None)
# Now select the clusters that are sig. at p < 0.05 (note that this value
# is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
###############################################################################
# Visualize the clusters
# ----------------------
print('Visualizing clusters.')
# Now let's build a convenient representation of each cluster, where each
# cluster becomes a "time point" in the SourceEstimate
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
vertices=fsave_vertices,
subject='fsaverage')
# Let's actually plot the first "time point" in the SourceEstimate, which
# shows all the clusters, weighted by duration
subjects_dir = op.join(data_path, 'subjects')
# The brighter the color, the stronger the interaction between
# stimulus modality and stimulus location
brain = stc_all_cluster_vis.plot(subjects_dir=subjects_dir, colormap='mne',
views='lateral',
time_label='Duration significant (ms)')
brain.save_image('cluster-lh.png')
brain.show_view('medial')
###############################################################################
# Finally, let's investigate the interaction effect by reconstructing the time
# courses
inds_t, inds_v = [(clusters[cluster_ind]) for ii, cluster_ind in
enumerate(good_cluster_inds)][0] # first cluster
times = np.arange(X[0].shape[1]) * tstep * 1e3
plt.figure()
colors = ['y', 'b', 'g', 'purple']
event_ids = ['l_aud', 'r_aud', 'l_vis', 'r_vis']
for ii, (condition, color, eve_id) in enumerate(zip(X, colors, event_ids)):
# extract time course at cluster vertices
condition = condition[:, :, inds_v]
# normally we would normalize values across subjects but
# here we use data from the same subject so we're good to just
# create average time series across subjects and vertices.
mean_tc = condition.mean(axis=2).mean(axis=0)
std_tc = condition.std(axis=2).std(axis=0)
plt.plot(times, mean_tc.T, color=color, label=eve_id)
plt.fill_between(times, mean_tc + std_tc, mean_tc - std_tc, color='gray',
alpha=0.5, label='')
ymin, ymax = mean_tc.min() - 5, mean_tc.max() + 5
plt.xlabel('Time (ms)')
plt.ylabel('Activation (F-values)')
plt.xlim(times[[0, -1]])
plt.ylim(ymin, ymax)
plt.fill_betweenx((ymin, ymax), times[inds_t[0]],
times[inds_t[-1]], color='orange', alpha=0.3)
plt.legend()
plt.title('Interaction between stimulus-modality and location.')
plt.show()
|
|
import logging
import warnings
import flask
import werkzeug.exceptions
from airflow._vendor.connexion.apis import flask_utils
from airflow._vendor.connexion.apis.abstract import AbstractAPI
from airflow._vendor.connexion.handlers import AuthErrorHandler
from airflow._vendor.connexion.jsonifier import Jsonifier
from airflow._vendor.connexion.lifecycle import ConnexionRequest, ConnexionResponse
from airflow._vendor.connexion.utils import is_json_mimetype, yamldumper
from werkzeug.local import LocalProxy
logger = logging.getLogger('connexion.apis.flask_api')
class FlaskApi(AbstractAPI):
def _set_base_path(self, base_path):
super(FlaskApi, self)._set_base_path(base_path)
self._set_blueprint()
def _set_blueprint(self):
logger.debug('Creating API blueprint: %s', self.base_path)
endpoint = flask_utils.flaskify_endpoint(self.base_path)
self.blueprint = flask.Blueprint(endpoint, __name__, url_prefix=self.base_path,
template_folder=str(self.options.openapi_console_ui_from_dir))
def add_openapi_json(self):
"""
Adds spec json to {base_path}/swagger.json
or {base_path}/openapi.json (for oas3)
"""
logger.debug('Adding spec json: %s/%s', self.base_path,
self.options.openapi_spec_path)
endpoint_name = "{name}_openapi_json".format(name=self.blueprint.name)
self.blueprint.add_url_rule(self.options.openapi_spec_path,
endpoint_name,
self._handlers.get_json_spec)
def add_openapi_yaml(self):
"""
Adds spec yaml to {base_path}/swagger.yaml
or {base_path}/openapi.yaml (for oas3)
"""
if not self.options.openapi_spec_path.endswith("json"):
return
openapi_spec_path_yaml = \
self.options.openapi_spec_path[:-len("json")] + "yaml"
logger.debug('Adding spec yaml: %s/%s', self.base_path,
openapi_spec_path_yaml)
endpoint_name = "{name}_openapi_yaml".format(name=self.blueprint.name)
self.blueprint.add_url_rule(
openapi_spec_path_yaml,
endpoint_name,
self._handlers.get_yaml_spec
)
def add_swagger_ui(self):
"""
Adds swagger ui to {base_path}/ui/
"""
console_ui_path = self.options.openapi_console_ui_path.strip('/')
logger.debug('Adding swagger-ui: %s/%s/',
self.base_path,
console_ui_path)
if self.options.openapi_console_ui_config is not None:
config_endpoint_name = "{name}_swagger_ui_config".format(name=self.blueprint.name)
config_file_url = '/{console_ui_path}/swagger-ui-config.json'.format(
console_ui_path=console_ui_path)
self.blueprint.add_url_rule(config_file_url,
config_endpoint_name,
lambda: flask.jsonify(self.options.openapi_console_ui_config))
static_endpoint_name = "{name}_swagger_ui_static".format(name=self.blueprint.name)
static_files_url = '/{console_ui_path}/<path:filename>'.format(
console_ui_path=console_ui_path)
self.blueprint.add_url_rule(static_files_url,
static_endpoint_name,
self._handlers.console_ui_static_files)
index_endpoint_name = "{name}_swagger_ui_index".format(name=self.blueprint.name)
console_ui_url = '/{console_ui_path}/'.format(
console_ui_path=console_ui_path)
self.blueprint.add_url_rule(console_ui_url,
index_endpoint_name,
self._handlers.console_ui_home)
def add_auth_on_not_found(self, security, security_definitions):
"""
Adds a 404 error handler to authenticate and only expose the 404 status if the security validation passes.
"""
logger.debug('Adding path not found authentication')
not_found_error = AuthErrorHandler(self, werkzeug.exceptions.NotFound(), security=security,
security_definitions=security_definitions)
endpoint_name = "{name}_not_found".format(name=self.blueprint.name)
self.blueprint.add_url_rule('/<path:invalid_path>', endpoint_name, not_found_error.function)
def _add_operation_internal(self, method, path, operation):
operation_id = operation.operation_id
logger.debug('... Adding %s -> %s', method.upper(), operation_id,
extra=vars(operation))
flask_path = flask_utils.flaskify_path(path, operation.get_path_parameter_types())
endpoint_name = flask_utils.flaskify_endpoint(operation.operation_id,
operation.randomize_endpoint)
function = operation.function
self.blueprint.add_url_rule(flask_path, endpoint_name, function, methods=[method])
@property
def _handlers(self):
# type: () -> InternalHandlers
if not hasattr(self, '_internal_handlers'):
self._internal_handlers = InternalHandlers(self.base_path, self.options, self.specification)
return self._internal_handlers
@classmethod
def get_response(cls, response, mimetype=None, request=None):
"""Gets ConnexionResponse instance for the operation handler
result. Status Code and Headers for response. If only body
data is returned by the endpoint function, then the status
code will be set to 200 and no headers will be added.
If the returned object is a flask.Response then it will just
pass the information needed to recreate it.
:type response: flask.Response | (flask.Response,) | (flask.Response, int) | (flask.Response, dict) | (flask.Response, int, dict)
:rtype: ConnexionResponse
"""
return cls._get_response(response, mimetype=mimetype, extra_context={"url": flask.request.url})
@classmethod
def _is_framework_response(cls, response):
""" Return True if provided response is a framework type """
return flask_utils.is_flask_response(response)
@classmethod
def _framework_to_connexion_response(cls, response, mimetype):
""" Cast framework response class to ConnexionResponse used for schema validation """
return ConnexionResponse(
status_code=response.status_code,
mimetype=response.mimetype,
content_type=response.content_type,
headers=response.headers,
body=response.get_data(),
)
@classmethod
def _connexion_to_framework_response(cls, response, mimetype, extra_context=None):
""" Cast ConnexionResponse to framework response class """
flask_response = cls._build_response(
mimetype=response.mimetype or mimetype,
content_type=response.content_type,
headers=response.headers,
status_code=response.status_code,
data=response.body,
extra_context=extra_context,
)
return flask_response
@classmethod
def _build_response(cls, mimetype, content_type=None, headers=None, status_code=None, data=None, extra_context=None):
if cls._is_framework_response(data):
return flask.current_app.make_response((data, status_code, headers))
data, status_code, serialized_mimetype = cls._prepare_body_and_status_code(data=data, mimetype=mimetype, status_code=status_code, extra_context=extra_context)
kwargs = {
'mimetype': mimetype or serialized_mimetype,
'content_type': content_type,
'headers': headers,
'response': data,
'status': status_code
}
kwargs = {k: v for k, v in kwargs.items() if v is not None}
return flask.current_app.response_class(**kwargs)
@classmethod
def _serialize_data(cls, data, mimetype):
# TODO: harmonize flask and aiohttp serialization when mimetype=None or mimetype is not JSON
# (cases where it might not make sense to jsonify the data)
if (isinstance(mimetype, str) and is_json_mimetype(mimetype)):
body = cls.jsonifier.dumps(data)
elif not (isinstance(data, bytes) or isinstance(data, str)):
warnings.warn(
"Implicit (flask) JSON serialization will change in the next major version. "
"This is triggered because a response body is being serialized as JSON "
"even though the mimetype is not a JSON type. "
"This will be replaced by something that is mimetype-specific and may "
"raise an error instead of silently converting everything to JSON. "
"Please make sure to specify media/mime types in your specs.",
FutureWarning # a Deprecation targeted at application users.
)
body = cls.jsonifier.dumps(data)
else:
body = data
return body, mimetype
@classmethod
def get_request(cls, *args, **params):
# type: (*Any, **Any) -> ConnexionRequest
"""Gets ConnexionRequest instance for the operation handler
result. Status Code and Headers for response. If only body
data is returned by the endpoint function, then the status
code will be set to 200 and no headers will be added.
If the returned object is a flask.Response then it will just
pass the information needed to recreate it.
:rtype: ConnexionRequest
"""
context_dict = {}
setattr(flask._request_ctx_stack.top, 'connexion_context', context_dict)
flask_request = flask.request
request = ConnexionRequest(
flask_request.url,
flask_request.method,
headers=flask_request.headers,
form=flask_request.form,
query=flask_request.args,
body=flask_request.get_data(),
json_getter=lambda: flask_request.get_json(silent=True),
files=flask_request.files,
path_params=params,
context=context_dict
)
logger.debug('Getting data and status code',
extra={
'data': request.body,
'data_type': type(request.body),
'url': request.url
})
return request
@classmethod
def _set_jsonifier(cls):
"""
Use Flask specific JSON loader
"""
cls.jsonifier = Jsonifier(flask.json, indent=2)
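# Illustrative sketch only (not part of connexion): the return shapes an
# operation handler may use, which FlaskApi.get_response() above normalizes
# into a ConnexionResponse. A bare body defaults to status 200 with no extra
# headers; tuples add a status code and/or a headers dict. The handler and the
# header name below are hypothetical.
def _example_operation_handler():
    body = {"status": "created"}
    # Equally valid alternatives:
    #   return body                        -> 200, no extra headers
    #   return body, 201                   -> explicit status code
    #   return body, {"X-Trace": "abc"}    -> extra headers, status 200
    return body, 201, {"X-Trace": "abc"}   # body, status code, headers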
def _get_context():
return getattr(flask._request_ctx_stack.top, 'connexion_context')
context = LocalProxy(_get_context)
class InternalHandlers(object):
"""
Flask handlers for internally registered endpoints.
"""
def __init__(self, base_path, options, specification):
self.base_path = base_path
self.options = options
self.specification = specification
def console_ui_home(self):
"""
Home page of the OpenAPI Console UI.
:return:
"""
openapi_json_route_name = "{blueprint}.{prefix}_openapi_json"
escaped = flask_utils.flaskify_endpoint(self.base_path)
openapi_json_route_name = openapi_json_route_name.format(
blueprint=escaped,
prefix=escaped
)
template_variables = {
'openapi_spec_url': flask.url_for(openapi_json_route_name)
}
if self.options.openapi_console_ui_config is not None:
template_variables['configUrl'] = 'swagger-ui-config.json'
return flask.render_template('index.j2', **template_variables)
def console_ui_static_files(self, filename):
"""
Serves the static files for the OpenAPI Console UI.
:param filename: Name of the requested static file.
:return:
"""
# convert PosixPath to str
static_dir = str(self.options.openapi_console_ui_from_dir)
return flask.send_from_directory(static_dir, filename)
def get_json_spec(self):
return flask.jsonify(self._spec_for_prefix())
def get_yaml_spec(self):
return yamldumper(self._spec_for_prefix()), 200, {"Content-Type": "text/yaml"}
def _spec_for_prefix(self):
"""
Modify base_path in the spec based on incoming url
This fixes problems with reverse proxies changing the path.
"""
base_path = flask.url_for(flask.request.endpoint).rsplit("/", 1)[0]
return self.specification.with_base_path(base_path).raw
|
|
# -*- encoding: utf-8
from sqlalchemy import and_
from sqlalchemy import Column
from sqlalchemy import DDL
from sqlalchemy import desc
from sqlalchemy import event
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Identity
from sqlalchemy import Integer
from sqlalchemy import literal
from sqlalchemy import or_
from sqlalchemy import PrimaryKeyConstraint
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy.dialects.mssql import base as mssql
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import config
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.assertsql import CursorSQL
from sqlalchemy.testing.assertsql import DialectSQL
class IdentityInsertTest(fixtures.TablesTest, AssertsCompiledSQL):
__only_on__ = "mssql"
__dialect__ = mssql.MSDialect()
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"cattable",
metadata,
Column("id", Integer),
Column("description", String(50)),
PrimaryKeyConstraint("id", name="PK_cattable"),
)
def test_compiled(self):
cattable = self.tables.cattable
self.assert_compile(
cattable.insert().values(id=9, description="Python"),
"INSERT INTO cattable (id, description) "
"VALUES (:id, :description)",
)
def test_execute(self, connection):
conn = connection
cattable = self.tables.cattable
conn.execute(cattable.insert().values(id=9, description="Python"))
cats = conn.execute(cattable.select().order_by(cattable.c.id))
eq_([(9, "Python")], list(cats))
result = conn.execute(cattable.insert().values(description="PHP"))
eq_(result.inserted_primary_key, (10,))
lastcat = conn.execute(cattable.select().order_by(desc(cattable.c.id)))
eq_((10, "PHP"), lastcat.first())
def test_executemany(self, connection):
conn = connection
cattable = self.tables.cattable
conn.execute(
cattable.insert(),
[
{"id": 89, "description": "Python"},
{"id": 8, "description": "Ruby"},
{"id": 3, "description": "Perl"},
{"id": 1, "description": "Java"},
],
)
cats = conn.execute(cattable.select().order_by(cattable.c.id))
eq_(
[(1, "Java"), (3, "Perl"), (8, "Ruby"), (89, "Python")],
list(cats),
)
conn.execute(
cattable.insert(),
[{"description": "PHP"}, {"description": "Smalltalk"}],
)
lastcats = conn.execute(
cattable.select().order_by(desc(cattable.c.id)).limit(2)
)
eq_([(91, "Smalltalk"), (90, "PHP")], list(lastcats))
def test_insert_plain_param(self, connection):
conn = connection
cattable = self.tables.cattable
conn.execute(cattable.insert(), dict(id=5))
eq_(conn.scalar(select(cattable.c.id)), 5)
def test_insert_values_key_plain(self, connection):
conn = connection
cattable = self.tables.cattable
conn.execute(cattable.insert().values(id=5))
eq_(conn.scalar(select(cattable.c.id)), 5)
def test_insert_values_key_expression(self, connection):
conn = connection
cattable = self.tables.cattable
conn.execute(cattable.insert().values(id=literal(5)))
eq_(conn.scalar(select(cattable.c.id)), 5)
def test_insert_values_col_plain(self, connection):
conn = connection
cattable = self.tables.cattable
conn.execute(cattable.insert().values({cattable.c.id: 5}))
eq_(conn.scalar(select(cattable.c.id)), 5)
def test_insert_values_col_expression(self, connection):
conn = connection
cattable = self.tables.cattable
conn.execute(cattable.insert().values({cattable.c.id: literal(5)}))
eq_(conn.scalar(select(cattable.c.id)), 5)
@testing.requires.schemas
def test_insert_using_schema_translate(self, connection, metadata):
t = Table(
"t",
metadata,
Column("id", Integer),
Column("description", String(50)),
PrimaryKeyConstraint("id", name="PK_cattable"),
schema=None,
)
conn = connection.execution_options(
schema_translate_map={None: config.test_schema}
)
metadata.create_all(conn)
conn.execute(t.insert().values({"id": 1, "description": "descrip"}))
eq_(conn.execute(select(t)).first(), (1, "descrip"))
class QueryTest(testing.AssertsExecutionResults, fixtures.TestBase):
__only_on__ = "mssql"
__backend__ = True
def test_fetchid_trigger(self, metadata, connection):
# TODO: investigate test hang on mssql when connection fixture is used
"""
Verify identity return value on inserting to a trigger table.
MSSQL's OUTPUT INSERTED clause does not work for the
case of a table having an identity (autoincrement)
primary key column, and which also has a trigger configured
to fire upon each insert and subsequently perform an
insert into a different table.
SQLAlchemy's MSSQL dialect by default will attempt to
use an OUTPUT INSERTED clause, which in this case will
raise the following error:
ProgrammingError: (ProgrammingError) ('42000', 334,
"[Microsoft][SQL Server Native Client 10.0][SQL Server]The
target table 't1' of the DML statement cannot have any enabled
triggers if the statement contains an OUTPUT clause without
INTO clause.", 7748) 'INSERT INTO t1 (descr) OUTPUT inserted.id
VALUES (?)' ('hello',)
This test verifies a workaround, which is to rely on the
older SCOPE_IDENTITY() call, which still works for this scenario.
To enable the workaround, the Table must be instantiated
with the init parameter 'implicit_returning = False'.
"""
# TODO: this same test needs to be tried in a multithreaded context
# with multiple threads inserting to the same table.
# TODO: check whether this error also occurs with clients other
# than the SQL Server Native Client. Maybe an assert_raises
# test should be written.
meta = metadata
t1 = Table(
"t1",
meta,
Column("id", Integer, Identity(start=100), primary_key=True),
Column("descr", String(200)),
# the following flag will prevent the
# MSSQLCompiler.returning_clause from getting called,
# though the ExecutionContext will still have a
# _select_lastrowid, so the SELECT SCOPE_IDENTITY() will
# hopefully be called instead.
implicit_returning=False,
)
t2 = Table(
"t2",
meta,
Column("id", Integer, Identity(start=200), primary_key=True),
Column("descr", String(200)),
)
event.listen(
meta,
"after_create",
DDL(
"create trigger paj on t1 for insert as "
"insert into t2 (descr) select descr from inserted"
),
)
# this DROP is not actually needed since SQL Server transactional
# DDL is reverting it with the connection fixture. however,
# since we can use "if exists" it's safe to have this here in
# case things change.
event.listen(
meta, "before_drop", DDL("""drop trigger if exists paj""")
)
# seems to work with all linux drivers + backend. not sure
# if windows drivers / servers have different behavior here.
meta.create_all(connection)
r = connection.execute(t2.insert(), dict(descr="hello"))
eq_(r.inserted_primary_key, (200,))
r = connection.execute(t1.insert(), dict(descr="hello"))
eq_(r.inserted_primary_key, (100,))
def test_compiler_symbol_conflict(self, connection, metadata):
t = Table("t", metadata, Column("POSTCOMPILE_DATA", String(50)))
t.create(connection)
connection.execute(t.insert().values(POSTCOMPILE_DATA="some data"))
eq_(
connection.scalar(
select(t.c.POSTCOMPILE_DATA).where(
t.c.POSTCOMPILE_DATA.in_(["some data", "some other data"])
)
),
"some data",
)
@testing.provide_metadata
def _test_disable_scope_identity(self):
engine = engines.testing_engine(options={"use_scope_identity": False})
metadata = self.metadata
t1 = Table(
"t1",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(50)),
implicit_returning=False,
)
metadata.create_all(engine)
with self.sql_execution_asserter(engine) as asserter:
with engine.begin() as conn:
conn.execute(t1.insert(), {"data": "somedata"})
# TODO: need a dialect SQL that acts like Cursor SQL
asserter.assert_(
DialectSQL(
"INSERT INTO t1 (data) VALUES (:data)", {"data": "somedata"}
),
CursorSQL(
"SELECT @@identity AS lastrowid", consume_statement=False
),
)
@testing.provide_metadata
def test_enable_scope_identity(self):
engine = engines.testing_engine(options={"use_scope_identity": True})
metadata = self.metadata
t1 = Table(
"t1",
metadata,
Column("id", Integer, primary_key=True),
implicit_returning=False,
)
metadata.create_all(engine)
with self.sql_execution_asserter(engine) as asserter:
with engine.begin() as conn:
conn.execute(t1.insert())
# even with pyodbc, we don't embed the scope identity on a
# DEFAULT VALUES insert
asserter.assert_(
CursorSQL(
"INSERT INTO t1 DEFAULT VALUES", consume_statement=False
),
CursorSQL(
"SELECT scope_identity() AS lastrowid", consume_statement=False
),
)
@testing.only_on("mssql+pyodbc")
@testing.provide_metadata
def test_embedded_scope_identity(self):
engine = engines.testing_engine(options={"use_scope_identity": True})
metadata = self.metadata
t1 = Table(
"t1",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(50)),
implicit_returning=False,
)
metadata.create_all(engine)
with self.sql_execution_asserter(engine) as asserter:
with engine.begin() as conn:
conn.execute(t1.insert(), {"data": "somedata"})
# pyodbc-specific system
asserter.assert_(
CursorSQL(
"INSERT INTO t1 (data) VALUES (?); select scope_identity()",
("somedata",),
consume_statement=False,
)
)
@testing.provide_metadata
def test_insertid_schema(self, connection):
meta = self.metadata
tbl = Table(
"test",
meta,
Column("id", Integer, primary_key=True),
schema=testing.config.test_schema,
)
tbl.create(connection)
connection.execute(tbl.insert(), {"id": 1})
eq_(connection.scalar(tbl.select()), 1)
@testing.provide_metadata
def test_returning_no_autoinc(self, connection):
meta = self.metadata
table = Table(
"t1",
meta,
Column("id", Integer, primary_key=True),
Column("data", String(50)),
)
table.create(connection)
result = connection.execute(
table.insert()
.values(id=1, data=func.lower("SomeString"))
.returning(table.c.id, table.c.data)
)
eq_(result.fetchall(), [(1, "somestring")])
@testing.provide_metadata
def test_delete_schema(self, connection):
meta = self.metadata
tbl = Table(
"test",
meta,
Column("id", Integer, primary_key=True),
schema=testing.config.test_schema,
)
tbl.create(connection)
connection.execute(tbl.insert(), {"id": 1})
eq_(connection.scalar(tbl.select()), 1)
connection.execute(tbl.delete().where(tbl.c.id == 1))
eq_(connection.scalar(tbl.select()), None)
@testing.provide_metadata
def test_insertid_reserved(self, connection):
meta = self.metadata
table = Table("select", meta, Column("col", Integer, primary_key=True))
table.create(connection)
connection.execute(table.insert(), {"col": 7})
eq_(connection.scalar(table.select()), 7)
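# A minimal application-level sketch of the workaround described in
# QueryTest.test_fetchid_trigger above (the table and column names here are
# hypothetical): declaring the table with implicit_returning=False makes the
# MSSQL dialect fall back to SCOPE_IDENTITY() instead of an OUTPUT clause,
# which is required when an INSERT trigger is attached to the table.
def example_trigger_table(metadata_obj):
    return Table(
        "audited",
        metadata_obj,
        Column("id", Integer, Identity(start=1), primary_key=True),
        Column("descr", String(200)),
        implicit_returning=False,  # avoid OUTPUT INSERTED with triggers
    )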
class Foo:
def __init__(self, **kw):
for k in kw:
setattr(self, k, kw[k])
def full_text_search_missing():
"""Test if full text search is not implemented and return False if
it is and True otherwise."""
if not testing.against("mssql"):
return True
with testing.db.connect() as conn:
result = conn.exec_driver_sql(
"SELECT cast(SERVERPROPERTY('IsFullTextInstalled') as integer)"
)
return result.scalar() == 0
class MatchTest(fixtures.TablesTest, AssertsCompiledSQL):
__only_on__ = "mssql"
__skip_if__ = (full_text_search_missing,)
__backend__ = True
run_setup_tables = "once"
run_inserts = run_deletes = "once"
@classmethod
def define_tables(cls, metadata):
Table(
"cattable",
metadata,
Column("id", Integer),
Column("description", String(50)),
PrimaryKeyConstraint("id", name="PK_cattable"),
)
Table(
"matchtable",
metadata,
Column("id", Integer),
Column("title", String(200)),
Column("category_id", Integer, ForeignKey("cattable.id")),
PrimaryKeyConstraint("id", name="PK_matchtable"),
)
event.listen(
metadata,
"before_create",
DDL("CREATE FULLTEXT CATALOG Catalog AS DEFAULT"),
)
event.listen(
metadata,
"after_create",
DDL(
"""CREATE FULLTEXT INDEX
ON cattable (description)
KEY INDEX PK_cattable"""
),
)
event.listen(
metadata,
"after_create",
DDL(
"""CREATE FULLTEXT INDEX
ON matchtable (title)
KEY INDEX PK_matchtable"""
),
)
event.listen(
metadata,
"after_drop",
DDL("DROP FULLTEXT CATALOG Catalog"),
)
@classmethod
def setup_bind(cls):
return testing.db.execution_options(isolation_level="AUTOCOMMIT")
@classmethod
def setup_test_class(cls):
with testing.db.connect().execution_options(
isolation_level="AUTOCOMMIT"
) as conn:
try:
conn.exec_driver_sql("DROP FULLTEXT CATALOG Catalog")
except:
pass
@classmethod
def insert_data(cls, connection):
cattable, matchtable = cls.tables("cattable", "matchtable")
connection.execute(
cattable.insert(),
[
{"id": 1, "description": "Python"},
{"id": 2, "description": "Ruby"},
],
)
connection.execute(
matchtable.insert(),
[
{
"id": 1,
"title": "Web Development with Rails",
"category_id": 2,
},
{"id": 2, "title": "Dive Into Python", "category_id": 1},
{
"id": 3,
"title": "Programming Matz's Ruby",
"category_id": 2,
},
{"id": 4, "title": "Guide to Django", "category_id": 1},
{"id": 5, "title": "Python in a Nutshell", "category_id": 1},
],
)
# apparently this is needed! index must run asynchronously
connection.execute(DDL("WAITFOR DELAY '00:00:05'"))
def test_expression(self):
matchtable = self.tables.matchtable
self.assert_compile(
matchtable.c.title.match("somstr"),
"CONTAINS (matchtable.title, ?)",
)
def test_simple_match(self, connection):
matchtable = self.tables.matchtable
results = connection.execute(
matchtable.select()
.where(matchtable.c.title.match("python"))
.order_by(matchtable.c.id)
).fetchall()
eq_([2, 5], [r.id for r in results])
def test_simple_match_with_apostrophe(self, connection):
matchtable = self.tables.matchtable
results = connection.execute(
matchtable.select().where(matchtable.c.title.match("Matz's"))
).fetchall()
eq_([3], [r.id for r in results])
def test_simple_prefix_match(self, connection):
matchtable = self.tables.matchtable
results = connection.execute(
matchtable.select().where(matchtable.c.title.match('"nut*"'))
).fetchall()
eq_([5], [r.id for r in results])
def test_simple_inflectional_match(self, connection):
matchtable = self.tables.matchtable
results = connection.execute(
matchtable.select().where(
matchtable.c.title.match('FORMSOF(INFLECTIONAL, "dives")')
)
).fetchall()
eq_([2], [r.id for r in results])
def test_or_match(self, connection):
matchtable = self.tables.matchtable
results1 = connection.execute(
matchtable.select()
.where(
or_(
matchtable.c.title.match("nutshell"),
matchtable.c.title.match("ruby"),
)
)
.order_by(matchtable.c.id)
).fetchall()
eq_([3, 5], [r.id for r in results1])
results2 = connection.execute(
matchtable.select()
.where(matchtable.c.title.match("nutshell OR ruby"))
.order_by(matchtable.c.id)
).fetchall()
eq_([3, 5], [r.id for r in results2])
def test_and_match(self, connection):
matchtable = self.tables.matchtable
results1 = connection.execute(
matchtable.select().where(
and_(
matchtable.c.title.match("python"),
matchtable.c.title.match("nutshell"),
)
)
).fetchall()
eq_([5], [r.id for r in results1])
results2 = connection.execute(
matchtable.select().where(
matchtable.c.title.match("python AND nutshell")
)
).fetchall()
eq_([5], [r.id for r in results2])
def test_match_across_joins(self, connection):
matchtable = self.tables.matchtable
cattable = self.tables.cattable
results = connection.execute(
matchtable.select()
.where(
and_(
cattable.c.id == matchtable.c.category_id,
or_(
cattable.c.description.match("Ruby"),
matchtable.c.title.match("nutshell"),
),
)
)
.order_by(matchtable.c.id)
).fetchall()
eq_([1, 3, 5], [r.id for r in results])
class TableValuedTest(fixtures.TestBase):
__backend__ = True
__only_on__ = "mssql"
@testing.fixture
def scalar_strings(self, connection):
connection.exec_driver_sql(
"""
CREATE FUNCTION scalar_strings (
)
RETURNS TABLE
AS
RETURN
SELECT
my_string
FROM (
VALUES ('some string'), ('some string'), ('some string')
) AS my_tab(my_string)
"""
)
yield
connection.exec_driver_sql("DROP FUNCTION scalar_strings")
@testing.fixture
def two_strings(self, connection):
connection.exec_driver_sql(
"""
CREATE FUNCTION three_pairs (
)
RETURNS TABLE
AS
RETURN
SELECT
s1 AS string1, s2 AS string2
FROM (
VALUES ('a', 'b'), ('c', 'd'), ('e', 'f')
) AS my_tab(s1, s2)
"""
)
yield
connection.exec_driver_sql("DROP FUNCTION three_pairs")
def test_scalar_strings_control(self, scalar_strings, connection):
result = (
connection.exec_driver_sql(
"SELECT my_string FROM scalar_strings()"
)
.scalars()
.all()
)
eq_(result, ["some string"] * 3)
def test_scalar_strings_named_control(self, scalar_strings, connection):
result = (
connection.exec_driver_sql(
"SELECT anon_1.my_string " "FROM scalar_strings() AS anon_1"
)
.scalars()
.all()
)
eq_(result, ["some string"] * 3)
def test_scalar_strings(self, scalar_strings, connection):
fn = func.scalar_strings().table_valued("my_string")
result = connection.execute(select(fn.c.my_string)).scalars().all()
eq_(result, ["some string"] * 3)
def test_two_strings_control(self, two_strings, connection):
result = connection.exec_driver_sql(
"SELECT string1, string2 FROM three_pairs ()"
).all()
eq_(result, [("a", "b"), ("c", "d"), ("e", "f")])
def test_two_strings(self, two_strings, connection):
fn = func.three_pairs().table_valued("string1", "string2")
result = connection.execute(select(fn.c.string1, fn.c.string2)).all()
eq_(result, [("a", "b"), ("c", "d"), ("e", "f")])
|
|
#!/usr/bin/python2.6
from eventlet.queue import LifoQueue as Queue
import eventlet
eventlet.monkey_patch()
from clusto import script_helper
from sgext.drivers import SGServer, EC2Zone
from clusto.drivers import Pool
from clusto.services.config import conf, get_logger
import clusto
import sgext
import kombu
from traceback import format_exc
from time import sleep, time
import logging
import sys
QUEUE_HOSTS = conf('barker.hosts')
QUEUE_EXCHANGE = conf('barker.exchange')
QUEUE_NAME = conf('barker.queue')
QUEUE_VHOST = conf('barker.vhost')
QUEUE_USER = conf('barker.user')
QUEUE_PASSWORD = conf('barker.password')
EC2_SUBKEYS = {
'ami-id': 'ami',
'kernel-id': 'kernel',
'instance-type': 'type',
'local-hostname': 'private-dns',
'public-hostname': 'public-dns',
}
log = get_logger('clusto.barker', level='DEBUG')
def barker_callback(body):
if not 'ec2' in body:
return
if not 'instance-id' in body['ec2']:
return
ec2 = body['ec2']
log.debug(ec2['instance-id'])
try:
clusto.begin_transaction()
server = clusto.get_or_create(ec2['instance-id'], SGServer)
if not server.attr_values(key='ec2', subkey='instance-id'):
server.set_attr(key='ec2', subkey='instance-id', value=ec2['instance-id'])
zone = clusto.get(ec2['placement'])
if not zone:
zone = EC2Zone(ec2['placement'])
else:
zone = zone[0]
if not server in zone:
zone.insert(server)
for key, subkey in EC2_SUBKEYS.items():
server.set_attr(key='ec2', subkey=subkey, value=ec2[key])
previous_ec2sg = server.attr_values(key='ec2',subkey='security-group')
for group in set(previous_ec2sg).difference(set(ec2['security-groups'])):
server.del_attrs(key='ec2',subkey='security-group', value=group)
for i,group in enumerate(sorted(ec2['security-groups'])):
server.set_attr(key='ec2', subkey='security-group', number=i, value=group)
if group.find('_') != -1:
environment, role = group.lower().split('_', 1)
p = clusto.get_or_create(environment, Pool)
if not p.attrs(key='pooltype', value='environment'):
p.set_attr(key='pooltype', value='environment')
if not server in p:
p.insert(server)
#server.bind_ip_to_osport(ec2['local-ipv4'], 'nic-eth', 0)
#server.bind_ip_to_osport(ec2['public-ipv4'], 'nic-eth', 0)
if len(server.attrs(key='ip', subkey='ipstring')) != 2:
server.del_attrs(key='ip', subkey='ipstring')
server.add_attr(key='ip', subkey='ipstring', value=ec2['local-ipv4'], number=0)
server.add_attr(key='ip', subkey='ipstring', value=ec2['public-ipv4'], number=0)
system = body['os']
server.set_attr(key='system', subkey='memory',
value=int(system['memory']['MemTotal']) / 1024)
server.set_attr(key='system', subkey='hostname',
value=system['hostname'])
server.set_attr(key='system', subkey='os',
value=system['operatingsystemrelease'])
if 'cpu' in system and len(system['cpu']) > 0:
server.set_attr(key='system', subkey='cputype',
value=system['cpu'][0]['model name'])
server.set_attr(key='system', subkey='cpucount',
value=len(system['cpu']))
server.set_attr(key='system', subkey='cpucache',
value=system['cpu'][0]['cache size'])
if 'kernelrelease' in system:
server.set_attr(key='system', subkey='kernelrelease',
value=system['kernelrelease'])
previous_disk = server.attr_key_tuples(key='disk')
incoming_disk = []
blockmap = [(v.replace('/dev/', ''), k) for k, v in ec2['block-device-mapping'].items() if k != 'root']
blockmap = dict(blockmap)
total_disk = 0
for i, disk in enumerate(system['disks']):
for subkey in disk.keys():
server.set_attr(key='disk', subkey=subkey, number=i, value=str(disk[subkey]))
incoming_disk.append(('disk',i,subkey))
if disk['osname'] in blockmap:
server.set_attr(key='disk', subkey='ec2-type', number=i, value=blockmap[disk['osname']])
incoming_disk.append(('disk',i,'ec2-type'))
total_disk += disk['size']
total_disk = total_disk / 1073741824
server.set_attr(key='system', subkey='disk', value=total_disk)
for attr in set(previous_disk).difference(set(incoming_disk)):
server.del_attrs(key=attr[0],subkey=attr[2],number=attr[1])
for subkey, value in body.get('sgmetadata', {}).items():
server.set_attr(key='sgmetadata', subkey=subkey, value=value)
if subkey == 'clusterid' and value:
cluster = clusto.get_or_create(value, Pool)
if not cluster.attrs(key='pooltype', value='clusterid'):
cluster.set_attr(key='pooltype', value='clusterid')
if not server in cluster:
cluster.insert(server)
if subkey == 'role' and value:
if len(server.attr_values(key='puppet', subkey='class', merge_container_attrs=True)) == 0:
server.set_attr(key='puppet', subkey='class',
value='site::role::%s' % value)
p = clusto.get_or_create(value, Pool)
if not p.attrs(key='pooltype', value='role'):
p.set_attr(key='pooltype', value='role')
if not server in p:
p.insert(server)
if len(server.attr_values(key='puppet', subkey='class', merge_container_attrs=True)) == 0:
log.warning('Found host %s with no role set, using site::role::base' % ec2['instance-id'])
server.set_attr(key='puppet', subkey='class',
value='site::role::base')
#server.set_attr(key='barker', subkey='last_updated', value=int(time()))
try:
owners = body['owners']
for owner, reason in owners.iteritems():
server.set_attr(key='owner', subkey=owner, value=reason)
except KeyError:
pass
clusto.commit()
except:
log.warning('Exception from %s: %s' % (ec2['instance-id'], format_exc()))
clusto.rollback_transaction()
class BarkerConsumer(clusto.script_helper.Script):
def __init__(self):
clusto.script_helper.Script.__init__(self)
self.queue = None
def callback(self, body, message):
if self.queue.qsize() > 500:
log.warning('Dropping message, queue size is over 500')
return
self.queue.put(body)
def run(self, args):
self.queue = Queue()
for hostname in QUEUE_HOSTS:
eventlet.spawn_n(self.consumer, hostname)
while True:
body = self.queue.get()
log.debug('Queue size %s' % self.queue.qsize())
barker_callback(body)
def consumer(self, hostname):
exchange = kombu.Exchange(QUEUE_EXCHANGE, type='fanout',
delivery_mode='transient')
queue = kombu.Queue(QUEUE_NAME, exchange)
connection = None
try:
connection = kombu.BrokerConnection(
hostname=hostname,
userid=QUEUE_USER,
password=QUEUE_PASSWORD,
virtual_host=QUEUE_VHOST)
channel = connection.channel()
consumer = kombu.Consumer(channel, queue,
callbacks=[self.callback],
no_ack=True)
consumer.consume()
log.info('%s consumer running' % hostname)
while True:
try:
connection.drain_events()
except Exception, e:
log.error(str(e))
except Exception, e:
log.error(format_exc())
raise e
finally:
if connection:
connection.release()
def main():
barker_consumer, args = script_helper.init_arguments(BarkerConsumer)
return barker_consumer.run(args)
if __name__ == '__main__':
sys.exit(main())
|
|
import logging
from consts.ranking_indexes import RankingIndexes
from models.event_details import EventDetails
class RankingsHelper(object):
SORT_ORDERS = {
2019: [2, 3, 4, 5, 6],
2018: [2, 3, 4, 5, 6],
2017: [2, 3, 4, 5, 6, 7],
2016: [2, 3, 4, 5, 6],
2015: [2, 5, 3, 4, 7, 6],
2014: [2, 3, 4, 5, 6],
2013: [2, 3, 4, 5],
2012: [2, 3, 4, 5],
2011: [6, 7],
2010: [3, 4, 5],
2009: [6, 7, 8],
2008: [6, 7, 8],
2007: [6, 7, 8],
}
SORT_ORDER_INFO = {
2019: [
{'name': 'Ranking Score',
'precision': 2},
{'name': 'Cargo',
'precision': 0},
{'name': 'Hatch Panel',
'precision': 0},
{'name': 'HAB Climb',
'precision': 0},
{'name': 'Sandstorm Bonus',
'precision': 0}],
2018: [
{'name': 'Ranking Score',
'precision': 2},
{'name': 'Park/Climb Points',
'precision': 0},
{'name': 'Auto',
'precision': 0},
{'name': 'Ownership',
'precision': 0},
{'name': 'Vault',
'precision': 0}],
2017: [
{'name': 'Ranking Score',
'precision': 2},
{'name': 'Match Points',
'precision': 0},
{'name': 'Auto',
'precision': 0},
{'name': 'Rotor',
'precision': 0},
{'name': 'Touchpad',
'precision': 0},
{'name': 'Pressure',
'precision': 0}],
2016: [
{'name': 'Ranking Score',
'precision': 0},
{'name': 'Auto',
'precision': 0},
{'name': 'Scale/Challenge',
'precision': 0},
{'name': 'Goals',
'precision': 0},
{'name': 'Defense',
'precision': 0}],
2015: [
{'name': 'Qual Avg.',
'precision': 1},
{'name': 'Coopertition',
'precision': 0},
{'name': 'Auto',
'precision': 0},
{'name': 'Container',
'precision': 0},
{'name': 'Tote',
'precision': 0},
{'name': 'Litter',
'precision': 0}],
2014: [
{'name': 'Qual Score',
'precision': 0},
{'name': 'Assist',
'precision': 0},
{'name': 'Auto',
'precision': 0},
{'name': 'Truss & Catch',
'precision': 0},
{'name': 'Teleop',
'precision': 0}],
2013: [
{'name': 'Qual Score',
'precision': 0},
{'name': 'Auto',
'precision': 0},
{'name': 'Climb',
'precision': 0},
{'name': 'Teleop',
'precision': 0}],
2012: [
{'name': 'Qual Score',
'precision': 0},
{'name': 'Hybrid',
'precision': 0},
{'name': 'Bridge',
'precision': 0},
{'name': 'Teleop',
'precision': 0}],
2011: [
{'name': 'Qual Score',
'precision': 0},
{'name': 'Ranking Score',
'precision': 2}],
2010: [
{'name': 'Seeding Score',
'precision': 0},
{'name': 'Coopertition Bonus',
'precision': 0},
{'name': 'Hanging Points',
'precision': 0}],
2009: [
{'name': 'Qual Score',
'precision': 0},
{'name': 'Seeding Score',
'precision': 2},
{'name': 'Match Points',
'precision': 0}],
2008: [
{'name': 'Qual Score',
'precision': 0},
{'name': 'Seeding Score',
'precision': 2},
{'name': 'Match Points',
'precision': 0}],
2007: [
{'name': 'Qual Score',
'precision': 0},
{'name': 'Seeding Score',
'precision': 2},
{'name': 'Match Points',
'precision': 0}],
2006: [
{'name': 'Qual Score',
'precision': 0},
{'name': 'Seeding Score',
'precision': 2},
{'name': 'Match Points',
'precision': 0}],
}
NO_RECORD_YEARS = {2010, 2015}
QUAL_AVERAGE_YEARS = {2015}
@classmethod
def build_ranking(cls, year, rank, team_key, wins, losses, ties, qual_average, matches_played, dq, sort_orders):
if year in cls.NO_RECORD_YEARS:
record = None
else:
record = {
'wins': int(wins),
'losses': int(losses),
'ties': int(ties),
}
if year not in cls.QUAL_AVERAGE_YEARS:
qual_average = None
else:
qual_average = float(qual_average)
sort_orders_sanitized = []
for so in sort_orders:
try:
sort_orders_sanitized.append(float(so))
except:
sort_orders_sanitized.append(0.0)
return {
'rank': int(rank),
'team_key': team_key,
'record': record, # None if record doesn't affect rank (e.g. 2010, 2015)
'qual_average': qual_average, # None if qual_average doesn't affect rank (all years except 2015)
'matches_played': int(matches_played),
'dq': int(dq),
'sort_orders': sort_orders_sanitized,
}
@classmethod
def get_sort_order_info(cls, event_details):
year = event_details.year
if event_details.key.id() == '2015mttd': # 2015mttd played the 2014 game
year = 2014
return cls.SORT_ORDER_INFO.get(year)
@classmethod
def convert_rankings(cls, event_details):
"""
Converts event_details.rankings to event_details.rankings2
"""
if not event_details.rankings:
return None
year = event_details.year
if event_details.key.id() == '2015mttd': # 2015mttd played the 2014 game
year = 2014
# Look up indexes
mp_index = RankingIndexes.MATCHES_PLAYED.get(year)
if mp_index is None:
return
ranking_index = RankingIndexes.RECORD_INDEXES.get(year)
dq_index = None
# Overwrite indexes in case things are different
for i, name in enumerate(event_details.rankings[0]):
name = name.lower()
if name == 'played':
mp_index = i
if name == 'dq':
dq_index = i
sort_order_indices = cls.SORT_ORDERS[year]
# Special case for offseasons with different ordering
if year == 2015 and event_details.rankings[0][3].lower() == 'coopertition':
sort_order_indices = [2, 3, 5, 4, 6, 7]
rankings2 = []
for row in event_details.rankings[1:]:
if ranking_index is None:
wins = 0
losses = 0
ties = 0
elif type(ranking_index) == tuple:
wins = row[ranking_index[0]]
losses = row[ranking_index[1]]
ties = row[ranking_index[2]]
else:
wins, losses, ties = row[ranking_index].split('-')
if dq_index is None:
dq = 0
else:
dq = int(row[dq_index])
if year == 2015:
qual_average = row[RankingIndexes.CUMULATIVE_RANKING_SCORE.get(year)]
else:
qual_average = None
sort_orders = [row[index] for index in sort_order_indices]
rankings2.append(cls.build_ranking(
year, int(row[0]), 'frc{}'.format(row[1]), wins, losses, ties, qual_average, row[mp_index], dq, sort_orders))
return rankings2
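# A small illustrative sketch (hypothetical team and numbers) of the dict that
# build_ranking() produces for a single row; convert_rankings() above emits one
# such dict per team when translating the legacy `rankings` table.
def _example_build_2019_ranking():
    return RankingsHelper.build_ranking(
        year=2019,
        rank=1,
        team_key='frc254',
        wins=10, losses=2, ties=0,
        qual_average=None,  # ignored for 2019 (not a qual-average year)
        matches_played=12,
        dq=0,
        sort_orders=[3.2, 150, 40, 24, 10],  # order matches SORT_ORDER_INFO[2019]
    )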
|
|
# -*- coding: utf-8 -*-
"""Mimic C structs with lots of extra functionality.
$Id: ipstruct.py 1950 2006-11-28 19:15:35Z vivainio $"""
#*****************************************************************************
# Copyright (C) 2001-2004 Fernando Perez <[email protected]>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
from IPython import Release
__author__ = '%s <%s>' % Release.authors['Fernando']
__license__ = Release.license
__all__ = ['Struct']
import types
import pprint
from IPython.genutils import list2dict2
class Struct:
"""Class to mimic C structs but also provide convenient dictionary-like
functionality.
Instances can be initialized with a dictionary, a list of key=value pairs
or both. If both are present, the dictionary must come first.
Because Python classes provide direct assignment to their members, it's
easy to overwrite normal methods (S.copy = 1 would destroy access to
S.copy()). For this reason, all builtin method names are protected and
can't be assigned to. An attempt to do s.copy=1 or s['copy']=1 will raise
a KeyError exception. If you really want to, you can bypass this
protection by directly assigning to __dict__: s.__dict__['copy']=1 will
still work. Doing this will break functionality, though. As in most of
Python, namespace protection is weakly enforced, so feel free to shoot
yourself in the foot if you really want to.
Note that this class uses more memory and is *much* slower than a regular
dictionary, so be careful in situations where memory or performance are
critical. But for day to day use it should behave fine. It is particularly
convenient for storing configuration data in programs.
+,+=,- and -= are implemented. +/+= do merges (non-destructive updates),
    -/-= remove keys from the original. See the method descriptions.
This class allows a quick access syntax: both s.key and s['key'] are
valid. This syntax has a limitation: each 'key' has to be explicitly
accessed by its original name. The normal s.key syntax doesn't provide
access to the keys via variables whose values evaluate to the desired
keys. An example should clarify this:
Define a dictionary and initialize both with dict and k=v pairs:
>>> d={'a':1,'b':2}
>>> s=Struct(d,hi=10,ho=20)
The return of __repr__ can be used to create a new instance:
>>> s
Struct({'ho': 20, 'b': 2, 'hi': 10, 'a': 1})
__str__ (called by print) shows it's not quite a regular dictionary:
>>> print s
Struct {a: 1, b: 2, hi: 10, ho: 20}
Access by explicitly named key with dot notation:
>>> s.a
1
Or like a dictionary:
>>> s['a']
1
If you want a variable to hold the key value, only dictionary access works:
>>> key='hi'
>>> s.key
Traceback (most recent call last):
File "<stdin>", line 1, in ?
AttributeError: Struct instance has no attribute 'key'
>>> s[key]
10
Another limitation of the s.key syntax (and Struct(key=val)
initialization): keys can't be numbers. But numeric keys can be used and
accessed using the dictionary syntax. Again, an example:
This doesn't work:
>>> s=Struct(4='hi')
SyntaxError: keyword can't be an expression
But this does:
>>> s=Struct()
>>> s[4]='hi'
>>> s
Struct({4: 'hi'})
>>> s[4]
'hi'
"""
# Attributes to which __setitem__ and __setattr__ will block access.
# Note: much of this will be moot in Python 2.2 and will be done in a much
# cleaner way.
__protected = ('copy dict dictcopy get has_attr has_key items keys '
'merge popitem setdefault update values '
'__make_dict __dict_invert ').split()
def __init__(self,dict=None,**kw):
"""Initialize with a dictionary, another Struct, or by giving
explicitly the list of attributes.
Both can be used, but the dictionary must come first:
Struct(dict), Struct(k1=v1,k2=v2) or Struct(dict,k1=v1,k2=v2).
"""
self.__dict__['__allownew'] = True
if dict is None:
dict = {}
if isinstance(dict,Struct):
dict = dict.dict()
elif dict and type(dict) is not types.DictType:
raise TypeError,\
'Initialize with a dictionary or key=val pairs.'
dict.update(kw)
# do the updating by hand to guarantee that we go through the
# safety-checked __setitem__
for k,v in dict.items():
self[k] = v
def __setitem__(self,key,value):
"""Used when struct[key] = val calls are made."""
if key in Struct.__protected:
raise KeyError,'Key '+`key`+' is a protected key of class Struct.'
if not self['__allownew'] and key not in self.__dict__:
raise KeyError(
"Can't create unknown attribute %s - Check for typos, or use allow_new_attr to create new attributes!" %
key)
self.__dict__[key] = value
def __setattr__(self, key, value):
"""Used when struct.key = val calls are made."""
self.__setitem__(key,value)
def __str__(self):
"""Gets called by print."""
return 'Struct('+ pprint.pformat(self.__dict__)+')'
def __repr__(self):
"""Gets called by repr.
A Struct can be recreated with S_new=eval(repr(S_old))."""
return self.__str__()
def __getitem__(self,key):
"""Allows struct[key] access."""
return self.__dict__[key]
def __contains__(self,key):
"""Allows use of the 'in' operator."""
return self.__dict__.has_key(key)
def __iadd__(self,other):
"""S += S2 is a shorthand for S.merge(S2)."""
self.merge(other)
return self
def __add__(self,other):
"""S + S2 -> New Struct made form S and S.merge(S2)"""
Sout = self.copy()
Sout.merge(other)
return Sout
def __sub__(self,other):
"""Return S1-S2, where all keys in S2 have been deleted (if present)
from S1."""
Sout = self.copy()
Sout -= other
return Sout
def __isub__(self,other):
"""Do in place S = S - S2, meaning all keys in S2 have been deleted
(if present) from S1."""
        for k in other.keys():
            if self.has_key(k):
                del self.__dict__[k]
        return self  # must return self so that S -= S2 (and __sub__ above) keep the Struct
def __make_dict(self,__loc_data__,**kw):
"Helper function for update and merge. Return a dict from data."
if __loc_data__ == None:
dict = {}
elif type(__loc_data__) is types.DictType:
dict = __loc_data__
elif isinstance(__loc_data__,Struct):
dict = __loc_data__.__dict__
else:
raise TypeError, 'Update with a dict, a Struct or key=val pairs.'
if kw:
dict.update(kw)
return dict
def __dict_invert(self,dict):
"""Helper function for merge. Takes a dictionary whose values are
lists and returns a dict. with the elements of each list as keys and
the original keys as values."""
outdict = {}
for k,lst in dict.items():
if type(lst) is types.StringType:
lst = lst.split()
for entry in lst:
outdict[entry] = k
return outdict
def clear(self):
"""Clear all attributes."""
self.__dict__.clear()
def copy(self):
"""Return a (shallow) copy of a Struct."""
return Struct(self.__dict__.copy())
def dict(self):
"""Return the Struct's dictionary."""
return self.__dict__
def dictcopy(self):
"""Return a (shallow) copy of the Struct's dictionary."""
return self.__dict__.copy()
def popitem(self):
"""S.popitem() -> (k, v), remove and return some (key, value) pair as
a 2-tuple; but raise KeyError if S is empty."""
return self.__dict__.popitem()
def update(self,__loc_data__=None,**kw):
"""Update (merge) with data from another Struct or from a dictionary.
Optionally, one or more key=value pairs can be given at the end for
direct update."""
        # The funny name __loc_data__ is to prevent a common variable name,
        # which could be a field of a Struct, from colliding with this
        # parameter. The problem would arise if the function were called with
        # a keyword of that same name which the user meant to add as a Struct
        # field.
newdict = Struct.__make_dict(self,__loc_data__,**kw)
for k,v in newdict.items():
self[k] = v
def merge(self,__loc_data__=None,__conflict_solve=None,**kw):
"""S.merge(data,conflict,k=v1,k=v2,...) -> merge data and k=v into S.
This is similar to update(), but much more flexible. First, a dict is
made from data+key=value pairs. When merging this dict with the Struct
S, the optional dictionary 'conflict' is used to decide what to do.
If conflict is not given, the default behavior is to preserve any keys
with their current value (the opposite of the update method's
behavior).
conflict is a dictionary of binary functions which will be used to
solve key conflicts. It must have the following structure:
conflict == { fn1 : [Skey1,Skey2,...], fn2 : [Skey3], etc }
Values must be lists or whitespace separated strings which are
automatically converted to lists of strings by calling string.split().
Each key of conflict is a function which defines a policy for
resolving conflicts when merging with the input data. Each fn must be
a binary function which returns the desired outcome for a key
conflict. These functions will be called as fn(old,new).
An example is probably in order. Suppose you are merging the struct S
with a dict D and the following conflict policy dict:
S.merge(D,{fn1:['a','b',4], fn2:'key_c key_d'})
If the key 'a' is found in both S and D, the merge method will call:
S['a'] = fn1(S['a'],D['a'])
As a convenience, merge() provides five (the most commonly needed)
pre-defined policies: preserve, update, add, add_flip and add_s. The
easiest explanation is their implementation:
preserve = lambda old,new: old
update = lambda old,new: new
add = lambda old,new: old + new
add_flip = lambda old,new: new + old # note change of order!
add_s = lambda old,new: old + ' ' + new # only works for strings!
        You can use those five words (as strings) as keys in conflict instead
of defining them as functions, and the merge method will substitute
the appropriate functions for you. That is, the call
S.merge(D,{'preserve':'a b c','add':[4,5,'d'],my_function:[6]})
will automatically substitute the functions preserve and add for the
names 'preserve' and 'add' before making any function calls.
For more complicated conflict resolution policies, you still need to
construct your own functions. """
data_dict = Struct.__make_dict(self,__loc_data__,**kw)
# policies for conflict resolution: two argument functions which return
# the value that will go in the new struct
preserve = lambda old,new: old
update = lambda old,new: new
add = lambda old,new: old + new
add_flip = lambda old,new: new + old # note change of order!
add_s = lambda old,new: old + ' ' + new
# default policy is to keep current keys when there's a conflict
conflict_solve = list2dict2(self.keys(),default = preserve)
# the conflict_solve dictionary is given by the user 'inverted': we
# need a name-function mapping, it comes as a function -> names
# dict. Make a local copy (b/c we'll make changes), replace user
        # strings for the five builtin policies and invert it.
if __conflict_solve:
inv_conflict_solve_user = __conflict_solve.copy()
for name, func in [('preserve',preserve), ('update',update),
('add',add), ('add_flip',add_flip), ('add_s',add_s)]:
if name in inv_conflict_solve_user.keys():
inv_conflict_solve_user[func] = inv_conflict_solve_user[name]
del inv_conflict_solve_user[name]
conflict_solve.update(Struct.__dict_invert(self,inv_conflict_solve_user))
#print 'merge. conflict_solve: '; pprint(conflict_solve) # dbg
#print '*'*50,'in merger. conflict_solver:'; pprint(conflict_solve)
for key in data_dict:
if key not in self:
self[key] = data_dict[key]
else:
self[key] = conflict_solve[key](self[key],data_dict[key])
def has_key(self,key):
"""Like has_key() dictionary method."""
return self.__dict__.has_key(key)
def hasattr(self,key):
"""hasattr function available as a method.
Implemented like has_key, to make sure that all available keys in the
internal dictionary of the Struct appear also as attributes (even
numeric keys)."""
return self.__dict__.has_key(key)
def items(self):
"""Return the items in the Struct's dictionary, in the same format
as a call to {}.items()."""
return self.__dict__.items()
def keys(self):
"""Return the keys in the Struct's dictionary, in the same format
as a call to {}.keys()."""
return self.__dict__.keys()
def values(self,keys=None):
"""Return the values in the Struct's dictionary, in the same format
as a call to {}.values().
Can be called with an optional argument keys, which must be a list or
tuple of keys. In this case it returns only the values corresponding
to those keys (allowing a form of 'slicing' for Structs)."""
if not keys:
return self.__dict__.values()
else:
ret=[]
for k in keys:
ret.append(self[k])
return ret
def get(self,attr,val=None):
"""S.get(k[,d]) -> S[k] if S.has_key(k), else d. d defaults to None."""
try:
return self[attr]
except KeyError:
return val
def setdefault(self,attr,val=None):
"""S.setdefault(k[,d]) -> S.get(k,d), also set S[k]=d if not S.has_key(k)"""
if not self.has_key(attr):
self[attr] = val
return self.get(attr,val)
def allow_new_attr(self, allow = True):
""" Set whether new attributes can be created inside struct
This can be used to catch typos by verifying that the attribute user tries to
change already exists in this Struct.
"""
self['__allownew'] = allow
# end class Struct
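# Illustrative usage sketch (not part of the original module): exercises the
# default 'preserve' merge policy versus an explicit 'update' policy, using
# only the Struct class defined above.
if __name__ == '__main__':
    s = Struct({'a': 1, 'b': 2})
    s.merge({'a': 10, 'c': 3})            # conflict on 'a': default policy keeps the old value
    print s.a, s.b, s.c                   # -> 1 2 3
    s.merge({'a': 10}, {'update': 'a'})   # explicit policy overwrites 'a' on conflict
    print s.a                             # -> 10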
|
|
#!/usr/bin/env python
# Copyright 2013 The Swarming Authors. All rights reserved.
# Use of this source code is governed by the Apache v2.0 license that can be
# found in the LICENSE file.
import datetime
import logging
import os
import sys
import time
import unittest
import urllib
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
import test_env
test_env.setup_test_env()
# From components/third_party/
import webtest
import webapp2
from google.appengine.ext import ndb
from components import auth
from components import utils
from support import test_case
import acl
import config
import gcs
import handlers_api
import handlers_backend
import model
# Access to a protected member _XXX of a client class
# pylint: disable=W0212
def hash_item(content):
h = model.get_hash_algo('default')
h.update(content)
return h.hexdigest()
def gen_item(content):
"""Returns data to send to /pre-upload to upload 'content'."""
return {
'h': hash_item(content),
'i': 0,
's': len(content),
}
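# Illustrative shape of the JSON body POSTed to /pre-upload for gen_item('foo')
# (the actual digest depends on the namespace's configured hash algorithm):
#   [{'h': '<hex digest of "foo">', 'i': 0, 's': 3}]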
class MainTest(test_case.TestCase):
"""Tests the handlers."""
APP_DIR = ROOT_DIR
def setUp(self):
"""Creates a new app instance for every test case."""
super(MainTest, self).setUp()
self.testbed.init_user_stub()
    # When called from a task queue, the call to get_app_version() may fail,
    # so pre-fetch it.
version = utils.get_app_version()
self.mock(utils, 'get_task_queue_host', lambda: version)
self.source_ip = '192.168.0.1'
self.app_api = webtest.TestApp(
webapp2.WSGIApplication(handlers_api.get_routes(), debug=True),
extra_environ={'REMOTE_ADDR': self.source_ip})
# Do not use handlers_backend.create_application() because it also
# initializes ereporter2 cron jobs, which requires templates. We want to
# make sure templates are not needed for APIs.
self.app_backend = webtest.TestApp(
webapp2.WSGIApplication(handlers_backend.get_routes(), debug=True),
extra_environ={'REMOTE_ADDR': self.source_ip})
# Tasks are enqueued on the backend.
self.app = self.app_backend
def whitelist_self(self):
auth.bootstrap_ip_whitelist(auth.BOTS_IP_WHITELIST, [self.source_ip])
def mock_acl_checks(self):
known_groups = (
acl.FULL_ACCESS_GROUP,
acl.READONLY_ACCESS_GROUP,
)
def is_group_member_mock(group):
if auth.get_current_identity().is_anonymous:
return False
return group in known_groups
self.mock(auth, 'is_group_member', is_group_member_mock)
def handshake(self):
self.whitelist_self()
self.mock_acl_checks()
data = {
'client_app_version': '0.2',
'fetcher': True,
'protocol_version': handlers_api.ISOLATE_PROTOCOL_VERSION,
'pusher': True,
}
req = self.app_api.post_json('/content-gs/handshake', data)
return urllib.quote(req.json['access_token'])
def mock_delete_files(self):
deleted = []
def delete_files(bucket, files, ignore_missing=False):
# pylint: disable=W0613
self.assertEquals('isolateserver-dev', bucket)
deleted.extend(files)
return []
self.mock(gcs, 'delete_files', delete_files)
return deleted
def put_content(self, url, content):
"""Simulare isolateserver.py archive."""
req = self.app_api.put(
url, content_type='application/octet-stream', params=content)
self.assertEqual(200, req.status_code)
self.assertEqual({'entry':{}}, req.json)
# Test cases.
def test_pre_upload_ok(self):
req = self.app_api.post_json(
'/content-gs/pre-upload/a?token=%s' % self.handshake(),
[gen_item('foo')])
self.assertEqual(1, len(req.json))
self.assertEqual(2, len(req.json[0]))
# ['url', None]
self.assertTrue(req.json[0][0])
self.assertEqual(None, req.json[0][1])
def test_pre_upload_invalid_namespace(self):
req = self.app_api.post_json(
'/content-gs/pre-upload/[?token=%s' % self.handshake(),
[gen_item('foo')],
expect_errors=True)
self.assertTrue(
'Invalid namespace; allowed keys must pass regexp "[a-z0-9A-Z\-._]+"' in
req.body)
def test_upload_tag_expire(self):
# Complete integration test that ensures tagged items are properly saved and
# non tagged items are dumped.
# Use small objects so it doesn't exercise the GS code path.
deleted = self.mock_delete_files()
items = ['bar', 'foo']
now = datetime.datetime(2012, 01, 02, 03, 04, 05, 06)
self.mock(utils, 'utcnow', lambda: now)
self.mock(ndb.DateTimeProperty, '_now', lambda _: now)
self.mock(ndb.DateProperty, '_now', lambda _: now.date())
r = self.app_api.post_json(
'/content-gs/pre-upload/default?token=%s' % self.handshake(),
[gen_item(i) for i in items])
self.assertEqual(len(items), len(r.json))
self.assertEqual(0, len(list(model.ContentEntry.query())))
for content, urls in zip(items, r.json):
self.assertEqual(2, len(urls))
self.assertEqual(None, urls[1])
self.put_content(urls[0], content)
self.assertEqual(2, len(list(model.ContentEntry.query())))
expiration = config.settings().default_expiration
self.assertEqual(0, self.execute_tasks())
# Advance time, tag the first item.
now += datetime.timedelta(seconds=2*expiration)
r = self.app_api.post_json(
'/content-gs/pre-upload/default?token=%s' % self.handshake(),
[gen_item(items[0])])
self.assertEqual(200, r.status_code)
self.assertEqual([None], r.json)
self.assertEqual(1, self.execute_tasks())
self.assertEqual(2, len(list(model.ContentEntry.query())))
# 'bar' was kept, 'foo' was cleared out.
headers = {'X-AppEngine-Cron': 'true'}
resp = self.app_backend.get(
'/internal/cron/cleanup/trigger/old', headers=headers)
self.assertEqual(200, resp.status_code)
self.assertEqual([None], r.json)
self.assertEqual(1, self.execute_tasks())
self.assertEqual(1, len(list(model.ContentEntry.query())))
self.assertEqual('bar', model.ContentEntry.query().get().content)
# Advance time and force cleanup. This deletes 'bar' too.
now += datetime.timedelta(seconds=2*expiration)
headers = {'X-AppEngine-Cron': 'true'}
resp = self.app_backend.get(
'/internal/cron/cleanup/trigger/old', headers=headers)
self.assertEqual(200, resp.status_code)
self.assertEqual([None], r.json)
self.assertEqual(1, self.execute_tasks())
self.assertEqual(0, len(list(model.ContentEntry.query())))
# Advance time and force cleanup.
now += datetime.timedelta(seconds=2*expiration)
headers = {'X-AppEngine-Cron': 'true'}
resp = self.app_backend.get(
'/internal/cron/cleanup/trigger/old', headers=headers)
self.assertEqual(200, resp.status_code)
self.assertEqual([None], r.json)
self.assertEqual(1, self.execute_tasks())
self.assertEqual(0, len(list(model.ContentEntry.query())))
    # All expired items are candidates for deletion from GS. This is the
    # trade-off between having to fetch the items and doing unneeded requests
    # to GS for the inlined objects.
expected = sorted('default/' + hash_item(i) for i in items)
self.assertEqual(expected, sorted(deleted))
def test_trim_missing(self):
deleted = self.mock_delete_files()
def gen_file(i, t=0):
return (i, gcs.cloudstorage.GCSFileStat(i, 100, 'etag', t))
mock_files = [
# Was touched.
gen_file('d/' + '0' * 40),
# Is deleted.
gen_file('d/' + '1' * 40),
# Too recent.
gen_file('d/' + '2' * 40, time.time() - 60),
]
self.mock(gcs, 'list_files', lambda _: mock_files)
model.ContentEntry(key=model.entry_key('d', '0' * 40)).put()
headers = {'X-AppEngine-Cron': 'true'}
resp = self.app_backend.get(
'/internal/cron/cleanup/trigger/trim_lost', headers=headers)
self.assertEqual(200, resp.status_code)
self.assertEqual(1, self.execute_tasks())
self.assertEqual(['d/' + '1' * 40], deleted)
def test_verify(self):
# Upload a file larger than MIN_SIZE_FOR_DIRECT_GS and ensure the verify
# task works.
data = '0' * handlers_api.MIN_SIZE_FOR_DIRECT_GS
req = self.app_api.post_json(
'/content-gs/pre-upload/default?token=%s' % self.handshake(),
[gen_item(data)])
self.assertEqual(1, len(req.json))
self.assertEqual(2, len(req.json[0]))
# ['url', 'url']
self.assertTrue(req.json[0][0])
# Fake the upload by calling the second function.
self.mock(gcs, 'get_file_info', lambda _b, _f: gcs.FileInfo(size=len(data)))
req = self.app_api.post(req.json[0][1], '')
self.mock(gcs, 'read_file', lambda _b, _f: [data])
self.assertEqual(1, self.execute_tasks())
# Assert the object is still there.
self.assertEqual(1, len(list(model.ContentEntry.query())))
def test_verify_corrupted(self):
# Upload a file larger than MIN_SIZE_FOR_DIRECT_GS and ensure the verify
# task works.
data = '0' * handlers_api.MIN_SIZE_FOR_DIRECT_GS
req = self.app_api.post_json(
'/content-gs/pre-upload/default?token=%s' % self.handshake(),
[gen_item(data)])
self.assertEqual(1, len(req.json))
self.assertEqual(2, len(req.json[0]))
# ['url', 'url']
self.assertTrue(req.json[0][0])
# Fake the upload by calling the second function.
self.mock(gcs, 'get_file_info', lambda _b, _f: gcs.FileInfo(size=len(data)))
req = self.app_api.post(req.json[0][1], '')
# Fake corruption
data_corrupted = '1' * handlers_api.MIN_SIZE_FOR_DIRECT_GS
self.mock(gcs, 'read_file', lambda _b, _f: [data_corrupted])
deleted = self.mock_delete_files()
self.assertEqual(1, self.execute_tasks())
# Assert the object is gone.
self.assertEqual(0, len(list(model.ContentEntry.query())))
self.assertEqual(['default/' + hash_item(data)], deleted)
if __name__ == '__main__':
if '-v' in sys.argv:
unittest.TestCase.maxDiff = None
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.FATAL)
unittest.main()
|
|
#!/usr/bin/python
# coding=utf-8
from catcher.api.resource import Collection, Item
from catcher import models as m
from catcher.resources.tournamentCreater import TournamentCreater
from catcher.models.queries import Queries
from catcher.api.privileges import Privilege
import falcon
import logging
import datetime
class Tournament(Item):
@m.db.atomic()
def prepareTournament(self, id):
tournament = m.Tournament.get(id=id)
if tournament.ready:
raise ValueError("Tournament %s is already ready" % id)
teams = m.TeamAtTournament.select().where(
m.TeamAtTournament.tournamentId == id
)
if len(teams) != tournament.teams:
logging.error("Tournament has different number of teams")
            raise RuntimeError(
                "Tournament %s has a different number of teams"
                " in contrast to TeamAtTournament" % id
            )
# ready Tournament
m.Tournament.update(ready=True).where(m.Tournament.id==id).execute()
# Standing
for x in range(1, len(teams)+1):
m.Standing.insert(
tournamentId = id,
standing = x
).execute()
# Matches and Spirit overalls
seedings = {}
for team in teams:
seedings[team.seeding] = team.teamId
m.SpiritAvg.insert(
teamId = team.teamId,
tournamentId = tournament.id
).execute()
        # peewee expressions must be combined with & / |, not "and"/"or"
        matches = m.Match.select().where(
            (m.Match.tournamentId == id) &
            ((m.Match.homeSeed != None) | (m.Match.awaySeed != None))
        )
for match in matches:
m.Match.update(
homeTeamId = seedings[match.homeSeed],
awayTeamId = seedings[match.awaySeed]
).where(
m.Match.id == match.id
).execute()
@m.db.atomic()
def terminateTournament(self, id):
'''terminate tournament'''
tournament = m.Tournament.get(id=id)
standings = m.Standing.select().where(m.Standing.tournamentId==tournament.id)
for standing in standings:
if standing.teamId is None:
raise falcon.HTTPBadRequest(
"Tournanent can't be terminated",
"All standings aren't known. Probably some matches are still active."
)
matches = Queries.getMatches(tournamentId=tournament.id)
for match in matches:
if match['homeTeam']['spirit'] is None or match['awayTeam']['spirit'] is None:
raise falcon.HTTPBadRequest(
"Tournanent can't be terminated",
("Spirit from match %s is still missing" % match['ide'])
)
tournament.terminated = True
tournament.save()
@falcon.before(Privilege(["organizer", "admin"]))
def on_put(self, req, resp, id):
Privilege.checkOrganizer(req.context['user'], int(id))
data = req.context['data']
tournament = m.Tournament.select(m.Tournament).where(m.Tournament.id==id).get()
super(Tournament, self).on_put(req, resp, id,
['name', 'startDate', 'endDate', 'city', 'country', 'caldTournamentId']
)
edited = False
if tournament.ready is False and data.get('ready') is True:
self.prepareTournament(id)
edited = True
if tournament.terminated is False and data.get('terminated') is True:
self.terminateTournament(id)
edited = True
if edited:
resp.status = falcon.HTTP_200
class Tournaments(Collection):
def on_get(self, req, resp):
tournaments = Queries.getTournaments(
req.params.get('country'),
req.params.get('divisionId'),
req.get_param_as_bool('active'),
req.get_param_as_bool('terminated'),
req.params.get('userId'),
)
collection = {
'count': len(tournaments),
'items': tournaments
}
req.context['result'] = collection
@falcon.before(Privilege(["organizer", "admin"]))
def on_post(self, req, resp):
tournamentCreater = TournamentCreater()
        createdTournament = tournamentCreater.createTournament(req, resp, req.context['user'])
        req.context['result'] = createdTournament
resp.status = falcon.HTTP_201
class TournamentTeams(object):
def on_get(self, req, resp, id):
teams = Queries.getTeams(id)
collection = {
'count' : len(teams),
'items' : teams
}
req.context['result'] = collection
@falcon.before(Privilege(["organizer", "admin"]))
def on_put(self, req, resp, id):
Privilege.checkOrganizer(req.context['user'], int(id))
tournament = m.Tournament.get(id=id)
if tournament.ready:
raise ValueError("Tournament is ready and teams can't be changed")
data = req.context['data']
qr = m.TeamAtTournament.\
update(
teamId = data['teamId']
).\
where(
m.TeamAtTournament.tournamentId == id,
m.TeamAtTournament.seeding == data['seeding']
).execute()
resp.status = falcon.HTTP_200 if qr else falcon.HTTP_304
req.context['result'] = m.TeamAtTournament.get(
tournamentId = id,
seeding = data['seeding']
)
class TournamentMatches(object):
def on_get(self, req, resp, id):
matches = Queries.getMatches(
id,
req.params.get('matchId'),
req.params.get('fieldId'),
req.params.get('date'),
req.get_param_as_bool('active'),
req.get_param_as_bool('terminated'),
req.params.get('groupIde')
)
collection = {
'count': len(matches),
'items': matches
}
req.context['result'] = collection
class TournamentPlayers(object):
def on_get(self, req, resp, id):
players = Queries.getPlayers(
id, req.params.get('teamId'), req.params.get('limit')
)
collection = {
'count': len(players),
'items': players
}
req.context['result'] = collection
@falcon.before(Privilege(["club", "organizer", "admin"]))
def on_post(self, req, resp, id):
teamId = req.context['data']['teamId']
playerId = req.context['data']['playerId']
Privilege.checkOrganizer(req.context['user'], int(id))
Privilege.checkClub(req.context['user'], m.Team.get(id=teamId).clubId)
tournament = m.Tournament.get(id=id)
if not tournament.ready:
raise ValueError("Tournament is not ready")
newPlayer, created = m.PlayerAtTournament.create_or_get(
tournamentId = int(id),
teamId = teamId,
playerId = playerId
)
resp.status = falcon.HTTP_201 if created else falcon.HTTP_200
req.context['result'] = newPlayer
@falcon.before(Privilege(["club", "organizer", "admin"]))
def on_delete(self, req, resp, id):
teamId = req.context['data']['teamId']
playerId = req.context['data']['playerId']
Privilege.checkOrganizer(req.context['user'], int(id))
Privilege.checkClub(req.context['user'], m.Team.get(id=teamId).clubId)
matches = m.PlayerAtTournament.get(
tournamentId = id,
teamId = teamId,
playerId = playerId
).matches
if matches == 0:
player = m.PlayerAtTournament.get(
tournamentId = id,
teamId = teamId,
playerId = playerId
).delete_instance()
else:
raise ValueError("Player has played matches")
class TournamentGroup(object):
@staticmethod
def getGroup(tournamentId, ide):
qr = m.Group.get(tournamentId=tournamentId, ide=ide)
group = {
'ide' : qr.ide,
'description': qr.description
}
qr = m.GroupHasTeam.select().where(
m.GroupHasTeam.tournamentId == tournamentId,
m.GroupHasTeam.ide == ide
)
teams = []
for team in qr:
teams.append({
'teamId' : team.teamId,
'matches' : team.matches,
'wins' : team.wins,
'losses' : team.losses,
'plus' : team.plus,
'minus' : team.minus,
'points' : team.points,
'standing': team.standing
})
group['teams'] = teams
return group
def on_get(self, req, resp, id, ide):
req.context['result'] = TournamentGroup.getGroup(id, ide)
class TournamentGroups(object):
def on_get(self, req, resp, id):
groups = []
qr = m.Group.select().where(m.Group.tournamentId==id)
for group in qr:
groups.append(TournamentGroup.getGroup(id, group.ide))
collection = {
'count': len(groups),
'items': groups
}
req.context['result'] = collection
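# Illustrative result shape produced by TournamentGroup.getGroup() above; the
# route, the group ide and all numbers are hypothetical:
#   GET /tournaments/{id}/groups/{ide} ->
#   {"ide": "A", "description": "Group A",
#    "teams": [{"teamId": 1, "matches": 3, "wins": 2, "losses": 1,
#               "plus": 30, "minus": 21, "points": 4, "standing": 1}]}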
|
|
import socket
import socketserver
import threading
import json
import re
from node import Node
from ring import ConsistentRing
import api.tcpclient as client
# The node containing container (our node container only) and the ring metadata
node = None
# Globals for commands
ADD_NODE = {
"cmd": "addnode",
"args": 1,
"response_ok": 1,
"response_err": -1,
}
RM_NODE = {
"cmd": "rmnode",
"args": 1,
"response_ok": 2,
"response_err": -2,
}
ADD = {
"cmd": "add",
"args": 2,
"response_ok": 3,
"response_err": -3,
}
GET = {
"cmd": "get",
"args": 1,
"response_ok": 4,
"response_err": -4,
}
STATS = {
"cmd": "stats",
"args": 0,
"response_ok": 5,
"response_err": -5,
}
ERROR = {
"cmd": None,
"args": 0,
"response_ok": 0,
"response_err": -99,
}
key_regex = re.compile(r"\d+\.\d+\.\d+\.\d+:\d+")  # node keys look like host:port, e.g. 127.0.0.1:8000
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
def handle(self):
# Protocol
# --------
#
# addnode {key}
# rmnode {key}
# add {key} {value}
# get {key}
# stats
#
# Returned data
# -------------
#
# Added node: 1 {message}
# Error added node: -1 {message}
# Removed node: 2 {message}
# Error removed node: -2 {message}
# Added key: 3 {message}
# Error added key: -3 {message}
# get key: 4 {key} {value}
# Missed key: -4 {message}
# stats: 5 {stats}
# Error stats: -5 {stats}
# CMD Error : -99 {message}
# Get data and split
data = str(self.request.recv(1024), 'utf-8').split(None, 2)
command = data[0]
if command == ADD_NODE["cmd"]:
if len(data)-1 < ADD_NODE["args"]:
response = "-99 Wrong parameters"
else:
if key_regex.search(data[1]):
self._add_node(data[1])
response = "{0} {1}".format(ADD_NODE["response_ok"], "Added node")
else:
response = "{0} {1}".format(ADD_NODE["response_err"], "bad argument")
elif command == RM_NODE["cmd"]:
if len(data)-1 < RM_NODE["args"]:
response = "-99 Wrong parameters"
else:
if key_regex.search(data[1]):
self._rm_node(data[1])
response = "{0} {1}".format(RM_NODE["response_ok"], "removed node")
else:
response = "{0} {1}".format(RM_NODE["response_err"], "bad argument")
elif command == ADD["cmd"]:
if len(data)-1 < ADD["args"]:
response = "-99 Wrong parameters"
else:
try:
self._add_data(data[1], data[2])
response = "{0} {1}".format(ADD["response_ok"], "Added data")
except ConnectionRefusedError:
response = "{0} {1}".format(ADD["response_err"], "Connection error")
elif command == GET["cmd"]:
if len(data)-1 < GET["args"]:
response = "-99 Wrong parameters"
else:
try:
res_data = self._get_data(data[1])
if not res_data:
response = "{0} {1}".format(GET["response_err"], "Missed data")
else:
response = "{0} {1}".format(GET["response_ok"], res_data)
except ConnectionRefusedError:
response = "{0} {1}".format(GET["response_err"], "Connection error")
elif command == STATS["cmd"]:
if len(data)-1 < STATS["args"]:
response = "-99 Wrong parameters"
else:
response = json.dumps(node.stats())
else:
response = "-99 Wrong command"
self.request.sendall(bytes(response, 'utf-8'))
# Helper functions
def _get_data(self, key):
        # Check which node owns the key
node_key = node.where(key)
# Is us?
if node_key == node.key:
return node.get_data(key)
else: # If not, ask to the proper node (We know the key)
print("asking to: {0}".format(node_key))
host, port = node_key.split(":")
# TODO: Check correct return
return client.get(host, int(port), key)[1]
def _add_data(self, key, data):
global node
        # Check which node owns the key
node_key = node.where(key)
# Is us?
if node_key == node.key:
print("Inserting in this node")
return node.set_data(key, data)
else: # If not, ask to the proper node (We know the key)
# TODO: Check node is up before inserting
print("Inserting in node {0}".format(node_key))
host, port = node_key.split(":")
return client.add(host, int(port), key, data)
def _add_node(self, key):
node.add_node_to_ring(key)
def _rm_node(self, key):
node.rm_node_from_ring(key)
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
class NodeServer(object):
def __init__(self, host="127.0.0.1", port=8000):
self._host = host
self._port = port
self._server = None
def _set_environment(self):
"""
Starts the node, ring and stuff
"""
        # A global variable enables access from the request-handler threads
global node
# Create a new ring, with default values
ring = ConsistentRing()
node = Node("{0}:{1}".format(self._host, self._port), ring)
def run(self):
try:
self._set_environment()
print ("Node listening {0}:{1}".format(self._host, self._port))
self._server = ThreadedTCPServer((self._host, self._port), ThreadedTCPRequestHandler)
server_thread = threading.Thread(target=self._server.serve_forever)
server_thread.daemon = True
server_thread.start()
print("Server loop running in thread:", server_thread.name)
self._server.serve_forever()
except KeyboardInterrupt:
print("^C received, shutting down the node")
#TODO: Notify
self._server.shutdown()
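# Minimal client sketch (not part of the original module): talks the plain-text
# protocol documented in ThreadedTCPRequestHandler.handle(). It assumes a node
# is already listening on the given address; the key and value are arbitrary.
def _example_client(host="127.0.0.1", port=8000):
    with socket.create_connection((host, port)) as sock:
        sock.sendall(bytes("add mykey myvalue", 'utf-8'))  # expect "3 Added data"
        print(sock.recv(1024).decode('utf-8'))
    with socket.create_connection((host, port)) as sock:
        sock.sendall(bytes("get mykey", 'utf-8'))          # expect "4 <value>" or "-4 Missed data"
        print(sock.recv(1024).decode('utf-8'))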
|
|
import json
import time
from django.contrib.auth.models import User
from django.core import management
from django.test import TestCase, TransactionTestCase
from django.test.utils import override_settings
from restlib2.http import codes
from avocado.history.models import Revision
from avocado.models import DataField, DataView, DataContext
from serrano.models import ApiToken
from serrano.resources import API_VERSION
class BaseTestCase(TestCase):
fixtures = ['tests/fixtures/test_data.json']
def setUp(self):
management.call_command('avocado', 'init', 'tests', quiet=True,
publish=False, concepts=False)
DataField.objects.filter(
model_name__in=['project', 'title']).update(published=True)
self.user = User.objects.create_user(username='root',
password='password')
self.user.is_superuser = True
self.user.save()
class TransactionBaseTestCase(TransactionTestCase):
fixtures = ['tests/fixtures/test_data.json']
def setUp(self):
management.call_command('avocado', 'init', 'tests', quiet=True,
publish=False, concepts=False)
DataField.objects.filter(
model_name__in=['project', 'title']).update(published=True)
self.user = User.objects.create_user(username='root',
password='password')
self.user.is_superuser = True
self.user.save()
class AuthenticatedBaseTestCase(BaseTestCase):
def setUp(self):
super(AuthenticatedBaseTestCase, self).setUp()
self.user = User.objects.create_user(username='test', password='test')
self.client.login(username='test', password='test')
class RootResourceTestCase(TestCase):
def test_get(self):
response = self.client.get('/api/', HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, codes.ok)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertEqual(response['Link'], (
'<http://testserver/api/data/export/>; rel="exporter", '
'<http://testserver/api/async/preview/>; rel="async_preview", '
'<http://testserver/api/views/>; rel="views", '
'<http://testserver/api/concepts/>; rel="concepts", '
'<http://testserver/api/stats/>; rel="stats", '
'<http://testserver/api/categories/>; rel="categories", '
'<http://testserver/api/queries/public/>; rel="public_queries", '
'<http://testserver/api/sets/>; rel="sets", '
'<http://testserver/api/contexts/>; rel="contexts", '
'<http://testserver/api/fields/>; rel="fields", '
'<http://testserver/api/>; rel="self", '
'<http://testserver/api/ping/>; rel="ping", '
'<http://testserver/api/queries/>; rel="queries", '
'<http://testserver/api/async/export/>; rel="async_exporter", '
'<http://testserver/api/data/preview/>; rel="preview"'
))
self.assertEqual(json.loads(response.content), {
'title': 'Serrano Hypermedia API',
'version': API_VERSION,
})
@override_settings(SERRANO_AUTH_REQUIRED=True)
def test_post(self):
User.objects.create_user(username='root', password='password')
response = self.client.post(
'/api/',
content_type='application/json',
HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, codes.unauthorized)
response = self.client.post(
'/api/',
json.dumps({'username': 'root', 'password': 'password'}),
content_type='application/json',
HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, codes.ok)
self.assertTrue('token' in json.loads(response.content))
# Confirm that passing an invalid username/password returns a status
# code of codes.unauthorized -- unauthorized.
response = self.client.post(
'/api/',
json.dumps({'username': 'root', 'password': 'NOT_THE_PASSWORD'}),
content_type='application/json')
self.assertEqual(response.status_code, codes.unauthorized)
@override_settings(SERRANO_AUTH_REQUIRED=True)
def test_api_token_access(self):
response = self.client.get('/api/',
content_type='application/json',
HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, codes.unauthorized)
# Create user token
user = User.objects.create_user(username='root', password='password')
api_token = ApiToken.objects.create(user=user)
self.assertTrue(api_token.token)
response = self.client.get('/api/',
data={'token': api_token.token},
content_type='application/json',
HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, codes.ok)
@override_settings(SERRANO_RATE_LIMIT_COUNT=None)
class ThrottledResourceTestCase(BaseTestCase):
def test_too_many_auth_requests(self):
f = DataField.objects.all()[0]
self.client.login(username='root', password='password')
# Be certain we are clear of the current interval.
time.sleep(7)
# These 20 requests should be OK.
for _ in range(20):
response = self.client.get('/api/fields/{0}/'.format(f.pk),
HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, codes.ok)
# Wait a little while but stay in the interval.
time.sleep(3)
        # These 20 requests should still be OK.
for _ in range(20):
response = self.client.get('/api/fields/{0}/'.format(f.pk),
HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, codes.ok)
# These 10 requests should fail as we've exceeded the limit.
for _ in range(10):
response = self.client.get('/api/fields/{0}/'.format(f.pk),
HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, codes.too_many_requests)
# Wait out the interval.
time.sleep(6)
# These 5 requests should be OK now that we've entered a new interval.
for _ in range(5):
response = self.client.get('/api/fields/{0}/'.format(f.pk),
HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, codes.ok)
def test_too_many_requests(self):
f = DataField.objects.all()[0]
        # Force these requests to be unauthenticated.
self.user = None
# We execute a request before the actual test in order to initialize
# the session so that we have valid session keys on subsequent
# requests.
# TODO: Can the session be initialized somehow without sending
# a request via the client?
response = self.client.get('/api/fields/{0}/'.format(f.pk),
HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, codes.ok)
# Be certain we are clear of the current interval.
time.sleep(5)
# These 10 requests should be OK
for _ in range(10):
response = self.client.get('/api/fields/{0}/'.format(f.pk),
HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, codes.ok)
# Wait a little while but stay in the interval.
time.sleep(1)
        # These 10 requests should still be OK.
for _ in range(10):
response = self.client.get('/api/fields/{0}/'.format(f.pk),
HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, codes.ok)
# These 10 requests should fail as we've exceeded the limit.
for _ in range(10):
response = self.client.get('/api/fields/{0}/'.format(f.pk),
HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, codes.too_many_requests)
# Wait out the interval.
time.sleep(4)
# These 5 requests should be OK now that we've entered a new interval.
for _ in range(5):
response = self.client.get('/api/fields/{0}/'.format(f.pk),
HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, codes.ok)
class RevisionResourceTestCase(AuthenticatedBaseTestCase):
def test_no_object_model(self):
# This will trigger a revision to be created
view = DataView(user=self.user)
view.save()
# Make sure we have a revision for this user
self.assertTrue(Revision.objects.filter(user=self.user).exists())
response = self.client.get('/api/test/no_model/',
HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, codes.ok)
self.assertEqual(len(json.loads(response.content)), 0)
def test_custom_template(self):
view = DataView(user=self.user)
view.save()
response = self.client.get('/api/test/template/',
HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, codes.ok)
self.assertEqual(len(json.loads(response.content)), 1)
revision = json.loads(response.content)[0]
self.assertEqual(revision['object_id'], view.pk)
self.assertTrue(response['Link-Template'])
self.assertFalse('content_type' in revision)
class ObjectRevisionResourceTestCase(AuthenticatedBaseTestCase):
def test_bad_urls(self):
view = DataView(user=self.user)
view.save()
target_revision_id = Revision.objects.all().count()
url = '/api/test/revisions/{0}/'.format(target_revision_id)
response = self.client.get(url, HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, codes.not_found)
url = '/api/test/{0}/revisions/'.format(view.id)
response = self.client.get(url, HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, codes.not_found)
class PingResourceTestCase(AuthenticatedBaseTestCase):
@override_settings(SERRANO_AUTH_REQUIRED=True)
def test(self):
response = self.client.get('/api/ping/',
HTTP_ACCEPT='application/json')
self.assertEqual(json.loads(response.content)['status'], 'ok')
# emulate session timeout..
self.client.logout()
response = self.client.get('/api/ping/',
HTTP_ACCEPT='application/json',
HTTP_REFERER='http://testserver/query/')
data = json.loads(response.content)
self.assertEqual(data['status'], 'timeout')
self.assertEqual(data['location'],
'http://testserver/accounts/login/?next=/query/')
@override_settings(SERRANO_AUTH_REQUIRED=True, LOGIN_REDIRECT_URL='/')
def test_nonsafe_referer(self):
self.client.logout()
response = self.client.get('/api/ping/',
HTTP_ACCEPT='application/json',
HTTP_REFERER='http://example.com/spam/')
data = json.loads(response.content)
self.assertEqual(data['status'], 'timeout')
self.assertEqual(data['location'],
'http://testserver/accounts/login/?next=/')
class MultipleObjectsTestCase(AuthenticatedBaseTestCase):
def test_multiple_contexts(self):
cxt1 = DataContext(session=True, user=self.user)
cxt1.save()
cxt2 = DataContext(user=self.user, session=True)
cxt2.save()
response = self.client.get('/api/data/preview/',
HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, codes.ok)
def test_multiple_views(self):
dv1 = DataView(session=True, user=self.user)
dv1.save()
dv2 = DataView(session=True, user=self.user)
dv2.save()
response = self.client.get('/api/data/preview/',
HTTP_ACCEPT='application/json')
self.assertTrue(response.content)
self.assertEqual(response.status_code, codes.ok)
def test_multiple_context_and_view(self):
        # Create two contexts and two views, an illegal state that
        # our base resource should handle.
cxt3 = DataContext(session=True, user=self.user)
cxt3.save()
cxt4 = DataContext(user=self.user, session=True)
cxt4.save()
dv3 = DataView(session=True, user=self.user)
dv3.save()
dv4 = DataView(session=True, user=self.user)
dv4.save()
response = self.client.get('/api/data/preview/',
HTTP_ACCEPT='application/json')
self.assertTrue(response.content)
self.assertEqual(response.status_code, codes.ok)
|
|
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""
This module contains the main interface to the botocore package, the
Session object.
"""
import copy
import logging
import os
import platform
from botocore import __version__
import botocore.configloader
import botocore.credentials
import botocore.client
from botocore.exceptions import ConfigNotFound, ProfileNotFound
from botocore.exceptions import UnknownServiceError
from botocore import handlers
from botocore.hooks import HierarchicalEmitter, first_non_none_response
from botocore.loaders import create_loader
from botocore.parsers import ResponseParserFactory
from botocore.regions import EndpointResolver
from botocore.model import ServiceModel
from botocore import paginate
from botocore import waiter
from botocore import retryhandler, translate
class Session(object):
"""
The Session object collects together useful functionality
from `botocore` as well as important data such as configuration
information and credentials into a single, easy-to-use object.
:ivar available_profiles: A list of profiles defined in the config
file associated with this session.
:ivar profile: The current profile.
"""
#: A default dictionary that maps the logical names for session variables
#: to the specific environment variables and configuration file names
#: that contain the values for these variables.
#: When creating a new Session object, you can pass in your own dictionary
#: to remap the logical names or to add new logical names. You can then
#: get the current value for these variables by using the
#: ``get_config_variable`` method of the :class:`botocore.session.Session`
#: class.
#: These form the keys of the dictionary. The values in the dictionary
#: are tuples of (<config_name>, <environment variable>, <default value>,
#: <conversion func>).
#: The conversion func is a function that takes the configuration value
#: as an argument and returns the converted value. If this value is
#: None, then the configuration value is returned unmodified. This
#: conversion function can be used to type convert config values to
#: values other than the default values of strings.
#: The ``profile`` and ``config_file`` variables should always have a
#: None value for the first entry in the tuple because it doesn't make
#: sense to look inside the config file for the location of the config
#: file or for the default profile to use.
#: The ``config_name`` is the name to look for in the configuration file,
#: the ``env var`` is the OS environment variable (``os.environ``) to
#: use, and ``default_value`` is the value to use if no value is otherwise
#: found.
SESSION_VARIABLES = {
# logical: config_file, env_var, default_value, conversion_func
'profile': (None, ['AWS_DEFAULT_PROFILE', 'AWS_PROFILE'], None, None),
'region': ('region', 'AWS_DEFAULT_REGION', None, None),
'data_path': ('data_path', 'AWS_DATA_PATH', None, None),
'config_file': (None, 'AWS_CONFIG_FILE', '~/.aws/config', None),
'ca_bundle': ('ca_bundle', 'AWS_CA_BUNDLE', None, None),
# This is the shared credentials file amongst sdks.
'credentials_file': (None, 'AWS_SHARED_CREDENTIALS_FILE',
'~/.aws/credentials', None),
# These variables only exist in the config file.
# This is the number of seconds until we time out a request to
# the instance metadata service.
'metadata_service_timeout': ('metadata_service_timeout',
'AWS_METADATA_SERVICE_TIMEOUT', 1, int),
# This is the number of request attempts we make until we give
# up trying to retrieve data from the instance metadata service.
'metadata_service_num_attempts': ('metadata_service_num_attempts',
'AWS_METADATA_SERVICE_NUM_ATTEMPTS', 1, int),
}
#: The default format string to use when configuring the botocore logger.
LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
def __init__(self, session_vars=None, event_hooks=None,
include_builtin_handlers=True, profile=None):
"""
Create a new Session object.
:type session_vars: dict
:param session_vars: A dictionary that is used to override some or all
of the environment variables associated with this session. The
key/value pairs defined in this dictionary will override the
corresponding variables defined in ``SESSION_VARIABLES``.
:type event_hooks: BaseEventHooks
:param event_hooks: The event hooks object to use. If one is not
provided, an event hooks object will be automatically created
for you.
:type include_builtin_handlers: bool
:param include_builtin_handlers: Indicates whether or not to
automatically register builtin handlers.
:type profile: str
:param profile: The name of the profile to use for this
session. Note that the profile can only be set when
the session is created.
"""
self.session_var_map = copy.copy(self.SESSION_VARIABLES)
if session_vars:
self.session_var_map.update(session_vars)
if event_hooks is None:
self._events = HierarchicalEmitter()
else:
self._events = event_hooks
if include_builtin_handlers:
self._register_builtin_handlers(self._events)
self.user_agent_name = 'Botocore'
self.user_agent_version = __version__
self.user_agent_extra = ''
# The _profile attribute is just used to cache the value
# of the current profile to avoid going through the normal
# config lookup process each access time.
self._profile = None
self._config = None
self._credentials = None
self._profile_map = None
# This is a dict that stores per session specific config variable
# overrides via set_config_variable().
self._session_instance_vars = {}
if profile is not None:
self._session_instance_vars['profile'] = profile
self._client_config = None
self._components = ComponentLocator()
self._register_components()
def _register_components(self):
self._register_credential_provider()
self._register_data_loader()
self._register_endpoint_resolver()
self._register_event_emitter()
self._register_response_parser_factory()
def _register_event_emitter(self):
self._components.register_component('event_emitter', self._events)
def _register_credential_provider(self):
self._components.lazy_register_component(
'credential_provider',
lambda: botocore.credentials.create_credential_resolver(self))
def _register_data_loader(self):
self._components.lazy_register_component(
'data_loader',
lambda: create_loader(self.get_config_variable('data_path')))
def _register_endpoint_resolver(self):
def create_default_resolver():
loader = self.get_component('data_loader')
endpoints = loader.load_data('endpoints')
return EndpointResolver(endpoints)
self._components.lazy_register_component(
'endpoint_resolver', create_default_resolver)
def _register_response_parser_factory(self):
self._components.register_component('response_parser_factory',
ResponseParserFactory())
def _register_builtin_handlers(self, events):
for spec in handlers.BUILTIN_HANDLERS:
if len(spec) == 2:
event_name, handler = spec
self.register(event_name, handler)
else:
event_name, handler, register_type = spec
if register_type is handlers.REGISTER_FIRST:
self._events.register_first(event_name, handler)
elif register_type is handlers.REGISTER_LAST:
self._events.register_last(event_name, handler)
@property
def available_profiles(self):
return list(self._build_profile_map().keys())
def _build_profile_map(self):
# This will build the profile map if it has not been created,
# otherwise it will return the cached value. The profile map
        # is a mapping of profile names to the config values for that profile.
if self._profile_map is None:
self._profile_map = self.full_config['profiles']
return self._profile_map
@property
def profile(self):
if self._profile is None:
profile = self.get_config_variable('profile')
self._profile = profile
return self._profile
def get_config_variable(self, logical_name,
methods=('instance', 'env', 'config')):
"""
Retrieve the value associated with the specified logical_name
from the environment or the config file. Values found in the
        environment variables take precedence over values found in the
config file. If no value can be found, a None will be returned.
:type logical_name: str
:param logical_name: The logical name of the session variable
you want to retrieve. This name will be mapped to the
appropriate environment variable name for this session as
well as the appropriate config file entry.
        :type methods: tuple
        :param methods: Defines which methods will be used to find
the variable value. By default, all available methods
are tried but you can limit which methods are used
by supplying a different value to this parameter.
Valid choices are: instance|env|config
:returns: value of variable or None if not defined.
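        Example (illustrative; assumes the default ``SESSION_VARIABLES``
        mapping above, where ``region`` falls back to the
        ``AWS_DEFAULT_REGION`` environment variable)::
            >>> os.environ['AWS_DEFAULT_REGION'] = 'us-west-2'
            >>> s.get_config_variable('region')
            'us-west-2'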
"""
# Handle all the short circuit special cases first.
if logical_name not in self.session_var_map:
return
# Do the actual lookups. We need to handle
# 'instance', 'env', and 'config' locations, in that order.
value = None
var_config = self.session_var_map[logical_name]
if self._found_in_instance_vars(methods, logical_name):
return self._session_instance_vars[logical_name]
elif self._found_in_env(methods, var_config):
value = self._retrieve_from_env(var_config[1], os.environ)
elif self._found_in_config_file(methods, var_config):
value = self.get_scoped_config()[var_config[0]]
if value is None:
value = var_config[2]
if var_config[3] is not None:
value = var_config[3](value)
return value
def _found_in_instance_vars(self, methods, logical_name):
if 'instance' in methods:
return logical_name in self._session_instance_vars
return False
def _found_in_env(self, methods, var_config):
return (
'env' in methods and
var_config[1] is not None and
self._retrieve_from_env(var_config[1], os.environ) is not None)
def _found_in_config_file(self, methods, var_config):
if 'config' in methods and var_config[0] is not None:
return var_config[0] in self.get_scoped_config()
return False
def _retrieve_from_env(self, names, environ):
# We need to handle the case where names is either
# a single value or a list of variables.
if not isinstance(names, list):
names = [names]
for name in names:
if name in environ:
return environ[name]
return None
def set_config_variable(self, logical_name, value):
"""Set a configuration variable to a specific value.
By using this method, you can override the normal lookup
process used in ``get_config_variable`` by explicitly setting
a value. Subsequent calls to ``get_config_variable`` will
use the ``value``. This gives you per-session specific
configuration values.
::
>>> # Assume logical name 'foo' maps to env var 'FOO'
>>> os.environ['FOO'] = 'myvalue'
>>> s.get_config_variable('foo')
'myvalue'
>>> s.set_config_variable('foo', 'othervalue')
>>> s.get_config_variable('foo')
'othervalue'
:type logical_name: str
:param logical_name: The logical name of the session variable
you want to set. These are the keys in ``SESSION_VARIABLES``.
:param value: The value to associate with the config variable.
"""
self._session_instance_vars[logical_name] = value
def get_scoped_config(self):
"""
Returns the config values from the config file scoped to the current
profile.
The configuration data is loaded **only** from the config file.
It does not resolve variables based on different locations
(e.g. first from the session instance, then from environment
variables, then from the config file). If you want this lookup
behavior, use the ``get_config_variable`` method instead.
Note that this configuration is specific to a single profile (the
``profile`` session variable).
If the ``profile`` session variable is set and the profile does
not exist in the config file, a ``ProfileNotFound`` exception
will be raised.
:raises: ConfigNotFound, ConfigParseError, ProfileNotFound
:rtype: dict
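        Example (illustrative; assumes a ``[profile dev]`` section exists in
        the config file)::
            >>> s = Session(profile='dev')
            >>> s.get_scoped_config()   # only the 'dev' profile's values
            {'region': 'us-west-2'}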
"""
profile_name = self.get_config_variable('profile')
profile_map = self._build_profile_map()
# If a profile is not explicitly set return the default
# profile config or an empty config dict if we don't have
# a default profile.
if profile_name is None:
return profile_map.get('default', {})
elif profile_name not in profile_map:
# Otherwise if they specified a profile, it has to
# exist (even if it's the default profile) otherwise
# we complain.
raise ProfileNotFound(profile=profile_name)
else:
return profile_map[profile_name]
@property
def full_config(self):
"""Return the parsed config file.
The ``get_config`` method returns the config associated with the
specified profile. This property returns the contents of the
**entire** config file.
:rtype: dict
"""
if self._config is None:
try:
config_file = self.get_config_variable('config_file')
self._config = botocore.configloader.load_config(config_file)
except ConfigNotFound:
self._config = {'profiles': {}}
try:
# Now we need to inject the profiles from the
# credentials file. We don't actually need the values
# in the creds file, only the profile names so that we
# can validate the user is not referring to a nonexistent
# profile.
cred_file = self.get_config_variable('credentials_file')
cred_profiles = botocore.configloader.raw_config_parse(cred_file)
for profile in cred_profiles:
cred_vars = cred_profiles[profile]
if profile not in self._config['profiles']:
self._config['profiles'][profile] = cred_vars
else:
self._config['profiles'][profile].update(cred_vars)
except ConfigNotFound:
pass
return self._config
def get_default_client_config(self):
"""Retrieves the default config for creating clients
:rtype: botocore.client.Config
:returns: The default client config object when creating clients. If
the value is ``None`` then there is no default config object
attached to the session.
"""
return self._client_config
def set_default_client_config(self, client_config):
"""Sets the default config for creating clients
:type client_config: botocore.client.Config
:param client_config: The default client config object when creating
clients. If the value is ``None`` then there is no default config
object attached to the session.
"""
self._client_config = client_config
def set_credentials(self, access_key, secret_key, token=None):
"""
Manually create credentials for this session. If you would
prefer to use botocore without a config file, environment variables,
or IAM roles, you can pass explicit credentials into this
method to establish credentials for this session.
:type access_key: str
:param access_key: The access key part of the credentials.
:type secret_key: str
:param secret_key: The secret key part of the credentials.
:type token: str
:param token: An optional session token used by STS session
credentials.
"""
self._credentials = botocore.credentials.Credentials(access_key,
secret_key,
token)
def get_credentials(self):
"""
Return the :class:`botocore.credentials.Credentials` object
associated with this session. If the credentials have not
yet been loaded, this will attempt to load them. If they
have already been loaded, this will return the cached
credentials.
"""
if self._credentials is None:
self._credentials = self._components.get_component(
'credential_provider').load_credentials()
return self._credentials
def user_agent(self):
"""
Return a string suitable for use as a User-Agent header.
The string will be of the form:
<agent_name>/<agent_version> Python/<py_ver> <plat_name>/<plat_ver>
Where:
- agent_name is the value of the `user_agent_name` attribute
of the session object (`Boto` by default).
- agent_version is the value of the `user_agent_version`
attribute of the session object (the botocore version by default).
- py_ver is the version of the Python interpreter being used.
- plat_name is the name of the platform (e.g. Darwin)
- plat_ver is the version of the platform
If ``user_agent_extra`` is not empty, then this value will be
appended to the end of the user agent string.
"""
base = '%s/%s Python/%s %s/%s' % (self.user_agent_name,
self.user_agent_version,
platform.python_version(),
platform.system(),
platform.release())
if self.user_agent_extra:
base += ' %s' % self.user_agent_extra
return base
def get_data(self, data_path):
"""
Retrieve the data associated with `data_path`.
:type data_path: str
:param data_path: The path to the data you wish to retrieve.
"""
return self.get_component('data_loader').load_data(data_path)
def get_service_model(self, service_name, api_version=None):
"""Get the service model object.
:type service_name: string
:param service_name: The service name
:type api_version: string
:param api_version: The API version of the service. If none is
provided, then the latest API version will be used.
:rtype: L{botocore.model.ServiceModel}
:return: The botocore service model for the service.
"""
service_description = self.get_service_data(service_name, api_version)
return ServiceModel(service_description, service_name=service_name)
def get_waiter_model(self, service_name, api_version=None):
loader = self.get_component('data_loader')
waiter_config = loader.load_service_model(
service_name, 'waiters-2', api_version)
return waiter.WaiterModel(waiter_config)
def get_paginator_model(self, service_name, api_version=None):
loader = self.get_component('data_loader')
paginator_config = loader.load_service_model(
service_name, 'paginators-1', api_version)
return paginate.PaginatorModel(paginator_config)
def get_service_data(self, service_name, api_version=None):
"""
Retrieve the fully merged data associated with a service.
"""
data_path = service_name
service_data = self.get_component('data_loader').load_service_model(
data_path,
type_name='service-2',
api_version=api_version
)
self._events.emit('service-data-loaded.%s' % service_name,
service_data=service_data,
service_name=service_name, session=self)
return service_data
def get_available_services(self):
"""
Return a list of names of available services.
"""
return self.get_component('data_loader')\
.list_available_services(type_name='service-2')
def set_debug_logger(self, logger_name='botocore'):
"""
Convenience function to quickly configure full debug output
to go to the console.
"""
self.set_stream_logger(logger_name, logging.DEBUG)
def set_stream_logger(self, logger_name, log_level, stream=None,
format_string=None):
"""
Convenience method to configure a stream logger.
:type logger_name: str
:param logger_name: The name of the logger to configure
:type log_level: str
:param log_level: The log level to set for the logger. This
is any param supported by the ``.setLevel()`` method of
a ``Logger`` object.
:type stream: file
:param stream: A file like object to log to. If none is provided
then sys.stderr will be used.
:type format_string: str
:param format_string: The format string to use for the log
formatter. If none is provided this will default to
``self.LOG_FORMAT``.
"""
log = logging.getLogger(logger_name)
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler(stream)
ch.setLevel(log_level)
# create formatter
if format_string is None:
format_string = self.LOG_FORMAT
formatter = logging.Formatter(format_string)
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
log.addHandler(ch)
def set_file_logger(self, log_level, path, logger_name='botocore'):
"""
Convenience function to quickly configure any level of logging
to a file.
:type log_level: int
:param log_level: A log level as specified in the `logging` module
:type path: string
:param path: Path to the log file. The file will be created
if it doesn't already exist.
"""
log = logging.getLogger(logger_name)
log.setLevel(logging.DEBUG)
# create console handler and set level to debug
ch = logging.FileHandler(path)
ch.setLevel(log_level)
# create formatter
formatter = logging.Formatter(self.LOG_FORMAT)
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
log.addHandler(ch)
def register(self, event_name, handler, unique_id=None,
unique_id_uses_count=False):
"""Register a handler with an event.
:type event_name: str
:param event_name: The name of the event.
:type handler: callable
:param handler: The callback to invoke when the event
is emitted. This object must be callable, and must
accept ``**kwargs``. If either of these preconditions are
not met, a ``ValueError`` will be raised.
:type unique_id: str
:param unique_id: An optional identifier to associate with the
registration. A unique_id can only be used once for
the entire session registration (unless it is unregistered).
This can be used to prevent an event handler from being
registered twice.
:type unique_id_uses_count: boolean
:param unique_id_uses_count: Specifies if the event should maintain
a count when a ``unique_id`` is registered and unregistered. The
event can only be completely unregistered once every register call
using the unique id has been matched by an ``unregister`` call.
If ``unique_id`` is specified, subsequent ``register``
calls must use the same value for ``unique_id_uses_count``
as the ``register`` call that first registered the event.
:raises ValueError: If the call to ``register`` uses ``unique_id``
but the value for ``unique_id_uses_count`` differs from the
``unique_id_uses_count`` value declared by the very first
``register`` call for that ``unique_id``.
"""
self._events.register(event_name, handler, unique_id,
unique_id_uses_count=unique_id_uses_count)
def unregister(self, event_name, handler=None, unique_id=None,
unique_id_uses_count=False):
"""Unregister a handler with an event.
:type event_name: str
:param event_name: The name of the event.
:type handler: callable
:param handler: The callback to unregister.
:type unique_id: str
:param unique_id: A unique identifier identifying the callback
to unregister. You can provide either the handler or the
unique_id, you do not have to provide both.
:type unique_id_uses_count: boolean
:param unique_id_uses_count: Specifies if the event should maintain
a count when a ``unique_id`` is registered and unregistered. The
event can only be completely unregistered once every ``register``
call using the ``unique_id`` has been matched by an ``unregister``
call. If the ``unique_id`` is specified, subsequent
``unregister`` calls must use the same value for
``unique_id_uses_count`` as the ``register`` call that first
registered the event.
:raises ValueError: If the call to ``unregister`` uses ``unique_id``
but the value for ``unique_id_uses_count`` differs from the
``unique_id_uses_count`` value declared by the very first
``register`` call for that ``unique_id``.
"""
self._events.unregister(event_name, handler=handler,
unique_id=unique_id,
unique_id_uses_count=unique_id_uses_count)
def emit(self, event_name, **kwargs):
return self._events.emit(event_name, **kwargs)
def emit_first_non_none_response(self, event_name, **kwargs):
responses = self._events.emit(event_name, **kwargs)
return first_non_none_response(responses)
def get_component(self, name):
return self._components.get_component(name)
def register_component(self, name, component):
self._components.register_component(name, component)
def lazy_register_component(self, name, component):
self._components.lazy_register_component(name, component)
def create_client(self, service_name, region_name=None, api_version=None,
use_ssl=True, verify=None, endpoint_url=None,
aws_access_key_id=None, aws_secret_access_key=None,
aws_session_token=None, config=None):
"""Create a botocore client.
:type service_name: string
:param service_name: The name of the service for which a client will
be created. You can use the ``Session.get_available_services()``
method to get a list of all available service names.
:type region_name: string
:param region_name: The name of the region associated with the client.
A client is associated with a single region.
:type api_version: string
:param api_version: The API version to use. By default, botocore will
use the latest API version when creating a client. You only need
to specify this parameter if you want to use a previous API version
of the client.
:type use_ssl: boolean
:param use_ssl: Whether or not to use SSL. By default, SSL is used.
Note that not all services support non-ssl connections.
:type verify: boolean/string
:param verify: Whether or not to verify SSL certificates.
By default SSL certificates are verified. You can provide the
following values:
* False - do not validate SSL certificates. SSL will still be
used (unless use_ssl is False), but SSL certificates
will not be verified.
* path/to/cert/bundle.pem - A filename of the CA cert bundle to
use. You can specify this argument if you want to use a
different CA cert bundle than the one used by botocore.
:type endpoint_url: string
:param endpoint_url: The complete URL to use for the constructed
client. Normally, botocore will automatically construct the
appropriate URL to use when communicating with a service. You can
specify a complete URL (including the "http/https" scheme) to
override this behavior. If this value is provided, then
``use_ssl`` is ignored.
:type aws_access_key_id: string
:param aws_access_key_id: The access key to use when creating
the client. This is entirely optional, and if not provided,
the credentials configured for the session will automatically
be used. You only need to provide this argument if you want
to override the credentials used for this specific client.
:type aws_secret_access_key: string
:param aws_secret_access_key: The secret key to use when creating
the client. Same semantics as aws_access_key_id above.
:type aws_session_token: string
:param aws_session_token: The session token to use when creating
the client. Same semantics as aws_access_key_id above.
:type config: botocore.client.Config
:param config: Advanced client configuration options. If a value
is specified in the client config, its value will take precedence
over environment variables and configuration values, but not over
a value passed explicitly to the method. If a default config
object is set on the session, the config object used when creating
the client will be the result of calling ``merge()`` on the
default config with the config provided to this call.
:rtype: botocore.client.BaseClient
:return: A botocore client instance
"""
default_client_config = self.get_default_client_config()
# If a config is provided and a default config is set, then
# use the config resulting from merging the two.
if config is not None and default_client_config is not None:
config = default_client_config.merge(config)
# If a config was not provided then use the default
# client config from the session
elif default_client_config is not None:
config = default_client_config
# Figure out the user-provided region based on the various
# configuration options.
if region_name is None:
if config and config.region_name is not None:
region_name = config.region_name
else:
region_name = self.get_config_variable('region')
# Figure out the verify value based on the various
# configuration options.
if verify is None:
verify = self.get_config_variable('ca_bundle')
loader = self.get_component('data_loader')
event_emitter = self.get_component('event_emitter')
response_parser_factory = self.get_component(
'response_parser_factory')
if aws_secret_access_key is not None:
credentials = botocore.credentials.Credentials(
access_key=aws_access_key_id,
secret_key=aws_secret_access_key,
token=aws_session_token)
else:
credentials = self.get_credentials()
endpoint_resolver = self.get_component('endpoint_resolver')
client_creator = botocore.client.ClientCreator(
loader, endpoint_resolver, self.user_agent(), event_emitter,
retryhandler, translate, response_parser_factory)
client = client_creator.create_client(
service_name=service_name, region_name=region_name,
is_secure=use_ssl, endpoint_url=endpoint_url, verify=verify,
credentials=credentials, scoped_config=self.get_scoped_config(),
client_config=config, api_version=api_version)
return client
def get_available_partitions(self):
"""Lists the available partitions found on disk
:rtype: list
:return: Returns a list of partition names (e.g., ["aws", "aws-cn"])
"""
resolver = self.get_component('endpoint_resolver')
return resolver.get_available_partitions()
def get_available_regions(self, service_name, partition_name='aws',
allow_non_regional=False):
"""Lists the region and endpoint names of a particular partition.
:type service_name: string
:param service_name: Name of a service to list endpoints for (e.g., s3).
This parameter accepts a service name (e.g., "elb") or endpoint
prefix (e.g., "elasticloadbalancing").
:type partition_name: string
:param partition_name: Name of the partition to limit endpoints to.
(e.g., aws for the public AWS endpoints, aws-cn for AWS China
endpoints, aws-us-gov for AWS GovCloud (US) endpoints, etc.).
:type allow_non_regional: bool
:param allow_non_regional: Set to True to include endpoints that are
not regional endpoints (e.g., s3-external-1,
fips-us-gov-west-1, etc).
:return: Returns a list of endpoint names (e.g., ["us-east-1"]).
"""
resolver = self.get_component('endpoint_resolver')
results = []
try:
service_data = self.get_service_data(service_name)
endpoint_prefix = service_data['metadata'].get(
'endpointPrefix', service_name)
results = resolver.get_available_endpoints(
endpoint_prefix, partition_name, allow_non_regional)
except UnknownServiceError:
pass
return results
class ComponentLocator(object):
"""Service locator for session components."""
def __init__(self):
self._components = {}
self._deferred = {}
def get_component(self, name):
if name in self._deferred:
factory = self._deferred[name]
self._components[name] = factory()
# Only delete the component from the deferred dict after
# successfully creating the object from the factory as well as
# injecting the instantiated value into the _components dict.
del self._deferred[name]
try:
return self._components[name]
except KeyError:
raise ValueError("Unknown component: %s" % name)
def register_component(self, name, component):
self._components[name] = component
try:
del self._deferred[name]
except KeyError:
pass
def lazy_register_component(self, name, no_arg_factory):
self._deferred[name] = no_arg_factory
try:
del self._components[name]
except KeyError:
pass
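# Illustrative sketch (not part of botocore): how ComponentLocator defers
# construction of a component until it is first requested. The 'expensive'
# name and the lambda factory below are made up for the example.
#
#   locator = ComponentLocator()
#   locator.lazy_register_component('expensive', lambda: object())
#   component = locator.get_component('expensive')   # factory runs here
#   assert locator.get_component('expensive') is component  # now cached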
def get_session(env_vars=None):
"""
Return a new session object.
"""
return Session(env_vars)
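# Usage sketch (illustrative, not part of botocore): exercising the Session
# API defined above. Assumes botocore's bundled data files are installed;
# creating a client needs a resolvable region, and actual API calls would
# additionally need valid credentials. 'ec2' and 'us-east-1' are example
# values only.
if __name__ == '__main__':
    demo_session = get_session()
    demo_session.set_config_variable('region', 'us-east-1')
    print(demo_session.user_agent())
    print(demo_session.get_available_services()[:5])
    ec2 = demo_session.create_client('ec2')
    print(type(ec2))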
|
|
from __future__ import with_statement, absolute_import
from django.contrib import admin
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.views.main import ChangeList, SEARCH_VAR, ALL_VAR
from django.contrib.auth.models import User
from django.template import Context, Template
from django.test import TestCase
from django.test.client import RequestFactory
from .admin import (ChildAdmin, QuartetAdmin, BandAdmin, ChordsBandAdmin,
GroupAdmin, ParentAdmin, DynamicListDisplayChildAdmin,
DynamicListDisplayLinksChildAdmin, CustomPaginationAdmin,
FilteredChildAdmin, CustomPaginator, site as custom_site,
SwallowAdmin)
from .models import (Child, Parent, Genre, Band, Musician, Group, Quartet,
Membership, ChordsMusician, ChordsBand, Invitation, Swallow,
UnorderedObject, OrderedObject)
class ChangeListTests(TestCase):
urls = "regressiontests.admin_changelist.urls"
def setUp(self):
self.factory = RequestFactory()
def _create_superuser(self, username):
return User.objects.create(username=username, is_superuser=True)
def _mocked_authenticated_request(self, url, user):
request = self.factory.get(url)
request.user = user
return request
def test_select_related_preserved(self):
"""
Regression test for #10348: ChangeList.get_query_set() shouldn't
overwrite a custom select_related provided by ModelAdmin.queryset().
"""
m = ChildAdmin(Child, admin.site)
request = self.factory.get('/child/')
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
self.assertEqual(cl.query_set.query.select_related, {'parent': {'name': {}}})
def test_result_list_empty_changelist_value(self):
"""
Regression test for #14982: EMPTY_CHANGELIST_VALUE should be honored
for relationship fields
"""
new_child = Child.objects.create(name='name', parent=None)
request = self.factory.get('/child/')
m = ChildAdmin(Child, admin.site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, Child, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
row_html = '<tbody><tr class="row1"><th><a href="%d/">name</a></th><td class="nowrap">(None)</td></tr></tbody>' % new_child.id
self.assertFalse(table_output.find(row_html) == -1,
'Failed to find expected row element: %s' % table_output)
def test_result_list_html(self):
"""
Verifies that inclusion tag result_list generates a table with
default ModelAdmin settings.
"""
new_parent = Parent.objects.create(name='parent')
new_child = Child.objects.create(name='name', parent=new_parent)
request = self.factory.get('/child/')
m = ChildAdmin(Child, admin.site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, Child, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
row_html = '<tbody><tr class="row1"><th><a href="%d/">name</a></th><td class="nowrap">Parent object</td></tr></tbody>' % new_child.id
self.assertFalse(table_output.find(row_html) == -1,
'Failed to find expected row element: %s' % table_output)
def test_result_list_editable_html(self):
"""
Regression tests for #11791: Inclusion tag result_list generates a
table and this checks that the items are nested within the table
element tags.
Also a regression test for #13599, verifies that hidden fields
when list_editable is enabled are rendered in a div outside the
table.
"""
new_parent = Parent.objects.create(name='parent')
new_child = Child.objects.create(name='name', parent=new_parent)
request = self.factory.get('/child/')
m = ChildAdmin(Child, admin.site)
# Test with list_editable fields
m.list_display = ['id', 'name', 'parent']
m.list_display_links = ['id']
m.list_editable = ['name']
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
FormSet = m.get_changelist_formset(request)
cl.formset = FormSet(queryset=cl.result_list)
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
# make sure that hidden fields are in the correct place
hiddenfields_div = '<div class="hiddenfields"><input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" /></div>' % new_child.id
self.assertFalse(table_output.find(hiddenfields_div) == -1,
'Failed to find hidden fields in: %s' % table_output)
# make sure that list editable fields are rendered in divs correctly
editable_name_field = '<input name="form-0-name" value="name" class="vTextField" maxlength="30" type="text" id="id_form-0-name" />'
self.assertFalse(table_output.find('<td>%s</td>' % editable_name_field) == -1,
'Failed to find "name" list_editable field in: %s' % table_output)
def test_result_list_editable(self):
"""
Regression test for #14312: list_editable with pagination
"""
new_parent = Parent.objects.create(name='parent')
for i in range(200):
new_child = Child.objects.create(name='name %s' % i, parent=new_parent)
request = self.factory.get('/child/', data={'p': -1}) # Anything outside range
m = ChildAdmin(Child, admin.site)
# Test with list_editable fields
m.list_display = ['id', 'name', 'parent']
m.list_display_links = ['id']
m.list_editable = ['name']
self.assertRaises(IncorrectLookupParameters, lambda: \
ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m))
def test_custom_paginator(self):
new_parent = Parent.objects.create(name='parent')
for i in range(200):
new_child = Child.objects.create(name='name %s' % i, parent=new_parent)
request = self.factory.get('/child/')
m = CustomPaginationAdmin(Child, admin.site)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
self.assertIsInstance(cl.paginator, CustomPaginator)
def test_distinct_for_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. Basic ManyToMany.
"""
blues = Genre.objects.create(name='Blues')
band = Band.objects.create(name='B.B. King Review', nr_of_members=11)
band.genres.add(blues)
band.genres.add(blues)
m = BandAdmin(Band, admin.site)
request = self.factory.get('/band/', data={'genres': blues.pk})
cl = ChangeList(request, Band, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one Group instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_through_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. With an intermediate model.
"""
lead = Musician.objects.create(name='Vox')
band = Group.objects.create(name='The Hype')
Membership.objects.create(group=band, music=lead, role='lead voice')
Membership.objects.create(group=band, music=lead, role='bass player')
m = GroupAdmin(Group, admin.site)
request = self.factory.get('/group/', data={'members': lead.pk})
cl = ChangeList(request, Group, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one Group instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_inherited_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. Model managed in the
admin inherits from the one that defines the relationship.
"""
lead = Musician.objects.create(name='John')
four = Quartet.objects.create(name='The Beatles')
Membership.objects.create(group=four, music=lead, role='lead voice')
Membership.objects.create(group=four, music=lead, role='guitar player')
m = QuartetAdmin(Quartet, admin.site)
request = self.factory.get('/quartet/', data={'members': lead.pk})
cl = ChangeList(request, Quartet, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one Quartet instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_m2m_to_inherited_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. Target of the relationship
inherits from another.
"""
lead = ChordsMusician.objects.create(name='Player A')
three = ChordsBand.objects.create(name='The Chords Trio')
Invitation.objects.create(band=three, player=lead, instrument='guitar')
Invitation.objects.create(band=three, player=lead, instrument='bass')
m = ChordsBandAdmin(ChordsBand, admin.site)
request = self.factory.get('/chordsband/', data={'members': lead.pk})
cl = ChangeList(request, ChordsBand, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one ChordsBand instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_non_unique_related_object_in_list_filter(self):
"""
Regression tests for #15819: If a field listed in list_filter
is a non-unique related object, distinct() must be called.
"""
parent = Parent.objects.create(name='Mary')
# Two children with the same name
Child.objects.create(parent=parent, name='Daniel')
Child.objects.create(parent=parent, name='Daniel')
m = ParentAdmin(Parent, admin.site)
request = self.factory.get('/parent/', data={'child__name': 'Daniel'})
cl = ChangeList(request, Parent, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
# Make sure distinct() was called
self.assertEqual(cl.query_set.count(), 1)
def test_distinct_for_non_unique_related_object_in_search_fields(self):
"""
Regression tests for #15819: If a field listed in search_fields
is a non-unique related object, distinct() must be called.
"""
parent = Parent.objects.create(name='Mary')
Child.objects.create(parent=parent, name='Danielle')
Child.objects.create(parent=parent, name='Daniel')
m = ParentAdmin(Parent, admin.site)
request = self.factory.get('/parent/', data={SEARCH_VAR: 'daniel'})
cl = ChangeList(request, Parent, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
# Make sure distinct() was called
self.assertEqual(cl.query_set.count(), 1)
def test_pagination(self):
"""
Regression tests for #12893: Pagination in the admin's changelist doesn't
use the queryset set by the ModelAdmin.
"""
parent = Parent.objects.create(name='anything')
for i in range(30):
Child.objects.create(name='name %s' % i, parent=parent)
Child.objects.create(name='filtered %s' % i, parent=parent)
request = self.factory.get('/child/')
# Test default queryset
m = ChildAdmin(Child, admin.site)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all,
m.list_editable, m)
self.assertEqual(cl.query_set.count(), 60)
self.assertEqual(cl.paginator.count, 60)
self.assertEqual(cl.paginator.page_range, [1, 2, 3, 4, 5, 6])
# Test custom queryset
m = FilteredChildAdmin(Child, admin.site)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all,
m.list_editable, m)
self.assertEqual(cl.query_set.count(), 30)
self.assertEqual(cl.paginator.count, 30)
self.assertEqual(cl.paginator.page_range, [1, 2, 3])
def test_dynamic_list_display(self):
"""
Regression tests for #14206: dynamic list_display support.
"""
parent = Parent.objects.create(name='parent')
for i in range(10):
Child.objects.create(name='child %s' % i, parent=parent)
user_noparents = self._create_superuser('noparents')
user_parents = self._create_superuser('parents')
# Test with user 'noparents'
m = custom_site._registry[Child]
request = self._mocked_authenticated_request('/child/', user_noparents)
response = m.changelist_view(request)
self.assertNotContains(response, 'Parent object')
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ['name', 'age'])
self.assertEqual(list_display_links, ['name'])
# Test with user 'parents'
m = DynamicListDisplayChildAdmin(Child, admin.site)
request = self._mocked_authenticated_request('/child/', user_parents)
response = m.changelist_view(request)
self.assertContains(response, 'Parent object')
custom_site.unregister(Child)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ('parent', 'name', 'age'))
self.assertEqual(list_display_links, ['parent'])
# Test default implementation
custom_site.register(Child, ChildAdmin)
m = custom_site._registry[Child]
request = self._mocked_authenticated_request('/child/', user_noparents)
response = m.changelist_view(request)
self.assertContains(response, 'Parent object')
def test_show_all(self):
parent = Parent.objects.create(name='anything')
for i in range(30):
Child.objects.create(name='name %s' % i, parent=parent)
Child.objects.create(name='filtered %s' % i, parent=parent)
# Add "show all" parameter to request
request = self.factory.get('/child/', data={ALL_VAR: ''})
# Test valid "show all" request (number of total objects is under max)
m = ChildAdmin(Child, admin.site)
# 200 is the max we'll pass to ChangeList
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, 200, m.list_editable, m)
cl.get_results(request)
self.assertEqual(len(cl.result_list), 60)
# Test invalid "show all" request (number of total objects over max)
# falls back to paginated pages
m = ChildAdmin(Child, admin.site)
# 30 is the max we'll pass to ChangeList for this test
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, 30, m.list_editable, m)
cl.get_results(request)
self.assertEqual(len(cl.result_list), 10)
def test_dynamic_list_display_links(self):
"""
Regression tests for #16257: dynamic list_display_links support.
"""
parent = Parent.objects.create(name='parent')
for i in range(1, 10):
Child.objects.create(id=i, name='child %s' % i, parent=parent, age=i)
m = DynamicListDisplayLinksChildAdmin(Child, admin.site)
superuser = self._create_superuser('superuser')
request = self._mocked_authenticated_request('/child/', superuser)
response = m.changelist_view(request)
for i in range(1, 10):
self.assertContains(response, '<a href="%s/">%s</a>' % (i, i))
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ('parent', 'name', 'age'))
self.assertEqual(list_display_links, ['age'])
def test_tuple_list_display(self):
"""
Regression test for #17128
(ChangeList failing under Python 2.5 after r16319)
"""
swallow = Swallow.objects.create(
origin='Africa', load='12.34', speed='22.2')
model_admin = SwallowAdmin(Swallow, admin.site)
superuser = self._create_superuser('superuser')
request = self._mocked_authenticated_request('/swallow/', superuser)
response = model_admin.changelist_view(request)
# just want to ensure it doesn't blow up during rendering
self.assertContains(response, unicode(swallow.origin))
self.assertContains(response, unicode(swallow.load))
self.assertContains(response, unicode(swallow.speed))
def test_deterministic_order_for_unordered_model(self):
"""
Ensure that the primary key is systematically used in the ordering of
the changelist's results to guarantee a deterministic order, even
when the Model doesn't have any default ordering defined.
Refs #17198.
"""
superuser = self._create_superuser('superuser')
for counter in range(1, 51):
UnorderedObject.objects.create(id=counter, bool=True)
class UnorderedObjectAdmin(admin.ModelAdmin):
list_per_page = 10
def check_results_order(reverse=False):
admin.site.register(UnorderedObject, UnorderedObjectAdmin)
model_admin = UnorderedObjectAdmin(UnorderedObject, admin.site)
counter = 51 if reverse else 0
for page in range (0, 5):
request = self._mocked_authenticated_request('/unorderedobject/?p=%s' % page, superuser)
response = model_admin.changelist_view(request)
for result in response.context_data['cl'].result_list:
counter += -1 if reverse else 1
self.assertEqual(result.id, counter)
admin.site.unregister(UnorderedObject)
# When no order is defined at all, everything is ordered by 'pk'.
check_results_order()
# When an order field is defined but multiple records have the same
# value for that field, make sure everything gets ordered by pk as well.
UnorderedObjectAdmin.ordering = ['bool']
check_results_order()
# When order fields are defined, including the pk itself, use them.
UnorderedObjectAdmin.ordering = ['bool', '-pk']
check_results_order(reverse=True)
UnorderedObjectAdmin.ordering = ['bool', 'pk']
check_results_order()
UnorderedObjectAdmin.ordering = ['-id', 'bool']
check_results_order(reverse=True)
UnorderedObjectAdmin.ordering = ['id', 'bool']
check_results_order()
def test_deterministic_order_for_model_ordered_by_its_manager(self):
"""
Ensure that the primary key is systematically used in the ordering of
the changelist's results to guarantee a deterministic order, even
when the Model has a manager that defines a default ordering.
Refs #17198.
"""
superuser = self._create_superuser('superuser')
for counter in range(1, 51):
OrderedObject.objects.create(id=counter, bool=True, number=counter)
class OrderedObjectAdmin(admin.ModelAdmin):
list_per_page = 10
def check_results_order(reverse=False):
admin.site.register(OrderedObject, OrderedObjectAdmin)
model_admin = OrderedObjectAdmin(OrderedObject, admin.site)
counter = 51 if reverse else 0
for page in range (0, 5):
request = self._mocked_authenticated_request('/orderedobject/?p=%s' % page, superuser)
response = model_admin.changelist_view(request)
for result in response.context_data['cl'].result_list:
counter += -1 if reverse else 1
self.assertEqual(result.id, counter)
admin.site.unregister(OrderedObject)
# When no order is defined at all, use the model's default ordering (i.e. '-number')
check_results_order(reverse=True)
# When an order field is defined but multiple records have the same
# value for that field, make sure everything gets ordered by pk as well.
OrderedObjectAdmin.ordering = ['bool']
check_results_order()
# When order fields are defined, including the pk itself, use them.
OrderedObjectAdmin.ordering = ['bool', '-pk']
check_results_order(reverse=True)
OrderedObjectAdmin.ordering = ['bool', 'pk']
check_results_order()
OrderedObjectAdmin.ordering = ['-id', 'bool']
check_results_order(reverse=True)
OrderedObjectAdmin.ordering = ['id', 'bool']
check_results_order()
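# Illustrative note (an assumption about the surrounding checkout, not part of
# this module): in the Django tree of this era the module lives under
# tests/regressiontests/admin_changelist/ and is run through the bundled test
# runner, for example:
#
#   ./runtests.py --settings=test_sqlite admin_changelist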
|
|
# Copyright 2015-2017 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Test Env, manages DUT, App and EnvConfig, interface for test cases to access these components """
import functools
import os
import threading
import traceback
import netifaces
from . import EnvConfig
def _synced(func):
@functools.wraps(func)
def decorator(self, *args, **kwargs):
with self.lock:
ret = func(self, *args, **kwargs)
return ret
decorator.__doc__ = func.__doc__
return decorator
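# Illustrative sketch of what the @_synced decorator provides: the wrapped
# method body runs while holding self.lock. 'Counter' is a hypothetical
# example class, not part of this module.
#
#   class Counter(object):
#       def __init__(self):
#           self.lock = threading.RLock()
#           self.value = 0
#
#       @_synced
#       def increment(self):
#           # serialized across threads by the decorator
#           self.value += 1
#           return self.value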
class Env(object):
"""
test env, manages DUTs and env configs.
:keyword app: class for default application
:keyword dut: class for default DUT
:keyword env_tag: test env tag, used to select configs from env config file
:keyword env_config_file: test env config file path
:keyword test_suite_name: test suite name, used when generating the log folder name
"""
CURRENT_LOG_FOLDER = ''
def __init__(self,
app=None,
dut=None,
env_tag=None,
env_config_file=None,
test_suite_name=None,
**kwargs):
self.app_cls = app
self.default_dut_cls = dut
self.config = EnvConfig.Config(env_config_file, env_tag)
self.log_path = self.app_cls.get_log_folder(test_suite_name)
if not os.path.exists(self.log_path):
os.makedirs(self.log_path)
Env.CURRENT_LOG_FOLDER = self.log_path
self.allocated_duts = dict()
self.lock = threading.RLock()
@_synced
def get_dut(self, dut_name, app_path, dut_class=None, app_class=None, app_config_name=None, **dut_init_args):
"""
get_dut(dut_name, app_path, dut_class=None, app_class=None)
:param dut_name: user defined name for DUT
:param app_path: application path, app instance will use this path to process application info
:param dut_class: dut class, if not specified will use default dut class of env
:param app_class: app class, if not specified will use default app of env
:param app_config_name: app build config
:keyword dut_init_args: extra kwargs used when creating DUT instance
:return: dut instance
"""
if dut_name in self.allocated_duts:
dut = self.allocated_duts[dut_name]['dut']
else:
if dut_class is None:
dut_class = self.default_dut_cls
if app_class is None:
app_class = self.app_cls
app_target = dut_class.TARGET
detected_target = None
try:
port = self.config.get_variable(dut_name)
if not app_target:
result, detected_target = dut_class.confirm_dut(port)
except ValueError:
# try to auto detect ports
allocated_ports = [self.allocated_duts[x]['port'] for x in self.allocated_duts]
available_ports = dut_class.list_available_ports()
for port in available_ports:
if port not in allocated_ports:
result, detected_target = dut_class.confirm_dut(port)
if result:
break
else:
port = None
if not app_target:
app_target = detected_target
if not app_target:
raise ValueError("DUT class doesn't specify the target, and autodetection failed")
app_inst = app_class(app_path, app_config_name, app_target)
if port:
try:
dut_config = self.get_variable(dut_name + '_port_config')
except ValueError:
dut_config = dict()
dut_config.update(dut_init_args)
dut = dut_class(dut_name, port,
os.path.join(self.log_path, dut_name + '.log'),
app_inst,
**dut_config)
self.allocated_duts[dut_name] = {'port': port, 'dut': dut}
else:
raise ValueError('Failed to get DUT')
return dut
@_synced
def close_dut(self, dut_name):
"""
close_dut(dut_name)
Close one DUT by name if the DUT name is valid (the name used by ``get_dut``); otherwise do nothing.
:param dut_name: user defined name for DUT
:return: None
"""
try:
dut = self.allocated_duts.pop(dut_name)['dut']
dut.close()
except KeyError:
pass
@_synced
def get_variable(self, variable_name):
"""
get_variable(variable_name)
Get a variable from the config file. If that fails, try to auto-detect it.
:param variable_name: name of the variable
:return: value of the variable if successfully found, otherwise None.
"""
return self.config.get_variable(variable_name)
PROTO_MAP = {
'ipv4': netifaces.AF_INET,
'ipv6': netifaces.AF_INET6,
'mac': netifaces.AF_LINK,
}
@_synced
def get_pc_nic_info(self, nic_name='pc_nic', proto='ipv4'):
"""
get_pc_nic_info(nic_name="pc_nic")
Try to get info for a specified NIC and protocol.
:param nic_name: PC NIC name. Accepts either the NIC name itself or the
name of a config variable whose value is the NIC name.
:param proto: "ipv4", "ipv6" or "mac"
:return: a dict of NIC info if successfully found, otherwise None.
NIC info keys can differ between protocols;
the key "addr" is available for mac, ipv4 and ipv6 NIC info.
"""
interfaces = netifaces.interfaces()
if nic_name in interfaces:
# the name is in the interface list, we regard it as NIC name
if_addr = netifaces.ifaddresses(nic_name)
else:
# it's not in interface name list, we assume it's variable name
_nic_name = self.get_variable(nic_name)
if_addr = netifaces.ifaddresses(_nic_name)
return if_addr[self.PROTO_MAP[proto]][0]
@_synced
def close(self, dut_debug=False):
"""
close()
close all DUTs of the Env.
:param dut_debug: if dut_debug is True, print all DUT expect failures before closing it
:return: list of exceptions raised while closing DUTs
"""
dut_close_errors = []
for dut_name in self.allocated_duts:
dut = self.allocated_duts[dut_name]['dut']
try:
if dut_debug:
dut.print_debug_info()
dut.close()
except Exception as e:
traceback.print_exc()
dut_close_errors.append(e)
self.allocated_duts = dict()
return dut_close_errors
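# Usage sketch (illustrative only). 'ExampleApp' and 'ExampleDUT' are
# hypothetical stand-ins for concrete App/DUT classes supplied elsewhere by
# the test framework; the config file and app path are example values.
#
#   env = Env(app=ExampleApp, dut=ExampleDUT, env_tag='default',
#             env_config_file='EnvConfig.yml', test_suite_name='example_suite')
#   dut = env.get_dut('dut1', app_path='examples/get-started/hello_world')
#   try:
#       pass  # drive the DUT from the test case here
#   finally:
#       env.close(dut_debug=True)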
|
|
################################################################################
# Copyright (C) 2011-2012,2014 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
import numpy as np
import scipy.special as special
from bayespy.utils import misc, linalg
from .expfamily import ExponentialFamily
from .expfamily import ExponentialFamilyDistribution
from .expfamily import useconstructor
from .constant import Constant
from .deterministic import Deterministic
from .gamma import GammaMoments
from .node import Moments, Node
class WishartPriorMoments(Moments):
def __init__(self, k):
self.k = k
self.dims = ( (), () )
return
def compute_fixed_moments(self, n):
""" Compute moments for fixed x. """
u0 = np.asanyarray(n)
u1 = special.multigammaln(0.5*u0, self.k)
return [u0, u1]
@classmethod
def from_values(cls, x, d):
""" Compute the dimensions of phi or u. """
return cls(d)
class WishartMoments(Moments):
def __init__(self, shape):
self.shape = shape
self.ndim = len(shape)
self.dims = ( 2 * shape, () )
return
def compute_fixed_moments(self, Lambda, gradient=None):
""" Compute moments for fixed x. """
Lambda = np.asanyarray(Lambda)
L = linalg.chol(Lambda, ndim=self.ndim)
ldet = linalg.chol_logdet(L, ndim=self.ndim)
u = [Lambda,
ldet]
if gradient is None:
return u
du0 = gradient[0]
du1 = (
misc.add_trailing_axes(gradient[1], 2*self.ndim)
* linalg.chol_inv(L, ndim=self.ndim)
)
du = du0 + du1
return (u, du)
def plates_from_shape(self, shape):
if self.ndim == 0:
return shape
else:
return shape[:-2*self.ndim]
def shape_from_plates(self, plates):
return plates + self.shape + self.shape
def get_instance_conversion_kwargs(self):
return dict(ndim=self.ndim)
def get_instance_converter(self, ndim):
if ndim != self.ndim:
raise NotImplementedError(
"No conversion between different ndim implemented for "
"WishartMoments yet"
)
return None
@classmethod
def from_values(cls, x, ndim):
""" Compute the dimensions of phi and u. """
if np.ndim(x) < 2 * ndim:
raise ValueError("Values for Wishart distribution must be at least "
"2-D arrays.")
if ndim > 0 and (np.shape(x)[-ndim:] != np.shape(x)[-2*ndim:-ndim]):
raise ValueError("Values for Wishart distribution must be square "
"matrices, thus the two last axes must have equal "
"length.")
shape = (
np.shape(x)[-ndim:] if ndim > 0 else
()
)
return cls(shape)
class WishartDistribution(ExponentialFamilyDistribution):
"""
Sub-classes implement distribution specific computations.
Distribution for :math:`k \times k` symmetric positive definite matrix.
.. math::
\Lambda \sim \mathcal{W}(n, V)
Note: :math:`V` is inverse scale matrix.
.. math::
p(\Lambda | n, V) = ..
"""
def compute_message_to_parent(self, parent, index, u_self, u_n, u_V):
if index == 0:
raise NotImplementedError("Message from Wishart to degrees of "
"freedom parameter (first parent) "
"not yet implemented")
elif index == 1:
Lambda = u_self[0]
n = u_n[0]
return [-0.5 * Lambda,
0.5 * n]
else:
raise ValueError("Invalid parent index {0}".format(index))
def compute_phi_from_parents(self, u_n, u_V, mask=True):
r"""
Compute natural parameters
.. math::
\phi(n, V) =
\begin{bmatrix}
-\frac{1}{2} V
\\
\frac{1}{2} n
\end{bmatrix}
"""
return [-0.5 * u_V[0],
0.5 * u_n[0]]
def compute_moments_and_cgf(self, phi, mask=True):
r"""
Return moments and cgf for given natural parameters
.. math::
\langle u \rangle =
\begin{bmatrix}
\phi_2 (-\phi_1)^{-1}
\\
-\log|-\phi_1| + \psi_k(\phi_2)
\end{bmatrix}
\\
g(\phi) = \phi_2 \log|-\phi_1| - \log \Gamma_k(\phi_2)
"""
U = linalg.chol(-phi[0])
k = np.shape(phi[0])[-1]
#k = self.dims[0][0]
logdet_phi0 = linalg.chol_logdet(U)
u0 = phi[1][...,np.newaxis,np.newaxis] * linalg.chol_inv(U)
u1 = -logdet_phi0 + misc.multidigamma(phi[1], k)
u = [u0, u1]
g = phi[1] * logdet_phi0 - special.multigammaln(phi[1], k)
return (u, g)
def compute_cgf_from_parents(self, u_n, u_V):
r"""
CGF from parents
.. math::
g(n, V) = \frac{n}{2} \log|V| - \frac{nk}{2} \log 2 -
\log \Gamma_k(\frac{n}{2})
"""
n = u_n[0]
gammaln_n = u_n[1]
V = u_V[0]
logdet_V = u_V[1]
k = np.shape(V)[-1]
g = 0.5*n*logdet_V - 0.5*k*n*np.log(2) - gammaln_n
return g
def compute_fixed_moments_and_f(self, Lambda, mask=True):
r"""
Compute u(x) and f(x) for given x.
.. math::
u(\Lambda) =
\begin{bmatrix}
\Lambda
\\
\log |\Lambda|
\end{bmatrix}
"""
k = np.shape(Lambda)[-1]
ldet = linalg.chol_logdet(linalg.chol(Lambda))
u = [Lambda,
ldet]
f = -(k+1)/2 * ldet
return (u, f)
class Wishart(ExponentialFamily):
r"""
Node for Wishart random variables.
The random variable :math:`\mathbf{\Lambda}` is a :math:`D\times{}D`
positive-definite symmetric matrix.
.. math::
p(\mathbf{\Lambda}) = \mathrm{Wishart}(\mathbf{\Lambda} | N,
\mathbf{V})
Parameters
----------
n : scalar or array
:math:`N`, degrees of freedom, :math:`N>D-1`.
V : Wishart-like node or (...,D,D)-array
:math:`\mathbf{V}`, scale matrix.
"""
_distribution = WishartDistribution()
def __init__(self, n, V, **kwargs):
"""
Create Wishart node.
"""
super().__init__(n, V, **kwargs)
@classmethod
def _constructor(cls, n, V, **kwargs):
"""
Constructs distribution and moments objects.
"""
# Make V a proper parent node and get the dimensionality of the matrix
V = cls._ensure_moments(V, WishartMoments, ndim=1)
D = V.dims[0][-1]
n = cls._ensure_moments(n, WishartPriorMoments, d=D)
moments = WishartMoments((D,))
# Parent node message types
parent_moments = (n._moments, V._moments)
parents = [n, V]
return (parents,
kwargs,
moments.dims,
cls._total_plates(kwargs.get('plates'),
cls._distribution.plates_from_parent(0, n.plates),
cls._distribution.plates_from_parent(1, V.plates)),
cls._distribution,
moments,
parent_moments)
def scale(self, scalar, **kwargs):
return _ScaledWishart(self, scalar, **kwargs)
def __str__(self):
n = 2*self.phi[1]
A = 0.5 * self.u[0] / self.phi[1][...,np.newaxis,np.newaxis]
return ("%s ~ Wishart(n, A)\n"
" n =\n"
"%s\n"
" A =\n"
"%s\n"
% (self.name, n, A))
class _ScaledWishart(Deterministic):
def __init__(self, Lambda, alpha, ndim=None, **kwargs):
if ndim is None:
try:
ndim = Lambda._moments.ndim
except AttributeError:
raise ValueError("Give explicit ndim argument. (ndim=1 for normal matrix)")
Lambda = self._ensure_moments(Lambda, WishartMoments, ndim=ndim)
alpha = self._ensure_moments(alpha, GammaMoments)
dims = Lambda.dims
self._moments = Lambda._moments
self._parent_moments = (Lambda._moments, alpha._moments)
return super().__init__(Lambda, alpha, dims=dims, **kwargs)
def _compute_moments(self, u_Lambda, u_alpha):
Lambda = u_Lambda[0]
logdet_Lambda = u_Lambda[1]
alpha = misc.add_trailing_axes(u_alpha[0], 2*self._moments.ndim)
logalpha = u_alpha[1]
u0 = Lambda * alpha
u1 = logdet_Lambda + np.prod(self._moments.shape) * logalpha
return [u0, u1]
def _compute_message_to_parent(self, index, m, u_Lambda, u_alpha):
if index == 0:
alpha = misc.add_trailing_axes(u_alpha[0], 2*self._moments.ndim)
logalpha = u_alpha[1]
m0 = m[0] * alpha
m1 = m[1]
return [m0, m1]
if index == 1:
Lambda = u_Lambda[0]
logdet_Lambda = u_Lambda[1]
m0 = linalg.inner(m[0], Lambda, ndim=2*self._moments.ndim)
m1 = m[1] * np.prod(self._moments.shape)
return [m0, m1]
raise IndexError()
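# Usage sketch (illustrative only; assumes bayespy is installed and importable
# as a package). Constructs a 2x2 Wishart node with n=3 degrees of freedom
# (n > D - 1) and an identity inverse scale matrix, then scales it.
#
#   import numpy as np
#   from bayespy.nodes import Wishart
#
#   Lambda = Wishart(3, np.identity(2))
#   scaled = Lambda.scale(2.0)   # returns a _ScaledWishart deterministic node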
|
|
# pyOCD debugger
# Copyright (c) 2018-2020 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import itertools
from elftools.elf.elffile import ELFFile
from intelhex import IntelHex
import six
import errno
from .loader import FlashLoader
from ..core import exceptions
from ..debug.elf.elf import (ELFBinaryFile, SH_FLAGS)
from ..utility.compatibility import FileNotFoundError
LOG = logging.getLogger(__name__)
def ranges(i):
"""!
Accepts a sorted list of byte addresses. Breaks the addresses into contiguous ranges.
Yields 2-tuples of the start and end address for each contiguous range.
For instance, the input [0, 1, 2, 3, 32, 33, 34, 35] will yield the following 2-tuples:
(0, 3) and (32, 35).
"""
for a, b in itertools.groupby(enumerate(i), lambda x: x[1] - x[0]):
b = list(b)
yield b[0][1], b[-1][1]
class FileProgrammer(object):
"""! @brief Class to manage programming a file in any supported format with many options.
Most specifically, this class implements the behaviour provided by the command-line flash
programming tool. The code in this class simply extracts data from the given file, potentially
respecting format-specific options such as the base address for binary files. Then the heavy
lifting of flash programming is handled by FlashLoader, and beneath that, FlashBuilder.
Supported file formats are:
- Binary (.bin)
- Intel Hex (.hex)
- ELF (.elf or .axf)
"""
def __init__(self, session, progress=None, chip_erase=None, smart_flash=None,
trust_crc=None, keep_unwritten=None):
"""! @brief Constructor.
@param self
@param session The session object.
@param progress A progress report handler as a callable that takes a percentage completed.
If not set or None, a default progress handler will be used unless the session option
'hide_programming_progress' is set to True, in which case progress will be disabled.
@param chip_erase Sets whether to use chip erase or sector erase. The value must be one of
"auto", "sector", or "chip". "auto" means the fastest erase method should be used.
@param smart_flash If set to True, the programmer will attempt to not program pages whose
contents are not going to change by scanning target flash memory. A value of False will
force all pages to be erased and programmed.
@param trust_crc Boolean indicating whether to use only the sector CRC32 to decide whether a
sector already contains the data to be programmed. Use with caution, as CRC32 may return
the same value for different content.
@param keep_unwritten Depending on the sector versus page size and the amount of data
written, there may be ranges of flash that would be erased but not written with new
data. This parameter sets whether the existing contents of those unwritten ranges will
be read from memory and restored while programming.
"""
self._session = session
self._chip_erase = chip_erase
self._smart_flash = smart_flash
self._trust_crc = trust_crc
self._keep_unwritten = keep_unwritten
self._progress = progress
self._loader = None
self._format_handlers = {
'axf': self._program_elf,
'bin': self._program_bin,
'elf': self._program_elf,
'hex': self._program_hex,
}
def program(self, file_or_path, file_format=None, **kwargs):
"""! @brief Program a file into flash.
@param self
@param file_or_path Either a string that is a path to a file, or a file-like object.
@param file_format Optional file format name, one of "bin", "hex", "elf", "axf". If not provided,
the file's extension will be used. If a file object is passed for _file_or_path_ then
this parameter must be used to set the format.
@param kwargs Optional keyword arguments for format-specific parameters.
The only current format-specific keyword parameters are for the binary format:
- `base_address`: Memory address at which to program the binary data. If not set, the base
of the boot memory will be used.
- `skip`: Number of bytes to skip at the start of the binary file. Does not affect the
base address.
@exception FileNotFoundError Provided file_or_path string does not reference a file.
@exception ValueError Invalid argument value, for instance providing a file object but
not setting file_format.
"""
isPath = isinstance(file_or_path, six.string_types)
# Check for valid path first.
if isPath and not os.path.isfile(file_or_path):
raise FileNotFoundError(errno.ENOENT, "No such file: '{}'".format(file_or_path))
# If no format provided, use the file's extension.
if not file_format:
if isPath:
# Extract the extension from the path.
file_format = os.path.splitext(file_or_path)[1][1:]
# Explicitly check for no extension.
if file_format == '':
raise ValueError("file path '{}' does not have an extension and "
"no format is set".format(file_or_path))
else:
raise ValueError("file object provided but no format is set")
# Check the format is one we understand.
if file_format not in self._format_handlers:
raise ValueError("unknown file format '%s'" % file_format)
self._loader = FlashLoader(self._session,
progress=self._progress,
chip_erase=self._chip_erase,
smart_flash=self._smart_flash,
trust_crc=self._trust_crc,
keep_unwritten=self._keep_unwritten)
file_obj = None
try:
# Open the file if a path was provided.
if isPath:
mode = 'rb'
if file_format == 'hex':
# hex file must be read as plain text file
mode = 'r'
file_obj = open(file_or_path, mode)
else:
file_obj = file_or_path
# Pass to the format-specific programmer.
self._format_handlers[file_format](file_obj, **kwargs)
self._loader.commit()
finally:
if isPath and file_obj is not None:
file_obj.close()
def _program_bin(self, file_obj, **kwargs):
"""! @brief Binary file format loader"""
# If no base address is specified use the start of the boot memory.
address = kwargs.get('base_address', None)
if address is None:
boot_memory = self._session.target.memory_map.get_boot_memory()
if boot_memory is None:
raise exceptions.TargetSupportError("No boot memory is defined for this device")
address = boot_memory.start
file_obj.seek(kwargs.get('skip', 0), os.SEEK_SET)
data = list(bytearray(file_obj.read()))
self._loader.add_data(address, data)
def _program_hex(self, file_obj, **kwargs):
"""! Intel hex file format loader"""
hexfile = IntelHex(file_obj)
addresses = hexfile.addresses()
addresses.sort()
data_list = list(ranges(addresses))
for start, end in data_list:
size = end - start + 1
data = list(hexfile.tobinarray(start=start, size=size))
# Ignore invalid addresses for HEX files only
# Binary files (obviously) don't contain addresses
# For ELF files, any metadata that's not part of the application code
# will be held in a section that doesn't have the SHF_WRITE flag set
try:
self._loader.add_data(start, data)
except ValueError as e:
LOG.warning("Failed to add data chunk: %s", e)
def _program_elf(self, file_obj, **kwargs):
elf = ELFFile(file_obj)
for segment in elf.iter_segments():
addr = segment['p_paddr']
if segment.header.p_type == 'PT_LOAD' and segment.header.p_filesz != 0:
data = bytearray(segment.data())
LOG.debug("Writing segment LMA:0x%08x, VMA:0x%08x, size %d", addr,
segment['p_vaddr'], segment.header.p_filesz)
try:
self._loader.add_data(addr, data)
except ValueError as e:
LOG.warning("Failed to add data chunk: %s", e)
else:
LOG.debug("Skipping segment LMA:0x%08x, VMA:0x%08x, size %d", addr,
segment['p_vaddr'], segment.header.p_filesz)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.