#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Base class for RPC testing
import logging
import optparse
import os
import sys
import shutil
import tempfile
import traceback
from .util import (
initialize_chain,
start_nodes,
connect_nodes_bi,
sync_blocks,
sync_mempools,
stop_nodes,
stop_node,
enable_coverage,
check_json_precision,
initialize_chain_clean,
PortSeed,
)
from .authproxy import JSONRPCException
class BitcoinTestFramework(object):
def __init__(self):
self.num_nodes = 4
self.setup_clean_chain = False
self.nodes = None
def run_test(self):
raise NotImplementedError
def add_options(self, parser):
pass
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
if self.setup_clean_chain:
initialize_chain_clean(self.options.tmpdir, self.num_nodes)
else:
initialize_chain(self.options.tmpdir, self.num_nodes, self.options.cachedir)
def stop_node(self, num_node):
stop_node(self.nodes[num_node], num_node)
def setup_nodes(self):
return start_nodes(self.num_nodes, self.options.tmpdir)
def setup_network(self, split=False):
self.nodes = self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
# If we are joining previously split network halves, connect the nodes
# from the join point outward. This ensures that chains are properly
# reorganised.
if not split:
connect_nodes_bi(self.nodes, 1, 2)
sync_blocks(self.nodes[1:3])
sync_mempools(self.nodes[1:3])
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 2, 3)
self.is_network_split = split
self.sync_all()
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
assert not self.is_network_split
stop_nodes(self.nodes)
self.setup_network(True)
def sync_all(self):
if self.is_network_split:
sync_blocks(self.nodes[:2])
sync_blocks(self.nodes[2:])
sync_mempools(self.nodes[:2])
sync_mempools(self.nodes[2:])
else:
sync_blocks(self.nodes)
sync_mempools(self.nodes)
def join_network(self):
"""
Join the (previously split) network halves together.
"""
assert self.is_network_split
stop_nodes(self.nodes)
self.setup_network(False)
def main(self):
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave bitcoinds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop bitcoinds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
help="Source directory containing bitcoind/bitcoin-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
# Remember the original tmpdir as the root (removed at cleanup); the test
# itself runs in a per-port-seed subdirectory.
self.options.root, self.options.tmpdir = self.options.tmpdir, self.options.tmpdir + '/' + str(self.options.port_seed)
if self.options.trace_rpc:
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
if self.options.coveragedir:
enable_coverage(self.options.coveragedir)
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir+":"+self.options.srcdir+"/qt:"+os.environ['PATH']
check_json_precision()
success = False
try:
os.makedirs(self.options.tmpdir, exist_ok=False)
self.setup_chain()
self.setup_network()
self.run_test()
success = True
except JSONRPCException as e:
print("JSONRPC error: "+e.error['message'])
traceback.print_tb(sys.exc_info()[2])
except AssertionError as e:
print("Assertion failed: " + str(e))
traceback.print_tb(sys.exc_info()[2])
except KeyError as e:
print("key not found: "+ str(e))
traceback.print_tb(sys.exc_info()[2])
except Exception as e:
print("Unexpected exception caught during testing: " + repr(e))
traceback.print_tb(sys.exc_info()[2])
except KeyboardInterrupt as e:
print("Exiting after " + repr(e))
if not self.options.noshutdown:
print("Stopping nodes")
stop_nodes(self.nodes)
else:
print("Note: bitcoinds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success:
print("Cleaning up")
shutil.rmtree(self.options.tmpdir)
if not os.listdir(self.options.root):
os.rmdir(self.options.root)
else:
print("Not cleaning up dir %s" % self.options.tmpdir)
if os.getenv("PYTHON_DEBUG", ""):
# Dump the end of the debug logs, to aid in debugging rare
# travis failures.
import glob
filenames = glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
MAX_LINES_TO_PRINT = 1000
for f in filenames:
print("From", f, ":")
from collections import deque
print("".join(deque(open(f), MAX_LINES_TO_PRINT)))
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
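# Illustrative sketch (not part of the original framework): a typical RPC test
# subclasses BitcoinTestFramework, overrides run_test(), and calls main() from
# its __main__ guard, for example:
#
#     class ExampleTest(BitcoinTestFramework):
#         def __init__(self):
#             super().__init__()
#             self.num_nodes = 2
#             self.setup_clean_chain = True
#
#         def run_test(self):
#             # With setup_clean_chain=True the nodes start from empty datadirs,
#             # so the chain height is expected to be 0 here.
#             assert self.nodes[0].getblockcount() == 0
#
#     if __name__ == '__main__':
#         ExampleTest().main()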
# Test framework for doing p2p comparison testing, which sets up some bitcoind
# binaries:
# 1 binary: test binary
# 2 binaries: 1 test binary, 1 ref binary
# n>2 binaries: 1 test binary, n-1 ref binaries
class ComparisonTestFramework(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("ELEMENTSD", "elementsd"),
help="bitcoind binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("ELEMENTSD", "elementsd"),
help="bitcoind binary to use for reference nodes (if any)")
def setup_network(self):
self.nodes = start_nodes(
self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']] * self.num_nodes,
binary=[self.options.testbinary] +
[self.options.refbinary]*(self.num_nodes-1))
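# Illustrative usage (hypothetical script name): a test built on
# ComparisonTestFramework can point the test node and reference nodes at
# specific binaries via the options registered in add_options() above, e.g.:
#
#     python example_comparison_test.py \
#         --testbinary=/path/to/elementsd-under-test \
#         --refbinary=/path/to/reference/elementsd
#
# With no options, both default to the ELEMENTSD environment variable, or to a
# binary named "elementsd" found on PATH.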
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 Huawei Technologies Co., Ltd.
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for Huawei T and Dorado volume drivers.
"""
import mox
import os
import shutil
import socket
import tempfile
import time
from xml.dom.minidom import Document
from xml.etree import ElementTree as ET
from cinder import context
from cinder import exception
from cinder import test
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.huawei import huawei_utils
from cinder.volume.drivers.huawei import HuaweiVolumeDriver
from cinder.volume.drivers.huawei import ssh_common
from cinder.volume import volume_types
LUN_INFO = {'ID': None,
'Name': None,
'Size': None,
'LUN WWN': None,
'Status': None,
'Visible Capacity': None,
'Disk Pool ID': None,
'Cache Prefetch Strategy': None,
'Lun Type': None,
'Consumed Capacity': None,
'Pool ID': None,
'SnapShot ID': None,
'LunCopy ID': None,
'Owner Controller': None,
'Worker Controller': None,
'RAID Group ID': None}
CLONED_LUN_INFO = {'ID': None,
'Name': None,
'Size': None,
'LUN WWN': None,
'Status': None,
'Visible Capacity': None,
'Disk Pool ID': None,
'Cache Prefetch Strategy': None,
'Lun Type': None,
'Consumed Capacity': None,
'Pool ID': None,
'SnapShot ID': None,
'LunCopy ID': None,
'Owner Controller': None,
'Worker Controller': None,
'RAID Group ID': None}
SNAPSHOT_INFO = {'Source LUN ID': None,
'Source LUN Name': None,
'ID': None,
'Name': None,
'Type': 'Public',
'Status': None}
MAP_INFO = {'Host Group ID': None,
'Host Group Name': None,
'Host ID': None,
'Host Name': None,
'Os Type': None,
'INI Port ID': None,
'INI Port Name': None,
'INI Port Info': None,
'INI Port WWN': None,
'INI Port Type': None,
'Link Status': None,
'LUN WWN': None,
'DEV LUN ID': None,
'Host LUN ID': None,
'CHAP status': False}
HOST_PORT_INFO = {'ID': None,
'Name': None,
'Info': None,
'WWN': None,
'Type': None}
LUNCOPY_INFO = {'Name': None,
'ID': None,
'Type': None,
'State': None,
'Status': None}
LUNCOPY_SETTING = {'ID': '1',
'Type': 'FULL',
'State': 'Created',
'Status': 'Normal'}
POOL_SETTING = {'ID': '2',
'Level': 'RAID6',
'Status': 'Normal',
'Free Capacity': '10240',
'Disk List': '0,1;0,2;0,3;0,4;0,5;0,6',
'Name': 'RAID_001',
'Type': 'Thick'}
INITIATOR_SETTING = {'TargetIQN': 'iqn.2006-08.com.huawei:oceanspace:2103037:',
'TargetIQN-form': 'iqn.2006-08.com.huawei:oceanspace:'
'2103037::1020001:192.168.100.2',
'Initiator Name': 'iqn.1993-08.debian:01:ec2bff7ac3a3',
'Initiator TargetIP': '192.168.100.2',
'WWN': ['2011666666666565']}
FAKE_VOLUME = {'name': 'Volume-lele34fe-223f-dd33-4423-asdfghjklqwe',
'id': 'lele34fe-223f-dd33-4423-asdfghjklqwe',
'size': '2',
'provider_auth': None,
'volume_type_id': None,
'provider_location': None}
FAKE_CLONED_VOLUME = {'name': 'Volume-jeje34fe-223f-dd33-4423-asdfghjklqwg',
'id': 'jeje34fe-223f-dd33-4423-asdfghjklqwg',
'size': '3',
'provider_auth': None,
'volume_type_id': None,
'provider_location': None}
FAKE_SNAPSHOT = {'name': 'keke34fe-223f-dd33-4423-asdfghjklqwf',
'id': '223f-dd33-4423-asdfghjklqwf',
'volume_name': 'Volume-lele34fe-223f-dd33-4423-asdfghjklqwe',
'provider_location': None}
FAKE_CONNECTOR = {'initiator': 'iqn.1993-08.debian:01:ec2bff7ac3a3',
'wwpns': ['1000000164s45126'],
'wwnns': ['2000666666666565'],
'host': 'fakehost',
'ip': '10.10.0.1'}
RESPOOL_A_SIM = {'Size': '10240', 'Valid Size': '5120'}
RESPOOL_B_SIM = {'Size': '10240', 'Valid Size': '10240'}
VOLUME_SNAP_ID = {'vol': '0', 'vol_copy': '1', 'snap': '2'}
cmd_error_list = []  # CLI commands in this list will fail when executed
Curr_test = ['']  # indicates which driver/simulator is currently under test
class FakeChannel():
def __init__(self):
if Curr_test[0] == 'T':
self.simu = HuaweiTCLIResSimulator()
elif Curr_test[0] == 'Dorado5100':
self.simu = HuaweiDorado5100CLIResSimulator()
else:
self.simu = HuaweiDorado2100G2CLIResSimulator()
def resize_pty(self, width=80, height=24):
pass
def settimeout(self, time):
pass
def send(self, s):
self.command = s
def recv(self, nbytes):
command = self.command.split()
cmd = command[0]
params = command[1:]
if cmd in cmd_error_list:
reset_error_flg(cmd)
out = self.command[:-1] + 'ERROR' + '\nadmin:/>'
return out.replace('\n', '\r\n')
func_name = 'cli_' + cmd
cli_func = getattr(self.simu, func_name)
out = cli_func(params)
out = self.command[:-1] + out + '\nadmin:/>'
return out.replace('\n', '\r\n')
def close(self):
pass
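# The fake channel above dispatches simulated CLI commands by name: send()
# records the command line, and recv() routes it to the matching cli_<cmd>
# method on whichever simulator Curr_test selects, echoing the command, the
# simulated output, and an "admin:/>" prompt with CRLF line endings.
# For example (sketch):
#
#     chan = FakeChannel()
#     chan.send('showlun\n')
#     out = chan.recv(1024)   # echoes "showlun", then the simulator's
#                             # cli_showlun([]) output and the prompt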
class FakeSSHClient():
def invoke_shell(self):
return FakeChannel()
def get_transport(self):
class transport():
def __init__(self):
self.sock = sock()
class sock():
def settimeout(self, time):
pass
return transport()
def close(self):
pass
class FakeSSHPool():
def __init__(self, ip, port, conn_timeout, login, password=None,
*args, **kwargs):
self.ip = ip
self.port = port
self.login = login
self.password = password
def create(self):
return FakeSSHClient()
def get(self):
return FakeSSHClient()
def put(self, ssh):
pass
def remove(self, ssh):
pass
def Fake_sleep(time):
pass
def Fake_change_file_mode(obj, filepath):
pass
def create_fake_conf_file(filename):
doc = Document()
config = doc.createElement('config')
doc.appendChild(config)
storage = doc.createElement('Storage')
config.appendChild(storage)
product = doc.createElement('Product')
product_text = doc.createTextNode('T')
product.appendChild(product_text)
storage.appendChild(product)
config.appendChild(storage)
protocol = doc.createElement('Protocol')
protocol_text = doc.createTextNode('iSCSI')
protocol.appendChild(protocol_text)
storage.appendChild(protocol)
controllerip0 = doc.createElement('ControllerIP0')
controllerip0_text = doc.createTextNode('10.10.10.1')
controllerip0.appendChild(controllerip0_text)
storage.appendChild(controllerip0)
controllerip1 = doc.createElement('ControllerIP1')
controllerip1_text = doc.createTextNode('10.10.10.2')
controllerip1.appendChild(controllerip1_text)
storage.appendChild(controllerip1)
username = doc.createElement('UserName')
username_text = doc.createTextNode('admin')
username.appendChild(username_text)
storage.appendChild(username)
userpassword = doc.createElement('UserPassword')
userpassword_text = doc.createTextNode('123456')
userpassword.appendChild(userpassword_text)
storage.appendChild(userpassword)
lun = doc.createElement('LUN')
config.appendChild(lun)
storagepool = doc.createElement('StoragePool')
storagepool.setAttribute('Name', 'RAID_001')
lun.appendChild(storagepool)
luntype = doc.createElement('LUNType')
luntype_text = doc.createTextNode('Thick')
luntype.appendChild(luntype_text)
lun.appendChild(luntype)
iscsi = doc.createElement('iSCSI')
config.appendChild(iscsi)
defaulttargetip = doc.createElement('DefaultTargetIP')
defaulttargetip_text = doc.createTextNode('192.168.100.1')
defaulttargetip.appendChild(defaulttargetip_text)
iscsi.appendChild(defaulttargetip)
initiator = doc.createElement('Initiator')
initiator.setAttribute('Name', 'iqn.1993-08.debian:01:ec2bff7ac3a3')
initiator.setAttribute('TargetIP', '192.168.100.2')
iscsi.appendChild(initiator)
os_type = doc.createElement('Host')
os_type.setAttribute('OSType', 'Linux')
os_type.setAttribute('HostIP', '10.10.0.1')
config.appendChild(os_type)
tmp_file = open(filename, 'w')
tmp_file.write(doc.toprettyxml(indent=''))
tmp_file.close()
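# For reference, the fake config written above has roughly this shape; the
# values are the hard-coded fixtures used throughout this test module:
#
#     <config>
#       <Storage>
#         <Product>T</Product>
#         <Protocol>iSCSI</Protocol>
#         <ControllerIP0>10.10.10.1</ControllerIP0>
#         <ControllerIP1>10.10.10.2</ControllerIP1>
#         <UserName>admin</UserName>
#         <UserPassword>123456</UserPassword>
#       </Storage>
#       <LUN>
#         <StoragePool Name="RAID_001"/>
#         <LUNType>Thick</LUNType>
#       </LUN>
#       <iSCSI>
#         <DefaultTargetIP>192.168.100.1</DefaultTargetIP>
#         <Initiator Name="iqn.1993-08.debian:01:ec2bff7ac3a3"
#                    TargetIP="192.168.100.2"/>
#       </iSCSI>
#       <Host OSType="Linux" HostIP="10.10.0.1"/>
#     </config>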
def modify_conf(conf, item, val, attrib=None):
tree = ET.parse(conf)
root = tree.getroot()
conf_item = root.find('%s' % item)
if not attrib:
conf_item.text = '%s' % val
else:
conf_item.attrib['%s' % attrib] = '%s' % val
tree.write(conf, 'UTF-8')
def set_error_flg(cmd):
cmd_error_list.append(cmd)
def reset_error_flg(cmd):
cmd_error_list.remove(cmd)
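# Typical use in the tests below (sketch): force the next matching CLI call to
# fail, then assert that the driver surfaces a backend exception, e.g.:
#
#     set_error_flg('createlun')
#     self.assertRaises(exception.VolumeBackendAPIException,
#                       self.driver.create_volume, FAKE_VOLUME)
#
# FakeChannel.recv() pops the flag via reset_error_flg(), so only the first
# matching command fails.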
class HuaweiTCLIResSimulator():
def _name_translate(self, name):
return 'OpenStack_' + str(hash(name))
def cli_showsys(self, params):
pass
def cli_createlun(self, params):
lun_type = ('THIN' if '-pool' in params else 'THICK')
if LUN_INFO['ID'] is None:
LUN_INFO['Name'] = self._name_translate(FAKE_VOLUME['name'])
LUN_INFO['ID'] = VOLUME_SNAP_ID['vol']
LUN_INFO['Size'] = FAKE_VOLUME['size']
LUN_INFO['Lun Type'] = lun_type
LUN_INFO['Owner Controller'] = 'A'
LUN_INFO['Worker Controller'] = 'A'
LUN_INFO['RAID Group ID'] = POOL_SETTING['ID']
FAKE_VOLUME['provider_location'] = LUN_INFO['ID']
else:
CLONED_LUN_INFO['Name'] = \
self._name_translate(FAKE_CLONED_VOLUME['name'])
CLONED_LUN_INFO['ID'] = VOLUME_SNAP_ID['vol_copy']
CLONED_LUN_INFO['Size'] = FAKE_CLONED_VOLUME['size']
CLONED_LUN_INFO['Lun Type'] = lun_type
CLONED_LUN_INFO['Owner Controller'] = 'A'
CLONED_LUN_INFO['Worker Controller'] = 'A'
CLONED_LUN_INFO['RAID Group ID'] = POOL_SETTING['ID']
FAKE_CLONED_VOLUME['provider_location'] = CLONED_LUN_INFO['ID']
out = 'command operates successfully'
return out
def cli_showlun(self, params):
if '-lun' not in params:
if LUN_INFO['ID'] is None:
out = 'command operates successfully, but no information.'
elif CLONED_LUN_INFO['ID'] is None:
out = """/>showlun
===========================================================================
LUN Information
---------------------------------------------------------------------------
ID RAID Group ID Disk Pool ID Status Controller Visible Capacity(MB) \
LUN Name Stripe Unit Size(KB) Lun Type
---------------------------------------------------------------------------
%s %s -- Normal %s %s %s 64 THICK
===========================================================================
""" % (LUN_INFO['ID'], LUN_INFO['RAID Group ID'], LUN_INFO['Owner Controller'],
str(int(LUN_INFO['Size']) * 1024), LUN_INFO['Name'])
else:
out = """/>showlun
============================================================================
LUN Information
----------------------------------------------------------------------------
ID RAID Group ID Disk Pool ID Status Controller Visible Capacity(MB)\
LUN Name Stripe Unit Size(KB) Lun Type
----------------------------------------------------------------------------
%s %s -- Normal %s %s %s 64 THICK
%s %s -- Normal %s %s %s 64 THICK
============================================================================
""" % (LUN_INFO['ID'], LUN_INFO['RAID Group ID'], LUN_INFO['Owner Controller'],
str(int(LUN_INFO['Size']) * 1024), LUN_INFO['Name'],
CLONED_LUN_INFO['ID'], CLONED_LUN_INFO['RAID Group ID'],
CLONED_LUN_INFO['Owner Controller'],
str(int(CLONED_LUN_INFO['Size']) * 1024),
CLONED_LUN_INFO['Name'])
elif params[params.index('-lun') + 1] in VOLUME_SNAP_ID.values():
out = """/>showlun
================================================
LUN Information
------------------------------------------------
ID | %s
Name | %s
LUN WWN | --
Visible Capacity | %s
RAID GROUP ID | %s
Owning Controller | %s
Workong Controller | %s
Lun Type | %s
SnapShot ID | %s
LunCopy ID | %s
================================================
""" % ((LUN_INFO['ID'], LUN_INFO['Name'], LUN_INFO['Visible Capacity'],
LUN_INFO['RAID Group ID'], LUN_INFO['Owner Controller'],
LUN_INFO['Worker Controller'], LUN_INFO['Lun Type'],
LUN_INFO['SnapShot ID'], LUN_INFO['LunCopy ID'])
if params[params.index('-lun') + 1] == VOLUME_SNAP_ID['vol'] else
(CLONED_LUN_INFO['ID'], CLONED_LUN_INFO['Name'],
CLONED_LUN_INFO['Visible Capacity'], CLONED_LUN_INFO['RAID Group ID'],
CLONED_LUN_INFO['Owner Controller'],
CLONED_LUN_INFO['Worker Controller'],
CLONED_LUN_INFO['Lun Type'], CLONED_LUN_INFO['SnapShot ID'],
CLONED_LUN_INFO['LunCopy ID']))
else:
out = 'ERROR: The object does not exist.'
return out
def cli_dellun(self, params):
if params[params.index('-lun') + 1] == VOLUME_SNAP_ID['vol']:
LUN_INFO['Name'] = None
LUN_INFO['ID'] = None
LUN_INFO['Size'] = None
LUN_INFO['Lun Type'] = None
LUN_INFO['LUN WWN'] = None
LUN_INFO['Owner Controller'] = None
LUN_INFO['Worker Controller'] = None
LUN_INFO['RAID Group ID'] = None
FAKE_VOLUME['provider_location'] = None
else:
CLONED_LUN_INFO['Name'] = None
CLONED_LUN_INFO['ID'] = None
CLONED_LUN_INFO['Size'] = None
CLONED_LUN_INFO['Lun Type'] = None
CLONED_LUN_INFO['LUN WWN'] = None
CLONED_LUN_INFO['Owner Controller'] = None
CLONED_LUN_INFO['Worker Controller'] = None
CLONED_LUN_INFO['RAID Group ID'] = None
CLONED_LUN_INFO['provider_location'] = None
FAKE_CLONED_VOLUME['provider_location'] = None
out = 'command operates successfully'
return out
def cli_showrg(self, params):
out = """/>showrg
=====================================================================
RAID Group Information
---------------------------------------------------------------------
ID Level Status Free Capacity(MB) Disk List Name
---------------------------------------------------------------------
0 RAID6 Normal 1024 0,0;0,2; RAID003
%s %s %s %s %s %s
=====================================================================
-""" % (POOL_SETTING['ID'], POOL_SETTING['Level'],
POOL_SETTING['Status'], POOL_SETTING['Free Capacity'],
POOL_SETTING['Disk List'], POOL_SETTING['Name'])
return out
def cli_showpool(self, params):
out = """/>showpool
=====================================================================
Pool Information
---------------------------------------------------------------------
Level Status Available Capacity(MB) Disk List
---------------------------------------------------------------------
RAID6 Normal %s 0,0;0,2;0,4;0,5;
=====================================================================
-""" % POOL_SETTING['Free Capacity']
return out
def cli_createluncopy(self, params):
src_id = params[params.index('-slun') + 1]
tgt_id = params[params.index('-tlun') + 1]
LUNCOPY_INFO['Name'] = 'OpenStack_%s_%s' % (src_id, tgt_id)
LUNCOPY_INFO['ID'] = LUNCOPY_SETTING['ID']
LUNCOPY_INFO['Type'] = LUNCOPY_SETTING['Type']
LUNCOPY_INFO['State'] = LUNCOPY_SETTING['State']
LUNCOPY_INFO['Status'] = LUNCOPY_SETTING['Status']
out = 'command operates successfully'
return out
def cli_chgluncopystatus(self, params):
LUNCOPY_INFO['State'] = 'Start'
out = 'command operates successfully'
return out
def cli_showluncopy(self, params):
if LUNCOPY_INFO['State'] == 'Start':
LUNCOPY_INFO['State'] = 'Copying'
elif LUNCOPY_INFO['State'] == 'Copying':
LUNCOPY_INFO['State'] = 'Complete'
out = """/>showluncopy
============================================================================
LUN Copy Information
----------------------------------------------------------------------------
LUN Copy Name LUN Copy ID Type LUN Copy State LUN Copy Status
----------------------------------------------------------------------------
%s %s %s %s %s
============================================================================
""" % (LUNCOPY_INFO['Name'], LUNCOPY_INFO['ID'], LUNCOPY_INFO['Type'],
LUNCOPY_INFO['State'], LUNCOPY_INFO['Status'])
return out
def cli_delluncopy(self, params):
LUNCOPY_INFO['Name'] = None
LUNCOPY_INFO['ID'] = None
LUNCOPY_INFO['Type'] = None
LUNCOPY_INFO['State'] = None
LUNCOPY_INFO['Status'] = None
out = 'command operates successfully'
return out
def cli_createsnapshot(self, params):
SNAPSHOT_INFO['Source LUN ID'] = LUN_INFO['ID']
SNAPSHOT_INFO['Source LUN Name'] = LUN_INFO['Name']
SNAPSHOT_INFO['ID'] = VOLUME_SNAP_ID['snap']
SNAPSHOT_INFO['Name'] =\
self._name_translate(FAKE_SNAPSHOT['name'])
SNAPSHOT_INFO['Status'] = 'Disable'
out = 'command operates successfully'
return out
def cli_showsnapshot(self, params):
if SNAPSHOT_INFO['ID'] is None:
out = 'command operates successfully, but no information.'
else:
out = """/>showsnapshot
==========================================================================
Snapshot Information
--------------------------------------------------------------------------
Name ID Type Status Time Stamp
--------------------------------------------------------------------------
%s %s Public %s 2013-01-15 14:21:13
==========================================================================
""" % (SNAPSHOT_INFO['Name'], SNAPSHOT_INFO['ID'], SNAPSHOT_INFO['Status'])
return out
def cli_actvsnapshot(self, params):
SNAPSHOT_INFO['Status'] = 'Active'
FAKE_SNAPSHOT['provider_location'] = SNAPSHOT_INFO['ID']
out = 'command operates successfully'
return out
def cli_disablesnapshot(self, params):
SNAPSHOT_INFO['Status'] = 'Disable'
out = 'command operates successfully'
return out
def cli_delsnapshot(self, params):
SNAPSHOT_INFO['Source LUN ID'] = None
SNAPSHOT_INFO['Source LUN Name'] = None
SNAPSHOT_INFO['ID'] = None
SNAPSHOT_INFO['Name'] = None
SNAPSHOT_INFO['Status'] = None
FAKE_SNAPSHOT['provider_location'] = None
out = 'command operates successfully'
return out
def cli_showrespool(self, params):
out = """/>showrespool
===========================================================================
Resource Pool Information
---------------------------------------------------------------------------
Pool ID Size(MB) Usage(MB) Valid Size(MB) Alarm Threshold
---------------------------------------------------------------------------
A %s 0.0 %s 80
B %s 0.0 %s 80
===========================================================================
-""" % (RESPOOL_A_SIM['Size'], RESPOOL_A_SIM['Valid Size'],
RESPOOL_B_SIM['Size'], RESPOOL_B_SIM['Valid Size'])
return out
def cli_showiscsitgtname(self, params):
iqn = INITIATOR_SETTING['TargetIQN']
out = """/>showiscsitgtname
===================================================================
ISCSI Name
-------------------------------------------------------------------
Iscsi Name | %s
===================================================================
""" % iqn
return out
def cli_showiscsiip(self, params):
out = """/>showiscsiip
============================================================================
iSCSI IP Information
----------------------------------------------------------------------------
Controller ID Interface Module ID Port ID IP Address Mask
----------------------------------------------------------------------------
B 0 P1 %s 255.255.255.0
============================================================================
-""" % INITIATOR_SETTING['Initiator TargetIP']
return out
def cli_showhostgroup(self, params):
if MAP_INFO['Host Group ID'] is None:
out = """/>showhostgroup
============================================================
Host Group Information
------------------------------------------------------------
Host Group ID Name File Engine Cluster
------------------------------------------------------------
0 Default Group NO
============================================================
"""
else:
out = """/>showhostgroup
============================================================
Host Group Information
------------------------------------------------------------
Host Group ID Name File Engine Cluster
------------------------------------------------------------
0 Default Group NO
%s %s NO
============================================================
""" % (MAP_INFO['Host Group ID'], MAP_INFO['Host Group Name'])
return out
def cli_createhostgroup(self, params):
MAP_INFO['Host Group ID'] = '1'
MAP_INFO['Host Group Name'] = 'HostGroup_OpenStack'
out = 'command operates successfully'
return out
def cli_showhost(self, params):
if MAP_INFO['Host ID'] is None:
out = 'command operates successfully, but no information.'
else:
out = """/>showhost
=======================================================
Host Information
-------------------------------------------------------
Host ID Host Name Host Group ID Os Type
-------------------------------------------------------
%s %s %s Linux
=======================================================
""" % (MAP_INFO['Host ID'], MAP_INFO['Host Name'], MAP_INFO['Host Group ID'])
return out
def cli_addhost(self, params):
MAP_INFO['Host ID'] = '1'
MAP_INFO['Host Name'] = 'Host_' + FAKE_CONNECTOR['host']
MAP_INFO['Os Type'] = 'Linux'
out = 'command operates successfully'
return out
def cli_delhost(self, params):
MAP_INFO['Host ID'] = None
MAP_INFO['Host Name'] = None
MAP_INFO['Os Type'] = None
out = 'command operates successfully'
return out
def cli_showiscsiini(self, params):
if HOST_PORT_INFO['ID'] is None:
out = 'Error: The parameter is wrong.'
else:
out = """/>showiscsiini
========================================================
Initiator Information
--------------------------------------------------------
Initiator Name Chap Status
--------------------------------------------------------
%s Disable
========================================================
""" % HOST_PORT_INFO['Info']
return out
def cli_addiscsiini(self, params):
HOST_PORT_INFO['ID'] = '1'
HOST_PORT_INFO['Name'] = 'iSCSIInitiator001'
HOST_PORT_INFO['Info'] = INITIATOR_SETTING['Initiator Name']
HOST_PORT_INFO['Type'] = 'ISCSITGT'
out = 'command operates successfully'
return out
def cli_deliscsiini(self, params):
HOST_PORT_INFO['ID'] = None
HOST_PORT_INFO['Name'] = None
HOST_PORT_INFO['Info'] = None
HOST_PORT_INFO['Type'] = None
out = 'command operates successfully'
return out
def cli_showhostport(self, params):
if MAP_INFO['INI Port ID'] is None:
out = 'command operates successfully, but no information.'
else:
out = """/>showhostport
============================================================================
Host Port Information
----------------------------------------------------------------------------
Port ID Port Name Port Information Port Type Host ID Link Status \
Multipath Type
----------------------------------------------------------------------------
%s %s %s %s %s Unconnected Default
============================================================================
""" % (MAP_INFO['INI Port ID'], MAP_INFO['INI Port Name'],
MAP_INFO['INI Port Info'], MAP_INFO['INI Port Type'],
MAP_INFO['Host ID'])
return out
def cli_addhostport(self, params):
MAP_INFO['INI Port ID'] = HOST_PORT_INFO['ID']
MAP_INFO['INI Port Name'] = HOST_PORT_INFO['Name']
MAP_INFO['INI Port Info'] = HOST_PORT_INFO['Info']
MAP_INFO['INI Port Type'] = HOST_PORT_INFO['Type']
out = 'command operates successfully'
return out
def cli_delhostport(self, params):
MAP_INFO['INI Port ID'] = None
MAP_INFO['INI Port Name'] = None
MAP_INFO['INI Port Info'] = None
MAP_INFO['INI Port Type'] = None
HOST_PORT_INFO['ID'] = None
HOST_PORT_INFO['Name'] = None
HOST_PORT_INFO['Info'] = None
HOST_PORT_INFO['Type'] = None
out = 'command operates successfully'
return out
def cli_showhostmap(self, params):
if MAP_INFO['DEV LUN ID'] is None:
out = 'command operates successfully, but no information.'
else:
out = """/>showhostmap
===========================================================================
Map Information
---------------------------------------------------------------------------
Map ID Working Controller Dev LUN ID LUN WWN Host LUN ID Mapped to\
RAID ID Dev LUN Cap(MB) Map Type Whether Command LUN Pool ID
----------------------------------------------------------------------------
2147483649 %s %s %s %s Host: %s %s %s HOST No --
============================================================================
""" % (LUN_INFO['Worker Controller'], LUN_INFO['ID'], LUN_INFO['LUN WWN'],
MAP_INFO['Host LUN ID'], MAP_INFO['Host ID'], LUN_INFO['RAID Group ID'],
str(int(LUN_INFO['Size']) * 1024))
return out
def cli_addhostmap(self, params):
MAP_INFO['DEV LUN ID'] = LUN_INFO['ID']
MAP_INFO['LUN WWN'] = LUN_INFO['LUN WWN']
MAP_INFO['Host LUN ID'] = '2'
MAP_INFO['Link Status'] = 'Linked'
out = 'command operates successfully'
return out
def cli_delhostmap(self, params):
if MAP_INFO['Link Status'] == 'Linked':
MAP_INFO['Link Status'] = 'Deleting'
out = 'there are IOs accessing the system, please try later'
else:
MAP_INFO['Link Status'] = None
MAP_INFO['DEV LUN ID'] = None
MAP_INFO['LUN WWN'] = None
MAP_INFO['Host LUN ID'] = None
out = 'command operates successfully'
return out
def cli_showfreeport(self, params):
out = """/>showfreeport
=======================================================================
Host Free Port Information
-----------------------------------------------------------------------
WWN Or MAC Type Location Connection Status
-----------------------------------------------------------------------
1000000164s45126 FC Primary Controller Connected
=======================================================================
"""
HOST_PORT_INFO['ID'] = '2'
HOST_PORT_INFO['Name'] = 'FCInitiator001'
HOST_PORT_INFO['Info'] = '1000000164s45126'
HOST_PORT_INFO['Type'] = 'FC'
return out
def cli_showhostpath(self, params):
host = params[params.index('-host') + 1]
out = """/>showhostpath -host 1
=======================================
Multi Path Information
---------------------------------------
Host ID | %s
Controller ID | B
Port Type | FC
Initiator WWN | 1000000164s45126
Target WWN | %s
Host Port ID | 0
Link Status | Normal
=======================================
""" % (host, INITIATOR_SETTING['WWN'][0])
return out
def cli_showfcmode(self, params):
out = """/>showfcport
=========================================================================
FC Port Topology Mode
-------------------------------------------------------------------------
Controller ID Interface Module ID Port ID WWN Current Mode
-------------------------------------------------------------------------
B 1 P0 %s --
=========================================================================
-""" % INITIATOR_SETTING['WWN'][0]
return out
def cli_chglun(self, params):
if params[params.index('-lun') + 1] == VOLUME_SNAP_ID['vol']:
LUN_INFO['Owner Controller'] = 'B'
else:
CLONED_LUN_INFO['Owner Controller'] = 'B'
out = 'command operates successfully'
return out
class HuaweiDorado5100CLIResSimulator(HuaweiTCLIResSimulator):
def cli_showsys(self, params):
out = """/>showsys
=============================================================
System Information
-------------------------------------------------------------
System Name | SN_Dorado5100
Device Type | Oceanstor Dorado5100
Current System Mode | Double Controllers Normal
Mirroring Link Status | Link Up
Location |
Time | 2013-01-01 01:01:01
Product Version | V100R001C00
=============================================================
"""
return out
def cli_showlun(self, params):
if '-lun' not in params:
if LUN_INFO['ID'] is None:
out = 'command operates successfully, but no information.'
elif CLONED_LUN_INFO['ID'] is None:
out = """/>showlun
===========================================================================
LUN Information
---------------------------------------------------------------------------
ID RAIDgroup ID Status Controller Visible Capacity(MB) LUN Name..\
Strip Unit Size(KB) Lun Type
---------------------------------------------------------------------------
%s %s Normal %s %s %s 64 THICK
===========================================================================
""" % (LUN_INFO['ID'], LUN_INFO['RAID Group ID'],
LUN_INFO['Owner Controller'], str(int(LUN_INFO['Size']) * 1024),
LUN_INFO['Name'])
else:
out = """/>showlun
===========================================================================
LUN Information
---------------------------------------------------------------------------
ID RAIDgroup ID Status Controller Visible Capacity(MB) LUN Name \
Strip Unit Size(KB) Lun Type
---------------------------------------------------------------------------
%s %s Normal %s %s %s 64 THICK
%s %s Norma %s %s %s 64 THICK
===========================================================================
""" % (LUN_INFO['ID'], LUN_INFO['RAID Group ID'], LUN_INFO['Owner Controller'],
str(int(LUN_INFO['Size']) * 1024), LUN_INFO['Name'],
CLONED_LUN_INFO['ID'], CLONED_LUN_INFO['RAID Group ID'],
CLONED_LUN_INFO['Owner Controller'],
str(int(CLONED_LUN_INFO['Size']) * 1024),
CLONED_LUN_INFO['Name'])
elif params[params.index('-lun') + 1] in VOLUME_SNAP_ID.values():
out = """/>showlun
================================================
LUN Information
------------------------------------------------
ID | %s
Name | %s
LUN WWN | --
Visible Capacity | %s
RAID GROUP ID | %s
Owning Controller | %s
Workong Controller | %s
Lun Type | %s
SnapShot ID | %s
LunCopy ID | %s
================================================
""" % ((LUN_INFO['ID'], LUN_INFO['Name'], LUN_INFO['Visible Capacity'],
LUN_INFO['RAID Group ID'], LUN_INFO['Owner Controller'],
LUN_INFO['Worker Controller'], LUN_INFO['Lun Type'],
LUN_INFO['SnapShot ID'], LUN_INFO['LunCopy ID'])
if params[params.index('-lun') + 1] == VOLUME_SNAP_ID['vol'] else
(CLONED_LUN_INFO['ID'], CLONED_LUN_INFO['Name'],
CLONED_LUN_INFO['Visible Capacity'], CLONED_LUN_INFO['RAID Group ID'],
CLONED_LUN_INFO['Owner Controller'],
CLONED_LUN_INFO['Worker Controller'],
CLONED_LUN_INFO['Lun Type'], CLONED_LUN_INFO['SnapShot ID'],
CLONED_LUN_INFO['LunCopy ID']))
else:
out = 'ERROR: The object does not exist.'
return out
class HuaweiDorado2100G2CLIResSimulator(HuaweiTCLIResSimulator):
def cli_showsys(self, params):
out = """/>showsys
==========================================================================
System Information
--------------------------------------------------------------------------
System Name | SN_Dorado2100_G2
Device Type | Oceanstor Dorado2100 G2
Current System Mode | Double Controllers Normal
Mirroring Link Status | Link Up
Location |
Time | 2013-01-01 01:01:01
Product Version | V100R001C00
===========================================================================
"""
return out
def cli_createlun(self, params):
lun_type = ('THIN' if params[params.index('-type') + 1] == '2' else
'THICK')
if LUN_INFO['ID'] is None:
LUN_INFO['Name'] = self._name_translate(FAKE_VOLUME['name'])
LUN_INFO['ID'] = VOLUME_SNAP_ID['vol']
LUN_INFO['Size'] = FAKE_VOLUME['size']
LUN_INFO['Lun Type'] = lun_type
LUN_INFO['Owner Controller'] = 'A'
LUN_INFO['Worker Controller'] = 'A'
LUN_INFO['RAID Group ID'] = POOL_SETTING['ID']
FAKE_VOLUME['provider_location'] = LUN_INFO['ID']
else:
CLONED_LUN_INFO['Name'] = \
self._name_translate(FAKE_CLONED_VOLUME['name'])
CLONED_LUN_INFO['ID'] = VOLUME_SNAP_ID['vol_copy']
CLONED_LUN_INFO['Size'] = FAKE_CLONED_VOLUME['size']
CLONED_LUN_INFO['Lun Type'] = lun_type
CLONED_LUN_INFO['Owner Controller'] = 'A'
CLONED_LUN_INFO['Worker Controller'] = 'A'
CLONED_LUN_INFO['RAID Group ID'] = POOL_SETTING['ID']
CLONED_LUN_INFO['provider_location'] = CLONED_LUN_INFO['ID']
FAKE_CLONED_VOLUME['provider_location'] = CLONED_LUN_INFO['ID']
out = 'command operates successfully'
return out
def cli_showlun(self, params):
if '-lun' not in params:
if LUN_INFO['ID'] is None:
out = 'command operates successfully, but no information.'
elif CLONED_LUN_INFO['ID'] is None:
out = """/>showlun
===========================================================================
LUN Information
---------------------------------------------------------------------------
ID Status Controller Visible Capacity(MB) LUN Name Lun Type
---------------------------------------------------------------------------
%s Normal %s %s %s THICK
===========================================================================
""" % (LUN_INFO['ID'], LUN_INFO['Owner Controller'],
str(int(LUN_INFO['Size']) * 1024), LUN_INFO['Name'])
else:
out = """/>showlun
===========================================================================
LUN Information
---------------------------------------------------------------------------
ID Status Controller Visible Capacity(MB) LUN Name Lun Type
---------------------------------------------------------------------------
%s Normal %s %s %s THICK
%s Normal %s %s %s THICK
===========================================================================
""" % (LUN_INFO['ID'], LUN_INFO['Owner Controller'],
str(int(LUN_INFO['Size']) * 1024), LUN_INFO['Name'],
CLONED_LUN_INFO['ID'], CLONED_LUN_INFO['Owner Controller'],
str(int(CLONED_LUN_INFO['Size']) * 1024), CLONED_LUN_INFO['Name'])
elif params[params.index('-lun') + 1] in VOLUME_SNAP_ID.values():
out = """/>showlun
================================================
LUN Information
------------------------------------------------
ID | %s
Name | %s
LUN WWN | --
Visible Capacity | %s
RAID GROUP ID | %s
Owning Controller | %s
Workong Controller | %s
Lun Type | %s
SnapShot ID | %s
LunCopy ID | %s
================================================
""" % ((LUN_INFO['ID'], LUN_INFO['Name'], LUN_INFO['Visible Capacity'],
LUN_INFO['RAID Group ID'], LUN_INFO['Owner Controller'],
LUN_INFO['Worker Controller'], LUN_INFO['Lun Type'],
LUN_INFO['SnapShot ID'], LUN_INFO['LunCopy ID'])
if params[params.index('-lun') + 1] == VOLUME_SNAP_ID['vol'] else
(CLONED_LUN_INFO['ID'], CLONED_LUN_INFO['Name'],
CLONED_LUN_INFO['Visible Capacity'], CLONED_LUN_INFO['RAID Group ID'],
CLONED_LUN_INFO['Owner Controller'],
CLONED_LUN_INFO['Worker Controller'],
CLONED_LUN_INFO['Lun Type'], CLONED_LUN_INFO['SnapShot ID'],
CLONED_LUN_INFO['LunCopy ID']))
else:
out = 'ERROR: The object does not exist.'
return out
class HuaweiTISCSIDriverTestCase(test.TestCase):
def __init__(self, *args, **kwargs):
super(HuaweiTISCSIDriverTestCase, self).__init__(*args, **kwargs)
def setUp(self):
super(HuaweiTISCSIDriverTestCase, self).setUp()
self.tmp_dir = tempfile.mkdtemp()
self.fake_conf_file = self.tmp_dir + '/cinder_huawei_conf.xml'
create_fake_conf_file(self.fake_conf_file)
self.configuration = mox.MockObject(conf.Configuration)
self.configuration.cinder_huawei_conf_file = self.fake_conf_file
self.configuration.append_config_values(mox.IgnoreArg())
self.stubs.Set(time, 'sleep', Fake_sleep)
self.stubs.Set(utils, 'SSHPool', FakeSSHPool)
self.stubs.Set(ssh_common.TseriesCommon, '_change_file_mode',
Fake_change_file_mode)
self._init_driver()
def _init_driver(self):
Curr_test[0] = 'T'
self.driver = HuaweiVolumeDriver(configuration=self.configuration)
self.driver.do_setup(None)
def tearDown(self):
if os.path.exists(self.fake_conf_file):
os.remove(self.fake_conf_file)
shutil.rmtree(self.tmp_dir)
super(HuaweiTISCSIDriverTestCase, self).tearDown()
def test_conf_invalid(self):
# Test config file not found
tmp_conf_file = '/xxx/cinder_huawei_conf.xml'
tmp_configuration = mox.MockObject(conf.Configuration)
tmp_configuration.cinder_huawei_conf_file = tmp_conf_file
tmp_configuration.append_config_values(mox.IgnoreArg())
self.assertRaises(IOError,
HuaweiVolumeDriver,
configuration=tmp_configuration)
# Test Product and Protocol invalid
tmp_dict = {'Storage/Product': 'T', 'Storage/Protocol': 'iSCSI'}
for k, v in tmp_dict.items():
modify_conf(self.fake_conf_file, k, 'xx')
self.assertRaises(exception.InvalidInput,
HuaweiVolumeDriver,
configuration=self.configuration)
modify_conf(self.fake_conf_file, k, v)
# Test ctr ip, UserName and password unspecified
tmp_dict = {'Storage/ControllerIP0': '10.10.10.1',
'Storage/ControllerIP1': '10.10.10.2',
'Storage/UserName': 'admin',
'Storage/UserPassword': '123456'}
for k, v in tmp_dict.items():
modify_conf(self.fake_conf_file, k, '')
tmp_driver = HuaweiVolumeDriver(configuration=self.configuration)
self.assertRaises(exception.InvalidInput,
tmp_driver.do_setup, None)
modify_conf(self.fake_conf_file, k, v)
# Test StoragePool unspecified
modify_conf(self.fake_conf_file, 'LUN/StoragePool', '', attrib='Name')
tmp_driver = HuaweiVolumeDriver(configuration=self.configuration)
self.assertRaises(exception.InvalidInput,
tmp_driver.do_setup, None)
modify_conf(self.fake_conf_file, 'LUN/StoragePool', 'RAID_001',
attrib='Name')
# Test LUN type invalid
modify_conf(self.fake_conf_file, 'LUN/LUNType', 'thick')
tmp_driver = HuaweiVolumeDriver(configuration=self.configuration)
tmp_driver.do_setup(None)
self.assertRaises(exception.InvalidInput,
tmp_driver.create_volume, FAKE_VOLUME)
modify_conf(self.fake_conf_file, 'LUN/LUNType', 'Thick')
# Test OSType invalid
modify_conf(self.fake_conf_file, 'Host', 'invalid_type',
attrib='OSType')
tmp_driver = HuaweiVolumeDriver(configuration=self.configuration)
self.assertRaises(exception.InvalidInput,
tmp_driver.do_setup, None)
modify_conf(self.fake_conf_file, 'Host', 'Linux', attrib='OSType')
# Test TargetIP not found
modify_conf(self.fake_conf_file, 'iSCSI/DefaultTargetIP', '')
modify_conf(self.fake_conf_file, 'iSCSI/Initiator', '', attrib='Name')
tmp_driver = HuaweiVolumeDriver(configuration=self.configuration)
tmp_driver.do_setup(None)
tmp_driver.create_volume(FAKE_VOLUME)
self.assertRaises(exception.InvalidInput,
tmp_driver.initialize_connection,
FAKE_VOLUME, FAKE_CONNECTOR)
tmp_driver.delete_volume(FAKE_VOLUME)
modify_conf(self.fake_conf_file, 'iSCSI/DefaultTargetIP',
'192.168.100.1')
modify_conf(self.fake_conf_file, 'iSCSI/Initiator',
'iqn.1993-08.debian:01:ec2bff7ac3a3', attrib='Name')
def test_volume_type(self):
ctxt = context.get_admin_context()
extra_specs = {'drivers:LUNType': 'Thin'}
type_ref = volume_types.create(ctxt, 'THIN', extra_specs)
FAKE_VOLUME['volume_type_id'] = type_ref['id']
self.driver.create_volume(FAKE_VOLUME)
self.assertEqual(LUN_INFO["ID"], VOLUME_SNAP_ID['vol'])
self.assertEqual(LUN_INFO['Lun Type'], 'THIN')
self.driver.delete_volume(FAKE_VOLUME)
FAKE_VOLUME['volume_type_id'] = None
# Test volume type invalid
extra_specs = {'drivers:InvalidLUNType': 'Thin'}
type_ref = volume_types.create(ctxt, 'Invalid_THIN', extra_specs)
FAKE_VOLUME['volume_type_id'] = type_ref['id']
self.driver.create_volume(FAKE_VOLUME)
self.assertEqual(LUN_INFO["ID"], VOLUME_SNAP_ID['vol'])
self.assertNotEqual(LUN_INFO['Lun Type'], 'THIN')
self.driver.delete_volume(FAKE_VOLUME)
FAKE_VOLUME['volume_type_id'] = None
def test_create_delete_volume(self):
# Test create lun cli exception
set_error_flg('createlun')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume, FAKE_VOLUME)
ret = self.driver.create_volume(FAKE_VOLUME)
self.assertEqual(LUN_INFO['ID'], VOLUME_SNAP_ID['vol'])
self.assertEqual(ret['provider_location'], LUN_INFO['ID'])
# Test delete lun cli exception
set_error_flg('dellun')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_volume, FAKE_VOLUME)
self.driver.delete_volume(FAKE_VOLUME)
self.assertEqual(LUN_INFO['ID'], None)
self.assertEqual(FAKE_VOLUME['provider_location'], None)
def test_create_delete_cloned_volume(self):
# Test no source volume
self.assertRaises(exception.VolumeNotFound,
self.driver.create_cloned_volume,
FAKE_CLONED_VOLUME, FAKE_VOLUME)
self.driver.create_volume(FAKE_VOLUME)
# Test create luncopy failed
self.assertEqual(LUN_INFO['ID'], VOLUME_SNAP_ID['vol'])
set_error_flg('createluncopy')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_cloned_volume,
FAKE_CLONED_VOLUME, FAKE_VOLUME)
self.assertEqual(CLONED_LUN_INFO['ID'], VOLUME_SNAP_ID['vol_copy'])
self.driver.delete_volume(FAKE_CLONED_VOLUME)
self.assertEqual(CLONED_LUN_INFO['ID'], None)
# Test start luncopy failed
self.assertEqual(LUN_INFO['ID'], VOLUME_SNAP_ID['vol'])
set_error_flg('chgluncopystatus')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_cloned_volume,
FAKE_CLONED_VOLUME, FAKE_VOLUME)
self.assertEqual(CLONED_LUN_INFO['ID'], None)
self.assertEqual(LUN_INFO['ID'], VOLUME_SNAP_ID['vol'])
# Test luncopy status abnormal
LUNCOPY_SETTING['Status'] = 'Disable'
self.assertEqual(LUN_INFO['ID'], '0')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_cloned_volume,
FAKE_CLONED_VOLUME, FAKE_VOLUME)
self.assertEqual(CLONED_LUN_INFO['ID'], None)
self.assertEqual(LUN_INFO['ID'], VOLUME_SNAP_ID['vol'])
LUNCOPY_SETTING['Status'] = 'Normal'
# Test delete luncopy failed
set_error_flg('delluncopy')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_cloned_volume,
FAKE_CLONED_VOLUME, FAKE_VOLUME)
self.assertEqual(CLONED_LUN_INFO['ID'], VOLUME_SNAP_ID['vol_copy'])
self.driver.delete_volume(FAKE_CLONED_VOLUME)
self.assertEqual(CLONED_LUN_INFO['ID'], None)
# need to clean up LUNCopy
LUNCOPY_INFO['Name'] = None
LUNCOPY_INFO['ID'] = None
LUNCOPY_INFO['Type'] = None
LUNCOPY_INFO['State'] = None
LUNCOPY_INFO['Status'] = None
# Test normal create and delete cloned volume
self.assertEqual(LUN_INFO['ID'], VOLUME_SNAP_ID['vol'])
ret = self.driver.create_cloned_volume(FAKE_CLONED_VOLUME, FAKE_VOLUME)
self.assertEqual(CLONED_LUN_INFO['ID'], VOLUME_SNAP_ID['vol_copy'])
self.assertEqual(ret['provider_location'], CLONED_LUN_INFO['ID'])
self.driver.delete_volume(FAKE_CLONED_VOLUME)
self.assertEqual(CLONED_LUN_INFO['ID'], None)
self.assertEqual(FAKE_CLONED_VOLUME['provider_location'], None)
self.driver.delete_volume(FAKE_VOLUME)
self.assertEqual(LUN_INFO['ID'], None)
def test_create_delete_snapshot(self):
# Test no resource pool
RESPOOL_A_SIM['Valid Size'] = '0'
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot, FAKE_SNAPSHOT)
RESPOOL_A_SIM['Valid Size'] = '5120'
# Test no source volume
self.assertRaises(exception.VolumeNotFound,
self.driver.create_snapshot, FAKE_SNAPSHOT)
# Test create snapshot cli exception
self.driver.create_volume(FAKE_VOLUME)
set_error_flg('createsnapshot')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot,
FAKE_SNAPSHOT)
self.assertEqual(LUN_INFO['ID'], VOLUME_SNAP_ID['vol'])
# Test active snapshot failed
set_error_flg('actvsnapshot')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot,
FAKE_SNAPSHOT)
self.assertEqual(SNAPSHOT_INFO['ID'], None)
self.assertEqual(SNAPSHOT_INFO['Status'], None)
# Test disable snapshot failed
set_error_flg('disablesnapshot')
self.driver.create_snapshot(FAKE_SNAPSHOT)
self.assertEqual(SNAPSHOT_INFO['ID'], VOLUME_SNAP_ID['snap'])
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_snapshot,
FAKE_SNAPSHOT)
self.assertEqual(SNAPSHOT_INFO['Status'], 'Active')
# Test delsnapshot failed
set_error_flg('delsnapshot')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_snapshot,
FAKE_SNAPSHOT)
self.assertEqual(SNAPSHOT_INFO['Status'], 'Disable')
self.driver.delete_snapshot(FAKE_SNAPSHOT)
# Test normal create and delete snapshot
self.driver.create_volume(FAKE_VOLUME)
ret = self.driver.create_snapshot(FAKE_SNAPSHOT)
self.assertEqual(SNAPSHOT_INFO['ID'], VOLUME_SNAP_ID['snap'])
self.assertEqual(SNAPSHOT_INFO['Status'], 'Active')
self.assertEqual(ret['provider_location'], SNAPSHOT_INFO['ID'])
self.driver.delete_snapshot(FAKE_SNAPSHOT)
self.assertEqual(SNAPSHOT_INFO['ID'], None)
self.assertEqual(SNAPSHOT_INFO['Status'], None)
def test_create_delete_snapshot_volume(self):
# Test no source snapshot
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume_from_snapshot,
FAKE_CLONED_VOLUME, FAKE_SNAPSHOT)
# Test normal create and delete snapshot volume
self.driver.create_volume(FAKE_VOLUME)
self.driver.create_snapshot(FAKE_SNAPSHOT)
self.assertEqual(LUN_INFO['ID'], VOLUME_SNAP_ID['vol'])
self.assertEqual(SNAPSHOT_INFO['ID'], VOLUME_SNAP_ID['snap'])
ret = self.driver.create_volume_from_snapshot(FAKE_CLONED_VOLUME,
FAKE_SNAPSHOT)
self.assertEqual(CLONED_LUN_INFO['ID'], VOLUME_SNAP_ID['vol_copy'])
self.assertEqual(ret['provider_location'], CLONED_LUN_INFO['ID'])
self.driver.delete_snapshot(FAKE_SNAPSHOT)
self.driver.delete_volume(FAKE_VOLUME)
self.driver.delete_volume(FAKE_CLONED_VOLUME)
self.assertEqual(LUN_INFO['ID'], None)
self.assertEqual(CLONED_LUN_INFO['ID'], None)
self.assertEqual(SNAPSHOT_INFO['ID'], None)
def test_initialize_connection(self):
# Test can not get iscsi iqn
set_error_flg('showiscsitgtname')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
FAKE_VOLUME, FAKE_CONNECTOR)
# Test failed to get iSCSI port info
set_error_flg('showiscsiip')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
FAKE_VOLUME, FAKE_CONNECTOR)
# Test create hostgroup failed
set_error_flg('createhostgroup')
MAP_INFO['Host Group ID'] = None
MAP_INFO['Host Group Name'] = None
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
FAKE_VOLUME, FAKE_CONNECTOR)
# Test create host failed
set_error_flg('addhost')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
FAKE_VOLUME, FAKE_CONNECTOR)
# Test add iSCSI initiator failed
set_error_flg('addiscsiini')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
FAKE_VOLUME, FAKE_CONNECTOR)
# Test add hostport failed
set_error_flg('addhostport')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
FAKE_VOLUME, FAKE_CONNECTOR)
# Test no volume
FAKE_VOLUME['provider_location'] = '100'
self.assertRaises(exception.VolumeNotFound,
self.driver.initialize_connection,
FAKE_VOLUME, FAKE_CONNECTOR)
FAKE_VOLUME['provider_location'] = None
# Test map volume failed
self.driver.create_volume(FAKE_VOLUME)
set_error_flg('addhostmap')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
FAKE_VOLUME, FAKE_CONNECTOR)
# Test normal initialize connection
self.assertEqual(FAKE_VOLUME['provider_location'],
VOLUME_SNAP_ID['vol'])
self.assertEqual(LUN_INFO['Owner Controller'], 'A')
ret = self.driver.initialize_connection(FAKE_VOLUME, FAKE_CONNECTOR)
iscsi_propers = ret['data']
self.assertEqual(iscsi_propers['target_iqn'],
INITIATOR_SETTING['TargetIQN-form'])
self.assertEqual(iscsi_propers['target_portal'],
INITIATOR_SETTING['Initiator TargetIP'] + ':3260')
self.assertEqual(MAP_INFO["DEV LUN ID"], LUN_INFO['ID'])
self.assertEqual(MAP_INFO["INI Port Info"],
FAKE_CONNECTOR['initiator'])
self.assertEqual(LUN_INFO['Owner Controller'], 'B')
self.driver.terminate_connection(FAKE_VOLUME, FAKE_CONNECTOR)
self.driver.delete_volume(FAKE_VOLUME)
self.assertEqual(LUN_INFO['ID'], None)
def test_terminate_connection(self):
# Test no host was found
self.assertRaises(exception.HostNotFound,
self.driver.terminate_connection,
FAKE_VOLUME, FAKE_CONNECTOR)
# Test no volume was found
self.driver.create_volume(FAKE_VOLUME)
self.driver.initialize_connection(FAKE_VOLUME, FAKE_CONNECTOR)
FAKE_VOLUME['provider_location'] = None
self.assertRaises(exception.VolumeNotFound,
self.driver.terminate_connection,
FAKE_VOLUME, FAKE_CONNECTOR)
FAKE_VOLUME['provider_location'] = LUN_INFO['ID']
# Test delete map failed
set_error_flg('delhostmap')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.terminate_connection,
FAKE_VOLUME, FAKE_CONNECTOR)
# Delete hostport failed
set_error_flg('delhostport')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.terminate_connection,
FAKE_VOLUME, FAKE_CONNECTOR)
# Test delete initiator failed
set_error_flg('deliscsiini')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.terminate_connection,
FAKE_VOLUME, FAKE_CONNECTOR)
# Test delete host failed
set_error_flg('delhost')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.terminate_connection,
FAKE_VOLUME, FAKE_CONNECTOR)
# Test normal terminate connection
self.assertEqual(LUN_INFO['ID'], VOLUME_SNAP_ID['vol'])
self.driver.initialize_connection(FAKE_VOLUME, FAKE_CONNECTOR)
self.driver.terminate_connection(FAKE_VOLUME, FAKE_CONNECTOR)
self.assertEqual(MAP_INFO["DEV LUN ID"], None)
self.driver.delete_volume(FAKE_VOLUME)
self.assertEqual(LUN_INFO['ID'], None)
def test_get_volume_stats(self):
stats = self.driver.get_volume_stats(True)
free_capacity = float(POOL_SETTING['Free Capacity']) / 1024
self.assertEqual(stats['free_capacity_gb'], free_capacity)
self.assertEqual(stats['storage_protocol'], 'iSCSI')
class HuaweiTFCDriverTestCase(test.TestCase):
def __init__(self, *args, **kwargs):
super(HuaweiTFCDriverTestCase, self).__init__(*args, **kwargs)
def setUp(self):
super(HuaweiTFCDriverTestCase, self).setUp()
self.tmp_dir = tempfile.mkdtemp()
self.fake_conf_file = self.tmp_dir + '/cinder_huawei_conf.xml'
create_fake_conf_file(self.fake_conf_file)
modify_conf(self.fake_conf_file, 'Storage/Protocol', 'FC')
self.configuration = mox.MockObject(conf.Configuration)
self.configuration.cinder_huawei_conf_file = self.fake_conf_file
self.configuration.append_config_values(mox.IgnoreArg())
self.stubs.Set(time, 'sleep', Fake_sleep)
self.stubs.Set(utils, 'SSHPool', FakeSSHPool)
self.stubs.Set(ssh_common.TseriesCommon, '_change_file_mode',
Fake_change_file_mode)
self._init_driver()
def _init_driver(self):
Curr_test[0] = 'T'
self.driver = HuaweiVolumeDriver(configuration=self.configuration)
self.driver.do_setup(None)
def tearDown(self):
if os.path.exists(self.fake_conf_file):
os.remove(self.fake_conf_file)
shutil.rmtree(self.tmp_dir)
super(HuaweiTFCDriverTestCase, self).tearDown()
def test_validate_connector_failed(self):
invalid_connector = {'host': 'testhost'}
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.validate_connector,
invalid_connector)
def test_create_delete_volume(self):
self.driver.create_volume(FAKE_VOLUME)
self.assertEqual(LUN_INFO['ID'], VOLUME_SNAP_ID['vol'])
self.driver.delete_volume(FAKE_VOLUME)
self.assertEqual(LUN_INFO['ID'], None)
def test_create_delete_snapshot(self):
self.driver.create_volume(FAKE_VOLUME)
self.driver.create_snapshot(FAKE_SNAPSHOT)
self.assertEqual(SNAPSHOT_INFO['ID'], VOLUME_SNAP_ID['snap'])
self.driver.delete_snapshot(FAKE_SNAPSHOT)
self.assertEqual(SNAPSHOT_INFO['ID'], None)
self.driver.delete_volume(FAKE_VOLUME)
self.assertEqual(LUN_INFO['ID'], None)
def test_create_cloned_volume(self):
self.driver.create_volume(FAKE_VOLUME)
ret = self.driver.create_cloned_volume(FAKE_CLONED_VOLUME, FAKE_VOLUME)
self.assertEqual(CLONED_LUN_INFO['ID'], VOLUME_SNAP_ID['vol_copy'])
self.assertEqual(ret['provider_location'], CLONED_LUN_INFO['ID'])
self.driver.delete_volume(FAKE_CLONED_VOLUME)
self.driver.delete_volume(FAKE_VOLUME)
self.assertEqual(CLONED_LUN_INFO['ID'], None)
self.assertEqual(LUN_INFO['ID'], None)
def test_create_snapshot_volume(self):
self.driver.create_volume(FAKE_VOLUME)
self.driver.create_snapshot(FAKE_SNAPSHOT)
ret = self.driver.create_volume_from_snapshot(FAKE_CLONED_VOLUME,
FAKE_SNAPSHOT)
self.assertEqual(CLONED_LUN_INFO['ID'], VOLUME_SNAP_ID['vol_copy'])
self.assertEqual(ret['provider_location'], CLONED_LUN_INFO['ID'])
self.driver.delete_volume(FAKE_CLONED_VOLUME)
self.driver.delete_volume(FAKE_VOLUME)
self.assertEqual(CLONED_LUN_INFO['ID'], None)
self.assertEqual(LUN_INFO['ID'], None)
def test_initialize_terminate_connection(self):
self.driver.create_volume(FAKE_VOLUME)
ret = self.driver.initialize_connection(FAKE_VOLUME, FAKE_CONNECTOR)
fc_properties = ret['data']
self.assertEqual(fc_properties['target_wwn'],
INITIATOR_SETTING['WWN'])
self.assertEqual(MAP_INFO["DEV LUN ID"], LUN_INFO['ID'])
self.driver.terminate_connection(FAKE_VOLUME, FAKE_CONNECTOR)
self.assertEqual(MAP_INFO["DEV LUN ID"], None)
self.assertEqual(MAP_INFO["Host LUN ID"], None)
self.driver.delete_volume(FAKE_VOLUME)
self.assertEqual(LUN_INFO['ID'], None)
def _test_get_volume_stats(self):
stats = self.driver.get_volume_stats(True)
fakecapacity = float(POOL_SETTING['Free Capacity']) / 1024
self.assertEqual(stats['free_capacity_gb'], fakecapacity)
self.assertEqual(stats['storage_protocol'], 'FC')
class HuaweiDorado5100FCDriverTestCase(HuaweiTFCDriverTestCase):
def __init__(self, *args, **kwargs):
super(HuaweiDorado5100FCDriverTestCase, self).__init__(*args, **kwargs)
def setUp(self):
super(HuaweiDorado5100FCDriverTestCase, self).setUp()
def _init_driver(self):
Curr_test[0] = 'Dorado5100'
modify_conf(self.fake_conf_file, 'Storage/Product', 'Dorado')
self.driver = HuaweiVolumeDriver(configuration=self.configuration)
self.driver.do_setup(None)
def test_create_cloned_volume(self):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_cloned_volume,
FAKE_CLONED_VOLUME, FAKE_VOLUME)
def test_create_snapshot_volume(self):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume_from_snapshot,
FAKE_CLONED_VOLUME, FAKE_SNAPSHOT)
class HuaweiDorado2100G2FCDriverTestCase(HuaweiTFCDriverTestCase):
def __init__(self, *args, **kwargs):
super(HuaweiDorado2100G2FCDriverTestCase, self).__init__(*args,
**kwargs)
def setUp(self):
super(HuaweiDorado2100G2FCDriverTestCase, self).setUp()
def _init_driver(self):
Curr_test[0] = 'Dorado2100G2'
modify_conf(self.fake_conf_file, 'Storage/Product', 'Dorado')
self.driver = HuaweiVolumeDriver(configuration=self.configuration)
self.driver.do_setup(None)
def test_create_cloned_volume(self):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_cloned_volume,
FAKE_CLONED_VOLUME, FAKE_VOLUME)
def test_create_delete_snapshot(self):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot, FAKE_SNAPSHOT)
def test_create_snapshot_volume(self):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume_from_snapshot,
FAKE_CLONED_VOLUME, FAKE_SNAPSHOT)
class HuaweiDorado5100ISCSIDriverTestCase(HuaweiTISCSIDriverTestCase):
def __init__(self, *args, **kwargs):
super(HuaweiDorado5100ISCSIDriverTestCase, self).__init__(*args,
**kwargs)
def setUp(self):
super(HuaweiDorado5100ISCSIDriverTestCase, self).setUp()
def _init_driver(self):
Curr_test[0] = 'Dorado5100'
modify_conf(self.fake_conf_file, 'Storage/Product', 'Dorado')
self.driver = HuaweiVolumeDriver(configuration=self.configuration)
self.driver.do_setup(None)
def test_create_delete_cloned_volume(self):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_cloned_volume,
FAKE_CLONED_VOLUME, FAKE_VOLUME)
def test_create_delete_snapshot_volume(self):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume_from_snapshot,
FAKE_CLONED_VOLUME, FAKE_SNAPSHOT)
def test_volume_type(self):
pass
class HuaweiDorado2100G2ISCSIDriverTestCase(HuaweiTISCSIDriverTestCase):
def __init__(self, *args, **kwargs):
super(HuaweiDorado2100G2ISCSIDriverTestCase, self).__init__(*args,
**kwargs)
def setUp(self):
super(HuaweiDorado2100G2ISCSIDriverTestCase, self).setUp()
def _init_driver(self):
Curr_test[0] = 'Dorado2100G2'
modify_conf(self.fake_conf_file, 'Storage/Product', 'Dorado')
self.driver = HuaweiVolumeDriver(configuration=self.configuration)
self.driver.do_setup(None)
def test_conf_invalid(self):
pass
def test_create_delete_cloned_volume(self):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_cloned_volume,
FAKE_CLONED_VOLUME, FAKE_VOLUME)
def test_create_delete_snapshot(self):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot, FAKE_SNAPSHOT)
def test_create_delete_snapshot_volume(self):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume_from_snapshot,
FAKE_CLONED_VOLUME, FAKE_SNAPSHOT)
def test_initialize_connection(self):
self.driver.create_volume(FAKE_VOLUME)
ret = self.driver.initialize_connection(FAKE_VOLUME, FAKE_CONNECTOR)
iscsi_propers = ret['data']
self.assertEqual(iscsi_propers['target_iqn'],
INITIATOR_SETTING['TargetIQN-form'])
self.assertEqual(iscsi_propers['target_portal'],
INITIATOR_SETTING['Initiator TargetIP'] + ':3260')
self.assertEqual(MAP_INFO["DEV LUN ID"], LUN_INFO['ID'])
self.assertEqual(MAP_INFO["INI Port Info"],
FAKE_CONNECTOR['initiator'])
self.driver.terminate_connection(FAKE_VOLUME, FAKE_CONNECTOR)
self.driver.delete_volume(FAKE_VOLUME)
self.assertEqual(LUN_INFO['ID'], None)
class SSHMethodTestCase(test.TestCase):
def __init__(self, *args, **kwargs):
super(SSHMethodTestCase, self).__init__(*args, **kwargs)
def setUp(self):
super(SSHMethodTestCase, self).setUp()
self.tmp_dir = tempfile.mkdtemp()
self.fake_conf_file = self.tmp_dir + '/cinder_huawei_conf.xml'
create_fake_conf_file(self.fake_conf_file)
self.configuration = mox.MockObject(conf.Configuration)
self.configuration.cinder_huawei_conf_file = self.fake_conf_file
self.configuration.append_config_values(mox.IgnoreArg())
self.stubs.Set(time, 'sleep', Fake_sleep)
self.stubs.Set(utils, 'SSHPool', FakeSSHPool)
self.stubs.Set(ssh_common.TseriesCommon, '_change_file_mode',
Fake_change_file_mode)
Curr_test[0] = 'T'
self.driver = HuaweiVolumeDriver(configuration=self.configuration)
self.driver.do_setup(None)
def tearDown(self):
if os.path.exists(self.fake_conf_file):
os.remove(self.fake_conf_file)
shutil.rmtree(self.tmp_dir)
super(SSHMethodTestCase, self).tearDown()
def test_reach_max_connection_limit(self):
self.stubs.Set(FakeChannel, 'recv', self._fake_recv1)
self.assertRaises(exception.CinderException,
self.driver.create_volume, FAKE_VOLUME)
def test_socket_timeout(self):
self.stubs.Set(FakeChannel, 'recv', self._fake_recv2)
self.assertRaises(socket.timeout,
self.driver.create_volume, FAKE_VOLUME)
def _fake_recv1(self, nbytes):
return "No response message"
    def _fake_recv2(self, nbytes):
raise socket.timeout()
class HuaweiUtilsTestCase(test.TestCase):
def __init__(self, *args, **kwargs):
super(HuaweiUtilsTestCase, self).__init__(*args, **kwargs)
def setUp(self):
super(HuaweiUtilsTestCase, self).setUp()
self.tmp_dir = tempfile.mkdtemp()
self.fake_conf_file = self.tmp_dir + '/cinder_huawei_conf.xml'
create_fake_conf_file(self.fake_conf_file)
def tearDown(self):
if os.path.exists(self.fake_conf_file):
os.remove(self.fake_conf_file)
shutil.rmtree(self.tmp_dir)
super(HuaweiUtilsTestCase, self).tearDown()
def test_parse_xml_file_ioerror(self):
        tmp_conf_file = '/xxx/cinder_huawei_conf.xml'
        self.assertRaises(IOError, huawei_utils.parse_xml_file, tmp_conf_file)
def test_is_xml_item_exist(self):
root = huawei_utils.parse_xml_file(self.fake_conf_file)
res = huawei_utils.is_xml_item_exist(root, 'Storage/UserName')
self.assertTrue(res)
res = huawei_utils.is_xml_item_exist(root, 'xxx')
self.assertFalse(res)
res = huawei_utils.is_xml_item_exist(root, 'LUN/StoragePool', 'Name')
self.assertTrue(res)
res = huawei_utils.is_xml_item_exist(root, 'LUN/StoragePool', 'xxx')
self.assertFalse(res)
def test_is_xml_item_valid(self):
root = huawei_utils.parse_xml_file(self.fake_conf_file)
res = huawei_utils.is_xml_item_valid(root, 'LUN/LUNType',
['Thin', 'Thick'])
self.assertTrue(res)
res = huawei_utils.is_xml_item_valid(root, 'LUN/LUNType', ['test'])
self.assertFalse(res)
res = huawei_utils.is_xml_item_valid(root, 'Host',
['Linux', 'Windows'], 'OSType')
self.assertTrue(res)
res = huawei_utils.is_xml_item_valid(root, 'Host', ['test'], 'OSType')
self.assertFalse(res)
def test_get_conf_host_os_type(self):
# Default os is Linux
res = huawei_utils.get_conf_host_os_type('10.10.10.1',
self.fake_conf_file)
self.assertEqual(res, '0')
modify_conf(self.fake_conf_file, 'Host', 'Windows', 'OSType')
res = huawei_utils.get_conf_host_os_type(FAKE_CONNECTOR['ip'],
self.fake_conf_file)
self.assertEqual(res, '1')
|
|
import gevent
import gevent.pool
import gevent.queue
import gevent.monkey; gevent.monkey.patch_all()
import itertools
import optparse
import os
import sys
import time
import traceback
import random
import yaml
import realistic
import common
NANOSECOND = int(1e9)
def reader(bucket, worker_id, file_names, queue, rand):
while True:
objname = rand.choice(file_names)
key = bucket.new_key(objname)
fp = realistic.FileVerifier()
result = dict(
type='r',
bucket=bucket.name,
key=key.name,
worker=worker_id,
)
start = time.time()
try:
key.get_contents_to_file(fp)
except gevent.GreenletExit:
raise
except Exception as e:
# stop timer ASAP, even on errors
end = time.time()
result.update(
error=dict(
msg=str(e),
traceback=traceback.format_exc(),
),
)
# certain kinds of programmer errors make this a busy
# loop; let parent greenlet get some time too
time.sleep(0)
else:
end = time.time()
if not fp.valid():
result.update(
error=dict(
msg='md5sum check failed',
),
)
elapsed = end - start
result.update(
start=start,
duration=int(round(elapsed * NANOSECOND)),
chunks=fp.chunks,
)
queue.put(result)
def writer(bucket, worker_id, file_names, files, queue, rand):
while True:
fp = next(files)
objname = rand.choice(file_names)
key = bucket.new_key(objname)
result = dict(
type='w',
bucket=bucket.name,
key=key.name,
worker=worker_id,
)
start = time.time()
try:
key.set_contents_from_file(fp)
except gevent.GreenletExit:
raise
except Exception as e:
# stop timer ASAP, even on errors
end = time.time()
result.update(
error=dict(
msg=str(e),
traceback=traceback.format_exc(),
),
)
# certain kinds of programmer errors make this a busy
# loop; let parent greenlet get some time too
time.sleep(0)
else:
end = time.time()
elapsed = end - start
result.update(
start=start,
duration=int(round(elapsed * NANOSECOND)),
chunks=fp.last_chunks,
)
queue.put(result)
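# Both reader() and writer() above push one result record per operation onto the
# shared queue; main() later streams those records to stdout as YAML documents.
# Rough shape of a record, as built above ('error' only appears when an operation
# failed, and 'duration' is wall-clock time in integer nanoseconds via NANOSECOND):
#
#     {'type': 'r' or 'w', 'bucket': ..., 'key': ..., 'worker': ...,
#      'start': <epoch seconds>, 'duration': <nanoseconds>, 'chunks': ...,
#      'error': {'msg': ..., 'traceback': ...}}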
def parse_options():
parser = optparse.OptionParser(
usage='%prog [OPTS] <CONFIG_YAML',
)
parser.add_option("--no-cleanup", dest="cleanup", action="store_false",
help="skip cleaning up all created buckets", default=True)
return parser.parse_args()
def write_file(bucket, file_name, fp):
"""
Write a single file to the bucket using the file_name.
This is used during the warmup to initialize the files.
"""
key = bucket.new_key(file_name)
key.set_contents_from_file(fp)
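# write_file() is only used from the warmup phase of main() below, where a gevent
# pool uploads one object per generated file name before the long-running reader
# and writer greenlets are spawned.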
def main():
# parse options
(options, args) = parse_options()
if os.isatty(sys.stdin.fileno()):
raise RuntimeError('Need configuration in stdin.')
config = common.read_config(sys.stdin)
conn = common.connect(config.s3)
bucket = None
try:
# setup
real_stdout = sys.stdout
sys.stdout = sys.stderr
# verify all required config items are present
if 'readwrite' not in config:
raise RuntimeError('readwrite section not found in config')
for item in ['readers', 'writers', 'duration', 'files', 'bucket']:
if item not in config.readwrite:
raise RuntimeError("Missing readwrite config item: {item}".format(item=item))
for item in ['num', 'size', 'stddev']:
if item not in config.readwrite.files:
raise RuntimeError("Missing readwrite config item: files.{item}".format(item=item))
seeds = dict(config.readwrite.get('random_seed', {}))
seeds.setdefault('main', random.randrange(2**32))
rand = random.Random(seeds['main'])
for name in ['names', 'contents', 'writer', 'reader']:
seeds.setdefault(name, rand.randrange(2**32))
print 'Using random seeds: {seeds}'.format(seeds=seeds)
# setup bucket and other objects
bucket_name = common.choose_bucket_prefix(config.readwrite.bucket, max_len=30)
bucket = conn.create_bucket(bucket_name)
print "Created bucket: {name}".format(name=bucket.name)
file_names = realistic.names(
mean=15,
stddev=4,
seed=seeds['names'],
)
file_names = itertools.islice(file_names, config.readwrite.files.num)
file_names = list(file_names)
files = realistic.files2(
mean=1024 * config.readwrite.files.size,
stddev=1024 * config.readwrite.files.stddev,
seed=seeds['contents'],
)
q = gevent.queue.Queue()
# warmup - get initial set of files uploaded
print "Uploading initial set of {num} files".format(num=config.readwrite.files.num)
warmup_pool = gevent.pool.Pool(size=100)
for file_name in file_names:
fp = next(files)
warmup_pool.spawn_link_exception(
write_file,
bucket=bucket,
file_name=file_name,
fp=fp,
)
warmup_pool.join()
# main work
print "Starting main worker loop."
print "Using file size: {size} +- {stddev}".format(size=config.readwrite.files.size, stddev=config.readwrite.files.stddev)
print "Spawning {w} writers and {r} readers...".format(w=config.readwrite.writers, r=config.readwrite.readers)
group = gevent.pool.Group()
rand_writer = random.Random(seeds['writer'])
for x in xrange(config.readwrite.writers):
this_rand = random.Random(rand_writer.randrange(2**32))
group.spawn_link_exception(
writer,
bucket=bucket,
worker_id=x,
file_names=file_names,
files=files,
queue=q,
rand=this_rand,
)
rand_reader = random.Random(seeds['reader'])
for x in xrange(config.readwrite.readers):
this_rand = random.Random(rand_reader.randrange(2**32))
group.spawn_link_exception(
reader,
bucket=bucket,
worker_id=x,
file_names=file_names,
queue=q,
rand=this_rand,
)
def stop():
group.kill(block=True)
q.put(StopIteration)
gevent.spawn_later(config.readwrite.duration, stop)
yaml.safe_dump_all(q, stream=real_stdout)
finally:
# cleanup
if options.cleanup:
if bucket is not None:
common.nuke_bucket(bucket)
|
|
from functools import partial
import logging
from django.conf import settings
from django.core.cache import caches
from django.core.exceptions import ValidationError, NON_FIELD_ERRORS
from django.db import models
from django.db.models.signals import post_save
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from markitup.fields import MarkupField
from wafer.menu import refresh_menu_cache
logger = logging.getLogger(__name__)
class PageMarkupField(MarkupField):
"""MarkupField that uses our own render function"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
dotted_path, kwargs = settings.WAFER_PAGE_MARKITUP_FILTER
module, func = dotted_path.rsplit('.', 1)
func = getattr(__import__(module, {}, {}, [func]), func)
self.render_func = partial(func, **kwargs)
def pre_save(self, model_instance, add):
value = super().pre_save(model_instance, add)
rendered = self.render_func(value)
rendered_field_name = getattr(model_instance, self.attname).rendered_field_name
setattr(model_instance, rendered_field_name, rendered)
return value
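# The render function wired up above comes from settings.WAFER_PAGE_MARKITUP_FILTER,
# which is expected to be a (dotted_path, kwargs) pair: the dotted path is imported
# and the kwargs are bound via functools.partial.  A purely illustrative sketch of
# such a setting, assuming the Python-Markdown package as the renderer (the exact
# path and kwargs are project-specific and not defined in this module):
#
#     WAFER_PAGE_MARKITUP_FILTER = ('markdown.markdown', {'extensions': ['extra']})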
class File(models.Model):
"""A file for use in page markup."""
name = models.CharField(max_length=255)
description = models.TextField(blank=True)
item = models.FileField(upload_to='pages_files')
def __str__(self):
return u'%s (%s)' % (self.name, self.item.url)
class Page(models.Model):
"""An extra page for the site."""
name = models.CharField(max_length=255)
slug = models.SlugField(help_text=_("Last component of the page URL"))
parent = models.ForeignKey(
'self', null=True, blank=True, on_delete=models.CASCADE, related_name="children")
content = PageMarkupField(
help_text=_("Markdown contents for the page."))
include_in_menu = models.BooleanField(
help_text=_("Whether to include in menus."),
default=False)
menu_order = models.PositiveSmallIntegerField(
help_text=_("Ordering in the menu (smaller numbers come first)"),
null=True,
blank=True,
)
exclude_from_static = models.BooleanField(
help_text=_("Whether to exclude this page from the static version of"
" the site (Container pages, etc.)"),
default=False)
files = models.ManyToManyField(
File, related_name="pages", blank=True,
help_text=_("Images and other files for use in"
" the content markdown field."))
people = models.ManyToManyField(
settings.AUTH_USER_MODEL,
related_name='pages', blank=True,
help_text=_("People associated with this page for display in the"
" schedule (Session chairs, panelists, etc.)"))
cache_time = models.IntegerField(
default=-1,
help_text=_("Length of time (in seconds) to cache the page for "
"dynamic page content. A negative value means this page "
"is not dynamic and it will be not be regenerated "
"until it is next edited."))
def __str__(self):
return u'%s' % (self.name,)
cache_name = settings.WAFER_CACHE
def get_path(self):
path, parent = [self.slug], self.parent
while parent is not None:
path.insert(0, parent.slug)
parent = parent.parent
return path
def get_absolute_url(self):
if self.slug == 'index' and not self.parent:
return reverse('wafer_page')
url = "/".join(self.get_path())
return reverse('wafer_page', args=(url,))
def _cache_key(self):
return "wafer.pages:rendered:%s" % self.get_absolute_url()
def cached_render(self):
if self.cache_time < 0:
return self.content.rendered
cache = caches[self.cache_name]
cache_key = self._cache_key()
rendered = cache.get(cache_key)
if rendered is None:
content_field = self._meta.get_field('content')
rendered = content_field.render_func(self.content.raw)
# Should reset the database copy, but this is enough for
# now
cache.set(cache_key, rendered, self.cache_time)
return rendered
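    # cached_render() implements the cache_time semantics described on the field
    # above: a negative cache_time serves the stored rendered copy directly, while
    # a non-negative value re-renders the raw markup on a cache miss and keeps the
    # result in the configured cache for cache_time seconds.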
def invalidate_cache(self):
cache = caches[self.cache_name]
cache.delete(self._cache_key())
get_absolute_url.short_description = 'page url'
    def get_in_schedule(self):
        return self.scheduleitem_set.exists()
def get_people_display_names(self):
names = [person.userprofile.display_name()
for person in self.people.all()]
if len(names) > 2:
comma_names = ', '.join(names[:-1])
return comma_names + ' and ' + names[-1]
else:
return ' and '.join(names)
get_in_schedule.short_description = 'Added to schedule'
get_in_schedule.boolean = True
get_people_display_names.short_description = 'People'
    class Meta:
unique_together = (('parent', 'slug'),)
def clean(self):
keys = [self.pk]
parent = self.parent
while parent is not None:
if parent.pk in keys:
raise ValidationError(
{
NON_FIELD_ERRORS: [
_("Circular reference in parent."),
],
})
keys.append(parent.pk)
parent = parent.parent
return super().clean()
def validate_unique(self, exclude=None):
existing = Page.objects.filter(slug=self.slug, parent=self.parent)
# We could be updating the page, so don't fail if the existing
# entry is this page.
if existing.count() > 1 or (existing.count() == 1 and
existing.first().pk != self.pk):
raise ValidationError(
{
NON_FIELD_ERRORS: [
_("Duplicate parent/slug combination."),
],
})
return super().validate_unique(exclude)
def save(self, *args, **kwargs):
"""Ensure we invalidate the cache after saving"""
super().save(*args, **kwargs)
self.invalidate_cache()
def page_menus(root_menu):
"""Add page menus."""
for page in Page.objects.filter(include_in_menu=True, parent=None).prefetch_related("children").order_by('menu_order'):
subpages = page.children.filter(include_in_menu=True).order_by('menu_order')
        if subpages:
root_menu.add_menu(
page.slug,
page.name,
[],
)
for subpage in subpages:
root_menu.add_item(
subpage.name,
subpage.get_absolute_url(),
menu=page.slug,
)
else:
root_menu.add_item(
page.name,
page.get_absolute_url(),
)
post_save.connect(refresh_menu_cache, sender=Page)
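# Saving a Page triggers wafer's menu cache refresh through the post_save signal
# above, so changes that affect page_menus() (include_in_menu, menu_order, renames)
# are picked up without a manual cache flush.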
|
|
import datetime
import warnings
import json
import mock
import sift
import unittest
import sys
import requests.exceptions
if sys.version_info[0] < 3:
import six.moves.urllib as urllib
else:
import urllib.parse
def valid_transaction_properties():
return {
'$buyer_user_id': '123456',
'$seller_user_id': '654321',
'$amount': 1253200,
'$currency_code': 'USD',
'$time': int(datetime.datetime.now().strftime('%s')),
'$transaction_id': 'my_transaction_id',
'$billing_name': 'Mike Snow',
'$billing_bin': '411111',
'$billing_last4': '1111',
'$billing_address1': '123 Main St.',
'$billing_city': 'San Francisco',
'$billing_region': 'CA',
'$billing_country': 'US',
'$billing_zip': '94131',
'$user_email': '[email protected]'
}
def valid_label_properties():
return {
'$abuse_type': 'content_abuse',
'$is_bad': True,
'$description': 'Listed a fake item',
'$source': 'Internal Review Queue',
'$analyst': '[email protected]'
}
def score_response_json():
return """{
"status": 0,
"error_message": "OK",
"user_id": "12345",
"score": 0.85,
"latest_label": {
"is_bad": true,
"time": 1450201660000
},
"scores": {
"content_abuse": {
"score": 0.14
},
"payment_abuse": {
"score": 0.97
}
},
"latest_labels": {
"promotion_abuse": {
"is_bad": false,
"time": 1457201099000
},
"payment_abuse": {
"is_bad": true,
"time": 1457212345000
}
}
}"""
# A sample response from the /{version}/users/{userId}/score API.
USER_SCORE_RESPONSE_JSON = """{
"status": 0,
"error_message": "OK",
"entity_type": "user",
"entity_id": "12345",
"scores": {
"content_abuse": {
"score": 0.14
},
"payment_abuse": {
"score": 0.97
}
},
"latest_decisions": {
"payment_abuse": {
"id": "user_looks_bad_payment_abuse",
"category": "block",
"source": "AUTOMATED_RULE",
"time": 1352201880,
"description": "Bad Fraudster"
}
},
"latest_labels": {
"promotion_abuse": {
"is_bad": false,
"time": 1457201099000
},
"payment_abuse": {
"is_bad": true,
"time": 1457212345000
}
}
}"""
def action_response_json():
return """{
"actions": [
{
"action": {
"id": "freds_action"
},
"entity": {
"id": "Fred"
},
"id": "ACTION1234567890:freds_action",
"triggers": [
{
"source": "synchronous_action",
"trigger": {
"id": "TRIGGER1234567890"
},
"type": "formula"
}
]
}
],
"score": 0.85,
"status": 0,
"error_message": "OK",
"user_id": "Fred",
"scores": {
"content_abuse": {
"score": 0.14
},
"payment_abuse": {
"score": 0.97
}
},
"latest_labels": {
"promotion_abuse": {
"is_bad": false,
"time": 1457201099000
},
"payment_abuse": {
"is_bad": true,
"time": 1457212345000
}
}
}"""
def response_with_data_header():
return {
'content-type': 'application/json; charset=UTF-8'
}
class TestSiftPythonClient(unittest.TestCase):
def setUp(self):
self.test_key = 'a_fake_test_api_key'
self.account_id = 'ACCT'
self.sift_client = sift.Client(api_key=self.test_key, account_id=self.account_id)
def test_global_api_key(self):
# test for error if global key is undefined
self.assertRaises(TypeError, sift.Client)
sift.api_key = "a_test_global_api_key"
local_api_key = "a_test_local_api_key"
client1 = sift.Client()
client2 = sift.Client(local_api_key)
# test that global api key is assigned
assert(client1.api_key == sift.api_key)
# test that local api key is assigned
assert(client2.api_key == local_api_key)
client2 = sift.Client()
# test that client2 is assigned a new object with global api_key
assert(client2.api_key == sift.api_key)
def test_constructor_requires_valid_api_key(self):
self.assertRaises(TypeError, sift.Client, None)
self.assertRaises(ValueError, sift.Client, '')
def test_constructor_invalid_api_url(self):
self.assertRaises(TypeError, sift.Client, self.test_key, None)
self.assertRaises(ValueError, sift.Client, self.test_key, '')
def test_constructor_api_key(self):
client = sift.Client(self.test_key)
self.assertEqual(client.api_key, self.test_key)
def test_track_requires_valid_event(self):
self.assertRaises(TypeError, self.sift_client.track, None, {})
self.assertRaises(ValueError, self.sift_client.track, '', {})
self.assertRaises(TypeError, self.sift_client.track, 42, {})
def test_track_requires_properties(self):
event = 'custom_event'
self.assertRaises(TypeError, self.sift_client.track, event, None)
self.assertRaises(TypeError, self.sift_client.track, event, 42)
self.assertRaises(ValueError, self.sift_client.track, event, {})
def test_score_requires_user_id(self):
self.assertRaises(TypeError, self.sift_client.score, None)
self.assertRaises(ValueError, self.sift_client.score, '')
self.assertRaises(TypeError, self.sift_client.score, 42)
def test_event_ok(self):
event = '$transaction'
mock_response = mock.Mock()
mock_response.content = '{"status": 0, "error_message": "OK"}'
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'post') as mock_post:
mock_post.return_value = mock_response
response = self.sift_client.track(event, valid_transaction_properties())
mock_post.assert_called_with(
'https://api.siftscience.com/v205/events',
data=mock.ANY,
headers=mock.ANY,
timeout=mock.ANY,
params={})
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.api_status == 0)
assert(response.api_error_message == "OK")
def test_event_with_timeout_param_ok(self):
event = '$transaction'
test_timeout = 5
mock_response = mock.Mock()
mock_response.content = '{"status": 0, "error_message": "OK"}'
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'post') as mock_post:
mock_post.return_value = mock_response
response = self.sift_client.track(
event, valid_transaction_properties(), timeout=test_timeout)
mock_post.assert_called_with(
'https://api.siftscience.com/v205/events',
data=mock.ANY,
headers=mock.ANY,
timeout=test_timeout,
params={})
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.api_status == 0)
assert(response.api_error_message == "OK")
def test_score_ok(self):
mock_response = mock.Mock()
mock_response.content = score_response_json()
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'get') as mock_get:
mock_get.return_value = mock_response
response = self.sift_client.score('12345')
mock_get.assert_called_with(
'https://api.siftscience.com/v205/score/12345',
params={'api_key': self.test_key},
headers=mock.ANY,
timeout=mock.ANY)
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.api_error_message == "OK")
assert(response.body['score'] == 0.85)
assert(response.body['scores']['content_abuse']['score'] == 0.14)
assert(response.body['scores']['payment_abuse']['score'] == 0.97)
def test_score_with_timeout_param_ok(self):
test_timeout = 5
mock_response = mock.Mock()
mock_response.content = score_response_json()
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'get') as mock_get:
mock_get.return_value = mock_response
response = self.sift_client.score('12345', test_timeout)
mock_get.assert_called_with(
'https://api.siftscience.com/v205/score/12345',
params={'api_key': self.test_key},
headers=mock.ANY,
timeout=test_timeout)
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.api_error_message == "OK")
assert(response.body['score'] == 0.85)
assert(response.body['scores']['content_abuse']['score'] == 0.14)
assert(response.body['scores']['payment_abuse']['score'] == 0.97)
def test_get_user_score_ok(self):
"""Test the GET /{version}/users/{userId}/score API, i.e. client.get_user_score()
"""
test_timeout = 5
mock_response = mock.Mock()
mock_response.content = USER_SCORE_RESPONSE_JSON
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'get') as mock_get:
mock_get.return_value = mock_response
response = self.sift_client.get_user_score('12345', test_timeout)
mock_get.assert_called_with(
'https://api.siftscience.com/v205/users/12345/score',
params={'api_key': self.test_key},
headers=mock.ANY,
timeout=test_timeout)
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.api_error_message == "OK")
assert(response.body['entity_id'] == '12345')
assert(response.body['scores']['content_abuse']['score'] == 0.14)
assert(response.body['scores']['payment_abuse']['score'] == 0.97)
assert('latest_decisions' in response.body)
def test_get_user_score_with_abuse_types_ok(self):
"""Test the GET /{version}/users/{userId}/score?abuse_types=... API, i.e. client.get_user_score()
"""
test_timeout = 5
mock_response = mock.Mock()
mock_response.content = USER_SCORE_RESPONSE_JSON
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'get') as mock_get:
mock_get.return_value = mock_response
response = self.sift_client.get_user_score('12345',
abuse_types=['payment_abuse', 'content_abuse'],
timeout=test_timeout)
mock_get.assert_called_with(
'https://api.siftscience.com/v205/users/12345/score',
params={'api_key': self.test_key, 'abuse_types': 'payment_abuse,content_abuse'},
headers=mock.ANY,
timeout=test_timeout)
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.api_error_message == "OK")
assert(response.body['entity_id'] == '12345')
assert(response.body['scores']['content_abuse']['score'] == 0.14)
assert(response.body['scores']['payment_abuse']['score'] == 0.97)
assert('latest_decisions' in response.body)
def test_rescore_user_ok(self):
"""Test the POST /{version}/users/{userId}/score API, i.e. client.rescore_user()
"""
test_timeout = 5
mock_response = mock.Mock()
mock_response.content = USER_SCORE_RESPONSE_JSON
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'post') as mock_post:
mock_post.return_value = mock_response
response = self.sift_client.rescore_user('12345', test_timeout)
mock_post.assert_called_with(
'https://api.siftscience.com/v205/users/12345/score',
params={'api_key': self.test_key},
headers=mock.ANY,
timeout=test_timeout)
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.api_error_message == "OK")
assert(response.body['entity_id'] == '12345')
assert(response.body['scores']['content_abuse']['score'] == 0.14)
assert(response.body['scores']['payment_abuse']['score'] == 0.97)
assert('latest_decisions' in response.body)
def test_rescore_user_with_abuse_types_ok(self):
"""Test the POST /{version}/users/{userId}/score?abuse_types=... API, i.e. client.rescore_user()
"""
test_timeout = 5
mock_response = mock.Mock()
mock_response.content = USER_SCORE_RESPONSE_JSON
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'post') as mock_post:
mock_post.return_value = mock_response
response = self.sift_client.rescore_user('12345',
abuse_types=['payment_abuse', 'content_abuse'],
timeout=test_timeout)
mock_post.assert_called_with(
'https://api.siftscience.com/v205/users/12345/score',
params={'api_key': self.test_key, 'abuse_types': 'payment_abuse,content_abuse'},
headers=mock.ANY,
timeout=test_timeout)
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.api_error_message == "OK")
assert(response.body['entity_id'] == '12345')
assert(response.body['scores']['content_abuse']['score'] == 0.14)
assert(response.body['scores']['payment_abuse']['score'] == 0.97)
assert('latest_decisions' in response.body)
def test_sync_score_ok(self):
event = '$transaction'
mock_response = mock.Mock()
mock_response.content = ('{"status": 0, "error_message": "OK", "score_response": %s}'
% score_response_json())
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'post') as mock_post:
mock_post.return_value = mock_response
response = self.sift_client.track(
event,
valid_transaction_properties(),
return_score=True,
abuse_types=['payment_abuse', 'content_abuse', 'legacy'])
mock_post.assert_called_with(
'https://api.siftscience.com/v205/events',
data=mock.ANY,
headers=mock.ANY,
timeout=mock.ANY,
params={'return_score': 'true', 'abuse_types': 'payment_abuse,content_abuse,legacy'})
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.api_status == 0)
assert(response.api_error_message == "OK")
assert(response.body['score_response']['score'] == 0.85)
assert(response.body['score_response']['scores']['content_abuse']['score'] == 0.14)
assert(response.body['score_response']['scores']['payment_abuse']['score'] == 0.97)
def test_get_decisions_fails(self):
with self.assertRaises(ValueError):
self.sift_client.get_decisions('usr')
def test_get_decisions(self):
mock_response = mock.Mock()
get_decisions_response_json = """
{
"data": [
{
"id": "block_user",
"name": "Block user",
"description": "user has a different billing and shipping addresses",
"entity_type": "user",
"abuse_type": "legacy",
"category": "block",
"webhook_url": "http://web.hook",
"created_at": "1468005577348",
"created_by": "[email protected]",
"updated_at": "1469229177756",
"updated_by": "[email protected]"
}
],
"has_more": "true",
"next_ref": "v3/accounts/accountId/decisions"
}
"""
mock_response.content = get_decisions_response_json
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'get') as mock_get:
mock_get.return_value = mock_response
response = self.sift_client.get_decisions(entity_type="user",
limit=10,
start_from=None,
abuse_types="legacy,payment_abuse",
timeout=3)
mock_get.assert_called_with(
'https://api3.siftscience.com/v3/accounts/ACCT/decisions',
headers=mock.ANY,
auth=mock.ANY,
params={'entity_type': 'user', 'limit': 10, 'abuse_types': 'legacy,payment_abuse'},
timeout=3)
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.body['data'][0]['id'] == 'block_user')
def test_get_decisions_entity_session(self):
mock_response = mock.Mock()
get_decisions_response_json = """
{
"data": [
{
"id": "block_session",
"name": "Block session",
"description": "session has problems",
"entity_type": "session",
"abuse_type": "legacy",
"category": "block",
"webhook_url": "http://web.hook",
"created_at": "1468005577348",
"created_by": "[email protected]",
"updated_at": "1469229177756",
"updated_by": "[email protected]"
}
],
"has_more": "true",
"next_ref": "v3/accounts/accountId/decisions"
}
"""
mock_response.content = get_decisions_response_json
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'get') as mock_get:
mock_get.return_value = mock_response
response = self.sift_client.get_decisions(entity_type="session",
limit=10,
start_from=None,
abuse_types="account_takeover",
timeout=3)
mock_get.assert_called_with(
'https://api3.siftscience.com/v3/accounts/ACCT/decisions',
headers=mock.ANY,
auth=mock.ANY,
params={'entity_type': 'session', 'limit': 10, 'abuse_types': 'account_takeover'},
timeout=3)
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.body['data'][0]['id'] == 'block_session')
def test_apply_decision_to_user_ok(self):
user_id = '54321'
mock_response = mock.Mock()
apply_decision_request = {
'decision_id': 'user_looks_ok_legacy',
'source': 'MANUAL_REVIEW',
'analyst': '[email protected]',
'description': 'called user and verified account',
'time': 1481569575
}
apply_decision_response_json = """
{
"entity": {
"id": "54321",
"type": "user"
},
"decision": {
"id": "user_looks_ok_legacy"
},
"time": "1481569575"
}
"""
mock_response.content = apply_decision_response_json
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'post') as mock_post:
mock_post.return_value = mock_response
response = self.sift_client.apply_user_decision(user_id, apply_decision_request)
data = json.dumps(apply_decision_request)
mock_post.assert_called_with(
'https://api3.siftscience.com/v3/accounts/ACCT/users/%s/decisions' % user_id,
auth=mock.ANY, data=data, headers=mock.ANY, timeout=mock.ANY)
self.assertIsInstance(response, sift.client.Response)
assert(response.body['entity']['type'] == 'user')
assert(response.http_status_code == 200)
assert(response.is_ok())
def test_validate_no_user_id_string_fails(self):
apply_decision_request = {
'decision_id': 'user_looks_ok_legacy',
'source': 'MANUAL_REVIEW',
'analyst': '[email protected]',
'description': 'called user and verified account',
}
with self.assertRaises(TypeError):
self.sift_client._validate_apply_decision_request(apply_decision_request, 123)
def test_apply_decision_to_order_fails_with_no_order_id(self):
with self.assertRaises(TypeError):
self.sift_client.apply_order_decision("user_id", None, {})
def test_apply_decision_to_session_fails_with_no_session_id(self):
with self.assertRaises(TypeError):
self.sift_client.apply_session_decision("user_id", None, {})
def test_get_session_decisions_fails_with_no_session_id(self):
with self.assertRaises(TypeError):
self.sift_client.get_session_decisions("user_id", None)
def test_apply_decision_to_content_fails_with_no_content_id(self):
with self.assertRaises(TypeError):
self.sift_client.apply_content_decision("user_id", None, {})
def test_validate_apply_decision_request_no_analyst_fails(self):
apply_decision_request = {
'decision_id': 'user_looks_ok_legacy',
'source': 'MANUAL_REVIEW',
'time': 1481569575
}
with self.assertRaises(ValueError):
self.sift_client._validate_apply_decision_request(apply_decision_request, "userId")
def test_validate_apply_decision_request_no_source_fails(self):
apply_decision_request = {
'decision_id': 'user_looks_ok_legacy',
'time': 1481569575
}
with self.assertRaises(ValueError):
self.sift_client._validate_apply_decision_request(apply_decision_request, "userId")
def test_validate_empty_apply_decision_request_fails(self):
apply_decision_request = {}
with self.assertRaises(ValueError):
self.sift_client._validate_apply_decision_request(apply_decision_request, "userId")
def test_apply_decision_manual_review_no_analyst_fails(self):
user_id = '54321'
apply_decision_request = {
'decision_id': 'user_looks_ok_legacy',
'source': 'MANUAL_REVIEW',
'time': 1481569575
}
with self.assertRaises(ValueError):
self.sift_client.apply_user_decision(user_id, apply_decision_request)
def test_apply_decision_no_source_fails(self):
user_id = '54321'
apply_decision_request = {
'decision_id': 'user_looks_ok_legacy',
'time': 1481569575
}
with self.assertRaises(ValueError):
self.sift_client.apply_user_decision(user_id, apply_decision_request)
def test_apply_decision_invalid_source_fails(self):
user_id = '54321'
apply_decision_request = {
'decision_id': 'user_looks_ok_legacy',
'source': 'INVALID_SOURCE',
'time': 1481569575
}
self.assertRaises(ValueError, self.sift_client.apply_user_decision, user_id, apply_decision_request)
def test_apply_decision_to_order_ok(self):
user_id = '54321'
order_id = '43210'
mock_response = mock.Mock()
apply_decision_request = {
'decision_id': 'order_looks_bad_payment_abuse',
'source': 'AUTOMATED_RULE',
'time': 1481569575
}
apply_decision_response_json = """
{
"entity": {
"id": "54321",
"type": "order"
},
"decision": {
"id": "order_looks_bad_payment_abuse"
},
"time": "1481569575"
}
"""
mock_response.content = apply_decision_response_json
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'post') as mock_post:
mock_post.return_value = mock_response
response = self.sift_client.apply_order_decision(user_id, order_id, apply_decision_request)
data = json.dumps(apply_decision_request)
mock_post.assert_called_with(
'https://api3.siftscience.com/v3/accounts/ACCT/users/%s/orders/%s/decisions' % (user_id, order_id),
auth=mock.ANY, data=data, headers=mock.ANY, timeout=mock.ANY)
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.http_status_code == 200)
assert(response.body['entity']['type'] == 'order')
def test_apply_decision_to_session_ok(self):
user_id = '54321'
session_id = 'gigtleqddo84l8cm15qe4il'
mock_response = mock.Mock()
apply_decision_request = {
'decision_id': 'session_looks_bad_ato',
'source': 'AUTOMATED_RULE',
'time': 1481569575
}
apply_decision_response_json = """
{
"entity": {
"id": "54321",
"type": "login"
},
"decision": {
"id": "session_looks_bad_ato"
},
"time": "1481569575"
}
"""
mock_response.content = apply_decision_response_json
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'post') as mock_post:
mock_post.return_value = mock_response
response = self.sift_client.apply_session_decision(user_id, session_id, apply_decision_request)
data = json.dumps(apply_decision_request)
mock_post.assert_called_with(
'https://api3.siftscience.com/v3/accounts/ACCT/users/%s/sessions/%s/decisions' % (user_id, session_id),
auth=mock.ANY, data=data, headers=mock.ANY, timeout=mock.ANY)
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.http_status_code == 200)
assert(response.body['entity']['type'] == 'login')
def test_apply_decision_to_content_ok(self):
user_id = '54321'
content_id = 'listing-1231'
mock_response = mock.Mock()
apply_decision_request = {
'decision_id': 'content_looks_bad_content_abuse',
'source': 'AUTOMATED_RULE',
'time': 1481569575
}
apply_decision_response_json = """
{
"entity": {
"id": "54321",
"type": "create_content"
},
"decision": {
"id": "content_looks_bad_content_abuse"
},
"time": "1481569575"
}
"""
mock_response.content = apply_decision_response_json
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'post') as mock_post:
mock_post.return_value = mock_response
response = self.sift_client.apply_content_decision(user_id, content_id, apply_decision_request)
data = json.dumps(apply_decision_request)
mock_post.assert_called_with(
'https://api3.siftscience.com/v3/accounts/ACCT/users/%s/content/%s/decisions' % (user_id, content_id),
auth=mock.ANY, data=data, headers=mock.ANY, timeout=mock.ANY)
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.http_status_code == 200)
assert(response.body['entity']['type'] == 'create_content')
def test_label_user_ok(self):
user_id = '54321'
mock_response = mock.Mock()
mock_response.content = '{"status": 0, "error_message": "OK"}'
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'post') as mock_post:
mock_post.return_value = mock_response
response = self.sift_client.label(user_id, valid_label_properties())
properties = {
'$abuse_type': 'content_abuse',
'$is_bad': True,
'$description': 'Listed a fake item',
'$source': 'Internal Review Queue',
'$analyst': '[email protected]'
}
properties.update({'$api_key': self.test_key, '$type': '$label'})
data = json.dumps(properties)
mock_post.assert_called_with(
'https://api.siftscience.com/v205/users/%s/labels' % user_id,
data=data, headers=mock.ANY, timeout=mock.ANY, params={})
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.api_status == 0)
assert(response.api_error_message == "OK")
def test_label_user_with_timeout_param_ok(self):
user_id = '54321'
test_timeout = 5
mock_response = mock.Mock()
mock_response.content = '{"status": 0, "error_message": "OK"}'
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'post') as mock_post:
mock_post.return_value = mock_response
response = self.sift_client.label(
user_id, valid_label_properties(), test_timeout)
properties = {
'$abuse_type': 'content_abuse',
'$is_bad': True,
'$description': 'Listed a fake item',
'$source': 'Internal Review Queue',
'$analyst': '[email protected]'
}
properties.update({'$api_key': self.test_key, '$type': '$label'})
data = json.dumps(properties)
mock_post.assert_called_with(
'https://api.siftscience.com/v205/users/%s/labels' % user_id,
data=data, headers=mock.ANY, timeout=test_timeout, params={})
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.api_status == 0)
assert(response.api_error_message == "OK")
def test_unlabel_user_ok(self):
user_id = '54321'
mock_response = mock.Mock()
mock_response.status_code = 204
with mock.patch.object(self.sift_client.session, 'delete') as mock_delete:
mock_delete.return_value = mock_response
response = self.sift_client.unlabel(user_id, abuse_type='account_abuse')
mock_delete.assert_called_with(
'https://api.siftscience.com/v205/users/%s/labels' % user_id,
headers=mock.ANY,
timeout=mock.ANY,
params={'api_key': self.test_key, 'abuse_type': 'account_abuse'})
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
def test_unicode_string_parameter_support(self):
        # str is unicode in Python 3, so there is nothing to check there;
        # that case is already covered by the other unit tests.
if sys.version_info[0] < 3:
mock_response = mock.Mock()
mock_response.content = '{"status": 0, "error_message": "OK"}'
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
user_id = '23056'
with mock.patch.object(self.sift_client.session, 'post') as mock_post:
mock_post.return_value = mock_response
assert(self.sift_client.track(
'$transaction',
valid_transaction_properties()))
assert(self.sift_client.label(
user_id,
valid_label_properties()))
with mock.patch.object(self.sift_client.session, 'get') as mock_get:
mock_get.return_value = mock_response
assert(self.sift_client.score(
user_id, abuse_types=['payment_abuse', 'content_abuse']))
def test_unlabel_user_with_special_chars_ok(self):
user_id = "54321=.-_+@:&^%!$"
mock_response = mock.Mock()
mock_response.status_code = 204
with mock.patch.object(self.sift_client.session, 'delete') as mock_delete:
mock_delete.return_value = mock_response
response = self.sift_client.unlabel(user_id)
mock_delete.assert_called_with(
'https://api.siftscience.com/v205/users/%s/labels' % urllib.parse.quote(user_id),
headers=mock.ANY,
timeout=mock.ANY,
params={'api_key': self.test_key})
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
def test_label_user__with_special_chars_ok(self):
user_id = '54321=.-_+@:&^%!$'
mock_response = mock.Mock()
mock_response.content = '{"status": 0, "error_message": "OK"}'
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'post') as mock_post:
mock_post.return_value = mock_response
response = self.sift_client.label(
user_id, valid_label_properties())
properties = {
'$abuse_type': 'content_abuse',
'$is_bad': True,
'$description': 'Listed a fake item',
'$source': 'Internal Review Queue',
'$analyst': '[email protected]'
}
properties.update({'$api_key': self.test_key, '$type': '$label'})
data = json.dumps(properties)
mock_post.assert_called_with(
'https://api.siftscience.com/v205/users/%s/labels' % urllib.parse.quote(user_id),
data=data,
headers=mock.ANY,
timeout=mock.ANY,
params={})
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.api_status == 0)
assert(response.api_error_message == "OK")
def test_score__with_special_user_id_chars_ok(self):
user_id = '54321=.-_+@:&^%!$'
mock_response = mock.Mock()
mock_response.content = score_response_json()
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'get') as mock_get:
mock_get.return_value = mock_response
response = self.sift_client.score(user_id, abuse_types=['legacy'])
mock_get.assert_called_with(
'https://api.siftscience.com/v205/score/%s' % urllib.parse.quote(user_id),
params={'api_key': self.test_key, 'abuse_types': 'legacy'},
headers=mock.ANY,
timeout=mock.ANY)
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.api_error_message == "OK")
assert(response.body['score'] == 0.85)
assert(response.body['scores']['content_abuse']['score'] == 0.14)
assert(response.body['scores']['payment_abuse']['score'] == 0.97)
def test_exception_during_track_call(self):
warnings.simplefilter("always")
with mock.patch.object(self.sift_client.session, 'post') as mock_post:
mock_post.side_effect = mock.Mock(
side_effect=requests.exceptions.RequestException("Failed"))
with self.assertRaises(sift.client.ApiException):
self.sift_client.track('$transaction', valid_transaction_properties())
def test_exception_during_score_call(self):
warnings.simplefilter("always")
with mock.patch.object(self.sift_client.session, 'get') as mock_get:
mock_get.side_effect = mock.Mock(
side_effect=requests.exceptions.RequestException("Failed"))
with self.assertRaises(sift.client.ApiException):
self.sift_client.score('Fred')
def test_exception_during_unlabel_call(self):
warnings.simplefilter("always")
with mock.patch.object(self.sift_client.session, 'delete') as mock_delete:
mock_delete.side_effect = mock.Mock(
side_effect=requests.exceptions.RequestException("Failed"))
with self.assertRaises(sift.client.ApiException):
self.sift_client.unlabel('Fred')
def test_return_actions_on_track(self):
event = '$transaction'
mock_response = mock.Mock()
mock_response.content = ('{"status": 0, "error_message": "OK", "score_response": %s}'
% action_response_json())
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'post') as mock_post:
mock_post.return_value = mock_response
response = self.sift_client.track(
event, valid_transaction_properties(), return_action=True)
mock_post.assert_called_with(
'https://api.siftscience.com/v205/events',
data=mock.ANY,
headers=mock.ANY,
timeout=mock.ANY,
params={'return_action': 'true'})
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.api_status == 0)
assert(response.api_error_message == "OK")
actions = response.body["score_response"]['actions']
assert(actions)
assert(actions[0]['action'])
assert(actions[0]['action']['id'] == 'freds_action')
assert(actions[0]['triggers'])
def test_get_workflow_status(self):
mock_response = mock.Mock()
mock_response.content = """
{
"id": "4zxwibludiaaa",
"config": {
"id": "5rrbr4iaaa",
"version": "1468367620871"
},
"config_display_name": "workflow config",
"abuse_types": [
"payment_abuse"
],
"state": "running",
"entity": {
"id": "example_user",
"type": "user"
},
"history": [
{
"app": "decision",
"name": "decision",
"state": "running",
"config": {
"decision_id": "user_decision"
}
},
{
"app": "event",
"name": "Event",
"state": "finished",
"config": {}
},
{
"app": "user",
"name": "Entity",
"state": "finished",
"config": {}
}
]
}
"""
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'get') as mock_get:
mock_get.return_value = mock_response
response = self.sift_client.get_workflow_status('4zxwibludiaaa', timeout=3)
mock_get.assert_called_with(
'https://api3.siftscience.com/v3/accounts/ACCT/workflows/runs/4zxwibludiaaa',
headers=mock.ANY, auth=mock.ANY, timeout=3)
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.body['state'] == 'running')
def test_get_user_decisions(self):
mock_response = mock.Mock()
mock_response.content = """
{
"decisions": {
"payment_abuse": {
"decision": {
"id": "user_decision"
},
"time": 1468707128659,
"webhook_succeeded": false
}
}
}
"""
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'get') as mock_get:
mock_get.return_value = mock_response
response = self.sift_client.get_user_decisions('example_user')
mock_get.assert_called_with(
'https://api3.siftscience.com/v3/accounts/ACCT/users/example_user/decisions',
headers=mock.ANY, auth=mock.ANY, timeout=mock.ANY)
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.body['decisions']['payment_abuse']['decision']['id'] == 'user_decision')
def test_get_order_decisions(self):
mock_response = mock.Mock()
mock_response.content = """
{
"decisions": {
"payment_abuse": {
"decision": {
"id": "decision7"
},
"time": 1468599638005,
"webhook_succeeded": false
},
"promotion_abuse": {
"decision": {
"id": "good_order"
},
"time": 1468517407135,
"webhook_succeeded": true
}
}
}
"""
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'get') as mock_get:
mock_get.return_value = mock_response
response = self.sift_client.get_order_decisions('example_order')
mock_get.assert_called_with(
'https://api3.siftscience.com/v3/accounts/ACCT/orders/example_order/decisions',
headers=mock.ANY, auth=mock.ANY, timeout=mock.ANY)
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.body['decisions']['payment_abuse']['decision']['id'] == 'decision7')
assert(response.body['decisions']['promotion_abuse']['decision']['id'] == 'good_order')
def test_get_session_decisions(self):
mock_response = mock.Mock()
mock_response.content = """
{
"decisions": {
"account_takeover": {
"decision": {
"id": "session_decision"
},
"time": 1461963839151,
"webhook_succeeded": true
}
}
}
"""
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'get') as mock_get:
mock_get.return_value = mock_response
response = self.sift_client.get_session_decisions('example_user', 'example_session')
mock_get.assert_called_with(
'https://api3.siftscience.com/v3/accounts/ACCT/users/example_user/sessions/example_session/decisions',
headers=mock.ANY, auth=mock.ANY, timeout=mock.ANY)
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.body['decisions']['account_takeover']['decision']['id'] == 'session_decision')
def test_get_content_decisions(self):
mock_response = mock.Mock()
mock_response.content = """
{
"decisions": {
"content_abuse": {
"decision": {
"id": "content_looks_bad_content_abuse"
},
"time": 1468517407135,
"webhook_succeeded": true
}
}
}
"""
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'get') as mock_get:
mock_get.return_value = mock_response
response = self.sift_client.get_content_decisions('example_user', 'example_content')
mock_get.assert_called_with(
'https://api3.siftscience.com/v3/accounts/ACCT/users/example_user/content/example_content/decisions',
headers=mock.ANY, auth=mock.ANY, timeout=mock.ANY)
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.body['decisions']['content_abuse']['decision']['id'] == 'content_looks_bad_content_abuse')
def test_provided_session(self):
session = mock.Mock()
client = sift.Client(api_key=self.test_key, account_id=self.account_id, session=session)
mock_response = mock.Mock()
mock_response.content = '{"status": 0, "error_message": "OK"}'
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
session.post.return_value = mock_response
event = '$transaction'
client.track(event, valid_transaction_properties())
session.post.assert_called_once()
def main():
unittest.main()
if __name__ == '__main__':
main()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCircuitPeeringsOperations:
"""ExpressRouteCircuitPeeringsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified peering from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
async def get(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
**kwargs: Any
) -> "_models.ExpressRouteCircuitPeering":
"""Gets the specified authorization from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuitPeering, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_03_01.models.ExpressRouteCircuitPeering
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
peering_parameters: "_models.ExpressRouteCircuitPeering",
**kwargs: Any
) -> "_models.ExpressRouteCircuitPeering":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(peering_parameters, 'ExpressRouteCircuitPeering')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
peering_parameters: "_models.ExpressRouteCircuitPeering",
**kwargs: Any
) -> AsyncLROPoller["_models.ExpressRouteCircuitPeering"]:
"""Creates or updates a peering in the specified express route circuits.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param peering_parameters: Parameters supplied to the create or update express route circuit
peering operation.
:type peering_parameters: ~azure.mgmt.network.v2017_03_01.models.ExpressRouteCircuitPeering
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExpressRouteCircuitPeering or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2017_03_01.models.ExpressRouteCircuitPeering]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitPeering"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
peering_parameters=peering_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
def list(
self,
resource_group_name: str,
circuit_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ExpressRouteCircuitPeeringListResult"]:
"""Gets all peerings in a specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteCircuitPeeringListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_03_01.models.ExpressRouteCircuitPeeringListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitPeeringListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitPeeringListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings'} # type: ignore
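# Illustrative usage sketch only (not generated code): this operations group is
# normally reached through the management client rather than instantiated
# directly. The credential, subscription id and resource names below are
# placeholders/assumptions.
async def _example_express_route_peerings_usage():
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.network.aio import NetworkManagementClient
    async with NetworkManagementClient(
            DefaultAzureCredential(), "<subscription-id>",
            api_version="2017-03-01") as client:  # pins this module's API version
        # Enumerate peerings on a circuit.
        async for peering in client.express_route_circuit_peerings.list(
                "<resource-group>", "<circuit-name>"):
            print(peering.name)
        # Delete a peering and wait (asynchronously) for the LRO to finish.
        poller = await client.express_route_circuit_peerings.begin_delete(
            "<resource-group>", "<circuit-name>", "<peering-name>")
        await poller.result()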
|
|
"""
GPS Training GUI
The GPS Training GUI is used to interact with the GPS algorithm during training.
It contains the following seven components:
Action Panel contains buttons for stop, reset, go, fail
Action Status Textbox displays action status
Algorithm Status Textbox displays algorithm status
Cost Plot displays costs after each iteration
Algorithm Output Textbox displays algorithm output after each iteration
3D Trajectory Visualizer displays 3D trajectories after each iteration
Image Visualizer displays images received from a rostopic
For more detailed documentation, visit: rll.berkeley.edu/gps/gui
"""
import time
import threading
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from gps.gui.config import config
from gps.gui.action_panel import Action, ActionPanel
from gps.gui.textbox import Textbox
from gps.gui.mean_plotter import MeanPlotter
from gps.gui.plotter_3d import Plotter3D
from gps.gui.image_visualizer import ImageVisualizer
from gps.gui.util import buffered_axis_limits, load_data_from_npz
from gps.proto.gps_pb2 import END_EFFECTOR_POINTS
# Needed for typechecks
from gps.algorithm.algorithm_badmm import AlgorithmBADMM
from gps.algorithm.algorithm_mdgps import AlgorithmMDGPS
class GPSTrainingGUI(object):
def __init__(self, hyperparams):
self._hyperparams = hyperparams
self._log_filename = self._hyperparams['log_filename']
if 'target_filename' in self._hyperparams:
self._target_filename = self._hyperparams['target_filename']
else:
self._target_filename = None
# GPS Training Status.
self.mode = config['initial_mode'] # Modes: run, wait, end, request, process.
self.request = None # Requests: stop, reset, go, fail, None.
self.err_msg = None
self._colors = {
'run': 'cyan',
'wait': 'orange',
'end': 'red',
'stop': 'red',
'reset': 'yellow',
'go': 'green',
'fail': 'magenta',
}
self._first_update = True
# Actions.
actions_arr = [
Action('stop', 'stop', self.request_stop, axis_pos=0),
Action('reset', 'reset', self.request_reset, axis_pos=1),
Action('go', 'go', self.request_go, axis_pos=2),
Action('fail', 'fail', self.request_fail, axis_pos=3),
]
# Setup figure.
plt.ion()
plt.rcParams['toolbar'] = 'None'
for key in plt.rcParams:
if key.startswith('keymap.'):
plt.rcParams[key] = ''
self._fig = plt.figure(figsize=config['figsize'])
self._fig.subplots_adjust(left=0.01, bottom=0.01, right=0.99, top=0.99,
wspace=0, hspace=0)
# Assign GUI component locations.
self._gs = gridspec.GridSpec(16, 8)
self._gs_action_panel = self._gs[0:2, 0:8]
self._gs_action_output = self._gs[2:3, 0:4]
self._gs_status_output = self._gs[3:4, 0:4]
self._gs_cost_plotter = self._gs[2:4, 4:8]
self._gs_algthm_output = self._gs[4:8, 0:8]
if config['image_on']:
self._gs_traj_visualizer = self._gs[8:16, 0:4]
self._gs_image_visualizer = self._gs[8:16, 4:8]
else:
self._gs_traj_visualizer = self._gs[8:16, 0:8]
# Create GUI components.
self._action_panel = ActionPanel(self._fig, self._gs_action_panel, 1, 4, actions_arr)
self._action_output = Textbox(self._fig, self._gs_action_output, border_on=True)
self._status_output = Textbox(self._fig, self._gs_status_output, border_on=False)
self._algthm_output = Textbox(self._fig, self._gs_algthm_output,
max_display_size=config['algthm_output_max_display_size'],
log_filename=self._log_filename,
fontsize=config['algthm_output_fontsize'],
font_family='monospace')
self._cost_plotter = MeanPlotter(self._fig, self._gs_cost_plotter,
color='blue', label='mean cost')
self._traj_visualizer = Plotter3D(self._fig, self._gs_traj_visualizer,
num_plots=self._hyperparams['conditions'])
if config['image_on']:
self._image_visualizer = ImageVisualizer(self._fig,
self._gs_image_visualizer, cropsize=config['image_size'],
rostopic=config['image_topic'], show_overlay_buttons=True)
# Setup GUI components.
self._algthm_output.log_text('\n')
self.set_output_text(self._hyperparams['info'])
if config['initial_mode'] == 'run':
self.run_mode()
else:
self.wait_mode()
# Setup 3D Trajectory Visualizer plot titles and legends
for m in range(self._hyperparams['conditions']):
self._traj_visualizer.set_title(m, 'Condition %d' % (m))
self._traj_visualizer.add_legend(linestyle='-', marker='None',
color='green', label='Trajectory Samples')
self._traj_visualizer.add_legend(linestyle='-', marker='None',
color='blue', label='Policy Samples')
self._traj_visualizer.add_legend(linestyle='None', marker='x',
color=(0.5, 0, 0), label='LG Controller Means')
self._traj_visualizer.add_legend(linestyle='-', marker='None',
color='red', label='LG Controller Distributions')
self._fig.canvas.draw()
# Display calculating thread
def display_calculating(delay, run_event):
while True:
if not run_event.is_set():
run_event.wait()
if run_event.is_set():
self.set_status_text('Calculating.')
time.sleep(delay)
if run_event.is_set():
self.set_status_text('Calculating..')
time.sleep(delay)
if run_event.is_set():
self.set_status_text('Calculating...')
time.sleep(delay)
self._calculating_run = threading.Event()
self._calculating_thread = threading.Thread(target=display_calculating,
args=(1, self._calculating_run))
self._calculating_thread.daemon = True
self._calculating_thread.start()
# GPS Training functions
def request_stop(self, event=None):
self.request_mode('stop')
def request_reset(self, event=None):
self.request_mode('reset')
def request_go(self, event=None):
self.request_mode('go')
def request_fail(self, event=None):
self.request_mode('fail')
def request_mode(self, request):
"""
Sets the request mode (stop, reset, go, fail). The request is read by
gps_main before sampling, and the appropriate action is taken.
"""
self.mode = 'request'
self.request = request
self.set_action_text(self.request + ' requested')
self.set_action_bgcolor(self._colors[self.request], alpha=0.2)
def process_mode(self):
"""
Completes the current request, after it is first read by gps_main.
Displays visual confirmation that the request was processed,
displays any error messages, and then switches into mode 'run' or 'wait'.
"""
self.mode = 'process'
self.set_action_text(self.request + ' processed')
self.set_action_bgcolor(self._colors[self.request], alpha=1.0)
if self.err_msg:
self.set_action_text(self.request + ' processed' + '\nERROR: ' +
self.err_msg)
self.err_msg = None
time.sleep(1.0)
else:
time.sleep(0.5)
if self.request in ('stop', 'reset', 'fail'):
self.wait_mode()
elif self.request == 'go':
self.run_mode()
self.request = None
def wait_mode(self):
self.mode = 'wait'
self.set_action_text('waiting')
self.set_action_bgcolor(self._colors[self.mode], alpha=1.0)
def run_mode(self):
self.mode = 'run'
self.set_action_text('running')
self.set_action_bgcolor(self._colors[self.mode], alpha=1.0)
def end_mode(self):
self.mode = 'end'
self.set_action_text('ended')
self.set_action_bgcolor(self._colors[self.mode], alpha=1.0)
def estop(self, event=None):
self.set_action_text('estop: NOT IMPLEMENTED')
# GUI functions
def set_action_text(self, text):
self._action_output.set_text(text)
self._cost_plotter.draw_ticklabels() # redraw overflow ticklabels
def set_action_bgcolor(self, color, alpha=1.0):
self._action_output.set_bgcolor(color, alpha)
self._cost_plotter.draw_ticklabels() # redraw overflow ticklabels
def set_status_text(self, text):
self._status_output.set_text(text)
self._cost_plotter.draw_ticklabels() # redraw overflow ticklabels
def set_output_text(self, text):
self._algthm_output.set_text(text)
self._cost_plotter.draw_ticklabels() # redraw overflow ticklabels
def append_output_text(self, text):
self._algthm_output.append_text(text)
self._cost_plotter.draw_ticklabels() # redraw overflow ticklabels
def start_display_calculating(self):
self._calculating_run.set()
def stop_display_calculating(self):
self._calculating_run.clear()
def set_image_overlays(self, condition):
"""
Sets up the image visualizer with what images to overlay if
"overlay_initial_image" or "overlay_target_image" is pressed.
"""
if not config['image_on'] or not self._target_filename:
return
initial_image = load_data_from_npz(self._target_filename,
config['image_overlay_actuator'], str(condition),
'initial', 'image', default=None)
target_image = load_data_from_npz(self._target_filename,
config['image_overlay_actuator'], str(condition),
'target', 'image', default=None)
self._image_visualizer.set_initial_image(initial_image,
alpha=config['image_overlay_alpha'])
self._image_visualizer.set_target_image(target_image,
alpha=config['image_overlay_alpha'])
# Iteration update functions
def update(self, itr, algorithm, agent, traj_sample_lists, pol_sample_lists):
"""
After each iteration, update the iteration data output, the cost plot,
and the 3D trajectory visualizations (if end effector points exist).
"""
if self._first_update:
self._output_column_titles(algorithm)
self._first_update = False
costs = [np.mean(np.sum(algorithm.prev[m].cs, axis=1)) for m in range(algorithm.M)]
self._update_iteration_data(itr, algorithm, costs, pol_sample_lists)
self._cost_plotter.update(costs, t=itr)
if END_EFFECTOR_POINTS in agent.x_data_types:
self._update_trajectory_visualizations(algorithm, agent,
traj_sample_lists, pol_sample_lists)
self._fig.canvas.draw()
self._fig.canvas.flush_events() # Fixes bug in Qt4Agg backend
def _output_column_titles(self, algorithm, policy_titles=False):
"""
        Set up iteration data column titles: iteration, average cost, and for
        each condition the mean cost over samples, step size, linear Gaussian
controller entropies, and initial/final KL divergences for BADMM.
"""
self.set_output_text(self._hyperparams['experiment_name'])
if isinstance(algorithm, AlgorithmMDGPS) or isinstance(algorithm, AlgorithmBADMM):
condition_titles = '%3s | %8s %12s' % ('', '', '')
itr_data_fields = '%3s | %8s %12s' % ('itr', 'avg_cost', 'avg_pol_cost')
else:
condition_titles = '%3s | %8s' % ('', '')
itr_data_fields = '%3s | %8s' % ('itr', 'avg_cost')
for m in range(algorithm.M):
condition_titles += ' | %8s %9s %-7d' % ('', 'condition', m)
itr_data_fields += ' | %8s %8s %8s' % (' cost ', ' step ', 'entropy ')
if isinstance(algorithm, AlgorithmBADMM):
condition_titles += ' %8s %8s %8s' % ('', '', '')
itr_data_fields += ' %8s %8s %8s' % ('pol_cost', 'kl_div_i', 'kl_div_f')
elif isinstance(algorithm, AlgorithmMDGPS):
condition_titles += ' %8s' % ('')
itr_data_fields += ' %8s' % ('pol_cost')
self.append_output_text(condition_titles)
self.append_output_text(itr_data_fields)
def _update_iteration_data(self, itr, algorithm, costs, pol_sample_lists):
"""
Update iteration data information: iteration, average cost, and for
        each condition the mean cost over samples, step size, linear Gaussian
controller entropies, and initial/final KL divergences for BADMM.
"""
avg_cost = np.mean(costs)
if pol_sample_lists is not None:
test_idx = algorithm._hyperparams['test_conditions']
# pol_sample_lists is a list of singletons
samples = [sl[0] for sl in pol_sample_lists]
pol_costs = [np.sum(algorithm.cost[idx].eval(s)[0])
for s, idx in zip(samples, test_idx)]
itr_data = '%3d | %8.2f %12.2f' % (itr, avg_cost, np.mean(pol_costs))
else:
itr_data = '%3d | %8.2f' % (itr, avg_cost)
for m in range(algorithm.M):
cost = costs[m]
step = algorithm.prev[m].step_mult * algorithm.base_kl_step
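            # 2 * sum(log(diag(chol(Sigma_t)))) equals sum_t log det(Sigma_t), i.e. the
            # controllers' differential entropy up to a factor of 1/2 and an additive constant.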
entropy = 2*np.sum(np.log(np.diagonal(algorithm.prev[m].traj_distr.chol_pol_covar,
axis1=1, axis2=2)))
itr_data += ' | %8.2f %8.2f %8.2f' % (cost, step, entropy)
if isinstance(algorithm, AlgorithmBADMM):
kl_div_i = algorithm.cur[m].pol_info.init_kl.mean()
kl_div_f = algorithm.cur[m].pol_info.prev_kl.mean()
itr_data += ' %8.2f %8.2f %8.2f' % (pol_costs[m], kl_div_i, kl_div_f)
elif isinstance(algorithm, AlgorithmMDGPS):
# TODO: Change for test/train better.
if test_idx == algorithm._hyperparams['train_conditions']:
itr_data += ' %8.2f' % (pol_costs[m])
else:
itr_data += ' %8s' % ("N/A")
self.append_output_text(itr_data)
def _update_trajectory_visualizations(self, algorithm, agent,
traj_sample_lists, pol_sample_lists):
"""
Update 3D trajectory visualizations information: the trajectory samples,
policy samples, and linear Gaussian controller means and covariances.
"""
xlim, ylim, zlim = self._calculate_3d_axis_limits(traj_sample_lists, pol_sample_lists)
for m in range(algorithm.M):
self._traj_visualizer.clear(m)
self._traj_visualizer.set_lim(i=m, xlim=xlim, ylim=ylim, zlim=zlim)
self._update_linear_gaussian_controller_plots(algorithm, agent, m)
self._update_samples_plots(traj_sample_lists, m, 'green', 'Trajectory Samples')
if pol_sample_lists:
self._update_samples_plots(pol_sample_lists, m, 'blue', 'Policy Samples')
self._traj_visualizer.draw() # this must be called explicitly
def _calculate_3d_axis_limits(self, traj_sample_lists, pol_sample_lists):
"""
Calculate the 3D axis limits shared between trajectory plots,
based on the minimum and maximum xyz values across all samples.
"""
all_eept = np.empty((0, 3))
sample_lists = traj_sample_lists
if pol_sample_lists:
            sample_lists += pol_sample_lists
for sample_list in sample_lists:
for sample in sample_list.get_samples():
ee_pt = sample.get(END_EFFECTOR_POINTS)
                for i in range(ee_pt.shape[1] // 3):
ee_pt_i = ee_pt[:, 3*i+0:3*i+3]
all_eept = np.r_[all_eept, ee_pt_i]
min_xyz = np.amin(all_eept, axis=0)
max_xyz = np.amax(all_eept, axis=0)
xlim = buffered_axis_limits(min_xyz[0], max_xyz[0], buffer_factor=1.25)
ylim = buffered_axis_limits(min_xyz[1], max_xyz[1], buffer_factor=1.25)
zlim = buffered_axis_limits(min_xyz[2], max_xyz[2], buffer_factor=1.25)
return xlim, ylim, zlim
def _update_linear_gaussian_controller_plots(self, algorithm, agent, m):
"""
        Update the linear Gaussian controller plots with iteration data,
for the mean and covariances of the end effector points.
"""
# Calculate mean and covariance for end effector points
eept_idx = agent.get_idx_x(END_EFFECTOR_POINTS)
start, end = eept_idx[0], eept_idx[-1]
mu, sigma = algorithm.traj_opt.forward(algorithm.prev[m].traj_distr, algorithm.prev[m].traj_info)
mu_eept, sigma_eept = mu[:, start:end+1], sigma[:, start:end+1, start:end+1]
# Linear Gaussian Controller Distributions (Red)
        for i in range(mu_eept.shape[1] // 3):
mu, sigma = mu_eept[:, 3*i+0:3*i+3], sigma_eept[:, 3*i+0:3*i+3, 3*i+0:3*i+3]
self._traj_visualizer.plot_3d_gaussian(i=m, mu=mu, sigma=sigma,
edges=100, linestyle='-', linewidth=1.0, color='red',
alpha=0.15, label='LG Controller Distributions')
# Linear Gaussian Controller Means (Dark Red)
        for i in range(mu_eept.shape[1] // 3):
mu = mu_eept[:, 3*i+0:3*i+3]
self._traj_visualizer.plot_3d_points(i=m, points=mu, linestyle='None',
marker='x', markersize=5.0, markeredgewidth=1.0,
color=(0.5, 0, 0), alpha=1.0, label='LG Controller Means')
def _update_samples_plots(self, sample_lists, m, color, label):
"""
Update the samples plots with iteration data, for the trajectory samples
and the policy samples.
"""
samples = sample_lists[m].get_samples()
for sample in samples:
ee_pt = sample.get(END_EFFECTOR_POINTS)
            for i in range(ee_pt.shape[1] // 3):
ee_pt_i = ee_pt[:, 3*i+0:3*i+3]
self._traj_visualizer.plot_3d_points(m, ee_pt_i, color=color, label=label)
def save_figure(self, filename):
self._fig.savefig(filename)
|
|
# -*- coding: utf-8 -*-
# fabfile for Django:
import time
import re
import os
import sys
from contextlib import contextmanager as _contextmanager
from fabric.api import *
from fabric.colors import *
from fabric.state import commands
from fabric.contrib.files import comment, uncomment
if sys.platform.startswith('linux'):
env.remote_deployment = True
else:
env.remote_deployment = False
__all__ = (
'deploy_to_dev_server', 'dev', 'setup', 'deploy', 'venv',
'deploy_version', 'rollback', 'releases', 'update_code',
'update_code_from_repo', 'update_code_from_archive',
'install_requirements', 'symlink_current_release', 'collect_static_files',
'syncdb', 'migrate', 'cleanup', 'debug', 'restart_webserver', 'clean',
'setup_local', 'setup_remote',
)
# globals
env.git_host = ''
env.project_name = ''
env.venv_name = ''
env.repo = 'git@{git_host}:/projects/{project_name}'.format(**env)
env.use_ssh_config = env.remote_deployment
env.shared_dirs = 'config media static releases/{current,previous}'
env.requirements_file = 'requirements.txt'
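# Layout implied on the target host by the tasks below (paths are relative to
# env.vhost_path; the timestamped release directories are created by deploy):
#
#   config/local_settings.py      # symlinked into each release
#   media/  static/               # shared between releases
#   cached-copy/                  # persistent git clone (remote deployments)
#   releases/<YYYYmmddHHMMSS>/    # one directory per deploy
#   releases/current, releases/previous  # point at the deployed releases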
@task(default=True)
def deploy_to_dev_server():
execute(dev)
execute(deploy)
def _set_venv_name():
if not env.venv_name:
env.venv_name = env.project_name
@task
def dev():
"""Development server"""
env.user = ''
env.branch = ''
env.hosts = []
env.vhosts_root = "/var/www/vhosts"
env.host_name = ''
env.django_settings_module = '{project_name}.settings'.format(**env)
env.vhost_path = '{vhosts_root}/{project_name}.{host_name}'.format(**env)
env.release_path = "{vhost_path}/releases/current".format(**env)
@task(alias='up')
def setup():
"""Initial deployment setup"""
_set_venv_name()
run("mkvirtualenv {venv_name}".format(**env))
with cd(env.vhost_path):
run('mkdir -p {shared_dirs}'.format(**env))
execute(setup_remote)
@task(alias='dep')
def deploy(param=''):
"""
Deploy the latest version of the site to the servers, install any
required third party modules, install the virtual host and
then restart the webserver
"""
require('branch', 'vhosts_root', 'host_name', 'vhost_path', 'release_path',
provided_by=['dev', 'prod'])
_set_venv_name()
try:
print(green("Start deployment to production"))
env.release = time.strftime('%Y%m%d%H%M%S')
execute('update_code')
execute('symlink_current_release')
execute('install_requirements')
execute('collect_static_files')
execute('cleanup')
if param == 'migrate':
execute('migrate')
if 'after_deploy' in commands:
execute('after_deploy')
execute('restart_webserver')
except (SystemExit, KeyboardInterrupt):
tarball = '{release}.tar.gz'.format(**env)
if os.path.exists(tarball):
print('Cleanup tarball')
os.remove(tarball)
@task
def after_deploy():
pass
@_contextmanager
def venv():
_set_venv_name()
require('django_settings_module', provided_by=['dev', 'prod'])
with cd(env.release_path):
with shell_env(DJANGO_SETTINGS_MODULE=env.django_settings_module):
with prefix('workon {venv_name}'.format(**env)):
yield
@task
def deploy_version(version):
"""Specify a specific version to be made live"""
require('hosts', provided_by=['prod', 'dev'])
env.version = version
with cd(env.vhost_path):
run("rm -rf releases/previous; mv -f releases/current releases/previous;")
run("ln -s {version} releases/current".format(**env))
run("ln -nfs {vhost_path}/config/local_settings.py {release_path}/{project_name}/local_settings.py".format(**env))
restart_webserver()
@task
def rollback():
"""
Limited rollback capability. Simply loads the previously current
version of the code. Rolling back again will swap between the two.
"""
print(green("Rollback deployed changes"))
require('hosts', provided_by=['prod', 'dev'])
with cd(env.vhost_path):
run("mv -f releases/current releases/_previous;")
run("mv -f releases/previous releases/current;")
run("mv -f releases/_previous releases/previous;")
restart_webserver()
def releases():
"""List a releases made"""
with cd(env.vhost_path):
env.releases = sorted(run('ls -x releases').split())
if len(env.releases) >= 1:
env.current_revision = env.releases[-1]
env.current_release = "releases/{current_revision}".format(**env)
if len(env.releases) > 1:
env.previous_revision = env.releases[-2]
env.previous_release = "releases/previous"
@task
def update_code():
"""Create an archive from the current Git master branch and upload it"""
if env.remote_deployment:
update_code_from_repo()
else:
update_code_from_archive()
@task
def update_code_from_repo():
require('release', provided_by=['deploy'])
print(green("Update code from git"))
result = local('git ls-remote {repo} {branch}'.format(**env), capture=True)
revdata = re.split(r'[\t\n]', result)
env.rev = revdata[0]
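    # Keep a persistent clone in {vhost_path}/cached-copy: fetch and hard-reset it to
    # the remote revision (or clone it on the first deploy), then copy it into the new
    # timestamped release directory.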
raw_commands = (
"if [ -d {vhost_path}/cached-copy ]; then cd {vhost_path}/cached-copy",
"git fetch -q origin",
"git reset -q --hard {rev}",
"git clean -q -d -x -f; else git clone -q {repo} {vhost_path}/cached-copy",
"cd {vhost_path}/cached-copy",
"git checkout -q -b deploy {rev}; fi"
)
run(' && '.join(map(lambda c: c.format(**env), raw_commands)))
run('cp -RPp {vhost_path}/cached-copy {vhost_path}/releases/{release}'.format(**env))
@task
def update_code_from_archive():
"""Pack local repository copy to archive and upload to server"""
require('release', provided_by=['deploy'])
print(green("Create local git snapshot"))
result = local('git ls-remote {repo} {branch}'.format(**env), capture=True)
revdata = re.split(r'[\t\n]', result)
env.rev = revdata[0]
result = local('git rev-parse --revs-only {rev}'.format(**env), capture=True)
local_revdata = re.split(r'[\t\n]', result)
if local_revdata:
if local_revdata[0] == env.rev:
local('git archive --format=tar.gz {branch} -o {release}.tar.gz'.format(**env))
run("mkdir -p {vhost_path}/releases/{release}".format(**env))
put('{release}.tar.gz'.format(**env), '/tmp/')
run("cd {vhost_path}/releases/{release} && tar zmxf /tmp/{release}.tar.gz".format(**env))
run('rm -rf /tmp/{release}.tar.gz'.format(**env))
os.remove('{release}.tar.gz'.format(**env))
else:
abort(red("Please update you repository from {repo} remote.".format(**env)))
@task
def install_requirements(param=''):
"""Install the required packages from the requirements file using pip"""
with venv():
print(green("Install runtime requirements"))
if param == "upgrade":
env.upgrade = "--upgrade"
else:
env.upgrade = ''
run("pip install -r {requirements_file} {upgrade}".format(**env))
@task
def symlink_current_release():
"""Symlink our current release"""
require('release', provided_by=['deploy'])
with cd(env.vhost_path):
run("rm -rf releases/previous; mv -f releases/current releases/previous;")
run("ln -s {release} releases/current".format(**env))
run("ln -nfs {vhost_path}/config/local_settings.py {release_path}/{project_name}/local_settings.py".format(**env))
@task
def collect_static_files():
"""Collect static files"""
with venv():
print(green("Collect static files"))
run("django-admin.py collectstatic -v0 --noinput".format(**env))
@task
def syncdb(param=''):
"""Update the database"""
with venv():
print(green("Run syncdb for apps"))
run('django-admin.py syncdb -v0'.format(**env))
@task
def migrate(param=''):
"""Update the database"""
with venv():
print(green("Migrate apps"))
run("django-admin.py migrate".format(**env))
@task
def cleanup():
"""Clean up old releases"""
with hide('running', 'stdout'):
if 'releases' not in env:
releases()
size = len(env.releases)
if len(env.releases) > 5:
directories = env.releases
directories.reverse()
del directories[:5]
dirs_count = len(directories)
env.directories = ' '.join(["{0}/releases/{1}".format(env.vhost_path, release) for release in directories])
run("rm -rf {directories}".format(**env))
print(red("Cleaned {0} of {1} releases".format(dirs_count, size)))
@task
def debug(param="on"):
"""Toogle DEBUG variable in local_settings.py"""
with cd(env.path):
config_path = '{vhost_path}/config/local_settings.py'.format(**env)
if param == "on":
uncomment(config_path, r'(DEBUG)')
else:
comment(config_path, r'^(DEBUG)')
execute(restart_webserver)
@task(alias='restart')
def restart_webserver():
"""Restart the web server"""
with cd(env.path):
if env.django_settings_module.split('.')[-1] == 'stage':
run('touch {release_path}/{project_name}/wsgi_stage.py'.format(**env))
else:
run('touch {release_path}/{project_name}/wsgi.py'.format(**env))
@task
def clean():
    local(r'find . -name "*.pyc" -exec rm -f {} \;')
@task(alias='setup-local')
def setup_local():
env.venvwrapper = local('which virtualenvwrapper.sh', capture=True)
local('source {venvwrapper} && workon {venv_name} && add2virtualenv .'.format(**env), shell='/bin/bash')
local('echo "export DJANGO_SETTINGS_MODULE={django_settings_module}" >> ~/.virtualenvs/{venv_name}/bin/postactivate'.format(**env))
local('echo "unset DJANGO_SETTINGS_MODULE" >> ~/.virtualenvs/{venv_name}/bin/postdeactivate'.format(**env))
@task(alias='setup-remote')
def setup_remote():
with venv():
run('add2virtualenv {release_path}'.format(**env), shell='/bin/bash')
run('echo "export DJANGO_SETTINGS_MODULE={django_settings_module}" >> ~/.virtualenvs/{venv_name}/bin/postactivate'.format(**env))
run('echo "unset DJANGO_SETTINGS_MODULE" >> ~/.virtualenvs/{venv_name}/bin/postdeactivate'.format(**env))
|
|
#---------------------------------------------------------------------------
#
# DEnum is a 'dynamic enum' trait whose valid values are obtained from
# another trait on the object.
#
# Caveat:
# The problem with this trait is that the listeners (for changes to
# the valid values) are added only when the attribute is read or
# set. Thus, if the acceptable list of values is changed before the
# listeners are activated, the value will be corrected only when it is
# accessed and not when the values are set.
#
# Written by: David C. Morrill and Prabhu Ramachandran
#
# (c) Copyright 2006-2008 by Enthought, Inc.
#
#---------------------------------------------------------------------------
import numbers
from traits.api import (CArray, Int, NO_COMPARE, Property, TraitError,
TraitFactory, TraitType)
from traitsui.api import EnumEditor
from traits.traits import trait_cast
#---------------------------------------------------------------------------
# Utility functions:
#---------------------------------------------------------------------------
def super_getattr(object, name, *args):
"""Works the same way as getattr, except that name can be of the
form 'a.b.c' (as many levels as you like). For example:
>>> class A:
... pass
...
>>> a = A()
>>> a.b = A()
>>> a.b.c = 1
>>> super_getattr(a, 'b.c')
1
>>> super_getattr(a.b, 'c')
1
"""
if '.' in name:
attrs = name.split('.')
last = attrs.pop()
obj = object
for attr in attrs:
obj = getattr(obj, attr)
return getattr(obj, last, *args)
else:
return getattr(object, name, *args)
def super_setattr(object, name, value):
"""Works the same way as setattr, except that name can be of the
form 'a.b.c' (as many levels as you like). For example:
>>> class A:
... pass
...
>>> a = A()
>>> a.b = A()
>>> super_setattr(a, 'b.c', 1)
>>> a.b.c
1
"""
if '.' in name:
attrs = name.split('.')
last = attrs.pop()
obj = object
for attr in attrs:
obj = getattr(obj, attr)
setattr(obj, last, value)
else:
setattr(object, name, value)
#--------------------------------------------------------------------------------
# Helper class for DEnum trait.
#--------------------------------------------------------------------------------
class DEnumHelper(object):
"""Defines a bunch of staticmethods that collect all the helper
functions needed for the DEnum trait.
"""
######################################################################
# Get/Set functions for the property.
def get_value ( object, name ):
return super_getattr(object, DEnumHelper._init_listeners(object, name))
get_value = staticmethod(get_value)
def set_value ( object, name, value ):
_name = DEnumHelper._init_listeners( object, name )
trait = object.trait( name )
values = super_getattr(object, trait.values_name)
if value not in values:
            raise TraitError(object, name,
                             "one of %s" % values,
                             value)
old = super_getattr(object, _name)
super_setattr( object, _name, value )
object.trait_property_changed(name, old, value)
set_value = staticmethod(set_value)
######################################################################
# Makes a default EnumEditor for the trait:
def make_editor ( trait = None ):
return EnumEditor( name=trait.values_name )
make_editor = staticmethod(make_editor)
######################################################################
# Ensures that the listeners are initialized.
def _init_listeners ( object, name ):
_name = '_' + name
if not hasattr( object, _name ):
trait = object.trait( name )
DEnumHelper._add_listeners( object, name, trait.values_name)
default = trait.default or ''
values = super_getattr( object, trait.values_name )
if values:
if default is None or default not in values:
default = values[0]
super_setattr( object, _name, default )
return _name
_init_listeners = staticmethod(_init_listeners)
def _add_listeners ( object, name, values_name ):
def check_values(object, values_name, old, new):
cur_choice = super_getattr(object, name)
if cur_choice not in new:
if new:
super_setattr(object, name, new[0])
else:
super_setattr(object, name, '')
def check_values_items(object, values_name, list_event):
cur_choice = super_getattr(object, name)
values = super_getattr(object, values_name[:-6])
if cur_choice not in values:
if values:
super_setattr(object, name, values[0])
else:
super_setattr(object, name, '')
object.on_trait_change( check_values, values_name )
object.on_trait_change( check_values_items, values_name + '_items' )
_add_listeners = staticmethod(_add_listeners)
#-------------------------------------------------------------------------------
# Defines the DEnum property:
#-------------------------------------------------------------------------------
DEnum = Property(DEnumHelper.get_value, DEnumHelper.set_value,
values_name = 'values',
editor = (DEnumHelper.make_editor, {'trait': None})
)
DEnum = TraitFactory(DEnum)
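# Minimal usage sketch (an illustration, not part of this module): a DEnum whose
# allowed choices are drawn from a companion List trait on the same object. The
# class and trait names below are made up for the example.
def _example_denum_usage():
    from traits.api import HasTraits, List, Str

    class Dataset(HasTraits):
        # Companion trait holding the currently valid choices.
        scalar_names = List(Str, ['temperature', 'pressure'])
        # Dynamic enum whose value must always be one of scalar_names.
        active_scalar = DEnum(values_name='scalar_names')

    d = Dataset()
    d.active_scalar = 'pressure'     # accepted: present in scalar_names
    d.scalar_names = ['density']     # active_scalar is corrected to 'density'
    return d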
##########################################################################
# `ShadowProperty` trait type.
##########################################################################
class ShadowProperty(TraitType):
# Not really necessary but specifies the attribute up front.
trait_type = None
# Call the notifiers smartly only when the value has really changed.
# If this is set to False, the notification will always occur.
smart_notify = True
def __init__(self, trait_type, smart_notify=True, **metadata):
"""Defines a shadow property trait that is best explained by
example::
class Thing(HasTraits):
x = ShadowProperty(Float, smart_notify=False)
def _x_changed(self, value):
                    print(value)
In this example, the actual value of the property (`x`) will be
stored in `_x` and `_x_changed` will be called regardless
whether the value actually changed or not. If `smart_notify` is
set to `True` then the handler is called only if the value has
actually changed.
Note that the validation uses the validation of the specified
`trait_type` parameter.
"""
self.trait_type = trait_cast(trait_type)
self.smart_notify = smart_notify
super(ShadowProperty, self).__init__(**metadata)
def validate(self, object, name, value):
"""Validates that a specified value is valid for this trait.
"""
trt = self.trait_type
if trt is not None and hasattr(trt, 'validate'):
value = trt.validate(object, name, value)
return value
def get(self, object, name):
"""Get the value of the trait."""
shadow = self._get_shadow(name)
d = object.__dict__
if shadow in d:
return d[shadow]
else:
return None
def set(self, object, name, value):
"""Set the value of the trait."""
old = self.get(object, name)
shadow = self._get_shadow(name)
object.__dict__[shadow] = value
# Fire a trait property changed.
fire = True
if self.smart_notify:
if old is value:
fire = False
if fire and self._check_notification(object):
object.trait_property_changed(name, old, value)
def _get_shadow(self, name):
"""Get the shadow attribute name to use."""
return '_' + name
def _check_notification(self, object):
"""Checks to see if notifications are allowed or not i.e. has
the trait been set via:
object.set(name=value, trait_change_notify=False)
"""
if hasattr(object, '_get_trait_change_notify'):
return object._get_trait_change_notify()
else:
# Traits won't tell us so we find out by adding a dynamic
# trait, changing it and then seeing if the callback was
# called, sigh!
attr = '_testing_Notification_handlers_tmp_dont_touch'
def callback(value):
callback.value = value
callback.value = -1
object.add_trait(attr, Int)
object.on_trait_change(callback, attr)
setattr(object, attr, 1)
status = False
if callback.value == 1:
status = True
object.on_trait_change(callback, attr, remove=True)
object.remove_trait(attr)
return status
class ArrayOrNone(CArray):
""" Either an array-like object or None.
"""
def __init__(self, *args, **metadata):
metadata['comparison_mode'] = NO_COMPARE
super(ArrayOrNone, self).__init__(*args, **metadata)
def validate(self, object, name, value):
if value is None:
return value
return super(ArrayOrNone, self).validate(object, name, value)
def get_default_value(self):
return (0, None)
class ArrayNumberOrNone(CArray):
""" Either an array-like, number converted to a 1D array, or None.
"""
def __init__(self, *args, **metadata):
metadata['comparison_mode'] = NO_COMPARE
super(ArrayNumberOrNone, self).__init__(*args, **metadata)
def validate(self, object, name, value):
if value is None:
return value
        elif isinstance(value, numbers.Number):
# Local import to avoid explicit dependency.
import numpy
value = numpy.atleast_1d(value)
return super(ArrayNumberOrNone, self).validate(object, name, value)
def get_default_value(self):
return (0, None)
|
|
# Author: Alexander Fabisch -- <[email protected]>
# Author: Christopher Moody <[email protected]>
# Author: Nick Travers <[email protected]>
# License: BSD 3 clause (C) 2014
# This is the exact and Barnes-Hut t-SNE implementation. There are other
# modifications of the algorithm:
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from ..neighbors import BallTree
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..utils.extmath import _ravel
from ..decomposition import PCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
from . import _barnes_hut_tsne
from ..utils.fixes import astype
from ..externals.six import string_types
from ..utils import deprecated
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
in a one-dimensional array.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
distances = astype(distances, np.float32, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, None, desired_perplexity, verbose)
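    # Symmetrize the conditionals (p_ij is proportional to p_j|i + p_i|j) and
    # normalize the condensed matrix, clipping at machine epsilon for stability.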
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
def _joint_probabilities_nn(distances, neighbors, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances using just nearest
neighbors.
    This method is approximately equal to _joint_probabilities. The latter
    is O(N^2), but limiting the joint probability to the nearest neighbors
    improves this substantially to O(uN), where u is the number of neighbors.
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
        in a one-dimensional array.
    neighbors : array, shape (n_samples, K)
        Indices of the K nearest neighbors of each sample.
    desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
distances = astype(distances, np.float32, copy=False)
neighbors = astype(neighbors, np.int64, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, neighbors, desired_perplexity, verbose)
m = "All probabilities should be finite"
assert np.all(np.isfinite(conditional_P)), m
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
assert np.all(np.abs(P) <= 1.0)
return P
def _kl_divergence(params, P, degrees_of_freedom, n_samples, n_components,
skip_num_points=0):
"""t-SNE objective function: gradient of the KL divergence
of p_ijs and q_ijs and the absolute error.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= degrees_of_freedom
n **= (degrees_of_freedom + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
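    # n holds the unnormalized Student-t kernel (1 + ||y_i - y_j||^2 / dof)^(-(dof+1)/2)
    # for each unordered pair; dividing by 2 * sum(n) makes each entry the ordered-pair
    # probability q_ij, and the factor of 2 in the KL term below restores the sum over
    # ordered pairs.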
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
# Gradient: dC/dY
grad = np.ndarray((n_samples, n_components))
PQd = squareform((P - Q) * n)
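    # Per-point gradient: dC/dy_i = 2 * (dof + 1) / dof *
    #     sum_j (p_ij - q_ij) * (1 + ||y_i - y_j||^2 / dof)^(-(dof+1)/2) * (y_i - y_j);
    # the constant factor c is applied after the loop.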
for i in range(skip_num_points, n_samples):
np.dot(_ravel(PQd[i]), X_embedded[i] - X_embedded, out=grad[i])
grad = grad.ravel()
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad *= c
return kl_divergence, grad
def _kl_divergence_error(params, P, neighbors, degrees_of_freedom, n_samples,
n_components):
"""t-SNE objective function: the absolute error of the
KL divergence of p_ijs and q_ijs.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
neighbors : array (n_samples, K)
        The neighbors are not actually required to calculate the
        divergence; the argument is only present to match the signature of
        the gradient function.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= degrees_of_freedom
n **= (degrees_of_freedom + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
if len(P.shape) == 2:
P = squareform(P)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
return kl_divergence
def _kl_divergence_bh(params, P, neighbors, degrees_of_freedom, n_samples,
n_components, angle=0.5, skip_num_points=0,
verbose=False):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Uses Barnes-Hut tree methods to calculate the gradient that
runs in O(NlogN) instead of O(N^2)
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
neighbors : int64 array, shape (n_samples, K)
Array with element [i, j] giving the index for the jth
closest neighbor to point i.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
angle : float (default: 0.5)
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
computation time and angle greater than 0.8 has quickly increasing error.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
verbose : int
Verbosity level.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
params = astype(params, np.float32, copy=False)
X_embedded = params.reshape(n_samples, n_components)
neighbors = astype(neighbors, np.int64, copy=False)
if len(P.shape) == 1:
sP = squareform(P).astype(np.float32)
else:
sP = P.astype(np.float32)
grad = np.zeros(X_embedded.shape, dtype=np.float32)
error = _barnes_hut_tsne.gradient(sP, X_embedded, neighbors,
grad, angle, n_components, verbose,
dof=degrees_of_freedom)
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad = grad.ravel()
grad *= c
return error, grad
def _gradient_descent(objective, p0, it, n_iter, objective_error=None,
n_iter_check=1, n_iter_without_progress=50,
momentum=0.5, learning_rate=1000.0, min_gain=0.01,
min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0,
args=None, kwargs=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector. When expensive to compute, the cost can optionally
be None and can be computed every n_iter_check steps using
the objective_error function.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_check : int
Number of iterations before evaluating the global error. If the error
is sufficiently low, we abort the optimization.
objective_error : function or callable
Should return the cost for a given parameter vector; it is only
called when the objective itself reported a cost of None.
n_iter_without_progress : int, optional (default: 50)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.5)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 1000.0)
The learning rate should be extremely high for t-SNE! Values in the
range [100.0, 1000.0] are common.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
min_error_diff : float, optional (default: 1e-7)
If the absolute difference of two successive cost function values
is below this threshold, the optimization will be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
kwargs : dict
Keyword arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = 0
for i in range(it, n_iter):
new_error, grad = objective(p, *args, **kwargs)
grad_norm = linalg.norm(grad)
inc = update * grad >= 0.0
dec = np.invert(inc)
gains[inc] += 0.05
gains[dec] *= 0.95
np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if (i + 1) % n_iter_check == 0:
if new_error is None:
new_error = objective_error(p, *args)
error_diff = np.abs(new_error - error)
error = new_error
if verbose >= 2:
m = "[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
print(m % (i + 1, error, grad_norm))
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
if error_diff <= min_error_diff:
if verbose >= 2:
m = "[t-SNE] Iteration %d: error difference %f. Finished."
print(m % (i + 1, error_diff))
break
if new_error is not None:
error = new_error
return p, error, i
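# Illustrative sketch (not part of scikit-learn): the optimizer above is
# generic, so a simple quadratic objective is enough to show the expected
# (cost, gradient) contract and the (params, error, iteration) return value.
def _example_gradient_descent_usage():
    import numpy as np

    def _quadratic(p):
        # cost 0.5 * ||p||^2 and its gradient (copied so the optimizer's
        # in-place scaling does not alias the parameter vector)
        return 0.5 * np.dot(p, p), p.copy()

    p0 = np.array([1.0, -2.0, 3.0])
    p, error, it = _gradient_descent(_quadratic, p0, it=0, n_iter=100,
                                     learning_rate=1.0, momentum=0.5)
    return p, error, it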
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
\sum_{j \in U^{(k)}_i} (r(i, j) - k)
where :math:`r(i, j)` is the rank of the datapoint j according to the
pairwise distances between datapoints in the original input space, and
:math:`U^{(k)}_i` is the set of points that are in the k nearest
neighbors in the embedded space but not in the original space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, optional (default: 5)
Number of neighbors k that will be considered.
precomputed : bool, optional (default: False)
Set this flag if X is a precomputed square distance matrix.
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
"""
if precomputed:
dist_X = X
else:
dist_X = pairwise_distances(X, squared=True)
dist_X_embedded = pairwise_distances(X_embedded, squared=True)
ind_X = np.argsort(dist_X, axis=1)
ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
n_samples = X.shape[0]
t = 0.0
ranks = np.zeros(n_neighbors)
for i in range(n_samples):
for j in range(n_neighbors):
ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
ranks -= n_neighbors
t += np.sum(ranks[ranks > 0])
t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
(2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
return t
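# Illustrative sketch (not part of scikit-learn): a trivial "embedding" that
# keeps the first two coordinates of the data; its trustworthiness should be
# well above chance on smooth random data.
def _example_trustworthiness_usage():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    X_embedded = X[:, :2]
    return trustworthiness(X, X_embedded, n_neighbors=3)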
class TSNE(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 4.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 1000)
The learning rate can be a critical parameter. It should be
between 100 and 1000. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. If the cost function gets stuck in a bad local
minimum, increasing the learning rate sometimes helps.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 200.
n_iter_without_progress : int, optional (default: 30)
Only used if method='exact'
Maximum number of iterations without progress before we abort the
optimization. If method='barnes_hut' this parameter is fixed to
a value of 30 and cannot be changed.
.. versionadded:: 0.17
parameter *n_iter_without_progress* to control stopping criteria.
min_grad_norm : float, optional (default: 1e-7)
Only used if method='exact'
If the gradient norm is below this threshold, the optimization will
be aborted. If method='barnes_hut' this parameter is fixed to a value
of 1e-3 and cannot be changed.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string or numpy array, optional (default: "random")
Initialization of embedding. Possible options are 'random', 'pca',
and a numpy array of shape (n_samples, n_components).
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int, RandomState instance or None, optional (default: None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Note that different initializations might result in
different local minima of the cost function.
method : string (default: 'barnes_hut')
By default the gradient calculation algorithm uses Barnes-Hut
approximation running in O(NlogN) time. method='exact'
will run on the slower, but exact, algorithm in O(N^2) time. The
exact algorithm should be used when nearest-neighbor errors need
to be better than 3%. However, the exact method cannot scale to
millions of examples.
.. versionadded:: 0.17
Approximate optimization *method* via the Barnes-Hut.
angle : float (default: 0.5)
Only used if method='barnes_hut'
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
computation time and angle greater than 0.8 has quickly increasing error.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kl_divergence_ : float
Kullback-Leibler divergence after optimization.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = TSNE(n_components=2, random_state=0)
>>> np.set_printoptions(suppress=True)
>>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
array([[ 0.00017599, 0.00003993],
[ 0.00009891, 0.00021913],
[ 0.00018554, -0.00009357],
[ 0.00009528, -0.00001407]])
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
[3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms.
Journal of Machine Learning Research 15(Oct):3221-3245, 2014.
http://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf
"""
def __init__(self, n_components=2, perplexity=30.0,
early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000,
n_iter_without_progress=30, min_grad_norm=1e-7,
metric="euclidean", init="random", verbose=0,
random_state=None, method='barnes_hut', angle=0.5):
if not ((isinstance(init, string_types) and
init in ["pca", "random"]) or
isinstance(init, np.ndarray)):
msg = "'init' must be 'pca', 'random', or a numpy array"
raise ValueError(msg)
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.n_iter_without_progress = n_iter_without_progress
self.min_grad_norm = min_grad_norm
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
self.method = method
self.angle = angle
def _fit(self, X, skip_num_points=0):
"""Fit the model using X as training data.
Note that sparse arrays can only be handled by method='exact'.
It is recommended that you convert your sparse array to dense
(e.g. `X.toarray()`) if it fits in memory, or otherwise to use a
dimensionality reduction technique first (e.g. TruncatedSVD).
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. Note that when
method='barnes_hut', X cannot be a sparse array and if need be
will be converted to a 32 bit float array. Method='exact' allows
sparse arrays and 64bit floating point inputs.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
"""
if self.method not in ['barnes_hut', 'exact']:
raise ValueError("'method' must be 'barnes_hut' or 'exact'")
if self.angle < 0.0 or self.angle > 1.0:
raise ValueError("'angle' must be between 0.0 - 1.0")
if self.method == 'barnes_hut' and sp.issparse(X):
raise TypeError('A sparse matrix was passed, but dense '
'data is required for method="barnes_hut". Use '
'X.toarray() to convert to a dense numpy array if '
'the array is small enough for it to fit in '
'memory. Otherwise consider dimensionality '
'reduction techniques (e.g. TruncatedSVD)')
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError("early_exaggeration must be at least 1, but is "
"%f" % self.early_exaggeration)
if self.n_iter < 200:
raise ValueError("n_iter should be at least 200")
if self.metric == "precomputed":
if isinstance(self.init, string_types) and self.init == 'pca':
raise ValueError("The parameter init=\"pca\" cannot be used "
"with metric=\"precomputed\".")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
distances = pairwise_distances(X, metric=self.metric,
squared=True)
else:
distances = pairwise_distances(X, metric=self.metric)
if not np.all(distances >= 0):
raise ValueError("All distances should be positive, either "
"the metric or precomputed distances given "
"as X are not correct")
# Degrees of freedom of the Student's t-distribution. The suggestion
# degrees_of_freedom = n_components - 1 comes from
# "Learning a Parametric Embedding by Preserving Local Structure"
# Laurens van der Maaten, 2009.
degrees_of_freedom = max(self.n_components - 1.0, 1)
n_samples = X.shape[0]
# the number of nearest neighbors to find
k = min(n_samples - 1, int(3. * self.perplexity + 1))
neighbors_nn = None
if self.method == 'barnes_hut':
if self.verbose:
print("[t-SNE] Computing %i nearest neighbors..." % k)
if self.metric == 'precomputed':
# Use the precomputed distances to find
# the k nearest neighbors and their distances
neighbors_nn = np.argsort(distances, axis=1)[:, :k]
else:
# Find the nearest neighbors for every point
bt = BallTree(X)
# LvdM uses 3 * perplexity as the number of neighbors
# And we add one to not count the data point itself
# In the event that we have very small # of points
# set the neighbors to n - 1
distances_nn, neighbors_nn = bt.query(X, k=k + 1)
neighbors_nn = neighbors_nn[:, 1:]
P = _joint_probabilities_nn(distances, neighbors_nn,
self.perplexity, self.verbose)
else:
P = _joint_probabilities(distances, self.perplexity, self.verbose)
assert np.all(np.isfinite(P)), "All probabilities should be finite"
assert np.all(P >= 0), "All probabilities should be zero or positive"
assert np.all(P <= 1), ("All probabilities should be less "
"than or equal to one")
if isinstance(self.init, np.ndarray):
X_embedded = self.init
elif self.init == 'pca':
pca = PCA(n_components=self.n_components, svd_solver='randomized',
random_state=random_state)
X_embedded = pca.fit_transform(X)
elif self.init == 'random':
X_embedded = None
else:
raise ValueError("Unsupported initialization scheme: %s"
% self.init)
return self._tsne(P, degrees_of_freedom, n_samples, random_state,
X_embedded=X_embedded,
neighbors=neighbors_nn,
skip_num_points=skip_num_points)
@property
@deprecated("Attribute n_iter_final was deprecated in version 0.19 and "
"will be removed in 0.21. Use 'n_iter_' instead")
def n_iter_final(self):
return self.n_iter_
def _tsne(self, P, degrees_of_freedom, n_samples, random_state,
X_embedded=None, neighbors=None, skip_num_points=0):
"""Runs t-SNE."""
# t-SNE minimizes the Kullback-Leibler divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with three stages:
# * early exaggeration with momentum 0.5
# * early exaggeration with momentum 0.8
# * final optimization with momentum 0.8
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
if X_embedded is None:
# Initialize embedding randomly
X_embedded = 1e-4 * random_state.randn(n_samples,
self.n_components)
params = X_embedded.ravel()
opt_args = {"n_iter": 50, "momentum": 0.5, "it": 0,
"learning_rate": self.learning_rate,
"n_iter_without_progress": self.n_iter_without_progress,
"verbose": self.verbose, "n_iter_check": 25,
"kwargs": dict(skip_num_points=skip_num_points)}
if self.method == 'barnes_hut':
m = "Must provide an array of neighbors to use Barnes-Hut"
assert neighbors is not None, m
obj_func = _kl_divergence_bh
objective_error = _kl_divergence_error
sP = squareform(P).astype(np.float32)
neighbors = neighbors.astype(np.int64)
args = [sP, neighbors, degrees_of_freedom, n_samples,
self.n_components]
opt_args['args'] = args
opt_args['min_grad_norm'] = 1e-3
opt_args['n_iter_without_progress'] = 30
# Don't always calculate the cost since that calculation
# can be nearly as expensive as the gradient
opt_args['objective_error'] = objective_error
opt_args['kwargs']['angle'] = self.angle
opt_args['kwargs']['verbose'] = self.verbose
else:
obj_func = _kl_divergence
opt_args['args'] = [P, degrees_of_freedom, n_samples,
self.n_components]
opt_args['min_error_diff'] = 0.0
opt_args['min_grad_norm'] = self.min_grad_norm
# Early exaggeration
P *= self.early_exaggeration
params, kl_divergence, it = _gradient_descent(obj_func, params,
**opt_args)
opt_args['n_iter'] = 100
opt_args['momentum'] = 0.8
opt_args['it'] = it + 1
params, kl_divergence, it = _gradient_descent(obj_func, params,
**opt_args)
if self.verbose:
print("[t-SNE] KL divergence after %d iterations with early "
"exaggeration: %f" % (it + 1, kl_divergence))
# Save the final number of iterations
self.n_iter_ = it
# Final optimization
P /= self.early_exaggeration
opt_args['n_iter'] = self.n_iter
opt_args['it'] = it + 1
params, kl_divergence, it = _gradient_descent(obj_func, params,
**opt_args)
if self.verbose:
print("[t-SNE] Error after %d iterations: %f"
% (it + 1, kl_divergence))
X_embedded = params.reshape(n_samples, self.n_components)
self.kl_divergence_ = kl_divergence
return X_embedded
def fit_transform(self, X, y=None):
"""Fit X into an embedded space and return that transformed
output.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
embedding = self._fit(X)
self.embedding_ = embedding
return self.embedding_
def fit(self, X, y=None):
"""Fit X into an embedded space.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. If the method
is 'exact', X may be a sparse matrix of type 'csr', 'csc'
or 'coo'.
"""
self.fit_transform(X)
return self
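# Illustrative usage sketch (not part of scikit-learn): embedding a
# precomputed squared-distance matrix with the exact method; the perplexity
# is lowered to suit the tiny toy dataset.
def _example_tsne_precomputed():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(10, 4)
    D = pairwise_distances(X, squared=True)
    model = TSNE(n_components=2, perplexity=5.0, metric="precomputed",
                 method="exact", random_state=0)
    return model.fit_transform(D)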
|
|
# Copyright (c) 2016-2018, Neil Booth
# Copyright (c) 2017, the ElectrumX authors
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
'''History by script hash (address).'''
import array
import ast
import bisect
import time
from collections import defaultdict
from functools import partial
import electrumx.lib.util as util
from electrumx.lib.util import (
pack_be_uint16, pack_le_uint64, unpack_be_uint16_from, unpack_le_uint64,
)
from electrumx.lib.hash import hash_to_hex_str, HASHX_LEN
class History(object):
DB_VERSIONS = [0, 1]
def __init__(self):
self.logger = util.class_logger(__name__, self.__class__.__name__)
# For history compaction
self.max_hist_row_entries = 12500
self.unflushed = defaultdict(bytearray)
self.unflushed_count = 0
self.flush_count = 0
self.comp_flush_count = -1
self.comp_cursor = -1
self.db_version = max(self.DB_VERSIONS)
self.upgrade_cursor = -1
self.db = None
def open_db(self, db_class, for_sync, utxo_flush_count, compacting):
self.db = db_class('hist', for_sync)
self.read_state()
self.clear_excess(utxo_flush_count)
# An incomplete compaction needs to be cancelled otherwise
# restarting it will corrupt the history
if not compacting:
self._cancel_compaction()
return self.flush_count
def close_db(self):
if self.db:
self.db.close()
self.db = None
def read_state(self):
state = self.db.get(b'state\0\0')
if state:
state = ast.literal_eval(state.decode())
if not isinstance(state, dict):
raise RuntimeError('failed reading state from history DB')
self.flush_count = state['flush_count']
self.comp_flush_count = state.get('comp_flush_count', -1)
self.comp_cursor = state.get('comp_cursor', -1)
self.db_version = state.get('db_version', 0)
self.upgrade_cursor = state.get('upgrade_cursor', -1)
else:
self.flush_count = 0
self.comp_flush_count = -1
self.comp_cursor = -1
self.db_version = max(self.DB_VERSIONS)
self.upgrade_cursor = -1
if self.db_version not in self.DB_VERSIONS:
msg = (f'your history DB version is {self.db_version} but '
f'this software only handles DB versions {self.DB_VERSIONS}')
self.logger.error(msg)
raise RuntimeError(msg)
if self.db_version != max(self.DB_VERSIONS):
self.upgrade_db()
self.logger.info(f'history DB version: {self.db_version}')
self.logger.info(f'flush count: {self.flush_count:,d}')
def clear_excess(self, utxo_flush_count):
# < might happen at end of compaction as both DBs cannot be
# updated atomically
if self.flush_count <= utxo_flush_count:
return
self.logger.info('DB shut down uncleanly. Scanning for '
'excess history flushes...')
keys = []
for key, _hist in self.db.iterator(prefix=b''):
flush_id, = unpack_be_uint16_from(key[-2:])
if flush_id > utxo_flush_count:
keys.append(key)
self.logger.info(f'deleting {len(keys):,d} history entries')
self.flush_count = utxo_flush_count
with self.db.write_batch() as batch:
for key in keys:
batch.delete(key)
self.write_state(batch)
self.logger.info('deleted excess history entries')
def write_state(self, batch):
'''Write state to the history DB.'''
state = {
'flush_count': self.flush_count,
'comp_flush_count': self.comp_flush_count,
'comp_cursor': self.comp_cursor,
'db_version': self.db_version,
'upgrade_cursor': self.upgrade_cursor,
}
# History entries are not prefixed; the suffix \0\0 ensures we
# look similar to other entries and aren't interfered with
batch.put(b'state\0\0', repr(state).encode())
def add_unflushed(self, hashXs_by_tx, first_tx_num):
unflushed = self.unflushed
count = 0
for tx_num, hashXs in enumerate(hashXs_by_tx, start=first_tx_num):
tx_numb = pack_le_uint64(tx_num)[:5]
hashXs = set(hashXs)
for hashX in hashXs:
unflushed[hashX].extend(tx_numb)
count += len(hashXs)
self.unflushed_count += count
def unflushed_memsize(self):
return len(self.unflushed) * 180 + self.unflushed_count * 5
def assert_flushed(self):
assert not self.unflushed
def flush(self):
start_time = time.time()
self.flush_count += 1
flush_id = pack_be_uint16(self.flush_count)
unflushed = self.unflushed
with self.db.write_batch() as batch:
for hashX in sorted(unflushed):
key = hashX + flush_id
batch.put(key, bytes(unflushed[hashX]))
self.write_state(batch)
count = len(unflushed)
unflushed.clear()
self.unflushed_count = 0
if self.db.for_sync:
elapsed = time.time() - start_time
self.logger.info(f'flushed history in {elapsed:.1f}s '
f'for {count:,d} addrs')
def backup(self, hashXs, tx_count):
# Not certain this is needed, but it doesn't hurt
self.flush_count += 1
nremoves = 0
bisect_left = bisect.bisect_left
chunks = util.chunks
with self.db.write_batch() as batch:
for hashX in sorted(hashXs):
deletes = []
puts = {}
for key, hist in self.db.iterator(prefix=hashX, reverse=True):
a = array.array('Q')
a.frombytes(b''.join(item + bytes(3) for item in chunks(hist, 5)))
# Remove all history entries >= tx_count
idx = bisect_left(a, tx_count)
nremoves += len(a) - idx
if idx > 0:
puts[key] = hist[:5 * idx]
break
deletes.append(key)
for key in deletes:
batch.delete(key)
for key, value in puts.items():
batch.put(key, value)
self.write_state(batch)
self.logger.info(f'backing up removed {nremoves:,d} history entries')
def get_txnums(self, hashX, limit=1000):
'''Generator that returns an unpruned, sorted list of tx_nums in the
history of a hashX. Includes both spending and receiving
transactions. By default yields at most 1000 entries. Set
limit to None to get them all. '''
limit = util.resolve_limit(limit)
chunks = util.chunks
for _key, hist in self.db.iterator(prefix=hashX):
for tx_numb in chunks(hist, 5):
if limit == 0:
return
tx_num, = unpack_le_uint64(tx_numb + bytes(3))
yield tx_num
limit -= 1
#
# History compaction
#
# comp_cursor is a cursor into compaction progress.
# -1: no compaction in progress
# 0-65535: Compaction in progress; all prefixes < comp_cursor have
# been compacted, and later ones have not.
# 65536: compaction complete in-memory but not flushed
#
# comp_flush_count applies during compaction, and is a flush count
# for history with prefix < comp_cursor. flush_count applies
# to still uncompacted history. It is -1 when no compaction is
# taking place. Key suffixes up to and including comp_flush_count
# are used, so a parallel history flush must first increment this
#
# When compaction is complete and the final flush takes place,
# flush_count is reset to comp_flush_count, and comp_flush_count to -1
def _flush_compaction(self, cursor, write_items, keys_to_delete):
'''Flush a single compaction pass as a batch.'''
# Update compaction state
if cursor == 65536:
self.flush_count = self.comp_flush_count
self.comp_cursor = -1
self.comp_flush_count = -1
else:
self.comp_cursor = cursor
# History DB. Flush compacted history and updated state
with self.db.write_batch() as batch:
# Important: delete first! The keyspace may overlap.
for key in keys_to_delete:
batch.delete(key)
for key, value in write_items:
batch.put(key, value)
self.write_state(batch)
def _compact_hashX(self, hashX, hist_map, hist_list,
write_items, keys_to_delete):
'''Compress history for a hashX. hist_list is an ordered list of
the histories to be compressed.'''
# History entries (tx numbers) are 5 bytes each. Distribute
# over rows of up to ~62KB in size. A fixed row size means
# future compactions will not need to update the first N - 1
# rows.
max_row_size = self.max_hist_row_entries * 5
full_hist = b''.join(hist_list)
nrows = (len(full_hist) + max_row_size - 1) // max_row_size
if nrows > 4:
self.logger.info('hashX {} is large: {:,d} entries across '
'{:,d} rows'
.format(hash_to_hex_str(hashX),
len(full_hist) // 5, nrows))
# Find what history needs to be written, and what keys need to
# be deleted. Start by assuming all keys are to be deleted,
# and then remove those that are the same on-disk as when
# compacted.
write_size = 0
keys_to_delete.update(hist_map)
for n, chunk in enumerate(util.chunks(full_hist, max_row_size)):
key = hashX + pack_be_uint16(n)
if hist_map.get(key) == chunk:
keys_to_delete.remove(key)
else:
write_items.append((key, chunk))
write_size += len(chunk)
assert n + 1 == nrows
self.comp_flush_count = max(self.comp_flush_count, n)
return write_size
def _compact_prefix(self, prefix, write_items, keys_to_delete):
'''Compact all history entries for hashXs beginning with the
given prefix. Update keys_to_delete and write.'''
prior_hashX = None
hist_map = {}
hist_list = []
key_len = HASHX_LEN + 2
write_size = 0
for key, hist in self.db.iterator(prefix=prefix):
# Ignore non-history entries
if len(key) != key_len:
continue
hashX = key[:-2]
if hashX != prior_hashX and prior_hashX:
write_size += self._compact_hashX(prior_hashX, hist_map,
hist_list, write_items,
keys_to_delete)
hist_map.clear()
hist_list.clear()
prior_hashX = hashX
hist_map[key] = hist
hist_list.append(hist)
if prior_hashX:
write_size += self._compact_hashX(prior_hashX, hist_map, hist_list,
write_items, keys_to_delete)
return write_size
def _compact_history(self, limit):
'''Inner loop of history compaction. Loops until limit bytes have
been processed.
'''
keys_to_delete = set()
write_items = [] # A list of (key, value) pairs
write_size = 0
# Loop over 2-byte prefixes
cursor = self.comp_cursor
while write_size < limit and cursor < 65536:
prefix = pack_be_uint16(cursor)
write_size += self._compact_prefix(prefix, write_items,
keys_to_delete)
cursor += 1
max_rows = self.comp_flush_count + 1
self._flush_compaction(cursor, write_items, keys_to_delete)
self.logger.info('history compaction: wrote {:,d} rows ({:.1f} MB), '
'removed {:,d} rows, largest: {:,d}, {:.1f}% complete'
.format(len(write_items), write_size / 1000000,
len(keys_to_delete), max_rows,
100 * cursor / 65536))
return write_size
def _cancel_compaction(self):
if self.comp_cursor != -1:
self.logger.warning('cancelling in-progress history compaction')
self.comp_flush_count = -1
self.comp_cursor = -1
#
# DB upgrade
#
def upgrade_db(self):
self.logger.info(f'history DB version: {self.db_version}')
self.logger.info('Upgrading your history DB; this can take some time...')
def upgrade_cursor(cursor):
count = 0
prefix = pack_be_uint16(cursor)
key_len = HASHX_LEN + 2
chunks = util.chunks
with self.db.write_batch() as batch:
batch_put = batch.put
for key, hist in self.db.iterator(prefix=prefix):
# Ignore non-history entries
if len(key) != key_len:
continue
count += 1
hist = b''.join(item + b'\0' for item in chunks(hist, 4))
batch_put(key, hist)
self.upgrade_cursor = cursor
self.write_state(batch)
return count
last = time.time()
count = 0
for cursor in range(self.upgrade_cursor + 1, 65536):
count += upgrade_cursor(cursor)
now = time.time()
if now > last + 10:
last = now
self.logger.info(f'DB 3 of 3: {count:,d} entries updated, '
f'{cursor * 100 / 65536:.1f}% complete')
self.db_version = max(self.DB_VERSIONS)
self.upgrade_cursor = -1
with self.db.write_batch() as batch:
self.write_state(batch)
self.logger.info('DB 3 of 3 upgraded successfully')
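# A minimal sketch (not part of ElectrumX) of the 5-byte tx_num encoding used
# by add_unflushed() and get_txnums() above: a tx number is stored as the low
# five bytes of a little-endian uint64 and zero-padded back to 8 bytes when
# read.
def _example_txnum_roundtrip(tx_num=123456789):
    tx_numb = pack_le_uint64(tx_num)[:5]
    recovered, = unpack_le_uint64(tx_numb + bytes(3))
    assert recovered == tx_num
    return tx_numb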
|
|
"""
sentry.db.models.manager
~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import hashlib
import logging
import threading
import weakref
from django.conf import settings
from django.db import router
from django.db.models import Manager, Model
from django.db.models.signals import (
post_save, post_delete, post_init, class_prepared)
from django.utils.encoding import smart_str
from sentry.utils.cache import cache
from .query import create_or_update
__all__ = ('BaseManager',)
logger = logging.getLogger('sentry.errors')
class ImmutableDict(dict):
def __setitem__(self, key, value):
raise TypeError
def __delitem__(self, key):
raise TypeError
UNSAVED = ImmutableDict()
def __prep_value(model, key, value):
if isinstance(value, Model):
value = value.pk
else:
value = unicode(value)
return value
def __prep_key(model, key):
if key == 'pk':
return model._meta.pk.name
return key
def make_key(model, prefix, kwargs):
kwargs_bits = []
for k, v in sorted(kwargs.iteritems()):
k = __prep_key(model, k)
v = smart_str(__prep_value(model, k, v))
kwargs_bits.append('%s=%s' % (k, v))
kwargs_bits = ':'.join(kwargs_bits)
return '%s:%s:%s' % (prefix, model.__name__, hashlib.md5(kwargs_bits).hexdigest())
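# Illustrative sketch (not part of Sentry): FakeModel below is a hypothetical
# stand-in exposing just the attributes make_key() touches (``_meta.pk.name``
# and ``__name__``); the resulting key looks like
# 'modelcache:FakeModel:<md5 of "id=1">'.
def _example_make_key():
    class _FakePk(object):
        name = 'id'

    class _FakeMeta(object):
        pk = _FakePk

    class FakeModel(object):
        _meta = _FakeMeta

    return make_key(FakeModel, 'modelcache', {'pk': 1})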
class BaseManager(Manager):
lookup_handlers = {
'iexact': lambda x: x.upper(),
}
use_for_related_fields = True
def __init__(self, *args, **kwargs):
self.cache_fields = kwargs.pop('cache_fields', [])
self.cache_ttl = kwargs.pop('cache_ttl', 60 * 5)
self.__local_cache = threading.local()
super(BaseManager, self).__init__(*args, **kwargs)
def _get_cache(self):
if not hasattr(self.__local_cache, 'value'):
self.__local_cache.value = weakref.WeakKeyDictionary()
return self.__local_cache.value
def _set_cache(self, value):
self.__local_cache.value = value
__cache = property(_get_cache, _set_cache)
def __getstate__(self):
d = self.__dict__.copy()
# we can't serialize weakrefs
d.pop('_BaseManager__cache', None)
d.pop('_BaseManager__local_cache', None)
return d
def __setstate__(self, state):
self.__dict__.update(state)
self.__local_cache = weakref.WeakKeyDictionary()
def __class_prepared(self, sender, **kwargs):
"""
Given the cache is configured, connects the required signals for invalidation.
"""
if not self.cache_fields:
return
post_init.connect(self.__post_init, sender=sender, weak=False)
post_save.connect(self.__post_save, sender=sender, weak=False)
post_delete.connect(self.__post_delete, sender=sender, weak=False)
def __cache_state(self, instance):
"""
Updates the tracked state of an instance.
"""
if instance.pk:
self.__cache[instance] = dict((f, getattr(instance, f)) for f in self.cache_fields)
else:
self.__cache[instance] = UNSAVED
def __post_init(self, instance, **kwargs):
"""
Stores the initial state of an instance.
"""
self.__cache_state(instance)
def __post_save(self, instance, **kwargs):
"""
Pushes changes to an instance into the cache, and removes invalid (changed)
lookup values.
"""
pk_name = instance._meta.pk.name
pk_names = ('pk', pk_name)
pk_val = instance.pk
for key in self.cache_fields:
if key in pk_names:
continue
# store pointers
cache.set(self.__get_lookup_cache_key(**{key: getattr(instance, key)}), pk_val, self.cache_ttl)
# Ensure we don't serialize the database into the cache
db = instance._state.db
instance._state.db = None
# store actual object
try:
cache.set(self.__get_lookup_cache_key(**{pk_name: pk_val}), instance, self.cache_ttl)
except Exception as e:
logger.error(e, exc_info=True)
instance._state.db = db
# Kill off any keys which are no longer valid
if instance in self.__cache:
for key in self.cache_fields:
if key not in self.__cache[instance]:
continue
value = self.__cache[instance][key]
if value != getattr(instance, key):
cache.delete(self.__get_lookup_cache_key(**{key: value}))
self.__cache_state(instance)
def __post_delete(self, instance, **kwargs):
"""
Drops instance from all cache storages.
"""
pk_name = instance._meta.pk.name
for key in self.cache_fields:
if key in ('pk', pk_name):
continue
# remove pointers
cache.delete(self.__get_lookup_cache_key(**{key: getattr(instance, key)}))
# remove actual object
cache.delete(self.__get_lookup_cache_key(**{pk_name: instance.pk}))
def __get_lookup_cache_key(self, **kwargs):
return make_key(self.model, 'modelcache', kwargs)
def contribute_to_class(self, model, name):
super(BaseManager, self).contribute_to_class(model, name)
class_prepared.connect(self.__class_prepared, sender=model)
def get_from_cache(self, **kwargs):
"""
Wrapper around QuerySet.get which supports caching of the
intermediate value. Callee is responsible for making sure
the cache key is cleared on save.
"""
if not self.cache_fields or len(kwargs) > 1:
return self.get(**kwargs)
key, value = kwargs.items()[0]
pk_name = self.model._meta.pk.name
if key == 'pk':
key = pk_name
# Kill __exact since it's the default behavior
if key.endswith('__exact'):
key = key.split('__exact', 1)[0]
if key in self.cache_fields or key == pk_name:
cache_key = self.__get_lookup_cache_key(**{key: value})
retval = cache.get(cache_key)
if retval is None:
result = self.get(**kwargs)
# Ensure we're pushing it into the cache
self.__post_save(instance=result)
return result
# If we didn't look up by pk we need to hit the reffed
# key
if key != pk_name:
return self.get_from_cache(**{pk_name: retval})
if type(retval) != self.model:
if settings.DEBUG:
raise ValueError('Unexpected value type returned from cache')
logger.error('Cache response returned invalid value %r', retval)
return self.get(**kwargs)
retval._state.db = router.db_for_read(self.model, **kwargs)
return retval
else:
return self.get(**kwargs)
def create_or_update(self, **kwargs):
return create_or_update(self.model, **kwargs)
def bind_nodes(self, object_list, *node_names):
from sentry import app
object_node_list = []
for name in node_names:
object_node_list.extend((getattr(i, name) for i in object_list if getattr(i, name).id))
node_ids = [n.id for n in object_node_list]
if not node_ids:
return
node_results = app.nodestore.get_multi(node_ids)
for node in object_node_list:
node.bind_data(node_results.get(node.id) or {})
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Import core names of TensorFlow.
Programs that want to build TensorFlow Ops and Graphs without having to import
the constructors and utilities individually can import this file:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
"""
import ctypes
import importlib
import sys
import traceback
# TODO(drpng): write up instructions for editing this file in a doc and point to
# the doc instead.
# If you want to edit this file to expose modules in public tensorflow API, you
# need to follow these steps:
# 1. Consult with tensorflow team and get approval for adding a new API to the
# public interface.
# 2. Document the module in the gen_docs_combined.py.
# 3. Import the module in the main tensorflow namespace by adding an import
# statement in this file.
# 4. Sanitize the entry point by making sure that your module does not expose
# transitively imported modules used for implementation, such as os, sys.
# go/tf-wildcard-import
# pylint: disable=wildcard-import,g-bad-import-order,g-import-not-at-top
import numpy as np
from tensorflow.python import pywrap_tensorflow
# Protocol buffers
from tensorflow.core.framework.graph_pb2 import *
from tensorflow.core.framework.node_def_pb2 import *
from tensorflow.core.framework.summary_pb2 import *
from tensorflow.core.framework.attr_value_pb2 import *
from tensorflow.core.protobuf.meta_graph_pb2 import TensorInfo
from tensorflow.core.protobuf.meta_graph_pb2 import MetaGraphDef
from tensorflow.core.protobuf.config_pb2 import *
from tensorflow.core.protobuf.tensorflow_server_pb2 import *
from tensorflow.core.util.event_pb2 import *
# Framework
from tensorflow.python.framework.framework_lib import * # pylint: disable=redefined-builtin
from tensorflow.python.framework.versions import *
from tensorflow.python.framework import errors
from tensorflow.python.framework import graph_util
# Session
from tensorflow.python.client.client_lib import *
# Ops
from tensorflow.python.ops.standard_ops import *
# Namespaces
from tensorflow.python.ops import initializers_ns as initializers
# pylint: enable=wildcard-import
# Bring in subpackages.
from tensorflow.python import data
from tensorflow.python import keras
from tensorflow.python.estimator import estimator_lib as estimator
from tensorflow.python.feature_column import feature_column_lib as feature_column
from tensorflow.python.layers import layers
from tensorflow.python.ops import bitwise_ops as bitwise
from tensorflow.python.ops import image_ops as image
from tensorflow.python.ops import manip_ops as manip
from tensorflow.python.ops import metrics
from tensorflow.python.ops import nn
from tensorflow.python.ops import sets
from tensorflow.python.ops import spectral_ops as spectral
from tensorflow.python.ops.distributions import distributions
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.ops.losses import losses
from tensorflow.python.profiler import profiler
from tensorflow.python.saved_model import saved_model
from tensorflow.python.summary import summary
from tensorflow.python.user_ops import user_ops
from tensorflow.python.util import compat
# Import boosted trees ops to make sure the ops are registered (but unused).
from tensorflow.python.ops import gen_boosted_trees_ops as _gen_boosted_trees_ops
# Import cudnn rnn ops to make sure their ops are registered.
from tensorflow.python.ops import gen_cudnn_rnn_ops as _
# Import the names from python/training.py as train.Name.
from tensorflow.python.training import training as train
# Sub-package for performing i/o directly instead of via ops in a graph.
from tensorflow.python.lib.io import python_io
# Make some application and test modules available.
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import sysconfig
from tensorflow.python.platform import test
from tensorflow.python.util.all_util import remove_undocumented
from tensorflow.python.util.all_util import make_all
from tensorflow.python.util.tf_export import tf_export
# Import modules whose docstrings contribute, for use by remove_undocumented
# below.
from tensorflow.python.client import client_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import framework_lib
from tensorflow.python.framework import subscribe
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import confusion_matrix as confusion_matrix_m
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import histogram_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import session_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import tensor_array_ops
# Eager execution
from tensorflow.python.eager.context import executing_eagerly
from tensorflow.python.framework.ops import enable_eager_execution
# Symbols whitelisted for export without documentation.
# TODO(cwhipkey): review these and move to contrib, expose through
# documentation, or remove.
_allowed_symbols = [
'AttrValue',
'ConfigProto',
'ClusterDef',
'DeviceSpec',
'Event',
'GPUOptions',
'GRAPH_DEF_VERSION',
'GRAPH_DEF_VERSION_MIN_CONSUMER',
'GRAPH_DEF_VERSION_MIN_PRODUCER',
'GraphDef',
'GraphOptions',
'HistogramProto',
'LogMessage',
'MetaGraphDef',
'NameAttrList',
'NodeDef',
'OptimizerOptions',
'RunOptions',
'RunMetadata',
'SessionLog',
'Summary',
'SummaryMetadata',
'TensorInfo', # Used for tf.saved_model functionality.
]
# Export protos
# pylint: disable=undefined-variable
tf_export('AttrValue')(AttrValue)
tf_export('ConfigProto')(ConfigProto)
tf_export('Event', 'summary.Event')(Event)
tf_export('GPUOptions')(GPUOptions)
tf_export('GraphDef')(GraphDef)
tf_export('GraphOptions')(GraphOptions)
tf_export('HistogramProto')(HistogramProto)
tf_export('LogMessage')(LogMessage)
tf_export('MetaGraphDef')(MetaGraphDef)
tf_export('NameAttrList')(NameAttrList)
tf_export('NodeDef')(NodeDef)
tf_export('OptimizerOptions')(OptimizerOptions)
tf_export('RunMetadata')(RunMetadata)
tf_export('RunOptions')(RunOptions)
tf_export('SessionLog', 'summary.SessionLog')(SessionLog)
tf_export('Summary', 'summary.Summary')(Summary)
tf_export('summary.SummaryDescription')(SummaryDescription)
tf_export('SummaryMetadata')(SummaryMetadata)
tf_export('summary.TaggedRunMetadata')(TaggedRunMetadata)
tf_export('TensorInfo')(TensorInfo)
# pylint: enable=undefined-variable
# The following symbols are kept for compatibility. It is our plan
# to remove them in the future.
_allowed_symbols.extend([
'arg_max',
'arg_min',
'create_partitioned_variables',
'deserialize_many_sparse',
'lin_space',
'listdiff', # Use tf.listdiff instead.
'parse_single_sequence_example',
'serialize_many_sparse',
'serialize_sparse',
'sparse_matmul', ## use tf.matmul instead.
])
# This is needed temporarily because we import it explicitly.
_allowed_symbols.extend([
'pywrap_tensorflow',
])
# Dtypes exported by framework/dtypes.py.
# TODO(cwhipkey): expose these through documentation.
_allowed_symbols.extend([
'QUANTIZED_DTYPES',
'bfloat16',
'bool',
'complex64',
'complex128',
'double',
'half',
'float16',
'float32',
'float64',
'int16',
'int32',
'int64',
'int8',
'qint16',
'qint32',
'qint8',
'quint16',
'quint8',
'string',
'uint64',
'uint32',
'uint16',
'uint8',
'resource',
'variant',
])
# Export modules and constants.
_allowed_symbols.extend([
'app',
'bitwise',
'compat',
'data',
'distributions',
'errors',
'estimator',
'feature_column',
'flags',
'gfile',
'graph_util',
'image',
'initializers',
'keras',
'layers',
'linalg',
'logging',
'losses',
'manip',
'metrics',
'newaxis',
'nn',
'profiler',
'python_io',
'resource_loader',
'saved_model',
'sets',
'spectral',
'summary',
'sysconfig',
'test',
'train',
'user_ops',
])
# Variables framework.versions:
_allowed_symbols.extend([
'VERSION',
'GIT_VERSION',
'COMPILER_VERSION',
'CXX11_ABI_FLAG',
'MONOLITHIC_BUILD',
])
# Eager execution
_allowed_symbols.extend([
'enable_eager_execution',
'executing_eagerly',
])
# Remove all extra symbols that don't have a docstring or are not explicitly
# referenced in the whitelist.
remove_undocumented(__name__, _allowed_symbols, [
framework_lib, array_ops, check_ops, client_lib, compat, constant_op,
control_flow_ops, confusion_matrix_m, data, distributions,
functional_ops, histogram_ops, io_ops, keras, layers,
losses, math_ops, metrics, nn, profiler, resource_loader, sets, script_ops,
session_ops, sparse_ops, state_ops, string_ops, summary, tensor_array_ops,
train
])
# Special dunders that we choose to export:
_exported_dunders = set([
'__version__',
'__git_version__',
'__compiler_version__',
'__cxx11_abi_flag__',
'__monolithic_build__',
])
# Expose symbols minus dunders, unless they are whitelisted above.
# This is necessary to export our dunders.
__all__ = [s for s in dir() if s in _exported_dunders or not s.startswith('_')]
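# A minimal, hypothetical sketch (not part of TensorFlow) of the filtering
# rule applied just above: public names survive, while underscore-prefixed
# names are dropped unless they are explicitly whitelisted dunders.
def _example_dunder_filter(names, exported_dunders):
    return [s for s in names if s in exported_dunders or not s.startswith('_')]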
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides the tools used to internally run the astropy test suite
from the installed astropy. It makes use of the `pytest`_ testing framework.
"""
import os
import sys
import pickle
import warnings
import functools
import pytest
from astropy.units import allclose as quantity_allclose # noqa: F401
from astropy.utils.decorators import deprecated
from astropy.utils.exceptions import (AstropyDeprecationWarning,
AstropyPendingDeprecationWarning)
# For backward-compatibility with affiliated packages
from .runner import TestRunner # pylint: disable=W0611 # noqa
__all__ = ['assert_follows_unicode_guidelines',
'assert_quantity_allclose', 'check_pickling_recovery',
'pickle_protocol', 'generic_recursive_equality_test']
def _save_coverage(cov, result, rootdir, testing_path):
"""
This method is called after the tests have been run in coverage mode
to clean up and then save the coverage data and report.
"""
from astropy.utils.console import color_print
if result != 0:
return
# The coverage report includes the full path to the temporary
# directory, so we replace all the paths with the true source
# path. Note that this will not work properly for packages that still
# rely on 2to3.
try:
# Coverage 4.0: _harvest_data has been renamed to get_data, the
# lines dict is private
cov.get_data()
except AttributeError:
# Coverage < 4.0
cov._harvest_data()
lines = cov.data.lines
else:
lines = cov.data._lines
for key in list(lines.keys()):
new_path = os.path.relpath(
os.path.realpath(key),
os.path.realpath(testing_path))
new_path = os.path.abspath(
os.path.join(rootdir, new_path))
lines[new_path] = lines.pop(key)
color_print('Saving coverage data in .coverage...', 'green')
cov.save()
color_print('Saving HTML coverage report in htmlcov...', 'green')
cov.html_report(directory=os.path.join(rootdir, 'htmlcov'))
@deprecated('5.1', alternative='pytest.raises')
class raises:
"""
A decorator to mark that a test should raise a given exception.
Use as follows::
@raises(ZeroDivisionError)
def test_foo():
x = 1/0
This can also be used as a context manager, in which case it is just
an alias for the ``pytest.raises`` context manager (because the
two have the same name, this helps avoid confusion by being
flexible).
.. note:: Usage of ``pytest.raises`` is preferred.
"""
# pep-8 naming exception -- this is a decorator class
def __init__(self, exc):
self._exc = exc
self._ctx = None
def __call__(self, func):
@functools.wraps(func)
def run_raises_test(*args, **kwargs):
pytest.raises(self._exc, func, *args, **kwargs)
return run_raises_test
def __enter__(self):
self._ctx = pytest.raises(self._exc)
return self._ctx.__enter__()
def __exit__(self, *exc_info):
return self._ctx.__exit__(*exc_info)
# TODO: Remove these when deprecation period of things deprecated in PR 12633 are removed.
_deprecations_as_exceptions = False
_include_astropy_deprecations = True
_modules_to_ignore_on_import = set([
r'compiler', # A deprecated stdlib module used by pytest
r'scipy',
r'pygments',
r'ipykernel',
r'IPython', # deprecation warnings for async and await
r'setuptools'])
_warnings_to_ignore_entire_module = set([])
_warnings_to_ignore_by_pyver = {
None: set([ # Python version agnostic
# https://github.com/astropy/astropy/pull/7372
(r"Importing from numpy\.testing\.decorators is deprecated, "
r"import from numpy\.testing instead\.", DeprecationWarning),
# inspect raises this slightly different warning on Python 3.7.
# Keeping it since e.g. lxml as of 3.8.0 is still calling getargspec()
(r"inspect\.getargspec\(\) is deprecated, use "
r"inspect\.signature\(\) or inspect\.getfullargspec\(\)",
DeprecationWarning),
# https://github.com/astropy/pytest-doctestplus/issues/29
(r"split\(\) requires a non-empty pattern match", FutureWarning),
# Package resolution warning that we can do nothing about
(r"can't resolve package from __spec__ or __package__, "
r"falling back on __name__ and __path__", ImportWarning)]),
(3, 7): set([
# Deprecation warning for collections.abc, fixed in Astropy but still
# used in lxml, and maybe others
(r"Using or importing the ABCs from 'collections'",
DeprecationWarning)])
}
@deprecated('5.1', alternative='https://docs.pytest.org/en/stable/warnings.html')
def enable_deprecations_as_exceptions(include_astropy_deprecations=True,
modules_to_ignore_on_import=[],
warnings_to_ignore_entire_module=[],
warnings_to_ignore_by_pyver={}):
"""
Turn on the feature that turns deprecations into exceptions.
Parameters
----------
include_astropy_deprecations : bool
If set to `True`, ``AstropyDeprecationWarning`` and
``AstropyPendingDeprecationWarning`` are also turned into exceptions.
modules_to_ignore_on_import : list of str
List of additional modules that generate deprecation warnings
on import, which are to be ignored. By default, these are already
included: ``compiler``, ``scipy``, ``pygments``, ``ipykernel``,
``IPython``, and ``setuptools``.
warnings_to_ignore_entire_module : list of str
List of modules with deprecation warnings to ignore completely,
not just during import. If ``include_astropy_deprecations=True``
is given, ``AstropyDeprecationWarning`` and
``AstropyPendingDeprecationWarning`` are also ignored for the modules.
warnings_to_ignore_by_pyver : dict
Dictionary mapping tuple of ``(major, minor)`` Python version to
a list of ``(warning_message, warning_class)`` to ignore.
Python version-agnostic warnings should be mapped to `None` key.
This is in addition to those already ignored by default
(see ``_warnings_to_ignore_by_pyver`` values).
"""
global _deprecations_as_exceptions
_deprecations_as_exceptions = True
global _include_astropy_deprecations
_include_astropy_deprecations = include_astropy_deprecations
global _modules_to_ignore_on_import
_modules_to_ignore_on_import.update(modules_to_ignore_on_import)
global _warnings_to_ignore_entire_module
_warnings_to_ignore_entire_module.update(warnings_to_ignore_entire_module)
global _warnings_to_ignore_by_pyver
for key, val in warnings_to_ignore_by_pyver.items():
if key in _warnings_to_ignore_by_pyver:
_warnings_to_ignore_by_pyver[key].update(val)
else:
_warnings_to_ignore_by_pyver[key] = set(val)
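# A minimal usage sketch (not part of astropy); ``noisy_dependency`` is a
# hypothetical package name, and the (message regex, warning class) pair
# follows the ``warnings_to_ignore_by_pyver`` structure documented above.
def _example_enable_deprecations_usage():
    enable_deprecations_as_exceptions(
        include_astropy_deprecations=False,
        modules_to_ignore_on_import=['noisy_dependency'],
        warnings_to_ignore_by_pyver={
            None: [(r'.*some deprecated alias.*', DeprecationWarning)]})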
@deprecated('5.1', alternative='https://docs.pytest.org/en/stable/warnings.html')
def treat_deprecations_as_exceptions():
"""
Turn all DeprecationWarnings (which indicate deprecated uses of
Python itself or Numpy, but not within Astropy, where we use our
own deprecation warning class) into exceptions so that we find
out about them early.
This completely resets the warning filters and any "already seen"
warning state.
"""
# First, totally reset the warning state. The modules may change during
# this iteration thus we copy the original state to a list to iterate
# on. See https://github.com/astropy/astropy/pull/5513.
for module in list(sys.modules.values()):
try:
del module.__warningregistry__
except Exception:
pass
if not _deprecations_as_exceptions:
return
warnings.resetwarnings()
# Hide the next couple of DeprecationWarnings
warnings.simplefilter('ignore', DeprecationWarning)
# Here's the wrinkle: a couple of our third-party dependencies
# (pytest and scipy) are still using deprecated features
# themselves, and we'd like to ignore those. Fortunately, those
# show up only at import time, so if we import those things *now*,
# before we turn the warnings into exceptions, we're golden.
for m in _modules_to_ignore_on_import:
try:
__import__(m)
except ImportError:
pass
# Now, start over again with the warning filters
warnings.resetwarnings()
# Now, turn these warnings into exceptions
_all_warns = [DeprecationWarning, FutureWarning, ImportWarning]
# Only turn astropy deprecation warnings into exceptions if requested
if _include_astropy_deprecations:
_all_warns += [AstropyDeprecationWarning,
AstropyPendingDeprecationWarning]
for w in _all_warns:
warnings.filterwarnings("error", ".*", w)
# This ignores all specified warnings from given module(s),
    # not just on import, for use by Astropy affiliated packages.
for m in _warnings_to_ignore_entire_module:
for w in _all_warns:
warnings.filterwarnings('ignore', category=w, module=m)
# This ignores only specified warnings by Python version, if applicable.
for v in _warnings_to_ignore_by_pyver:
if v is None or sys.version_info[:2] == v:
for s in _warnings_to_ignore_by_pyver[v]:
warnings.filterwarnings("ignore", s[0], s[1])
@deprecated('5.1', alternative='pytest.warns')
class catch_warnings(warnings.catch_warnings):
"""
A high-powered version of warnings.catch_warnings to use for testing
and to make sure that there is no dependence on the order in which
the tests are run.
This completely blitzes any memory of any warnings that have
appeared before so that all warnings will be caught and displayed.
``*args`` is a set of warning classes to collect. If no arguments are
provided, all warnings are collected.
Use as follows::
with catch_warnings(MyCustomWarning) as w:
do.something.bad()
assert len(w) > 0
.. note:: Usage of :ref:`pytest.warns <pytest:warns>` is preferred.
"""
def __init__(self, *classes):
super().__init__(record=True)
self.classes = classes
def __enter__(self):
warning_list = super().__enter__()
treat_deprecations_as_exceptions()
if len(self.classes) == 0:
warnings.simplefilter('always')
else:
warnings.simplefilter('ignore')
for cls in self.classes:
warnings.simplefilter('always', cls)
return warning_list
def __exit__(self, type, value, traceback):
treat_deprecations_as_exceptions()
@deprecated('5.1', alternative='pytest.mark.filterwarnings')
class ignore_warnings(catch_warnings):
"""
This can be used either as a context manager or function decorator to
ignore all warnings that occur within a function or block of code.
An optional category option can be supplied to only ignore warnings of a
certain category or categories (if a list is provided).
"""
def __init__(self, category=None):
super().__init__()
if isinstance(category, type) and issubclass(category, Warning):
self.category = [category]
else:
self.category = category
def __call__(self, func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
# Originally this just reused self, but that doesn't work if the
# function is called more than once so we need to make a new
# context manager instance for each call
with self.__class__(category=self.category):
return func(*args, **kwargs)
return wrapper
def __enter__(self):
retval = super().__enter__()
if self.category is not None:
for category in self.category:
warnings.simplefilter('ignore', category)
else:
warnings.simplefilter('ignore')
return retval
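# Usage sketch (illustrative only): ``ignore_warnings`` works both as a
# context manager, as shown below, and as a decorator
# (``@ignore_warnings(category=UserWarning)``). The warning emitted here is a
# made-up example.
def _example_ignore_warnings():
    with ignore_warnings(DeprecationWarning):
        warnings.warn("noisy but irrelevant", DeprecationWarning)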
def assert_follows_unicode_guidelines(
x, roundtrip=None):
"""
Test that an object follows our Unicode policy. See
"Unicode guidelines" in the coding guidelines.
Parameters
----------
x : object
The instance to test
roundtrip : module, optional
When provided, this namespace will be used to evaluate
        ``repr(x)`` and ensure that it round-trips. It will also
        ensure that ``bytes(x)`` round-trips.
If not provided, no roundtrip testing will be performed.
"""
from astropy import conf
with conf.set_temp('unicode_output', False):
bytes_x = bytes(x)
unicode_x = str(x)
repr_x = repr(x)
assert isinstance(bytes_x, bytes)
bytes_x.decode('ascii')
assert isinstance(unicode_x, str)
unicode_x.encode('ascii')
assert isinstance(repr_x, str)
if isinstance(repr_x, bytes):
repr_x.decode('ascii')
else:
repr_x.encode('ascii')
if roundtrip is not None:
assert x.__class__(bytes_x) == x
assert x.__class__(unicode_x) == x
assert eval(repr_x, roundtrip) == x
with conf.set_temp('unicode_output', True):
bytes_x = bytes(x)
unicode_x = str(x)
repr_x = repr(x)
assert isinstance(bytes_x, bytes)
bytes_x.decode('ascii')
assert isinstance(unicode_x, str)
assert isinstance(repr_x, str)
if isinstance(repr_x, bytes):
repr_x.decode('ascii')
else:
repr_x.encode('ascii')
if roundtrip is not None:
assert x.__class__(bytes_x) == x
assert x.__class__(unicode_x) == x
assert eval(repr_x, roundtrip) == x
@pytest.fixture(params=[0, 1, -1])
def pickle_protocol(request):
"""
    Fixture to run all the tests for protocols 0, 1, and -1 (the highest available).
(Originally from astropy.table.tests.test_pickle)
"""
return request.param
def generic_recursive_equality_test(a, b, class_history):
"""
Check if the attributes of a and b are equal. Then,
check if the attributes of the attributes are equal.
"""
dict_a = a.__getstate__() if hasattr(a, '__getstate__') else a.__dict__
dict_b = b.__dict__
for key in dict_a:
assert key in dict_b,\
f"Did not pickle {key}"
if hasattr(dict_a[key], '__eq__'):
eq = (dict_a[key] == dict_b[key])
if '__iter__' in dir(eq):
eq = (False not in eq)
assert eq, f"Value of {key} changed by pickling"
if hasattr(dict_a[key], '__dict__'):
if dict_a[key].__class__ in class_history:
# attempt to prevent infinite recursion
pass
else:
new_class_history = [dict_a[key].__class__]
new_class_history.extend(class_history)
generic_recursive_equality_test(dict_a[key],
dict_b[key],
new_class_history)
def check_pickling_recovery(original, protocol):
"""
Try to pickle an object. If successful, make sure
the object's attributes survived pickling and unpickling.
"""
f = pickle.dumps(original, protocol=protocol)
unpickled = pickle.loads(f)
class_history = [original.__class__]
generic_recursive_equality_test(original, unpickled,
class_history)
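# Illustrative sketch (not part of this module): how the ``pickle_protocol``
# fixture and ``check_pickling_recovery`` are typically combined in a test
# module. ``argparse.Namespace`` merely stands in for an object under test; in
# a real test the function would be named ``test_...`` so pytest collects it.
def _example_pickle_roundtrip(pickle_protocol):
    from argparse import Namespace
    obj = Namespace(a=1, b='two')
    check_pickling_recovery(obj, pickle_protocol)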
def assert_quantity_allclose(actual, desired, rtol=1.e-7, atol=None,
**kwargs):
"""
Raise an assertion if two objects are not equal up to desired tolerance.
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.testing.assert_allclose`.
"""
import numpy as np
from astropy.units.quantity import _unquantify_allclose_arguments
np.testing.assert_allclose(*_unquantify_allclose_arguments(
actual, desired, rtol, atol), **kwargs)
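# Usage sketch (illustrative only): comparing dimensionally equivalent
# quantities expressed in different units; the second call loosens the
# relative tolerance.
def _example_assert_quantity_allclose():
    from astropy import units as u
    assert_quantity_allclose(1000. * u.m, 1. * u.km)
    assert_quantity_allclose(1.0 * u.s, 1.001 * u.s, rtol=1e-2)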
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.forms import ValidationError
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard.api import cinder
from openstack_dashboard.api import keystone
class CreateVolumeType(forms.SelfHandlingForm):
name = forms.CharField(max_length=255, label=_("Name"))
vol_type_description = forms.CharField(
max_length=255,
widget=forms.Textarea(attrs={'rows': 4}),
label=_("Description"),
required=False)
is_public = forms.BooleanField(
label=_("Public"),
initial=True,
required=False,
help_text=_("By default, volume type is created as public. To "
"create a private volume type, uncheck this field."))
def clean_name(self):
cleaned_name = self.cleaned_data['name']
if cleaned_name.isspace():
            raise ValidationError(_('Volume type name cannot be empty.'))
return cleaned_name
def handle(self, request, data):
try:
            # Create the volume type with the given name, description and
            # visibility.
volume_type = cinder.volume_type_create(
request,
data['name'],
data['vol_type_description'],
data['is_public'])
messages.success(request, _('Successfully created volume type: %s')
% data['name'])
return volume_type
except Exception as e:
if getattr(e, 'code', None) == 409:
msg = _('Volume type name "%s" already '
'exists.') % data['name']
self._errors['name'] = self.error_class([msg])
else:
redirect = reverse("horizon:admin:volume_types:index")
exceptions.handle(request,
_('Unable to create volume type.'),
redirect=redirect)
class CreateQosSpec(forms.SelfHandlingForm):
name = forms.CharField(max_length=255, label=_("Name"))
consumer = forms.ThemableChoiceField(label=_("Consumer"),
choices=cinder.CONSUMER_CHOICES)
def handle(self, request, data):
try:
qos_spec = cinder.qos_spec_create(request,
data['name'],
{'consumer': data['consumer']})
messages.success(request,
_('Successfully created QoS Spec: %s')
% data['name'])
return qos_spec
except Exception as ex:
if getattr(ex, 'code', None) == 409:
msg = _('QoS Spec name "%s" already '
'exists.') % data['name']
self._errors['name'] = self.error_class([msg])
else:
redirect = reverse("horizon:admin:volume_types:index")
exceptions.handle(request,
_('Unable to create QoS Spec.'),
redirect=redirect)
class CreateVolumeTypeEncryption(forms.SelfHandlingForm):
name = forms.CharField(label=_("Name"), required=False,
widget=forms.TextInput(attrs={'readonly':
'readonly'}))
provider = forms.CharField(max_length=255, label=_("Provider"))
control_location = forms.ThemableChoiceField(label=_("Control Location"),
choices=(('front-end',
_('front-end')),
('back-end',
_('back-end')))
)
cipher = forms.CharField(label=_("Cipher"), required=False)
key_size = forms.IntegerField(label=_("Key Size (bits)"),
required=False,
min_value=1)
volume_type_id = forms.CharField(widget=forms.HiddenInput())
def handle(self, request, data):
try:
# Set Cipher to None if empty
if data['cipher'] == u'':
data['cipher'] = None
volume_type_id = data.pop('volume_type_id')
volume_type_name = data.pop('name')
# Create encryption for the volume type
volume_type = cinder.\
volume_encryption_type_create(request,
volume_type_id,
data)
messages.success(request, _('Successfully created encryption for '
'volume type: %s') % volume_type_name)
return volume_type
except Exception:
redirect = reverse("horizon:admin:volume_types:index")
exceptions.handle(request,
_('Unable to create encrypted volume type.'),
redirect=redirect)
class UpdateVolumeTypeEncryption(CreateVolumeTypeEncryption):
def handle(self, request, data):
try:
# Set Cipher to None if empty
if data['cipher'] == u'':
data['cipher'] = None
volume_type_id = data.pop('volume_type_id')
volume_type_name = data.pop('name')
# Update encryption for the volume type
volume_type = cinder.\
volume_encryption_type_update(request,
volume_type_id,
data)
messages.success(request, _('Successfully updated encryption for '
'volume type: %s') % volume_type_name)
return volume_type
except NotImplementedError:
            messages.error(request, _('Updating encryption is not '
                                      'implemented. Unable to update '
                                      'encrypted volume type.'))
except Exception:
redirect = reverse("horizon:admin:volume_types:index")
exceptions.handle(request,
_('Unable to update encrypted volume type.'),
redirect=redirect)
return False
class ManageQosSpecAssociation(forms.SelfHandlingForm):
qos_spec_choice = forms.ThemableChoiceField(
label=_("QoS Spec to be associated"),
help_text=_("Choose associated QoS Spec."))
def __init__(self, request, *args, **kwargs):
super(ManageQosSpecAssociation, self).__init__(request,
*args,
**kwargs)
qos_spec_field = self.fields['qos_spec_choice']
qos_spec_field.choices = \
self.populate_qos_spec_choices()
def populate_qos_spec_choices(self):
# populate qos spec list box
qos_specs = self.initial["qos_specs"]
current_qos_spec = self.initial["cur_qos_spec_id"]
qos_spec_list = [(qos_spec.id, qos_spec.name)
for qos_spec in qos_specs
if qos_spec.id != current_qos_spec]
if current_qos_spec:
# used to remove the current spec
qos_spec_list.insert(0, ("-1", _("None (removes spec)")))
if qos_spec_list:
qos_spec_list.insert(0, ("", _("Select a new QoS spec")))
else:
qos_spec_list.insert(0, ("", _("No new QoS spec available")))
return qos_spec_list
def handle(self, request, data):
vol_type_id = self.initial['type_id']
new_qos_spec_id = data['qos_spec_choice']
# Update QOS Spec association information
try:
# NOTE - volume types can only be associated with
# ONE QOS Spec at a time
# first we need to un-associate the current QOS Spec, if it exists
cur_qos_spec_id = self.initial['cur_qos_spec_id']
if cur_qos_spec_id:
qos_spec = cinder.qos_spec_get(request,
cur_qos_spec_id)
cinder.qos_spec_disassociate(request,
qos_spec,
vol_type_id)
# now associate with new QOS Spec, if user wants one associated
if new_qos_spec_id != '-1':
qos_spec = cinder.qos_spec_get(request,
new_qos_spec_id)
cinder.qos_spec_associate(request,
qos_spec,
vol_type_id)
messages.success(request,
_('Successfully updated QoS Spec association.'))
return True
except Exception:
redirect = reverse("horizon:admin:volume_types:index")
exceptions.handle(request,
_('Error updating QoS Spec association.'),
redirect=redirect)
class EditQosSpecConsumer(forms.SelfHandlingForm):
current_consumer = forms.CharField(label=_("Current consumer"),
widget=forms.TextInput(
attrs={'readonly': 'readonly'}),
required=False)
consumer_choice = forms.ThemableChoiceField(
label=_("New QoS Spec Consumer"),
choices=cinder.CONSUMER_CHOICES,
help_text=_("Choose consumer for this QoS Spec."))
def __init__(self, request, *args, **kwargs):
super(EditQosSpecConsumer, self).__init__(request, *args, **kwargs)
consumer_field = self.fields['consumer_choice']
qos_spec = self.initial["qos_spec"]
self.fields['current_consumer'].initial = qos_spec.consumer
choices = [choice for choice in cinder.CONSUMER_CHOICES
if choice[0] != qos_spec.consumer]
choices.insert(0, ("", _("Select a new consumer")))
consumer_field.choices = choices
def handle(self, request, data):
qos_spec_id = self.initial['qos_spec_id']
new_consumer = data['consumer_choice']
# Update QOS Spec consumer information
try:
cinder.qos_spec_set_keys(request,
qos_spec_id,
{'consumer': new_consumer})
messages.success(request,
_('Successfully modified QoS Spec consumer.'))
return True
except Exception:
redirect = reverse("horizon:admin:volume_types:index")
exceptions.handle(request, _('Error editing QoS Spec consumer.'),
redirect=redirect)
class EditVolumeType(forms.SelfHandlingForm):
name = forms.CharField(max_length=255,
label=_("Name"))
description = forms.CharField(max_length=255,
widget=forms.Textarea(attrs={'rows': 4}),
label=_("Description"),
required=False)
is_public = forms.BooleanField(label=_("Public"), required=False,
help_text=_(
"To make volume type private, uncheck "
"this field."))
def clean_name(self):
cleaned_name = self.cleaned_data['name']
if cleaned_name.isspace():
msg = _('New name cannot be empty.')
self._errors['name'] = self.error_class([msg])
return cleaned_name
def handle(self, request, data):
volume_type_id = self.initial['id']
try:
cinder.volume_type_update(request,
volume_type_id,
data['name'],
data['description'],
data['is_public'])
message = _('Successfully updated volume type.')
messages.success(request, message)
return True
except Exception as ex:
redirect = reverse("horizon:admin:volume_types:index")
            if getattr(ex, 'code', None) == 409:
error_message = _('New name conflicts with another '
'volume type.')
else:
error_message = _('Unable to update volume type.')
exceptions.handle(request, error_message,
redirect=redirect)
class EditTypeAccessForm(forms.SelfHandlingForm):
def __init__(self, request, *args, **kwargs):
super(EditTypeAccessForm, self).__init__(request, *args, **kwargs)
err_msg = _('Unable to retrieve volume type access list.')
self.fields["member"] = forms.MultipleChoiceField(
required=False,
widget=forms.ThemableCheckboxSelectMultiple())
# Get list of available projects.
try:
all_projects, has_more = keystone.tenant_list(request)
except Exception:
exceptions.handle(request, err_msg)
projects_list = [(project.id, project.name)
for project in all_projects]
self.fields["member"].choices = projects_list
volume_type_id = self.initial.get('volume_type_id')
volume_type_access = []
try:
if volume_type_id:
volume_type = cinder.volume_type_get(request,
volume_type_id)
if not volume_type.is_public:
volume_type_access = [
project.project_id for project in
cinder.volume_type_access_list(request,
volume_type_id)]
except Exception:
exceptions.handle(request, err_msg)
self.fields["member"].initial = volume_type_access
def handle(self, request, data):
type_id = self.initial['volume_type_id']
        current_projects = self.fields["member"].initial
        # Copy the list so that removing granted projects below does not also
        # mutate the field's initial data while membership is still checked.
        removed_projects = list(current_projects)
for p in data['member']:
if p not in current_projects:
# Newly added project access
try:
cinder.volume_type_add_project_access(request, type_id, p)
except Exception:
exceptions.handle(request,
_('Failed to add project %(project)s to '
'volume type access.') %
{'project': p})
else:
removed_projects.remove(p)
for p in removed_projects:
try:
cinder.volume_type_remove_project_access(request, type_id, p)
except Exception:
exceptions.handle(request, _('Failed to remove project '
'%(project)s from volume type '
'access.') % {'project': p})
messages.success(request,
_('Modified volume type access: %s') % type_id)
return True
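# Worked example of the membership diff in EditTypeAccessForm.handle() above
# (illustrative project ids): if the volume type currently grants access to
# ['p1', 'p2'] and the form submits member=['p2', 'p3'], then 'p3' is passed
# to volume_type_add_project_access(), 'p1' is passed to
# volume_type_remove_project_access(), and 'p2' is left untouched.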
|
|
import colorama
import logging
import os
import sys
import traceback
import warnings
from logging import (getLogger, CRITICAL, ERROR, WARNING, INFO, # noqa: F401
DEBUG)
from traceback import FrameSummary
from .iterutils import tween
from .platforms.host import platform_info
from .safe_str import safe_string
class UserDeprecationWarning(DeprecationWarning):
pass
def _path_within(path, parent):
try:
rel = os.path.relpath(path, parent)
except ValueError:
return False
return not rel.startswith(os.pardir + os.sep)
def _is_user_src(filename):
# On Windows, always treat paths within Python's exec_prefix as non-user
# paths. This lets us correctly identify things like runpy.py and
# setuptools wrappers as non-user.
if ( platform_info().family == 'windows' and
_path_within(filename, sys.exec_prefix) ):
return False
return not _path_within(filename, os.path.dirname(__file__))
def _filter_stack(stack):
# Find where the user's stack frames begin and end.
gen = enumerate(stack)
for start, line in gen:
if _is_user_src(line[0]):
break
else:
start = len(stack)
for end, line in gen:
if not _is_user_src(line[0]):
break
else:
end = len(stack)
return stack[:start], stack[start:end], stack[end:]
def _format_stack(stack, user=False):
if len(stack) == 0:
return ''
if user:
stack = [FrameSummary(os.path.relpath(i.filename), i.lineno, i.name,
locals=i.locals, line=i.line) for i in stack]
# Put the newline at the beginning, since this helps our formatting later.
return '\n' + ''.join(traceback.format_list(stack)).rstrip()
class StackFilter:
def __init__(self, has_stack=True):
self.has_stack = has_stack
def filter(self, record):
has_stack = bool((record.exc_info and record.exc_info[0]) or
getattr(record, 'show_stack', False))
return has_stack == self.has_stack
class ColoredStreamHandler(logging.StreamHandler):
_format_codes = {
DEBUG: '1;35',
INFO: '1;34',
WARNING: '1;33',
ERROR: '1;31',
CRITICAL: '1;41;37',
}
def format(self, record):
record.coloredlevel = '\033[{format}m{name}\033[0m'.format(
format=self._format_codes.get(record.levelno, '1'),
name=record.levelname.lower()
)
return super().format(record)
class StackfulStreamHandler(ColoredStreamHandler):
def __init__(self, *args, debug=False, **kwargs):
self.debug = debug
super().__init__(*args, **kwargs)
def emit(self, record):
if record.exc_info:
if isinstance(record.exc_info[1], SyntaxError):
e = record.exc_info[1]
record.msg = e.msg
# Figure out where to put the caret.
text = e.text.expandtabs().rstrip()
dedent = len(text) - len(text.lstrip())
offset = 4 - dedent - 1 + e.offset
record.full_stack = [
FrameSummary(e.filename, e.lineno, '<module>',
line=e.text + '\n' + ' ' * offset + '^')
]
else:
if not record.msg:
record.msg = record.exc_info[0].__name__
elif self.debug:
record.msg = '{}: {}'.format(record.exc_info[0].__name__,
record.msg)
record.full_stack = traceback.extract_tb(record.exc_info[2])
record.exc_info = None
pre, stack, post = _filter_stack(record.full_stack)
record.stack_pre = _format_stack(pre)
record.stack = _format_stack(stack, user=True)
record.stack_post = _format_stack(post)
if len(stack):
record.user_pathname = os.path.relpath(stack[-1][0])
record.user_lineno = stack[-1][1]
else:
record.user_pathname = record.pathname
record.user_lineno = record.lineno
if len(stack) or self.debug:
return super().emit(record)
record.show_stack = False
logging.root.handle(record)
def _clicolor(environ):
if environ.get('CLICOLOR_FORCE', '0') != '0':
return 'always'
if 'CLICOLOR' in environ:
return 'never' if environ['CLICOLOR'] == '0' else 'auto'
return None
def _init_logging(logger, debug, stream=None):
logger.setLevel(logging.DEBUG if debug else logging.INFO)
stackless = ColoredStreamHandler(stream)
stackless.addFilter(StackFilter(has_stack=False))
fmt = '%(coloredlevel)s: %(message)s'
stackless.setFormatter(logging.Formatter(fmt))
logger.addHandler(stackless)
stackful = StackfulStreamHandler(stream, debug=debug)
stackful.addFilter(StackFilter(has_stack=True))
fmt = '%(coloredlevel)s: %(user_pathname)s:%(user_lineno)d: %(message)s'
if debug:
fmt += '\033[90m%(stack_pre)s\033[0m'
fmt += '%(stack)s'
if debug:
fmt += '\033[90m%(stack_post)s\033[0m'
stackful.setFormatter(logging.Formatter(fmt))
logger.addHandler(stackful)
def init(color='auto', debug=False, warn_once=False, environ=os.environ):
color = _clicolor(environ) or color
if color == 'always':
colorama.init(strip=False)
elif color == 'never':
colorama.init(strip=True, convert=False)
else: # color == 'auto'
colorama.init()
warnings.filterwarnings('default', category=UserDeprecationWarning)
if warn_once:
warnings.filterwarnings('once')
_init_logging(logging.root, debug)
def log_stack(level, message, *args, logger=logging, stacklevel=0,
show_stack=True, **kwargs):
extra = {
'full_stack': traceback.extract_stack()[1:-1 - stacklevel],
'show_stack': show_stack
}
logger.log(level, message, *args, extra=extra, **kwargs)
def format_message(*args):
def str_implemented(s):
try:
str(s)
return True
except NotImplementedError:
return False
message = ''
for i in tween(args, ' '):
if isinstance(i, safe_string) and not str_implemented(i):
message += repr(i)
else:
message += str(i)
return message
def log_message(level, *args, logger=logging, stacklevel=0, **kwargs):
stacklevel += 1
log_stack(level, format_message(*args), logger=logger,
stacklevel=stacklevel, **kwargs)
def info(*args, show_stack=False):
log_message(INFO, *args, show_stack=show_stack, stacklevel=1)
def debug(*args, show_stack=True):
log_message(DEBUG, *args, show_stack=show_stack, stacklevel=1)
def _showwarning(message, category, filename, lineno, file=None, line=None):
# Python 3.6 changes how stacklevel is counted.
stacklevel = 2 if sys.version_info >= (3, 6) else 1
log_stack(WARNING, message, stacklevel=stacklevel)
warnings.showwarning = _showwarning
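# Usage sketch (illustrative only; assumes this module is importable as part
# of its package): initialise colored logging once, then log through the
# convenience wrappers. ``init()`` is normally called by the CLI entry point.
def _example_logging_usage():
    init(color='auto', debug=False)
    info('building', 'my-target')              # plain, stackless message
    log_stack(WARNING, 'something looks off')  # message with a user stack trace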
|
|
import os
import sys
def main(args):
if len(args) != 2:
print("Usage: python project-diff.py [path-to-project-1] [path-to-project-2]")
return
dir1 = args[0]
dir2 = args[1]
project1 = collect_text_files(dir1)
project2 = collect_text_files(dir2)
files_only_in_1 = []
files_only_in_2 = []
files_in_both = []
perform_venn_analysis(set(project1.keys()), set(project2.keys()), files_only_in_1, files_only_in_2, files_in_both)
if len(files_only_in_1) > 0:
print("The following files are only in Project 1:")
for file in files_only_in_1:
print(" " + file)
print("")
if len(files_only_in_2) > 0:
print("The following files are only in Project 2:")
for file in files_only_in_2:
print(" " + file)
print("")
print(str(len(files_in_both)) + " files in both projects.")
print("")
files_in_both.sort()
files_with_diffs = []
for file in files_in_both:
text_1 = project1[file]
text_2 = project2[file]
diff = perform_diff(text_1, text_2)
if len(diff) > 0:
files_with_diffs.append(file)
print("There's a difference in " + file)
print("\n".join(diff))
print("")
if len(files_with_diffs) == 0:
print("No files with text differences.")
else:
print("Diffs were in the following files:")
print("\n".join(files_with_diffs))
print("")
def perform_venn_analysis(set_a, set_b, only_in_a_out, only_in_b_out, in_both_out):
for item in set_a:
if item not in set_b:
only_in_a_out.append(item)
else:
in_both_out.append(item)
for item in set_b:
if item not in set_a:
only_in_b_out.append(item)
def collect_text_files(root):
output = {}
root = root.replace('\\', '/')
if root.endswith('/'):
root = root[:-1]
collect_text_files_impl(root, '', output)
return output
def get_file_extension(file):
if '.' in file:
return file.split('.')[-1].lower()
return ''
FILE_EXTENSION_IGNORE_LIST = set([
'png', 'jpg',
'xcuserstate',
])
def is_text_file(path):
ext = get_file_extension(path)
return ext not in FILE_EXTENSION_IGNORE_LIST
def collect_text_files_impl(root, current_dir, output):
full_dir = root
if current_dir != '':
full_dir += '/' + current_dir
for file in os.listdir(full_dir.replace('/', os.sep)):
full_file = full_dir + '/' + file
if os.path.isdir(full_file.replace('/', os.sep)):
next_cd = file if current_dir == '' else (current_dir + '/' + file)
collect_text_files_impl(root, next_cd, output)
else:
rel_file = file if current_dir == '' else (current_dir + '/' + file)
if is_text_file(rel_file):
c = open(full_file.replace('/', os.sep), 'rt')
text = c.read()
c.close()
output[rel_file] = text
else:
output[rel_file] = '\n'.join([
"Binary file:",
"size X", # TODO: get file size
"first 20 bytes: ...", # TODO: this
"last 20 bytes: ...", # TODO: do this as well
])
def perform_diff(text_1, text_2):
if text_1 == text_2:
return []
lines_1 = text_1.split('\n')
lines_2 = text_2.split('\n')
trimmed_front = 0
trimmed_back = 0
# Remove identical lines at the beginning and end of the file
while len(lines_1) > trimmed_front and len(lines_2) > trimmed_front and lines_1[trimmed_front] == lines_2[trimmed_front]:
trimmed_front += 1
lines_1 = lines_1[trimmed_front:]
lines_2 = lines_2[trimmed_front:]
while len(lines_1) > trimmed_back and len(lines_2) > trimmed_back and lines_1[-1 - trimmed_back] == lines_2[-1 - trimmed_back]:
trimmed_back += 1
    # A slice of [:-0] would wrongly empty the lists, so only trim when a
    # shared suffix was actually found.
    if trimmed_back:
        lines_1 = lines_1[:-trimmed_back]
        lines_2 = lines_2[:-trimmed_back]
length_1 = len(lines_1)
length_2 = len(lines_2)
grid = []
for x in range(length_2 + 1):
column = []
for y in range(length_1 + 1):
column.append(None)
grid.append(column)
    # Perform a Levenshtein-style diff.
# each grid cell will consist of a tuple: (diff-size, previous-path: up|left|diag)
# Each step to the right indicates taking a line from lines 2
# Each step downwards indicates taking a line from lines 1
# Prepopulate the left and top rows indicating starting the diff by removing all
# lines from lines 1 and adding all lines from lines 2.
for x in range(length_2 + 1):
grid[x][0] = (x, 'left')
for y in range(length_1 + 1):
grid[0][y] = (y, 'up')
grid[0][0] = (0, 'diag')
# Populate the grid. Figure out the minimum diff to get to each point.
for y in range(1, length_1 + 1):
for x in range(1, length_2 + 1):
if lines_1[y - 1] == lines_2[x - 1]:
grid[x][y] = (grid[x - 1][y - 1][0], 'diag')
elif (grid[x - 1][y][0] <= grid[x][y - 1][0]):
grid[x][y] = (grid[x - 1][y][0] + 1, 'left')
else:
grid[x][y] = (grid[x][y - 1][0] + 1, 'up')
# Start from the bottom right corner and walk backwards to the origin
x = length_2
y = length_1
diff_chain = []
ellipsis_used = False
    # Walk back until the origin; "or" (rather than "and") ensures any
    # remaining additions or removals along the top/left edge are still emitted.
    while x != 0 or y != 0:
node = grid[x][y]
if node[1] == 'diag':
if not ellipsis_used:
diff_chain.append('...')
ellipsis_used = True
x -= 1
y -= 1
elif node[1] == 'left':
diff_chain.append('+ [' + str(trimmed_front + x) + '] ' + lines_2[x - 1])
x -= 1
ellipsis_used = False
else:
diff_chain.append('- [' + str(trimmed_front + y) + '] ' + lines_1[y - 1])
y -= 1
ellipsis_used = False
diff_chain.reverse()
return diff_chain
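# Illustrative sketch (not invoked by this script): a tiny diff showing the
# "+ [line] text" / "- [line] text" output format produced by perform_diff().
def _example_perform_diff():
    before = "alpha\nbeta\ngamma"
    after = "alpha\ndelta\ngamma"
    for line in perform_diff(before, after):
        print(line)   # prints: '- [2] beta' and '+ [2] delta'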
main(sys.argv[1:])
|
|
from coapthon.utils import parse_blockwise
from coapthon import defines
from coapthon.messages.option import Option
__author__ = 'Giacomo Tanganelli'
class Message(object):
"""
Class to handle the Messages.
"""
def __init__(self):
"""
Data structure that represent a CoAP message
"""
self._type = None
self._mid = None
self._token = None
self._options = []
self._payload = None
self._destination = None
self._source = None
self._code = None
self._acknowledged = None
self._rejected = None
self._timeouted = None
self._cancelled = None
self._duplicated = None
self._timestamp = None
self._version = 1
self._usecache = True
@property
def version(self):
"""
Return the CoAP version
:return: the version
"""
return self._version
@property
def cache(self):
return self._usecache
@cache.setter
    def cache(self, temp):
        if not isinstance(temp, bool):
            raise AttributeError
self._usecache = temp
@version.setter
def version(self, v):
"""
Sets the CoAP version
:param v: the version
:raise AttributeError: if value is not 1
"""
if not isinstance(v, int) or v != 1:
raise AttributeError
self._version = v
@property
def type(self):
"""
Return the type of the message.
:return: the type
"""
return self._type
@type.setter
def type(self, value):
"""
Sets the type of the message.
:type value: Types
:param value: the type
:raise AttributeError: if value is not a valid type
"""
if value not in defines.Types.values():
raise AttributeError
self._type = value
@property
def mid(self):
"""
Return the mid of the message.
:return: the MID
"""
return self._mid
@mid.setter
def mid(self, value):
"""
Sets the MID of the message.
:type value: Integer
:param value: the MID
        :raise AttributeError: if value is not an int or cannot be represented on 16 bits.
        """
        if not isinstance(value, int) or not (0 <= value <= 65535):
raise AttributeError
self._mid = value
@mid.deleter
def mid(self):
"""
Unset the MID of the message.
"""
self._mid = None
@property
def token(self):
"""
Get the Token of the message.
:return: the Token
"""
return self._token
@token.setter
def token(self, value):
"""
Set the Token of the message.
:type value: String
:param value: the Token
:raise AttributeError: if value is longer than 256
"""
if value is None:
self._token = value
return
if not isinstance(value, str):
value = str(value)
if len(value) > 256:
raise AttributeError
self._token = value
@token.deleter
def token(self):
"""
Unset the Token of the message.
"""
self._token = None
@property
def options(self):
"""
Return the options of the CoAP message.
:rtype: list
:return: the options
"""
return self._options
@options.setter
def options(self, value):
"""
Set the options of the CoAP message.
:type value: list
:param value: list of options
"""
if value is None:
value = []
assert isinstance(value, list)
self._options = value
@property
def payload(self):
"""
Return the payload.
:return: the payload
"""
return self._payload
@payload.setter
def payload(self, value):
"""
Sets the payload of the message and eventually the Content-Type
:param value: the payload
"""
if isinstance(value, tuple):
content_type, payload = value
self.content_type = content_type
self._payload = payload
else:
self._payload = value
@property
def destination(self):
"""
Return the destination of the message.
:rtype: tuple
:return: (ip, port)
"""
return self._destination
@destination.setter
def destination(self, value):
"""
Set the destination of the message.
:type value: tuple
:param value: (ip, port)
        :raise AttributeError: if value is not an (ip, port) tuple.
        """
        if value is not None and (not isinstance(value, tuple) or len(value) != 2):
raise AttributeError
self._destination = value
@property
def source(self):
"""
Return the source of the message.
:rtype: tuple
:return: (ip, port)
"""
return self._source
@source.setter
def source(self, value):
"""
Set the source of the message.
:type value: tuple
:param value: (ip, port)
        :raise AttributeError: if value is not an (ip, port) tuple.
"""
if not isinstance(value, tuple) or len(value) != 2:
raise AttributeError
self._source = value
@property
def code(self):
"""
Return the code of the message.
:rtype: Codes
:return: the code
"""
return self._code
@code.setter
def code(self, value):
"""
Set the code of the message.
:type value: Codes
:param value: the code
:raise AttributeError: if value is not a valid code
"""
if value not in defines.Codes.LIST.keys() and value is not None:
raise AttributeError
self._code = value
@property
def acknowledged(self):
"""
        Checks if this message has been acknowledged.
:return: True, if is acknowledged
"""
return self._acknowledged
@acknowledged.setter
def acknowledged(self, value):
"""
Marks this message as acknowledged.
:type value: Boolean
:param value: if acknowledged
"""
assert (isinstance(value, bool))
self._acknowledged = value
if value:
self._timeouted = False
self._rejected = False
self._cancelled = False
@property
def rejected(self):
"""
Checks if this message has been rejected.
:return: True, if is rejected
"""
return self._rejected
@rejected.setter
def rejected(self, value):
"""
Marks this message as rejected.
:type value: Boolean
:param value: if rejected
"""
assert (isinstance(value, bool))
self._rejected = value
if value:
self._timeouted = False
self._acknowledged = False
self._cancelled = True
@property
def timeouted(self):
"""
        Checks if this message has timed out. Confirmable messages in
        particular might time out.
        :return: True, if it has timed out
"""
return self._timeouted
@timeouted.setter
def timeouted(self, value):
"""
        Marks this message as timed out. Confirmable messages in particular
        might time out.
:type value: Boolean
:param value:
"""
assert (isinstance(value, bool))
self._timeouted = value
if value:
self._acknowledged = False
self._rejected = False
self._cancelled = True
@property
def duplicated(self):
"""
Checks if this message is a duplicate.
:return: True, if is a duplicate
"""
return self._duplicated
@duplicated.setter
def duplicated(self, value):
"""
Marks this message as a duplicate.
:type value: Boolean
:param value: if a duplicate
"""
assert (isinstance(value, bool))
self._duplicated = value
@property
def timestamp(self):
"""
Return the timestamp of the message.
"""
return self._timestamp
@timestamp.setter
def timestamp(self, value):
"""
Set the timestamp of the message.
:type value: timestamp
:param value: the timestamp
"""
self._timestamp = value
def _already_in(self, option):
"""
Check if an option is already in the message.
:type option: Option
:param option: the option to be checked
:return: True if already present, False otherwise
"""
for opt in self._options:
if option.number == opt.number:
return True
return False
def add_option(self, option):
"""
Add an option to the message.
:type option: Option
:param option: the option
:raise TypeError: if the option is not repeatable and such option is already present in the message
"""
assert isinstance(option, Option)
repeatable = defines.OptionRegistry.LIST[option.number].repeatable
if not repeatable:
ret = self._already_in(option)
if ret:
raise TypeError("Option : %s is not repeatable", option.name)
else:
self._options.append(option)
else:
self._options.append(option)
def del_option(self, option):
"""
Delete an option from the message
:type option: Option
:param option: the option
"""
assert isinstance(option, Option)
while option in list(self._options):
self._options.remove(option)
def del_option_by_name(self, name):
"""
Delete an option from the message by name
:type name: String
:param name: option name
"""
for o in list(self._options):
assert isinstance(o, Option)
if o.name == name:
self._options.remove(o)
def del_option_by_number(self, number):
"""
Delete an option from the message by number
:type number: Integer
        :param number: the option number
"""
for o in list(self._options):
assert isinstance(o, Option)
if o.number == number:
self._options.remove(o)
@property
def etag(self):
"""
Get the ETag option of the message.
:rtype: list
:return: the ETag values or [] if not specified by the request
"""
value = []
for option in self.options:
if option.number == defines.OptionRegistry.ETAG.number:
value.append(option.value)
return value
@etag.setter
def etag(self, etag):
"""
Add an ETag option to the message.
:param etag: the etag
"""
if not isinstance(etag, list):
etag = [etag]
for e in etag:
option = Option()
option.number = defines.OptionRegistry.ETAG.number
option.value = e
self.add_option(option)
@etag.deleter
def etag(self):
"""
Delete an ETag from a message.
"""
self.del_option_by_number(defines.OptionRegistry.ETAG.number)
@property
def content_type(self):
"""
Get the Content-Type option of a response.
:return: the Content-Type value or 0 if not specified by the response
"""
value = 0
for option in self.options:
if option.number == defines.OptionRegistry.CONTENT_TYPE.number:
value = int(option.value)
return value
@content_type.setter
def content_type(self, content_type):
"""
Set the Content-Type option of a response.
:type content_type: int
:param content_type: the Content-Type
"""
option = Option()
option.number = defines.OptionRegistry.CONTENT_TYPE.number
option.value = int(content_type)
self.add_option(option)
@content_type.deleter
def content_type(self):
"""
Delete the Content-Type option of a response.
"""
self.del_option_by_number(defines.OptionRegistry.CONTENT_TYPE.number)
@property
def observe(self):
"""
Check if the request is an observing request.
:return: 0, if the request is an observing request
"""
for option in self.options:
if option.number == defines.OptionRegistry.OBSERVE.number:
if option.value is None:
return 0
return option.value
return None
@observe.setter
def observe(self, ob):
"""
Add the Observe option.
:param ob: observe count
"""
option = Option()
option.number = defines.OptionRegistry.OBSERVE.number
option.value = ob
self.del_option_by_number(defines.OptionRegistry.OBSERVE.number)
self.add_option(option)
@observe.deleter
def observe(self):
"""
Delete the Observe option.
"""
self.del_option_by_number(defines.OptionRegistry.OBSERVE.number)
@property
def block1(self):
"""
Get the Block1 option.
:return: the Block1 value
"""
value = None
for option in self.options:
if option.number == defines.OptionRegistry.BLOCK1.number:
value = parse_blockwise(option.value)
return value
@block1.setter
def block1(self, value):
"""
Set the Block1 option.
:param value: the Block1 value
"""
option = Option()
option.number = defines.OptionRegistry.BLOCK1.number
num, m, size = value
if size > 512:
szx = 6
elif 256 < size <= 512:
szx = 5
elif 128 < size <= 256:
szx = 4
elif 64 < size <= 128:
szx = 3
elif 32 < size <= 64:
szx = 2
elif 16 < size <= 32:
szx = 1
else:
szx = 0
value = (num << 4)
value |= (m << 3)
value |= szx
option.value = value
self.add_option(option)
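    # Worked example of the encoding above (illustrative values): for
    # value = (num=2, m=1, size=1024), size > 512 gives szx = 6, and the
    # stored option value is (2 << 4) | (1 << 3) | 6 = 32 + 8 + 6 = 46.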
@block1.deleter
def block1(self):
"""
Delete the Block1 option.
"""
self.del_option_by_number(defines.OptionRegistry.BLOCK1.number)
@property
def block2(self):
"""
Get the Block2 option.
:return: the Block2 value
"""
value = None
for option in self.options:
if option.number == defines.OptionRegistry.BLOCK2.number:
value = parse_blockwise(option.value)
return value
@block2.setter
def block2(self, value):
"""
Set the Block2 option.
:param value: the Block2 value
"""
option = Option()
option.number = defines.OptionRegistry.BLOCK2.number
num, m, size = value
if size > 512:
szx = 6
elif 256 < size <= 512:
szx = 5
elif 128 < size <= 256:
szx = 4
elif 64 < size <= 128:
szx = 3
elif 32 < size <= 64:
szx = 2
elif 16 < size <= 32:
szx = 1
else:
szx = 0
value = (num << 4)
value |= (m << 3)
value |= szx
option.value = value
self.add_option(option)
@block2.deleter
def block2(self):
"""
Delete the Block2 option.
"""
self.del_option_by_number(defines.OptionRegistry.BLOCK2.number)
@property
def line_print(self):
"""
Return the message as a one-line string.
:return: the string representing the message
"""
inv_types = {v: k for k, v in defines.Types.iteritems()}
if self._code is None:
self._code = defines.Codes.EMPTY.number
msg = "From {source}, To {destination}, {type}-{mid}, {code}-{token}, ["\
.format(source=self._source, destination=self._destination, type=inv_types[self._type], mid=self._mid,
code=defines.Codes.LIST[self._code].name, token=self._token)
for opt in self._options:
msg += "{name}: {value}, ".format(name=opt.name, value=opt.value)
msg += "]"
if self.payload is not None:
if isinstance(self.payload, dict):
tmp = self.payload.values()[0][0:20]
else:
tmp = self.payload[0:20]
msg += " {payload}...{length} bytes".format(payload=tmp, length=len(self.payload))
else:
msg += " No payload"
return msg
def __str__(self):
return self.line_print
def pretty_print(self):
"""
Return the message as a formatted string.
:return: the string representing the message
"""
msg = "Source: " + str(self._source) + "\n"
msg += "Destination: " + str(self._destination) + "\n"
inv_types = {v: k for k, v in defines.Types.iteritems()}
msg += "Type: " + str(inv_types[self._type]) + "\n"
msg += "MID: " + str(self._mid) + "\n"
if self._code is None:
self._code = 0
msg += "Code: " + str(defines.Codes.LIST[self._code].name) + "\n"
msg += "Token: " + str(self._token) + "\n"
for opt in self._options:
msg += str(opt)
msg += "Payload: " + "\n"
msg += str(self._payload) + "\n"
return msg
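# Usage sketch (illustrative only, not part of CoAPthon): exercising a few of
# the accessors defined above. The address and payload are made-up values.
def _example_message_usage():
    msg = Message()
    msg.mid = 1
    msg.token = "abc"
    msg.destination = ("127.0.0.1", 5683)
    msg.payload = "hello"
    msg.block2 = (0, 0, 64)   # num=0, more=0, size=64 -> szx=2, stored value 2
    return msg.block2         # decoded back through parse_blockwise()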
|
|
"""Provide functionality to TTS."""
import asyncio
import ctypes
import functools as ft
import hashlib
import io
import logging
import mimetypes
import os
import re
from aiohttp import web
import voluptuous as vol
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.media_player.const import (
ATTR_MEDIA_CONTENT_ID, ATTR_MEDIA_CONTENT_TYPE, MEDIA_TYPE_MUSIC,
SERVICE_PLAY_MEDIA)
from homeassistant.components.media_player.const import DOMAIN as DOMAIN_MP
from homeassistant.const import ATTR_ENTITY_ID, ENTITY_MATCH_ALL, CONF_PLATFORM
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_per_platform
import homeassistant.helpers.config_validation as cv
from homeassistant.setup import async_prepare_setup_platform
_LOGGER = logging.getLogger(__name__)
ATTR_CACHE = 'cache'
ATTR_LANGUAGE = 'language'
ATTR_MESSAGE = 'message'
ATTR_OPTIONS = 'options'
ATTR_PLATFORM = 'platform'
CONF_BASE_URL = 'base_url'
CONF_CACHE = 'cache'
CONF_CACHE_DIR = 'cache_dir'
CONF_LANG = 'language'
CONF_SERVICE_NAME = 'service_name'
CONF_TIME_MEMORY = 'time_memory'
DEFAULT_CACHE = True
DEFAULT_CACHE_DIR = 'tts'
DEFAULT_TIME_MEMORY = 300
DOMAIN = 'tts'
MEM_CACHE_FILENAME = 'filename'
MEM_CACHE_VOICE = 'voice'
SERVICE_CLEAR_CACHE = 'clear_cache'
SERVICE_SAY = 'say'
_RE_VOICE_FILE = re.compile(
r"([a-f0-9]{40})_([^_]+)_([^_]+)_([a-z_]+)\.[a-z0-9]{3,4}")
KEY_PATTERN = '{0}_{1}_{2}_{3}'
def _deprecated_platform(value):
"""Validate if platform is deprecated."""
if value == 'google':
raise vol.Invalid(
'google tts service has been renamed to google_translate,'
' please update your configuration.')
return value
PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend({
vol.Required(CONF_PLATFORM): vol.All(cv.string, _deprecated_platform),
vol.Optional(CONF_CACHE, default=DEFAULT_CACHE): cv.boolean,
vol.Optional(CONF_CACHE_DIR, default=DEFAULT_CACHE_DIR): cv.string,
vol.Optional(CONF_TIME_MEMORY, default=DEFAULT_TIME_MEMORY):
vol.All(vol.Coerce(int), vol.Range(min=60, max=57600)),
vol.Optional(CONF_BASE_URL): cv.string,
vol.Optional(CONF_SERVICE_NAME): cv.string,
})
PLATFORM_SCHEMA_BASE = cv.PLATFORM_SCHEMA_BASE.extend(PLATFORM_SCHEMA.schema)
SCHEMA_SERVICE_SAY = vol.Schema({
vol.Required(ATTR_MESSAGE): cv.string,
vol.Optional(ATTR_CACHE): cv.boolean,
vol.Optional(ATTR_ENTITY_ID): cv.comp_entity_ids,
vol.Optional(ATTR_LANGUAGE): cv.string,
vol.Optional(ATTR_OPTIONS): dict,
})
SCHEMA_SERVICE_CLEAR_CACHE = vol.Schema({})
async def async_setup(hass, config):
"""Set up TTS."""
tts = SpeechManager(hass)
try:
conf = config[DOMAIN][0] if config.get(DOMAIN, []) else {}
use_cache = conf.get(CONF_CACHE, DEFAULT_CACHE)
cache_dir = conf.get(CONF_CACHE_DIR, DEFAULT_CACHE_DIR)
time_memory = conf.get(CONF_TIME_MEMORY, DEFAULT_TIME_MEMORY)
base_url = conf.get(CONF_BASE_URL) or hass.config.api.base_url
await tts.async_init_cache(use_cache, cache_dir, time_memory, base_url)
except (HomeAssistantError, KeyError) as err:
_LOGGER.error("Error on cache init %s", err)
return False
hass.http.register_view(TextToSpeechView(tts))
hass.http.register_view(TextToSpeechUrlView(tts))
async def async_setup_platform(p_type, p_config, disc_info=None):
"""Set up a TTS platform."""
platform = await async_prepare_setup_platform(
hass, config, DOMAIN, p_type)
if platform is None:
return
try:
if hasattr(platform, 'async_get_engine'):
provider = await platform.async_get_engine(
hass, p_config)
else:
provider = await hass.async_add_job(
platform.get_engine, hass, p_config)
if provider is None:
_LOGGER.error("Error setting up platform %s", p_type)
return
tts.async_register_engine(p_type, provider, p_config)
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Error setting up platform: %s", p_type)
return
async def async_say_handle(service):
"""Service handle for say."""
entity_ids = service.data.get(ATTR_ENTITY_ID, ENTITY_MATCH_ALL)
message = service.data.get(ATTR_MESSAGE)
cache = service.data.get(ATTR_CACHE)
language = service.data.get(ATTR_LANGUAGE)
options = service.data.get(ATTR_OPTIONS)
try:
url = await tts.async_get_url(
p_type, message, cache=cache, language=language,
options=options
)
except HomeAssistantError as err:
_LOGGER.error("Error on init TTS: %s", err)
return
data = {
ATTR_MEDIA_CONTENT_ID: url,
ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC,
ATTR_ENTITY_ID: entity_ids,
}
await hass.services.async_call(
DOMAIN_MP, SERVICE_PLAY_MEDIA, data, blocking=True)
service_name = p_config.get(CONF_SERVICE_NAME, "{}_{}".format(
p_type, SERVICE_SAY))
hass.services.async_register(
DOMAIN, service_name, async_say_handle,
schema=SCHEMA_SERVICE_SAY)
setup_tasks = [async_setup_platform(p_type, p_config) for p_type, p_config
in config_per_platform(config, DOMAIN)]
if setup_tasks:
await asyncio.wait(setup_tasks)
async def async_clear_cache_handle(service):
"""Handle clear cache service call."""
await tts.async_clear_cache()
hass.services.async_register(
DOMAIN, SERVICE_CLEAR_CACHE, async_clear_cache_handle,
schema=SCHEMA_SERVICE_CLEAR_CACHE)
return True
class SpeechManager:
"""Representation of a speech store."""
def __init__(self, hass):
"""Initialize a speech store."""
self.hass = hass
self.providers = {}
self.use_cache = DEFAULT_CACHE
self.cache_dir = DEFAULT_CACHE_DIR
self.time_memory = DEFAULT_TIME_MEMORY
self.base_url = None
self.file_cache = {}
self.mem_cache = {}
async def async_init_cache(self, use_cache, cache_dir, time_memory,
base_url):
"""Init config folder and load file cache."""
self.use_cache = use_cache
self.time_memory = time_memory
self.base_url = base_url
def init_tts_cache_dir(cache_dir):
"""Init cache folder."""
if not os.path.isabs(cache_dir):
cache_dir = self.hass.config.path(cache_dir)
if not os.path.isdir(cache_dir):
_LOGGER.info("Create cache dir %s.", cache_dir)
os.mkdir(cache_dir)
return cache_dir
try:
self.cache_dir = await self.hass.async_add_job(
init_tts_cache_dir, cache_dir)
except OSError as err:
raise HomeAssistantError("Can't init cache dir {}".format(err))
def get_cache_files():
"""Return a dict of given engine files."""
cache = {}
folder_data = os.listdir(self.cache_dir)
for file_data in folder_data:
record = _RE_VOICE_FILE.match(file_data)
if record:
key = KEY_PATTERN.format(
record.group(1), record.group(2), record.group(3),
record.group(4)
)
cache[key.lower()] = file_data.lower()
return cache
try:
cache_files = await self.hass.async_add_job(get_cache_files)
except OSError as err:
raise HomeAssistantError("Can't read cache dir {}".format(err))
if cache_files:
self.file_cache.update(cache_files)
async def async_clear_cache(self):
"""Read file cache and delete files."""
self.mem_cache = {}
def remove_files():
"""Remove files from filesystem."""
for _, filename in self.file_cache.items():
try:
os.remove(os.path.join(self.cache_dir, filename))
except OSError as err:
_LOGGER.warning(
"Can't remove cache file '%s': %s", filename, err)
await self.hass.async_add_job(remove_files)
self.file_cache = {}
@callback
def async_register_engine(self, engine, provider, config):
"""Register a TTS provider."""
provider.hass = self.hass
if provider.name is None:
provider.name = engine
self.providers[engine] = provider
async def async_get_url(self, engine, message, cache=None, language=None,
options=None):
"""Get URL for play message.
This method is a coroutine.
"""
provider = self.providers[engine]
msg_hash = hashlib.sha1(bytes(message, 'utf-8')).hexdigest()
use_cache = cache if cache is not None else self.use_cache
# Languages
language = language or provider.default_language
if language is None or \
language not in provider.supported_languages:
raise HomeAssistantError("Not supported language {0}".format(
language))
# Options
if provider.default_options and options:
merged_options = provider.default_options.copy()
merged_options.update(options)
options = merged_options
options = options or provider.default_options
if options is not None:
invalid_opts = [opt_name for opt_name in options.keys()
if opt_name not in (provider.supported_options or
[])]
if invalid_opts:
raise HomeAssistantError(
"Invalid options found: {}".format(invalid_opts))
options_key = ctypes.c_size_t(hash(frozenset(options))).value
else:
options_key = '-'
key = KEY_PATTERN.format(
msg_hash, language, options_key, engine).lower()
# Is speech already in memory
if key in self.mem_cache:
filename = self.mem_cache[key][MEM_CACHE_FILENAME]
# Is file store in file cache
elif use_cache and key in self.file_cache:
filename = self.file_cache[key]
self.hass.async_create_task(self.async_file_to_mem(key))
# Load speech from provider into memory
else:
filename = await self.async_get_tts_audio(
engine, key, message, use_cache, language, options)
return "{}/api/tts_proxy/{}".format(self.base_url, filename)
async def async_get_tts_audio(
self, engine, key, message, cache, language, options):
"""Receive TTS and store for view in cache.
This method is a coroutine.
"""
provider = self.providers[engine]
extension, data = await provider.async_get_tts_audio(
message, language, options)
if data is None or extension is None:
raise HomeAssistantError(
"No TTS from {} for '{}'".format(engine, message))
# Create file infos
filename = ("{}.{}".format(key, extension)).lower()
data = self.write_tags(
filename, data, provider, message, language, options)
# Save to memory
self._async_store_to_memcache(key, filename, data)
if cache:
self.hass.async_create_task(
self.async_save_tts_audio(key, filename, data))
return filename
async def async_save_tts_audio(self, key, filename, data):
"""Store voice data to file and file_cache.
This method is a coroutine.
"""
voice_file = os.path.join(self.cache_dir, filename)
def save_speech():
"""Store speech to filesystem."""
with open(voice_file, 'wb') as speech:
speech.write(data)
try:
await self.hass.async_add_job(save_speech)
self.file_cache[key] = filename
except OSError:
_LOGGER.error("Can't write %s", filename)
async def async_file_to_mem(self, key):
"""Load voice from file cache into memory.
This method is a coroutine.
"""
filename = self.file_cache.get(key)
if not filename:
raise HomeAssistantError("Key {} not in file cache!".format(key))
voice_file = os.path.join(self.cache_dir, filename)
def load_speech():
"""Load a speech from filesystem."""
with open(voice_file, 'rb') as speech:
return speech.read()
try:
data = await self.hass.async_add_job(load_speech)
except OSError:
del self.file_cache[key]
raise HomeAssistantError("Can't read {}".format(voice_file))
self._async_store_to_memcache(key, filename, data)
@callback
def _async_store_to_memcache(self, key, filename, data):
"""Store data to memcache and set timer to remove it."""
self.mem_cache[key] = {
MEM_CACHE_FILENAME: filename,
MEM_CACHE_VOICE: data,
}
@callback
def async_remove_from_mem():
"""Cleanup memcache."""
self.mem_cache.pop(key)
self.hass.loop.call_later(self.time_memory, async_remove_from_mem)
async def async_read_tts(self, filename):
"""Read a voice file and return binary.
This method is a coroutine.
"""
record = _RE_VOICE_FILE.match(filename.lower())
if not record:
raise HomeAssistantError("Wrong tts file format!")
key = KEY_PATTERN.format(
record.group(1), record.group(2), record.group(3), record.group(4))
if key not in self.mem_cache:
if key not in self.file_cache:
raise HomeAssistantError("{} not in cache!".format(key))
await self.async_file_to_mem(key)
content, _ = mimetypes.guess_type(filename)
return (content, self.mem_cache[key][MEM_CACHE_VOICE])
@staticmethod
def write_tags(filename, data, provider, message, language, options):
"""Write ID3 tags to file.
Async friendly.
"""
import mutagen
data_bytes = io.BytesIO(data)
data_bytes.name = filename
data_bytes.seek(0)
album = provider.name
artist = language
if options is not None:
if options.get('voice') is not None:
artist = options.get('voice')
try:
tts_file = mutagen.File(data_bytes, easy=True)
if tts_file is not None:
tts_file['artist'] = artist
tts_file['album'] = album
tts_file['title'] = message
tts_file.save(data_bytes)
except mutagen.MutagenError as err:
_LOGGER.error("ID3 tag error: %s", err)
return data_bytes.getvalue()
class Provider:
"""Represent a single TTS provider."""
hass = None
name = None
@property
def default_language(self):
"""Return the default language."""
return None
@property
def supported_languages(self):
"""Return a list of supported languages."""
return None
@property
def supported_options(self):
"""Return a list of supported options like voice, emotionen."""
return None
@property
def default_options(self):
"""Return a dict include default options."""
return None
def get_tts_audio(self, message, language, options=None):
"""Load tts audio file from provider."""
raise NotImplementedError()
def async_get_tts_audio(self, message, language, options=None):
"""Load tts audio file from provider.
Return a tuple of file extension and data as bytes.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(
ft.partial(self.get_tts_audio, message, language, options=options))
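# A minimal provider sketch (illustrative only, not part of Home Assistant,
# and never registered by this module): real platforms return an instance of
# such a subclass from their ``get_engine``/``async_get_engine`` hook. The
# audio bytes below are fake placeholders.
class _ExampleStaticProvider(Provider):
    """Toy provider that returns a canned payload for every message."""

    name = 'example'

    @property
    def default_language(self):
        return 'en'

    @property
    def supported_languages(self):
        return ['en']

    def get_tts_audio(self, message, language, options=None):
        # Must return (file extension, audio data as bytes).
        return 'mp3', b'\x00' + message.encode('utf-8')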
class TextToSpeechUrlView(HomeAssistantView):
"""TTS view to get a url to a generated speech file."""
requires_auth = True
url = '/api/tts_get_url'
name = 'api:tts:geturl'
def __init__(self, tts):
"""Initialize a tts view."""
self.tts = tts
async def post(self, request):
"""Generate speech and provide url."""
try:
data = await request.json()
except ValueError:
return self.json_message('Invalid JSON specified', 400)
        if not data.get(ATTR_PLATFORM) or not data.get(ATTR_MESSAGE):
            return self.json_message('Must specify platform and message', 400)
p_type = data[ATTR_PLATFORM]
message = data[ATTR_MESSAGE]
cache = data.get(ATTR_CACHE)
language = data.get(ATTR_LANGUAGE)
options = data.get(ATTR_OPTIONS)
try:
url = await self.tts.async_get_url(
p_type, message, cache=cache, language=language,
options=options
)
resp = self.json({'url': url}, 200)
except HomeAssistantError as err:
_LOGGER.error("Error on init tts: %s", err)
resp = self.json({'error': err}, 400)
return resp
class TextToSpeechView(HomeAssistantView):
"""TTS view to serve a speech audio."""
requires_auth = False
url = '/api/tts_proxy/{filename}'
name = 'api:tts:speech'
def __init__(self, tts):
"""Initialize a tts view."""
self.tts = tts
async def get(self, request, filename):
"""Start a get request."""
try:
content, data = await self.tts.async_read_tts(filename)
except HomeAssistantError as err:
_LOGGER.error("Error on load tts: %s", err)
return web.Response(status=404)
return web.Response(body=data, content_type=content)
|
|
import datetime
import hashlib
import logging
import re
import threading
import types
from django.utils.encoding import force_unicode
logger = logging.getLogger(__name__)
class JSVar(unicode):
"""
A JS variable.
    This is a simple Unicode string. The class acts as a marker indicating that the string is a JS variable name,
    so it must not be quoted by :py:func:`.convert_py_to_js_data` while rendering the JS code.
"""
pass
class JSFunction(JSVar):
"""
A JS function name.
    From a rendering point of view, this is no different from :py:class:`JSVar`. After all, a JS variable
    can refer to a function instance, a primitive constant or any other object; they are all still variables.
    .. tip:: Do use this marker for JS functions. It makes the code clearer and the purpose easier to
        understand.
"""
pass
class JSFunctionInContext(JSVar):
"""
A JS function name to run in context of some other HTML DOM element.
    Like :py:class:`JSFunction`, this too flags the string as a JS function, but with an additional requirement: the JS function
    needs to be invoked in the context of an HTML DOM element, such that ``this`` inside the function refers to that element
    instead of ``window``.
    .. tip:: JS functions of this type are wrapped inside another special JS function -- ``django_select2.runInContextHelper``.
"""
pass
def render_js_script(inner_code):
"""
This wraps ``inner_code`` string inside the following code block::
<script type="text/javascript">
jQuery(function ($) {
// inner_code here
});
</script>
:rtype: :py:obj:`unicode`
"""
return u"""
<script type="text/javascript">
jQuery(function ($) {
%s
});
</script>""" % inner_code
def extract_some_key_val(dct, keys):
"""
Gets a sub-set of a :py:obj:`dict`.
:param dct: Source dictionary.
:type dct: :py:obj:`dict`
    :param keys: List of keys to extract from ``dct``.
:type keys: :py:obj:`list` or any iterable.
:rtype: :py:obj:`dict`
"""
edct = {}
for k in keys:
v = dct.get(k, None)
if v is not None:
edct[k] = v
return edct
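# Illustrative sketch: extracting a whitelisted subset of keys.  The source
# dict and key names are arbitrary example values.
def _example_extract_some_key_val():
    src = {'width': u'250px', 'placeholder': u'Search', 'internal': 42}
    # Only 'width' and 'placeholder' survive; keys missing from ``src`` are
    # simply skipped.
    return extract_some_key_val(src, ['width', 'placeholder', 'minimumInputLength'])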
def convert_to_js_str(val):
val = force_unicode(val).replace('\'', '\\\'')
return u"'%s'" % val
def convert_py_to_js_data(val, id_):
"""
Converts Python data type to JS data type.
    Practically, this means converting ``False`` to ``false``, ``True`` to ``true`` and so on.
It also takes care of the conversion of :py:class:`.JSVar`, :py:class:`.JSFunction`
and :py:class:`.JSFunctionInContext`. It takes care of recursively converting lists and dictionaries
too.
:param val: The Python data to convert.
:type val: Any
:param id_: The DOM id of the element in which context :py:class:`.JSFunctionInContext` functions
should run. (This is not needed if ``val`` contains no :py:class:`.JSFunctionInContext`)
:type id_: :py:obj:`str`
:rtype: :py:obj:`unicode`
"""
if type(val) == types.BooleanType:
return u'true' if val else u'false'
elif type(val) in [types.IntType, types.LongType, types.FloatType]:
return force_unicode(val)
elif isinstance(val, JSFunctionInContext):
return u"django_select2.runInContextHelper(%s, '%s')" % (val, id_)
elif isinstance(val, JSVar):
return val # No quotes here
elif isinstance(val, dict):
return convert_dict_to_js_map(val, id_)
elif isinstance(val, list):
return convert_to_js_arr(val, id_)
else:
return convert_to_js_str(val)
def convert_dict_to_js_map(dct, id_):
"""
Converts a Python dictionary to JS map.
:param dct: The Python dictionary to convert.
:type dct: :py:obj:`dict`
:param id_: The DOM id of the element in which context :py:class:`.JSFunctionInContext` functions
should run. (This is not needed if ``dct`` contains no :py:class:`.JSFunctionInContext`)
:type id_: :py:obj:`str`
:rtype: :py:obj:`unicode`
"""
out = u'{'
is_first = True
for name in dct:
if not is_first:
out += u", "
else:
is_first = False
out += u"%s: " % convert_to_js_str(name)
out += convert_py_to_js_data(dct[name], id_)
return out + u'}'
def convert_to_js_arr(lst, id_):
"""
Converts a Python list (or any iterable) to JS array.
:param lst: The Python iterable to convert.
:type lst: :py:obj:`list` or Any iterable
:param id_: The DOM id of the element in which context :py:class:`.JSFunctionInContext` functions
should run. (This is not needed if ``lst`` contains no :py:class:`.JSFunctionInContext`)
:type id_: :py:obj:`str`
:rtype: :py:obj:`unicode`
"""
out = u'['
is_first = True
for val in lst:
if not is_first:
out += u", "
else:
is_first = False
out += convert_py_to_js_data(val, id_)
return out + u']'
def convert_to_js_string_arr(lst):
"""
Converts a Python list (or any iterable) of strings to JS array.
    :py:func:`convert_to_js_arr` can always be used instead of this. However, since this
    function knows that the list contains only strings, it cuts down on unnecessary computations.
:rtype: :py:obj:`unicode`
"""
lst = [convert_to_js_str(l) for l in lst]
return u"[%s]" % (",".join(lst))
### Auto view helper utils ###
from . import __ENABLE_MULTI_PROCESS_SUPPORT as ENABLE_MULTI_PROCESS_SUPPORT, \
__MEMCACHE_HOST as MEMCACHE_HOST, __MEMCACHE_PORT as MEMCACHE_PORT, __MEMCACHE_TTL as MEMCACHE_TTL
from . import __GENERATE_RANDOM_ID as GENERATE_RANDOM_ID, __SECRET_SALT as SECRET_SALT
def synchronized(f):
"Decorator to synchronize multiple calls to a functions."
f.__lock__ = threading.Lock()
def synced_f(*args, **kwargs):
with f.__lock__:
return f(*args, **kwargs)
synced_f.__doc__ = f.__doc__
return synced_f
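# Illustrative sketch: serializing access to a shared counter with the
# decorator above.  The counter dict itself is an arbitrary example.
@synchronized
def _example_synchronized_increment(counter):
    "Increments ``counter['value']`` while holding the decorator's lock."
    counter['value'] = counter.get('value', 0) + 1
    return counter['value']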
# Generated Id to field instance mapping.
__id_store = {}
# Field's key to generated Id mapping.
__field_store = {}
ID_PATTERN = r"[0-9_a-zA-Z.:+\- ]+"
def is_valid_id(val):
"""
Checks if ``val`` is a valid generated Id.
:param val: The value to check.
:type val: :py:obj:`str`
:rtype: :py:obj:`bool`
"""
regex = "^%s$" % ID_PATTERN
if re.match(regex, val) is None:
return False
else:
return True
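# Illustrative sketch: values accepted and rejected by ID_PATTERN above.
def _example_is_valid_id():
    # Digits, letters, ':', '.', '-', '+' and spaces are allowed ...
    accepted = is_valid_id(u"12:2013-05-14 13:04:43.929840")
    # ... while '/' (or any other character outside the class) is not.
    rejected = is_valid_id(u"bad/id")
    return accepted, rejected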
if ENABLE_MULTI_PROCESS_SUPPORT:
from memcache_wrapped_db_client import Client
remote_server = Client(MEMCACHE_HOST, str(MEMCACHE_PORT), MEMCACHE_TTL)
@synchronized
def register_field(key, field):
"""
Registers an Auto field for use with :py:class:`.views.AutoResponseView`.
:param key: The key to use while registering this field.
:type key: :py:obj:`unicode`
:param field: The field to register.
:type field: :py:class:`AutoViewFieldMixin`
    :return: The generated Id for this field. If the given ``key`` was already registered, the
        Id generated at that time is returned.
:rtype: :py:obj:`unicode`
"""
global __id_store, __field_store
from fields import AutoViewFieldMixin
if not isinstance(field, AutoViewFieldMixin):
raise ValueError('Field must extend AutoViewFieldMixin')
if key not in __field_store:
# Generating id
if GENERATE_RANDOM_ID:
id_ = u"%d:%s" % (len(__id_store), unicode(datetime.datetime.now()))
else:
id_ = unicode(hashlib.sha1("%s:%s" % (key, SECRET_SALT)).hexdigest())
__field_store[key] = id_
__id_store[id_] = field
if logger.isEnabledFor(logging.INFO):
logger.info("Registering new field: %s; With actual id: %s", key, id_)
if ENABLE_MULTI_PROCESS_SUPPORT:
logger.info("Multi process support is enabled. Adding id-key mapping to remote server.")
remote_server.set(id_, key)
else:
id_ = __field_store[key]
if logger.isEnabledFor(logging.INFO):
logger.info("Field already registered: %s; With actual id: %s", key, id_)
return id_
def get_field(id_):
"""
Returns an Auto field instance registered with the given Id.
:param id_: The generated Id the field is registered with.
:type id_: :py:obj:`unicode`
:rtype: :py:class:`AutoViewFieldMixin` or None
"""
field = __id_store.get(id_, None)
if field is None and ENABLE_MULTI_PROCESS_SUPPORT:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Id "%s" not found in this process. Looking up in remote server.', id_)
key = remote_server.get(id_)
if key is not None:
id_in_current_instance = __field_store[key]
if id_in_current_instance:
field = __id_store.get(id_in_current_instance, None)
if field:
__id_store[id_] = field
else:
logger.error('Unknown id "%s".', id_in_current_instance)
else:
logger.error('Unknown id "%s".', id_)
return field
def timer_start(name):
import sys, time
if sys.platform == "win32":
# On Windows, the best timer is time.clock()
default_timer = time.clock
multiplier = 1.0
else:
# On most other platforms the best timer is time.time()
default_timer = time.time
multiplier = 1000.0
return (name, default_timer, multiplier, default_timer())
def timer_end(t):
(name, default_timer, multiplier, timeS) = t
timeE = default_timer()
logger.debug("Time taken by %s: %0.3f ms" % (name, (timeE - timeS) * multiplier))
def timer(f):
def inner(*args, **kwargs):
t = timer_start(f.func_name)
ret = f(*args, **kwargs)
timer_end(t)
return ret
return inner
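# Illustrative sketch: timing a function with the decorator above.  Like the
# rest of this module it assumes Python 2 (the decorator reads ``func_name``).
@timer
def _example_timed_sum(n):
    return sum(range(n))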
|
|
# -*- coding: utf-8 -*-
import json
import sys
import warnings
from django.conf import settings
from django.contrib.auth.models import AnonymousUser, Permission
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.template.context import Context
from django.test import testcases
from django.test.client import RequestFactory
from django.utils.translation import activate
from menus.menu_pool import menu_pool
from cms.models import Page
from cms.test_utils.util.context_managers import (UserLoginContext,
SettingsOverride)
from cms.utils.compat.dj import get_user_model
from cms.utils.compat.urls import urljoin, unquote
from cms.utils.permissions import set_current_user
URL_CMS_PAGE = "/en/admin/cms/page/"
URL_CMS_PAGE_ADD = urljoin(URL_CMS_PAGE, "add/")
URL_CMS_PAGE_CHANGE = urljoin(URL_CMS_PAGE, "%d/")
URL_CMS_PAGE_ADVANCED_CHANGE = urljoin(URL_CMS_PAGE, "%d/advanced-settings/")
URL_CMS_PAGE_PERMISSION_CHANGE = urljoin(URL_CMS_PAGE, "%d/permission-settings/")
URL_CMS_PAGE_CHANGE_LANGUAGE = URL_CMS_PAGE_CHANGE + "?language=%s"
URL_CMS_PAGE_CHANGE_TEMPLATE = URL_CMS_PAGE_CHANGE + "change_template/"
URL_CMS_PAGE_PUBLISH = URL_CMS_PAGE_CHANGE + "%s/publish/"
URL_CMS_PAGE_DELETE = urljoin(URL_CMS_PAGE_CHANGE, "delete/")
URL_CMS_PLUGIN_ADD = urljoin(URL_CMS_PAGE, "add-plugin/")
URL_CMS_PLUGIN_EDIT = urljoin(URL_CMS_PAGE, "edit-plugin/")
URL_CMS_PLUGIN_MOVE = urljoin(URL_CMS_PAGE, "move-plugin/")
URL_CMS_PLUGIN_REMOVE = urljoin(URL_CMS_PAGE, "delete-plugin/")
URL_CMS_TRANSLATION_DELETE = urljoin(URL_CMS_PAGE_CHANGE, "delete-translation/")
URL_CMS_PAGE_HISTORY = urljoin(URL_CMS_PAGE_CHANGE, "history/%d/")
URL_CMS_PLUGIN_HISTORY_EDIT = urljoin(URL_CMS_PAGE_HISTORY, "edit-plugin/")
class _Warning(object):
def __init__(self, message, category, filename, lineno):
self.message = message
self.category = category
self.filename = filename
self.lineno = lineno
def _collectWarnings(observeWarning, f, *args, **kwargs):
def showWarning(message, category, filename, lineno, file=None, line=None):
assert isinstance(message, Warning)
observeWarning(_Warning(
message.args[0], category, filename, lineno))
# Disable the per-module cache for every module otherwise if the warning
# which the caller is expecting us to collect was already emitted it won't
# be re-emitted by the call to f which happens below.
for v in sys.modules.values():
if v is not None:
try:
v.__warningregistry__ = None
except:
# Don't specify a particular exception type to handle in case
# some wacky object raises some wacky exception in response to
# the setattr attempt.
pass
origFilters = warnings.filters[:]
origShow = warnings.showwarning
warnings.simplefilter('always')
try:
warnings.showwarning = showWarning
result = f(*args, **kwargs)
finally:
warnings.filters[:] = origFilters
warnings.showwarning = origShow
return result
class BaseCMSTestCase(object):
counter = 1
def _fixture_setup(self):
super(BaseCMSTestCase, self)._fixture_setup()
self.create_fixtures()
activate("en")
def create_fixtures(self):
pass
def _post_teardown(self):
menu_pool.clear()
cache.clear()
super(BaseCMSTestCase, self)._post_teardown()
set_current_user(None)
def login_user_context(self, user):
return UserLoginContext(self, user)
def _create_user(self, username, is_staff=False, is_superuser=False,
is_active=True, add_default_permissions=False, permissions=None):
"""
Use this method to create users.
        Default permissions on page and text plugin are added if creating a
        non-superuser and `add_default_permissions` is set.
        Set the `permissions` parameter to an iterable of permission codenames to
        add custom permissions.
"""
User = get_user_model()
fields = dict(email=username + '@django-cms.org',
is_staff=is_staff, is_active=is_active, is_superuser=is_superuser
)
# Check for special case where email is used as username
if (get_user_model().USERNAME_FIELD != 'email'):
fields[get_user_model().USERNAME_FIELD] = username
user = User(**fields)
user.set_password(getattr(user, get_user_model().USERNAME_FIELD))
user.save()
if is_staff and not is_superuser and add_default_permissions:
user.user_permissions.add(Permission.objects.get(codename='add_text'))
user.user_permissions.add(Permission.objects.get(codename='delete_text'))
user.user_permissions.add(Permission.objects.get(codename='change_text'))
user.user_permissions.add(Permission.objects.get(codename='publish_page'))
user.user_permissions.add(Permission.objects.get(codename='add_page'))
user.user_permissions.add(Permission.objects.get(codename='change_page'))
user.user_permissions.add(Permission.objects.get(codename='delete_page'))
if is_staff and not is_superuser and permissions:
for permission in permissions:
user.user_permissions.add(Permission.objects.get(codename=permission))
return user
def get_superuser(self):
try:
query = dict()
if get_user_model().USERNAME_FIELD != "email":
query[get_user_model().USERNAME_FIELD] = "admin"
else:
query[get_user_model().USERNAME_FIELD] = "[email protected]"
admin = get_user_model().objects.get(**query)
except get_user_model().DoesNotExist:
admin = self._create_user("admin", is_staff=True, is_superuser=True)
return admin
def get_staff_user_with_no_permissions(self):
"""
Used in security tests
"""
staff = self._create_user("staff", is_staff=True, is_superuser=False)
return staff
def get_staff_user_with_std_permissions(self):
"""
        Return a non-superuser staff user with the standard default permissions.
        """
        staff = self._create_user("staff", is_staff=True, is_superuser=False,
                                  add_default_permissions=True)
return staff
def get_new_page_data(self, parent_id=''):
page_data = {
'title': 'test page %d' % self.counter,
'slug': 'test-page-%d' % self.counter,
'language': settings.LANGUAGES[0][0],
'template': 'nav_playground.html',
'parent': parent_id,
'site': 1,
'pagepermission_set-TOTAL_FORMS': 0,
'pagepermission_set-INITIAL_FORMS': 0,
'pagepermission_set-MAX_NUM_FORMS': 0,
'pagepermission_set-2-TOTAL_FORMS': 0,
'pagepermission_set-2-INITIAL_FORMS': 0,
'pagepermission_set-2-MAX_NUM_FORMS': 0
}
        # required only if the user has can_change_permission
self.counter += 1
return page_data
def get_new_page_data_dbfields(self, parent=None, site=None,
language=None,
template='nav_playground.html', ):
page_data = {
'title': 'test page %d' % self.counter,
'slug': 'test-page-%d' % self.counter,
'language': settings.LANGUAGES[0][0] if not language else language,
'template': template,
'parent': parent if parent else None,
'site': site if site else Site.objects.get_current(),
}
self.counter = self.counter + 1
return page_data
def get_pagedata_from_dbfields(self, page_data):
"""Converts data created by get_new_page_data_dbfields to data
created from get_new_page_data so you can switch between test cases
in api.create_page and client.post"""
page_data['site'] = page_data['site'].id
page_data['parent'] = page_data['parent'].id if page_data['parent'] else ''
        # required only if the user has can_change_permission
page_data['pagepermission_set-TOTAL_FORMS'] = 0
page_data['pagepermission_set-INITIAL_FORMS'] = 0
page_data['pagepermission_set-MAX_NUM_FORMS'] = 0
page_data['pagepermission_set-2-TOTAL_FORMS'] = 0
page_data['pagepermission_set-2-INITIAL_FORMS'] = 0
page_data['pagepermission_set-2-MAX_NUM_FORMS'] = 0
return page_data
def print_page_structure(self, qs):
"""Just a helper to see the page struct.
"""
for page in qs.order_by('tree_id', 'lft'):
ident = " " * page.level
print(u"%s%s (%s), lft: %s, rght: %s, tree_id: %s" % (ident, page,
page.pk, page.lft, page.rght, page.tree_id))
def print_node_structure(self, nodes, *extra):
def _rec(nodes, level=0):
ident = level * ' '
for node in nodes:
raw_attrs = [(bit, getattr(node, bit, node.attr.get(bit, "unknown"))) for bit in extra]
attrs = ', '.join(['%s: %r' % data for data in raw_attrs])
print(u"%s%s: %s" % (ident, node.title, attrs))
_rec(node.children, level + 1)
_rec(nodes)
def assertObjectExist(self, qs, **filter):
try:
return qs.get(**filter)
except ObjectDoesNotExist:
pass
raise self.failureException("ObjectDoesNotExist raised for filter %s" % filter)
def assertObjectDoesNotExist(self, qs, **filter):
try:
qs.get(**filter)
except ObjectDoesNotExist:
return
raise self.failureException("ObjectDoesNotExist not raised for filter %s" % filter)
def copy_page(self, page, target_page):
from cms.utils.page import get_available_slug
data = {
'position': 'last-child',
'target': target_page.pk,
'site': 1,
'copy_permissions': 'on',
'copy_moderation': 'on',
}
response = self.client.post(URL_CMS_PAGE + "%d/copy-page/" % page.pk, data)
self.assertEqual(response.status_code, 200)
# Altered to reflect the new django-js jsonified response messages
expected = {"status": 200, "content": "ok"}
self.assertEqual(json.loads(response.content.decode('utf8')), expected)
title = page.title_set.all()[0]
copied_slug = get_available_slug(title)
copied_page = self.assertObjectExist(Page.objects, title_set__slug=copied_slug, parent=target_page)
return copied_page
def move_page(self, page, target_page, position="first-child"):
page.move_page(target_page, position)
return self.reload_page(page)
def reload_page(self, page):
"""
Returns a fresh instance of the page from the database
"""
return self.reload(page)
def reload(self, obj):
return obj.__class__.objects.get(pk=obj.pk)
def get_pages_root(self):
return unquote(reverse("pages-root"))
def get_context(self, path=None, page=None):
if not path:
path = self.get_pages_root()
context = {}
request = self.get_request(path, page=page)
context['request'] = request
return Context(context)
def get_request(self, path=None, language=None, post_data=None, enforce_csrf_checks=False, page=None):
factory = RequestFactory()
if not path:
path = self.get_pages_root()
if not language:
if settings.USE_I18N:
language = settings.LANGUAGES[0][0]
else:
language = settings.LANGUAGE_CODE
if post_data:
request = factory.post(path, post_data)
else:
request = factory.get(path)
request.session = self.client.session
request.user = getattr(self, 'user', AnonymousUser())
request.LANGUAGE_CODE = language
request._dont_enforce_csrf_checks = not enforce_csrf_checks
if page:
request.current_page = page
else:
request.current_page = None
class MockStorage(object):
def __len__(self):
return 0
def __iter__(self):
return iter([])
def add(self, level, message, extra_tags=''):
pass
def update(self, response):
pass
request._messages = MockStorage()
return request
def check_published_page_attributes(self, page):
public_page = page.publisher_public
if page.parent:
self.assertEqual(page.parent_id, public_page.parent.publisher_draft.id)
self.assertEqual(page.level, public_page.level)
# TODO: add check for siblings
draft_siblings = list(page.get_siblings(True).filter(
publisher_is_draft=True
).order_by('tree_id', 'parent', 'lft'))
public_siblings = list(public_page.get_siblings(True).filter(
publisher_is_draft=False
).order_by('tree_id', 'parent', 'lft'))
skip = 0
for i, sibling in enumerate(draft_siblings):
if not sibling.publisher_public_id:
skip += 1
continue
self.assertEqual(sibling.id,
public_siblings[i - skip].publisher_draft.id)
def failUnlessWarns(self, category, message, f, *args, **kwargs):
warningsShown = []
result = _collectWarnings(warningsShown.append, f, *args, **kwargs)
if not warningsShown:
self.fail("No warnings emitted")
first = warningsShown[0]
for other in warningsShown[1:]:
if ((other.message, other.category)
!= (first.message, first.category)):
self.fail("Can't handle different warnings")
self.assertEqual(first.message, message)
self.assertTrue(first.category is category)
return result
assertWarns = failUnlessWarns
class CMSTestCase(BaseCMSTestCase, testcases.TestCase):
pass
class TransactionCMSTestCase(BaseCMSTestCase, testcases.TransactionTestCase):
pass
class SettingsOverrideTestCase(CMSTestCase):
settings_overrides = {}
def _pre_setup(self):
self._enter_settings_override()
super(SettingsOverrideTestCase, self)._pre_setup()
def _enter_settings_override(self):
self._settings_ctx_manager = SettingsOverride(**self.settings_overrides)
self._settings_ctx_manager.__enter__()
def _post_teardown(self):
super(SettingsOverrideTestCase, self)._post_teardown()
self._exit_settings_override()
def _exit_settings_override(self):
self._settings_ctx_manager.__exit__(None, None, None)
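# Illustrative sketch (not part of the original test utilities): a minimal
# test case built on the helpers above.  The expected 302 redirect is an
# assumption about the admin add-page view's behaviour on success.
class _ExampleSmokeTestCase(CMSTestCase):
    def test_superuser_can_add_page(self):
        superuser = self.get_superuser()
        with self.login_user_context(superuser):
            page_data = self.get_new_page_data()
            response = self.client.post(URL_CMS_PAGE_ADD, page_data)
            self.assertEqual(response.status_code, 302)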
|
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.ml
~~~~~~~~~~~~~~~~~~
Lexers for ML family languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, default, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
__all__ = ['SMLLexer', 'OcamlLexer', 'OpaLexer']
class SMLLexer(RegexLexer):
"""
For the Standard ML language.
.. versionadded:: 1.5
"""
name = 'Standard ML'
aliases = ['sml']
filenames = ['*.sml', '*.sig', '*.fun']
mimetypes = ['text/x-standardml', 'application/x-standardml']
alphanumid_reserved = set((
# Core
'abstype', 'and', 'andalso', 'as', 'case', 'datatype', 'do', 'else',
'end', 'exception', 'fn', 'fun', 'handle', 'if', 'in', 'infix',
'infixr', 'let', 'local', 'nonfix', 'of', 'op', 'open', 'orelse',
'raise', 'rec', 'then', 'type', 'val', 'with', 'withtype', 'while',
# Modules
'eqtype', 'functor', 'include', 'sharing', 'sig', 'signature',
'struct', 'structure', 'where',
))
symbolicid_reserved = set((
# Core
':', '\|', '=', '=>', '->', '#',
# Modules
':>',
))
nonid_reserved = set(('(', ')', '[', ']', '{', '}', ',', ';', '...', '_'))
alphanumid_re = r"[a-zA-Z][\w']*"
symbolicid_re = r"[!%&$#+\-/:<=>?@\\~`^|*]+"
# A character constant is a sequence of the form #s, where s is a string
# constant denoting a string of size one character. This setup just parses
# the entire string as either a String.Double or a String.Char (depending
    # on the argument), even if the String.Char is an erroneous
# multiple-character string.
def stringy(whatkind):
return [
(r'[^"\\]', whatkind),
(r'\\[\\"abtnvfr]', String.Escape),
# Control-character notation is used for codes < 32,
# where \^@ == \000
(r'\\\^[\x40-\x5e]', String.Escape),
# Docs say 'decimal digits'
(r'\\[0-9]{3}', String.Escape),
(r'\\u[0-9a-fA-F]{4}', String.Escape),
(r'\\\s+\\', String.Interpol),
(r'"', whatkind, '#pop'),
]
# Callbacks for distinguishing tokens and reserved words
def long_id_callback(self, match):
if match.group(1) in self.alphanumid_reserved:
token = Error
else:
token = Name.Namespace
yield match.start(1), token, match.group(1)
yield match.start(2), Punctuation, match.group(2)
def end_id_callback(self, match):
if match.group(1) in self.alphanumid_reserved:
token = Error
elif match.group(1) in self.symbolicid_reserved:
token = Error
else:
token = Name
yield match.start(1), token, match.group(1)
def id_callback(self, match):
str = match.group(1)
if str in self.alphanumid_reserved:
token = Keyword.Reserved
elif str in self.symbolicid_reserved:
token = Punctuation
else:
token = Name
yield match.start(1), token, str
tokens = {
# Whitespace and comments are (almost) everywhere
'whitespace': [
(r'\s+', Text),
(r'\(\*', Comment.Multiline, 'comment'),
],
'delimiters': [
# This lexer treats these delimiters specially:
# Delimiters define scopes, and the scope is how the meaning of
# the `|' is resolved - is it a case/handle expression, or function
# definition by cases? (This is not how the Definition works, but
# it's how MLton behaves, see http://mlton.org/SMLNJDeviations)
(r'\(|\[|\{', Punctuation, 'main'),
(r'\)|\]|\}', Punctuation, '#pop'),
(r'\b(let|if|local)\b(?!\')', Keyword.Reserved, ('main', 'main')),
(r'\b(struct|sig|while)\b(?!\')', Keyword.Reserved, 'main'),
(r'\b(do|else|end|in|then)\b(?!\')', Keyword.Reserved, '#pop'),
],
'core': [
# Punctuation that doesn't overlap symbolic identifiers
(r'(%s)' % '|'.join(re.escape(z) for z in nonid_reserved),
Punctuation),
# Special constants: strings, floats, numbers in decimal and hex
(r'#"', String.Char, 'char'),
(r'"', String.Double, 'string'),
(r'~?0x[0-9a-fA-F]+', Number.Hex),
(r'0wx[0-9a-fA-F]+', Number.Hex),
(r'0w\d+', Number.Integer),
(r'~?\d+\.\d+[eE]~?\d+', Number.Float),
(r'~?\d+\.\d+', Number.Float),
(r'~?\d+[eE]~?\d+', Number.Float),
(r'~?\d+', Number.Integer),
# Labels
(r'#\s*[1-9][0-9]*', Name.Label),
(r'#\s*(%s)' % alphanumid_re, Name.Label),
(r'#\s+(%s)' % symbolicid_re, Name.Label),
# Some reserved words trigger a special, local lexer state change
(r'\b(datatype|abstype)\b(?!\')', Keyword.Reserved, 'dname'),
(r'(?=\b(exception)\b(?!\'))', Text, ('ename')),
(r'\b(functor|include|open|signature|structure)\b(?!\')',
Keyword.Reserved, 'sname'),
(r'\b(type|eqtype)\b(?!\')', Keyword.Reserved, 'tname'),
# Regular identifiers, long and otherwise
(r'\'[\w\']*', Name.Decorator),
(r'(%s)(\.)' % alphanumid_re, long_id_callback, "dotted"),
(r'(%s)' % alphanumid_re, id_callback),
(r'(%s)' % symbolicid_re, id_callback),
],
'dotted': [
(r'(%s)(\.)' % alphanumid_re, long_id_callback),
(r'(%s)' % alphanumid_re, end_id_callback, "#pop"),
(r'(%s)' % symbolicid_re, end_id_callback, "#pop"),
(r'\s+', Error),
(r'\S+', Error),
],
# Main parser (prevents errors in files that have scoping errors)
'root': [
default('main')
],
# In this scope, I expect '|' to not be followed by a function name,
# and I expect 'and' to be followed by a binding site
'main': [
include('whitespace'),
# Special behavior of val/and/fun
(r'\b(val|and)\b(?!\')', Keyword.Reserved, 'vname'),
(r'\b(fun)\b(?!\')', Keyword.Reserved,
('#pop', 'main-fun', 'fname')),
include('delimiters'),
include('core'),
(r'\S+', Error),
],
# In this scope, I expect '|' and 'and' to be followed by a function
'main-fun': [
include('whitespace'),
(r'\s', Text),
(r'\(\*', Comment.Multiline, 'comment'),
# Special behavior of val/and/fun
(r'\b(fun|and)\b(?!\')', Keyword.Reserved, 'fname'),
(r'\b(val)\b(?!\')', Keyword.Reserved,
('#pop', 'main', 'vname')),
# Special behavior of '|' and '|'-manipulating keywords
(r'\|', Punctuation, 'fname'),
(r'\b(case|handle)\b(?!\')', Keyword.Reserved,
('#pop', 'main')),
include('delimiters'),
include('core'),
(r'\S+', Error),
],
# Character and string parsers
'char': stringy(String.Char),
'string': stringy(String.Double),
'breakout': [
(r'(?=\b(%s)\b(?!\'))' % '|'.join(alphanumid_reserved), Text, '#pop'),
],
# Dealing with what comes after module system keywords
'sname': [
include('whitespace'),
include('breakout'),
(r'(%s)' % alphanumid_re, Name.Namespace),
default('#pop'),
],
# Dealing with what comes after the 'fun' (or 'and' or '|') keyword
'fname': [
include('whitespace'),
(r'\'[\w\']*', Name.Decorator),
(r'\(', Punctuation, 'tyvarseq'),
(r'(%s)' % alphanumid_re, Name.Function, '#pop'),
(r'(%s)' % symbolicid_re, Name.Function, '#pop'),
# Ignore interesting function declarations like "fun (x + y) = ..."
default('#pop'),
],
# Dealing with what comes after the 'val' (or 'and') keyword
'vname': [
include('whitespace'),
(r'\'[\w\']*', Name.Decorator),
(r'\(', Punctuation, 'tyvarseq'),
(r'(%s)(\s*)(=(?!%s))' % (alphanumid_re, symbolicid_re),
bygroups(Name.Variable, Text, Punctuation), '#pop'),
(r'(%s)(\s*)(=(?!%s))' % (symbolicid_re, symbolicid_re),
bygroups(Name.Variable, Text, Punctuation), '#pop'),
(r'(%s)' % alphanumid_re, Name.Variable, '#pop'),
(r'(%s)' % symbolicid_re, Name.Variable, '#pop'),
# Ignore interesting patterns like 'val (x, y)'
default('#pop'),
],
# Dealing with what comes after the 'type' (or 'and') keyword
'tname': [
include('whitespace'),
include('breakout'),
(r'\'[\w\']*', Name.Decorator),
(r'\(', Punctuation, 'tyvarseq'),
(r'=(?!%s)' % symbolicid_re, Punctuation, ('#pop', 'typbind')),
(r'(%s)' % alphanumid_re, Keyword.Type),
(r'(%s)' % symbolicid_re, Keyword.Type),
(r'\S+', Error, '#pop'),
],
# A type binding includes most identifiers
'typbind': [
include('whitespace'),
(r'\b(and)\b(?!\')', Keyword.Reserved, ('#pop', 'tname')),
include('breakout'),
include('core'),
(r'\S+', Error, '#pop'),
],
# Dealing with what comes after the 'datatype' (or 'and') keyword
'dname': [
include('whitespace'),
include('breakout'),
(r'\'[\w\']*', Name.Decorator),
(r'\(', Punctuation, 'tyvarseq'),
(r'(=)(\s*)(datatype)',
bygroups(Punctuation, Text, Keyword.Reserved), '#pop'),
(r'=(?!%s)' % symbolicid_re, Punctuation,
('#pop', 'datbind', 'datcon')),
(r'(%s)' % alphanumid_re, Keyword.Type),
(r'(%s)' % symbolicid_re, Keyword.Type),
(r'\S+', Error, '#pop'),
],
# common case - A | B | C of int
'datbind': [
include('whitespace'),
(r'\b(and)\b(?!\')', Keyword.Reserved, ('#pop', 'dname')),
(r'\b(withtype)\b(?!\')', Keyword.Reserved, ('#pop', 'tname')),
(r'\b(of)\b(?!\')', Keyword.Reserved),
(r'(\|)(\s*)(%s)' % alphanumid_re,
bygroups(Punctuation, Text, Name.Class)),
(r'(\|)(\s+)(%s)' % symbolicid_re,
bygroups(Punctuation, Text, Name.Class)),
include('breakout'),
include('core'),
(r'\S+', Error),
],
# Dealing with what comes after an exception
'ename': [
include('whitespace'),
(r'(exception|and)\b(\s+)(%s)' % alphanumid_re,
bygroups(Keyword.Reserved, Text, Name.Class)),
(r'(exception|and)\b(\s*)(%s)' % symbolicid_re,
bygroups(Keyword.Reserved, Text, Name.Class)),
(r'\b(of)\b(?!\')', Keyword.Reserved),
include('breakout'),
include('core'),
(r'\S+', Error),
],
'datcon': [
include('whitespace'),
(r'(%s)' % alphanumid_re, Name.Class, '#pop'),
(r'(%s)' % symbolicid_re, Name.Class, '#pop'),
(r'\S+', Error, '#pop'),
],
# Series of type variables
'tyvarseq': [
(r'\s', Text),
(r'\(\*', Comment.Multiline, 'comment'),
(r'\'[\w\']*', Name.Decorator),
(alphanumid_re, Name),
(r',', Punctuation),
(r'\)', Punctuation, '#pop'),
(symbolicid_re, Name),
],
'comment': [
(r'[^(*)]', Comment.Multiline),
(r'\(\*', Comment.Multiline, '#push'),
(r'\*\)', Comment.Multiline, '#pop'),
(r'[(*)]', Comment.Multiline),
],
}
class OcamlLexer(RegexLexer):
"""
For the OCaml language.
.. versionadded:: 0.7
"""
name = 'OCaml'
aliases = ['ocaml']
filenames = ['*.ml', '*.mli', '*.mll', '*.mly']
mimetypes = ['text/x-ocaml']
keywords = (
'as', 'assert', 'begin', 'class', 'constraint', 'do', 'done',
'downto', 'else', 'end', 'exception', 'external', 'false',
'for', 'fun', 'function', 'functor', 'if', 'in', 'include',
'inherit', 'initializer', 'lazy', 'let', 'match', 'method',
'module', 'mutable', 'new', 'object', 'of', 'open', 'private',
'raise', 'rec', 'sig', 'struct', 'then', 'to', 'true', 'try',
'type', 'value', 'val', 'virtual', 'when', 'while', 'with',
)
keyopts = (
'!=', '#', '&', '&&', r'\(', r'\)', r'\*', r'\+', ',', '-',
r'-\.', '->', r'\.', r'\.\.', ':', '::', ':=', ':>', ';', ';;', '<',
'<-', '=', '>', '>]', r'>\}', r'\?', r'\?\?', r'\[', r'\[<', r'\[>',
r'\[\|', ']', '_', '`', r'\{', r'\{<', r'\|', r'\|]', r'\}', '~'
)
operators = r'[!$%&*+\./:<=>?@^|~-]'
word_operators = ('and', 'asr', 'land', 'lor', 'lsl', 'lxor', 'mod', 'or')
prefix_syms = r'[!?~]'
infix_syms = r'[=<>@^|&+\*/$%-]'
primitives = ('unit', 'int', 'float', 'bool', 'string', 'char', 'list', 'array')
tokens = {
'escape-sequence': [
(r'\\[\\"\'ntbr]', String.Escape),
(r'\\[0-9]{3}', String.Escape),
(r'\\x[0-9a-fA-F]{2}', String.Escape),
],
'root': [
(r'\s+', Text),
(r'false|true|\(\)|\[\]', Name.Builtin.Pseudo),
(r'\b([A-Z][\w\']*)(?=\s*\.)', Name.Namespace, 'dotted'),
(r'\b([A-Z][\w\']*)', Name.Class),
(r'\(\*(?![)])', Comment, 'comment'),
(r'\b(%s)\b' % '|'.join(keywords), Keyword),
(r'(%s)' % '|'.join(keyopts[::-1]), Operator),
(r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
(r'\b(%s)\b' % '|'.join(word_operators), Operator.Word),
(r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),
(r"[^\W\d][\w']*", Name),
(r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float),
(r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex),
(r'0[oO][0-7][0-7_]*', Number.Oct),
(r'0[bB][01][01_]*', Number.Bin),
(r'\d[\d_]*', Number.Integer),
(r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'",
String.Char),
(r"'.'", String.Char),
(r"'", Keyword), # a stray quote is another syntax element
(r'"', String.Double, 'string'),
(r'[~?][a-z][\w\']*:', Name.Variable),
],
'comment': [
(r'[^(*)]+', Comment),
(r'\(\*', Comment, '#push'),
(r'\*\)', Comment, '#pop'),
(r'[(*)]', Comment),
],
'string': [
(r'[^\\"]+', String.Double),
include('escape-sequence'),
(r'\\\n', String.Double),
(r'"', String.Double, '#pop'),
],
'dotted': [
(r'\s+', Text),
(r'\.', Punctuation),
(r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace),
(r'[A-Z][\w\']*', Name.Class, '#pop'),
(r'[a-z_][\w\']*', Name, '#pop'),
default('#pop'),
],
}
class OpaLexer(RegexLexer):
"""
Lexer for the Opa language (http://opalang.org).
.. versionadded:: 1.5
"""
name = 'Opa'
aliases = ['opa']
filenames = ['*.opa']
mimetypes = ['text/x-opa']
# most of these aren't strictly keywords
# but if you color only real keywords, you might just
# as well not color anything
keywords = (
'and', 'as', 'begin', 'case', 'client', 'css', 'database', 'db', 'do',
'else', 'end', 'external', 'forall', 'function', 'if', 'import',
'match', 'module', 'or', 'package', 'parser', 'rec', 'server', 'then',
'type', 'val', 'with', 'xml_parser',
)
# matches both stuff and `stuff`
ident_re = r'(([a-zA-Z_]\w*)|(`[^`]*`))'
op_re = r'[.=\-<>,@~%/+?*&^!]'
punc_re = r'[()\[\],;|]' # '{' and '}' are treated elsewhere
# because they are also used for inserts
tokens = {
# copied from the caml lexer, should be adapted
'escape-sequence': [
(r'\\[\\"\'ntr}]', String.Escape),
(r'\\[0-9]{3}', String.Escape),
(r'\\x[0-9a-fA-F]{2}', String.Escape),
],
# factorizing these rules, because they are inserted many times
'comments': [
(r'/\*', Comment, 'nested-comment'),
(r'//.*?$', Comment),
],
'comments-and-spaces': [
include('comments'),
(r'\s+', Text),
],
'root': [
include('comments-and-spaces'),
# keywords
(words(keywords, prefix=r'\b', suffix=r'\b'), Keyword),
# directives
# we could parse the actual set of directives instead of anything
# starting with @, but this is troublesome
# because it needs to be adjusted all the time
# and assuming we parse only sources that compile, it is useless
(r'@' + ident_re + r'\b', Name.Builtin.Pseudo),
# number literals
(r'-?.[\d]+([eE][+\-]?\d+)', Number.Float),
(r'-?\d+.\d*([eE][+\-]?\d+)', Number.Float),
(r'-?\d+[eE][+\-]?\d+', Number.Float),
(r'0[xX][\da-fA-F]+', Number.Hex),
(r'0[oO][0-7]+', Number.Oct),
(r'0[bB][01]+', Number.Bin),
(r'\d+', Number.Integer),
# color literals
(r'#[\da-fA-F]{3,6}', Number.Integer),
# string literals
(r'"', String.Double, 'string'),
# char literal, should be checked because this is the regexp from
# the caml lexer
(r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2})|.)'",
String.Char),
# this is meant to deal with embedded exprs in strings
# every time we find a '}' we pop a state so that if we were
# inside a string, we are back in the string state
# as a consequence, we must also push a state every time we find a
# '{' or else we will have errors when parsing {} for instance
(r'\{', Operator, '#push'),
(r'\}', Operator, '#pop'),
# html literals
            # this is much stricter than the actual parser,
# since a<b would not be parsed as html
# but then again, the parser is way too lax, and we can't hope
# to have something as tolerant
(r'<(?=[a-zA-Z>])', String.Single, 'html-open-tag'),
# db path
# matching the '[_]' in '/a[_]' because it is a part
# of the syntax of the db path definition
# unfortunately, i don't know how to match the ']' in
# /a[1], so this is somewhat inconsistent
(r'[@?!]?(/\w+)+(\[_\])?', Name.Variable),
# putting the same color on <- as on db path, since
# it can be used only to mean Db.write
(r'<-(?!'+op_re+r')', Name.Variable),
# 'modules'
# although modules are not distinguished by their names as in caml
            # the standard library seems to follow the convention that
            # only modules are capitalized
(r'\b([A-Z]\w*)(?=\.)', Name.Namespace),
# operators
# = has a special role because this is the only
            # way to syntactically distinguish binding constructions
# unfortunately, this colors the equal in {x=2} too
(r'=(?!'+op_re+r')', Keyword),
(r'(%s)+' % op_re, Operator),
(r'(%s)+' % punc_re, Operator),
# coercions
(r':', Operator, 'type'),
# type variables
# we need this rule because we don't parse specially type
# definitions so in "type t('a) = ...", "'a" is parsed by 'root'
("'"+ident_re, Keyword.Type),
# id literal, #something, or #{expr}
(r'#'+ident_re, String.Single),
(r'#(?=\{)', String.Single),
# identifiers
            # this avoids coloring the '2' in 'a2' as an integer
(ident_re, Text),
# default, not sure if that is needed or not
# (r'.', Text),
],
# it is quite painful to have to parse types to know where they end
# this is the general rule for a type
# a type is either:
# * -> ty
# * type-with-slash
# * type-with-slash -> ty
# * type-with-slash (, type-with-slash)+ -> ty
#
# the code is pretty funky in here, but this code would roughly
# translate in caml to:
# let rec type stream =
# match stream with
# | [< "->"; stream >] -> type stream
# | [< ""; stream >] ->
# type_with_slash stream
# type_lhs_1 stream;
# and type_1 stream = ...
'type': [
include('comments-and-spaces'),
(r'->', Keyword.Type),
default(('#pop', 'type-lhs-1', 'type-with-slash')),
],
# parses all the atomic or closed constructions in the syntax of type
# expressions: record types, tuple types, type constructors, basic type
# and type variables
'type-1': [
include('comments-and-spaces'),
(r'\(', Keyword.Type, ('#pop', 'type-tuple')),
(r'~?\{', Keyword.Type, ('#pop', 'type-record')),
(ident_re+r'\(', Keyword.Type, ('#pop', 'type-tuple')),
(ident_re, Keyword.Type, '#pop'),
("'"+ident_re, Keyword.Type),
# this case is not in the syntax but sometimes
# we think we are parsing types when in fact we are parsing
# some css, so we just pop the states until we get back into
# the root state
default('#pop'),
],
# type-with-slash is either:
# * type-1
# * type-1 (/ type-1)+
'type-with-slash': [
include('comments-and-spaces'),
default(('#pop', 'slash-type-1', 'type-1')),
],
'slash-type-1': [
include('comments-and-spaces'),
('/', Keyword.Type, ('#pop', 'type-1')),
# same remark as above
default('#pop'),
],
# we go in this state after having parsed a type-with-slash
# while trying to parse a type
# and at this point we must determine if we are parsing an arrow
# type (in which case we must continue parsing) or not (in which
# case we stop)
'type-lhs-1': [
include('comments-and-spaces'),
(r'->', Keyword.Type, ('#pop', 'type')),
(r'(?=,)', Keyword.Type, ('#pop', 'type-arrow')),
default('#pop'),
],
'type-arrow': [
include('comments-and-spaces'),
            # the look-ahead here allows us to parse f(x : int, y : float -> truc)
# correctly
(r',(?=[^:]*?->)', Keyword.Type, 'type-with-slash'),
(r'->', Keyword.Type, ('#pop', 'type')),
# same remark as above
default('#pop'),
],
# no need to do precise parsing for tuples and records
# because they are closed constructions, so we can simply
# find the closing delimiter
        # note that this function would not work if the source
# contained identifiers like `{)` (although it could be patched
# to support it)
'type-tuple': [
include('comments-and-spaces'),
(r'[^()/*]+', Keyword.Type),
(r'[/*]', Keyword.Type),
(r'\(', Keyword.Type, '#push'),
(r'\)', Keyword.Type, '#pop'),
],
'type-record': [
include('comments-and-spaces'),
(r'[^{}/*]+', Keyword.Type),
(r'[/*]', Keyword.Type),
(r'\{', Keyword.Type, '#push'),
(r'\}', Keyword.Type, '#pop'),
],
# 'type-tuple': [
# include('comments-and-spaces'),
# (r'\)', Keyword.Type, '#pop'),
# default(('#pop', 'type-tuple-1', 'type-1')),
# ],
# 'type-tuple-1': [
# include('comments-and-spaces'),
# (r',?\s*\)', Keyword.Type, '#pop'), # ,) is a valid end of tuple, in (1,)
# (r',', Keyword.Type, 'type-1'),
# ],
# 'type-record':[
# include('comments-and-spaces'),
# (r'\}', Keyword.Type, '#pop'),
# (r'~?(?:\w+|`[^`]*`)', Keyword.Type, 'type-record-field-expr'),
# ],
# 'type-record-field-expr': [
#
# ],
'nested-comment': [
(r'[^/*]+', Comment),
(r'/\*', Comment, '#push'),
(r'\*/', Comment, '#pop'),
(r'[/*]', Comment),
],
# the copy pasting between string and single-string
# is kinda sad. Is there a way to avoid that??
'string': [
(r'[^\\"{]+', String.Double),
(r'"', String.Double, '#pop'),
(r'\{', Operator, 'root'),
include('escape-sequence'),
],
'single-string': [
(r'[^\\\'{]+', String.Double),
(r'\'', String.Double, '#pop'),
(r'\{', Operator, 'root'),
include('escape-sequence'),
],
# all the html stuff
# can't really reuse some existing html parser
# because we must be able to parse embedded expressions
# we are in this state after someone parsed the '<' that
# started the html literal
'html-open-tag': [
(r'[\w\-:]+', String.Single, ('#pop', 'html-attr')),
(r'>', String.Single, ('#pop', 'html-content')),
],
# we are in this state after someone parsed the '</' that
# started the end of the closing tag
'html-end-tag': [
# this is a star, because </> is allowed
(r'[\w\-:]*>', String.Single, '#pop'),
],
# we are in this state after having parsed '<ident(:ident)?'
# we thus parse a possibly empty list of attributes
'html-attr': [
(r'\s+', Text),
(r'[\w\-:]+=', String.Single, 'html-attr-value'),
(r'/>', String.Single, '#pop'),
(r'>', String.Single, ('#pop', 'html-content')),
],
'html-attr-value': [
(r"'", String.Single, ('#pop', 'single-string')),
(r'"', String.Single, ('#pop', 'string')),
(r'#'+ident_re, String.Single, '#pop'),
(r'#(?=\{)', String.Single, ('#pop', 'root')),
(r'[^"\'{`=<>]+', String.Single, '#pop'),
(r'\{', Operator, ('#pop', 'root')), # this is a tail call!
],
# we should probably deal with '\' escapes here
'html-content': [
(r'<!--', Comment, 'html-comment'),
(r'</', String.Single, ('#pop', 'html-end-tag')),
(r'<', String.Single, 'html-open-tag'),
(r'\{', Operator, 'root'),
(r'[^<{]+', String.Single),
],
'html-comment': [
(r'-->', Comment, '#pop'),
(r'[^\-]+|-', Comment),
],
}
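# Illustrative sketch (not part of the original module): feeding one of the
# lexers above through pygments.highlight.  The SML snippet is arbitrary.
def _example_highlight_sml():
    from pygments import highlight
    from pygments.formatters import NullFormatter
    code = "fun fact 0 = 1\n  | fact n = n * fact (n - 1)\n"
    return highlight(code, SMLLexer(), NullFormatter())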
|
|
## Copyright (c) 2003 Henk Punt
## Permission is hereby granted, free of charge, to any person obtaining
## a copy of this software and associated documentation files (the
## "Software"), to deal in the Software without restriction, including
## without limitation the rights to use, copy, modify, merge, publish,
## distribute, sublicense, and/or sell copies of the Software, and to
## permit persons to whom the Software is furnished to do so, subject to
## the following conditions:
## The above copyright notice and this permission notice shall be
## included in all copies or substantial portions of the Software.
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
## EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
## MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
## NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
## LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
## OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
## WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
from venster.windows import *
from venster.comctl import *
WM_TRACK = WM_USER + 1
class ToolBar(ToolBar): #add some styles to the comctl ToolBar
_window_style_ = ToolBar._window_style_ | CCS_NODIVIDER |\
CCS_NORESIZE | CCS_NOPARENTALIGN | TBSTYLE_FLAT | TBSTYLE_LIST
class CommandBar(ToolBar):
def __init__(self, *args, **kwargs):
ToolBar.__init__(self, *args, **kwargs)
self.SendMessage(TB_SETIMAGELIST, 0, 0)
self.SendMessage(TB_SETDRAWTEXTFLAGS, DT_HIDEPREFIX, DT_HIDEPREFIX)
self.parentNotify = self.Intercept(self.GetParent(), self._msg_map2_)
#TODO remove on dispose
GetMessageLoop().AddFilter(self.PreTranslateMessage)
self.cbHook = MessageProc(self.OnHook)
#TODO remove on dispose
self.iCurMenu = -1
self.inTrackPopupMenu = 0
self.isHotTracking = 0
self.inCancel = 0
self.menuActive = 0
self.atPopupItem = 0 #>0 when menu item is selected which will popup submenu
def OnHook(self, nCode, wParam, lParam):
if nCode > 0:
msg = MSG.from_address(int(lParam))
#print msg
result, handled = self._msg_hook_.DispatchMSG(self, msg)
if handled:
return 1 #prevent other hooks from processing
return CallNextHookEx(self.hHook, nCode, wParam, lParam)
def MenuActiveToggle(self):
if not self.menuActive:
SendMessage(self.handle, TB_SETDRAWTEXTFLAGS, DT_HIDEPREFIX, 0)
self.menuActive = 1
else:
SendMessage(self.handle, TB_SETDRAWTEXTFLAGS, DT_HIDEPREFIX, DT_HIDEPREFIX)
self.menuActive = 0
self.Invalidate()
self.UpdateWindow()
def AttachMenu(self, menu):
idc = 0
self.menu = menu
self.SetRedraw(FALSE)
minfo = MENUITEMINFO()
strbuff = '\0' * 255
for i in range(GetMenuItemCount(handle(menu))):
minfo.cbSize = sizeof(MENUITEMINFO)
minfo.fMask = MIIM_STATE | MIIM_TYPE | MIIM_SUBMENU
minfo.fType = MFT_STRING
minfo.cch = 255 #TODO max
minfo.dwTypeData = strbuff
GetMenuItemInfo(menu.handle, i, 1, byref(minfo))
tbButt = TBBUTTON()
tbButt.iBitmap = I_IMAGENONE
tbButt.idCommand = i
tbButt.fsState = TBSTATE_ENABLED
tbButt.fsStyle = TBSTYLE_BUTTON | TBSTYLE_AUTOSIZE | TBSTYLE_DROPDOWN
tbButt.dwData = 0
tbButt.iString = 0
SendMessage(self.handle, TB_INSERTBUTTON, -1, byref(tbButt))
bi = TBBUTTONINFO()
bi.cbSize = sizeof(TBBUTTONINFO)
bi.dwMask = TBIF_TEXT
bi.pszText = " " + minfo.dwTypeData
SendMessage(self.handle, TB_SETBUTTONINFO, i, byref(bi))
self.SetRedraw(TRUE)
self.Invalidate()
self.UpdateWindow()
def OnDropDown(self, event):
if event.nmhdr.hwndFrom != self.handle: return
#print "dropdown"
nmtoolbar = NMTOOLBAR.from_address(int(event.lParam))
self.isHotTracking = 1
self.TrackPopupMenu(nmtoolbar.iItem)
def OnTrackPopupMenu(self, event):
self.Cancel()
print "track: ", event.wParam, self.inTrackPopupMenu, self.isHotTracking
self.TrackPopupMenu(event.wParam)
def TrackPopupMenu(self, iMenu):
print "tpm", iMenu
if iMenu < 0: return
self.iCurMenu = iMenu
self.PressButton(iMenu, 1)
self.hHook = SetWindowsHookEx(WH_MSGFILTER, self.cbHook, hInstance, GetCurrentThreadId())
#position popup menu right under button
btnRc = self.GetRect(iMenu)
pt = POINT(btnRc.left, btnRc.bottom)
self.ClientToScreen(pt)
self.inTrackPopupMenu = 1
print "inTrackPopupMenu"
self.menu.GetSubMenu(iMenu).TrackPopupMenuEx(TPM_LEFTBUTTON | TPM_VERTICAL |\
TPM_LEFTALIGN | TPM_TOPALIGN,\
int(pt.x), int(pt.y), self, 0)
print "exitTrackPopupMenu"
self.inTrackPopupMenu = 0
UnhookWindowsHookEx(self.hHook)
if not self.inCancel:
#drop out of hot tracking mode
self.isHotTracking = 0
self.inCancel = 0
self.PressButton(iMenu, 0)
def OnHotItemChange(self, event):
print "onhic"
nmhi = NMTBHOTITEM.from_address(int(event.lParam))
if nmhi.idNew != self.iCurMenu and not self.inTrackPopupMenu and self.isHotTracking:
self.TrackPopupMenu(nmhi.idNew)
def Cancel(self):
self.inCancel = 1
self.SendMessage(WM_CANCELMODE)
def OnHookMouseMove(self, event):
"""test if mouse moves out of current popup menu and cancels
it when so"""
pt = GET_POINT_LPARAM(event.lParam)
self.ScreenToClient(pt)
hit = self.HitTest(pt)
if hit >= 0 and hit != self.iCurMenu:
self.Cancel()
event.handled = 0
def OnKeyDown(self, event):
if event.wParam == VK_DOWN:
pass
## self.TrackPopupMenu(self.GetHotItem())
elif event.wParam == VK_LEFT:
print "left"
elif event.wParam == VK_RIGHT and not self.atPopupItem:
print "select next menu", self.iCurMenu
self.PostMessage(WM_TRACK, self.iCurMenu + 1)
event.handled = 0
def OnPtlSysKeyDown(self, event):
self.MenuActiveToggle()
event.handled = 0
def OnPtlSysKeyUp(self, event):
event.handled = 0
def PreTranslateMessage(self, msg):
self._msg_ptl_.DispatchMSG(self, msg)
def OnDestroy(self, event):
#print "destroy commandbar"
GetMessageLoop().RemoveFilter(self.PreTranslateMessage)
def OnMenuPopup(self, event):
print "omp"
def OnHookMenuSelect(self, event):
self.atPopupItem = HIWORD(event.wParam) & MF_POPUP
_msg_map_ = MSG_MAP([MSG_HANDLER(WM_DESTROY, OnDestroy),
MSG_HANDLER(WM_INITMENUPOPUP, OnMenuPopup),
MSG_HANDLER(WM_TRACK, OnTrackPopupMenu)])
    #parent notifications coming from common control
_msg_map2_ = MSG_MAP([NTF_HANDLER(TBN_DROPDOWN, OnDropDown),
NTF_HANDLER(TBN_HOTITEMCHANGE, OnHotItemChange)])
#msgs received during popup tracking from msg hook
_msg_hook_ = MSG_MAP([MSG_HANDLER(WM_MOUSEMOVE, OnHookMouseMove),
MSG_HANDLER(WM_KEYDOWN, OnKeyDown),
MSG_HANDLER(WM_MENUSELECT, OnHookMenuSelect)])
#pretranslate msgs
_msg_ptl_ = MSG_MAP([MSG_HANDLER(WM_KEYDOWN, OnKeyDown),
MSG_HANDLER(WM_SYSKEYDOWN, OnPtlSysKeyDown),
MSG_HANDLER(WM_SYSKEYUP, OnPtlSysKeyUp)])
class CoolBar(Rebar):
def AddSimpleRebarBandCtrl(self, ctrl, nID = 0, title = NULL, bNewRow = FALSE,
cxWidth = 0, bFullWidthAlways = FALSE):
hWndBand = ctrl.handle
#Get number of buttons on the toolbar
nBtnCount = SendMessage(hWndBand, TB_BUTTONCOUNT, 0, 0)
#Set band info structure
rbBand = REBARBANDINFO()
rbBand.cbSize = sizeof(REBARBANDINFO)
if WIN32_IE >= 0x0400:
rbBand.fMask = RBBIM_CHILD | RBBIM_CHILDSIZE | RBBIM_STYLE | RBBIM_ID | RBBIM_SIZE\
| RBBIM_IDEALSIZE
else:
rbBand.fMask = RBBIM_CHILD | RBBIM_CHILDSIZE | RBBIM_STYLE | RBBIM_ID | RBBIM_SIZE
if title != NULL:
rbBand.fMask |= RBBIM_TEXT
rbBand.fStyle = RBBS_CHILDEDGE
if WIN32_IE >= 0x0500 and nBtnCount > 0:
# add chevron style for toolbar with buttons
#rbBand.fStyle |= RBBS_USECHEVRON
#TODO find RBBS_USECHEVRON constant
pass
if bNewRow:
rbBand.fStyle |= RBBS_BREAK
if title != NULL:
rbBand.lpText = title
rbBand.hwndChild = hWndBand
if nID == 0: # calc band ID
nID = ATL_IDW_BAND_FIRST + SendMessage(self.handle, RB_GETBANDCOUNT, 0, 0)
rbBand.wID = nID
rcTmp = RECT()
if nBtnCount > 0:
bRet = SendMessage(hWndBand, TB_GETITEMRECT, nBtnCount - 1, byref(rcTmp))
if cxWidth != 0:
rbBand.cx = cxWidth
else:
rbBand.cx = rcTmp.right
rbBand.cyMinChild = rcTmp.bottom - rcTmp.top
if bFullWidthAlways:
rbBand.cxMinChild = rbBand.cx
elif title == 0:
SendMessage(hWndBand, TB_GETITEMRECT, 0, byref(rcTmp))
rbBand.cxMinChild = rcTmp.right
else:
rbBand.cxMinChild = 0
        else:  # no buttons, either not a toolbar or really has no buttons
GetWindowRect(hWndBand, byref(rcTmp))
if cxWidth != 0:
rbBand.cx = cxWidth
else:
rbBand.cx = rcTmp.right - rcTmp.left
if bFullWidthAlways:
rbBand.cxMinChild = rbBand.cx
else:
rbBand.cxMinChild = 0
rbBand.cyMinChild = rcTmp.bottom - rcTmp.top
if WIN32_IE >= 0x0400:
            rbBand.cxIdeal = rbBand.cx
#Add the band
SendMessage(self.handle, RB_INSERTBAND, -1, byref(rbBand))
#if WIN32_IE >= 0x0501:
# exStyle = SendMessage(hWndBand, TB_GETEXTENDEDSTYLE, 0, 0)
# SendMessage(hWndBand, TB_SETEXTENDEDSTYLE, 0, dwExStyle | \
# TBSTYLE_EX_HIDECLIPPEDBUTTONS)
|
|
# pylint: disable=too-many-lines
"""
Component to interface with cameras.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/camera/
"""
import asyncio
import collections
from contextlib import suppress
from datetime import timedelta
import logging
import hashlib
from random import SystemRandom
import aiohttp
from aiohttp import web
import async_timeout
from homeassistant.core import callback
from homeassistant.const import ATTR_ENTITY_PICTURE
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
from homeassistant.components.http import HomeAssistantView, KEY_AUTHENTICATED
from homeassistant.helpers.event import async_track_time_interval
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'camera'
DEPENDENCIES = ['http']
SCAN_INTERVAL = timedelta(seconds=30)
ENTITY_ID_FORMAT = DOMAIN + '.{}'
STATE_RECORDING = 'recording'
STATE_STREAMING = 'streaming'
STATE_IDLE = 'idle'
ENTITY_IMAGE_URL = '/api/camera_proxy/{0}?token={1}'
TOKEN_CHANGE_INTERVAL = timedelta(minutes=5)
_RND = SystemRandom()
@asyncio.coroutine
def async_get_image(hass, entity_id, timeout=10):
"""Fetch a image from a camera entity."""
websession = async_get_clientsession(hass)
state = hass.states.get(entity_id)
if state is None:
raise HomeAssistantError(
"No entity '{0}' for grab a image".format(entity_id))
url = "{0}{1}".format(
hass.config.api.base_url,
state.attributes.get(ATTR_ENTITY_PICTURE)
)
try:
with async_timeout.timeout(timeout, loop=hass.loop):
response = yield from websession.get(url)
if response.status != 200:
raise HomeAssistantError("Error {0} on {1}".format(
response.status, url))
image = yield from response.read()
return image
except (asyncio.TimeoutError, aiohttp.ClientError):
raise HomeAssistantError("Can't connect to {0}".format(url))
@asyncio.coroutine
def async_setup(hass, config):
"""Set up the camera component."""
component = EntityComponent(_LOGGER, DOMAIN, hass, SCAN_INTERVAL)
hass.http.register_view(CameraImageView(component.entities))
hass.http.register_view(CameraMjpegStream(component.entities))
yield from component.async_setup(config)
@callback
def update_tokens(time):
"""Update tokens of the entities."""
for entity in component.entities.values():
entity.async_update_token()
hass.async_add_job(entity.async_update_ha_state())
async_track_time_interval(hass, update_tokens, TOKEN_CHANGE_INTERVAL)
return True
class Camera(Entity):
"""The base class for camera entities."""
def __init__(self):
"""Initialize a camera."""
self.is_streaming = False
self.access_tokens = collections.deque([], 2)
self.async_update_token()
@property
def should_poll(self):
"""No need to poll cameras."""
return False
@property
def entity_picture(self):
"""Return a link to the camera feed as entity picture."""
return ENTITY_IMAGE_URL.format(self.entity_id, self.access_tokens[-1])
@property
def is_recording(self):
"""Return true if the device is recording."""
return False
@property
def brand(self):
"""Return the camera brand."""
return None
@property
def model(self):
"""Return the camera model."""
return None
def camera_image(self):
"""Return bytes of camera image."""
raise NotImplementedError()
def async_camera_image(self):
"""Return bytes of camera image.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.loop.run_in_executor(None, self.camera_image)
@asyncio.coroutine
def handle_async_mjpeg_stream(self, request):
"""Generate an HTTP MJPEG stream from camera images.
This method must be run in the event loop.
"""
response = web.StreamResponse()
response.content_type = ('multipart/x-mixed-replace; '
'boundary=--jpegboundary')
yield from response.prepare(request)
def write(img_bytes):
"""Write image to stream."""
response.write(bytes(
'--jpegboundary\r\n'
'Content-Type: image/jpeg\r\n'
'Content-Length: {}\r\n\r\n'.format(
len(img_bytes)), 'utf-8') + img_bytes + b'\r\n')
last_image = None
try:
while True:
img_bytes = yield from self.async_camera_image()
if not img_bytes:
break
if img_bytes and img_bytes != last_image:
write(img_bytes)
                    # Chrome seems to always ignore the first picture,
                    # so send it twice.
if last_image is None:
write(img_bytes)
last_image = img_bytes
yield from response.drain()
yield from asyncio.sleep(.5)
except asyncio.CancelledError:
_LOGGER.debug("Stream closed by frontend.")
response = None
finally:
if response is not None:
yield from response.write_eof()
@property
def state(self):
"""Return the camera state."""
if self.is_recording:
return STATE_RECORDING
elif self.is_streaming:
return STATE_STREAMING
else:
return STATE_IDLE
@property
def state_attributes(self):
"""Return the camera state attributes."""
attr = {
'access_token': self.access_tokens[-1],
}
if self.model:
attr['model_name'] = self.model
if self.brand:
attr['brand'] = self.brand
return attr
@callback
def async_update_token(self):
"""Update the used token."""
self.access_tokens.append(
hashlib.sha256(
_RND.getrandbits(256).to_bytes(32, 'little')).hexdigest())
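# Illustrative sketch (not part of the original component): the smallest
# possible camera entity, returning canned bytes.  The name and the payload
# are assumptions made purely for demonstration.
class _ExampleStaticCamera(Camera):
    """Hypothetical camera that always serves the same JPEG bytes."""

    @property
    def name(self):
        return 'example_static_camera'

    def camera_image(self):
        # A real platform would fetch a frame from the device here.
        return b'\xff\xd8\xff\xe0 fake jpeg \xff\xd9'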
class CameraView(HomeAssistantView):
"""Base CameraView."""
requires_auth = False
def __init__(self, entities):
"""Initialize a basic camera view."""
self.entities = entities
@asyncio.coroutine
def get(self, request, entity_id):
"""Start a GET request."""
camera = self.entities.get(entity_id)
if camera is None:
status = 404 if request[KEY_AUTHENTICATED] else 401
return web.Response(status=status)
authenticated = (request[KEY_AUTHENTICATED] or
request.GET.get('token') in camera.access_tokens)
if not authenticated:
return web.Response(status=401)
response = yield from self.handle(request, camera)
return response
@asyncio.coroutine
def handle(self, request, camera):
"""Handle the camera request."""
raise NotImplementedError()
class CameraImageView(CameraView):
"""Camera view to serve an image."""
url = '/api/camera_proxy/{entity_id}'
name = 'api:camera:image'
@asyncio.coroutine
def handle(self, request, camera):
"""Serve camera image."""
with suppress(asyncio.CancelledError, asyncio.TimeoutError):
with async_timeout.timeout(10, loop=request.app['hass'].loop):
image = yield from camera.async_camera_image()
if image:
return web.Response(body=image)
return web.Response(status=500)
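    # Illustrative request (entity id and token are hypothetical): an
    # unauthenticated client can still fetch
    # /api/camera_proxy/camera.demo?token=<access_token>, because
    # CameraView.get() accepts a matching entry from camera.access_tokens in
    # place of normal authentication.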
class CameraMjpegStream(CameraView):
"""Camera View to serve an MJPEG stream."""
url = '/api/camera_proxy_stream/{entity_id}'
name = 'api:camera:stream'
@asyncio.coroutine
def handle(self, request, camera):
"""Serve camera image."""
yield from camera.handle_async_mjpeg_stream(request)
|
|
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from eventlet import greenthread
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import oslo_messaging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import sqlalchemy as sa
from sqlalchemy.orm import exc
from sqlalchemy import sql
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import agent as ext_agent
from neutron.i18n import _LE, _LW
from neutron import manager
LOG = logging.getLogger(__name__)
AGENT_OPTS = [
cfg.IntOpt('agent_down_time', default=75,
help=_("Seconds to regard the agent is down; should be at "
"least twice report_interval, to be sure the "
"agent is down for good.")),
cfg.StrOpt('dhcp_load_type', default='networks',
choices=['networks', 'subnets', 'ports'],
               help=_('The resource type whose load is being reported by '
                      'the agent. This can be "networks", "subnets" or '
                      '"ports". When specified (default: networks), the '
                      'server will extract the corresponding load, sent '
                      'as part of the agent configuration object in the '
                      'agent report state, which is the number of '
                      'resources being consumed, at every '
                      'report_interval. dhcp_load_type can be used in '
                      'combination with network_scheduler_driver = '
                      'neutron.scheduler.dhcp_agent_scheduler.'
                      'WeightScheduler. When the network_scheduler_driver '
                      'is WeightScheduler, dhcp_load_type can be '
                      'configured to represent the choice for the '
                      'resource being balanced. '
                      'Example: dhcp_load_type=networks')),
]
cfg.CONF.register_opts(AGENT_OPTS)
class Agent(model_base.BASEV2, models_v2.HasId):
"""Represents agents running in neutron deployments."""
__table_args__ = (
sa.UniqueConstraint('agent_type', 'host',
name='uniq_agents0agent_type0host'),
model_base.BASEV2.__table_args__
)
# L3 agent, DHCP agent, OVS agent, LinuxBridge
agent_type = sa.Column(sa.String(255), nullable=False)
binary = sa.Column(sa.String(255), nullable=False)
# TOPIC is a fanout exchange topic
topic = sa.Column(sa.String(255), nullable=False)
# TOPIC.host is a target topic
host = sa.Column(sa.String(255), nullable=False)
admin_state_up = sa.Column(sa.Boolean, default=True,
server_default=sql.true(), nullable=False)
# the time when first report came from agents
created_at = sa.Column(sa.DateTime, nullable=False)
# the time when first report came after agents start
started_at = sa.Column(sa.DateTime, nullable=False)
# updated when agents report
heartbeat_timestamp = sa.Column(sa.DateTime, nullable=False)
# description is note for admin user
description = sa.Column(sa.String(255))
    # configurations: a JSON dict string; 4095 characters should be enough
configurations = sa.Column(sa.String(4095), nullable=False)
# load - number of resources hosted by the agent
load = sa.Column(sa.Integer, server_default='0', nullable=False)
@property
def is_active(self):
return not AgentDbMixin.is_agent_down(self.heartbeat_timestamp)
class AgentDbMixin(ext_agent.AgentPluginBase):
"""Mixin class to add agent extension to db_base_plugin_v2."""
def _get_agent(self, context, id):
try:
agent = self._get_by_id(context, Agent, id)
except exc.NoResultFound:
raise ext_agent.AgentNotFound(id=id)
return agent
def get_enabled_agent_on_host(self, context, agent_type, host):
"""Return agent of agent_type for the specified host."""
query = context.session.query(Agent)
query = query.filter(Agent.agent_type == agent_type,
Agent.host == host,
Agent.admin_state_up == sql.true())
try:
agent = query.one()
except exc.NoResultFound:
LOG.debug('No enabled %(agent_type)s agent on host '
'%(host)s', {'agent_type': agent_type, 'host': host})
return
if self.is_agent_down(agent.heartbeat_timestamp):
LOG.warn(_LW('%(agent_type)s agent %(agent_id)s is not active'),
{'agent_type': agent_type, 'agent_id': agent.id})
return agent
@classmethod
def is_agent_down(cls, heart_beat_time):
return timeutils.is_older_than(heart_beat_time,
cfg.CONF.agent_down_time)
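    # Worked example (numbers are illustrative): with agent_down_time=75 and a
    # heartbeat_timestamp 90 seconds in the past, timeutils.is_older_than()
    # returns True and the agent is treated as down.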
def get_configuration_dict(self, agent_db):
try:
conf = jsonutils.loads(agent_db.configurations)
except Exception:
msg = _LW('Configuration for agent %(agent_type)s on host %(host)s'
' is invalid.')
LOG.warn(msg, {'agent_type': agent_db.agent_type,
'host': agent_db.host})
conf = {}
return conf
def _get_agent_load(self, agent):
configs = agent.get('configurations', {})
load_type = None
load = 0
        if agent['agent_type'] == constants.AGENT_TYPE_DHCP:
load_type = cfg.CONF.dhcp_load_type
if load_type:
load = int(configs.get(load_type, 0))
return load
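    # Illustration (hypothetical report): for a DHCP agent whose configurations
    # contain {"networks": 5, "subnets": 12, "ports": 40}, this yields load=5
    # with the default dhcp_load_type of "networks", and 40 for "ports".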
def _make_agent_dict(self, agent, fields=None):
attr = ext_agent.RESOURCE_ATTRIBUTE_MAP.get(
ext_agent.RESOURCE_NAME + 's')
res = dict((k, agent[k]) for k in attr
if k not in ['alive', 'configurations'])
res['alive'] = not AgentDbMixin.is_agent_down(
res['heartbeat_timestamp'])
res['configurations'] = self.get_configuration_dict(agent)
return self._fields(res, fields)
def delete_agent(self, context, id):
with context.session.begin(subtransactions=True):
agent = self._get_agent(context, id)
context.session.delete(agent)
def update_agent(self, context, id, agent):
agent_data = agent['agent']
with context.session.begin(subtransactions=True):
agent = self._get_agent(context, id)
agent.update(agent_data)
return self._make_agent_dict(agent)
def get_agents_db(self, context, filters=None):
query = self._get_collection_query(context, Agent, filters=filters)
return query.all()
def get_agents(self, context, filters=None, fields=None):
agents = self._get_collection(context, Agent,
self._make_agent_dict,
filters=filters, fields=fields)
alive = filters and filters.get('alive', None)
if alive:
# alive filter will be a list
alive = attributes.convert_to_boolean(alive[0])
agents = [agent for agent in agents if agent['alive'] == alive]
return agents
def _get_agent_by_type_and_host(self, context, agent_type, host):
query = self._model_query(context, Agent)
try:
agent_db = query.filter(Agent.agent_type == agent_type,
Agent.host == host).one()
return agent_db
except exc.NoResultFound:
raise ext_agent.AgentNotFoundByTypeHost(agent_type=agent_type,
host=host)
except exc.MultipleResultsFound:
raise ext_agent.MultipleAgentFoundByTypeHost(agent_type=agent_type,
host=host)
def get_agent(self, context, id, fields=None):
agent = self._get_agent(context, id)
return self._make_agent_dict(agent, fields)
def _create_or_update_agent(self, context, agent):
with context.session.begin(subtransactions=True):
res_keys = ['agent_type', 'binary', 'host', 'topic']
res = dict((k, agent[k]) for k in res_keys)
configurations_dict = agent.get('configurations', {})
res['configurations'] = jsonutils.dumps(configurations_dict)
res['load'] = self._get_agent_load(agent)
current_time = timeutils.utcnow()
try:
agent_db = self._get_agent_by_type_and_host(
context, agent['agent_type'], agent['host'])
res['heartbeat_timestamp'] = current_time
if agent.get('start_flag'):
res['started_at'] = current_time
greenthread.sleep(0)
agent_db.update(res)
except ext_agent.AgentNotFoundByTypeHost:
greenthread.sleep(0)
res['created_at'] = current_time
res['started_at'] = current_time
res['heartbeat_timestamp'] = current_time
res['admin_state_up'] = True
agent_db = Agent(**res)
greenthread.sleep(0)
context.session.add(agent_db)
greenthread.sleep(0)
def create_or_update_agent(self, context, agent):
"""Create or update agent according to report."""
try:
return self._create_or_update_agent(context, agent)
except db_exc.DBDuplicateEntry:
# It might happen that two or more concurrent transactions
# are trying to insert new rows having the same value of
# (agent_type, host) pair at the same time (if there has
# been no such entry in the table and multiple agent status
# updates are being processed at the moment). In this case
# having a unique constraint on (agent_type, host) columns
# guarantees that only one transaction will succeed and
# insert a new agent entry, others will fail and be rolled
# back. That means we must retry them one more time: no
# INSERTs will be issued, because
# _get_agent_by_type_and_host() will return the existing
# agent entry, which will be updated multiple times
return self._create_or_update_agent(context, agent)
class AgentExtRpcCallback(object):
"""Processes the rpc report in plugin implementations.
This class implements the server side of an rpc interface. The client side
can be found in neutron.agent.rpc.PluginReportStateAPI. For more
information on changing rpc interfaces, see doc/source/devref/rpc_api.rst.
"""
target = oslo_messaging.Target(version='1.0',
namespace=constants.RPC_NAMESPACE_STATE)
START_TIME = timeutils.utcnow()
def __init__(self, plugin=None):
super(AgentExtRpcCallback, self).__init__()
self.plugin = plugin
def report_state(self, context, **kwargs):
"""Report state from agent to server."""
time = kwargs['time']
time = timeutils.parse_strtime(time)
agent_state = kwargs['agent_state']['agent_state']
self._check_clock_sync_on_agent_start(agent_state, time)
if self.START_TIME > time:
time_agent = timeutils.isotime(time)
time_server = timeutils.isotime(self.START_TIME)
log_dict = {'agent_time': time_agent, 'server_time': time_server}
LOG.debug("Stale message received with timestamp: %(agent_time)s. "
"Skipping processing because it's older than the "
"server start timestamp: %(server_time)s", log_dict)
return
if not self.plugin:
self.plugin = manager.NeutronManager.get_plugin()
self.plugin.create_or_update_agent(context, agent_state)
def _check_clock_sync_on_agent_start(self, agent_state, agent_time):
"""Checks if the server and the agent times are in sync.
Method checks if the agent time is in sync with the server time
on start up. Ignores it, on subsequent re-connects.
"""
if agent_state.get('start_flag'):
time_server_now = timeutils.utcnow()
diff = abs((time_server_now - agent_time).seconds)
if diff > cfg.CONF.agent_down_time:
agent_name = agent_state['agent_type']
time_agent = timeutils.isotime(agent_time)
host = agent_state['host']
log_dict = {'host': host,
'agent_name': agent_name,
'agent_time': time_agent,
'threshold': cfg.CONF.agent_down_time,
'serv_time': timeutils.isotime(time_server_now)}
LOG.error(_LE("Message received from the host: %(host)s "
"during the registration of %(agent_name)s has "
"a timestamp: %(agent_time)s. This differs from "
"the current server timestamp: %(serv_time)s by "
"more than the threshold agent down"
"time: %(threshold)s."), log_dict)
|
|
"""Kraken - objects.scene_item module.
Classes:
SceneItem - Base SceneItem Object.
"""
class SceneItem(object):
"""Kraken base object type for any 3D object."""
__maxId = 0
def __init__(self, name, parent=None, metaData=None):
super(SceneItem, self).__init__()
self._parent = parent
self._name = name
self._component = None
        self._sources = []
        self._depends = []
        self._flags = {}
self._id = SceneItem.__maxId
self._metaData = {}
if metaData is not None:
self._metaData = metaData
SceneItem.__maxId = SceneItem.__maxId + 1
# ==============
# Type Methods
# ==============
def getId(self):
"""Returns the unique Id of this object.
Returns:
int: Unique id.
"""
return self._id
def getTypeName(self):
"""Returns the class name of this object.
Returns:
bool: True if successful.
"""
return self.__class__.__name__
def getTypeHierarchyNames(self):
"""Returns the class name of this object.
Returns:
bool: True if successful.
"""
khierarchy = []
for cls in type.mro(type(self)):
if cls == object:
break
khierarchy.append(cls.__name__)
return khierarchy
def isTypeOf(self, typeName):
"""Returns the class name of this object.
Arguments:
typeName (str): Name to check against.
Returns:
bool: True if the scene item is of the given type.
"""
for cls in type.mro(type(self)):
if cls.__name__ == typeName:
return True
return False
def isOfAnyType(self, typeNames):
"""Returns true if this item has any of the given type names
Arguments:
typeNames (tuple): Type names to check against.
Returns:
bool: True if the scene item is of the given type.
"""
for typeName in typeNames:
if self.isTypeOf(typeName):
return True
return False
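    # Usage sketch (type names are illustrative): isTypeOf('SceneItem') checks
    # every class in the MRO, so any subclass answers True for its own class
    # name and for 'SceneItem'; isOfAnyType(('Component', 'Control')) simply
    # applies that check for each name in turn.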
# =============
# Name methods
# =============
def getName(self):
"""Returns the name of the object as a string.
Returns:
str: Object's name.
"""
return self._name
def setName(self, name):
"""Sets the name of the object with a string.
Arguments:
name (str): The new name for the item.
Returns:
bool: True if successful.
"""
self._name = name
return True
def getPath(self):
"""Returns the full hierarchical path to this object.
Returns:
str: Full name of the object.
"""
if self.getParent() is not None:
return self.getParent().getPath() + '.' + self.getName()
return self.getName()
def getBuildPath(self):
"""Returns the full hierarchical path to this object using buildNames.
Returns:
str: Full name of the object.
"""
if hasattr(self, 'getBuildName'):
buildName = self.getBuildName()
else:
buildName = self.getName()
if self.getParent() is not None:
return self.getParent().getBuildPath() + '.' + buildName
return buildName
def getNameDecoration(self):
"""Gets the decorated name of the object.
Returns:
str: Decorated name of the object.
"""
location = ""
component = self.getComponent()
if component and component.isTypeOf("Component"):
location = component.getLocation()
altLocation = self.getMetaDataItem("altLocation")
if altLocation is not None:
location = altLocation
return location
def getDecoratedName(self):
"""Gets the decorated name of the object.
Returns:
str: Decorated name of the object.
"""
return self.getName() + self.getNameDecoration()
def getDecoratedPath(self):
"""Gets the decorated path of the object.
Returns:
str: Decorated path of the object.
"""
if self.getParent() is not None:
return self.getParent().getDecoratedPath() + '.' + \
self.getDecoratedName()
return self.getDecoratedName()
# ===============
# Parent Methods
# ===============
def getParent(self):
"""Returns the parent of the object as an object.
Returns:
Object: Parent of this object.
"""
return self._parent
def setParent(self, parent):
"""Sets the parent attribute of this object.
Arguments:
parent (Object): Object that is the parent of this one.
Returns:
bool: True if successful.
"""
self.removeSource(self._parent)
self._parent = parent
        self.addSource(parent, prepend=True)  # always want parent to be the first source, then constraints etc.
return True
# ===============
# Source Methods
# ===============
def getSources(self):
"""Returns the sources of the object.
Returns:
list: All sources of this object.
"""
return self._sources
def getCurrentSource(self):
"""Returns the source of the object which is currently driving it.
Returns:
Object: source of this object
"""
if len(self.getSources()) > 0:
return self.getSources()[-1]
return None
def addSource(self, source, prepend=False):
"""Adds another source to this object.
Arguments:
source (Object): Object that is the source of this one.
prepend (bool): Add this source to the beginning of the list instead of the end
Returns:
int: Index of the source used
"""
if not isinstance(source, SceneItem):
return False
for prevSource in self._sources:
if prevSource.getId() == source.getId():
return False
if prepend:
self._sources.insert(0, source)
else:
self._sources.append(source)
if self not in source._depends:
source._depends.append(self)
return True
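    # Bookkeeping note: addSource() keeps both sides in sync. The source is
    # appended to self._sources and self is appended to source._depends, so
    # getDepends() on the source will include this item.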
def removeSource(self, source):
"""Removes a source from this object.
Arguments:
source (Object): Object that is no longer a source of this one.
"""
if not isinstance(source, SceneItem):
return False
self._sources[:] = [s for s in self._sources if s != source]
        if self in source._depends:
            source._depends[:] = [s for s in source._depends if s != self]
def setSource(self, index, source):
"""Sets the source of this object.
Arguments:
index (int): The index of the source to update.
source (Object): Object that is the source of this one.
"""
self._sources[index] = source
return True
def getDepends(self):
"""Returns the objects that depend on this object.
Returns:
list: All depending objects of this object.
"""
return self._depends
# ==================
# Component Methods
# ==================
def getComponent(self):
"""Returns the component of the object as an object.
Returns:
Object: Component of this object.
"""
return self._component
def setComponent(self, component):
"""Sets the component attribute of this object.
Args:
component (Object): Object that is the component of this one.
Returns:
bool: True if successful.
"""
self._component = component
return True
# =============
# Flag Methods
# =============
def setFlag(self, name):
"""Sets the flag of the specified name.
Returns:
bool: True if successful.
"""
self._flags[name] = True
return True
def testFlag(self, name):
"""Tests if the specified flag is set.
Args:
name (str): Name of the flag to test.
Returns:
bool: True if flag is set.
"""
return name in self._flags
def clearFlag(self, name):
"""Clears the flag of the specified name.
Args:
name (str): Name of the flag to clear.
Returns:
bool: True if successful.
"""
if name in self._flags:
del self._flags[name]
return True
return False
def getFlags(self):
"""Returns all flags set on this object.
Returns:
list: Flags set on this object.
"""
return self._flags.keys()
# ==========
# Meta Data
# ==========
def getMetaData(self):
"""Gets the meta data from the rig.
Returns:
dict: Extra data stored on the rig.
"""
return self._metaData
def getMetaDataItem(self, name):
"""Returns an item in the meta data by the key name.
Args:
name (String): Name of the key in the meta data dictionary to get
data for.
Returns:
Data from the specified key, None if not present.
"""
        return self._metaData.get(name, None)
def setMetaDataItem(self, name, data):
"""Sets meta data on the rig.
Args:
data: Extra data needed to persist the rig / graph.
Returns:
bool: True if successful.
"""
self._metaData[name] = data
return True
def appendMetaDataListItem(self, name, data, allowDuplicates=False):
"""Appends an item to a meta data that is a list
Args:
data: Extra data needed to persist the rig / graph.
allowDuplicates (bool) : first check to see if item in list
Returns:
bool: True if successful.
"""
if name not in self._metaData:
self._metaData[name] = []
if not isinstance(self._metaData[name], list):
raise ValueError("%s._metaData[%s] is not a list. Cannot append." % (self.getName(), name))
        if not allowDuplicates and data in self._metaData[name]:
            return False
self._metaData[name].append(data)
return True
def removeMetaDataListItem(self, name, data):
"""Removes an item to a meta data that is a list (if match is possible)
Args:
data: Item to match (string most likely)
Returns:
bool: True if successful.
"""
if name not in self._metaData:
raise ValueError("%s._metaData[%s] does not exist. Cannot remove." % (self.getName(), name))
if not isinstance(self._metaData[name], list):
raise ValueError("%s._metaData[%s] is not a list. Cannot remove." % (self.getName(), name))
try:
self._metaData[name].remove(data)
        except ValueError:
return False
return True
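    # Usage sketch for the meta data list helpers (names and values are
    # illustrative):
    #   item = SceneItem('demoItem')
    #   item.appendMetaDataListItem('tags', 'deformer')        # True
    #   item.appendMetaDataListItem('tags', 'deformer')        # duplicate, skipped
    #   item.removeMetaDataListItem('tags', 'deformer')        # True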
|
|
#!/usr/bin/python
"""
Browser.py Providing easy and extended control to the Selenium Webdriver API
This enables to use a selenium webdriver object in an exploit
"""
import os,sys,time,json
from os.path import isfile,isdir,abspath,dirname
import hashlib
import urllib
import platform
from urllib2 import URLError
import requests
# Import Selenium framework
from selenium import webdriver
from selenium.common.exceptions import UnexpectedAlertPresentException
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import ElementNotVisibleException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import WebDriverException
# Import PinkWave extensions
appDir = dirname(dirname(__file__ ))
sys.path.append(appDir)
from extensions.Util import Util,vdkException
import extensions.Http as Http
from extensions.Request import Request
"""
Exception classes for quering the DOM
"""
class ElementNotFoundException(NoSuchElementException):pass
"""
Browser Class
Providing easy and extended control to the Selenium Webdriver API
"""
class Browser:
    def __init__(self, browser, verifySsl=True, timeoutInSeconds=8, debugMode=False):
self.driver = None
self.browser = browser
self.verifySsl = verifySsl
self.request = "GET"
self.timeoutInSeconds = timeoutInSeconds
self.debugMode = debugMode
if "64" in platform.architecture()[0]:
self.architecture = 64
else:
self.architecture = 32
self.driverPath = self.getDriverPath(self.browser)
if self.browser.lower() == "firefox":
fprofile = None
if not verifySsl:
fprofile = webdriver.FirefoxProfile()
fprofile.accept_untrusted_certs = True
self.driver = webdriver.Firefox(executable_path=self.driverPath,firefox_profile=fprofile)
elif self.browser.lower() == "chrome":
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--disable-extensions')
chrome_options.add_argument('--disable-xss-auditor')
if not verifySsl:
chrome_options.add_experimental_option("excludeSwitches", ["ignore-certificate-errors"])
self.driver = webdriver.Chrome(executable_path=self.driverPath,chrome_options=chrome_options)
elif self.browser.lower() == "phantomjs":
sargs = []
if not verifySsl:
sargs = ['--ignore-ssl-errors=true', '--ssl-protocol=any']
self.driver = webdriver.PhantomJS(executable_path=self.driverPath, service_log_path=os.path.devnull,service_args=sargs)
else:
raise Exception("Browser %s not supported" % browser)
self.driver.set_page_load_timeout(self.timeoutInSeconds)
Request.setBrowser(self)
"""
Searches for submit button, else submits element
"""
def hitSubmit(self,element):
try:
self.driver.find_element_by_xpath("//input[@type='submit']").click()
except (NoSuchElementException, ElementNotVisibleException,UnexpectedAlertPresentException):
try:
element.submit()
# Sometimes previous elements are cached and therefore not to be found, try again a few times to be sure...
            except StaleElementReferenceException as sr:
                print "retrying to find cached element (StaleElementReferenceException)..."
                for i in range(0, 5):
                    # Enter copy of hitSubmit function, to prevent recursion
                    try:
                        self.driver.find_element_by_xpath("//input[@type='submit']").click()
                        break
                    except (NoSuchElementException, ElementNotVisibleException, UnexpectedAlertPresentException):
                        try:
                            element.submit()
                            break
                        except Exception:
                            # Re-raise the original error once all retries are exhausted
                            if i == 4:
                                raise sr
"""
Get Driver path based on browserName and architecture
"""
def getDriverPath(self,browserName):
root = dirname(dirname(abspath(__file__))) + "/drivers"
if isdir(root) == False:
raise Exception("Pinkwave Drivers Root not found: %s" % root)
root += "/" + str(self.architecture)
root += "/" + browserName.lower()
if isdir(root) == False:
raise Exception("Pinkwave Drivers Root not found: %s" % root)
for filename in os.listdir(root):
root += "/" + filename
break
if isfile(root) == False:
raise Exception("Can't load Pinkwave Driver File: %s" % root)
return root
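    # Expected on-disk layout (file names are illustrative): the first file
    # found under <project>/drivers/<architecture>/<browser>/ is used, e.g.
    # drivers/64/chrome/chromedriver or drivers/64/phantomjs/phantomjs.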
"""
Navigate to URL into browser
"""
def nav(self,url):
self.driver.set_page_load_timeout(self.timeoutInSeconds)
if "://" not in url:
url = "http://" + url
if not Http.is_ok(url):
raise Exception("Failed to establish connection to url %s" % url)
if self.debugMode:
print "%sNavigating to %s%s" % ('\033[1;35m',url,'\033[0m')
try:
self.request = "GET"
self.driver.get(url)
except UnexpectedAlertPresentException:
            # Double exception handling because Selenium might close the alert automatically and might not (on Chrome for example)
self.request = "GET"
try:
self.driver.get(url)
except UnexpectedAlertPresentException:
alert = self.driver.switch_to_alert()
alert.accept()
self.driver.get(url)
        except TimeoutException:
            raise vdkException("timeout triggered by webdriver")
"""
Submits POST form to remote URL via bouncer
"""
def directPost(self,url,requestNames,values):
bouncer = Util.getBouncer()
newUrl = Util.transformUrl(bouncer,["values",'url'],[",".join(requestNames),url])
self.nav(newUrl)
self.post(requestNames,values)
self.request = "POST/DIRECT"
"""
Enter and submit POST form to current url
"""
def post(self, requestNames, values):
if requestNames is None or len(requestNames) == 0:
raise Exception("requestNames is empty")
if values is None or len(values) == 0:
raise Exception("values is empty")
try:
self.driver.set_page_load_timeout(self.timeoutInSeconds)
e = None
self.request = "POST"
if self.debugMode:
print "Posting to %s, fields: [%s], data: [%s]" % (self.url(), requestNames, values)
if not isinstance(values,list):
for post in requestNames:
e = self.driver.find_element_by_name(post)
e.clear()
values = values.strip(os.linesep)
e.send_keys(values)
else:
if len(requestNames) != len(values):
raise Exception("requestNames length does not match with values length")
postIndex = 0
for val in values:
e = self.driver.find_element_by_name(requestNames[postIndex])
e.clear()
val = val.strip(os.linesep)
e.send_keys(val)
postIndex = postIndex + 1
self.hitSubmit(e)
        except TimeoutException:
            # Only vdkException is imported from extensions.Util
            raise vdkException("Timeout triggered by WebDriver")
except NoSuchElementException as nse:
print "Element not found by POST names: %s" % ",".join(requestNames)
raise nse
        except UnicodeDecodeError:
            if isinstance(values, list):
                values = [unicode(val, errors='replace') for val in values]
            else:
                values = unicode(values, errors='replace')
            return self.post(requestNames, values)
        except WebDriverException:
            self.nav(self.url())
            return self.post(requestNames, values)
"""
Api for querying elements by name
"""
def getElementByName(self,name):
return self.driver.find_element_by_name(name)
"""
Get text from current browser window
"""
def text(self):
try:
text = self.driver.page_source
except UnexpectedAlertPresentException:
            # Double exception handling because Selenium might close the alert automatically and might not (on Chrome for example)
self.request = "GET"
try:
text = self.driver.page_source
except UnexpectedAlertPresentException:
alert = self.driver.switch_to_alert()
alert.accept()
text = self.driver.page_source
except URLError:
print "Connection refused for url: %s" % self.url()
raise
return text
"""
Get size in bytes of browser text
"""
def byteSize(self):
return len(self.text())
"""
Get current URL from browser
"""
def url(self):
url = self.driver.current_url
return url.strip("/").encode("utf8")
"""
Save current window as screenshot in given path
"""
def saveScreenshot(self,path):
self.driver.save_screenshot(path)
"""
Kill the webdriver process
"""
def close(self):
self.driver.quit()
"""
Get all cookies from current session
"""
def cookies(self):
cookies = self.driver.get_cookies()
if len(cookies) == 0:
cookies = []
else:
for cookie in cookies:
if cookie.get('httpOnly') is None:
cookie['httpOnly'] = False
if cookie.get('secure') is None:
cookie['secure'] = False
return cookies
"""
Get title of current URL
"""
def title(self):
return self.driver.title
"""
Calculate and return request time
"""
def time(self):
return self.timeEnd - self.timeStart
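    # Note: timeStart/timeEnd are not set anywhere in this class; they are
    # assumed to be populated externally (for example by the Request extension)
    # before time() is called.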
"""
Get sha256 hash of the current URL text
"""
def hash(self):
m = hashlib.sha256()
m.update(self.text().encode("utf8"))
return m.digest().encode("hex")
"""
Disable logging of all Browser functions
"""
def disableLogging(self):
self.logging = False
|
|
# Copyright (c) 2017 Dirk Hartmann
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pytest
import libqtile.layout
import libqtile.bar
import libqtile.widget
import libqtile.config
import libqtile.scratchpad
# import .conftest
from .conftest import Retry
from .conftest import no_xinerama
from .layouts.layout_utils import assert_focused, assert_focus_path
class ScratchPadBaseConfig:
auto_fullscreen = True
main = None
screens = []
groups = [
libqtile.config.ScratchPad('SCRATCHPAD', dropdowns=[
libqtile.config.DropDown('dd-a', 'xterm -T dd-a sh', on_focus_lost_hide=False),
libqtile.config.DropDown('dd-b', 'xterm -T dd-b sh', on_focus_lost_hide=False),
libqtile.config.DropDown('dd-c', 'xterm -T dd-c sh', on_focus_lost_hide=True),
libqtile.config.DropDown('dd-d', 'xterm -T dd-d sh', on_focus_lost_hide=True)
]),
libqtile.config.Group("a"),
libqtile.config.Group("b"),
]
layouts = [libqtile.layout.max.Max()]
floating_layout = libqtile.layout.floating.Floating()
keys = []
mouse = []
# scratchpad_config = lambda x:
def scratchpad_config(x):
    return no_xinerama(pytest.mark.parametrize("qtile", [ScratchPadBaseConfig], indirect=True)(x))
@Retry(ignore_exceptions=(KeyError,))
def is_spawned(qtile, name):
qtile.c.group["SCRATCHPAD"].dropdown_info(name)['window']
return True
@Retry(ignore_exceptions=(ValueError,))
def is_killed(qtile, name):
if 'window' not in qtile.c.group["SCRATCHPAD"].dropdown_info(name):
return True
raise ValueError('not yet killed')
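# Both helpers poll via the Retry decorator from conftest: is_spawned keeps
# raising KeyError until dropdown_info() reports a 'window' entry, and
# is_killed raises ValueError until that entry disappears, so the tests block
# until the dropdown client has actually appeared or exited.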
@scratchpad_config
def test_toggling(qtile):
# adjust command for current display
qtile.c.group["SCRATCHPAD"].dropdown_reconfigure('dd-a', command='xterm -T dd-a -display %s sh' % qtile.display)
qtile.test_window("one")
assert qtile.c.group["a"].info()['windows'] == ['one']
# First toggling: wait for window
qtile.c.group["SCRATCHPAD"].dropdown_toggle('dd-a')
is_spawned(qtile, 'dd-a')
# assert window in current group "a"
assert sorted(qtile.c.group["a"].info()['windows']) == ['dd-a', 'one']
assert_focused(qtile, 'dd-a')
# toggle again --> "hide" xterm in scratchpad group
qtile.c.group["SCRATCHPAD"].dropdown_toggle('dd-a')
assert qtile.c.group["a"].info()['windows'] == ['one']
assert_focused(qtile, 'one')
assert qtile.c.group["SCRATCHPAD"].info()['windows'] == ['dd-a']
# toggle again --> show again
qtile.c.group["SCRATCHPAD"].dropdown_toggle('dd-a')
assert sorted(qtile.c.group["a"].info()['windows']) == ['dd-a', 'one']
assert_focused(qtile, 'dd-a')
assert qtile.c.group["SCRATCHPAD"].info()['windows'] == []
@scratchpad_config
def test_focus_cycle(qtile):
# adjust command for current display
qtile.c.group["SCRATCHPAD"].dropdown_reconfigure('dd-a', command='xterm -T dd-a -display %s sh' % qtile.display)
qtile.c.group["SCRATCHPAD"].dropdown_reconfigure('dd-b', command='xterm -T dd-b -display %s sh' % qtile.display)
qtile.test_window("one")
# spawn dd-a by toggling
assert_focused(qtile, 'one')
qtile.c.group["SCRATCHPAD"].dropdown_toggle('dd-a')
is_spawned(qtile, 'dd-a')
assert_focused(qtile, 'dd-a')
qtile.test_window("two")
assert_focused(qtile, 'two')
# spawn dd-b by toggling
qtile.c.group["SCRATCHPAD"].dropdown_toggle('dd-b')
is_spawned(qtile, 'dd-b')
assert_focused(qtile, 'dd-b')
# check all windows
assert sorted(qtile.c.group["a"].info()['windows']) == ['dd-a', 'dd-b', 'one', 'two']
assert_focus_path(qtile, 'one', 'two', 'dd-a', 'dd-b')
@scratchpad_config
def test_focus_lost_hide(qtile):
# adjust command for current display
qtile.c.group["SCRATCHPAD"].dropdown_reconfigure('dd-c', command='xterm -T dd-c -display %s sh' % qtile.display)
qtile.c.group["SCRATCHPAD"].dropdown_reconfigure('dd-d', command='xterm -T dd-d -display %s sh' % qtile.display)
qtile.test_window("one")
assert_focused(qtile, 'one')
# spawn dd-c by toggling
qtile.c.group["SCRATCHPAD"].dropdown_toggle('dd-c')
is_spawned(qtile, 'dd-c')
assert_focused(qtile, 'dd-c')
assert sorted(qtile.c.group["a"].info()['windows']) == ['dd-c', 'one']
# New Window with Focus --> hide current DropDown
qtile.test_window("two")
assert_focused(qtile, 'two')
assert sorted(qtile.c.group["a"].info()['windows']) == ['one', 'two']
assert sorted(qtile.c.group["SCRATCHPAD"].info()['windows']) == ['dd-c']
    # spawn dd-d by toggling
qtile.c.group["SCRATCHPAD"].dropdown_toggle('dd-d')
is_spawned(qtile, 'dd-d')
assert_focused(qtile, 'dd-d')
assert sorted(qtile.c.group["a"].info()['windows']) == ['dd-d', 'one', 'two']
assert sorted(qtile.c.group["SCRATCHPAD"].info()['windows']) == ['dd-c']
# focus next, is the first tiled window --> "hide" dd-d
qtile.c.group.next_window()
assert_focused(qtile, 'one')
assert sorted(qtile.c.group["a"].info()['windows']) == ['one', 'two']
assert sorted(qtile.c.group["SCRATCHPAD"].info()['windows']) == ['dd-c', 'dd-d']
# Bring dd-c to front
qtile.c.group["SCRATCHPAD"].dropdown_toggle('dd-c')
assert_focused(qtile, 'dd-c')
assert sorted(qtile.c.group["a"].info()['windows']) == ['dd-c', 'one', 'two']
assert sorted(qtile.c.group["SCRATCHPAD"].info()['windows']) == ['dd-d']
    # Bring dd-d to front --> "hide" dd-c
qtile.c.group["SCRATCHPAD"].dropdown_toggle('dd-d')
assert_focused(qtile, 'dd-d')
assert sorted(qtile.c.group["a"].info()['windows']) == ['dd-d', 'one', 'two']
assert sorted(qtile.c.group["SCRATCHPAD"].info()['windows']) == ['dd-c']
# change current group to "b" hids DropDowns
qtile.c.group['b'].toscreen()
assert sorted(qtile.c.group["a"].info()['windows']) == ['one', 'two']
assert sorted(qtile.c.group["SCRATCHPAD"].info()['windows']) == ['dd-c', 'dd-d']
@scratchpad_config
def test_kill(qtile):
# adjust command for current display
qtile.c.group["SCRATCHPAD"].dropdown_reconfigure('dd-a', command='xterm -T dd-a -display %s sh' % qtile.display)
qtile.test_window("one")
assert_focused(qtile, 'one')
# dd-a has no window associated yet
assert 'window' not in qtile.c.group["SCRATCHPAD"].dropdown_info('dd-a')
# First toggling: wait for window
qtile.c.group["SCRATCHPAD"].dropdown_toggle('dd-a')
is_spawned(qtile, 'dd-a')
assert_focused(qtile, 'dd-a')
assert qtile.c.group["SCRATCHPAD"].dropdown_info('dd-a')['window']['name'] == 'dd-a'
# kill current window "dd-a"
qtile.c.window.kill()
is_killed(qtile, 'dd-a')
assert_focused(qtile, 'one')
assert 'window' not in qtile.c.group["SCRATCHPAD"].dropdown_info('dd-a')
@scratchpad_config
def test_floating_toggle(qtile):
# adjust command for current display
qtile.c.group["SCRATCHPAD"].dropdown_reconfigure('dd-a', command='xterm -T dd-a -display %s sh' % qtile.display)
qtile.test_window("one")
assert_focused(qtile, 'one')
# dd-a has no window associated yet
assert 'window' not in qtile.c.group["SCRATCHPAD"].dropdown_info('dd-a')
# First toggling: wait for window
qtile.c.group["SCRATCHPAD"].dropdown_toggle('dd-a')
is_spawned(qtile, 'dd-a')
assert_focused(qtile, 'dd-a')
assert 'window' in qtile.c.group["SCRATCHPAD"].dropdown_info('dd-a')
assert sorted(qtile.c.group["a"].info()['windows']) == ['dd-a', 'one']
qtile.c.window.toggle_floating()
# dd-a has no window associated any more, but is still in group
assert 'window' not in qtile.c.group["SCRATCHPAD"].dropdown_info('dd-a')
assert sorted(qtile.c.group["a"].info()['windows']) == ['dd-a', 'one']
qtile.c.group["SCRATCHPAD"].dropdown_toggle('dd-a')
is_spawned(qtile, 'dd-a')
assert sorted(qtile.c.group["a"].info()['windows']) == ['dd-a', 'dd-a', 'one']
|
|
#!/usr/bin/env python
# Test Harness to test the Game Controller Functionality
#
import requests # https://github.com/kennethreitz/requests/
import time
import random
# Replace these variables as appropriate
server_url = 'http://192.168.59.103:8080/api'
admin_header = {'X-Auth-Token': '1234'}
# Server Calls ----------------------------------------------
def server_start(server_url):
"""Start game
curl -i -H 'X-Auth-Token: 1234' -X POST http://localhost:8080/api/start
"""
url = server_url + '/start'
start_game = requests.post(url, headers=admin_header)
if start_game.status_code == 200:
print('Server: Game has been Started!')
else:
print ('Server: Game Start Failed!')
print ("HTTP Code: " + str(start_game.status_code) + " | Response: " + start_game.text)
def server_reset(server_url):
"""Reset game
curl -i -H 'X-Auth-Token: 1234' -X POST http://localhost:8080/api/start
"""
url = server_url + '/reset'
reset_game = requests.post(url, headers=admin_header)
if reset_game.status_code == 200:
print('Server: Game has been Reset!')
else:
print ('Server: Game Reset Failed!')
print ("HTTP Code: " + str(reset_game.status_code) + " | Response: " + reset_game.text)
def server_stop(server_url):
"""Stop game
curl -i -H 'X-Auth-Token: 1234' -X POST http://localhost:8080/api/stop
"""
game_stop_url = server_url + '/stop'
stop_game = requests.post(game_stop_url, headers=admin_header)
if stop_game.status_code == 200:
print('Server: Game has been Stopped!')
else:
print ('Server: Game Stop Failed!')
print ("HTTP Code: " + str(stop_game.status_code) + " | Response: " + stop_game.text)
def server_check_game_started(server_url):
"""
Start game
curl -i -H 'X-Auth-Token: 1234' -X POST http://localhost:8080/api/start
"""
gstart_url = '{0}/start'.format(server_url)
start_game = requests.post(gstart_url, headers=admin_header)
if start_game.status_code == 400:
return True
else:
return False
def server_kick_team(server_url, team_name):
"""
Kicks a team from the registration list. Before the game is started.
Example Curl: curl -i -H 'X-Auth-Token: 1234' -X POST http://localhost:8080/api/kick/foobar
:param server_url: The Server URL
:param team_name: The Team's name
"""
kick_url = server_url + '/kick/' + team_name
team_kicked = requests.post(kick_url, headers=admin_header)
if team_kicked.status_code == 200:
print('Server: The team: {0} has been Kicked out!'.format(team_name))
else:
print ('Server: Team Kick failed for Team: {0}'.format(team_name))
print ("HTTP Code: {0} | Response: {1}".format(str(team_kicked.status_code), team_kicked.text))
def server_config(server_url):
"""
Retries the Server's configuration parameters
curl -i -X GET http://localhost:8080/api/config
:param server_url:
:return: Nothing
"""
kick_url = '{0}/config'.format(server_url)
srv_config = requests.get(kick_url)
print ("HTTP Code: {0} | Response: {1}".format(str(srv_config.status_code), srv_config.text))
# Shield Calls ------------------------------------------------
def team_shield_up(team_name, team_auth):
"""
Sets the team shield up
curl -i -H 'X-Auth-Token: 1335aa6af5d0289f' -X POST http://localhost:8080/api/shield/enable
"""
url = server_url + '/shield/enable'
auth_header = {'X-Auth-Token': team_auth}
shield_up = requests.post(url, headers=auth_header)
if shield_up.status_code == 200:
print ('Server: Team: ' + team_name + ' Shield is UP!')
else:
print ('Server: Team: ' + team_name + ' Shield UP! request Failed!')
print ("HTTP Code: " + str(shield_up.status_code) + " | Response: " + shield_up.text)
def team_shield_down(team_name, team_auth):
"""
Sets the team shield Down
curl -i -H 'X-Auth-Token: 1335aa6af5d0289f' -X POST http://localhost:8080/api/shield/disable
"""
url = server_url + '/shield/disable'
auth_header = {'X-Auth-Token': team_auth}
shield_down = requests.post(url, headers=auth_header)
if shield_down.status_code == 200:
print ('Server: Team: ' + team_name + ' Shield is DOWN!')
else:
print ('Server: Team: ' + team_name + ' Shield DOWN! request Failed!')
print ("HTTP Code: " + str(shield_down.status_code) + " | Response: " + shield_down.text)
# Test Harness ------------------------------------------------
print("Starting the Test Harness")
print("-------------------------")
print("\nChecking the Server Status...")
# Check that Server has not started
if server_check_game_started(server_url):
print("...previous game running....")
server_stop(server_url)
server_reset(server_url)
else:
print("...cleaning up....")
server_stop(server_url)
server_reset(server_url)
time.sleep(2)
# Testing Server Configuration Functionality
# ------------------------------------------------
print("\nChecking the Server Configuration")
print("------------------------------------")
server_config(server_url)
# Testing Adding Teams to Game Functionality
# ------------------------------------------------
print("\nAdding Teams")
print("--------------")
# Adding team: TheBorgs
team1_name = 'TheBorgs'
team1_auth = ''
url = server_url + '/join/' + team1_name
payload = ''
# POST with form-encoded data
response = requests.post(url, data=payload)
team1_auth = response.text
if response.status_code == 200:
print ('Team \'' + team1_name + '\' joined the game!')
print (team1_name + ' authentication Code: ' + team1_auth)
else:
print ('Team \'' + team1_name + '\' joining game Failed!')
print ("HTTP Code: " + str(response.status_code) + " | Response: " + response.text)
time.sleep(2)
# Adding team: QuickFandango
team2_name = 'QuickFandango'
team2_auth = ''
url = server_url + "/join/" + team2_name
payload = ''
# POST with form-encoded data
response = requests.post(url, data=payload)
team2_auth = response.text
if response.status_code == 200:
print ('Team \'' + team2_name + '\' joined the game!')
print (team2_name + ' authentication Code: ' + team2_auth)
else:
print ('Team \'' + team2_name + '\' joining game Failed!')
print ("HTTP Code: " + str(response.status_code) + " | Response: " + response.text)
time.sleep(2)
# Adding team: InTheBigMessos
team3_name = 'InTheBigMessos'
team3_auth = ''
url = server_url + "/join/" + team3_name
payload = ''
# POST with form-encoded data
response = requests.post(url, data=payload)
team3_auth = response.text
if response.status_code == 200:
print ('Team \'' + team3_name + '\' joined the game!')
print (team3_name + ' authentication Code: ' + team3_auth)
else:
print ('Team \'' + team3_name + '\' joining game Failed!')
print ("HTTP Code: " + str(response.status_code) + " | Response: " + response.text)
# Testing Kick Team Functionality
# ------------------------------------------------
print("\nChecking the Server Kick Functionality")
print("----------------------------------------")
print("Kicking {0} team out...".format(team1_name))
print("Team {0} has Auth Key: {1}".format(team1_name, str(team1_auth)))
server_kick_team(server_url, team1_name)
print("Adding {0} team back in...".format(team1_name))
url = server_url + '/join/' + team1_name
payload = ''
# POST with form-encoded data
response = requests.post(url, data=payload)
team1_auth = response.text
if response.status_code == 200:
print ('Team \'' + team1_name + '\' joined the game!')
print (team1_name + ' authentication Code: ' + team1_auth)
else:
print ('Team \'' + team1_name + '\' joining game Failed!')
print ("HTTP Code: " + str(response.status_code) + " | Response: " + response.text)
time.sleep(10)
# Starting the GAME
# ------------------------------------------------
print("\nStarting the Game")
print("-------------------")
# Starting the Game Server
server_start(server_url)
# Starting the Teams Logic
while True:
team = random.randrange(0, 3)
action = random.randrange(1, 3)
team_list = [(team1_name, team1_auth), (team2_name, team2_auth), (team3_name, team3_auth)]
# print("\nGameMove: Team: " + team_list[team][0] + ' Action:' + str(action) + ' Name: ' + team_list[team][0] +'|'+ team_list[team][1])
if action > 1:
print("\nGameMove: Team: " + team_list[team][0] + ' Action: Shield UP! | Team Key: ' + team_list[team][1])
team_shield_up(team_list[team][0], (team_list[team][1]))
else:
print("\nGameMove: Team: " + team_list[team][0] + ' Action: Shield Down! | Team Key: ' + team_list[team][1])
team_shield_down(team_list[team][0], (team_list[team][1]))
time.sleep(2)
    if not server_check_game_started(server_url):
print('\nServer: Game is Over...')
break
|
|
import os
import sys
import types
from . import common
from . import overrides
from . import _girepository
# major python version
if sys.version_info[0] == 2:
PY2, PY3 = True, False
elif sys.version_info[0] == 3:
PY2, PY3 = False, True
# cache used for modules and classes
_pygirepository = None
_pygirepository_modules = {}
_pygirepository_classes = {}
# cache used for signals/callbacks; prevents them from being GC'ed
_cfunctype_cache = {}
_cfunctype_last = 0
class GIError(Exception):
# default exception class
pass
class GIRepository(types.ModuleType):
def __new__(cls, *args, **kwargs):
global _pygirepository
# act as singleton
if not _pygirepository:
# default/single instance of GIRepository
_pygirepository = super(GIRepository, cls).__new__(cls, *args, **kwargs)
cls.__init__(_pygirepository)
return _pygirepository
def __init__(self, modulename='GIRepository', moduledoc=''):
types.ModuleType.__init__(self, modulename, moduledoc)
self._repository = _girepository.g_irepository_get_default()
def __getattr__(self, attr):
try:
return self.require(attr, None)
except GIError:
raise AttributeError('missing attribute "%s"' % attr)
def require(self, namespace, version=None):
global _pygirepository_modules
# namespace
if PY2: namespace_bytes = namespace
elif PY3: namespace_bytes = namespace.encode()
# version
if PY2: version_bytes = version if version else None
elif PY3: version_bytes = version.encode() if version else None
# prepare function args
_repository = self._repository
_namespace = _girepository.gchar_p(namespace_bytes)
_version = _girepository.gchar_p(version_bytes)
_flags = _girepository.G_IREPOSITORY_LOAD_FLAG_LAZY
_error = _girepository.cast(
_girepository.gpointer(),
_girepository.POINTER(
_girepository.GError
)
)
# typelib
_typelib = _girepository.g_irepository_require(_repository, _namespace, _version, _flags, _error)
if not _typelib and _error.contents:
raise GIError(_error.contents.message.value)
# module
try:
module = _pygirepository_modules[namespace]
except KeyError:
# new module
module = GITypelib(namespace, '', _typelib)
# dependencies
_dependencies = _girepository.g_irepository_get_dependencies(_repository, _namespace)
if _dependencies:
i = 0
while True:
# dependency
_dependency = _dependencies[i]
if _dependency.value:
dependency_bytes = _dependency.value
if PY2: dependency = dependency_bytes
elif PY3: dependency = dependency_bytes.decode()
# require (import) dependency
namespace_, version_ = dependency.split('-')
module_ = self.require(namespace_, version_)
else:
break
i += 1
# override module/namespace
module = overrides.override(module, namespace)
_pygirepository_modules[namespace] = module
setattr(self, namespace, module)
return module
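    # Usage sketch (namespace and version are illustrative):
    #   repo = GIRepository()
    #   GLib = repo.require('GLib', '2.0')
    # The module is cached in _pygirepository_modules, its typelib dependencies
    # are required recursively, and plain attribute access (repo.GLib) falls
    # back to require() via __getattr__.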
class GITypelib(types.ModuleType):
def __init__(self, modulename, moduledoc, _typelib):
types.ModuleType.__init__(self, modulename, moduledoc)
self._typelib = _typelib
def __del__(self):
#if self._typelib and _girepository:
# _girepository.g_typelib_free(self._typelib)
pass
def __getattr__(self, attr):
try:
return self._wrap(attr)
except GIError:
raise AttributeError('missing attribute "%s"' % attr)
def _wrap(self, attr):
global _pygirepository
# attr
if PY2: attr_bytes = attr
elif PY3: attr_bytes = attr.encode()
_attr = _girepository.gchar_p(attr_bytes)
# namespace
_namespace = _girepository.g_typelib_get_namespace(self._typelib)
namespace_bytes = _namespace.value
if PY2: namespace = namespace_bytes
elif PY3: namespace = namespace_bytes.decode()
# namespace_classname
namespace_classname = '%s.%s' % (namespace, attr)
# base info
_base_info = _girepository.g_irepository_find_by_name(_pygirepository._repository, _namespace, _attr)
if not _base_info:
raise GIError('missing attribute "%s"' % attr)
# "switch" info type
_info_type = _girepository.g_base_info_get_type(_base_info)
if _info_type.value == _girepository.GI_INFO_TYPE_INVALID.value:
# invalid
raise GIError('unknown info type "%s" for %s' % (_info_type.value, attr))
elif _info_type.value == _girepository.GI_INFO_TYPE_FUNCTION.value:
# function
_function_info = _girepository.cast(_base_info, _girepository.POINTER(_girepository.GIFunctionInfo))
function = GIFunction(_function_info=_function_info)
setattr(self, attr, function)
return function
elif _info_type.value == _girepository.GI_INFO_TYPE_CALLBACK.value:
# callback
# raise GIError('callback info type is not supported for %s' % (attr,))
pass
elif _info_type.value in (_girepository.GI_INFO_TYPE_STRUCT.value, _girepository.GI_INFO_TYPE_BOXED.value):
# struct/boxed
_struct_info = _girepository.cast(_base_info, _girepository.POINTER(_girepository.GIStructInfo))
_registered_info = _girepository.cast(_struct_info, _girepository.POINTER(_girepository.GIRegisteredTypeInfo))
# create class
clsname = namespace_classname
clsbases = (GIStruct,)
clsdict = {
'_struct_info': _struct_info,
'_registered_info': _registered_info,
}
# FIXME: parse fields
# methods
_n_methods = _girepository.g_struct_info_get_n_methods(_struct_info)
for i in range(_n_methods.value):
# function info
_method_function_info = _girepository.g_struct_info_get_method(_struct_info, _girepository.gint(i))
_method_base_info = _girepository.cast(_method_function_info, _girepository.POINTER(_girepository.GIBaseInfo))
# name
_method_name = _girepository.g_base_info_get_name(_method_base_info)
method_name_bytes = _method_name.value
if PY2: method_name = method_name_bytes
elif PY3: method_name = method_name_bytes.decode()
# attach gifunction to class dict
gifunction = GIFunction(_function_info=_method_function_info)
clsdict[method_name] = gifunction
# new class
class_ = type(clsname, clsbases, clsdict)
class_.__module__ = self
_pygirepository_classes[namespace_classname] = class_
setattr(self, attr, class_)
return class_
elif _info_type.value in (_girepository.GI_INFO_TYPE_ENUM.value, _girepository.GI_INFO_TYPE_FLAGS.value):
# enum/flags
_enum_info = _girepository.cast(_base_info, _girepository.POINTER(_girepository.GIEnumInfo))
_registered_info = _girepository.cast(_enum_info, _girepository.POINTER(_girepository.GIRegisteredTypeInfo))
# class/type args
clsname = namespace_classname
clsbases = (GIEnum,)
clsdict = {
'_enum_info': _enum_info,
'_registered_info': _registered_info,
}
# values
_n_values = _girepository.g_enum_info_get_n_values(_enum_info)
for i in range(_n_values.value):
# value info
_value_info = _girepository.g_enum_info_get_value(_enum_info, _girepository.gint(i))
_value_base_info = _girepository.cast(_value_info, _girepository.POINTER(_girepository.GIBaseInfo))
# name
_value_name = _girepository.g_base_info_get_name(_value_base_info)
value_name_bytes = _value_name.value
if PY2: value_name = value_name_bytes
elif PY3: value_name = value_name_bytes.decode()
# attach value to class dict
_value = _girepository.g_value_info_get_value(_value_info)
value = _value.value
clsdict[value_name] = value
# create new class
class_ = type(clsname, clsbases, clsdict)
class_.__module__ = self
_pygirepository_classes[namespace_classname] = class_
setattr(self, attr, class_)
return class_
elif _info_type.value == _girepository.GI_INFO_TYPE_OBJECT.value:
# object
_object_info = _girepository.cast(_base_info, _girepository.POINTER(_girepository.GIObjectInfo))
_registered_info = _girepository.cast(_object_info, _girepository.POINTER(_girepository.GIRegisteredTypeInfo))
# class name
clsname = namespace_classname
# class bases
clsbases = []
# parent
_parent_object_info = _girepository.g_object_info_get_parent(_object_info)
_parent_base_info = _girepository.cast(_parent_object_info, _girepository.POINTER(_girepository.GIBaseInfo))
# parent namespace
_parent_namespace = _girepository.g_base_info_get_namespace(_parent_base_info)
parent_namespace_bytes = _parent_namespace.value
if PY2: parent_namespace = parent_namespace_bytes
elif PY3: parent_namespace = parent_namespace_bytes.decode()
# parent name
_parent_name = _girepository.g_base_info_get_name(_parent_base_info)
parent_name_bytes = _parent_name.value
if PY2: parent_name = parent_name_bytes
elif PY3: parent_name = parent_name_bytes.decode()
# parents
if namespace == parent_namespace and attr == parent_name:
clsbases.append(GIObject)
else:
# parent
module_parent = _pygirepository_modules[parent_namespace]
clsbase = getattr(module_parent, parent_name)
clsbases.append(clsbase)
# interfaces
_n_interfaces = _girepository.g_object_info_get_n_interfaces(_object_info)
for i in range(_n_interfaces.value):
# interface info
_interface_object_info = _girepository.g_object_info_get_interface(_object_info, _girepository.gint(i))
_interface_base_info = _girepository.cast(_interface_object_info, _girepository.POINTER(_girepository.GIBaseInfo))
# interface namespace
_interface_namespace = _girepository.g_base_info_get_namespace(_interface_base_info)
interface_namespace_bytes = _interface_namespace.value
if PY2: interface_namespace = interface_namespace_bytes
elif PY3: interface_namespace = interface_namespace_bytes.decode()
# interface name
_interface_name = _girepository.g_base_info_get_name(_interface_base_info)
interface_name_bytes = _interface_name.value
if PY2: interface_name = interface_name_bytes
elif PY3: interface_name = interface_name_bytes.decode()
# add interface to clsbasses
interface_module = _pygirepository_modules[interface_namespace]
interface_class = getattr(interface_module, interface_name)
clsbases.append(interface_class)
clsbases = tuple([clsbase for clsbase in _mro(clsbases) if clsbase in clsbases])
# class dict
clsdict = {
'_object_info': _object_info,
'_registered_info': _registered_info,
}
# new class
class_ = type(clsname, clsbases, clsdict)
# FIXME: parse fields
# FIXME: parse properties
# methods
_n_methods = _girepository.g_object_info_get_n_methods(_object_info)
for i in range(_n_methods.value):
# function info
_method_function_info = _girepository.g_object_info_get_method(_object_info, _girepository.gint(i))
_method_base_info = _girepository.cast(_method_function_info, _girepository.POINTER(_girepository.GIBaseInfo))
# method name
_method_name = _girepository.g_base_info_get_name(_method_base_info)
method_name_bytes = _method_name.value
if PY2: method_name = method_name_bytes
elif PY3: method_name = method_name_bytes.decode()
# new gifunction, and preserve class
gifunction = GIFunction(_function_info=_method_function_info)
gifunction._pytype = class_
# 'switch' method function info flags
_method_function_info_flags = _girepository.g_function_info_get_flags(_method_function_info)
if _method_function_info_flags.value == 0:
method = gifunction
elif _method_function_info_flags.value & _girepository.GI_FUNCTION_IS_METHOD.value:
method = common.instancemethod(gifunction)
elif _method_function_info_flags.value & _girepository.GI_FUNCTION_IS_CONSTRUCTOR.value:
method = classmethod(gifunction)
elif _method_function_info_flags.value & _girepository.GI_FUNCTION_IS_GETTER.value:
method = None
elif _method_function_info_flags.value & _girepository.GI_FUNCTION_IS_SETTER.value:
method = None
elif _method_function_info_flags.value & _girepository.GI_FUNCTION_WRAPS_VFUNC.value:
method = None
elif _method_function_info_flags.value & _girepository.GI_FUNCTION_THROWS.value:
method = None
else:
                    raise GIError('unsupported function info flag "%i" for "%s.%s"' % (_method_function_info_flags.value, clsname, method_name))
# attach method to class dict
setattr(class_, method_name, method)
# FIXME: parse signals
# FIXME: parse constant
# HACK: uses direct low-level access to shared library
if namespace_classname == 'GObject.Object':
_libgobject = _girepository.libgobject
#~ def __new__(cls, *args, **kwargs):
#~ self = super(class_, cls).__new__(cls, *args, **kwargs)
#~ return self
#~ def __init__(self, *args, **kwargs):
#~ pass
def connect(instance, detailed_signal, py_handler, *args, **kwargs):
global _cfunctype_cache
def py_handler_func():
return_ = py_handler(instance, *args, **kwargs)
try:
return int(return_)
except TypeError:
return 0
def py_closure_notify_func(_data, _closure):
return 0
# prepare low-level values/objects
_instance = instance._cself
if PY2: _detailed_signal = _girepository.gchar_p(detailed_signal)
elif PY3: _detailed_signal = _girepository.gchar_p(detailed_signal.encode())
_c_handler = _girepository.GCallback(py_handler_func)
_data = _girepository.gpointer(0)
_destroy_data = _girepository.GClosureNotify(py_closure_notify_func)
_connect_flags = _girepository.gint(0)
# connect
_handler_id = _libgobject.g_signal_connect_data(
_instance,
_detailed_signal,
_c_handler,
_data,
_destroy_data,
_connect_flags
)
# handler_id is always integer
handler_id = int(_handler_id.value)
# cache _c_handler and _destroy_data so they are not garbage collected while connected
_cfunctype_cache[(instance, handler_id)] = (_c_handler, _destroy_data)
return handler_id
def disconnect(instance, handler_id):
global _cfunctype_cache
_instance = instance._cself
_handler_id = _girepository.gulong(handler_id)
_libgobject.g_signal_handler_disconnect(_instance, _handler_id)
del _cfunctype_cache[(instance, handler_id)]
def block(instance, handler_id):
_instance = instance._cself
_handler_id = _girepository.gulong(handler_id)
_libgobject.g_signal_handler_block(_instance, _handler_id)
def unblock(instance, handler_id):
_instance = instance._cself
_handler_id = _girepository.gulong(handler_id)
_libgobject.g_signal_handler_unblock(_instance, _handler_id)
#~ setattr(class_, '__new__', __new__)
#~ setattr(class_, '__init__', __init__)
setattr(class_, 'connect', connect)
setattr(class_, 'disconnect', disconnect)
setattr(class_, 'block', block)
setattr(class_, 'unblock', unblock)
# class
class_.__module__ = self
_pygirepository_classes[namespace_classname] = class_
setattr(self, attr, class_)
return class_
elif _info_type.value == _girepository.GI_INFO_TYPE_INTERFACE.value:
# interface
_interface_info = _girepository.cast(_base_info, _girepository.POINTER(_girepository.GIInterfaceInfo))
_registered_info = _girepository.cast(_interface_info, _girepository.POINTER(_girepository.GIRegisteredTypeInfo))
# class name
clsname = namespace_classname
# class bases
clsbases = []
# interfaces/prerequisites
_interface_info_n_prerequisites = _girepository.g_interface_info_get_n_prerequisites(_interface_info)
if _interface_info_n_prerequisites.value:
# if any prerequisite
for i in range(_interface_info_n_prerequisites.value):
# prerequisite
_base_info_prerequisite = _girepository.g_interface_info_get_prerequisite(_interface_info, _girepository.gint(i))
# prerequisite namespace
_base_info_prerequisite_namespace = _girepository.g_base_info_get_namespace(_base_info_prerequisite)
base_info_prerequisite_namespace_bytes = _base_info_prerequisite_namespace.value
if PY2: base_info_prerequisite_namespace = base_info_prerequisite_namespace_bytes
elif PY3: base_info_prerequisite_namespace = base_info_prerequisite_namespace_bytes.decode()
# prerequisite name
_base_info_prerequisite_name = _girepository.g_base_info_get_name(_base_info_prerequisite)
base_info_prerequisite_name_bytes = _base_info_prerequisite_name.value
if PY2: base_info_prerequisite_name = base_info_prerequisite_name_bytes
elif PY3: base_info_prerequisite_name = base_info_prerequisite_name_bytes.decode()
# append prerequisite (parent interface) to clsbases
module_prerequisite = _pygirepository_modules[base_info_prerequisite_namespace]
clsbase = getattr(module_prerequisite, base_info_prerequisite_name)
clsbases.append(clsbase)
else:
# other, base class is GIInterface
clsbases.append(GIInterface)
clsbases = tuple([clsbase for clsbase in _mro(clsbases) if clsbase in clsbases])
# class dict
clsdict = {
'_interface_info': _interface_info,
'_registered_info': _registered_info,
}
# FIXME: parse properties
# methods
_interface_info_n_methods = _girepository.g_interface_info_get_n_methods(_interface_info)
for i in range(_interface_info_n_methods.value):
# method
_function_info_method = _girepository.g_interface_info_get_method(_interface_info, _girepository.gint(i))
_base_info_method = _girepository.cast(_function_info_method, _girepository.POINTER(_girepository.GIBaseInfo))
# method name
_base_info_method_name = _girepository.g_base_info_get_name(_base_info_method)
base_info_method_name_bytes = _base_info_method_name.value
if PY2: base_info_method_name = base_info_method_name_bytes
elif PY3: base_info_method_name = base_info_method_name_bytes.decode()
# FIXME: gifunction can be method, constructor, etc
# attach method to class dict
gifunction = GIFunction(_function_info=_function_info_method)
clsdict[base_info_method_name] = gifunction
# FIXME: parse signals
# FIXME: parse vfuncs
# FIXME: parse constants
# create class
class_ = type(clsname, clsbases, clsdict)
class_.__module__ = self
_pygirepository_classes[namespace_classname] = class_
setattr(self, attr, class_)
return class_
elif _info_type.value == _girepository.GI_INFO_TYPE_CONSTANT.value:
# constant
_constant_info = _girepository.cast(_base_info, _girepository.POINTER(_girepository.GIConstantInfo))
_arg = _girepository.GIArgument()
_transfer = _girepository.GI_TRANSFER_NOTHING
_type_info = _girepository.g_constant_info_get_type(_constant_info)
# FIXME: _arg is never populated from the constant (e.g. via g_constant_info_get_value),
# so the converted value comes from an empty GIArgument
argument = _convert_giargument_to_pyobject_with_typeinfo_transfer(_arg, _type_info, _transfer)
setattr(self, attr, argument)
return argument
elif _info_type.value == _girepository.GI_INFO_TYPE_ERROR_DOMAIN.value:
# error domain
raise GIError('unknown info type "%s" for %s' % (_info_type.value, attr))
elif _info_type.value == _girepository.GI_INFO_TYPE_UNION.value:
# union
_union_info = _girepository.cast(_base_info, _girepository.POINTER(_girepository.GIUnionInfo))
_registered_info = _girepository.cast(_union_info, _girepository.POINTER(_girepository.GIRegisteredTypeInfo))
# create class
clsname = namespace_classname
clsbases = (GIUnion,)
clsdict = {
'_union_info': _union_info,
'_registered_info': _registered_info,
}
# FIXME: parse fields
# methods
_union_info_n_methods = _girepository.g_union_info_get_n_methods(_union_info)
for i in range(_union_info_n_methods.value):
# method
_function_info_method = _girepository.g_union_info_get_method(_union_info, _girepository.gint(i))
_base_info_method = _girepository.cast(_function_info_method, _girepository.POINTER(_girepository.GIBaseInfo))
_base_info_method_name = _girepository.g_base_info_get_name(_base_info_method)
base_info_method_name_bytes = _base_info_method_name.value
if PY2: base_info_method_name = base_info_method_name_bytes
elif PY3: base_info_method_name = base_info_method_name_bytes.decode()
# FIXME: gifunction can be method, constructor, etc
gifunction = GIFunction(_function_info=_function_info_method)
clsdict[base_info_method_name] = gifunction
# new class
class_ = type(clsname, clsbases, clsdict)
class_.__module__ = self
_pygirepository_classes[namespace_classname] = class_
setattr(self, attr, class_)
return class_
elif _info_type.value == _girepository.GI_INFO_TYPE_VALUE.value:
# value
raise GIError('unknown info type "%s" for %s' % (_info_type.value, attr))
elif _info_type.value == _girepository.GI_INFO_TYPE_SIGNAL.value:
# signal
raise GIError('unknown info type "%s" for %s' % (_info_type.value, attr))
elif _info_type.value == _girepository.GI_INFO_TYPE_VFUNC.value:
# vfunc
raise GIError('unknown info type "%s" for %s' % (_info_type.value, attr))
elif _info_type.value == _girepository.GI_INFO_TYPE_PROPERTY.value:
# property
raise GIError('unknown info type "%s" for %s' % (_info_type.value, attr))
elif _info_type.value == _girepository.GI_INFO_TYPE_FIELD.value:
# field
raise GIError('unknown info type "%s" for %s' % (_info_type.value, attr))
elif _info_type.value == _girepository.GI_INFO_TYPE_ARG.value:
# arg
raise GIError('unknown info type "%s" for %s' % (_info_type.value, attr))
elif _info_type.value == _girepository.GI_INFO_TYPE_TYPE.value:
# type
raise GIError('unknown info type "%s" for %s' % (_info_type.value, attr))
elif _info_type.value == _girepository.GI_INFO_TYPE_UNRESOLVED.value:
# unresolved
raise GIError('unknown info type "%s" for %s' % (_info_type.value, attr))
else:
# error
raise GIError('unknown info type "%s" for %s' % (_info_type.value, attr))
def _wrap_all(self):
# repository, namespace
_repository = _pygirepository._repository
_namespace = _girepository.g_typelib_get_namespace(self._typelib)
# infos
_n_infos = _girepository.g_irepository_get_n_infos(_repository, _namespace)
for i in range(_n_infos.value):
# info
_base_info = _girepository.g_irepository_get_info(_repository, _namespace, _girepository.gint(i))
_name = _girepository.g_base_info_get_name(_base_info)
name_bytes = _name.value
if PY2: name = name_bytes
elif PY3: name = name_bytes.decode()
o = self._wrap(name)
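# Illustrative usage sketch (not part of the original module): attribute access on a
# wrapped typelib is expected to trigger _wrap(), which generates and caches a Python
# class for the requested type. The "Gtk"/"Window" names below are hypothetical examples.
def _example_wrap_usage():  # pragma: no cover
    repo = GIRepository()
    Gtk = getattr(repo, 'Gtk')        # load and wrap the "Gtk" typelib (assumption)
    Window = getattr(Gtk, 'Window')   # _wrap('Window') -> dynamically generated class
    return Window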
########################################################################
class GIBase(object):
_base_info = None
def __new__(cls, *args, **kwargs):
# self = super(GIBase, cls).__new__(cls, *args, **kwargs)
self = object.__new__(cls)
return self
def __init__(self, *args, **kwargs):
try:
_base_info = kwargs.pop('_base_info')
self._base_info = _base_info
except KeyError:
pass
try:
self._cself = kwargs.pop('_cself')
except KeyError:
self._cself = None
def __del__(self):
#if self._base_info:
# _girepository.g_base_info_unref(self._base_info)
pass
class GICallable(GIBase):
_callable_info = None
def __init__(self, *args, **kwargs):
try:
_callable_info = kwargs.pop('_callable_info')
_base_info = _girepository.cast(_callable_info, _girepository.POINTER(_girepository.GIBaseInfo))
GIBase.__init__(self, _base_info=_base_info, *args, **kwargs)
self._callable_info = _callable_info
except KeyError:
GIBase.__init__(self, *args, **kwargs)
class GIFunction(GICallable):
_function_info = None
def __init__(self, *args, **kwargs):
try:
_function_info = kwargs.pop('_function_info')
_callable_info = _girepository.cast(_function_info, _girepository.POINTER(_girepository.GICallableInfo))
GICallable.__init__(self, _callable_info=_callable_info, *args, **kwargs)
self._function_info = _function_info
except KeyError:
GICallable.__init__(self, *args, **kwargs)
self._pytype = None
def __repr__(self):
# symbol
_function_info_symbol = _girepository.g_function_info_get_symbol(self._function_info)
function_info_symbol_bytes = _function_info_symbol.value
if PY2: function_info_symbol = function_info_symbol_bytes
elif PY3: function_info_symbol = function_info_symbol_bytes.decode()
# name
_function_info_name = _girepository.g_base_info_get_name(self._base_info)
function_info_name_bytes = _function_info_name.value
if PY2: function_info_name = function_info_name_bytes
elif PY3: function_info_name = function_info_name_bytes.decode()
return ''.join((
'<',
self.__class__.__name__,
' ',
function_info_symbol if function_info_symbol else function_info_name,
' at ',
hex(id(self)),
'>',
))
def __call__(self, *args, **kwargs):
# print('GIFunction.__call__:', args, kwargs)
args = list(args)
# prepare args for g_function_info_invoke
_callable_info = self._callable_info
_function_info = self._function_info
_function_info_flags = _girepository.g_function_info_get_flags(_function_info)
_return_type_type_info = _girepository.g_callable_info_get_return_type(_callable_info)
_return_type_type_tag = _girepository.g_type_info_get_tag(_return_type_type_info)
_return_transfer = _girepository.g_callable_info_get_caller_owns(_callable_info)
_may_return_null_type_info = _girepository.g_callable_info_may_return_null(_callable_info)
# symbol
_function_info_symbol = _girepository.g_function_info_get_symbol(self._function_info)
function_info_symbol_bytes = _function_info_symbol.value
if PY2: function_info_symbol = function_info_symbol_bytes
elif PY3: function_info_symbol = function_info_symbol_bytes.decode()
# name
_function_info_name = _girepository.g_base_info_get_name(self._base_info)
function_info_name_bytes = _function_info_name.value
if PY2: function_info_name = function_info_name_bytes
elif PY3: function_info_name = function_info_name_bytes.decode()
# prepare in/out args
_arg_info_ins = []
_arg_info_outs = []
_arg_ins = []
_arg_outs = []
# function info flags?
if _function_info_flags.value & _girepository.GI_FUNCTION_IS_METHOD.value:
# preserve instance
# pop first (instance)
self_arg = args.pop(0)
_self_arg = _girepository.GIArgument()
_self_arg.v_pointer = self_arg._cself
elif _function_info_flags.value & _girepository.GI_FUNCTION_IS_CONSTRUCTOR.value:
# preserve class
# pop first (class)
cls_arg = args.pop(0)
# args
_n_args = _girepository.g_callable_info_get_n_args(_callable_info)
for i in range(_n_args.value):
# arg
_arg_info = _girepository.g_callable_info_get_arg(_callable_info, _girepository.gint(i))
_direction = _girepository.g_arg_info_get_direction(_arg_info)
_is_optional = _girepository.g_arg_info_is_optional(_arg_info)
if i < len(args):
arg = args[i]
_arg = _convert_pyobject_to_giargument_with_arginfo(arg, _arg_info)
else:
raise GIError('too few arguments')
# arg in or out according to direction
if _direction.value == _girepository.GI_DIRECTION_IN.value:
_arg_info_ins.append(_arg_info)
_arg_ins.append(_arg)
elif _direction.value == _girepository.GI_DIRECTION_OUT.value:
_arg_info_outs.append(_arg_info)
_arg_outs.append(_arg)
elif _direction.value == _girepository.GI_DIRECTION_INOUT.value:
_arg_info_ins.append(_arg_info)
_arg_info_outs.append(_arg_info)
_arg_ins.append(_arg)
_arg_outs.append(_arg)
# function info flags?
if _function_info_flags.value & _girepository.GI_FUNCTION_IS_METHOD.value:
# prepend instance
_arg_ins[0:0] = [_self_arg]
# print('GIFunction.__call__:', _arg_info_ins, _arg_info_outs)
# print('GIFunction.__call__:', _arg_ins, _arg_outs)
# final preparation of args for g_function_info_invoke
_inargs = (_girepository.GIArgument * len(_arg_ins))(*_arg_ins)
_ninargs = _girepository.gint(len(_inargs))
_outargs = (_girepository.GIArgument * len(_arg_outs))(*_arg_outs)
_noutargs = _girepository.gint(len(_outargs))
_retarg = (_girepository.GIArgument * 1)(_girepository.GIArgument())
_error = _girepository.cast(
_girepository.gpointer(),
_girepository.POINTER(
_girepository.GError
)
)
# invoke function
_result = _girepository.g_function_info_invoke(
_function_info,
_inargs,
_ninargs,
_outargs,
_noutargs,
_retarg,
_error,
)
# did error occur?
if not _result.value:
# error occurred, raise an exception with the GError message
error_message = _error.contents.message.value
raise GIError(error_message)
# function info flags?
if _function_info_flags.value in (0, _girepository.GI_FUNCTION_IS_METHOD.value):
_type_info_return = _girepository.g_callable_info_get_return_type(_callable_info)
_type_tag_return = _girepository.g_type_info_get_tag(_type_info_return)
_transfer_return = _girepository.g_callable_info_get_caller_owns(_callable_info)
obj = _convert_giargument_to_pyobject_with_typeinfo_transfer(_retarg[0], _type_info_return, _transfer_return)
if _arg_outs:
# return as list
if _type_tag_return.value == _girepository.GI_TYPE_TAG_VOID.value:
return_ = []
else:
return_ = [obj]
for _arg, _arg_info in zip(_arg_outs, _arg_info_outs):
obj_ = _convert_giargument_to_pyobject_with_arginfo(_arg, _arg_info)
return_.append(obj_)
if len(return_) == 1:
return_ = return_[0]
else:
# return as single object
return_ = obj
elif _function_info_flags.value == _girepository.GI_FUNCTION_IS_CONSTRUCTOR.value:
#~ if PY2:
#~ pyself = super(self._pytype, cls_arg).__new__.im_func(cls_arg)
#~ elif PY3:
#~ pyself = super(self._pytype, cls_arg).__new__(cls_arg)
pyself = super(self._pytype, cls_arg).__new__(cls_arg)
pyself._cself = _retarg[0].v_pointer
return_ = pyself
elif _function_info_flags.value == _girepository.GI_FUNCTION_IS_GETTER.value:
raise GIError('unsupported GIFunctionInfoFlags "%i"' % _function_info_flags.value)
elif _function_info_flags.value == _girepository.GI_FUNCTION_IS_SETTER.value:
raise GIError('unsupported GIFunctionInfoFlags "%i"' % _function_info_flags.value)
elif _function_info_flags.value == _girepository.GI_FUNCTION_WRAPS_VFUNC.value:
raise GIError('unsupported GIFunctionInfoFlags "%i"' % _function_info_flags.value)
elif _function_info_flags.value == _girepository.GI_FUNCTION_THROWS.value:
raise GIError('unsupported GIFunctionInfoFlags "%i"' % _function_info_flags.value)
else:
raise GIError('unsupported GIFunctionInfoFlags "%i"' % _function_info_flags.value)
return return_
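# Illustrative dispatch sketch (hypothetical namespace/class/method names; only the
# wrapping mechanism is taken from this module): plain function infos are exposed as
# bare GIFunction objects, GI_FUNCTION_IS_METHOD wraps them with common.instancemethod(),
# and GI_FUNCTION_IS_CONSTRUCTOR turns them into classmethods on the generated class.
def _example_gifunction_dispatch():  # pragma: no cover
    repo = GIRepository()
    Gtk = getattr(repo, 'Gtk')       # hypothetical typelib
    Window = getattr(Gtk, 'Window')  # object info wrapped by _wrap() above
    window = Window.new(0)           # constructor flag -> classmethod(GIFunction)
    window.show()                    # method flag -> common.instancemethod(GIFunction)
    return window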
class GISignal(GICallable):
_signal_info = None
def __init__(self, *args, **kwargs):
try:
_signal_info = kwargs.pop('_signal_info')
_callable_info = _girepository.cast(_signal_info, _girepository.POINTER(_girepository.GICallableInfo))
GICallable.__init__(self, _callable_info=_callable_info, *args, **kwargs)
self._signal_info = _signal_info
except KeyError:
GICallable.__init__(self, *args, **kwargs)
class GIVFunc(GICallable):
_vfunc_info = None
def __init__(self, *args, **kwargs):
try:
_vfunc_info = kwargs.pop('_vfunc_info')
_callable_info = _girepository.cast(_vfunc_info, _girepository.POINTER(_girepository.GICallableInfo))
GICallable.__init__(self, _callable_info=_callable_info, *args, **kwargs)
self._vfunc_info = _vfunc_info
except KeyError:
GICallable.__init__(self, *args, **kwargs)
class GIRegisteredType(GIBase):
_registered_info = None
def __init__(self, _registered_info=None, *args, **kwargs):
GIBase.__init__(self, *args, **kwargs)
try:
self._transfer = kwargs.pop('_transfer')
except KeyError:
self._transfer = _girepository.GI_TRANSFER_NOTHING
#~ if not self._cself:
#~ _g_type = _girepository.g_registered_type_info_get_g_type(self._registered_info)
#~ self._cself = _girepository.g_type_create_instance(_g_type)
#~ self._cself = _girepository.g_object_new(_g_type)
#~ print self, self._registered_info
#~ print self, dict(self.__dict__), dict(self.__class__.__dict__)
class GIEnum(GIRegisteredType):
_enum_info = None
_registered_info = None
def __init__(self, _enum_info=None, *args, **kwargs):
GIRegisteredType.__init__(self, *args, **kwargs)
class GIInterface(GIRegisteredType):
_interface_info = None
_registered_info = None
def __init__(self, _interface_info=None, *args, **kwargs):
GIRegisteredType.__init__(self, *args, **kwargs)
class GIObject(GIRegisteredType):
_object_info = None
_registered_info = None
def __init__(self, *args, **kwargs):
GIRegisteredType.__init__(self, *args, **kwargs)
if self._object_info:
_g_type = _girepository.g_registered_type_info_get_g_type(self._registered_info)
#~ # prepare arguments for g_object_new
#~ _args = []
#~
#~ for k, v in kwargs.items():
#~ #print k, v
#~ _args.append(_girepository.gchar_p(k))
#~ _args.append(_convert_pyobject_to_gvalue(v))
#~
#~ _args.append(None)
#~ print _args
# new gobject
self._cself = _girepository.g_object_new(_g_type, None)
_cself_gobject = _girepository.cast(self._cself, _girepository.POINTER(_girepository.GObject))
for k, v in kwargs.items():
_girepository.g_object_set_property(
_cself_gobject,
_girepository.gchar_p(k.encode('utf-8')),
_convert_pyobject_to_gvalue(v),
)
class GIStruct(GIRegisteredType):
_struct_info = None
_registered_info = None
def __init__(self, *args, **kwargs):
GIRegisteredType.__init__(self, *args, **kwargs)
if self._struct_info:
_size = _girepository.g_struct_info_get_size(self._struct_info)
self._cself = _girepository.cast(_girepository.g_malloc0(_size), _girepository.gpointer)
class GIUnion(GIRegisteredType):
_union_info = None
_registered_info = None
def __init__(self, *args, **kwargs):
GIRegisteredType.__init__(self, *args, **kwargs)
class GIArg(GIBase):
_arg_info = None
def __init__(self, *args, **kwargs):
try:
_arg_info = kwargs.pop('_arg_info')
_base_info = _girepository.cast(_arg_info, _girepository.POINTER(_girepository.GIBaseInfo))
GIBase.__init__(self, _base_info=_base_info, *args, **kwargs)
self._arg_info = _arg_info
except KeyError:
GIBase.__init__(self, *args, **kwargs)
class GIConstant(GIBase):
_constant_info = None
def __init__(self, *args, **kwargs):
try:
_constant_info = kwargs.pop('_constant_info')
_base_info = _girepository.cast(_constant_info, _girepository.POINTER(_girepository.GIBaseInfo))
GIBase.__init__(self, _base_info=_base_info, *args, **kwargs)
self._constant_info = _constant_info
except KeyError:
GIBase.__init__(self, *args, **kwargs)
class GIErrorDomain(GIBase):
_error_domain_info = None
def __init__(self, *args, **kwargs):
try:
_error_domain_info = kwargs.pop('_error_domain_info')
_base_info = _girepository.cast(_error_domain_info, _girepository.POINTER(_girepository.GIBaseInfo))
GIBase.__init__(self, _base_info=_base_info, *args, **kwargs)
self._error_domain_info = _error_domain_info
except KeyError:
GIBase.__init__(self, *args, **kwargs)
class GIField(GIBase):
_field_info = None
def __init__(self, *args, **kwargs):
try:
_field_info = kwargs.pop('_field_info')
_base_info = _girepository.cast(_field_info, _girepository.POINTER(_girepository.GIBaseInfo))
GIBase.__init__(self, _base_info=_base_info, *args, **kwargs)
self._field_info = _field_info
except KeyError:
GIBase.__init__(self, *args, **kwargs)
class GIProperty(GIBase):
_property_info = None
def __init__(self, *args, **kwargs):
try:
_property_info = kwargs.pop('_property_info')
_base_info = _girepository.cast(_property_info, _girepository.POINTER(_girepository.GIBaseInfo))
GIBase.__init__(self, _base_info=_base_info, *args, **kwargs)
self._property_info = _property_info
except KeyError:
GIBase.__init__(self, *args, **kwargs)
class GIType(GIBase):
_type_info = None
def __init__(self, *args, **kwargs):
try:
_type_info = kwargs.pop('_type_info')
_base_info = _girepository.cast(_type_info, _girepository.POINTER(_girepository.GIBaseInfo))
GIBase.__init__(self, _base_info=_base_info, *args, **kwargs)
self._type_info = _type_info
except KeyError:
GIBase.__init__(self, *args, **kwargs)
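# Note on the wrapper classes above (descriptive only): each GI* wrapper accepts its
# matching low-level *_info pointer as a keyword argument and casts it down to
# GIBaseInfo for the base class, mirroring how _wrap() constructs them, e.g.
#   GIFunction(_function_info=_function_info)
#   GIArg(_arg_info=_arg_info)
# An existing C-side object can instead be attached through the _cself keyword.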
########################################################################
def _merge_mro(seqs):
res = []
i = 0
while 1:
nonemptyseqs = [seq for seq in seqs if seq]
if not nonemptyseqs:
return res
i += 1
for seq in nonemptyseqs:
cand = seq[0]
nothead = [s for s in nonemptyseqs if cand in s[1:]]
if nothead:
cand = None
else:
break
if not cand:
raise GIError("Inconsistent hierarchy")
res.append(cand)
for seq in nonemptyseqs:
if seq[0] == cand:
del seq[0]
def _calc_mro(C):
return _merge_mro([[C]] + [_calc_mro(base) for base in C.__bases__] + [list(C.__bases__)])
def _mro(bases):
segs = []
for base in bases:
segs.append(_calc_mro(base))
segs = _merge_mro(segs)
return tuple(segs)
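# Illustrative check of the C3-style linearization above (hypothetical classes, defined
# only for the example): _mro() orders wrapper base classes before they are handed to
# type() in _wrap().
def _example_mro():  # pragma: no cover
    class A(object): pass
    class B(A): pass
    class C(A): pass
    # expected result: (B, C, A, object)
    return _mro([B, C])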
########################################################################
def _convert_giargument_to_pyobject_with_arginfo(_arg, _arg_info):
_type_info = _girepository.g_arg_info_get_type(_arg_info)
_transfer = _girepository.g_arg_info_get_ownership_transfer(_arg_info)
return _convert_giargument_to_pyobject_with_typeinfo_transfer(_arg, _type_info, _transfer)
def _convert_pyobject_to_giargument_with_arginfo(obj, _arg_info):
_type_info = _girepository.g_arg_info_get_type(_arg_info)
_transfer = _girepository.g_arg_info_get_ownership_transfer(_arg_info)
return _convert_pyobject_to_giargument_with_typeinfo_transfer(obj, _type_info, _transfer)
def _convert_giargument_to_pyobject_with_typeinfo_transfer(_arg, _type_info, _transfer):
_type_tag = _girepository.g_type_info_get_tag(_type_info)
if _type_tag.value == _girepository.GI_TYPE_TAG_VOID.value:
obj = None
elif _type_tag.value == _girepository.GI_TYPE_TAG_BOOLEAN.value:
obj = bool(_arg.v_boolean.value)
elif _type_tag.value == _girepository.GI_TYPE_TAG_INT8.value:
obj = int(_arg.v_int8.value)
elif _type_tag.value == _girepository.GI_TYPE_TAG_UINT8.value:
obj = int(_arg.v_uint8.value)
elif _type_tag.value == _girepository.GI_TYPE_TAG_INT16.value:
obj = int(_arg.v_int16.value)
elif _type_tag.value == _girepository.GI_TYPE_TAG_UINT16.value:
obj = int(_arg.v_uint16.value)
elif _type_tag.value == _girepository.GI_TYPE_TAG_INT32.value:
obj = int(_arg.v_int32.value)
elif _type_tag.value == _girepository.GI_TYPE_TAG_UINT32.value:
obj = int(_arg.v_uint32.value)
elif _type_tag.value == _girepository.GI_TYPE_TAG_INT64.value:
obj = int(_arg.v_int64.value)
elif _type_tag.value == _girepository.GI_TYPE_TAG_UINT64.value:
obj = int(_arg.v_uint64.value)
elif _type_tag.value == _girepository.GI_TYPE_TAG_FLOAT.value:
obj = float(_arg.v_float.value)
elif _type_tag.value == _girepository.GI_TYPE_TAG_DOUBLE.value:
obj = float(_arg.v_double.value)
elif _type_tag.value == _girepository.GI_TYPE_TAG_GTYPE.value:
obj = int(_arg.v_long.value)
elif _type_tag.value == _girepository.GI_TYPE_TAG_UTF8.value:
obj = str(_arg.v_string.value)
elif _type_tag.value == _girepository.GI_TYPE_TAG_FILENAME.value:
obj = str(_arg.v_string.value)
elif _type_tag.value == _girepository.GI_TYPE_TAG_ARRAY.value:
# array
if not _arg.v_pointer:
obj = []
else:
_array = _girepository.cast(_arg.v_pointer, _girepository.POINTER(_girepository.GArray))
_param_type_info = _girepository.g_type_info_get_param_type(_type_info, _girepository.gint(0))
_param_base_info = _girepository.cast(_param_type_info, _girepository.POINTER(_girepository.GIBaseInfo))
_param_type_tag = _girepository.g_type_info_get_tag(_param_type_info)
_param_transfer = _girepository.GI_TRANSFER_NOTHING if _transfer.value == _girepository.GI_TRANSFER_CONTAINER.value else _transfer
obj = []
for i in range(_array.contents.len.value):
is_struct = False
if _param_type_tag.value == _girepository.GI_TYPE_TAG_INTERFACE.value:
_item_base_info = _girepository.g_type_info_get_interface(_param_type_info)
_item_interface_info = _girepository.cast(_item_base_info, _girepository.POINTER(_girepository.GIInterfaceInfo))
_item_type_tag = _girepository.g_base_info_get_type(_item_base_info)
if _item_type_tag.value in (
_girepository.GI_INFO_TYPE_STRUCT.value,
_girepository.GI_INFO_TYPE_BOXED.value,
):
is_struct = True
_girepository.g_base_info_unref(_item_base_info)
if is_struct:
_item = _girepository.GIArgument()
_item.v_pointer = _girepository.g_array_index(_array, _girepository.GIArgument, _girepository.gint(i))
else:
_item = _girepository.g_array_index(_array, _girepository.GIArgument, _girepository.gint(i))
item = _convert_giargument_to_pyobject_with_typeinfo_transfer(_item, _param_type_info, _param_transfer)
obj.append(item)
_girepository.g_base_info_unref(_param_base_info)
elif _type_tag.value == _girepository.GI_TYPE_TAG_INTERFACE.value:
# interface
_base_info = _girepository.g_type_info_get_interface(_type_info)
_registered_type_info = _girepository.cast(_base_info, _girepository.POINTER(_girepository.GIRegisteredTypeInfo))
_struct_info = _girepository.cast(_base_info, _girepository.POINTER(_girepository.GIStructInfo))
_interface_info = _girepository.cast(_base_info, _girepository.POINTER(_girepository.GIInterfaceInfo))
_type_tag = _girepository.g_base_info_get_type(_base_info)
if _type_tag.value == _girepository.GI_INFO_TYPE_CALLBACK.value:
# FIXME: implement
raise GIError('unsupported type tag %i' % _type_tag.value)
elif _type_tag.value in (
_girepository.GI_INFO_TYPE_BOXED.value,
_girepository.GI_INFO_TYPE_STRUCT.value,
_girepository.GI_INFO_TYPE_UNION.value,
):
if _arg.v_pointer:
_type = _girepository.g_registered_type_info_get_g_type(_registered_type_info)
if _type.value == _girepository.G_TYPE_VALUE.value:
obj = _convert_gvalue_to_pyobject(_arg.v_pointer, False)
elif _type.value in (
_girepository.G_TYPE_NONE.value,
_girepository.G_TYPE_BOXED.value,
_girepository.G_TYPE_POINTER.value,
):
type_ = _convert_gibaseinfo_to_pytype(_base_info)
obj = type_()
obj._cself = _arg.v_pointer
obj._transfer = _transfer
else:
# raise GIError('structure type "%s" is not supported yet' % _girepository.g_type_name(_type).value)
type_ = _convert_gibaseinfo_to_pytype(_base_info)
obj = type_()
obj._cself = _arg.v_pointer
obj._transfer = _transfer
else:
obj = None
elif _type_tag.value in (
_girepository.GI_INFO_TYPE_ENUM.value,
_girepository.GI_INFO_TYPE_FLAGS.value,
):
_type = _girepository.g_registered_type_info_get_g_type(_registered_type_info)
type_ = _convert_gibaseinfo_to_pytype(_base_info)
obj = type_(_arg.v_long)
elif _type_tag.value in (
_girepository.GI_INFO_TYPE_INTERFACE.value,
_girepository.GI_INFO_TYPE_OBJECT.value,
):
if _arg.v_pointer:
type_ = _convert_gibaseinfo_to_pytype(_base_info)
obj = type_()
obj._cself = _arg.v_pointer
else:
obj = None
else:
raise GIError('unsupported type tag %i' % _type_tag.value)
_girepository.g_base_info_unref(_base_info)
elif _type_tag.value == _girepository.GI_TYPE_TAG_GLIST.value:
# glist
_list = _girepository.cast(_arg.v_pointer, _girepository.POINTER(_girepository.GList))
_param_type_info = _girepository.g_type_info_get_param_type(_type_info, _girepository.gint(0))
_param_base_info = _girepository.cast(_param_type_info, _girepository.POINTER(_girepository.GIBaseInfo))
_param_transfer = _girepository.GI_TRANSFER_NOTHING if _transfer.value == _girepository.GI_TRANSFER_CONTAINER.value else _transfer
obj = []
while _list:
_item = _girepository.GIArgument()
_item.v_pointer = _list.contents.data
item = _convert_giargument_to_pyobject_with_typeinfo_transfer(_item, _param_type_info, _param_transfer)
obj.append(item)
_list = _list.contents.next
_girepository.g_base_info_unref(_param_base_info)
elif _type_tag.value == _girepository.GI_TYPE_TAG_GSLIST.value:
# gslist
_list = _girepository.cast(_arg.v_pointer, _girepository.POINTER(_girepository.GSList))
_param_type_info = _girepository.g_type_info_get_param_type(_type_info, _girepository.gint(0))
_param_base_info = _girepository.cast(_param_type_info, _girepository.POINTER(_girepository.GIBaseInfo))
_param_transfer = _girepository.GI_TRANSFER_NOTHING if _transfer.value == _girepository.GI_TRANSFER_CONTAINER.value else _transfer
obj = []
while _list:
_item = _girepository.GIArgument()
_item.v_pointer = _list.contents.data
item = _convert_giargument_to_pyobject_with_typeinfo_transfer(_item, _param_type_info, _param_transfer)
obj.append(item)
_list = _list.contents.next
_girepository.g_base_info_unref(_param_base_info)
elif _type_tag.value == _girepository.GI_TYPE_TAG_GHASH.value:
# ghash
if not _arg.v_pointer:
obj = None
else:
obj = {}
_key_type_info = _girepository.g_type_info_get_param_type(_type_info, _girepository.gint(0))
_key_base_info = _girepository.cast(_key_type_info, _girepository.POINTER(_girepository.GIBaseInfo))
_value_type_info = _girepository.g_type_info_get_param_type(_type_info, _girepository.gint(1))
_value_base_info = _girepository.cast(_value_type_info, _girepository.POINTER(_girepository.GIBaseInfo))
_param_transfer = _girepository.GI_TRANSFER_NOTHING if _transfer.value == _girepository.GI_TRANSFER_CONTAINER.value else _transfer
# FIXME: implement hash table iteration
# ...
_girepository.g_base_info_unref(_key_base_info)
_girepository.g_base_info_unref(_value_base_info)
elif _type_tag.value == _girepository.GI_TYPE_TAG_ERROR.value:
# FIXME: implement
raise GIError('unsupported type tag %i' % _type_tag.value)
else:
raise GIError('unsupported type tag %i' % _type_tag.value)
return obj
def _convert_pyobject_to_giargument_with_typeinfo_transfer(obj, _type_info, _transfer):
_arg = _girepository.GIArgument()
_type_tag = _girepository.g_type_info_get_tag(_type_info)
if _type_tag.value == _girepository.GI_TYPE_TAG_VOID.value:
_arg.v_pointer = obj._cself
elif _type_tag.value == _girepository.GI_TYPE_TAG_BOOLEAN.value:
_arg.v_boolean = _girepository.gboolean(obj)
elif _type_tag.value == _girepository.GI_TYPE_TAG_INT8.value:
_arg.v_int8 = _girepository.gint8(obj)
elif _type_tag.value == _girepository.GI_TYPE_TAG_UINT8.value:
_arg.v_uint8 = _girepository.guint8(obj)
elif _type_tag.value == _girepository.GI_TYPE_TAG_INT16.value:
_arg.v_int16 = _girepository.gint16(obj)
elif _type_tag.value == _girepository.GI_TYPE_TAG_UINT16.value:
_arg.v_uint16 = _girepository.guint16(obj)
elif _type_tag.value == _girepository.GI_TYPE_TAG_INT32.value:
_arg.v_int32 = _girepository.gint32(obj)
elif _type_tag.value == _girepository.GI_TYPE_TAG_UINT32.value:
_arg.v_uint32 = _girepository.guint32(obj)
elif _type_tag.value == _girepository.GI_TYPE_TAG_INT64.value:
_arg.v_int64 = _girepository.gint64(obj)
elif _type_tag.value == _girepository.GI_TYPE_TAG_UINT64.value:
_arg.v_uint64 = _girepository.guint64(obj)
elif _type_tag.value == _girepository.GI_TYPE_TAG_FLOAT.value:
_arg.v_float = _girepository.gfloat(obj)
elif _type_tag.value == _girepository.GI_TYPE_TAG_DOUBLE.value:
_arg.v_double = _girepository.gdouble(obj)
elif _type_tag.value == _girepository.GI_TYPE_TAG_GTYPE.value:
_arg.v_long = _girepository.glong(obj)
elif _type_tag.value == _girepository.GI_TYPE_TAG_UTF8.value:
if PY2:
_arg.v_string = _girepository.gchar_p(obj)
elif PY3:
obj_bytes = obj.encode()
_arg.v_string = _girepository.gchar_p(obj_bytes)
elif _type_tag.value == _girepository.GI_TYPE_TAG_FILENAME.value:
if PY2:
_arg.v_string = _girepository.gchar_p(obj)
elif PY3:
obj_bytes = obj.encode()
_arg.v_string = _girepository.gchar_p(obj_bytes)
elif _type_tag.value == _girepository.GI_TYPE_TAG_ARRAY.value:
if obj:
_array_type = _girepository.g_type_info_get_array_type(_type_info)
_param_type_info = _girepository.g_type_info_get_param_type(_type_info, _girepository.gint(0))
_param_base_info = _girepository.cast(_param_type_info, _girepository.POINTER(_girepository.GIBaseInfo))
_param_type_tag = _girepository.g_type_info_get_tag(_param_type_info)
if _array_type.value == _girepository.GI_ARRAY_TYPE_C.value:
if _param_type_tag.value == _girepository.GI_TYPE_TAG_VOID.value:
# FIXME: implement
raise GIError('unsupported param type tag %i' % _param_type_tag.value)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_BOOLEAN.value:
_obj = (_girepository.gboolean * len(obj))(*obj)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_INT8.value:
_obj = (_girepository.gint8 * len(obj))(*obj)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_UINT8.value:
_obj = (_girepository.guint8 * len(obj))(*obj)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_INT16.value:
_obj = (_girepository.gint16 * len(obj))(*obj)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_UINT16.value:
_obj = (_girepository.guint16 * len(obj))(*obj)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_INT32.value:
_obj = (_girepository.gint32 * len(obj))(*obj)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_UINT32.value:
_obj = (_girepository.guint32 * len(obj))(*obj)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_INT64.value:
_obj = (_girepository.gint64 * len(obj))(*obj)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_UINT64.value:
_obj = (_girepository.guint64 * len(obj))(*obj)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_FLOAT.value:
_obj = (_girepository.gfloat * len(obj))(*obj)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_DOUBLE.value:
_obj = (_girepository.gdouble * len(obj))(*obj)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_GTYPE.value:
_obj = (_girepository.GType * len(obj))(*obj)
_size = _girepository.sizeof(_girepository.GType)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_UTF8.value:
_obj = (_girepository.gchar_p * len(obj))(*obj)
_size = _girepository.sizeof(_girepository.gchar_p)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_FILENAME.value:
_obj = (_girepository.gchar_p * len(obj))(*obj)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_ARRAY.value:
# FIXME: implement
raise GIError('unsupported param type tag %i' % _param_type_tag.value)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_INTERFACE.value:
# FIXME: implement
raise GIError('unsupported param type tag %i' % _param_type_tag.value)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_GLIST.value:
# FIXME: implement
raise GIError('unsupported param type tag %i' % _param_type_tag.value)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_GSLIST.value:
# FIXME: implement
raise GIError('unsupported param type tag %i' % _param_type_tag.value)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_GHASH.value:
# FIXME: implement
raise GIError('unsupported param type tag %i' % _param_type_tag.value)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_ERROR.value:
# FIXME: implement
raise GIError('unsupported param type tag %i' % _param_type_tag.value)
else:
raise GIError('unsupported param type tag %i' % _param_type_tag.value)
_obj_gpointer = _girepository.cast(_obj, _girepository.gpointer)
_arg.v_pointer = _obj_gpointer
elif _array_type.value == _girepository.GI_ARRAY_TYPE_ARRAY.value:
if _param_type_tag.value == _girepository.GI_TYPE_TAG_VOID.value:
# FIXME: implement
raise GIError('unsupported param type tag %i' % _param_type_tag.value)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_BOOLEAN.value:
_obj = (_girepository.gboolean * len(obj))(*obj)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_INT8.value:
_obj = (_girepository.gint8 * len(obj))(*obj)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_UINT8.value:
_obj = (_girepository.guint8 * len(obj))(*obj)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_INT16.value:
_obj = (_girepository.gint16 * len(obj))(*obj)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_UINT16.value:
_obj = (_girepository.guint16 * len(obj))(*obj)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_INT32.value:
_obj = (_girepository.gint32 * len(obj))(*obj)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_UINT32.value:
_obj = (_girepository.guint32 * len(obj))(*obj)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_INT64.value:
_obj = (_girepository.gint64 * len(obj))(*obj)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_UINT64.value:
_obj = (_girepository.guint64 * len(obj))(*obj)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_FLOAT.value:
_obj = (_girepository.gfloat * len(obj))(*obj)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_DOUBLE.value:
_obj = (_girepository.gdouble * len(obj))(*obj)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_GTYPE.value:
_obj = (_girepository.GType * len(obj))(*obj)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_UTF8.value:
_obj = (_girepository.gchar_p * len(obj))(*obj)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_FILENAME.value:
_obj = (_girepository.gchar_p * len(obj))(*obj)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_ARRAY.value:
# FIXME: implement
raise GIError('unsupported param type tag %i' % _param_type_tag.value)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_INTERFACE.value:
# FIXME: implement
raise GIError('unsupported param type tag %i' % _param_type_tag.value)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_GLIST.value:
# FIXME: implement
raise GIError('unsupported param type tag %i' % _param_type_tag.value)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_GSLIST.value:
# FIXME: implement
raise GIError('unsupported param type tag %i' % _param_type_tag.value)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_GHASH.value:
# FIXME: implement
raise GIError('unsupported param type tag %i' % _param_type_tag.value)
elif _param_type_tag.value == _girepository.GI_TYPE_TAG_ERROR.value:
# FIXME: implement
raise GIError('unsupported param type tag %i' % _param_type_tag.value)
else:
raise GIError('unsupported param type tag %i' % _param_type_tag.value)
_obj_gpointer = _girepository.cast(_obj, _girepository.gpointer)
_arg.v_pointer = _obj_gpointer
elif _array_type.value == _girepository.GI_ARRAY_TYPE_PTR_ARRAY.value:
# FIXME: implement
raise GIError('unsupported array type %i' % _array_type.value)
elif _array_type.value == _girepository.GI_ARRAY_TYPE_BYTE_ARRAY.value:
_obj = _girepository.gchar_p(''.join(obj))
_obj_gpointer = _girepository.cast(_obj, _girepository.gpointer)
_arg.v_pointer = _obj_gpointer
else:
raise GIError('unsupported array type %i' % _array_type.value)
_girepository.g_base_info_unref(_param_base_info)
else:
_arg.v_pointer = None
elif _type_tag.value == _girepository.GI_TYPE_TAG_INTERFACE.value:
_base_info = _girepository.g_type_info_get_interface(_type_info)
_registered_type_info = _girepository.cast(_base_info, _girepository.POINTER(_girepository.GIRegisteredTypeInfo))
_info_type = _girepository.g_base_info_get_type(_base_info)
if _info_type.value == _girepository.GI_INFO_TYPE_CALLBACK.value:
# FIXME: implement
raise GIError('unsupported info type %i' % _info_type.value)
elif _info_type.value in (
_girepository.GI_INFO_TYPE_BOXED.value,
_girepository.GI_INFO_TYPE_STRUCT.value,
_girepository.GI_INFO_TYPE_UNION.value,
):
if obj is None:
_arg.v_pointer = None
else:
_type = _girepository.g_registered_type_info_get_g_type(_registered_type_info)
if _type.value == _girepository.G_TYPE_VALUE.value:
_value = _convert_pyobject_to_gvalue(obj)
_value_gpointer = _girepository.cast(_girepository.pointer(_value), _girepository.gpointer)
_arg.v_pointer = _value_gpointer
elif _type.value == _girepository.G_TYPE_CLOSURE.value:
_arg.v_pointer = obj._cself
elif _type.value == _girepository.G_TYPE_BOXED.value:
# FIXME: implement
raise GIError('unsupported type %i' % _type.value)
else:
_arg.v_pointer = obj._cself
elif _info_type.value in (
_girepository.GI_INFO_TYPE_ENUM.value,
_girepository.GI_INFO_TYPE_FLAGS.value,
):
_arg.v_int = _girepository.gint(obj)
elif _info_type.value in (
_girepository.GI_INFO_TYPE_INTERFACE.value,
_girepository.GI_INFO_TYPE_OBJECT.value,
):
_arg.v_pointer = obj._cself
else:
raise GIError('unsupported info type %i' % _info_type.value)
elif _type_tag.value == _girepository.GI_TYPE_TAG_GLIST.value:
# FIXME: implement
raise GIError('unsupported type tag %i' % _type_tag.value)
elif _type_tag.value == _girepository.GI_TYPE_TAG_GSLIST.value:
# FIXME: implement
raise GIError('unsupported type tag %i' % _type_tag.value)
elif _type_tag.value == _girepository.GI_TYPE_TAG_GHASH.value:
# FIXME: implement
raise GIError('unsupported type tag %i' % _type_tag.value)
elif _type_tag.value == _girepository.GI_TYPE_TAG_ERROR.value:
# FIXME: implement
raise GIError('unsupported type tag %i' % _type_tag.value)
else:
raise GIError('unsupported type tag %i' % _type_tag.value)
return _arg
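# Illustrative round-trip sketch (assumes a _type_info describing a gint32 parameter,
# obtained elsewhere from the introspection data): for simple scalar type tags the two
# converters above are inverses of each other.
def _example_scalar_roundtrip(_type_info):  # pragma: no cover
    _arg = _convert_pyobject_to_giargument_with_typeinfo_transfer(
        42, _type_info, _girepository.GI_TRANSFER_NOTHING)
    return _convert_giargument_to_pyobject_with_typeinfo_transfer(
        _arg, _type_info, _girepository.GI_TRANSFER_NOTHING)  # expected: 42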
def _convert_gibaseinfo_to_pytype(_gibaseinfo):
global _pygirepository
_namespace = _girepository.g_base_info_get_namespace(_gibaseinfo)
namespace_bytes = _namespace.value
if PY2:
namespace = namespace_bytes
elif PY3:
namespace = namespace_bytes.decode()
_name = _girepository.g_base_info_get_name(_gibaseinfo)
name_bytes = _name.value
if PY2:
name = name_bytes
elif PY3:
name = name_bytes.decode()
girepository = GIRepository()
gitypelib = getattr(girepository, namespace)
pytype = getattr(gitypelib, name)
return pytype
def _convert_gvalue_to_pyobject(_gvalue, copy_boxed):
_gtype = _girepository.G_VALUE_TYPE(_gvalue)
_gtype_fundamental = _girepository.G_TYPE_FUNDAMENTAL(_gtype)
if _gtype_fundamental.value == _girepository.G_TYPE_CHAR.value:
obj = _girepository.g_value_get_char(_gvalue).value
elif _gtype_fundamental.value == _girepository.G_TYPE_UCHAR.value:
obj = _girepository.g_value_get_uchar(_gvalue).value
elif _gtype_fundamental.value == _girepository.G_TYPE_BOOLEAN.value:
obj = _girepository.g_value_get_boolean(_gvalue).value
elif _gtype_fundamental.value == _girepository.G_TYPE_INT.value:
obj = _girepository.g_value_get_int(_gvalue).value
elif _gtype_fundamental.value == _girepository.G_TYPE_UINT.value:
obj = _girepository.g_value_get_uint(_gvalue).value
elif _gtype_fundamental.value == _girepository.G_TYPE_LONG.value:
obj = _girepository.g_value_get_long(_gvalue).value
elif _gtype_fundamental.value == _girepository.G_TYPE_ULONG.value:
obj = _girepository.g_value_get_ulong(_gvalue).value
elif _gtype_fundamental.value == _girepository.G_TYPE_INT64.value:
obj = _girepository.g_value_get_int64(_gvalue).value
elif _gtype_fundamental.value == _girepository.G_TYPE_UINT64.value:
obj = _girepository.g_value_get_uint64(_gvalue).value
elif _gtype_fundamental.value == _girepository.G_TYPE_ENUM.value:
obj = _girepository.g_value_get_enum(_gvalue).value
elif _gtype_fundamental.value == _girepository.G_TYPE_FLAGS.value:
obj = _girepository.g_value_get_flags(_gvalue).value
elif _gtype_fundamental.value == _girepository.G_TYPE_FLOAT.value:
obj = _girepository.g_value_get_float(_gvalue).value
elif _gtype_fundamental.value == _girepository.G_TYPE_DOUBLE.value:
obj = _girepository.g_value_get_double(_gvalue).value
elif _gtype_fundamental.value == _girepository.G_TYPE_STRING.value:
obj = _girepository.g_value_get_string(_gvalue).value
elif _gtype_fundamental.value == _girepository.G_TYPE_POINTER.value:
obj = _girepository.g_value_get_object(_gvalue)
elif _gtype_fundamental.value == _girepository.G_TYPE_BOXED.value:
# FIXME: implement me
raise GIError('unsupported GValue')
elif _gtype_fundamental.value == _girepository.G_TYPE_PARAM.value:
# FIXME: implement me
raise GIError('unsupported GValue')
elif _gtype_fundamental.value == _girepository.G_TYPE_INTERFACE.value:
obj = _girepository.g_value_get_object(_gvalue)
elif _gtype_fundamental.value == _girepository.G_TYPE_OBJECT.value:
obj = _girepository.g_value_get_object(_gvalue)
else:
# FIXME: implement me
raise GIError('unsupported GValue')
return obj
def _convert_pyobject_to_gvalue(obj):
_value = _girepository.GValue()
_data = _girepository.GValue_union0()
if obj is None:
_value.g_type = _girepository.G_TYPE_NONE
_data.v_pointer = None
elif isinstance(obj, bool):
_value.g_type = _girepository.G_TYPE_BOOLEAN
_data.v_int = _girepository.gint(obj)
elif isinstance(obj, int):
_value.g_type = _girepository.G_TYPE_INT
_data.v_long = _girepository.glong(obj)
elif isinstance(obj, float):
_value.g_type = _girepository.G_TYPE_DOUBLE
_data.v_double = _girepository.gdouble(obj)
elif PY2 and isinstance(obj, basestring):
_value.g_type = _girepository.G_TYPE_STRING
_data.v_pointer = _girepository.cast(_girepository.gchar_p(obj), _girepository.gpointer)
elif PY3 and isinstance(obj, (str, bytes)):
_value.g_type = _girepository.G_TYPE_STRING
obj_bytes = obj if isinstance(obj, bytes) else obj.encode('utf-8')
_data.v_pointer = _girepository.cast(_girepository.gchar_p(obj_bytes), _girepository.gpointer)
elif hasattr(obj, '_cself'):
_value.g_type = _girepository.G_TYPE_OBJECT
_data.v_pointer = obj._cself
else:
raise GIError('unsupported object type "%s"' % obj.__class__.__name__)
_value.data[0] = _data
return _value
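# Illustrative sketch (uses only the converter above): a few Python scalars and their
# GValue counterparts; unsupported Python types raise GIError.
def _example_pyobject_to_gvalue():  # pragma: no cover
    return [_convert_pyobject_to_gvalue(obj) for obj in (None, True, 42, 3.14, 'text')]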
def _get_type_info_size(_type_info):
_type_tag = _girepository.g_type_info_get_tag(_type_info)
if _type_tag.value in (
_girepository.GI_TYPE_TAG_BOOLEAN.value,
_girepository.GI_TYPE_TAG_INT8.value,
_girepository.GI_TYPE_TAG_UINT8.value,
_girepository.GI_TYPE_TAG_INT16.value,
_girepository.GI_TYPE_TAG_UINT16.value,
_girepository.GI_TYPE_TAG_INT32.value,
_girepository.GI_TYPE_TAG_UINT32.value,
_girepository.GI_TYPE_TAG_INT64.value,
_girepository.GI_TYPE_TAG_UINT64.value,
_girepository.GI_TYPE_TAG_FLOAT.value,
_girepository.GI_TYPE_TAG_DOUBLE.value,
_girepository.GI_TYPE_TAG_GTYPE.value,
# UNSUPPORTED: _girepository.GI_TYPE_TAG_UNICHAR.value,
):
if _girepository.g_type_info_is_pointer(_type_info).value:
_size = _girepository.guint(_girepository.sizeof(_girepository.gpointer))
else:
_size = _get_type_tag_size(_type_tag)
elif _type_tag.value == _girepository.GI_TYPE_TAG_INTERFACE.value:
_interface_base_info = _girepository.g_type_info_get_interface(_type_info)
_interface_info_type = _girepository.g_base_info_get_type(_interface_base_info)
if _interface_info_type.value == _girepository.GI_INFO_TYPE_STRUCT.value:
if _girepository.g_type_info_is_pointer(_type_info).value:
_size = _girepository.guint(_girepository.sizeof(_girepository.gpointer))
else:
_interface_struct_info = _girepository.cast(_interface_base_info, _girepository.POINTER(_girepository.GIStructInfo))
_size = _girepository.guint(_girepository.g_struct_info_get_size(_interface_struct_info))
elif _interface_info_type.value == _girepository.GI_INFO_TYPE_UNION.value:
if _girepository.g_type_info_is_pointer(_type_info).value:
_size = _girepository.guint(_girepository.sizeof(_girepository.gpointer))
else:
_interface_union_info = _girepository.cast(_interface_base_info, _girepository.POINTER(_girepository.GIUnionInfo))
_size = _girepository.guint(_girepository.g_union_info_get_size(_interface_union_info))
elif _interface_info_type.value in (
_girepository.GI_INFO_TYPE_ENUM.value,
_girepository.GI_INFO_TYPE_FLAGS.value,
):
if _girepository.g_type_info_is_pointer(_type_info).value:
_size = _girepository.guint(_girepository.sizeof(_girepository.gpointer))
else:
_interface_enum_info = _girepository.cast(_interface_base_info, _girepository.POINTER(_girepository.GIEnumInfo))
_type_tag = _girepository.g_enum_info_get_storage_type(_interface_enum_info)
_size = _get_type_tag_size(_type_tag)
elif _interface_info_type.value in (
_girepository.GI_INFO_TYPE_BOXED.value,
_girepository.GI_INFO_TYPE_OBJECT.value,
_girepository.GI_INFO_TYPE_INTERFACE.value,
_girepository.GI_INFO_TYPE_CALLBACK.value,
):
_size = _girepository.guint(_girepository.sizeof(_girepository.gpointer))
elif _interface_info_type.value in (
_girepository.GI_INFO_TYPE_VFUNC.value,
_girepository.GI_INFO_TYPE_INVALID.value,
_girepository.GI_INFO_TYPE_FUNCTION.value,
_girepository.GI_INFO_TYPE_CONSTANT.value,
_girepository.GI_INFO_TYPE_ERROR_DOMAIN.value,
_girepository.GI_INFO_TYPE_VALUE.value,
_girepository.GI_INFO_TYPE_SIGNAL.value,
_girepository.GI_INFO_TYPE_PROPERTY.value,
_girepository.GI_INFO_TYPE_FIELD.value,
_girepository.GI_INFO_TYPE_ARG.value,
_girepository.GI_INFO_TYPE_TYPE.value,
_girepository.GI_INFO_TYPE_UNRESOLVED.value,
):
raise GIError('unsupported info type %i' % _interface_info_type.value)
_girepository.g_base_info_unref(_interface_base_info)
elif _type_tag.value in (
_girepository.GI_TYPE_TAG_ARRAY.value,
_girepository.GI_TYPE_TAG_VOID.value,
_girepository.GI_TYPE_TAG_UTF8.value,
_girepository.GI_TYPE_TAG_FILENAME.value,
_girepository.GI_TYPE_TAG_GLIST.value,
_girepository.GI_TYPE_TAG_GSLIST.value,
_girepository.GI_TYPE_TAG_GHASH.value,
_girepository.GI_TYPE_TAG_ERROR.value,
):
_size = _girepository.guint(_girepository.sizeof(_girepository.gpointer))
else:
_size = _girepository.guint(0)
return _size
def _get_type_tag_size(_type_tag):
if _type_tag.value == _girepository.GI_TYPE_TAG_BOOLEAN.value:
_size = _girepository.guint(_girepository.sizeof(_girepository.gpointer))
elif _type_tag.value in (
_girepository.GI_TYPE_TAG_INT8.value,
_girepository.GI_TYPE_TAG_UINT8.value,
):
_size = _girepository.guint(_girepository.sizeof(_girepository.gint8))
elif _type_tag.value in (
_girepository.GI_TYPE_TAG_INT16.value,
_girepository.GI_TYPE_TAG_UINT16.value,
):
_size = _girepository.guint(_girepository.sizeof(_girepository.gint16))
elif _type_tag.value in (
_girepository.GI_TYPE_TAG_INT32.value,
_girepository.GI_TYPE_TAG_UINT32.value,
):
_size = _girepository.guint(_girepository.sizeof(_girepository.gint32))
elif _type_tag.value in (
_girepository.GI_TYPE_TAG_INT64.value,
_girepository.GI_TYPE_TAG_UINT64.value,
):
_size = _girepository.guint(_girepository.sizeof(_girepository.gint64))
elif _type_tag.value == _girepository.GI_TYPE_TAG_FLOAT.value:
_size = _girepository.guint(_girepository.sizeof(_girepository.gfloat))
elif _type_tag.value == _girepository.GI_TYPE_TAG_DOUBLE.value:
_size = _girepository.guint(_girepository.sizeof(_girepository.gdouble))
elif _type_tag.value == _girepository.GI_TYPE_TAG_GTYPE.value:
_size = _girepository.guint(_girepository.sizeof(_girepository.GType))
# UNSUPPORTED:
# elif _type_tag.value == _girepository.GI_TYPE_TAG_UNICHAR.value:
# _size = _girepository.guint(_girepository.sizeof(_girepository.gunichar))
elif _type_tag.value in (
_girepository.GI_TYPE_TAG_VOID.value,
_girepository.GI_TYPE_TAG_UTF8.value,
_girepository.GI_TYPE_TAG_FILENAME.value,
_girepository.GI_TYPE_TAG_ARRAY.value,
_girepository.GI_TYPE_TAG_INTERFACE.value,
_girepository.GI_TYPE_TAG_GLIST.value,
_girepository.GI_TYPE_TAG_GSLIST.value,
_girepository.GI_TYPE_TAG_GHASH.value,
_girepository.GI_TYPE_TAG_ERROR.value,
):
raise GIError('size cannot be determined for this type tag')
else:
raise GIError('unknown size')
return _size
|
|
# Copyright (c) 2014. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import numpy as np
import tempfile
from mhc_common import normalize_hla_allele_name
from peptide_binding_measure import IC50_FIELD_NAME, PERCENTILE_RANK_FIELD_NAME
def create_input_fasta_file(df, mutation_window_size = None):
"""
Turn peptide entries from a dataframe into a FASTA file.
If mutation_window_size is an integer > 0, only the subsequence
around the mutated residues is written.
Returns the name of the closed temporary file (which must be deleted
manually by the caller) and a dictionary mapping FASTA IDs to peptide records.
"""
input_file = tempfile.NamedTemporaryFile(
"w", prefix="peptide", delete=False)
peptide_entries = {}
records = df.to_records()
n_records = len(records)
# create input file for all peptide sequences and also
# put the entries into a dictionary so we can read out the results later
for i, mutation_entry in enumerate(records):
seq = mutation_entry['SourceSequence']
if mutation_window_size:
start = max(
0,
mutation_entry.MutationStart - mutation_window_size)
stop = min(
len(seq),
mutation_entry.MutationEnd + mutation_window_size)
seq = seq[start:stop]
identifier = "%s_%s" % (i, mutation_entry['Gene'][:5])
peptide_entries[identifier] = mutation_entry
input_file.write(">%s\n" % identifier)
input_file.write(seq)
# newline unless at end of file
if i + 1 < n_records:
input_file.write("\n")
input_file.close()
return input_file.name, peptide_entries
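# Minimal usage sketch (illustration only; assumes pandas, which the dataframe argument
# implies, and uses made-up column values matching the fields accessed above).
def _example_create_input_fasta():  # pragma: no cover
    import pandas as pd
    df = pd.DataFrame([{
        'SourceSequence': 'QQQQQYFPEITHIIIASSSL',
        'MutationStart': 5,
        'MutationEnd': 6,
        'Gene': 'TP53',
    }])
    # the returned FASTA file is closed but not deleted; the caller must remove it
    fasta_path, entries = create_input_fasta_file(df, mutation_window_size=9)
    return fasta_path, entries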
def invalid_binding_score(x):
return x < 0 or np.isnan(x) or np.isinf(x)
def create_binding_result_row(
mutation_entry,
allele,
pos,
epitope,
log_ic50,
ic50,
rank,
mutation_window_size = None):
# if we have a bad IC50 score we might still be able to recover it
# from the log-transformed affinity (strangely, this is sometimes necessary)
if invalid_binding_score(ic50):
ic50 = 50000 ** (-log_ic50 + 1)
# if IC50 is still NaN or otherwise invalid, abort
if invalid_binding_score(ic50):
logging.warn(
"Invalid IC50 value %0.4f for %s w/ allele %s",
ic50,
epitope,
allele)
return None
if invalid_binding_score(rank) or rank > 100:
logging.warn(
"Invalid percentile rank %s for %s w/ allele %s",
rank, epitope, allele)
return None
if mutation_window_size:
# if we clipped parts of the amino acid sequence which don't
# overlap mutations then we have to offset epitope positions by
# however much was removed from the beginning of the sequence
original_start = max(
0,
mutation_entry.MutationStart - mutation_window_size)
pos += original_start
# keep track of original genetic variant that
# gave rise to this epitope
new_row = {}
# fields shared by all epitopes from this sequence
new_row['chr'] = mutation_entry.chr
new_row['pos'] = mutation_entry.pos
new_row['ref'] = mutation_entry.ref
new_row['alt'] = mutation_entry.alt
new_row['SourceSequence'] = mutation_entry.SourceSequence
new_row['MutationStart'] = mutation_entry.MutationStart
new_row['MutationEnd'] = mutation_entry.MutationEnd
new_row['GeneInfo'] = mutation_entry.GeneInfo
new_row['Gene'] = mutation_entry.Gene
new_row["GeneMutationInfo"] = mutation_entry.GeneMutationInfo
new_row['PeptideMutationInfo'] = mutation_entry.PeptideMutationInfo
new_row['TranscriptId'] = mutation_entry.TranscriptId
# fields specific to this epitope
new_row['Allele'] = normalize_hla_allele_name(allele)
new_row['EpitopeStart'] = pos
new_row['EpitopeEnd'] = pos + len(epitope)
new_row['Epitope'] = epitope
new_row[IC50_FIELD_NAME] = ic50
new_row[PERCENTILE_RANK_FIELD_NAME] = rank
return new_row
def parse_netmhc_stdout(contents, peptide_entries, mutation_window_size = None):
"""
Parse the output format for NetMHC predictors, which looks like:
# Affinity Threshold for Strong binding peptides 50.000',
# Affinity Threshold for Weak binding peptides 500.000',
# Rank Threshold for Strong binding peptides 0.500',
# Rank Threshold for Weak binding peptides 2.000',
----------------------------------------------------------------------------
pos HLA peptide Identity 1-log50k(aff) Affinity(nM) %Rank BindLevel
----------------------------------------------------------------------------
0 HLA-A*02:03 QQQQQYFPE id0 0.024 38534.25 50.00
1 HLA-A*02:03 QQQQYFPEI id0 0.278 2461.53 15.00
2 HLA-A*02:03 QQQYFPEIT id0 0.078 21511.53 50.00
3 HLA-A*02:03 QQYFPEITH id0 0.041 32176.84 50.00
4 HLA-A*02:03 QYFPEITHI id0 0.085 19847.09 32.00
5 HLA-A*02:03 YFPEITHII id0 0.231 4123.85 15.00
6 HLA-A*02:03 FPEITHIII id0 0.060 26134.28 50.00
7 HLA-A*02:03 PEITHIIIA id0 0.034 34524.63 50.00
8 HLA-A*02:03 EITHIIIAS id0 0.076 21974.48 50.00
9 HLA-A*02:03 ITHIIIASS id0 0.170 7934.26 32.00
10 HLA-A*02:03 THIIIASSS id0 0.040 32361.18 50.00
11 HLA-A*02:03 HIIIASSSL id0 0.515 189.74 4.00 <= WB
"""
lines = contents.split("\n")
lines = [l.strip() for l in lines]
# remove empty lines
lines = [l for l in lines if len(l) > 0]
# remove comments
lines = [l for l in lines if not l.startswith("#")]
results = []
for line in lines:
fields = line.split()
n_required_fields = 7
if len(fields) >= n_required_fields:
pos, allele, peptide, ident, log_affinity, ic50, rank = \
fields[:n_required_fields]
try:
pos = int(pos)
allele = str(allele)
peptide = str(peptide)
ident = str(ident)
log_affinity = float(log_affinity)
ic50 = float(ic50)
rank = float(rank)
except (ValueError, TypeError):
# if position or affinity values can't be parsed,
# then skip this line
continue
assert ident in peptide_entries, \
"Unknown identifier %s in NetMHC output" % ident
mutation_entry = peptide_entries[ident]
new_row = create_binding_result_row(
mutation_entry,
allele,
pos,
peptide,
log_affinity,
ic50,
rank,
mutation_window_size = mutation_window_size)
if not new_row:
# if we encountered an error, skip this line
logging.warn("Skipping allele=%s epitope=%s ic50=%s",
allele, epitope, ic50)
continue
results.append(new_row)
return results
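# Minimal usage sketch (not part of the original module): parse_netmhc_stdout
# expects peptide_entries to map the NetMHC "Identity" column (e.g. "id0") to
# an object carrying the variant fields read by create_binding_result_row.
# Every value below is a hypothetical placeholder used purely for illustration.
from collections import namedtuple

_ExampleEntry = namedtuple("_ExampleEntry", [
    "chr", "pos", "ref", "alt", "SourceSequence",
    "MutationStart", "MutationEnd", "GeneInfo", "Gene",
    "GeneMutationInfo", "PeptideMutationInfo", "TranscriptId"])

def _example_parse_netmhc(netmhc_stdout):
    # Hypothetical variant entry keyed by the "id0" identifier used in the
    # docstring example above.
    entry = _ExampleEntry(
        chr="1", pos=12345, ref="A", alt="T",
        SourceSequence="QQQQQYFPEITHIIIASSSL",
        MutationStart=5, MutationEnd=6,
        GeneInfo=None, Gene="GENE1",
        GeneMutationInfo="g.12345A>T",
        PeptideMutationInfo="Q6L",
        TranscriptId="TRANSCRIPT1")
    return parse_netmhc_stdout(netmhc_stdout, {"id0": entry})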
def parse_xls_file(contents, peptide_entries, mutation_window_size = None):
"""
XLS is a wacky output format used by NetMHCpan and NetMHCcons
for peptide binding predictions.
First line of XLS file format has HLA alleles
and second line has fields like:
['Pos', 'Peptide', 'ID',
'1-log50k', 'nM', 'Rank',
'1-log50k', 'nM', 'Rank',
'1-log50k', 'nM', 'Rank',
...'Ave', 'NB']
"""
lines = [line.split("\t")
for line in contents.split("\n")
if len(line) > 0]
# top line of XLS file has alleles
alleles = [x for x in lines[0] if len(x) > 0]
# skip alleles and column headers
lines = lines[2:]
results = []
for line in lines:
pos = int(line[0])
epitope = line[1]
identifier = line[2]
assert identifier in peptide_entries, \
"Bad identifier %s, known identifiers = %s" % (
identifier, list(peptide_entries.keys()))
mutation_entry = peptide_entries[identifier]
for i, allele in enumerate(alleles):
# we start at an offset of 3 to skip the allele-invariant
# pos, epitope, identifier columns
# each allele has three columns: log IC50, IC50, rank
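# e.g. with two alleles the row layout is:
#   [0]=Pos  [1]=Peptide  [2]=ID  [3:6]=allele 0 (1-log50k, nM, Rank)  [6:9]=allele 1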
log_ic50 = float(line[3+3*i])
ic50 = float(line[3+3*i+1])
rank = float(line[3+3*i+2])
new_row = create_binding_result_row(
mutation_entry,
allele,
pos,
epitope,
log_ic50,
ic50,
rank,
mutation_window_size = mutation_window_size)
if not new_row:
# if we encountered an error, skip this line
logging.warn("Skipping allele=%s epitope=%s ic50=%s",
allele, epitope, ic50)
continue
results.append(new_row)
return results
|
|
class ProcessTree:
"""ProcessTree encapsulates a process tree. The tree is built from log files
retrieved during the boot process. When building the process tree, it is
pruned and merged in order to be able to visualize it in a comprehensible
manner.
The following pruning techniques are used:
* idle processes that keep running during the last process sample
(which is a heuristic for background processes) are removed,
* short-lived processes (i.e. processes that only live for the
duration of two samples or less) are removed,
* the processes used by the boot logger are removed,
* exploders (i.e. processes that are known to spawn huge meaningless
process subtrees) have their subtrees merged together,
* siblings (i.e. processes with the same command line living
concurrently -- thread heuristic) are merged together,
* process runs (unary trees with processes sharing the command line)
are merged together.
"""
LOGGER_PROC = 'bootchartd'
EXPLODER_PROCESSES = set(['hwup'])
def __init__(self, psstats, monitoredApp, prune, for_testing = False):
self.process_tree = []
self.psstats = psstats
self.process_list = sorted(psstats.process_list, key = lambda p: p.pid)
self.sample_period = psstats.sample_period
self.build()
self.update_ppids_for_daemons(self.process_list)
self.start_time = self.get_start_time(self.process_tree)
self.end_time = self.get_end_time(self.process_tree)
self.duration = self.end_time - self.start_time
if for_testing:
return
# print 'proc_tree before prune: num_proc=%i, duration=%i' % (self.num_nodes(self.process_list), self.duration)
removed = self.merge_logger(self.process_tree, self.LOGGER_PROC, monitoredApp, False)
print "Merged %i logger processes" % removed
if prune:
removed = self.prune(self.process_tree, None)
print "Pruned %i processes" % removed
removed = self.merge_exploders(self.process_tree, self.EXPLODER_PROCESSES)
print "Pruned %i exploders" % removed
removed = self.merge_siblings(self.process_tree)
print "Pruned %i threads" % removed
removed = self.merge_runs(self.process_tree)
print "Pruned %i runs" % removed
self.sort(self.process_tree)
self.start_time = self.get_start_time(self.process_tree)
self.end_time = self.get_end_time(self.process_tree)
self.duration = self.end_time - self.start_time
self.num_proc = self.num_nodes(self.process_tree)
def build(self):
"""Build the process tree from the list of top samples."""
self.process_tree = []
for proc in self.process_list:
if not proc.parent:
self.process_tree.append(proc)
else:
proc.parent.child_list.append(proc)
def sort(self, process_subtree):
"""Sort process tree."""
for p in process_subtree:
p.child_list.sort(key = lambda p: p.pid)
self.sort(p.child_list)
def num_nodes(self, process_list):
"Counts the number of nodes in the specified process tree."""
nodes = 0
for proc in process_list:
nodes = nodes + self.num_nodes(proc.child_list)
return nodes + len(process_list)
def get_start_time(self, process_subtree):
"""Returns the start time of the process subtree. This is the start
time of the earliest process.
"""
if not process_subtree:
return 100000000
return min( [min(proc.start_time, self.get_start_time(proc.child_list)) for proc in process_subtree] )
def get_end_time(self, process_subtree):
"""Returns the end time of the process subtree. This is the end time
of the last collected sample.
"""
if not process_subtree:
return -100000000
return max( [max(proc.start_time + proc.duration, self.get_end_time(proc.child_list)) for proc in process_subtree] )
def get_max_pid(self, process_subtree):
"""Returns the max PID found in the process tree."""
if not process_subtree:
return -100000000
return max( [max(proc.pid, self.get_max_pid(proc.child_list)) for proc in process_subtree] )
def update_ppids_for_daemons(self, process_list):
"""Fedora hack: when loading the system services from rc, runuser(1)
is used. This sets the PPID of all daemons to 1, skewing
the process tree. Try to detect this and set the PPID of
these processes to the PID of rc.
"""
rcstartpid = -1
rcendpid = -1
rcproc = None
for p in process_list:
if p.cmd == "rc" and p.ppid == 1:
rcproc = p
rcstartpid = p.pid
rcendpid = self.get_max_pid(p.child_list)
if rcstartpid != -1 and rcendpid != -1:
for p in process_list:
if p.pid > rcstartpid and p.pid < rcendpid and p.ppid == 1:
p.ppid = rcstartpid
p.parent = rcproc
for p in process_list:
p.child_list = []
self.build()
def prune(self, process_subtree, parent):
"""Prunes the process tree by removing idle processes and processes
that only live for the duration of a single top sample. Sibling
processes with the same command line (i.e. threads) are merged
together. This filters out sleepy background processes, short-lived
processes and bootcharts' analysis tools.
"""
def is_idle_background_process_without_children(p):
process_end = p.start_time + p.duration
return not p.active and \
process_end >= self.start_time + self.duration and \
p.start_time > self.start_time and \
p.duration > 0.9 * self.duration and \
self.num_nodes(p.child_list) == 0
num_removed = 0
idx = 0
while idx < len(process_subtree):
p = process_subtree[idx]
if parent != None or len(p.child_list) == 0:
prune = False
if is_idle_background_process_without_children(p):
prune = True
elif p.duration <= 2 * self.sample_period:
# short-lived process
prune = True
if prune:
process_subtree.pop(idx)
for c in p.child_list:
process_subtree.insert(idx, c)
num_removed += 1
continue
else:
num_removed += self.prune(p.child_list, p)
else:
num_removed += self.prune(p.child_list, p)
idx += 1
return num_removed
def merge_logger(self, process_subtree, logger_proc, monitored_app, app_tree):
"""Merges the logger's process subtree. The logger will typically
spawn lots of sleep and cat processes, thus polluting the
process tree.
"""
num_removed = 0
for p in process_subtree:
is_app_tree = app_tree
if logger_proc == p.cmd and not app_tree:
is_app_tree = True
num_removed += self.merge_logger(p.child_list, logger_proc, monitored_app, is_app_tree)
# don't remove the logger itself
continue
if app_tree and monitored_app != None and monitored_app == p.cmd:
is_app_tree = False
if is_app_tree:
for child in p.child_list:
self.__merge_processes(p, child)
num_removed += 1
p.child_list = []
else:
num_removed += self.merge_logger(p.child_list, logger_proc, monitored_app, is_app_tree)
return num_removed
def merge_exploders(self, process_subtree, processes):
"""Merges specific process subtrees (used for processes which usually
spawn huge meaningless process trees).
"""
num_removed = 0
for p in process_subtree:
if p.cmd in processes and len(p.child_list) > 0:
subtreemap = self.getProcessMap(p.child_list)
for child in subtreemap.values():
self.__merge_processes(p, child)
num_removed += len(subtreemap)
p.child_list = []
p.cmd += " (+)"
else:
num_removed += self.merge_exploders(p.child_list, processes)
return num_removed
def merge_siblings(self,process_subtree):
"""Merges thread processes. Sibling processes with the same command
line are merged together.
"""
num_removed = 0
idx = 0
while idx < len(process_subtree)-1:
p = process_subtree[idx]
nextp = process_subtree[idx+1]
if nextp.cmd == p.cmd:
process_subtree.pop(idx+1)
idx -= 1
num_removed += 1
p.child_list.extend(nextp.child_list)
self.__merge_processes(p, nextp)
num_removed += self.merge_siblings(p.child_list)
idx += 1
if len(process_subtree) > 0:
p = process_subtree[-1]
num_removed += self.merge_siblings(p.child_list)
return num_removed
def merge_runs(self, process_subtree):
"""Merges process runs. Single child processes which share the same
command line with the parent are merged.
"""
num_removed = 0
idx = 0
while idx < len(process_subtree):
p = process_subtree[idx]
if len(p.child_list) == 1 and p.child_list[0].cmd == p.cmd:
child = p.child_list[0]
p.child_list = list(child.child_list)
self.__merge_processes(p, child)
num_removed += 1
continue
num_removed += self.merge_runs(p.child_list)
idx += 1
return num_removed
def __merge_processes(self, p1, p2):
"""Merges two process samples."""
p1.samples.extend(p2.samples)
p1time = p1.start_time
p2time = p2.start_time
p1.start_time = min(p1time, p2time)
pendtime = max(p1time + p1.duration, p2time + p2.duration)
p1.duration = pendtime - p1.start_time
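# Worked example of the timing rule above (toy numbers, not from a real
# bootchart log): merging p1 (start=10, duration=5) with p2 (start=12,
# duration=8) gives start_time = min(10, 12) = 10 and
# duration = max(10 + 5, 12 + 8) - 10 = 10.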
|
|
import cairo
from color import Color, Gradient
# Helpers
def get_view_extents(area_position, area_length, area_total_length, total_items):
scale = float(area_total_length) / total_items
first_item = int(total_items * float(area_position) / area_total_length)
last_item = int(total_items * float(area_position + area_length) / area_total_length)
items_in_view = min(last_item + 1, total_items) - first_item
return first_item, items_in_view, scale
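# Worked example (illustrative numbers): rendering total_items=100 items across
# area_total_length=500 px with a view of area_length=50 px starting at
# area_position=100 px gives scale = 5.0 px/item, first_item = 20 and
# items_in_view = min(30 + 1, 100) - 20 = 11.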
CAIRO_VECTOR_SURFACES = tuple(getattr(cairo, s) for s in ['PDFSurface', 'PSSurface', 'SVGSurface', 'Win32PrintingSurface'] if hasattr(cairo, s))
def vector_based(cr):
return isinstance(cr.get_target(), CAIRO_VECTOR_SURFACES)
# Fills
def stripes(fg, bg, width, spacing, flip=False):
size = width + spacing
surf = cairo.ImageSurface(cairo.FORMAT_ARGB32, size, size)
cr = cairo.Context(surf)
cr.rectangle(0, 0, size, size)
cr.clip()
cr.set_source_rgba(*bg)
cr.fill()
cr.set_source_rgba(*fg)
cr.set_line_width(width)
if flip:
cr.move_to(0, -0.5 * size)
cr.line_to(1.5 * size, size)
cr.move_to(-0.5 * size, 0)
cr.line_to(size, 1.5 * size)
else:
cr.move_to(-0.5 * size, size)
cr.line_to(size, -0.5 * size)
cr.move_to(0, 1.5 * size)
cr.line_to(1.5*size, 0)
cr.stroke()
pattern = cairo.SurfacePattern(surf)
pattern.set_extend(cairo.EXTEND_REPEAT)
return pattern
def circles(fg, bg, radius):
size = 2 * radius + 2
surf = cairo.ImageSurface(cairo.FORMAT_ARGB32, size, size)
cr = cairo.Context(surf)
cr.rectangle(0, 0, size, size)
cr.clip()
cr.set_source_rgba(*bg)
cr.fill()
cr.set_source_rgba(*fg)
import math
cr.arc(size*0.5, size*0.5, radius, 0, 2 * math.pi)
cr.close_path()
cr.fill()
pattern = cairo.SurfacePattern(surf)
pattern.set_extend(cairo.EXTEND_REPEAT)
return pattern
def chequers(fg, bg, width):
size = 2 * width
surf = cairo.ImageSurface(cairo.FORMAT_ARGB32, size, size)
cr = cairo.Context(surf)
cr.rectangle(0, 0, size, size)
cr.clip()
cr.set_source_rgba(*bg)
cr.fill()
cr.rectangle(0, 0, width, width)
cr.rectangle(width, width, size, size)
cr.set_source_rgba(*fg)
cr.fill()
pattern = cairo.SurfacePattern(surf)
pattern.set_extend(cairo.EXTEND_REPEAT)
return pattern
def lines(fg, bg, width):
size = 2 * width
surf = cairo.ImageSurface(cairo.FORMAT_ARGB32, size, size)
cr = cairo.Context(surf)
cr.rectangle(0, 0, size, size)
cr.clip()
cr.set_source_rgba(*bg)
cr.fill()
cr.rectangle(0, 0, size, width)
cr.set_source_rgba(*fg)
cr.fill()
pattern = cairo.SurfacePattern(surf)
pattern.set_extend(cairo.EXTEND_REPEAT)
return pattern
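# Minimal usage sketch (not part of the original module): painting with one of
# the repeating fill patterns defined above. The RGBA tuples and sizes are
# illustrative placeholders only.
def _example_fill_usage(width=60, height=40):
    surf = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
    cr = cairo.Context(surf)
    # chequers() returns a cairo.SurfacePattern with EXTEND_REPEAT set,
    # so it tiles automatically when used as a source.
    cr.set_source(chequers((0.2, 0.2, 0.2, 1.0), (0.9, 0.9, 0.9, 1.0), 8))
    cr.rectangle(0, 0, width, height)
    cr.fill()
    return surf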
# Plotting functions:
def tick_lines(cr, color, alpha, tick_height, tick_positions, item_size, area_x, area_width):
"""Draw tick lines.
cr is assumed to be clipped (and rotated) and translated to
(msa 0, line top) before calling this function.
"""
pixels = not vector_based(cr)
cr.set_line_width(0.5)
offset = 0
if pixels:
cr.set_line_width(1)
offset = -0.5
for tick in tick_positions:
x = tick * item_size
if pixels:
x = round(x)
if x < area_x:
continue
if x > area_x + area_width:
break
cr.move_to(x + offset, 0)
cr.line_to(x + offset, tick_height)
cr.set_source_rgba(*color.with_alpha(alpha).rgba)
cr.stroke()
def tick_labels(cr, layout, color, alpha, tick_positions, item_size, area_x, area_width):
"""Draw tick labels.
cr is assumed to be clipped (and rotated) and translated to
(msa 0, label top) before calling this function.
"""
# The intricate return values from ...get_pixel_extents():
#ink, logic = layout.get_line(0).get_pixel_extents()
#ink_xbearing, ink_ybearing, ink_w, ink_h = ink
#log_xbearing, log_ybearing, log_w, log_h = logic
margin = 2
cr.set_source_rgba(*color.with_alpha(alpha).rgba)
for tick in tick_positions:
if int(tick * item_size) < area_x:
continue
layout.set_text(str(tick))
label_width = layout.get_line(0).get_pixel_extents()[1][2]
x = int(tick * item_size) - label_width - margin
if x > area_x + area_width:
break
cr.move_to(x, 0)
cr.show_layout(layout)
def bar(cr, color, alpha, values, first_pos, n_pos, x_offset, total_width, total_height):
gradient = isinstance(color, Gradient)
length = len(values)
for pos in range(first_pos, first_pos + n_pos):
#x_start = int(round(float(pos) / length * total_width - x_offset))
#x_stop = int(round(float(pos + 1) / length * total_width - x_offset))
#x_stop = max(x_stop, x_start + 1)
bar_height = int(round(total_height * values[pos]))
x_start = float(pos) / length * total_width - x_offset
x_stop = float(pos + 1) / length * total_width - x_offset
cr.rectangle(x_start, total_height - bar_height, x_stop - x_start, bar_height)
if gradient:
c = color.get_color_from_offset(values[pos])
cr.set_source_rgba(*c.with_alpha(alpha).rgba)
cr.fill()
if color is None:
return
if not gradient:
cr.set_source_rgba(*color.with_alpha(alpha).rgba)
cr.fill()
def v_bar(cr, color, alpha, values, first_seq, n_seq, y_offset, total_width, total_height):
gradient = isinstance(color, Gradient)
length = len(values)
for seq in range(first_seq, first_seq + n_seq):
#y_start = int(round(float(seq) / length * total_height - y_offset))
#y_stop = int(round(float(seq + 1) / length * total_height - y_offset))
#y_stop = max(y_stop, y_start + 1)
y_start = float(seq) / length * total_height - y_offset
y_stop = float(seq + 1) / length * total_height - y_offset
bar_width = int(round(total_width * values[seq]))
cr.rectangle(0, y_start, bar_width, y_stop - y_start)
if gradient:
c = color.get_color_from_offset(values[seq])
cr.set_source_rgba(*c.with_alpha(alpha).rgba)
cr.fill()
if color is None:
return
if not gradient:
cr.set_source_rgba(*color.with_alpha(alpha).rgba)
cr.fill()
def quartile_guidelines(cr, width, x_offset, total_width, total_height):
cr.save()
cr.set_line_width(0.5)
y = 0.25
if not vector_based(cr):
cr.set_line_width(1)
y = 0.5
# Top
cr.move_to(-1, y)
cr.line_to(width, y)
cr.stroke()
# Half
cr.set_dash([2, 2], x_offset % 4)
y = int(0.5 * total_height) - 0.5
cr.move_to(-1, y)
cr.line_to(width, y)
cr.stroke()
# Quartiles
cr.set_dash([1, 1], x_offset % 2)
for n in [.25, .75]:
y = int(total_height * n) - 0.5
cr.move_to(-1, y)
cr.line_to(width, y)
cr.stroke()
cr.restore()
def v_quartile_guidelines(cr, height, y_offset, total_width, total_height):
cr.save()
cr.set_line_width(0.5)
x = 0.25
if not vector_based(cr):
cr.set_line_width(1)
x = 0.5
# Left
cr.move_to(x, -1)
cr.line_to(x, height)
cr.stroke()
# Half
cr.set_dash([2, 2], y_offset % 4)
x = int(0.5 * total_width) - 0.5
cr.move_to(x, -1)
cr.line_to(x, height)
cr.stroke()
# Quartiles
cr.set_dash([1, 1], y_offset % 2)
for n in [.25, .75]:
x = int(total_width * n) - 0.5
cr.move_to(x, -1)
cr.line_to(x, height)
cr.stroke()
cr.restore()
def scaled_image(cr, area, image, alpha):
width = image.get_width()
height = image.get_height()
first_pos, x_offset = divmod(float(width * area.x) / area.total_width, 1)
first_seq, y_offset = divmod(float(height * area.y) / area.total_height, 1)
first_pos = int(first_pos)
first_seq = int(first_seq)
last_pos = int(width * float(area.x + area.width) / area.total_width)
last_seq = int(height * float(area.y + area.height) / area.total_height)
n_pos = min(last_pos - first_pos + 1, width)
n_seq = min(last_seq - first_seq + 1, height)
temp = cairo.ImageSurface(cairo.FORMAT_ARGB32, n_pos, n_seq)
temp_cr = cairo.Context(temp)
temp_cr.rectangle(0, 0, n_pos, n_seq)
temp_cr.clip()
temp_cr.translate(-first_pos, -first_seq)
temp_cr.set_source_surface(image, 0, 0)
temp_cr.paint_with_alpha(alpha)
cr.rectangle(0, 0, area.width, area.height)
cr.clip()
cr.scale(area.total_width / float(width), area.total_height / float(height))
cr.translate(-x_offset, -y_offset)
pattern = cairo.SurfacePattern(temp)
pattern.set_filter(cairo.FILTER_NEAREST)
cr.set_source(pattern)
cr.rectangle(0, 0, n_pos, n_seq)
cr.fill()
def scaled_image_rectangles(cr, area, array, alpha):
height, width = array.shape[:2]
(first_pos, n_pos, xscale), (first_seq, n_seq, yscale) = area.item_extents(width, height)
cr.rectangle(0, 0, area.width, area.height)
cr.clip()
cr.translate(-area.x, -area.y)
for seq in range(first_seq, first_seq + n_seq):
for pos in range(first_pos, first_pos + n_pos):
b, g, r, a = array[seq,pos]/255.0
cr.set_source_rgba(r, g, b, a * alpha)
x = pos * xscale
y = seq*yscale
xstop = (pos + 1) * xscale
ystop = (seq + 1) * yscale
cr.rectangle(x, y, xscale, yscale)
cr.fill()
def outlined_regions(cr, area, n_positions, n_sequences, features, linewidth, color, alpha, merged=False):
first_pos, n_pos, x_scale = area.item_extents_for_axis(n_positions, 'width')
first_seq, n_seq, y_scale = area.item_extents_for_axis(n_sequences, 'height')
cr.save()
cr.translate(-area.x, -area.y)
cr.set_line_width(linewidth)
cr.set_source_rgba(*color.rgba)
def draw_outline(seq, region):
x = int(region.start * x_scale)
w = round((region.start + region.length) * x_scale) - x
y = int(seq * y_scale)
h = round((seq + 1) * y_scale) - y
r = (x + linewidth/2.0, y + linewidth/2.0, w - linewidth, h - linewidth)
if not ((region.start >= first_pos + n_pos) or (region.start + region.length < first_pos)):
cr.rectangle(*r)
return r
for feature in features:
if not (first_seq <= feature.sequence_index < first_seq + n_seq):
continue
if merged:
draw_outline(feature.sequence_index, feature.mapping)
continue
previous = None
for part in feature.mapping.parts:
r = draw_outline(feature.sequence_index, part)
if previous:
midpoint = (previous[0] + previous[2] + r[0]) * 0.5
cr.move_to(previous[0] + previous[2], r[1] + 0.55 * r[3])
cr.line_to(midpoint, r[1] + 0.75 * r[3])
cr.line_to(r[0], r[1] + 0.55 * r[3])
previous = r
cr.stroke()
cr.restore()
|
|
"""
what we have:
- show OR draw
- update, remove
- reserved identifiers
What we need:
show1: obj; returns id
show2: obj, id
draw1: obj, bbox; returns id
draw2: obj, bbox, id
draw3: obj, id
update1: id (draw or show)
update2: id, bbox (draw only)
update3: id, parameters (draw or show)
In addition:
boundingboxes
and get_boundingbox
identifier management (also "remove" plugin)
connect mouseareas with boundingboxes: remove/update must also affect areas!
"""
class canvasargs(object):
def __init__(self, *args, **kwargs):
obj = None
identifier = None
box = None
parameters = None
assert len(args) <= 4
if len(args):
obj = args[0]
else:
obj = kwargs.get("obj", None)
if len(args) > 1:
identifier = args[1]
else:
identifier = kwargs.get("identifier", None)
if len(args) > 2:
box = args[2]
elif "box" in kwargs:
box = kwargs["box"]
elif "bbox" in kwargs:
box = kwargs["bbox"]
if len(args) > 3:
parameters = args[3]
elif "parameters" in kwargs:
box = kwargs["parameters"]
elif "params" in kwargs:
box = kwargs["params"]
assert obj is not None # canvas object
self.obj = obj
self.identifier = identifier
self.box = box
self.parameters = parameters
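# Usage sketch (hypothetical values): the class above accepts either positional
# or keyword arguments, so the following two calls are meant to be equivalent:
#   canvasargs(some_obj, "label-1", (0, 0, 100, 20), {"color": "red"})
#   canvasargs(obj=some_obj, identifier="label-1",
#              bbox=(0, 0, 100, 20), params={"color": "red"})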
import bee, libcontext
from libcontext.socketclasses import *
from libcontext.pluginclasses import *
import functools
expand_plugins = set(("draw", "draw0", "show", "show0", "remove", "update"))
class canvasdrone(bee.drone):
def __init__(self):
self._canvasobjects = {}
self._idcount = {}
self._classname = self.__class__.__name__.rstrip("&")
self._reserves = {}
self._placed = False
self._expandtypes = {}
self._drawfuncs = {}
self._showfuncs = {}
self._updatefuncs = {}
self._removefuncs = {}
self._showdraw_inits = []
self._has_mouseareas = False
def _set_canvasobject(self, identifier, typ, obj, bbox, parameters):
self._canvasobjects[identifier] = (typ, obj, bbox, parameters)
if self._has_mouseareas:
if identifier in self._mouseareas:
self._mouseareas[identifier] = bbox
def _get_identifier(self, idtype, idnr):
return "%s-%s-%d" % (self._classname, idtype, idnr)
def _get_new_idnr(self, idtype):
if idtype not in self._idcount: self._idcount[idtype] = 0
self._idcount[idtype] += 1
return self._idcount[idtype]
def set_parent(self, parent):
self.parent = parent
def reserve(self, identifier, type_=None, box=None, parameters=None):
if self._placed:
raise Exception("canvasdrone.reserve must be called before place()")
type_ = bee.resolve(type_, self.parent)
box = bee.resolve(box, self.parent)
parameters = bee.resolve(parameters, self.parent)
self._reserves[identifier] = (type_, box, parameters)
def dynamic_reserve(self, identifier, type_=None, box=None, parameters=None):
self._set_canvasobject(identifier, type_, None, box, parameters)
def _draw1(self, typ, obj, bbox, parameters=None):
drawfunc = self._drawfuncs[typ]
idnr = self._get_new_idnr(typ)
identifier = self._get_identifier(typ, idnr)
drawfunc(obj, identifier, bbox, parameters=parameters)
self._set_canvasobject(identifier, typ, obj, bbox, parameters)
return identifier
def _draw2(self, typ, obj, bbox, identifier, parameters=None):
if identifier in self._canvasobjects:
updatefunc = self._updatefuncs[typ]
curr = self._canvasobjects[identifier]
if curr[0] is not None and curr[0] != typ:
raise TypeError(
"Canvas identifier '%s' has been registered for type '%s', attempted drawing with type '%s'" \
% (identifier, curr[0], typ)
)
if parameters is None:
parameters = curr[3]
updatefunc(obj, identifier, bbox, parameters=parameters)
else:
drawfunc = self._drawfuncs[typ]
drawfunc(obj, identifier, bbox, parameters=parameters)
self._set_canvasobject(identifier, typ, obj, bbox, parameters)
def _draw3(self, typ, obj, identifier):
updatefunc = self._updatefuncs[typ]
curr = self._canvasobjects[identifier]
if curr[0] is not None and curr[0] != typ:
raise TypeError(
"Canvas identifier '%s' has been registered for type '%s', attempted drawing with type '%s'" \
% (identifier, curr[0], typ)
)
bbox, parameters = curr[2], curr[3]
if curr[1] is None:
drawfunc = self._drawfuncs[typ]
drawfunc(obj, identifier, bbox, parameters=parameters)
else:
updatefunc(obj, identifier, bbox, parameters=parameters)
self._set_canvasobject(identifier, typ, obj, bbox, parameters)
def _show1(self, typ, obj, parameters=None):
showfunc = self._showfuncs[typ]
idnr = self._get_new_idnr(typ)
identifier = self._get_identifier(typ, idnr)
showfunc(obj, identifier, parameters=parameters)
bbox = None
self._set_canvasobject(identifier, typ, obj, bbox, parameters)
return identifier
def _show2(self, typ, obj, identifier, parameters=None):
if identifier in self._canvasobjects:
updatefunc = self._updatefuncs[typ]
curr = self._canvasobjects[identifier]
if curr[0] != typ:
raise TypeError(
"Canvas identifier '%s' has been registered for type '%s', attempted drawing with type '%s'" \
% (identifier, curr[0], typ)
)
if parameters is None:
parameters = curr[3]
updatefunc(obj, identifier, parameters=parameters)
else:
showfunc = self._showfuncs[typ]
showfunc(obj, identifier, parameters=parameters)
bbox = None
self._set_canvasobject(identifier, typ, obj, bbox, parameters)
def _update1show(self, typ, identifier):
if identifier not in self._canvasobjects: return False
updatefunc = self._updatefuncs[typ]
curr = self._canvasobjects[identifier]
obj, parameters = curr[1], curr[3]
updatefunc(obj, identifier, parameters=parameters)
return True
def _update1draw(self, typ, identifier):
if identifier not in self._canvasobjects: return False
updatefunc = self._updatefuncs[typ]
curr = self._canvasobjects[identifier]
obj, bbox, parameters = curr[1], curr[2], curr[3]
updatefunc(obj, identifier, bbox, parameters=parameters)
return True
def _update1(self, identifier):
if identifier not in self._canvasobjects: return False
curr = self._canvasobjects[identifier]
typ = curr[0]
if self._expandtypes[typ] == "show":
self._update1show(typ, identifier)
else: # draw
self._update1draw(typ, identifier)
return True
def _update2draw(self, typ, identifier, bbox):
updatefunc = self._updatefuncs[typ]
curr = self._canvasobjects[identifier]
obj, parameters = curr[1], curr[3]
updatefunc(obj, identifier, bbox, parameters=parameters)
self._set_canvasobject(identifier, typ, obj, bbox, parameters)
def _update2(self, identifier, bbox):
if identifier not in self._canvasobjects: return False
curr = self._canvasobjects[identifier]
typ = curr[0]
if self._expandtypes[typ] == "show":
raise TypeError("Canvas type %s: bounding boxes are only supported for 'draw', not 'show'" % typ)
else: # draw
self._update2draw(typ, identifier, bbox)
return True
def _update3show(self, typ, identifier, parameters):
updatefunc = self._updatefuncs[typ]
curr = self._canvasobjects[identifier]
obj = curr[1]
updatefunc(obj, identifier, parameters=parameters)
self._set_canvasobject(identifier, typ, obj, curr[2], parameters)
def _update3draw(self, typ, identifier, parameters):
updatefunc = self._updatefuncs[typ]
curr = self._canvasobjects[identifier]
obj, bbox = curr[1], curr[2]
updatefunc(obj, identifier, bbox, parameters=parameters)
self._set_canvasobject(identifier, typ, obj, bbox, parameters)
def _update3(self, identifier, parameters):
if identifier not in self._canvasobjects: return False
curr = self._canvasobjects[identifier]
typ = curr[0]
if self._expandtypes[typ] == "show":
self._update3show(typ, identifier, parameters)
else: # draw
self._update3draw(typ, identifier, parameters)
return True
def _remove(self, identifier):
if identifier not in self._canvasobjects: return False
curr = self._canvasobjects[identifier]
typ = curr[0]
removefunc = self._removefuncs[typ]
removefunc(identifier)
self._canvasobjects.pop(identifier)
return True
def _set_drawfunc(self, typ, drawfunc):
self._drawfuncs[typ] = drawfunc
def _set_showfunc(self, typ, showfunc):
self._showfuncs[typ] = showfunc
def _set_updatefunc(self, typ, updatefunc):
self._updatefuncs[typ] = updatefunc
def _set_removefunc(self, typ, removefunc):
self._removefuncs[typ] = removefunc
def _add_showdraw_init(self, typ, args):
assert isinstance(args, canvasargs)
self._showdraw_inits.append((typ, args))
def _showdraw_init(self):
for typ, args in self._showdraw_inits:
showdraw = self._expandtypes[typ]
obj = args.obj
identifier = args.identifier
if identifier is None:
idnr = self._get_new_idnr(typ)
identifier = self._get_identifier(typ, idnr)
parameters = args.parameters
box = args.box
if showdraw == "show":
showfunc = self._showfuncs[typ]
i = showfunc(obj, identifier, parameters=parameters)
else:
assert box is not None # boundingbox
drawfunc = self._drawfuncs[typ]
i = drawfunc(obj, identifier, box, parameters=parameters)
if identifier is None: identifier = i
self._set_canvasobject(identifier, typ, obj, box, parameters)
def _set_mouseareas(self, mouseareas):
self._has_mouseareas = True
self._mouseareas = mouseareas
def place(self):
self._placed = True
# process reserves
for identifier in self._reserves:
type_, bbox, parameters = self._reserves[identifier]
p = plugin_supplier(type_, bbox, parameters)
libcontext.plugin(("canvas", "reserve", identifier), p)
#expand plugins
plugs = dict(libcontext.get_curr_context().plugins)
plug_detect = {}
for plug in plugs:
if not isinstance(plug, tuple): continue
if len(plug) != 3: continue
if plug[0] != "canvas": continue
if plug[1] not in expand_plugins: continue
typ = plug[2]
if plug[2] not in plug_detect: plug_detect[typ] = set()
plug_detect[typ].add(plug[1])
for typ in plug_detect:
p = plug_detect[typ]
has_show = "show" in p
has_draw = "draw" in p
if has_show and has_draw:
raise TypeError(
"Canvasdrone: cannot expand 'show' AND 'draw' function plugins for %s, only one can exist" % typ
)
if not has_show and not has_draw:
raise TypeError(
"Canvasdrone: cannot expand plugins for %s: declares %s but neither 'show' nor 'draw'" % (
typ, list(p))
)
if not "update" in p:
raise TypeError(
"Canvasdrone: cannot expand plugins for %s: declares %s but not 'update'" % (typ, list(p))
)
if not "remove" in p:
raise TypeError(
"Canvasdrone: cannot expand plugins for %s: declares %s but not 'remove'" % (typ, list(p))
)
if has_show:
s = socket_container(functools.partial(self._add_showdraw_init, typ))
libcontext.socket(("canvas", "show", "init", typ), s)
s = socket_single_required(functools.partial(self._set_showfunc, typ))
libcontext.socket(("canvas", "show", typ), s)
p = plugin_supplier(functools.partial(self._show1, typ))
libcontext.plugin(("canvas", "show1", typ), p)
p = plugin_supplier(functools.partial(self._show2, typ))
libcontext.plugin(("canvas", "show2", typ), p)
self._expandtypes[typ] = "show"
else: #has_draw
s = socket_container(functools.partial(self._add_showdraw_init, typ))
libcontext.socket(("canvas", "draw", "init", typ), s)
s = socket_single_required(functools.partial(self._set_drawfunc, typ))
libcontext.socket(("canvas", "draw", typ), s)
p = plugin_supplier(functools.partial(self._draw1, typ))
libcontext.plugin(("canvas", "draw1", typ), p)
p = plugin_supplier(functools.partial(self._draw2, typ))
libcontext.plugin(("canvas", "draw2", typ), p)
p = plugin_supplier(functools.partial(self._draw3, typ))
libcontext.plugin(("canvas", "draw3", typ), p)
self._expandtypes[typ] = "draw"
s = socket_single_required(functools.partial(self._set_updatefunc, typ))
libcontext.socket(("canvas", "update", typ), s)
s = socket_single_required(functools.partial(self._set_removefunc, typ))
libcontext.socket(("canvas", "remove", typ), s)
p = plugin_supplier(self._remove)
libcontext.plugin(("canvas", "remove1"), p)
p = plugin_supplier(self.dynamic_reserve)
libcontext.plugin(("canvas", "dynamic-reserve"), p)
p = plugin_supplier(self._update1)
libcontext.plugin(("canvas", "update1"), p)
p = plugin_supplier(self._update2)
libcontext.plugin(("canvas", "update2"), p)
p = plugin_supplier(self._update3)
libcontext.plugin(("canvas", "update3"), p)
p = plugin_single_required(self._showdraw_init)
libcontext.plugin(("bee", "init"), p)
s = socket_single_optional(self._set_mouseareas)
libcontext.socket(("canvas", "mousearea", "mouseareas"), s)
#TODO: dragonfly.logic.set_attribute/get_attribute
|
|
import timeit
import numpy
import os
import sys
import theano
import theano.tensor
from Boards import *
from NeuralNetwork import *
from RuleLearner import RuleLearner
class NeuralNetworkRuleLearner(RuleLearner):
"""Neural network rule learner class
The class is a concrete RuleLearner implementation based on a neural network. The neural network is first trained on
a series of randomly generated board states. The network learns to predict which moves are valid. After training,
the neural network can be asked to indicate which moves are valid for any board of the same type and of the same
size as the network was trained on.
"""
def __init__(self, board_type, board_height, board_width, rng):
"""Initialize the neural network rule learner
:type board_type: Boards.Board-(sub)class
:param board_type: the board class for which to create the rule learner network
:type board_height: positive integer
:param board_height: the height (number of rows) of the board
:type board_width: positive integer
:param board_width: the width (number of columns) of the board
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
"""
RuleLearner.__init__(self, board_height, board_width)
self._board_type = board_type
self._x = theano.tensor.fmatrix("x")
input_size = self._board_height * self._board_width * 3
output_size = self._board_height * self._board_width
self._hidden_layer_count = 2
self._hidden_layers = self._hidden_layer_count * [None]
self._hidden_layers[0] = HiddenLayer(
rng=rng,
input=self._x,
n_in=input_size,
n_out=50,
activation_function=theano.tensor.tanh)
self._hidden_layers[1] = HiddenLayer(
rng=rng,
input=self._hidden_layers[0].output,
n_in=50,
n_out=output_size,
activation_function=theano.tensor.tanh)
output = self._hidden_layers[-1].output
# Theano does not provide a thresholding operation, but we can achieve the same by doing
# b = threshold(a, t) <--> b = (sign(a - t) + 1) / 2
self._y_predicted = (theano.tensor.sgn(output - 0.5) + 1) / 2
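# Worked example: with the 0.5 threshold above, an activation of 0.7 gives
# (sgn(0.7 - 0.5) + 1) / 2 = (1 + 1) / 2 = 1, while 0.3 gives
# (sgn(-0.2) + 1) / 2 = 0, i.e. an elementwise hard 0/1 decision.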
# L1 norm ; one regularization option is to enforce L1 norm to
# be small
self._l1_norm = 0
self._l2_norm_squared = 0
for hidden_layer in self._hidden_layers:
self._l1_norm = self._l1_norm + abs(hidden_layer.W).sum()
self._l2_norm_squared = self._l2_norm_squared + (hidden_layer.W ** 2).sum()
# The parameters of the model are the parameters of the layers it consists of.
self._params = []
[self._params.extend(layer.params) for layer in self._hidden_layers]
# Keep track of model input
self._input = self._x
def get_valid_moves(self, board):
"""Get the valid moves for the board.
:type board: Boards.Board
:param board: the board for which to determine the valid moves
:returns: a 2D Numpy array with the same dimensions as the board; the cells where moves are valid are set
to 1, the rest are set to 0
"""
dataset = self._create_dataset_from(board)
result = self._deploy_model(dataset[0].get_value())
return result
def train(self, learning_rate=0.01, l1_reg=0.00, l2_reg=0.0001, n_epochs=1000, batch_size=20):
"""
Train the rule learner using backpropagation and stochastic gradient descent.
:type learning_rate: float
:param learning_rate: learning rate used (factor for the stochastic gradient)
:type l1_reg: float
:param l1_reg: L1-norm's weight when added to the cost (see regularization)
:type l2_reg: float
:param l2_reg: L2-norm's weight when added to the cost (see regularization)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
:type batch_size: positive integer
:param batch_size: the size of each train, validation and test batch
"""
train_dataset = self._create_dataset_random(2000)
validation_dataset = self._create_dataset_random(2000)
test_dataset = self._create_dataset_random(2000)
train_set_x, train_set_y = train_dataset
validation_set_x, validation_set_y = validation_dataset
test_set_x, test_set_y = test_dataset
# Compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
n_validation_batches = validation_set_x.get_value(borrow=True).shape[0] / batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
######################
# BUILD ACTUAL MODEL #
######################
print '... Building the model'
# Allocate symbolic variables for the data
batch_index = theano.tensor.lscalar() # Index to a [mini]batch
x = theano.tensor.imatrix('x') # The data is presented as a set of row vectors of int labels.
y = theano.tensor.imatrix('y') # The labels are presented as a set of row vectors of int labels.
rng = numpy.random.RandomState(1234)
# The cost we minimize during training is the euclidean cost of the model plus the regularization terms
# (L1 and L2); cost is expressed here symbolically.
cost = self._euclidean_cost(y) + l1_reg * self._l1_norm + l2_reg * self._l2_norm_squared
# Compile the Theano function that computes the mistakes that are made by the model on a minibatch.
test_model = theano.function(
inputs=[batch_index],
outputs=self._error(y),
givens={
self._x: test_set_x[batch_index * batch_size:(batch_index + 1) * batch_size],
y: test_set_y[batch_index * batch_size:(batch_index + 1) * batch_size]
}
)
validate_model = theano.function(
inputs=[batch_index],
outputs=self._error(y),
givens={
self._x: validation_set_x[batch_index * batch_size:(batch_index + 1) * batch_size],
y: validation_set_y[batch_index * batch_size:(batch_index + 1) * batch_size]
}
)
# Compute the gradient of cost with respect to each of the parameters.
param_gradients = [theano.tensor.grad(cost, param) for param in self._params]
# specify how to update the parameters of the model as a list of
# (variable, update expression) pairs
updates = [
(param, param - learning_rate * param_gradient)
for param, param_gradient in zip(self._params, param_gradients)]
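# e.g. for a single weight matrix W this yields the plain SGD step
# (W, W - learning_rate * d(cost)/dW), applied once per minibatch.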
# Compile a Theano function `train_model` that returns the cost, but at the same time updates the model
# parameters on the rules defined in `updates`
train_model = theano.function(
inputs=[batch_index],
outputs=cost,
updates=updates,
givens={
self._x: train_set_x[batch_index * batch_size: (batch_index + 1) * batch_size],
y: train_set_y[batch_index * batch_size: (batch_index + 1) * batch_size]
}
)
###############
# TRAIN MODEL #
###############
print '... Training'
# Early-stopping parameters
patience = 10000 # Look at this many examples regardless.
patience_increase = 2 # Wait this much longer when a new best is found.
improvement_threshold = 0.995 # A relative improvement of this much is considered significant.
# Go through this many minibatches before checking the network on the validation set; in this case we check every
# epoch
validation_frequency = min(n_train_batches, patience / 2)
best_validation_loss = numpy.inf
best_iteration = 0
test_score = 0.
start_time = timeit.default_timer()
epoch = 0
done_looping = False
while (epoch < n_epochs) and (not done_looping):
epoch += 1
for batch_index in xrange(n_train_batches):
minibatch_avg_cost = train_model(batch_index)
# iteration number
iteration = (epoch - 1) * n_train_batches + batch_index
if (iteration + 1) % validation_frequency == 0:
# Compute zero-one loss on validation set.
validation_losses = [validate_model(i) for i in xrange(n_validation_batches)]
this_validation_loss = numpy.mean(validation_losses)
print(
'Epoch %i, batch %i/%i, validation error %f' % (
epoch,
batch_index + 1,
n_train_batches,
this_validation_loss
))
# If we got the best validation score until now run a new test.
if this_validation_loss < best_validation_loss:
# Improve patience if loss improvement is good enough.
if this_validation_loss < best_validation_loss * improvement_threshold:
patience = max(patience, iteration * patience_increase)
best_validation_loss = this_validation_loss
best_iteration = iteration
# Test it on the test set
test_losses = [test_model(i) for i in xrange(n_test_batches)]
test_score = numpy.mean(test_losses)
print(
'Epoch %i, batch %i/%i, test error of best model %f' % (
epoch,
batch_index + 1,
n_train_batches,
test_score))
if patience <= iteration:
print "Stopped early"
done_looping = True
break
end_time = timeit.default_timer()
print('Optimization complete. Best validation score of %f obtained at iteration %i, with test performance %f' %
(best_validation_loss,
best_iteration + 1,
test_score))
print >> sys.stderr, ('The code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.))
# Create the function for actual rule learner use.
self._deploy_model = theano.function(
inputs=[self._x],
outputs=[self._y_predicted])
def _cross_entropy_cost(self, y):
""" Calculates cross-entropy cost of the network's predictions given a set of target outputs averaged across all
batch samples
NOT CURRENTLY IMPLEMENTED
:type y: theano.tensor.TensorType
:param y: the sample target outputs, represented as a 2D matrix where each row describes a different sample's
target outputs (valid moves)
:return: a theano expression representing the cross-entropy cost
"""
raise NotImplementedError()
def _euclidean_cost(self, y):
"""Calculates Euclidean cost of the network's predictions given a set of target outputs averaged across all
batch samples
:type y: theano.tensor.TensorType
:param y: the sample target outputs, represented as a 2D matrix where each row describes a different sample's
target outputs (valid moves)
Note: we use the mean/average instead of the sum so that the learning rate is less dependent on the batch size
"""
output_layer = self._hidden_layers[-1]
sample_euclidean_costs = ((output_layer.output - y) ** 2).sum(1) # Sum across the columns.
mean_euclidean_cost = theano.tensor.mean(sample_euclidean_costs)
return mean_euclidean_cost
def _error(self, y):
"""Calculates the number of mistakes per batch sample averaged across all batch samples
:type y: theano.tensor.TensorType
:param y: the sample target outputs, represented as a 2D matrix where each row describes a different sample's
target outputs (valid moves)
:returns: a theano expression representing the number of mistakes per batch sample averaged across all batch
samples
"""
# Check if y has same dimension of y_predicted
if y.ndim != self._y_predicted.ndim:
raise TypeError(
'y should have the same shape as self._y_predicted',
('y', y.type, 'y_predicted', self._y_predicted.type))
# Check if y is of the correct data type
if y.dtype.startswith('int'):
# Per sample we calculate the error. The sample error is defined as the number of mistakes made for that
# single sample, i.e. false positives (moves considered valid that are not actually valid) and false
# negatives (moves considered invalid that are actually valid).
# TODO: We might want to consider dividing this error by the board size. That way, the sample
# error is independent from the board size. However, that makes the error less intuitive to interpret.
sample_errors = theano.tensor.neq(self._y_predicted, y).sum(1) # Sum across the columns.
return theano.tensor.mean(sample_errors)
else:
raise NotImplementedError()
def _create_dataset_random(self, size):
"""Creates a random dataset which can be used for training, validation and/or testing
:type size: non-negative integer
:param size: the number of samples in the dataset
:returns: a 2-tuple of the shared theano variables representing the dataset inputs and targets, where the inputs
variable is an N by (H*W*3) matrix and the targets variable an N by (H*W) matrix, N being
the number of samples, H the board height and W the board width
"""
inputs = numpy.zeros([size, self._board_height * self._board_width * 3])
targets = numpy.zeros([size, self._board_height * self._board_width])
for sample_index in xrange(size):
board = self._board_type(self._board_height, self._board_width, random_state=True)
input = self._create_board_input(board)
inputs[sample_index, :] = input.reshape([1, -1])
target = self._create_board_target(board)
targets[sample_index, :] = target.reshape([1, -1])
return self._create_dataset(inputs, targets)
def _create_dataset_from(self, boards):
"""Creates a dataset which can be used for training, validation and/or testing
:type boards: A list of Board objects or a single Board object
:param boards: the boards to construct a dataset out of
:returns: a 2-tuple of the shared theano variables representing the dataset inputs and targets, where the inputs
variable is an N by (H*W*3) matrix and the targets variable an N by (H*W) matrix, N being
the number of samples, H the board height and W the board width
"""
try:
iterator = iter(boards)
except TypeError:
boards = [boards]
size = len(boards)
inputs = numpy.zeros([size, self._board_height * self._board_width * 3])
targets = numpy.zeros([size, self._board_height * self._board_width])
for sample_index in xrange(size):
board = boards[sample_index]
if board.height != self._board_height or board.width != self._board_width:
raise ValueError("Incorrect board dimensions")
input = self._create_board_input(board)
inputs[sample_index, :] = input.reshape([1, -1])
target = self._create_board_target(board)
targets[sample_index, :] = target.reshape([1, -1])
return self._create_dataset(inputs, targets)
def _create_dataset(self, inputs, targets):
"""Creates a dataset which can be used for training, validation and/or testing
:type inputs: N by (H*W*3) numpy array, where N is the number of samples, H the board height and W the board
width
:param inputs: the dataset sample inputs, each sample being an encoded board
:type targets: N by (H*W) numpy array, where N is the number of samples, H the board height and W the board width
:param targets: the dataset sample targets, each target being an encoded set of valid moves
:returns: a 2-tuple of the shared theano variables representing the dataset inputs and targets, where the inputs
variable is an N by (H*W*3) matrix and the targets variable an N by (H*W) matrix, N being
the number of samples, H the board height and W the board width
"""
if inputs.shape[0] != targets.shape[0]:
raise ValueError("Inputs and targets must have the same number of samples")
def shared_dataset(data_xy, borrow=True):
""" Function that loads the dataset into shared variables
:type borrow: boolean
:param borrow: whether to borrow the theano shared variable values
The reason we store our dataset in shared variables is to allow
Theano to copy it into the GPU memory (when code is run on GPU).
Since copying data into the GPU is slow, copying a minibatch every time
is needed (the default behaviour if the data is not in a shared
variable) would lead to a large decrease in performance.
"""
data_x, data_y = data_xy
shared_x = theano.shared(numpy.asarray(data_x, dtype=theano.config.floatX), borrow=borrow)
shared_y = theano.shared(numpy.asarray(data_y, dtype=theano.config.floatX), borrow=borrow)
# When storing data on the GPU it has to be stored as floats
# therefore we will store the labels as ``floatX`` as well
# (``shared_y`` does exactly that). But during our computations
# we need them as ints (we use labels as index, and if they are
# floats it doesn't make sense) therefore instead of returning
# ``shared_y`` we will have to cast it to int. This little hack
# lets us get around this issue.
return shared_x, theano.tensor.cast(shared_y, 'int32')
dataset_x, dataset_y = shared_dataset((inputs, targets))
return (dataset_x, dataset_y)
def _create_board_input(self, board):
"""Converts a board state into input that can be given to the network.
:type board: Boards.Board
:param board: the board
:returns: a 2D numpy array of size bh by (bw * 3), where bh is the board height and bw the board width
"""
input = numpy.zeros([board.height, board.width, 3])
cell_state_to_input_dict = {
0: numpy.array([1, 0, 0]),
1: numpy.array([0, 1, 0]),
2: numpy.array([0, 0, 1])}
for row in range(board.height):
for col in range(board.width):
cell_state = cell_state_to_input_dict[board.board[row][col]]
input[row, col] = cell_state
return input
def _create_board_target(self, board):
"""Converts a board state into a target output that can be used to calculate the network's prediction cost
:type board: Boards.Board
:param board: the board
:returns: a 2D numpy array of the same size as the board
"""
target = numpy.zeros([board.height, board.width])
valid_moves = board.GetValidMoves()
# The valid moves are represented as an N by 2 list (list with N 2-element entries), where N is the number of
# valid moves and each valid move consists of a row and column index.
# The zip command converts this to a list of two tuples each consisting of N entries. The first tuple contains
# the rows indices, the second tuple contains the column indices. We directly index the target matrix using
# these two tuples (similar to MATLAB indexing).
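# Worked example (hypothetical moves): valid_moves = [(0, 3), (2, 5)] gives
# zip(*valid_moves) = [(0, 2), (3, 5)], so the assignment below sets
# target[0, 3] and target[2, 5] to 1.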
target[zip(*valid_moves)] = 1
return target
if __name__ == '__main__':
rule_learner = NeuralNetworkRuleLearner(Connect4_Board, 6, 7, numpy.random.RandomState(1234))
rule_learner.train()
board = Connect4_Board(6, 7, True)
print rule_learner.get_valid_moves(board)
|
|
#!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
'''
Test behavior of -maxuploadtarget.
* Verify that getdata requests for old blocks (>1week) are dropped
if uploadtarget has been reached.
* Verify that getdata requests for recent blocks are respected even
if uploadtarget has been reached.
* Verify that the upload counters are reset after 24 hours.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to send
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
self.block_receive_map = {}
def add_connection(self, conn):
self.connection = conn
self.peer_disconnected = False
def on_inv(self, conn, message):
pass
# Track the last getdata message we receive (used in the test)
def on_getdata(self, conn, message):
self.last_getdata = message
def on_block(self, conn, message):
message.block.calc_sha256()
try:
self.block_receive_map[message.block.sha256] += 1
except KeyError as e:
self.block_receive_map[message.block.sha256] = 1
# Spin until verack message is received from the node.
# We use this to signal that our test can begin. This
# is called from the testing thread, so it needs to acquire
# the global lock.
def wait_for_verack(self):
def veracked():
return self.verack_received
return wait_until(veracked, timeout=10)
def wait_for_disconnect(self):
def disconnected():
return self.peer_disconnected
return wait_until(disconnected, timeout=10)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
def on_close(self, conn):
self.peer_disconnected = True
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
def received_pong():
return (self.last_pong.nonce == self.ping_counter)
self.connection.send_message(msg_ping(nonce=self.ping_counter))
success = wait_until(received_pong, timeout)
self.ping_counter += 1
return success
class MaxUploadTest(BitcoinTestFramework):
def __init__(self):
self.utxo = []
self.txouts = gen_return_txouts()
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("ETERNITYD", "eternityd"),
help="eternityd binary to test")
def setup_chain(self):
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
# Start a node with maxuploadtarget of 200 MB (/24h)
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-maxuploadtarget=200", "-blockmaxsize=999000"]))
def mine_full_block(self, node, address):
# Want to create a full block
# We'll generate a 66k transaction below, and 14 of them are close to the 1MB block limit
for j in xrange(14):
if len(self.utxo) < 14:
self.utxo = node.listunspent()
inputs=[]
outputs = {}
t = self.utxo.pop()
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
remchange = t["amount"] - Decimal("0.001000")
outputs[address]=remchange
# Create a basic transaction that will send change back to ourselves after accounting for a fee,
# and then insert the 128 generated transaction outs in the middle. rawtx[92] is where the #
# of txouts is stored and is the only thing we overwrite from the original transaction
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + self.txouts
newtx = newtx + rawtx[94:]
# Appears to be ever so slightly faster to sign with SIGHASH_NONE
signresult = node.signrawtransaction(newtx,None,None,"NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
# Mine a full sized block which will be these transactions we just created
node.generate(1)
def run_test(self):
# Before we connect anything, we first set the time on the node
# to be in the past, otherwise things break because the CNode
# time counters can't be reset backward after initialization
old_time = int(time.time() - 2*60*60*24*7)
self.nodes[0].setmocktime(old_time)
# Generate some old blocks
self.nodes[0].generate(130)
# test_nodes[0] will only request old blocks
# test_nodes[1] will only request new blocks
# test_nodes[2] will test resetting the counters
test_nodes = []
connections = []
for i in xrange(3):
test_nodes.append(TestNode())
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
test_nodes[i].add_connection(connections[i])
NetworkThread().start() # Start up network handling in another thread
[x.wait_for_verack() for x in test_nodes]
# Test logic begins here
# Now mine a big block
self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
# Store the hash; we'll request this later
big_old_block = self.nodes[0].getbestblockhash()
old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
big_old_block = int(big_old_block, 16)
# Advance to two days ago
self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
# Mine one more block, so that the prior block looks old
self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
# We'll be requesting this new block too
big_new_block = self.nodes[0].getbestblockhash()
new_block_size = self.nodes[0].getblock(big_new_block)['size']
big_new_block = int(big_new_block, 16)
# test_nodes[0] will test what happens if we just keep requesting
# the same big old block too many times (expect: disconnect)
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, big_old_block))
max_bytes_per_day = 200*1024*1024
daily_buffer = 144 * MAX_BLOCK_SIZE
max_bytes_available = max_bytes_per_day - daily_buffer
success_count = max_bytes_available // old_block_size
# 144MB will be reserved for relaying new blocks, so expect this to
# succeed for ~70 tries.
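# Rough arithmetic behind the ~70 figure (assuming MAX_BLOCK_SIZE is
# 1,000,000 bytes): 200*1024*1024 = 209,715,200 bytes/day minus the
# 144*1,000,000 = 144,000,000 byte buffer leaves ~65.7 MB, which covers
# roughly 70 serves of a ~930 kB block.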
for i in xrange(success_count):
test_nodes[0].send_message(getdata_request)
test_nodes[0].sync_with_ping()
assert_equal(test_nodes[0].block_receive_map[big_old_block], i+1)
assert_equal(len(self.nodes[0].getpeerinfo()), 3)
# At most a couple more tries should succeed (depending on how long
# the test has been running so far).
for i in xrange(3):
test_nodes[0].send_message(getdata_request)
test_nodes[0].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
print "Peer 0 disconnected after downloading old block too many times"
# Requesting the current block on test_nodes[1] should succeed indefinitely,
# even when over the max upload target.
# We'll try 200 times
getdata_request.inv = [CInv(2, big_new_block)]
for i in xrange(200):
test_nodes[1].send_message(getdata_request)
test_nodes[1].sync_with_ping()
assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
print "Peer 1 able to repeatedly download new block"
# But if test_nodes[1] tries for an old block, it gets disconnected too.
getdata_request.inv = [CInv(2, big_old_block)]
test_nodes[1].send_message(getdata_request)
test_nodes[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 1)
print "Peer 1 disconnected after trying to download old block"
print "Advancing system time on node to clear counters..."
# If we advance the time by 24 hours, then the counters should reset,
# and test_nodes[2] should be able to retrieve the old block.
self.nodes[0].setmocktime(int(time.time()))
test_nodes[2].sync_with_ping()
test_nodes[2].send_message(getdata_request)
test_nodes[2].sync_with_ping()
assert_equal(test_nodes[2].block_receive_map[big_old_block], 1)
print "Peer 2 able to download old block"
[c.disconnect_node() for c in connections]
#stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
print "Restarting nodes with -whitelist=127.0.0.1"
stop_node(self.nodes[0], 0)
self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000"])
#recreate/reconnect 3 test nodes
test_nodes = []
connections = []
for i in xrange(3):
test_nodes.append(TestNode())
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
test_nodes[i].add_connection(connections[i])
NetworkThread().start() # Start up network handling in another thread
[x.wait_for_verack() for x in test_nodes]
#retrieve 20 blocks which should be enough to break the 1MB limit
getdata_request.inv = [CInv(2, big_new_block)]
for i in xrange(20):
test_nodes[1].send_message(getdata_request)
test_nodes[1].sync_with_ping()
assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
getdata_request.inv = [CInv(2, big_old_block)]
test_nodes[1].send_message(getdata_request)
test_nodes[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 3) #node is still connected because of the whitelist
print "Peer 1 still connected after trying to download old block (whitelisted)"
[c.disconnect_node() for c in connections]
if __name__ == '__main__':
MaxUploadTest().main()
|
|
import numpy as np
import scipy.stats
import scipy.optimize  # used explicitly in TestOptimizationResults.setup_class
import os
import logging
from astropy.tests.helper import pytest, catch_warnings
from astropy.modeling import models
from astropy.modeling.fitting import _fitter_to_model_params
from stingray import Powerspectrum, AveragedPowerspectrum
from stingray.modeling import ParameterEstimation, PSDParEst, \
OptimizationResults, SamplingResults
from stingray.modeling import PSDPosterior, set_logprior, PSDLogLikelihood, \
LogLikelihood
try:
from statsmodels.tools.numdiff import approx_hess
comp_hessian = True
except ImportError:
comp_hessian = False
try:
import emcee
can_sample = True
except ImportError:
can_sample = False
import matplotlib.pyplot as plt
class LogLikelihoodDummy(LogLikelihood):
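    # evaluate() deliberately returns NaN so that a fit using this dummy
    # likelihood can never converge; it exercises the "too many tries" failure
    # path (see test_fit_method_fails_with_too_many_tries).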
def __init__(self, x, y, model):
LogLikelihood.__init__(self, x, y, model)
    def evaluate(self, pars, neg=False):
        return np.nan
class OptimizationResultsSubclassDummy(OptimizationResults):
def __init__(self, lpost, res, neg, log=None):
if log is None:
self.log = logging.getLogger('Fitting summary')
self.log.setLevel(logging.DEBUG)
if not self.log.handlers:
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
self.log.addHandler(ch)
self.neg = neg
if res is not None:
self.result = res.fun
self.p_opt = res.x
else:
self.result = None
self.p_opt = None
self.model = lpost.model
class TestParameterEstimation(object):
@classmethod
def setup_class(cls):
np.random.seed(100)
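        # Build a synthetic Leahy-normalised periodogram: pure exponential noise
        # scaled to a mean power of 2, which a Const1D model with amplitude ~2
        # should describe well.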
m = 1
nfreq = 100
freq = np.arange(nfreq)
noise = np.random.exponential(size=nfreq)
power = noise * 2.0
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
cls.ps = ps
cls.a_mean, cls.a_var = 2.0, 1.0
cls.model = models.Const1D()
p_amplitude = lambda amplitude: \
scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)
cls.priors = {"amplitude": p_amplitude}
cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power, cls.model,
m=cls.ps.m)
cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
def test_par_est_initializes(self):
pe = ParameterEstimation()
def test_parest_stores_max_post_correctly(self):
"""
Make sure the keyword for Maximum A Posteriori fits is stored correctly
as a default.
"""
pe = ParameterEstimation()
assert pe.max_post is True, "max_post should be set to True as a default."
def test_object_works_with_loglikelihood_object(self):
llike = PSDLogLikelihood(self.ps.freq, self.ps.power,
self.model, m=self.ps.m)
pe = ParameterEstimation()
res = pe.fit(llike, [2.0])
assert isinstance(res,
OptimizationResults), "res must be of " \
"type OptimizationResults"
def test_fit_fails_when_object_is_not_posterior_or_likelihood(self):
x = np.ones(10)
y = np.ones(10)
pe = ParameterEstimation()
with pytest.raises(TypeError):
res = pe.fit(x, y)
def test_fit_fails_without_lpost_or_t0(self):
pe = ParameterEstimation()
with pytest.raises(TypeError):
res = pe.fit()
def test_fit_fails_without_t0(self):
pe = ParameterEstimation()
with pytest.raises(TypeError):
res = pe.fit(np.ones(10))
def test_fit_fails_with_incorrect_number_of_parameters(self):
pe = ParameterEstimation()
t0 = [1, 2]
with pytest.raises(ValueError):
res = pe.fit(self.lpost, t0)
def test_fit_method_works_with_correct_parameter(self):
pe = ParameterEstimation()
t0 = [2.0]
res = pe.fit(self.lpost, t0)
def test_fit_method_fails_with_too_many_tries(self):
lpost = LogLikelihoodDummy(self.ps.freq, self.ps.power, self.model)
pe = ParameterEstimation()
t0 = [2.0]
with pytest.raises(Exception):
res = pe.fit(lpost, t0, neg=True)
def test_compute_lrt_fails_when_garbage_goes_in(self):
pe = ParameterEstimation()
t0 = [2.0]
with pytest.raises(TypeError):
pe.compute_lrt(self.lpost, t0, None, t0)
with pytest.raises(ValueError):
pe.compute_lrt(self.lpost, t0[:-1], self.lpost, t0)
def test_compute_lrt_sets_max_post_to_false(self):
t0 = [2.0]
pe = ParameterEstimation(max_post=True)
assert pe.max_post is True
delta_deviance, opt1, opt2 = pe.compute_lrt(self.lpost, t0,
self.lpost, t0)
assert pe.max_post is False
assert delta_deviance < 1e-7
@pytest.mark.skipif("not can_sample")
def test_sampler_runs(self):
pe = ParameterEstimation()
if os.path.exists("test_corner.pdf"):
os.unlink("test_corner.pdf")
with catch_warnings(RuntimeWarning):
sample_res = pe.sample(self.lpost, [2.0], nwalkers=50, niter=10,
burnin=50, print_results=True, plot=True)
assert os.path.exists("test_corner.pdf")
assert sample_res.acceptance > 0.25
assert isinstance(sample_res, SamplingResults)
# TODO: Fix pooling with the current setup of logprior
# @pytest.mark.skipif("not can_sample")
# def test_sampler_pooling(self):
# pe = ParameterEstimation()
# if os.path.exists("test_corner.pdf"):
# os.unlink("test_corner.pdf")
# with catch_warnings(RuntimeWarning):
# sample_res = pe.sample(self.lpost, [2.0], nwalkers=50, niter=10,
# burnin=50, print_results=True, plot=True,
# pool=True)
@pytest.mark.skipif("can_sample")
def test_sample_raises_error_without_emcee(self):
pe = ParameterEstimation()
with pytest.raises(ImportError):
sample_res = pe.sample(self.lpost, [2.0])
def test_simulate_lrt_fails_in_superclass(self):
pe = ParameterEstimation()
with pytest.raises(NotImplementedError):
pe.simulate_lrts(None, None, None, None, None)
class TestOptimizationResults(object):
@classmethod
def setup_class(cls):
np.random.seed(1000)
m = 1
nfreq = 100
freq = np.arange(nfreq)
noise = np.random.exponential(size=nfreq)
power = noise * 2.0
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.n = freq.shape[0]
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
cls.ps = ps
cls.a_mean, cls.a_var = 2.0, 1.0
cls.model = models.Const1D()
p_amplitude = lambda amplitude: \
scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)
cls.priors = {"amplitude": p_amplitude}
cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
cls.model, m=cls.ps.m)
cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
cls.fitmethod = "powell"
cls.max_post = True
cls.t0 = np.array([2.0])
cls.neg = True
cls.opt = scipy.optimize.minimize(cls.lpost, cls.t0,
method=cls.fitmethod,
args=cls.neg, tol=1.e-10)
cls.opt.x = np.atleast_1d(cls.opt.x)
cls.optres = OptimizationResultsSubclassDummy(cls.lpost,
cls.opt,
neg=True)
def test_object_initializes_correctly(self):
res = OptimizationResults(self.lpost, self.opt, neg=self.neg)
assert hasattr(res, "p_opt")
assert hasattr(res, "result")
assert hasattr(res, "deviance")
assert hasattr(res, "aic")
assert hasattr(res, "bic")
assert hasattr(res, "model")
assert isinstance(res.model, models.Const1D)
assert res.p_opt == self.opt.x, "res.p_opt must be the same as opt.x!"
assert np.isclose(res.p_opt[0], 2.0, atol=0.1, rtol=0.1)
assert res.model == self.lpost.model
assert res.result == self.opt.fun
mean_model = np.ones_like(self.lpost.x) * self.opt.x[0]
        assert np.allclose(res.mfit, mean_model), "res.mfit should be the mean " \
                                                  "model evaluated on the data."
def test_compute_criteria_works_correctly(self):
res = OptimizationResults(self.lpost, self.opt, neg = self.neg)
test_aic = res.result+ 2.0*res.p_opt.shape[0]
test_bic = res.result + res.p_opt.shape[0] * \
np.log(self.lpost.x.shape[0])
test_deviance = -2 * self.lpost.loglikelihood(res.p_opt,
neg=False)
assert np.isclose(res.aic, test_aic, atol=0.1, rtol=0.1)
assert np.isclose(res.bic, test_bic, atol=0.1, rtol=0.1)
assert np.isclose(res.deviance, test_deviance, atol=0.1, rtol=0.1)
def test_merit_calculated_correctly(self):
res = OptimizationResults(self.lpost, self.opt, neg=self.neg)
test_merit = np.sum(((self.ps.power - 2.0)/2.0)**2.)
assert np.isclose(res.merit, test_merit, rtol=0.2)
def test_compute_statistics_computes_mfit(self):
assert hasattr(self.optres, "mfit") is False
self.optres._compute_statistics(self.lpost)
assert hasattr(self.optres, "mfit")
def test_compute_model(self):
self.optres._compute_model(self.lpost)
assert hasattr(self.optres,
"mfit"), "OptimizationResult object should have mfit " \
"attribute at this point!"
_fitter_to_model_params(self.model, self.opt.x)
mfit_test = self.model(self.lpost.x)
assert np.allclose(self.optres.mfit, mfit_test)
def test_compute_statistics_computes_all_statistics(self):
self.optres._compute_statistics(self.lpost)
assert hasattr(self.optres, "merit")
assert hasattr(self.optres, "dof")
assert hasattr(self.optres, "sexp")
assert hasattr(self.optres, "ssd")
assert hasattr(self.optres, "sobs")
test_merit = np.sum(((self.ps.power - 2.0)/2.0)**2.)
test_dof = self.ps.n - self.lpost.npar
test_sexp = 2.0 * self.lpost.x.shape[0] * len(self.optres.p_opt)
test_ssd = np.sqrt(2.0*test_sexp)
test_sobs = np.sum(self.ps.power - self.optres.p_opt[0])
assert np.isclose(test_merit, self.optres.merit, rtol=0.2)
assert test_dof == self.optres.dof
assert test_sexp == self.optres.sexp
assert test_ssd == self.optres.ssd
assert np.isclose(test_sobs, self.optres.sobs, atol=0.01, rtol=0.01)
def test_compute_criteria_returns_correct_attributes(self):
self.optres._compute_criteria(self.lpost)
assert hasattr(self.optres, "aic")
assert hasattr(self.optres, "bic")
assert hasattr(self.optres, "deviance")
npar = self.optres.p_opt.shape[0]
test_aic = self.optres.result + 2. * npar
test_bic = self.optres.result + npar * np.log(self.ps.freq.shape[0])
test_deviance = -2 * self.lpost.loglikelihood(self.optres.p_opt,
neg=False)
assert np.isclose(test_aic, self.optres.aic)
assert np.isclose(test_bic, self.optres.bic)
assert np.isclose(test_deviance, self.optres.deviance)
def test_compute_covariance_with_hess_inverse(self):
self.optres._compute_covariance(self.lpost, self.opt)
assert np.allclose(self.optres.cov, np.asarray(self.opt.hess_inv))
assert np.allclose(self.optres.err, np.sqrt(np.diag(self.opt.hess_inv)))
@pytest.mark.skipif("comp_hessian")
def test_compute_covariance_without_comp_hessian(self):
self.optres._compute_covariance(self.lpost, None)
assert self.optres.cov is None
assert self.optres.err is None
@pytest.mark.skipif("not comp_hessian")
    def test_compute_covariance_with_approx_hess(self):
optres = OptimizationResultsSubclassDummy(self.lpost, self.opt,
neg=True)
optres._compute_covariance(self.lpost, self.opt)
if comp_hessian:
phess = approx_hess(self.opt.x, self.lpost)
hess_inv = np.linalg.inv(phess)
assert np.allclose(optres.cov, hess_inv)
assert np.allclose(optres.err, np.sqrt(np.diag(np.abs(hess_inv))))
def test_print_summary_works(self, logger, caplog):
self.optres._compute_covariance(self.lpost, None)
self.optres.print_summary(self.lpost)
assert 'Parameter amplitude' in caplog.text
assert "Fitting statistics" in caplog.text
assert "number of data points" in caplog.text
assert "Deviance [-2 log L] D =" in caplog.text
assert "The Akaike Information Criterion of " \
"the model is" in caplog.text
assert "The Bayesian Information Criterion of " \
"the model is" in caplog.text
assert "The figure-of-merit function for this model" in caplog.text
assert "Summed Residuals S =" in caplog.text
assert "Expected S" in caplog.text
assert "merit function" in caplog.text
if can_sample:
class SamplingResultsDummy(SamplingResults):
def __init__(self, sampler, ci_min=0.05, ci_max=0.95, log=None):
if log is None:
self.log = logging.getLogger('Fitting summary')
self.log.setLevel(logging.DEBUG)
if not self.log.handlers:
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
self.log.addHandler(ch)
# store all the samples
self.samples = sampler.get_chain(flat=True)
chain_ndims = sampler.get_chain().shape
self.nwalkers = float(chain_ndims[0])
self.niter = float(chain_ndims[1])
# store number of dimensions
self.ndim = chain_ndims[2]
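            # NOTE: emcee >= 3 returns get_chain() with shape
            # (nsteps, nwalkers, ndim), so the two values above are effectively
            # (niter, nwalkers) rather than (nwalkers, niter); the tests below
            # only use their product, so the ordering does not matter here.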
# compute and store acceptance fraction
self.acceptance = np.nanmean(sampler.acceptance_fraction)
self.L = self.acceptance * self.samples.shape[0]
class TestSamplingResults(object):
@classmethod
def setup_class(cls):
m = 1
nfreq = 100
freq = np.arange(nfreq)
noise = np.random.exponential(size=nfreq)
power = noise * 2.0
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
cls.ps = ps
cls.a_mean, cls.a_var = 2.0, 1.0
cls.model = models.Const1D()
p_amplitude = lambda amplitude: \
scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(
amplitude)
cls.priors = {"amplitude": p_amplitude}
cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
cls.model, m=cls.ps.m)
cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
cls.fitmethod = "BFGS"
cls.max_post = True
cls.t0 = [2.0]
cls.neg = True
pe = ParameterEstimation()
res = pe.fit(cls.lpost, cls.t0)
cls.nwalkers = 50
cls.niter = 100
np.random.seed(200)
p0 = np.array(
[np.random.multivariate_normal(res.p_opt, res.cov) for
i in range(cls.nwalkers)])
cls.sampler = emcee.EnsembleSampler(cls.nwalkers,
len(res.p_opt), cls.lpost,
args=[False])
with catch_warnings(RuntimeWarning):
_, _, _ = cls.sampler.run_mcmc(p0, cls.niter)
def test_can_sample_is_true(self):
assert can_sample
def test_sample_results_object_initializes(self):
s = SamplingResults(self.sampler)
assert s.samples.shape[0] == self.nwalkers * self.niter
assert s.acceptance > 0.25
assert np.isclose(s.L,
s.acceptance * self.nwalkers * self.niter)
def test_check_convergence_works(self):
s = SamplingResultsDummy(self.sampler)
s._check_convergence(self.sampler)
assert hasattr(s, "rhat")
rhat_test = 0.038688
assert np.isclose(rhat_test, s.rhat[0], atol=0.02, rtol=0.1)
s._infer()
assert hasattr(s, "mean")
assert hasattr(s, "std")
assert hasattr(s, "ci")
test_mean = 2.0
test_std = 0.2
assert np.isclose(test_mean, s.mean[0], rtol=0.1)
assert np.isclose(test_std, s.std[0], atol=0.01, rtol=0.01)
assert s.ci.size == 2
def test_infer_computes_correct_values(self):
s = SamplingResults(self.sampler)
@pytest.fixture()
def logger():
logger = logging.getLogger('Some.Logger')
logger.setLevel(logging.INFO)
return logger
class TestPSDParEst(object):
@classmethod
def setup_class(cls):
m = 1
nfreq = 100
freq = np.linspace(0, 10.0, nfreq + 1)[1:]
rng = np.random.RandomState(100) # set the seed for the random number generator
noise = rng.exponential(size=nfreq)
cls.model = models.Lorentz1D() + models.Const1D()
cls.x_0_0 = 2.0
cls.fwhm_0 = 0.05
cls.amplitude_0 = 1000.0
cls.amplitude_1 = 2.0
cls.model.x_0_0 = cls.x_0_0
cls.model.fwhm_0 = cls.fwhm_0
cls.model.amplitude_0 = cls.amplitude_0
cls.model.amplitude_1 = cls.amplitude_1
p = cls.model(freq)
np.random.seed(400)
power = noise*p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1]-freq[0]
ps.norm = "leahy"
cls.ps = ps
cls.a_mean, cls.a_var = 2.0, 1.0
cls.a2_mean, cls.a2_var = 100.0, 10.0
p_amplitude_1 = lambda amplitude: \
scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)
p_x_0_0 = lambda alpha: \
scipy.stats.uniform(0.0, 5.0).pdf(alpha)
p_fwhm_0 = lambda alpha: \
scipy.stats.uniform(0.0, 0.5).pdf(alpha)
p_amplitude_0 = lambda amplitude: \
scipy.stats.norm(loc=cls.a2_mean, scale=cls.a2_var).pdf(amplitude)
cls.priors = {"amplitude_1": p_amplitude_1,
"amplitude_0": p_amplitude_0,
"x_0_0": p_x_0_0,
"fwhm_0": p_fwhm_0}
cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
cls.model, m=cls.ps.m)
cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
cls.fitmethod = "powell"
cls.max_post = True
cls.t0 = [cls.x_0_0, cls.fwhm_0, cls.amplitude_0, cls.amplitude_1]
cls.neg = True
@pytest.mark.parametrize("rebin", [0, 0.01])
def test_fitting_with_ties_and_bounds(self, capsys, rebin):
double_f = lambda model : model.x_0_0 * 2
model = self.model.copy()
model += models.Lorentz1D(amplitude=model.amplitude_0,
x_0 = model.x_0_0 * 2,
fwhm = model.fwhm_0)
model.x_0_0 = self.model.x_0_0
model.amplitude_0 = self.model.amplitude_0
model.amplitude_1 = self.model.amplitude_1
model.fwhm_0 = self.model.fwhm_0
model.x_0_2.tied = double_f
model.fwhm_0.bounds = [0, 10]
model.amplitude_0.fixed = True
p = model(self.ps.freq)
noise = np.random.exponential(size=len(p))
power = noise*p
ps = Powerspectrum()
ps.freq = self.ps.freq
ps.power = power
ps.m = self.ps.m
ps.df = self.ps.df
ps.norm = "leahy"
if rebin != 0:
ps = ps.rebin_log(rebin)
pe = PSDParEst(ps, fitmethod="TNC")
llike = PSDLogLikelihood(ps.freq, ps.power, model)
true_pars = [self.x_0_0, self.fwhm_0,
self.amplitude_1,
model.amplitude_2.value,
model.fwhm_2.value]
res = pe.fit(llike, true_pars, neg=True)
compare_pars = [self.x_0_0, self.fwhm_0,
self.amplitude_1,
model.amplitude_2.value,
model.fwhm_2.value]
assert np.allclose(compare_pars, res.p_opt, rtol=0.5)
def test_par_est_initializes(self):
pe = PSDParEst(self.ps)
assert pe.max_post is True, "max_post should be set to True as a default."
def test_fit_fails_when_object_is_not_posterior_or_likelihood(self):
x = np.ones(10)
y = np.ones(10)
pe = PSDParEst(self.ps)
with pytest.raises(TypeError):
res = pe.fit(x, y)
def test_fit_fails_without_lpost_or_t0(self):
pe = PSDParEst(self.ps)
with pytest.raises(TypeError):
res = pe.fit()
def test_fit_fails_without_t0(self):
pe = PSDParEst(self.ps)
with pytest.raises(TypeError):
res = pe.fit(np.ones(10))
def test_fit_fails_with_incorrect_number_of_parameters(self):
pe = PSDParEst(self.ps)
t0 = [1,2]
with pytest.raises(ValueError):
res = pe.fit(self.lpost, t0)
def test_fit_method_works_with_correct_parameter(self):
pe = PSDParEst(self.ps)
lpost = PSDPosterior(self.ps.freq, self.ps.power,
self.model, self.priors, m=self.ps.m)
t0 = [2.0, 1, 1, 1]
res = pe.fit(lpost, t0)
assert isinstance(res, OptimizationResults), "res must be of type " \
"OptimizationResults"
pe.plotfits(res, save_plot=True)
assert os.path.exists("test_ps_fit.png")
os.unlink("test_ps_fit.png")
pe.plotfits(res, save_plot=True, log=True)
assert os.path.exists("test_ps_fit.png")
os.unlink("test_ps_fit.png")
pe.plotfits(res, res2=res, save_plot=True)
assert os.path.exists("test_ps_fit.png")
os.unlink("test_ps_fit.png")
pe.plotfits(res, res2=res, log=True, save_plot=True)
assert os.path.exists("test_ps_fit.png")
os.unlink("test_ps_fit.png")
def test_compute_lrt_fails_when_garbage_goes_in(self):
pe = PSDParEst(self.ps)
t0 = [2.0, 1, 1, 1]
with pytest.raises(TypeError):
pe.compute_lrt(self.lpost, t0, None, t0)
with pytest.raises(ValueError):
pe.compute_lrt(self.lpost, t0[:-1], self.lpost, t0)
def test_compute_lrt_works(self):
t0 = [2.0, 1, 1, 1]
pe = PSDParEst(self.ps, max_post=True)
assert pe.max_post is True
delta_deviance, _, _ = pe.compute_lrt(self.lpost, t0, self.lpost, t0)
assert pe.max_post is False
assert np.absolute(delta_deviance) < 1.5e-4
def test_simulate_lrts_works(self):
m = 1
nfreq = 100
freq = np.linspace(1, 10, nfreq)
rng = np.random.RandomState(100)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
s_all = np.atleast_2d(np.ones(5) * 2.0).T
model2 = models.PowerLaw1D() + models.Const1D()
model2.x_0_0.fixed = True
loglike2 = PSDLogLikelihood(ps.freq, ps.power, model2, 1)
pe = PSDParEst(ps)
lrt_obs, res1, res2 = pe.compute_lrt(loglike, [2.0], loglike2,
[2.0, 1.0, 2.0], neg=True)
lrt_sim = pe.simulate_lrts(s_all, loglike, [2.0], loglike2,
[2.0, 1.0, 2.0],
seed=100)
assert (lrt_obs > 0.4) and (lrt_obs < 0.6)
assert np.all(lrt_sim < 10.0) and np.all(lrt_sim > 0.01)
def test_compute_lrt_fails_with_wrong_input(self):
pe = PSDParEst(self.ps)
with pytest.raises(AssertionError):
lrt_sim = pe.simulate_lrts(np.arange(5), self.lpost, [1, 2, 3, 4],
[1, 2, 3, 4], [1, 2, 3, 4])
def test_generate_model_data(self):
pe = PSDParEst(self.ps)
m = self.model
_fitter_to_model_params(m, self.t0)
model = m(self.ps.freq)
pe_model = pe._generate_model(self.lpost, [self.x_0_0, self.fwhm_0,
self.amplitude_0,
self.amplitude_1])
assert np.allclose(model, pe_model)
    def test_generate_data_rng_object_works(self):
pe = PSDParEst(self.ps)
sim_data1 = pe._generate_data(self.lpost,
[self.x_0_0, self.fwhm_0,
self.amplitude_0,
self.amplitude_1],
seed=1)
sim_data2 = pe._generate_data(self.lpost,
[self.x_0_0, self.fwhm_0,
self.amplitude_0,
self.amplitude_1],
seed=1)
assert np.allclose(sim_data1.power, sim_data2.power)
def test_generate_data_produces_correct_distribution(self):
model = models.Const1D()
model.amplitude = 2.0
p = model(self.ps.freq)
seed = 100
rng = np.random.RandomState(seed)
noise = rng.exponential(size=len(p))
power = noise*p
ps = Powerspectrum()
ps.freq = self.ps.freq
ps.power = power
ps.m = 1
ps.df = self.ps.freq[1]-self.ps.freq[0]
ps.norm = "leahy"
lpost = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
pe = PSDParEst(ps)
rng2 = np.random.RandomState(seed)
sim_data = pe._generate_data(lpost, [2.0], rng2)
assert np.allclose(ps.power, sim_data.power)
def test_generate_model_breaks_with_wrong_input(self):
pe = PSDParEst(self.ps)
with pytest.raises(AssertionError):
pe_model = pe._generate_model([1, 2, 3, 4], [1, 2, 3, 4])
def test_generate_model_breaks_for_wrong_number_of_parameters(self):
pe = PSDParEst(self.ps)
with pytest.raises(AssertionError):
pe_model = pe._generate_model(self.lpost, [1, 2, 3])
def test_pvalue_calculated_correctly(self):
a = [1, 1, 1, 2]
obs_val = 1.5
pe = PSDParEst(self.ps)
pval = pe._compute_pvalue(obs_val, a)
assert np.isclose(pval, 1./len(a))
def test_calibrate_lrt_fails_without_lpost_objects(self):
pe = PSDParEst(self.ps)
with pytest.raises(TypeError):
pval = pe.calibrate_lrt(self.lpost, [1, 2, 3, 4],
np.arange(10), np.arange(4))
def test_calibrate_lrt_fails_with_wrong_parameters(self):
pe = PSDParEst(self.ps)
with pytest.raises(ValueError):
pval = pe.calibrate_lrt(self.lpost, [1, 2, 3, 4],
self.lpost, [1, 2, 3])
def test_calibrate_lrt_works_as_expected(self):
m = 1
df = 0.01
freq = np.arange(df, 5 + df, df)
nfreq = freq.size
rng = np.random.RandomState(100)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = df
ps.norm = "leahy"
loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
s_all = np.atleast_2d(np.ones(10) * 2.0).T
model2 = models.PowerLaw1D() + models.Const1D()
model2.x_0_0.fixed = True
loglike2 = PSDLogLikelihood(ps.freq, ps.power, model2, m=1)
pe = PSDParEst(ps)
pval = pe.calibrate_lrt(loglike, [2.0], loglike2,
[2.0, 1.0, 2.0], sample=s_all,
max_post=False, nsim=5,
seed=100)
assert pval > 0.001
@pytest.mark.skipif("not can_sample")
def test_calibrate_lrt_works_with_sampling(self):
m = 1
nfreq = 100
freq = np.linspace(1, 10, nfreq)
rng = np.random.RandomState(100)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
lpost = PSDPosterior(ps.freq, ps.power, model, m=1)
p_amplitude_1 = lambda amplitude: \
scipy.stats.norm(loc=2.0, scale=1.0).pdf(amplitude)
p_alpha_0 = lambda alpha: \
scipy.stats.uniform(0.0, 5.0).pdf(alpha)
p_amplitude_0 = lambda amplitude: \
scipy.stats.norm(loc=self.a2_mean, scale=self.a2_var).pdf(
amplitude)
priors = {"amplitude": p_amplitude_1}
priors2 = {"amplitude_1": p_amplitude_1,
"amplitude_0": p_amplitude_0,
"alpha_0": p_alpha_0}
lpost.logprior = set_logprior(lpost, priors)
model2 = models.PowerLaw1D() + models.Const1D()
model2.x_0_0.fixed = True
lpost2 = PSDPosterior(ps.freq, ps.power, model2, 1)
lpost2.logprior = set_logprior(lpost2, priors2)
pe = PSDParEst(ps)
with catch_warnings(RuntimeWarning):
pval = pe.calibrate_lrt(lpost, [2.0], lpost2,
[2.0, 1.0, 2.0], sample=None,
max_post=True, nsim=10, nwalkers=10,
burnin=10, niter=10,
seed=100)
assert pval > 0.001
def test_find_highest_outlier_works_as_expected(self):
mp_ind = 5
max_power = 1000.0
ps = Powerspectrum()
ps.freq = np.arange(10)
ps.power = np.ones_like(ps.freq)
ps.power[mp_ind] = max_power
ps.m = 1
ps.df = ps.freq[1]-ps.freq[0]
ps.norm = "leahy"
pe = PSDParEst(ps)
max_x, max_ind = pe._find_outlier(ps.freq, ps.power, max_power)
assert np.isclose(max_x, ps.freq[mp_ind])
assert max_ind == mp_ind
def test_compute_highest_outlier_works(self):
mp_ind = 5
max_power = 1000.0
ps = Powerspectrum()
ps.freq = np.arange(10)
ps.power = np.ones_like(ps.freq)
ps.power[mp_ind] = max_power
ps.m = 1
ps.df = ps.freq[1]-ps.freq[0]
ps.norm = "leahy"
model = models.Const1D()
p_amplitude = lambda amplitude: \
scipy.stats.norm(loc=1.0, scale=1.0).pdf(
amplitude)
priors = {"amplitude": p_amplitude}
lpost = PSDPosterior(ps.freq, ps.power, model, 1)
lpost.logprior = set_logprior(lpost, priors)
pe = PSDParEst(ps)
res = pe.fit(lpost, [1.0])
res.mfit = np.ones_like(ps.freq)
max_y, max_x, max_ind = pe._compute_highest_outlier(lpost, res)
assert np.isclose(max_y[0], 2*max_power)
assert np.isclose(max_x[0], ps.freq[mp_ind])
assert max_ind == mp_ind
def test_simulate_highest_outlier_works(self):
m = 1
nfreq = 100
seed = 100
freq = np.linspace(1, 10, nfreq)
rng = np.random.RandomState(seed)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
nsim = 5
loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
s_all = np.atleast_2d(np.ones(nsim) * 2.0).T
pe = PSDParEst(ps)
maxpow_sim = pe.simulate_highest_outlier(s_all, loglike, [2.0],
max_post=False, seed=seed)
assert maxpow_sim.shape[0] == nsim
assert np.all(maxpow_sim > 9.00) and np.all(maxpow_sim < 31.0)
def test_calibrate_highest_outlier_works(self):
m = 1
nfreq = 100
seed = 100
freq = np.linspace(1, 10, nfreq)
rng = np.random.RandomState(seed)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
nsim = 5
loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
s_all = np.atleast_2d(np.ones(nsim) * 2.0).T
pe = PSDParEst(ps)
pval = pe.calibrate_highest_outlier(loglike, [2.0], sample=s_all,
max_post=False, seed=seed)
assert pval > 0.001
@pytest.mark.skipif("not can_sample")
def test_calibrate_highest_outlier_works_with_sampling(self):
m = 1
nfreq = 100
seed = 100
freq = np.linspace(1, 10, nfreq)
rng = np.random.RandomState(seed)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
nsim = 5
lpost = PSDPosterior(ps.freq, ps.power, model, m=1)
p_amplitude = lambda amplitude: \
scipy.stats.norm(loc=1.0, scale=1.0).pdf(
amplitude)
priors = {"amplitude": p_amplitude}
lpost.logprior = set_logprior(lpost, priors)
pe = PSDParEst(ps)
with catch_warnings(RuntimeWarning):
pval = pe.calibrate_highest_outlier(lpost, [2.0], sample=None,
max_post=True, seed=seed,
nsim=nsim, niter=10,
nwalkers=20, burnin=10)
assert pval > 0.001
|
|
from basescript import BaseScript
import os
import gc
import sys
import json
import time
import code
import inspect
import logging
import resource
import string
import random
import threading
import msgpack
import cStringIO
import traceback
import urlparse
from multiprocessing.pool import ThreadPool
import statsd
import requests
import tornado.ioloop
import tornado.web
import tornado.websocket
import tornado.iostream
from tornado.template import BaseLoader, Template
from tornado.web import StaticFileHandler, HTTPError
MSG_TYPE_INFO = 0
MSG_TYPE_CONSOLE = 1
MSG_TYPE_LOG = 2
MAX_LOG_FILE_SIZE = 100 * 1024 * 1024 # 100MB
def disable_requests_debug_logs():
# set the logging level of requests module to warning
# otherwise it swamps with too many logs
logging.getLogger('requests').setLevel(logging.WARNING)
class StatsCollector(object):
STATS_FLUSH_INTERVAL = 1
def __init__(self, prefix, stats_loc):
self.cache = {}
self.gauge_cache = {}
self.stats = None
if not stats_loc: return
port = None
if ':' in stats_loc:
ip, port = stats_loc.split(':')
port = int(port)
else:
ip = stats_loc
S = statsd.StatsClient
self.stats = S(ip, port, prefix) if port is not None else S(ip, prefix=prefix)
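        # Background daemon thread: every STATS_FLUSH_INTERVAL seconds, record
        # current RAM usage and flush the cached counters/gauges to statsd in a
        # single pipeline (see send()).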
def fn():
while 1:
time.sleep(self.STATS_FLUSH_INTERVAL)
self._collect_ramusage()
self.send()
self.stats_thread = threading.Thread(target=fn)
self.stats_thread.daemon = True
self.stats_thread.start()
def incr(self, key, n=1):
if self.stats is None: return
self.cache[key] = self.cache.get(key, 0) + n
def decr(self, key, n=1):
if self.stats is None: return
self.cache[key] = self.cache.get(key, 0) - n
def timing(self, key, ms):
if self.stats is None: return
return self.stats.timing(key, ms)
def gauge(self, key, n, delta=False):
if delta:
v, _ = self.gauge_cache.get(key, (0, True))
n += v
self.gauge_cache[key] = (n, delta)
def _collect_ramusage(self):
self.gauge('resource.maxrss',
resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)
def send(self):
if self.stats is None: return
p = self.stats.pipeline()
for k, v in self.cache.iteritems():
p.incr(k, v)
for k, (v, d) in self.gauge_cache.iteritems():
p.gauge(k, v, delta=d)
p.send()
self.cache = {}
self.gauge_cache = {}
def tag(*tags):
'''
Constructs a decorator that tags a function with specified
strings (@tags). The tags on the decorated function are
available via fn.tags
'''
def dfn(fn):
_tags = getattr(fn, 'tags', set())
_tags.update(tags)
fn.tags = _tags
return fn
return dfn
def get_fn_tags(fn):
return getattr(fn, 'tags', set())
def mime(mime):
'''
Constructs a decorator that sets the preferred mime type
to be written in the http response when returning the
function result.
'''
def dfn(fn):
fn.mime = mime
return fn
return dfn
def raw(mime='application/octet-stream'):
'''
Constructs a decorator that marks the fn
as raw response format
'''
def dfn(fn):
tags = getattr(fn, 'tags', set())
tags.add('raw')
fn.tags = tags
fn.mime = getattr(fn, 'mime', mime)
return fn
return dfn
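# Illustrative use of the decorators above on an API object (names are
# hypothetical; the API object is whatever Server.prepare_api() returns):
#
#   class MyAPI(object):
#       @mime('text/plain')     # only sets the response Content-Type header
#       def motd(self):
#           return 'hello'
#
#       @raw()                  # tagged 'raw': parts are written without serialization
#       def stream_log(self):
#           for line in open('server.log'):
#               yield line
#
#   get_fn_tags(MyAPI.stream_log)  # -> set(['raw'])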
class APIException(Exception):
    pass
class RPCCallException(Exception):
    pass
class BaseHandler(tornado.web.RequestHandler):
def __init__(self, application, request, **kwargs):
super(BaseHandler, self).__init__(application, request, **kwargs)
a = self.application
self.server = s = a.funcserver
self.stats = s.stats
self.log = s.log
self.api = s.api
def get_template_namespace(self):
ns = super(BaseHandler, self).get_template_namespace()
ns.update(sys.funcserver.define_template_namespace())
return ns
class PyInterpreter(code.InteractiveInterpreter):
def __init__(self, *args, **kwargs):
code.InteractiveInterpreter.__init__(self, *args, **kwargs)
self.output = []
def write(self, data):
self.output.append(data)
class WSConnection(tornado.websocket.WebSocketHandler):
'''
Websocket based communication channel between a
client and the server.
'''
WRITE_BUFFER_THRESHOLD = 1 * 1024 * 1024 # 1MB
def open(self, pysession_id):
'''
Called when client opens connection. Initialization
is done here.
'''
self.id = id(self)
self.funcserver = self.application.funcserver
self.pysession_id = pysession_id
# register this connection with node
self.state = self.funcserver.websocks[self.id] = {'id': self.id, 'sock': self}
def on_message(self, msg):
'''
Called when client sends a message.
Supports a python debugging console. This forms
the "eval" part of a standard read-eval-print loop.
Currently the only implementation of the python
console is in the WebUI but the implementation
of a terminal based console is planned.
'''
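        # Incoming messages are JSON of the form
        # {'id': <client message id>, 'code': <python source to run>}; the reply
        # echoes the id with type=MSG_TYPE_CONSOLE and the captured output.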
msg = json.loads(msg)
psession = self.funcserver.pysessions.get(self.pysession_id, None)
if psession is None:
interpreter = PyInterpreter(self.funcserver.define_python_namespace())
psession = dict(interpreter=interpreter, socks=set([self.id]))
self.funcserver.pysessions[self.pysession_id] = psession
else:
interpreter = psession['interpreter']
psession['socks'].add(self.id)
code = msg['code']
msg_id = msg['id']
stdout = sys.stdout
try:
sys.stdout = cStringIO.StringIO()
interpreter.runsource(code)
output = sys.stdout.getvalue() or interpreter.output
if isinstance(output, list): output = ''.join(output)
interpreter.output = []
finally:
sys.stdout = stdout
msg = {'type': MSG_TYPE_CONSOLE, 'id': msg_id, 'data': output}
self.send_message(msg)
def on_close(self):
'''
Called when client closes this connection. Cleanup
is done here.
'''
if self.id in self.funcserver.websocks:
self.funcserver.websocks[self.id] = None
ioloop = tornado.ioloop.IOLoop.instance()
ioloop.add_callback(lambda: self.funcserver.websocks.pop(self.id, None))
psession = self.funcserver.pysessions.get(self.pysession_id, None)
if psession:
psession['socks'].remove(self.id)
if not psession['socks']:
del self.funcserver.pysessions[self.pysession_id]
def send_message(self, msg, binary=False):
# TODO: check if following two lines are required
# tornado documentation seems to indicate that
# this might be handled internally.
if not isinstance(msg, str):
msg = json.dumps(msg)
try:
if self.ws_connection:
self.write_message(msg, binary=binary)
except tornado.iostream.StreamClosedError:
self.on_close()
@property
def is_buffer_full(self):
bsize = sum([len(x) for x in self.stream._write_buffer])
return bsize >= self.WRITE_BUFFER_THRESHOLD
def _msg_from(self, msg):
return {'type': msg.get('type', ''), 'id': msg['id']}
def call(fn):
ioloop = tornado.ioloop.IOLoop.instance()
ioloop.add_callback(fn)
def make_handler(template, handler):
class SimpleHandler(handler):
def get(self):
return self.render(template)
return SimpleHandler
def resolve_path(path):
return path if os.path.isabs(path) else os.path.join(os.path.dirname(__file__), path)
class TemplateLoader(BaseLoader):
def __init__(self, dirs=None, **kwargs):
super(TemplateLoader, self).__init__(**kwargs)
self.dirs = dirs or []
def add_dir(self, d):
self.dirs.append(d)
def del_dir(self, d):
self.dirs.remove(d)
def resolve_path(self, name, parent_path=None):
for d in reversed(self.dirs):
p = os.path.join(d, name)
if not os.path.exists(p): continue
return os.path.abspath(p)
return name
def _create_template(self, name):
f = open(name, 'rb')
template = Template(f.read(), name=name, loader=self)
f.close()
return template
class CustomStaticFileHandler(StaticFileHandler):
PATHS = []
@classmethod
def get_absolute_path(cls, root, path):
for p in reversed(cls.PATHS):
ap = os.path.join(p, path)
if not os.path.exists(ap):
continue
return ap
return path
def validate_absolute_path(self, root, absolute_path):
if (os.path.isdir(absolute_path) and
self.default_filename is not None):
# need to look at the request.path here for when path is empty
# but there is some prefix to the path that was already
# trimmed by the routing
if not self.request.path.endswith("/"):
self.redirect(self.request.path + "/", permanent=True)
return
absolute_path = os.path.join(absolute_path, self.default_filename)
if not os.path.exists(absolute_path):
raise HTTPError(404)
if not os.path.isfile(absolute_path):
raise HTTPError(403, "%s is not a file", self.path)
return absolute_path
class RPCHandler(BaseHandler):
def _get_apifn(self, fn_name):
obj = self.api
for part in fn_name.split('.'):
obj = getattr(obj, part)
return obj
def _clean_kwargs(self, kwargs, fn):
'''
Remove unexpected keyword arguments from the
set of received keyword arguments.
'''
# Do not do the cleaning if server config
# doesnt ask to ignore
if not self.server.IGNORE_UNEXPECTED_KWARGS:
return kwargs
expected_kwargs = set(inspect.getargspec(fn).args)
got_kwargs = set(kwargs.keys())
unexpected_kwargs = got_kwargs - expected_kwargs
for k in unexpected_kwargs:
del kwargs[k]
return kwargs
def _handle_single_call(self, request, m):
fn_name = m.get('fn', None)
sname = 'api.%s' % fn_name
t = time.time()
try:
fn = self._get_apifn(fn_name)
self.stats.incr(sname)
args = m['args']
kwargs = self._clean_kwargs(m['kwargs'], fn)
self.server.on_api_call_start(fn_name, args, kwargs, self)
if self.get_status() == 304:
return
r = fn(*args, **kwargs)
r = {'success': True, 'result': r}
except Exception, e:
self.log.exception('Exception during RPC call. '
'fn=%s, args=%s, kwargs=%s' % \
(m.get('fn', ''), repr(m.get('args', '[]')),
repr(m.get('kwargs', '{}'))))
r = {'success': False, 'result': repr(e)}
finally:
tdiff = (time.time() - t) * 1000
self.stats.timing(sname, tdiff)
try:
_r = self.server.on_api_call_end(fn_name, args, kwargs, self, r)
if _r is not None:
r = _r
except (SystemExit, KeyboardInterrupt): raise
except:
self.log.exception('In on_api_call_end for fn=%s' % fn_name)
return r
def _handle_call(self, request, fn, m, protocol):
if fn != '__batch__':
r = self._handle_single_call(request, m)
else:
# Batch calls
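            # m['calls'] is a list of {'fn', 'args', 'kwargs'} dicts; each call is
            # executed in order and collapsed to its 'result' value (None on
            # failure). Streaming (generator) results are rejected in batch mode.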
r = []
for call in m['calls']:
_r = self._handle_single_call(request, call)
# If the func invoked above is a streaming function, then fail
# this operation as we don't handle streaming functions in batch mode
if inspect.isgenerator(_r.get('result')):
raise APIException('Cannot invoke streaming API fn in batch mode')
if isinstance(_r, dict) and 'success' in _r:
_r = _r['result'] if _r['success'] else None
r.append(_r)
if self.get_status() == 304:
return
# Get the API function object
fnobj = self._get_apifn(fn) if fn != '__batch__' else (lambda: 0)
# Set response header based on chosen serialization mechanism
mime = getattr(fnobj, 'mime', self.get_mime(protocol))
self.set_header('Content-Type', mime)
is_raw = 'raw' in get_fn_tags(fnobj)
serializer = (lambda x: x) if is_raw else self.get_serializer(protocol)
if fn == '__batch__' or not r['success']:
r = serializer(r)
self.set_header('Content-Length', len(r))
self.write(r)
return
result = r['result']
if not inspect.isgenerator(result):
# Full response is available - Write it out in one shot
r = serializer(r)
self.set_header('Content-Length', len(r))
self.write(r)
return
# Streaming response - iterate and write out
for part in result:
part = serializer(part)
self.write(part)
sep = '\n' if is_raw else self.get_record_separator(protocol)
if sep: self.write(sep)
self.flush()
def get_record_separator(self, protocol):
return {'msgpack': '',
'json': '\n',
'python': '\n'}.get(protocol, self.server.SERIALIZER_RECORD_SEP)
def get_serializer(self, name):
return {'msgpack': msgpack.packb,
'json': json.dumps,
'python': repr}.get(name, self.server.SERIALIZER)
def get_deserializer(self, name):
        return {'msgpack': msgpack.unpackb,
                'json': json.loads,
                'python': eval}.get(name, self.server.DESERIALIZER)
def get_mime(self, name):
return {'msgpack': 'application/x-msgpack',
'json': 'application/json',
'python': 'application/x-python'}\
.get(name, self.server.MIME)
def _handle_call_wrapper(self, request, fn, m, protocol):
try:
return self._handle_call(request, fn, m, protocol)
except Exception, e:
self.log.exception('Exception during RPC call. '
'fn=%s, args=%s, kwargs=%s' % \
(m.get('fn', ''), repr(m.get('args', '[]')),
repr(m.get('kwargs', '{}'))))
self.clear()
self.set_status(500)
finally:
self.finish()
@tornado.web.asynchronous
def post(self, protocol='default'):
m = self.get_deserializer(protocol)(self.request.body)
fn = m['fn']
self.server.threadpool.apply_async(lambda: self._handle_call_wrapper(self.request, fn, m, protocol))
def failsafe_json_decode(self, v):
try: v = json.loads(v)
except ValueError: pass
return v
@tornado.web.asynchronous
def get(self, protocol='default'):
D = self.failsafe_json_decode
args = dict([(k, D(v[0]) if len(v) == 1 else [D(x) for x in v])\
for k, v in self.request.arguments.iteritems()])
fn = args.pop('fn')
m = dict(kwargs=args, fn=fn, args=[])
self.server.threadpool.apply_async(lambda: self._handle_call_wrapper(self.request, fn, m, protocol))
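    # Illustrative GET call (function name is hypothetical): a request like
    #   GET /rpc/json?fn=echo&msg="hi"
    # is dispatched as api.echo(msg='hi') with JSON serialization; query values
    # are JSON-decoded where possible, otherwise passed through as strings.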
class Server(BaseScript):
NAME = 'FuncServer'
DESC = 'Default Functionality Server'
DEFAULT_PORT = 9345
VIRTUAL_HOST = r'.*'
STATIC_PATH = 'static'
TEMPLATE_PATH = 'templates'
APP_CLASS = tornado.web.Application
RPC_HANDLER_CLASS = RPCHandler
SERIALIZER = staticmethod(msgpack.packb)
SERIALIZER_RECORD_SEP = ''
DESERIALIZER = staticmethod(msgpack.unpackb)
MIME = 'application/x-msgpack'
IGNORE_UNEXPECTED_KWARGS = False
# Number of worker threads in the threadpool
THREADPOOL_WORKERS = 32
DISABLE_REQUESTS_DEBUG_LOGS = True
def __init__(self):
self.log_id = 0
# all active websockets and their state
self.websocks = {}
# all active python interpreter sessions
self.pysessions = {}
super(Server, self).__init__()
if self.DISABLE_REQUESTS_DEBUG_LOGS:
disable_requests_debug_logs()
self.stats = self.create_stats()
self.threadpool = ThreadPool(self.THREADPOOL_WORKERS)
self.api = None
# tornado app object
base_handlers = self.prepare_base_handlers()
handlers = self.prepare_handlers()
self.template_loader = TemplateLoader([resolve_path(self.TEMPLATE_PATH)])
_ = self.prepare_template_loader(self.template_loader)
if _ is not None: self.template_loader = _
shclass = CustomStaticFileHandler
shclass.PATHS.append(resolve_path(self.STATIC_PATH))
_ = self.prepare_static_paths(shclass.PATHS)
if _ is not None: shclass.PATHS = _
self.static_handler_class = shclass
self.nav_tabs = [('Home', '/')]
if self.args.debug:
self.nav_tabs += [('Console', '/console'), ('Logs', '/logs')]
self.nav_tabs = self.prepare_nav_tabs(self.nav_tabs)
settings = {
'static_path': '<DUMMY-INEXISTENT-PATH>',
'static_handler_class': self.static_handler_class,
'template_loader': self.template_loader,
'compress_response': True,
'debug': self.args.debug,
}
all_handlers = handlers + base_handlers
self.app = self.APP_CLASS(**settings)
self.app.add_handlers(self.VIRTUAL_HOST, all_handlers)
sys.funcserver = self.app.funcserver = self
def create_stats(self):
stats_prefix = '.'.join([x for x in (self.hostname, self.name) if x])
return StatsCollector(stats_prefix, self.args.statsd_server)
def dump_stacks(self):
'''
Dumps the stack of all threads. This function
is meant for debugging. Useful when a deadlock happens.
borrowed from: http://blog.ziade.org/2012/05/25/zmq-and-gevent-debugging-nightmares/
'''
dump = []
# threads
threads = dict([(th.ident, th.name)
for th in threading.enumerate()])
for thread, frame in sys._current_frames().items():
if thread not in threads: continue
dump.append('Thread 0x%x (%s)\n' % (thread, threads[thread]))
dump.append(''.join(traceback.format_stack(frame)))
dump.append('\n')
return ''.join(dump)
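    # dump_stacks() is handy from the /console debug page, where `server` is in
    # the interpreter namespace (see define_python_namespace), e.g.:
    #   print server.dump_stacks()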
@property
def name(self):
return '.'.join([x for x in (self.NAME, self.args.name) if x])
def new_pysession(self):
chars = list(set(string.letters + string.digits))
name = ''.join([random.choice(chars) for i in xrange(10)])
if name in self.pysessions:
return self.new_pysession()
return name
def define_args(self, parser):
super(Server, self).define_args(parser)
parser.add_argument('--port', default=self.DEFAULT_PORT,
type=int, help='port to listen on for server')
parser.add_argument('--statsd-server', default=None,
help='Location of StatsD server to send statistics. '
'Format is ip[:port]. Eg: localhost, localhost:8125')
parser.add_argument('--debug', action='store_true',
help='When enabled, auto reloads server on code change')
def define_log_pre_format_hooks(self):
hooks = super(Server, self).define_log_pre_format_hooks()
hooks.append(self._send_log_to_ws)
return hooks
def _send_log_to_ws(self, msg):
msg = {'type': MSG_TYPE_LOG, 'id': self.log_id, 'data': msg}
bad_ws = []
for _id, ws in self.websocks.iteritems():
if ws is None: bad_ws.append(_id); continue
ws['sock'].send_message(msg)
for _id in bad_ws: del self.websocks[_id]
self.log_id += 1
def prepare_base_handlers(self):
# Tornado URL handlers for core functionality
debug_mode_only = [
(r'/ws/(.*)', WSConnection),
(r'/logs', make_handler('logs.html', BaseHandler)),
(r'/console', make_handler('console.html', BaseHandler)),
]
others = [
(r'/', make_handler('home.html', BaseHandler)),
(r'/rpc(?:/([^/]*)/?)?', self.RPC_HANDLER_CLASS),
]
if self.args.debug:
return debug_mode_only + others
else:
return others
def prepare_handlers(self):
# Tornado URL handlers for additional functionality
return []
def prepare_template_loader(self, loader):
# add additional template dirs by using
# loader.add_dir(path)
return loader
def prepare_static_paths(self, paths):
# add static paths that can contain
# additional of override files
# eg: paths.append(PATH)
return paths
def prepare_nav_tabs(self, nav_tabs):
# Add additional tab buttons in the UI toolbar
# eg: nav_tabs.append(('MyTab', '/mytab'))
return nav_tabs
def define_python_namespace(self):
return {'server': self, 'logging': logging, 'call': call, 'api': self.api}
def define_template_namespace(self):
return self.define_python_namespace()
def on_api_call_start(self, fn, args, kwargs, handler):
pass
def on_api_call_end(self, fn, args, kwargs, handler, result):
return result
def prepare_api(self):
'''
Prepare the API object that is exposed as
functionality by the Server
'''
return None
def run(self):
self.api = self.prepare_api()
if self.api is not None and not hasattr(self.api, 'log'):
self.api.log = self.log
if self.args.port != 0:
self.app.listen(self.args.port)
tornado.ioloop.IOLoop.instance().start()
def _passthrough(name):
def fn(self, *args, **kwargs):
p = self.prefix + '.' + name
if self.bound or self.parent is None:
return self._call(p, args, kwargs)
else:
return self.parent._call(p, args, kwargs)
return fn
class Client(object):
SERIALIZER = staticmethod(msgpack.packb)
DESERIALIZER = staticmethod(msgpack.unpackb)
DISABLE_REQUESTS_DEBUG_LOGS = True
def __init__(self, server_url, prefix=None, parent=None):
self.server_url = server_url
self.rpc_url = urlparse.urljoin(server_url, 'rpc')
self.is_batch = False
self.prefix = prefix
self.parent = parent
self.bound = False
self._calls = []
if self.DISABLE_REQUESTS_DEBUG_LOGS:
disable_requests_debug_logs()
def __getattr__(self, attr):
prefix = self.prefix + '.' + attr if self.prefix else attr
return self.__class__(self.server_url, prefix=prefix,
parent=self if self.bound else self.parent)
def get_handle(self):
self.bound = True
return self
def __call__(self, *args, **kwargs):
if self.bound or self.parent is None:
return self._call(self.prefix, args, kwargs)
else:
return self.parent._call(self.prefix, args, kwargs)
def _call(self, fn, args, kwargs):
if not self.is_batch:
return self._do_single_call(fn, args, kwargs)
else:
self._calls.append(dict(fn=fn, args=args, kwargs=kwargs))
__getitem__ = _passthrough('__getitem__')
__setitem__ = _passthrough('__setitem__')
__delitem__ = _passthrough('__delitem__')
__contains__ = _passthrough('__contains__')
__len__ = _passthrough('__len__')
def __nonzero__(self): return True
def set_batch(self):
self.is_batch = True
def unset_batch(self):
self.is_batch = False
def _do_single_call(self, fn, args, kwargs):
m = self.SERIALIZER(dict(fn=fn, args=args, kwargs=kwargs))
req = requests.post(self.rpc_url, data=m)
res = self.DESERIALIZER(req.content)
if not res['success']:
raise RPCCallException(res['result'])
else:
return res['result']
def execute(self):
if not self._calls: return
m = dict(fn='__batch__', calls=self._calls)
m = self.SERIALIZER(m)
req = requests.post(self.rpc_url, data=m)
res = self.DESERIALIZER(req.content)
self._calls = []
return res
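# Illustrative Client usage (URL and API names are hypothetical):
#
#   client = Client('http://localhost:9345/').get_handle()
#   client.ping()                   # single RPC -> api.ping() on the server
#
#   client.set_batch()
#   client.ping()                   # queued, not sent yet
#   client.math.add(2, 3)           # nested attributes map to api.math.add
#   results = client.execute()      # one '__batch__' POST for all queued calls
#   client.unset_batch()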
if __name__ == '__main__':
Server().start()
|
|
# coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class CategoryVideosApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def get_category_video_player(self, video_id, **kwargs):
"""
Get video player
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_category_video_player(video_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int video_id: Video ID to fetch (required)
:param int customer_id: Customer ID to fetch
:param int country_id: Country ID to use in video analytics
:return: Player
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_category_video_player_with_http_info(video_id, **kwargs)
else:
(data) = self.get_category_video_player_with_http_info(video_id, **kwargs)
return data
def get_category_video_player_with_http_info(self, video_id, **kwargs):
"""
Get video player
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_category_video_player_with_http_info(video_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int video_id: Video ID to fetch (required)
:param int customer_id: Customer ID to fetch
:param int country_id: Country ID to use in video analytics
:return: Player
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['video_id', 'customer_id', 'country_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_category_video_player" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'video_id' is set
if ('video_id' not in params) or (params['video_id'] is None):
raise ValueError("Missing the required parameter `video_id` when calling `get_category_video_player`")
collection_formats = {}
resource_path = '/categories/videos/{video_id}/player'.replace('{format}', 'json')
path_params = {}
if 'video_id' in params:
path_params['video_id'] = params['video_id']
query_params = {}
if 'customer_id' in params:
query_params['customer_id'] = params['customer_id']
if 'country_id' in params:
query_params['country_id'] = params['country_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Player',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_category_video_subtitles(self, video_id, **kwargs):
"""
Get subtitles of a video
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_category_video_subtitles(video_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int video_id: Video ID to fetch (required)
:param int page:
:param int per_page:
:return: SubtitleListResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_category_video_subtitles_with_http_info(video_id, **kwargs)
else:
(data) = self.get_category_video_subtitles_with_http_info(video_id, **kwargs)
return data
def get_category_video_subtitles_with_http_info(self, video_id, **kwargs):
"""
Get subtitles of a video
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_category_video_subtitles_with_http_info(video_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int video_id: Video ID to fetch (required)
:param int page:
:param int per_page:
:return: SubtitleListResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['video_id', 'page', 'per_page']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_category_video_subtitles" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'video_id' is set
if ('video_id' not in params) or (params['video_id'] is None):
raise ValueError("Missing the required parameter `video_id` when calling `get_category_video_subtitles`")
collection_formats = {}
resource_path = '/categories/videos/{video_id}/subtitles'.replace('{format}', 'json')
path_params = {}
if 'video_id' in params:
path_params['video_id'] = params['video_id']
query_params = {}
if 'page' in params:
query_params['page'] = params['page']
if 'per_page' in params:
query_params['per_page'] = params['per_page']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SubtitleListResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_videos_from_categories(self, **kwargs):
"""
Get Videos attached to Categories
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_videos_from_categories(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int page:
:param int per_page:
:param str sort_by: Sort by this attribute (id by default)
:param str sort_direction: Sorting direction (asc by default)
:return: VideoCategoryListResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_videos_from_categories_with_http_info(**kwargs)
else:
(data) = self.get_videos_from_categories_with_http_info(**kwargs)
return data
def get_videos_from_categories_with_http_info(self, **kwargs):
"""
Get Videos attached to Categories
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_videos_from_categories_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int page:
:param int per_page:
:param str sort_by: Sort by this attribute (id by default)
:param str sort_direction: Sorting direction (asc by default)
:return: VideoCategoryListResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page', 'per_page', 'sort_by', 'sort_direction']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_videos_from_categories" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/categories/videos'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'page' in params:
query_params['page'] = params['page']
if 'per_page' in params:
query_params['per_page'] = params['per_page']
if 'sort_by' in params:
query_params['sort_by'] = params['sort_by']
if 'sort_direction' in params:
query_params['sort_direction'] = params['sort_direction']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='VideoCategoryListResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_videos_from_category(self, category_id, **kwargs):
"""
Get Videos attached to Category
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_videos_from_category(category_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int category_id: Category ID to fetch (required)
:param int page:
:param int per_page:
:param str sort_by: Sort by this attribute (id by default)
:param str sort_direction: Sorting direction (asc by default)
:return: VideoCategoryListResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_videos_from_category_with_http_info(category_id, **kwargs)
else:
(data) = self.get_videos_from_category_with_http_info(category_id, **kwargs)
return data
def get_videos_from_category_with_http_info(self, category_id, **kwargs):
"""
Get Videos attached to Category
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_videos_from_category_with_http_info(category_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int category_id: Category ID to fetch (required)
:param int page:
:param int per_page:
:param str sort_by: Sort by this attribute (id by default)
:param str sort_direction: Sorting direction (asc by default)
:return: VideoCategoryListResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['category_id', 'page', 'per_page', 'sort_by', 'sort_direction']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_videos_from_category" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'category_id' is set
if ('category_id' not in params) or (params['category_id'] is None):
raise ValueError("Missing the required parameter `category_id` when calling `get_videos_from_category`")
collection_formats = {}
resource_path = '/categories/{category_id}/videos'.replace('{format}', 'json')
path_params = {}
if 'category_id' in params:
path_params['category_id'] = params['category_id']
query_params = {}
if 'page' in params:
query_params['page'] = params['page']
if 'per_page' in params:
query_params['per_page'] = params['per_page']
if 'sort_by' in params:
query_params['sort_by'] = params['sort_by']
if 'sort_direction' in params:
query_params['sort_direction'] = params['sort_direction']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='VideoCategoryListResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
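# --- Usage sketch (editorial addition, not generated code) --------------------
# A minimal, hypothetical example of calling the category/video endpoints
# defined above. `categories_api` stands for an already-configured instance of
# the generated API class; its construction (API client, credentials) is not
# shown in this file, so treat that name as an assumption.
#
#   # Synchronous call: returns a VideoCategoryListResponse
#   response = categories_api.get_videos_from_category(
#       category_id=42, page=1, per_page=25,
#       sort_by='id', sort_direction='asc')
#
#   # Asynchronous call: passing `callback` returns the request thread instead
#   thread = categories_api.get_videos_from_category(
#       42, callback=lambda resp: print(resp))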
|
|
from __future__ import unicode_literals
from datetime import datetime, timedelta
import threading
import warnings
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.db import connections, DEFAULT_DB_ALIAS
from django.db import DatabaseError
from django.db.models.fields import Field
from django.db.models.fields.related import ForeignObjectRel
from django.db.models.manager import BaseManager
from django.db.models.query import QuerySet, EmptyQuerySet, ValuesListQuerySet, MAX_GET_RESULTS
from django.test import TestCase, TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature
from django.utils import six
from django.utils.translation import ugettext_lazy
from .models import Article, SelfRef, ArticleSelectOnSave
class ModelInstanceCreationTests(TestCase):
def test_object_is_not_written_to_database_until_save_was_called(self):
a = Article(
id=None,
headline='Area man programs in Python',
pub_date=datetime(2005, 7, 28),
)
self.assertIsNone(a.id)
self.assertEqual(Article.objects.all().count(), 0)
# Save it into the database. You have to call save() explicitly.
a.save()
self.assertIsNotNone(a.id)
self.assertEqual(Article.objects.all().count(), 1)
def test_can_initialize_model_instance_using_positional_arguments(self):
"""
You can initialize a model instance using positional arguments,
which should match the field order as defined in the model.
"""
a = Article(None, 'Second article', datetime(2005, 7, 29))
a.save()
self.assertEqual(a.headline, 'Second article')
self.assertEqual(a.pub_date, datetime(2005, 7, 29, 0, 0))
def test_can_create_instance_using_kwargs(self):
a = Article(
id=None,
headline='Third article',
pub_date=datetime(2005, 7, 30),
)
a.save()
self.assertEqual(a.headline, 'Third article')
self.assertEqual(a.pub_date, datetime(2005, 7, 30, 0, 0))
def test_autofields_generate_different_values_for_each_instance(self):
a1 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
a2 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
a3 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
self.assertNotEqual(a3.id, a1.id)
self.assertNotEqual(a3.id, a2.id)
def test_can_mix_and_match_position_and_kwargs(self):
# You can also mix and match position and keyword arguments, but
# be sure not to duplicate field information.
a = Article(None, 'Fourth article', pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Fourth article')
def test_cannot_create_instance_with_invalid_kwargs(self):
six.assertRaisesRegex(
self,
TypeError,
"'foo' is an invalid keyword argument for this function",
Article,
id=None,
headline='Some headline',
pub_date=datetime(2005, 7, 31),
foo='bar',
)
def test_can_leave_off_value_for_autofield_and_it_gets_value_on_save(self):
"""
You can leave off the value for an AutoField when creating an
object, because it'll get filled in automatically when you save().
"""
a = Article(headline='Article 5', pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Article 5')
self.assertNotEqual(a.id, None)
def test_leaving_off_a_field_with_default_set_the_default_will_be_saved(self):
a = Article(pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Default headline')
def test_for_datetimefields_saves_as_much_precision_as_was_given(self):
"""as much precision in *seconds*"""
a1 = Article(
headline='Article 7',
pub_date=datetime(2005, 7, 31, 12, 30),
)
a1.save()
self.assertEqual(Article.objects.get(id__exact=a1.id).pub_date,
datetime(2005, 7, 31, 12, 30))
a2 = Article(
headline='Article 8',
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a2.save()
self.assertEqual(Article.objects.get(id__exact=a2.id).pub_date,
datetime(2005, 7, 31, 12, 30, 45))
def test_saving_an_object_again_does_not_create_a_new_object(self):
a = Article(headline='original', pub_date=datetime(2014, 5, 16))
a.save()
current_id = a.id
a.save()
self.assertEqual(a.id, current_id)
a.headline = 'Updated headline'
a.save()
self.assertEqual(a.id, current_id)
def test_querysets_checking_for_membership(self):
headlines = [
'Area man programs in Python', 'Second article', 'Third article']
some_pub_date = datetime(2014, 5, 16, 12, 1)
for headline in headlines:
Article(headline=headline, pub_date=some_pub_date).save()
a = Article(headline='Some headline', pub_date=some_pub_date)
a.save()
# You can use 'in' to test for membership...
self.assertIn(a, Article.objects.all())
# ... but there will often be more efficient ways if that is all you need:
self.assertTrue(Article.objects.filter(id=a.id).exists())
class ModelTest(TestCase):
def test_objects_attribute_is_only_available_on_the_class_itself(self):
six.assertRaisesRegex(
self,
AttributeError,
"Manager isn't accessible via Article instances",
getattr,
Article(),
"objects",
)
self.assertFalse(hasattr(Article(), 'objects'))
self.assertTrue(hasattr(Article, 'objects'))
def test_queryset_delete_removes_all_items_in_that_queryset(self):
headlines = [
'An article', 'Article One', 'Amazing article', 'Boring article']
some_pub_date = datetime(2014, 5, 16, 12, 1)
for headline in headlines:
Article(headline=headline, pub_date=some_pub_date).save()
self.assertQuerysetEqual(Article.objects.all().order_by('headline'),
["<Article: Amazing article>",
"<Article: An article>",
"<Article: Article One>",
"<Article: Boring article>"])
Article.objects.filter(headline__startswith='A').delete()
self.assertQuerysetEqual(Article.objects.all().order_by('headline'),
["<Article: Boring article>"])
def test_not_equal_and_equal_operators_behave_as_expected_on_instances(self):
some_pub_date = datetime(2014, 5, 16, 12, 1)
a1 = Article.objects.create(headline='First', pub_date=some_pub_date)
a2 = Article.objects.create(headline='Second', pub_date=some_pub_date)
self.assertNotEqual(a1, a2)
self.assertEqual(a1, Article.objects.get(id__exact=a1.id))
self.assertNotEqual(Article.objects.get(id__exact=a1.id), Article.objects.get(id__exact=a2.id))
def test_multiple_objects_max_num_fetched(self):
"""
#6785 - get() should fetch a limited number of results.
"""
Article.objects.bulk_create(
Article(headline='Area %s' % i, pub_date=datetime(2005, 7, 28))
for i in range(MAX_GET_RESULTS)
)
six.assertRaisesRegex(
self,
MultipleObjectsReturned,
"get\(\) returned more than one Article -- it returned %d!" % MAX_GET_RESULTS,
Article.objects.get,
headline__startswith='Area',
)
Article.objects.create(headline='Area %s' % MAX_GET_RESULTS, pub_date=datetime(2005, 7, 28))
six.assertRaisesRegex(
self,
MultipleObjectsReturned,
"get\(\) returned more than one Article -- it returned more than %d!" % MAX_GET_RESULTS,
Article.objects.get,
headline__startswith='Area',
)
@skipUnlessDBFeature('supports_microsecond_precision')
def test_microsecond_precision(self):
# In PostgreSQL, microsecond-level precision is available.
a9 = Article(
headline='Article 9',
pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
)
a9.save()
self.assertEqual(Article.objects.get(pk=a9.pk).pub_date,
datetime(2005, 7, 31, 12, 30, 45, 180))
@skipIfDBFeature('supports_microsecond_precision')
def test_microsecond_precision_not_supported(self):
# In MySQL, microsecond-level precision isn't available. You'll lose
# microsecond-level precision once the data is saved.
a9 = Article(
headline='Article 9',
pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
)
a9.save()
self.assertEqual(Article.objects.get(id__exact=a9.id).pub_date,
datetime(2005, 7, 31, 12, 30, 45))
def test_manually_specify_primary_key(self):
# You can manually specify the primary key when creating a new object.
a101 = Article(
id=101,
headline='Article 101',
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a101.save()
a101 = Article.objects.get(pk=101)
self.assertEqual(a101.headline, 'Article 101')
def test_create_method(self):
# You can create saved objects in a single step
a10 = Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
self.assertEqual(Article.objects.get(headline="Article 10"), a10)
def test_year_lookup_edge_case(self):
# Edge-case test: A year lookup should retrieve all objects in
# the given year, including Jan. 1 and Dec. 31.
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
self.assertQuerysetEqual(Article.objects.filter(pub_date__year=2008),
["<Article: Article 11>", "<Article: Article 12>"])
def test_unicode_data(self):
# Unicode data works, too.
a = Article(
headline='\u6797\u539f \u3081\u3050\u307f',
pub_date=datetime(2005, 7, 28),
)
a.save()
self.assertEqual(Article.objects.get(pk=a.id).headline,
'\u6797\u539f \u3081\u3050\u307f')
def test_hash_function(self):
# Model instances have a hash function, so they can be used in sets
# or as dictionary keys. Two models compare as equal if their primary
# keys are equal.
a10 = Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a11 = Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
a12 = Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
s = {a10, a11, a12}
self.assertIn(Article.objects.get(headline='Article 11'), s)
def test_field_ordering(self):
"""
Field instances have a `__lt__` comparison function to define an
        ordering based on their creation. Prior to #17851 this ordering
        comparison relied on the now-unsupported `__cmp__` and assumed that
        both compared objects were Field instances, raising `AttributeError`
        when it should have returned `NotImplemented`.
"""
f1 = Field()
f2 = Field(auto_created=True)
f3 = Field()
self.assertLess(f2, f1)
self.assertGreater(f3, f1)
self.assertIsNotNone(f1)
self.assertNotIn(f2, (None, 1, ''))
def test_extra_method_select_argument_with_dashes_and_values(self):
# The 'select' argument to extra() supports names with dashes in
# them, as long as you use values().
Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
dicts = Article.objects.filter(
pub_date__year=2008).extra(
select={'dashed-value': '1'}).values('headline', 'dashed-value')
self.assertEqual([sorted(d.items()) for d in dicts],
[[('dashed-value', 1), ('headline', 'Article 11')], [('dashed-value', 1), ('headline', 'Article 12')]])
def test_extra_method_select_argument_with_dashes(self):
# If you use 'select' with extra() and names containing dashes on a
# query that's *not* a values() query, those extra 'select' values
# will silently be ignored.
Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
articles = Article.objects.filter(
pub_date__year=2008).extra(select={'dashed-value': '1', 'undashedvalue': '2'})
self.assertEqual(articles[0].undashedvalue, 2)
def test_create_relation_with_ugettext_lazy(self):
"""
Test that ugettext_lazy objects work when saving model instances
through various methods. Refs #10498.
"""
notlazy = 'test'
lazy = ugettext_lazy(notlazy)
Article.objects.create(headline=lazy, pub_date=datetime.now())
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
# test that assign + save works with Promise objects
article.headline = lazy
article.save()
self.assertEqual(article.headline, notlazy)
# test .update()
Article.objects.update(headline=lazy)
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
        # also test bulk_create()
Article.objects.all().delete()
Article.objects.bulk_create([Article(headline=lazy, pub_date=datetime.now())])
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
def test_emptyqs(self):
# Can't be instantiated
with self.assertRaises(TypeError):
EmptyQuerySet()
self.assertIsInstance(Article.objects.none(), EmptyQuerySet)
def test_emptyqs_values(self):
# test for #15959
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
qs = Article.objects.none().values_list('pk')
self.assertIsInstance(qs, EmptyQuerySet)
self.assertIsInstance(qs, ValuesListQuerySet)
self.assertEqual(len(qs), 0)
def test_emptyqs_customqs(self):
# A hacky test for custom QuerySet subclass - refs #17271
Article.objects.create(headline='foo', pub_date=datetime.now())
class CustomQuerySet(QuerySet):
def do_something(self):
return 'did something'
qs = Article.objects.all()
qs.__class__ = CustomQuerySet
qs = qs.none()
with self.assertNumQueries(0):
self.assertEqual(len(qs), 0)
self.assertIsInstance(qs, EmptyQuerySet)
self.assertEqual(qs.do_something(), 'did something')
def test_emptyqs_values_order(self):
# Tests for ticket #17712
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().values_list('id').order_by('id')), 0)
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().filter(
id__in=Article.objects.values_list('id', flat=True))), 0)
@skipUnlessDBFeature('can_distinct_on_fields')
def test_emptyqs_distinct(self):
# Tests for #19426
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().distinct('headline', 'pub_date')), 0)
def test_ticket_20278(self):
sr = SelfRef.objects.create()
with self.assertRaises(ObjectDoesNotExist):
SelfRef.objects.get(selfref=sr)
def test_eq(self):
self.assertEqual(Article(id=1), Article(id=1))
self.assertNotEqual(Article(id=1), object())
self.assertNotEqual(object(), Article(id=1))
a = Article()
self.assertEqual(a, a)
self.assertNotEqual(Article(), a)
def test_hash(self):
# Value based on PK
self.assertEqual(hash(Article(id=1)), hash(1))
with self.assertRaises(TypeError):
# No PK value -> unhashable (because save() would then change
# hash)
hash(Article())
class ModelLookupTest(TestCase):
def setUp(self):
# Create an Article.
self.a = Article(
id=None,
headline='Area woman programs in Python',
pub_date=datetime(2005, 7, 28),
)
# Save it into the database. You have to call save() explicitly.
self.a.save()
def test_all_lookup(self):
# Change values by changing the attributes, then calling save().
self.a.headline = 'Area man programs in Python'
self.a.save()
# Article.objects.all() returns all the articles in the database.
self.assertQuerysetEqual(Article.objects.all(),
['<Article: Area man programs in Python>'])
def test_rich_lookup(self):
# Django provides a rich database lookup API.
self.assertEqual(Article.objects.get(id__exact=self.a.id), self.a)
self.assertEqual(Article.objects.get(headline__startswith='Area woman'), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7, pub_date__day=28), self.a)
self.assertEqual(Article.objects.get(pub_date__week_day=5), self.a)
def test_equal_lookup(self):
# The "__exact" lookup type can be omitted, as a shortcut.
self.assertEqual(Article.objects.get(id=self.a.id), self.a)
self.assertEqual(Article.objects.get(headline='Area woman programs in Python'), self.a)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2005),
['<Article: Area woman programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2004),
[],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2005, pub_date__month=7),
['<Article: Area woman programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__week_day=5),
['<Article: Area woman programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__week_day=6),
[],
)
def test_does_not_exist(self):
# Django raises an Article.DoesNotExist exception for get() if the
# parameters don't match any object.
six.assertRaisesRegex(
self,
ObjectDoesNotExist,
"Article matching query does not exist.",
Article.objects.get,
id__exact=2000,
)
        # To avoid dict-ordering related errors, check only one lookup
        # in a single assert.
self.assertRaises(
ObjectDoesNotExist,
Article.objects.get,
pub_date__year=2005,
pub_date__month=8,
)
six.assertRaisesRegex(
self,
ObjectDoesNotExist,
"Article matching query does not exist.",
Article.objects.get,
pub_date__week_day=6,
)
def test_lookup_by_primary_key(self):
# Lookup by a primary key is the most common case, so Django
# provides a shortcut for primary-key exact lookups.
# The following is identical to articles.get(id=a.id).
self.assertEqual(Article.objects.get(pk=self.a.id), self.a)
# pk can be used as a shortcut for the primary key name in any query.
self.assertQuerysetEqual(Article.objects.filter(pk__in=[self.a.id]),
["<Article: Area woman programs in Python>"])
# Model instances of the same type and same ID are considered equal.
a = Article.objects.get(pk=self.a.id)
b = Article.objects.get(pk=self.a.id)
self.assertEqual(a, b)
def test_too_many(self):
# Create a very similar object
a = Article(
id=None,
headline='Area man programs in Python',
pub_date=datetime(2005, 7, 28),
)
a.save()
self.assertEqual(Article.objects.count(), 2)
# Django raises an Article.MultipleObjectsReturned exception if the
# lookup matches more than one object
six.assertRaisesRegex(
self,
MultipleObjectsReturned,
"get\(\) returned more than one Article -- it returned 2!",
Article.objects.get,
headline__startswith='Area',
)
six.assertRaisesRegex(
self,
MultipleObjectsReturned,
"get\(\) returned more than one Article -- it returned 2!",
Article.objects.get,
pub_date__year=2005,
)
six.assertRaisesRegex(
self,
MultipleObjectsReturned,
"get\(\) returned more than one Article -- it returned 2!",
Article.objects.get,
pub_date__year=2005,
pub_date__month=7,
)
class ConcurrentSaveTests(TransactionTestCase):
available_apps = ['basic']
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_concurrent_delete_with_save(self):
"""
Test fetching, deleting and finally saving an object - we should get
an insert in this case.
"""
a = Article.objects.create(headline='foo', pub_date=datetime.now())
exceptions = []
def deleter():
try:
# Do not delete a directly - doing so alters its state.
Article.objects.filter(pk=a.pk).delete()
except Exception as e:
exceptions.append(e)
finally:
connections[DEFAULT_DB_ALIAS].close()
self.assertEqual(len(exceptions), 0)
t = threading.Thread(target=deleter)
t.start()
t.join()
a.save()
self.assertEqual(Article.objects.get(pk=a.pk).headline, 'foo')
class ManagerTest(TestCase):
QUERYSET_PROXY_METHODS = [
'none',
'count',
'dates',
'datetimes',
'distinct',
'extra',
'get',
'get_or_create',
'update_or_create',
'create',
'bulk_create',
'filter',
'aggregate',
'annotate',
'complex_filter',
'exclude',
'in_bulk',
'iterator',
'earliest',
'latest',
'first',
'last',
'order_by',
'select_for_update',
'select_related',
'prefetch_related',
'values',
'values_list',
'update',
'reverse',
'defer',
'only',
'using',
'exists',
'_insert',
'_update',
'raw',
]
def test_manager_methods(self):
"""
This test ensures that the correct set of methods from `QuerySet`
are copied onto `Manager`.
It's particularly useful to prevent accidentally leaking new methods
into `Manager`. New `QuerySet` methods that should also be copied onto
`Manager` will need to be added to `ManagerTest.QUERYSET_PROXY_METHODS`.
"""
self.assertEqual(
sorted(BaseManager._get_queryset_methods(QuerySet).keys()),
sorted(self.QUERYSET_PROXY_METHODS),
)
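# Editorial note: the mechanism exercised by ManagerTest above is what powers
# `Manager.from_queryset()` -- `_get_queryset_methods()` copies the public
# QuerySet methods onto a Manager subclass. A minimal sketch, with hypothetical
# model and queryset names that are not part of this test suite:
#
#   class PublishedQuerySet(QuerySet):
#       def published(self):
#           return self.filter(pub_date__lte=datetime.now())
#
#   class Book(models.Model):
#       pub_date = models.DateTimeField()
#       objects = BaseManager.from_queryset(PublishedQuerySet)()
#
#   Book.objects.published()  # proxied onto the manager from the QuerySet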
class SelectOnSaveTests(TestCase):
def test_select_on_save(self):
a1 = Article.objects.create(pub_date=datetime.now())
with self.assertNumQueries(1):
a1.save()
asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
with self.assertNumQueries(2):
asos.save()
with self.assertNumQueries(1):
asos.save(force_update=True)
Article.objects.all().delete()
with self.assertRaises(DatabaseError):
with self.assertNumQueries(1):
asos.save(force_update=True)
def test_select_on_save_lying_update(self):
"""
Test that select_on_save works correctly if the database
doesn't return correct information about matched rows from
UPDATE.
"""
# Change the manager to not return "row matched" for update().
# We are going to change the Article's _base_manager class
# dynamically. This is a bit of a hack, but it seems hard to
        # test this properly otherwise. We patch Article's manager rather than
        # ArticleSelectOnSave's, because proxy models use their parent model's
        # _base_manager.
orig_class = Article._base_manager.__class__
class FakeQuerySet(QuerySet):
# Make sure the _update method below is in fact called.
called = False
def _update(self, *args, **kwargs):
FakeQuerySet.called = True
super(FakeQuerySet, self)._update(*args, **kwargs)
return 0
class FakeManager(orig_class):
def get_queryset(self):
return FakeQuerySet(self.model)
try:
Article._base_manager.__class__ = FakeManager
asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
with self.assertNumQueries(3):
asos.save()
self.assertTrue(FakeQuerySet.called)
# This is not wanted behavior, but this is how Django has always
# behaved for databases that do not return correct information
# about matched rows for UPDATE.
with self.assertRaises(DatabaseError):
asos.save(force_update=True)
with self.assertRaises(DatabaseError):
asos.save(update_fields=['pub_date'])
finally:
Article._base_manager.__class__ = orig_class
class ModelRefreshTests(TestCase):
def _truncate_ms(self, val):
# MySQL < 5.6.4 removes microseconds from the datetimes which can cause
# problems when comparing the original value to that loaded from DB
return val - timedelta(microseconds=val.microsecond)
def test_refresh(self):
a = Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
Article.objects.filter(pk=a.pk).update(headline='new headline')
with self.assertNumQueries(1):
a.refresh_from_db()
self.assertEqual(a.headline, 'new headline')
orig_pub_date = a.pub_date
new_pub_date = a.pub_date + timedelta(10)
Article.objects.update(headline='new headline 2', pub_date=new_pub_date)
with self.assertNumQueries(1):
a.refresh_from_db(fields=['headline'])
self.assertEqual(a.headline, 'new headline 2')
self.assertEqual(a.pub_date, orig_pub_date)
with self.assertNumQueries(1):
a.refresh_from_db()
self.assertEqual(a.pub_date, new_pub_date)
def test_refresh_fk(self):
s1 = SelfRef.objects.create()
s2 = SelfRef.objects.create()
s3 = SelfRef.objects.create(selfref=s1)
s3_copy = SelfRef.objects.get(pk=s3.pk)
s3_copy.selfref.touched = True
s3.selfref = s2
s3.save()
with self.assertNumQueries(1):
s3_copy.refresh_from_db()
with self.assertNumQueries(1):
# The old related instance was thrown away (the selfref_id has
# changed). It needs to be reloaded on access, so one query
# executed.
self.assertFalse(hasattr(s3_copy.selfref, 'touched'))
self.assertEqual(s3_copy.selfref, s2)
def test_refresh_unsaved(self):
pub_date = self._truncate_ms(datetime.now())
a = Article.objects.create(pub_date=pub_date)
a2 = Article(id=a.pk)
with self.assertNumQueries(1):
a2.refresh_from_db()
self.assertEqual(a2.pub_date, pub_date)
self.assertEqual(a2._state.db, "default")
def test_refresh_no_fields(self):
a = Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
with self.assertNumQueries(0):
a.refresh_from_db(fields=[])
class TestRelatedObjectDeprecation(TestCase):
def test_field_related_deprecation(self):
field = SelfRef._meta.get_field('selfref')
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
self.assertIsInstance(field.related, ForeignObjectRel)
self.assertEqual(len(warns), 1)
self.assertEqual(
str(warns.pop().message),
'Usage of field.related has been deprecated. Use field.rel instead.'
)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for TaskRunner and Experiment class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import tempfile
import time
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn import monitors
from tensorflow.contrib.learn.python.learn import run_config
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import run_config as run_config_lib
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.estimator import estimator as core_estimator
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.training import saver
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import compat
class SheepCounter(object):
"""To be patched in for the time module, replacing sleep() and time()."""
def __init__(self):
self._total_time = 0
self._sleeptimes = []
self._time_calls = 0
def sleep(self, t):
self._total_time += t
self._sleeptimes += [t]
def time(self):
self._time_calls += 1
return self._total_time
@property
def sleep_times(self):
return self._sleeptimes
@property
def time_calls(self):
return self._time_calls
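# Illustrative usage of SheepCounter (mirrors how the tests below patch the
# time module so that no real waiting happens):
#
#   sheep = SheepCounter()
#   with test.mock.patch.object(time, 'time', sheep.time):
#     with test.mock.patch.object(time, 'sleep', sheep.sleep):
#       code_under_test()   # any sleeps are recorded instead of performed
#   sheep.sleep_times       # -> list of requested sleep durations
#   sheep.time()            # -> total "virtual" time slept so far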
class TestBaseEstimator(object):
def __init__(self, config, max_evals, eval_dict):
self.eval_count = 0
self.fit_count = 0
self._max_evals = max_evals
self.export_count = 0
self.monitors = []
self.eval_hooks = []
self._config = config or run_config.RunConfig()
self._model_dir = tempfile.mkdtemp()
self._eval_dict = eval_dict
@property
def model_dir(self):
return self._model_dir
@property
def config(self):
return self._config
def evaluate(self, **kwargs):
tf_logging.info('evaluate called with args: %s' % kwargs)
if 'hooks' in kwargs:
self.eval_hooks = kwargs['hooks']
self.eval_count += 1
if self.eval_count > self._max_evals:
tf_logging.info('Ran %d evals. Done.' % self.eval_count)
raise StopIteration()
return self._eval_dict
def fake_checkpoint(self):
save_path = os.path.join(self.model_dir, 'model.ckpt')
with session.Session() as sess:
var = variables.Variable(1.0, name='var0')
save = saver.Saver({var.op.name: var})
var.initializer.run()
save.save(sess, save_path, global_step=0)
def train(self, **kwargs):
self.fake_checkpoint()
tf_logging.info('fit called with args: %s' % kwargs)
self.fit_count += 1
return [(key, kwargs[key]) for key in sorted(kwargs.keys())]
def export_savedmodel(self, export_dir_base, serving_input_fn, **kwargs):
tf_logging.info('export_savedmodel called with args: %s, %s, %s' %
(export_dir_base, serving_input_fn, kwargs))
self.export_count += 1
return os.path.join(
compat.as_bytes(export_dir_base), compat.as_bytes('bogus_timestamp'))
class TestEstimator(
TestBaseEstimator, evaluable.Evaluable, trainable.Trainable):
def __init__(self, config=None, max_evals=5, eval_dict=None):
super(TestEstimator, self).__init__(config, max_evals, eval_dict)
tf_logging.info('Create Estimator')
def fit(self, **kwargs):
if 'hooks' in kwargs:
raise ValueError('`hooks` is defined in core Estimator')
if 'monitors' in kwargs:
self.monitors = kwargs['monitors']
return super(TestEstimator, self).train(**kwargs)
def train(self, **kwargs):
raise ValueError('`train` is not defined in Estimator.')
class TestCoreEstimator(TestBaseEstimator, core_estimator.Estimator):
def __init__(self, config=None, max_evals=5, eval_dict=None):
super(TestCoreEstimator, self).__init__(config, max_evals, eval_dict)
tf_logging.info('Create Core Estimator')
def evaluate(self, **kwargs):
if 'eval_metrics' in kwargs:
raise ValueError('`eval_metrics` is not defined in core Estimator')
return super(TestCoreEstimator, self).evaluate(**kwargs)
def train(self, **kwargs):
if 'monitors' in kwargs:
raise ValueError('`monitors` is not defined in core Estimator')
if 'hooks' in kwargs:
self.monitors = kwargs['hooks']
return super(TestCoreEstimator, self).train(**kwargs)
class _NoopHook(session_run_hook.SessionRunHook):
pass
class ExperimentTest(test.TestCase):
def _cluster_spec(self):
return {
run_config_lib.TaskType.PS: ['host1:2222', 'host2:2222'],
run_config_lib.TaskType.WORKER:
['host3:2222', 'host4:2222', 'host5:2222']
}
def _estimators_for_tests(self, config=None, eval_dict=None):
return [TestEstimator(config=config, eval_dict=eval_dict),
TestCoreEstimator(config=config, eval_dict=eval_dict)]
  def test_eval_metrics_for_core_estimator(self):
est = TestCoreEstimator()
with self.assertRaisesRegexp(
ValueError, '`eval_metrics` must be `None`'):
experiment.Experiment(
est,
train_input_fn='train_input',
train_steps='train_steps',
eval_input_fn='eval_input',
eval_metrics='eval_metrics')
def test_train(self):
for est in self._estimators_for_tests():
eval_metrics = 'eval_metrics' if not isinstance(
est, core_estimator.Estimator) else None
ex = experiment.Experiment(
est,
train_input_fn='train_input',
train_steps='train_steps',
eval_input_fn='eval_input',
eval_metrics=eval_metrics)
fit_args = ex.train(delay_secs=0)
self.assertEqual(1, est.fit_count)
self.assertIn(('max_steps', 'train_steps'), fit_args)
self.assertEqual(0, est.eval_count)
def test_train_delay(self):
for est in self._estimators_for_tests():
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
for delay in [0, 1, 3]:
sheep = SheepCounter()
with test.mock.patch.object(time, 'time', sheep.time):
with test.mock.patch.object(time, 'sleep', sheep.sleep):
ex.train(delay_secs=delay)
self.assertAlmostEqual(delay, sheep.time(), delta=1e-4)
def test_train_default_delay(self):
for task_id in [0, 1, 3]:
tf_config = {'task': {'index': task_id}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
for est in self._estimators_for_tests(config):
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
sheep = SheepCounter()
with test.mock.patch.object(time, 'time', sheep.time):
with test.mock.patch.object(time, 'sleep', sheep.sleep):
ex.train()
self.assertAlmostEqual(task_id * 5, sheep.time(), delta=1e-4)
@test.mock.patch.object(server_lib, 'Server')
def test_train_starts_server(self, mock_server):
# Arrange.
tf_config = {
'cluster': self._cluster_spec(),
'environment': run_config_lib.Environment.CLOUD,
'task': {
'type': run_config_lib.TaskType.WORKER,
'index': 1
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config_lib.RunConfig(
master='host4:2222', num_cores=15, gpu_memory_fraction=0.314)
for est in self._estimators_for_tests(config):
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
# Act.
# We want to make sure we discount the time it takes to start the server
# in our accounting of the delay, so we set a small delay here.
sheep = SheepCounter()
with test.mock.patch.object(time, 'time', sheep.time):
with test.mock.patch.object(time, 'sleep', sheep.sleep):
ex.train(delay_secs=1)
          # Ensure that the delay takes into account the time to start the server.
self.assertAlmostEqual(1, sheep.time(), delta=1e-4)
# Assert.
expected_config_proto = config_pb2.ConfigProto()
expected_config_proto.inter_op_parallelism_threads = 15
expected_config_proto.intra_op_parallelism_threads = 15
expected_config_proto.gpu_options.per_process_gpu_memory_fraction = 0.314
mock_server.assert_called_with(
config.cluster_spec,
job_name=run_config_lib.TaskType.WORKER,
task_index=1,
config=expected_config_proto,
start=False)
mock_server.assert_has_calls([test.mock.call().start()])
@test.mock.patch.object(server_lib, 'Server')
def test_train_server_does_not_start_without_cluster_spec(self, mock_server):
config = run_config_lib.RunConfig(master='host4:2222')
for est in self._estimators_for_tests(config):
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.train()
# The server should not have started because there was no ClusterSpec.
self.assertFalse(mock_server.called)
@test.mock.patch.object(server_lib, 'Server')
def test_train_server_does_not_start_with_empty_master(self, mock_server):
tf_config = {'cluster': self._cluster_spec()}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config_lib.RunConfig(master='')
for est in self._estimators_for_tests(config):
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.train()
# The server should not have started because master was the empty string.
self.assertFalse(mock_server.called)
def test_train_raises_if_job_name_is_missing(self):
tf_config = {
'cluster': self._cluster_spec(),
'environment': run_config_lib.Environment.CLOUD,
'task': {
'index': 1
}
}
with test.mock.patch.dict(
'os.environ',
{'TF_CONFIG': json.dumps(tf_config)}), self.assertRaises(ValueError):
config = run_config_lib.RunConfig(
master='host3:2222' # Normally selected by task type.
)
for est in self._estimators_for_tests(config):
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.train()
def test_evaluate(self):
for est in self._estimators_for_tests():
eval_metrics = 'eval_metrics' if not isinstance(
est, core_estimator.Estimator) else None
est.fake_checkpoint()
noop_hook = _NoopHook()
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics=eval_metrics,
eval_hooks=[noop_hook],
eval_steps='steps',
eval_delay_secs=0)
ex.evaluate()
self.assertEqual(0, est.fit_count)
self.assertEqual(1, est.eval_count)
self.assertEqual([noop_hook], est.eval_hooks)
def test_evaluate_delay(self):
for est in self._estimators_for_tests():
est.fake_checkpoint()
noop_hook = _NoopHook()
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input',
eval_hooks=[noop_hook])
for delay in [0, 1, 3]:
sheep = SheepCounter()
with test.mock.patch.object(time, 'time', sheep.time):
with test.mock.patch.object(time, 'sleep', sheep.sleep):
ex.evaluate(delay_secs=delay)
self.assertAlmostEqual(delay, sheep.time(), delta=1e-4)
self.assertEqual([noop_hook], est.eval_hooks)
def test_continuous_eval(self):
for est in self._estimators_for_tests(eval_dict={'global_step': 100}):
eval_metrics = 'eval_metrics' if not isinstance(
est, core_estimator.Estimator) else None
est.fake_checkpoint()
noop_hook = _NoopHook()
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics=eval_metrics,
eval_hooks=[noop_hook],
eval_delay_secs=0,
continuous_eval_throttle_secs=0)
self.assertRaises(StopIteration, ex.continuous_eval,
evaluate_checkpoint_only_once=False)
self.assertEqual(0, est.fit_count)
self.assertEqual(6, est.eval_count)
self.assertEqual([noop_hook], est.eval_hooks)
def test_continuous_eval_ends_after_train_step(self):
for est in self._estimators_for_tests(eval_dict={'global_step': 100}):
eval_metrics = 'eval_metrics' if not isinstance(
est, core_estimator.Estimator) else None
est.fake_checkpoint()
noop_hook = _NoopHook()
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics=eval_metrics,
eval_hooks=[noop_hook],
eval_delay_secs=0,
continuous_eval_throttle_secs=0,
train_steps=100)
ex.continuous_eval()
self.assertEqual(0, est.fit_count)
self.assertEqual(1, est.eval_count)
self.assertEqual([noop_hook], est.eval_hooks)
def test_continuous_eval_throttle_delay(self):
for delay in [0, 1, 2]:
for est in self._estimators_for_tests():
eval_metrics = 'eval_metrics' if not isinstance(
est, core_estimator.Estimator) else None
est.fake_checkpoint()
noop_hook = _NoopHook()
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics=eval_metrics,
eval_hooks=[noop_hook],
continuous_eval_throttle_secs=delay,
eval_delay_secs=0)
sheep = SheepCounter()
with test.mock.patch.object(time, 'time', sheep.time):
with test.mock.patch.object(time, 'sleep', sheep.sleep):
self.assertRaises(
StopIteration,
ex.continuous_eval,
evaluate_checkpoint_only_once=False)
self.assertAlmostEqual(5 * delay, sheep.time(), delta=1e-4)
def test_continuous_eval_predicate_fn(self):
for est in self._estimators_for_tests():
eval_metrics = 'eval_metrics' if not isinstance(
est, core_estimator.Estimator) else None
est.fake_checkpoint()
noop_hook = _NoopHook()
def _predicate_fn(unused_eval_result):
return est.eval_count < 3 # pylint: disable=cell-var-from-loop
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics=eval_metrics,
eval_hooks=[noop_hook],
eval_delay_secs=0,
continuous_eval_throttle_secs=0)
ex.continuous_eval(evaluate_checkpoint_only_once=False,
continuous_eval_predicate_fn=_predicate_fn)
self.assertEqual(0, est.fit_count)
self.assertEqual(3, est.eval_count)
self.assertEqual([noop_hook], est.eval_hooks)
def test_run_local(self):
for est in self._estimators_for_tests():
eval_metrics = 'eval_metrics' if not isinstance(
est, core_estimator.Estimator) else None
noop_hook = _NoopHook()
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics=eval_metrics,
eval_hooks=[noop_hook],
train_steps=100,
eval_steps=100,
local_eval_frequency=10)
ex.local_run()
self.assertEqual(1, est.fit_count)
self.assertEqual(1, est.eval_count)
self.assertEqual(1, len(est.monitors))
self.assertEqual([noop_hook], est.eval_hooks)
self.assertTrue(isinstance(est.monitors[0], monitors.ValidationMonitor))
def test_train_hooks_extend_does_not_mutate_input_hooks(self):
for est in self._estimators_for_tests():
eval_metrics = 'eval_metrics' if not isinstance(
est, core_estimator.Estimator) else None
noop_hook = _NoopHook()
input_hooks = [noop_hook]
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics=eval_metrics,
train_monitors=input_hooks)
self.assertAllEqual([noop_hook], ex._train_monitors)
another_noop_hook = _NoopHook()
# Assert that the extend API mutates the hooks, but not the input hooks
ex.extend_train_hooks([another_noop_hook])
self.assertAllEqual([noop_hook, another_noop_hook], ex._train_monitors)
self.assertAllEqual([noop_hook], input_hooks)
def test_invalid_export_strategies(self):
for est in self._estimators_for_tests():
with self.assertRaisesRegexp(ValueError, 'ExportStrategy'):
experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
train_steps=100,
eval_steps=100,
export_strategies='not_an_export_strategy')
with self.assertRaisesRegexp(ValueError, 'ExportStrategy'):
experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
train_steps=100,
eval_steps=100,
            export_strategies=['not_an_export_strategy'])
def test_export_strategies_reset(self):
for est in self._estimators_for_tests():
eval_metrics = 'eval_metrics' if not isinstance(
est, core_estimator.Estimator) else None
export_strategy_1 = saved_model_export_utils.make_export_strategy(
est, 'export_input_1', exports_to_keep=None)
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics=eval_metrics,
train_steps=100,
eval_steps=100,
export_strategies=(export_strategy_1,))
ex.train_and_evaluate()
self.assertEqual(1, est.export_count)
      # After a reset with no argument (None), the count does not change and
      # the user-provided export strategy list should remain intact.
old_es = ex.reset_export_strategies()
ex.train_and_evaluate()
self.assertAllEqual([export_strategy_1], old_es)
self.assertEqual(1, est.export_count)
# After reset with list, the count should increase with the number of
# items.
export_strategy_2 = saved_model_export_utils.make_export_strategy(
est, 'export_input_2', exports_to_keep=None)
export_strategy_3 = saved_model_export_utils.make_export_strategy(
est, 'export_input_3', exports_to_keep=None)
old_es = ex.reset_export_strategies(
[export_strategy_2, export_strategy_3])
ex.train_and_evaluate()
self.assertAllEqual([], old_es)
self.assertEqual(3, est.export_count)
def test_train_and_evaluate(self):
for est in self._estimators_for_tests():
eval_metrics = 'eval_metrics' if not isinstance(
est, core_estimator.Estimator) else None
noop_hook = _NoopHook()
export_strategy = saved_model_export_utils.make_export_strategy(
est, 'export_input', exports_to_keep=None)
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics=eval_metrics,
eval_hooks=[noop_hook],
train_steps=100,
eval_steps=100,
export_strategies=export_strategy)
ex.train_and_evaluate()
self.assertEqual(1, est.fit_count)
self.assertEqual(1, est.eval_count)
self.assertEqual(1, est.export_count)
self.assertEqual(1, len(est.monitors))
self.assertEqual([noop_hook], est.eval_hooks)
self.assertTrue(isinstance(est.monitors[0], monitors.ValidationMonitor))
def test_min_eval_frequency_defaults(self):
def dummy_model_fn(features, labels): # pylint: disable=unused-argument
pass
# The default value when model_dir is on GCS is 1000
estimator = core_estimator.Estimator(dummy_model_fn, 'gs://dummy_bucket')
ex = experiment.Experiment(
estimator, train_input_fn=None, eval_input_fn=None)
self.assertEquals(ex._min_eval_frequency, 1000)
# The default value when model_dir is not on GCS is 1
estimator = core_estimator.Estimator(dummy_model_fn, '/tmp/dummy')
ex = experiment.Experiment(
estimator, train_input_fn=None, eval_input_fn=None)
self.assertEquals(ex._min_eval_frequency, 1)
# Make sure default not used when explicitly set
estimator = core_estimator.Estimator(dummy_model_fn, 'gs://dummy_bucket')
ex = experiment.Experiment(
estimator,
min_eval_frequency=123,
train_input_fn=None,
eval_input_fn=None)
self.assertEquals(ex._min_eval_frequency, 123)
def test_continuous_train_and_eval(self):
for est in self._estimators_for_tests(eval_dict={'global_step': 100}):
eval_metrics = 'eval_metrics' if not isinstance(
est, core_estimator.Estimator) else None
noop_hook = _NoopHook()
export_strategy = saved_model_export_utils.make_export_strategy(
est, 'export_input', exports_to_keep=None)
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics=eval_metrics,
eval_hooks=[noop_hook],
train_steps=100,
eval_steps=100,
export_strategies=export_strategy)
ex.continuous_train_and_eval()
self.assertEqual(1, est.fit_count)
self.assertEqual(1, est.eval_count)
self.assertEqual(1, est.export_count)
self.assertEqual([noop_hook], est.eval_hooks)
def test_continuous_train_and_eval_with_predicate_fn(self):
for est in self._estimators_for_tests(eval_dict={'global_step': 100}):
eval_metrics = 'eval_metrics' if not isinstance(
est, core_estimator.Estimator) else None
export_strategy = saved_model_export_utils.make_export_strategy(
est, 'export_input', exports_to_keep=None)
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics=eval_metrics,
          train_steps=100000000000,  # a value large enough that `ex` never stops.
eval_steps=100,
export_strategies=export_strategy)
def predicate_fn(eval_result):
del eval_result # unused. for fn signature.
return False
ex.continuous_train_and_eval(continuous_eval_predicate_fn=predicate_fn)
self.assertEqual(0, est.fit_count)
self.assertEqual(0, est.eval_count)
self.assertEqual(1, est.export_count)
def test_continuous_train_and_eval_with_adapted_steps_per_iteration(self):
mock_estimator = test.mock.Mock(core_estimator.Estimator)
type(mock_estimator).model_dir = test.mock.PropertyMock(
return_value='test_dir')
total_steps = 100000000000000
ex = experiment.Experiment(
mock_estimator,
train_input_fn='train_input',
eval_input_fn='eval_input',
train_steps=total_steps)
def predicate_fn(eval_result):
# Allows the first invoke only.
return eval_result is None
ex.continuous_train_and_eval(continuous_eval_predicate_fn=predicate_fn)
mock_estimator.train.assert_called_once_with(
input_fn='train_input',
steps=int(total_steps/10),
max_steps=test.mock.ANY,
hooks=test.mock.ANY)
def test_continuous_train_and_eval_with_steps_per_iteration_from_user(self):
mock_estimator = test.mock.Mock(core_estimator.Estimator)
type(mock_estimator).model_dir = test.mock.PropertyMock(
return_value='test_dir')
total_steps = 100000000000000
ex = experiment.Experiment(
mock_estimator,
train_input_fn='train_input',
eval_input_fn='eval_input',
train_steps_per_iteration=1234,
train_steps=total_steps)
def predicate_fn(eval_result):
# Allows the first invoke only.
return eval_result is None
ex.continuous_train_and_eval(continuous_eval_predicate_fn=predicate_fn)
mock_estimator.train.assert_called_once_with(
input_fn='train_input',
steps=1234,
max_steps=test.mock.ANY,
hooks=test.mock.ANY)
def test_continuous_train_and_eval_with_default_steps_per_iteration(self):
mock_estimator = test.mock.Mock(core_estimator.Estimator)
type(mock_estimator).model_dir = test.mock.PropertyMock(
return_value='test_dir')
ex = experiment.Experiment(
mock_estimator,
train_input_fn='train_input',
eval_input_fn='eval_input',
train_steps_per_iteration=None,
train_steps=None)
def predicate_fn(eval_result):
# Allows the first invoke only.
return eval_result is None
ex.continuous_train_and_eval(continuous_eval_predicate_fn=predicate_fn)
mock_estimator.train.assert_called_once_with(
input_fn='train_input',
steps=1000,
max_steps=test.mock.ANY,
hooks=test.mock.ANY)
def test_continuous_train_and_eval_with_invalid_predicate_fn(self):
for est in self._estimators_for_tests():
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input')
with self.assertRaisesRegexp(
ValueError, '`continuous_eval_predicate_fn` must be a callable'):
ex.continuous_train_and_eval(continuous_eval_predicate_fn='fn')
def test_continuous_train_and_eval_with_invalid_train_steps_iterations(self):
for est in self._estimators_for_tests():
with self.assertRaisesRegexp(
ValueError, '`train_steps_per_iteration` must be an integer.'):
experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
train_steps_per_iteration='123')
@test.mock.patch.object(server_lib, 'Server')
def test_run_std_server(self, mock_server):
# Arrange.
tf_config = {
'cluster': self._cluster_spec(),
'task': {
'type': run_config_lib.TaskType.PS,
'index': 1
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config_lib.RunConfig(
master='host2:2222',
num_cores=15,
gpu_memory_fraction=0.314,)
for est in self._estimators_for_tests(config):
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
# Act.
ex.run_std_server()
# Assert.
mock_server.assert_has_calls(
[test.mock.call().start(), test.mock.call().join()])
@test.mock.patch.object(server_lib, 'Server')
def test_run_std_server_raises_without_cluster_spec(self, mock_server):
config = run_config_lib.RunConfig(master='host4:2222')
for est in self._estimators_for_tests(config):
with self.assertRaises(ValueError):
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.run_std_server()
def test_test(self):
for est in self._estimators_for_tests():
exp_strategy = saved_model_export_utils.make_export_strategy(
est, 'export_input', exports_to_keep=None)
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
export_strategies=(exp_strategy,))
ex.test()
self.assertEqual(1, est.fit_count)
self.assertEqual(1, est.eval_count)
self.assertEqual(1, est.export_count)
def test_continuous_eval_evaluates_checkpoint_once(self):
for est in self._estimators_for_tests(eval_dict={'global_step': 100}):
eval_metrics = 'eval_metrics' if not isinstance(
est, core_estimator.Estimator) else None
est.fake_checkpoint()
result = {
'called': 0,
'called_with_eval_result': 0,
}
# pylint: disable=cell-var-from-loop
def _predicate_fn(eval_result):
result['called'] += 1
if eval_result:
          # If eval_result is neither None nor empty, the checkpoint has
          # been evaluated.
result['called_with_eval_result'] += 1
        # 300 evaluation attempts should be more than enough to prove the point.
return result['called'] < 300
# pylint: enable=cell-var-from-loop
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics=eval_metrics,
eval_delay_secs=0,
continuous_eval_throttle_secs=0)
ex.continuous_eval(evaluate_checkpoint_only_once=True,
continuous_eval_predicate_fn=_predicate_fn)
self.assertEqual(0, est.fit_count)
self.assertEqual(1, est.eval_count)
self.assertEqual(300, result['called'])
self.assertEqual(1, result['called_with_eval_result'])
if __name__ == '__main__':
test.main()
|
|
import logging as log
import time
from collections import namedtuple
from tempfile import TemporaryDirectory
from . import batch_job
from . import git
from . import job
from . import merge_request as merge_request_module
from . import single_merge_job
from . import store
from .project import AccessLevel, Project
MergeRequest = merge_request_module.MergeRequest
class Bot:
def __init__(self, *, api, config):
self._api = api
self._config = config
user = config.user
opts = config.merge_opts
if not user.is_admin:
assert not opts.reapprove, (
"{0.username} is not an admin, can't impersonate!".format(user)
)
assert not opts.add_reviewers, (
"{0.username} is not an admin, can't lookup Reviewed-by: email addresses ".format(user)
)
def start(self):
with TemporaryDirectory() as root_dir:
if self._config.use_https:
repo_manager = store.HttpsRepoManager(
user=self.user,
root_dir=root_dir,
auth_token=self._config.auth_token,
timeout=self._config.git_timeout,
reference=self._config.git_reference_repo,
)
else:
repo_manager = store.SshRepoManager(
user=self.user,
root_dir=root_dir,
ssh_key_file=self._config.ssh_key_file,
timeout=self._config.git_timeout,
reference=self._config.git_reference_repo,
)
self._run(repo_manager)
@property
def user(self):
return self._config.user
@property
def api(self):
return self._api
def _run(self, repo_manager):
time_to_sleep_between_projects_in_secs = 1
min_time_to_sleep_after_iterating_all_projects_in_secs = 30
while True:
projects = self._get_projects()
self._process_projects(
repo_manager,
time_to_sleep_between_projects_in_secs,
projects,
)
if self._config.cli:
return
big_sleep = max(0,
min_time_to_sleep_after_iterating_all_projects_in_secs -
time_to_sleep_between_projects_in_secs * len(projects))
log.info('Sleeping for %s seconds...', big_sleep)
time.sleep(big_sleep)
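            # Pacing example: with 10 projects, one pass already spends about
            # 10 * 1s sleeping between projects, so big_sleep is
            # max(0, 30 - 10) = 20s, keeping each full iteration at roughly
            # 30 seconds minimum.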
def _get_projects(self):
log.info('Finding out my current projects...')
my_projects = Project.fetch_all_mine(self._api)
project_regexp = self._config.project_regexp
filtered_projects = [p for p in my_projects if project_regexp.match(p.path_with_namespace)]
log.debug(
'Projects that match project_regexp: %s',
[p.path_with_namespace for p in filtered_projects]
)
filtered_out = set(my_projects) - set(filtered_projects)
if filtered_out:
log.debug(
'Projects that do not match project_regexp: %s',
[p.path_with_namespace for p in filtered_out]
)
return filtered_projects
def _process_projects(
self,
repo_manager,
time_to_sleep_between_projects_in_secs,
projects,
):
for project in projects:
project_name = project.path_with_namespace
if project.access_level < AccessLevel.reporter:
log.warning("Don't have enough permissions to browse merge requests in %s!", project_name)
continue
merge_requests = self._get_merge_requests(project, project_name)
self._process_merge_requests(repo_manager, project, merge_requests)
time.sleep(time_to_sleep_between_projects_in_secs)
def _get_merge_requests(self, project, project_name):
log.info('Fetching merge requests assigned to me in %s...', project_name)
my_merge_requests = MergeRequest.fetch_all_open_for_user(
project_id=project.id,
user=self.user,
api=self._api,
merge_order=self._config.merge_order,
)
branch_regexp = self._config.branch_regexp
filtered_mrs = [mr for mr in my_merge_requests
if branch_regexp.match(mr.target_branch)]
log.debug(
'MRs that match branch_regexp: %s',
[mr.web_url for mr in filtered_mrs]
)
filtered_out = set(my_merge_requests) - set(filtered_mrs)
if filtered_out:
log.debug(
'MRs that do not match branch_regexp: %s',
[mr.web_url for mr in filtered_out]
)
source_branch_regexp = self._config.source_branch_regexp
source_filtered_mrs = [mr for mr in filtered_mrs
if source_branch_regexp.match(mr.source_branch)]
log.debug(
'MRs that match source_branch_regexp: %s',
[mr.web_url for mr in source_filtered_mrs]
)
source_filtered_out = set(filtered_mrs) - set(source_filtered_mrs)
if source_filtered_out:
log.debug(
'MRs that do not match source_branch_regexp: %s',
[mr.web_url for mr in source_filtered_out]
)
return source_filtered_mrs
def _process_merge_requests(self, repo_manager, project, merge_requests):
if not merge_requests:
log.info('Nothing to merge at this point...')
return
try:
repo = repo_manager.repo_for_project(project)
except git.GitError:
log.exception("Couldn't initialize repository for project!")
raise
log.info('Got %s requests to merge;', len(merge_requests))
if self._config.batch and len(merge_requests) > 1:
log.info('Attempting to merge as many MRs as possible using BatchMergeJob...')
batch_merge_job = batch_job.BatchMergeJob(
api=self._api,
user=self.user,
project=project,
merge_requests=merge_requests,
repo=repo,
options=self._config.merge_opts,
)
try:
batch_merge_job.execute()
return
except batch_job.CannotBatch as err:
log.warning('BatchMergeJob aborted: %s', err)
except batch_job.CannotMerge as err:
log.warning('BatchMergeJob failed: %s', err)
return
except git.GitError as err:
log.exception('BatchMergeJob failed: %s', err)
log.info('Attempting to merge the oldest MR...')
merge_request = merge_requests[0]
merge_job = self._get_single_job(
project=project, merge_request=merge_request, repo=repo,
options=self._config.merge_opts,
)
merge_job.execute()
def _get_single_job(self, project, merge_request, repo, options):
return single_merge_job.SingleMergeJob(
api=self._api,
user=self.user,
project=project,
merge_request=merge_request,
repo=repo,
options=options,
)
class BotConfig(namedtuple('BotConfig',
'user use_https auth_token ssh_key_file project_regexp merge_order merge_opts ' +
'git_timeout git_reference_repo branch_regexp source_branch_regexp batch cli')):
pass
MergeJobOptions = job.MergeJobOptions
Fusion = job.Fusion
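# A minimal construction sketch (not part of marge-bot itself): BotConfig is a
# plain namedtuple, so every field listed above can be filled directly. The
# values below are hypothetical stand-ins; in a real deployment `user` and
# `merge_opts` come from the GitLab API wrapper and a MergeJobOptions instance.
def _example_bot_config():
    import re
    return BotConfig(
        user=None,                        # normally the authenticated bot user
        use_https=True,
        auth_token='TOKEN',               # hypothetical token
        ssh_key_file=None,
        project_regexp=re.compile('.*'),
        merge_order='created_at',
        merge_opts=None,                  # normally a MergeJobOptions instance
        git_timeout=60,
        git_reference_repo=None,
        branch_regexp=re.compile('.*'),
        source_branch_regexp=re.compile('.*'),
        batch=False,
        cli=False,
    )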
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# IMPORTS ----------------------------------------------------------------------
import collections
import os
import re
import syslog
import tempfile
# ------------------------------------------------------------------------------
# MODULE INFORMATIONS ----------------------------------------------------------
DOCUMENTATION = '''
---
module: create_partition
short_description: Create a new partition
author:
- "Alessandro Molari"
'''
EXAMPLES = '''
# Create a partition
- name: Create the UEFI partition
create_partition:
name: UEFI
disk: /dev/sda
fs: fat32
end: 512MiB
flags:
- boot
# Create partitions defined in the variable `partitions` and
# define the fact `partitions`, shadowing that variable and adding
# some extra information.
- name: Create partitions
create_partition:
name: "{{ item.name }}"
disk: "{{ item.disk }}"
fs: "{{ item.fs }}"
end: "{{ item.end }}"
flags: "{{ item.flags | default(omit) }}"
with_items: "{{ partitions }}"
register: partitions
- set_fact:
partitions: "{{ partitions.results | map(attribute='ansible_facts') | list }}"
'''
# ------------------------------------------------------------------------------
# LOGGING ----------------------------------------------------------------------
syslog.openlog('ansible-{name}'.format(name=os.path.basename(__file__)))
def log(msg, level=syslog.LOG_DEBUG):
'''Log to the system logging facility of the target system.'''
if os.name == 'posix': # syslog is unsupported on Windows.
syslog.syslog(level, msg)
# ------------------------------------------------------------------------------
# GLOBALS ----------------------------------------------------------------------
AVAILABLE_UNITS = ['s', 'B', 'kB', 'MB', 'GB', 'TB', 'compact', 'cyl', 'chs',
'%', 'kiB', 'MiB', 'GiB', 'TiB']
# ------------------------------------------------------------------------------
# UTILITIES --------------------------------------------------------------------
def list_get(l, idx, default=None):
'''Safe version of `l[idx]`.
If the index `idx` is out of bounds, `default` is returned instead.
'''
try:
return l[idx]
except IndexError:
return default
# ------------------------------------------------------------------------------
# DATA STRUCTURES --------------------------------------------------------------
class StorageSize(collections.Mapping):
def __init__(self, value, unit, fail_handler):
self._fail_handler = fail_handler
self.value = value
self.unit = unit
@classmethod
def from_str(cls, size, fail_handler):
md = re.match(r'([.\d]+)\s*([^\s]+)', size)
if md:
value = md.group(1)
unit = md.group(2)
if unit not in AVAILABLE_UNITS:
fail_handler('Invalid unit {} for size {}'.format(unit, size))
return cls(value, unit, fail_handler)
else:
fail_handler('Invalid size: {}'.format(size))
def to_dict(self):
return {'value': self.value, 'unit': self.unit}
def __getitem__(self, key):
return self.to_dict()[key]
def __iter__(self):
return iter(self.to_dict())
def __len__(self):
return len(self.to_dict())
def __repr__(self):
return 'StorageSize(value={}, unit={})'.format(self.value, self.unit)
def __str__(self):
return '{value}{unit}'.format(value=self.value, unit=self.unit)
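# A short usage sketch (illustrative only): StorageSize.from_str parses
# parted-style size strings such as the `end` argument shown in EXAMPLES above.
# The fail handler here simply raises, standing in for AnsibleModule.fail_json.
def _example_parse_size():
    def fail(msg):
        raise ValueError(msg)
    size = StorageSize.from_str('512MiB', fail)
    return str(size), dict(size)  # ('512MiB', {'value': '512', 'unit': 'MiB'})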
# ------------------------------------------------------------------------------
# LOGIC ------------------------------------------------------------------------
class PartitionManager(object):
def __init__(self, name, disk, fs, end, flags, enc_pwd,
cmd_runner, fail_handler):
# Init fields from provided arguments.
self._name = name
self._disk = disk
self._fs = fs
self._end = StorageSize.from_str(end, fail_handler)
self._flags = flags
self._enc_pwd = enc_pwd
self._cmd_runner = cmd_runner
self._fail_handler = fail_handler
# Init other fields.
prev_partitions = self.ls()
self._number = len(prev_partitions) + 1
self._raw_device = '{disk}{number}'.format(
disk=self._disk, number=self._number)
self._device = self._raw_device
self._raw_name = self._name
if len(prev_partitions) == 0:
# Set initial padding of `1 MiB`.
self._start = StorageSize(1, 'MiB', self._fail_handler)
else:
self._start = prev_partitions[-1]['end']
def ls(self):
_, out, err = self._run_parted_cmd('print')
lines = [line for line in out.split('\n') if line]
columns = ['Number', 'Start', 'End', 'Size', 'File system', 'Name', 'Flags']
header = '^{columns}$'.format(columns=r'\s+'.join(columns))
idxs = [idx for idx, line in enumerate(lines) if re.match(header, line)]
if len(idxs) != 1:
self._fail_handler(msg='Internal error: cannot parse parted print output')
partitions = []
for line in lines[idxs[0] + 1:]:
tokens = [token for token in re.split(r'\s+', line) if token]
partitions.append(dict(
number=list_get(tokens, 0),
start=StorageSize.from_str(list_get(tokens, 1), self._fail_handler),
end=StorageSize.from_str(list_get(tokens, 2), self._fail_handler),
size=StorageSize.from_str(list_get(tokens, 3), self._fail_handler),
fs=list_get(tokens, 4),
name=list_get(tokens, 5),
flags=list_get(tokens, 6)
))
return partitions
def create(self):
# Create the physical partition.
self._run_parted_cmd('mkpart {name} {fs} {start} {end}'.format(
name=self._name, fs=self._fs, start=self._start, end=self._end))
# Set the flags.
for flag in self._flags:
self._run_parted_cmd('set {number} {flag} on'.format(
number=self._number, flag=flag))
# Encrypt.
if self._enc_pwd:
pwd_file = tempfile.NamedTemporaryFile(delete=False)
pwd_file.write(self._enc_pwd)
pwd_file.close()
enc_name = 'luks-{name}'.format(name=self._name)
log('Encrypting device `{}` with name `{}`...'.format(
self._raw_device, enc_name))
self._run_crypt_cmd('luksFormat --use-urandom {device} {key_file}'.format(
device=self._raw_device, key_file=pwd_file.name))
self._run_crypt_cmd('luksOpen {device} {name} --key-file {key_file}'.format(
device=self._raw_device, name=enc_name,
key_file=pwd_file.name))
self._name = enc_name
self._device = "/dev/mapper/{}".format(self._name)
os.unlink(pwd_file.name)
log('Encrypt operation completed')
def _run_crypt_cmd(self, cmd):
cmd = 'cryptsetup -q {cmd}'.format(cmd=cmd)
log('Performing command `{}`'.format(cmd))
rc, out, err = self._cmd_runner(cmd, check_rc=True)
return rc, out, err
def _run_parted_cmd(self, cmd):
log('Running parted command `{cmd}` on disk `{disk}`'.format(
cmd=cmd, disk=self._disk))
return self._cmd_runner('parted -s -a opt {disk} {cmd}'.format(
disk=self._disk, cmd=cmd),
check_rc=True)
def to_dict(self):
return dict(
raw_name=self._raw_name,
name=self._name,
fs=self._fs,
start=self._start,
end=self._end,
disk=self._disk,
number=self._number,
raw_device=self._raw_device,
device=self._device,
flags=self._flags,
encryption=self._enc_pwd)
# ------------------------------------------------------------------------------
# MAIN FUNCTION ----------------------------------------------------------------
def main():
module = AnsibleModule(argument_spec=dict(
name=dict(type='str', required=True),
disk=dict(type='str', required=True),
fs=dict(choices=['btrfs', 'nilfs2', 'ext4', 'ext3', 'ext2',
'fat32', 'fat16', 'hfsx', 'hfs+', 'hfs', 'jfs',
'swsusp', 'linux-swap(v1)', 'linux-swap(v0)',
'ntfs', 'reiserfs', 'hp-ufs', 'sun-ufs', 'xfs',
'apfs2', 'apfs1', 'asfs', 'amufs5', 'amufs4',
'amu'],
required=True),
end=dict(type='str', required=True),
flags=dict(type='list', default=[]),
encryption=dict(type='str', default=None)))
fail_handler = lambda msg: module.fail_json(msg=msg)
cmd_runner = lambda *args, **kwargs: module.run_command(*args, **kwargs)
pm = PartitionManager(module.params['name'], module.params['disk'],
module.params['fs'], module.params['end'],
module.params['flags'], module.params['encryption'],
cmd_runner, fail_handler)
pm.create()
module.exit_json(changed=True, msg='Partition successfully created.',
result=pm.to_dict())
# ------------------------------------------------------------------------------
# ENTRY POINT ------------------------------------------------------------------
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
# ------------------------------------------------------------------------------
# vim: set filetype=python :
|
|
""" A stylish alternative for caching your map tiles.
TileStache is a Python-based server application that can serve up map tiles
based on rendered geographic data. You might be familiar with TileCache
(http://tilecache.org), the venerable open source WMS server from MetaCarta.
TileStache is similar, but we hope simpler and better-suited to the needs of
designers and cartographers.
Documentation available at http://tilestache.org/doc/
"""
from __future__ import print_function
import os.path
__version__ = open(os.path.join(os.path.dirname(__file__), 'VERSION')).read().strip()
import re
from sys import stdout
from io import StringIO
from os.path import dirname, join as pathjoin, realpath
from datetime import datetime, timedelta
from .py3_compat import urljoin, urlparse, urlopen, parse_qs, httplib, is_string_type, reduce
from wsgiref.headers import Headers
from os import getcwd
from time import time
import logging
try:
from json import load as json_load
from json import loads as json_loads
except ImportError:
from simplejson import load as json_load
from simplejson import loads as json_loads
from ModestMaps.Core import Coordinate
# dictionary of configuration objects for requestLayer().
_previous_configs = {}
from . import Core
from . import Config
# regular expression for PATH_INFO
_pathinfo_pat = re.compile(r'^/?(?P<l>\w.+)/(?P<z>\d+)/(?P<x>-?\d+)/(?P<y>-?\d+)\.(?P<e>\w+)$')
_preview_pat = re.compile(r'^/?(?P<l>\w.+)/(preview\.html)?$')
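# For illustration (examples not taken from the original source): _pathinfo_pat
# matches tile paths such as "/osm/12/656/1582.png" (layer/z/x/y.extension),
# while _preview_pat matches "/osm/preview.html" or just "/osm/"; the leading
# slash is optional in both.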
def getTile(layer, coord, extension, ignore_cached=False):
''' Get a type string and tile binary for a given request layer tile.
This function is documented as part of TileStache's public API:
http://tilestache.org/doc/#tilestache-gettile
Arguments:
- layer: instance of Core.Layer to render.
- coord: one ModestMaps.Core.Coordinate corresponding to a single tile.
- extension: filename extension to choose response type, e.g. "png" or "jpg".
- ignore_cached: always re-render the tile, whether it's in the cache or not.
This is the main entry point, after site configuration has been loaded
and individual tiles need to be rendered.
'''
status_code, headers, body = layer.getTileResponse(coord, extension, ignore_cached)
mime = headers.get('Content-Type')
return mime, body
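# A minimal usage sketch (not part of the public API): the config path and the
# "example" layer name are hypothetical, and parseConfig()/splitPathInfo() are
# defined further down in this module.
def _example_render_tile(config_path='tilestache.cfg', path_info='/example/0/0/0.png'):
    config = parseConfig(config_path)
    layername, coord, extension = splitPathInfo(path_info)
    return getTile(config.layers[layername], coord, extension)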
def getPreview(layer):
""" Get a type string and dynamic map viewer HTML for a given layer.
"""
return 200, Headers([('Content-Type', 'text/html')]), Core._preview(layer)
def parseConfig(configHandle):
""" Parse a configuration file and return a Configuration object.
Configuration could be a Python dictionary or a file formatted as JSON. In both cases
it needs to be formatted with two sections, "cache" and "layers":
{
"cache": { ... },
"layers": {
"layer-1": { ... },
"layer-2": { ... },
...
}
}
The full path to the file is significant, used to
resolve any relative paths found in the configuration.
See the Caches module for more information on the "cache" section,
and the Core and Providers modules for more information on the
"layers" section.
"""
if isinstance(configHandle, dict):
config_dict = configHandle
dirpath = '.'
else:
scheme, host, path, p, q, f = urlparse(configHandle)
if scheme == '':
scheme = 'file'
path = realpath(path)
if scheme == 'file':
with open(path) as file:
config_dict = json_load(file)
else:
config_dict = json_load(urlopen(configHandle))
dirpath = '%s://%s%s' % (scheme, host, dirname(path).rstrip('/') + '/')
return Config.buildConfiguration(config_dict, dirpath)
parseConfigfile = parseConfig # Deprecated function
def splitPathInfo(pathinfo):
""" Converts a PATH_INFO string to layer name, coordinate, and extension parts.
Example: "/layer/0/0/0.png", leading "/" optional.
"""
if pathinfo == '/':
return None, None, None
if _pathinfo_pat.match(pathinfo or ''):
path = _pathinfo_pat.match(pathinfo)
layer, row, column, zoom, extension = [path.group(p) for p in 'lyxze']
coord = Coordinate(int(row), int(column), int(zoom))
elif _preview_pat.match(pathinfo or ''):
path = _preview_pat.match(pathinfo)
layer, extension = path.group('l'), 'html'
coord = None
else:
raise Core.KnownUnknown('Bad path: "{}". I was expecting something more like "/example/0/0/0.png"'.format(pathinfo))
return layer, coord, extension
def mergePathInfo(layer, coord, extension):
""" Converts layer name, coordinate and extension back to a PATH_INFO string.
See also splitPathInfo().
"""
z = coord.zoom
x = coord.column
y = coord.row
return '/%(layer)s/%(z)d/%(x)d/%(y)d.%(extension)s' % locals()
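# A small round-trip sketch (illustrative only) tying splitPathInfo() and
# mergePathInfo() together for a hypothetical "example" layer.
def _example_path_roundtrip():
    layername, coord, extension = splitPathInfo('/example/0/0/0.png')
    assert (layername, extension) == ('example', 'png')
    assert mergePathInfo(layername, coord, extension) == '/example/0/0/0.png'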
def requestLayer(config, path_info):
""" Return a Layer.
Requires a configuration and PATH_INFO (e.g. "/example/0/0/0.png").
Config parameter can be a file path string for a JSON configuration file
or a configuration object with 'cache', 'layers', and 'dirpath' properties.
"""
if is_string_type(config):
#
# Should be a path to a configuration file we can load;
# build a tuple key into previously-seen config objects.
#
key = hasattr(config, '__hash__') and (config, getcwd())
if key in _previous_configs:
config = _previous_configs[key]
else:
config = parseConfig(config)
if key:
_previous_configs[key] = config
else:
assert hasattr(config, 'cache'), 'Configuration object must have a cache.'
assert hasattr(config, 'layers'), 'Configuration object must have layers.'
assert hasattr(config, 'dirpath'), 'Configuration object must have a dirpath.'
# ensure that path_info is at least a single "/"
path_info = '/' + (path_info or '').lstrip('/')
if path_info == '/':
return Core.Layer(config, None, None)
layername = splitPathInfo(path_info)[0]
if layername not in config.layers:
raise Core.KnownUnknown('"{}" is not a layer I know about. Here are some that I do know about: {}.'.format(layername, ', '.join(sorted(config.layers.keys()))))
return config.layers[layername]
def requestHandler(config_hint, path_info, query_string=None):
""" Generate a mime-type and response body for a given request.
This function is documented as part of TileStache's public API:
http://tilestache.org/doc/#tilestache-requesthandler
TODO: replace this with requestHandler2() in TileStache 2.0.0.
Calls requestHandler2().
"""
status_code, headers, content = requestHandler2(config_hint, path_info, query_string)
mimetype = headers.get('Content-Type')
return mimetype, content
def requestHandler2(config_hint, path_info, query_string=None, script_name=''):
""" Generate a set of headers and response body for a given request.
TODO: Replace requestHandler() with this function in TileStache 2.0.0.
Requires a configuration and PATH_INFO (e.g. "/example/0/0/0.png").
Config_hint parameter can be a path string for a JSON configuration file
or a configuration object with 'cache', 'layers', and 'dirpath' properties.
Query string is optional, currently used for JSON callbacks.
Calls Layer.getTileResponse() to render actual tiles, and getPreview() to render preview.html.
"""
headers = Headers([])
try:
# ensure that path_info is at least a single "/"
path_info = '/' + (path_info or '').lstrip('/')
layer = requestLayer(config_hint, path_info)
query = parse_qs(query_string or '')
try:
callback = query['callback'][0]
except KeyError:
callback = None
#
# Special case for index page.
#
if path_info == '/':
mimetype, content = getattr(layer.config, 'index', ('text/plain', 'TileStache says hello.'))
return 200, Headers([('Content-Type', mimetype)]), content
coord, extension = splitPathInfo(path_info)[1:]
if extension == 'html' and coord is None:
status_code, headers, content = getPreview(layer)
elif extension.lower() in layer.redirects:
other_extension = layer.redirects[extension.lower()]
redirect_uri = script_name
redirect_uri += mergePathInfo(layer.name(), coord, other_extension)
if query_string:
redirect_uri += '?' + query_string
headers['Location'] = redirect_uri
headers['Content-Type'] = 'text/plain'
return 302, headers, 'You are being redirected to %s\n' % redirect_uri
else:
status_code, headers, content = layer.getTileResponse(coord, extension)
if layer.allowed_origin:
headers.setdefault('Access-Control-Allow-Origin', layer.allowed_origin)
if callback and 'json' in headers['Content-Type']:
headers['Content-Type'] = 'application/javascript; charset=utf-8'
content = '%s(%s)' % (callback, content)
if layer.max_cache_age is not None:
expires = datetime.utcnow() + timedelta(seconds=layer.max_cache_age)
headers.setdefault('Expires', expires.strftime('%a, %d %b %Y %H:%M:%S GMT'))
headers.setdefault('Cache-Control', 'public, max-age=%d' % layer.max_cache_age)
except Core.KnownUnknown as e:
out = StringIO()
print('Known unknown!', file=out)
print(e, file=out)
print('', file=out)
print('\n'.join(Core._rummy()), file=out)
headers['Content-Type'] = 'text/plain'
status_code, content = 500, out.getvalue().encode('ascii')
return status_code, headers, content
def cgiHandler(environ, config='./tilestache.cfg', debug=False):
""" Read environment PATH_INFO, load up configuration, talk to stdout by CGI.
This function is documented as part of TileStache's public API:
http://tilestache.org/doc/#cgi
Calls requestHandler().
Config parameter can be a file path string for a JSON configuration file
or a configuration object with 'cache', 'layers', and 'dirpath' properties.
"""
if debug:
import cgitb
cgitb.enable()
path_info = environ.get('PATH_INFO', None)
query_string = environ.get('QUERY_STRING', None)
script_name = environ.get('SCRIPT_NAME', None)
status_code, headers, content = requestHandler2(config, path_info, query_string, script_name)
headers.setdefault('Content-Length', str(len(content)))
# output the status code as a header
stdout.write('Status: %d\n' % status_code)
# output gathered headers
for k, v in headers.items():
stdout.write('%s: %s\n' % (k, v))
stdout.write('\n')
stdout.write(content)
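# A minimal CGI script sketch built on cgiHandler() (file and config names are
# illustrative, not prescribed by TileStache):
#
#   #!/usr/bin/env python
#   import os, TileStache
#   TileStache.cgiHandler(os.environ, 'tilestache.cfg', debug=True)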
class WSGITileServer:
""" Create a WSGI application that can handle requests from any server that talks WSGI.
This class is documented as part of TileStache's public API:
http://tilestache.org/doc/#wsgi
The WSGI application is an instance of this class. Example:
app = WSGITileServer('/path/to/tilestache.cfg')
werkzeug.serving.run_simple('localhost', 8080, app)
"""
def __init__(self, config, autoreload=False):
""" Initialize a callable WSGI instance.
Config parameter can be a file path string for a JSON configuration
file or a configuration object with 'cache', 'layers', and
'dirpath' properties.
Optional autoreload boolean parameter causes config to be re-read
on each request, applicable only when config is a JSON file.
"""
if is_string_type(config):
self.autoreload = autoreload
self.config_path = config
try:
self.config = parseConfig(config)
except:
print("Error loading Tilestache config:")
raise
else:
assert hasattr(config, 'cache'), 'Configuration object must have a cache.'
assert hasattr(config, 'layers'), 'Configuration object must have layers.'
assert hasattr(config, 'dirpath'), 'Configuration object must have a dirpath.'
self.autoreload = False
self.config_path = None
self.config = config
def __call__(self, environ, start_response):
"""
"""
if self.autoreload: # re-parse the config file on every request
try:
self.config = parseConfig(self.config_path)
except Exception as e:
raise Core.KnownUnknown("Error loading Tilestache config file:\n%s" % str(e))
try:
layer, coord, ext = splitPathInfo(environ['PATH_INFO'])
except Core.KnownUnknown as e:
return self._response(start_response, 400, str(e))
#
# WSGI behavior is different from CGI behavior, because we may not want
# to return a chatty rummy for likely-deployed WSGI vs. testing CGI.
#
if layer and layer not in self.config.layers:
return self._response(start_response, 404)
path_info = environ.get('PATH_INFO', None)
query_string = environ.get('QUERY_STRING', None)
script_name = environ.get('SCRIPT_NAME', None)
status_code, headers, content = requestHandler2(self.config, path_info, query_string, script_name)
return self._response(start_response, status_code, bytes(content), headers)
def _response(self, start_response, code, content='', headers=None):
"""
"""
headers = headers or Headers([])
if content:
headers.setdefault('Content-Length', str(len(content)))
start_response('%d %s' % (code, httplib.responses[code]), headers.items())
return [content]
def modpythonHandler(request):
""" Handle a mod_python request.
TODO: Upgrade to new requestHandler() so this can return non-200 HTTP.
Calls requestHandler().
Example Apache configuration for TileStache:
<Directory /home/migurski/public_html/TileStache>
AddHandler mod_python .py
PythonHandler TileStache::modpythonHandler
PythonOption config /etc/tilestache.cfg
</Directory>
Configuration options, using PythonOption directive:
- config: path to configuration file, defaults to "tilestache.cfg",
using request.filename as the current working directory.
"""
from mod_python import apache
config_path = request.get_options().get('config', 'tilestache.cfg')
config_path = realpath(pathjoin(dirname(request.filename), config_path))
path_info = request.path_info
query_string = request.args
mimetype, content = requestHandler(config_path, path_info, query_string)
request.status = apache.HTTP_OK
request.content_type = mimetype
request.set_content_length(len(content))
request.send_http_header()
request.write(content)
return apache.OK
|
|
"""Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import _check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
from .utils.fixes import astype
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
cv=None, scoring=None, exploit_incremental_learning=False,
n_jobs=1, pre_dispatch="all", verbose=0):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
Note that for classification the number of samples usually has to
be large enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<example_model_selection_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y = indexable(X, y)
# Make a list since we will be iterating multiple times over the folds
cv = list(_check_cv(cv, X, y, classifier=is_classifier(estimator)))
scorer = check_scoring(estimator, scoring=scoring)
# HACK as long as boolean indices are allowed in cv generators
if cv[0][0].dtype == bool:
new_cv = []
for i in range(len(cv)):
new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
cv = new_cv
n_max_training_samples = len(cv[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv)
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in cv for n_train_samples in train_sizes_abs)
out = np.array(out)[:, :2]
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
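# A short usage sketch (illustrative, not part of the module): assumes the
# digits dataset and SVC from scikit-learn are importable at call time.
def _example_learning_curve():
    from sklearn.datasets import load_digits
    from sklearn.svm import SVC
    digits = load_digits()
    train_sizes, train_scores, test_scores = learning_curve(
        SVC(kernel="linear"), digits.data, digits.target,
        train_sizes=np.linspace(0.1, 1.0, 5), cv=5)
    return train_sizes, train_scores.mean(axis=1), test_scores.mean(axis=1)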
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See
:ref:`examples/model_selection/plot_validation_curve.py
<example_model_selection_plot_validation_curve.py>`
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv for v in param_range)
out = np.asarray(out)[:, :2]
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
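# A matching usage sketch for validation_curve() (illustrative only), varying
# SVC's gamma parameter over a log-spaced range.
def _example_validation_curve():
    from sklearn.datasets import load_digits
    from sklearn.svm import SVC
    digits = load_digits()
    train_scores, test_scores = validation_curve(
        SVC(), digits.data, digits.target, param_name="gamma",
        param_range=np.logspace(-6, -1, 5), cv=5)
    return train_scores.mean(axis=1), test_scores.mean(axis=1)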
|
|
# Copyright (C) 2014 Narf Industries <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import random
import re
import struct
import string
def random_string(a, b):
chars = string.letters + string.digits
return ''.join(random.choice(chars) for _ in range(random.randint(a, b)))
def random_regex(a, b):
def random_regex_element():
ret = ''
chars = string.letters + string.digits
if random.random() < 0.7:
ret = random.choice(chars)
else:
ret = random_string(2, 8)
if random.random() < 0.2:
ret = '^' + ret
ret = '[%s]' % ret
if random.random() < 0.1:
ret += '*'
return ret
ret = ''.join(random_regex_element() for _ in range(random.randint(a, b)))
if random.random() < 0.1:
ret = '^' + ret
if random.random() < 0.1:
ret = ret + '$'
return ret
class Command(object):
def __init__(self, cmd, start, end, buf):
self.cmd = cmd
self.start = start
self.end = end
self.buf = buf
def pack(self):
return struct.pack('<ciiI', self.cmd, self.start, self.end, len(self.buf)) + self.buf
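# A worked example of the wire format (a sketch; the command letter 'i' is an
# assumption, not confirmed by this file): the header is little-endian
# <ciiI> -- command char, start line, end line, buffer length -- followed by
# the raw buffer bytes.
def _example_pack_command():
    cmd = Command('i', 0, 0, 'hello world\n')
    return cmd.pack()  # 13-byte header + 12 buffer bytes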
class Result(object):
def __init__(self, result, buf, magic_page, full_unpack=False):
self.result = result
self.buf = buf
self.magic_page = magic_page
self.full_unpack = full_unpack
def generate_csum(self):
ret = 0
for i in xrange(1024):
ret = ret ^ ord(self.magic_page[i * 4])
for c in self.buf:
ret = ret ^ ord(c)
return ret & 0xff
def pack(self):
if self.buf or self.full_unpack:
return struct.pack('<iII', self.result, len(self.buf), self.generate_csum()) + self.buf
else:
return struct.pack('<i', self.result)
class Support(object):
def __init__(self, magic_page):
self.magic_page = magic_page
self.lines_list = []
self.marks = [-1] * 28
self.just_marked = None
self.EXIT_SUCCESS = 0
self.EXIT_FAILURE = -1
def fixup_marks(self, address):
if address < 0:
return self.marks[-address - 1]
else:
return address
def random_range(self):
if len(self.lines_list) == 0:
return None
if self.just_marked is not None:
index = self.just_marked
self.just_marked = None
return (self.marks[index], len(self.lines_list) - 1)
r = [random.randint(0, len(self.lines_list) - 1), random.randint(0, len(self.lines_list) - 1)]
return (min(r), max(r))
def do_invalid(self, cmd):
return Result(self.EXIT_FAILURE, '', self.magic_page)
def do_insert(self, cmd, append):
start = self.fixup_marks(cmd.start)
if append:
start += 1
for tok in cmd.buf.splitlines():
if len(tok) > 256 or len(self.lines_list) > 256:
return -1
self.lines_list.insert(start, tok)
start += 1
return Result(self.EXIT_SUCCESS, '', self.magic_page)
def do_delete(self, cmd):
start = self.fixup_marks(cmd.start)
end = self.fixup_marks(cmd.end)
del self.lines_list[start:end + 1]
return Result(self.EXIT_SUCCESS, '', self.magic_page)
def do_change(self, cmd):
self.do_delete(cmd)
return self.do_insert(cmd, True)
def do_join(self, cmd):
start = self.fixup_marks(cmd.start)
end = self.fixup_marks(cmd.end)
for line in self.lines_list[start + 1:end + 1]:
self.lines_list[start] += line
del self.lines_list[start + 1:end + 1]
return Result(self.EXIT_SUCCESS, '', self.magic_page)
def do_mark(self, cmd):
start = self.fixup_marks(cmd.start)
index = struct.unpack('<I', cmd.buf)[0]
if index >= 26:
return Result(self.EXIT_FAILURE, '', self.magic_page)
self.marks[index] = start
self.just_marked = index
return Result(self.EXIT_SUCCESS, '', self.magic_page)
def do_list(self, cmd):
start = self.fixup_marks(cmd.start)
end = self.fixup_marks(cmd.end)
contents = '\n'.join(self.lines_list[start:end + 1]) + '\n'
return Result(self.EXIT_SUCCESS, contents, self.magic_page, True)
def do_num(self, cmd):
start = self.fixup_marks(cmd.start)
end = self.fixup_marks(cmd.end)
lines = [str(n + start + 1) + ' ' + s for n, s in enumerate(self.lines_list[start:end + 1])]
contents = '\n'.join(lines) + '\n'
return Result(self.EXIT_SUCCESS, contents, self.magic_page, True)
def do_global(self, cmd, invert):
start = self.fixup_marks(cmd.start)
end = self.fixup_marks(cmd.end)
lines = []
if invert:
lines = [line for line in self.lines_list[start:end + 1] if re.search(cmd.buf, line) is None]
else:
lines = [line for line in self.lines_list[start:end + 1] if re.search(cmd.buf, line) is not None]
if not lines:
return Result(self.EXIT_FAILURE, '', self.magic_page)
contents = ''
if lines:
contents = '\n'.join(lines) + '\n'
return Result(self.EXIT_SUCCESS, contents, self.magic_page, True)
def do_transform(self, cmd):
start = self.fixup_marks(cmd.start)
end = self.fixup_marks(cmd.end)
op = chr(struct.unpack('<I', cmd.buf)[0])
if op not in 'ilu':
return Result(self.EXIT_FAILURE, '', self.magic_page)
for i, line in enumerate(self.lines_list[start:end + 1]):
if op == 'i':
self.lines_list[i + start] = ''.join(c.lower() if c.isupper() else c.upper() for c in line)
elif op == 'l':
self.lines_list[i + start] = line.lower()
elif op == 'u':
self.lines_list[i + start] = line.upper()
return Result(self.EXIT_SUCCESS, '', self.magic_page)
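# An end-to-end sketch of the line-editor model above (the command letters and
# the magic page contents here are placeholders, not taken from this file).
def _example_support_session():
    magic_page = 'A' * 4096
    ed = Support(magic_page)
    ed.do_insert(Command('a', 0, 0, 'first line\nsecond line'), append=False)
    listing = ed.do_list(Command('p', 0, 1, ''))
    return listing.pack()  # <iII> result/length/checksum header + the listing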
|
|
from __future__ import absolute_import
from mock import patch
from datetime import datetime
from django.core.urlresolvers import reverse
from sentry.models import (
Activity, Environment, File, Release, ReleaseCommit, ReleaseFile, ReleaseProject, ReleaseProjectEnvironment, Repository
)
from sentry.testutils import APITestCase
class ReleaseDetailsTest(APITestCase):
def test_simple(self):
user = self.create_user(is_staff=False, is_superuser=False)
org = self.organization
org.flags.allow_joinleave = False
org.save()
team1 = self.create_team(organization=org)
team2 = self.create_team(organization=org)
project = self.create_project(teams=[team1], organization=org)
project2 = self.create_project(teams=[team2], organization=org)
release = Release.objects.create(
organization_id=org.id,
version='abcabcabc',
)
release2 = Release.objects.create(
organization_id=org.id,
version='12345678',
)
release.add_project(project)
release2.add_project(project2)
environment = Environment.objects.create(
organization_id=org.id,
name='prod',
)
environment.add_project(project)
environment.add_project(project2)
self.create_member(teams=[team1], user=user, organization=org)
self.login_as(user=user)
ReleaseProjectEnvironment.objects.create(
project_id=project.id,
release_id=release.id,
environment_id=environment.id,
new_issues_count=5,
)
ReleaseProject.objects.filter(project=project, release=release).update(new_groups=5)
url = reverse(
'sentry-api-0-organization-release-details',
kwargs={
'organization_slug': org.slug,
'version': release.version,
}
)
response = self.client.get(url)
assert response.status_code == 200, response.content
assert response.data['version'] == release.version
assert response.data['newGroups'] == 5
# no access
url = reverse(
'sentry-api-0-organization-release-details',
kwargs={
'organization_slug': org.slug,
'version': release2.version,
}
)
response = self.client.get(url)
assert response.status_code == 403
def test_multiple_projects(self):
user = self.create_user(is_staff=False, is_superuser=False)
org = self.organization
org.flags.allow_joinleave = False
org.save()
team1 = self.create_team(organization=org)
team2 = self.create_team(organization=org)
project = self.create_project(teams=[team1], organization=org)
project2 = self.create_project(teams=[team2], organization=org)
release = Release.objects.create(
organization_id=org.id,
version='abcabcabc',
)
release.add_project(project)
release.add_project(project2)
self.create_member(teams=[team1, team2], user=user, organization=org)
self.login_as(user=user)
url = reverse(
'sentry-api-0-organization-release-details',
kwargs={
'organization_slug': org.slug,
'version': release.version,
}
)
response = self.client.get(url)
assert response.status_code == 200, response.content
class UpdateReleaseDetailsTest(APITestCase):
@patch('sentry.tasks.commits.fetch_commits')
def test_simple(self, mock_fetch_commits):
user = self.create_user(is_staff=False, is_superuser=False)
org = self.organization
org.flags.allow_joinleave = False
org.save()
repo = Repository.objects.create(
organization_id=org.id,
name='example/example',
provider='dummy',
)
repo2 = Repository.objects.create(
organization_id=org.id,
name='example/example2',
provider='dummy',
)
team1 = self.create_team(organization=org)
team2 = self.create_team(organization=org)
project = self.create_project(teams=[team1], organization=org)
project2 = self.create_project(teams=[team2], organization=org)
base_release = Release.objects.create(
organization_id=org.id,
version='000000000',
)
base_release.add_project(project)
release = Release.objects.create(
organization_id=org.id,
version='abcabcabc',
)
release2 = Release.objects.create(
organization_id=org.id,
version='12345678',
)
release.add_project(project)
release2.add_project(project2)
self.create_member(teams=[team1], user=user, organization=org)
self.login_as(user=user)
url = reverse(
'sentry-api-0-organization-release-details',
kwargs={
'organization_slug': org.slug,
'version': base_release.version,
}
)
self.client.put(
url, {
'ref':
'master',
'headCommits': [
{
'currentId': '0' * 40,
'repository': repo.name
},
{
'currentId': '0' * 40,
'repository': repo2.name
},
],
}
)
url = reverse(
'sentry-api-0-organization-release-details',
kwargs={
'organization_slug': org.slug,
'version': release.version,
}
)
response = self.client.put(
url, {
'ref':
'master',
'refs': [
{
'commit': 'a' * 40,
'repository': repo.name
},
{
'commit': 'b' * 40,
'repository': repo2.name
},
],
}
)
mock_fetch_commits.apply_async.assert_called_with(
kwargs={
'release_id':
release.id,
'user_id':
user.id,
'refs': [
{
'commit': 'a' * 40,
'repository': repo.name
},
{
'commit': 'b' * 40,
'repository': repo2.name
},
],
'prev_release_id':
base_release.id,
}
)
assert response.status_code == 200, response.content
assert response.data['version'] == release.version
release = Release.objects.get(id=release.id)
assert release.ref == 'master'
# no access
url = reverse(
'sentry-api-0-organization-release-details',
kwargs={
'organization_slug': org.slug,
'version': release2.version,
}
)
response = self.client.put(url, {'ref': 'master'})
assert response.status_code == 403
@patch('sentry.tasks.commits.fetch_commits')
def test_deprecated_head_commits(self, mock_fetch_commits):
user = self.create_user(is_staff=False, is_superuser=False)
org = self.organization
org.flags.allow_joinleave = False
org.save()
repo = Repository.objects.create(
organization_id=org.id,
name='example/example',
provider='dummy',
)
repo2 = Repository.objects.create(
organization_id=org.id,
name='example/example2',
provider='dummy',
)
team1 = self.create_team(organization=org)
team2 = self.create_team(organization=org)
project = self.create_project(teams=[team1], organization=org)
project2 = self.create_project(teams=[team2], organization=org)
base_release = Release.objects.create(
organization_id=org.id,
version='000000000',
)
base_release.add_project(project)
release = Release.objects.create(
organization_id=org.id,
version='abcabcabc',
)
release2 = Release.objects.create(
organization_id=org.id,
version='12345678',
)
release.add_project(project)
release2.add_project(project2)
self.create_member(teams=[team1], user=user, organization=org)
self.login_as(user=user)
url = reverse(
'sentry-api-0-organization-release-details',
kwargs={
'organization_slug': org.slug,
'version': base_release.version,
}
)
self.client.put(
url, {
'ref':
'master',
'headCommits': [
{
'currentId': '0' * 40,
'repository': repo.name
},
{
'currentId': '0' * 40,
'repository': repo2.name
},
],
}
)
url = reverse(
'sentry-api-0-organization-release-details',
kwargs={
'organization_slug': org.slug,
'version': release.version,
}
)
response = self.client.put(
url, {
'ref':
'master',
'headCommits': [
{
'currentId': 'a' * 40,
'repository': repo.name
},
{
'currentId': 'b' * 40,
'repository': repo2.name
},
],
}
)
mock_fetch_commits.apply_async.assert_called_with(
kwargs={
'release_id':
release.id,
'user_id':
user.id,
'refs': [
{
'commit': 'a' * 40,
'previousCommit': None,
'repository': repo.name
},
{
'commit': 'b' * 40,
'previousCommit': None,
'repository': repo2.name
},
],
'prev_release_id':
base_release.id,
}
)
assert response.status_code == 200, response.content
assert response.data['version'] == release.version
release = Release.objects.get(id=release.id)
assert release.ref == 'master'
# no access
url = reverse(
'sentry-api-0-organization-release-details',
kwargs={
'organization_slug': org.slug,
'version': release2.version,
}
)
response = self.client.put(url, {'ref': 'master'})
assert response.status_code == 403
def test_commits(self):
user = self.create_user(is_staff=False, is_superuser=False)
org = self.organization
org.flags.allow_joinleave = False
org.save()
team = self.create_team(organization=org)
project = self.create_project(teams=[team], organization=org)
release = Release.objects.create(
organization_id=org.id,
version='abcabcabc',
)
release.add_project(project)
self.create_member(teams=[team], user=user, organization=org)
self.login_as(user=user)
url = reverse(
'sentry-api-0-organization-release-details',
kwargs={
'organization_slug': org.slug,
'version': release.version,
}
)
response = self.client.put(
url, data={
'commits': [
{
'id': 'a' * 40
},
{
'id': 'b' * 40
},
],
}
)
assert response.status_code == 200, (response.status_code, response.content)
rc_list = list(
ReleaseCommit.objects.filter(
release=release,
).select_related('commit', 'commit__author').order_by('order')
)
assert len(rc_list) == 2
for rc in rc_list:
assert rc.organization_id == org.id
def test_activity_generation(self):
user = self.create_user(is_staff=False, is_superuser=False)
org = self.organization
org.flags.allow_joinleave = False
org.save()
team = self.create_team(organization=org)
project = self.create_project(teams=[team], organization=org)
release = Release.objects.create(
organization_id=org.id,
version='abcabcabc',
)
release.add_project(project)
self.create_member(teams=[team], user=user, organization=org)
self.login_as(user=user)
url = reverse(
'sentry-api-0-organization-release-details',
kwargs={
'organization_slug': org.slug,
'version': release.version,
}
)
response = self.client.put(
url, data={
'dateReleased': datetime.utcnow().isoformat() + 'Z',
}
)
assert response.status_code == 200, (response.status_code, response.content)
release = Release.objects.get(id=release.id)
assert release.date_released
activity = Activity.objects.filter(
type=Activity.RELEASE,
project=project,
ident=release.version,
)
assert activity.exists()
def test_activity_generation_long_release(self):
user = self.create_user(is_staff=False, is_superuser=False)
org = self.organization
org.flags.allow_joinleave = False
org.save()
team = self.create_team(organization=org)
project = self.create_project(teams=[team], organization=org)
release = Release.objects.create(
organization_id=org.id,
version='x' * 65,
)
release.add_project(project)
self.create_member(teams=[team], user=user, organization=org)
self.login_as(user=user)
url = reverse(
'sentry-api-0-organization-release-details',
kwargs={
'organization_slug': org.slug,
'version': release.version,
}
)
response = self.client.put(
url, data={
'dateReleased': datetime.utcnow().isoformat() + 'Z',
}
)
assert response.status_code == 200, (response.status_code, response.content)
release = Release.objects.get(id=release.id)
assert release.date_released
activity = Activity.objects.filter(
type=Activity.RELEASE,
project=project,
ident=release.version[:64],
)
assert activity.exists()
class ReleaseDeleteTest(APITestCase):
def test_simple(self):
user = self.create_user(is_staff=False, is_superuser=False)
org = self.organization
org.flags.allow_joinleave = False
org.save()
team = self.create_team(organization=org)
project = self.create_project(teams=[team], organization=org)
release = Release.objects.create(
organization_id=org.id,
version='abcabcabc',
)
release.add_project(project)
self.create_member(teams=[team], user=user, organization=org)
self.login_as(user=user)
release_file = ReleaseFile.objects.create(
organization_id=project.organization_id,
release=release,
file=File.objects.create(
name='application.js',
type='release.file',
),
name='http://example.com/application.js'
)
url = reverse(
'sentry-api-0-organization-release-details',
kwargs={
'organization_slug': org.slug,
'version': release.version,
}
)
response = self.client.delete(url)
assert response.status_code == 204, response.content
assert not Release.objects.filter(id=release.id).exists()
assert not ReleaseFile.objects.filter(id=release_file.id).exists()
def test_existing_group(self):
user = self.create_user(is_staff=False, is_superuser=False)
org = self.organization
org.flags.allow_joinleave = False
org.save()
team = self.create_team(organization=org)
project = self.create_project(teams=[team], organization=org)
release = Release.objects.create(
organization_id=org.id,
version='abcabcabc',
)
release.add_project(project)
self.create_group(first_release=release)
self.create_member(teams=[team], user=user, organization=org)
self.login_as(user=user)
url = reverse(
'sentry-api-0-organization-release-details',
kwargs={
'organization_slug': org.slug,
'version': release.version,
}
)
response = self.client.delete(url)
assert response.status_code == 400, response.content
assert Release.objects.filter(id=release.id).exists()
def test_bad_repo_name(self):
user = self.create_user(is_staff=False, is_superuser=False)
org = self.create_organization()
org.flags.allow_joinleave = False
org.save()
team = self.create_team(organization=org)
project = self.create_project(name='foo', organization=org, teams=[team])
release = Release.objects.create(
organization_id=org.id,
version='abcabcabc',
)
release.add_project(project)
self.create_member(teams=[team], user=user, organization=org)
self.login_as(user=user)
url = reverse(
'sentry-api-0-organization-release-details',
kwargs={
'organization_slug': org.slug,
'version': release.version,
}
)
response = self.client.put(
url,
data={
'version': '1.2.1',
'projects': [project.slug],
'refs': [{
'repository': 'not_a_repo',
'commit': 'a' * 40,
}]
}
)
assert response.status_code == 400
assert response.data == {'refs': [u'Invalid repository names: not_a_repo']}
def test_bad_commit_list(self):
user = self.create_user(is_staff=False, is_superuser=False)
org = self.create_organization()
org.flags.allow_joinleave = False
org.save()
team = self.create_team(organization=org)
project = self.create_project(name='foo', organization=org, teams=[team])
Repository.objects.create(organization_id=org.id, name='a_repo')
release = Release.objects.create(
organization_id=org.id,
version='abcabcabc',
)
release.add_project(project)
self.create_member(teams=[team], user=user, organization=org)
self.login_as(user=user)
url = reverse(
'sentry-api-0-organization-release-details',
kwargs={
'organization_slug': org.slug,
'version': release.version,
}
)
response = self.client.put(
url,
data={
'version': '1.2.1',
'projects': [project.slug],
'commits': [{
'repository': 'a_repo',
}]
}
)
assert response.status_code == 400
assert response.data == {'commits': ['id: This field is required.']}
|
|
from decimal import Decimal
import boto3
from boto3.dynamodb.conditions import Key
from botocore.exceptions import ClientError
import sure # noqa # pylint: disable=unused-import
import pytest
from moto import mock_dynamodb2
from uuid import uuid4
@mock_dynamodb2
def test_get_item_without_range_key_boto3():
client = boto3.resource("dynamodb", region_name="us-east-1")
table = client.create_table(
TableName="messages",
KeySchema=[
{"AttributeName": "id", "KeyType": "HASH"},
{"AttributeName": "subject", "KeyType": "RANGE"},
],
AttributeDefinitions=[
{"AttributeName": "id", "AttributeType": "S"},
{"AttributeName": "subject", "AttributeType": "S"},
],
ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 5},
)
hash_key = "3241526475"
range_key = "1234567890987"
table.put_item(Item={"id": hash_key, "subject": range_key})
with pytest.raises(ClientError) as ex:
table.get_item(Key={"id": hash_key})
ex.value.response["Error"]["Code"].should.equal("ValidationException")
ex.value.response["Error"]["Message"].should.equal("Validation Exception")
@mock_dynamodb2
def test_query_filter_boto3():
table_schema = {
"KeySchema": [
{"AttributeName": "pk", "KeyType": "HASH"},
{"AttributeName": "sk", "KeyType": "RANGE"},
],
"AttributeDefinitions": [
{"AttributeName": "pk", "AttributeType": "S"},
{"AttributeName": "sk", "AttributeType": "S"},
],
}
dynamodb = boto3.resource("dynamodb", region_name="us-east-1")
table = dynamodb.create_table(
TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema
)
for i in range(0, 3):
table.put_item(
Item={"pk": "pk".format(i), "sk": "sk-{}".format(i),}
)
res = table.query(KeyConditionExpression=Key("pk").eq("pk"))
res["Items"].should.have.length_of(3)
res = table.query(KeyConditionExpression=Key("pk").eq("pk") & Key("sk").lt("sk-1"))
res["Items"].should.have.length_of(1)
res["Items"].should.equal([{"pk": "pk", "sk": "sk-0"}])
res = table.query(KeyConditionExpression=Key("pk").eq("pk") & Key("sk").lte("sk-1"))
res["Items"].should.have.length_of(2)
res["Items"].should.equal([{"pk": "pk", "sk": "sk-0"}, {"pk": "pk", "sk": "sk-1"}])
res = table.query(KeyConditionExpression=Key("pk").eq("pk") & Key("sk").gt("sk-1"))
res["Items"].should.have.length_of(1)
res["Items"].should.equal([{"pk": "pk", "sk": "sk-2"}])
res = table.query(KeyConditionExpression=Key("pk").eq("pk") & Key("sk").gte("sk-1"))
res["Items"].should.have.length_of(2)
res["Items"].should.equal([{"pk": "pk", "sk": "sk-1"}, {"pk": "pk", "sk": "sk-2"}])
@mock_dynamodb2
def test_boto3_conditions():
dynamodb = boto3.resource("dynamodb", region_name="us-east-1")
# Create the DynamoDB table.
table = dynamodb.create_table(
TableName="users",
KeySchema=[
{"AttributeName": "forum_name", "KeyType": "HASH"},
{"AttributeName": "subject", "KeyType": "RANGE"},
],
AttributeDefinitions=[
{"AttributeName": "forum_name", "AttributeType": "S"},
{"AttributeName": "subject", "AttributeType": "S"},
],
ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5},
)
table = dynamodb.Table("users")
table.put_item(Item={"forum_name": "the-key", "subject": "123"})
table.put_item(Item={"forum_name": "the-key", "subject": "456"})
table.put_item(Item={"forum_name": "the-key", "subject": "789"})
# Test a query returning all items
results = table.query(
KeyConditionExpression=Key("forum_name").eq("the-key") & Key("subject").gt("1"),
ScanIndexForward=True,
)
expected = ["123", "456", "789"]
for index, item in enumerate(results["Items"]):
item["subject"].should.equal(expected[index])
# Return all items again, but in reverse
results = table.query(
KeyConditionExpression=Key("forum_name").eq("the-key") & Key("subject").gt("1"),
ScanIndexForward=False,
)
for index, item in enumerate(reversed(results["Items"])):
item["subject"].should.equal(expected[index])
# Filter the subjects to only return some of the results
results = table.query(
KeyConditionExpression=Key("forum_name").eq("the-key")
& Key("subject").gt("234"),
ConsistentRead=True,
)
results["Count"].should.equal(2)
# Filter to return no results
results = table.query(
KeyConditionExpression=Key("forum_name").eq("the-key")
& Key("subject").gt("9999")
)
results["Count"].should.equal(0)
results = table.query(
KeyConditionExpression=Key("forum_name").eq("the-key")
& Key("subject").begins_with("12")
)
results["Count"].should.equal(1)
results = table.query(
KeyConditionExpression=Key("subject").begins_with("7")
& Key("forum_name").eq("the-key")
)
results["Count"].should.equal(1)
results = table.query(
KeyConditionExpression=Key("forum_name").eq("the-key")
& Key("subject").between("567", "890")
)
results["Count"].should.equal(1)
@mock_dynamodb2
def test_boto3_conditions_ignorecase():
dynamodb = boto3.client("dynamodb", region_name="us-east-1")
# Create the DynamoDB table.
dynamodb.create_table(
TableName="users",
KeySchema=[
{"AttributeName": "forum_name", "KeyType": "HASH"},
{"AttributeName": "subject", "KeyType": "RANGE"},
],
AttributeDefinitions=[
{"AttributeName": "forum_name", "AttributeType": "S"},
{"AttributeName": "subject", "AttributeType": "S"},
],
ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5},
)
dynamodb.put_item(
TableName="users",
Item={"forum_name": {"S": "the-key"}, "subject": {"S": "100"}},
)
dynamodb.put_item(
TableName="users",
Item={"forum_name": {"S": "the-key"}, "subject": {"S": "199"}},
)
dynamodb.put_item(
TableName="users",
Item={"forum_name": {"S": "the-key"}, "subject": {"S": "250"}},
)
between_expressions = [
"BETWEEN :start AND :end",
"between :start and :end",
"Between :start and :end",
"between :start AnD :end",
]
for expr in between_expressions:
results = dynamodb.query(
TableName="users",
KeyConditionExpression="forum_name = :forum_name and subject {}".format(
expr
),
ExpressionAttributeValues={
":forum_name": {"S": "the-key"},
":start": {"S": "100"},
":end": {"S": "200"},
},
)
results["Count"].should.equal(2)
with pytest.raises(ClientError) as ex:
dynamodb.query(
TableName="users",
KeyConditionExpression="forum_name = :forum_name and BegIns_WiTh(subject, :subject )",
ExpressionAttributeValues={
":forum_name": {"S": "the-key"},
":subject": {"S": "1"},
},
)
ex.value.response["Error"]["Code"].should.equal("ValidationException")
ex.value.response["Error"]["Message"].should.equal(
"Invalid KeyConditionExpression: Invalid function name; function: BegIns_WiTh"
)
@mock_dynamodb2
def test_boto3_put_item_with_conditions():
import botocore
dynamodb = boto3.resource("dynamodb", region_name="us-east-1")
# Create the DynamoDB table.
table = dynamodb.create_table(
TableName="users",
KeySchema=[
{"AttributeName": "forum_name", "KeyType": "HASH"},
{"AttributeName": "subject", "KeyType": "RANGE"},
],
AttributeDefinitions=[
{"AttributeName": "forum_name", "AttributeType": "S"},
{"AttributeName": "subject", "AttributeType": "S"},
],
ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5},
)
table = dynamodb.Table("users")
table.put_item(Item={"forum_name": "the-key", "subject": "123"})
table.put_item(
Item={"forum_name": "the-key-2", "subject": "1234"},
ConditionExpression="attribute_not_exists(forum_name) AND attribute_not_exists(subject)",
)
table.put_item.when.called_with(
Item={"forum_name": "the-key", "subject": "123"},
ConditionExpression="attribute_not_exists(forum_name) AND attribute_not_exists(subject)",
).should.throw(botocore.exceptions.ClientError)
table.put_item.when.called_with(
Item={"forum_name": "bogus-key", "subject": "bogus", "test": "123"},
ConditionExpression="attribute_exists(forum_name) AND attribute_exists(subject)",
).should.throw(botocore.exceptions.ClientError)
def _create_table_with_range_key():
dynamodb = boto3.resource("dynamodb", region_name="us-east-1")
# Create the DynamoDB table.
dynamodb.create_table(
TableName="users",
KeySchema=[
{"AttributeName": "forum_name", "KeyType": "HASH"},
{"AttributeName": "subject", "KeyType": "RANGE"},
],
GlobalSecondaryIndexes=[
{
"IndexName": "TestGSI",
"KeySchema": [
{"AttributeName": "username", "KeyType": "HASH"},
{"AttributeName": "created", "KeyType": "RANGE"},
],
"Projection": {"ProjectionType": "ALL"},
"ProvisionedThroughput": {
"ReadCapacityUnits": 5,
"WriteCapacityUnits": 5,
},
}
],
AttributeDefinitions=[
{"AttributeName": "forum_name", "AttributeType": "S"},
{"AttributeName": "subject", "AttributeType": "S"},
{"AttributeName": "username", "AttributeType": "S"},
{"AttributeName": "created", "AttributeType": "N"},
],
ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5},
)
return dynamodb.Table("users")
@mock_dynamodb2
def test_update_item_range_key_set():
table = _create_table_with_range_key()
table.put_item(
Item={
"forum_name": "the-key",
"subject": "123",
"username": "johndoe",
"created": Decimal("3"),
}
)
item_key = {"forum_name": "the-key", "subject": "123"}
table.update_item(
Key=item_key,
AttributeUpdates={
"username": {"Action": "PUT", "Value": "johndoe2"},
"created": {"Action": "PUT", "Value": Decimal("4")},
"mapfield": {"Action": "PUT", "Value": {"key": "value"}},
},
)
returned_item = dict(
(k, str(v) if isinstance(v, Decimal) else v)
for k, v in table.get_item(Key=item_key)["Item"].items()
)
dict(returned_item).should.equal(
{
"username": "johndoe2",
"forum_name": "the-key",
"subject": "123",
"created": "4",
"mapfield": {"key": "value"},
}
)
@mock_dynamodb2
def test_update_item_does_not_exist_is_created():
table = _create_table_with_range_key()
item_key = {"forum_name": "the-key", "subject": "123"}
result = table.update_item(
Key=item_key,
AttributeUpdates={
"username": {"Action": "PUT", "Value": "johndoe2"},
"created": {"Action": "PUT", "Value": Decimal("4")},
"mapfield": {"Action": "PUT", "Value": {"key": "value"}},
},
ReturnValues="ALL_OLD",
)
assert not result.get("Attributes")
returned_item = dict(
(k, str(v) if isinstance(v, Decimal) else v)
for k, v in table.get_item(Key=item_key)["Item"].items()
)
dict(returned_item).should.equal(
{
"username": "johndoe2",
"forum_name": "the-key",
"subject": "123",
"created": "4",
"mapfield": {"key": "value"},
}
)
@mock_dynamodb2
def test_update_item_add_value():
table = _create_table_with_range_key()
table.put_item(
Item={"forum_name": "the-key", "subject": "123", "numeric_field": Decimal("-1")}
)
item_key = {"forum_name": "the-key", "subject": "123"}
table.update_item(
Key=item_key,
AttributeUpdates={"numeric_field": {"Action": "ADD", "Value": Decimal("2")}},
)
returned_item = dict(
(k, str(v) if isinstance(v, Decimal) else v)
for k, v in table.get_item(Key=item_key)["Item"].items()
)
dict(returned_item).should.equal(
{"numeric_field": "1", "forum_name": "the-key", "subject": "123"}
)
@mock_dynamodb2
def test_update_item_add_value_string_set():
table = _create_table_with_range_key()
table.put_item(
Item={
"forum_name": "the-key",
"subject": "123",
"string_set": set(["str1", "str2"]),
}
)
item_key = {"forum_name": "the-key", "subject": "123"}
table.update_item(
Key=item_key,
AttributeUpdates={"string_set": {"Action": "ADD", "Value": set(["str3"])}},
)
returned_item = dict(
(k, str(v) if isinstance(v, Decimal) else v)
for k, v in table.get_item(Key=item_key)["Item"].items()
)
dict(returned_item).should.equal(
{
"string_set": set(["str1", "str2", "str3"]),
"forum_name": "the-key",
"subject": "123",
}
)
@mock_dynamodb2
def test_update_item_delete_value_string_set():
table = _create_table_with_range_key()
table.put_item(
Item={
"forum_name": "the-key",
"subject": "123",
"string_set": set(["str1", "str2"]),
}
)
item_key = {"forum_name": "the-key", "subject": "123"}
table.update_item(
Key=item_key,
AttributeUpdates={"string_set": {"Action": "DELETE", "Value": set(["str2"])}},
)
returned_item = dict(
(k, str(v) if isinstance(v, Decimal) else v)
for k, v in table.get_item(Key=item_key)["Item"].items()
)
dict(returned_item).should.equal(
{"string_set": set(["str1"]), "forum_name": "the-key", "subject": "123"}
)
@mock_dynamodb2
def test_update_item_add_value_does_not_exist_is_created():
table = _create_table_with_range_key()
item_key = {"forum_name": "the-key", "subject": "123"}
table.update_item(
Key=item_key,
AttributeUpdates={"numeric_field": {"Action": "ADD", "Value": Decimal("2")}},
)
returned_item = dict(
(k, str(v) if isinstance(v, Decimal) else v)
for k, v in table.get_item(Key=item_key)["Item"].items()
)
dict(returned_item).should.equal(
{"numeric_field": "2", "forum_name": "the-key", "subject": "123"}
)
@mock_dynamodb2
def test_update_item_with_expression():
table = _create_table_with_range_key()
table.put_item(Item={"forum_name": "the-key", "subject": "123", "field": "1"})
item_key = {"forum_name": "the-key", "subject": "123"}
table.update_item(
Key=item_key,
UpdateExpression="SET field = :field_value",
ExpressionAttributeValues={":field_value": 2},
)
dict(table.get_item(Key=item_key)["Item"]).should.equal(
{"field": Decimal("2"), "forum_name": "the-key", "subject": "123"}
)
table.update_item(
Key=item_key,
UpdateExpression="SET field = :field_value",
ExpressionAttributeValues={":field_value": 3},
)
dict(table.get_item(Key=item_key)["Item"]).should.equal(
{"field": Decimal("3"), "forum_name": "the-key", "subject": "123"}
)
def assert_failure_due_to_key_not_in_schema(func, **kwargs):
with pytest.raises(ClientError) as ex:
func(**kwargs)
ex.value.response["Error"]["Code"].should.equal("ValidationException")
ex.value.response["Error"]["Message"].should.equal(
"The provided key element does not match the schema"
)
@mock_dynamodb2
def test_update_item_add_with_expression():
table = _create_table_with_range_key()
item_key = {"forum_name": "the-key", "subject": "123"}
current_item = {
"forum_name": "the-key",
"subject": "123",
"str_set": {"item1", "item2", "item3"},
"num_set": {1, 2, 3},
"num_val": 6,
}
# Put an entry in the DB to play with
table.put_item(Item=current_item)
# Update item to add a string value to a string set
table.update_item(
Key=item_key,
UpdateExpression="ADD str_set :v",
ExpressionAttributeValues={":v": {"item4"}},
)
current_item["str_set"] = current_item["str_set"].union({"item4"})
assert dict(table.get_item(Key=item_key)["Item"]) == current_item
# Update item to add a string value to a non-existing set
table.update_item(
Key=item_key,
UpdateExpression="ADD non_existing_str_set :v",
ExpressionAttributeValues={":v": {"item4"}},
)
current_item["non_existing_str_set"] = {"item4"}
assert dict(table.get_item(Key=item_key)["Item"]) == current_item
# Update item to add a num value to a num set
table.update_item(
Key=item_key,
UpdateExpression="ADD num_set :v",
ExpressionAttributeValues={":v": {6}},
)
current_item["num_set"] = current_item["num_set"].union({6})
assert dict(table.get_item(Key=item_key)["Item"]) == current_item
# Update item to add a value to a number value
table.update_item(
Key=item_key,
UpdateExpression="ADD num_val :v",
ExpressionAttributeValues={":v": 20},
)
current_item["num_val"] = current_item["num_val"] + 20
assert dict(table.get_item(Key=item_key)["Item"]) == current_item
# Attempt to add a number value to a string set, should raise Client Error
table.update_item.when.called_with(
Key=item_key,
UpdateExpression="ADD str_set :v",
ExpressionAttributeValues={":v": 20},
).should.have.raised(ClientError)
assert dict(table.get_item(Key=item_key)["Item"]) == current_item
# Attempt to add a number set to the string set, should raise a ClientError
table.update_item.when.called_with(
Key=item_key,
UpdateExpression="ADD str_set :v",
ExpressionAttributeValues={":v": {20}},
).should.have.raised(ClientError)
assert dict(table.get_item(Key=item_key)["Item"]) == current_item
# Attempt to update with a bad expression
table.update_item.when.called_with(
Key=item_key, UpdateExpression="ADD str_set bad_value"
).should.have.raised(ClientError)
# Attempt to add a string value instead of a string set
table.update_item.when.called_with(
Key=item_key,
UpdateExpression="ADD str_set :v",
ExpressionAttributeValues={":v": "new_string"},
).should.have.raised(ClientError)
@mock_dynamodb2
def test_update_item_add_with_nested_sets():
table = _create_table_with_range_key()
item_key = {"forum_name": "the-key", "subject": "123"}
current_item = {
"forum_name": "the-key",
"subject": "123",
"nested": {"str_set": {"item1", "item2", "item3"}},
}
# Put an entry in the DB to play with
table.put_item(Item=current_item)
# Update item to add a string value to a nested string set
table.update_item(
Key=item_key,
UpdateExpression="ADD nested.str_set :v",
ExpressionAttributeValues={":v": {"item4"}},
)
current_item["nested"]["str_set"] = current_item["nested"]["str_set"].union(
{"item4"}
)
assert dict(table.get_item(Key=item_key)["Item"]) == current_item
# Update item to add a string value to a non-existing set
# Should raise
table.update_item(
Key=item_key,
UpdateExpression="ADD #ns.#ne :v",
ExpressionAttributeNames={"#ns": "nested", "#ne": "non_existing_str_set"},
ExpressionAttributeValues={":v": {"new_item"}},
)
current_item["nested"]["non_existing_str_set"] = {"new_item"}
assert dict(table.get_item(Key=item_key)["Item"]) == current_item
@mock_dynamodb2
def test_update_item_delete_with_nested_sets():
table = _create_table_with_range_key()
item_key = {"forum_name": "the-key", "subject": "123"}
current_item = {
"forum_name": "the-key",
"subject": "123",
"nested": {"str_set": {"item1", "item2", "item3"}},
}
# Put an entry in the DB to play with
table.put_item(Item=current_item)
# Update item to add a string value to a nested string set
table.update_item(
Key=item_key,
UpdateExpression="DELETE nested.str_set :v",
ExpressionAttributeValues={":v": {"item3"}},
)
current_item["nested"]["str_set"] = current_item["nested"]["str_set"].difference(
{"item3"}
)
dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item)
@mock_dynamodb2
def test_update_item_delete_with_expression():
table = _create_table_with_range_key()
item_key = {"forum_name": "the-key", "subject": "123"}
current_item = {
"forum_name": "the-key",
"subject": "123",
"str_set": {"item1", "item2", "item3"},
"num_set": {1, 2, 3},
"num_val": 6,
}
# Put an entry in the DB to play with
table.put_item(Item=current_item)
# Update item to delete a string value from a string set
table.update_item(
Key=item_key,
UpdateExpression="DELETE str_set :v",
ExpressionAttributeValues={":v": {"item2"}},
)
current_item["str_set"] = current_item["str_set"].difference({"item2"})
dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item)
# Update item to delete a num value from a num set
table.update_item(
Key=item_key,
UpdateExpression="DELETE num_set :v",
ExpressionAttributeValues={":v": {2}},
)
current_item["num_set"] = current_item["num_set"].difference({2})
dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item)
# Try to delete on a number, this should fail
table.update_item.when.called_with(
Key=item_key,
UpdateExpression="DELETE num_val :v",
ExpressionAttributeValues={":v": 20},
).should.have.raised(ClientError)
dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item)
# Try to delete a string set from a number set
table.update_item.when.called_with(
Key=item_key,
UpdateExpression="DELETE num_set :v",
ExpressionAttributeValues={":v": {"del_str"}},
).should.have.raised(ClientError)
dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item)
# Attempt to update with a bad expression
table.update_item.when.called_with(
Key=item_key, UpdateExpression="DELETE num_val badvalue"
).should.have.raised(ClientError)
@mock_dynamodb2
def test_boto3_query_gsi_range_comparison():
table = _create_table_with_range_key()
table.put_item(
Item={
"forum_name": "the-key",
"subject": "123",
"username": "johndoe",
"created": 3,
}
)
table.put_item(
Item={
"forum_name": "the-key",
"subject": "456",
"username": "johndoe",
"created": 1,
}
)
table.put_item(
Item={
"forum_name": "the-key",
"subject": "789",
"username": "johndoe",
"created": 2,
}
)
table.put_item(
Item={
"forum_name": "the-key",
"subject": "159",
"username": "janedoe",
"created": 2,
}
)
table.put_item(
Item={
"forum_name": "the-key",
"subject": "601",
"username": "janedoe",
"created": 5,
}
)
# Test a query returning all johndoe items
results = table.query(
KeyConditionExpression=Key("username").eq("johndoe") & Key("created").gt(0),
ScanIndexForward=True,
IndexName="TestGSI",
)
expected = ["456", "789", "123"]
for index, item in enumerate(results["Items"]):
item["subject"].should.equal(expected[index])
# Return all johndoe items again, but in reverse
results = table.query(
KeyConditionExpression=Key("username").eq("johndoe") & Key("created").gt(0),
ScanIndexForward=False,
IndexName="TestGSI",
)
for index, item in enumerate(reversed(results["Items"])):
item["subject"].should.equal(expected[index])
# Filter the creation to only return some of the results
# And reverse order of hash + range key
results = table.query(
KeyConditionExpression=Key("created").gt(1) & Key("username").eq("johndoe"),
ConsistentRead=True,
IndexName="TestGSI",
)
results["Count"].should.equal(2)
# Filter to return no results
results = table.query(
KeyConditionExpression=Key("username").eq("janedoe") & Key("created").gt(9),
IndexName="TestGSI",
)
results["Count"].should.equal(0)
results = table.query(
KeyConditionExpression=Key("username").eq("janedoe") & Key("created").eq(5),
IndexName="TestGSI",
)
results["Count"].should.equal(1)
# Test range key sorting
results = table.query(
KeyConditionExpression=Key("username").eq("johndoe") & Key("created").gt(0),
IndexName="TestGSI",
)
expected = [Decimal("1"), Decimal("2"), Decimal("3")]
for index, item in enumerate(results["Items"]):
item["created"].should.equal(expected[index])
@mock_dynamodb2
def test_boto3_update_table_throughput():
dynamodb = boto3.resource("dynamodb", region_name="us-east-1")
# Create the DynamoDB table.
table = dynamodb.create_table(
TableName="users",
KeySchema=[
{"AttributeName": "forum_name", "KeyType": "HASH"},
{"AttributeName": "subject", "KeyType": "RANGE"},
],
AttributeDefinitions=[
{"AttributeName": "forum_name", "AttributeType": "S"},
{"AttributeName": "subject", "AttributeType": "S"},
],
ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 6},
)
table = dynamodb.Table("users")
table.provisioned_throughput["ReadCapacityUnits"].should.equal(5)
table.provisioned_throughput["WriteCapacityUnits"].should.equal(6)
table.update(
ProvisionedThroughput={"ReadCapacityUnits": 10, "WriteCapacityUnits": 11}
)
table = dynamodb.Table("users")
table.provisioned_throughput["ReadCapacityUnits"].should.equal(10)
table.provisioned_throughput["WriteCapacityUnits"].should.equal(11)
@mock_dynamodb2
def test_boto3_update_table_gsi_throughput():
dynamodb = boto3.resource("dynamodb", region_name="us-east-1")
# Create the DynamoDB table.
table = dynamodb.create_table(
TableName="users",
KeySchema=[
{"AttributeName": "forum_name", "KeyType": "HASH"},
{"AttributeName": "subject", "KeyType": "RANGE"},
],
GlobalSecondaryIndexes=[
{
"IndexName": "TestGSI",
"KeySchema": [
{"AttributeName": "username", "KeyType": "HASH"},
{"AttributeName": "created", "KeyType": "RANGE"},
],
"Projection": {"ProjectionType": "ALL"},
"ProvisionedThroughput": {
"ReadCapacityUnits": 3,
"WriteCapacityUnits": 4,
},
}
],
AttributeDefinitions=[
{"AttributeName": "forum_name", "AttributeType": "S"},
{"AttributeName": "subject", "AttributeType": "S"},
{"AttributeName": "username", "AttributeType": "S"},
{"AttributeName": "created", "AttributeType": "S"},
],
ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 6},
)
table = dynamodb.Table("users")
gsi_throughput = table.global_secondary_indexes[0]["ProvisionedThroughput"]
gsi_throughput["ReadCapacityUnits"].should.equal(3)
gsi_throughput["WriteCapacityUnits"].should.equal(4)
table.provisioned_throughput["ReadCapacityUnits"].should.equal(5)
table.provisioned_throughput["WriteCapacityUnits"].should.equal(6)
table.update(
GlobalSecondaryIndexUpdates=[
{
"Update": {
"IndexName": "TestGSI",
"ProvisionedThroughput": {
"ReadCapacityUnits": 10,
"WriteCapacityUnits": 11,
},
}
}
]
)
table = dynamodb.Table("users")
# Primary throughput has not changed
table.provisioned_throughput["ReadCapacityUnits"].should.equal(5)
table.provisioned_throughput["WriteCapacityUnits"].should.equal(6)
gsi_throughput = table.global_secondary_indexes[0]["ProvisionedThroughput"]
gsi_throughput["ReadCapacityUnits"].should.equal(10)
gsi_throughput["WriteCapacityUnits"].should.equal(11)
@mock_dynamodb2
def test_update_table_gsi_create():
dynamodb = boto3.resource("dynamodb", region_name="us-east-1")
# Create the DynamoDB table.
table = dynamodb.create_table(
TableName="users",
KeySchema=[
{"AttributeName": "forum_name", "KeyType": "HASH"},
{"AttributeName": "subject", "KeyType": "RANGE"},
],
AttributeDefinitions=[
{"AttributeName": "forum_name", "AttributeType": "S"},
{"AttributeName": "subject", "AttributeType": "S"},
],
ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 6},
)
table = dynamodb.Table("users")
table.global_secondary_indexes.should.have.length_of(0)
table.attribute_definitions.should.have.length_of(2)
table.update(
AttributeDefinitions=[
{"AttributeName": "forum_name", "AttributeType": "S"},
{"AttributeName": "subject", "AttributeType": "S"},
{"AttributeName": "username", "AttributeType": "S"},
{"AttributeName": "created", "AttributeType": "N"},
],
GlobalSecondaryIndexUpdates=[
{
"Create": {
"IndexName": "TestGSI",
"KeySchema": [
{"AttributeName": "username", "KeyType": "HASH"},
{"AttributeName": "created", "KeyType": "RANGE"},
],
"Projection": {"ProjectionType": "ALL"},
"ProvisionedThroughput": {
"ReadCapacityUnits": 3,
"WriteCapacityUnits": 4,
},
}
}
],
)
table = dynamodb.Table("users")
table.reload()
table.global_secondary_indexes.should.have.length_of(1)
table.attribute_definitions.should.have.length_of(4)
gsi_throughput = table.global_secondary_indexes[0]["ProvisionedThroughput"]
    gsi_throughput["ReadCapacityUnits"].should.equal(3)
    gsi_throughput["WriteCapacityUnits"].should.equal(4)
# Check update works
table.update(
GlobalSecondaryIndexUpdates=[
{
"Update": {
"IndexName": "TestGSI",
"ProvisionedThroughput": {
"ReadCapacityUnits": 10,
"WriteCapacityUnits": 11,
},
}
}
]
)
table = dynamodb.Table("users")
gsi_throughput = table.global_secondary_indexes[0]["ProvisionedThroughput"]
    gsi_throughput["ReadCapacityUnits"].should.equal(10)
    gsi_throughput["WriteCapacityUnits"].should.equal(11)
table.update(GlobalSecondaryIndexUpdates=[{"Delete": {"IndexName": "TestGSI"}}])
table = dynamodb.Table("users")
table.global_secondary_indexes.should.have.length_of(0)
@mock_dynamodb2
def test_update_table_gsi_throughput():
dynamodb = boto3.resource("dynamodb", region_name="us-east-1")
# Create the DynamoDB table.
table = dynamodb.create_table(
TableName="users",
KeySchema=[
{"AttributeName": "forum_name", "KeyType": "HASH"},
{"AttributeName": "subject", "KeyType": "RANGE"},
],
GlobalSecondaryIndexes=[
{
"IndexName": "TestGSI",
"KeySchema": [
{"AttributeName": "username", "KeyType": "HASH"},
{"AttributeName": "created", "KeyType": "RANGE"},
],
"Projection": {"ProjectionType": "ALL"},
"ProvisionedThroughput": {
"ReadCapacityUnits": 3,
"WriteCapacityUnits": 4,
},
}
],
AttributeDefinitions=[
{"AttributeName": "forum_name", "AttributeType": "S"},
{"AttributeName": "subject", "AttributeType": "S"},
{"AttributeName": "username", "AttributeType": "S"},
{"AttributeName": "created", "AttributeType": "S"},
],
ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 6},
)
table = dynamodb.Table("users")
table.global_secondary_indexes.should.have.length_of(1)
table.update(GlobalSecondaryIndexUpdates=[{"Delete": {"IndexName": "TestGSI"}}])
table = dynamodb.Table("users")
table.global_secondary_indexes.should.have.length_of(0)
@mock_dynamodb2
def test_query_pagination():
table = _create_table_with_range_key()
for i in range(10):
table.put_item(
Item={
"forum_name": "the-key",
"subject": "{0}".format(i),
"username": "johndoe",
"created": Decimal("3"),
}
)
page1 = table.query(KeyConditionExpression=Key("forum_name").eq("the-key"), Limit=6)
page1["Count"].should.equal(6)
page1["Items"].should.have.length_of(6)
page1.should.have.key("LastEvaluatedKey")
page2 = table.query(
KeyConditionExpression=Key("forum_name").eq("the-key"),
Limit=6,
ExclusiveStartKey=page1["LastEvaluatedKey"],
)
page2["Count"].should.equal(4)
page2["Items"].should.have.length_of(4)
page2.should_not.have.key("LastEvaluatedKey")
results = page1["Items"] + page2["Items"]
subjects = set([int(r["subject"]) for r in results])
subjects.should.equal(set(range(10)))
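# A minimal, hedged sketch (not one of the moto tests above) of the usual
# DynamoDB pagination loop that test_query_pagination exercises: keep
# re-issuing the query with ExclusiveStartKey until no LastEvaluatedKey is
# returned.  The helper name is ours; the table/key names match the test.
def _query_all_pages(table, forum_name, page_size=6):
    items = []
    kwargs = {
        "KeyConditionExpression": Key("forum_name").eq(forum_name),
        "Limit": page_size,
    }
    while True:
        page = table.query(**kwargs)
        items.extend(page["Items"])
        if "LastEvaluatedKey" not in page:
            break
        kwargs["ExclusiveStartKey"] = page["LastEvaluatedKey"]
    return items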
@mock_dynamodb2
def test_scan_by_index():
dynamodb = boto3.client("dynamodb", region_name="us-east-1")
dynamodb.create_table(
TableName="test",
KeySchema=[
{"AttributeName": "id", "KeyType": "HASH"},
{"AttributeName": "range_key", "KeyType": "RANGE"},
],
AttributeDefinitions=[
{"AttributeName": "id", "AttributeType": "S"},
{"AttributeName": "range_key", "AttributeType": "S"},
{"AttributeName": "gsi_col", "AttributeType": "S"},
{"AttributeName": "gsi_range_key", "AttributeType": "S"},
{"AttributeName": "lsi_range_key", "AttributeType": "S"},
],
ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 1},
GlobalSecondaryIndexes=[
{
"IndexName": "test_gsi",
"KeySchema": [
{"AttributeName": "gsi_col", "KeyType": "HASH"},
{"AttributeName": "gsi_range_key", "KeyType": "RANGE"},
],
"Projection": {"ProjectionType": "ALL"},
"ProvisionedThroughput": {
"ReadCapacityUnits": 1,
"WriteCapacityUnits": 1,
},
}
],
LocalSecondaryIndexes=[
{
"IndexName": "test_lsi",
"KeySchema": [
{"AttributeName": "id", "KeyType": "HASH"},
{"AttributeName": "lsi_range_key", "KeyType": "RANGE"},
],
"Projection": {"ProjectionType": "ALL"},
}
],
)
dynamodb.put_item(
TableName="test",
Item={
"id": {"S": "1"},
"range_key": {"S": "1"},
"col1": {"S": "val1"},
"gsi_col": {"S": "1"},
"gsi_range_key": {"S": "1"},
"lsi_range_key": {"S": "1"},
},
)
dynamodb.put_item(
TableName="test",
Item={
"id": {"S": "1"},
"range_key": {"S": "2"},
"col1": {"S": "val2"},
"gsi_col": {"S": "1"},
"gsi_range_key": {"S": "2"},
"lsi_range_key": {"S": "2"},
},
)
dynamodb.put_item(
TableName="test",
Item={"id": {"S": "3"}, "range_key": {"S": "1"}, "col1": {"S": "val3"}},
)
res = dynamodb.scan(TableName="test")
assert res["Count"] == 3
assert len(res["Items"]) == 3
res = dynamodb.scan(TableName="test", IndexName="test_gsi")
assert res["Count"] == 2
assert len(res["Items"]) == 2
res = dynamodb.scan(TableName="test", IndexName="test_gsi", Limit=1)
assert res["Count"] == 1
assert len(res["Items"]) == 1
last_eval_key = res["LastEvaluatedKey"]
assert last_eval_key["id"]["S"] == "1"
assert last_eval_key["gsi_col"]["S"] == "1"
assert last_eval_key["gsi_range_key"]["S"] == "1"
res = dynamodb.scan(TableName="test", IndexName="test_lsi")
assert res["Count"] == 2
assert len(res["Items"]) == 2
res = dynamodb.scan(TableName="test", IndexName="test_lsi", Limit=1)
assert res["Count"] == 1
assert len(res["Items"]) == 1
last_eval_key = res["LastEvaluatedKey"]
assert last_eval_key["id"]["S"] == "1"
assert last_eval_key["range_key"]["S"] == "1"
assert last_eval_key["lsi_range_key"]["S"] == "1"
@mock_dynamodb2
@pytest.mark.parametrize("create_item_first", [False, True])
@pytest.mark.parametrize(
"expression", ["set h=:New", "set r=:New", "set x=:New, r=:New"]
)
def test_update_item_throws_exception_when_updating_hash_or_range_key(
create_item_first, expression
):
client = boto3.client("dynamodb", region_name="ap-northeast-3")
table_name = "testtable_3877"
client.create_table(
TableName=table_name,
KeySchema=[
{"AttributeName": "h", "KeyType": "HASH"},
{"AttributeName": "r", "KeyType": "RANGE"},
],
AttributeDefinitions=[
{"AttributeName": "h", "AttributeType": "S"},
{"AttributeName": "r", "AttributeType": "S"},
],
BillingMode="PAY_PER_REQUEST",
)
initial_val = str(uuid4())
if create_item_first:
client.put_item(
TableName=table_name, Item={"h": {"S": initial_val}, "r": {"S": "1"}},
)
# Updating the HASH key should fail
with pytest.raises(ClientError) as ex:
client.update_item(
TableName=table_name,
Key={"h": {"S": initial_val}, "r": {"S": "1"}},
UpdateExpression=expression,
ExpressionAttributeValues={":New": {"S": "2"}},
)
err = ex.value.response["Error"]
err["Code"].should.equal("ValidationException")
err["Message"].should.match(
r"One or more parameter values were invalid: Cannot update attribute (r|h). This attribute is part of the key"
)
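# DynamoDB never allows UpdateItem to modify an item's key attributes; as the
# parametrization above shows, the request is rejected with a
# ValidationException whether or not the item already exists.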
|
|
#!/usr/bin/python
import irtk
import sys
import os
import numpy as np
import scipy.ndimage as nd
import cv2
import argparse
#from skimage.filter import denoise_tv_chambolle
#from irtk.ext.patches import extract_oriented_patches2D as extract_patches2D
from irtk.ext.patches import extract_patches2D
from lib.BundledSIFT import get_OFD
from sklearn.externals import joblib
from joblib import Parallel, delayed
from scipy.stats.mstats import mquantiles
parser = argparse.ArgumentParser(
description='Slice-by-slice masking of fetal brain MRI (3D).' )
parser.add_argument( '--img', nargs='+', type=str, required=True )
parser.add_argument( '--mask', nargs='+', type=str, required=True )
parser.add_argument( '--ga', type=float, required=True )
parser.add_argument( '--output_dir', type=str, required=True )
parser.add_argument( '-r', '--radius', type=int, default=8,
help="patch size" )
parser.add_argument( '-l', '--l', type=float, default=30.0,
help="lambda" )
parser.add_argument( '--cpu', type=int, default=-1,
help="number of CPUs used" )
parser.add_argument( '--ntrees', type=int, default=30,
help="number of trees" )
parser.add_argument( '--do_3D', action="store_true", default=False )
parser.add_argument( '--do_patchZ', action="store_true", default=False )
parser.add_argument( '--no_cleaning', action="store_true", default=False )
parser.add_argument( '--debug', action="store_true", default=False )
parser.add_argument( '--mass', action="store_true", default=False )
args = parser.parse_args()
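# Hypothetical example invocation (script and file names are placeholders,
# not taken from the original pipeline):
#   python mask_fetal_brain.py --img stack1.nii --mask mask1.nii \
#       --ga 29.5 --output_dir output/ --do_3D --mass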
print args
DEBUG = args.debug
if not os.path.exists( args.output_dir ):
os.makedirs( args.output_dir )
def get_BV( GA ):
"""
Return expected brain volume according to gestational age.
Reference:
"The assessment of normal fetal brain volume by 3-D ultrasound"
Chiung-Hsin Chang, Chen-Hsiang Yu, Fong-Ming Chang, Huei-Chen Ko, Hsi-Yao Chen
"""
    # convert mL to mm^3 (1 mL = 1 cm^3 = 1000 mm^3)
return (-171.48036 + 4.8079*GA + 0.29521*GA**2)*1000
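# Quick sanity check of the regression above (plain arithmetic, not a value
# quoted from the paper): GA = 30 weeks gives
#   (-171.48036 + 4.8079*30 + 0.29521*30**2) * 1000 = 238445.64 mm^3,
# i.e. roughly 238 mL.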
def get_noiseXY(img):
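    """Rough in-plane (XY) noise estimate: the standard deviation of the
    residual left after Gaussian-smoothing each slice."""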
img = img.astype('float32')
new_img = np.zeros(img.shape,dtype='float32')
for z in xrange(img.shape[0]):
new_img[z] = nd.gaussian_filter( img[z], 2, mode='reflect' )
noise = img - new_img
#print "Noise XY:", noise.std(), img.std()
return noise.std()
def get_noiseZ(img):
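    """Same residual-after-smoothing noise estimate as get_noiseXY, applied
    to the (z, y) planes so that variation along Z contributes."""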
img = img.astype('float32')
new_img = np.zeros(img.shape,dtype='float32')
for x in xrange(img.shape[2]):
new_img[:,:,x] = nd.gaussian_filter( img[:,:,x], 2, mode='reflect' )
noise = img - new_img
#print "Noise Z:", noise.std(), img.std()
return noise.std()
def get_training_data( file_img, file_mask, r ):
# create mask
input_mask = irtk.imread( file_mask )
x_min, y_min, z_min, x_max, y_max, z_max = (input_mask == 0).bbox()
background = irtk.zeros( input_mask.get_header(), dtype='uint8' )
background[z_min:z_max+1,
y_min:y_max+1,
x_min:x_max+1] = 1
background = nd.morphological_gradient( background, size=7)
n = background[z_min+1:z_max,
y_min+1:y_max,
x_min+1:x_max].sum()
    z = np.random.randint(low=0, high=input_mask.shape[0], size=int(1.25*n))
    y = np.random.randint(low=0, high=input_mask.shape[1], size=int(1.25*n))
    x = np.random.randint(low=0, high=input_mask.shape[2], size=int(1.25*n))
background[z,y,x] = 1
background[z_min+1:z_max,
y_min+1:y_max,
x_min+1:x_max] = 0
foreground = (input_mask == 1).astype('uint8')
new_mask = irtk.zeros( input_mask.get_header(), dtype='uint8' )
new_mask[foreground == 1] = 1
new_mask[background != 0] = 2
img = irtk.imread( file_img, dtype='float32' )
X = []
Y = []
for z in xrange(img.shape[0]):
YX = np.transpose( np.nonzero( foreground[z] ) )
if DEBUG:
YX = YX[::10]
else:
YX = YX[::2]
if YX.shape[0] == 0:
continue
patches = extract_patches2D( img[z], r, YX )
patches = np.reshape( patches, (patches.shape[0],patches.shape[1]*patches.shape[2]) )
print patches.shape, YX.shape
X.extend( patches )
Y.extend( [1]*len(YX) )
for z in xrange(img.shape[0]):
YX = np.transpose( np.nonzero( background[z] ) )
if DEBUG:
YX = YX[::10]
else:
YX = YX[::2]
if YX.shape[0] == 0:
continue
patches = extract_patches2D( img[z], r, YX )
patches = np.reshape( patches, (patches.shape[0],patches.shape[1]*patches.shape[2]) )
print patches.shape, YX.shape
X.extend( patches )
Y.extend( [0]*len(YX) )
return X, Y
XY = Parallel(n_jobs=args.cpu)(delayed(get_training_data)(file_img, file_mask, args.radius)
for file_img, file_mask in zip(args.img,args.mask) )
print len(XY)
X = []
Y = []
for x,y in XY:
X.extend(x)
Y.extend(y)
X = np.array( X, dtype='float32').copy()
Y = np.array(Y, dtype='int32').copy()
n_positive = Y.sum()
n_points = Y.shape[0]
print "RATIO = ", n_positive, n_points, float(n_positive) / float(n_points) * 100
print "learning..."
from sklearn.ensemble import RandomForestClassifier
neigh = RandomForestClassifier( n_estimators=args.ntrees,
criterion='gini',
n_jobs=args.cpu )
neigh.fit(X, Y)
neigh.set_params(n_jobs=1)
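# n_jobs is set back to 1 before the classifier is handed to the joblib
# workers below, presumably to avoid nested parallelism (each mask_image call
# already runs in its own process).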
def mask_image( file_img, file_mask, ga, r, neigh, output_dir ):
img = irtk.imread( file_img, dtype='float32' )
input_mask = irtk.imread( file_mask )
print "predicting..."
    res = irtk.zeros( img.get_header(), dtype='float32' )
mask = irtk.ones( input_mask.get_header(), dtype='uint8' )
mask[input_mask == 2] = 0
for z in xrange(img.shape[0]):
print z
YX = np.transpose( np.nonzero( mask[z] ) )
if YX.shape[0] == 0:
continue # this slice does not intersect the box
patches = extract_patches2D( img[z], r, YX )
patches = np.reshape( patches, (patches.shape[0],patches.shape[1]*patches.shape[2]) )
predictions = neigh.predict_proba(patches)[:,1]
res[z,YX[:,0],YX[:,1]] = predictions
x_min, y_min, z_min, x_max, y_max, z_max = mask.bbox()
proba = res[z_min:z_max+1,
y_min:y_max+1,
x_min:x_max+1]
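    # When --mass is given, the expected brain volume for this gestational age
    # is compared to the bounding-box volume; the resulting ratio picks the
    # symmetric quantiles used to clip and rescale the probability map before
    # the 0.5 threshold below.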
    if args.mass:
        BV = get_BV( ga )
        box_volume = ( (z_max-z_min)*img.header['pixelSize'][2]
                       * (y_max-y_min)*img.header['pixelSize'][1]
                       * (x_max-x_min)*img.header['pixelSize'][0] )
        ratio = float(BV) / float(box_volume)
        print "ratio", ratio
        q0,q1 = mquantiles( proba.flatten(), prob=[0.5*(1.0-ratio),
                                                   1.0-0.5*ratio] )
        print "threshold", q0,q1
#threshold = max(0.5,threshold)
# labels = res[z_min:z_max+1,
# y_min:y_max+1,
# x_min:x_max+1] > threshold
#res = 1 / (np.exp(-(res-threshold)/(res.max()-res.min())))
res[res<q0] = q0
res[res>q1] = q1
res -= res.min()
res /= res.max()
labels = res[z_min:z_max+1,
y_min:y_max+1,
x_min:x_max+1] > 0.5
proba = res[z_min:z_max+1,
y_min:y_max+1,
x_min:x_max+1]
cropped_img = img[z_min:z_max+1,
y_min:y_max+1,
x_min:x_max+1]
if args.do_3D:
labels = irtk.crf( cropped_img,
labels,
proba,
l=args.l,
sigma=get_noiseXY(cropped_img),
sigmaZ=get_noiseZ(cropped_img) )
# elif args.do_patchZ:
# labels = irtk.crf_patchZ( cropped_img,
# labels,
# proba,
# l=10.0 )
# else:
# for z in xrange(z_min,z_max+1):
# labels[z] = irtk.crf( cropped_img[z],
# labels[z],
# proba[z],
# l=1.0 )
print "MAX LABEL:", labels.max()
irtk.imwrite(output_dir + "/bare_"+os.path.basename(file_img), labels )
tmp = irtk.zeros( img.get_header(), dtype='uint8' )
tmp[z_min:z_max+1,
y_min:y_max+1,
x_min:x_max+1] = labels
( min_x_bare, min_y_bare, min_z_bare,
max_x_bare, max_y_bare, max_z_bare ) = tmp.bbox()
if not args.no_cleaning:
        # clean up each slice by fitting an ellipse to the segmentation edge
        # and refilling it enlarged by 10%
for z in xrange(labels.shape[0]):
edges = nd.morphological_gradient( labels[z] > 0,size=5 )
points = np.transpose(edges.nonzero())[:,::-1]
if len(points) == 0:
continue
points = np.array(map(lambda x:[x],points),dtype='int32')
ellipse = cv2.fitEllipse(points)
cv2.ellipse( labels[z], (ellipse[0],
(1.1*ellipse[1][0],1.1*ellipse[1][1]),
ellipse[2]) , 1, -1 )
irtk.imwrite(output_dir + "/seg_"+os.path.basename(file_img), labels )
irtk.imwrite(output_dir + "/res_"+os.path.basename(file_img), res )
# re-read the image in case we processed it
img = irtk.imread( file_img, dtype='float32' )
cropped_img = img[z_min:z_max+1,
y_min:y_max+1,
x_min:x_max+1]
cropped_img[labels==0] = -1
masked = cropped_img.bbox(crop=True)
irtk.imwrite(output_dir + "/masked_"+os.path.basename(file_img), masked )
# re-read the image in case we processed it
img = irtk.imread( file_img, dtype='float32' )
x0 = min_x_bare + (max_x_bare - min_x_bare) / 2
y0 = min_y_bare + (max_y_bare - min_y_bare) / 2
ofd = get_OFD(ga)/img.header['pixelSize'][0]
cropped_img = img[min_z_bare:max_z_bare+1,
max(0,int(round(y0-ofd/2))):min(img.shape[1],int(round(y0+ofd/2+1))),
max(0,int(round(x0-ofd/2))):min(img.shape[2],int(round(x0+ofd/2+1)))].copy()
irtk.imwrite(output_dir + "/very_large_"+os.path.basename(file_img),
cropped_img )
cropped_proba = res[min_z_bare:max_z_bare+1,
max(0,int(round(y0-ofd/2))):min(img.shape[1],int(round(y0+ofd/2+1))),
max(0,int(round(x0-ofd/2))):min(img.shape[2],int(round(x0+ofd/2+1)))].copy()
irtk.imwrite(output_dir + "/proba_"+os.path.basename(file_img),
cropped_proba )
XY = Parallel(n_jobs=args.cpu)(delayed(mask_image)(file_img, file_mask, args.ga,
args.radius, neigh, args.output_dir)
for file_img, file_mask in zip(args.img,args.mask) )
|
|
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from google.cloud.datastore._http import _HAVE_GRPC
class Test_DatastoreAPIOverHttp(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.datastore._http import _DatastoreAPIOverHttp
return _DatastoreAPIOverHttp
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test__rpc(self):
class ReqPB(object):
def SerializeToString(self):
return REQPB
class RspPB(object):
def __init__(self, pb):
self._pb = pb
@classmethod
def FromString(cls, pb):
return cls(pb)
REQPB = b'REQPB'
PROJECT = 'PROJECT'
METHOD = 'METHOD'
URI = 'http://api-url'
conn = _Connection(URI)
datastore_api = self._make_one(conn)
http = conn.http = Http({'status': '200'}, 'CONTENT')
response = datastore_api._rpc(PROJECT, METHOD, ReqPB(), RspPB)
self.assertIsInstance(response, RspPB)
self.assertEqual(response._pb, 'CONTENT')
called_with = http._called_with
self.assertEqual(called_with['uri'], URI)
self.assertEqual(called_with['method'], 'POST')
self.assertEqual(called_with['headers']['Content-Type'],
'application/x-protobuf')
self.assertEqual(called_with['headers']['User-Agent'],
conn.USER_AGENT)
self.assertEqual(called_with['body'], REQPB)
self.assertEqual(conn.build_kwargs,
[{'method': METHOD, 'project': PROJECT}])
def test__request_w_200(self):
PROJECT = 'PROJECT'
METHOD = 'METHOD'
DATA = b'DATA'
URI = 'http://api-url'
conn = _Connection(URI)
datastore_api = self._make_one(conn)
http = conn.http = Http({'status': '200'}, 'CONTENT')
self.assertEqual(datastore_api._request(PROJECT, METHOD, DATA),
'CONTENT')
called_with = http._called_with
self.assertEqual(called_with['uri'], URI)
self.assertEqual(called_with['method'], 'POST')
self.assertEqual(called_with['headers']['Content-Type'],
'application/x-protobuf')
self.assertEqual(called_with['headers']['User-Agent'],
conn.USER_AGENT)
self.assertEqual(called_with['body'], DATA)
self.assertEqual(conn.build_kwargs,
[{'method': METHOD, 'project': PROJECT}])
def test__request_not_200(self):
from google.cloud.exceptions import BadRequest
from google.rpc import status_pb2
error = status_pb2.Status()
error.message = 'Entity value is indexed.'
error.code = 9 # FAILED_PRECONDITION
PROJECT = 'PROJECT'
METHOD = 'METHOD'
DATA = 'DATA'
URI = 'http://api-url'
conn = _Connection(URI)
datastore_api = self._make_one(conn)
conn.http = Http({'status': '400'}, error.SerializeToString())
with self.assertRaises(BadRequest) as exc:
datastore_api._request(PROJECT, METHOD, DATA)
expected_message = '400 Entity value is indexed.'
self.assertEqual(str(exc.exception), expected_message)
self.assertEqual(conn.build_kwargs,
[{'method': METHOD, 'project': PROJECT}])
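# The tests below cover _grpc_catch_rendezvous, which (as exercised here) is
# expected to translate gRPC ABORTED into Conflict and INVALID_ARGUMENT into
# BadRequest, while re-raising unmapped rendezvous errors and non-gRPC
# exceptions unchanged.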
@unittest.skipUnless(_HAVE_GRPC, 'No gRPC')
class Test__grpc_catch_rendezvous(unittest.TestCase):
def _call_fut(self):
from google.cloud.datastore._http import _grpc_catch_rendezvous
return _grpc_catch_rendezvous()
@staticmethod
def _fake_method(exc, result=None):
if exc is None:
return result
else:
raise exc
def test_success(self):
expected = object()
with self._call_fut():
result = self._fake_method(None, expected)
self.assertIs(result, expected)
def test_failure_aborted(self):
from grpc import StatusCode
from grpc._channel import _RPCState
from google.cloud.exceptions import Conflict
from google.cloud.exceptions import GrpcRendezvous
details = 'Bad things.'
exc_state = _RPCState((), None, None, StatusCode.ABORTED, details)
exc = GrpcRendezvous(exc_state, None, None, None)
with self.assertRaises(Conflict):
with self._call_fut():
self._fake_method(exc)
def test_failure_invalid_argument(self):
from grpc import StatusCode
from grpc._channel import _RPCState
from google.cloud.exceptions import BadRequest
from google.cloud.exceptions import GrpcRendezvous
details = ('Cannot have inequality filters on multiple '
'properties: [created, priority]')
exc_state = _RPCState((), None, None,
StatusCode.INVALID_ARGUMENT, details)
exc = GrpcRendezvous(exc_state, None, None, None)
with self.assertRaises(BadRequest):
with self._call_fut():
self._fake_method(exc)
def test_failure_cancelled(self):
from grpc import StatusCode
from grpc._channel import _RPCState
from google.cloud.exceptions import GrpcRendezvous
exc_state = _RPCState((), None, None, StatusCode.CANCELLED, None)
exc = GrpcRendezvous(exc_state, None, None, None)
with self.assertRaises(GrpcRendezvous):
with self._call_fut():
self._fake_method(exc)
def test_commit_failure_non_grpc_err(self):
exc = RuntimeError('Not a gRPC error')
with self.assertRaises(RuntimeError):
with self._call_fut():
self._fake_method(exc)
class Test_DatastoreAPIOverGRPC(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.datastore._http import _DatastoreAPIOverGRPC
return _DatastoreAPIOverGRPC
def _make_one(self, stub, connection=None, secure=True, mock_args=None):
import mock
if connection is None:
connection = _Connection(None)
connection.credentials = object()
connection.host = 'CURR_HOST'
if mock_args is None:
mock_args = []
def mock_make_stub(*args):
mock_args.append(args)
return stub
if secure:
patch = mock.patch(
'google.cloud.datastore._http.make_secure_stub',
new=mock_make_stub)
else:
patch = mock.patch(
'google.cloud.datastore._http.make_insecure_stub',
new=mock_make_stub)
with patch:
return self._get_target_class()(connection, secure)
def test_constructor(self):
from google.cloud.datastore import _http as MUT
conn = _Connection(None)
conn.credentials = object()
conn.host = 'CURR_HOST'
stub = _GRPCStub()
mock_args = []
datastore_api = self._make_one(stub, connection=conn,
mock_args=mock_args)
self.assertIs(datastore_api._stub, stub)
self.assertEqual(mock_args, [(
conn.credentials,
conn.USER_AGENT,
MUT.datastore_pb2_grpc.DatastoreStub,
conn.host,
)])
def test_constructor_insecure(self):
from google.cloud.datastore import _http as MUT
conn = _Connection(None)
conn.credentials = object()
conn.host = 'CURR_HOST:1234'
stub = _GRPCStub()
mock_args = []
datastore_api = self._make_one(stub, connection=conn,
secure=False,
mock_args=mock_args)
self.assertIs(datastore_api._stub, stub)
self.assertEqual(mock_args, [(
MUT.datastore_pb2_grpc.DatastoreStub,
conn.host,
)])
def test_lookup(self):
return_val = object()
stub = _GRPCStub(return_val)
datastore_api = self._make_one(stub=stub)
request_pb = _RequestPB()
project = 'PROJECT'
result = datastore_api.lookup(project, request_pb)
self.assertIs(result, return_val)
self.assertEqual(request_pb.project_id, project)
self.assertEqual(stub.method_calls,
[(request_pb, 'Lookup')])
def test_run_query(self):
return_val = object()
stub = _GRPCStub(return_val)
datastore_api = self._make_one(stub=stub)
request_pb = _RequestPB()
project = 'PROJECT'
result = datastore_api.run_query(project, request_pb)
self.assertIs(result, return_val)
self.assertEqual(request_pb.project_id, project)
self.assertEqual(stub.method_calls,
[(request_pb, 'RunQuery')])
def _run_query_failure_helper(self, exc, err_class):
stub = _GRPCStub(side_effect=exc)
datastore_api = self._make_one(stub=stub)
request_pb = _RequestPB()
project = 'PROJECT'
with self.assertRaises(err_class):
datastore_api.run_query(project, request_pb)
self.assertEqual(request_pb.project_id, project)
self.assertEqual(stub.method_calls,
[(request_pb, 'RunQuery')])
@unittest.skipUnless(_HAVE_GRPC, 'No gRPC')
def test_run_query_invalid_argument(self):
from grpc import StatusCode
from grpc._channel import _RPCState
from google.cloud.exceptions import BadRequest
from google.cloud.exceptions import GrpcRendezvous
details = ('Cannot have inequality filters on multiple '
'properties: [created, priority]')
exc_state = _RPCState((), None, None,
StatusCode.INVALID_ARGUMENT, details)
exc = GrpcRendezvous(exc_state, None, None, None)
self._run_query_failure_helper(exc, BadRequest)
def test_begin_transaction(self):
return_val = object()
stub = _GRPCStub(return_val)
datastore_api = self._make_one(stub=stub)
request_pb = _RequestPB()
project = 'PROJECT'
result = datastore_api.begin_transaction(project, request_pb)
self.assertIs(result, return_val)
self.assertEqual(request_pb.project_id, project)
self.assertEqual(
stub.method_calls,
[(request_pb, 'BeginTransaction')])
def test_commit_success(self):
return_val = object()
stub = _GRPCStub(return_val)
datastore_api = self._make_one(stub=stub)
request_pb = _RequestPB()
project = 'PROJECT'
result = datastore_api.commit(project, request_pb)
self.assertIs(result, return_val)
self.assertEqual(request_pb.project_id, project)
self.assertEqual(stub.method_calls,
[(request_pb, 'Commit')])
def test_rollback(self):
return_val = object()
stub = _GRPCStub(return_val)
datastore_api = self._make_one(stub=stub)
request_pb = _RequestPB()
project = 'PROJECT'
result = datastore_api.rollback(project, request_pb)
self.assertIs(result, return_val)
self.assertEqual(request_pb.project_id, project)
self.assertEqual(stub.method_calls,
[(request_pb, 'Rollback')])
def test_allocate_ids(self):
return_val = object()
stub = _GRPCStub(return_val)
datastore_api = self._make_one(stub=stub)
request_pb = _RequestPB()
project = 'PROJECT'
result = datastore_api.allocate_ids(project, request_pb)
self.assertIs(result, return_val)
self.assertEqual(request_pb.project_id, project)
self.assertEqual(
stub.method_calls,
[(request_pb, 'AllocateIds')])
class TestConnection(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.datastore._http import Connection
return Connection
def _make_key_pb(self, project, id_=1234):
from google.cloud.datastore.key import Key
path_args = ('Kind',)
if id_ is not None:
path_args += (id_,)
return Key(*path_args, project=project).to_protobuf()
def _make_query_pb(self, kind):
from google.cloud.grpc.datastore.v1 import query_pb2
pb = query_pb2.Query()
pb.kind.add().name = kind
return pb
def _make_one(self, credentials=None, http=None, use_grpc=False):
import mock
with mock.patch('google.cloud.datastore._http._USE_GRPC',
new=use_grpc):
return self._get_target_class()(credentials=credentials, http=http)
def _verifyProtobufCall(self, called_with, URI, conn):
self.assertEqual(called_with['uri'], URI)
self.assertEqual(called_with['method'], 'POST')
self.assertEqual(called_with['headers']['Content-Type'],
'application/x-protobuf')
self.assertEqual(called_with['headers']['User-Agent'],
conn.USER_AGENT)
def test_default_url(self):
klass = self._get_target_class()
conn = self._make_one()
self.assertEqual(conn.api_base_url, klass.API_BASE_URL)
def test_custom_url_from_env(self):
import mock
from google.cloud._http import API_BASE_URL
from google.cloud.environment_vars import GCD_HOST
HOST = 'CURR_HOST'
fake_environ = {GCD_HOST: HOST}
with mock.patch('os.environ', new=fake_environ):
conn = self._make_one()
self.assertNotEqual(conn.api_base_url, API_BASE_URL)
self.assertEqual(conn.api_base_url, 'http://' + HOST)
def test_ctor_defaults(self):
conn = self._make_one()
self.assertIsNone(conn.credentials)
def test_ctor_without_grpc(self):
import mock
connections = []
return_val = object()
def mock_api(connection):
connections.append(connection)
return return_val
patch = mock.patch(
'google.cloud.datastore._http._DatastoreAPIOverHttp',
new=mock_api)
with patch:
conn = self._make_one(use_grpc=False)
self.assertIsNone(conn.credentials)
self.assertIs(conn._datastore_api, return_val)
self.assertEqual(connections, [conn])
def test_ctor_with_grpc(self):
import mock
api_args = []
return_val = object()
def mock_api(connection, secure):
api_args.append((connection, secure))
return return_val
patch = mock.patch(
'google.cloud.datastore._http._DatastoreAPIOverGRPC',
new=mock_api)
with patch:
conn = self._make_one(use_grpc=True)
self.assertIsNone(conn.credentials)
self.assertIs(conn._datastore_api, return_val)
self.assertEqual(api_args, [(conn, True)])
def test_ctor_explicit(self):
class Creds(object):
pass
creds = Creds()
conn = self._make_one(creds)
self.assertIs(conn.credentials, creds)
def test_http_w_existing(self):
conn = self._make_one()
conn._http = http = object()
self.assertIs(conn.http, http)
def test_http_wo_creds(self):
import httplib2
conn = self._make_one()
self.assertIsInstance(conn.http, httplib2.Http)
def test_http_w_creds(self):
class Creds(object):
pass
creds = Creds()
conn = self._make_one(creds)
self.assertIs(conn.http.credentials, creds)
def test_build_api_url_w_default_base_version(self):
PROJECT = 'PROJECT'
METHOD = 'METHOD'
conn = self._make_one()
URI = '/'.join([
conn.api_base_url,
conn.API_VERSION,
'projects',
PROJECT + ':' + METHOD,
])
self.assertEqual(conn.build_api_url(PROJECT, METHOD), URI)
def test_build_api_url_w_explicit_base_version(self):
BASE = 'http://example.com/'
VER = '3.1415926'
PROJECT = 'PROJECT'
METHOD = 'METHOD'
conn = self._make_one()
URI = '/'.join([
BASE,
VER,
'projects',
PROJECT + ':' + METHOD,
])
self.assertEqual(conn.build_api_url(PROJECT, METHOD, BASE, VER),
URI)
def test_lookup_single_key_empty_response(self):
from google.cloud.grpc.datastore.v1 import datastore_pb2
PROJECT = 'PROJECT'
key_pb = self._make_key_pb(PROJECT)
rsp_pb = datastore_pb2.LookupResponse()
conn = self._make_one()
URI = '/'.join([
conn.api_base_url,
conn.API_VERSION,
'projects',
PROJECT + ':lookup',
])
http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString())
found, missing, deferred = conn.lookup(PROJECT, [key_pb])
self.assertEqual(len(found), 0)
self.assertEqual(len(missing), 0)
self.assertEqual(len(deferred), 0)
cw = http._called_with
self._verifyProtobufCall(cw, URI, conn)
rq_class = datastore_pb2.LookupRequest
request = rq_class()
request.ParseFromString(cw['body'])
keys = list(request.keys)
self.assertEqual(len(keys), 1)
self.assertEqual(key_pb, keys[0])
def test_lookup_single_key_empty_response_w_eventual(self):
from google.cloud.grpc.datastore.v1 import datastore_pb2
PROJECT = 'PROJECT'
key_pb = self._make_key_pb(PROJECT)
rsp_pb = datastore_pb2.LookupResponse()
conn = self._make_one()
URI = '/'.join([
conn.api_base_url,
conn.API_VERSION,
'projects',
PROJECT + ':lookup',
])
http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString())
found, missing, deferred = conn.lookup(PROJECT, [key_pb],
eventual=True)
self.assertEqual(len(found), 0)
self.assertEqual(len(missing), 0)
self.assertEqual(len(deferred), 0)
cw = http._called_with
self._verifyProtobufCall(cw, URI, conn)
rq_class = datastore_pb2.LookupRequest
request = rq_class()
request.ParseFromString(cw['body'])
keys = list(request.keys)
self.assertEqual(len(keys), 1)
self.assertEqual(key_pb, keys[0])
self.assertEqual(request.read_options.read_consistency,
datastore_pb2.ReadOptions.EVENTUAL)
self.assertEqual(request.read_options.transaction, b'')
def test_lookup_single_key_empty_response_w_eventual_and_transaction(self):
PROJECT = 'PROJECT'
TRANSACTION = b'TRANSACTION'
key_pb = self._make_key_pb(PROJECT)
conn = self._make_one()
self.assertRaises(ValueError, conn.lookup, PROJECT, key_pb,
eventual=True, transaction_id=TRANSACTION)
def test_lookup_single_key_empty_response_w_transaction(self):
from google.cloud.grpc.datastore.v1 import datastore_pb2
PROJECT = 'PROJECT'
TRANSACTION = b'TRANSACTION'
key_pb = self._make_key_pb(PROJECT)
rsp_pb = datastore_pb2.LookupResponse()
conn = self._make_one()
URI = '/'.join([
conn.api_base_url,
conn.API_VERSION,
'projects',
PROJECT + ':lookup',
])
http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString())
found, missing, deferred = conn.lookup(PROJECT, [key_pb],
transaction_id=TRANSACTION)
self.assertEqual(len(found), 0)
self.assertEqual(len(missing), 0)
self.assertEqual(len(deferred), 0)
cw = http._called_with
self._verifyProtobufCall(cw, URI, conn)
rq_class = datastore_pb2.LookupRequest
request = rq_class()
request.ParseFromString(cw['body'])
keys = list(request.keys)
self.assertEqual(len(keys), 1)
self.assertEqual(key_pb, keys[0])
self.assertEqual(request.read_options.transaction, TRANSACTION)
def test_lookup_single_key_nonempty_response(self):
from google.cloud.grpc.datastore.v1 import datastore_pb2
from google.cloud.grpc.datastore.v1 import entity_pb2
PROJECT = 'PROJECT'
key_pb = self._make_key_pb(PROJECT)
rsp_pb = datastore_pb2.LookupResponse()
entity = entity_pb2.Entity()
entity.key.CopyFrom(key_pb)
rsp_pb.found.add(entity=entity)
conn = self._make_one()
URI = '/'.join([
conn.api_base_url,
conn.API_VERSION,
'projects',
PROJECT + ':lookup',
])
http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString())
(found,), missing, deferred = conn.lookup(PROJECT, [key_pb])
self.assertEqual(len(missing), 0)
self.assertEqual(len(deferred), 0)
self.assertEqual(found.key.path[0].kind, 'Kind')
self.assertEqual(found.key.path[0].id, 1234)
cw = http._called_with
self._verifyProtobufCall(cw, URI, conn)
rq_class = datastore_pb2.LookupRequest
request = rq_class()
request.ParseFromString(cw['body'])
keys = list(request.keys)
self.assertEqual(len(keys), 1)
self.assertEqual(key_pb, keys[0])
def test_lookup_multiple_keys_empty_response(self):
from google.cloud.grpc.datastore.v1 import datastore_pb2
PROJECT = 'PROJECT'
key_pb1 = self._make_key_pb(PROJECT)
key_pb2 = self._make_key_pb(PROJECT, id_=2345)
rsp_pb = datastore_pb2.LookupResponse()
conn = self._make_one()
URI = '/'.join([
conn.api_base_url,
conn.API_VERSION,
'projects',
PROJECT + ':lookup',
])
http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString())
found, missing, deferred = conn.lookup(PROJECT, [key_pb1, key_pb2])
self.assertEqual(len(found), 0)
self.assertEqual(len(missing), 0)
self.assertEqual(len(deferred), 0)
cw = http._called_with
self._verifyProtobufCall(cw, URI, conn)
rq_class = datastore_pb2.LookupRequest
request = rq_class()
request.ParseFromString(cw['body'])
keys = list(request.keys)
self.assertEqual(len(keys), 2)
self.assertEqual(key_pb1, keys[0])
self.assertEqual(key_pb2, keys[1])
def test_lookup_multiple_keys_w_missing(self):
from google.cloud.grpc.datastore.v1 import datastore_pb2
PROJECT = 'PROJECT'
key_pb1 = self._make_key_pb(PROJECT)
key_pb2 = self._make_key_pb(PROJECT, id_=2345)
rsp_pb = datastore_pb2.LookupResponse()
er_1 = rsp_pb.missing.add()
er_1.entity.key.CopyFrom(key_pb1)
er_2 = rsp_pb.missing.add()
er_2.entity.key.CopyFrom(key_pb2)
conn = self._make_one()
URI = '/'.join([
conn.api_base_url,
conn.API_VERSION,
'projects',
PROJECT + ':lookup',
])
http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString())
result, missing, deferred = conn.lookup(PROJECT, [key_pb1, key_pb2])
self.assertEqual(result, [])
self.assertEqual(len(deferred), 0)
self.assertEqual([missed.key for missed in missing],
[key_pb1, key_pb2])
cw = http._called_with
self._verifyProtobufCall(cw, URI, conn)
rq_class = datastore_pb2.LookupRequest
request = rq_class()
request.ParseFromString(cw['body'])
keys = list(request.keys)
self.assertEqual(len(keys), 2)
self.assertEqual(key_pb1, keys[0])
self.assertEqual(key_pb2, keys[1])
def test_lookup_multiple_keys_w_deferred(self):
from google.cloud.grpc.datastore.v1 import datastore_pb2
PROJECT = 'PROJECT'
key_pb1 = self._make_key_pb(PROJECT)
key_pb2 = self._make_key_pb(PROJECT, id_=2345)
rsp_pb = datastore_pb2.LookupResponse()
rsp_pb.deferred.add().CopyFrom(key_pb1)
rsp_pb.deferred.add().CopyFrom(key_pb2)
conn = self._make_one()
URI = '/'.join([
conn.api_base_url,
conn.API_VERSION,
'projects',
PROJECT + ':lookup',
])
http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString())
result, missing, deferred = conn.lookup(PROJECT, [key_pb1, key_pb2])
self.assertEqual(result, [])
self.assertEqual(len(missing), 0)
self.assertEqual([def_key for def_key in deferred], [key_pb1, key_pb2])
cw = http._called_with
self._verifyProtobufCall(cw, URI, conn)
self.assertEqual(cw['uri'], URI)
self.assertEqual(cw['method'], 'POST')
self.assertEqual(cw['headers']['Content-Type'],
'application/x-protobuf')
self.assertEqual(cw['headers']['User-Agent'], conn.USER_AGENT)
rq_class = datastore_pb2.LookupRequest
request = rq_class()
request.ParseFromString(cw['body'])
keys = list(request.keys)
self.assertEqual(len(keys), 2)
self.assertEqual(key_pb1, keys[0])
self.assertEqual(key_pb2, keys[1])
def test_run_query_w_eventual_no_transaction(self):
from google.cloud.grpc.datastore.v1 import datastore_pb2
from google.cloud.grpc.datastore.v1 import query_pb2
PROJECT = 'PROJECT'
KIND = 'Nonesuch'
CURSOR = b'\x00'
q_pb = self._make_query_pb(KIND)
rsp_pb = datastore_pb2.RunQueryResponse()
rsp_pb.batch.end_cursor = CURSOR
no_more = query_pb2.QueryResultBatch.NO_MORE_RESULTS
rsp_pb.batch.more_results = no_more
rsp_pb.batch.entity_result_type = query_pb2.EntityResult.FULL
conn = self._make_one()
URI = '/'.join([
conn.api_base_url,
conn.API_VERSION,
'projects',
PROJECT + ':runQuery',
])
http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString())
pbs, end, more, skipped = conn.run_query(PROJECT, q_pb,
eventual=True)
self.assertEqual(pbs, [])
self.assertEqual(end, CURSOR)
self.assertTrue(more)
self.assertEqual(skipped, 0)
cw = http._called_with
self._verifyProtobufCall(cw, URI, conn)
rq_class = datastore_pb2.RunQueryRequest
request = rq_class()
request.ParseFromString(cw['body'])
self.assertEqual(request.partition_id.namespace_id, '')
self.assertEqual(request.query, q_pb)
self.assertEqual(request.read_options.read_consistency,
datastore_pb2.ReadOptions.EVENTUAL)
self.assertEqual(request.read_options.transaction, b'')
def test_run_query_wo_eventual_w_transaction(self):
from google.cloud.grpc.datastore.v1 import datastore_pb2
from google.cloud.grpc.datastore.v1 import query_pb2
PROJECT = 'PROJECT'
KIND = 'Nonesuch'
CURSOR = b'\x00'
TRANSACTION = b'TRANSACTION'
q_pb = self._make_query_pb(KIND)
rsp_pb = datastore_pb2.RunQueryResponse()
rsp_pb.batch.end_cursor = CURSOR
no_more = query_pb2.QueryResultBatch.NO_MORE_RESULTS
rsp_pb.batch.more_results = no_more
rsp_pb.batch.entity_result_type = query_pb2.EntityResult.FULL
conn = self._make_one()
URI = '/'.join([
conn.api_base_url,
conn.API_VERSION,
'projects',
PROJECT + ':runQuery',
])
http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString())
pbs, end, more, skipped = conn.run_query(
PROJECT, q_pb, transaction_id=TRANSACTION)
self.assertEqual(pbs, [])
self.assertEqual(end, CURSOR)
self.assertTrue(more)
self.assertEqual(skipped, 0)
cw = http._called_with
self._verifyProtobufCall(cw, URI, conn)
rq_class = datastore_pb2.RunQueryRequest
request = rq_class()
request.ParseFromString(cw['body'])
self.assertEqual(request.partition_id.namespace_id, '')
self.assertEqual(request.query, q_pb)
self.assertEqual(
request.read_options.read_consistency,
datastore_pb2.ReadOptions.READ_CONSISTENCY_UNSPECIFIED)
self.assertEqual(request.read_options.transaction, TRANSACTION)
def test_run_query_w_eventual_and_transaction(self):
from google.cloud.grpc.datastore.v1 import datastore_pb2
from google.cloud.grpc.datastore.v1 import query_pb2
PROJECT = 'PROJECT'
KIND = 'Nonesuch'
CURSOR = b'\x00'
TRANSACTION = b'TRANSACTION'
q_pb = self._make_query_pb(KIND)
rsp_pb = datastore_pb2.RunQueryResponse()
rsp_pb.batch.end_cursor = CURSOR
no_more = query_pb2.QueryResultBatch.NO_MORE_RESULTS
rsp_pb.batch.more_results = no_more
rsp_pb.batch.entity_result_type = query_pb2.EntityResult.FULL
conn = self._make_one()
self.assertRaises(ValueError, conn.run_query, PROJECT, q_pb,
eventual=True, transaction_id=TRANSACTION)
def test_run_query_wo_namespace_empty_result(self):
from google.cloud.grpc.datastore.v1 import datastore_pb2
from google.cloud.grpc.datastore.v1 import query_pb2
PROJECT = 'PROJECT'
KIND = 'Nonesuch'
CURSOR = b'\x00'
q_pb = self._make_query_pb(KIND)
rsp_pb = datastore_pb2.RunQueryResponse()
rsp_pb.batch.end_cursor = CURSOR
no_more = query_pb2.QueryResultBatch.NO_MORE_RESULTS
rsp_pb.batch.more_results = no_more
rsp_pb.batch.entity_result_type = query_pb2.EntityResult.FULL
conn = self._make_one()
URI = '/'.join([
conn.api_base_url,
conn.API_VERSION,
'projects',
PROJECT + ':runQuery',
])
http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString())
pbs, end, more, skipped = conn.run_query(PROJECT, q_pb)
self.assertEqual(pbs, [])
self.assertEqual(end, CURSOR)
self.assertTrue(more)
self.assertEqual(skipped, 0)
cw = http._called_with
self._verifyProtobufCall(cw, URI, conn)
rq_class = datastore_pb2.RunQueryRequest
request = rq_class()
request.ParseFromString(cw['body'])
self.assertEqual(request.partition_id.namespace_id, '')
self.assertEqual(request.query, q_pb)
def test_run_query_w_namespace_nonempty_result(self):
from google.cloud.grpc.datastore.v1 import datastore_pb2
from google.cloud.grpc.datastore.v1 import entity_pb2
PROJECT = 'PROJECT'
KIND = 'Kind'
entity = entity_pb2.Entity()
q_pb = self._make_query_pb(KIND)
rsp_pb = datastore_pb2.RunQueryResponse()
rsp_pb.batch.entity_results.add(entity=entity)
rsp_pb.batch.entity_result_type = 1 # FULL
rsp_pb.batch.more_results = 3 # NO_MORE_RESULTS
conn = self._make_one()
URI = '/'.join([
conn.api_base_url,
conn.API_VERSION,
'projects',
PROJECT + ':runQuery',
])
http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString())
pbs = conn.run_query(PROJECT, q_pb, 'NS')[0]
self.assertEqual(len(pbs), 1)
cw = http._called_with
self._verifyProtobufCall(cw, URI, conn)
rq_class = datastore_pb2.RunQueryRequest
request = rq_class()
request.ParseFromString(cw['body'])
self.assertEqual(request.partition_id.namespace_id, 'NS')
self.assertEqual(request.query, q_pb)
def test_begin_transaction(self):
from google.cloud.grpc.datastore.v1 import datastore_pb2
PROJECT = 'PROJECT'
TRANSACTION = b'TRANSACTION'
rsp_pb = datastore_pb2.BeginTransactionResponse()
rsp_pb.transaction = TRANSACTION
conn = self._make_one()
URI = '/'.join([
conn.api_base_url,
conn.API_VERSION,
'projects',
PROJECT + ':beginTransaction',
])
http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString())
self.assertEqual(conn.begin_transaction(PROJECT), TRANSACTION)
cw = http._called_with
self._verifyProtobufCall(cw, URI, conn)
rq_class = datastore_pb2.BeginTransactionRequest
request = rq_class()
request.ParseFromString(cw['body'])
def test_commit_wo_transaction(self):
import mock
from google.cloud.grpc.datastore.v1 import datastore_pb2
from google.cloud.datastore.helpers import _new_value_pb
PROJECT = 'PROJECT'
key_pb = self._make_key_pb(PROJECT)
rsp_pb = datastore_pb2.CommitResponse()
req_pb = datastore_pb2.CommitRequest()
mutation = req_pb.mutations.add()
insert = mutation.upsert
insert.key.CopyFrom(key_pb)
value_pb = _new_value_pb(insert, 'foo')
value_pb.string_value = u'Foo'
conn = self._make_one()
URI = '/'.join([
conn.api_base_url,
conn.API_VERSION,
'projects',
PROJECT + ':commit',
])
http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString())
# Set up mock for parsing the response.
expected_result = object()
_parsed = []
def mock_parse(response):
_parsed.append(response)
return expected_result
patch = mock.patch(
'google.cloud.datastore._http._parse_commit_response',
new=mock_parse)
with patch:
result = conn.commit(PROJECT, req_pb, None)
self.assertIs(result, expected_result)
cw = http._called_with
self._verifyProtobufCall(cw, URI, conn)
rq_class = datastore_pb2.CommitRequest
request = rq_class()
request.ParseFromString(cw['body'])
self.assertEqual(request.transaction, b'')
self.assertEqual(list(request.mutations), [mutation])
self.assertEqual(request.mode, rq_class.NON_TRANSACTIONAL)
self.assertEqual(_parsed, [rsp_pb])
def test_commit_w_transaction(self):
import mock
from google.cloud.grpc.datastore.v1 import datastore_pb2
from google.cloud.datastore.helpers import _new_value_pb
PROJECT = 'PROJECT'
key_pb = self._make_key_pb(PROJECT)
rsp_pb = datastore_pb2.CommitResponse()
req_pb = datastore_pb2.CommitRequest()
mutation = req_pb.mutations.add()
insert = mutation.upsert
insert.key.CopyFrom(key_pb)
value_pb = _new_value_pb(insert, 'foo')
value_pb.string_value = u'Foo'
conn = self._make_one()
URI = '/'.join([
conn.api_base_url,
conn.API_VERSION,
'projects',
PROJECT + ':commit',
])
http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString())
# Set up mock for parsing the response.
expected_result = object()
_parsed = []
def mock_parse(response):
_parsed.append(response)
return expected_result
patch = mock.patch(
'google.cloud.datastore._http._parse_commit_response',
new=mock_parse)
with patch:
result = conn.commit(PROJECT, req_pb, b'xact')
self.assertIs(result, expected_result)
cw = http._called_with
self._verifyProtobufCall(cw, URI, conn)
rq_class = datastore_pb2.CommitRequest
request = rq_class()
request.ParseFromString(cw['body'])
self.assertEqual(request.transaction, b'xact')
self.assertEqual(list(request.mutations), [mutation])
self.assertEqual(request.mode, rq_class.TRANSACTIONAL)
self.assertEqual(_parsed, [rsp_pb])
def test_rollback_ok(self):
from google.cloud.grpc.datastore.v1 import datastore_pb2
PROJECT = 'PROJECT'
TRANSACTION = b'xact'
rsp_pb = datastore_pb2.RollbackResponse()
conn = self._make_one()
URI = '/'.join([
conn.api_base_url,
conn.API_VERSION,
'projects',
PROJECT + ':rollback',
])
http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString())
self.assertIsNone(conn.rollback(PROJECT, TRANSACTION))
cw = http._called_with
self._verifyProtobufCall(cw, URI, conn)
rq_class = datastore_pb2.RollbackRequest
request = rq_class()
request.ParseFromString(cw['body'])
self.assertEqual(request.transaction, TRANSACTION)
def test_allocate_ids_empty(self):
from google.cloud.grpc.datastore.v1 import datastore_pb2
PROJECT = 'PROJECT'
rsp_pb = datastore_pb2.AllocateIdsResponse()
conn = self._make_one()
URI = '/'.join([
conn.api_base_url,
conn.API_VERSION,
'projects',
PROJECT + ':allocateIds',
])
http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString())
self.assertEqual(conn.allocate_ids(PROJECT, []), [])
cw = http._called_with
self._verifyProtobufCall(cw, URI, conn)
rq_class = datastore_pb2.AllocateIdsRequest
request = rq_class()
request.ParseFromString(cw['body'])
self.assertEqual(list(request.keys), [])
def test_allocate_ids_non_empty(self):
from google.cloud.grpc.datastore.v1 import datastore_pb2
PROJECT = 'PROJECT'
before_key_pbs = [
self._make_key_pb(PROJECT, id_=None),
self._make_key_pb(PROJECT, id_=None),
]
after_key_pbs = [
self._make_key_pb(PROJECT),
self._make_key_pb(PROJECT, id_=2345),
]
rsp_pb = datastore_pb2.AllocateIdsResponse()
rsp_pb.keys.add().CopyFrom(after_key_pbs[0])
rsp_pb.keys.add().CopyFrom(after_key_pbs[1])
conn = self._make_one()
URI = '/'.join([
conn.api_base_url,
conn.API_VERSION,
'projects',
PROJECT + ':allocateIds',
])
http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString())
self.assertEqual(conn.allocate_ids(PROJECT, before_key_pbs),
after_key_pbs)
cw = http._called_with
self._verifyProtobufCall(cw, URI, conn)
rq_class = datastore_pb2.AllocateIdsRequest
request = rq_class()
request.ParseFromString(cw['body'])
self.assertEqual(len(request.keys), len(before_key_pbs))
for key_before, key_after in zip(before_key_pbs, request.keys):
self.assertEqual(key_before, key_after)
class Test__parse_commit_response(unittest.TestCase):
def _call_fut(self, commit_response_pb):
from google.cloud.datastore._http import _parse_commit_response
return _parse_commit_response(commit_response_pb)
def test_it(self):
from google.cloud.grpc.datastore.v1 import datastore_pb2
from google.cloud.grpc.datastore.v1 import entity_pb2
index_updates = 1337
keys = [
entity_pb2.Key(
path=[
entity_pb2.Key.PathElement(
kind='Foo',
id=1234,
),
],
),
entity_pb2.Key(
path=[
entity_pb2.Key.PathElement(
kind='Bar',
name='baz',
),
],
),
]
response = datastore_pb2.CommitResponse(
mutation_results=[
datastore_pb2.MutationResult(key=key) for key in keys
],
index_updates=index_updates,
)
result = self._call_fut(response)
self.assertEqual(result, (index_updates, keys))
class Http(object):
_called_with = None
def __init__(self, headers, content):
from httplib2 import Response
self._response = Response(headers)
self._content = content
def request(self, **kw):
self._called_with = kw
return self._response, self._content
class _Connection(object):
host = None
USER_AGENT = 'you-sir-age-int'
def __init__(self, api_url):
self.api_url = api_url
self.build_kwargs = []
def build_api_url(self, **kwargs):
self.build_kwargs.append(kwargs)
return self.api_url
class _GRPCStub(object):
def __init__(self, return_val=None, side_effect=Exception):
self.return_val = return_val
self.side_effect = side_effect
self.method_calls = []
def _method(self, request_pb, name):
self.method_calls.append((request_pb, name))
if self.side_effect is Exception:
return self.return_val
else:
raise self.side_effect
def Lookup(self, request_pb):
return self._method(request_pb, 'Lookup')
def RunQuery(self, request_pb):
return self._method(request_pb, 'RunQuery')
def BeginTransaction(self, request_pb):
return self._method(request_pb, 'BeginTransaction')
def Commit(self, request_pb):
return self._method(request_pb, 'Commit')
def Rollback(self, request_pb):
return self._method(request_pb, 'Rollback')
def AllocateIds(self, request_pb):
return self._method(request_pb, 'AllocateIds')
class _RequestPB(object):
project_id = None
|
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import pandas as pd
from astropy.io import fits
from astropy.table import Table, join
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.wcs import WCS
from photutils import aperture_photometry
from photutils import SkyCircularAperture
from photutils import SkyCircularAnnulus
import FITS_tools
import yaml
import os
import re
import subprocess
import shlex
import sys
# edited 8/23/17
# ------
#v3: instead of using photutils, we reproject the frame and use the dual mode of SExtractor to get the magnitudes
# also add seeing (FWHM) lookup for SExtractor
def list_file_name_seeing(dir, name, end=0, startdir=0):
names=[]
for root, dirs, files in os.walk(dir):
for file in files:
if file.startswith(name):
if end == 0:
if startdir == 0:
names.append(os.path.join(root, file))
else:
if root.split('/')[-1][:2]==startdir:
names.append(os.path.join(root, file))
else:
if file.endswith(end):
if startdir == 0:
names.append(os.path.join(root, file))
else:
if root.split('/')[-1][:2]==startdir:
names.append(os.path.join(root, file))
if len(names) == 0:
print 'Cannot find the files'
return names
def aperature_proj(field,band):
with open("pisco_pipeline/params.yaml", 'r') as stream:
try:
param=yaml.load(stream)
except yaml.YAMLError as exc:
print(exc)
# see=float(fits.open(list_file_name_seeing('/Users/taweewat/Documents/pisco_code/',field,startdir='ut')[0])[0].header['FWHM1'])
myReg=re.compile(r'%s_A_\d{1,4}\.fits'%field)
for root, dirs, files in os.walk('/Users/taweewat/Documents/pisco_code/'):
for file in files:
if myReg.search(file) != None:
seeing=float(fits.open(root+'/'+myReg.search(file).group())[0].header['FWHM1'])
# print seeing
# seeing = float(fits.open(list_file_name_seeing(
# '/Users/taweewat/Documents/pisco_code/', field, startdir='ut')[0])[0].header['FWHM1'])
# def see_px(see):
# return (0.24-0.1)/(1.4-0.6)*(see)
slrdir = 'slr_output'
to_be_projected = 'final/coadd_c%s_%s.fits'%(field,band)
reference_fits = 'final/coadd_c%s_i.fits'%field
im1,im2, header = FITS_tools.match_fits(to_be_projected,reference_fits,return_header=True)
outname = 'final/proj_coadd_c%s_%s.fits'%(field,band)
print 'projecting from %s band to i band the fits file '%band + outname
fits.writeto(outname, im1, header, overwrite=True)
# cmd='sex final/coadd_c%s_i.fits,final/proj_coadd_c%s_%s.fits -c pisco_pipeline/config_slr.sex -CATALOG_NAME %s -SEEING_FWHM %s -SATUR_LEVEL %s -PIXEL_SCALE %s -CHECKIMAGE_NAME %s' % \
# (field,field,band,"%s/mag_i%s.fits"%(slrdir,band),str(seeing),str(param['satur_level_%s'%band]),str(see_px(seeing)),"%s/check_%s.fits"%(slrdir,band))
# print cmd
# sub=subprocess.check_call(shlex.split(cmd))
df_see=pd.read_csv('/Users/taweewat/Documents/red_sequence/total_chips_field_seeing.csv',index_col=0)
if field[0:5]=='CHIPS':
seeing = df_see[df_see.chips==field]['seeing_q25'].values[0] #np.min(df_see[df_see.chips==field][['seeing_025','seeing_gra_025']].values)
print seeing
elif (field[0:5]=='Field')|(field[0:3]=='PKS'):
seeing = df_see[df_see.name==field]['seeing_q25'].values[0] #np.min(df_see[df_see.name==field][['seeing_025','seeing_gra_025']].values)
print seeing
# if field=='CHIPS1011-0505':
# seeing=0.95
# if field=='Field179':
# seeing=1.12
# if seeing <= 0.65:
# seeing=0.9
# # elif seeing > 1.3:
# # seeing=1.34
# elif seeing > 1.:
# seeing=seeing
# else:
# seeing=1. #0.95 (1011), 1.0 (0005)
# seeing=1.1
# seeing=0.95
minarea=1.7 #field159
cmd='sex final/coadd_c%s_i.fits,final/proj_coadd_c%s_%s.fits -c pisco_pipeline/config_slr.sex -PARAMETERS_NAME pisco_pipeline/%s -CATALOG_NAME %s -SEEING_FWHM %s -SATUR_LEVEL %s -PHOT_APERTURES 15 -PIXEL_SCALE 0.22 -DETECT_MINAREA %s -CHECKIMAGE_NAME checki.fits'%\
(field,field,band,'sex_slr.param',"%s/mag_i%s.fits"%(slrdir,band),str(seeing),str(param['satur_level_%s'%band]),str(1.1/1.7*np.pi*(seeing/0.22)**2)); print cmd
sub = subprocess.check_call(shlex.split(cmd))
table=Table.read(slrdir+'/mag_i%s.fits'%band)
for name in table.colnames[1:]:
table.rename_column(name, name + '_%s' % band)
return table
def slr_running(field, bigmacs="pisco_pipeline/big-macs-calibrate-master"):
"""
slr_running: running SLR script from github.com/patkel/big-macs-calibrate to get a calibrated magnitude
INPUT:
- field: object of interset e.g., 'Field026'
- bigmacs: the location for "big-macs-calibrate" directoty
OUTPUT:
- a new table with added columns with name MAG_g,...,MAGERR_g,...
"""
slrdir = 'slr_output'
infile = slrdir+'/star_%s.fits' % field
# infile = slrdir+'/star_bleem_%s.fits' % field
pyfile = os.path.join(bigmacs, 'fit_locus.py')
cmd = "python %s --file %s --columns %s --extension 1 --bootstrap 5 -l -r ALPHA_J2000_i -d DELTA_J2000_i -j --plot=PLOTS_%s" \
% (pyfile, infile, os.path.join(bigmacs, "coadd_mag_sex.columns"), field)
print cmd
sub = subprocess.check_call(shlex.split(cmd))
def update_color(fname, table):
"""
update_color: using the output from SLR, update to the correct magnitude
INPUT:
- fname: input file from SLR output (...offsets.list)
- table: the table that we want to update the value (from column magg,etc to MAG_g,etc)
OUTPUT:
- a new table with added columns with name MAG_g,...,MAGERR_g,...
"""
with open(fname) as f:
content = f.readlines()
content = [x.strip() for x in content]
band = [x.split(' ')[0][-1] for x in content[5:-1]]
corr = [float(x.split(' ')[1]) for x in content[5:-1]]
ecorr = [float(x.split(' ')[3]) for x in content[5:-1]]
print 'bands = ', band
table['MAG_' + band[0]] = table['MAG_AUTO_' + band[0]] + corr[0]
table['MAG_' + band[1]] = table['MAG_AUTO_' + band[1]] + corr[1]
table['MAG_' + band[2]] = table['MAG_AUTO_' + band[2]] + corr[2]
table['MAG_' + band[3]] = table['MAG_AUTO_' + band[3]] + corr[3]
table['MAGERR_' + band[0]] = table['MAGERR_AUTO_' + band[0]] + ecorr[0]
table['MAGERR_' + band[1]] = table['MAGERR_AUTO_' + band[1]] + ecorr[1]
table['MAGERR_' + band[2]] = table['MAGERR_AUTO_' + band[2]] + ecorr[2]
table['MAGERR_' + band[3]] = table['MAGERR_AUTO_' + band[3]] + ecorr[3]
return table
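# Note (illustrative, inferred from how update_color parses the file; the exact
# layout of the SLR output may differ): the '*.offsets.list' file written by
# fit_locus.py is assumed to contain a short header, then one line per band
# roughly of the form
#   MAG_g <zero-point correction> +- <error>
# plus a trailing line; update_color reads content[5:-1], takes the last
# character of the first token as the band, token[1] as the correction and
# token[3] as its error.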
def purge(dir, pattern):
for f in os.listdir(dir):
if re.search(pattern, f):
print 'remove', f
os.remove(os.path.join(dir, f))
"""
pisco_photometry: run on the pisco_combine output data to correct the photometry of each object
and determine which objects are stars/galaxies.
The pipeline is a combination of the SLR algorithm (cite: https://github.com/patkel/big-macs-calibrate)
and Photutils for photometry apertures
ARGUMENTS:
1. fieldname for object (e.g., 'Field027')
EXAMPLES:
python pisco_pipeline/pisco_photometry_v3_step1.py SDSS123
"""
if __name__ == "__main__":
print 'Number of arguments:', len(sys.argv), 'arguments.'
print 'Argument List:', str(sys.argv)
slrdir = 'slr_output'
if not os.path.exists(slrdir):
os.makedirs(slrdir)
field = str(sys.argv[1])
mag_ig=aperature_proj(field,'g')
mag_ii=aperature_proj(field,'i')
mag_ir=aperature_proj(field,'r')
mag_iz=aperature_proj(field,'z')
total=join(join(join(mag_ii,mag_ig,keys='NUMBER'), mag_ir,keys='NUMBER'),mag_iz,keys='NUMBER')
total.write(os.path.join(slrdir, 'total_%s.csv' % field), overwrite=True)
# total2=total[['NUMBER','ALPHA_J2000_i','DELTA_J2000_i','MAG_AUTO_i','MAGERR_AUTO_i','MAG_AUTO_g','MAGERR_AUTO_g',\
# 'MAG_AUTO_r','MAGERR_AUTO_r','MAG_AUTO_z','MAGERR_AUTO_z','CLASS_STAR_i','CLASS_STAR_g',\
# 'CLASS_STAR_r','CLASS_STAR_z','FLAGS_i','FLAGS_g','FLAGS_r','FLAGS_z']]
# total2=total2[(total2['FLAGS_g']<5)&(total2['FLAGS_r']<5)&(total2['FLAGS_i']<5)&(total2['FLAGS_z']<5)]
# total3=total2[total2['CLASS_STAR_i'] > 0.9]
# print len(total3)
# total3.write(slrdir+'/star_%s.fits' % field, overwrite=True)
#
# slr_running(field)
# # ntotal = update_color(slrdir+'/star_bleem_%s.fits.offsets.list'%field, total)
# # ntotal.write(os.path.join(slrdir, 'ntotal_bleem_%s.csv' % field), overwrite=True)
# ntotal = update_color(slrdir+'/star_%s.fits.offsets.list'%field, total)
# ntotal.write(os.path.join(slrdir, 'ntotal_%s.csv' % field), overwrite=True)
#
# purge('final', "proj_coadd_c%s_.*\.fits" % field)
print 'test'
|
|
import logging
import asyncio
import traceback
import collections
import time
class TerminateProcessing(Exception):
pass
class Element(object):
def __init__(self, downstream=None, logger=None):
self.logger = logger
self.downstream = downstream
self.profiler = None
def __rshift__(self, rhs):
last = self
while last.downstream:
last = last.downstream
last.downstream = rhs
return self
def __iter__(self):
yield self
for successor in self._successors():
for element in successor:
yield element
def _profile(self, action, *args):
if self.profiler is not None:
self.profiler.log(action, *args)
def _successors(self):
if isinstance(self.downstream, list):
for sub_stream in self.downstream:
yield sub_stream
elif self.downstream:
yield self.downstream
def _set_up(self):
"""Subclasses my decorate this as a coroutine"""
pass
@asyncio.coroutine
def set_up(self):
if asyncio.iscoroutinefunction(self._set_up):
yield from self._set_up()
else:
self._set_up()
for successor in self._successors():
yield from successor.set_up()
def _tear_down(self):
"""Subclasses my decorate this as a coroutine"""
pass
@asyncio.coroutine
def tear_down(self):
for successor in self._successors():
yield from successor.tear_down()
if asyncio.iscoroutinefunction(self._tear_down):
yield from self._tear_down()
else:
self._tear_down()
@asyncio.coroutine
def _process_single(self, data):
return data
@asyncio.coroutine
def _process(self, data):
result = []
for element in data:
try:
mapped = yield from self._process_single(element)
assert mapped is not None, "pipeline element '{0}' single element processing result must not be None".format(self.__class__.__name__)
result.append(mapped)
except TerminateProcessing:
pass
return result
@asyncio.coroutine
def process(self, data):
try:
result = yield from self._process(data)
assert result is not None, "pipeline element '{0}' processing result must not be None".format(self.__class__.__name__)
assert isinstance(result, collections.Iterable), "pipeline element '{0}' processing must return an iterable".format(self.__class__.__name__)
for successor in self._successors():
yield from successor.process(result)
except TerminateProcessing:
pass
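# Illustrative sketch (not part of the original module): a custom pipeline
# element usually only needs to override _process_single(); the Element base
# class takes care of chaining, fan-out to successors and TerminateProcessing.
class _UppercaseExample(Element):
    @asyncio.coroutine
    def _process_single(self, data):
        # assumes string payloads; purely for demonstration
        return data.upper()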
class Counter(Element):
def __init__(self, limit, downstream=None, logger=None):
super().__init__(downstream=downstream, logger=logger)
self.limit = limit
self.counter = 0
self.start = None
@asyncio.coroutine
def _process_single(self, message):
if self.start is None:
self.start = time.time()
self.counter += 1
if self.counter == self.limit:
duration = (time.time() - self.start) * 1000
if self.logger:
self.logger.info("received %d messages in %.0f ms" % (self.counter, duration))
self.counter = 0
self.start = None
return message
class Logger(Element):
def __init__(self, downstream=None, logger=None):
super().__init__(downstream=downstream, logger=logger)
self.level = logging.INFO
@asyncio.coroutine
def _process_single(self, message):
if self.logger is not None:
self.logger.log(self.level, 'received:\n--\n%s--', message)
return message
class Dispatcher(Element):
def __init__(self, target, downstream=None, logger=None):
super().__init__(downstream=downstream, logger=logger)
self.target = target
self.coroutine_target = asyncio.iscoroutinefunction(target)
@asyncio.coroutine
def _process_single(self, data):
result = self.target(data)
if self.coroutine_target:
result = yield from result
return result
class Numerator(Element):
def __init__(self, downstream=None, logger=None):
        super().__init__(downstream=downstream, logger=logger)
        self._index = -1
@asyncio.coroutine
def _process_single(self, element):
self._index += 1
return (self._index, element)
class Serializer(Element):
def __init__(self, downstream=None, logger=None):
super().__init__(downstream=downstream, logger=logger)
self.super_process = super().process
@asyncio.coroutine
def _process(self, elements):
super_process = self.super_process
for element in elements:
            yield from super_process(element)
raise TerminateProcessing()
class Parallelizer(Element):
def __init__(self, loop=None, downstream=None, logger=None):
super().__init__(downstream=downstream, logger=logger)
self._loop = loop if loop is not None else asyncio.get_event_loop()
self._workers = set() # asyncio.Task, each running process() of a successor
self._tearing_down = False
@asyncio.coroutine
def _tear_down(self):
self._tearing_down = True
# wait for worker to finish
if not self._workers:
return
for task in self._workers:
task.cancel()
done, pending = yield from asyncio.wait(self._workers, loop=self._loop, timeout=2)
if pending and self.logger is not None:
self.logger.error("could not cancel processing of %d messages", len(pending))
self._tearing_down = False
@asyncio.coroutine
def _process_single(self, data):
def worker_finished(task):
# HINT maybe use helper.create_exception_reporting_task
if not (self._tearing_down and task.cancelled()) and task.exception():
ex = task.exception()
output = traceback.format_exception(ex.__class__, ex, ex.__traceback__)
if self.logger is not None:
self.logger.critical(''.join(output))
self._workers.remove(task)
for successor in self._successors():
# start async task to process message
# TODO use self._loop.create_task once Python 3.4.2 is released
worker = asyncio.Task(successor.process(data), loop=self._loop)
worker.add_done_callback(worker_finished)
self._workers.add(worker)
return data
@asyncio.coroutine
def _process(self, data):
yield from super()._process(data)
raise TerminateProcessing()
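# Illustrative usage sketch (not part of the original module), assuming the API
# above: '>>' chains elements into a pipeline, and set_up()/process()/tear_down()
# are coroutines driven by an asyncio event loop.
def _example_pipeline():
    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger("pipeline")
    # log every message, then count them in batches of two
    pipeline = Logger(logger=log) >> Counter(limit=2, logger=log)
    loop = asyncio.get_event_loop()
    loop.run_until_complete(pipeline.set_up())
    loop.run_until_complete(pipeline.process(["first message", "second message"]))
    loop.run_until_complete(pipeline.tear_down())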
|
|
from statsmodels.compat.python import lrange, lzip
from statsmodels.compat.numpy import recarray_select
import numpy as np
import numpy.lib.recfunctions as nprf
import pandas as pd
from pandas import DataFrame
from pandas.tseries import offsets
from pandas.tseries.frequencies import to_offset
from statsmodels.tools.validation import int_like, bool_like, string_like
from statsmodels.tools.sm_exceptions import ValueWarning
from statsmodels.tools.data import _is_using_pandas, _is_recarray
from statsmodels.tools.validation import array_like
def add_trend(x, trend="c", prepend=False, has_constant='skip'):
"""
Add a trend and/or constant to an array.
Parameters
----------
x : array_like
Original array of data.
trend : str {'n', 'c', 't', 'ct', 'ctt'}
The trend to add.
* 'n' add no trend.
* 'c' add constant only.
* 't' add trend only.
* 'ct' add constant and linear trend.
* 'ctt' add constant and linear and quadratic trend.
prepend : bool
If True, prepends the new data to the columns of X.
has_constant : str {'raise', 'add', 'skip'}
Controls what happens when trend is 'c' and a constant column already
exists in x. 'raise' will raise an error. 'add' will add a column of
1s. 'skip' will return the data without change. 'skip' is the default.
Returns
-------
array_like
The original data with the additional trend columns. If x is a
recarray or pandas Series or DataFrame, then the trend column names
are 'const', 'trend' and 'trend_squared'.
See Also
--------
statsmodels.tools.tools.add_constant
Add a constant column to an array.
Notes
-----
Returns columns as ['ctt','ct','c'] whenever applicable. There is currently
no checking for an existing trend.
"""
prepend = bool_like(prepend, 'prepend')
trend = string_like(trend, 'trend', options=('n', 'c', 't', 'ct', 'ctt'))
has_constant = string_like(has_constant, 'has_constant',
options=('raise', 'add', 'skip'))
    # TODO: could be generalized for trend of arbitrary order
columns = ['const', 'trend', 'trend_squared']
if trend == 'n':
return x.copy()
elif trend == "c": # handles structured arrays
columns = columns[:1]
trendorder = 0
elif trend == "ct" or trend == "t":
columns = columns[:2]
if trend == "t":
columns = columns[1:2]
trendorder = 1
elif trend == "ctt":
trendorder = 2
is_recarray = _is_recarray(x)
is_pandas = _is_using_pandas(x, None) or is_recarray
if is_pandas or is_recarray:
if is_recarray:
# deprecated: remove recarray support after 0.12
import warnings
from statsmodels.tools.sm_exceptions import recarray_warning
warnings.warn(recarray_warning, FutureWarning)
descr = x.dtype.descr
x = pd.DataFrame.from_records(x)
elif isinstance(x, pd.Series):
x = pd.DataFrame(x)
else:
x = x.copy()
else:
x = np.asanyarray(x)
nobs = len(x)
trendarr = np.vander(np.arange(1, nobs + 1, dtype=np.float64), trendorder + 1)
# put in order ctt
trendarr = np.fliplr(trendarr)
if trend == "t":
trendarr = trendarr[:, 1]
if "c" in trend:
if is_pandas or is_recarray:
# Mixed type protection
def safe_is_const(s):
try:
return np.ptp(s) == 0.0 and np.any(s != 0.0)
except:
return False
col_const = x.apply(safe_is_const, 0)
else:
ptp0 = np.ptp(np.asanyarray(x), axis=0)
col_is_const = ptp0 == 0
nz_const = col_is_const & (x[0] != 0)
col_const = nz_const
if np.any(col_const):
if has_constant == 'raise':
if x.ndim == 1:
base_err = "x is constant."
else:
columns = np.arange(x.shape[1])[col_const]
if isinstance(x, pd.DataFrame):
                        columns = x.columns[columns]
const_cols = ", ".join([str(c) for c in columns])
base_err = (
"x contains one or more constant columns. Column(s) "
f"{const_cols} are constant."
)
msg = (
f"{base_err} Adding a constant with trend='{trend}' is not allowed."
)
raise ValueError(msg)
elif has_constant == 'skip':
columns = columns[1:]
trendarr = trendarr[:, 1:]
order = 1 if prepend else -1
if is_recarray or is_pandas:
trendarr = pd.DataFrame(trendarr, index=x.index, columns=columns)
x = [trendarr, x]
        x = pd.concat(x[::order], axis=1)
else:
x = [trendarr, x]
x = np.column_stack(x[::order])
if is_recarray:
x = x.to_records(index=False)
new_descr = x.dtype.descr
extra_col = len(new_descr) - len(descr)
if prepend:
descr = new_descr[:extra_col] + descr
else:
descr = descr + new_descr[-extra_col:]
x = x.astype(np.dtype(descr))
return x
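def _add_trend_example():
    # Illustrative sketch (not part of the original module): append a constant
    # and a linear trend to a small DataFrame.
    data = pd.DataFrame({"y": [1.0, 2.0, 4.0, 8.0]})
    with_trend = add_trend(data, trend="ct")
    # with_trend has columns ['y', 'const', 'trend']; 'const' is all ones and
    # 'trend' runs 1.0, 2.0, 3.0, 4.0.
    return with_trend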
def add_lag(x, col=None, lags=1, drop=False, insert=True):
"""
Returns an array with lags included given an array.
Parameters
----------
x : array_like
An array or NumPy ndarray subclass. Can be either a 1d or 2d array with
observations in columns.
col : 'string', int, or None
If data is a structured array or a recarray, `col` can be a string
that is the name of the column containing the variable. Or `col` can
be an int of the zero-based column index. If it's a 1d array `col`
can be None.
lags : int
The number of lags desired.
drop : bool
Whether to keep the contemporaneous variable for the data.
insert : bool or int
If True, inserts the lagged values after `col`. If False, appends
the data. If int inserts the lags at int.
Returns
-------
array : ndarray
Array with lags
Examples
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.macrodata.load(as_pandas=False)
>>> data = data.data[['year','quarter','realgdp','cpi']]
>>> data = sm.tsa.add_lag(data, 'realgdp', lags=2)
Notes
-----
    Trims the array both forward and backward, so that the length of the
    returned array is len(`x`) - lags. The lags are returned in increasing
    order, i.e., t-1, t-2, ..., t-lags.
"""
lags = int_like(lags, 'lags')
drop = bool_like(drop, 'drop')
if x.dtype.names:
names = x.dtype.names
if not col and np.squeeze(x).ndim > 1:
raise IndexError("col is None and the input array is not 1d")
elif len(names) == 1:
col = names[0]
if isinstance(col, int):
col = x.dtype.names[col]
contemp = x[col]
# make names for lags
tmp_names = [col + '_'+'L(%i)' % i for i in range(1, lags+1)]
ndlags = lagmat(contemp, maxlag=lags, trim='Both')
# get index for return
if insert is True:
ins_idx = list(names).index(col) + 1
elif insert is False:
ins_idx = len(names) + 1
else: # insert is an int
if insert > len(names):
import warnings
warnings.warn("insert > number of variables, inserting at the"
" last position", ValueWarning)
ins_idx = insert
first_names = list(names[:ins_idx])
last_names = list(names[ins_idx:])
if drop:
if col in first_names:
first_names.pop(first_names.index(col))
else:
last_names.pop(last_names.index(col))
if first_names: # only do this if x is not "empty"
# Workaround to avoid NumPy FutureWarning
_x = recarray_select(x, first_names)
first_arr = nprf.append_fields(_x[lags:], tmp_names, ndlags.T,
usemask=False)
else:
first_arr = np.zeros(len(x)-lags, dtype=lzip(tmp_names,
(x[col].dtype,)*lags))
for i,name in enumerate(tmp_names):
first_arr[name] = ndlags[:,i]
if last_names:
return nprf.append_fields(first_arr, last_names,
[x[name][lags:] for name in last_names], usemask=False)
else: # lags for last variable
return first_arr
else: # we have an ndarray
if x.ndim == 1: # make 2d if 1d
x = x[:,None]
if col is None:
col = 0
# handle negative index
if col < 0:
col = x.shape[1] + col
contemp = x[:,col]
if insert is True:
ins_idx = col + 1
elif insert is False:
ins_idx = x.shape[1]
else:
if insert < 0: # handle negative index
insert = x.shape[1] + insert + 1
if insert > x.shape[1]:
insert = x.shape[1]
import warnings
warnings.warn("insert > number of variables, inserting at the"
" last position", ValueWarning)
ins_idx = insert
ndlags = lagmat(contemp, lags, trim='Both')
first_cols = lrange(ins_idx)
last_cols = lrange(ins_idx,x.shape[1])
if drop:
if col in first_cols:
first_cols.pop(first_cols.index(col))
else:
last_cols.pop(last_cols.index(col))
return np.column_stack((x[lags:,first_cols],ndlags,
x[lags:,last_cols]))
def detrend(x, order=1, axis=0):
"""
Detrend an array with a trend of given order along axis 0 or 1.
Parameters
----------
x : array_like, 1d or 2d
Data, if 2d, then each row or column is independently detrended with
the same trendorder, but independent trend estimates.
order : int
The polynomial order of the trend, zero is constant, one is
linear trend, two is quadratic trend.
axis : int
Axis can be either 0, observations by rows, or 1, observations by
columns.
Returns
-------
ndarray
The detrended series is the residual of the linear regression of the
data on the trend of given order.
"""
order = int_like(order, 'order')
axis = int_like(axis, 'axis')
if x.ndim == 2 and int(axis) == 1:
x = x.T
elif x.ndim > 2:
raise NotImplementedError('x.ndim > 2 is not implemented until it is needed')
nobs = x.shape[0]
if order == 0:
# Special case demean
resid = x - x.mean(axis=0)
else:
trends = np.vander(np.arange(float(nobs)), N=order + 1)
beta = np.linalg.pinv(trends).dot(x)
resid = x - np.dot(trends, beta)
if x.ndim == 2 and int(axis) == 1:
resid = resid.T
return resid
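def _detrend_example():
    # Illustrative sketch (not part of the original module): removing a linear
    # trend from a perfectly linear series leaves residuals that are ~0.
    y = 3.0 + 2.0 * np.arange(10.0)
    resid = detrend(y, order=1)
    # np.allclose(resid, 0.0) -> True
    return resid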
def lagmat(x, maxlag, trim='forward', original='ex', use_pandas=False):
"""
Create 2d array of lags.
Parameters
----------
x : array_like
Data; if 2d, observation in rows and variables in columns.
maxlag : int
All lags from zero to maxlag are included.
trim : {'forward', 'backward', 'both', 'none', None}
The trimming method to use.
* 'forward' : trim invalid observations in front.
* 'backward' : trim invalid initial observations.
* 'both' : trim invalid observations on both sides.
* 'none', None : no trimming of observations.
original : {'ex','sep','in'}
How the original is treated.
* 'ex' : drops the original array returning only the lagged values.
* 'in' : returns the original array and the lagged values as a single
array.
* 'sep' : returns a tuple (original array, lagged values). The original
array is truncated to have the same number of rows as
the returned lagmat.
use_pandas : bool
If true, returns a DataFrame when the input is a pandas
Series or DataFrame. If false, return numpy ndarrays.
Returns
-------
lagmat : ndarray
The array with lagged observations.
y : ndarray, optional
Only returned if original == 'sep'.
Notes
-----
When using a pandas DataFrame or Series with use_pandas=True, trim can only
be 'forward' or 'both' since it is not possible to consistently extend
index values.
Examples
--------
>>> from statsmodels.tsa.tsatools import lagmat
>>> import numpy as np
>>> X = np.arange(1,7).reshape(-1,2)
>>> lagmat(X, maxlag=2, trim="forward", original='in')
array([[ 1., 2., 0., 0., 0., 0.],
[ 3., 4., 1., 2., 0., 0.],
[ 5., 6., 3., 4., 1., 2.]])
>>> lagmat(X, maxlag=2, trim="backward", original='in')
array([[ 5., 6., 3., 4., 1., 2.],
[ 0., 0., 5., 6., 3., 4.],
[ 0., 0., 0., 0., 5., 6.]])
>>> lagmat(X, maxlag=2, trim="both", original='in')
array([[ 5., 6., 3., 4., 1., 2.]])
>>> lagmat(X, maxlag=2, trim="none", original='in')
array([[ 1., 2., 0., 0., 0., 0.],
[ 3., 4., 1., 2., 0., 0.],
[ 5., 6., 3., 4., 1., 2.],
[ 0., 0., 5., 6., 3., 4.],
[ 0., 0., 0., 0., 5., 6.]])
"""
maxlag = int_like(maxlag, 'maxlag')
use_pandas = bool_like(use_pandas, 'use_pandas')
trim = string_like(trim, 'trim', optional=True,
options=('forward', 'backward', 'both', 'none'))
original = string_like(original, 'original', options=('ex', 'sep', 'in'))
# TODO: allow list of lags additional to maxlag
orig = x
x = array_like(x, 'x', ndim=2, dtype=None)
is_pandas = _is_using_pandas(orig, None) and use_pandas
trim = 'none' if trim is None else trim
trim = trim.lower()
if is_pandas and trim in ('none', 'backward'):
raise ValueError("trim cannot be 'none' or 'forward' when used on "
"Series or DataFrames")
dropidx = 0
nobs, nvar = x.shape
if original in ['ex', 'sep']:
dropidx = nvar
if maxlag >= nobs:
raise ValueError("maxlag should be < nobs")
lm = np.zeros((nobs + maxlag, nvar * (maxlag + 1)))
for k in range(0, int(maxlag + 1)):
lm[maxlag - k:nobs + maxlag - k,
nvar * (maxlag - k):nvar * (maxlag - k + 1)] = x
if trim in ('none', 'forward'):
startobs = 0
elif trim in ('backward', 'both'):
startobs = maxlag
else:
raise ValueError('trim option not valid')
if trim in ('none', 'backward'):
stopobs = len(lm)
else:
stopobs = nobs
if is_pandas:
x = orig
x_columns = x.columns if isinstance(x, DataFrame) else [x.name]
columns = [str(col) for col in x_columns]
for lag in range(maxlag):
lag_str = str(lag + 1)
columns.extend([str(col) + '.L.' + lag_str for col in x_columns])
lm = DataFrame(lm[:stopobs], index=x.index, columns=columns)
lags = lm.iloc[startobs:]
if original in ('sep', 'ex'):
leads = lags[x_columns]
            lags = lags.drop(x_columns, axis=1)
else:
lags = lm[startobs:stopobs, dropidx:]
if original == 'sep':
leads = lm[startobs:stopobs, :dropidx]
if original == 'sep':
return lags, leads
else:
return lags
def lagmat2ds(x, maxlag0, maxlagex=None, dropex=0, trim='forward',
use_pandas=False):
"""
Generate lagmatrix for 2d array, columns arranged by variables.
Parameters
----------
x : array_like
Data, 2d. Observations in rows and variables in columns.
maxlag0 : int
The first variable all lags from zero to maxlag are included.
maxlagex : {None, int}
The max lag for all other variables all lags from zero to maxlag are
included.
dropex : int
Exclude first dropex lags from other variables. For all variables,
except the first, lags from dropex to maxlagex are included.
trim : str
The trimming method to use.
* 'forward' : trim invalid observations in front.
* 'backward' : trim invalid initial observations.
* 'both' : trim invalid observations on both sides.
* 'none' : no trimming of observations.
use_pandas : bool
If true, returns a DataFrame when the input is a pandas
Series or DataFrame. If false, return numpy ndarrays.
Returns
-------
ndarray
The array with lagged observations, columns ordered by variable.
Notes
-----
Inefficient implementation for unequal lags, implemented for convenience.
"""
maxlag0 = int_like(maxlag0, 'maxlag0')
maxlagex = int_like(maxlagex, 'maxlagex', optional=True)
trim = string_like(trim, 'trim', optional=True,
options=('forward', 'backward', 'both', 'none'))
if maxlagex is None:
maxlagex = maxlag0
maxlag = max(maxlag0, maxlagex)
is_pandas = _is_using_pandas(x, None)
if x.ndim == 1:
if is_pandas:
x = pd.DataFrame(x)
else:
x = x[:, None]
elif x.ndim == 0 or x.ndim > 2:
raise ValueError('Only supports 1 and 2-dimensional data.')
nobs, nvar = x.shape
if is_pandas and use_pandas:
lags = lagmat(x.iloc[:, 0], maxlag, trim=trim,
original='in', use_pandas=True)
lagsli = [lags.iloc[:, :maxlag0 + 1]]
for k in range(1, nvar):
lags = lagmat(x.iloc[:, k], maxlag, trim=trim,
original='in', use_pandas=True)
lagsli.append(lags.iloc[:, dropex:maxlagex + 1])
return pd.concat(lagsli, axis=1)
elif is_pandas:
x = np.asanyarray(x)
lagsli = [lagmat(x[:, 0], maxlag, trim=trim, original='in')[:, :maxlag0 + 1]]
for k in range(1, nvar):
lagsli.append(lagmat(x[:, k], maxlag, trim=trim, original='in')[:, dropex:maxlagex + 1])
return np.column_stack(lagsli)
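def _lagmat2ds_example():
    # Illustrative sketch (not part of the original module): two variables, with
    # lags 0..2 of the first and lags 0..1 of the second, trimmed forward.
    x = np.column_stack([np.arange(1.0, 7.0), np.arange(10.0, 16.0)])
    both = lagmat2ds(x, maxlag0=2, maxlagex=1, trim='forward')
    # both has 6 rows and 5 columns: [x0, x0.L1, x0.L2, x1, x1.L1]
    return both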
def vec(mat):
return mat.ravel('F')
def vech(mat):
# Gets Fortran-order
return mat.T.take(_triu_indices(len(mat)))
# tril/triu/diag, suitable for ndarray.take
def _tril_indices(n):
rows, cols = np.tril_indices(n)
return rows * n + cols
def _triu_indices(n):
rows, cols = np.triu_indices(n)
return rows * n + cols
def _diag_indices(n):
rows, cols = np.diag_indices(n)
return rows * n + cols
def unvec(v):
k = int(np.sqrt(len(v)))
assert(k * k == len(v))
return v.reshape((k, k), order='F')
def unvech(v):
# quadratic formula, correct fp error
rows = .5 * (-1 + np.sqrt(1 + 8 * len(v)))
rows = int(np.round(rows))
result = np.zeros((rows, rows))
result[np.triu_indices(rows)] = v
result = result + result.T
# divide diagonal elements by 2
result[np.diag_indices(rows)] /= 2
return result
def duplication_matrix(n):
"""
Create duplication matrix D_n which satisfies vec(S) = D_n vech(S) for
symmetric matrix S
Returns
-------
D_n : ndarray
"""
n = int_like(n, 'n')
tmp = np.eye(n * (n + 1) // 2)
return np.array([unvech(x).ravel() for x in tmp]).T
def elimination_matrix(n):
"""
Create the elimination matrix L_n which satisfies vech(M) = L_n vec(M) for
any matrix M
    Parameters
    ----------
    n : int
        The dimension of the square matrix M.
    Returns
    -------
    L_n : ndarray
"""
n = int_like(n, 'n')
vech_indices = vec(np.tril(np.ones((n, n))))
return np.eye(n * n)[vech_indices != 0]
def commutation_matrix(p, q):
"""
Create the commutation matrix K_{p,q} satisfying vec(A') = K_{p,q} vec(A)
Parameters
----------
p : int
q : int
Returns
-------
K : ndarray (pq x pq)
"""
p = int_like(p, 'p')
q = int_like(q, 'q')
K = np.eye(p * q)
indices = np.arange(p * q).reshape((p, q), order='F')
return K.take(indices.ravel(), axis=0)
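def _matrix_identity_example():
    # Illustrative sketch (not part of the original module): check the defining
    # identities of the helper matrices on a small symmetric matrix.
    S = np.array([[2.0, 1.0], [1.0, 3.0]])
    D2 = duplication_matrix(2)      # vec(S) == D2 @ vech(S)
    L2 = elimination_matrix(2)      # vech(S) == L2 @ vec(S)
    K22 = commutation_matrix(2, 2)  # vec(S.T) == K22 @ vec(S)
    return (np.allclose(vec(S), D2 @ vech(S))
            and np.allclose(vech(S), L2 @ vec(S))
            and np.allclose(vec(S.T), K22 @ vec(S)))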
def _ar_transparams(params):
"""
    Transforms params to induce stationarity/invertibility.
Parameters
----------
params : array_like
The AR coefficients
Reference
---------
Jones(1980)
"""
newparams = np.tanh(params/2)
tmp = np.tanh(params/2)
for j in range(1,len(params)):
a = newparams[j]
for kiter in range(j):
tmp[kiter] -= a * newparams[j-kiter-1]
newparams[:j] = tmp[:j]
return newparams
def _ar_invtransparams(params):
"""
Inverse of the Jones reparameterization
Parameters
----------
params : array_like
The transformed AR coefficients
"""
params = params.copy()
tmp = params.copy()
for j in range(len(params)-1,0,-1):
a = params[j]
for kiter in range(j):
tmp[kiter] = (params[kiter] + a * params[j-kiter-1])/\
(1-a**2)
params[:j] = tmp[:j]
invarcoefs = 2*np.arctanh(params)
return invarcoefs
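def _ar_transform_example():
    # Illustrative sketch (not part of the original module): the Jones (1980)
    # transform maps unconstrained values into the stationary region and
    # _ar_invtransparams undoes it.
    raw = np.array([0.3, -0.2, 0.1])
    constrained = _ar_transparams(raw)
    recovered = _ar_invtransparams(constrained)
    # np.allclose(raw, recovered) -> True
    return constrained, recovered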
def _ma_transparams(params):
"""
    Transforms params to induce stationarity/invertibility.
Parameters
----------
params : ndarray
        The MA coefficients of an (AR)MA model.
Reference
---------
Jones(1980)
"""
newparams = ((1-np.exp(-params))/(1+np.exp(-params))).copy()
tmp = ((1-np.exp(-params))/(1+np.exp(-params))).copy()
# levinson-durbin to get macf
for j in range(1,len(params)):
b = newparams[j]
for kiter in range(j):
tmp[kiter] += b * newparams[j-kiter-1]
newparams[:j] = tmp[:j]
return newparams
def _ma_invtransparams(macoefs):
"""
Inverse of the Jones reparameterization
Parameters
----------
    macoefs : ndarray
The transformed MA coefficients
"""
tmp = macoefs.copy()
for j in range(len(macoefs)-1,0,-1):
b = macoefs[j]
for kiter in range(j):
tmp[kiter] = (macoefs[kiter]-b *macoefs[j-kiter-1])/(1-b**2)
macoefs[:j] = tmp[:j]
invmacoefs = -np.log((1-macoefs)/(1+macoefs))
return invmacoefs
def unintegrate_levels(x, d):
"""
Returns the successive differences needed to unintegrate the series.
Parameters
----------
x : array_like
The original series
d : int
The number of differences of the differenced series.
Returns
-------
y : array_like
The increasing differences from 0 to d-1 of the first d elements
of x.
See Also
--------
unintegrate
"""
d = int_like(d, 'd')
x = x[:d]
return np.asarray([np.diff(x, d - i)[0] for i in range(d, 0, -1)])
def unintegrate(x, levels):
"""
After taking n-differences of a series, return the original series
Parameters
----------
x : array_like
The n-th differenced series
levels : list
A list of the first-value in each differenced series, for
[first-difference, second-difference, ..., n-th difference]
Returns
-------
y : array_like
The original series de-differenced
Examples
--------
>>> x = np.array([1, 3, 9., 19, 8.])
>>> levels = unintegrate_levels(x, 2)
>>> levels
array([ 1., 2.])
>>> unintegrate(np.diff(x, 2), levels)
array([ 1., 3., 9., 19., 8.])
"""
levels = list(levels)[:] # copy
if len(levels) > 1:
x0 = levels.pop(-1)
return unintegrate(np.cumsum(np.r_[x0, x]), levels)
x0 = levels[0]
return np.cumsum(np.r_[x0, x])
def freq_to_period(freq):
"""
Convert a pandas frequency to a periodicity
Parameters
----------
freq : str or offset
Frequency to convert
Returns
-------
period : int
Periodicity of freq
Notes
-----
Annual maps to 1, quarterly maps to 4, monthly to 12, weekly to 52.
"""
if not isinstance(freq, offsets.DateOffset):
freq = to_offset(freq) # go ahead and standardize
freq = freq.rule_code.upper()
if freq == 'A' or freq.startswith(('A-', 'AS-')):
return 1
elif freq == 'Q' or freq.startswith(('Q-', 'QS-')):
return 4
elif freq == 'M' or freq.startswith(('M-', 'MS')):
return 12
elif freq == 'W' or freq.startswith('W-'):
return 52
elif freq == 'D':
return 7
elif freq == 'B':
return 5
elif freq == 'H':
return 24
else: # pragma : no cover
raise ValueError("freq {} not understood. Please report if you "
"think this is in error.".format(freq))
__all__ = ['lagmat', 'lagmat2ds','add_trend', 'duplication_matrix',
'elimination_matrix', 'commutation_matrix',
'vec', 'vech', 'unvec', 'unvech', 'freq_to_period']
|
|
#!/usr/bin/env python
# This file is part of ObjectPath released under MIT license.
# Copyright (C) 2010-2014 Adrian Kalbarczyk
import sys, re
from .parser import parse
from objectpath.core import *
import objectpath.utils.colorify as color # pylint: disable=W0614
from objectpath.utils import flatten, filter_dict, timeutils, skip
from objectpath.utils.json_ext import py2JSON
from objectpath.core import ITER_TYPES, generator, chain
from objectpath.utils.debugger import Debugger
EPSILON = 0.0000000000000001 #this is used in float comparison
EXPR_CACHE = {}
RE_TYPE = type(re.compile(''))
# Set external modules to 0 to enable lazy loading; 0 ensures that Pythonic types are never matched.
# This is efficient because the `if` check is fast, and once loaded these variables point to the actual libraries.
ObjectId = generateID = calendar = escape = escapeDict = unescape = unescapeDict = 0
class Tree(Debugger):
_REGISTERED_FUNCTIONS = {}
@classmethod
def register_function(cls, name, func):
"""
This method is used to add custom functions not catered for by default
:param str name: The name by which the function will be referred to in the expression
:param callable func: The function
:return:
"""
cls._REGISTERED_FUNCTIONS[name] = func
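  # Usage sketch (illustrative, not from the original source): once registered,
  # a function can be referred to by name inside an expression, e.g.
  #   Tree.register_function("upper", lambda s: s.upper())
  #   Tree({"name": "admin"}).execute("upper($.name)")  # -> "ADMIN"
  # The exact expression syntax follows the ObjectPath documentation; this
  # comment only illustrates the registration API described above.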
def __init__(self, obj, cfg=None):
if not cfg:
cfg = {}
self.D = cfg.get("debug", False)
self.setObjectGetter(cfg.get("object_getter", None))
self.setData(obj)
self.current = self.node = None
if self.D: super(Tree, self).__init__()
def setData(self, obj):
if type(obj) in ITER_TYPES + [dict]:
self.data = obj
def setObjectGetter(self, object_getter_cb):
if callable(object_getter_cb):
self.object_getter = object_getter_cb
else:
def default_getter(obj, attr):
try:
return obj.__getattribute__(attr)
except AttributeError:
if self.D:
self.end(color.op(".") + " returning '%s'", color.bold(obj))
return obj
self.object_getter = default_getter
def compile(self, expr):
if expr in EXPR_CACHE:
return EXPR_CACHE[expr]
ret = EXPR_CACHE[expr] = parse(expr, self.D)
return ret
def execute(self, expr):
D = self.D
if D: self.start("Tree.execute")
TYPES = [str, int, float, bool, generator, chain]
try:
TYPES += [long]
except NameError:
pass
# TODO change to yield?
def exe(node):
"""
node[0] - operator name
node[1:] - params
"""
types = [
str, timeutils.datetime.time, timeutils.datetime.date,
timeutils.datetime.datetime
]
try:
types += [unicode]
except:
pass
if D: self.start("executing node %s", color.bold(self.cleanOutput(node)))
type_node = type(node)
if node is None or type_node in TYPES:
return node
elif type_node in types:
return node
elif type_node is list:
return (exe(n) for n in node)
elif type_node is dict:
ret = {}
for i in node.items():
ret[exe(i[0])] = exe(i[1])
return ret
op = node[0]
if op == "or":
if D: self.debug("%s or %s", node[1], node[2])
return exe(node[1]) or exe(node[2])
elif op == "and":
if D: self.debug("%s and %s", node[1], node[2])
return exe(node[1]) and exe(node[2])
elif op == "+":
if len(node) > 2:
fst = exe(node[1])
snd = exe(node[2])
if None in (fst, snd):
return fst or snd
typefst = type(fst)
typesnd = type(snd)
if typefst is dict:
try:
fst.update(snd)
except Exception:
if type(snd) is not dict:
raise ProgrammingError(
"Can't add value of type %s to %s" % (
color.bold(
PY_TYPES_MAP.
get(type(snd).__name__,
type(snd).__name__)
), color.bold("object")
)
)
return fst
if typefst is list and typesnd is list:
if D: self.debug("both sides are lists, returning '%s'", fst + snd)
return fst + snd
if typefst in ITER_TYPES or typesnd in ITER_TYPES:
if typefst not in ITER_TYPES:
fst = [fst]
elif typesnd not in ITER_TYPES:
snd = [snd]
if D: self.debug("at least one side is a generator and the other is an iterable, returning chain")
return chain(fst, snd)
if typefst in NUM_TYPES:
try:
return fst + snd
except Exception:
return fst + float(snd)
if typefst in STR_TYPES or typesnd in STR_TYPES:
if D: self.info("doing string comparison '%s' is '%s'", fst, snd)
if sys.version_info[0] < 3:
if typefst is unicode:
fst = fst.encode("utf-8")
if typesnd is unicode:
snd = snd.encode("utf-8")
return str(fst) + str(snd)
try:
timeType = timeutils.datetime.time
if typefst is timeType and typesnd is timeType:
return timeutils.addTimes(fst, snd)
except Exception:
pass
if D: self.debug("standard addition, returning '%s'", fst + snd)
return fst + snd
else:
return exe(node[1])
elif op == "-":
if len(node) > 2:
fst = exe(node[1])
snd = exe(node[2])
try:
return fst - snd
except Exception:
typefst = type(fst)
typesnd = type(snd)
timeType = timeutils.datetime.time
if typefst is timeType and typesnd is timeType:
return timeutils.subTimes(fst, snd)
else:
return -exe(node[1])
elif op == "*":
return exe(node[1])*exe(node[2])
elif op == "%":
return exe(node[1]) % exe(node[2])
elif op == "/":
return exe(node[1])/float(exe(node[2]))
elif op == ">":
if D: self.debug("%s > %s, %s", node[1], node[2], node[1] > node[2])
return exe(node[1]) > exe(node[2])
elif op == "<":
return exe(node[1]) < exe(node[2])
elif op == ">=":
return exe(node[1]) >= exe(node[2])
elif op == "<=":
return exe(node[1]) <= exe(node[2])
# TODO this algorithm produces 3 for 1<2<3 and should be true
# elif op in "<=>=":
# fst=exe(node[1])
# snd=exe(node[2])
# if op==">":
# return fst > snd and snd or False
# elif op=="<":
# return fst < snd and snd or False
# elif op==">=":
# return fst >= snd and snd or False
# elif op=="<=":
# return fst <= snd and snd or False
elif op == "not":
fst = exe(node[1])
if D: self.debug("doing not '%s'", fst)
return not fst
elif op == "in":
fst = exe(node[1])
snd = exe(node[2])
if D: self.debug("doing '%s' in '%s'", node[1], node[2])
if type(fst) in ITER_TYPES and type(snd) in ITER_TYPES:
return any(
x in max(fst, snd, key=len) for x in min(fst, snd, key=len)
)
return exe(node[1]) in exe(node[2])
elif op == "not in":
fst = exe(node[1])
snd = exe(node[2])
if D: self.debug("doing '%s' not in '%s'", node[1], node[2])
if type(fst) in ITER_TYPES and type(snd) in ITER_TYPES:
return not any(
x in max(fst, snd, key=len) for x in min(fst, snd, key=len)
)
return exe(node[1]) not in exe(node[2])
elif op in ("is", "is not"):
if D: self.debug("found operator '%s'", op)
# try:
fst = exe(node[1])
# except Exception as e:
# if D: self.debug("NOT ERROR! Can't execute node[1] '%s', error: '%s'. Falling back to orginal value.",node[1],str(e))
# fst=node[1]
# try:
snd = exe(node[2])
# except Exception as e:
# if D: self.debug("NOT ERROR! Can't execute node[2] '%s', error: '%s'. Falling back to orginal value.",node[2],str(e))
# snd=node[2]
if op == "is" and fst == snd:
return True
# this doesn't work for 3 is not '3'
# if op == "is not" and fst != snd:
# return True
typefst = type(fst)
typesnd = type(snd)
if D: self.debug("type fst: '%s', type snd: '%s'", typefst, typesnd)
if typefst in STR_TYPES:
if D: self.info("doing string comparison '\"%s\" is \"%s\"'", fst, snd)
ret = str(fst) == str(snd)
elif typefst is float or typesnd is float:
if D: self.info("doing float comparison '%s is %s'", fst, snd)
try:
ret = abs(float(fst) - float(snd)) < EPSILON
except:
ret = False
elif typefst is int or typesnd is int:
if D: self.info("doing integer comparison '%s is %s'", fst, snd)
try:
ret = int(fst) == int(snd)
except:
ret = False
elif typefst is list and typesnd is list:
if D: self.info("doing array comparison '%s' is '%s'", fst, snd)
ret = fst == snd
elif typefst is dict and typesnd is dict:
if D: self.info("doing object comparison '%s' is '%s'", fst, snd)
ret = fst == snd
elif fst is None or snd is None:
if fst is None and snd is None:
# this executes only for "is not"
ret = True
else:
ret = (fst or snd) is None
if D: self.info(
"doing None comparison %s is %s = %s", color.bold(fst), color.bold(snd),
color.bold(not not (fst or snd))
)
else:
if D: self.info("can't compare %s and %s. Returning False", self.cleanOutput(fst), self.cleanOutput(snd))
ret = False
# else:
# try:
# global ObjectId
# if not ObjectId:
# from bson.objectid import ObjectId
# if typefst is ObjectId or typesnd is ObjectId:
# if D: self.info("doing MongoDB objectID comparison '%s' is '%s'",fst,snd)
# ret=str(fst)==str(snd)
# else:
# if D: self.info("doing standard comparison '%s' is '%s'",fst,snd)
# ret=fst is snd
# except Exception:
# pass
if op == "is not":
if D: self.info("'is not' found. Returning %s", not ret)
return not ret
else:
if D: self.info("returning %s is %s => %s", color.bold(self.cleanOutput(fst)), color.bold(self.cleanOutput(snd)), color.bold(ret))
return ret
elif op == "re":
return re.compile(exe(node[1]))
elif op == "matches":
fst = exe(node[1])
snd = exe(node[2])
if type(fst) not in STR_TYPES+[RE_TYPE]:
raise Exception("operator " + color.bold("matches") + " expects regexp on the left. Example: 'a.*d' matches 'abcd'")
if type(snd) in ITER_TYPES:
for i in snd:
if not not re.match(fst, i):
return True
return False
else:
# regex matches string
return not not re.match(fst, snd)
# elif op=="(literal)":
# fstLetter=node[1][0]
# if fstLetter is "'":
# return node[1][1:-1]
# elif fstLetter.isdigit:
# return int(node[1])
elif op == "(root)": # this is $
return self.data
# elif op=="(node)":# this is !
# if D: self.debug("returning node %s",self.node)
# return self.node
elif op == "(current)": # this is @
if D: self.debug("returning current node: \n %s", color.bold(self.current))
return self.current
elif op == "name":
return node[1]
elif op == ".":
fst = node[1]
if type(fst) is tuple:
fst = exe(fst)
typefst = type(fst)
if D: self.debug(color.op(".") + " left is '%s'", color.bold(self.cleanOutput(fst)))
# try:
if node[2][0] == "*":
if D:
self.end(
color.op(".") + " returning '%s'",
color.bold(typefst in ITER_TYPES and fst or [fst])
)
return fst # typefst in ITER_TYPES and fst or [fst]
# except:
# pass
snd = exe(node[2])
if D: self.debug(color.op(".") + " right is '%s'", color.bold(snd))
if typefst in ITER_TYPES:
if D: self.debug(
color.op(".") + " filtering %s by %s", color.bold(self.cleanOutput(fst)),
color.bold(snd)
)
if type(snd) in ITER_TYPES:
return filter_dict(fst, list(snd))
else:
# if D: self.debug(list(fst))
return (e[snd] for e in fst if type(e) is dict and snd in e)
try:
if D: self.end(color.op(".") + " returning '%s'", fst.get(snd))
return fst.get(snd)
except Exception:
if isinstance(fst, object):
return self.object_getter(fst, snd)
if D: self.end(color.op(".") + " returning '%s'", color.bold(fst))
return fst
elif op == "..":
fst = flatten(exe(node[1]))
if node[2][0] == "*":
if D: self.debug(color.op("..") + " returning '%s'", color.bold(fst))
return fst
# reduce objects to selected attributes
snd = exe(node[2])
if D: self.debug(
color.op("..") + " finding all %s in %s", color.bold(snd),
color.bold(self.cleanOutput(fst))
)
if type(snd) in ITER_TYPES:
ret = filter_dict(fst, list(snd))
if D: self.debug(color.op("..") + " returning %s", color.bold(ret))
return ret
else:
ret = chain.from_iterable(
type(x) in ITER_TYPES and x or [x]
for x in (e[snd] for e in fst if snd in e)
)
# print list(chain(*(type(x) in ITER_TYPES and x or [x] for x in (e[snd] for e in fst if snd in e))))
if D: self.debug(color.op("..") + " returning %s", color.bold(self.cleanOutput(ret)))
return ret
elif op == "[":
len_node = len(node)
# TODO move it to tree generation phase
if len_node == 1: # empty list
if D: self.debug("returning an empty list")
return []
if len_node == 2: # list - preserved to catch possible event of leaving it as '[' operator
if D: self.debug("doing list mapping")
return [exe(x) for x in node[1]]
if len_node == 3: # selector used []
fst = exe(node[1])
# check against None
if not fst:
return fst
selector = node[2]
if D:
self.debug(
"\n found selector '%s'.\n executing on %s", color.bold(selector),
color.bold(fst)
)
selectorIsTuple = type(selector) is tuple
if selectorIsTuple and selector[0] == "[":
nodeList = []
nodeList_append = nodeList.append
for i in fst:
if D: self.debug("setting self.current to %s", color.bold(i))
self.current = i
nodeList_append(
exe((selector[0], exe(selector[1]), exe(selector[2])))
)
if D: self.debug(
"returning %s objects: %s", color.bold(len(nodeList)),
color.bold(nodeList)
)
return nodeList
if selectorIsTuple and selector[0] == "(current)":
if D:
self.warning(
                                color.bold("$.*[@]") + " is equivalent to " +
color.bold("$.*") + "!"
)
return fst
if selectorIsTuple and selector[0] in SELECTOR_OPS:
if D: self.debug(
"found %s operator in selector, %s", color.bold(selector[0]),
color.bold(selector)
)
if type(fst) is dict:
fst = [fst]
# TODO move it to tree building phase
if type(selector[1]) is tuple and selector[1][0] == "name":
selector = (selector[0], selector[1][1], selector[2])
selector0 = selector[0]
selector1 = selector[1]
selector2 = selector[2]
def exeSelector(fst):
for i in fst:
if D:
self.debug("setting self.current to %s", color.bold(i))
self.debug(" s0: %s\n s1: %s\n s2: %s\n Current: %s", selector0, selector1, selector2, i)
self.current = i
if selector0 == "fn":
yield exe(selector)
# elif type(selector1) in STR_TYPES and False:
# if D: self.debug("found string %s", type(i))
# try:
# if exe((selector0,i[selector1],selector2)):
# yield i
# if D: self.debug("appended")
# if D: self.debug("discarded")
# except Exception as e:
# if D: self.debug("discarded, Exception: %s",color.bold(e))
else:
try:
# TODO optimize an event when @ is not used. exe(selector1) can be cached
if exe((selector0, exe(selector1), exe(selector2))):
yield i
if D: self.debug("appended %s", i)
elif D: self.debug("discarded")
except Exception:
if D: self.debug("discarded")
# if D and nodeList: self.debug("returning '%s' objects: '%s'", color.bold(len(nodeList)), color.bold(nodeList))
return exeSelector(fst)
self.current = fst
snd = exe(node[2])
typefst = type(fst)
if typefst in [tuple] + ITER_TYPES + STR_TYPES:
typesnd = type(snd)
# nodes[N]
if typesnd in NUM_TYPES or typesnd is str and snd.isdigit():
n = int(snd)
if D:
self.info(
"getting %sth element from '%s'", color.bold(n),
color.bold(fst)
)
if typefst in (generator, chain):
if n > 0:
return skip(fst, n)
elif n == 0:
return next(fst)
else:
fst = list(fst)
else:
try:
return fst[n]
except (IndexError, TypeError):
return None
# $.*['string']==$.string
if type(snd) in STR_TYPES:
return exe((".", fst, snd))
else:
# $.*[@.string] - bad syntax, but allowed
return snd
else:
try:
if D: self.debug("returning %s", color.bold(fst[snd]))
return fst[snd]
except KeyError:
# CHECK - is it ok to do that or should it be ProgrammingError?
if D: self.debug("returning an empty list")
return []
raise ProgrammingError(
"Wrong usage of " + color.bold("[") + " operator"
)
elif op == "fn":
# Built-in functions
fnName = node[1]
args = None
try:
args = [exe(x) for x in node[2:]]
except IndexError:
if D:
self.debug("NOT ERROR: can't map '%s' with '%s'", node[2:], exe)
# arithmetic
if fnName == "sum":
args = args[0]
if type(args) in NUM_TYPES:
return args
return sum((x for x in args if type(x) in NUM_TYPES))
elif fnName == "max":
args = args[0]
if type(args) in NUM_TYPES:
return args
return max((x for x in args if type(x) in NUM_TYPES))
elif fnName == "min":
args = args[0]
if type(args) in NUM_TYPES:
return args
return min((x for x in args if type(x) in NUM_TYPES))
elif fnName == "avg":
args = args[0]
if type(args) in NUM_TYPES:
return args
if type(args) not in ITER_TYPES:
raise Exception("Argument for avg() is not an array")
else:
args = list(args)
try:
return sum(args)/float(len(args))
except TypeError:
args = [x for x in args if type(x) in NUM_TYPES]
                        self.warning("Some items in array were omitted")
return sum(args)/float(len(args))
elif fnName == "round":
return round(*args)
# casting
elif fnName == "int":
return int(args[0])
elif fnName == "float":
return float(args[0])
elif fnName == "str":
return str(py2JSON(args[0]))
elif fnName in ("list", "array"):
try:
a = args[0]
except IndexError:
return []
targs = type(a)
if targs is timeutils.datetime.datetime:
return timeutils.date2list(a) + timeutils.time2list(a)
if targs is timeutils.datetime.date:
return timeutils.date2list(a)
if targs is timeutils.datetime.time:
return timeutils.time2list(a)
return list(a)
# string
elif fnName == "upper":
return args[0].upper()
elif fnName == "lower":
return args[0].lower()
elif fnName == "capitalize":
return args[0].capitalize()
elif fnName == "title":
return args[0].title()
elif fnName == "split":
return args[0].split(*args[1:])
elif fnName == "slice":
                    if len(args) > 1 and type(args[1]) not in ITER_TYPES:
raise ExecutionError(
"Wrong usage of slice(STRING, ARRAY). Second argument is not an array but %s."
% color.bold(type(args[1]).__name__)
)
try:
pos = list(args[1])
if type(pos[0]) in ITER_TYPES:
if D: self.debug("run slice() for a list of slicers")
return (args[0][x[0]:x[1]] for x in pos)
return args[0][pos[0]:pos[1]]
except IndexError:
if len(args) != 2:
                            raise ProgrammingError(
                                "Wrong usage of slice(STRING, ARRAY). Provided %s argument(s), expected exactly 2."
                                % len(args)
                            )
elif fnName == "escape":
global escape, escapeDict
if not escape:
from objectpath.utils import escape, escapeDict
return escape(args[0], escapeDict)
elif fnName == "unescape":
global unescape, unescapeDict
if not unescape:
from objectpath.utils import unescape, unescapeDict
return unescape(args[0], unescapeDict)
elif fnName == "replace":
if sys.version_info[0] < 3 and type(args[0]) is unicode:
args[0] = args[0].encode("utf8")
return str.replace(args[0], args[1], args[2])
# TODO this should be supported by /regex/
# elif fnName=="REsub":
# return re.sub(args[1],args[2],args[0])
elif fnName == "sort":
if len(args) > 1:
key = args[1]
a = {"key": lambda x: x.get(key, 0)}
else:
a = {}
args = args[0]
if D: self.debug("doing sort on '%s'", args)
try:
return sorted(args, **a)
except TypeError:
return args
elif fnName == "reverse":
args = args[0]
try:
args.reverse()
return args
except TypeError:
return args
elif fnName == "unique":
try:
return list(set(args[0]))
except TypeError:
return args[0]
elif fnName == "map":
return chain.from_iterable(map(lambda x: exe(("fn", args[0], x)), args[1]))
elif fnName in ("count", "len"):
args = args[0]
if args in (True, False, None):
return args
if type(args) in ITER_TYPES:
return len(list(args))
return len(args)
elif fnName == "join":
try:
joiner = args[1]
except Exception:
joiner = ""
try:
return joiner.join(args[0])
except TypeError:
try:
return joiner.join(map(str, args[0]))
except Exception:
return args[0]
# time
elif fnName in ("now", "age", "time", "date", "dateTime"):
if fnName == "now":
return timeutils.now()
if fnName == "date":
return timeutils.date(args)
if fnName == "time":
return timeutils.time(args)
if fnName == "dateTime":
return timeutils.dateTime(args)
# TODO move lang to localize() entirely!
if fnName == "age":
a = {}
if len(args) > 1:
a["reference"] = args[1]
if len(args) > 2:
a["lang"] = args[2]
return list(timeutils.age(args[0], **a))
elif fnName == "toMillis":
args = args[0]
if args.utcoffset() is not None:
args = args - args.utcoffset() # pylint: disable=E1103
global calendar
if not calendar:
import calendar
return int(
calendar.timegm(args.timetuple())*1000 + args.microsecond/1000
)
elif fnName == "localize":
if type(args[0]) is timeutils.datetime.datetime:
return timeutils.UTC2local(*args)
# polygons
elif fnName == "area":
def segments(p):
p = list(map(lambda x: x[0:2], p))
return zip(p, p[1:] + [p[0]])
return 0.5*abs(
sum(x0*y1 - x1*y0 for ((x0, y0), (x1, y1)) in segments(args[0]))
)
# misc
elif fnName == "keys":
try:
return list(args[0].keys())
except AttributeError:
raise ExecutionError(
"Argument is not " + color.bold("object") +
" but %s in keys()" % color.bold(type(args[0]).__name__)
)
elif fnName == "values":
try:
return list(args[0].values())
except AttributeError:
raise ExecutionError(
"Argument is not " + color.bold("object") +
" but %s in values()" % color.bold(type(args[0]).__name__)
)
elif fnName == "type":
ret = type(args[0])
if ret in ITER_TYPES:
return "array"
if ret is dict:
return "object"
return ret.__name__
elif fnName in self._REGISTERED_FUNCTIONS:
return self._REGISTERED_FUNCTIONS[fnName](*args)
else:
raise ProgrammingError(
"Function " + color.bold(fnName) + " does not exist."
)
else:
return node
D = self.D
if type(expr) in STR_TYPES:
tree = self.compile(expr)
elif type(expr) not in (tuple, list, dict):
return expr
ret = exe(tree)
if D: self.end("Tree.execute with: %s", color.bold(self.cleanOutput(ret)))
return ret
def __str__(self):
return "TreeObject()"
def __repr__(self):
return self.__str__()
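# --- Usage sketch (illustrative, not part of the library) --------------------
# Assumes the enclosing class is the objectpath-style Tree, i.e. it is built
# from a JSON-like document and queried through execute(); the expression
# below exercises the "[" selector with an "@"-based filter handled in exe().
if __name__ == "__main__":
    _demo = Tree({"shop": {"book": [{"price": 4}, {"price": 7}, {"price": 12}]}})
    # Iterable results come back as generators, so materialise with list().
    print(list(_demo.execute("$.shop.book[@.price > 5].price")))  # e.g. [7, 12]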
|
|
# pylint: disable=W0611
'''
Utils
=====
The Utils module provides a selection of general utility functions and classes
that may be useful for various applications. These include maths, color,
algebraic and platform functions.
.. versionchanged:: 1.6.0
The OrderedDict class has been removed. Use collections.OrderedDict
instead.
'''
__all__ = ('intersection', 'difference', 'strtotuple',
'get_color_from_hex', 'get_hex_from_color', 'get_random_color',
'is_color_transparent', 'hex_colormap', 'colormap', 'boundary',
'deprecated', 'SafeList',
'interpolate', 'QueryDict',
'platform', 'escape_markup', 'reify', 'rgba')
from os import environ
from sys import platform as _sys_platform
from re import match, split
from kivy.compat import string_types
def boundary(value, minvalue, maxvalue):
'''Limit a value between a minvalue and maxvalue.'''
return min(max(value, minvalue), maxvalue)
def intersection(set1, set2):
'''Return the intersection of 2 lists.'''
return [s for s in set1 if s in set2]
def difference(set1, set2):
'''Return the difference between 2 lists.'''
return [s for s in set1 if s not in set2]
def interpolate(value_from, value_to, step=10):
'''Interpolate between two values. This can be useful for smoothing some
transitions. For example::
# instead of setting directly
self.pos = pos
# use interpolate, and you'll have a nicer transition
self.pos = interpolate(self.pos, new_pos)
.. warning::
These interpolations work only on lists/tuples/doubles with the same
dimensions. No test is done to check the dimensions are the same.
'''
if type(value_from) in (list, tuple):
out = []
for x, y in zip(value_from, value_to):
out.append(interpolate(x, y, step))
return out
else:
return value_from + (value_to - value_from) / float(step)
def strtotuple(s):
'''Convert a tuple string into a tuple
with some security checks. Designed to be used
with the eval() function::
a = (12, 54, 68)
b = str(a) # return '(12, 54, 68)'
c = strtotuple(b) # return (12, 54, 68)
'''
# security
    if not match(r'^[,.0-9 ()\[\]]*$', s):
raise Exception('Invalid characters in string for tuple conversion')
# fast syntax check
if s.count('(') != s.count(')'):
raise Exception('Invalid count of ( and )')
if s.count('[') != s.count(']'):
raise Exception('Invalid count of [ and ]')
r = eval(s)
if type(r) not in (list, tuple):
raise Exception('Conversion failed')
return r
def rgba(s, *args):
'''Return a kivy color (4 value from 0-1 range) from either a hex string or
a list of 0-255 values
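    For example (illustrative)::
        rgba('#ff0000')      # [1.0, 0.0, 0.0, 1]
        rgba((255, 0, 0))    # [1.0, 0.0, 0.0, 1]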
.. versionadded:: 1.9.2
'''
if isinstance(s, string_types):
return get_color_from_hex(s)
    elif isinstance(s, (list, tuple)):
        # build a list so len() works on both Python 2 and 3
        s = [x / 255. for x in s]
        if len(s) == 3:
            return s + [1]
        return s
    elif isinstance(s, (int, float)):
        s = [x / 255. for x in [s] + list(args)]
        if len(s) == 3:
            return s + [1]
        return s
raise Exception('Invalid value (not a string / list / tuple)')
def get_color_from_hex(s):
'''Transform a hex string color to a kivy
:class:`~kivy.graphics.Color`.
'''
if s.startswith('#'):
return get_color_from_hex(s[1:])
value = [int(x, 16) / 255.
for x in split('([0-9a-f]{2})', s.lower()) if x != '']
if len(value) == 3:
value.append(1)
return value
def get_hex_from_color(color):
'''Transform a kivy :class:`~kivy.graphics.Color` to a hex value::
>>> get_hex_from_color((0, 1, 0))
'#00ff00'
>>> get_hex_from_color((.25, .77, .90, .5))
'#3fc4e57f'
.. versionadded:: 1.5.0
'''
return '#' + ''.join(['{0:02x}'.format(int(x * 255)) for x in color])
def get_random_color(alpha=1.0):
'''Returns a random color (4 tuple).
:Parameters:
`alpha` : float, defaults to 1.0
If alpha == 'random', a random alpha value is generated.
'''
from random import random
if alpha == 'random':
return [random(), random(), random(), random()]
else:
return [random(), random(), random(), alpha]
def is_color_transparent(c):
'''Return True if the alpha channel is 0.'''
if len(c) < 4:
return False
if float(c[3]) == 0.:
return True
return False
hex_colormap = {
'aliceblue': '#f0f8ff',
'antiquewhite': '#faebd7',
'aqua': '#00ffff',
'aquamarine': '#7fffd4',
'azure': '#f0ffff',
'beige': '#f5f5dc',
'bisque': '#ffe4c4',
'black': '#000000',
'blanchedalmond': '#ffebcd',
'blue': '#0000ff',
'blueviolet': '#8a2be2',
'brown': '#a52a2a',
'burlywood': '#deb887',
'cadetblue': '#5f9ea0',
'chartreuse': '#7fff00',
'chocolate': '#d2691e',
'coral': '#ff7f50',
'cornflowerblue': '#6495ed',
'cornsilk': '#fff8dc',
'crimson': '#dc143c',
'cyan': '#00ffff',
'darkblue': '#00008b',
'darkcyan': '#008b8b',
'darkgoldenrod': '#b8860b',
'darkgray': '#a9a9a9',
'darkgrey': '#a9a9a9',
'darkgreen': '#006400',
'darkkhaki': '#bdb76b',
'darkmagenta': '#8b008b',
'darkolivegreen': '#556b2f',
'darkorange': '#ff8c00',
'darkorchid': '#9932cc',
'darkred': '#8b0000',
'darksalmon': '#e9967a',
'darkseagreen': '#8fbc8f',
'darkslateblue': '#483d8b',
'darkslategray': '#2f4f4f',
'darkslategrey': '#2f4f4f',
'darkturquoise': '#00ced1',
'darkviolet': '#9400d3',
'deeppink': '#ff1493',
'deepskyblue': '#00bfff',
'dimgray': '#696969',
'dimgrey': '#696969',
'dodgerblue': '#1e90ff',
'firebrick': '#b22222',
'floralwhite': '#fffaf0',
'forestgreen': '#228b22',
'fuchsia': '#ff00ff',
'gainsboro': '#dcdcdc',
'ghostwhite': '#f8f8ff',
'gold': '#ffd700',
'goldenrod': '#daa520',
'gray': '#808080',
'grey': '#808080',
'green': '#008000',
'greenyellow': '#adff2f',
'honeydew': '#f0fff0',
'hotpink': '#ff69b4',
'indianred': '#cd5c5c',
'indigo': '#4b0082',
'ivory': '#fffff0',
'khaki': '#f0e68c',
'lavender': '#e6e6fa',
'lavenderblush': '#fff0f5',
'lawngreen': '#7cfc00',
'lemonchiffon': '#fffacd',
'lightblue': '#add8e6',
'lightcoral': '#f08080',
'lightcyan': '#e0ffff',
'lightgoldenrodyellow': '#fafad2',
'lightgreen': '#90ee90',
'lightgray': '#d3d3d3',
'lightgrey': '#d3d3d3',
'lightpink': '#ffb6c1',
'lightsalmon': '#ffa07a',
'lightseagreen': '#20b2aa',
'lightskyblue': '#87cefa',
'lightslategray': '#778899',
'lightslategrey': '#778899',
'lightsteelblue': '#b0c4de',
'lightyellow': '#ffffe0',
'lime': '#00ff00',
'limegreen': '#32cd32',
'linen': '#faf0e6',
'magenta': '#ff00ff',
'maroon': '#800000',
'mediumaquamarine': '#66cdaa',
'mediumblue': '#0000cd',
'mediumorchid': '#ba55d3',
'mediumpurple': '#9370db',
'mediumseagreen': '#3cb371',
'mediumslateblue': '#7b68ee',
'mediumspringgreen': '#00fa9a',
'mediumturquoise': '#48d1cc',
'mediumvioletred': '#c71585',
'midnightblue': '#191970',
'mintcream': '#f5fffa',
'mistyrose': '#ffe4e1',
'moccasin': '#ffe4b5',
'navajowhite': '#ffdead',
'navy': '#000080',
'oldlace': '#fdf5e6',
'olive': '#808000',
'olivedrab': '#6b8e23',
'orange': '#ffa500',
'orangered': '#ff4500',
'orchid': '#da70d6',
'palegoldenrod': '#eee8aa',
'palegreen': '#98fb98',
'paleturquoise': '#afeeee',
'palevioletred': '#db7093',
'papayawhip': '#ffefd5',
'peachpuff': '#ffdab9',
'peru': '#cd853f',
'pink': '#ffc0cb',
'plum': '#dda0dd',
'powderblue': '#b0e0e6',
'purple': '#800080',
'red': '#ff0000',
'rosybrown': '#bc8f8f',
'royalblue': '#4169e1',
'saddlebrown': '#8b4513',
'salmon': '#fa8072',
'sandybrown': '#f4a460',
'seagreen': '#2e8b57',
'seashell': '#fff5ee',
'sienna': '#a0522d',
'silver': '#c0c0c0',
'skyblue': '#87ceeb',
'slateblue': '#6a5acd',
'slategray': '#708090',
'slategrey': '#708090',
'snow': '#fffafa',
'springgreen': '#00ff7f',
'steelblue': '#4682b4',
'tan': '#d2b48c',
'teal': '#008080',
'thistle': '#d8bfd8',
'tomato': '#ff6347',
'turquoise': '#40e0d0',
'violet': '#ee82ee',
'wheat': '#f5deb3',
'white': '#ffffff',
'whitesmoke': '#f5f5f5',
'yellow': '#ffff00',
'yellowgreen': '#9acd32',
}
colormap = {k: get_color_from_hex(v) for k, v in hex_colormap.items()}
DEPRECATED_CALLERS = []
def deprecated(func):
'''This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted the first time
the function is used.'''
import inspect
import functools
@functools.wraps(func)
def new_func(*args, **kwargs):
file, line, caller = inspect.stack()[1][1:4]
caller_id = "%s:%s:%s" % (file, line, caller)
# We want to print deprecated warnings only once:
if caller_id not in DEPRECATED_CALLERS:
DEPRECATED_CALLERS.append(caller_id)
warning = (
                'Call to deprecated function %s in %s line %d. '
'Called from %s line %d'
' by %s().' % (
func.__name__,
func.__code__.co_filename,
func.__code__.co_firstlineno + 1,
file, line, caller))
from kivy.logger import Logger
Logger.warn(warning)
if func.__doc__:
Logger.warn(func.__doc__)
return func(*args, **kwargs)
return new_func
class SafeList(list):
'''List with a clear() method.
.. warning::
Usage of the iterate() function will decrease your performance.
'''
def clear(self):
del self[:]
@deprecated
def iterate(self, reverse=False):
if reverse:
return iter(reversed(self))
return iter(self)
class QueryDict(dict):
'''QueryDict is a dict() that can be queried with dot.
.. versionadded:: 1.0.4
::
d = QueryDict()
# create a key named toto, with the value 1
d.toto = 1
# it's the same as
d['toto'] = 1
'''
def __getattr__(self, attr):
try:
return self.__getitem__(attr)
except KeyError:
return super(QueryDict, self).__getattr__(attr)
def __setattr__(self, attr, value):
self.__setitem__(attr, value)
def format_bytes_to_human(size, precision=2):
'''Format a byte value to a human readable representation (B, KB, MB...).
.. versionadded:: 1.0.8
:Parameters:
`size`: int
Number that represents the bytes value
`precision`: int, defaults to 2
Precision after the comma
Examples::
>>> format_bytes_to_human(6463)
'6.31 KB'
>>> format_bytes_to_human(646368746541)
'601.98 GB'
'''
size = int(size)
fmt = '%%1.%df %%s' % precision
for unit in ['B', 'KB', 'MB', 'GB', 'TB']:
if size < 1024.0:
return fmt % (size, unit)
size /= 1024.0
class Platform(object):
# refactored to class to allow module function to be replaced
# with module variable
def __init__(self):
self._platform_ios = None
self._platform_android = None
@deprecated
def __call__(self):
return self._get_platform()
def __eq__(self, other):
return other == self._get_platform()
def __ne__(self, other):
return other != self._get_platform()
def __str__(self):
return self._get_platform()
def __repr__(self):
return 'platform name: \'{platform}\' from: \n{instance}'.format(
platform=self._get_platform(),
instance=super(Platform, self).__repr__()
)
def __hash__(self):
return self._get_platform().__hash__()
def _get_platform(self):
if self._platform_android is None:
# ANDROID_ARGUMENT and ANDROID_PRIVATE are 2 environment variables
# from python-for-android project
self._platform_android = 'ANDROID_ARGUMENT' in environ
if self._platform_ios is None:
self._platform_ios = (environ.get('KIVY_BUILD', '') == 'ios')
# On android, _sys_platform return 'linux2', so prefer to check the
# import of Android module than trying to rely on _sys_platform.
if self._platform_android is True:
return 'android'
elif self._platform_ios is True:
return 'ios'
elif _sys_platform in ('win32', 'cygwin'):
return 'win'
elif _sys_platform == 'darwin':
return 'macosx'
elif _sys_platform[:5] == 'linux':
return 'linux'
elif _sys_platform.startswith('freebsd'):
return 'linux'
return 'unknown'
platform = Platform()
'''
platform is a string describing the current Operating System. It is one
of: *win*, *linux*, *android*, *macosx*, *ios* or *unknown*.
You can use it as follows::
from kivy import platform
if platform == 'linux':
do_linux_things()
if platform() == 'linux': # triggers deprecation warning
do_more_linux_things()
.. versionadded:: 1.3.0
.. versionchanged:: 1.8.0
platform is now a variable instead of a function.
'''
def escape_markup(text):
'''
Escape markup characters found in the text. Intended to be used when markup
text is activated on the Label::
untrusted_text = escape_markup('Look at the example [1]')
text = '[color=ff0000]' + untrusted_text + '[/color]'
w = Label(text=text, markup=True)
.. versionadded:: 1.3.0
'''
    return text.replace('&', '&amp;').replace('[', '&bl;').replace(']', '&br;')
class reify(object):
'''
Put the result of a method which uses this (non-data) descriptor decorator
in the instance dict after the first call, effectively replacing the
decorator with an instance variable.
It acts like @property, except that the function is only ever called once;
after that, the value is cached as a regular attribute. This gives you lazy
attribute creation on objects that are meant to be immutable.
Taken from the `Pyramid project <https://pypi.python.org/pypi/pyramid/>`_.
To use this as a decorator::
@reify
def lazy(self):
...
return hard_to_compute_int
first_time = self.lazy # lazy is reify obj, reify.__get__() runs
second_time = self.lazy # lazy is hard_to_compute_int
'''
def __init__(self, func):
self.func = func
self.__doc__ = func.__doc__
def __get__(self, inst, cls):
if inst is None:
return self
retval = self.func(inst)
setattr(inst, self.func.__name__, retval)
return retval
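# A small self-check sketch (not part of Kivy): run this module directly to
# exercise a few of the helpers above. It assumes kivy itself is importable,
# since this module already imports kivy.compat.
if __name__ == '__main__':
    assert boundary(15, 0, 10) == 10
    assert interpolate(0., 100.) == 10.0
    assert get_color_from_hex('#00ff00') == [0.0, 1.0, 0.0, 1]
    assert format_bytes_to_human(6463) == '6.31 KB'
    print('utils self-check passed')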
|
|
# -*- coding:utf-8 -*-
import json
import time
import app
from app.main.controller import login_required
from elasticsearch.client import IndicesClient
from flask import render_template, session, redirect, url_for, current_app, request, Blueprint
monitorlog_blueprint = Blueprint('monitorlog_blueprint', __name__)
@monitorlog_blueprint.route('/index', methods=['GET', 'POST'])
@login_required
def index():
timestamp = request.values.get("timestamp")
etype = request.values.get("etype")
    if timestamp is None:
        timestamp = ""
    if etype is None:
        etype = ""
return render_template('monitorlog/index.html', etype=etype, timestamp=timestamp)
@monitorlog_blueprint.route('/detailpage', methods=['GET', 'POST'])
@login_required
def detailpage():
mid = request.values.get("mid")
timestamp = request.values.get("timestamp")
    if timestamp is None:
        timestamp = ""
return render_template('monitorlog/detail.html', mid=mid, timestamp=timestamp)
@monitorlog_blueprint.route('/getdata', methods=['GET', 'POST'])
@login_required
def getdata():
arg_dict = request.values
timestamp = request.values["timestamp"]
res = get_need_datas(get_indexs(timestamp), arg_dict.__dict__["dicts"][1], 10)
# print json.dumps(res, encoding='utf8', ensure_ascii=False, indent=2)
return json.dumps(res, encoding='utf8', ensure_ascii=False, indent=2)
@monitorlog_blueprint.route('/detail', methods=['GET', 'POST'])
@login_required
def detail():
arg_dict = request.values
timestamp = request.values["timestamp"]
res = get_need_detail_datas(get_indexs(timestamp), arg_dict.__dict__["dicts"][1], 10)
# print json.dumps(res, encoding='utf8', ensure_ascii=False, indent=2)
return json.dumps(res, encoding='utf8', ensure_ascii=False, indent=2)
def get_indexs(msvalue):
return "kafka_msg_log_" + time.strftime("%Y.%m.%d", time.localtime(long(msvalue) / 1000))
def gen_musts(arr):
req = []
if arr.has_key("mid") and arr.get("mid"):
req.append({"match": {"mid": arr["mid"]}})
if arr.has_key("app") and arr.get("app"):
req.append({"match": {"app": arr["app"]}})
if arr.has_key("host") and arr.get("host"):
req.append({"match": {"host": arr["host"]}})
if arr.has_key("ip") and arr.get("ip"):
req.append({"match": {"ip": arr["ip"]}})
if arr.has_key("topic") and arr.get("topic"):
req.append({"match": {"topic": arr["topic"]}})
if arr.has_key("pid") and arr.get("pid"):
req.append({"match": {"pid": int(arr["pid"])}})
if arr.has_key("group") and arr.get("group"):
req.append({"match": {"group": arr["group"]}})
if arr.has_key("partition") and arr.get("partition"):
req.append({"match": {"partition": int(arr["partition"])}})
if arr.has_key("offset") and arr.get("offset"):
req.append({"match": {"offset": long(arr["offset"])}})
if arr.has_key("etype") and arr.get("etype"):
req.append({"match": {"etype": int(arr["etype"])}})
if arr.has_key("stage") and arr.get("stage"):
req.append({"match": {"stage": int(arr["stage"])}})
min_num = 10
if arr.has_key("mins") and arr.get("mins"):
min_num = int(arr["mins"])
if arr.has_key("timestamp"):
timestamp = long(arr["timestamp"])
req.append({"range": {"timestamp": {
"gte": timestamp - min_num * 1000 * 60,
"lte": timestamp
}}})
return req
def get_need_datas(indexs, arg_dict, size=10):
gen_musts(arg_dict)
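    # The aggregation below groups hits by message id ("mid") and, for each
    # bucket, keeps only the hit with the highest "stage" (top_hits, size 1,
    # sorted by stage desc), i.e. the latest processing stage seen for that
    # message within the requested time window.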
body = {
"from": 0,
"size": 0,
'query': {
"bool": {
"must": gen_musts(arg_dict),
}
},
"fields": "mid",
"aggregations": {
"mid": {
"terms": {
"field": "mid",
"size": size,
# "sort": [
# {
# "timestamp": {
# "order": "desc"
# }
# }
# ]
},
# "sort": [
# {
# "timestamp": {
# "order": "desc"
# }
# }
# ],
"aggs": {
"last_msg": {
"top_hits": {
"size": 1,
"sort": [
{
"stage": {
"order": "desc"
}
}
]
}
}
}
}
},
"sort": [
{
"timestamp": {
"order": "desc"
}
}
]
}
result = app.es.search(
ignore=404,
index=indexs,
body=body
)
req = []
# if result["status"] == 200 :
for r in result['aggregations']['mid']['buckets']:
try:
req.append(r["last_msg"]["hits"]["hits"][0]["_source"])
except:
pass
return req
def get_need_detail_datas(indexs, arg_dict, size=1000):
gen_musts(arg_dict)
body = {
"from": 0,
"size": size,
'query': {
"bool": {
"must": gen_musts(arg_dict),
}
},
"sort": [
{
"timestamp": {
"order": "asc"
}
}
]
}
result = app.es.search(
ignore=404,
index=indexs,
body=body
)
req = []
for r in result['hits']['hits']:
try:
req.append(r["_source"])
except:
pass
return req
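# A quick sketch of the request-to-query translation above (illustrative only).
# It assumes the same Python 2 runtime this module already targets, since
# gen_musts() relies on dict.has_key() and long(); the field values are made up.
if __name__ == '__main__':
    example_args = {
        "topic": "orders",
        "etype": "2",
        "mins": "5",
        "timestamp": str(int(time.time() * 1000)),
    }
    # gen_musts() turns request parameters into the bool/must clauses used by
    # get_need_datas() and get_need_detail_datas().
    print(json.dumps(gen_musts(example_args), indent=2))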
|
|
# -*- coding: utf-8 -*-
"""
hyper/http20/bufsocket.py
~~~~~~~~~~~~~~~~~~~~~~~~~
This file implements a buffered socket wrapper.
The purpose of this is to avoid the overhead of unnecessary syscalls while
allowing small reads from the network. This represents a potentially massive
performance optimisation at the cost of burning some memory in the userspace
process.
"""
import select
from .exceptions import ConnectionResetError, LineTooLongError
class BufferedSocket(object):
"""
A buffered socket wrapper.
The purpose of this is to avoid the overhead of unnecessary syscalls while
allowing small reads from the network. This represents a potentially
massive performance optimisation at the cost of burning some memory in the
userspace process.
"""
def __init__(self, sck, buffer_size=1000):
"""
Create the buffered socket.
:param sck: The socket to wrap.
:param buffer_size: The size of the backing buffer in bytes. This
parameter should be set to an appropriate value for your use case.
Small values of ``buffer_size`` increase the overhead of buffer
management: large values cause more memory to be used.
"""
# The wrapped socket.
self._sck = sck
# The buffer we're using.
self._backing_buffer = bytearray(buffer_size)
self._buffer_view = memoryview(self._backing_buffer)
# The size of the buffer.
self._buffer_size = buffer_size
# The start index in the memory view.
self._index = 0
# The number of bytes in the buffer.
self._bytes_in_buffer = 0
@property
def _remaining_capacity(self):
"""
The maximum number of bytes the buffer could still contain.
"""
return self._buffer_size - self._index
@property
def _buffer_end(self):
"""
The index of the first free byte in the buffer.
"""
return self._index + self._bytes_in_buffer
@property
def can_read(self):
"""
Whether or not there is more data to read from the socket.
"""
if self._bytes_in_buffer:
return True
read = select.select([self._sck], [], [], 0)[0]
if read:
return True
return False
@property
def buffer(self):
"""
Get access to the buffer itself.
"""
return self._buffer_view[self._index:self._buffer_end]
def advance_buffer(self, count):
"""
Advances the buffer by the amount of data consumed outside the socket.
"""
self._index += count
self._bytes_in_buffer -= count
def new_buffer(self):
"""
This method moves all the data in the backing buffer to the start of
a new, fresh buffer. This gives the ability to read much more data.
"""
def read_all_from_buffer():
end = self._index + self._bytes_in_buffer
return self._buffer_view[self._index:end]
new_buffer = bytearray(self._buffer_size)
new_buffer_view = memoryview(new_buffer)
new_buffer_view[0:self._bytes_in_buffer] = read_all_from_buffer()
self._index = 0
self._backing_buffer = new_buffer
self._buffer_view = new_buffer_view
return
def recv(self, amt):
"""
Read some data from the socket.
:param amt: The amount of data to read.
:returns: A ``memoryview`` object containing the appropriate number of
bytes. The data *must* be copied out by the caller before the next
call to this function.
"""
# In this implementation you can never read more than the number of
# bytes in the buffer.
if amt > self._buffer_size:
amt = self._buffer_size
# If the amount of data we've been asked to read is less than the
# remaining space in the buffer, we need to clear out the buffer and
# start over.
if amt > self._remaining_capacity:
self.new_buffer()
# If there's still some room in the buffer, opportunistically attempt
# to read into it.
# If we don't actually _need_ the data (i.e. there's enough in the
# buffer to satisfy the request), use select to work out if the read
# attempt will block. If it will, don't bother reading. If we need the
# data, always do the read.
if self._bytes_in_buffer >= amt:
should_read = select.select([self._sck], [], [], 0)[0]
else:
should_read = True
if ((self._remaining_capacity > self._bytes_in_buffer) and
(should_read)):
count = self._sck.recv_into(self._buffer_view[self._buffer_end:])
# The socket just got closed. We should throw an exception if we
# were asked for more data than we can return.
if not count and amt > self._bytes_in_buffer:
raise ConnectionResetError()
self._bytes_in_buffer += count
# Read out the bytes and update the index.
amt = min(amt, self._bytes_in_buffer)
data = self._buffer_view[self._index:self._index+amt]
self._index += amt
self._bytes_in_buffer -= amt
return data
def fill(self):
"""
Attempts to fill the buffer as much as possible. It will block for at
most the time required to have *one* ``recv_into`` call return.
"""
if not self._remaining_capacity:
self.new_buffer()
count = self._sck.recv_into(self._buffer_view[self._buffer_end:])
if not count:
raise ConnectionResetError()
self._bytes_in_buffer += count
return
def readline(self):
"""
Read up to a newline from the network and returns it. The implicit
maximum line length is the buffer size of the buffered socket.
Note that, unlike recv, this method absolutely *does* block until it
can read the line.
:returns: A ``memoryview`` object containing the appropriate number of
bytes. The data *must* be copied out by the caller before the next
call to this function.
"""
# First, check if there's anything in the buffer. This is one of those
# rare circumstances where this will work correctly on all platforms.
index = self._backing_buffer.find(
b'\n',
self._index,
self._index + self._bytes_in_buffer
)
if index != -1:
length = index + 1 - self._index
data = self._buffer_view[self._index:self._index+length]
self._index += length
self._bytes_in_buffer -= length
return data
# In this case, we didn't find a newline in the buffer. To fix that,
# read some data into the buffer. To do our best to satisfy the read,
# we should shunt the data down in the buffer so that it's right at
# the start. We don't bother if we're already at the start of the
# buffer.
if self._index != 0:
self.new_buffer()
while self._bytes_in_buffer < self._buffer_size:
count = self._sck.recv_into(self._buffer_view[self._buffer_end:])
if not count:
raise ConnectionResetError()
# We have some more data. Again, look for a newline in that gap.
first_new_byte = self._buffer_end
self._bytes_in_buffer += count
index = self._backing_buffer.find(
b'\n',
first_new_byte,
first_new_byte + count,
)
if index != -1:
# The length of the buffer is the index into the
# buffer at which we found the newline plus 1, minus the start
# index of the buffer, which really should be zero.
assert not self._index
length = index + 1
data = self._buffer_view[:length]
self._index += length
self._bytes_in_buffer -= length
return data
# If we got here, it means we filled the buffer without ever getting
# a newline. Time to throw an exception.
raise LineTooLongError()
def __getattr__(self, name):
return getattr(self._sck, name)
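# --- Usage sketch (not part of hyper) -----------------------------------------
# Wraps one end of a socketpair to show that recv()/readline() return
# memoryviews which must be copied out before the next call. Because of the
# relative import at the top, run it as a module
# (python -m hyper.http20.bufsocket) rather than as a plain script.
if __name__ == '__main__':
    import socket
    parent, child = socket.socketpair()
    buffered = BufferedSocket(child, buffer_size=64)
    parent.sendall(b'hello\nworld')
    line = bytes(buffered.readline())   # copy out: b'hello\n'
    rest = bytes(buffered.recv(5))      # copy out: b'world'
    print(line, rest)
    parent.close()
    child.close()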
|
|
#!/usr/bin/python
# BY DAVE, FOR TESTING GAITS
import time
import os
import sys
import pygame
import collections
import serial
import struct
EXPAND = 0
CONTRACT = 1
NUM_MOTORS = 4
TRANSMIT_DELAY = 0.8 # SECONDS
# TOGGLED WITH THE <TAB> KEY
CONTROL_MANUAL = 0
CONTROL_TRANSMIT = 1
CONTROL_WALK = 2
# ROBOT MOVING?
WALK_RESET = 0
WALK_PAUSE = 1
WALK_PLAY = 2
KEY_UP = 273
KEY_DOWN = 274
KEY_RIGHT = 275
KEY_LEFT = 276
KEY_TAB = 9
KEY_RETURN = 13
KEY_SPACE = 32
KEY_TRANSFER = 116 # 't'
KEY_QUIT = 113 # 'q'
HIGHLIGHT = (255, 255, 0)
WHITE = (200, 200, 255)
ANIMATION = [ \
"------------------------", \
"-----------------------.", \
"----------------------,.", \
"---------------------.,.", \
"---------------------.,.", \
"-------------------'-.,.", \
"------------------`'-.,.", \
"-----------------'`'-.,.", \
"-----------------'`'-.,.", \
"---------------.-'`'-.,.", \
"--------------,.-'`'-.,.", \
"-------------.,.-'`'-.,.", \
"-------------.,.-'`'-.,.", \
"-----------'-.,.-'`'-.,.", \
"----------`'-.,.-'`'-.,.", \
"---------'`'-.,.-'`'-.,.", \
"---------'`'-.,.-'`'-.,.", \
"-------.-'`'-.,.-'`'-.,.", \
"------,.-'`'-.,.-'`'-.,.", \
"-----.,.-'`'-.,.-'`'-.,.", \
"-----.,.-'`'-.,.-'`'-.,.", \
"---'-.,.-'`'-.,.-'`'-.,.", \
"--`'-.,.-'`'-.,.-'`'-.,.", \
"-'`'-.,.-'`'-.,.-'`'-.,.", \
"'`'-.,.-'`'-.,.-'`'-.,.-", \
"`'-.,.-'`'-.,.-'`'-.,.-'", \
"'-.,.-'`'-.,.-'`'-.,.-'`", \
"-.,.-'`'-.,.-'`'-.,.-'`'", \
".,.-'`'-.,.-'`'-.,.-'`'-", \
",.-'`'-.,.-'`'-.,.-'`'-.", \
".-'`'-.,.-'`'-.,.-'`'-.,", \
]
ANIMATION_CONTINUE = 23
# FOR UI
current_motor = 0
selected_row = 0
selected_col = 0
# FOR MANUAL MOTOR CONTROL
motorstate = [0] * NUM_MOTORS # 0->stopped 1->expanding 2->contracting
control_mode = CONTROL_MANUAL
walk_mode = WALK_RESET
input_state = False # ENTERING A VARIABLE
input_value = 0 # VARIABLE VALUE BEING ENTERED
frame = 0 # COUNTER FOR ANIMATION
animation_frame = 0 # POINTS TO ASCII ART
vars = collections.OrderedDict()
vars['expanded_delay'] = ([6000] * NUM_MOTORS)
vars['contract_time'] = ([6000] * NUM_MOTORS)
vars['contracted_delay']= ([6000] * NUM_MOTORS)
vars['expand_time'] = ([6000] * NUM_MOTORS)
vars['contract_speed'] = ([100] * NUM_MOTORS)
vars['expand_speed'] = ([100] * NUM_MOTORS)
vars['offset'] = ([0] * NUM_MOTORS)
VARIABLE_MAXS = [60000, 60000, 60000, 60000, 255, 255, 60000]
# INITIALIZE SERIAL connection
connection = None
if (os.name == 'posix'):
port_name = '/dev/ttyACM0'
else:
port_name = 'COM4'
connection = serial.Serial(
port=port_name,
baudrate=9600,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=0 # don't block when reading
)
def load():
print "Loading saved gait."
try:
f = open('gait.dat', 'r')
for var in vars.items():
for i in range(0, NUM_MOTORS):
var[1][i] = int(f.readline())
except:
print "Failed to load saved gait. Using defaults."
def save():
f = open('gait.dat', 'w')
for var in vars.items():
for i in range(0, NUM_MOTORS):
f.write(str(var[1][i]))
f.write("\n")
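# Wire format produced by transfer(): a single 't' header byte, then one
# 12-byte record per motor packed big-endian as '!HHHHBBH':
#   expanded_delay, contract_time, contracted_delay, expand_time (uint16),
#   contract_speed, expand_speed (uint8), offset (uint16).
# The UI labels cycle lengths in 'ms', so the uint16 fields are presumably
# millisecond values; speeds are capped at 255 (see VARIABLE_MAXS).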
def transfer():
packed = struct.pack('!c', 't')
connection.write(packed)
for i in range(0, NUM_MOTORS):
packed = struct.pack('!HHHHBBH', vars['expanded_delay'][i], \
vars['contract_time'][i], \
vars['contracted_delay'][i], \
vars['expand_time'][i], \
vars['contract_speed'][i], \
vars['expand_speed'][i], \
vars['offset'][i])
connection.write(packed)
def refresh():
screen.fill((0,0,0))
draw_text()
draw_graphs()
pygame.display.flip()
def draw_text():
colors = [WHITE] * NUM_MOTORS
if (control_mode == CONTROL_MANUAL):
colors[current_motor] = HIGHLIGHT
for i in range(0, NUM_MOTORS):
message = ""
if (motorstate[i] == 1):
message += ">>> "
elif (motorstate[i] == 2):
message += "<<< "
else:
message += " "
message += "motor " + str(i)
if (motorstate[i] == 1):
message += " <<<"
elif (motorstate[i] == 2):
message += " >>>"
label = myfont.render(message, 1, colors[i])
screen.blit(label, (0, (i * 15) + 30))
for col in range(0, NUM_MOTORS):
string = 'm' + str(col)
label = myfont.render('m' + str(col), 1, WHITE)
screen.blit(label, (390 + (col * 60), 5))
    row = 0
for var in vars.items():
label = myfont.render(var[0], 1, WHITE)
screen.blit(label, (200, (row * 15) + 30))
for col in range(0, NUM_MOTORS):
if (row == selected_row and col == selected_col and control_mode == CONTROL_TRANSMIT):
if (input_state):
label = myfont.render(str(input_value), 1, HIGHLIGHT)
else:
label = myfont.render(str(var[1][col]), 1, HIGHLIGHT)
else:
label = myfont.render(str(var[1][col]), 1, WHITE)
screen.blit(label, (380 + (col * 60), (row * 15) + 30))
row += 1
if (control_mode == CONTROL_WALK ):
color = HIGHLIGHT
else:
color = WHITE
label = myfont.render(ANIMATION[animation_frame], 1, color)
screen.blit(label, (700, 60))
if (walk_mode == WALK_RESET):
message = "reset"
elif (walk_mode == WALK_PAUSE):
message = "pause"
elif (walk_mode == WALK_PLAY):
message = "walk"
label = myfont.render(message, 1, color)
screen.blit(label, (790, 30))
# HARD CODED FOR 4 MOTORS
def draw_graphs():
motor_y = []
for i in range (0, NUM_MOTORS):
motor_y.append(200 + (100 * i))
GRAPH_HEIGHT = 60
GRAPH_WIDTH = 800
max_cycle = 0
for i in range (0, NUM_MOTORS):
cycle = vars['offset'][i] + \
vars['expanded_delay'][i] + \
vars['contract_time'][i] + \
vars['contracted_delay'][i] + \
vars['expand_time'][i]
max_cycle = max(max_cycle, cycle)
scale = GRAPH_WIDTH / float(max_cycle)
MARGIN = 80
for i in range(0, NUM_MOTORS):
label = myfont.render('m' + str(i), 1, WHITE)
screen.blit(label, (MARGIN - 40, motor_y[i]))
motor_lines = []
x_pos = ((vars['offset'][i] % max_cycle) * scale) + MARGIN
motor_lines.append((x_pos, motor_y[i]))
x_pos += ((vars['expanded_delay'][i] % max_cycle) * scale)
motor_lines.append((x_pos, motor_y[i]))
x_pos += ((vars['contract_time'][i] % max_cycle) * scale)
motor_lines.append((x_pos, motor_y[i] + GRAPH_HEIGHT))
x_pos += ((vars['contracted_delay'][i] % max_cycle) * scale)
motor_lines.append((x_pos, motor_y[i] + GRAPH_HEIGHT))
x_pos += ((vars['expand_time'][i] % max_cycle) * scale)
motor_lines.append((x_pos, motor_y[i]))
pygame.draw.lines(screen, WHITE, False, motor_lines)
label = myfont.render(str(max_cycle) + 'ms', 1, WHITE)
screen.blit(label, (GRAPH_WIDTH + 40, 600))
#####################################
# INITIALIZE #
#####################################
pygame.init()
myfont = pygame.font.SysFont("monospace", 17, bold=True)
screen = pygame.display.set_mode((1000, 800))
load()
refresh()
#####################################
# LOOP #
#####################################
while True:
if (walk_mode == WALK_PLAY):
frame += 1
if ((frame % 10000) == 0):
animation_frame = (animation_frame + 1)
if (animation_frame >= len(ANIMATION)):
animation_frame = ANIMATION_CONTINUE
refresh()
for event in pygame.event.get():
# QUIT
if ((event.type == pygame.QUIT) or ((event.type == pygame.KEYDOWN) and (event.key == KEY_QUIT))):
save()
pygame.quit()
sys.exit()
# SWITCH COLUMN
elif ((event.type == pygame.KEYDOWN) and (event.key == KEY_TAB)):
control_mode = (control_mode + 1) % 3
input_value = 0
input_state = False
# LEFT COLUMN
elif (control_mode == CONTROL_MANUAL):
if (event.type == pygame.KEYDOWN):
if ((event.key == KEY_UP) and motorstate[current_motor] == 0):
current_motor = (current_motor - 1) % NUM_MOTORS
elif ((event.key == KEY_DOWN) and motorstate[current_motor] == 0):
current_motor = (current_motor + 1) % NUM_MOTORS
elif (event.key == KEY_LEFT):
if (motorstate[current_motor] == 0):
motorstate[current_motor] = 1
connection.write(struct.pack('!cBB', 'a', current_motor, 1))
elif (event.key == KEY_RIGHT):
if (motorstate[current_motor] == 0):
motorstate[current_motor] = 2
connection.write(struct.pack('!cBB', 'a', current_motor, 2))
elif (event.type == pygame.KEYUP):
if ((event.key == KEY_LEFT) or (event.key == KEY_RIGHT)):
motorstate[current_motor] = 0
connection.write(struct.pack('!cBB', 'a', current_motor, 0))
# MIDDLE COLUMN
elif (control_mode == CONTROL_TRANSMIT):
if event.type == pygame.KEYDOWN:
if (event.key == KEY_TRANSFER):
walk_mode = WALK_RESET
animation_frame = 0
transfer()
elif (event.key == KEY_RETURN):
if (input_state):
if (input_value >= 0 and input_value <= VARIABLE_MAXS[selected_col]):
                            # THERE IS DEFINITELY A BETTER WAY TO DO THIS
vars[vars.items()[selected_row][0]][selected_col] = input_value
input_state = False
else:
input_value = 0
input_state = True
# TYPING IN NEW VALUE
elif (input_state and event.key >= 48 and event.key <= 57):
input_integer = event.key - 48
input_value = (input_value * 10) + input_integer
elif (event.key == KEY_UP):
selected_row = (selected_row - 1) % len(vars)
input_value = 0
input_state = False
elif (event.key == KEY_DOWN):
selected_row = (selected_row + 1) % len(vars)
input_value = 0
input_state = False
elif (event.key == KEY_LEFT):
selected_col = (selected_col - 1) % NUM_MOTORS
input_value = 0
input_state = False
elif (event.key == KEY_RIGHT):
selected_col = (selected_col + 1) % NUM_MOTORS
input_value = 0
input_state = False
# RIGHT COLUMN
elif (control_mode == CONTROL_WALK):
if ((event.type == pygame.KEYDOWN) and (event.key == KEY_SPACE)):
if (walk_mode == WALK_PLAY):
walk_mode = WALK_PAUSE
connection.write(struct.pack('!cB', 'p', 0))
elif (walk_mode == WALK_PAUSE):
walk_mode = WALK_PLAY
connection.write(struct.pack('!cB', 'p', 1))
elif (walk_mode == WALK_RESET):
walk_mode = WALK_PLAY
connection.write(struct.pack('!cB', 'p', 1))
refresh()
line = connection.readline()
if (len(line) > 0):
        sys.stdout.write(line)
|
|
"""RyuApp shim between Ryu and Valve."""
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2018 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from functools import partial
from ryu.controller.handler import CONFIG_DISPATCHER
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.controller import dpset
from ryu.controller import event
from ryu.controller import ofp_event
from ryu.lib import hub
from faucet.config_parser import get_config_for_api
from faucet.valve_ryuapp import EventReconfigure, RyuAppBase
from faucet.valve_util import dpid_log, kill_on_exception
from faucet import faucet_experimental_api
from faucet import faucet_experimental_event
from faucet import faucet_bgp
from faucet import faucet_dot1x
from faucet import valves_manager
from faucet import faucet_metrics
from faucet import valve_of
class EventFaucetExperimentalAPIRegistered(event.EventBase):
"""Event used to notify that the API is registered with Faucet."""
pass
class EventFaucetResolveGateways(event.EventBase):
"""Event used to trigger gateway re/resolution."""
pass
class EventFaucetStateExpire(event.EventBase):
"""Event used to trigger expiration of state in controller."""
pass
class EventFaucetMetricUpdate(event.EventBase):
"""Event used to trigger update of metrics."""
pass
class EventFaucetAdvertise(event.EventBase):
"""Event used to trigger periodic network advertisements (eg IPv6 RAs)."""
pass
class EventFaucetLLDPAdvertise(event.EventBase):
"""Event used to trigger periodic LLDP beacons."""
pass
class EventFaucetStackLinkStates(event.EventBase):
"""Event used to update link stack states."""
pass
class Faucet(RyuAppBase):
"""A RyuApp that implements an L2/L3 learning VLAN switch.
Valve provides the switch implementation; this is a shim for the Ryu
event handling framework to interface with Valve.
"""
_CONTEXTS = {
'dpset': dpset.DPSet,
'faucet_experimental_api': faucet_experimental_api.FaucetExperimentalAPI,
}
_EVENTS = [EventFaucetExperimentalAPIRegistered]
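    # Periodic service table: each event class maps to (Valve method name,
    # interval in seconds). start() spawns one rescheduling thread per entry;
    # EventFaucetMetricUpdate maps to None because it is handled directly by
    # metric_update() rather than dispatched through _valve_flow_services().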
_VALVE_SERVICES = {
EventFaucetMetricUpdate: (None, 5),
EventFaucetResolveGateways: ('resolve_gateways', 2),
EventFaucetStateExpire: ('state_expire', 5),
EventFaucetAdvertise: ('advertise', 5),
EventFaucetLLDPAdvertise: ('send_lldp_beacons', 5),
EventFaucetStackLinkStates: ('update_stack_link_states', 2),
}
logname = 'faucet'
exc_logname = logname + '.exception'
bgp = None
metrics = None
notifier = None
valves_manager = None
def __init__(self, *args, **kwargs):
super(Faucet, self).__init__(*args, **kwargs)
self.api = kwargs['faucet_experimental_api']
self.metrics = faucet_metrics.FaucetMetrics(reg=self._reg)
self.bgp = faucet_bgp.FaucetBgp(self.logger, self.metrics, self._send_flow_msgs)
self.dot1x = faucet_dot1x.FaucetDot1x(
self.logger, self.metrics, self._send_flow_msgs)
self.notifier = faucet_experimental_event.FaucetExperimentalEventNotifier(
self.get_setting('EVENT_SOCK'), self.metrics, self.logger)
self.valves_manager = valves_manager.ValvesManager(
self.logname, self.logger, self.metrics, self.notifier, self.bgp,
self.dot1x, self._send_flow_msgs)
@kill_on_exception(exc_logname)
def start(self):
super(Faucet, self).start()
# Start Prometheus
prom_port = int(self.get_setting('PROMETHEUS_PORT'))
prom_addr = self.get_setting('PROMETHEUS_ADDR')
self.metrics.start(prom_port, prom_addr)
# Start event notifier
notifier_thread = self.notifier.start()
if notifier_thread is not None:
self.threads.append(notifier_thread)
for service_event, service_pair in list(self._VALVE_SERVICES.items()):
_, interval = service_pair
self.threads.append(hub.spawn(
partial(self._thread_reschedule, service_event(), interval)))
# Register to API
self.api._register(self)
self.send_event_to_observers(EventFaucetExperimentalAPIRegistered())
def _delete_deconfigured_dp(self, deleted_dpid):
self.logger.info(
'Deleting de-configured %s', dpid_log(deleted_dpid))
ryu_dp = self.dpset.get(deleted_dpid)
if ryu_dp is not None:
ryu_dp.close()
@set_ev_cls(EventReconfigure, MAIN_DISPATCHER)
@kill_on_exception(exc_logname)
def reload_config(self, ryu_event):
"""Handle a request to reload configuration."""
super(Faucet, self).reload_config(ryu_event)
self.valves_manager.request_reload_configs(
time.time(), self.config_file, delete_dp=self._delete_deconfigured_dp)
@kill_on_exception(exc_logname)
def _send_flow_msgs(self, valve, flow_msgs, ryu_dp=None):
"""Send OpenFlow messages to a connected datapath.
Args:
            valve: Valve instance or None.
flow_msgs (list): OpenFlow messages to send.
ryu_dp: Override datapath from DPSet.
"""
if ryu_dp is None:
ryu_dp = self.dpset.get(valve.dp.dp_id)
if not ryu_dp:
valve.logger.error('send_flow_msgs: DP not up')
return
valve.send_flows(ryu_dp, flow_msgs)
    def _get_valve(self, ryu_event, require_running=False):
        """Get Valve instance to respond to an event.
Args:
ryu_event (ryu.controller.event.Event): event
require_running (bool): require DP to be running.
Returns:
valve, ryu_dp, msg: tuple of Nones, or datapath object, Ryu datapath, and Ryu msg (if any)
"""
valve, ryu_dp, msg = self._get_datapath_obj(
self.valves_manager.valves, ryu_event)
if valve:
if msg:
valve.ofchannel_log([msg])
if require_running and not valve.dp.running:
valve = None
return (valve, ryu_dp, msg)
def _config_files_changed(self):
return self.valves_manager.config_watcher.files_changed()
@set_ev_cls(EventFaucetMetricUpdate, MAIN_DISPATCHER)
@kill_on_exception(exc_logname)
def metric_update(self, _):
"""Handle a request to update metrics in the controller."""
self.valves_manager.update_metrics(time.time())
@set_ev_cls(EventFaucetResolveGateways, MAIN_DISPATCHER)
@set_ev_cls(EventFaucetStateExpire, MAIN_DISPATCHER)
@set_ev_cls(EventFaucetAdvertise, MAIN_DISPATCHER)
@set_ev_cls(EventFaucetLLDPAdvertise, MAIN_DISPATCHER)
@set_ev_cls(EventFaucetStackLinkStates, MAIN_DISPATCHER)
@kill_on_exception(exc_logname)
def _valve_flow_services(self, ryu_event):
"""Call a method on all Valves and send any resulting flows."""
self.valves_manager.valve_flow_services(
time.time(),
self._VALVE_SERVICES[type(ryu_event)][0])
def get_config(self):
"""FAUCET experimental API: return config for all Valves."""
return get_config_for_api(self.valves_manager.valves)
def get_tables(self, dp_id):
"""FAUCET experimental API: return config tables for one Valve."""
if dp_id in self.valves_manager.valves:
return self.valves_manager.valves[dp_id].dp.get_tables()
return {}
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER) # pylint: disable=no-member
@kill_on_exception(exc_logname)
def packet_in_handler(self, ryu_event):
"""Handle a packet in event from the dataplane.
Args:
ryu_event (ryu.controller.event.EventReplyBase): packet in message.
"""
valve, _, msg = self._get_valve(ryu_event, require_running=True)
if valve is None:
return
self.valves_manager.valve_packet_in(ryu_event.timestamp, valve, msg)
@set_ev_cls(ofp_event.EventOFPErrorMsg, MAIN_DISPATCHER) # pylint: disable=no-member
@kill_on_exception(exc_logname)
def error_handler(self, ryu_event):
"""Handle an OFPError from a datapath.
Args:
ryu_event (ryu.controller.ofp_event.EventOFPErrorMsg): trigger
"""
valve, _, msg = self._get_valve(ryu_event)
if valve is None:
return
valve.oferror(msg)
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER) # pylint: disable=no-member
@kill_on_exception(exc_logname)
def features_handler(self, ryu_event):
"""Handle receiving a switch features message from a datapath.
Args:
ryu_event (ryu.controller.ofp_event.EventOFPStateChange): trigger.
"""
valve, ryu_dp, msg = self._get_valve(ryu_event)
if valve is None:
return
self._send_flow_msgs(valve, valve.switch_features(msg), ryu_dp=ryu_dp)
@kill_on_exception(exc_logname)
def _datapath_connect(self, ryu_event):
"""Handle any/all re/connection of a datapath.
Args:
ryu_event (ryu.controller.ofp_event.Event)
"""
now = time.time()
valve, ryu_dp, _ = self._get_valve(ryu_event)
if valve is None:
return
discovered_up_ports = [
port.port_no for port in list(ryu_dp.ports.values())
if valve_of.port_status_from_state(port.state) and not valve_of.ignore_port(port.port_no)]
self._send_flow_msgs(valve, valve.datapath_connect(now, discovered_up_ports))
self.valves_manager.stack_topo_change(now, valve)
@kill_on_exception(exc_logname)
def _datapath_disconnect(self, ryu_event):
"""Handle any/all disconnection of a datapath.
Args:
ryu_event (ryu.controller.ofp_event.Event)
"""
valve, _, _ = self._get_valve(ryu_event)
if valve is None:
return
valve.datapath_disconnect()
self.valves_manager.stack_topo_change(time.time(), valve)
@set_ev_cls(ofp_event.EventOFPDescStatsReply, MAIN_DISPATCHER) # pylint: disable=no-member
@kill_on_exception(exc_logname)
def desc_stats_reply_handler(self, ryu_event):
"""Handle OFPDescStatsReply from datapath.
Args:
ryu_event (ryu.controller.ofp_event.EventOFPDescStatsReply): trigger.
"""
valve, _, msg = self._get_valve(ryu_event)
if valve is None:
return
valve.ofdescstats_handler(msg.body)
@set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER) # pylint: disable=no-member
@kill_on_exception(exc_logname)
def port_status_handler(self, ryu_event):
"""Handle a port status change event.
Args:
ryu_event (ryu.controller.ofp_event.EventOFPPortStatus): trigger.
"""
valve, _, msg = self._get_valve(ryu_event, require_running=True)
if valve is None:
return
self._send_flow_msgs(valve, valve.port_status_handler(
msg.desc.port_no, msg.reason, msg.desc.state))
@set_ev_cls(ofp_event.EventOFPFlowRemoved, MAIN_DISPATCHER) # pylint: disable=no-member
@kill_on_exception(exc_logname)
def flowremoved_handler(self, ryu_event):
"""Handle a flow removed event.
Args:
ryu_event (ryu.controller.ofp_event.EventOFPFlowRemoved): trigger.
"""
valve, ryu_dp, msg = self._get_valve(ryu_event, require_running=True)
if valve is None:
return
if msg.reason == ryu_dp.ofproto.OFPRR_IDLE_TIMEOUT:
self._send_flow_msgs(valve, valve.flow_timeout(time.time(), msg.table_id, msg.match))
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import fileinput
import re
import subprocess
import os
import sys
import shutil
from pprint import pprint
import datetime
print ("Welcome to preapre ppa package...")
dists = []
# https://wiki.ubuntu.com/Releases
dists.append({
"name": "Ubuntu 16.04.7 LTS (xenial)",
"dist_name": "xenial",
"ppa_name_suffix": "ppa-ubuntu-16-04-xenial-1",
"end": "April 2021",
"version": "16.04.7 LTS"
})
dists.append({
"name": "Ubuntu 18.04.5 LTS (bionic)",
"dist_name": "bionic",
"ppa_name_suffix": "ppa-ubuntu-18-04-bionic-2",
"end": "April 2023",
"version": "18.04.5 LTS"
})
dists.append({
"name": "Ubuntu 20.04.2 LTS (focal)",
"dist_name": "focal",
"ppa_name_suffix": "ppa-ubuntu-20-04-focal-2",
"end": "April 2025",
"version": "20.04.2 LTS"
})
dists.append({
"name": "Ubuntu 20.10 (groovy)",
"dist_name": "groovy",
"ppa_name_suffix": "ppa-ubuntu-20-10-groovy-1",
"end": "July 2021",
"version": "20.10"
})
print("Please choose dist name:")
i = 0
for d in dists:
print(' ' + str(i) + '. ' + d['dist_name'] + ' (' + d['version'] + '), date end: ' + d['end'])
i = i + 1
dist_num_ = input("Enter number of dist: ")
dist_num_ = int(dist_num_)
if dist_num_ < 0 or dist_num_ >= len(dists):
sys.exit("Wrong dist number")
dist_name_ = dists[dist_num_]['dist_name']
ppa_name_ = dists[dist_num_]['ppa_name_suffix']
print("Dist Name: " + dist_name_)
#############################################
def clear_all():
print( " -> Clear all")
if os.path.exists('./inventory-files'):
shutil.rmtree('./inventory-files')
print( " -> DONE")
print( " -> Cleanup previous ppa packages")
onlyfiles = [f for f in os.listdir('./') if os.path.isfile(os.path.join('./', f))]
for f in onlyfiles:
m = re.search(r'^inventory-files_(\d+\.\d+\.\d+)-ppa-.*(\.orig\.tar\.gz|source\.changes|_source\.build|_source.ppa.upload|\.tar\.gz|_source\.buildinfo|\.dsc)$', f)
if m:
print('Remove file ' + f)
os.remove(f)
clear_all()
print( " -> Prepare sources directory ")
os.mkdir('./inventory-files')
def copytree(src, dst, symlinks=False, ignore=None):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
shutil.copytree('../../src', './inventory-files/src', symlinks=False, ignore=None)
shutil.copytree('../../res', './inventory-files/res', symlinks=False, ignore=None)
shutil.copy2('../../inventory-files.pro', './inventory-files/inventory-files.pro')
shutil.copy2('../../inventory-files.qrc', './inventory-files/inventory-files.qrc')
shutil.copy2('../../VERSION', './inventory-files/VERSION')
shutil.copy2('../../LICENSE', './inventory-files/LICENSE')
shutil.copytree('./debian', './inventory-files/debian', symlinks=False, ignore=None)
shutil.copytree('./install-files', './inventory-files/install-files', symlinks=False, ignore=None)
print( " -> DONE ")
#############################################
print( " -> Read version of package ")
f = open("../../VERSION",'r')
filedata = f.read()
f.close()
print(filedata)
m = re.search('(\\d+\\.\\d+\\.\\d+)', filedata)
if m:
current_version = m.group(1)
print ("\n *** Current version: " + current_version + "\n")
# parse CHANGELOG.md
changelog_list = []
version_logs = {'version': '', 'dt': '', 'logs': []}
lines = [line.rstrip('\n') for line in open('../../CHANGELOG.md')]
for li in lines:
m = re.search(r'[ ]*##[ ]+\[v(\d+\.\d+\.\d+)\][ ]*-[ ]*(\d+)-(\d+)-(\d+)[ ]*\((.*)\).*', li)
if m:
if version_logs['version'] != '':
changelog_list.append(version_logs)
version_logs = {'version': '', 'dt': '', 'logs': []}
ver = m.group(1)
year = int(m.group(2))
month = int(m.group(3))
day = int(m.group(4))
_dt = datetime.date(year, month, day)
# must be format Mon, 22 Mar 2010 00:37:31 +0100
dt = _dt.strftime("%a, %d %b %Y %H:%M:%S +0700")
version_logs['version'] = ver
        version_logs['dt'] = dt
        continue
if version_logs['version'] == '':
continue
m = re.search('[ ]*-[ ]*(.*)', li)
if m:
line_log = m.group(1)
version_logs['logs'].append(line_log)
if version_logs['version'] != '':
changelog_list.append(version_logs)
version_logs = {'version': '', 'dt': '', 'logs': []}
print(version_logs)
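# For reference, the parser above assumes CHANGELOG.md entries shaped roughly like the
# following hypothetical example (matching the two regexes used in the loop):
#
#   ## [v0.1.2] - 2021-03-22 (stable)
#   - fixed window resize crash
#   - added export to CSV
#
# Such a heading yields version '0.1.2' and the debian-style date string
# 'Mon, 22 Mar 2021 00:00:00 +0700' produced by strftime above; the "- " lines become log entries.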
#############################################
print( " -> Prepare changelog ")
changelog="./inventory-files/debian/changelog"
f = open(changelog,'w')
li_count = 0
for li in changelog_list:
if li_count != 0:
f.write("\n")
f.write("\n")
li_count = li_count + 1
f.write("inventory-files (" + li['version'] + "-" + ppa_name_ + ") " + dist_name_ + "; urgency=low\n\n")
for li_log in li['logs']:
li_log = li_log.strip()
if li_log != '':
f.write(" * " + li_log + "\n")
f.write("\n")
#if li['dt'] == '?':
# li['dt'] = subprocess.Popen(['date', '-R'], stdout=subprocess.PIPE).communicate()[0]
f.write(" -- Evgenii Sopov <[email protected]> " + li['dt']) # 2 space!!!
f.write("\n")
f.close()
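# With the data above, each generated changelog stanza should look roughly like this
# (version, suffix and log lines are illustrative only):
#
#   inventory-files (0.1.2-ppa-ubuntu-20-04-focal-2) focal; urgency=low
#
#    * fixed window resize crash
#    * added export to CSV
#
#    -- Evgenii Sopov <[email protected]>  Mon, 22 Mar 2021 00:00:00 +0700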
print( " -> DONE ")
# TODO
# subprocess.call("./clean_sources_ppa.sh")
#############################################
print( " -> Prepare tar.gz source package ")
os.system("cd ./ && tar -acf inventory-files_" + current_version + "-" + ppa_name_ + ".orig.tar.gz inventory-files")
os.system("cd ./inventory-files && debuild -S -sa")
print( " -> DONE ")
dput_filename = "inventory-files_" + current_version + "-" + ppa_name_ + "_source.changes"
os.system("debsign -k 3AA3105C5766233DD2F243A3A742BE2E628592AC " + dput_filename)
sys.stdout.write("Are you want try upload source package to ppa.launchpad? [y/n]: ")
ask_upload_ = input().lower()
if ask_upload_ == "y":
os.system("dput ppa:sea5kg/inventory-files " + dput_filename)
|
|
"""Generates API documentation by introspection."""
import rest_framework
from rest_framework import viewsets
from rest_framework.serializers import BaseSerializer
from .introspectors import APIViewIntrospector, \
WrappedAPIViewIntrospector, \
ViewSetIntrospector, BaseMethodIntrospector, IntrospectorHelper, \
get_default_value, get_data_type
from .compat import OrderedDict
class DocumentationGenerator(object):
# Serializers defined in docstrings
explicit_serializers = set()
# Serializers defined in fields
fields_serializers = set()
# Response classes defined in docstrings
explicit_response_types = dict()
def generate(self, apis):
"""
Returns documentation for a list of APIs
"""
api_docs = []
for api in apis:
api_docs.append({
'description': IntrospectorHelper.get_summary(api['callback']),
'path': api['path'],
'operations': self.get_operations(api, apis),
})
return api_docs
def get_introspector(self, api, apis):
path = api['path']
pattern = api['pattern']
callback = api['callback']
if callback.__module__ == 'rest_framework.decorators':
return WrappedAPIViewIntrospector(callback, path, pattern)
elif issubclass(callback, viewsets.ViewSetMixin):
patterns = [a['pattern'] for a in apis
if a['callback'] == callback]
return ViewSetIntrospector(callback, path, pattern,
patterns=patterns)
else:
return APIViewIntrospector(callback, path, pattern)
def get_operations(self, api, apis=None):
"""
Returns docs for the allowed methods of an API endpoint
"""
if apis is None:
apis = [api]
operations = []
introspector = self.get_introspector(api, apis)
for method_introspector in introspector:
if not isinstance(method_introspector, BaseMethodIntrospector) or \
method_introspector.get_http_method() == "OPTIONS":
continue # No one cares. I impose JSON.
doc_parser = method_introspector.get_yaml_parser()
serializer = self._get_method_serializer(method_introspector)
response_type = self._get_method_response_type(
doc_parser, serializer, introspector, method_introspector)
operation = {
'method': method_introspector.get_http_method(),
'summary': method_introspector.get_summary(),
'nickname': method_introspector.get_nickname(),
'notes': method_introspector.get_notes(),
'type': response_type,
}
if doc_parser.yaml_error is not None:
operation['notes'] += "<pre>YAMLError:\n {err}</pre>".format(
err=doc_parser.yaml_error)
response_messages = doc_parser.get_response_messages()
parameters = doc_parser.discover_parameters(
inspector=method_introspector)
if parameters:
operation['parameters'] = parameters
if response_messages:
operation['responseMessages'] = response_messages
operations.append(operation)
return operations
def get_models(self, apis):
"""
Builds a list of Swagger 'models'. These represent
DRF serializers and their fields
"""
serializers = self._get_serializer_set(apis)
serializers.update(self.explicit_serializers)
serializers.update(
self._find_field_serializers(serializers)
)
models = {}
for serializer in serializers:
data = self._get_serializer_fields(serializer)
# Register 2 models with different subset of properties suitable
# for data reading and writing.
# i.e. rest framework does not output write_only fields in response
# or require read_only fields in complex input.
serializer_name = IntrospectorHelper.get_serializer_name(serializer)
# Writing
# no readonly fields
w_name = "Write{serializer}".format(serializer=serializer_name)
w_properties = OrderedDict((k, v) for k, v in data['fields'].items()
if k not in data['read_only'])
models[w_name] = {
'id': w_name,
'required': [i for i in data['required'] if i in w_properties.keys()],
'properties': w_properties,
}
# Reading
# no write_only fields
r_name = serializer_name
r_properties = OrderedDict((k, v) for k, v in data['fields'].items()
if k not in data['write_only'])
models[r_name] = {
'id': r_name,
'required': [i for i in r_properties.keys()],
'properties': r_properties,
}
# Enable original model for testing purposes
# models[serializer_name] = {
# 'id': serializer_name,
# 'required': data['required'],
# 'properties': data['fields'],
# }
models.update(self.explicit_response_types)
models.update(self.fields_serializers)
return models
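    # Illustrative example (hypothetical serializer): if IntrospectorHelper.get_serializer_name()
    # returns 'Book', get_models() registers two entries:
    #   'WriteBook' - all fields except read_only ones (suitable for request bodies), and
    #   'Book'      - all fields except write_only ones (suitable for responses).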
def _get_method_serializer(self, method_inspector):
"""
Returns serializer used in method.
        Registers a custom serializer from the docstring in scope.
        The serializer may be ignored if the docstring explicitly says so.
"""
serializer = method_inspector.get_response_serializer_class()
doc_parser = method_inspector.get_yaml_parser()
if doc_parser.get_response_type() is not None:
# Custom response class detected
return None
if doc_parser.should_omit_serializer():
serializer = None
return serializer
def _get_method_response_type(self, doc_parser, serializer,
view_inspector, method_inspector):
"""
Returns response type for method.
This might be custom `type` from docstring or discovered
serializer class name.
        Once a custom `type` is found in the docstring, it is
        registered in the scope.
"""
response_type = doc_parser.get_response_type()
if response_type is not None:
# Register class in scope
view_name = view_inspector.callback.__name__
view_name = view_name.replace('ViewSet', '')
view_name = view_name.replace('APIView', '')
view_name = view_name.replace('View', '')
response_type_name = "{view}{method}Response".format(
view=view_name,
method=method_inspector.method.title().replace('_', '')
)
self.explicit_response_types.update({
response_type_name: {
"id": response_type_name,
"properties": response_type
}
})
return response_type_name
else:
serializer_name = IntrospectorHelper.get_serializer_name(serializer)
if serializer_name is not None:
return serializer_name
return 'object'
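    # Illustrative naming (hypothetical view): a custom `type:` declared in the docstring of the
    # `list` method of `UserViewSet` would be registered under the name 'UserListResponse'.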
def _get_serializer_set(self, apis):
"""
Returns a set of serializer classes for a provided list
of APIs
"""
serializers = set()
for api in apis:
introspector = self.get_introspector(api, apis)
for method_introspector in introspector:
serializer = self._get_method_serializer(method_introspector)
if serializer is not None:
serializers.add(serializer)
extras = method_introspector.get_extra_serializer_classes()
for extra in extras:
if extra is not None:
serializers.add(extra)
return serializers
def _find_field_serializers(self, serializers, found_serializers=set()):
"""
Returns set of serializers discovered from fields
"""
def get_thing(field, key):
if rest_framework.VERSION >= '3.0.0':
from rest_framework.serializers import ListSerializer
if isinstance(field, ListSerializer):
return key(field.child)
return key(field)
serializers_set = set()
for serializer in serializers:
fields = serializer().get_fields()
for name, field in fields.items():
if isinstance(field, BaseSerializer):
serializers_set.add(get_thing(field, lambda f: f))
if field not in found_serializers:
serializers_set.update(
self._find_field_serializers(
(get_thing(field, lambda f: f.__class__),),
serializers_set))
return serializers_set
def _get_serializer_fields(self, serializer):
"""
Returns serializer fields in the Swagger MODEL format
"""
if serializer is None:
return
if hasattr(serializer, '__call__'):
fields = serializer().get_fields()
else:
fields = serializer.get_fields()
data = OrderedDict({
'fields': OrderedDict(),
'required': [],
'write_only': [],
'read_only': [],
})
for name, field in fields.items():
if getattr(field, 'write_only', False):
data['write_only'].append(name)
if getattr(field, 'read_only', False):
data['read_only'].append(name)
if getattr(field, 'required', False):
data['required'].append(name)
data_type = get_data_type(field) or 'string'
if data_type == 'hidden':
continue
# guess format
data_format = 'string'
if data_type in BaseMethodIntrospector.PRIMITIVES:
data_format = BaseMethodIntrospector.PRIMITIVES.get(data_type)[0]
description = getattr(field, 'help_text', '')
if not description or description.strip() == '':
description = None
f = {
'description': description,
'type': data_type,
'format': data_format,
'required': getattr(field, 'required', False),
'defaultValue': get_default_value(field),
'readOnly': getattr(field, 'read_only', None),
}
# Swagger type is a primitive, format is more specific
if f['type'] == f['format']:
del f['format']
# defaultValue of null is not allowed, it is specific to type
if f['defaultValue'] == None:
del f['defaultValue']
# Min/Max values
max_val = getattr(field, 'max_val', None)
min_val = getattr(field, 'min_val', None)
            if min_val is not None and data_type == 'integer':
                f['minimum'] = min_val
if max_val is not None and data_type == 'integer':
f['maximum'] = max_val
# ENUM options
if get_data_type(field) in ['multiple choice', 'choice']:
if isinstance(field.choices, list):
f['enum'] = [k for k, v in field.choices]
elif isinstance(field.choices, dict):
f['enum'] = [k for k, v in field.choices.items()]
# Support for complex types
if isinstance(field, BaseSerializer):
field_serializer = IntrospectorHelper.get_serializer_name(field)
if getattr(field, 'write_only', False):
field_serializer = "Write{}".format(field_serializer)
f['type'] = field_serializer
if rest_framework.VERSION < '3.0.0':
has_many = field.many
else:
from rest_framework.serializers import ListSerializer
has_many = isinstance(field, ListSerializer)
if has_many:
f['type'] = 'array'
if data_type in BaseMethodIntrospector.PRIMITIVES:
f['items'] = {'type': data_type}
else:
f['items'] = {'$ref': field_serializer}
# memorize discovered field
data['fields'][name] = f
return data
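    # Illustrative shape of the returned structure for a hypothetical serializer with a required
    # CharField 'title' and a read-only IntegerField 'id' (assuming get_data_type() maps them to
    # 'string' and 'integer'):
    #   {
    #       'fields': {'title': {...}, 'id': {...}},
    #       'required': ['title'],
    #       'write_only': [],
    #       'read_only': ['id'],
    #   }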
|
|
#
# Proximate - Peer-to-peer social networking
#
# Copyright (c) 2008-2011 Nokia Corporation
#
# All rights reserved.
#
# This software is licensed under The Clear BSD license.
# See the LICENSE file for more details.
#
import gtk
import gobject
import proximatestate
from plugins import get_plugin_by_type
from support import get_debug_mode
from os.path import join
from pathname import get_dir, get_path, ICON_DIR, DEFAULT_COMMUNITY_ICON, \
DEFAULT_USER_ICON, SMALL_KEYS_ICON, PROXIMATE_COMMUNITY_ICON
from proximateprotocol import PLUGIN_TYPE_COMMUNITY, PLUGIN_TYPE_KEY_MANAGEMENT, \
PLUGIN_TYPE_STATE, MAX_FACE_DIMENSION, DEFAULT_COMMUNITY_NAME
from pic_choose_dlg import Picture_Choose_Dialog
from proximateprotocol import PLUGIN_TYPE_NOTIFICATION, valid_status
from guiutils import GUI_Page, Action_List, center_image, add_icon_to_image, \
new_scrollarea, pango_escape
from utils import str_to_int
ACTION_IMAGE_SIZE = 48
def get_status_icon(status, size):
if not valid_status(status):
status = 'normal'
fname = '%dpx-status_icon_%s.png' % (size, status)
return gtk.gdk.pixbuf_new_from_file_at_size(join(get_dir(ICON_DIR), fname), size, size)
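# For example, get_status_icon('normal', 32) loads '32px-status_icon_normal.png' from ICON_DIR,
# scaled to fit 32x32 pixels.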
# descriptive names for all profile fields
field_descriptions = {
'name': 'Name',
'age': 'Age',
'gender': 'Gender',
'city': 'City',
'state': 'State',
'country': 'Country',
'birth_date': 'Birth Date',
'email': 'E-mail',
'www': 'WWW',
'occupation': 'Occupation',
'phone_numbers': 'Phone Numbers',
'languages': 'Languages',
'description': 'Description',
'uid': 'uid',
'ip': 'IP',
'port': 'Port',
'hops': 'Hops',
'status_icon': 'Status icon',
'v': 'Version',
'faceversion': 'Face version',
'myfaceversion': 'My face version',
}
class User_Page(GUI_Page):
def __init__(self, gui, community_gui, user):
"""User_Page class is for showing user's profile information
defined in the Edit Profile dialog and for showing user's
communities.
        update_user_page() has to be called after the user's profile
        or communities change so that new values are loaded into the GUI."""
GUI_Page.__init__(self, user.get('nick'))
self.main_gui = gui
self.community_gui = community_gui
self.user = user
self.community = get_plugin_by_type(PLUGIN_TYPE_COMMUNITY)
self.state_plugin = get_plugin_by_type(PLUGIN_TYPE_STATE)
self.notebook = gtk.Notebook()
self.notebook.set_show_tabs(True)
self.notebook.set_show_border(False)
self.initialize_profile_page()
self.initialize_user_action_page()
self.initialize_communities_page()
self.pack_start(self.notebook)
self.show_all()
def get_user(self):
return self.user
def back_action(self):
self.community_gui.user_pages.pop(self.user)
self.main_gui.remove_page(self)
self.destroy()
return True
def update_user_page(self):
""" Function calls other functions to update user's
profile, community, content and plugin pages. """
self.update_profile_widgets()
self.set_page_title(self.user.get('nick'))
def initialize_user_action_page(self):
vbox = gtk.VBox()
add_user_icon = gtk.gdk.pixbuf_new_from_file_at_size(join(get_dir(ICON_DIR), "64px-plus_icon.png"), ACTION_IMAGE_SIZE, ACTION_IMAGE_SIZE)
remove_user_icon = gtk.gdk.pixbuf_new_from_file_at_size(join(get_dir(ICON_DIR), "64px-no_icon.png"), ACTION_IMAGE_SIZE, ACTION_IMAGE_SIZE)
exchange_keys_icon = gtk.gdk.pixbuf_new_from_file_at_size(join(get_dir(ICON_DIR), "key.png"), ACTION_IMAGE_SIZE, ACTION_IMAGE_SIZE)
refetch_icon = gtk.gdk.pixbuf_new_from_file_at_size(join(get_dir(ICON_DIR), "64px-edit_metadata_icon.png"), ACTION_IMAGE_SIZE, ACTION_IMAGE_SIZE)
action_buttons = [(add_user_icon, 'Invite to\nCommunity', self.show_invite_dialog_cb),
(refetch_icon, 'Refetch\nProfile', self.refetch_profile_cb),
]
if self.state_plugin.options.personal_communities:
action_buttons.insert(0, (add_user_icon, 'Add to\n Community',
self.show_add_dialog_cb))
action_buttons.insert(1, (remove_user_icon, 'Remove from\nCommunity',
self.show_remove_dialog_cb))
if self.state_plugin.options.key_exchange:
action_buttons.insert(3, (exchange_keys_icon, 'Exchange\nKeys', self.show_exchange_keys_dialog_cb))
self.actions = Action_List()
for action in action_buttons:
(icon, text, cb) = action
self.actions.add_button(icon, text, cb)
vbox.pack_start(self.actions.get_widget())
self.announce_checkbox = gtk.CheckButton('Make an alarm when user appears')
vbox.pack_start(self.announce_checkbox, False, False)
self.announce_checkbox.set_active(self.user.get('friend'))
self.announce_checkbox.connect('toggled', self.set_announce)
self.notebook.append_page(vbox, gtk.Label('More actions'))
def initialize_profile_page(self):
profile_hbox = gtk.HBox()
vbox = gtk.VBox()
picture_hbox = gtk.HBox()
self.profile_image = gtk.Image()
self.profile_image.set_size_request(MAX_FACE_DIMENSION+10, MAX_FACE_DIMENSION+10)
picture_hbox.pack_start(self.profile_image, False, True)
self.status_label = gtk.Label()
self.status_label.set_line_wrap(True)
picture_hbox.pack_start(self.status_label)
vbox.pack_start(picture_hbox)
self.profile_info_label = gtk.Label()
self.profile_info_label.set_alignment(0.1, 0.01) # 0.01 on purpose
self.profile_info_label.set_line_wrap(True)
vbox.pack_start(self.profile_info_label)
profile_hbox.pack_start(vbox)
self.user_action_list = User_Action_List(self.community_gui,
self.get_user)
profile_hbox.pack_start(self.user_action_list.action_view)
swindow = new_scrollarea()
swindow.set_border_width(0)
swindow.add_with_viewport(profile_hbox)
self.update_profile_widgets()
self.notebook.append_page(swindow, gtk.Label('Profile'))
def initialize_communities_page(self):
vbox = gtk.VBox()
self.list = Community_List(self.view_community)
for com in self.community.get_user_communities(self.user):
self.list.add_community(com)
vbox.pack_start(self.list.get_widget())
self.notebook.append_page(vbox, gtk.Label('User communities'))
def view_community(self, com):
self.community_gui.show_com_page(com)
def update_profile_widgets(self):
""" Reads new profile information from user and
updates profile page's widgets."""
image = get_user_profile_picture(self.user)
if not self.user.present:
image.saturate_and_pixelate(image, 0.0, True)
self.profile_image.set_from_pixbuf(image)
value = self.user.get('status')
if value == None:
value = ''
self.status_label.set_text(value)
self.profile_info_label.set_markup(self.construct_profile_info_str())
def construct_profile_info_str(self):
def heading(s):
# Returns a heading string s formatted with pango markup and
# a new-line
return '<span color="slategray" weight="bold" size="large">%s</span>\n' % pango_escape(s)
def field(s):
value = self.user.get(s)
if value != None:
return '<b>%s:</b> %s\n' % (field_descriptions[s], pango_escape(str(value)))
else:
return ''
def join_list(l):
out = []
for s in l:
value = self.user.get(s)
if value != None:
out.append(pango_escape(str(value)))
if len(out) > 0:
return ', '.join(out) + '\n'
else:
return ''
s = heading(self.user.get('nick'))
s += field('name')
s += join_list(('age', 'gender'))
s += field('birth_date')
s += join_list(('city', 'state', 'country'))
s += field('phone_numbers')
s += field('email')
s += field('www')
s += field('occupation')
s += field('languages')
s += field('description')
s += heading('Last contact')
l = []
for (t, location) in self.user.log():
ss = t
if len(location) > 0:
ss += '\n(at %s)' %(location)
l.append(ss)
if len(l) == 0:
l = ['never']
s += pango_escape('\n'.join(l)) + '\n'
if get_debug_mode():
s += heading('Debug information')
s += field('uid')
s += field('ip')
s += field('port')
s += field('hops')
s += field('status_icon')
s += field('v')
s += field('faceversion')
s += field('myfaceversion')
return s
def show_add_dialog_cb(self, widget):
Add_To_Community_Dialog(self.main_gui, self.user)
def show_invite_dialog_cb(self, widget):
Invite_To_Community_Dialog(self.main_gui, self.user)
def show_remove_dialog_cb(self, widget):
Remove_From_Community_Dialog(self.main_gui, self.user)
def show_exchange_keys_dialog_cb(self, widget):
keymanagement = get_plugin_by_type(PLUGIN_TYPE_KEY_MANAGEMENT)
keymanagement.show_exchange_keys_gui(self.user)
def refetch_profile_cb(self, widget):
self.user.force_profile_update()
notification = get_plugin_by_type(PLUGIN_TYPE_NOTIFICATION)
notification.notify('Reloading profile for %s' %(self.user.tag()))
def set_announce(self, widget):
self.user.set('friend', widget.get_active())
class My_User_Page(GUI_Page):
def __init__(self, gui, user):
"""User_Page class is for showing user's profile information
defined in the Edit Profile dialog and for showing user's
communities.
update_user_page() have to be called after user's profile
is changed or after user's communities are changed so that
new values are loaded into GUI"""
GUI_Page.__init__(self, 'My profile')
# references to gui components which text or other
# attribute will be modified if user's profile changes
self.profile_widgets = {}
self.main_gui = gui
self.user = user
self.community = get_plugin_by_type(PLUGIN_TYPE_COMMUNITY)
self.initialize_profile_page_widgets()
self.pic_dialog = Picture_Choose_Dialog(self.main_gui, self.got_picture)
def update_user_page(self):
""" Function calls other functions to update user's
profile, community, content and plugin pages. """
self.update_profile_widgets()
def update_profile_widgets(self):
""" Reads new profile information from user and
updates profile page's widgets."""
image = get_user_profile_picture(self.user)
self.profile_image.set_from_pixbuf(image)
def initialize_profile_page_widgets(self):
self.profile_main_vbox = gtk.VBox()
swindow = new_scrollarea()
swindow.set_border_width(0)
main_hbox = gtk.HBox(False, 20)
picture_vbox = gtk.VBox()
self.profile_image = gtk.Image()
self.profile_image.set_size_request(MAX_FACE_DIMENSION+10, MAX_FACE_DIMENSION+10)
eventbox = gtk.EventBox()
eventbox.connect("button-press-event", self.image_clicked)
eventbox.add(self.profile_image)
picture_vbox.pack_start(gtk.Label('Click picture to change'))
picture_vbox.pack_start(eventbox, True, True)
# User always has a nick
widget = gtk.Entry()
widget.set_text(self.user.get('nick'))
widget.connect("focus-out-event", self.entry_focus_out, 'nick')
self.profile_widgets['nick'] = widget
nick_label = gtk.Label('Nick:')
nick_label.set_alignment(0, 0)
picture_vbox.pack_start(nick_label, False, False)
picture_vbox.pack_start(widget, False, False)
left_hbox = gtk.VBox(False, 20)
left_hbox.pack_start(picture_vbox, False, False)
user_info_vbox = gtk.VBox(False, 5)
profile_components = (('Name:', 'name'),
('Age:', 'age'),
('Gender:', 'gender'),
('City:', 'city'),
('State:', 'state'),
('Country:', 'country'),
('Birth Date:', 'birth_date'),
('E-mail:', 'email'),
('WWW:', 'www'),
('Occupation:', 'occupation'),
('Phone Numbers:', 'phone_numbers'),
('Languages:', 'languages'),
('Description:', 'description'),
)
genders = ('Male', 'Female')
for header, key in profile_components:
hbox = gtk.HBox()
label = gtk.Label(header)
label.set_size_request(130, -1)
label.set_alignment(0, 0)
value = self.user.get(key)
if value == None:
value = ''
if key == 'gender':
# create gender widget separately
widget = gtk.combo_box_entry_new_text()
for gender in genders:
widget.append_text(gender)
entry = widget.child
entry.set_text(str(value))
widget.connect("changed", self.combo_changed, key)
elif key == 'description':
widget = gtk.TextView()
widget.get_buffer().set_text(str(value))
widget.set_property("wrap-mode", gtk.WRAP_CHAR)
widget.set_size_request(-1, 100)
entry = widget
else:
widget = gtk.Entry()
widget.set_text(str(value))
entry = widget
entry.connect("focus-out-event", self.entry_focus_out, key)
hbox.pack_start(label, False, False)
hbox.pack_start(widget, True, True)
self.profile_widgets[key] = entry
user_info_vbox.pack_start(hbox, False, False)
main_hbox.pack_start(left_hbox, False, False)
main_hbox.pack_start(user_info_vbox, True, True)
swindow.add_with_viewport(main_hbox)
self.update_profile_widgets()
self.pack_start(swindow, True, True)
def image_clicked(self, widget, event):
self.pic_dialog.set_picture(proximatestate.seek_face_name(self.user))
self.pic_dialog.show()
def got_picture(self, fname):
self.community.set_my_face(fname)
def combo_changed(self, widget, key):
self.entry_focus_out(widget.child, None, key)
def entry_focus_out(self, entry, event, key):
if key == 'description':
buf = entry.get_buffer()
value = buf.get_text(buf.get_start_iter(), buf.get_end_iter())
else:
value = entry.get_text()
if len(value) == 0:
value = None
if value != self.user.get(key):
if self.user.set(key, value):
self.community.announce_user_change(self.user, allowme=True)
else:
# re-insert old value if set fails
value = self.user.get(key)
if value == None:
value = ''
entry.set_text(str(value))
class Community_List:
COL_ICON = 0
COL_NAME = 1
COL_MEMBERS = 2
COL_DESC = 3
COL_COM = 4
def __init__(self, activate_cb=None):
self.community = get_plugin_by_type(PLUGIN_TYPE_COMMUNITY)
self.store = gtk.ListStore(gtk.gdk.Pixbuf, str, str, str, object)
self.scrollarea = new_scrollarea()
self.scrollarea.set_size_request(-1, 340)
self.view = gtk.TreeView()
self.view.set_headers_visible(True)
cr1 = gtk.CellRendererPixbuf()
cr2 = gtk.CellRendererText()
cr3 = gtk.CellRendererText()
cr4 = gtk.CellRendererText()
cr3.set_property('xalign', 0.1)
col = gtk.TreeViewColumn('Community')
col.pack_start(cr1, False)
col.pack_start(cr2)
col.add_attribute(cr1, 'pixbuf', self.COL_ICON)
col.add_attribute(cr2, 'text', self.COL_NAME)
col2 = gtk.TreeViewColumn('Members')
col2.pack_start(cr3)
col2.add_attribute(cr3, 'text', self.COL_MEMBERS)
self.column_desc = gtk.TreeViewColumn('Description')
self.column_desc.pack_start(cr4)
self.column_desc.add_attribute(cr4, 'text', self.COL_DESC)
self.view.append_column(col)
self.view.append_column(col2)
self.view.append_column(self.column_desc)
self.view.set_model(self.store)
self.view.connect('row-activated', self.row_activated_cb)
self.view.connect_after('size-allocate', self.resized)
self.activated = activate_cb
self.scrollarea.add_with_viewport(self.view)
def get_widget(self):
return self.scrollarea
def add_community(self, c):
n = len(self.community.get_community_members(c))
myself = self.community.get_myself()
if c in self.community.get_user_communities(myself):
n += 1
icon = get_community_icon(c).scale_simple(48, 48, gtk.gdk.INTERP_BILINEAR)
desc = c.get('description')
if desc == None:
desc = ''
desc = desc.replace('\n', ' ')
if n == 0:
icon.saturate_and_pixelate(icon, 0.0, True)
self.store.append([icon, c.get('name'), str(n), desc, c])
else:
self.store.prepend([icon, c.get('name'), str(n), desc, c])
def get_selected(self):
model, selected = self.view.get_selection().get_selected_rows()
if len(selected) == 0:
return None
row = self.store[selected[0]]
return row[self.COL_COM]
def row_activated_cb(self, treeview, path, col):
store = treeview.get_model()
row = store[path]
com = row[self.COL_COM]
if self.activated != None:
self.activated(com)
def resized(self, view, rect):
columns_width = 0
for col in view.get_columns():
if col != self.column_desc:
columns_width += col.get_width()
if rect.width < columns_width:
return
wrap_width = rect.width - columns_width
self.column_desc.get_cell_renderers()[0].set_property('wrap-width', wrap_width)
self.column_desc.set_property('max-width', wrap_width)
store = view.get_model()
i = store.get_iter_first()
while i and store.iter_is_valid(i):
store.row_changed(store.get_path(i), i)
i = store.iter_next(i)
view.set_size_request(0, -1)
class Community_List_Dialog:
def __init__(self, gui, title, actiontext=gtk.STOCK_OK):
self.community = get_plugin_by_type(PLUGIN_TYPE_COMMUNITY)
self.notification = get_plugin_by_type(PLUGIN_TYPE_NOTIFICATION)
self.main_window = gui.get_main_window()
self.dialog = gtk.Dialog(title, self.main_window, gtk.DIALOG_DESTROY_WITH_PARENT,
(actiontext, gtk.RESPONSE_OK,
gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL))
self.dialog.set_modal(True)
self.list = Community_List()
self.dialog.vbox.pack_start(self.list.get_widget(), True, True)
self.dialog.connect("response", self.response_handler)
self.dialog.set_default_size(400, 300)
self.dialog.show_all()
def add_community(self, com):
self.list.add_community(com)
def response_handler(self, widget, event):
if event == gtk.RESPONSE_OK:
com = self.list.get_selected()
if com != None:
self.community_selected(com)
self.dialog.destroy()
class Add_To_Community_Dialog(Community_List_Dialog):
def __init__(self, gui, user):
Community_List_Dialog.__init__(self, gui, 'Add User To Community', actiontext='Add')
self.user = user
communities = self.community.find_communities(None, False, None)
usercoms = self.community.get_user_personal_communities(self.user)
for com in communities:
if not self.community.personal_communities and com.get('peer') == False:
continue
if not com in usercoms:
self.add_community(com)
def community_selected(self, com):
self.community.add_member(com, self.user)
class Invite_To_Community_Dialog(Community_List_Dialog):
def __init__(self, gui, user):
Community_List_Dialog.__init__(self, gui, 'Invite User To Community', actiontext='Invite')
self.user = user
myself = self.community.get_myself()
communities = self.community.get_user_communities(myself)
usercoms = self.community.get_user_communities(self.user)
for com in communities:
if not com in usercoms:
self.add_community(com)
def community_selected(self, com):
if not self.community.invite_member(com, self.user, self.invite_sent):
self.notification.notify('Unable to send an invitation to %s' % self.user.tag(), True)
def invite_sent(self, success):
if not success:
self.notification.notify('Unable to send an invitation to %s' % self.user.tag(), True)
class Remove_From_Community_Dialog(Community_List_Dialog):
def __init__(self, gui, user):
Community_List_Dialog.__init__(self, gui, 'Remove User From Community', actiontext='Remove')
self.user = user
communities = self.community.get_user_personal_communities(self.user)
for com in communities:
self.add_community(com)
def community_selected(self, com):
self.community.remove_member(com, self.user)
class User_Action_List(Action_List):
def __init__(self, gui, get_selected_func):
Action_List.__init__(self)
self.get_selected = get_selected_func
self.community_gui = gui
for event in self.community_gui.user_events:
self.add_event(event)
self.community_gui.register_user_action_list(self)
def add_event(self, event):
(icon, name, callback) = event
self.add_button(icon, name, self.action, callback)
def action(self, callback):
callback(self.get_selected())
def get_default_community_icon(com):
if com == proximatestate.get_ordinary_community(DEFAULT_COMMUNITY_NAME):
fname = get_path(PROXIMATE_COMMUNITY_ICON)
else:
fname = get_path(DEFAULT_COMMUNITY_ICON)
return gtk.gdk.pixbuf_new_from_file(fname)
def get_community_icon(com):
fname = proximatestate.seek_community_icon_name(com)
try:
com_icon = gtk.gdk.pixbuf_new_from_file(fname)
except gobject.GError:
# if we have broken community information (picture missing)
# we must use default icon
com_icon = get_default_community_icon(com)
return com_icon
def create_default_user_picture(user):
# list of suitable colors to pick from
colors = (0x000000FF, 0x8A8A8AFF, 0x9B00AFFF, 0x5DAF00FF,
0x79AF00FF, 0xA8AF00FF, 0xAF9B00FF, 0xAF6000FF,
0xAF0016FF, 0xAF0092FF, 0xBC0086FF, 0x000FBCFF,
0x007403FF, 0x007466FF, 0xD5FFBAFF, 0xFFFFFFFF)
# use default icon and color it using the first char as an index
buf = gtk.gdk.pixbuf_new_from_file(get_path(DEFAULT_USER_ICON))
icon = buf.copy()
color = colors[int(user.get('uid')[0], 16)]
icon.fill(color)
buf.composite(icon, 0, 0, buf.get_width(), buf.get_height(),
0, 0, 1.0, 1.0, gtk.gdk.INTERP_NEAREST, 255)
return icon
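# For example, a uid whose first character is the hex digit 'a' maps to colors[10] (0xBC0086FF),
# so all users whose uid starts with 'a' share the same background color.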
def get_user_profile_picture(user, status_icons=True, center=True):
""" Returns picture saved in user's profile as a GDK Pixbuf,
or the default picture with a background color
generated from uid.
Status icons are added to the picture, if status_icons == True.
"""
try:
icon = gtk.gdk.pixbuf_new_from_file(proximatestate.seek_face_name(user))
except gobject.GError:
icon = create_default_user_picture(user)
if center:
# center image if it's smaller than MAX_FACE_DIMENSION
smaller_dimension = min(icon.get_width(), icon.get_height())
if smaller_dimension < MAX_FACE_DIMENSION:
icon = center_image(icon, MAX_FACE_DIMENSION, MAX_FACE_DIMENSION)
if status_icons:
# add small status icons
community = get_plugin_by_type(PLUGIN_TYPE_COMMUNITY)
if not user == community.get_myself():
if user.get('key_fname'):
status_icon = gtk.gdk.pixbuf_new_from_file(get_path(SMALL_KEYS_ICON))
add_icon_to_image(icon, status_icon, 4)
user_status = user.get('status_icon')
if user_status:
add_icon_to_image(icon, get_status_icon(user_status, 32), 0)
return icon
|
|
"""Allows the creation of a sensor that breaks out state_attributes."""
import logging
from typing import Optional
from itertools import chain
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.components.sensor import (
ENTITY_ID_FORMAT,
PLATFORM_SCHEMA,
DEVICE_CLASSES_SCHEMA,
)
from homeassistant.const import (
ATTR_FRIENDLY_NAME,
ATTR_UNIT_OF_MEASUREMENT,
CONF_VALUE_TEMPLATE,
CONF_ICON_TEMPLATE,
CONF_ENTITY_PICTURE_TEMPLATE,
ATTR_ENTITY_ID,
CONF_SENSORS,
EVENT_HOMEASSISTANT_START,
CONF_FRIENDLY_NAME_TEMPLATE,
MATCH_ALL,
CONF_DEVICE_CLASS,
)
from homeassistant.exceptions import TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity, async_generate_entity_id
from homeassistant.helpers.event import async_track_state_change
from .const import CONF_AVAILABILITY_TEMPLATE
CONF_ATTRIBUTE_TEMPLATES = "attribute_templates"
_LOGGER = logging.getLogger(__name__)
SENSOR_SCHEMA = vol.Schema(
{
vol.Required(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_ICON_TEMPLATE): cv.template,
vol.Optional(CONF_ENTITY_PICTURE_TEMPLATE): cv.template,
vol.Optional(CONF_FRIENDLY_NAME_TEMPLATE): cv.template,
vol.Optional(CONF_AVAILABILITY_TEMPLATE): cv.template,
vol.Optional(CONF_ATTRIBUTE_TEMPLATES, default={}): vol.Schema(
{cv.string: cv.template}
),
vol.Optional(ATTR_FRIENDLY_NAME): cv.string,
vol.Optional(ATTR_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_SENSORS): cv.schema_with_slug_keys(SENSOR_SCHEMA)}
)
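# For reference, a YAML configuration that this schema is written to accept looks roughly like
# the following (sensor name, templates and attribute names are purely illustrative):
#
#   sensor:
#     - platform: template
#       sensors:
#         solar_angle:
#           friendly_name: "Sun angle"
#           unit_of_measurement: "degrees"
#           value_template: "{{ state_attr('sun.sun', 'elevation') }}"
#           attribute_templates:
#             above_horizon: "{{ state_attr('sun.sun', 'elevation') | float > 0 }}"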
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the template sensors."""
sensors = []
for device, device_config in config[CONF_SENSORS].items():
state_template = device_config[CONF_VALUE_TEMPLATE]
icon_template = device_config.get(CONF_ICON_TEMPLATE)
entity_picture_template = device_config.get(CONF_ENTITY_PICTURE_TEMPLATE)
availability_template = device_config.get(CONF_AVAILABILITY_TEMPLATE)
friendly_name = device_config.get(ATTR_FRIENDLY_NAME, device)
friendly_name_template = device_config.get(CONF_FRIENDLY_NAME_TEMPLATE)
unit_of_measurement = device_config.get(ATTR_UNIT_OF_MEASUREMENT)
device_class = device_config.get(CONF_DEVICE_CLASS)
attribute_templates = device_config[CONF_ATTRIBUTE_TEMPLATES]
entity_ids = set()
manual_entity_ids = device_config.get(ATTR_ENTITY_ID)
invalid_templates = []
templates = {
CONF_VALUE_TEMPLATE: state_template,
CONF_ICON_TEMPLATE: icon_template,
CONF_ENTITY_PICTURE_TEMPLATE: entity_picture_template,
CONF_FRIENDLY_NAME_TEMPLATE: friendly_name_template,
CONF_AVAILABILITY_TEMPLATE: availability_template,
}
for tpl_name, template in chain(templates.items(), attribute_templates.items()):
if template is None:
continue
template.hass = hass
if manual_entity_ids is not None:
continue
template_entity_ids = template.extract_entities()
if template_entity_ids == MATCH_ALL:
entity_ids = MATCH_ALL
# Cut off _template from name
invalid_templates.append(tpl_name.replace("_template", ""))
elif entity_ids != MATCH_ALL:
entity_ids |= set(template_entity_ids)
if invalid_templates:
_LOGGER.warning(
"Template sensor %s has no entity ids configured to track nor"
" were we able to extract the entities to track from the %s "
"template(s). This entity will only be able to be updated "
"manually.",
device,
", ".join(invalid_templates),
)
if manual_entity_ids is not None:
entity_ids = manual_entity_ids
elif entity_ids != MATCH_ALL:
entity_ids = list(entity_ids)
sensors.append(
SensorTemplate(
hass,
device,
friendly_name,
friendly_name_template,
unit_of_measurement,
state_template,
icon_template,
entity_picture_template,
availability_template,
entity_ids,
device_class,
attribute_templates,
)
)
async_add_entities(sensors)
return True
class SensorTemplate(Entity):
"""Representation of a Template Sensor."""
def __init__(
self,
hass,
device_id,
friendly_name,
friendly_name_template,
unit_of_measurement,
state_template,
icon_template,
entity_picture_template,
availability_template,
entity_ids,
device_class,
attribute_templates,
):
"""Initialize the sensor."""
self.hass = hass
self.entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, device_id, hass=hass
)
self._name = friendly_name
self._friendly_name_template = friendly_name_template
self._unit_of_measurement = unit_of_measurement
self._template = state_template
self._state = None
self._icon_template = icon_template
self._entity_picture_template = entity_picture_template
self._availability_template = availability_template
self._icon = None
self._entity_picture = None
self._entities = entity_ids
self._device_class = device_class
self._available = True
self._attribute_templates = attribute_templates
self._attributes = {}
async def async_added_to_hass(self):
"""Register callbacks."""
@callback
def template_sensor_state_listener(entity, old_state, new_state):
"""Handle device state changes."""
self.async_schedule_update_ha_state(True)
@callback
def template_sensor_startup(event):
"""Update template on startup."""
if self._entities != MATCH_ALL:
# Track state change only for valid templates
async_track_state_change(
self.hass, self._entities, template_sensor_state_listener
)
self.async_schedule_update_ha_state(True)
self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_START, template_sensor_startup
)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return self._icon
@property
def device_class(self) -> Optional[str]:
"""Return the device class of the sensor."""
return self._device_class
@property
def entity_picture(self):
"""Return the entity_picture to use in the frontend, if any."""
return self._entity_picture
@property
def unit_of_measurement(self):
"""Return the unit_of_measurement of the device."""
return self._unit_of_measurement
@property
def available(self) -> bool:
"""Return if the device is available."""
return self._available
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._attributes
@property
def should_poll(self):
"""No polling needed."""
return False
async def async_update(self):
"""Update the state from the template."""
try:
self._state = self._template.async_render()
self._available = True
except TemplateError as ex:
self._available = False
if ex.args and ex.args[0].startswith(
"UndefinedError: 'None' has no attribute"
):
# Common during HA startup - so just a warning
_LOGGER.warning(
"Could not render template %s," " the state is unknown.", self._name
)
else:
self._state = None
_LOGGER.error("Could not render template %s: %s", self._name, ex)
attrs = {}
for key, value in self._attribute_templates.items():
try:
attrs[key] = value.async_render()
except TemplateError as err:
_LOGGER.error("Error rendering attribute %s: %s", key, err)
self._attributes = attrs
templates = {
"_icon": self._icon_template,
"_entity_picture": self._entity_picture_template,
"_name": self._friendly_name_template,
"_available": self._availability_template,
}
for property_name, template in templates.items():
if template is None:
continue
try:
value = template.async_render()
if property_name == "_available":
value = value.lower() == "true"
setattr(self, property_name, value)
except TemplateError as ex:
friendly_property_name = property_name[1:].replace("_", " ")
if ex.args and ex.args[0].startswith(
"UndefinedError: 'None' has no attribute"
):
# Common during HA startup - so just a warning
_LOGGER.warning(
"Could not render %s template %s," " the state is unknown.",
friendly_property_name,
self._name,
)
continue
try:
setattr(self, property_name, getattr(super(), property_name))
except AttributeError:
_LOGGER.error(
"Could not render %s template %s: %s",
friendly_property_name,
self._name,
ex,
)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import time
from proton import Message
from proton.utils import BlockingConnection
from proton.reactor import AtMostOnce
from proton.reactor import Container
from system_test import TestCase, Qdrouterd
from system_test import QdManager
apply_options = AtMostOnce()
class ManyLogFilesTest(TestCase):
@classmethod
def setUpClass(cls):
super(ManyLogFilesTest, cls).setUpClass()
name = "test-router"
LogLevelUpdateTest.listen_port = cls.tester.get_port()
config = Qdrouterd.Config([
('router', {'mode': 'standalone', 'id': 'QDR'}),
('listener', {'port': LogLevelUpdateTest.listen_port}),
('address', {'prefix': 'closest', 'distribution': 'closest'}),
('address', {'prefix': 'balanced', 'distribution': 'balanced'}),
('address', {'prefix': 'multicast', 'distribution': 'multicast'}),
# We are sending three different module trace logs to three different
# files and we will make sure that these files exist and these
# files contain only logs pertinent to the module in question
('log', {'module': 'SERVER', 'enable': 'trace+',
'includeSource': 'true', 'outputFile': name + '-server.log'}),
('log', {'module': 'ROUTER_CORE', 'enable': 'trace+',
'includeSource': 'true',
'outputFile': name + '-core.log'}),
('log', {'module': 'PROTOCOL', 'enable': 'trace+',
'includeSource': 'true',
'outputFile': name + '-protocol.log'}),
# try two modules to the same file.
# Put the ROUTER_CORE and ROUTER module logs into the same log file
('log', {'module': 'ROUTER', 'enable': 'trace+',
'includeSource': 'true',
'outputFile': name + '-core.log'}),
])
cls.router = cls.tester.qdrouterd(name, config)
cls.router.wait_ready()
cls.address = cls.router.addresses[0]
def test_multiple_log_file(self):
blocking_connection = BlockingConnection(self.address)
TEST_ADDRESS = "test_multiple_log_file"
blocking_receiver = blocking_connection.create_receiver(address=TEST_ADDRESS)
blocking_sender = blocking_connection.create_sender(address=TEST_ADDRESS, options=apply_options)
TEST_MSG = "LOGTEST"
msg = Message(body=TEST_MSG)
blocking_sender.send(msg)
received_message = blocking_receiver.receive()
self.assertEqual(TEST_MSG, received_message.body)
server_log_found = True
all_server_logs = True
try:
with open(self.router.outdir + '/test-router-server.log', 'r') as server_log:
for line in server_log:
parts = line.split(" ")
if parts[3] != "SERVER":
all_server_logs = False
break
except:
server_log_found = False
self.assertTrue(all_server_logs)
self.assertTrue(server_log_found)
protocol_log_found = True
all_protocol_logs = True
try:
with open(self.router.outdir + '/test-router-protocol.log', 'r') as protocol_log:
for line in protocol_log:
parts = line.split(" ")
if parts[3] != "PROTOCOL":
all_protocol_logs = False
break
except:
protocol_log_found = False
self.assertTrue(protocol_log_found)
self.assertTrue(all_protocol_logs)
core_router_log_found = True
all_core_router_logs = True
try:
with open(self.router.outdir + '/test-router-core.log', 'r') as core_log:
for line in core_log:
parts = line.split(" ")
if parts[3] != "ROUTER_CORE" and parts[3] != "ROUTER":
all_core_router_logs = False
break
except:
core_router_log_found = False
self.assertTrue(core_router_log_found)
self.assertTrue(all_core_router_logs)
class LogModuleProtocolTest(TestCase):
@classmethod
def setUpClass(cls):
super(LogModuleProtocolTest, cls).setUpClass()
name = "test-router"
LogModuleProtocolTest.listen_port = cls.tester.get_port()
config = Qdrouterd.Config([
('router', {'mode': 'standalone', 'id': 'QDR'}),
('listener', {'port': LogModuleProtocolTest.listen_port}),
('address', {'prefix': 'closest', 'distribution': 'closest'}),
('address', {'prefix': 'balanced', 'distribution': 'balanced'}),
('address', {'prefix': 'multicast', 'distribution': 'multicast'})
])
cls.router = cls.tester.qdrouterd(name, config)
cls.router.wait_ready()
cls.address = cls.router.addresses[0]
def create_sender_receiver(self, test_address, test_msg, blocking_connection=None):
if not blocking_connection:
blocking_connection = BlockingConnection(self.address)
blocking_receiver = blocking_connection.create_receiver(address=test_address)
blocking_sender = blocking_connection.create_sender(address=test_address, options=apply_options)
msg = Message(body=test_msg)
blocking_sender.send(msg)
received_message = blocking_receiver.receive()
self.assertEqual(test_msg, received_message.body)
def test_turn_on_protocol_trace(self):
hello_world_0 = "Hello World_0!"
qd_manager = QdManager(self, self.address)
blocking_connection = BlockingConnection(self.address)
TEST_ADDR = "moduletest0"
self.create_sender_receiver(TEST_ADDR, hello_world_0, blocking_connection)
num_attaches = 0
logs = qd_manager.get_log()
for log in logs:
if 'PROTOCOL' in log[0]:
if "@attach" in log[2] and TEST_ADDR in log[2]:
num_attaches += 1
# num_attaches for address TEST_ADDR must be 4, two attaches to/from sender and receiver
self.assertTrue(num_attaches == 4)
# Turn off trace logging using qdmanage
qd_manager.update("org.apache.qpid.dispatch.log", {"enable": "info+"}, name="log/DEFAULT")
# Turn on trace (not trace+) level logging for the PROTOCOL module. After doing
# this we will create a sender and a receiver and make sure that the PROTOCOL module
# is emitting proton frame trace messages.
# Before DISPATCH-1558, the only way to turn on proton frame trace logging was to set
# enable to trace on the SERVER or the DEFAULT module. Turning on trace for the SERVER
# module would also spit out dispatch trace level messages from the SERVER module.
# DISPATCH-1558 adds the new PROTOCOL module which moves all protocol traces into
# that module.
qd_manager.update("org.apache.qpid.dispatch.log", {"enable": "trace+"}, name="log/PROTOCOL")
TEST_ADDR = "moduletest1"
hello_world_1 = "Hello World_1!"
self.create_sender_receiver(TEST_ADDR, hello_world_1, blocking_connection)
num_attaches = 0
logs = qd_manager.get_log()
for log in logs:
if 'PROTOCOL' in log[0]:
if "@attach" in log[2] and TEST_ADDR in log[2]:
num_attaches += 1
# num_attaches for address TEST_ADDR must be 4, two attaches to/from sender and receiver
self.assertTrue(num_attaches == 4)
# Now turn off trace logging for the PROTOCOL module and make sure
# that there is no more proton frame trace messages appearing in the log
qd_manager.update("org.apache.qpid.dispatch.log",
{"enable": "info+"}, name="log/PROTOCOL")
TEST_ADDR = "moduletest2"
hello_world_2 = "Hello World_2!"
self.create_sender_receiver(TEST_ADDR, hello_world_2, blocking_connection)
num_attaches = 0
logs = qd_manager.get_log()
for log in logs:
if 'PROTOCOL' in log[0]:
if "@attach" in log[2] and TEST_ADDR in log[2]:
num_attaches += 1
        # num_attaches for address TEST_ADDR must now be 0 since PROTOCOL trace logging was turned off
self.assertTrue(num_attaches == 0)
class EnableConnectionLevelInterRouterTraceTest(TestCase):
inter_router_port = None
@classmethod
def setUpClass(cls):
super(EnableConnectionLevelInterRouterTraceTest, cls).setUpClass()
def router(name, connection):
config = [
('router', {'mode': 'interior', 'id': 'QDR.%s' % name}),
('listener', {'port': cls.tester.get_port()}),
connection
]
config = Qdrouterd.Config(config)
cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))
cls.routers = []
inter_router_port = cls.tester.get_port()
router('A',
('listener', {'role': 'inter-router', 'port': inter_router_port}))
router('B',
('connector', {'name': 'connectorToA', 'role': 'inter-router', 'port': inter_router_port}))
cls.routers[0].wait_router_connected('QDR.B')
cls.routers[1].wait_router_connected('QDR.A')
cls.address = cls.routers[1].addresses[0]
def _get_transfer_frame_count(self, conn_id):
inter_router_cid = "[C" + conn_id + "]"
num_transfers = 0
with open(self.routers[1].logfile_path) as router_log:
for log_line in router_log:
log_components = log_line.split(" ")
if len(log_components) > 8 and 'PROTOCOL' in log_components[3]:
if inter_router_cid in log_components[5] and '@transfer' in log_components[8]:
num_transfers += 1
return num_transfers
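    # Note: the split(" ") indexing above assumes the router's default log line layout, i.e. that
    # the module name ('PROTOCOL') appears in field 3, the connection id ('[C<n>]...') in field 5
    # and the performative (e.g. '@transfer') in field 8 of a space-separated log line.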
def test_inter_router_protocol_trace(self):
qd_manager = QdManager(self, self.address)
# The router already has trace logging turned on for all connections.
# Get the connection id of the inter-router connection
results = qd_manager.query("org.apache.qpid.dispatch.connection")
conn_id = None
for result in results:
if result['role'] == 'inter-router':
conn_id = result['identity']
# Turn off trace logging for the inter-router connection. This update command is run async by the router
# so we need to sleep a bit before the operation is actually completed.
qd_manager.update("org.apache.qpid.dispatch.connection", {"enableProtocolTrace": "false"}, identity=conn_id)
time.sleep(1)
num_transfers = self._get_transfer_frame_count(conn_id)
# Create a receiver. This will send an MAU update to the other router but we should not see any of that
# in the log since the trace logging for the inter-router connection has been turned off.
TEST_ADDR_1 = "EnableConnectionLevelProtocolTraceTest1"
conn_2 = BlockingConnection(self.address)
conn_2.create_receiver(address=TEST_ADDR_1)
# Give some time for the MAU to go over the inter-router connection.
time.sleep(2)
num_transfers_after_update = self._get_transfer_frame_count(conn_id)
# Since there will be no transfer frames printed in the log, there should be no more new transfers in the
# log file.
self.assertEqual(num_transfers_after_update, num_transfers)
# Turn on trace logging for the inter-router connection
qd_manager.update("org.apache.qpid.dispatch.connection", {"enableProtocolTrace": "yes"}, identity=conn_id)
        # Create a receiver and make sure the MAU update IS seen on the inter-router connection log
TEST_ADDR_2 = "EnableConnectionLevelProtocolTraceTest2"
conn_1 = BlockingConnection(self.address)
conn_1.create_receiver(address=TEST_ADDR_2)
# Give time for the MAU to be generated.
time.sleep(2)
num_transfers_after_update = self._get_transfer_frame_count(conn_id)
# Since we have now turned on trace logging for the inter-router connection, we should see
# additional transfer frames in the log and we check that here.
self.assertGreater(num_transfers_after_update, num_transfers)
conn_1.close()
conn_2.close()
class EnableConnectionLevelProtocolTraceTest(TestCase):
@classmethod
def setUpClass(cls):
super(EnableConnectionLevelProtocolTraceTest, cls).setUpClass()
name = "test-router"
LogLevelUpdateTest.listen_port = cls.tester.get_port()
config = Qdrouterd.Config([
('router', {'mode': 'standalone', 'id': 'QDR'}),
('listener', {'port': LogLevelUpdateTest.listen_port}),
])
cls.router = cls.tester.qdrouterd(name, config)
cls.router.wait_ready()
cls.address = cls.router.addresses[0]
def test_enable_protocol_trace_on_non_existent_connection(self):
qd_manager = QdManager(self, self.address)
bad_request = False
try:
# Turn on trace logging for connection with invalid or non-existent identity
outs = qd_manager.update("org.apache.qpid.dispatch.connection", {"enableProtocolTrace": "true"}, identity='G10000')
except Exception as e:
if "BadRequestStatus" in str(e):
bad_request = True
self.assertTrue(bad_request)
def test_single_connection_protocol_trace(self):
qd_manager = QdManager(self, self.address)
# Turn off trace logging on all connections.
qd_manager.update("org.apache.qpid.dispatch.log", {"enable": "info+"},
name="log/DEFAULT")
TEST_ADDR_1 = "EnableConnectionLevelProtocolTraceTest1"
MSG_BODY = "EnableConnectionLevelProtocolTraceTestMessage1"
CONTAINER_ID_1 = "CONTAINERID_1"
container_1 = Container()
container_1.container_id = CONTAINER_ID_1
conn_1 = BlockingConnection(self.address, container=container_1)
TEST_ADDR_2 = "EnableConnectionLevelProtocolTraceTest1"
CONTAINER_ID_2 = "CONTAINERID_2"
container_2 = Container()
container_2.container_id = CONTAINER_ID_2
conn_2 = BlockingConnection(self.address, container=container_2)
results = qd_manager.query("org.apache.qpid.dispatch.connection")
conn_id = None
for result in results:
if result['container'] == CONTAINER_ID_1:
conn_id = result['identity']
# Turn on trace logging for connection with identity conn_id
qd_manager.update("org.apache.qpid.dispatch.connection", {"enableProtocolTrace": "true"}, identity=conn_id)
blocking_receiver_1 = conn_1.create_receiver(address=TEST_ADDR_1)
blocking_sender_1 = conn_1.create_sender(address=TEST_ADDR_1, options=apply_options)
blocking_receiver_2 = conn_2.create_receiver(address=TEST_ADDR_2)
blocking_sender_2 = conn_2.create_sender(address=TEST_ADDR_2, options=apply_options)
num_attaches_1 = 0
num_attaches_2 = 0
logs = qd_manager.get_log()
for log in logs:
if 'PROTOCOL' in log[0]:
if "@attach" in log[2] and TEST_ADDR_1 in log[2]:
num_attaches_1 += 1
elif "@attach" in log[2] and TEST_ADDR_2 in log[2]:
num_attaches_2 += 1
# num_attaches_1 for address TEST_ADDR_1 must be 4: an attach frame in each direction for both the sender and the receiver link
self.assertTrue(num_attaches_1 == 4)
# num_attaches_2 for address TEST_ADDR_2 must be 0 since trace was not
# turned on for that connection
self.assertTrue(num_attaches_2 == 0)
# Now turn off the connection tracing on that connection
qd_manager.update("org.apache.qpid.dispatch.connection",
{"enableProtocolTrace": "off"},
identity=conn_id)
blocking_receiver_1.close()
blocking_sender_1.close()
# Since tracing was turned off, there should be no detaches
logs = qd_manager.get_log()
num_detaches = 0
for log in logs:
if 'PROTOCOL' in log[0]:
if "@detach" in log[2]:
num_detaches += 1
self.assertTrue(num_detaches == 0)
blocking_receiver_2.close()
blocking_sender_2.close()
conn_1.close()
conn_2.close()
class LogLevelUpdateTest(TestCase):
@classmethod
def setUpClass(cls):
super(LogLevelUpdateTest, cls).setUpClass()
name = "test-router"
LogLevelUpdateTest.listen_port = cls.tester.get_port()
config = Qdrouterd.Config([
('router', {'mode': 'standalone', 'id': 'QDR'}),
('listener', {'port': LogLevelUpdateTest.listen_port}),
('address', {'prefix': 'closest', 'distribution': 'closest'}),
('address', {'prefix': 'balanced', 'distribution': 'balanced'}),
('address', {'prefix': 'multicast', 'distribution': 'multicast'})
])
cls.router = cls.tester.qdrouterd(name, config)
cls.router.wait_ready()
cls.address = cls.router.addresses[0]
cls.closest_count = 1
def create_sender_receiver(self, test_address, test_msg, blocking_connection=None):
if not blocking_connection:
blocking_connection = BlockingConnection(self.address)
blocking_receiver = blocking_connection.create_receiver(address=test_address)
blocking_sender = blocking_connection.create_sender(address=test_address, options=apply_options)
msg = Message(body=test_msg)
blocking_sender.send(msg)
received_message = blocking_receiver.receive()
self.assertEqual(test_msg, received_message.body)
def test_01_toggle_default_trace_logging(self):
hello_world_1 = "Hello World_1!"
hello_world_2 = "Hello World_2!"
hello_world_3 = "Hello World_3!"
hello_world_4 = "Hello World_4!"
qd_manager = QdManager(self, self.address)
blocking_connection = BlockingConnection(self.address)
TEST_ADDR = "apachetest1"
self.create_sender_receiver(TEST_ADDR, hello_world_1, blocking_connection)
# STEP 1: Make sure that proton trace logging is turned on already.
# Search for attach frames in the log for address TEST_ADDR. There should be 4 attaches
num_attaches = 0
logs = qd_manager.get_log()
for log in logs:
if 'PROTOCOL' in log[0]:
if "@attach" in log[2] and TEST_ADDR in log[2]:
num_attaches += 1
# num_attaches for address TEST_ADDR must be 4: an attach frame in each direction for both the sender and the receiver link
self.assertTrue(num_attaches == 4)
# STEP 2: Turn off trace logging using qdmanage
qd_manager.update("org.apache.qpid.dispatch.log", {"enable": "info+"}, name="log/DEFAULT")
# STEP 3: Router trace logging is now turned off (it has been set to info+).
# Create the sender and receiver again on a different address and make
# sure that the attaches do NOT show up in the log for that address.
TEST_ADDR = "apachetest2"
self.create_sender_receiver(TEST_ADDR, hello_world_2, blocking_connection)
# STEP 4: Count the number of attaches for address TEST_ADDR; there should be none
num_attaches = 0
logs = qd_manager.get_log()
for log in logs:
if 'PROTOCOL' in log[0]:
if "@attach" in log[2] and TEST_ADDR in log[2]:
num_attaches += 1
# There should be no attach frames with address TEST_ADDR
# because we turned off trace logging.
self.assertTrue(num_attaches == 0)
# STEP 5: Turn trace logging back on again and make sure num_attaches == 4
TEST_ADDR = "apachetest3"
qd_manager.update("org.apache.qpid.dispatch.log", {"enable": "trace+"}, name="log/DEFAULT")
self.create_sender_receiver(TEST_ADDR, hello_world_3, blocking_connection)
# STEP 6: Count the number of attaches for address TEST_ADDR; there should be 4
num_attaches = 0
logs = qd_manager.get_log()
for log in logs:
if 'PROTOCOL' in log[0]:
if "@attach" in log[2] and TEST_ADDR in log[2]:
num_attaches += 1
# There should be 4 attach frames with address TEST_ADDR
# because we turned on trace logging.
self.assertTrue(num_attaches == 4)
# Create a brand new blocking connection and make sure that connection
# is logging at trace level as well.
num_attaches = 0
TEST_ADDR = "apachetest4"
self.create_sender_receiver(TEST_ADDR, hello_world_4)
num_attaches = 0
logs = qd_manager.get_log()
for log in logs:
if 'PROTOCOL' in log[0]:
if "@attach" in log[2] and TEST_ADDR in log[2]:
num_attaches += 1
self.assertTrue(num_attaches == 4)
def test_02_toggle_server_trace_logging(self):
"""
This test is similar to test_01_toggle_default_trace_logging but it tests the
SERVER log level.
"""
hello_world_5 = "Hello World_5!"
hello_world_6 = "Hello World_6!"
hello_world_7 = "Hello World_7!"
TEST_ADDR = "apachetest5"
# Step 1. Turn off trace logging for module DEFAULT and enable trace logging
# for the PROTOCOL module and make sure it works.
qd_manager = QdManager(self, self.address)
# Set log level to info+ on the DEFAULT module
qd_manager.update("org.apache.qpid.dispatch.log", {"enable": "info+"}, name="log/DEFAULT")
# Set log level to trace+ on the PROTOCOL module
qd_manager.update("org.apache.qpid.dispatch.log", {"enable": "trace+"}, name="log/PROTOCOL")
blocking_connection = BlockingConnection(self.address)
self.create_sender_receiver(TEST_ADDR, hello_world_5,
blocking_connection)
# Count the number of attaches for address TEST_ADDR, there should be 4
num_attaches = 0
logs = qd_manager.get_log()
for log in logs:
if 'PROTOCOL' in log[0]:
if "@attach" in log[2] and TEST_ADDR in log[2]:
num_attaches += 1
# There should be 4 attach frames with address TEST_ADDR
# because we turned on trace logging.
self.assertTrue(num_attaches == 4)
TEST_ADDR = "apachetest6"
qd_manager.update("org.apache.qpid.dispatch.log", {"enable": "info+"}, name="log/PROTOCOL")
self.create_sender_receiver(TEST_ADDR, hello_world_6, blocking_connection)
# Count the number of attaches for address TEST_ADDR, there should be 0
num_attaches = 0
logs = qd_manager.get_log()
for log in logs:
if 'PROTOCOL' in log[0]:
if "@attach" in log[2] and TEST_ADDR in log[2]:
num_attaches += 1
self.assertTrue(num_attaches == 0)
# Create a brand new blocking connection and make sure that connection
# is logging at info level as well.
TEST_ADDR = "apachetest7"
self.create_sender_receiver(TEST_ADDR, hello_world_7)
num_attaches = 0
logs = qd_manager.get_log()
for log in logs:
if 'PROTOCOL' in log[0]:
if "@attach" in log[2] and TEST_ADDR in log[2]:
num_attaches += 1
self.assertTrue(num_attaches == 0)
class RouterCoreModuleLogTest(TestCase):
@classmethod
def setUpClass(cls):
super(RouterCoreModuleLogTest, cls).setUpClass()
name = "test-router"
LogLevelUpdateTest.listen_port = cls.tester.get_port()
config = Qdrouterd.Config([
('router', {'mode': 'standalone', 'id': 'QDR'}),
('listener', {'port': LogLevelUpdateTest.listen_port}),
('address', {'prefix': 'closest', 'distribution': 'closest'}),
('address', {'prefix': 'balanced', 'distribution': 'balanced'}),
('address', {'prefix': 'multicast', 'distribution': 'multicast'}),
('log', {'module': 'ROUTER_CORE', 'enable': 'trace+',
'includeSource': 'true',
'outputFile': name + '-core.log'})
])
cls.router = cls.tester.qdrouterd(name, config)
cls.router.wait_ready()
cls.address = cls.router.addresses[0]
def test_router_core_logger(self):
blocking_connection = BlockingConnection(self.address)
TEST_ADDRESS = "test_multiple_log_file"
blocking_receiver = blocking_connection.create_receiver(address=TEST_ADDRESS)
blocking_sender = blocking_connection.create_sender(address=TEST_ADDRESS, options=apply_options)
TEST_MSG_BODY = "LOGTEST"
msg = Message(body=TEST_MSG_BODY)
blocking_sender.send(msg)
received_message = blocking_receiver.receive()
self.assertEqual(TEST_MSG_BODY, received_message.body)
qd_manager = QdManager(self, self.address)
logs = qd_manager.get_log()
router_core_found = False
for log in logs:
if 'ROUTER_CORE' in log[0]:
router_core_found = True
break
self.assertTrue(router_core_found)
core_log_file_found = True
all_lines_router_core = True
try:
# Before the fix to DISPATCH-1575, this file will not be
# created because the router core module was logging to the ROUTER
# module instead of the ROUTER_CORE module.
with open(self.router.outdir + '/test-router-core.log', 'r') as core_log:
for line in core_log:
# Every line in the file must log to the router core module.
if "ROUTER_CORE" not in line:
all_lines_router_core = False
break
except:
core_log_file_found = False
self.assertTrue(core_log_file_found)
self.assertTrue(all_lines_router_core)
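# The PROTOCOL @attach counting loop above recurs throughout these tests. A
# minimal sketch of a shared helper is shown below; it assumes, as the tests
# above do, that QdManager.get_log() yields entries whose first element is the
# log module and whose third element is the logged message.
def count_attaches_for_address(qd_manager, address):
    """Count PROTOCOL-level @attach frames that mention the given address."""
    num_attaches = 0
    for log in qd_manager.get_log():
        if 'PROTOCOL' in log[0] and "@attach" in log[2] and address in log[2]:
            num_attaches += 1
    return num_attaches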
|
|
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glib
import gtk
import time
import logging
import os
from info_bar_gtk import *
from open_dialog_base import OpenDialogBase
class OpenDialogGtk(gtk.Dialog, OpenDialogBase):
def __init__(self, options, db, initial_filter):
gtk.Dialog.__init__(self)
OpenDialogBase.__init__(self, options, db, initial_filter)
self.set_title("Quick open...")
self.set_size_request(1000,400)
self.add_button("_Open",gtk.RESPONSE_OK)
self.add_button("Cancel",gtk.RESPONSE_CANCEL)
model = gtk.ListStore(object)
treeview = gtk.TreeView(model)
treeview.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
treeview.get_selection().connect('changed', self._on_treeview_selection_changed)
self.connect('response', self.response)
text_cell_renderer = gtk.CellRendererText()
def add_column(title,accessor_cb):
column = gtk.TreeViewColumn(title, text_cell_renderer)
column.set_cell_data_func(text_cell_renderer, lambda column, cell, model, iter: cell.set_property('text', accessor_cb(model.get(iter,0)[0])))
treeview.append_column(column)
return column
add_column("Rank",lambda obj: obj[1])
add_column("File",lambda obj: os.path.basename(obj[0]))
add_column("Path",lambda obj: os.path.dirname(obj[0]))
self.connect('destroy', self.on_destroy)
truncated_bar = InfoBarGtk()
bad_result_button = gtk.Button("Bad result")
bad_result_button.connect('clicked', lambda *args: self.on_badresult_clicked())
reindex_button = gtk.Button("_Reindex")
reindex_button.connect('clicked', lambda *args: self.on_reindex_clicked())
status_label = gtk.Label()
self.status_label = status_label
filter_entry = gtk.Entry()
filter_entry.set_text(self._filter_text)
filter_entry.connect('key_press_event', self._on_filter_entry_keypress)
filter_entry.connect('changed', self._on_filter_text_changed)
# attach everything up
vbox = self.vbox
table_vbox = gtk.VBox()
treeview_scroll_window = gtk.ScrolledWindow()
treeview_scroll_window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
table_options_hbox = gtk.HBox()
button_hbox = gtk.HBox()
vbox.pack_start(table_vbox,True,True,1)
table_vbox.pack_start(table_options_hbox,False,False,0)
table_options_hbox.pack_start(status_label,False,False,10)
table_options_hbox.pack_end(bad_result_button,False,False,0)
table_options_hbox.pack_end(reindex_button,False,False,0)
table_vbox.pack_start(treeview_scroll_window,True,True,0)
table_vbox.pack_start(truncated_bar,False,True,0)
table_vbox.pack_start(filter_entry,False,True,0)
treeview_scroll_window.add(treeview)
vbox.show_all()
truncated_bar.hide()
# remember things that need remembering
self._treeview = treeview
self._model = model
self._truncated_bar = truncated_bar
self._filter_entry = filter_entry
filter_entry.grab_focus()
if self.should_position_cursor_for_replace:
filter_entry.set_position(0)
filter_entry.select_region(0, len(self._filter_text))
else:
filter_entry.set_position(len(self._filter_text))
self.show_all()
def response(self, arg, *rest):
canceled = len(rest) > 0 and rest[0] != gtk.RESPONSE_OK
self.on_done(canceled)
def on_destroy(self, *args):
self.response(None, gtk.RESPONSE_CANCEL)
def redirect_to_treeview(self, event):
prev = self.get_focus()
self._treeview.grab_focus()
ret = self._treeview.emit('key_press_event', event)
if prev:
prev.grab_focus()
return True
def _on_filter_entry_keypress(self,entry,event):
keyname = gtk.gdk.keyval_name(event.keyval)
if keyname in ("Up", "Down", "Page_Up", "Page_Down", "Left", "Right"):
return self.redirect_to_treeview(event)
elif keyname == "space" and event.state & gtk.gdk.CONTROL_MASK:
return self.redirect_to_treeview(event)
elif keyname == 'n' and event.state & gtk.gdk.CONTROL_MASK:
self.move_selection(1)
return True
elif keyname == 'p' and event.state & gtk.gdk.CONTROL_MASK:
self.move_selection(-1)
return True
elif keyname == 'a' and event.state & gtk.gdk.CONTROL_MASK:
self._filter_entry.set_position(0)
return True
elif keyname == 'e' and event.state & gtk.gdk.CONTROL_MASK:
self._filter_entry.set_position(len(self._filter_entry.get_text()))
return True
elif keyname == 'f' and event.state & gtk.gdk.CONTROL_MASK:
i = self._filter_entry.get_position()
i = min(i + 1, len(self._filter_entry.get_text()))
self._filter_entry.set_position(i)
return True
elif keyname == 'b' and event.state & gtk.gdk.CONTROL_MASK:
i = self._filter_entry.get_position()
if i >= 1:
self._filter_entry.set_position(i - 1)
return True
elif keyname == 'k' and event.state & gtk.gdk.CONTROL_MASK:
i = self._filter_entry.get_position()
t = self._filter_entry.get_text()[:i]
self._filter_entry.set_text(t)
self._filter_entry.set_position(len(t))
return True
elif keyname == 'Return':
self.response(gtk.RESPONSE_OK)
return True
def _on_filter_text_changed(self,entry):
text = entry.get_text()
self.set_filter_text(text)
def set_results_enabled(self, en):
self._treeview.set_sensitive(en)
self.set_response_sensitive(gtk.RESPONSE_OK, en)
def status_changed(self):
self.status_label.set_text(self.status_text)
# update the model based on result
def update_results_list(self, files, ranks):
if len(files) == 0:
self._model.clear()
return
start_time = time.time()
self._treeview.freeze_child_notify()
self._treeview.set_model(None)
self._model.clear()
for i in range(len(files)):
row = self._model.append()
self._model.set(row, 0, (files[i], ranks[i]))
self._treeview.set_model(self._model)
self._treeview.thaw_child_notify()
truncated = False
if truncated:
self._truncated_bar.text = "Search was truncated at %i items" % len(files)
self._truncated_bar.show()
else:
self._truncated_bar.hide()
elapsed = time.time() - start_time
if len(self._model) > 0:
if self._treeview.get_selection():
self._treeview.get_selection().select_path((0,))
def _on_treeview_selection_changed(self, selection):
self.set_response_sensitive(gtk.RESPONSE_OK,selection.count_selected_rows() != 0)
def move_selection(self, direction):
sel = self.get_selected_indices()
if len(sel) == 0:
if self._model.iter_n_children(None) == 0:
return
self.set_selected_indices([0])
return
if direction > 0:
i = max(sel)
else:
i = min(sel)
i = i + direction
if i < 0:
return
if i >= self._model.iter_n_children(None):
return
self.set_selected_indices([i])
def get_selected_indices(self):
model,rows = self._treeview.get_selection().get_selected_rows()
return [x[0] for x in rows]
def set_selected_indices(self, indices):
sel = self._treeview.get_selection()
for i in self.get_selected_indices():
sel.unselect_path((i,))
for i in indices:
sel.select_path((i,))
def get_selected_items(self):
model,rows = self._treeview.get_selection().get_selected_rows()
files = []
for path in rows:
iter = model.get_iter(path)
obj = model.get(iter,0)[0][0]
files.append(obj)
return files
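# The ListStore above keeps one Python object per row, a (path, rank) tuple
# that the per-column accessor lambdas unpack. A minimal standalone sketch of
# that pattern (hypothetical data; PyGTK assumed available, as above):
def _demo_object_liststore():
    model = gtk.ListStore(object)
    for path, rank in [("/src/foo.py", 0.9), ("/src/bar.py", 0.5)]:
        row = model.append()
        model.set(row, 0, (path, rank))
    treeview = gtk.TreeView(model)
    renderer = gtk.CellRendererText()
    column = gtk.TreeViewColumn("File", renderer)
    # Same cell-data-func pattern as add_column(): pull the stored tuple out of
    # column 0 and render one field of it as text.
    column.set_cell_data_func(
        renderer,
        lambda col, cell, mdl, it: cell.set_property(
            'text', os.path.basename(mdl.get(it, 0)[0][0])))
    treeview.append_column(column)
    return treeview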
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class protocoludp_stats(base_resource) :
ur""" Statistics for UDP Protocol resource.
"""
def __init__(self) :
self._clearstats = ""
self._udptotrxpkts = 0
self._udprxpktsrate = 0
self._udptotrxbytes = 0
self._udprxbytesrate = 0
self._udptottxpkts = 0
self._udptxpktsrate = 0
self._udptottxbytes = 0
self._udptxbytesrate = 0
self._udpcurratethreshold = 0
self._udptotunknownsvcpkts = 0
self._udpbadchecksum = 0
self._udpcurratethresholdexceeds = 0
@property
def clearstats(self) :
ur"""Clear the statistics / counters.<br/>Possible values = basic, full.
"""
try :
return self._clearstats
except Exception as e:
raise e
@clearstats.setter
def clearstats(self, clearstats) :
ur"""Clear the statistics / counters
"""
try :
self._clearstats = clearstats
except Exception as e:
raise e
@property
def udptxpktsrate(self) :
ur"""Rate (/s) counter for udptottxpkts.
"""
try :
return self._udptxpktsrate
except Exception as e:
raise e
@property
def udpcurratethreshold(self) :
ur"""Limit for UDP packets handled every 10 milliseconds. Default value, 0, applies no limit.
This is a configurable value using the set rateControl command.
"""
try :
return self._udpcurratethreshold
except Exception as e:
raise e
@property
def udptotrxpkts(self) :
ur"""Total number of UDP packets received.
"""
try :
return self._udptotrxpkts
except Exception as e:
raise e
@property
def udptottxpkts(self) :
ur"""Total number of UDP packets transmitted.
"""
try :
return self._udptottxpkts
except Exception as e:
raise e
@property
def udptotrxbytes(self) :
ur"""Total number of UDP data received in bytes.
"""
try :
return self._udptotrxbytes
except Exception as e:
raise e
@property
def udptxbytesrate(self) :
ur"""Rate (/s) counter for udptottxbytes.
"""
try :
return self._udptxbytesrate
except Exception as e:
raise e
@property
def udprxpktsrate(self) :
ur"""Rate (/s) counter for udptotrxpkts.
"""
try :
return self._udprxpktsrate
except Exception as e:
raise e
@property
def udpbadchecksum(self) :
ur"""Packets received with a UDP checksum error.
"""
try :
return self._udpbadchecksum
except Exception as e:
raise e
@property
def udptottxbytes(self) :
ur"""Total number of UDP data transmitted in bytes.
"""
try :
return self._udptottxbytes
except Exception as e:
raise e
@property
def udptotunknownsvcpkts(self) :
ur"""Stray UDP packets dropped due to no configured listening service.
"""
try :
return self._udptotunknownsvcpkts
except Exception as e:
raise e
@property
def udprxbytesrate(self) :
ur"""Rate (/s) counter for udptotrxbytes.
"""
try :
return self._udprxbytesrate
except Exception as e:
raise e
@property
def udpcurratethresholdexceeds(self) :
ur"""Number of times the UDP rate threshold is exceeded. If this counter continuously increases, first make sure the UDP packets received are genuine.
If they are, increase the current rate threshold. This is a configurable value using the set rateControl command.
"""
try :
return self._udpcurratethresholdexceeds
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" Converts the nitro response into an object and returns the object array in case of a get request.
"""
try :
result = service.payload_formatter.string_to_resource(protocoludp_response, response, self.__class__.__name__.replace('_stats',''))
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.protocoludp
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
return 0
except Exception as e :
raise e
@classmethod
def get(cls, service, name="", option_="") :
ur""" Use this API to fetch the statistics of all protocoludp_stats resources that are configured on netscaler.
"""
try :
obj = protocoludp_stats()
if not name :
response = obj.stat_resources(service, option_)
return response
except Exception as e:
raise e
class Clearstats:
basic = "basic"
full = "full"
class protocoludp_response(base_response) :
def __init__(self, length=1) :
self.protocoludp = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.protocoludp = [protocoludp_stats() for _ in range(length)]
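# A minimal usage sketch (not part of the generated resource): fetching the
# UDP statistics through a nitro_service session. The address and credentials
# are placeholders; nitro_service is assumed to be importable from this SDK.
def _example_fetch_udp_stats(nsip="10.0.0.1", user="nsroot", password="nsroot"):
    from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
    client = nitro_service(nsip, "http")
    client.login(user, password)
    try:
        stats = protocoludp_stats.get(client)
        # get() wraps stat_resources(); normalize to a list before reading the
        # counters in case a single object is returned.
        if not isinstance(stats, list):
            stats = [stats]
        return [(s.udptotrxpkts, s.udptottxpkts, s.udpbadchecksum) for s in stats]
    finally:
        client.logout()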
|
|
# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
from ryu import utils
from ryu.lib import type_desc
from ryu.ofproto import nicira_ext
from ryu.ofproto import ofproto_common
from ryu.lib.pack_utils import msg_pack_into
from ryu.ofproto.ofproto_parser import StringifyMixin
def generate(ofp_name, ofpp_name):
import sys
import string
import functools
ofp = sys.modules[ofp_name]
ofpp = sys.modules[ofpp_name]
class _NXFlowSpec(StringifyMixin):
_hdr_fmt_str = '!H' # 2 bit 0s, 1 bit src, 2 bit dst, 11 bit n_bits
_dst_type = None
_subclasses = {}
def __init__(self, src, dst, n_bits):
self.src = src
self.dst = dst
self.n_bits = n_bits
@classmethod
def register(cls, subcls):
assert issubclass(subcls, cls)
assert subcls._dst_type not in cls._subclasses
cls._subclasses[subcls._dst_type] = subcls
@classmethod
def parse(cls, buf):
(hdr,) = struct.unpack_from(cls._hdr_fmt_str, buf, 0)
rest = buf[struct.calcsize(cls._hdr_fmt_str):]
if hdr == 0:
return None, rest # all-0 header is no-op for padding
src_type = (hdr >> 13) & 0x1
dst_type = (hdr >> 11) & 0x3
n_bits = hdr & 0x3ff
subcls = cls._subclasses[dst_type]
if src_type == 0: # subfield
src = cls._parse_subfield(rest)
rest = rest[6:]
elif src_type == 1: # immediate
src_len = (n_bits + 15) // 16 * 2
src_bin = rest[:src_len]
src = type_desc.IntDescr(size=src_len).to_user(src_bin)
rest = rest[src_len:]
if dst_type == 0: # match
dst = cls._parse_subfield(rest)
rest = rest[6:]
elif dst_type == 1: # load
dst = cls._parse_subfield(rest)
rest = rest[6:]
elif dst_type == 2: # output
dst = '' # empty
return subcls(src=src, dst=dst, n_bits=n_bits), rest
def serialize(self):
buf = bytearray()
if isinstance(self.src, tuple):
src_type = 0 # subfield
else:
src_type = 1 # immediate
# header
val = (src_type << 13) | (self._dst_type << 11) | self.n_bits
msg_pack_into(self._hdr_fmt_str, buf, 0, val)
# src
if src_type == 0: # subfield
buf += self._serialize_subfield(self.src)
elif src_type == 1: # immediate
src_len = (self.n_bits + 15) // 16 * 2
buf += type_desc.IntDescr(size=src_len).from_user(self.src)
# dst
if self._dst_type == 0: # match
buf += self._serialize_subfield(self.dst)
elif self._dst_type == 1: # load
buf += self._serialize_subfield(self.dst)
elif self._dst_type == 2: # output
pass # empty
return buf
@staticmethod
def _parse_subfield(buf):
(n, len) = ofp.oxm_parse_header(buf, 0)
assert len == 4 # only 4-byte NXM/OXM are defined
field = ofp.oxm_to_user_header(n)
rest = buf[len:]
(ofs,) = struct.unpack_from('!H', rest, 0)
return (field, ofs)
@staticmethod
def _serialize_subfield(subfield):
(field, ofs) = subfield
buf = bytearray()
n = ofp.oxm_from_user_header(field)
ofp.oxm_serialize_header(n, buf, 0)
assert len(buf) == 4 # only 4-byte NXM/OXM are defined
msg_pack_into('!H', buf, 4, ofs)
return buf
class NXFlowSpecMatch(_NXFlowSpec):
# Add a match criteria
# an example of the corresponding ovs-ofctl syntax:
# NXM_OF_VLAN_TCI[0..11]
_dst_type = 0
class NXFlowSpecLoad(_NXFlowSpec):
# Add NXAST_REG_LOAD actions
# an example of the corresponding ovs-ofctl syntax:
# NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[]
_dst_type = 1
class NXFlowSpecOutput(_NXFlowSpec):
# Add an OFPAT_OUTPUT action
# an example of the corresponding ovs-ofctl syntax:
# output:NXM_OF_IN_PORT[]
_dst_type = 2
def __init__(self, src, n_bits, dst=''):
assert dst == ''
super(NXFlowSpecOutput, self).__init__(src=src, dst=dst,
n_bits=n_bits)
class NXAction(ofpp.OFPActionExperimenter):
_fmt_str = '!H' # subtype
_subtypes = {}
_experimenter = ofproto_common.NX_EXPERIMENTER_ID
def __init__(self):
super(NXAction, self).__init__(experimenter=self._experimenter)
self.subtype = self._subtype
@classmethod
def parse(cls, buf):
fmt_str = NXAction._fmt_str
(subtype,) = struct.unpack_from(fmt_str, buf, 0)
subtype_cls = cls._subtypes.get(subtype)
rest = buf[struct.calcsize(fmt_str):]
if subtype_cls is None:
return NXActionUnknown(subtype, rest)
return subtype_cls.parse(rest)
def serialize(self, buf, offset):
super(NXAction, self).serialize(buf, offset)
msg_pack_into(NXAction._fmt_str,
buf,
offset + ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE,
self.subtype)
@classmethod
def register(cls, subtype_cls):
assert subtype_cls._subtype not in cls._subtypes
cls._subtypes[subtype_cls._subtype] = subtype_cls
class NXActionUnknown(NXAction):
def __init__(self, subtype, data=None,
type_=None, len_=None, experimenter=None):
self._subtype = subtype
super(NXActionUnknown, self).__init__()
self.data = data
@classmethod
def parse(cls, subtype, buf):
return cls(data=buf)
def serialize(self, buf, offset):
# fixup
data = self.data
if data is None:
data = bytearray()
payload_offset = (
ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE +
struct.calcsize(NXAction._fmt_str)
)
self.len = utils.round_up(payload_offset + len(data), 8)
super(NXActionUnknown, self).serialize(buf, offset)
buf += data
class NXActionRegMove(NXAction):
_subtype = nicira_ext.NXAST_REG_MOVE
_fmt_str = '!HHH' # n_bits, src_ofs, dst_ofs
# Followed by OXM fields (src, dst) and padding to 8 bytes boundary
def __init__(self, src_field, dst_field, n_bits, src_ofs=0, dst_ofs=0,
type_=None, len_=None, experimenter=None, subtype=None):
super(NXActionRegMove, self).__init__()
self.n_bits = n_bits
self.src_ofs = src_ofs
self.dst_ofs = dst_ofs
self.src_field = src_field
self.dst_field = dst_field
@classmethod
def parse(cls, buf):
(n_bits, src_ofs, dst_ofs,) = struct.unpack_from(
NXActionRegMove._fmt_str, buf, 0)
rest = buf[struct.calcsize(NXActionRegMove._fmt_str):]
# src field
(n, len) = ofp.oxm_parse_header(rest, 0)
src_field = ofp.oxm_to_user_header(n)
rest = rest[len:]
# dst field
(n, len) = ofp.oxm_parse_header(rest, 0)
dst_field = ofp.oxm_to_user_header(n)
rest = rest[len:]
# ignore padding
return cls(src_field, dst_field=dst_field, n_bits=n_bits,
src_ofs=src_ofs, dst_ofs=dst_ofs)
def serialize(self, buf, offset):
# fixup
data = bytearray()
msg_pack_into(NXActionRegMove._fmt_str, data, 0,
self.n_bits, self.src_ofs, self.dst_ofs)
# src field
n = ofp.oxm_from_user_header(self.src_field)
ofp.oxm_serialize_header(n, data, len(data))
# dst field
n = ofp.oxm_from_user_header(self.dst_field)
ofp.oxm_serialize_header(n, data, len(data))
payload_offset = (
ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE +
struct.calcsize(NXAction._fmt_str)
)
self.len = utils.round_up(payload_offset + len(data), 8)
super(NXActionRegMove, self).serialize(buf, offset)
msg_pack_into('!%ds' % len(data), buf, offset + payload_offset,
bytes(data))
class NXActionLearn(NXAction):
_subtype = nicira_ext.NXAST_LEARN
# idle_timeout, hard_timeout, priority, cookie, flags,
# table_id, pad, fin_idle_timeout, fin_hard_timeout
_fmt_str = '!HHHQHBxHH'
# Followed by flow_mod_specs
def __init__(self,
table_id,
specs,
idle_timeout=0,
hard_timeout=0,
priority=ofp.OFP_DEFAULT_PRIORITY,
cookie=0,
flags=0,
fin_idle_timeout=0,
fin_hard_timeout=0,
type_=None, len_=None, experimenter=None, subtype=None):
super(NXActionLearn, self).__init__()
self.idle_timeout = idle_timeout
self.hard_timeout = hard_timeout
self.priority = priority
self.cookie = cookie
self.flags = flags
self.table_id = table_id
self.fin_idle_timeout = fin_idle_timeout
self.fin_hard_timeout = fin_hard_timeout
self.specs = specs
@classmethod
def parse(cls, buf):
(idle_timeout,
hard_timeout,
priority,
cookie,
flags,
table_id,
fin_idle_timeout,
fin_hard_timeout,) = struct.unpack_from(
NXActionLearn._fmt_str, buf, 0)
rest = buf[struct.calcsize(NXActionLearn._fmt_str):]
# specs
specs = []
while len(rest) > 0:
spec, rest = _NXFlowSpec.parse(rest)
if spec is None:
continue
specs.append(spec)
return cls(idle_timeout=idle_timeout,
hard_timeout=hard_timeout,
priority=priority,
cookie=cookie,
flags=flags,
table_id=table_id,
fin_idle_timeout=fin_idle_timeout,
fin_hard_timeout=fin_hard_timeout,
specs=specs)
def serialize(self, buf, offset):
# fixup
data = bytearray()
msg_pack_into(NXActionLearn._fmt_str, data, 0,
self.idle_timeout,
self.hard_timeout,
self.priority,
self.cookie,
self.flags,
self.table_id,
self.fin_idle_timeout,
self.fin_hard_timeout)
for spec in self.specs:
data += spec.serialize()
payload_offset = (
ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE +
struct.calcsize(NXAction._fmt_str)
)
self.len = utils.round_up(payload_offset + len(data), 8)
super(NXActionLearn, self).serialize(buf, offset)
msg_pack_into('!%ds' % len(data), buf, offset + payload_offset,
bytes(data))
def add_attr(k, v):
setattr(ofpp, k, v)
add_attr('NXAction', NXAction)
add_attr('NXActionUnknown', NXActionUnknown)
classes = [
'NXActionRegMove',
'NXActionLearn',
'_NXFlowSpec', # exported for testing
'NXFlowSpecMatch',
'NXFlowSpecLoad',
'NXFlowSpecOutput',
]
vars = locals()
for name in classes:
cls = vars[name]
add_attr(name, cls)
if issubclass(cls, NXAction):
NXAction.register(cls)
if issubclass(cls, _NXFlowSpec):
_NXFlowSpec.register(cls)
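# A minimal usage sketch, outside generate(): once generate() has been run for
# a concrete OpenFlow version (Ryu does this when the ofproto parser modules
# are loaded), the registered classes are reachable from that parser module.
# The field names, offsets and bit widths below are illustrative only; they
# roughly mirror the ovs-ofctl form
#   learn(table=1, NXM_OF_VLAN_TCI[0..11], output:NXM_OF_IN_PORT[]).
def _example_learn_action(ofpp):
    specs = [
        ofpp.NXFlowSpecMatch(src=('vlan_vid', 0), dst=('vlan_vid', 0), n_bits=12),
        ofpp.NXFlowSpecOutput(src=('in_port', 0), n_bits=32),
    ]
    return ofpp.NXActionLearn(table_id=1, specs=specs)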
|
|
#!/usr/bin/env python
from __future__ import print_function
import subprocess
import argparse
import json
import errno
import os
import shutil
import fnmatch
import hashlib
ARDUINO_H = """/**
* This is a generated file required by the Arduino build system.
*/
#include "pumbaa.h"
"""
SIMBA_GEN_C_FMT = """
#include "simba.h"
const FAR char sysinfo[] = "app: myapp built - by -.\\r\\n"
"board: {board}\\r\\n"
"mcu: {mcu}\\r\\n";
"""
INTERACTIVE_INO = """#
# The interactive interpreter starts automatically
# when this script ends.
#
"""
def mkdir_p(path):
"""Recursively create directories.
"""
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def create_database():
"""Generate the pumbaa database with board and mcu information.
"""
return json.loads(subprocess.check_output(["bin/dbgen.py"]))
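# Shape of the database consumed below, inferred from the accesses in this
# script (values are illustrative only):
#
# {
#     "boards": {
#         "arduino_due": {
#             "mcu": "sam3x8e",
#             "src": [...], "inc": [...], "cdefs": [...],
#             "cflags": [...], "cxxflags": [...], "ldflags": [...],
#             "libpath": [...], "lib": [...]
#         },
#         ...
#     },
#     "mcus": {
#         "sam3x8e": {"family": "sam", ...},
#         ...
#     }
# }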
def copy_tools():
"""Copy the frozen generation scripts to the tools folder.
"""
pumbaa_root = os.environ["PUMBAA_ROOT"]
os.makedirs("tools")
shutil.copy(os.path.join(pumbaa_root, "bin", "compile_ino.py"), "tools")
shutil.copy(os.path.join(pumbaa_root, "bin", "make_frozen.py"), "tools")
def generate_cores(family, database):
"""Generate the cores directory, shared among all boards.
"""
cores_dir = os.path.join("cores", "pumbaa")
# Create the cores directory.
mkdir_p(cores_dir)
with open(os.path.join(cores_dir, "Arduino.h"), "w") as fout:
fout.write(ARDUINO_H)
pumbaa_root = os.environ["PUMBAA_ROOT"]
cores_srcs = None
for board in database["boards"].values():
mcu = board["mcu"]
if database["mcus"][mcu]["family"] != family:
continue
if cores_srcs is None:
cores_srcs = set(board["src"])
else:
cores_srcs = cores_srcs & set(board["src"])
if family == "avr":
board = "arduino_pro_micro"
elif family == "sam":
board = "arduino_due"
elif family == "esp":
board = "esp01"
elif family == "esp32":
board = "nano32"
else:
raise ValueError("{}: bad family".format(family))
# Copy all source files, except those in boards and mcus that are
# variant specific. Use any board in the given family.
for src in cores_srcs:
dst_dir = os.path.join(cores_dir, os.path.dirname(src))
mkdir_p(dst_dir)
shutil.copy(os.path.join(pumbaa_root, src), dst_dir)
# Copy all header files.
for inc in database["boards"][board]["inc"]:
inc_dir = os.path.join(pumbaa_root, inc)
for root, _, filenames in os.walk(inc_dir):
for filename in fnmatch.filter(filenames, '*.[hi]'):
file_path = os.path.join(root, filename)
file_dir = os.path.dirname(file_path)
cores_file_dir = file_dir.replace(pumbaa_root + "/", "")
mkdir_p(os.path.join(cores_dir, cores_file_dir))
shutil.copy(file_path,
os.path.join(cores_dir, cores_file_dir))
# Copy c-files that are included into another c-file.
c_files = [
"micropython/extmod/crypto-algorithms/sha256.c",
"micropython/extmod/uzlib/adler32.c",
"micropython/extmod/uzlib/crc32.c",
"micropython/extmod/uzlib/tinfgzip.c",
"micropython/extmod/uzlib/tinflate.c",
"micropython/extmod/uzlib/tinfzlib.c"
]
for c_file in c_files:
src_path = os.path.join(pumbaa_root, c_file)
dst_dir = os.path.join(cores_dir, os.path.dirname(c_file))
mkdir_p(dst_dir)
shutil.copy(src_path, dst_dir)
# Various files.
root_files = [
"LICENSE",
"README.rst",
"VERSION.txt"
]
for root_file in root_files:
shutil.copy(os.path.join(pumbaa_root, root_file), ".")
return cores_srcs
def generate_variants(family, database, cores_srcs):
"""Generate the variants directory with board unique information.
"""
pumbaa_root = os.environ["PUMBAA_ROOT"]
print("Generating variants for family", family)
for board_name, config in database['boards'].items():
if database["mcus"][config["mcu"]]["family"] != family:
continue
variant_dir = os.path.join("variants", board_name)
# Create the variant directory.
mkdir_p(variant_dir)
# Copy variant specific source files; those in "boards" and
# "mcus". Other source files are copied in cores.
for src in config["src"]:
if src in cores_srcs:
continue
dst_dir = os.path.join(variant_dir, os.path.dirname(src))
mkdir_p(dst_dir)
shutil.copy(os.path.join(pumbaa_root, src), dst_dir)
# Copy all linker script files.
for libpath in config["libpath"]:
libpath_dir = os.path.join(pumbaa_root, libpath)
for root, _, filenames in os.walk(libpath_dir):
for filename in fnmatch.filter(filenames, '*.ld'):
file_path = os.path.join(root, filename)
file_dir = os.path.dirname(file_path)
variant_file_dir = file_dir.replace(pumbaa_root + "/",
"")
mkdir_p(os.path.join(variant_dir, variant_file_dir))
shutil.copy(file_path,
os.path.join(variant_dir, variant_file_dir))
with open(os.path.join(variant_dir, "pumbaa_gen.c"), "w") as fout:
fout.write(SIMBA_GEN_C_FMT.format(name="my_app",
board=board_name,
mcu=config["mcu"]))
# Generate the QSTR file for the default configuration.
default_configuration_dir = os.path.join(pumbaa_root,
"examples",
"default-configuration")
subprocess.check_call(["make",
"-s",
"BOARD=" + board_name],
cwd=default_configuration_dir)
qstr_file = os.path.join(default_configuration_dir,
"build",
board_name,
"gen",
"genhdr",
"qstrdefs.generated.h")
genhdr_dir = os.path.join(variant_dir, "genhdr")
os.makedirs(genhdr_dir)
shutil.copy(qstr_file, genhdr_dir)
def generate_examples():
"""Generate the examples directory.
libraries/Pumbaa/examples/<example folder>
"""
pumbaa_root = os.environ["PUMBAA_ROOT"]
pumbaa_examples_dir = os.path.join(pumbaa_root, 'examples')
arduino_pumbaa_path = os.path.join('libraries', 'Pumbaa')
arduino_examples_path = os.path.join(arduino_pumbaa_path, 'examples')
os.makedirs(arduino_examples_path)
with open(os.path.join(arduino_pumbaa_path, "Pumbaa.h"), "w") as fout:
fout.write("/* Generated file required by Arduino IDE. */")
examples = [
"blink",
"hello_world",
"interactive"
]
for example in examples:
# Create the .ino-file.
pumbaa_example_path = os.path.join(pumbaa_examples_dir, example)
arduino_example_path = os.path.join(arduino_examples_path, example)
os.makedirs(arduino_example_path)
main_py = os.path.join(pumbaa_example_path, "main.py")
ino_file = os.path.join(arduino_example_path, example + ".ino")
with open(ino_file, "w") as fout:
if os.path.exists(main_py):
with open(main_py) as fin:
fout.write(fin.read())
else:
fout.write(INTERACTIVE_INO)
def get_c_extra_flags(board, database):
"""Get include path, defines and flags to the compiler.
"""
incs = database["boards"][board]["inc"]
cdefs = (database["boards"][board]["cdefs"] +
['MICROPY_MODULE_FROZEN_STR=1'] +
['MICROPY_MODULE_FROZEN_MPY=0'])
cflags = []
for flag in database["boards"][board]["cflags"]:
if "-mforce-l32" in flag:
continue
cflags.append(flag)
return " ".join(cflags
+ ["\"-I{runtime.platform.path}/cores/pumbaa/" + inc + "\""
for inc in incs]
+ ["-D" + d for d in cdefs])
def get_cxx_extra_flags(board, database):
"""Get include path, defines and flags to the compiler.
"""
incs = database["boards"][board]["inc"]
cdefs = (database["boards"][board]["cdefs"] +
['MICROPY_MODULE_FROZEN_STR=1'] +
['MICROPY_MODULE_FROZEN_MPY=0'])
cxxflags = database["boards"][board]["cxxflags"]
return " ".join(cxxflags
+ ["\"-I{runtime.platform.path}/cores/pumbaa/" + inc + "\""
for inc in incs]
+ ["-D" + d for d in cdefs])
def get_c_elf_extra_flags(board, database):
"""Get library path, defines and flags to the linker.
"""
libpaths = database["boards"][board]["libpath"]
ldflags = database["boards"][board]["ldflags"]
ldflags = [ldflag for ldflag in ldflags if "-Wl,-Map" not in ldflag]
return " ".join(ldflags
+ ["\"-L{runtime.platform.path}/variants/" + board + "/" + libpath + "\""
for libpath in libpaths])
def get_c_elf_libs(board, database):
"""Get libraries.
"""
libs = database["boards"][board]["lib"]
return " ".join(["-l" + lib for lib in libs])
def generate_boards_txt_sam(database, boards_txt_fmt):
"""Generate boards.txt for SAM.
"""
return boards_txt_fmt.format(
arduino_due_x_dbg_compiler_c_extra_flags=get_c_extra_flags(
"arduino_due",
database),
arduino_due_x_dbg_compiler_cxx_extra_flags=get_cxx_extra_flags(
"arduino_due",
database),
arduino_due_x_dbg_compiler_c_elf_extra_flags=get_c_elf_extra_flags(
"arduino_due",
database))
def generate_boards_txt_esp(database, boards_txt_fmt):
"""Generate boards.txt for ESP.
"""
# ESP SDK libraries are copied to this location.
libpath = "-L{runtime.platform.path}/lib"
esp01_compiler_c_elf_extra_flags = get_c_elf_extra_flags("esp01", database)
esp01_compiler_c_elf_extra_flags += " "
esp01_compiler_c_elf_extra_flags += libpath
esp12e_compiler_c_elf_extra_flags = get_c_elf_extra_flags("esp12e", database)
esp12e_compiler_c_elf_extra_flags += " "
esp12e_compiler_c_elf_extra_flags += libpath
return boards_txt_fmt.format(
esp01_compiler_c_extra_flags=get_c_extra_flags("esp01", database),
esp01_compiler_cxx_extra_flags=get_cxx_extra_flags("esp01", database),
esp01_compiler_c_elf_extra_flags=esp01_compiler_c_elf_extra_flags,
esp01_compiler_c_elf_libs=get_c_elf_libs("esp01", database),
esp12e_compiler_c_extra_flags=get_c_extra_flags("esp12e", database),
esp12e_compiler_cxx_extra_flags=get_cxx_extra_flags("esp12e", database),
esp12e_compiler_c_elf_extra_flags=esp12e_compiler_c_elf_extra_flags,
esp12e_compiler_c_elf_libs=get_c_elf_libs("esp12e", database))
def generate_boards_txt_esp32(database, boards_txt_fmt):
"""Generate boards.txt for ESP32.
"""
# ESP SDK libraries are copied to this location.
libpath = "-L{runtime.platform.path}/lib"
nano32_compiler_c_elf_extra_flags = get_c_elf_extra_flags("nano32", database)
nano32_compiler_c_elf_extra_flags += " "
nano32_compiler_c_elf_extra_flags += libpath
esp32_devkitc_compiler_c_elf_extra_flags = get_c_elf_extra_flags("esp32_devkitc", database)
esp32_devkitc_compiler_c_elf_extra_flags += " "
esp32_devkitc_compiler_c_elf_extra_flags += libpath
return boards_txt_fmt.format(
nano32_compiler_c_extra_flags=get_c_extra_flags("nano32", database),
nano32_compiler_cxx_extra_flags=get_cxx_extra_flags("nano32", database),
nano32_compiler_c_elf_extra_flags=nano32_compiler_c_elf_extra_flags,
nano32_compiler_c_elf_libs=get_c_elf_libs("nano32", database),
esp32_devkitc_compiler_c_extra_flags=get_c_extra_flags("esp32_devkitc", database),
esp32_devkitc_compiler_cxx_extra_flags=get_cxx_extra_flags("esp32_devkitc", database),
esp32_devkitc_compiler_c_elf_extra_flags=esp32_devkitc_compiler_c_elf_extra_flags,
esp32_devkitc_compiler_c_elf_libs=get_c_elf_libs("esp32_devkitc", database))
def generate_configuration_files(family, database):
"""Generate and copy configuration files.
"""
pumbaa_root = os.environ["PUMBAA_ROOT"]
family_dir = os.path.join(pumbaa_root,
"make",
"arduino",
family)
configuration_files = [
"platform.txt"
]
for configuration_file in configuration_files:
shutil.copy(os.path.join(family_dir, configuration_file), ".")
with open("boards.txt", "w") as fout:
with open(os.path.join(family_dir, "boards.txt"), "r") as fin:
if family == "sam":
boards_txt = generate_boards_txt_sam(database, fin.read())
elif family == "esp":
boards_txt = generate_boards_txt_esp(database, fin.read())
elif family == "esp32":
boards_txt = generate_boards_txt_esp32(database, fin.read())
else:
raise ValueError("Unsupported family {}.".format(family))
fout.write(boards_txt)
def generate_extra(family, database):
"""Generate extra files that do not fit into any other generation
function.
"""
pumbaa_root = os.environ["PUMBAA_ROOT"]
if family == "esp":
# Copy all libraries.
libpaths = database["boards"]["esp01"]["libpath"]
mkdir_p("lib")
for lib in database["boards"]["esp01"]["lib"]:
for libpath in libpaths:
libpath_dir = os.path.join(pumbaa_root, libpath)
for root, _, filenames in os.walk(libpath_dir):
for filename in filenames:
if filename != "lib" + lib + ".a":
continue
file_path = os.path.join(root, filename)
shutil.copy(file_path, "lib")
break
# Copy eboot (bootloader).
eboot_dir = os.path.join("bootloaders", "eboot")
mkdir_p(eboot_dir)
shutil.copy(os.path.join(pumbaa_root,
"simba",
"3pp",
"esp8266Arduino",
"2.3.0",
"bootloaders",
"eboot",
"eboot.elf"),
eboot_dir)
elif family == "esp32":
# Copy all libraries.
libpaths = database["boards"]["nano32"]["libpath"]
mkdir_p("lib")
for lib in database["boards"]["nano32"]["lib"]:
for libpath in libpaths:
libpath_dir = os.path.join(pumbaa_root, libpath)
for root, _, filenames in os.walk(libpath_dir):
for filename in filenames:
if filename != "lib" + lib + ".a":
continue
file_path = os.path.join(root, filename)
shutil.copy(file_path, "lib")
break
# Copy bootloader and partition table.
mkdir_p("bin")
for filename in ["bootloader.bin", "partitions_singleapp.bin"]:
shutil.copy(os.path.join(pumbaa_root,
"simba",
"3pp",
"esp32",
"bin",
filename),
"bin")
# Copy esptool.
mkdir_p("tools")
shutil.copy(os.path.join(pumbaa_root,
"simba",
"3pp",
"esp32",
"esp-idf",
"components",
"esptool_py",
"esptool",
"esptool.py"),
"tools")
def generate_files_and_folders(family, database, outdir):
"""Generate files and folders.
"""
os.makedirs(outdir)
cwd = os.getcwd()
os.chdir(outdir)
copy_tools()
cores_srcs = generate_cores(family, database)
generate_variants(family, database, cores_srcs)
generate_examples()
generate_configuration_files(family, database)
generate_extra(family, database)
os.chdir(cwd)
def main():
"""Package Pumbaa for the Arduino IDE.
"""
parser = argparse.ArgumentParser()
parser.add_argument("--remove-outdir", "-r", action="store_true")
parser.add_argument("--outdir", default="pumbaa-arduino")
parser.add_argument("--version", required=True)
args = parser.parse_args()
if args.remove_outdir:
if os.path.exists(args.outdir):
print("Removing", args.outdir)
shutil.rmtree(args.outdir)
print("Creating software database.")
database = create_database()
print("Writing to " + args.outdir + ".")
# esp requires -mforce-l32 which is not part of the toolchain.
for family in ["sam", "esp32"]:
packages_family_dir = os.path.join(args.outdir,
"packages",
"Pumbaa",
family)
generate_files_and_folders(family,
database,
packages_family_dir)
# Create release archives and their sha256 sum.
temporary_family_dir = os.path.join(
args.outdir,
"pumbaa-arduino-" + family)
shutil.copytree(packages_family_dir, temporary_family_dir)
archive_path_no_suffix = os.path.join(
args.outdir,
"pumbaa-arduino-{family}-{version}".format(family=family,
version=args.version))
shutil.make_archive(archive_path_no_suffix,
"zip",
args.outdir,
"pumbaa-arduino-" + family)
shutil.rmtree(temporary_family_dir)
with open(archive_path_no_suffix + ".zip.sha256", "w") as fout:
with open(archive_path_no_suffix + ".zip", "rb") as fin:
fout.write(hashlib.sha256(fin.read()).hexdigest())
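# A minimal sketch of a hypothetical helper (not called by main()): the release
# step above writes "<archive>.zip.sha256" containing only the hex digest, so
# verifying an archive is a plain digest comparison.
def verify_archive_sha256(archive_path):
    with open(archive_path + ".sha256") as fin:
        expected = fin.read().strip()
    with open(archive_path, "rb") as fin:
        actual = hashlib.sha256(fin.read()).hexdigest()
    return actual == expected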
if __name__ == "__main__":
main()
|
|
#!/usr/bin/env python
# Copyright (c) 2006-2010 Tampere University of Technology
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Analyses test models, gives numerical information about their reachable portion and an estimate about the same information for their composition. All estimates are upper bounds.
"""
import os
import sys
import optparse
import tema.model.model
from tema.model import getModelType,loadModel
ACTIONS = 'actions'
ACTIONWORDS = 'action words'
STATES = 'states'
SLEEPSTATES = 'sleep states'
TRANSITIONS = 'transitions'
STATEPROPOSITIONS = 'state propositions'
STATEPROPOSITION_COMBINATIONS = 'state proposition combinations'
SLEEPING_STATEPROPOSITION_COMBINATIONS = 'sleeping state proposition combinations'
ACTIONWORD_STATEPROPOSITION_COMBINATIONS = 'action word - state proposition combinations'
ORDER = [ACTIONS, ACTIONWORDS, STATES, SLEEPSTATES, TRANSITIONS, STATEPROPOSITIONS, STATEPROPOSITION_COMBINATIONS, SLEEPING_STATEPROPOSITION_COMBINATIONS, ACTIONWORD_STATEPROPOSITION_COMBINATIONS]
COMMONS_MULTITARGET = {ACTIONS:0, ACTIONWORDS:0, STATES:1, TRANSITIONS:0, STATEPROPOSITIONS:1, STATEPROPOSITION_COMBINATIONS:2, ACTIONWORD_STATEPROPOSITION_COMBINATIONS:0}
COMMONS_SINGLETARGET = {ACTIONS:6, ACTIONWORDS:1, STATES:6, TRANSITIONS:6, STATEPROPOSITIONS:3, STATEPROPOSITION_COMBINATIONS:3, ACTIONWORD_STATEPROPOSITION_COMBINATIONS:1}
def analyseModel(model):
for prop in model.getInitialState().getStateProps():
if str(prop).endswith('SwitcherBase'):
base_prop = prop
break
else:
base_prop = None
actions = set()
states = set()
sleepstates = set()
transitions = set()
stateprops = set()
stateprop_combs = set()
sleep_stateprop_combs = set()
aw_stateprop_combs = set()
def isSleepState():
return base_prop in current_props
def isActionWord(action):
return str(action).find(':start_aw') != -1
stack = [model.getInitialState()]
while len(stack) > 0:
state = stack.pop()
current_props = frozenset(state.getStateProps())
states.add(state)
if isSleepState():
sleepstates.add(state)
for stateprop in current_props:
stateprops.add(stateprop)
stateprop_combs.add(current_props)
if isSleepState():
sleep_stateprop_combs.add(current_props)
for transition in state.getOutTransitions():
current_action = transition.getAction()
actions.add(current_action)
transitions.add(transition)
if isActionWord(current_action):
aw_stateprop_combs.add((current_action, current_props))
if transition.getDestState() not in states:
stack.append(transition.getDestState())
result = {}
result[ACTIONS] = len(actions)
result[ACTIONWORDS] = len([a for a in actions if isActionWord(a)])
result[STATES] = len(states)
result[SLEEPSTATES] = len(sleepstates)
result[TRANSITIONS] = len(transitions)
result[STATEPROPOSITIONS] = len(stateprops)
result[STATEPROPOSITION_COMBINATIONS] = len(stateprop_combs)
result[SLEEPING_STATEPROPOSITION_COMBINATIONS] = len(sleep_stateprop_combs)
result[ACTIONWORD_STATEPROPOSITION_COMBINATIONS] = len(aw_stateprop_combs)
return result
def calculateTotalResults(modelresults, commons = COMMONS_MULTITARGET):
totalresults = {SLEEPSTATES:1, SLEEPING_STATEPROPOSITION_COMBINATIONS:1}
totalresults.update(commons)
for result in modelresults:
totalresults[SLEEPSTATES] *= result[SLEEPSTATES]
totalresults[SLEEPING_STATEPROPOSITION_COMBINATIONS] *= result[SLEEPING_STATEPROPOSITION_COMBINATIONS]
totalresults[STATES] *= totalresults[SLEEPSTATES]
totalresults[TRANSITIONS] *= totalresults[SLEEPSTATES]
totalresults[STATEPROPOSITION_COMBINATIONS] *= totalresults[SLEEPING_STATEPROPOSITION_COMBINATIONS]
totalresults[ACTIONWORD_STATEPROPOSITION_COMBINATIONS] *= totalresults[SLEEPING_STATEPROPOSITION_COMBINATIONS]
for result in modelresults:
external_sleepstates = totalresults[SLEEPSTATES] / result[SLEEPSTATES]
external_sleeppropcombinations = totalresults[SLEEPING_STATEPROPOSITION_COMBINATIONS] / result[SLEEPING_STATEPROPOSITION_COMBINATIONS]
totalresults[ACTIONS] += result[ACTIONS] - commons[ACTIONS]
totalresults[ACTIONWORDS] += result[ACTIONWORDS] - commons[ACTIONWORDS]
totalresults[STATES] += (result[STATES] - result[SLEEPSTATES] * commons[STATES]) * external_sleepstates
totalresults[TRANSITIONS] += (result[TRANSITIONS] - result[SLEEPSTATES] * commons[TRANSITIONS]) * external_sleepstates
totalresults[STATEPROPOSITIONS] += result[STATEPROPOSITIONS] - commons[STATEPROPOSITIONS]
totalresults[STATEPROPOSITION_COMBINATIONS] += (result[STATEPROPOSITION_COMBINATIONS] - result[SLEEPING_STATEPROPOSITION_COMBINATIONS] * commons[STATEPROPOSITION_COMBINATIONS]) * \
external_sleeppropcombinations
totalresults[ACTIONWORD_STATEPROPOSITION_COMBINATIONS] += (result[ACTIONWORD_STATEPROPOSITION_COMBINATIONS] - \
result[SLEEPING_STATEPROPOSITION_COMBINATIONS] * commons[ACTIONWORD_STATEPROPOSITION_COMBINATIONS]) * \
external_sleeppropcombinations
return totalresults
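# A small worked example of the upper-bound arithmetic above, with
# COMMONS_MULTITARGET and two illustrative models:
#   A: 10 states, 2 sleep states      B: 20 states, 4 sleep states
# The sleep-state product is 2 * 4 = 8, so the composed STATES estimate starts
# at 1 * 8 = 8 and each model then adds
#   (states - sleepstates * commons[STATES]) * (total sleepstates / own sleepstates)
#   A: (10 - 2 * 1) * (8 / 2) = 32    B: (20 - 4 * 1) * (8 / 4) = 32
# giving an estimated upper bound of 8 + 32 + 32 = 72 reachable states for the
# composition.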
def analyseModels(models, commons = COMMONS_MULTITARGET):
modelresults = []
for model in models:
if isinstance(model, tema.model.model.Model):
modelresults.append(analyseModel(model))
else:
modelresults.append(model)
totalresults = calculateTotalResults(modelresults, commons)
return (modelresults, totalresults)
def parseresult(string):
result = {}
for line in string.strip().split(os.linesep):
i = line.rfind(':')
if i != -1:
try:
result[line[:i].strip()] = int(line[i+1:])
except ValueError:
pass
return result
def printresult(name, results):
print name + ':'
for id in ORDER: #results.keys():
print id + ': ' + str(results[id])
def readArgs():
usagemessage = "usage: %prog structure [options] [filenames]"
description = "If no filenames are given or filename is -, reads from standard input.\nstructure=multi|single"
parser = optparse.OptionParser(usage=usagemessage,description=description)
parser.add_option("-f", "--format", action="store", type="str",
help="Format of the model file")
options, args = parser.parse_args(sys.argv[1:])
if len(args) > 0 and args[0] in ["multi","single"]:
structure = args[0]
else:
parser.error("Unknown structure parameter")
args = args[1:]
if len(args) == 0:
args.append("-")
elif "-" in args and len(args) > 1:
parser.error("Can't read from stdin and from files at the same time")
if not options.format and "-" in args:
parser.error("Reading from standard input requires format parameter")
return structure,args,options
def main():
structure,files,options = readArgs()
commons = {'multi':COMMONS_MULTITARGET, 'single':COMMONS_SINGLETARGET}[structure]
models = []
for filename in files:
if options.format:
modelType = options.format
else:
modelType = getModelType(filename)
if modelType is None and filename.endswith('.analysis'):
file = open(filename)
try:
content = file.read()
finally:
file.close()
models.append(parseresult(content))
elif modelType is None:
print >>sys.stderr, "%s: Error. Unknown model type. Specify model type using '-f'" % os.path.basename(sys.argv[0])
sys.exit(1)
else:
model = None
if filename == "-":
file = sys.stdin
else:
file = open(filename)
try:
model = loadModel(modelType,file)
finally:
file.close()
models.append(model)
results = analyseModels(models, commons)
print
for nameresult in zip(files, results[0]):
printresult(nameresult[0], nameresult[1])
print
printresult('Estimated total', results[1])
print
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
sys.exit(1)
|
|
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simulation variable data class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from makani.analysis.util.simdata_analysis import bootstrap
from makani.analysis.util.simdata_analysis.statistic import Statistic
from makani.analysis.util.simdata_analysis.utils import ascii_histogram as hist
from makani.analysis.util.simdata_analysis.utils import ecdf
from makani.analysis.util.simdata_analysis.utils import prob_above_thr
from makani.analysis.util.simdata_analysis.utils import prob_below_thr
import matplotlib.pyplot as plt
import numpy as np
NRESAMPLES = 1000 # Default resamples.
class VariableData(object):
"""Encapsulates simulation variable data.
"""
def __init__(self, variable_info, table_info, var_df, data):
"""Initializes the object.
Data is resampled here so it is readily available. The default number
of samples equals the number of samples in the data, and the number of
resamples is the package default NRESAMPLES.
Args:
variable_info: Dictionary with information about the variable.
table_info: Dictionary with information about the table.
var_df: DataFrame containing data about the variable.
data: Numpy array containing the numerical data.
"""
assert isinstance(variable_info, dict)
assert isinstance(table_info, dict)
for field in ['name']:
assert field in variable_info
for field in ['title', 'index', 'num_jobs']:
assert field in table_info
self.variable_info = variable_info
self.table_info = table_info
self.var_df = var_df
self.data = data
self.n_samples = len(self.data)
self.n_resamples = NRESAMPLES
self.bootstrapped_data = bootstrap.bootstrap(self.data, self.n_samples,
self.n_resamples)
self.n_valid_samples = len(self.data[~np.isnan(self.data)])
def resample(self, n_samples=None, n_resamples=None):
"""Resamples the data.
Args:
n_samples: Number of samples. If None, the default number of
samples is used (number of samples in original data).
n_resamples: Number of resamples. If None, the default number of
resamples is used (package value of NRESAMPLES).
"""
if n_samples is None:
n_samples = len(self.data)
if n_resamples is None:
n_resamples = NRESAMPLES
self.n_samples = n_samples
self.n_resamples = n_resamples
self.bootstrapped_data = bootstrap.bootstrap(self.data, self.n_samples,
self.n_resamples)
def text_histogram(self, *args):
"""Returns a multi-line string with a vertical histogram of self.data.
Args:
*args: Additional arguments to be passed to utils.ascii_histogram.
"""
if self.n_valid_samples == 0:
return '\n'
return hist(self.data[~np.isnan(self.data)], *args)
def mean(self):
"""Returns a Statistic object representing the mean.
"""
return Statistic(self.bootstrapped_data, np.mean)
def std(self):
"""Returns a Statistic object representing the standard deviation.
"""
return Statistic(self.bootstrapped_data, np.std)
def percentile(self, pctile):
"""Returns a Statistic object representing a percentile.
Args:
pctile: Percentile (between 0 and 100).
"""
assert pctile >= 0. and pctile <= 100.
return Statistic(self.bootstrapped_data,
lambda x: np.percentile(x, pctile))
def prob_above(self, thr):
"""Probability of the variable being above a threshold.
The probability is computed using 1 - ECDF(thr).
Args:
thr: Threshold value.
Returns:
Statistic object.
"""
return Statistic(self.bootstrapped_data, lambda x: prob_above_thr(x, thr))
def prob_below(self, thr):
"""Probability of the variable being below a threshold.
The probability is computed using ECDF(thr).
Args:
thr: Threshold value.
Returns:
Statistic object.
"""
return Statistic(self.bootstrapped_data, lambda x: prob_below_thr(x, thr))
def hist(self, **hist_kw):
"""Returns figure with histogram.
Args:
**hist_kw: Keywords to be passed to matplotlib.pyplot.hist().
Returns:
matplotlib figure.
"""
plt.hist(self.data[~np.isnan(self.data)], **hist_kw)
plt.xlabel(self.variable_info['name'])
plt.ylabel('Samples')
return plt.gcf()
def ecdf_data(self):
"""Computes the data for the ECDF.
Returns:
Tuple (xs, (mean_ecdf, lower_ecdf, upper_ecdf)):
- xs: Values where the ECDF is computed.
- mean_ecdf: Mean ECDF.
- lower_ecdf: Lower bound of the ECDF (95% confidence).
- upper_ecdf: Upper bound of the ECDF (95% confidence).
"""
    # The apply_along_axis call below returns a 3D array:
# resampled_ecdf_data[i,0,:] is the x values for resample i
# resampled_ecdf_data[i,1,:] is the CDF values for resample i
resampled_ecdf_data = np.apply_along_axis(ecdf, 1, self.bootstrapped_data)
# Get some datapoints.
xs, _ = ecdf(self.data[~np.isnan(self.data)])
xs = np.linspace(xs[0], xs[-1], 200) # Resample evenly.
# Interpolate the CDFs
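    # For each x, evaluate every resampled ECDF at x (linear interpolation,
    # clipped to [0, 1]) and summarize across resamples: the mean gives the
    # central estimate and the 2.5th/97.5th percentiles give the 95%
    # confidence band described in the docstring.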
ecdf_mean_bounds = np.empty((3, len(xs)))
for idx, x in enumerate(xs):
data = np.empty(self.n_resamples)
for i in range(self.n_resamples):
data[i] = np.interp(x, resampled_ecdf_data[i, 0, :],
resampled_ecdf_data[i, 1, :], left=0, right=1.)
ecdf_mean_bounds[0, idx] = np.mean(data)
ecdf_mean_bounds[1, idx] = np.percentile(data, 2.5)
ecdf_mean_bounds[2, idx] = np.percentile(data, 97.5)
return xs, ecdf_mean_bounds
def ecdf(self, plot_bounds=True, **plot_kws):
"""Returns figure with the ECDF.
Args:
plot_bounds: Flag to plot the 95% confidence bounds.
**plot_kws: Keywords to be passed to matplotlib.pyplot.plot().
Returns:
matplotlib figure.
"""
xs, ecdf_mean_bounds = self.ecdf_data()
p = plt.plot(xs, ecdf_mean_bounds[0, :], **plot_kws)
if plot_bounds:
plt.plot(xs, ecdf_mean_bounds[1, :], '--', color=p[-1].get_color())
plt.plot(xs, ecdf_mean_bounds[2, :], '--', color=p[-1].get_color())
plt.xlabel(self.variable_info['name'])
plt.ylabel('CDF')
plt.yticks(np.linspace(0, 1, 11))
plt.ylim(0, 1)
plt.grid()
return plt.gcf()
def __repr__(self):
return self.__str__()
def __str__(self):
lines = 'Table: {0}\n'.format(self.table_info['title'])
lines += 'Table index: {0}\n'.format(self.table_info['index'])
lines += 'Number of samples: {0} ({1} valid)\n'.format(
self.n_samples, self.n_valid_samples)
lines += 'Range (min, max): ({0}, {1})\n'.format(min(self.data),
max(self.data))
lines += 'Histogram: \n'
lines += self.text_histogram()
return lines
class ScoreData(VariableData):
"""Encapsulates simulation score data.
"""
def __init__(self, score_info, table_info, score_df):
"""Initializes the object.
Args:
score_info: Dictionary with information about the score.
table_info: Dictionary with information about the table.
score_df: DataFrame containing the columns:
- score: score values.
- job_id: job_ids identifying the scores.
- folder: Folder that originated the data (useful for imports from
multiple files, and therefore repeated job_ids).
"""
assert set(score_df.columns.values) == set(['score', 'job_id', 'folder'])
assert isinstance(score_info, dict)
assert isinstance(table_info, dict)
for field in ['name', 'index', 'units', 'severity', 'experimental']:
assert field in score_info
super(ScoreData, self).__init__(score_info, table_info, score_df,
np.array(score_df['score']))
def __repr__(self):
return self.__str__()
def __str__(self):
lines = 'Variable: {0}\n'.format(self.variable_info['name'])
lines += 'Variable index: {0}\n'.format(self.variable_info['index'])
lines += 'Units: {0}\n'.format(self.variable_info['units'])
lines += 'Severity: {0}\n'.format(self.variable_info['severity'])
lines += 'Experimental: {0}\n'.format(self.variable_info['experimental'])
return lines + super(ScoreData, self).__str__()
class InputData(VariableData):
"""Encapsulates simulation input data.
"""
def __init__(self, input_info, table_info, input_df):
"""Initializes the object.
Args:
input_info: Dictionary with information about the input variable.
table_info: Dictionary with information about the table.
input_df: DataFrame containing the columns:
- value: Input values.
- job_id: job_ids identifying the input.
- folder: Folder that originated the data (useful for imports from
multiple files, and therefore repeated job_ids).
"""
assert set(input_df.columns.values) == set(['value', 'job_id', 'folder'])
assert isinstance(input_info, dict)
assert isinstance(table_info, dict)
for field in ['name']:
assert field in input_info
super(InputData, self).__init__(input_info, table_info, input_df,
np.array(input_df['value']))
def __repr__(self):
return self.__str__()
def __str__(self):
lines = 'Input: {0}\n'.format(self.variable_info['name'])
return lines + super(InputData, self).__str__()
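# Minimal usage sketch for the classes above. The dictionaries and the
# DataFrame are hypothetical inputs (only the fields asserted in
# VariableData.__init__ are filled in), and the Statistic objects returned by
# mean() and prob_above() are simply printed, assuming they have a readable
# repr; treat this as an illustration rather than part of the library.
if __name__ == '__main__':
  import pandas as pd
  _demo_data = np.random.normal(loc=10.0, scale=2.0, size=500)
  _demo_df = pd.DataFrame({'value': _demo_data})
  demo_var = VariableData(
      {'name': 'demo_variable'},
      {'title': 'demo table', 'index': 0, 'num_jobs': 500},
      _demo_df, _demo_data)
  print(demo_var.mean())
  print(demo_var.prob_above(12.0))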
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import copy
import fixtures
import mock
from oslo_config import cfg
from oslo_utils import uuidutils
from webob import exc
from neutron.api.v2 import attributes as attr
from neutron import context
from neutron.db import api as dbapi
from neutron.db import flavors_db
from neutron.db import l3_db
from neutron.db import servicetype_db
from neutron.extensions import flavors
from neutron.plugins.common import constants
from neutron.services.flavors import flavors_plugin
from neutron.services import provider_configuration as provconf
from neutron.tests import base
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron.tests.unit.extensions import base as extension
_uuid = uuidutils.generate_uuid
_get_path = test_base._get_path
_driver = ('neutron.tests.unit.extensions.test_flavors.'
'DummyServiceDriver')
_provider = 'dummy'
_long_name = 'x' * (attr.NAME_MAX_LEN + 1)
_long_description = 'x' * (attr.LONG_DESCRIPTION_MAX_LEN + 1)
class FlavorExtensionTestCase(extension.ExtensionTestCase):
def setUp(self):
super(FlavorExtensionTestCase, self).setUp()
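        # _setUpExtension mounts the flavors extension on a test WSGI API and
        # replaces the plugin with a mock, so the tests below exercise request
        # routing and validation, then assert on the resulting plugin calls
        # instead of touching a database.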
self._setUpExtension(
'neutron.services.flavors.flavors_plugin.FlavorsPlugin',
constants.FLAVORS, flavors.RESOURCE_ATTRIBUTE_MAP,
flavors.Flavors, '', supported_extension_aliases='flavors')
def test_create_flavor(self):
tenant_id = uuidutils.generate_uuid()
        # Use service_type FLAVORS because service types are only valid if the
        # corresponding plugin is loaded, and the flavors plugin is loaded here.
data = {'flavor': {'name': 'GOLD',
'service_type': constants.FLAVORS,
'description': 'the best flavor',
'tenant_id': tenant_id,
'enabled': True}}
expected = copy.deepcopy(data)
expected['flavor']['service_profiles'] = []
instance = self.plugin.return_value
instance.create_flavor.return_value = expected['flavor']
res = self.api.post(_get_path('flavors', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_flavor.assert_called_with(mock.ANY,
flavor=expected)
res = self.deserialize(res)
self.assertIn('flavor', res)
self.assertEqual(expected, res)
def test_create_flavor_invalid_service_type(self):
tenant_id = uuidutils.generate_uuid()
data = {'flavor': {'name': 'GOLD',
'service_type': 'BROKEN',
'description': 'the best flavor',
'tenant_id': tenant_id,
'enabled': True}}
self.api.post(_get_path('flavors', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_create_flavor_too_long_name(self):
tenant_id = uuidutils.generate_uuid()
data = {'flavor': {'name': _long_name,
'service_type': constants.FLAVORS,
'description': 'the best flavor',
'tenant_id': tenant_id,
'enabled': True}}
self.api.post(_get_path('flavors', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_create_flavor_too_long_description(self):
tenant_id = uuidutils.generate_uuid()
data = {'flavor': {'name': _long_name,
'service_type': constants.FLAVORS,
'description': _long_description,
'tenant_id': tenant_id,
'enabled': True}}
self.api.post(_get_path('flavors', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_create_flavor_invalid_enabled(self):
tenant_id = uuidutils.generate_uuid()
data = {'flavor': {'name': _long_name,
'service_type': constants.FLAVORS,
'description': 'the best flavor',
'tenant_id': tenant_id,
'enabled': 'BROKEN'}}
self.api.post(_get_path('flavors', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_update_flavor(self):
flavor_id = 'fake_id'
data = {'flavor': {'name': 'GOLD',
'description': 'the best flavor',
'enabled': True}}
        expected = copy.deepcopy(data)
expected['flavor']['service_profiles'] = []
instance = self.plugin.return_value
instance.update_flavor.return_value = expected['flavor']
res = self.api.put(_get_path('flavors', id=flavor_id, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.update_flavor.assert_called_with(mock.ANY,
flavor_id,
flavor=expected)
res = self.deserialize(res)
self.assertIn('flavor', res)
self.assertEqual(expected, res)
def test_update_flavor_too_long_name(self):
flavor_id = 'fake_id'
data = {'flavor': {'name': _long_name,
'description': 'the best flavor',
'enabled': True}}
self.api.put(_get_path('flavors', id=flavor_id, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_update_flavor_too_long_description(self):
flavor_id = 'fake_id'
data = {'flavor': {'name': 'GOLD',
'description': _long_description,
'enabled': True}}
self.api.put(_get_path('flavors', id=flavor_id, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_update_flavor_invalid_enabled(self):
flavor_id = 'fake_id'
data = {'flavor': {'name': 'GOLD',
'description': _long_description,
'enabled': 'BROKEN'}}
self.api.put(_get_path('flavors', id=flavor_id, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_delete_flavor(self):
flavor_id = 'fake_id'
instance = self.plugin.return_value
self.api.delete(_get_path('flavors', id=flavor_id, fmt=self.fmt),
content_type='application/%s' % self.fmt)
instance.delete_flavor.assert_called_with(mock.ANY,
flavor_id)
def test_show_flavor(self):
flavor_id = 'fake_id'
expected = {'flavor': {'id': flavor_id,
'name': 'GOLD',
'description': 'the best flavor',
'enabled': True,
'service_profiles': ['profile-1']}}
instance = self.plugin.return_value
instance.get_flavor.return_value = expected['flavor']
res = self.api.get(_get_path('flavors', id=flavor_id, fmt=self.fmt))
instance.get_flavor.assert_called_with(mock.ANY,
flavor_id,
fields=mock.ANY)
res = self.deserialize(res)
self.assertEqual(expected, res)
def test_get_flavors(self):
data = {'flavors': [{'id': 'id1',
'name': 'GOLD',
'description': 'the best flavor',
'enabled': True,
'service_profiles': ['profile-1']},
{'id': 'id2',
'name': 'GOLD',
'description': 'the best flavor',
'enabled': True,
'service_profiles': ['profile-2', 'profile-1']}]}
instance = self.plugin.return_value
instance.get_flavors.return_value = data['flavors']
res = self.api.get(_get_path('flavors', fmt=self.fmt))
instance.get_flavors.assert_called_with(mock.ANY,
fields=mock.ANY,
filters=mock.ANY)
res = self.deserialize(res)
self.assertEqual(data, res)
def test_create_service_profile(self):
tenant_id = uuidutils.generate_uuid()
expected = {'service_profile': {'description': 'the best sp',
'driver': '',
'tenant_id': tenant_id,
'enabled': True,
'metainfo': '{"data": "value"}'}}
instance = self.plugin.return_value
instance.create_service_profile.return_value = (
expected['service_profile'])
res = self.api.post(_get_path('service_profiles', fmt=self.fmt),
self.serialize(expected),
content_type='application/%s' % self.fmt)
instance.create_service_profile.assert_called_with(
mock.ANY,
service_profile=expected)
res = self.deserialize(res)
self.assertIn('service_profile', res)
self.assertEqual(expected, res)
def test_create_service_profile_too_long_description(self):
tenant_id = uuidutils.generate_uuid()
expected = {'service_profile': {'description': _long_description,
'driver': '',
'tenant_id': tenant_id,
'enabled': True,
'metainfo': '{"data": "value"}'}}
self.api.post(_get_path('service_profiles', fmt=self.fmt),
self.serialize(expected),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_create_service_profile_too_long_driver(self):
tenant_id = uuidutils.generate_uuid()
expected = {'service_profile': {'description': 'the best sp',
'driver': _long_description,
'tenant_id': tenant_id,
'enabled': True,
'metainfo': '{"data": "value"}'}}
self.api.post(_get_path('service_profiles', fmt=self.fmt),
self.serialize(expected),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_create_service_profile_invalid_enabled(self):
tenant_id = uuidutils.generate_uuid()
expected = {'service_profile': {'description': 'the best sp',
'driver': '',
'tenant_id': tenant_id,
'enabled': 'BROKEN',
'metainfo': '{"data": "value"}'}}
self.api.post(_get_path('service_profiles', fmt=self.fmt),
self.serialize(expected),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_update_service_profile(self):
sp_id = "fake_id"
expected = {'service_profile': {'description': 'the best sp',
'enabled': False,
'metainfo': '{"data1": "value3"}'}}
instance = self.plugin.return_value
instance.update_service_profile.return_value = (
expected['service_profile'])
res = self.api.put(_get_path('service_profiles',
id=sp_id, fmt=self.fmt),
self.serialize(expected),
content_type='application/%s' % self.fmt)
instance.update_service_profile.assert_called_with(
mock.ANY,
sp_id,
service_profile=expected)
res = self.deserialize(res)
self.assertIn('service_profile', res)
self.assertEqual(expected, res)
def test_update_service_profile_too_long_description(self):
sp_id = "fake_id"
        expected = {'service_profile': {'description': _long_description,
                                        'enabled': True,
                                        'metainfo': '{"data1": "value3"}'}}
self.api.put(_get_path('service_profiles',
id=sp_id, fmt=self.fmt),
self.serialize(expected),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_update_service_profile_invalid_enabled(self):
sp_id = "fake_id"
expected = {'service_profile': {'description': 'the best sp',
'enabled': 'BROKEN',
'metainfo': '{"data1": "value3"}'}}
self.api.put(_get_path('service_profiles',
id=sp_id, fmt=self.fmt),
self.serialize(expected),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_delete_service_profile(self):
sp_id = 'fake_id'
instance = self.plugin.return_value
self.api.delete(_get_path('service_profiles', id=sp_id, fmt=self.fmt),
content_type='application/%s' % self.fmt)
instance.delete_service_profile.assert_called_with(mock.ANY,
sp_id)
def test_show_service_profile(self):
sp_id = 'fake_id'
expected = {'service_profile': {'id': 'id1',
'driver': _driver,
'description': 'desc',
'metainfo': '{}',
'enabled': True}}
instance = self.plugin.return_value
instance.get_service_profile.return_value = (
expected['service_profile'])
res = self.api.get(_get_path('service_profiles',
id=sp_id, fmt=self.fmt))
instance.get_service_profile.assert_called_with(mock.ANY,
sp_id,
fields=mock.ANY)
res = self.deserialize(res)
self.assertEqual(expected, res)
def test_get_service_profiles(self):
expected = {'service_profiles': [{'id': 'id1',
'driver': _driver,
'description': 'desc',
'metainfo': '{}',
'enabled': True},
{'id': 'id2',
'driver': _driver,
'description': 'desc',
'metainfo': '{}',
'enabled': True}]}
instance = self.plugin.return_value
instance.get_service_profiles.return_value = (
expected['service_profiles'])
res = self.api.get(_get_path('service_profiles', fmt=self.fmt))
instance.get_service_profiles.assert_called_with(mock.ANY,
fields=mock.ANY,
filters=mock.ANY)
res = self.deserialize(res)
self.assertEqual(expected, res)
def test_associate_service_profile_with_flavor(self):
tenant_id = uuidutils.generate_uuid()
expected = {'service_profile': {'id': _uuid(),
'tenant_id': tenant_id}}
instance = self.plugin.return_value
instance.create_flavor_service_profile.return_value = (
expected['service_profile'])
res = self.api.post('/flavors/fl_id/service_profiles',
self.serialize(expected),
content_type='application/%s' % self.fmt)
instance.create_flavor_service_profile.assert_called_with(
mock.ANY, service_profile=expected, flavor_id='fl_id')
res = self.deserialize(res)
self.assertEqual(expected, res)
def test_disassociate_service_profile_with_flavor(self):
instance = self.plugin.return_value
instance.delete_flavor_service_profile.return_value = None
self.api.delete('/flavors/fl_id/service_profiles/%s' % 'fake_spid',
content_type='application/%s' % self.fmt)
instance.delete_flavor_service_profile.assert_called_with(
mock.ANY,
'fake_spid',
flavor_id='fl_id')
def test_update_association_error(self):
"""Confirm that update is not permitted with user error."""
new_id = uuidutils.generate_uuid()
data = {'service_profile': {'id': new_id}}
self.api.put('/flavors/fl_id/service_profiles/%s' % 'fake_spid',
self.serialize(data),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
class DummyCorePlugin(object):
pass
class DummyServicePlugin(object):
def driver_loaded(self, driver, service_profile):
pass
@classmethod
def get_plugin_type(cls):
return constants.DUMMY
def get_plugin_description(self):
return "Dummy service plugin, aware of flavors"
class DummyServiceDriver(object):
@staticmethod
def get_service_type():
return constants.DUMMY
def __init__(self, plugin):
pass
class FlavorPluginTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
base.PluginFixture):
def setUp(self):
super(FlavorPluginTestCase, self).setUp()
self.config_parse()
cfg.CONF.set_override(
'core_plugin',
'neutron.tests.unit.extensions.test_flavors.DummyCorePlugin')
cfg.CONF.set_override(
'service_plugins',
['neutron.tests.unit.extensions.test_flavors.DummyServicePlugin'])
self.useFixture(
fixtures.MonkeyPatch('neutron.manager.NeutronManager._instance'))
self.plugin = flavors_plugin.FlavorsPlugin()
self.ctx = context.get_admin_context()
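        # Providers are registered as "<service_type>:<name>:<driver>"
        # strings, the same format used by neutron's service_providers
        # configuration option.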
providers = [DummyServiceDriver.get_service_type() +
":" + _provider + ":" + _driver]
self.service_manager = servicetype_db.ServiceTypeManager.get_instance()
self.service_providers = mock.patch.object(
provconf.NeutronModule, 'service_providers').start()
self.service_providers.return_value = providers
for provider in providers:
self.service_manager.add_provider_configuration(
provider.split(':')[0], provconf.ProviderConfiguration())
dbapi.context_manager.get_legacy_facade().get_engine()
def _create_flavor(self, description=None):
flavor = {'flavor': {'name': 'GOLD',
'service_type': constants.DUMMY,
'description': description or 'the best flavor',
'enabled': True}}
return self.plugin.create_flavor(self.ctx, flavor), flavor
def test_create_flavor(self):
self._create_flavor()
res = self.ctx.session.query(flavors_db.Flavor).all()
self.assertEqual(1, len(res))
self.assertEqual('GOLD', res[0]['name'])
self.assertEqual(constants.DUMMY, res[0]['service_type'])
def test_update_flavor(self):
fl, flavor = self._create_flavor()
flavor = {'flavor': {'name': 'Silver',
'enabled': False}}
self.plugin.update_flavor(self.ctx, fl['id'], flavor)
res = (self.ctx.session.query(flavors_db.Flavor).
filter_by(id=fl['id']).one())
self.assertEqual('Silver', res['name'])
self.assertFalse(res['enabled'])
def test_delete_flavor(self):
fl, data = self._create_flavor()
self.plugin.delete_flavor(self.ctx, fl['id'])
res = (self.ctx.session.query(flavors_db.Flavor).all())
self.assertFalse(res)
def test_show_flavor(self):
fl, data = self._create_flavor()
show_fl = self.plugin.get_flavor(self.ctx, fl['id'])
self.assertEqual(fl, show_fl)
def test_get_flavors(self):
fl, flavor = self._create_flavor()
flavor['flavor']['name'] = 'SILVER'
self.plugin.create_flavor(self.ctx, flavor)
show_fl = self.plugin.get_flavors(self.ctx)
self.assertEqual(2, len(show_fl))
def _create_service_profile(self, description=None):
data = {'service_profile':
{'description': description or 'the best sp',
'driver': _driver,
'enabled': True,
'metainfo': '{"data": "value"}'}}
sp = self.plugin.create_service_profile(self.ctx,
data)
return sp, data
def test_create_service_profile(self):
sp, data = self._create_service_profile()
res = (self.ctx.session.query(flavors_db.ServiceProfile).
filter_by(id=sp['id']).one())
self.assertEqual(data['service_profile']['driver'], res['driver'])
self.assertEqual(data['service_profile']['metainfo'], res['metainfo'])
def test_create_service_profile_empty_driver(self):
data = {'service_profile':
{'description': 'the best sp',
'driver': '',
'enabled': True,
'metainfo': '{"data": "value"}'}}
sp = self.plugin.create_service_profile(self.ctx,
data)
res = (self.ctx.session.query(flavors_db.ServiceProfile).
filter_by(id=sp['id']).one())
self.assertEqual(data['service_profile']['driver'], res['driver'])
self.assertEqual(data['service_profile']['metainfo'], res['metainfo'])
def test_create_service_profile_invalid_driver(self):
data = {'service_profile':
{'description': 'the best sp',
'driver': "Broken",
'enabled': True,
'metainfo': '{"data": "value"}'}}
self.assertRaises(flavors.ServiceProfileDriverNotFound,
self.plugin.create_service_profile,
self.ctx,
data)
def test_create_service_profile_invalid_empty(self):
data = {'service_profile':
{'description': '',
'driver': '',
'enabled': True,
'metainfo': ''}}
self.assertRaises(flavors.ServiceProfileEmpty,
self.plugin.create_service_profile,
self.ctx,
data)
def test_update_service_profile(self):
sp, data = self._create_service_profile()
data['service_profile']['metainfo'] = '{"data": "value1"}'
sp = self.plugin.update_service_profile(self.ctx, sp['id'],
data)
res = (self.ctx.session.query(flavors_db.ServiceProfile).
filter_by(id=sp['id']).one())
self.assertEqual(data['service_profile']['metainfo'], res['metainfo'])
def test_delete_service_profile(self):
sp, data = self._create_service_profile()
self.plugin.delete_service_profile(self.ctx, sp['id'])
res = self.ctx.session.query(flavors_db.ServiceProfile).all()
self.assertFalse(res)
def test_show_service_profile(self):
sp, data = self._create_service_profile()
sp_show = self.plugin.get_service_profile(self.ctx, sp['id'])
self.assertEqual(sp, sp_show)
def test_get_service_profiles(self):
self._create_service_profile()
self._create_service_profile(description='another sp')
self.assertEqual(2, len(self.plugin.get_service_profiles(self.ctx)))
def test_associate_service_profile_with_flavor(self):
sp, data = self._create_service_profile()
fl, data = self._create_flavor()
self.plugin.create_flavor_service_profile(
self.ctx,
{'service_profile': {'id': sp['id']}},
fl['id'])
binding = (
self.ctx.session.query(flavors_db.FlavorServiceProfileBinding).
first())
self.assertEqual(fl['id'], binding['flavor_id'])
self.assertEqual(sp['id'], binding['service_profile_id'])
res = self.plugin.get_flavor(self.ctx, fl['id'])
self.assertEqual(1, len(res['service_profiles']))
self.assertEqual(sp['id'], res['service_profiles'][0])
res = self.plugin.get_service_profile(self.ctx, sp['id'])
self.assertEqual(1, len(res['flavors']))
self.assertEqual(fl['id'], res['flavors'][0])
def test_autodelete_flavor_associations(self):
sp, data = self._create_service_profile()
fl, data = self._create_flavor()
self.plugin.create_flavor_service_profile(
self.ctx,
{'service_profile': {'id': sp['id']}},
fl['id'])
self.plugin.delete_flavor(self.ctx, fl['id'])
binding = (
self.ctx.session.query(flavors_db.FlavorServiceProfileBinding).
first())
self.assertIsNone(binding)
def test_associate_service_profile_with_flavor_exists(self):
sp, data = self._create_service_profile()
fl, data = self._create_flavor()
self.plugin.create_flavor_service_profile(
self.ctx,
{'service_profile': {'id': sp['id']}},
fl['id'])
self.assertRaises(flavors.FlavorServiceProfileBindingExists,
self.plugin.create_flavor_service_profile,
self.ctx,
{'service_profile': {'id': sp['id']}},
fl['id'])
def test_disassociate_service_profile_with_flavor(self):
sp, data = self._create_service_profile()
fl, data = self._create_flavor()
self.plugin.create_flavor_service_profile(
self.ctx,
{'service_profile': {'id': sp['id']}},
fl['id'])
self.plugin.delete_flavor_service_profile(
self.ctx, sp['id'], fl['id'])
binding = (
self.ctx.session.query(flavors_db.FlavorServiceProfileBinding).
first())
self.assertIsNone(binding)
self.assertRaises(
flavors.FlavorServiceProfileBindingNotFound,
self.plugin.delete_flavor_service_profile,
self.ctx, sp['id'], fl['id'])
def test_delete_service_profile_in_use(self):
sp, data = self._create_service_profile()
fl, data = self._create_flavor()
self.plugin.create_flavor_service_profile(
self.ctx,
{'service_profile': {'id': sp['id']}},
fl['id'])
self.assertRaises(
flavors.ServiceProfileInUse,
self.plugin.delete_service_profile,
self.ctx,
sp['id'])
def test_delete_flavor_in_use(self):
# make use of router since it has a flavor id
fl, data = self._create_flavor()
with self.ctx.session.begin():
self.ctx.session.add(l3_db.Router(flavor_id=fl['id']))
self.assertRaises(
flavors.FlavorInUse,
self.plugin.delete_flavor,
self.ctx,
fl['id'])
def test_get_flavor_next_provider_no_binding(self):
fl, data = self._create_flavor()
self.assertRaises(
flavors.FlavorServiceProfileBindingNotFound,
self.plugin.get_flavor_next_provider,
self.ctx,
fl['id'])
def test_get_flavor_next_provider_disabled(self):
data = {'service_profile':
{'description': 'the best sp',
'driver': _driver,
'enabled': False,
'metainfo': '{"data": "value"}'}}
sp = self.plugin.create_service_profile(self.ctx,
data)
fl, data = self._create_flavor()
self.plugin.create_flavor_service_profile(
self.ctx,
{'service_profile': {'id': sp['id']}},
fl['id'])
self.assertRaises(
flavors.ServiceProfileDisabled,
self.plugin.get_flavor_next_provider,
self.ctx,
fl['id'])
def test_get_flavor_next_provider_no_driver(self):
data = {'service_profile':
{'description': 'the best sp',
'driver': '',
'enabled': True,
'metainfo': '{"data": "value"}'}}
sp = self.plugin.create_service_profile(self.ctx,
data)
fl, data = self._create_flavor()
self.plugin.create_flavor_service_profile(
self.ctx,
{'service_profile': {'id': sp['id']}},
fl['id'])
self.assertRaises(
flavors.ServiceProfileDriverNotFound,
self.plugin.get_flavor_next_provider,
self.ctx,
fl['id'])
def test_get_flavor_next_provider(self):
sp, data = self._create_service_profile()
fl, data = self._create_flavor()
self.plugin.create_flavor_service_profile(
self.ctx,
{'service_profile': {'id': sp['id']}},
fl['id'])
providers = self.plugin.get_flavor_next_provider(
self.ctx,
fl['id'])
self.assertEqual(_provider, providers[0].get('provider', None))
|
|
import unittest
import numpy
import chainer
from chainer import cuda
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
def _sigmoid(x):
half = x.dtype.type(0.5)
xp = cuda.get_array_module(x)
return xp.tanh(x * half) * half + half
def _child_sum_tree_lstm(func, *inputs):
cs = inputs[:len(inputs) // 2]
hs = inputs[len(inputs) // 2:-1]
x = inputs[-1]
xp = cuda.get_array_module(x)
with cuda.get_device_from_array(x):
W_x = func.W_x.W.data.T
b_x = func.W_x.b.data
W_h_aio = func.W_h_aio.W.data.T
W_h_f = func.W_h_f.W.data.T
W_xa, W_xi, W_xo, W_xf = xp.split(W_x, 4, 1)
b_a, b_i, b_o, b_f = xp.split(b_x[None, ], 4, 1)
W_ha, W_hi, W_ho = xp.split(W_h_aio, 3, 1)
W_hf = W_h_f
sum_h = sum(hs)
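        # Child-Sum TreeLSTM: the update (a), input (i) and output (o) gates
        # see the sum of the children's hidden states, while a separate forget
        # gate is computed per child from that child's own hidden state, so
        # each child's cell state is gated individually below.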
a = x.dot(W_xa) + sum_h.dot(W_ha) + b_a
i = x.dot(W_xi) + sum_h.dot(W_hi) + b_i
o = x.dot(W_xo) + sum_h.dot(W_ho) + b_o
f_list = [x.dot(W_xf) + h.dot(W_hf) + b_f for h in hs]
a = xp.tanh(a)
i = _sigmoid(i)
o = _sigmoid(o)
f_list = [_sigmoid(f) for f in f_list]
c_next = a * i + sum(f * c for f, c in zip(f_list, cs))
y = o * xp.tanh(c_next)
return c_next, y
def _nary_tree_lstm(func, *inputs):
cs = inputs[:len(inputs) // 2]
hs = inputs[len(inputs) // 2:-1]
x = inputs[-1]
xp = cuda.get_array_module(x)
with cuda.get_device_from_array(x):
W_x = func.W_x.W.data.T
b_x = func.W_x.b.data
W_h_list = [getattr(func, 'W_h{}'.format(i)).W.data.T
for i in range(1, func.n_ary + 1)]
W_xs = xp.split(W_x, 3 + func.n_ary, 1)
W_xa, W_xi, W_xo, W_xfs = W_xs[0], W_xs[1], W_xs[2], W_xs[3:]
b_xs = xp.split(b_x[None, ], 3 + func.n_ary, 1)
b_a, b_i, b_o, b_fs = b_xs[0], b_xs[1], b_xs[2], b_xs[3:]
W_ha_list = [xp.split(W_h, 3 + func.n_ary, 1)[0]
for W_h in W_h_list]
W_hi_list = [xp.split(W_h, 3 + func.n_ary, 1)[1]
for W_h in W_h_list]
W_ho_list = [xp.split(W_h, 3 + func.n_ary, 1)[2]
for W_h in W_h_list]
W_hfs_list = [xp.split(W_h, 3 + func.n_ary, 1)[3:]
for W_h in W_h_list]
assert(all(len(W_hfs_list) == len(W_hfs) for W_hfs in W_hfs_list))
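        # N-ary TreeLSTM: each child position k has its own recurrent weight
        # matrix W_hk and its own forget gate, so a child's contribution
        # depends on its position instead of being pooled as in the Child-Sum
        # variant.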
a = x.dot(W_xa) + b_a + \
sum(h.dot(W_ha) for h, W_ha in zip(hs, W_ha_list))
i = x.dot(W_xi) + b_i + \
sum(h.dot(W_hi) for h, W_hi in zip(hs, W_hi_list))
o = x.dot(W_xo) + b_o + \
sum(h.dot(W_ho) for h, W_ho in zip(hs, W_ho_list))
f_list = [x.dot(W_xf) + b_f +
sum(h.dot(W_hf) for h, W_hf in zip(hs, W_hf_list))
for W_xf, b_f, W_hf_list
in zip(W_xfs, b_fs, zip(*W_hfs_list))]
a = xp.tanh(a)
i = _sigmoid(i)
o = _sigmoid(o)
f_list = [_sigmoid(f) for f in f_list]
c_next = a * i + sum(f * c for f, c in zip(f_list, cs))
y = o * xp.tanh(c_next)
return c_next, y
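# Both reference implementations follow the call convention used by the links
# in TestTreeLSTM.setUp below: all child cell states first, then all child
# hidden states, then the current input, e.g. for a 2-ary tree
#   c, h = link(c_child1, c_child2, h_child1, h_child2, x)
# which matches the cs / hs / x unpacking at the top of each helper.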
@testing.parameterize(*testing.product({
'dtype': [numpy.float32],
'n_ary': [2, 3],
'in_size': [6, 9],
'out_size': [9],
'model_type': ['ChildSumTreeLSTM', 'NaryTreeLSTM'],
}))
class TestTreeLSTM(unittest.TestCase):
def setUp(self):
if self.model_type == 'ChildSumTreeLSTM':
self.link = links.ChildSumTreeLSTM(
self.in_size, self.out_size)
elif self.model_type == 'NaryTreeLSTM':
self.link = links.NaryTreeLSTM(
self.in_size, self.out_size, n_ary=self.n_ary)
else:
            raise NotImplementedError()
for p in self.link.params():
p.data[:] = numpy.random.uniform(-1, 1, p.shape).astype(self.dtype)
self.c_prevs = [
numpy.random.uniform(-1, 1, (5, self.out_size)).astype(self.dtype)
for _ in range(self.n_ary)]
self.h_prevs = [
numpy.random.uniform(-1, 1, (5, self.out_size)).astype(self.dtype)
for _ in range(self.n_ary)]
self.x = numpy.random.uniform(
-1, 1, (5, self.in_size)).astype(self.dtype)
self.inputs = self.c_prevs + self.h_prevs + [self.x]
self.gc = numpy.random.uniform(
-1, 1, (5, self.out_size)).astype(self.dtype)
self.gh = numpy.random.uniform(
-1, 1, (5, self.out_size)).astype(self.dtype)
self.check_forward_options = {}
self.check_backward_options = {'dtype': numpy.float64}
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 5e-4, 'rtol': 5e-3}
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 5e-4, 'rtol': 5e-3}
def check_forward(self, *inputs_data):
inputs_variable = [chainer.Variable(v) for v in inputs_data]
c, h = self.link(*inputs_variable)
self.assertEqual(c.data.dtype, self.dtype)
self.assertEqual(h.data.dtype, self.dtype)
# Compute expected out
if self.model_type == 'ChildSumTreeLSTM':
c_expect, h_expect = _child_sum_tree_lstm(self.link, *inputs_data)
elif self.model_type == 'NaryTreeLSTM':
c_expect, h_expect = _nary_tree_lstm(self.link, *inputs_data)
else:
            raise NotImplementedError()
testing.assert_allclose(
c_expect, c.data, **self.check_forward_options)
testing.assert_allclose(
h_expect, h.data, **self.check_forward_options)
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(*self.inputs)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
self.link.to_gpu()
self.check_forward(*[cuda.to_gpu(v) for v in self.inputs])
@attr.multi_gpu(2)
def test_forward_gpu_multi(self):
with cuda.get_device_from_id(0):
self.link.to_gpu()
inputs = [cuda.to_gpu(v) for v in self.inputs]
with cuda.get_device_from_id(1):
self.check_forward(*inputs)
def check_forward_valid_none(self, *inputs_data):
inputs_variable = [chainer.Variable(v)
if v is not None else v for v in inputs_data]
base = [v for v in inputs_data if v is not None][0]
xp = cuda.get_array_module(base)
inputs_data = [xp.zeros(self.h_prevs[0].shape, dtype=self.dtype)
if v is None else v for v in inputs_data[:-1]] + \
[xp.zeros(self.x.shape, dtype=self.dtype)
if inputs_data[-1] is None else inputs_data[-1]]
c, h = self.link(*inputs_variable)
self.assertEqual(c.data.dtype, self.dtype)
self.assertEqual(h.data.dtype, self.dtype)
# Compute expected out
if self.model_type == 'ChildSumTreeLSTM':
c_expect, h_expect = _child_sum_tree_lstm(self.link, *inputs_data)
elif self.model_type == 'NaryTreeLSTM':
c_expect, h_expect = _nary_tree_lstm(self.link, *inputs_data)
else:
            raise NotImplementedError()
testing.assert_allclose(
c_expect, c.data, **self.check_forward_options)
testing.assert_allclose(
h_expect, h.data, **self.check_forward_options)
def test_forward_none_ch_cpu(self):
inputs = [None] * len(self.c_prevs) + \
[None] * len(self.h_prevs) + [self.x]
self.check_forward_valid_none(*inputs)
@attr.gpu
def test_forward_none_ch_gpu(self):
self.link.to_gpu()
inputs = [None] * len(self.c_prevs) + \
[None] * len(self.h_prevs) + \
[cuda.to_gpu(self.x)]
self.check_forward_valid_none(*inputs)
def test_forward_none_x_cpu(self):
inputs = self.c_prevs + self.h_prevs + [None]
self.check_forward_valid_none(*inputs)
@attr.gpu
def test_forward_none_x_gpu(self):
self.link.to_gpu()
inputs = [cuda.to_gpu(v) for v in self.c_prevs] + \
[cuda.to_gpu(v) for v in self.h_prevs] + [None]
self.check_forward_valid_none(*inputs)
def check_forward_invalid_none(self, *inputs_data):
inputs_variable = [chainer.Variable(v)
if v is not None else v for v in inputs_data]
self.assertRaises(ValueError, self.link, *inputs_variable)
def test_forward_none_chx_cpu(self):
inputs = [None] * len(self.inputs)
self.check_forward_invalid_none(*inputs)
@attr.gpu
def test_forward_none_chx_gpu(self):
self.link.to_gpu()
inputs = [None] * len(self.inputs)
self.check_forward_invalid_none(*inputs)
def check_backward(self, c_grad, h_grad, *inputs):
gradient_check.check_backward(
self.link,
inputs,
(c_grad, h_grad),
**self.check_backward_options)
@condition.retry(3)
def test_full_backward_cpu(self):
self.check_backward(self.gc, self.gh, *self.inputs)
@condition.retry(3)
def test_no_gc_backward_cpu(self):
self.check_backward(None, self.gh, *self.inputs)
@condition.retry(3)
def test_no_gh_backward_cpu(self):
self.check_backward(self.gc, None, *self.inputs)
@attr.gpu
@condition.retry(3)
def test_full_backward_gpu(self):
self.link.to_gpu()
self.check_backward(cuda.to_gpu(self.gc), cuda.to_gpu(self.gh),
*[cuda.to_gpu(v) for v in self.inputs])
@attr.gpu
@condition.retry(3)
def test_no_gc_backward_gpu(self):
self.link.to_gpu()
self.check_backward(None, cuda.to_gpu(self.gh),
*[cuda.to_gpu(v) for v in self.inputs])
@attr.gpu
@condition.retry(3)
def test_no_gh_backward_gpu(self):
self.link.to_gpu()
self.check_backward(cuda.to_gpu(self.gc), None,
*[cuda.to_gpu(v) for v in self.inputs])
testing.run_module(__name__, __file__)
|
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import ddt
from glanceclient import exc as glance_exc
import mock
from novaclient import exceptions as nova_exc
import six
from rally import consts
from rally import exceptions
import rally.osclients
from rally.task import validation
from rally.verification.tempest import tempest
from tests.unit import test
MODULE = "rally.task.validation."
class ValidationUtilsTestCase(test.TestCase):
def _get_scenario_validators(self, func_, scenario_, reset=True):
"""Unwrap scenario validators created by validation.validator()."""
if reset:
if hasattr(scenario_, "validators"):
del scenario_.validators
scenario = validation.validator(func_)()(scenario_)
return scenario.validators
def test_validator(self):
failure = validation.ValidationResult(False)
func = lambda *args, **kv: kv
scenario = lambda: None
# Check arguments passed to validator
wrap = validation.validator(func)
wrap_args = ["foo", "bar"]
wrap_kwargs = {"foo": "spam"}
wrap_scenario = wrap(*wrap_args, **wrap_kwargs)
wrap_validator = wrap_scenario(scenario)
validators = wrap_validator.validators
self.assertEqual(1, len(validators))
validator, = validators
self.assertEqual(wrap_kwargs, validator(None, None, None))
self.assertEqual(wrap_validator, scenario)
# Default result
func_success = lambda *a, **kv: None
validator, = self._get_scenario_validators(func_success, scenario)
self.assertTrue(validator(None, None, None).is_valid)
# Failure result
func_failure = lambda *a, **kv: failure
validator, = self._get_scenario_validators(func_failure, scenario)
self.assertFalse(validator(None, None, None).is_valid)
@ddt.ddt
class ValidatorsTestCase(test.TestCase):
def _unwrap_validator(self, validator, *args, **kwargs):
@validator(*args, **kwargs)
def func():
pass
return func.validators[0]
def test_number_not_nullable(self):
validator = self._unwrap_validator(validation.number, param_name="n")
self.assertFalse(validator({}, None, None).is_valid)
def test_number_nullable(self):
validator = self._unwrap_validator(validation.number, param_name="n",
nullable=True)
self.assertTrue(validator({}, None, None).is_valid)
def test_number_min_max_value(self):
validator = self._unwrap_validator(validation.number,
param_name="a", minval=4, maxval=10)
result = validator({"args": {"a": 3.9}}, None, None)
self.assertFalse(result.is_valid, result.msg)
result = validator({"args": {"a": 4.1}}, None, None)
self.assertTrue(result.is_valid, result.msg)
result = validator({"args": {"a": 11}}, None, None)
self.assertFalse(result.is_valid, result.msg)
def test_number_integer_only(self):
validator = self._unwrap_validator(validation.number,
param_name="b", integer_only=True)
result = validator({"args": {"b": 3.9}}, None, None)
self.assertFalse(result.is_valid, result.msg)
result = validator({"args": {"b": 3}}, None, None)
self.assertTrue(result.is_valid, result.msg)
@mock.patch(MODULE + "os.access")
def test__file_access_ok(self, mock_access):
mock_access.return_value = True
result = validation._file_access_ok(
"foobar", os.R_OK, "p", False)
self.assertTrue(result.is_valid, result.msg)
@mock.patch(MODULE + "os.access")
def test__file_access_not_found(self, mock_access):
mock_access.return_value = False
result = validation._file_access_ok(
"foobar", os.R_OK, "p", False)
self.assertFalse(result.is_valid, result.msg)
@mock.patch(MODULE + "_file_access_ok")
def test_file_exists(self, mock__file_access_ok):
mock__file_access_ok.return_value = "foobar"
validator = self._unwrap_validator(validation.file_exists,
param_name="p",
required=False)
result = validator({"args": {"p": "test_file"}}, None, None)
self.assertEqual("foobar", result)
mock__file_access_ok.assert_called_once_with(
"test_file", os.R_OK, "p", False)
def test_check_command_valid(self):
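        # The cases below pin down check_command_dict's contract: exactly one
        # of script_file/script_inline must be given, an interpreter is
        # required, and local_path is only accepted together with a
        # remote_path when an interpreter is being uploaded.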
e = self.assertRaises(
ValueError, validation.check_command_dict,
{
"interpreter": "foobar", "script_file": "foo",
"script_inline": "bar"
})
self.assertIn("Exactly one of ", str(e))
e = self.assertRaises(
ValueError, validation.check_command_dict,
{"script_file": "foobar"})
self.assertIn("Supplied dict specifies no", str(e))
command = {"script_inline": "foobar", "interpreter": "foo"}
result = validation.check_command_dict(command)
self.assertIsNone(result)
e = self.assertRaises(
ValueError, validation.check_command_dict,
{
"script_inline": "foobar",
"interpreter": "foo",
"local_path": "bar"
})
self.assertIn("When uploading an interpreter its path", str(e))
result = validation.check_command_dict({
"script_inline": "foobar",
"interpreter": ["ENV=bar", "/bin/foo"],
"local_path": "bar",
"remote_path": "/bin/foo"
})
self.assertIsNone(result)
@mock.patch("rally.task.validation._file_access_ok")
def test_valid_command(self, mock__file_access_ok):
validator = self._unwrap_validator(validation.valid_command,
param_name="p")
mock__file_access_ok.return_value = validation.ValidationResult(True)
command = {"script_file": "foobar", "interpreter": "foo"}
result = validator({"args": {"p": command}}, None, None)
self.assertTrue(result.is_valid, result.msg)
mock__file_access_ok.assert_called_once_with(
filename="foobar", mode=os.R_OK, param_name="p.script_file",
required=True)
def test_valid_command_required(self):
validator = self._unwrap_validator(validation.valid_command,
param_name="p")
result = validator({"args": {"p": None}}, None, None)
self.assertFalse(result.is_valid, result.msg)
@mock.patch("rally.task.validation._file_access_ok")
def test_valid_command_unreadable_script_file(self, mock__file_access_ok):
mock__file_access_ok.return_value = validation.ValidationResult(False)
validator = self._unwrap_validator(validation.valid_command,
param_name="p")
command = {"script_file": "foobar", "interpreter": "foo"}
result = validator({"args": {"p": command}}, None, None)
self.assertFalse(result.is_valid, result.msg)
@mock.patch("rally.task.validation.check_command_dict")
def test_valid_command_fail_check_command_dict(self,
mock_check_command_dict):
validator = self._unwrap_validator(validation.valid_command,
param_name="p")
mock_check_command_dict.side_effect = ValueError("foobar")
command = {"foo": "bar"}
result = validator({"args": {"p": command}}, None, None)
self.assertFalse(result.is_valid, result.msg)
self.assertEqual("foobar", result.msg)
def test_valid_command_script_inline(self):
validator = self._unwrap_validator(validation.valid_command,
param_name="p")
command = {"script_inline": "bar", "interpreter": "/bin/sh"}
result = validator({"args": {"p": command}}, None, None)
self.assertTrue(result.is_valid, result.msg)
@mock.patch("rally.task.validation._file_access_ok")
def test_valid_command_local_path(self, mock__file_access_ok):
mock__file_access_ok.return_value = validation.ValidationResult(False)
validator = self._unwrap_validator(validation.valid_command,
param_name="p")
command = {"remote_path": "bar", "local_path": "foobar"}
result = validator({"args": {"p": command}}, None, None)
self.assertFalse(result.is_valid, result.msg)
mock__file_access_ok.assert_called_once_with(
filename="foobar", mode=os.R_OK, param_name="p.local_path",
required=True)
def test__get_validated_image_no_value_in_config(self):
result = validation._get_validated_image({}, None, "non_existing")
self.assertFalse(result[0].is_valid, result[0].msg)
def test__get_validated_image_from_context(self):
clients = mock.MagicMock()
image = {
"size": 0,
"min_ram": 0,
"min_disk": 0
}
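        # When the image is supplied by the "images" context only its name is
        # known, so _get_validated_image is expected to return this permissive
        # zero-sized placeholder (asserted below for both the exact-name and
        # regex lookups) rather than querying Glance.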
result = validation._get_validated_image({"args": {
"image": {"name": "foo"}}, "context": {
"images": {
"image_name": "foo"}
}}, clients, "image")
self.assertTrue(result[0].is_valid, result[0].msg)
self.assertEqual(result[1], image)
result = validation._get_validated_image({"args": {
"image": {"regex": r"^foo$"}}, "context": {
"images": {
"image_name": "foo"}
}}, clients, "image")
self.assertTrue(result[0].is_valid, result[0].msg)
self.assertEqual(result[1], image)
@mock.patch(MODULE + "types.ImageResourceType.transform",
return_value="image_id")
def test__get_validated_image(self, mock_image_resource_type_transform):
clients = mock.MagicMock()
clients.glance().images.get().to_dict.return_value = {
"image": "image_id"}
result = validation._get_validated_image({"args": {"a": "test"},
"context": {
"image_name": "foo"}},
clients, "a")
self.assertTrue(result[0].is_valid, result[0].msg)
self.assertEqual(result[1], {"image": "image_id"})
mock_image_resource_type_transform.assert_called_once_with(
clients=clients, resource_config="test")
clients.glance().images.get.assert_called_with(image="image_id")
@mock.patch(MODULE + "types.ImageResourceType.transform",
side_effect=exceptions.InvalidScenarioArgument)
def test__get_validated_image_transform_error(
self, mock_image_resource_type_transform):
result = validation._get_validated_image({"args": {"a": "test"}},
None, "a")
self.assertFalse(result[0].is_valid, result[0].msg)
@mock.patch(MODULE + "types.ImageResourceType.transform")
def test__get_validated_image_not_found(
self, mock_image_resource_type_transform):
clients = mock.MagicMock()
clients.glance().images.get().to_dict.side_effect = (
glance_exc.HTTPNotFound(""))
result = validation._get_validated_image({"args": {"a": "test"}},
clients, "a")
self.assertFalse(result[0].is_valid, result[0].msg)
def test__get_validated_flavor_no_value_in_config(self):
result = validation._get_validated_flavor({}, None, "non_existing")
self.assertFalse(result[0].is_valid, result[0].msg)
@mock.patch(MODULE + "types.FlavorResourceType.transform",
return_value="flavor_id")
def test__get_validated_flavor(
self, mock_flavor_resource_type_transform):
clients = mock.MagicMock()
clients.nova().flavors.get.return_value = "flavor"
result = validation._get_validated_flavor({"args": {"a": "test"}},
clients, "a")
self.assertTrue(result[0].is_valid, result[0].msg)
self.assertEqual(result[1], "flavor")
mock_flavor_resource_type_transform.assert_called_once_with(
clients=clients, resource_config="test")
clients.nova().flavors.get.assert_called_once_with(flavor="flavor_id")
@mock.patch(MODULE + "types.FlavorResourceType.transform",
side_effect=exceptions.InvalidScenarioArgument)
def test__get_validated_flavor_transform_error(
self, mock_flavor_resource_type_transform):
result = validation._get_validated_flavor({"args": {"a": "test"}},
None, "a")
self.assertFalse(result[0].is_valid, result[0].msg)
@mock.patch(MODULE + "types.FlavorResourceType.transform")
def test__get_validated_flavor_not_found(
self, mock_flavor_resource_type_transform):
clients = mock.MagicMock()
clients.nova().flavors.get.side_effect = nova_exc.NotFound("")
result = validation._get_validated_flavor({"args": {"a": "test"}},
clients, "a")
self.assertFalse(result[0].is_valid, result[0].msg)
@mock.patch(MODULE + "types.FlavorResourceType.transform")
def test__get_validated_flavor_from_context(
self, mock_flavor_resource_type_transform):
clients = mock.MagicMock()
clients.nova().flavors.get.side_effect = nova_exc.NotFound("")
config = {
"args": {"flavor": {"name": "test"}},
"context": {
"flavors": [{
"name": "test",
"ram": 32,
}]
}
}
result = validation._get_validated_flavor(config, clients, "flavor")
self.assertTrue(result[0].is_valid, result[0].msg)
@mock.patch(MODULE + "types.FlavorResourceType.transform")
def test__get_validated_flavor_from_context_failed(
self, mock_flavor_resource_type_transform):
clients = mock.MagicMock()
clients.nova().flavors.get.side_effect = nova_exc.NotFound("")
config = {
"args": {"flavor": {"name": "test"}},
"context": {
"flavors": [{
"name": "othername",
"ram": 32,
}]
}
}
result = validation._get_validated_flavor(config, clients, "flavor")
self.assertFalse(result[0].is_valid, result[0].msg)
config = {
"args": {"flavor": {"name": "test"}},
}
result = validation._get_validated_flavor(config, clients, "flavor")
self.assertFalse(result[0].is_valid, result[0].msg)
@ddt.data("nfS", "Cifs", "GLUSTERFS", "hdfs")
def test_validate_share_proto_valid(self, share_proto):
validator = self._unwrap_validator(validation.validate_share_proto)
result = validator(
{"args": {"share_proto": share_proto}}, "clients", "deployment")
self.assertTrue(result.is_valid, result.msg)
@ddt.data(
*([{"args": {"share_proto": v}} for v in (
None, "", "nfsfoo", "foonfs", "nfscifs", )] +
[{}, {"args": {}}])
)
def test_validate_share_proto_invalid(self, config):
validator = self._unwrap_validator(validation.validate_share_proto)
result = validator(config, "clients", "deployment")
self.assertFalse(result.is_valid, result.msg)
def test_image_exists(self):
validator = self._unwrap_validator(validation.image_exists, "param")
result = validator({}, "clients", "deployment")
self.assertFalse(result.is_valid, result.msg)
def test_image_exists_nullable(self):
validator = self._unwrap_validator(validation.image_exists,
"param", nullable=True)
result = validator({}, "clients", "deployment")
self.assertTrue(result.is_valid, result.msg)
def test_flavor_exists(self):
validator = self._unwrap_validator(validation.flavor_exists, "param")
result = validator({}, "clients", "deployment")
self.assertFalse(result.is_valid, result.msg)
def test_image_valid_on_flavor_flavor_or_image_not_specified(self):
validator = self._unwrap_validator(validation.image_valid_on_flavor,
"flavor", "image")
result = validator({}, None, None)
self.assertFalse(result.is_valid, result.msg)
result = validator({"args": {"flavor": {"id": 11}}}, mock.MagicMock(),
None)
self.assertFalse(result.is_valid, result.msg)
@mock.patch(MODULE + "_get_validated_image")
@mock.patch(MODULE + "_get_validated_flavor")
def test_image_valid_on_flavor(self, mock__get_validated_flavor,
mock__get_validated_image):
image = {
"id": "fake_id",
"min_ram": None,
"size": 2,
"min_disk": 0
}
flavor = mock.MagicMock()
success = validation.ValidationResult(True)
mock__get_validated_flavor.return_value = (success, flavor)
mock__get_validated_image.return_value = (success, image)
validator = self._unwrap_validator(validation.image_valid_on_flavor,
"flavor", "image")
# test ram
flavor.disk = None
flavor.ram = 2
image["min_ram"] = None
result = validator(None, None, None)
self.assertTrue(result.is_valid, result.msg)
image["min_ram"] = 4
result = validator(None, None, None)
self.assertFalse(result.is_valid, result.msg)
image["min_ram"] = 1
result = validator(None, None, None)
self.assertTrue(result.is_valid, result.msg)
# test disk (flavor.disk not None)
image["size"] = 2
image["min_disk"] = 0
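        # flavor.disk and min_disk are compared in GiB while the image size is
        # compared against flavor.disk converted to bytes, so a disk of
        # 5.0 / (1024 ** 3) GiB is effectively 5 bytes, just enough for the
        # 2-byte image configured above.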
flavor.disk = 5.0 / (1024 ** 3)
result = validator(None, None, None)
self.assertTrue(result.is_valid, result.msg)
image["min_disk"] = flavor.disk * 2
result = validator(None, None, None)
self.assertFalse(result.is_valid, result.msg)
image["min_disk"] = flavor.disk / 4
image["size"] = 1000
result = validator(None, None, None)
self.assertFalse(result.is_valid, result.msg)
@mock.patch(MODULE + "types.FlavorResourceType.transform")
@mock.patch(MODULE + "_get_validated_image")
def test_image_valid_on_flavor_context(
self, mock__get_validated_image,
mock_flavor_resource_type_transform):
clients = mock.MagicMock()
clients.nova().flavors.get.side_effect = nova_exc.NotFound("")
image = {"min_ram": 24, "id": "fake_id"}
success = validation.ValidationResult(True)
mock__get_validated_image.return_value = (success, image)
validator = self._unwrap_validator(validation.image_valid_on_flavor,
"flavor", "image")
config = {
"args": {"flavor": {"name": "test"}},
"context": {
"flavors": [{
"name": "test",
"ram": 32,
}]
}
}
# test ram
image["min_ram"] = None
result = validator(config, clients, None)
self.assertTrue(result.is_valid, result.msg)
image["min_ram"] = 64
result = validator(config, clients, None)
self.assertFalse(result.is_valid, result.msg)
def test_network_exists(self):
validator = self._unwrap_validator(validation.network_exists, "net")
net1 = mock.MagicMock()
net1.label = "private"
net2 = mock.MagicMock()
net2.label = "custom"
clients = mock.MagicMock()
clients.nova().networks.list.return_value = [net1, net2]
result = validator({}, clients, None)
self.assertTrue(result.is_valid, result.msg)
result = validator({"args": {"net": "custom"}}, clients, None)
self.assertTrue(result.is_valid, result.msg)
result = validator({"args": {"net": "custom2"}}, clients, None)
self.assertFalse(result.is_valid, result.msg)
def test_external_network_exists(self):
validator = self._unwrap_validator(
validation.external_network_exists, "name")
result = validator({"args": {}}, None, None)
self.assertTrue(result.is_valid, result.msg)
clients = mock.MagicMock()
net1 = mock.MagicMock()
net2 = mock.MagicMock()
clients.nova().floating_ip_pools.list.return_value = [net1, net2]
net1.name = "public"
net2.name = "custom"
result = validator({}, clients, None)
self.assertTrue(result.is_valid, result.msg)
result = validator({"args": {"name": "custom"}}, clients, None)
self.assertTrue(result.is_valid, result.msg)
result = validator({"args": {"name": "non_exist"}}, clients, None)
self.assertFalse(result.is_valid, result.msg)
net1.name = {"name": "public"}
net2.name = {"name": "custom"}
result = validator({"args": {"name": "custom"}}, clients, None)
self.assertTrue(result.is_valid, result.msg)
def test_tempest_tests_exists_no_arg(self):
validator = self._unwrap_validator(validation.tempest_tests_exists)
result = validator({}, None, None)
self.assertFalse(result.is_valid, result.msg)
@mock.patch(MODULE + "tempest.Tempest")
def test_tempest_tests_exists(self, mock_tempest):
mock_tempest().is_installed.return_value = False
mock_tempest().is_configured.return_value = False
mock_tempest().discover_tests.return_value = set([
"tempest.api.a", "tempest.api.b", "tempest.api.c"])
deployment = {"uuid": "someuuid"}
validator = self._unwrap_validator(validation.tempest_tests_exists)
result = validator({"args": {"test_name": "a"}}, None, deployment)
self.assertTrue(result.is_valid, result.msg)
mock_tempest().is_installed.assert_called_once_with()
mock_tempest().is_configured.assert_called_once_with()
mock_tempest().discover_tests.assert_called_once_with()
result = validator({"args": {"test_name": "d"}}, None, deployment)
self.assertFalse(result.is_valid, result.msg)
result = validator({"args": {"test_name": "tempest.api.a"}}, None,
deployment)
self.assertTrue(result.is_valid, result.msg)
result = validator({"args": {"test_name": "tempest.api.d"}}, None,
deployment)
self.assertFalse(result.is_valid, result.msg)
result = validator({"args": {"test_names": ["tempest.api.a", "b"]}},
None, deployment)
self.assertTrue(result.is_valid, result.msg)
result = validator({"args": {"test_names": ["tempest.api.j", "e"]}},
None, deployment)
self.assertFalse(result.is_valid, result.msg)
@mock.patch(MODULE + "tempest.Tempest")
def test_tempest_tests_exists_tempest_installation_failed(self,
mock_tempest):
mock_tempest().is_installed.return_value = False
mock_tempest().install.side_effect = tempest.TempestSetupFailure
deployment = {"uuid": "someuuid"}
validator = self._unwrap_validator(validation.tempest_tests_exists)
result = validator({"args": {"test_name": "a"}}, None, deployment)
self.assertFalse(result.is_valid, result.msg)
mock_tempest().is_installed.assert_called_once_with()
def test_tempest_set_exists_missing_args(self):
validator = self._unwrap_validator(validation.tempest_set_exists)
result = validator({}, None, None)
self.assertFalse(result.is_valid, result.msg)
def test_tempest_set_exists(self):
validator = self._unwrap_validator(validation.tempest_set_exists)
sets = list(list(consts.TempestTestsSets) +
list(consts.TempestTestsAPI))
result = validator(
{"args": {"set_name": sets[0]}}, None, None)
self.assertTrue(result.is_valid, result.msg)
result = validator(
{"args": {"set_name": "lol"}}, None, None)
self.assertFalse(result.is_valid, result.msg)
def test_required_parameters(self):
validator = self._unwrap_validator(validation.required_parameters,
"a", "b")
result = validator({"args": {"a": 1, "b": 2, "c": 3}}, None, None)
self.assertTrue(result.is_valid, result.msg)
result = validator({"args": {"a": 1, "c": 3}}, None, None)
self.assertFalse(result.is_valid, result.msg)
@mock.patch("rally.common.objects.Endpoint")
def test_required_service(self, mock_endpoint):
validator = self._unwrap_validator(validation.required_services,
consts.Service.KEYSTONE,
consts.Service.NOVA,
consts.Service.NOVA_NET)
clients = mock.MagicMock()
clients.services().values.return_value = [consts.Service.KEYSTONE,
consts.Service.NOVA,
consts.Service.NOVA_NET]
fake_service = mock.Mock(binary="nova-network", status="enabled")
with mock.patch("rally.osclients.Clients") as clients_cls:
nova_client = clients_cls.return_value.nova.return_value
nova_client.services.list.return_value = [fake_service]
result = validator({}, clients, {"admin": {"info": "admin"}})
clients_cls.assert_called_once_with(mock_endpoint.return_value)
mock_endpoint.assert_called_once_with(info="admin")
self.assertTrue(result.is_valid, result.msg)
validator = self._unwrap_validator(validation.required_services,
consts.Service.KEYSTONE,
consts.Service.NOVA)
clients.services().values.return_value = [consts.Service.KEYSTONE]
with mock.patch("rally.osclients.Clients") as clients_cls:
result = validator({}, clients, None)
self.assertFalse(clients_cls.called)
self.assertFalse(result.is_valid, result.msg)
def test_required_service_wrong_service(self):
validator = self._unwrap_validator(validation.required_services,
consts.Service.KEYSTONE,
consts.Service.NOVA, "lol")
clients = mock.MagicMock()
result = validator({}, clients, None)
self.assertFalse(result.is_valid, result.msg)
def test_required_contexts(self):
validator = self._unwrap_validator(validation.required_contexts,
"c1", "c2", "c3")
result = validator({"context": {"a": 1}}, None, None)
self.assertFalse(result.is_valid, result.msg)
result = validator({"context": {"c1": 1, "c2": 2, "c3": 3}},
None, None)
self.assertTrue(result.is_valid, result.msg)
result = validator({"context": {"c1": 1, "c2": 2, "c3": 3, "a": 1}},
None, None)
self.assertTrue(result.is_valid, result.msg)
def test_required_openstack_with_admin(self):
validator = self._unwrap_validator(validation.required_openstack,
admin=True)
# admin present in the deployment
fake_deployment = {"admin": "admin_endpoint", "users": []}
self.assertTrue(validator(None, None, fake_deployment).is_valid)
# admin not present in the deployment
fake_deployment = {"admin": None, "users": ["u1", "h2"]}
self.assertFalse(validator(None, None, fake_deployment).is_valid)
def test_required_openstack_with_users(self):
validator = self._unwrap_validator(validation.required_openstack,
users=True)
# users present in the deployment
fake_deployment = {"admin": None, "users": ["u_endpoint"]}
self.assertTrue(validator({}, None, fake_deployment).is_valid)
# admin and users present in the deployment
fake_deployment = {"admin": "a", "users": ["u1", "h2"]}
self.assertTrue(validator({}, None, fake_deployment).is_valid)
# admin and user context
fake_deployment = {"admin": "a", "users": []}
context = {"context": {"users": True}}
self.assertTrue(validator(context, None, fake_deployment).is_valid)
# only admin present
fake_deployment = {"admin": "a", "users": []}
self.assertFalse(validator({}, None, fake_deployment).is_valid)
def test_required_openstack_with_admin_and_users(self):
validator = self._unwrap_validator(validation.required_openstack,
admin=True, users=True)
fake_deployment = {"admin": "a", "users": []}
self.assertFalse(validator({}, None, fake_deployment).is_valid)
fake_deployment = {"admin": "a", "users": ["u"]}
self.assertTrue(validator({}, None, fake_deployment).is_valid)
# admin and user context
fake_deployment = {"admin": "a", "users": []}
context = {"context": {"users": True}}
self.assertTrue(validator(context, None, fake_deployment).is_valid)
def test_required_openstack_invalid(self):
validator = self._unwrap_validator(validation.required_openstack)
self.assertFalse(validator(None, None, None).is_valid)
def test_volume_type_exists(self):
validator = self._unwrap_validator(validation.volume_type_exists,
param_name="volume_type")
clients = mock.MagicMock()
clients.cinder().volume_type.list.return_value = []
context = {"args": {"volume_type": False}}
result = validator(context, clients, mock.MagicMock())
self.assertTrue(result.is_valid, result.msg)
def test_volume_type_exists_check_types(self):
validator = self._unwrap_validator(validation.volume_type_exists,
param_name="volume_type")
clients = mock.MagicMock()
clients.cinder().volume_types.list.return_value = ["type"]
context = {"args": {"volume_type": True}}
result = validator(context, clients, mock.MagicMock())
self.assertTrue(result.is_valid, result.msg)
def test_volume_type_exists_check_types_no_types_exist(self):
validator = self._unwrap_validator(validation.volume_type_exists,
param_name="volume_type")
clients = mock.MagicMock()
clients.cinder().volume_types.list.return_value = []
context = {"args": {"volume_type": True}}
result = validator(context, clients, mock.MagicMock())
self.assertFalse(result.is_valid, result.msg)
@mock.patch(MODULE + "osclients")
def test_required_clients(self, mock_osclients):
validator = self._unwrap_validator(validation.required_clients,
"keystone", "nova")
clients = mock.MagicMock()
clients.keystone.return_value = "keystone"
clients.nova.return_value = "nova"
result = validator({}, clients, {})
self.assertTrue(result.is_valid, result.msg)
self.assertFalse(mock_osclients.Clients.called)
clients.nova.side_effect = ImportError
result = validator({}, clients, {})
self.assertFalse(result.is_valid, result.msg)
@mock.patch(MODULE + "objects")
@mock.patch(MODULE + "osclients")
def test_required_clients_with_admin(self, mock_osclients, mock_objects):
validator = self._unwrap_validator(validation.required_clients,
"keystone", "nova", admin=True)
clients = mock.Mock()
clients.keystone.return_value = "keystone"
clients.nova.return_value = "nova"
mock_osclients.Clients.return_value = clients
mock_objects.Endpoint.return_value = "foo_endpoint"
result = validator({}, clients, {"admin": {"foo": "bar"}})
self.assertTrue(result.is_valid, result.msg)
mock_objects.Endpoint.assert_called_once_with(foo="bar")
mock_osclients.Clients.assert_called_once_with("foo_endpoint")
clients.nova.side_effect = ImportError
result = validator({}, clients, {"admin": {"foo": "bar"}})
self.assertFalse(result.is_valid, result.msg)
def test_required_cinder_services(self):
validator = self._unwrap_validator(
validation.required_cinder_services,
service_name=six.text_type("cinder-service"))
with mock.patch.object(rally.osclients.Clients, "cinder") as client:
fake_service = mock.Mock(binary="cinder-service", state="up")
cinder_client = mock.Mock()
services = mock.Mock()
services.list.return_value = [fake_service]
cinder_client.services = services
client.return_value = cinder_client
deployment = {"admin": {"auth_url": "fake_endpoint",
"username": "username",
"password": "password"}}
result = validator({}, None, deployment)
self.assertTrue(result.is_valid, result.msg)
fake_service.state = "down"
result = validator({}, None, deployment)
self.assertFalse(result.is_valid, result.msg)
def test_restricted_parameters(self):
validator = self._unwrap_validator(
validation.restricted_parameters, ["param_name"])
result = validator({"args": {}}, None, None)
self.assertTrue(result.is_valid, result.msg)
def test_restricted_parameters_negative(self):
validator = self._unwrap_validator(
validation.restricted_parameters, ["param_name"])
result = validator({"args": {"param_name": "value"}}, None, None)
self.assertFalse(result.is_valid, result.msg)
def test_restricted_parameters_in_dict(self):
validator = self._unwrap_validator(
validation.restricted_parameters, ["param_name"], "subdict")
result = validator({"args": {"subdict": {}}}, None, None)
self.assertTrue(result.is_valid, result.msg)
def test_restricted_parameters_in_dict_negative(self):
validator = self._unwrap_validator(
validation.restricted_parameters, ["param_name"], "subdict")
result = validator({"args": {"subdict":
{"param_name": "value"}}}, None, None)
self.assertFalse(result.is_valid, result.msg)
def test_restricted_parameters_string_param_names(self):
validator = self._unwrap_validator(
validation.restricted_parameters, "param_name")
result = validator({"args": {}}, None, None)
self.assertTrue(result.is_valid, result.msg)
|
|
"""
Access maven artifacts.
"""
from xml.etree import ElementTree
import xml.dom.minidom as minidom
import requests
import logging as log # TODO
def requests_get_check(*args, **kwargs):
response = requests.get(*args, **kwargs)
response.raise_for_status()
return response
class Artifact:
"""
Represents a Maven artifact. A Maven artifact consists of a group id, an
artifact id and, optionally, a version. After it has been resolved against
a #MavenRepository, the artifact may also carry extra metadata, e.g. a
snapshot timestamp and build number.
"""
@classmethod
def from_id(cls, id_str):
group, artifact, version = id_str.split(':')
return cls(group, artifact, version)
def __init__(self, group, artifact, version=None, scope='compile', type='jar', optional=False):
self.group = group
self.artifact = artifact
self.version = version
self.timestamp = None
self.build_number = None
self.scope = scope
self.type = type
self.optional = optional
def __hash__(self):
return hash(self.as_tuple())
def __eq__(self, other):
if isinstance(other, Artifact):
return self.as_tuple() == other.as_tuple()
return False
def __str__(self):
return '{}:{}:{}'.format(self.group, self.artifact, self.version)
def __repr__(self):
return 'Artifact("{}:{}:{}")'.format(self.group, self.artifact, self.version)
def as_tuple(self):
return (self.group, self.artifact, self.version)
def is_snapshot(self):
if self.version:
return 'SNAPSHOT' in self.version
return False
def to_local_path(self, ext):
return '{s.artifact}-{s.version}.{e}'.format(s=self, e=ext)
def to_maven_name(self, ext):
template = '{g}/{s.artifact}/{s.version}/{s.artifact}-{s.version}.{e}'
return template.format(g=self.group.replace('.', '/'), s=self, e=ext)
def to_maven_snapshot_name(self, ext):
template = '{g}/{s.artifact}/{s.version}/'\
'{s.artifact}-{v}-{s.timestamp}-{s.build_number}.{e}'
return template.format(g=self.group.replace('.', '/'), s=self, e=ext,
v=self.version.replace('-SNAPSHOT', ''))
def to_maven_metadata(self):
return '{}/{}'.format(self.group.replace('.', '/'), self.artifact) + '/maven-metadata.xml'
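# A minimal usage sketch for Artifact (illustrative only; the coordinates
# below are hypothetical, not taken from any real project):
#
#   >>> a = Artifact.from_id('com.example:demo-lib:1.2.3')
#   >>> str(a)
#   'com.example:demo-lib:1.2.3'
#   >>> a.to_maven_name('jar')
#   'com/example/demo-lib/1.2.3/demo-lib-1.2.3.jar'
#   >>> a.to_maven_metadata()
#   'com/example/demo-lib/maven-metadata.xml'
#   >>> Artifact('com.example', 'demo-lib', '1.3.0-SNAPSHOT').is_snapshot()
#   True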
class MavenRepository:
def __init__(self, name, uri):
self.name = name
self.uri = uri.rstrip('/')
self.pom_cache = {}
self.pom_not_found = set()
self.metadata_cache = {}
def __repr__(self):
return 'MavenRepository(name={!r}, uri={!r})'.format(self.name, self.uri)
def download_jar(self, artifact, local_path):
"""
Downloads a JAR file of the #Artifact *artifact* to *local_path*.
"""
url = self.get_artifact_uri(artifact, 'jar')
log.info('[Downloading] JAR from {}'.format(url))
with open(local_path, 'wb') as fp:
for chunk in requests_get_check(url).iter_content():
fp.write(chunk)
def download_pom(self, artifact):
"""
Downloads the artifact's POM manifest.
"""
if not isinstance(artifact, Artifact):
raise TypeError('expected Artifact')
if artifact in self.pom_not_found:
return None
if artifact in self.pom_cache:
return self.pom_cache[artifact]
if not artifact.version:
if artifact in self.metadata_cache:
metadata = self.metadata_cache[artifact]
else:
metadata_path = self.uri + '/' + artifact.to_maven_metadata()
response = requests.get(metadata_path)
if response.status_code != 200:
return None
metadata = minidom.parseString(response.content)
self.metadata_cache[artifact] = metadata
try:
latest = metadata.getElementsByTagName('latest')[0].firstChild.nodeValue
except IndexError:
latest = metadata.getElementsByTagName('version')[0].firstChild.nodeValue
artifact.version = latest
if artifact.is_snapshot():
snapshot_info = self.get_snapshot_info(artifact)
if snapshot_info is not None:
artifact.timestamp, artifact.build_number = snapshot_info
url = self.get_artifact_uri(artifact, 'pom')
try:
log.info('[Checking] POM file {}'.format(url))
data = requests_get_check(url).text
self.pom_cache[artifact] = data
return data
except requests.exceptions.RequestException:
self.pom_not_found.add(artifact)
log.info('[Skipped] POM file not found at {}'.format(url))
return None
def get_artifact_uri(self, artifact, ext):
if not artifact.is_snapshot():
maven_name = artifact.to_maven_name(ext)
else:
maven_name = artifact.to_maven_snapshot_name(ext)
maven_path = self.uri + '/' + maven_name
return maven_path
def get_snapshot_info(self, artifact):
metadata_path = self.get_metadata_path(artifact)
try:
data = requests_get_check(metadata_path).text # XXX user agent
eletree = ElementTree.fromstring(data)
timestamp = eletree.findtext('versioning/snapshot/timestamp')
build_number = eletree.findtext('versioning/snapshot/buildNumber')
return (timestamp, build_number)
except requests.exceptions.RequestException:
return None
def get_metadata_path(self, artifact):
group = artifact.group.replace('.', '/')
metadata_path = "%s/%s/%s/%s/maven-metadata.xml" % (self.uri, group,
artifact.artifact, artifact.version)
return metadata_path
def pom_eval_deps(pom):
"""
Evaluates the dependencies of a POM XML file.
"""
if isinstance(pom, str):
pom = minidom.parseString(pom)
project = pom.getElementsByTagName('project')[0]
group_id = None
artifact_id = None
version = None
dependencies = None
for node in iter_dom_children(project):
if not version and node.nodeName == 'version':
version = node.firstChild.nodeValue
elif not group_id and node.nodeName == 'groupId':
group_id = node.firstChild.nodeValue
elif not artifact_id and node.nodeName == 'artifactId':
artifact_id = node.firstChild.nodeValue
elif node.nodeName == 'parent':
for node in iter_dom_children(node):
if not version and node.nodeName == 'version':
version = node.firstChild.nodeValue
elif not group_id and node.nodeName == 'groupId':
group_id = node.firstChild.nodeValue
elif not artifact_id and node.nodeName == 'artifactId':
artifact_id = node.firstChild.nodeValue
elif not dependencies and node.nodeName == 'dependencies':
dependencies = node
if not group_id or not version:
log.warning('[Error]: could not read version or group_id from POM')
return []
if not dependencies:
return []
def parse_dependency(node):
try:
scope = node.getElementsByTagName('scope')[0].firstChild.nodeValue
except IndexError:
scope = 'compile'
try:
deptype = node.getElementsByTagName('type')[0].firstChild.nodeValue
except IndexError:
deptype = 'jar'
try:
optional = node.getElementsByTagName('optional')[0].firstChild.nodeValue
except IndexError:
optional = False
else:
if optional == 'true':
optional = True
elif optional == 'false':
optional = False
else:
log.warning('unexpected <optional> value "{}"'.format(optional))
optional = False
dep_group = node.getElementsByTagName('groupId')[0].firstChild.nodeValue
dep_artifact = node.getElementsByTagName('artifactId')[0].firstChild.nodeValue
try:
dep_version = node.getElementsByTagName('version')[0].firstChild.nodeValue
except IndexError:
dep_version = None
# Try to resolve some of the properties.
if dep_group in ('${project.groupId}', '${pom.groupId}'):
dep_group = group_id
if dep_version in ('${project.version}', '${pom.version}'):
dep_version = version
# We're not a full-blown POM evaluator, so give a warning when
# we can't handle the property in the dependency version.
if dep_version and '$' in dep_version:
msg = 'unable to resolve "{}" in dependency {}:{} ({}:{}:{})'
log.warning(msg.format(dep_version, dep_group, dep_artifact,
group_id, artifact_id, version))
dep_version = None
if not dep_version and dep_group.startswith(group_id):
dep_version = version
return Artifact(dep_group, dep_artifact, dep_version, scope, deptype, optional)
results = []
for node in iter_dom_children(dependencies):
if node.nodeName == 'dependency':
results.append(parse_dependency(node))
return results
def iter_dom_children(node):
child = node.firstChild
while child:
yield child
child = child.nextSibling
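# A minimal end-to-end sketch (not part of the module itself), assuming network
# access to Maven Central; the artifact coordinates are hypothetical and chosen
# purely for illustration:
#
#   repo = MavenRepository('central', 'https://repo1.maven.org/maven2')
#   artifact = Artifact.from_id('com.example:demo-lib:1.2.3')
#   pom = repo.download_pom(artifact)
#   if pom is not None:
#       for dep in pom_eval_deps(pom):
#           print(dep, dep.scope, dep.optional)
#       repo.download_jar(artifact, artifact.to_local_path('jar'))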
|
|
import datetime
import aiohttp
import asyncio
import praw
import prawcore
import logging
import functools
import re
from errors import RetryRequestLater
log = logging.getLogger(__name__)
def retry(seconds):
def decorator(func):
@functools.wraps(func)
async def wrapped(self, *args, **kwargs):
while True:
try:
result = await func(self, *args, **kwargs)
except (OSError,
RetryRequestLater,
aiohttp.ClientResponseError,
praw.exceptions.APIException,
asyncio.TimeoutError,
prawcore.exceptions.PrawcoreException) as e:
subreddit = self.subreddit.name
exc = f'{e.__class__.__module__}.{e.__class__.__qualname__}: {e}'
log.error('%s (for /r/%s) failed with %s. Retrying in %ds', func.__name__, subreddit, exc, seconds)
await asyncio.sleep(seconds)
continue
else:
return result
return wrapped
return decorator
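# A minimal sketch of how the retry decorator is meant to be applied; the class
# and method below are hypothetical and only illustrate that the decorated
# coroutine keeps retrying after the listed transient errors. The decorator
# expects the instance to expose a ``subreddit`` attribute with a ``name``,
# which it uses in the error log:
#
#   class ExampleTask:
#       def __init__(self, bot, subreddit):
#           self.bot = bot
#           self.subreddit = subreddit
#       @retry(30.0)
#       async def fetch_something(self):
#           # any OSError/aiohttp/praw/asyncio error here triggers a 30s sleep
#           # followed by another attempt; a normal return ends the loop
#           return await self.bot.twitch.get_streams([])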
SIDEBAR_REGEX = re.compile(r'###### START STREAM LIST(.*?)###### END STREAM LIST', re.DOTALL)
MAX_SIDEBAR_LENGTH = 10240
MAX_WIDGET_LENGTH = 10000
def sanitize_input(data):
"""Sanitizes input for reddit markdown tables"""
# TODO: maybe the rest of markdown?
return data.replace('|', '&#124;').replace('\n', '').replace('*', '\\*')
class SubredditTask:
"""Represents an asynchronous task run at a specific time.
This actually handles the work in a specific subreddit.
"""
def __init__(self, bot, subreddit):
self.bot = bot
self.subreddit = subreddit
self.time = datetime.datetime.utcnow()
self._fetched_game_ids = False
@retry(2 * 60.0)
async def get_streams(self):
formats = self.subreddit.format
if not self._fetched_game_ids:
mapping = await self.bot.twitch.get_game_ids(formats.keys())
self.subreddit.game_ids = mapping
self._fetched_game_ids = True
self.bot.save_config()
game_ids = self.subreddit.game_ids
# Sometimes our game_ids can have keys that aren't in the format.
# Just ignore those
to_pass = [game_id for game_id, name in game_ids.items() if name in formats]
streams = await self.bot.twitch.get_streams(to_pass)
# Convert game_ids into game names
for stream in streams:
try:
stream.game = game_ids[stream.game]
except KeyError:
log.warning('Could not find a game_id associated with %s.', stream.game)
streams.sort(key=lambda s: s.viewers, reverse=True)
game_names = ', '.join(repr(x) for x in formats)
log.info('Fetched %d streams for /r/%s: %s', len(streams), self.subreddit.name, game_names)
return streams
@property
def sub(self):
return self.bot.reddit.subreddit(self.subreddit.name)
def get_updated_sidebar_portion(self, streams):
result = ['###### START STREAM LIST\n']
for stream in streams:
fmt = self.subreddit.format.get(stream.game)
# None or empty string
if not fmt:
fmt = '[{name}]({url}) {viewers} viewers'
fmt = f'- {fmt}'
result.append(fmt.format(name=stream.name, url=stream.url, viewers=stream.viewers))
result.append('\n###### END STREAM LIST')
return '\n'.join(result)
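# For illustration, with the default per-game format the generated block looks
# roughly like this (stream names, URLs and viewer counts are made up):
#
#   ###### START STREAM LIST
#
#   - [some_streamer](https://twitch.tv/some_streamer) 1234 viewers
#   - [another_streamer](https://twitch.tv/another_streamer) 567 viewers
#
#   ###### END STREAM LIST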
def _update_sidebar(self, streams):
sidebar = self.sub.wiki['config/sidebar']
old_sidebar = sidebar.content_md
count = self.subreddit.top_cut
while count != 0:
to_replace = self.get_updated_sidebar_portion(streams[0:count])
new_sidebar = SIDEBAR_REGEX.sub(to_replace, old_sidebar, count=1)
if len(new_sidebar) <= MAX_SIDEBAR_LENGTH:
sidebar.edit(content=new_sidebar)
break
count = count // 2
log.info('Sidebar for %s too long. Trying again with %d streams.', self.subreddit.name, count)
log.info('Sidebar update complete for /r/%s.', self.subreddit.name)
@retry(60.0)
async def update_sidebar(self, streams):
await self.bot.loop.run_in_executor(None, self._update_sidebar, streams)
def _get_widget(self):
if not self.subreddit.widget:
return None
widgets = self.sub.widgets.sidebar
types = (praw.models.CustomWidget, praw.models.TextArea)
for widget in widgets:
if isinstance(widget, types) and widget.shortName == self.subreddit.widget.name:
return widget
return None
def get_updated_widget_text(self, streams):
wiki_url = f'https://reddit.com/r/{self.subreddit.name}/wiki/{self.subreddit.wiki}'
if not self.subreddit.widget.table:
text = self.get_updated_sidebar_portion(streams)
return f'{text}\n\n[Check the full list here]({wiki_url})'
result = [
'Stream | Views',
':------:|:-----:',
]
for stream in streams:
result.append(f'[{stream.name}]({stream.url})|{stream.viewers}')
result.append(f'\n[Check the full list here]({wiki_url})')
return '\n'.join(result)
def _update_widget(self, streams):
widget = self._get_widget()
if widget is None:
log.info('No widget found for /r/%s', self.subreddit.name)
return
count = self.subreddit.top_cut
while count != 0:
text = self.get_updated_widget_text(streams)
if len(text) <= MAX_WIDGET_LENGTH:
widget.mod.update(text=text)
break
count = count // 2
log.info('Widget for %s too long. Trying again with %d streams.', self.subreddit.name, count)
log.info('Widget update complete for /r/%s.', self.subreddit.name)
@retry(60.0)
async def update_widget(self, streams):
await self.bot.loop.run_in_executor(None, self._update_widget, streams)
def _update_wiki(self, streams):
wiki = self.sub.wiki
name = self.subreddit.name
interval = self.bot.config.delay
fmt = '%b %d %Y at %I:%M %p UTC'
result = [
f'Welcome to the /r/{name} livestream page!\n',
f'This page is automatically updated by /u/{self.bot.config.username} and should not be edited. ' \
f'This page currently gets updated every {interval // 60} minutes. If something seems wrong, ' \
'please contact the subreddit moderators or /u/rapptz',
]
now = datetime.datetime.utcnow()
total = 0
result.append('### Streams')
result.append('')
result.append(f'This page was last updated on {now:{fmt}}\n')
result.append('Game Name | Stream | Viewers | Status ')
result.append(':---------|:-------|:-------:|:-------')
for stream in streams:
total += stream.viewers
title = sanitize_input(stream.title)
result.append(f'{stream.game}|[{stream.name}]({stream.url})|{stream.viewers}|{title}')
# Check maximum record
sub = self.subreddit
if sub.maximum is None or total > sub.maximum:
sub.maximum = total
sub.maximum_record = format(now, fmt)
self.bot.save_config()
result.append('')
result.append('### Statistics')
result.append('')
result.append(f'Total number of viewers: {total}\n')
result.append(f'Highest number of total viewers: {sub.maximum} on {sub.maximum_record}')
result.append('')
wikipage = wiki[self.subreddit.wiki]
wikipage.edit('\n'.join(result), reason='Bot action')
log.info('Wiki update complete for /r/%s', name)
@retry(60.0)
async def update_wiki(self, streams):
await self.bot.loop.run_in_executor(None, self._update_wiki, streams)
async def update(self):
delay = self.bot.config.delay
name = self.subreddit.name
while True:
try:
log.info('Beginning update on /r/%s', name)
streams = await self.get_streams()
await self.update_sidebar(streams)
await self.update_widget(streams)
await self.update_wiki(streams)
log.info('Completed update on /r/%s', name)
await asyncio.sleep(delay)
except KeyboardInterrupt:
log.info('Received keyboard interrupt signal on SubredditTask for /r/%s', name)
raise
|
|
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2011 Nathanael C. Fritz, Dann Martens (TOMOTON).
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.plugins import base
from sleekxmpp.plugins.xep_0009.stanza.RPC import RPCQuery, MethodCall, MethodResponse
from sleekxmpp.stanza.iq import Iq
from sleekxmpp.xmlstream.handler.callback import Callback
from sleekxmpp.xmlstream.matcher.xpath import MatchXPath
from sleekxmpp.xmlstream.stanzabase import register_stanza_plugin
from xml.etree import cElementTree as ET
import logging
log = logging.getLogger(__name__)
class xep_0009(base.base_plugin):
def plugin_init(self):
self.xep = '0009'
self.description = 'Jabber-RPC'
#self.stanza = sleekxmpp.plugins.xep_0009.stanza
register_stanza_plugin(Iq, RPCQuery)
register_stanza_plugin(RPCQuery, MethodCall)
register_stanza_plugin(RPCQuery, MethodResponse)
self.xmpp.registerHandler(
Callback('RPC Call', MatchXPath('{%s}iq/{%s}query/{%s}methodCall' % (self.xmpp.default_ns, RPCQuery.namespace, RPCQuery.namespace)),
self._handle_method_call)
)
self.xmpp.registerHandler(
Callback('RPC Call', MatchXPath('{%s}iq/{%s}query/{%s}methodResponse' % (self.xmpp.default_ns, RPCQuery.namespace, RPCQuery.namespace)),
self._handle_method_response)
)
self.xmpp.registerHandler(
Callback('RPC Call', MatchXPath('{%s}iq/{%s}error' % (self.xmpp.default_ns, self.xmpp.default_ns)),
self._handle_error)
)
self.xmpp.add_event_handler('jabber_rpc_method_call', self._on_jabber_rpc_method_call)
self.xmpp.add_event_handler('jabber_rpc_method_response', self._on_jabber_rpc_method_response)
self.xmpp.add_event_handler('jabber_rpc_method_fault', self._on_jabber_rpc_method_fault)
self.xmpp.add_event_handler('jabber_rpc_error', self._on_jabber_rpc_error)
self.xmpp.add_event_handler('error', self._handle_error)
#self.activeCalls = []
def post_init(self):
base.base_plugin.post_init(self)
self.xmpp.plugin['xep_0030'].add_feature('jabber:iq:rpc')
self.xmpp.plugin['xep_0030'].add_identity('automation','rpc')
def make_iq_method_call(self, pto, pmethod, params):
iq = self.xmpp.makeIqSet()
iq.attrib['to'] = pto
iq.attrib['from'] = self.xmpp.boundjid.full
iq.enable('rpc_query')
iq['rpc_query']['method_call']['method_name'] = pmethod
iq['rpc_query']['method_call']['params'] = params
return iq
def make_iq_method_response(self, pid, pto, params):
iq = self.xmpp.makeIqResult(pid)
iq.attrib['to'] = pto
iq.attrib['from'] = self.xmpp.boundjid.full
iq.enable('rpc_query')
iq['rpc_query']['method_response']['params'] = params
return iq
def make_iq_method_response_fault(self, pid, pto, params):
iq = self.xmpp.makeIqResult(pid)
iq.attrib['to'] = pto
iq.attrib['from'] = self.xmpp.boundjid.full
iq.enable('rpc_query')
iq['rpc_query']['method_response']['params'] = None
iq['rpc_query']['method_response']['fault'] = params
return iq
# def make_iq_method_error(self, pto, pid, pmethod, params, code, type, condition):
# iq = self.xmpp.makeIqError(pid)
# iq.attrib['to'] = pto
# iq.attrib['from'] = self.xmpp.boundjid.full
# iq['error']['code'] = code
# iq['error']['type'] = type
# iq['error']['condition'] = condition
# iq['rpc_query']['method_call']['method_name'] = pmethod
# iq['rpc_query']['method_call']['params'] = params
# return iq
def _item_not_found(self, iq):
payload = iq.get_payload()
iq.reply().error().set_payload(payload)
iq['error']['code'] = '404'
iq['error']['type'] = 'cancel'
iq['error']['condition'] = 'item-not-found'
return iq
def _undefined_condition(self, iq):
payload = iq.get_payload()
iq.reply().error().set_payload(payload)
iq['error']['code'] = '500'
iq['error']['type'] = 'cancel'
iq['error']['condition'] = 'undefined-condition'
return iq
def _forbidden(self, iq):
payload = iq.get_payload()
iq.reply().error().set_payload(payload)
iq['error']['code'] = '403'
iq['error']['type'] = 'auth'
iq['error']['condition'] = 'forbidden'
return iq
def _recipient_unavailable(self, iq):
payload = iq.get_payload()
iq.reply().error().set_payload(payload)
iq['error']['code'] = '404'
iq['error']['type'] = 'wait'
iq['error']['condition'] = 'recipient-unavailable'
return iq
def _handle_method_call(self, iq):
type = iq['type']
if type == 'set':
log.debug("Incoming Jabber-RPC call from %s" % iq['from'])
self.xmpp.event('jabber_rpc_method_call', iq)
else:
if type == 'error' and iq['rpc_query'] is None:
self._handle_error(iq)
else:
log.debug("Incoming Jabber-RPC error from %s" % iq['from'])
self.xmpp.event('jabber_rpc_error', iq)
def _handle_method_response(self, iq):
if iq['rpc_query']['method_response']['fault'] is not None:
log.debug("Incoming Jabber-RPC fault from %s" % iq['from'])
#self._on_jabber_rpc_method_fault(iq)
self.xmpp.event('jabber_rpc_method_fault', iq)
else:
log.debug("Incoming Jabber-RPC response from %s" % iq['from'])
self.xmpp.event('jabber_rpc_method_response', iq)
def _handle_error(self, iq):
print("['XEP-0009']._handle_error -> ERROR! Iq is '%s'" % iq)
print("#######################")
print("### NOT IMPLEMENTED ###")
print("#######################")
def _on_jabber_rpc_method_call(self, iq, forwarded=False):
"""
A default handler for Jabber-RPC method call. If another
handler is registered, this one will defer and not run.
If this handler is called by your own custom handler with
forwarded set to True, then it will run as normal.
"""
if not forwarded and self.xmpp.event_handled('jabber_rpc_method_call') > 1:
return
# Reply with error by default
error = self.client.plugin['xep_0009']._item_not_found(iq)
error.send()
def _on_jabber_rpc_method_response(self, iq, forwarded=False):
"""
A default handler for Jabber-RPC method response. If another
handler is registered, this one will defer and not run.
If this handler is called by your own custom handler with
forwarded set to True, then it will run as normal.
"""
if not forwarded and self.xmpp.event_handled('jabber_rpc_method_response') > 1:
return
error = self.client.plugin['xep_0009']._recipient_unavailable(iq)
error.send()
def _on_jabber_rpc_method_fault(self, iq, forwarded=False):
"""
A default handler for Jabber-RPC fault response. If another
handler is registered, this one will defer and not run.
If this handler is called by your own custom handler with
forwarded set to True, then it will run as normal.
"""
if not forwarded and self.xmpp.event_handled('jabber_rpc_method_fault') > 1:
return
error = self.client.plugin['xep_0009']._recipient_unavailable(iq)
error.send()
def _on_jabber_rpc_error(self, iq, forwarded=False):
"""
A default handler for Jabber-RPC error response. If another
handler is registered, this one will defer and not run.
If this handler is called by your own custom handler with
forwarded set to True, then it will run as normal.
"""
if not forwarded and self.xmpp.event_handled('jabber_rpc_error') > 1:
return
error = self.client.plugin['xep_0009']._recipient_unavailable(iq)
error.send()
def _send_fault(self, iq, fault_xml): #
fault = self.make_iq_method_response_fault(iq['id'], iq['from'], fault_xml)
fault.send()
def _send_error(self, iq):
print("['XEP-0009']._send_error -> ERROR! Iq is '%s'" % iq)
print("#######################")
print("### NOT IMPLEMENTED ###")
print("#######################")
def _extract_method(self, stanza):
xml = ET.fromstring("%s" % stanza)
return xml.find("./methodCall/methodName").text
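# A minimal usage sketch (not part of the plugin). It assumes a normal
# SleekXMPP ClientXMPP session and the py2xml helper from
# sleekxmpp.plugins.xep_0009.binding for marshalling Python values into
# XML-RPC <params>; the JIDs and method name are hypothetical:
#
#   from sleekxmpp import ClientXMPP
#   from sleekxmpp.plugins.xep_0009.binding import py2xml
#
#   xmpp = ClientXMPP('caller@example.com/rpc', 'secret')
#   xmpp.register_plugin('xep_0009')
#   # ... connect() / process() as usual, then:
#   iq = xmpp.plugin['xep_0009'].make_iq_method_call(
#       'responder@example.com/rpc', 'math.add', py2xml(2, 3))
#   iq.send()
#   # the reply is surfaced through the 'jabber_rpc_method_response' event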
|
|
import functools
import itertools
import warnings
from inspect import getfullargspec
import numpy as np
from ..core.formatting import format_item
from .utils import (
_infer_xy_labels, _process_cmap_cbar_kwargs,
import_matplotlib_pyplot, label_from_attrs)
# Overrides axes.labelsize, xtick.major.size, ytick.major.size
# from mpl.rcParams
_FONTSIZE = 'small'
# For major ticks on x, y axes
_NTICKS = 5
def _nicetitle(coord, value, maxchar, template):
"""
Put coord, value in template and truncate at maxchar
"""
prettyvalue = format_item(value, quote_strings=False)
title = template.format(coord=coord, value=prettyvalue)
if len(title) > maxchar:
title = title[:(maxchar - 3)] + '...'
return title
class FacetGrid(object):
"""
Initialize the matplotlib figure and FacetGrid object.
The :class:`FacetGrid` is an object that links a xarray DataArray to
a matplotlib figure with a particular structure.
In particular, :class:`FacetGrid` is used to draw plots with multiple
Axes where each Axes shows the same relationship conditioned on
different levels of some dimension. It's possible to condition on up to
two variables by assigning variables to the rows and columns of the
grid.
The general approach to plotting here is called "small multiples",
where the same kind of plot is repeated multiple times, and the
specific use of small multiples to display the same relationship
conditioned on one or more other variables is often called a "trellis
plot".
The basic workflow is to initialize the :class:`FacetGrid` object with
the DataArray and the variable names that are used to structure the grid.
Then plotting functions can be applied to each subset by calling
:meth:`FacetGrid.map_dataarray` or :meth:`FacetGrid.map`.
Attributes
----------
axes : numpy object array
Contains axes in corresponding position, as returned from
plt.subplots
fig : matplotlib.Figure
The figure containing all the axes
name_dicts : numpy object array
Contains dictionaries mapping coordinate names to values. None is
used as a sentinel value for axes which should remain empty, i.e.
sometimes the bottom right grid
"""
def __init__(self, data, col=None, row=None, col_wrap=None,
sharex=True, sharey=True, figsize=None, aspect=1, size=3,
subplot_kws=None):
"""
Parameters
----------
data : DataArray
xarray DataArray to be plotted
row, col : strings
Dimension names that define subsets of the data, which will be drawn
on separate facets in the grid.
col_wrap : int, optional
"Wrap" the column variable at this width, so that the column facets
sharex : bool, optional
If true, the facets will share x axes
sharey : bool, optional
If true, the facets will share y axes
figsize : tuple, optional
A tuple (width, height) of the figure in inches.
If set, overrides ``size`` and ``aspect``.
aspect : scalar, optional
Aspect ratio of each facet, so that ``aspect * size`` gives the
width of each facet in inches
size : scalar, optional
Height (in inches) of each facet. See also: ``aspect``
subplot_kws : dict, optional
Dictionary of keyword arguments for matplotlib subplots
"""
plt = import_matplotlib_pyplot()
# Handle corner case of nonunique coordinates
rep_col = col is not None and not data[col].to_index().is_unique
rep_row = row is not None and not data[row].to_index().is_unique
if rep_col or rep_row:
raise ValueError('Coordinates used for faceting cannot '
'contain repeated (nonunique) values.')
# single_group is the grouping variable, if there is exactly one
if col and row:
single_group = False
nrow = len(data[row])
ncol = len(data[col])
nfacet = nrow * ncol
if col_wrap is not None:
warnings.warn('Ignoring col_wrap since both col and row '
'were passed')
elif row and not col:
single_group = row
elif not row and col:
single_group = col
else:
raise ValueError(
'Pass a coordinate name as an argument for row or col')
# Compute grid shape
if single_group:
nfacet = len(data[single_group])
if col:
# idea - could add heuristic for nice shapes like 3x4
ncol = nfacet
if row:
ncol = 1
if col_wrap is not None:
# Overrides previous settings
ncol = col_wrap
nrow = int(np.ceil(nfacet / ncol))
# Set the subplot kwargs
subplot_kws = {} if subplot_kws is None else subplot_kws
if figsize is None:
# Calculate the base figure size with extra horizontal space for a
# colorbar
cbar_space = 1
figsize = (ncol * size * aspect + cbar_space, nrow * size)
fig, axes = plt.subplots(nrow, ncol,
sharex=sharex, sharey=sharey, squeeze=False,
figsize=figsize, subplot_kw=subplot_kws)
# Set up the lists of names for the row and column facet variables
col_names = list(data[col].values) if col else []
row_names = list(data[row].values) if row else []
if single_group:
full = [{single_group: x} for x in
data[single_group].values]
empty = [None for x in range(nrow * ncol - len(full))]
name_dicts = full + empty
else:
rowcols = itertools.product(row_names, col_names)
name_dicts = [{row: r, col: c} for r, c in rowcols]
name_dicts = np.array(name_dicts).reshape(nrow, ncol)
# Set up the class attributes
# ---------------------------
# First the public API
self.data = data
self.name_dicts = name_dicts
self.fig = fig
self.axes = axes
self.row_names = row_names
self.col_names = col_names
# Next the private variables
self._single_group = single_group
self._nrow = nrow
self._row_var = row
self._ncol = ncol
self._col_var = col
self._col_wrap = col_wrap
self._x_var = None
self._y_var = None
self._cmap_extend = None
self._mappables = []
self._finalized = False
@property
def _left_axes(self):
return self.axes[:, 0]
@property
def _bottom_axes(self):
return self.axes[-1, :]
def map_dataarray(self, func, x, y, **kwargs):
"""
Apply a plotting function to a 2d facet's subset of the data.
This is more convenient and less general than ``FacetGrid.map``
Parameters
----------
func : callable
A plotting function with the same signature as a 2d xarray
plotting method such as `xarray.plot.imshow`
x, y : string
Names of the coordinates to plot on x, y axes
kwargs :
additional keyword arguments to func
Returns
-------
self : FacetGrid object
"""
if kwargs.get('cbar_ax', None) is not None:
raise ValueError('cbar_ax not supported by FacetGrid.')
cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs(
func, kwargs, self.data.values)
self._cmap_extend = cmap_params.get('extend')
# Order is important
func_kwargs = kwargs.copy()
func_kwargs.update(cmap_params)
func_kwargs.update({'add_colorbar': False, 'add_labels': False})
# Get x, y labels for the first subplot
x, y = _infer_xy_labels(
darray=self.data.loc[self.name_dicts.flat[0]], x=x, y=y,
imshow=func.__name__ == 'imshow', rgb=kwargs.get('rgb', None))
for d, ax in zip(self.name_dicts.flat, self.axes.flat):
# None is the sentinel value
if d is not None:
subset = self.data.loc[d]
mappable = func(subset, x=x, y=y, ax=ax, **func_kwargs)
self._mappables.append(mappable)
self._cmap_extend = cmap_params.get('extend')
self._finalize_grid(x, y)
if kwargs.get('add_colorbar', True):
self.add_colorbar(**cbar_kwargs)
return self
def map_dataarray_line(self, func, x, y, **kwargs):
from .plot import _infer_line_data
add_legend = kwargs.pop('add_legend', True)
kwargs['add_legend'] = False
func_kwargs = kwargs.copy()
func_kwargs['_labels'] = False
for d, ax in zip(self.name_dicts.flat, self.axes.flat):
# None is the sentinel value
if d is not None:
subset = self.data.loc[d]
mappable = func(subset, x=x, y=y, ax=ax, **func_kwargs)
self._mappables.append(mappable)
_, _, hueplt, xlabel, ylabel, huelabel = _infer_line_data(
darray=self.data.loc[self.name_dicts.flat[0]],
x=x, y=y, hue=func_kwargs['hue'])
self._hue_var = hueplt
self._hue_label = huelabel
self._finalize_grid(xlabel, ylabel)
if add_legend and hueplt is not None and huelabel is not None:
self.add_legend()
return self
def _finalize_grid(self, *axlabels):
"""Finalize the annotations and layout."""
if not self._finalized:
self.set_axis_labels(*axlabels)
self.set_titles()
self.fig.tight_layout()
for ax, namedict in zip(self.axes.flat, self.name_dicts.flat):
if namedict is None:
ax.set_visible(False)
self._finalized = True
def add_legend(self, **kwargs):
figlegend = self.fig.legend(
handles=self._mappables[-1],
labels=list(self._hue_var.values),
title=self._hue_label,
loc="center right", **kwargs)
# Draw the plot to set the bounding boxes correctly
self.fig.draw(self.fig.canvas.get_renderer())
# Calculate and set the new width of the figure so the legend fits
legend_width = figlegend.get_window_extent().width / self.fig.dpi
figure_width = self.fig.get_figwidth()
self.fig.set_figwidth(figure_width + legend_width)
# Draw the plot again to get the new transformations
self.fig.draw(self.fig.canvas.get_renderer())
# Now calculate how much space we need on the right side
legend_width = figlegend.get_window_extent().width / self.fig.dpi
space_needed = legend_width / (figure_width + legend_width) + 0.02
# margin = .01
# _space_needed = margin + space_needed
right = 1 - space_needed
# Place the subplot axes to give space for the legend
self.fig.subplots_adjust(right=right)
def add_colorbar(self, **kwargs):
"""Draw a colorbar
"""
kwargs = kwargs.copy()
if self._cmap_extend is not None:
kwargs.setdefault('extend', self._cmap_extend)
if 'label' not in kwargs:
kwargs.setdefault('label', label_from_attrs(self.data))
self.cbar = self.fig.colorbar(self._mappables[-1],
ax=list(self.axes.flat),
**kwargs)
return self
def set_axis_labels(self, x_var=None, y_var=None):
"""Set axis labels on the left column and bottom row of the grid."""
if x_var is not None:
if x_var in self.data.coords:
self._x_var = x_var
self.set_xlabels(label_from_attrs(self.data[x_var]))
else:
# x_var is a string
self.set_xlabels(x_var)
if y_var is not None:
if y_var in self.data.coords:
self._y_var = y_var
self.set_ylabels(label_from_attrs(self.data[y_var]))
else:
self.set_ylabels(y_var)
return self
def set_xlabels(self, label=None, **kwargs):
"""Label the x axis on the bottom row of the grid."""
if label is None:
label = label_from_attrs(self.data[self._x_var])
for ax in self._bottom_axes:
ax.set_xlabel(label, **kwargs)
return self
def set_ylabels(self, label=None, **kwargs):
"""Label the y axis on the left column of the grid."""
if label is None:
label = label_from_attrs(self.data[self._y_var])
for ax in self._left_axes:
ax.set_ylabel(label, **kwargs)
return self
def set_titles(self, template="{coord} = {value}", maxchar=30,
**kwargs):
"""
Draw titles either above each facet or on the grid margins.
Parameters
----------
template : string
Template for plot titles containing {coord} and {value}
maxchar : int
Truncate titles at maxchar
kwargs : keyword args
additional arguments to matplotlib.text
Returns
-------
self: FacetGrid object
"""
import matplotlib as mpl
kwargs["size"] = kwargs.pop("size", mpl.rcParams["axes.labelsize"])
nicetitle = functools.partial(_nicetitle, maxchar=maxchar,
template=template)
if self._single_group:
for d, ax in zip(self.name_dicts.flat, self.axes.flat):
# Only label the ones with data
if d is not None:
coord, value = list(d.items()).pop()
title = nicetitle(coord, value, maxchar=maxchar)
ax.set_title(title, **kwargs)
else:
# The row titles on the right edge of the grid
for ax, row_name in zip(self.axes[:, -1], self.row_names):
title = nicetitle(coord=self._row_var, value=row_name,
maxchar=maxchar)
ax.annotate(title, xy=(1.02, .5), xycoords="axes fraction",
rotation=270, ha="left", va="center", **kwargs)
# The column titles on the top row
for ax, col_name in zip(self.axes[0, :], self.col_names):
title = nicetitle(coord=self._col_var, value=col_name,
maxchar=maxchar)
ax.set_title(title, **kwargs)
return self
def set_ticks(self, max_xticks=_NTICKS, max_yticks=_NTICKS,
fontsize=_FONTSIZE):
"""
Set and control tick behavior
Parameters
----------
max_xticks, max_yticks : int, optional
Maximum number of labeled ticks to plot on x, y axes
fontsize : string or int
Font size as used by matplotlib text
Returns
-------
self : FacetGrid object
"""
from matplotlib.ticker import MaxNLocator
# Both are necessary
x_major_locator = MaxNLocator(nbins=max_xticks)
y_major_locator = MaxNLocator(nbins=max_yticks)
for ax in self.axes.flat:
ax.xaxis.set_major_locator(x_major_locator)
ax.yaxis.set_major_locator(y_major_locator)
for tick in itertools.chain(ax.xaxis.get_major_ticks(),
ax.yaxis.get_major_ticks()):
tick.label.set_fontsize(fontsize)
return self
def map(self, func, *args, **kwargs):
"""
Apply a plotting function to each facet's subset of the data.
Parameters
----------
func : callable
A plotting function that takes data and keyword arguments. It
must plot to the currently active matplotlib Axes and take a
`color` keyword argument. If faceting on the `hue` dimension,
it must also take a `label` keyword argument.
args : strings
Column names in self.data that identify variables with data to
plot. The data for each variable is passed to `func` in the
order the variables are specified in the call.
kwargs : keyword arguments
All keyword arguments are passed to the plotting function.
Returns
-------
self : FacetGrid object
"""
plt = import_matplotlib_pyplot()
for ax, namedict in zip(self.axes.flat, self.name_dicts.flat):
if namedict is not None:
data = self.data.loc[namedict]
plt.sca(ax)
innerargs = [data[a].values for a in args]
maybe_mappable = func(*innerargs, **kwargs)
# TODO: better way to verify that an artist is mappable?
# https://stackoverflow.com/questions/33023036/is-it-possible-to-detect-if-a-matplotlib-artist-is-a-mappable-suitable-for-use-w#33023522
if (maybe_mappable and
hasattr(maybe_mappable, 'autoscale_None')):
self._mappables.append(maybe_mappable)
self._finalize_grid(*args[:2])
return self
def _easy_facetgrid(data, plotfunc, kind, x=None, y=None, row=None,
col=None, col_wrap=None, sharex=True, sharey=True,
aspect=None, size=None, subplot_kws=None, **kwargs):
"""
Convenience method to call xarray.plot.FacetGrid from 2d plotting methods
kwargs are the arguments to 2d plotting method
"""
ax = kwargs.pop('ax', None)
figsize = kwargs.pop('figsize', None)
if ax is not None:
raise ValueError("Can't use axes when making faceted plots.")
if aspect is None:
aspect = 1
if size is None:
size = 3
elif figsize is not None:
raise ValueError('cannot provide both `figsize` and `size` arguments')
g = FacetGrid(data=data, col=col, row=row, col_wrap=col_wrap,
sharex=sharex, sharey=sharey, figsize=figsize,
aspect=aspect, size=size, subplot_kws=subplot_kws)
if kind == 'line':
return g.map_dataarray_line(plotfunc, x, y, **kwargs)
if kind == 'dataarray':
return g.map_dataarray(plotfunc, x, y, **kwargs)
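# A minimal usage sketch (illustrative, not part of the module). In normal use
# the grid is created for you by the high-level plotting API, e.g.
# ``da.plot(col='time', col_wrap=3)``; constructing it by hand looks roughly
# like this, assuming ``da`` is a 3-d DataArray with 'lon', 'lat' and 'time'
# coordinates:
#
#   import xarray.plot as xplt
#   g = FacetGrid(da, col='time', col_wrap=3)
#   g.map_dataarray(xplt.pcolormesh, 'lon', 'lat')
#   g.set_titles(template='{coord} = {value}', maxchar=20)
#   g.set_ticks(max_xticks=4, max_yticks=4)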
|
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.network import base
from tempest.common.utils import data_utils
from tempest import test
class LoadBalancerTestJSON(base.BaseNetworkTest):
_interface = 'json'
"""
Tests the following operations in the Neutron API using the REST client for
Neutron:
create vIP and pool
show vIP
list vIP
update vIP
delete vIP
update pool
delete pool
show pool
list pool
health monitoring operations
"""
@classmethod
@test.safe_setup
def setUpClass(cls):
super(LoadBalancerTestJSON, cls).setUpClass()
if not test.is_extension_enabled('lbaas', 'network'):
msg = "lbaas extension not enabled."
raise cls.skipException(msg)
cls.network = cls.create_network()
cls.name = cls.network['name']
cls.subnet = cls.create_subnet(cls.network)
pool_name = data_utils.rand_name('pool-')
vip_name = data_utils.rand_name('vip-')
cls.pool = cls.create_pool(pool_name, "ROUND_ROBIN",
"HTTP", cls.subnet)
cls.vip = cls.create_vip(name=vip_name,
protocol="HTTP",
protocol_port=80,
subnet=cls.subnet,
pool=cls.pool)
cls.member = cls.create_member(80, cls.pool)
cls.health_monitor = cls.create_health_monitor(delay=4,
max_retries=3,
Type="TCP",
timeout=1)
def _check_list_with_filter(self, obj_name, attr_exceptions, **kwargs):
create_obj = getattr(self.client, 'create_' + obj_name)
delete_obj = getattr(self.client, 'delete_' + obj_name)
list_objs = getattr(self.client, 'list_' + obj_name + 's')
_, body = create_obj(**kwargs)
obj = body[obj_name]
self.addCleanup(delete_obj, obj['id'])
for key, value in obj.iteritems():
# It is not relevant to filter by all arguments. That is why
# there is a list of attr to except
if key not in attr_exceptions:
_, body = list_objs(**{key: value})
objs = [v[key] for v in body[obj_name + 's']]
self.assertIn(value, objs)
@test.attr(type='smoke')
def test_list_vips(self):
# Verify the vIP exists in the list of all vIPs
_, body = self.client.list_vips()
vips = body['vips']
self.assertIn(self.vip['id'], [v['id'] for v in vips])
@test.attr(type='smoke')
def test_list_vips_with_filter(self):
name = data_utils.rand_name('vip-')
_, body = self.client.create_pool(name=data_utils.rand_name("pool-"),
lb_method="ROUND_ROBIN",
protocol="HTTPS",
subnet_id=self.subnet['id'])
pool = body['pool']
self.addCleanup(self.client.delete_pool, pool['id'])
attr_exceptions = ['status', 'session_persistence',
'status_description']
self._check_list_with_filter(
'vip', attr_exceptions, name=name, protocol="HTTPS",
protocol_port=81, subnet_id=self.subnet['id'], pool_id=pool['id'],
description=data_utils.rand_name('description-'),
admin_state_up=False)
@test.attr(type='smoke')
def test_create_update_delete_pool_vip(self):
# Creates a vip
name = data_utils.rand_name('vip-')
address = self.subnet['allocation_pools'][0]['end']
resp, body = self.client.create_pool(
name=data_utils.rand_name("pool-"),
lb_method='ROUND_ROBIN',
protocol='HTTP',
subnet_id=self.subnet['id'])
pool = body['pool']
_, body = self.client.create_vip(name=name,
protocol="HTTP",
protocol_port=80,
subnet_id=self.subnet['id'],
pool_id=pool['id'],
address=address)
vip = body['vip']
vip_id = vip['id']
# Confirm VIP's address correctness with a show
_, body = self.client.show_vip(vip_id)
vip = body['vip']
self.assertEqual(address, vip['address'])
# Verification of vip update
new_name = "New_vip"
new_description = "New description"
persistence_type = "HTTP_COOKIE"
update_data = {"session_persistence": {
"type": persistence_type}}
_, body = self.client.update_vip(vip_id,
name=new_name,
description=new_description,
connection_limit=10,
admin_state_up=False,
**update_data)
updated_vip = body['vip']
self.assertEqual(new_name, updated_vip['name'])
self.assertEqual(new_description, updated_vip['description'])
self.assertEqual(10, updated_vip['connection_limit'])
self.assertFalse(updated_vip['admin_state_up'])
self.assertEqual(persistence_type,
updated_vip['session_persistence']['type'])
self.client.delete_vip(vip['id'])
self.client.wait_for_resource_deletion('vip', vip['id'])
# Verification of pool update
new_name = "New_pool"
_, body = self.client.update_pool(pool['id'],
name=new_name,
description="new_description",
lb_method='LEAST_CONNECTIONS')
updated_pool = body['pool']
self.assertEqual(new_name, updated_pool['name'])
self.assertEqual('new_description', updated_pool['description'])
self.assertEqual('LEAST_CONNECTIONS', updated_pool['lb_method'])
self.client.delete_pool(pool['id'])
@test.attr(type='smoke')
def test_show_vip(self):
# Verifies the details of a vip
_, body = self.client.show_vip(self.vip['id'])
vip = body['vip']
for key, value in vip.iteritems():
# 'status' should not be confirmed in api tests
if key != 'status':
self.assertEqual(self.vip[key], value)
@test.attr(type='smoke')
def test_show_pool(self):
# Here we need a new pool that has no dependency on any vip
_, body = self.client.create_pool(name=data_utils.rand_name("pool-"),
lb_method='ROUND_ROBIN',
protocol='HTTP',
subnet_id=self.subnet['id'])
pool = body['pool']
self.addCleanup(self.client.delete_pool, pool['id'])
# Verifies the details of a pool
_, body = self.client.show_pool(pool['id'])
shown_pool = body['pool']
for key, value in pool.iteritems():
# 'status' should not be confirmed in api tests
if key != 'status':
self.assertEqual(value, shown_pool[key])
@test.attr(type='smoke')
def test_list_pools(self):
# Verify the pool exists in the list of all pools
_, body = self.client.list_pools()
pools = body['pools']
self.assertIn(self.pool['id'], [p['id'] for p in pools])
@test.attr(type='smoke')
def test_list_pools_with_filters(self):
attr_exceptions = ['status', 'vip_id', 'members', 'provider',
'status_description']
self._check_list_with_filter(
'pool', attr_exceptions, name=data_utils.rand_name("pool-"),
lb_method="ROUND_ROBIN", protocol="HTTPS",
subnet_id=self.subnet['id'],
description=data_utils.rand_name('description-'),
admin_state_up=False)
@test.attr(type='smoke')
def test_list_members(self):
# Verify the member exists in the list of all members
_, body = self.client.list_members()
members = body['members']
self.assertIn(self.member['id'], [m['id'] for m in members])
@test.attr(type='smoke')
def test_list_members_with_filters(self):
attr_exceptions = ['status', 'status_description']
self._check_list_with_filter('member', attr_exceptions,
address="10.0.9.47", protocol_port=80,
pool_id=self.pool['id'])
@test.attr(type='smoke')
def test_create_update_delete_member(self):
# Creates a member
_, body = self.client.create_member(address="10.0.9.47",
protocol_port=80,
pool_id=self.pool['id'])
member = body['member']
# Verification of member update
_, body = self.client.update_member(member['id'],
admin_state_up=False)
updated_member = body['member']
self.assertFalse(updated_member['admin_state_up'])
# Verification of member delete
self.client.delete_member(member['id'])
@test.attr(type='smoke')
def test_show_member(self):
# Verifies the details of a member
_, body = self.client.show_member(self.member['id'])
member = body['member']
for key, value in member.iteritems():
# 'status' should not be confirmed in api tests
if key != 'status':
self.assertEqual(self.member[key], value)
@test.attr(type='smoke')
def test_list_health_monitors(self):
# Verify the health monitor exists in the list of all health monitors
_, body = self.client.list_health_monitors()
health_monitors = body['health_monitors']
self.assertIn(self.health_monitor['id'],
[h['id'] for h in health_monitors])
@test.attr(type='smoke')
def test_list_health_monitors_with_filters(self):
attr_exceptions = ['status', 'status_description', 'pools']
self._check_list_with_filter('health_monitor', attr_exceptions,
delay=5, max_retries=4, type="TCP",
timeout=2)
@test.attr(type='smoke')
def test_create_update_delete_health_monitor(self):
# Creates a health_monitor
_, body = self.client.create_health_monitor(delay=4,
max_retries=3,
type="TCP",
timeout=1)
health_monitor = body['health_monitor']
# Verification of health_monitor update
_, body = (self.client.update_health_monitor
(health_monitor['id'],
admin_state_up=False))
updated_health_monitor = body['health_monitor']
self.assertFalse(updated_health_monitor['admin_state_up'])
# Verification of health_monitor delete
_, body = self.client.delete_health_monitor(health_monitor['id'])
@test.attr(type='smoke')
def test_create_health_monitor_http_type(self):
hm_type = "HTTP"
_, body = self.client.create_health_monitor(delay=4,
max_retries=3,
type=hm_type,
timeout=1)
health_monitor = body['health_monitor']
self.addCleanup(self.client.delete_health_monitor,
health_monitor['id'])
self.assertEqual(hm_type, health_monitor['type'])
@test.attr(type='smoke')
def test_update_health_monitor_http_method(self):
_, body = self.client.create_health_monitor(delay=4,
max_retries=3,
type="HTTP",
timeout=1)
health_monitor = body['health_monitor']
self.addCleanup(self.client.delete_health_monitor,
health_monitor['id'])
_, body = (self.client.update_health_monitor
(health_monitor['id'],
http_method="POST",
url_path="/home/user",
expected_codes="290"))
updated_health_monitor = body['health_monitor']
self.assertEqual("POST", updated_health_monitor['http_method'])
self.assertEqual("/home/user", updated_health_monitor['url_path'])
self.assertEqual("290", updated_health_monitor['expected_codes'])
@test.attr(type='smoke')
def test_show_health_monitor(self):
# Verifies the details of a health_monitor
_, body = self.client.show_health_monitor(self.health_monitor['id'])
health_monitor = body['health_monitor']
for key, value in health_monitor.iteritems():
# 'status' should not be confirmed in api tests
if key != 'status':
self.assertEqual(self.health_monitor[key], value)
@test.attr(type='smoke')
def test_associate_disassociate_health_monitor_with_pool(self):
# Verify that a health monitor can be associated with a pool
_, body = (self.client.associate_health_monitor_with_pool
(self.health_monitor['id'], self.pool['id']))
resp, body = self.client.show_health_monitor(
self.health_monitor['id'])
health_monitor = body['health_monitor']
resp, body = self.client.show_pool(self.pool['id'])
pool = body['pool']
self.assertIn(pool['id'],
[p['pool_id'] for p in health_monitor['pools']])
self.assertIn(health_monitor['id'], pool['health_monitors'])
# Verify that a health monitor can be disassociated from a pool
(self.client.disassociate_health_monitor_with_pool
(self.health_monitor['id'], self.pool['id']))
_, body = self.client.show_pool(self.pool['id'])
pool = body['pool']
resp, body = self.client.show_health_monitor(
self.health_monitor['id'])
health_monitor = body['health_monitor']
self.assertNotIn(health_monitor['id'], pool['health_monitors'])
self.assertNotIn(pool['id'],
[p['pool_id'] for p in health_monitor['pools']])
@test.attr(type='smoke')
def test_get_lb_pool_stats(self):
# Verify the details of pool stats
_, body = self.client.list_lb_pool_stats(self.pool['id'])
stats = body['stats']
self.assertIn("bytes_in", stats)
self.assertIn("total_connections", stats)
self.assertIn("active_connections", stats)
self.assertIn("bytes_out", stats)
@test.attr(type='smoke')
def test_update_list_of_health_monitors_associated_with_pool(self):
(self.client.associate_health_monitor_with_pool
(self.health_monitor['id'], self.pool['id']))
self.client.update_health_monitor(
self.health_monitor['id'], admin_state_up=False)
_, body = self.client.show_pool(self.pool['id'])
health_monitors = body['pool']['health_monitors']
for health_monitor_id in health_monitors:
_, body = self.client.show_health_monitor(health_monitor_id)
self.assertFalse(body['health_monitor']['admin_state_up'])
(self.client.disassociate_health_monitor_with_pool
(self.health_monitor['id'], self.pool['id']))
@test.attr(type='smoke')
def test_update_admin_state_up_of_pool(self):
self.client.update_pool(self.pool['id'],
admin_state_up=False)
_, body = self.client.show_pool(self.pool['id'])
pool = body['pool']
self.assertFalse(pool['admin_state_up'])
@test.attr(type='smoke')
def test_show_vip_associated_with_pool(self):
_, body = self.client.show_pool(self.pool['id'])
pool = body['pool']
_, body = self.client.show_vip(pool['vip_id'])
vip = body['vip']
self.assertEqual(self.vip['name'], vip['name'])
self.assertEqual(self.vip['id'], vip['id'])
@test.attr(type='smoke')
def test_show_members_associated_with_pool(self):
_, body = self.client.show_pool(self.pool['id'])
members = body['pool']['members']
for member_id in members:
_, body = self.client.show_member(member_id)
self.assertIsNotNone(body['member']['status'])
self.assertEqual(member_id, body['member']['id'])
self.assertIsNotNone(body['member']['admin_state_up'])
@test.attr(type='smoke')
def test_update_pool_related_to_member(self):
# Create new pool
_, body = self.client.create_pool(name=data_utils.rand_name("pool-"),
lb_method='ROUND_ROBIN',
protocol='HTTP',
subnet_id=self.subnet['id'])
new_pool = body['pool']
self.addCleanup(self.client.delete_pool, new_pool['id'])
# Update member with new pool's id
_, body = self.client.update_member(self.member['id'],
pool_id=new_pool['id'])
        # Confirm with show that pool_id changed
resp, body = self.client.show_member(self.member['id'])
member = body['member']
self.assertEqual(member['pool_id'], new_pool['id'])
        # Update member with the old pool id; this is needed for cleanup
_, body = self.client.update_member(self.member['id'],
pool_id=self.pool['id'])
@test.attr(type='smoke')
def test_update_member_weight(self):
self.client.update_member(self.member['id'],
weight=2)
_, body = self.client.show_member(self.member['id'])
member = body['member']
self.assertEqual(2, member['weight'])
class LoadBalancerTestXML(LoadBalancerTestJSON):
_interface = 'xml'
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Gulden test framework primitive and message structures
CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
data structures that should map to corresponding structures in
gulden/primitives
msg_block, msg_tx, msg_headers, etc.:
data structures that represent network messages
ser_*, deser_*: functions that handle serialization/deserialization.
Classes use __slots__ to ensure extraneous attributes aren't accidentally added
by tests, compromising their intended effect.
"""
from codecs import encode
import copy
import hashlib
from io import BytesIO
import random
import socket
import struct
import time
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, assert_equal
MIN_VERSION_SUPPORTED = 60001
MY_VERSION = 70020 # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MAX_LOCATOR_SZ = 101
MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000 # 1 btc in satoshis
BIP125_SEQUENCE_NUMBER = 0xfffffffd # Sequence number that is BIP 125 opt-in and BIP 68-opt-out
NODE_NETWORK = (1 << 0)
# NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
NODE_NETWORK_LIMITED = (1 << 10)
MSG_TX = 1
MSG_BLOCK = 2
MSG_WITNESS_FLAG = 1 << 30
MSG_TYPE_MASK = 0xffffffff >> 2
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def hash256(s):
return sha256(sha256(s))
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
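# Minimal round-trip sketch for the compact-size helpers above (illustrative
# only; the values are arbitrary).  Encoding and then decoding should return
# the original integer for each of the 1-, 3-, 5- and 9-byte encodings.
def _compact_size_roundtrip_sketch():
    for value in (0, 252, 253, 0xffff, 0x10000, 0x100000000):
        assert deser_compact_size(BytesIO(ser_compact_size(value))) == value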
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
return ser_compact_size(len(s)) + s
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
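# Worked example for the "compact" target encoding handled above (illustrative
# only).  For the value 0x207fffff (an intentionally easy proof-of-work target,
# as used on regtest-style networks) the exponent byte is 0x20 = 32, so the
# mantissa 0x7fffff is shifted left by 8*(32-3) = 232 bits.
def _uint256_from_compact_sketch():
    assert uint256_from_compact(0x207fffff) == 0x7fffff << 232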
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
r = ser_compact_size(len(l))
for i in l:
if ser_function_name:
r += getattr(i, ser_function_name)()
else:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return obj.serialize().hex()
# Objects that map to GuldenD objects, which can be serialized/deserialized
class CAddress:
__slots__ = ("ip", "nServices", "pchReserved", "port", "time")
def __init__(self):
self.time = 0
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f, with_time=True):
if with_time:
self.time = struct.unpack("<i", f.read(4))[0]
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self, with_time=True):
r = b""
if with_time:
r += struct.pack("<i", self.time)
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
class CInv:
__slots__ = ("hash", "type")
typemap = {
0: "Error",
1: "TX",
2: "Block",
1|MSG_WITNESS_FLAG: "WitnessTx",
2|MSG_WITNESS_FLAG : "WitnessBlock",
4: "CompactBlock"
}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator:
__slots__ = ("nVersion", "vHave")
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint:
__slots__ = ("hash", "n")
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn:
__slots__ = ("nSequence", "prevout", "scriptSig")
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), self.scriptSig.hex(),
self.nSequence)
class CTxOut:
__slots__ = ("nValue", "scriptPubKey")
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
self.scriptPubKey.hex())
class CScriptWitness:
__slots__ = ("stack",)
def __init__(self):
# stack is a vector of strings
self.stack = []
def __repr__(self):
return "CScriptWitness(%s)" % \
(",".join([x.hex() for x in self.stack]))
def is_null(self):
if self.stack:
return False
return True
class CTxInWitness:
__slots__ = ("scriptWitness",)
def __init__(self):
self.scriptWitness = CScriptWitness()
def deserialize(self, f):
self.scriptWitness.stack = deser_string_vector(f)
def serialize(self):
return ser_string_vector(self.scriptWitness.stack)
def __repr__(self):
return repr(self.scriptWitness)
def is_null(self):
return self.scriptWitness.is_null()
class CTxWitness:
__slots__ = ("vtxinwit",)
def __init__(self):
self.vtxinwit = []
def deserialize(self, f):
for i in range(len(self.vtxinwit)):
self.vtxinwit[i].deserialize(f)
def serialize(self):
r = b""
# This is different than the usual vector serialization --
# we omit the length of the vector, which is required to be
# the same length as the transaction's vin vector.
for x in self.vtxinwit:
r += x.serialize()
return r
def __repr__(self):
return "CTxWitness(%s)" % \
(';'.join([repr(x) for x in self.vtxinwit]))
def is_null(self):
for x in self.vtxinwit:
if not x.is_null():
return False
return True
class CTransaction:
__slots__ = ("hash", "nLockTime", "nVersion", "sha256", "vin", "vout",
"wit")
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.wit = CTxWitness()
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = tx.sha256
self.hash = tx.hash
self.wit = copy.deepcopy(tx.wit)
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
flags = 0
if len(self.vin) == 0:
flags = struct.unpack("<B", f.read(1))[0]
# Not sure why flags can't be zero, but this
# matches the implementation in GuldenD
if (flags != 0):
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
else:
self.vout = deser_vector(f, CTxOut)
if flags != 0:
self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))]
self.wit.deserialize(f)
else:
self.wit = CTxWitness()
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize_without_witness(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
# Only serialize with witness when explicitly called for
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
if (len(self.wit.vtxinwit) != len(self.vin)):
# vtxinwit must have the same length as vin
self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)]
for i in range(len(self.wit.vtxinwit), len(self.vin)):
self.wit.vtxinwit.append(CTxInWitness())
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
# Regular serialization is with witness -- must explicitly
# call serialize_without_witness to exclude witness data.
def serialize(self):
return self.serialize_with_witness()
# Recalculate the txid (transaction hash without witness)
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.hash
# We will only cache the serialization without witness in
# self.sha256 and self.hash -- those are expected to be the txid.
def calc_sha256(self, with_witness=False):
if with_witness:
# Don't cache the result, just return it
return uint256_from_str(hash256(self.serialize_with_witness()))
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
self.hash = encode(hash256(self.serialize_without_witness())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime)
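# Minimal sketch combining CTransaction with the ToHex/FromHex helpers above
# (illustrative only; the scriptSig/scriptPubKey bytes are arbitrary).
# ToHex/FromHex round-trip the serialization, and rehash() recomputes the txid
# from the serialization without witness data.
def _transaction_hex_roundtrip_sketch():
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(0, 0xffffffff), b"\x51", 0xffffffff))
    tx.vout.append(CTxOut(1 * COIN, b"\x51"))
    tx2 = FromHex(CTransaction(), ToHex(tx))
    assert tx2.rehash() == tx.rehash()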
def get_compact_size(inp):
    # Encode an integer in the variable-length "compact size" format used on
    # the wire (1, 3, 5 or 9 bytes, little-endian), mirroring ser_compact_size().
    if inp < 0xfd:
        return inp.to_bytes(1, 'little')
    elif inp <= 0xffff:
        return b'\xfd' + inp.to_bytes(2, 'little')
    elif inp <= 0xffffffff:
        return b'\xfe' + inp.to_bytes(4, 'little')
    else:
        return b'\xff' + inp.to_bytes(8, 'little')
def read_compact_size(f):
    # Decode a compact size from a stream, mirroring deser_compact_size().
    ni = struct.unpack("<B", f.read(1))[0]
    if ni < 253:
        return ni
    if ni == 253:    # integer of 2 bytes
        size = 2
    elif ni == 254:  # integer of 4 bytes
        size = 4
    else:            # integer of 8 bytes
        size = 8
    return int.from_bytes(f.read(size), 'little')
class CBlockHeader:
__slots__ = ("hash", "hashMerkleRoot", "hashPrevBlock", "nBits", "nNonce",
"nTime", "nVersion", "sha256", "nVersionPoW2Witness", "nTimePoW2Witness", "hashMerkleRootPoW2Witness", "witnessHeaderPoW2Sig", "witnessUTXODelta")
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.nVersionPoW2Witness = header.nVersionPoW2Witness
self.nTimePoW2Witness = header.nTimePoW2Witness
self.hashMerkleRootPoW2Witness = header.hashMerkleRootPoW2Witness
self.witnessHeaderPoW2Sig = header.witnessHeaderPoW2Sig
self.witnessUTXODelta = header.witnessUTXODelta
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersionPoW2Witness = 0
self.nTimePoW2Witness = 0
self.hashMerkleRootPoW2Witness = 0
self.nVersion = 1
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.witnessHeaderPoW2Sig = 0
self.witnessUTXODelta = 0
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersionPoW2Witness = struct.unpack("<i", f.read(4))[0]
self.nTimePoW2Witness = struct.unpack("<I", f.read(4))[0]
self.hashMerkleRootPoW2Witness = deser_uint256(f)
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
if self.nVersionPoW2Witness != 0:
self.witnessHeaderPoW2Sig = f.read(65)
deltaSize = read_compact_size(f)
self.witnessUTXODelta = f.read(deltaSize)
self.sha256 = None
self.hash = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersionPoW2Witness)
r += struct.pack("<I", self.nTimePoW2Witness)
r += ser_uint256(self.hashMerkleRootPoW2Witness)
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
        if self.nVersionPoW2Witness != 0:
            r += self.witnessHeaderPoW2Sig
            # get_compact_size() already returns the encoded length bytes, so
            # append them directly (re-packing with "<c" would fail for any
            # encoding longer than one byte).
            r += get_compact_size(len(self.witnessUTXODelta))
            r += self.witnessUTXODelta
return r
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
            if self.nVersionPoW2Witness != 0:
                r += self.witnessHeaderPoW2Sig
                # As in serialize(), append the compact-size bytes directly.
                r += get_compact_size(len(self.witnessUTXODelta))
                r += self.witnessUTXODelta
self.sha256 = uint256_from_str(hash256(r))
self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
#BLOCK_HEADER_SIZE = len(CBlockHeader().serialize())
#assert_equal(BLOCK_HEADER_SIZE, 80)
class CBlock(CBlockHeader):
__slots__ = ("vtx",)
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
if with_witness:
r += ser_vector(self.vtx, "serialize_with_witness")
else:
r += ser_vector(self.vtx, "serialize_without_witness")
return r
# Calculate the merkle root given a vector of transaction hashes
@classmethod
def get_merkle_root(cls, hashes):
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
return self.get_merkle_root(hashes)
def calc_witness_merkle_root(self):
# For witness root purposes, the hash of the
# coinbase, with witness, is defined to be 0...0
hashes = [ser_uint256(0)]
for tx in self.vtx[1:]:
# Calculate the hashes with witness data
hashes.append(ser_uint256(tx.calc_sha256(True)))
return self.get_merkle_root(hashes)
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.sha256 > target:
self.nNonce += 1
self.rehash()
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class PrefilledTransaction:
__slots__ = ("index", "tx")
def __init__(self, index=0, tx = None):
self.index = index
self.tx = tx
def deserialize(self, f):
self.index = deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
def serialize(self, with_witness=True):
r = b""
r += ser_compact_size(self.index)
if with_witness:
r += self.tx.serialize_with_witness()
else:
r += self.tx.serialize_without_witness()
return r
def serialize_without_witness(self):
return self.serialize(with_witness=False)
def serialize_with_witness(self):
return self.serialize(with_witness=True)
def __repr__(self):
return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs:
__slots__ = ("header", "nonce", "prefilled_txn", "prefilled_txn_length",
"shortids", "shortids_length")
def __init__(self):
self.header = CBlockHeader()
self.nonce = 0
self.shortids_length = 0
self.shortids = []
self.prefilled_txn_length = 0
self.prefilled_txn = []
def deserialize(self, f):
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
for i in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
self.prefilled_txn = deser_vector(f, PrefilledTransaction)
self.prefilled_txn_length = len(self.prefilled_txn)
# When using version 2 compact blocks, we must serialize with_witness.
def serialize(self, with_witness=False):
r = b""
r += self.header.serialize()
r += struct.pack("<Q", self.nonce)
r += ser_compact_size(self.shortids_length)
for x in self.shortids:
# We only want the first 6 bytes
r += struct.pack("<Q", x)[0:6]
if with_witness:
r += ser_vector(self.prefilled_txn, "serialize_with_witness")
else:
r += ser_vector(self.prefilled_txn, "serialize_without_witness")
return r
def __repr__(self):
return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
__slots__ = ()
def serialize(self):
return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
expected_shortid = siphash256(k0, k1, tx_hash)
expected_shortid &= 0x0000ffffffffffff
return expected_shortid
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs:
__slots__ = ("header", "nonce", "prefilled_txn", "shortids", "use_witness")
def __init__(self, p2pheaders_and_shortids = None):
self.header = CBlockHeader()
self.nonce = 0
self.shortids = []
self.prefilled_txn = []
self.use_witness = False
if p2pheaders_and_shortids is not None:
self.header = p2pheaders_and_shortids.header
self.nonce = p2pheaders_and_shortids.nonce
self.shortids = p2pheaders_and_shortids.shortids
last_index = -1
for x in p2pheaders_and_shortids.prefilled_txn:
self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
last_index = self.prefilled_txn[-1].index
def to_p2p(self):
if self.use_witness:
ret = P2PHeaderAndShortWitnessIDs()
else:
ret = P2PHeaderAndShortIDs()
ret.header = self.header
ret.nonce = self.nonce
ret.shortids_length = len(self.shortids)
ret.shortids = self.shortids
ret.prefilled_txn_length = len(self.prefilled_txn)
ret.prefilled_txn = []
last_index = -1
for x in self.prefilled_txn:
ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
last_index = x.index
return ret
def get_siphash_keys(self):
header_nonce = self.header.serialize()
header_nonce += struct.pack("<Q", self.nonce)
hash_header_nonce_as_str = sha256(header_nonce)
key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
return [ key0, key1 ]
# Version 2 compact blocks use wtxid in shortids (rather than txid)
def initialize_from_block(self, block, nonce=0, prefill_list = [0], use_witness = False):
self.header = CBlockHeader(block)
self.nonce = nonce
self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
self.shortids = []
self.use_witness = use_witness
[k0, k1] = self.get_siphash_keys()
for i in range(len(block.vtx)):
if i not in prefill_list:
tx_hash = block.vtx[i].sha256
if use_witness:
tx_hash = block.vtx[i].calc_sha256(with_witness=True)
self.shortids.append(calculate_shortid(k0, k1, tx_hash))
def __repr__(self):
return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest:
__slots__ = ("blockhash", "indexes")
def __init__(self, blockhash=0, indexes = None):
self.blockhash = blockhash
self.indexes = indexes if indexes is not None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
for i in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
r = b""
r += ser_uint256(self.blockhash)
r += ser_compact_size(len(self.indexes))
for x in self.indexes:
r += ser_compact_size(x)
return r
# helper to set the differentially encoded indexes from absolute ones
def from_absolute(self, absolute_indexes):
self.indexes = []
last_index = -1
for x in absolute_indexes:
self.indexes.append(x-last_index-1)
last_index = x
def to_absolute(self):
absolute_indexes = []
last_index = -1
for x in self.indexes:
absolute_indexes.append(x+last_index+1)
last_index = absolute_indexes[-1]
return absolute_indexes
def __repr__(self):
return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions:
__slots__ = ("blockhash", "transactions")
def __init__(self, blockhash=0, transactions = None):
self.blockhash = blockhash
self.transactions = transactions if transactions is not None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
self.transactions = deser_vector(f, CTransaction)
def serialize(self, with_witness=True):
r = b""
r += ser_uint256(self.blockhash)
if with_witness:
r += ser_vector(self.transactions, "serialize_with_witness")
else:
r += ser_vector(self.transactions, "serialize_without_witness")
return r
def __repr__(self):
return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
class CPartialMerkleTree:
__slots__ = ("nTransactions", "vBits", "vHash")
def __init__(self):
self.nTransactions = 0
self.vHash = []
self.vBits = []
def deserialize(self, f):
self.nTransactions = struct.unpack("<i", f.read(4))[0]
self.vHash = deser_uint256_vector(f)
vBytes = deser_string(f)
self.vBits = []
for i in range(len(vBytes) * 8):
self.vBits.append(vBytes[i//8] & (1 << (i % 8)) != 0)
def serialize(self):
r = b""
r += struct.pack("<i", self.nTransactions)
r += ser_uint256_vector(self.vHash)
vBytesArray = bytearray([0x00] * ((len(self.vBits) + 7)//8))
for i in range(len(self.vBits)):
vBytesArray[i // 8] |= self.vBits[i] << (i % 8)
r += ser_string(bytes(vBytesArray))
return r
def __repr__(self):
return "CPartialMerkleTree(nTransactions=%d, vHash=%s, vBits=%s)" % (self.nTransactions, repr(self.vHash), repr(self.vBits))
class CMerkleBlock:
__slots__ = ("header", "txn")
def __init__(self):
self.header = CBlockHeader()
self.txn = CPartialMerkleTree()
def deserialize(self, f):
self.header.deserialize(f)
self.txn.deserialize(f)
def serialize(self):
r = b""
r += self.header.serialize()
r += self.txn.serialize()
return r
def __repr__(self):
return "CMerkleBlock(header=%s, txn=%s)" % (repr(self.header), repr(self.txn))
# Objects that correspond to messages on the wire
class msg_version:
__slots__ = ("addrFrom", "addrTo", "nNonce", "nRelay", "nServices",
"nStartingHeight", "nTime", "nVersion", "strSubVer")
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = NODE_NETWORK | NODE_WITNESS
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
self.nRelay = MY_RELAY
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f, False)
self.addrFrom = CAddress()
self.addrFrom.deserialize(f, False)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
if self.nVersion >= 70001:
# Relay field is optional for version 70001 onwards
try:
self.nRelay = struct.unpack("<b", f.read(1))[0]
except:
self.nRelay = 0
else:
self.nRelay = 0
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize(False)
r += self.addrFrom.serialize(False)
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
r += struct.pack("<b", self.nRelay)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack:
__slots__ = ()
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr:
__slots__ = ("addrs",)
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_inv:
__slots__ = ("inv",)
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata:
__slots__ = ("inv",)
command = b"getdata"
def __init__(self, inv=None):
self.inv = inv if inv is not None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks:
__slots__ = ("locator", "hashstop")
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx:
__slots__ = ("tx",)
command = b"tx"
def __init__(self, tx=CTransaction()):
self.tx = tx
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize_without_witness()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_witness_tx(msg_tx):
__slots__ = ()
def serialize(self):
return self.tx.serialize_with_witness()
class msg_block:
__slots__ = ("block",)
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize(with_witness=False)
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic:
__slots__ = ("command", "data")
def __init__(self, command, data=None):
self.command = command
self.data = data
def serialize(self):
return self.data
def __repr__(self):
return "msg_generic()"
class msg_witness_block(msg_block):
__slots__ = ()
def serialize(self):
r = self.block.serialize(with_witness=True)
return r
class msg_getaddr:
__slots__ = ()
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping:
__slots__ = ("nonce",)
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong:
__slots__ = ("nonce",)
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool:
__slots__ = ()
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_notfound:
__slots__ = ("vec", )
command = b"notfound"
def __init__(self, vec=None):
self.vec = vec or []
def deserialize(self, f):
self.vec = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.vec)
def __repr__(self):
return "msg_notfound(vec=%s)" % (repr(self.vec))
class msg_sendheaders:
__slots__ = ()
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders:
__slots__ = ("hashstop", "locator",)
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers:
__slots__ = ("headers",)
command = b"headers"
def __init__(self, headers=None):
self.headers = headers if headers is not None else []
def deserialize(self, f):
# comment in GuldenD indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject:
__slots__ = ("code", "data", "message", "reason")
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
class msg_feefilter:
__slots__ = ("feerate",)
command = b"feefilter"
def __init__(self, feerate=0):
self.feerate = feerate
def deserialize(self, f):
self.feerate = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.feerate)
return r
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct:
__slots__ = ("announce", "version")
command = b"sendcmpct"
def __init__(self):
self.announce = False
self.version = 1
def deserialize(self, f):
self.announce = struct.unpack("<?", f.read(1))[0]
self.version = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<?", self.announce)
r += struct.pack("<Q", self.version)
return r
def __repr__(self):
return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock:
__slots__ = ("header_and_shortids",)
command = b"cmpctblock"
def __init__(self, header_and_shortids = None):
self.header_and_shortids = header_and_shortids
def deserialize(self, f):
self.header_and_shortids = P2PHeaderAndShortIDs()
self.header_and_shortids.deserialize(f)
def serialize(self):
r = b""
r += self.header_and_shortids.serialize()
return r
def __repr__(self):
return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn:
__slots__ = ("block_txn_request",)
command = b"getblocktxn"
def __init__(self):
self.block_txn_request = None
def deserialize(self, f):
self.block_txn_request = BlockTransactionsRequest()
self.block_txn_request.deserialize(f)
def serialize(self):
r = b""
r += self.block_txn_request.serialize()
return r
def __repr__(self):
return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn:
__slots__ = ("block_transactions",)
command = b"blocktxn"
def __init__(self):
self.block_transactions = BlockTransactions()
def deserialize(self, f):
self.block_transactions.deserialize(f)
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=False)
return r
def __repr__(self):
return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_witness_blocktxn(msg_blocktxn):
__slots__ = ()
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=True)
return r
"""
Line-based and matrix-based plotting commands using MatPlotLib.
Before importing this file, you will probably want to do something
like:
from matplotlib import rcParams
rcParams['backend']='TkAgg'
to select a backend, or else select an appropriate one in your
matplotlib.rc file (if any). There are many backends available for
different GUI or non-GUI uses.
$Id$
"""
__version__='$Revision$'
import param
try:
import matplotlib.ticker
import pylab
except ImportError:
param.Parameterized(name=__name__).warning("Could not import matplotlib; module will not be useable.")
from topo.command import ImportErrorRaisingFakeModule
pylab = ImportErrorRaisingFakeModule("matplotlib") # pyflakes:ignore (try/except import)
import numpy
from math import pi
# JABALERT: Import all of these from numpy instead?
from numpy.oldnumeric import sqrt, array, transpose, argmin, cos, sin, log10, Float
from numpy import outer,arange,ones,zeros
from numpy.fft.fftpack import fft2
from numpy.fft.helper import fftshift
from numpy import abs
import topo
from topo.base.sheetview import SheetView
from topo.base.arrayutil import centroid, wrap
from topo.base.sheet import Sheet
from topo.misc.util import frange
import topo.analysis.vision
from topo.plotting.plot import make_template_plot
import param
from param import ParameterizedFunction,normalize_path
from param.parameterized import ParamOverrides
from topo.pattern import SineGrating, OrientationContrast
from topo.plotting.plotgroup import create_plotgroup
from topo.base.cf import CFSheet
from topo.analysis.featureresponses import Feature, PatternPresenter
from topo.analysis.featureresponses import PositionMeasurementCommand, FeatureCurveCommand, UnitCurveCommand
from topo.command import Command
class PylabPlotCommand(Command):
"""Parameterized command for plotting using Matplotlib/Pylab."""
file_dpi = param.Number(
default=100.0,bounds=(0,None),softbounds=(0,1000),doc="""
Default DPI when rendering to a bitmap.
The nominal size * the dpi gives the final image size in pixels.
E.g.: 4"x4" image * 80 dpi ==> 320x320 pixel image.""")
file_format = param.String(default="png",doc="""
Which image format to use when saving images.
The output can be png, ps, pdf, svg, or any other format
supported by Matplotlib.""")
# JABALERT: Should replace this with a filename_format and
# associated parameters, as in PlotGroupSaver.
# Also should probably allow interactive display to be controlled
# separately from the filename, to make things work more similarly
# with and without a GUI.
filename = param.String(default=None,doc="""
Optional base of the filename to use when saving images;
if None the plot will be displayed interactively.
The actual name is constructed from the filename base plus the
suffix plus the current simulator time plus the file_format.""")
filename_suffix = param.String(default="",doc="""
Optional suffix to be used for disambiguation of the filename.""")
title = param.String(default=None,doc="""
Optional title to be used when displaying the plot interactively.""")
__abstract = True
def _set_windowtitle(self,title):
"""
Helper function to set the title (if not None) of this PyLab plot window.
"""
# At the moment, PyLab does not offer a window-manager-independent
# means for controlling the window title, so what we do is to try
# what should work with Tkinter, and then suppress all errors. That
# way we should be ok when rendering to a file-based backend, but
# will get nice titles in Tk windows. If other toolkits are in use,
# the title can be set here using a similar try/except mechanism, or
# else there can be a switch based on the backend type.
if title is not None:
try:
manager = pylab.get_current_fig_manager()
manager.window.title(title)
except:
pass
def _generate_figure(self,p):
"""
Helper function to display a figure on screen or save to a file.
p should be a ParamOverrides instance containing the current
set of parameters.
"""
pylab.show._needmain=False
if p.filename is not None:
# JABALERT: need to reformat this as for other plots
fullname=p.filename+p.filename_suffix+str(topo.sim.time())+"."+p.file_format
pylab.savefig(normalize_path(fullname), dpi=p.file_dpi)
else:
self._set_windowtitle(p.title)
pylab.show()
class vectorplot(PylabPlotCommand):
"""
Simple line plotting for any vector or list of numbers.
Intended for interactive debugging or analyzing from the command
prompt. See MatPlotLib's pylab functions to create more elaborate
or customized plots; this is just a simple example.
An optional string can be supplied as a title for the figure, if
desired. At present, this is only used for the window, not the
actual body of the figure (and will thus not appear when the
figure is saved).
The style argument allows different line/linespoints style for
the plot: 'r-' for red solid line, 'bx' for blue x-marks, etc.
See http://matplotlib.sourceforge.net/matplotlib.pylab.html#-plot
for more possibilities.
The label argument can be used to identify the line in a figure legend.
Ordinarily, the x value for each point on the line is the index of
    that point in the vec array, but an explicit list of xvalues can be
supplied; it should be the same length as vec.
Execution of multiple vectorplot() commands with different styles
will result in all those styles overlaid on a single plot window.
"""
# JABALERT: All but the first two arguments should probably be Parameters
def __call__(self,vec,xvalues=None,style='-',label=None,**params):
p=ParamOverrides(self,params)
if xvalues is not None:
pylab.plot(xvalues, vec, style, label=label)
else:
pylab.plot(vec, style, label=label)
pylab.grid(True)
self._generate_figure(p)
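# Minimal usage sketch for vectorplot (illustrative only; the data values are
# arbitrary).  Calling it twice with different styles overlays both lines in
# one window, as described in the docstring above; with no filename set, the
# plot is shown interactively.
def _vectorplot_usage_sketch():
    vectorplot([0, 1, 4, 9, 16], style='r-', label='quadratic', title='vectorplot demo')
    vectorplot([0, 2, 4, 6, 8], style='bx', label='linear', title='vectorplot demo')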
class matrixplot(PylabPlotCommand):
"""
Simple plotting for any matrix as a bitmap with axes.
Like MatLab's imagesc, scales the values to fit in the range 0 to 1.0.
Intended for interactive debugging or analyzing from the command
prompt. See MatPlotLib's pylab functions to create more elaborate
or customized plots; this is just a simple example.
"""
plot_type = param.Callable(default=pylab.gray,doc="""
Matplotlib command to generate the plot, e.g. pylab.gray or pylab.hsv.""")
extent = param.Parameter(default=None,doc="""
Subregion of the matrix to plot, as a tuple (l,b,r,t).""")
# JABALERT: All but the first two should probably be Parameters
def __call__(self,mat,aspect=None,colorbar=True,**params):
p=ParamOverrides(self,params)
p.plot_type()
pylab.figure(figsize=(5,5))
# Swap lbrt to lrbt to match pylab
if p.extent is None:
extent = None
else:
(l,b,r,t)=p.extent
extent=(l,r,b,t)
pylab.imshow(mat,interpolation='nearest',aspect=aspect,extent=extent)
if colorbar and (mat.min()!= mat.max()): pylab.colorbar()
self._generate_figure(p)
class matrixplot3d(PylabPlotCommand):
"""
Simple plotting for any matrix as a 3D wireframe with axes.
Uses Matplotlib's beta-quality features for 3D plotting. These
usually work fine for wireframe plots, although they don't always
format the axis labels properly, and do not support removal of
hidden lines. Note that often the plot can be rotated within the
window to make such problems go away, and then the best result can
be saved if needed.
Other than the default "wireframe", the type can be "contour" to
get a contour plot, or "surface" to get a solid surface plot, but
surface plots currently fail in many cases, e.g. for small
matrices.
If you have trouble, you can try matrixplot3d_gnuplot instead.
"""
# JABALERT: All but the first two arguments should probably be Parameters
def __call__(self,mat,type="wireframe",**params):
p=ParamOverrides(self,params)
from mpl_toolkits.mplot3d import axes3d
fig = pylab.figure()
ax = axes3d.Axes3D(fig)
# Construct matrices for r and c values
rn,cn = mat.shape
c = outer(ones(rn),arange(cn*1.0))
r = outer(arange(rn*1.0),ones(cn))
if type=="wireframe":
ax.plot_wireframe(r,c,mat)
elif type=="surface":
# Sometimes fails for no obvious reason
ax.plot_surface(r,c,mat)
elif type=="contour":
# Works but not usually very useful
ax.contour3D(r,c,mat)
else:
raise ValueError("Unknown plot type "+str(type))
ax.set_xlabel('R')
ax.set_ylabel('C')
ax.set_zlabel('Value')
self._generate_figure(p)
def matrixplot3d_gnuplot(mat,title=None,outputfilename="tmp.ps"):
"""
Simple plotting for any matrix as a 3D surface with axes.
Currently requires the gnuplot-py package to be installed, plus
the external gnuplot program; likely to be removed once Matplotlib
supports 3D plots better.
Unlikely to work on non-UNIX systems.
Should return when it completes, but for some reason the Topographica
prompt is not available until this command finishes.
"""
import Gnuplot
from os import system
psviewer="gv" # Should be a parameter, or handled better somehow
g = Gnuplot.Gnuplot(debug=0) #debug=1: output commands to stderr
r,c = mat.shape
x = arange(r*1.0)
y = arange(c*1.0)
# The .tolist() command is necessary to avoid bug in gnuplot-py,
# which will otherwise convert a 2D float array into integers (!)
m = numpy.asarray(mat,dtype="float32").tolist()
#g("set parametric")
g("set data style lines")
g("set hidden3d")
g("set xlabel 'R'")
g("set ylabel 'C'")
g("set zlabel 'Value'")
if title: g.title(title)
if outputfilename:
g("set terminal postscript eps color solid 'Times-Roman' 14")
g("set output '"+outputfilename+"'")
g.splot(Gnuplot.GridData(m,x,y, binary=1))
#g.hardcopy(outputfilename, enhanced=1, color=1)
system(psviewer+" "+outputfilename+" &")
else:
g.splot(Gnuplot.GridData(m,x,y, binary=1))
raw_input('Please press return to continue...\n')
class histogramplot(PylabPlotCommand):
"""
Compute and plot the histogram of the supplied data.
See help(pylab.hist) for help on the histogram function itself.
If given, colors is an iterable collection of matplotlib.colors
(see help (matplotlib.colors) ) specifying the bar colors.
Example use:
histogramplot([1,1,1,2,2,3,4,5],title='hist',colors='rgb',bins=3,normed=1)
"""
# JABALERT: All but the first two arguments should probably be Parameters
def __call__(self,data,colors=None,**params):
p=ParamOverrides(self,params,allow_extra_keywords=True)
pylab.figure(figsize=(4,2))
n,bins,bars = pylab.hist(data,**(p.extra_keywords()))
# if len(bars)!=len(colors), any extra bars won't have their
# colors changed, or any extra colors will be ignored.
if colors: [bar.set_fc(color) for bar,color in zip(bars,colors)]
self._generate_figure(p)
class gradientplot(matrixplot):
"""
Compute and show the gradient plot of the supplied data.
Translated from Octave code originally written by Yoonsuck Choe.
If the data is specified to be cyclic, negative differences will
be wrapped into the range specified (1.0 by default).
"""
# JABALERT: All but the first two arguments should probably be Parameters
def __call__(self,data,cyclic=True,cyclic_range=1.0,**params):
p=ParamOverrides(self,params)
r,c = data.shape
dx = numpy.diff(data,1,axis=1)[0:r-1,0:c-1]
dy = numpy.diff(data,1,axis=0)[0:r-1,0:c-1]
if cyclic: # Wrap into the specified range
# Convert negative differences to an equivalent positive value
dx = wrap(0,cyclic_range,dx)
dy = wrap(0,cyclic_range,dy)
#
# Make it increase as gradient reaches the halfway point,
# and decrease from there
dx = 0.5*cyclic_range-abs(dx-0.5*cyclic_range)
dy = 0.5*cyclic_range-abs(dy-0.5*cyclic_range)
super(gradientplot,self).__call__(sqrt(dx*dx+dy*dy),**p)
class fftplot(matrixplot):
"""
Compute and show the 2D Fast Fourier Transform (FFT) of the supplied data.
Example:: fftplot(topo.sim["V1"].sheet_views["OrientationPreference"].view()[0],filename="out")
"""
def __call__(self,data,**params):
p=ParamOverrides(self,params)
fft_plot=1-abs(fftshift(fft2(data-0.5, s=None, axes=(-2,-1))))
super(fftplot,self).__call__(fft_plot,**p)
class autocorrelationplot(matrixplot):
"""
Compute and show the 2D autocorrelation of the supplied data.
Requires the external SciPy package.
Example:: autocorrelationplot(topo.sim["V1"].sheet_views["OrientationPreference"].view()[0],filename="out")
"""
plot_type = param.Callable(default=pylab.autumn)
def __call__(self,data,**params):
p=ParamOverrides(self,params)
import scipy.signal
mat=scipy.signal.correlate2d(data,data)
super(autocorrelationplot,self).__call__(mat,**p)
class activityplot(matrixplot):
"""
Plots the activity in a sheet with axis labels in Sheet (not matrix) coordinates.
Same as matrixplot, but only for matrices associated with a Sheet.
By default plots the Sheet's activity, but any other matrix of the
same size may be supplied for plotting in these coordinates instead.
"""
def __call__(self,sheet,mat=None,**params):
p=ParamOverrides(self,params)
if p.extent is None: p.extent=sheet.bounds.aarect().lbrt()
if mat is None: mat = sheet.activity
super(activityplot,self).__call__(mat,**p)
class topographic_grid(PylabPlotCommand):
"""
By default, plot the XPreference and YPreference preferences for all
Sheets for which they are defined, using MatPlotLib.
If sheet_views other than XPreference and YPreference are desired,
the names of these can be passed in as arguments.
"""
xsheet_view_name = param.String(default='XPreference',doc="""
Name of the SheetView holding the X position locations.""")
ysheet_view_name = param.String(default='YPreference',doc="""
Name of the SheetView holding the Y position locations.""")
axis = param.Parameter(default=[-0.5,0.5,-0.5,0.5],doc="""
Four-element list of the plot bounds, i.e. [xmin, xmax, ymin, ymax].""")
skip = param.Integer(default=1,bounds=[1,None],softbounds=[1,10],doc="""
Plot every skipth line in each direction.
E.g. skip=4 means to keep only every fourth horizontal line
and every fourth vertical line, except that the first and last
are always included. The default is to include all data points.""")
def __call__(self,**params):
p=ParamOverrides(self,params)
for sheet in topo.sim.objects(Sheet).values():
if ((p.xsheet_view_name in sheet.sheet_views) and
(p.ysheet_view_name in sheet.sheet_views)):
x = sheet.sheet_views[p.xsheet_view_name].view()[0]
y = sheet.sheet_views[p.ysheet_view_name].view()[0]
pylab.figure(figsize=(5,5))
# This one-liner works in Octave, but in matplotlib it
# results in lines that are all connected across rows and columns,
# so here we plot each line separately:
# pylab.plot(x,y,"k-",transpose(x),transpose(y),"k-")
# Here, the "k-" means plot in black using solid lines;
# see matplotlib for more info.
isint=pylab.isinteractive() # Temporarily make non-interactive for plotting
pylab.ioff()
for r,c in zip(y[::p.skip],x[::p.skip]):
pylab.plot(c,r,"k-")
for r,c in zip(transpose(y)[::p.skip],transpose(x)[::p.skip]):
pylab.plot(c,r,"k-")
                # Force the last line to be drawn, to avoid leaving cells open
if p.skip != 1:
pylab.plot(x[-1],y[-1],"k-")
pylab.plot(transpose(x)[-1],transpose(y)[-1],"k-")
pylab.xlabel('x')
pylab.ylabel('y')
# Currently sets the input range arbitrarily; should presumably figure out
# what the actual possible range is for this simulation (which would presumably
# be the maximum size of any GeneratorSheet?).
pylab.axis(p.axis)
p.title='Topographic mapping to '+sheet.name+' at time '+topo.sim.timestr()
if isint: pylab.ion()
p.filename_suffix="_"+sheet.name
self._generate_figure(p)
class overlaid_plots(PylabPlotCommand):
"""
Use matplotlib to make a plot combining a bitmap and line-based overlays.
"""
plot_template = param.List(default=[{'Hue':'OrientationPreference'}],doc="""
Template for the underlying bitmap plot.""")
overlay = param.List(default=[('contours','OcularPreference',0.5,'black'),
('arrows','DirectionPreference','DirectionSelectivity','white')],doc="""
List of overlaid plots, where each list item may be a 4-tuple
specifying either a contour line or a field of arrows::
('contours',map-name,contour-value,line-color)
('arrows',arrow-location-map-name,arrow-size-map-name,arrow-color)
Any number or combination of contours and arrows may be supplied.""")
normalize = param.Boolean(default='Individually',doc="""
Type of normalization, if any, to use. Options include 'None',
'Individually', and 'AllTogether'. See
topo.plotting.plotgroup.TemplatePlotGroup.normalize for more
details.""")
def __call__(self,**params):
p=ParamOverrides(self,params)
for template in p.plot_template:
for sheet in topo.sim.objects(Sheet).values():
name=template.keys().pop(0)
plot=make_template_plot(template,sheet.sheet_views,sheet.xdensity,sheet.bounds,p.normalize,name=template[name])
if plot:
bitmap=plot.bitmap
pylab.figure(figsize=(5,5))
isint=pylab.isinteractive() # Temporarily make non-interactive for plotting
pylab.ioff() # Turn interactive mode off
pylab.imshow(bitmap.image,origin='lower',interpolation='nearest')
pylab.axis('off')
for (t,pref,sel,c) in p.overlay:
v = pylab.flipud(sheet.sheet_views[pref].view()[0])
if (t=='contours'):
pylab.contour(v,[sel,sel],colors=c,linewidths=2)
if (t=='arrows'):
s = pylab.flipud(sheet.sheet_views[sel].view()[0])
scale=int(pylab.ceil(log10(len(v))))
X=pylab.array([x for x in xrange(len(v)/scale)])
v_sc=pylab.zeros((len(v)/scale,len(v)/scale))
s_sc=pylab.zeros((len(v)/scale,len(v)/scale))
for i in X:
for j in X:
v_sc[i][j]=v[scale*i][scale*j]
s_sc[i][j]=s[scale*i][scale*j]
pylab.quiver(scale*X,scale*X,-cos(2*pi*v_sc)*s_sc,-sin(2*pi*v_sc)*s_sc,color=c,edgecolors=c,minshaft=3,linewidths=1)
p.title='%s overlaid with %s at time %s' %(plot.name,pref,topo.sim.timestr())
if isint: pylab.ion()
p.filename_suffix="_"+sheet.name
self._generate_figure(p)
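# Minimal sketch of an alternative overlay specification for overlaid_plots
# (illustrative only; the named sheet_views must already exist in the
# simulation being analyzed): a single contour of OrientationSelectivity at
# 0.5, drawn in red over the default OrientationPreference bitmap.
def _overlaid_plots_usage_sketch():
    overlaid_plots(overlay=[('contours', 'OrientationSelectivity', 0.5, 'red')])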
class tuning_curve(PylabPlotCommand):
"""
Plot a tuning curve for a feature, such as orientation, contrast, or size.
The curve datapoints are collected from the curve_dict for
the units at the specified coordinates in the specified sheet
(where the units and sheet may be set by a GUI, using
topo.analysis.featureresponses.UnitCurveCommand.sheet and
topo.analysis.featureresponses.UnitCurveCommand.coords,
or by hand).
"""
coords = param.List(default=[(0,0)],doc="""
List of coordinates of units to measure.""")
sheet = param.ObjectSelector(
default=None,doc="""
Name of the sheet to use in measurements.""")
x_axis = param.String(default="",doc="""
Feature to plot on the x axis of the tuning curve""")
# Can we list some alternatives here, if there are any
# useful ones?
plot_type = param.Callable(default=pylab.plot,doc="""
Matplotlib command to generate the plot.""")
unit = param.String(default="",doc="""
String to use in labels to specify the units in which curves are plotted.""")
__abstract = True
def _format_x_tick_label(self,x):
return "%3.1f" % x
def _rotate(self, seq, n=1):
n = n % len(seq) # n=hop interval
return seq[n:] + seq[:n]
def _curve_values(self, i_value, j_value, curve):
"""Return the x, y, and x ticks values for the specified curve from the curve_dict"""
x_values=sorted(curve.keys())
y_values=[curve[key].view()[0][i_value,j_value] for key in x_values]
return x_values,y_values,x_values
    def _reduce_ticks(self,ticks):
        x = []
        y = []
        num_ticks = 5
        y.append(ticks[0])
        x.append(0)
        for i in xrange(0,num_ticks):
            y.append(y[-1]+numpy.pi/(num_ticks+1))
            x.append(x[-1]+numpy.pi/(num_ticks+1))
        y.append(y[-1]+numpy.pi/(num_ticks+1))
        x.append(3.14)
        return (x,y)
def __call__(self,**params):
p=ParamOverrides(self,params)
sheet = p.sheet
for coordinate in p.coords:
i_value,j_value=sheet.sheet2matrixidx(coordinate[0],coordinate[1])
pylab.figure(figsize=(7,7))
isint=pylab.isinteractive()
pylab.ioff()
pylab.ylabel('Response',fontsize='large')
pylab.xlabel('%s (%s)' % (p.x_axis.capitalize(),p.unit),fontsize='large')
pylab.title('Sheet %s, coordinate(x,y)=(%0.3f,%0.3f) at time %s' %
(sheet.name,coordinate[0],coordinate[1],topo.sim.timestr()))
p.title='%s: %s Tuning Curve' % (topo.sim.name,p.x_axis.capitalize())
self.first_curve=True
for curve_label in sorted(sheet.curve_dict[p.x_axis].keys()):
x_values,y_values,ticks=self._curve_values(i_value,j_value,sheet.curve_dict[p.x_axis][curve_label])
x_tick_values,ticks = self._reduce_ticks(ticks)
labels = [self._format_x_tick_label(x) for x in ticks]
pylab.xticks(x_tick_values, labels,fontsize='large')
pylab.yticks(fontsize='large')
p.plot_type(x_values, y_values, label=curve_label,lw=3.0)
self.first_curve=False
if isint: pylab.ion()
pylab.legend(loc=2)
self._generate_figure(p)
class cyclic_tuning_curve(tuning_curve):
"""
Same as tuning_curve, but rotates the curve so that minimum y
values are at the minimum x value to make the plots easier to
interpret. Such rotation is valid only for periodic quantities
like orientation or direction, and only if the correct period
is set.
At present, the y_values and labels are rotated by an amount
    determined by the minimum y_value for the first curve plotted
(usually the lowest contrast curve).
"""
cyclic_range = param.Number(default=pi,bounds=(0,None),softbounds=(0,10),doc="""
Range of the cyclic quantity (e.g. pi for the orientation of
a symmetric stimulus, or 2*pi for motion direction or the
orientation of a non-symmetric stimulus).""")
unit = param.String(default="degrees",doc="""
String to use in labels to specify the units in which curves are plotted.""")
# This implementation should work for quantities periodic with
# some multiple of pi that we want to express in degrees, but it
# will need to be reimplemented in a subclass to work with other
# cyclic quantities.
def _format_x_tick_label(self,x):
return str(int(180*x/pi))
def _curve_values(self, i_value, j_value, curve):
"""
Return the x, y, and x ticks values for the specified curve from the curve_dict.
With the current implementation, there may be cases (i.e.,
when the lowest contrast curve gives a lot of zero y_values)
in which the maximum is not in the center. This may
eventually be changed so that the preferred orientation is in
the center.
"""
if self.first_curve==True:
x_values= sorted(curve.keys())
y_values=[curve[key].view()[0][i_value,j_value] for key in x_values]
min_arg=argmin(y_values)
x_min=x_values[min_arg]
y_min=y_values[min_arg]
y_values=self._rotate(y_values, n=min_arg)
self.ticks=self._rotate(x_values, n=min_arg)
self.ticks+=[x_min]
x_max=min(x_values)+self.cyclic_range
x_values.append(x_max)
y_values.append(y_min)
self.x_values=x_values
else:
y_values=[curve[key].view()[0][i_value,j_value] for key in self.ticks]
return self.x_values,y_values,self.ticks
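# A small standalone illustration of the rotation performed above: the curve is
# shifted so that its minimum response comes first, and the starting x value is
# repeated one cyclic_range later so that the plotted curve closes on itself.
# (Sketch only; plain lists stand in for the real curve_dict views, and this
# helper is not used by the classes above.)
def _example_rotate_cyclic_curve(x_values,y_values,cyclic_range):
    """Return (x,y) for a cyclic curve rotated so the minimum y comes first.
    Assumes x_values is sorted and y_values[i] is the response at x_values[i]."""
    min_arg = min(range(len(y_values)), key=y_values.__getitem__)
    y = y_values[min_arg:] + y_values[:min_arg] + [y_values[min_arg]]
    x = list(x_values) + [x_values[0] + cyclic_range]
    return x,y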
def plot_cfproj_mapping(dest,proj='Afferent',style='b-'):
"""
Given a CF sheet receiving a CFProjection, plot
the mapping of the dests CF centers on the src sheet.
"""
if isinstance(dest,str):
from topo import sim
dest = sim[dest]
plot_coord_mapping(dest.projections()[proj].coord_mapper,
dest,style=style)
# JABALERT: not sure whether this is currently used
def plot_coord_mapping(mapper,sheet,style='b-'):
"""
Plot a coordinate mapping for a sheet.
Given a CoordinateMapperFn (as for a CFProjection) and a sheet
of the projection, plot a grid showing where the sheet's units
are mapped.
"""
from pylab import plot,hold,ishold
xs = sheet.sheet_rows()
ys = sheet.sheet_cols()
hold_on = ishold()
if not hold_on:
plot()
hold(True)
for y in ys:
pts = [mapper(x,y) for x in xs]
plot([u for u,v in pts],
[v for u,v in pts],
style)
for x in xs:
pts = [mapper(x,y) for y in ys]
plot([u for u,v in pts],
[v for u,v in pts],
style)
hold(hold_on)
# JABALERT: Untested as of Mon Nov 10 12:59:54 GMT 2008
class plot_tracked_attributes(PylabPlotCommand):
"""
Plots parameter values associated with an AttributeTrackingTF.
Example call:
VT=AttributeTrackingTF(function=HE, debug_params=['a', 'b',], units=[(0,0),(1,1)], step=1)
plot_tracked_attributes(VT,0,10000,attrib_names=['a'],units=[(0,0)], filename='V1')
"""
# JABALERT: These parameters need to be documented.
raw = param.Boolean(default=False)
attrib_names = param.List(default=[])
ylabel = param.String(default="")
# Should be renamed to coords to match other commands
units = param.List(default=[])
ybounds = param.Parameter(default=(None,None))
# JABALERT: All but the first two arguments should probably be Parameters
def __call__(self,output_fn,init_time=0,final_time=None,**params):
p=ParamOverrides(self,params)
if final_time is None:
final_time=topo.sim.time()
attrs = p.attrib_names if len(p.attrib_names)>0 else output_fn.attrib_names
for a in attrs:
pylab.figure(figsize=(6,4))
isint=pylab.isinteractive()
pylab.ioff()
pylab.grid(True)
ylabel=p.ylabel
pylab.ylabel(a+" "+ylabel)
pylab.xlabel('Iteration Number')
coords = p.units if len(p.units)>0 else output_fn.units
for coord in coords:
y_data=[y for (x,y) in output_fn.values[a][coord]]
x_data=[x for (x,y) in output_fn.values[a][coord]]
if p.raw==True:
plot_data=zip(x_data,y_data)
pylab.save(normalize_path(p.filename+a+'(%.2f, %.2f)' %(coord[0], coord[1])),plot_data,fmt='%.6f', delimiter=',')
pylab.plot(x_data,y_data, label='Unit (%.2f, %.2f)' %(coord[0], coord[1]))
(ymin,ymax)=p.ybounds
pylab.axis(xmin=init_time,xmax=final_time,ymin=ymin,ymax=ymax)
if isint: pylab.ion()
pylab.legend(loc=0)
p.title=topo.sim.name+': '+a
p.filename_suffix=a
self._generate_figure(p)
# JABALERT: Should be updated to plot for a specified list of sheets,
# and then the combination of all of them, so that it will work for
# any network. Will need to remove the simple_sheet_name and
# complex_sheet_name parameters once that works.
class plot_modulation_ratio(PylabPlotCommand):
"""
This function computes the modulation ratios of neurons in the
specified sheets and plots their histograms. See
analysis.vision.complexity for more info.
"""
# JABALERT: All but the first argument should probably be Parameters
def __call__(self,fullmatrix,simple_sheet_name=None,complex_sheet_name=None,bins=frange(0,2.0,0.1,inclusive=True),**params):
p=ParamOverrides(self,params)
from topo.analysis.vision import complexity
if (topo.sim.objects().has_key(simple_sheet_name) and topo.sim.objects().has_key(complex_sheet_name)):
v1s = complexity(fullmatrix[topo.sim[simple_sheet_name]]).flatten()
v1c = complexity(fullmatrix[topo.sim[complex_sheet_name]]).flatten()
#double the number of complex cells to reflect large width of layer 2/3
v1c = numpy.concatenate((array(v1c),array(v1c)),axis=1)
pylab.figure()
n = pylab.subplot(311)
pylab.hist(v1s,bins)
pylab.axis([0,2.0,0,4100])
n.yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(3))
n = pylab.subplot(312)
pylab.hist(v1c,bins)
pylab.axis([0,2.0,0,4100])
n.yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(3))
n = pylab.subplot(313)
pylab.hist(numpy.concatenate((array(v1s),array(v1c)),axis=1),bins)
pylab.axis([0,2.0,0,4100])
n.yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(3))
self._generate_figure(p)
class measure_position_pref(PositionMeasurementCommand):
"""Measure a position preference map by collating the response to patterns."""
scale = param.Number(default=0.3)
def _feature_list(self,p):
width =1.0*p.x_range[1]-p.x_range[0]
height=1.0*p.y_range[1]-p.y_range[0]
return [Feature(name="x",range=p.x_range,step=width/p.divisions,preference_fn=self.preference_fn),
Feature(name="y",range=p.y_range,step=height/p.divisions,preference_fn=self.preference_fn)]
from topo.misc.distribution import DSF_WeightedAverage
pg= create_plotgroup(name='Position Preference',category="Preference Maps",
doc='Measure preference for the X and Y position of a Gaussian.',
pre_plot_hooks=[measure_position_pref.instance(
preference_fn=DSF_WeightedAverage( selectivity_scale=(0.,17.) ))],
plot_hooks=[topographic_grid.instance()],
normalize='Individually')
pg.add_plot('X Preference',[('Strength','XPreference')])
pg.add_plot('Y Preference',[('Strength','YPreference')])
pg.add_plot('Position Preference',[('Red','XPreference'),
('Green','YPreference')])
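# Hedged usage sketch for the plot group defined above (parameter values are
# illustrative only; normally the GUI or a batch script invokes these hooks):
#
#   measure_position_pref(divisions=10,scale=0.3)
#   topographic_grid(xsheet_view_name="XPreference",ysheet_view_name="YPreference")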
class measure_cog(ParameterizedFunction):
"""
Calculate center of gravity (CoG) for each CF of each unit in each CFSheet.
Unlike measure_position_pref and other measure commands, this one
does not work by collating the responses to a set of input patterns.
Instead, the CoG is calculated directly from each set of incoming
weights. The CoG value thus is an indirect estimate of what
patterns the neuron will prefer, but is not limited by the finite
number of test patterns as the other measure commands are.
Measures only one projection for each sheet, as specified by the
proj_name parameter. The default proj_name of '' selects the
first non-self connection, which is usually useful to examine for
simple feedforward networks, but will not necessarily be useful in
other cases.
"""
proj_name = param.String(default='',doc="""
Name of the projection to measure; the empty string means 'the first
non-self connection available'.""")
def __call__(self,**params):
p=ParamOverrides(self,params)
measured_sheets = [s for s in topo.sim.objects(CFSheet).values()
if hasattr(s,'measure_maps') and s.measure_maps]
# Could easily be extended to measure CoG of all projections
# and e.g. register them using different names (e.g. "Afferent
# XCoG"), but then it's not clear how the PlotGroup would be
# able to find them automatically (as it currently supports
# only a fixed-named plot).
requested_proj=p.proj_name
for sheet in measured_sheets:
for proj in sheet.in_connections:
if (proj.name == requested_proj) or \
(requested_proj == '' and (proj.src != sheet)):
self._update_proj_cog(proj)
if requested_proj=='':
print "measure_cog: Measured %s projection %s from %s" % \
(proj.dest.name,proj.name,proj.src.name)
break
def _update_proj_cog(self,proj):
"""Measure the CoG of the specified projection and register corresponding SheetViews."""
sheet=proj.dest
rows,cols=sheet.activity.shape
xpref=zeros((rows,cols),Float)
ypref=zeros((rows,cols),Float)
for r in xrange(rows):
for c in xrange(cols):
cf=proj.cfs[r,c]
r1,r2,c1,c2 = cf.input_sheet_slice
row_centroid,col_centroid = centroid(cf.weights)
xcentroid, ycentroid = proj.src.matrix2sheet(
r1+row_centroid+0.5,
c1+col_centroid+0.5)
xpref[r][c]= xcentroid
ypref[r][c]= ycentroid
sheet.sheet_views['XCoG']=SheetView((xpref,sheet.bounds), sheet.name,
sheet.precedence,topo.sim.time(),sheet.row_precedence)
sheet.sheet_views['YCoG']=SheetView((ypref,sheet.bounds), sheet.name,
sheet.precedence,topo.sim.time(),sheet.row_precedence)
pg= create_plotgroup(name='Center of Gravity',category="Preference Maps",
doc='Measure the center of gravity of each ConnectionField in a Projection.',
pre_plot_hooks=[measure_cog.instance()],
plot_hooks=[topographic_grid.instance(xsheet_view_name="XCoG",ysheet_view_name="YCoG")],
normalize='Individually')
pg.add_plot('X CoG',[('Strength','XCoG')])
pg.add_plot('Y CoG',[('Strength','YCoG')])
pg.add_plot('CoG',[('Red','XCoG'),('Green','YCoG')])
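# For reference, a minimal sketch of the weight centroid that _update_proj_cog
# relies on (the `centroid` used above is imported elsewhere in this file; this
# version is only meant to show the calculation): row and column indices are
# averaged, weighted by the ConnectionField weights.
def _example_weight_centroid(weights):
    """Return the (row,col) center of mass of a 2D weight array."""
    row_idx,col_idx = numpy.indices(weights.shape)
    total = float(weights.sum())
    return (row_idx*weights).sum()/total,(col_idx*weights).sum()/total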
class measure_or_tuning_fullfield(FeatureCurveCommand):
"""
Measures orientation tuning curve(s) of a particular unit using a
full-field sine grating stimulus.
The curve can be plotted at various different values of the
contrast (or actually any other parameter) of the stimulus. If
using contrast and the network contains an LGN layer, then one
would usually specify michelson_contrast as the
contrast_parameter. If there is no explicit LGN, then scale
(offset=0.0) can be used to define the contrast. Other relevant
contrast definitions (or other parameters) can also be used,
provided they are defined in PatternPresenter and the units
parameter is changed as appropriate.
"""
coords = param.Parameter(default=None,doc="""Ignored; here just to suppress warning.""")
pattern_presenter = param.Callable(
default=PatternPresenter(pattern_generator=SineGrating(),
contrast_parameter="michelson_contrast"))
create_plotgroup(template_plot_type="curve",name='Orientation Tuning Fullfield',category="Tuning Curves",doc="""
Plot orientation tuning curves for a specific unit, measured using full-field sine gratings.
Although the data takes a long time to collect, once it is ready the plots
are available immediately for any unit.""",
pre_plot_hooks=[measure_or_tuning_fullfield.instance()],
plot_hooks=[cyclic_tuning_curve.instance(x_axis="orientation")])
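# Hedged usage sketch (assumes a sheet named 'V1' exists in topo.sim and that
# the measurement command accepts the sheet to measure; illustrative only):
#
#   measure_or_tuning_fullfield(sheet=topo.sim['V1'])
#   cyclic_tuning_curve(x_axis="orientation",sheet=topo.sim['V1'],coords=[(0,0)])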
class measure_or_tuning(UnitCurveCommand):
"""
Measures orientation tuning curve(s) of a particular unit.
Uses a circular sine grating patch as the stimulus on the
retina.
The curve can be plotted at various different values of the
contrast (or actually any other parameter) of the stimulus. If
using contrast and the network contains an LGN layer, then one
would usually specify weber_contrast as the contrast_parameter. If
there is no explicit LGN, then scale (offset=0.0) can be used to
define the contrast. Other relevant contrast definitions (or
other parameters) can also be used, provided they are defined in
PatternPresenter and the units parameter is changed as
appropriate.
"""
num_orientation = param.Integer(default=12)
static_parameters = param.List(default=["size","x","y"])
def __call__(self,**params):
p=ParamOverrides(self,params)
self.params('sheet').compute_default()
sheet=p.sheet
for coord in p.coords:
self.x=self._sheetview_unit(sheet,coord,'XPreference',default=coord[0])
self.y=self._sheetview_unit(sheet,coord,'YPreference',default=coord[1])
self._compute_curves(p,sheet)
create_plotgroup(template_plot_type="curve",name='Orientation Tuning',category="Tuning Curves",doc="""
Measure orientation tuning for a specific unit at different contrasts,
using a pattern chosen to match the preferences of that unit.""",
pre_plot_hooks=[measure_or_tuning.instance()],
plot_hooks=[cyclic_tuning_curve.instance(x_axis="orientation")],
prerequisites=['XPreference'])
# JABALERT: Is there some reason not to call it measure_size_tuning?
class measure_size_response(UnitCurveCommand):
"""
Measure receptive field size of one unit of a sheet.
Uses an expanding circular sine grating stimulus at the preferred
orientation and retinal position of the specified unit.
    Orientation and position preference must be calculated before
measuring size response.
The curve can be plotted at various different values of the
contrast (or actually any other parameter) of the stimulus. If
using contrast and the network contains an LGN layer, then one
would usually specify weber_contrast as the contrast_parameter. If
there is no explicit LGN, then scale (offset=0.0) can be used to
define the contrast. Other relevant contrast definitions (or
other parameters) can also be used, provided they are defined in
PatternPresenter and the units parameter is changed as
appropriate.
"""
size=None # Disabled unused parameter
static_parameters = param.List(default=["orientation","x","y"])
num_sizes = param.Integer(default=10,bounds=(1,None),softbounds=(1,50),
doc="Number of different sizes to test.")
max_size = param.Number(default=1.0,bounds=(0.1,None),softbounds=(1,50),
doc="Maximum extent of the grating")
x_axis = param.String(default='size',constant=True)
def __call__(self,**params):
p=ParamOverrides(self,params)
self.params('sheet').compute_default()
sheet=p.sheet
for coord in p.coords:
            # Orientations are stored as a normalized value between 0
# and 1, so we scale them by pi to get the true orientations.
self.orientation=pi*self._sheetview_unit(sheet,coord,'OrientationPreference')
self.x=self._sheetview_unit(sheet,coord,'XPreference',default=coord[0])
self.y=self._sheetview_unit(sheet,coord,'YPreference',default=coord[1])
self._compute_curves(p,sheet)
# Why not vary frequency too? Usually it's just one number, but it could be otherwise.
def _feature_list(self,p):
return [Feature(name="phase",range=(0.0,2*pi),step=2*pi/p.num_phase,cyclic=True),
Feature(name="frequency",values=p.frequencies),
Feature(name="size",range=(0.0,self.max_size),step=self.max_size/p.num_sizes,cyclic=False)]
create_plotgroup(template_plot_type="curve",name='Size Tuning',category="Tuning Curves",
doc='Measure the size preference for a specific unit.',
pre_plot_hooks=[measure_size_response.instance()],
plot_hooks=[tuning_curve.instance(x_axis="size",unit="Diameter of stimulus")],
prerequisites=['OrientationPreference','XPreference'])
class measure_contrast_response(UnitCurveCommand):
"""
Measures contrast response curves for a particular unit.
Uses a circular sine grating stimulus at the preferred
orientation and retinal position of the specified unit.
    Orientation and position preference must be calculated before
measuring contrast response.
The curve can be plotted at various different values of the
contrast (or actually any other parameter) of the stimulus. If
using contrast and the network contains an LGN layer, then one
would usually specify weber_contrast as the contrast_parameter. If
there is no explicit LGN, then scale (offset=0.0) can be used to
define the contrast. Other relevant contrast definitions (or
other parameters) can also be used, provided they are defined in
PatternPresenter and the units parameter is changed as
appropriate.
"""
static_parameters = param.List(default=["size","x","y"])
contrasts = param.List(class_=int,default=[10,20,30,40,50,60,70,80,90,100])
relative_orientations = param.List(class_=float,default=[0.0, pi/6, pi/4, pi/2])
x_axis = param.String(default='contrast',constant=True)
units = param.String(default=" rad")
def __call__(self,**params):
p=ParamOverrides(self,params)
self.params('sheet').compute_default()
sheet=p.sheet
for coord in p.coords:
orientation=pi*self._sheetview_unit(sheet,coord,'OrientationPreference')
self.curve_parameters=[{"orientation":orientation+ro} for ro in self.relative_orientations]
self.x=self._sheetview_unit(sheet,coord,'XPreference',default=coord[0])
self.y=self._sheetview_unit(sheet,coord,'YPreference',default=coord[1])
self._compute_curves(p,sheet,val_format="%.4f")
def _feature_list(self,p):
return [Feature(name="phase",range=(0.0,2*pi),step=2*pi/p.num_phase,cyclic=True),
Feature(name="frequency",values=p.frequencies),
Feature(name="contrast",values=p.contrasts,cyclic=False)]
create_plotgroup(template_plot_type="curve",name='Contrast Response',category="Tuning Curves",
doc='Measure the contrast response function for a specific unit.',
pre_plot_hooks=[measure_contrast_response.instance()],
plot_hooks=[tuning_curve.instance(x_axis="contrast",unit="%")],
prerequisites=['OrientationPreference','XPreference'])
class measure_orientation_contrast(UnitCurveCommand):
"""
Measures the response to a center sine grating disk and a surround
sine grating ring at different contrasts of the central disk.
The central disk is set to the preferred orientation of the unit
to be measured. The surround disk orientation (relative to the
central grating) and contrast can be varied, as can the size of
both disks.
"""
pattern_presenter = param.Callable(
default=PatternPresenter(pattern_generator=OrientationContrast(),
contrast_parameter="weber_contrast"))
size=None # Disabled unused parameter
# Maybe instead of the below, use size and some relative parameter, to allow easy scaling?
sizecenter=param.Number(default=0.5,bounds=(0,None),doc="""
The size of the central pattern to present.""")
sizesurround=param.Number(default=1.0,bounds=(0,None),doc="""
The size of the surround pattern to present.""")
thickness=param.Number(default=0.5,bounds=(0,None),softbounds=(0,1.5),doc="""Ring thickness.""")
contrastsurround=param.Number(default=100,bounds=(0,100),doc="""Contrast of the surround.""")
contrastcenter=param.Number(default=100,bounds=(0,100),doc="""Contrast of the center.""")
x_axis = param.String(default='orientationsurround',constant=True)
orientation_center = param.Number(default=0.0,softbounds=(0.0,numpy.pi),doc="""
Orientation of the center grating patch""")
units = param.String(default="%")
static_parameters = param.List(default=["x","y","sizecenter","sizesurround","orientationcenter","thickness","contrastcenter"])
curve_parameters=param.Parameter([{"contrastsurround":30},{"contrastsurround":60},{"contrastsurround":80},{"contrastsurround":90}],doc="""
List of parameter values for which to measure a curve.""")
or_surrounds = []
def __call__(self,**params):
p=ParamOverrides(self,params)
self.params('sheet').compute_default()
sheet=p.sheet
for coord in p.coords:
self.or_surrounds=[]
orientation=p.orientation_center
self.orientationcenter=orientation
for i in xrange(0,self.num_orientation):
self.or_surrounds.append(orientation+i*pi/(self.num_orientation))
self.x=self._sheetview_unit(sheet,coord,'XPreference',default=coord[0])
self.y=self._sheetview_unit(sheet,coord,'YPreference',default=coord[1])
self._compute_curves(p,sheet)
def _feature_list(self,p):
return [Feature(name="phase",range=(0.0,2*pi),step=2*pi/p.num_phase,cyclic=True),
Feature(name="frequency",values=p.frequencies),
Feature(name="orientationsurround",values=self.or_surrounds,cyclic=True)]
create_plotgroup(template_plot_type="curve",name='Orientation Contrast',category="Tuning Curves",
doc='Measure the response of one unit to a center and surround sine grating disk.',
pre_plot_hooks=[measure_orientation_contrast.instance()],
plot_hooks=[tuning_curve.instance(x_axis="orientationsurround",unit="%")],
prerequisites=['OrientationPreference','XPreference'])
class test_measure(UnitCurveCommand):
static_parameters = param.List(default=["size","x","y"])
x_axis = param.String(default='contrast',constant=True)
units = param.String(default=" rad")
def __call__(self,**params):
p=ParamOverrides(self,params)
self.params('sheet').compute_default()
sheet=p.sheet
self.x = 0.0
self.y = 0.0
for coord in p.coords:
self._compute_curves(p,sheet,val_format="%.4f")
def _feature_list(self,p):
return [Feature(name="orientation",values=[1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0],cyclic=True),
Feature(name="contrast",values=[100],cyclic=False)]
import types
__all__ = list(set([k for k,v in locals().items()
                    if (isinstance(v,types.FunctionType) or
                        (isinstance(v,type) and issubclass(v,ParameterizedFunction)))
                    and not v.__name__.startswith('_')]))
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is the lib for gradient checker unittest."""
from __future__ import print_function
import unittest
import six
import collections
import numpy as np
from itertools import product
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.executor import Executor
from paddle.fluid.backward import _append_grad_suffix_, _as_list
def _product(t):
if isinstance(t, int):
return t
else:
return np.product(t)
def dtype_to_np_dtype(dtype):
if dtype == core.VarDesc.VarType.FP32:
return np.float32
elif dtype == core.VarDesc.VarType.FP64:
return np.float64
elif dtype == core.VarDesc.VarType.FP16:
return np.float16
else:
raise ValueError("Not supported data type " + str(dtype))
def _get_item(t, i, np_dtype):
if np_dtype == np.float16:
np_t = np.array(t).astype(np.float16)
np_t = np_t.flatten()
return np_t[i]
elif np_dtype == np.float32:
return t._get_float_element(i)
elif np_dtype == np.float64:
return t._get_double_element(i)
else:
raise ValueError("Not supported data type " + str(np_dtype))
def _set_item(t, i, e, np_dtype):
if np_dtype == np.float16:
np_t = np.array(t).astype(np.float16)
shape = np_t.shape
np_t = np_t.flatten()
np_t[i] = e
np_t = np_t.reshape(shape).view(np.uint16)
t.set(np_t, place)
elif np_dtype == np.float32:
t._set_float_element(i, e)
elif np_dtype == np.float64:
t._set_double_element(i, e)
else:
raise ValueError("Not supported data type " + str(np_dtype))
def set_var_in_scope(scope, place, name, value, recursive_seq_len=None):
t = scope.var(name).get_tensor()
t.set(value, place)
if recursive_seq_len:
t.set_recursive_sequence_lengths(recursive_seq_len)
return t
def var_to_np_array_in_scope(scope, place, name):
return np.array(scope.var(name).get_tensor())
def make_jacobian(x, y_size, np_dtype):
if isinstance(x, fluid.framework.Variable):
return np.zeros((_product(x.shape), y_size), dtype=np_dtype)
elif isinstance(x, collections.Sequence):
jacobians = list(
filter(lambda t: t is not None, (make_jacobian(
item, y_size, np_dtype) for item in x)))
return jacobians
else:
        return None
def _compute_numerical_jacobian(program, x, y, place, scope, delta):
"""Computes the numeric Jacobian for dy/dx.
Computes the numeric Jacobian by slightly perturbing the inputs and
measuring the differences on the output.
Args:
program (Program): the network program.
        x (Variable): the input variable.
y (list[Variable]): the output variables.
place (fluid.CPUPlace or fluid.CUDAPlace): the device.
scope (Scope): the scope used to run program.
delta: the amount of perturbation we give to the input
Returns:
A list of 2-D numpy array, the list length is len(y).
Each 2-D numpy array represents the Jacobian for dy_i/dx.
It has "x_size" rows and "y_size" columns
where "x_size" is the number of elements in x and
"y_size" is the number of elements in each y_i.
"""
if not isinstance(x, fluid.framework.Variable):
raise TypeError('x is not Variable')
# To compute the jacobian, treat x and y as one-dimensional vectors.
y = _as_list(y)
exe = fluid.Executor(place)
def run():
y_res = exe.run(program, scope=scope, fetch_list=y)
return [yi.flatten() for yi in y_res]
x_name = x.name
x_shape = x.shape
x_size = _product(x_shape)
x_t = scope.find_var(x_name).get_tensor()
np_type = dtype_to_np_dtype(x.dtype)
jacobian = [make_jacobian(x, _product(yi.shape), np_type) for yi in y]
for i in six.moves.xrange(x_size):
orig = _get_item(x_t, i, np_type)
x_pos = orig + delta
_set_item(x_t, i, x_pos, np_type)
y_pos = run()
x_neg = orig - delta
_set_item(x_t, i, x_neg, np_type)
y_neg = run()
_set_item(x_t, i, orig, np_type)
for j in six.moves.xrange(len(y)):
jacobian[j][i, :] = (y_pos[j] - y_neg[j]) / delta / 2.
return jacobian
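# A self-contained numpy illustration of the central-difference scheme used by
# _compute_numerical_jacobian, for a plain python function instead of a fluid
# Program (exposition only; this helper is not used elsewhere in this file).
def _numeric_jacobian_example(f, x, delta=1e-6):
    """Approximate the Jacobian of f: R^n -> R^m at x by central differences.
    Rows index the elements of x, columns index the elements of f(x)."""
    x = np.array(x, dtype=np.float64)
    flat = x.reshape(-1)
    y0 = np.asarray(f(x), dtype=np.float64).flatten()
    jac = np.zeros((flat.size, y0.size), dtype=np.float64)
    for i in range(flat.size):
        orig = flat[i]
        flat[i] = orig + delta
        y_pos = np.asarray(f(x), dtype=np.float64).flatten()
        flat[i] = orig - delta
        y_neg = np.asarray(f(x), dtype=np.float64).flatten()
        flat[i] = orig
        jac[i, :] = (y_pos - y_neg) / (2.0 * delta)
    return jac
# e.g. _numeric_jacobian_example(lambda v: v * v, [1.0, 2.0]) is close to
# [[2., 0.], [0., 4.]].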
def _compute_analytical_jacobian(program, x, y, place, scope):
"""Computes the analytical Jacobian for dy/dx.
Args:
program (Program): a Program with forward pass.
x (Variable|list[Variable]): a variable or list of variable
y (Variable): the target variable.
place (fluid.CPUPlace or fluid.CUDAPlace): the device.
scope (Scope): the scope used to run program.
Returns:
A list of 2-D numpy array. The list length is len(x).
Each 2-D numpy array represents the Jacobian for dy/dx_i.
It has "xi_size" rows and "dy_size" columns
where "x_size" is the number of elements in x_i and
"dy_size" is the number of elements in y.
"""
if not isinstance(y, fluid.framework.Variable):
raise TypeError('y is not Variable')
dy_name = _append_grad_suffix_(y.name)
np_type = dtype_to_np_dtype(y.dtype)
# create dy Variable in Program
dy = program.global_block().create_var(
name=dy_name, shape=y.shape, dtype=np_type, persistable=True)
# append backward
dx = fluid.gradients(y, x, dy)
# init dy tensor in scope
value = np.zeros(y.shape, dtype=np_type)
dy_t = set_var_in_scope(scope, place, dy_name, value)
exe = fluid.Executor(place)
y_size = _product(y.shape)
x = _as_list(x)
jacobian = make_jacobian(x, y_size, np_type)
    # Filter out None entries in dx, since dx/dy may be None for some kernels;
    # only the non-None dx are fetched in exe.run.
filted = [(i, dxi) for i, dxi in enumerate(dx) if dxi is not None]
filted_idx, filted_dx = zip(*filted)
for i in six.moves.xrange(y_size):
_set_item(dy_t, i, 1, np_type)
dx_res = exe.run(program, scope=scope, fetch_list=filted_dx)
for j in six.moves.xrange(len(filted_dx)):
dx_idx = filted_idx[j]
if dx_res[j] is not None:
jacobian[dx_idx][:, i] = dx_res[j].flatten()
else:
jacobian[dx_idx][:, i] = np.zeros(
dx[dx_idx].shape, dtype=np_type).flatten()
_set_item(dy_t, i, 0, np_type)
return jacobian
def grad_check(x,
y,
x_init=None,
place=None,
program=None,
eps=1e-6,
atol=1e-5,
rtol=1e-3,
raise_exception=True):
"""
Check numerical and analytical gradients for dy/dx.
    Each Jacobian is a 2-D array with shape [xi_size, yi_size].
Args:
x (Variable|list[Variable]): input variables to the program.
y (Variable|list[Variable]): output variables to the program.
x_init (numpy.array|list[numpy.array]|None): the init value for input x.
place (fluid.CPUPlace or fluid.CUDAPlace): the device.
program (Program|None): a Program with forward pass.
If None, use fluid.default_main_program().
eps (float): perturbation for finite differences.
atol (float): absolute tolerance.
rtol (float): relative tolerance.
raise_exception (bool): whether to raise an exception if
the check fails. Default is True.
Returns:
True if all differences satisfy numpy.allclose condition.
"""
def fail_test(msg):
if raise_exception:
raise RuntimeError(msg)
return False
# check input arguments
x = _as_list(x)
y = _as_list(y)
for v in x:
v.stop_gradient = False
v.persistable = True
if place is None:
place = fluid.CPUPlace()
if program is None:
program = fluid.default_main_program()
    # init variables in the startup program
scope = fluid.executor.global_scope()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
x_init = _as_list(x_init)
# init inputs if x_init is not None
if x_init:
if len(x_init) != len(x):
raise ValueError('len(x_init) (=%d) is not the same'
' as len(x) (= %d)' % (len(x_init), len(x)))
# init variable in main program
for var, arr in zip(x, x_init):
assert var.shape == arr.shape
feeds = {k.name: v for k, v in zip(x, x_init)}
exe.run(program, feed=feeds, scope=scope)
# [x_idx, y_idx]
numerical = [
_compute_numerical_jacobian(program, xi, y, place, scope, eps)
for xi in x
]
# [y_idx, x_idx]
analytical = []
for yi in y:
prog = program.clone()
clone_x = []
clone_y = None
for b in prog.blocks:
if b.has_var(yi.name):
clone_y = b.var(yi.name)
break
for xi in x:
for b in prog.blocks:
if b.has_var(xi.name):
clone_x.append(b.var(xi.name))
break
analytical.append(
_compute_analytical_jacobian(prog, clone_x, clone_y, place, scope))
for i, (x_idx,
y_idx) in enumerate(product(*[range(len(x)), range(len(y))])):
a = analytical[y_idx][x_idx]
n = numerical[x_idx][y_idx]
if not np.allclose(a, n, rtol, atol):
msg = 'Jacobian mismatch for output %s ' \
'with respect to input %s on %s,\n' \
'numerical:%s\nanalytical:%s\n' \
% (y[y_idx].name, x[x_idx].name, str(place), n, a)
return fail_test(msg)
return True
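# Hedged usage sketch (assumes the fluid 1.x layers API; names, shapes and the
# chosen op are illustrative only):
#
#   import numpy as np
#   import paddle.fluid as fluid
#   import paddle.fluid.layers as layers
#
#   prog = fluid.Program()
#   with fluid.program_guard(prog):
#       x = layers.data('x', [2, 3], append_batch_size=False, dtype='float64')
#       x.persistable = True
#       y = layers.sigmoid(x)
#   x_arr = np.random.uniform(-1, 1, [2, 3]).astype('float64')
#   grad_check([x], y, x_init=x_arr, place=fluid.CPUPlace(), program=prog)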
def double_grad_check(x,
y,
x_init=None,
y_grads=None,
place=None,
program=None,
eps=1e-6,
atol=1e-5,
rtol=1e-3,
raise_exception=True):
"""
Check gradients of gradients. This function will append backward to the
program before second order gradient check.
Args:
x (Variable|list[Variable]): input variables to the program.
y (Variable|list[Variable]): output variables to the program.
x_init (numpy.array|list[numpy.array]|None): the init value for input x.
y_grads (numpy.array|list[numpy.array]|None): the gradients with respect to y.
place (fluid.CPUPlace or fluid.CUDAPlace): the device.
program (Program|None): a Program with forward pass.
If None, use fluid.default_main_program().
eps (float): perturbation for finite differences.
atol (float): absolute tolerance.
rtol (float): relative tolerance.
raise_exception (bool): whether to raise an exception if
the check fails. Default is True.
Returns:
True if all differences satisfy numpy.allclose condition.
"""
# check input arguments
x = _as_list(x)
for v in x:
v.stop_gradient = False
v.persistable = True
y = _as_list(y)
if program is None:
program = fluid.default_main_program()
    scope = fluid.executor.global_scope()
    if y_grads is None:
y_grads = []
y_grads_init = []
for yi in y:
dyi_name = _append_grad_suffix_(yi.name)
np_type = dtype_to_np_dtype(yi.dtype)
dy = program.global_block().create_var(
name=dyi_name, shape=yi.shape, dtype=np_type, persistable=True)
dy.stop_gradient = False
v = np.random.random(size=yi.shape).astype(np_type)
set_var_in_scope(scope, place, dyi_name, v)
y_grads.append(dy)
y_grads_init.append(v)
else:
y_grads = _as_list(y_grads)
y_grads_init = [
var_to_np_array_in_scope(scope, place, v.name) for v in y_grads
]
# append first order grads
target_grads = fluid.gradients(y, x, y_grads)
# y_grads are the input of first-order backward,
# so, they are also the input of second-order backward.
x += y_grads
x_init = _as_list(x_init)
x_init += y_grads_init
grad_check(x, target_grads, x_init, place, program, eps, atol, rtol)
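# Hedged usage sketch for the second-order check (same assumptions as the
# grad_check example above; the chosen op is assumed to have a double-grad
# kernel registered):
#
#   prog = fluid.Program()
#   with fluid.program_guard(prog):
#       x = layers.data('x', [3, 5], append_batch_size=False, dtype='float64')
#       x.persistable = True
#       y = layers.square(x)
#   x_arr = np.random.uniform(-1, 1, [3, 5]).astype('float64')
#   double_grad_check([x], y, x_init=x_arr, place=fluid.CPUPlace(), program=prog)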
##############################################################
###
# WRESTLER DETAILS
# Contains -
# 1. List of wrestlers
# 2. Wrestler rating
# 3. Wrestler finisher set
# 4. Ratings of the finishers
# 5. Ratings for the non-finisher moves
###
# Complete list of wrestlers
# Given as :
# Wrestler's name, Rating, Finishers, Short name used in commentary
wrestlers = {
1:["CM Punk",10.0,["GTS", "Anaconda Vice"], "Punk"],
2:["Daniel Bryan",10.0,["YES! Lock", "Running Knee"], "Bryan"],
3:["Chris Jericho",9.7,["Liontamer", "CodeBreaker"], "Jericho"],
4:["Dean Ambrose",9.5,["Dirty Deeds"], "Ambrose"],
5:["Seth Rollins",9.2,["Blackout"], "Rollins"],
6:["Roman Reigns",9.3,["Spear"], "Reigns"],
7:["John Cena",9.8,["Attitude Adjustment", "STF"], "Cena"],
8:["Randy Orton",9.6,["RKO", "Punt Kick"], "Orton"],
9:["Big Show",9.3,["WMD", "Choke Slam"], "Big Show"],
10:["Dolph Ziggler",9.1,["Zig Zag", "FameAsser"], "Ziggler"],
11:["Rob Van Dam",9.0,["Five Star Frog Splash"], "RVD"],
12:["Kane",9.3,["Choke Slam"], "Kane"],
13:["Cody Rhodes",8.9,["Cross Rhodes"], "Rhodes"],
14:["Goldust",8.5,["Curtain Call", "Final Cut"], "Goldust"],
15:["Alberto Del Rio",8.7,["Cross Armbreaker"], "Del Rio"],
16:["Bray Wyatt",8.7,["Sister Abigail"], "Wyatt"],
17:["Erik Rowan",8.1,["Running Splash"], "Rowan"],
18:["Luke Harper",8.3,["Truckstop"], "Harper"],
19:["Jack Swagger",8.2,["Ankle Lock", "Gutwrench Powerbomb"], "Swagger"],
20:["Antonio Cesaro",9.0,["Neutralizer", "Giant Swing"], "Cesaro"],
21:["Damien Sandow",8.4,["You're Welcome", "Terminus"], "Sandow"],
22:["Jimmy Uso",8.1,["Superkick", "Samoan Drop"], "Uso"],
23:["Jey Uso",8.1,["Superkick", "Samoan Drop"], "Uso"],
24:["Big E Langston",8.9,["Big Ending"], "Big E"],
25:["Rey Mysterio",8.8,["619"], "Mysterio"],
26:["Kofi Kingston",8.5,["Trouble In Paradise"], "Kofi"],
27:["Ryback",9.2,["ShellShocked"], "Ryback"],
28:["The Miz",7.8,["Skull Crushing Finale"], "Miz"],
29:["Sheamus",9.1,["Brogue Kick", "High Cross"], "Sheamus"],
30:["Mark Henry",9.0,["World's Strongest Slam"], "Mark Henry"],
31:["Curtis Axel",7.7,["Perfectplex"], "Axel"],
32:["Wade Barrett",7.6,["BullHammer"], "Barrett"],
33:["Fandango",7.4,["Beauty In Motion"], "Fandango"],
34:["Heath Slater",7.1,["Smash Hit"], "Slater"],
35:["Drew McIntyre",7.3,["Future Shock DDT"], "McIntyre"],
36:["Jinder Mahal",7.0,["Karachi Valley Driver"], "Jinder"],
37:["The Great Khali",7.9,["Punjabi Plunge"], "Khali"],
38:["Brodus Clay",7.4,["Splat!"], "Brodus Clay"],
39:["Tensai",7.6,["Baldo Bomb"], "Tensai"],
40:["Bo Dallas",6.8,["Bo Dallas Spear"], "Bo Dallas"],
41:["Santino Marella",7.0,["Cobra"], "Santino"],
42:["Zack Ryder",6.9,["Rough Ryder"], "Ryder"],
43:["David Otunga",6.8,["Verdict"], "Otunga"],
44:["Justin Gabriel",6.9,["450 Splash"], "Justin Gabriel"],
45:["Sin Cara",7.0,["Moonsault Side Slam"], "Sin Cara"],
46:["R-Truth",7.4,["Lie Detector"], "Truth"],
47:["John Morrison",7.7,["Starship Pain"], "Morrison"],
48:["Primo",6.8,["Backstabber"], "Primo"],
49:["Epico",6.7,["Backstabber"], "Epico"],
50:["Evan Bourne",6.8,["Shooting Star Press"], "Bourne"],
51:["AJ Styles",9.6,["Styles Clash", "Calf Slicer"], "Styles"],
52:["Bully Ray",9.5,["Bully Bomb"], "Bully"],
53:["Jeff Hardy",9.4,["Swanton Bomb", "Twist Of Fate"], "Hardy"],
54:["Mr.Anderson",8.8,["Mic Check"], "Anderson"],
55:["Austin Aries",9.4,["Brainbuster"], "Aries"],
56:["Bobby Roode",9.3,["Crossface", "Pay Off"], "Roode"],
57:["Christopher Daniels",9.1,["Best Moonsault Ever"], "Daniels"],
58:["Kazarian",9.1,["Fade To Black"], "Kaz"],
59:["James Storm",9.0,["Last Call Superkick"], "Storm"],
60:["Magnus",8.8,["Mag Daddy Driver"], "Magnus"],
61:["Kurt Angle",9.2,["Angle Slam", "Ankle Lock"], "Angle"],
62:["Samoa Joe",9.3,["Musclebuster"], "Joe"],
63:["Rob Terry",8.4,["Freakbuster"], "Rob T"],
64:["Abyss",8.9,["Black Hole Slam"], "Abyss"],
65:["Robbie E",7.4,["Spinning Lifting DDT"], "Robbie E"],
66:["Alex Shelley",8.6,["Sliced Bread #2"], "Shelley"],
67:["Chris Sabin",9.1,["All Hail Sabin"], "Sabin"],
68:["Manik",7.9,["Double Chickenwing Gutbuster"], "Manik"],
69:["Sting",8.9,["Scorpion Death Drop", "Scorpion Death Lock"], "Sting"],
70:["Devon",8.4,["Thrust Spinebuster"], "Devon"],
71:["DOC",7.7,["One Percenter"], "DOC"],
72:["Kenny King",7.6,["Royal Flush"], "King"],
73:["Chavo Guerrero",7.6,["Frog Splash"], "Chavo"],
74:["Hernandez",7.9,["Border Toss"], "Hernandez"],
75:["Chuck Taylor",8.7,["Awful Waffle"], "Taylor"],
76:["Johnny Gargano",8.4,["Garga-No Escape"], "Gargano"],
77:["Ricochet",8.5,["Double Rotation Moonsault"], "Ricochet"],
78:["Kevin Steen",8.8,["F-Cinq"], "Steen"],
79:["El Generico",8.9,["BRAINBUSTAAHHH!!"], "El Generico"],
80:["Colt Cabana",8.7,["Billy Goat's Curse", "Chicago Skyline"], "Cabana"],
81:["Chris Hero",8.7,["Hero's Welcome"], "Hero"],
82:["Matt Jackson",8.5,["Worst Case Scenario"], "Jackson"],
83:["Nick Jackson",8.5,["450 Splash"], "Jackson"],
84:["PAC",8.6,["Corkscrew 450 Splash"], "PAC"],
85:["Jigsaw",8.0,["Jig 'n Tonic", "Cancun Tornado"], "Jigsaw"],
86:["Lince Dorado",7.9,["Chikara Special", "Lynxsault"], "Lince Dorado"],
87:["Dragon Kid",8.2,["Dragonrana", "Ultra Hurricanrana"], "Dragon Kid"],
88:["Arik Cannon",7.8,["Total Anarchy"], "Cannon"],
89:["Brian Kendrick",7.7,["Sliced Bread"], "Kendrick"],
90:["Icarus",7.8,["Wings Of Icarus"], "Icarus"],
91:["Mike Quackenbush",7.5,["QuackenDriver", "Chikara Special"], "Quackenbush"],
92:["Fire ANT",7.9,["Beach Break", "Burning Hammer"], "Fire ANT"],
93:["AssailANT",7.9,["AssailANT's Cross", "GTS - Get The Sugar"], "AssailANT"],
94:["Matt Hardy",7.9,["Twist Of Fate"], "Hardy"],
95:["Zema Ion",7.6,["Submission Impossible"], "Zema Ion"],
96:["Stone Cold Steve Austin",10.0,["Stunner"], "Austin"],
97:["The Rock",10.0,["Rock Bottom", "People's Elbow"], "Rock"],
98:["The Undertaker",10.0,["Tombstone", "Choke Slam", "Last Ride", "Hell's Gate"], "Taker"],
99:["Shawn Michaels",10.0,["Sweet Chin Music"], "Michaels"],
100:["Edge",10.0,["Spear"], "Edge"],
101:["Christian",8.9,["Killswitch"], "Christian"],
102:["Mick Foley",9.1,["Mandible Claw", "Bionic Elbow"], "Foley"],
103:["Eddie Guerrero",9.4,["Frog Splash"], "Guerrero"],
104:["Chris Benoit",9.3,["Crippler Crossface"], "Benoit"],
105:["Hulk Hogan",9.0,["Hogan Leg Drop"], "Hogan"],
106:["Kevin Nash",8.5,["Jackknife Powerbomb"], "Nash"],
107:["Razor Ramon",8.8,["Razor's Edge"], "Ramon"],
108:["Goldberg",9.3,["Spear", "Jackhammer"], "Goldberg"],
109:["Brock Lesnar",9.5,["F5", "Kimura Lock"], "Lesnar"],
110:["Triple H",9.3,["Pedigree"], "Triple H"],
111:["Shane McMahon",8.6,["Elbow Drop", "Coast to Coast"], "Shane O'Mac"],
112:["Vince McMahon",8.2,["Pedigree", "People's Elbow", "Stunner", "Hogan Leg Drop"], "Mr.McMahon"],
113:["JBL",8.8,["Clothesline from Hell"], "JBL"],
114:["Diamond Dallas Page",8.0,["Diamond Cutter"], "DDP"],
115:["Bret Hart",9.0,["Sharpshooter"], "Hitman"],
116:["Ric Flair",9.0,["Figure 4 Leg Lock"], "Nature Boy"],
117:["Booker T",8.8,["Scissor Kick"], "Booker"],
118:["Dean Malenko",8.5,["Texas Cloverleaf"], "Malenko"],
119:["Roddy Piper",8.4,["Sleeper Hold"], "'Rowdy' Roddy Piper"],
120:["Umaga",8.3,["Samoan Drop", "Samoan Spike"], "Umaga"],
121:["Rikishi",8.3,["Banzai Drop", "StinkFace"], "Rikishi"],
122:["Andre The Giant",8.7,["Double Underhook Suplex"], "Andre"],
123:["Batista",9.2,["Batista Bomb"], "Batista"],
124:["Bobby Lashley",9.1,["Dominator", "Spear"], "Lashley"],
125:["Farooq",8.1,["Dominator", "Thrust Spinebuster"], "Farooq"]
}
# The number of wrestlers available is -
wrestler_count = len(wrestlers)
# Gives the ratings for every impact finisher used
impact_ratings = {
# '',0,
'450 Splash':8.6,
'619':8.5,
'All Hail Sabin':8.9,
'Angle Slam':8.6,
"AssailANT's Cross":8.5,
'Attitude Adjustment':9.5,
'Awful Waffle':9.1,
'BRAINBUSTAAHHH!!':9.1,
'Backstabber':8.4,
'Baldo Bomb':8.0,
'Banzai Drop':7.7,
'Batista Bomb':9.0,
'Beach Break':8.6,
'Beauty In Motion':8.7,
'Best Moonsault Ever':9.0,
'Bo Dallas Spear':7.9,
'Big Ending':8.9,
'Bionic Elbow':6.4,
'Black Hole Slam':8.3,
'Blackout':8.1,
'Border Toss':7.9,
'Brainbuster':9.2,
'Brogue Kick':9.4,
'BullHammer':8.6,
'Bully Bomb':8.7,
'Burning Hammer':8.7,
'Cancun Tornado':8.8,
'Chicago Skyline':8.7,
'Choke Slam':9.1,
'Clothesline from Hell':8.5,
'Coast to Coast':6.6,
'Cobra':6.7,
'CodeBreaker':8.3,
'Corkscrew 450 Splash':9.1,
'Cross Rhodes':8.5,
'Curtain Call':7.8,
'Diamond Cutter':9.1,
'Dirty Deeds':8.9,
'Dragonrana':8.8,
'Dominator':8.8,
'Double Chickenwing Gutbuster':8.5,
'Double Rotation Moonsault':9.2,
'Double Underhook Suplex':8.3,
'Elbow Drop':8.4,
'F-Cinq':8.9,
'F5':9.4,
'Fade To Black':9.0,
'FameAsser':8.1,
'Final Cut':8.6,
'Five Star Frog Splash':9.1,
'Freakbuster':8.5,
'Frog Splash':9.0,
'Future Shock DDT':8.0,
'GTS':9.6,
    'GTS - Get The Sugar':8.7,   # matches AssailANT's finisher entry above
'Giant Swing':8.8,
'Gutwrench Powerbomb':8.1,
"Hero's Welcome":8.9,
'Hogan Leg Drop':8.2,
'High Cross':8.5,
'Jackhammer':9.3,
'Jackknife Powerbomb':8.6,
"Jig 'n Tonic":8.7,
'Karachi Valley Driver':7.8,
'Killswitch':8.4,
'Last Call Superkick':8.8,
'Last Ride':9.2,
'Lie Detector':7.9,
'Lynxsault':8.9,
'Mag Daddy Driver':8.7,
'Mic Check':8.1,
'Moonsault Side Slam':8.6,
'Musclebuster':9.1,
'Neutralizer':9.5,
'One Percenter':8.1,
'Pay Off':8.8,
'Pedigree':9.3,
"People's Elbow":7.6,
'Perfectplex':8.5,
'Punjabi Plunge':8.5,
'Punt Kick':9.0,
'QuackenDriver':8.0,
'RKO':9.7,
"Razor's Edge":8.9,
'Rock Bottom':9.0,
'Rough Ryder':7.5,
'Royal Flush':7.8,
'Running Knee':9.3,
'Running Splash':8.2,
'Samoan Drop':8.2,
'Samoan Spike':8.6,
'Scissor Kick':8.1,
'Scorpion Death Drop':8.5,
'ShellShocked':9.1,
'Shooting Star Press':9.0,
'Sister Abigail':9.0,
'Skull Crushing Finale':8.9,
'Sliced Bread':7.8,
'Sliced Bread #2':8.2,
'Smash Hit':7.7,
'Splat!':7.9,
'Spear':9.6,
'Spinning Lifting DDT':7.8,
'Starship Pain':9.1,
'Stunner':9.8,
'Styles Clash':9.1,
'Superkick':9.0,
'Swanton Bomb':9.1,
'Sweet Chin Music':9.7,
'Terminus':8.3,
'Thrust Spinebuster':8.1,
'Tombstone':9.9,
'Total Anarchy':8.3,
'Trouble In Paradise':8.5,
'Truckstop':8.2,
'Twist Of Fate':8.7,
'Ultra Hurricanrana':8.2,
'Verdict':7.8,
'WMD':9.5,
'Wings Of Icarus':8.5,
"World's Strongest Slam":9.3,
'Worst Case Scenario':8.5,
"You're Welcome":8.6,
'Zig Zag':9.0
}
# Gives the ratings for every submission finisher used
submission_ratings = {
# '',0,
'Anaconda Vice':9.5,
'Ankle Lock':9.2,
"Billy Goat's Curse":8.7,
'Calf Slicer':8.5,
'Chikara Special':9.2,
'Crippler Crossface':9.5,
'Crossface':8.9,
'Cross Armbreaker':9.0,
'Figure 4 Leg Lock':8.5,
'Garga-No Escape':8.9,
"Hell's Gate":9.8,
'Kimura Lock':9.5,
'Liontamer':9.7,
'Mandible Claw':8.8,
'STF':9.3,
'Scorpion Death Lock':8.8,
'Sharpshooter':9.2,
'Sleeper Hold':8.1,
'StinkFace':8.3,
'Submission Impossible':8.9,
'Texas Cloverleaf':9.1,
'YES! Lock':9.5,
}
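# A small illustration of the lookup convention used later in
# finisher_perform(): a finisher's rating comes from impact_ratings when it is
# an impact move, and from submission_ratings otherwise. (Helper added for
# exposition only; the game code performs this check inline.)
def finisher_rating(move_name):
    if move_name in impact_ratings:
        return impact_ratings[move_name]
    return submission_ratings[move_name]
# e.g. finisher_rating('RKO') -> 9.7, finisher_rating('Ankle Lock') -> 9.2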
# Gives the ratings for the basic moves
basic_ratings = {
# ['':0],
0:['Punch',0.2],
1:['Backhand Chop',0.5],
2:['Knife Edge Chop',0.6],
3:['Forearm',0.3],
4:['Knee Strike',0.4],
5:['Knee to the midsection',0.4],
6:['Kick',0.4],
7:['Kick to the midsection',0.4],
8:['Kick to the head',0.7],
9:['Jab',0.3],
10:['Tackle',0.4],
11:['Running takedown',0.4],
12:["Fireman's carry",0.3],
13:['Drop Toe Hold',0.3],
14:['Irish Whip',0.5],
15:['Big Boot',0.8],
16:['Dropkick',0.7],
17:['Enzuigiri',0.7],
18:['Battering Ram',0.6],
19:['Headbutt',0.4],
20:['Hiptoss',0.4],
21:['Arm Drag',0.4],
22:['Double Axe Handle',0.4],
23:['Elbow Strike',0.3],
24:['Crossbody',0.6],
25:['Clothesline',0.5],
26:['Shoulder Block',0.4],
27:['Back Rake',0.2],
28:['Uppercut',0.7],
29:['Slap',0.1],
30:['Lariat',0.4],
31:['Snapmare',0.3]
}
basic_count = len(basic_ratings) # The number of basic moves
# Gives the ratings for grapple moves
grapple_ratings = {
# ['',0],
0:['Scoop Slam',0.7],
1:['Suplex',0.8],
2:['Snap Suplex',0.8],
3:['German Suplex',1.2],
4:['Northern Lights Suplex',1.1],
5:['Belly to Belly Suplex',0.7],
6:['Belly to Back Suplex',0.9],
7:['DDT',1.0],
8:['Inverted DDT',1.1],
9:['Tornado DDT',1.4],
10:['Bodyslam',0.7],
11:['Back Body Drop',0.9],
12:['Atomic Drop',1.0],
13:['Gutwrench Suplex',1.2],
14:['Elevated DDT',1.5],
15:['Reverse Suplex',1.4],
16:['Falcon Arrow',1.7],
17:['Neckbreaker',0.9],
18:['Backbreaker',0.9],
19:['Thesz Press',0.7],
20:['Spinebuster',1.2],
21:['Facebuster',0.9],
22:['Bulldog',1.0],
23:['Jawbreaker',1.0],
24:['Sleeper Hold Drop',1.1],
25:['Electric Chair Drop',1.3],
26:['Leg Sweep',0.7],
27:['Monkey Toss',0.8],
28:['Powerbomb',1.4],
29:['Sidewalk Slam',0.9],
30:['Power Slam',1.2]
}
grapple_count = len(grapple_ratings) # The number of grapple moves
# Gives the ratings for holds and locks
hold_ratings = {
# ['',0],
0:['Front Facelock',-1.2],
1:['Headlock',-1.2],
2:['Hammerlock',-1.1],
3:['Wristlock',-1.0],
4:['Reverse Chinlock',-1.3],
5:['Surfboard',1.2],
6:['Armbar',1.8],
7:['Kneebar',1.9],
8:['Sleeper Hold',2.0],
9:['Abdominal Stretch',0.8],
10:['Full Nelson',-0.8],
11:['Half Nelson',-0.4],
}
hold_count = len(hold_ratings) # The number of hold moves
# Gives the ratings for aerial moves
aerial_ratings = {
# ['',0],
0:['Leg Drop',2.5],
1:['Elbow Drop',2.5],
2:['Diving Axehandle',2.0],
3:['Diving Shoulder Strike',2.0],
4:['Flying Crossbody',2.4],
5:['Missile Dropkick',2.8],
6:['Moonsault',2.9],
7:['Diving Clothesline',2.1],
8:['Top Rope Hurricanrana',3.2],
9:['Diving Headbutt',2.4],
10:['Superplex',3.7]
}
aerial_count = len(aerial_ratings) # The number of aerial moves
# Assign the upper limit for the rating total in the team
maxlimit = 43.5
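# A minimal sketch (not part of the original game) of how a candidate 5-man
# team could be validated against maxlimit, assuming members are given as keys
# into the wrestlers table above:
def team_rating_ok(member_ids):
    total = sum(wrestlers[i][1] for i in member_ids)
    return total <= maxlimit
# e.g. CM Punk, Daniel Bryan, Chris Jericho, Dean Ambrose and Seth Rollins sum
# to 10.0+10.0+9.7+9.5+9.2 = 48.4 > 43.5, so team_rating_ok([1,2,3,4,5]) is False.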
final_save_location = "Match_commentaries/"
####################################################################
from Tkinter import *
import random
import itertools
from hashlib import md5
import os
####################################
class SurvivorSeriesGame(object):
""" This class defines -
* The team lists
* The stat maintainers - non-eliminated members, member currently in the ring, power meter
* The random number based operations :
> Selecting which particular move of a certain type is being performed
> Probability of successfully performing the move
> Updating the power meters after the move
> Who should start the match
> Who to tag in when a tag is made
> Who should continue the match after an elimination
* Functions to perform each type of move
> The subclass has a move_perform() method that uses getattr() to select which function to use
> Possible functions :
# do_matchstart()
# do_basic()
# do_grapple()
# do_hold()
# do_aerial()
# do_finisher()
# do_tag()
# do_elimination()
* Functions to retrieve move lists
"""
teams = []
match_teams = [[],[]]
in_control = 0
currently_1 = 0
currently_2 = 0
commentary_text = ""
move_list = {}
move_count = 0
fail_prob_basic = 0.27
fail_prob_grapple = 0.37
fail_prob_hold = 0.21
fail_prob_aerial = 0.57
match_finished = False
ko = False
def __init__(self):
self.setup_teams()
self.filename_setup()
return
def __repr__(self):
output_message = ""
output_message = "The teams are : \n"
for team_no in xrange(len(self.teams)):
output_message += "\nTEAM %d - \n" % (team_no+1)
for member in self.teams[team_no]:
output_message += "%s \n" % (member[0])
return output_message
def setup_teams(self):
for team_no in {0,1}:
wr_team = self.teams[team_no]
for wrestler in wr_team:
self.match_teams[team_no].append([wrestler[0], wrestler[1], wrestler[2], 100.0, wrestler[3]])
def filename_setup(self):
global final_save_location
if not final_save_location[:-1] in os.listdir('.'):
os.mkdir(final_save_location)
filename = ""
for team_no in {0,1}:
wr_team = self.teams[team_no]
for wrestler in wr_team:
filename += wrestler[3] + "_"
if team_no == 0:
filename += "_vs__"
final_save_location += filename
def text_display(self, finished = False):
team_msg = ["", ""]
for team_no in {0,1}:
wr_team = self.match_teams[team_no]
for wrestler in wr_team:
txt = "%s \n" % (wrestler[0])
team_msg[team_no] += (txt)
self.team1msg.delete(0.0, END)
self.team1msg.insert(0.0, team_msg[0])
self.team2msg.delete(0.0, END)
self.team2msg.insert(0.0, team_msg[1])
currently_in_the_ring = ""
if not finished or not self.match_finished:
currently_in_the_ring = "\n\nCurrently in the ring - "+ self.match_teams[0][self.currently_1][0] \
+ "\nAND\n" + self.match_teams[1][self.currently_2][0]
self.currently_in.delete(0.0, END)
self.currently_in.insert(0.0, currently_in_the_ring)
def in_control_display(self):
if self.in_control == 0:
current_wr = self.currently_1
else:
current_wr = self.currently_2
in_control_msg = self.match_teams[self.in_control][current_wr][0] + " is currently in control of the match"
self.currently_in.insert(0.0, in_control_msg)
def commentary_start(self):
text_arg = "\nLILIAN GARCIA : Ladies and Gentlemen, this is the traditional Survivor Series elimination match !\n\n"
self.commentary_update(text_arg)
text_arg = ""
for n, wr_team in enumerate(self.match_teams):
if n==0:
text_arg += "LILIAN GARCIA : Introducing the team of "
else:
text_arg += "\nAnd introducing their opponents, the team of "
for i, wrestler in enumerate(wr_team):
if i == 0:
text_arg += wrestler[0]
elif i == 4:
text_arg += " and " + wrestler[0] + " !! "
else:
text_arg += ", " + wrestler[0]
text_arg += "\n\n"
self.commentary_update(text_arg)
text_arg = "\nJIM ROSS : The match gets underway !\n*BELL RINGS*\nMICHAEL COLE : " \
+ self.match_teams[0][self.currently_1][0] + " and " + self.match_teams[1][self.currently_2][0] \
+ " start the match\n\n"
self.commentary_update(text_arg)
text_arg = "The two of them lock up to start us off.\n"
self.commentary_update(text_arg)
def commentary_update(self, text_arg):
self.commentary_text += text_arg
self.commentary.insert(0.0, text_arg)
def retrieve_basic(self):
return (basic_ratings, basic_count)
def retrieve_grapple(self):
return (grapple_ratings, grapple_count)
def retrieve_hold(self):
return (hold_ratings, hold_count)
def retrieve_aerial(self):
return (aerial_ratings, aerial_count)
def retrieve_finisher(self):
return ("Perform finisher", 1)
def retrieve_tag(self):
return ("Tag in a teammate", 2)
def default_retrieve(self):
return ("DEFAULT", 0)
def make_move(self, arg):
move_list = {}
move_count = 0
selected_function = arg
# print "\n", selected_function
try:
func = getattr(self, ("retrieve_" + selected_function))
except AttributeError:
func = self.default_retrieve
except KeyError:
func = self.default_retrieve
self.move_list, self.move_count = func()
# print self.move_list
# print self.move_count
def do_basic(self):
self.make_move('basic')
fail_prob = getattr(self, ('fail_prob_' + 'basic'))
self.move_perform(fail_prob)
def do_grapple(self):
self.make_move('grapple')
fail_prob = getattr(self, ('fail_prob_' + 'grapple'))
self.move_perform(fail_prob)
def do_hold(self):
self.make_move('hold')
fail_prob = getattr(self, ('fail_prob_' + 'hold'))
self.move_perform(fail_prob)
def do_aerial(self):
self.make_move('aerial')
fail_prob = getattr(self, ('fail_prob_' + 'aerial'))
self.move_perform(fail_prob)
def do_finisher(self):
self.make_move('finisher')
self.finisher_perform()
def do_tag(self):
self.make_move('tag')
self.tag_perform()
def do_elimination(self, eliminated):
if eliminated == 0:
t = 0
el = self.currently_1
else:
t = 1
el = self.currently_2
el_team = self.match_teams.pop(t)
person = el_team.pop(el)
el_msg = "LILIAN GARCIA : " + person[0] + " has been eliminated !\n\n**************\n\n"
self.commentary_update(el_msg)
if len(el_team) != 0:
self.match_teams.insert(t, el_team)
new_control = random.randrange(0, len(self.match_teams[t]))
el_msg = "COLE : " + self.match_teams[t][new_control][0] + " comes in.\n\n"
self.commentary_update(el_msg)
if t == 0:
self.currently_1 = new_control
else:
self.currently_2 = new_control
self.in_control = random.randrange(0, 2)
self.text_display()
self.in_control_display()
else:
self.match_teams.insert(t, el_team)
el_msg = "LILIAN GARCIA : And the winners of this match, the team of "
wr_team = self.teams[(t + 1)%2]
for i, wrestler in enumerate(wr_team):
if i == 0:
el_msg += wrestler[0]
elif i == 4:
el_msg += " and " + wrestler[0] + " !! "
else:
el_msg += ", " + wrestler[0]
el_msg += "\n\n"
self.match_finished = True
self.text_display(finished = True)
self.commentary_update(el_msg)
self.finish_game()
def power_update(self, powers):
ko_msg = ""
wr1_p, wr2_p = powers
self.match_teams[0][self.currently_1][3] -= wr1_p
if self.match_teams[0][self.currently_1][3] >= 100.0:
self.match_teams[0][self.currently_1][3] = 99.0
if self.match_teams[0][self.currently_1][3] < 5.0:
ko_msg = "COLE : Look JR, " + self.match_teams[0][self.currently_1][0] + " is struggling to get back on his feet\n"
ko_msg += "JR: You're right Cole. He looks like he's out. Yeah, the ref's calling for the bell\n"
ko_msg += "COLE: That's the end of him in this match.\n\n"
self.ko = True
self.text_display()
self.in_control_display()
self.commentary_update(ko_msg)
self.do_elimination(0)
self.match_teams[1][self.currently_2][3] -= wr2_p
if self.match_teams[1][self.currently_2][3] >= 100.0:
self.match_teams[1][self.currently_2][3] = 99.0
if self.match_teams[1][self.currently_2][3] < 5.0:
ko_msg = "COLE : Look JR, " + self.match_teams[1][self.currently_2][0] + " is struggling to get back on his feet\n"
ko_msg += "JR: You're right Cole. He looks like he's out. Yeah, the ref's calling for the bell\n"
ko_msg += "COLE: That's the end of him in this match.\n\n"
self.ko = True
self.text_display()
self.in_control_display()
self.commentary_update(ko_msg)
self.do_elimination(1)
def move_perform(self, fail_prob):
move_msg = ""
exec_move_key = random.randrange(0, self.move_count)
exec_move, exec_stat = self.move_list[exec_move_key]
# print exec_move, ' ', exec_stat
if self.in_control == 0:
cur_wr, other_wr = self.currently_1, self.currently_2
else:
cur_wr, other_wr = self.currently_2, self.currently_1
move_exec = random.random()
if move_exec < fail_prob:
move_msg = "JR : " + self.match_teams[self.in_control][cur_wr][0] + " tried to go for a " + exec_move \
+ ", but " + self.match_teams[(self.in_control + 1)%2][other_wr][0] + " managed to counter it.\n\n"
self.in_control = (self.in_control + 1) % 2
else:
z = self.match_teams[self.in_control][cur_wr][1] - (self.match_teams[(self.in_control + 1)%2][other_wr][1] / 1.5)
damage = (z * exec_stat) + (move_exec / 4.0)
move_msg = "JR : " + self.match_teams[self.in_control][cur_wr][0] + " with the " + exec_move +".\n\n"
power_modifications = [0.0, 0.0]
if exec_stat > 0.0:
power_modifications[self.in_control] = 0.0
power_modifications[(self.in_control + 1) % 2] = damage / 1.5
else:
power_modifications[self.in_control] = damage/1.5
power_modifications[(self.in_control + 1) % 2] = damage/3
self.power_update(power_modifications)
if not self.match_finished:
self.text_display()
self.in_control_display()
self.commentary_update(move_msg)
def finisher_perform(self):
fin_msg = ""
if self.in_control == 0:
cur_wr, other_wr = self.currently_1, self.currently_2
else:
cur_wr, other_wr = self.currently_2, self.currently_1
fin_track = random.randrange(0, len(self.match_teams[self.in_control][cur_wr][2]))
fin_move = self.match_teams[self.in_control][cur_wr][2][fin_track]
if fin_move in impact_ratings:
fin_stat = impact_ratings[fin_move]
else:
fin_stat = submission_ratings[fin_move]
fin_msg = "COLE : " + self.match_teams[self.in_control][cur_wr][0] + " goes for the " + fin_move
z = self.match_teams[self.in_control][cur_wr][1] - (self.match_teams[(self.in_control + 1)%2][other_wr][1] / 1.5)
if (random.random() * 10.0) > z:
cur_control = self.in_control
new_control = random.randrange(0, 2)
if cur_control == new_control:
fin_msg += ", but " + self.match_teams[(self.in_control + 1)%2][other_wr][0] + " manages to slip away.\n\n"
else:
fin_msg += ", but " + self.match_teams[(self.in_control + 1)%2][other_wr][0] + " counters !\n\n"
self.in_control = new_control
if not self.match_finished:
self.text_display()
self.in_control_display()
self.commentary_update(fin_msg)
else:
power_modifications = [0.0, 0.0]
if fin_move in impact_ratings:
fin_msg += ". And IT CONNECTS ! \n\n"
self.commentary_update(fin_msg)
damage = (z+0.2) * fin_stat
power_modifications[self.in_control] = 0.0
power_modifications[(self.in_control + 1) % 2] = damage
self.power_update(power_modifications)
try:
power_cur = self.match_teams[self.in_control][cur_wr][3]
power_other = self.match_teams[(self.in_control + 1)%2][other_wr][3]
power = power_other - (power_cur - power_other)
except IndexError:
print ""
if not self.ko:
fin_msg = "He goes for the cover.. 1 !! 2 !!..\n\n"
self.commentary_update(fin_msg)
if (random.random() * 100.0) > power:
fin_msg = "3 !!!\n\n"
self.commentary_update(fin_msg)
fin_msg = "JR : He gets the pin !\nCOLE : Vintage " + self.match_teams[self.in_control][cur_wr][4] + " !\n\n"
self.commentary_update(fin_msg)
power_modifications = [0.0, 0.0]
power_modifications[self.in_control] = -12.5
self.power_update(power_modifications)
self.do_elimination((self.in_control + 1)%2)
else:
fin_msg = ".. KICK OUT !!\n\n"
self.commentary_update(fin_msg)
fin_msg = "JR : How close was that !\nCOLE : He kicks out at two and stays alive in this match\n\n"
self.commentary_update(fin_msg)
power_modifications[self.in_control] = 1.5
power_modifications[(self.in_control + 1) % 2] = -15.0
self.power_update(power_modifications)
else:
self.ko = False
else:
fin_msg += ". He's got it locked in !! " + self.match_teams[(self.in_control + 1)%2][other_wr][0] \
+ " is struggling !!\n\n"
self.commentary_update(fin_msg)
damage = (z+0.2) * fin_stat
power_cur = self.match_teams[self.in_control][cur_wr][3]
power_other = self.match_teams[(self.in_control + 1)%2][other_wr][3] - (damage)
power = power_other - (power_cur - power_other)
if (random.random() * 100.0) > power:
fin_msg = "JR : HE TAPS OUT !! HE TAPS OUT !!\n\n"
self.commentary_update(fin_msg)
power_modifications = [0.0, 0.0]
power_modifications[self.in_control] = -12.5
self.power_update(power_modifications)
self.do_elimination((self.in_control + 1) % 2)
else:
fin_msg = "COLE : He's slowly crawling towards the ropes... He makes it ! \nJR: The ref ordering to break the hold. " \
+ "That was a really close call.\n\n"
power_modifications[self.in_control] = 0.0
power_modifications[(self.in_control + 1) % 2] = damage/2.0 - 7.5
self.power_update(power_modifications)
self.in_control = random.randrange(0, 2)
self.commentary_update(fin_msg)
if not self.match_finished:
self.text_display()
self.in_control_display()
def tag_perform(self):
tag_msg = ""
if self.in_control == 0:
cur_wr = self.currently_1
else:
cur_wr = self.currently_2
if len(self.match_teams[self.in_control]) == 1:
tag_msg = "COLE : " + self.match_teams[self.in_control][cur_wr][0] + " tries to make a tag, but no one's there.\n" \
+ "JR : I think he forgot that he's on his own out there !\n\n"
self.in_control = (self.in_control + 1) % 2
else:
tag_msg = self.match_teams[self.in_control][cur_wr][0] + " with the tag.\n"
for i, wrestler in enumerate(self.match_teams[self.in_control]):
if i != cur_wr:
if wrestler[3] < 90.0:
self.match_teams[self.in_control][i][3] += 8.0
team_shuffle = range(len(self.match_teams[self.in_control]))
team_shuffle.pop(cur_wr)
random.shuffle(team_shuffle)
cur_wr = team_shuffle[0]
if self.in_control == 0:
self.currently_1 = cur_wr
else:
self.currently_2 = cur_wr
tag_msg += "COLE : " + self.match_teams[self.in_control][cur_wr][0] + " enters the match.\n\n"
self.in_control = random.randrange(0, 2)
self.text_display()
self.in_control_display()
self.commentary_update(tag_msg)
def finish_game(self):
        self.import_button = Button(
            self,
            text = "IMPORT COMMENTARY AND CLOSE GAME",
            width = 60,
            padx = 5,
            command = self.import_comm
        )
        self.import_button.grid(row = 6, column = 2, columnspan = 9, sticky = W)
        self.just_quit = Button(
            self,
            text = "CLOSE GAME",
            width = 60,
            padx = 5,
            command = self.close_game
        )
        self.just_quit.grid(row = 6, column = 6, columnspan = 9, sticky = W)
def import_comm(self):
global final_save_location
filename = ""
random_md5_hash = md5(self.commentary_text.encode('utf8')).hexdigest()
filename += random_md5_hash
filename += ".txt"
final_save_location += filename
commentary_file = open(final_save_location,"w")
commentary_file.truncate()
file_to_write = self.commentary_text
try:
commentary_file.write(file_to_write)
except:
for char in file_to_write:
commentary_file.write(char)
commentary_file.close()
self.quit()
def close_game(self):
self.quit()
##############################################
class MatchRun(Frame, SurvivorSeriesGame):
""" GUI class for running the match
Creates the required widgets -
* Buttons for every action of the wrestler
* A text area to display who is currently in control (performing the next move)
* Text areas to display the non-eliminated members of each team
* Commentary text area
We have a method move_perform() for dynamic dispatch
############## Think of a proper way to use move_perform()
"""
functions = {1:'basic', 2:'grapple', 3:'hold', 4:'aerial', 5:'finisher', 6:'tag'}
def __init__(self, master):
Frame.__init__(self, master)
self.grid()
self.create_widgets()
def __repr__(self):
return "The MatchRun class handles the GUI that displays the progress of the match."
def create_widgets(self):
# Creates the required widgets
self.intro = Label(self, text = "Ladies and Gentlemen. This is the traditional Survivor Series 5-on-5 elimination match !")
self.intro.grid(row = 0, column = 2, columnspan = 8, sticky = W)
self.currently_in = Text(self, width = 25, height = 15, wrap = WORD)
self.currently_in.grid(row = 2, column = 0, columnspan = 5, sticky = W)
self.team1display = Label(self, text = "TEAM 1 - ")
self.team1display.grid(row = 1, column = 5, columnspan = 3, sticky = W)
self.team1msg = Text(self, width = 25, height = 15, wrap = WORD)
self.team1msg.grid(row = 2, column = 5, columnspan = 5, sticky = W)
self.team2display = Label(self, text = "TEAM 2 - ")
self.team2display.grid(row = 1, column = 11, columnspan = 3, sticky = W)
self.team2msg = Text(self, width = 25, height = 15, wrap = WORD)
self.team2msg.grid(row = 2, column = 11, columnspan = 5, sticky = W)
self.currently_1 = random.randrange(0,len(self.match_teams[0]))
self.currently_2 = random.randrange(0,len(self.match_teams[1]))
self.in_control = random.randrange(0, 2)
self.text_display()
self.in_control_display()
self.instruction = Label(self, text = "Select the type of move for the wrestler in control to perform ")
self.instruction.grid(row = 4, column = 1, columnspan = 6, sticky = W)
self.button_set = []
for i, key in enumerate(self.functions):
            med = Button(
                self,
                text = self.functions[key].upper(),
                width = 10,
                padx = 5,
                command = getattr(self, "do_" + self.functions[key])
            )
            med.grid(row = 6, column = i + 2, columnspan = 3, sticky = W)
            self.button_set.append(med)
self.commentary = Text(self, width = 75, height = 50, wrap = WORD)
self.commentary.grid(row = 8, column = 2, columnspan = 6, sticky = W)
self.commentary_start()
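    # Hedged sketch (not part of the original game): a working version of the
    # dynamic dispatch helper that the commented-out move_perform() block below
    # was aiming for. It assumes the do_basic/do_grapple/... handlers named in
    # `functions` exist on this class, exactly as create_widgets() already
    # assumes when wiring up the buttons.
    def dispatch_move(self, cmd, *args):
        try:
            func = getattr(self, "do_" + cmd)
        except AttributeError:
            return None
        return func(*args)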
""" def move_perform(self, cmd, arg):
# Dynamic dispatch function to run when a move is performed.
# Called from the corresponding buttons in the GUI
try:
func = getattr(self, "do_", cmd)
except AttributeError:
self.default(cmd)
return func(arg)
"""
##############################################
class WrestlerSelect(Frame):
""" GUI class for team selection menu
This class creates the GUI, with the required widgets -
* Checkbuttons for all the wrestlers in the list
* Text area to display the currently selected team, rating total, and other messages
* Confirmation Button
The GUI stays active until both the teams are confirmed, after which, it can be closed
"""
team_set = False
team_being_set = 0
selected_team = []
def __init__(self, master, team_being_set):
Frame.__init__(self, master)
self.grid()
self.create_widgets()
self.team_being_set = team_being_set
def create_widgets(self):
# Creates the required widgets
global maxlimit
self.intro = Label(self, text = "SURVIVOR SERIES ELIMINATION MATCH - TEAM SELECTION MENU")
self.intro.grid(row = 0, column = 0, columnspan = 16, sticky = W)
self.instruction = Label(self, text = "Select your team of 5 wrestlers, without exceeding the maximum rating total of " + str(maxlimit))
self.instruction.grid(row = 2, column = 0, columnspan = 16, sticky = W)
self.selection = []
for i, key in enumerate(wrestlers):
med = BooleanVar()
Checkbutton(self, text = str(wrestlers[key][0]) + "," + str(wrestlers[key][1]), variable = med, command = self.team_display).grid(row = 3 + int(i/7), column = (i%7), columnspan = 1, sticky = W)
self.selection.append(med)
self.team = Text(self, width = 90, height = 10, wrap = WORD)
self.team.grid(row = 5 + int(len(wrestlers)/7), column = 0, columnspan = 10, sticky = W)
self.team.delete(0.0, END)
self.team.insert(0.0, "Select the team of 5 for Team 1")
self.confirm = Button(self, text = "CONFIRM" , command = self.confirm_team)
self.confirm.grid(row = 7 + int(len(wrestlers)/7), column = 3, columnspan = 3, sticky = W)
def remove_widgets(self):
self.intro.destroy()
self.instruction.destroy()
self.confirm.destroy()
def team_display(self):
# To display the selected team
team_list = ""
team_rating_total = 0
count = 0
completed = False
overlap = False
self.team_set = False
global maxlimit
for i, key in enumerate(wrestlers):
if self.selection[i].get():
team_list += (" ** " + str(wrestlers[key][0]))
team_rating_total += wrestlers[key][1]
count += 1
if team_rating_total > maxlimit:
team_list += "\nMAXLIMIT EXCEEDED !! Modify your team"
elif count > 5:
team_list += "\nMAX TEAM SIZE EXCEEDED !! Modify your team"
else:
completed = True
if completed and count == 5:
self.selected_team = []
for i, key in enumerate(wrestlers):
if self.selection[i].get():
if self.team_being_set == 1:
if wrestlers[key] in SurvivorSeriesGame.teams[0]:
overlap = True
team_list += "\nYou have selected a wrestler already selected by Player 1"
break
self.selected_team.append(wrestlers[key])
if not overlap:
team_list += "\nYOUR TEAM HAS BEEN SET !!"
self.team_set = True
if not self.team_set:
team_list += "\nRating Total for current selected team : " + str(team_rating_total)
if not self.team_being_set > 1:
self.team.delete(0.0, END)
self.team.insert(0.0, team_list)
def confirm_team(self):
# Event handler for button click
message = ""
if self.team_set:
SurvivorSeriesGame.teams.append(self.selected_team)
self.team_being_set += 1
if self.team_being_set > 1:
self.quit()
if self.team_being_set == 1:
message = "Now select the team of 5 for Team " + str(self.team_being_set + 1)
else:
self.remove_widgets()
message = "You can now close this window"
for i, key in enumerate(wrestlers):
self.selection[i].set(False)
else:
message = "Complete selecting your team before continuing"
self.team.delete(0.0, END)
self.team.insert(0.0, message)
##################
# To run the GUI to set the two teams
def gui_caller_for_team_set():
root1 = Tk()
root1.title("Survivor Series Team Selection")
root1.geometry("1050x700")
WS1 = WrestlerSelect(root1, 0)
root1.mainloop()
# root1.geometry("0x0")
root1.destroy()
##################
# To run the GUI to run the game
def gui_caller_for_game_run():
ss1 = SurvivorSeriesGame()
# print ss1
root1 = Tk()
root1.title("The Match")
root1.geometry("1050x700")
MR1 = MatchRun(root1)
root1.mainloop()
##################
# The main function starts the execution of the program
def main():
gui_caller_for_team_set()
gui_caller_for_game_run()
# Boilerplate
if __name__ == '__main__':
main()
|
|
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module allows adding and deleting of projects to the local manifest."""
import logging
import platform
import optparse
import os
import sys
import xml.etree.ElementTree as ElementTree
from chromite.lib import cros_build_lib
from chromite.lib import git
class Manifest(object):
"""Class which provides an abstraction for manipulating the local manifest."""
@classmethod
def FromPath(cls, path, empty_if_missing=False):
if os.path.isfile(path):
with open(path) as f:
return cls(f.read())
    elif empty_if_missing:
      return cls()
    cros_build_lib.Die('Manifest file, %r, not found' % path)
def __init__(self, text=None):
self._text = text or '<manifest>\n</manifest>'
self.nodes = ElementTree.fromstring(self._text)
def AddNonWorkonProject(self, name, path, remote=None, revision=None):
"""Add a new nonworkon project element to the manifest tree."""
element = ElementTree.Element('project', name=name, path=path,
remote=remote)
element.attrib['workon'] = 'False'
if revision is not None:
element.attrib['revision'] = revision
self.nodes.append(element)
return element
def GetProject(self, name, path=None):
"""Accessor method for getting a project node from the manifest tree.
Returns:
project element node from ElementTree, otherwise, None
"""
if path is None:
# Use a unique value that can't ever match.
path = object()
for project in self.nodes.findall('project'):
if project.attrib['name'] == name or project.attrib['path'] == path:
return project
return None
def ToString(self):
# Reset the tail for each node, then just do a hacky replace.
project = None
for project in self.nodes.findall('project'):
project.tail = '\n '
if project is not None:
# Tweak the last project to not have the trailing space.
project.tail = '\n'
# Fix manifest tag text and tail.
self.nodes.text = '\n '
self.nodes.tail = '\n'
return ElementTree.tostring(self.nodes)
def GetProjects(self):
return list(self.nodes.findall('project'))
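# Hedged sketch (not part of the original tool): a minimal, self-contained
# demonstration of how the Manifest class above can be driven. The project
# name, path, remote and revision below are hypothetical examples.
def _ExampleBuildManifest():
  manifest = Manifest()
  manifest.AddNonWorkonProject(name='chromiumos/example', path='src/example',
                               remote='cros', revision='refs/heads/example')
  return manifest.ToString()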
def _AddProjectsToManifestGroups(options, *projects):
"""Enable the given manifest groups for the configured repository."""
groups_to_enable = ['name:%s' % x for x in projects]
git_config = options.git_config
enabled_groups = cros_build_lib.RunCommandCaptureOutput(
['git', 'config', '-f', git_config, '--get', 'manifest.groups'],
error_code_ok=True, print_cmd=False).output.split(',')
# Note that ordering actually matters, thus why the following code
# is written this way.
# Per repo behaviour, enforce an appropriate platform group if
# we're converting from a default manifest group to a limited one.
# Finally, note we reprocess the existing groups; this is to allow
# us to cleanup any user screwups, or our own screwups.
requested_groups = (
['minilayout', 'platform-%s' % (platform.system().lower(),)] +
enabled_groups + list(groups_to_enable))
processed_groups = set()
finalized_groups = []
for group in requested_groups:
if group not in processed_groups:
finalized_groups.append(group)
processed_groups.add(group)
cros_build_lib.RunCommandCaptureOutput(
['git', 'config', '-f', git_config, 'manifest.groups',
','.join(finalized_groups)], print_cmd=False)
def _UpgradeMinilayout(options):
"""Convert a repo checkout away from minilayout.xml to default.xml."""
full_tree = Manifest.FromPath(options.default_manifest_path)
local_manifest_exists = os.path.exists(options.local_manifest_path)
new_groups = []
if local_manifest_exists:
local_tree = Manifest.FromPath(options.local_manifest_path)
# Identify which projects need to be transferred across.
projects = local_tree.GetProjects()
new_groups = [x.attrib['name'] for x in projects]
allowed = set(x.attrib['name'] for x in full_tree.GetProjects())
transferred = [x for x in projects if x.attrib['name'] in allowed]
for project in transferred:
# Mangle local_manifest object, removing those projects;
# note we'll still be adding those projects to the default groups,
# including those that didn't intersect the main manifest.
local_tree.nodes.remove(project)
_AddProjectsToManifestGroups(options, *new_groups)
if local_manifest_exists:
    # Rewrite the local_manifest now; if there are no settings left in
# the local_manifest, wipe it.
if local_tree.nodes.getchildren():
with open(options.local_manifest_path, 'w') as f:
f.write(local_tree.ToString())
else:
os.unlink(options.local_manifest_path)
# Finally, move the symlink.
os.unlink(options.manifest_sym_path)
os.symlink('manifests/default.xml', options.manifest_sym_path)
logging.info("Converted the checkout to manifest groups based minilayout.")
def main(argv):
parser = optparse.OptionParser(usage='usage: %prog add [options] <name> '
'<--workon | <path> --remote <remote> >')
parser.add_option('-w', '--workon', action='store_true', dest='workon',
default=False, help='Is this a workon package?')
parser.add_option('-r', '--remote', dest='remote',
default=None)
parser.add_option('-v', '--revision', dest='revision',
default=None,
help="Use to override the manifest defined default "
"revision used for a given project.")
parser.add_option('--upgrade-minilayout', default=False, action='store_true',
help="Upgrade a minilayout checkout into a full.xml "
"checkout utilizing manifest groups.")
(options, args) = parser.parse_args(argv)
repo_dir = git.FindRepoDir(os.getcwd())
if not repo_dir:
parser.error("This script must be invoked from within a repository "
"checkout.")
options.git_config = os.path.join(repo_dir, 'manifests.git', 'config')
options.repo_dir = repo_dir
options.local_manifest_path = os.path.join(repo_dir, 'local_manifest.xml')
# This constant is used only when we're doing an upgrade away from
# minilayout.xml to default.xml.
options.default_manifest_path = os.path.join(repo_dir, 'manifests',
'default.xml')
options.manifest_sym_path = os.path.join(repo_dir, 'manifest.xml')
active_manifest = os.path.basename(os.readlink(options.manifest_sym_path))
upgrade_required = active_manifest == 'minilayout.xml'
if options.upgrade_minilayout:
if args:
parser.error("--upgrade-minilayout takes no arguments.")
if not upgrade_required:
print "This repository checkout isn't using minilayout.xml; nothing to do"
else:
_UpgradeMinilayout(options)
return 0
elif upgrade_required:
logging.warn(
"Your repository checkout is using the old minilayout.xml workflow; "
"auto-upgrading it.")
cros_build_lib.RunCommand(
[sys.argv[0], '--upgrade-minilayout'], cwd=os.getcwd(),
print_cmd=False)
if not args:
parser.error("No command specified.")
elif args[0] != 'add':
parser.error("Only supported subcommand is add right now.")
elif options.workon:
if len(args) != 2:
parser.error(
"Argument count is wrong for --workon; must be add <project>")
name, path = args[1], None
else:
if options.remote is None:
parser.error('Adding non-workon projects requires a remote.')
elif len(args) != 3:
parser.error(
"Argument count is wrong for non-workon mode; "
"must be add <project> <path> --remote <remote-arg>")
name, path = args[1:]
revision = options.revision
if revision is not None:
if (not git.IsRefsTags(revision) and
not git.IsSHA1(revision)):
revision = git.StripRefsHeads(revision, False)
main_manifest = Manifest.FromPath(options.manifest_sym_path,
empty_if_missing=False)
  local_manifest = Manifest.FromPath(options.local_manifest_path,
                                     empty_if_missing=True)
main_element = main_manifest.GetProject(name, path=path)
if options.workon:
if main_element is None:
parser.error('No project named %r in the default manifest.' % name)
_AddProjectsToManifestGroups(options, main_element.attrib['name'])
elif main_element is not None:
if options.remote is not None:
# Likely this project wasn't meant to be remote, so workon main element
print "Project already exists in manifest. Using that as workon project."
_AddProjectsToManifestGroups(options, main_element.attrib['name'])
else:
# Conflict will occur; complain.
parser.error("Requested project name=%r path=%r will conflict with "
"your current manifest %s" % (name, path, active_manifest))
elif local_manifest.GetProject(name, path=path) is not None:
parser.error("Requested project name=%r path=%r conflicts with "
"your local_manifest.xml" % (name, path))
else:
element = local_manifest.AddNonWorkonProject(name=name, path=path,
remote=options.remote,
revision=revision)
_AddProjectsToManifestGroups(options, element.attrib['name'])
with open(options.local_manifest_path, 'w') as f:
f.write(local_manifest.ToString())
return 0
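# Hedged usage sketch (not part of the original script): how main() above is
# typically invoked. argv excludes the program name, matching the
# parser.parse_args(argv) call in main(), and it must run from inside a repo
# checkout since main() enforces that. The project name, path and remote below
# are hypothetical examples.
def _ExampleAddInvocation():
  return main(['add', 'chromiumos/example', 'src/example', '--remote', 'cros'])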
|
|
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from nailgun.objects.serializers.task import TaskSerializer
from nailgun.db import db
from nailgun.db.sqlalchemy import models
from nailgun import consts
from nailgun.errors import errors
from nailgun.logger import logger
from nailgun.objects import Cluster
from nailgun.objects import NailgunCollection
from nailgun.objects import NailgunObject
from nailgun.task.helpers import TaskHelper
class Task(NailgunObject):
model = models.Task
serializer = TaskSerializer
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Task",
"description": "Serialized Task object",
"type": "object",
"properties": {
"id": {"type": "number"},
"cluster_id": {"type": "number"},
"parent_id": {"type": "number"},
"name": {
"type": "string",
"enum": list(consts.TASK_NAMES)
},
"message": {"type": "string"},
"status": {
"type": "string",
"enum": list(consts.TASK_STATUSES)
},
"progress": {"type": "number"},
"weight": {"type": "number"},
"cache": {"type": "object"},
"result": {"type": "object"}
}
}
@classmethod
def create_subtask(cls, instance, name):
if name not in consts.TASK_NAMES:
raise errors.InvalidData(
"Invalid subtask name"
)
return cls.create({
"name": name,
"cluster_id": instance.cluster_id,
"parent_id": instance.id
})
@classmethod
def get_by_uuid(cls, uuid, fail_if_not_found=False, lock_for_update=False):
# maybe consider using uuid as pk?
q = db().query(cls.model).filter_by(uuid=uuid)
if lock_for_update:
q = q.order_by('id')
q = q.with_lockmode('update')
res = q.first()
if not res and fail_if_not_found:
raise errors.ObjectNotFound(
"Task with UUID={0} is not found in DB".format(uuid)
)
return res
@classmethod
def update_verify_networks(cls, instance, status,
progress, msg, result):
#TODO(dshulyak) move network tests into ostf
previous_status = instance.status
statuses = [sub.status for sub in instance.subtasks]
messages = [sub.message for sub in instance.subtasks]
messages.append(msg)
statuses.append(status)
if any(st == 'error' for st in statuses):
instance.status = 'error'
else:
instance.status = status or instance.status
instance.progress = progress or instance.progress
instance.result = result or instance.result
# join messages if not None or ""
instance.message = '\n'.join([m for m in messages if m])
if previous_status != instance.status and instance.cluster_id:
logger.debug("Updating cluster status: "
"cluster_id: %s status: %s",
instance.cluster_id, status)
cls._update_cluster_data(instance)
@classmethod
def _update_parent_instance(cls, instance):
subtasks = instance.subtasks
if len(subtasks):
data = dict()
if all(map(lambda s: s.status == 'ready', subtasks)):
data['status'] = 'ready'
data['progress'] = 100
data['message'] = u'\n'.join(map(
lambda s: s.message, filter(
lambda s: s.message is not None, subtasks)))
cls.update(instance, data)
TaskHelper.update_action_log(instance)
elif any(map(lambda s: s.status in ('error',), subtasks)):
for subtask in subtasks:
                    if subtask.status not in ('error', 'ready'):
subtask.status = 'error'
subtask.progress = 100
subtask.message = 'Task aborted'
data['status'] = 'error'
data['progress'] = 100
data['message'] = u'\n'.join(list(set(map(
lambda s: (s.message or ""), filter(
lambda s: (
s.status == 'error' and not
# TODO: make this check less ugly
s.message == 'Task aborted'
), subtasks)))))
cls.update(instance, data)
TaskHelper.update_action_log(instance)
else:
subtasks_with_progress = filter(
lambda s: s.progress is not None,
subtasks
)
if subtasks_with_progress:
instance.progress = \
TaskHelper.calculate_parent_task_progress(
subtasks_with_progress
)
else:
instance.progress = 0
@classmethod
def __update_nodes_to_error(cls, q_nodes_to_error, error_type):
if q_nodes_to_error.count():
logger.debug(
u'Updating nodes to error with error_type "{0}": {1}'
.format(error_type, [n.full_name for n in q_nodes_to_error]))
for n in q_nodes_to_error:
n.status = 'error'
n.progress = 0
n.error_type = error_type
@classmethod
def __update_cluster_status(cls, cluster, status):
logger.debug(
"Updating cluster (%s) status: from %s to %s",
cluster.full_name, cluster.status, status)
Cluster.update(cluster, data={'status': status})
@classmethod
def _update_cluster_data(cls, instance):
cluster = instance.cluster
if instance.name == 'deploy':
if instance.status == 'ready':
                # If for some reason the orchestrator
# didn't send ready status for node
# we should set it explicitly
for n in cluster.nodes:
if n.status == 'deploying':
n.status = 'ready'
n.progress = 100
cls.__update_cluster_status(cluster, 'operational')
Cluster.clear_pending_changes(cluster)
elif instance.status == 'error' and \
not TaskHelper.before_deployment_error(instance):
# We don't want to set cluster status to
# error because we don't want to lock
                # settings if cluster wasn't deployed
cls.__update_cluster_status(cluster, 'error')
elif instance.name == 'deployment' and instance.status == 'error':
cls.__update_cluster_status(cluster, 'error')
q_nodes_to_error = \
TaskHelper.get_nodes_to_deployment_error(cluster)
cls.__update_nodes_to_error(q_nodes_to_error,
error_type='deploy')
elif instance.name == 'provision' and instance.status == 'error':
cls.__update_cluster_status(cluster, 'error')
q_nodes_to_error = \
TaskHelper.get_nodes_to_provisioning_error(cluster)
cls.__update_nodes_to_error(q_nodes_to_error,
error_type='provision')
elif instance.name == 'stop_deployment':
if instance.status == 'error':
cls.__update_cluster_status(cluster, 'error')
else:
cls.__update_cluster_status(cluster, 'stopped')
elif instance.name == consts.TASK_NAMES.update:
if instance.status == consts.TASK_STATUSES.error:
cls.__update_cluster_status(
cluster,
consts.CLUSTER_STATUSES.update_error
)
q_nodes_to_error = \
TaskHelper.get_nodes_to_deployment_error(cluster)
cls.__update_nodes_to_error(
q_nodes_to_error, error_type=consts.NODE_ERRORS.deploy)
elif instance.status == consts.TASK_STATUSES.ready:
cls.__update_cluster_status(
cluster,
consts.CLUSTER_STATUSES.operational
)
cluster.release_id = cluster.pending_release_id
cluster.pending_release_id = None
@classmethod
def _clean_data(cls, data):
result = copy.copy(data)
if result.get('status') not in consts.TASK_STATUSES:
result.pop('status', None)
return result
@classmethod
def update(cls, instance, data):
logger.debug("Updating task: %s", instance.uuid)
clean_data = cls._clean_data(data)
super(Task, cls).update(instance, clean_data)
db().flush()
if instance.cluster_id:
logger.debug("Updating cluster status: %s "
"cluster_id: %s status: %s",
instance.uuid, instance.cluster_id,
data.get('status'))
cls._update_cluster_data(instance)
if instance.parent:
logger.debug("Updating parent task: %s.", instance.parent.uuid)
cls._update_parent_instance(instance.parent)
class TaskCollection(NailgunCollection):
single = Task
@classmethod
def get_by_cluster_id(cls, cluster_id):
if cluster_id == '':
return cls.filter_by(None, cluster_id=None)
return cls.filter_by(None, cluster_id=cluster_id)
@classmethod
def lock_cluster_tasks(cls, cluster_id, names=None):
query = cls.get_by_cluster_id(cluster_id)
if isinstance(names, (list, tuple)):
query = cls.filter_by_list(query, 'name', names)
query = cls.order_by(query, 'id')
query = cls.lock_for_update(query)
return query.all()
@classmethod
def delete_by_names(cls, cluster_id, names):
db().query(cls.single.model).filter_by(
cluster_id=cluster_id,
).filter(
cls.single.model.name.in_(names)
).delete(
synchronize_session='fetch'
)
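# Hedged sketch (not part of nailgun): roughly how the Task object above is
# driven. It assumes an active nailgun DB session and an existing task row
# identified by the (hypothetical) task_uuid; the status and name constants
# come from nailgun.consts, as used elsewhere in this module.
def _example_task_flow(task_uuid):
    task = Task.get_by_uuid(task_uuid, fail_if_not_found=True,
                            lock_for_update=True)
    Task.update(task, {'status': consts.TASK_STATUSES.running, 'progress': 10})
    return Task.create_subtask(task, name=consts.TASK_NAMES.deployment)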
|
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .. import ivi
from .. import dcpwr
from .. import extra
from .. import scpi
CurrentLimitBehavior = set(['regulate'])
TrackingType = set(['floating'])
TriggerSourceMapping = {
'immediate': 'imm',
'bus': 'bus'}
class agilentU2722A(scpi.common.IdnCommand, scpi.common.ErrorQuery, scpi.common.Reset,
scpi.common.SelfTest,
dcpwr.Base, dcpwr.Measurement,
extra.dcpwr.OCP,
ivi.Driver):
"Agilent U2722A IVI modular source measure unit driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'U2722A')
super(agilentU2722A, self).__init__(*args, **kwargs)
self._self_test_delay = 10
self._output_count = 3
self._output_spec = [
{
'current_range': {
'R1uA': 1e-6,
'R10uA': 10e-6,
'R100uA': 100e-6,
'R1mA': 1e-3,
'R10mA': 10e-3,
'R120mA': 120e-3,
},
'voltage_range': {
'R2V': 2.0,
'R20V': 20.0,
},
'ocp_max': 120e-3,
'ovp_max': 20.0,
'voltage_max': 20.0,
'current_max': 120e-3
}
]*3
self._output_trigger_delay = list()
self._identity_description = "Agilent U2722A modular source measure unit driver"
self._identity_identifier = ""
self._identity_revision = ""
self._identity_vendor = ""
self._identity_instrument_manufacturer = "Agilent Technologies"
self._identity_instrument_model = ""
self._identity_instrument_firmware_revision = ""
self._identity_specification_major_version = 3
self._identity_specification_minor_version = 0
self._identity_supported_instrument_models = ['U2722A', 'U2723A']
self._init_outputs()
def _initialize(self, resource = None, id_query = False, reset = False, **keywargs):
"Opens an I/O session to the instrument."
super(agilentU2722A, self)._initialize(resource, id_query, reset, **keywargs)
# interface clear
if not self._driver_operation_simulate:
self._clear()
# check ID
if id_query and not self._driver_operation_simulate:
id = self.identity.instrument_model
id_check = self._instrument_id
id_short = id[:len(id_check)]
if id_short != id_check:
                raise Exception("Instrument ID mismatch, expecting %s, got %s" % (id_check, id_short))
# reset
if reset:
self.utility_reset()
def _utility_disable(self):
pass
def _utility_lock_object(self):
pass
def _utility_unlock_object(self):
pass
def _init_outputs(self):
try:
super(agilentU2722A, self)._init_outputs()
except AttributeError:
pass
self._output_current_limit = list()
self._output_current_limit_behavior = list()
self._output_enabled = list()
self._output_ovp_enabled = list()
self._output_ovp_limit = list()
self._output_voltage_level = list()
self._output_trigger_source = list()
self._output_trigger_delay = list()
for i in range(self._output_count):
self._output_current_limit.append(0)
self._output_current_limit_behavior.append('regulate')
self._output_enabled.append(False)
self._output_ovp_enabled.append(True)
self._output_ovp_limit.append(0)
self._output_voltage_level.append(0)
self._output_trigger_source.append('bus')
self._output_trigger_delay.append(0)
def _get_output_current_limit(self, index):
index = ivi.get_index(self._output_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
self._output_current_limit[index] = float(self._ask("source:current:level? (@%d)" % (index+1)))
self._set_cache_valid(index=index)
return self._output_current_limit[index]
def _set_output_current_limit(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
if abs(value) > self._output_spec[index]['current_max']:
raise ivi.OutOfRangeException()
if not self._driver_operation_simulate:
self._write("source:current:level %.6g, (@%d)" % (value, index+1))
self._output_current_limit[index] = value
self._set_cache_valid(index=index)
def _get_output_current_limit_behavior(self, index):
index = ivi.get_index(self._output_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
self._output_current_limit_behavior[index] = 'regulate'
self._set_cache_valid(index=index)
return self._output_current_limit_behavior[index]
def _set_output_current_limit_behavior(self, index, value):
index = ivi.get_index(self._output_name, index)
if value not in CurrentLimitBehavior:
raise ivi.ValueNotSupportedException()
self._set_cache_valid(index=index)
def _get_output_enabled(self, index):
index = ivi.get_index(self._output_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
self._output_enabled[index] = bool(int(self._ask("output? (@%d)" % (index+1))))
self._set_cache_valid(index=index)
return self._output_enabled[index]
def _set_output_enabled(self, index, value):
index = ivi.get_index(self._output_name, index)
value = bool(value)
if not self._driver_operation_simulate:
self._write("output %d, (@%d)" % (int(value), index+1))
self._output_enabled[index] = value
for k in range(self._output_count):
self._set_cache_valid(valid=False,index=k)
self._set_cache_valid(index=index)
def _get_output_ovp_enabled(self, index):
index = ivi.get_index(self._output_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
self._output_ovp_enabled[index] = True
self._set_cache_valid(index=index)
return self._output_ovp_enabled[index]
def _set_output_ovp_enabled(self, index, value):
index = ivi.get_index(self._output_name, index)
value = bool(value)
if not value:
raise ivi.ValueNotSupportedException()
self._output_ovp_enabled[index] = value
self._set_cache_valid(index=index)
def _get_output_ovp_limit(self, index):
index = ivi.get_index(self._output_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
self._output_ovp_limit[index] = float(self._ask("source:voltage:limit? (@%d)" % (index+1)))
self._set_cache_valid(index=index)
return self._output_ovp_limit[index]
def _set_output_ovp_limit(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
if abs(value) > self._output_spec[index]['ovp_max']:
raise ivi.OutOfRangeException()
if not self._driver_operation_simulate:
self._write("source:voltage:limit %.6g, (@%d)" % (value, index+1))
self._output_ovp_limit[index] = value
self._set_cache_valid(index=index)
def _get_output_ocp_enabled(self, index):
index = ivi.get_index(self._output_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
self._output_ocp_enabled[index] = True
self._set_cache_valid(index=index)
return self._output_ocp_enabled[index]
def _set_output_ocp_enabled(self, index, value):
index = ivi.get_index(self._output_name, index)
value = bool(value)
if not value:
raise ivi.ValueNotSupportedException()
self._output_ocp_enabled[index] = value
self._set_cache_valid(index=index)
def _get_output_ocp_limit(self, index):
index = ivi.get_index(self._output_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
self._output_ocp_limit[index] = float(self._ask("source:current:limit? (@%d)" % (index+1)))
self._set_cache_valid(index=index)
return self._output_ocp_limit[index]
def _set_output_ocp_limit(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
if abs(value) > self._output_spec[index]['ocp_max']:
raise ivi.OutOfRangeException()
if not self._driver_operation_simulate:
self._write("source:current:limit %.6g, (@%d)" % (value, index+1))
self._output_ocp_limit[index] = value
self._set_cache_valid(index=index)
def _get_output_voltage_level(self, index):
index = ivi.get_index(self._output_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
self._output_voltage_level[index] = float(self._ask("source:voltage:level? (@%d)" % (index+1)))
self._set_cache_valid(index=index)
return self._output_voltage_level[index]
def _set_output_voltage_level(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
if abs(value) > self._output_spec[index]['voltage_max']:
raise ivi.OutOfRangeException()
if not self._driver_operation_simulate:
self._write("source:voltage:level %.6g, (@%d)" % (value, index+1))
self._output_voltage_level[index] = value
self._set_cache_valid(index=index)
def _output_configure_range(self, index, range_type, range_val):
index = ivi.get_index(self._output_name, index)
if range_type not in dcpwr.RangeType:
raise ivi.ValueNotSupportedException()
if range_type == 'voltage':
t = 0
elif range_type == 'current':
t = 1
range_val = abs(range_val)
if len(self._output_spec[index][range_type+'_range']) < 2:
# do not set range if there is only one range
return
k = dcpwr.get_range(self._output_spec[index][range_type+'_range'], None, range_val)
if k is None:
raise ivi.OutOfRangeException()
if range_type == 'voltage':
self._output_spec[index]['voltage_max'] = self._output_spec[index]['voltage_range'][k]
elif range_type == 'current':
self._output_spec[index]['current_max'] = self._output_spec[index]['current_range'][k]
if not self._driver_operation_simulate:
if range_type == 'voltage':
self._write("source:voltage:range %s, (@%d)" % (k, index+1))
elif range_type == 'current':
self._write("source:current:range %s, (@%d)" % (k, index+1))
def _output_query_current_limit_max(self, index, voltage_level):
index = ivi.get_index(self._output_name, index)
if abs(voltage_level) > self._output_spec[index]['voltage_max']:
raise ivi.OutOfRangeException()
return self._output_spec[index]['current_max']
def _output_query_voltage_level_max(self, index, current_limit):
index = ivi.get_index(self._output_name, index)
if abs(current_limit) > self._output_spec[index]['current_max']:
raise ivi.OutOfRangeException()
return self._output_spec[index]['voltage_max']
def _output_query_output_state(self, index, state):
index = ivi.get_index(self._output_name, index)
raise ivi.ValueNotSupportedException()
return False
def _output_reset_output_protection(self, index):
pass
def _output_measure(self, index, type):
index = ivi.get_index(self._output_name, index)
if type not in dcpwr.MeasurementType:
raise ivi.ValueNotSupportedException()
if type == 'voltage':
if not self._driver_operation_simulate:
return float(self._ask("measure:voltage? (@%d)" % (index+1)))
elif type == 'current':
if not self._driver_operation_simulate:
return float(self._ask("measure:current? (@%d)" % (index+1)))
return 0
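# Hedged usage sketch (not part of the driver): roughly how a python-ivi
# SMU/power-supply driver such as this one is used. The VISA resource string
# is a hypothetical example, and the output attributes follow the dcpwr.Base /
# dcpwr.Measurement interfaces this class mixes in above.
def _example_usage():
    smu = agilentU2722A("USB0::0x0957::0x4118::MY00000000::0::INSTR")
    smu.outputs[0].voltage_level = 1.5      # volts
    smu.outputs[0].current_limit = 10e-3    # amps
    smu.outputs[0].enabled = True
    current = smu.outputs[0].measure('current')
    smu.close()
    return current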
|
|
# This is a Makefile for the `mk` tool. (Limited) details for that here:
# <http://svn.openkomodo.com/openkomodo/browse/mk>
import sys
import os
from os.path import join, dirname, normpath, abspath, exists, basename
import re
from glob import glob
import codecs
import webbrowser
import mklib
assert mklib.__version_info__ >= (0,7,2) # for `mklib.mk`
from mklib.common import MkError
from mklib import Task, mk
from mklib import sh
class bugs(Task):
"""open bug/issues page"""
def make(self):
webbrowser.open("http://github.com/trentm/django-markdown-deux/issues")
class site(Task):
"""open project page"""
def make(self):
webbrowser.open("http://github.com/trentm/django-markdown-deux")
class pypi(Task):
"""open project page"""
def make(self):
webbrowser.open("http://pypi.python.org/pypi/django-markdown-deux/")
class cut_a_release(Task):
"""automate the steps for cutting a release
See 'docs/devguide.md' in <http://github.com/trentm/eol> for details.
"""
proj_name = "django-markdown-deux"
version_py_path = "lib/markdown_deux/__init__.py"
version_module = "markdown_deux"
_changes_parser = re.compile(r'^## %s (?P<ver>[\d\.abc]+)'
r'(?P<nyr>\s+\(not yet released\))?'
r'(?P<body>.*?)(?=^##|\Z)' % proj_name, re.M | re.S)
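    # Hedged illustration (not part of the original file): the CHANGES.md
    # shape the regex above expects, e.g.
    #
    #   ## django-markdown-deux 1.0.1 (not yet released)
    #
    #   (nothing yet)
    #
    #   ## django-markdown-deux 1.0.0
    #   ...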
def make(self):
DRY_RUN = False
version = self._get_version()
# Confirm
if not DRY_RUN:
            answer = query_yes_no("* * *\n"
                "Are you sure you want to cut a %s release?\n"
                "This will involve commits and a release to pypi." % version,
default="no")
if answer != "yes":
self.log.info("user abort")
return
print "* * *"
self.log.info("cutting a %s release", version)
# Checks: Ensure there is a section in changes for this version.
changes_path = join(self.dir, "CHANGES.md")
changes_txt = changes_txt_before = codecs.open(changes_path, 'r', 'utf-8').read()
changes_sections = self._changes_parser.findall(changes_txt)
top_ver = changes_sections[0][0]
if top_ver != version:
raise MkError("top section in `CHANGES.md' is for "
"version %r, expected version %r: aborting"
% (top_ver, version))
top_nyr = changes_sections[0][1]
if not top_nyr:
answer = query_yes_no("\n* * *\n"
"The top section in `CHANGES.md' doesn't have the expected\n"
"'(not yet released)' marker. Has this been released already?",
default="yes")
if answer != "no":
self.log.info("abort")
return
print "* * *"
top_body = changes_sections[0][2]
if top_body.strip() == "(nothing yet)":
raise MkError("top section body is `(nothing yet)': it looks like "
"nothing has been added to this release")
# Commits to prepare release.
changes_txt = changes_txt.replace(" (not yet released)", "", 1)
if not DRY_RUN and changes_txt != changes_txt_before:
self.log.info("prepare `CHANGES.md' for release")
f = codecs.open(changes_path, 'w', 'utf-8')
f.write(changes_txt)
f.close()
sh.run('git commit %s -m "prepare for %s release"'
% (changes_path, version), self.log.debug)
# Tag version and push.
curr_tags = set(t for t in _capture_stdout(["git", "tag", "-l"]).split('\n') if t)
if not DRY_RUN and version not in curr_tags:
self.log.info("tag the release")
sh.run('git tag -a "%s" -m "version %s"' % (version, version),
self.log.debug)
sh.run('git push --tags', self.log.debug)
# Release to PyPI.
self.log.info("release to pypi")
if not DRY_RUN:
mk("pypi_upload")
# Commits to prepare for future dev and push.
next_version = self._get_next_version(version)
self.log.info("prepare for future dev (version %s)", next_version)
marker = "## %s %s\n" % (self.proj_name, version)
if marker not in changes_txt:
raise MkError("couldn't find `%s' marker in `%s' "
"content: can't prep for subsequent dev" % (marker, changes_path))
changes_txt = changes_txt.replace("## %s %s\n" % (self.proj_name, version),
"## %s %s (not yet released)\n\n(nothing yet)\n\n## %s %s\n" % (
self.proj_name, next_version, self.proj_name, version))
if not DRY_RUN:
f = codecs.open(changes_path, 'w', 'utf-8')
f.write(changes_txt)
f.close()
ver_path = join(self.dir, normpath(self.version_py_path))
ver_content = codecs.open(ver_path, 'r', 'utf-8').read()
version_tuple = self._tuple_from_version(version)
next_version_tuple = self._tuple_from_version(next_version)
marker = "__version_info__ = %r" % (version_tuple,)
if marker not in ver_content:
raise MkError("couldn't find `%s' version marker in `%s' "
"content: can't prep for subsequent dev" % (marker, ver_path))
ver_content = ver_content.replace(marker,
"__version_info__ = %r" % (next_version_tuple,))
if not DRY_RUN:
f = codecs.open(ver_path, 'w', 'utf-8')
f.write(ver_content)
f.close()
if not DRY_RUN:
sh.run('git commit %s %s -m "prep for future dev"' % (
changes_path, ver_path))
sh.run('git push')
def _tuple_from_version(self, version):
def _intify(s):
try:
return int(s)
except ValueError:
return s
return tuple(_intify(b) for b in version.split('.'))
def _get_next_version(self, version):
last_bit = version.rsplit('.', 1)[-1]
try:
last_bit = int(last_bit)
except ValueError: # e.g. "1a2"
last_bit = int(re.split('[abc]', last_bit, 1)[-1])
return version[:-len(str(last_bit))] + str(last_bit + 1)
def _get_version(self):
lib_dir = join(dirname(abspath(__file__)), "lib")
sys.path.insert(0, lib_dir)
try:
mod = __import__(self.version_module)
return mod.__version__
finally:
del sys.path[0]
class clean(Task):
"""Clean generated files and dirs."""
def make(self):
patterns = [
"dist",
"build",
"MANIFEST",
"*.pyc",
"lib/*.pyc",
]
for pattern in patterns:
p = join(self.dir, pattern)
for path in glob(p):
sh.rm(path, log=self.log)
class sdist(Task):
"""python setup.py sdist"""
def make(self):
sh.run_in_dir("%spython setup.py sdist --formats zip"
% _setup_command_prefix(),
self.dir, self.log.debug)
class pypi_upload(Task):
"""Upload release to pypi."""
def make(self):
sh.run_in_dir("%spython setup.py sdist --formats zip upload"
% _setup_command_prefix(),
self.dir, self.log.debug)
sys.path.insert(0, join(self.dir, "lib"))
url = "http://pypi.python.org/pypi/django-markdown-deux/"
import webbrowser
webbrowser.open_new(url)
class todo(Task):
"""Print out todo's and xxx's in the docs area."""
def make(self):
for path in _paths_from_path_patterns(['.'],
excludes=[".svn", "*.pyc", "TO""DO.txt", "Makefile.py",
"*.png", "*.gif", "*.pprint", "*.prof",
"tmp*"]):
self._dump_pattern_in_path("TO\DO\\|XX\X", path)
def _dump_pattern_in_path(self, pattern, path):
os.system("grep -nH '%s' '%s'" % (pattern, path))
#---- internal support stuff
## {{{ http://code.activestate.com/recipes/577058/ (r2)
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is one of "yes" or "no".
"""
valid = {"yes":"yes", "y":"yes", "ye":"yes",
"no":"no", "n":"no"}
if default == None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while 1:
sys.stdout.write(question + prompt)
choice = raw_input().lower()
if default is not None and choice == '':
return default
elif choice in valid.keys():
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "\
"(or 'y' or 'n').\n")
## end of http://code.activestate.com/recipes/577058/ }}}
## {{{ http://code.activestate.com/recipes/577230/ (r2)
def _should_include_path(path, includes, excludes):
"""Return True iff the given path should be included."""
from os.path import basename
from fnmatch import fnmatch
base = basename(path)
if includes:
for include in includes:
if fnmatch(base, include):
try:
log.debug("include `%s' (matches `%s')", path, include)
except (NameError, AttributeError):
pass
break
else:
try:
log.debug("exclude `%s' (matches no includes)", path)
except (NameError, AttributeError):
pass
return False
for exclude in excludes:
if fnmatch(base, exclude):
try:
log.debug("exclude `%s' (matches `%s')", path, exclude)
except (NameError, AttributeError):
pass
return False
return True
def _walk(top, topdown=True, onerror=None, follow_symlinks=False):
"""A version of `os.walk()` with a couple differences regarding symlinks.
1. follow_symlinks=False (the default): A symlink to a dir is
returned as a *non*-dir. In `os.walk()`, a symlink to a dir is
returned in the *dirs* list, but it is not recursed into.
2. follow_symlinks=True: A symlink to a dir is returned in the
*dirs* list (as with `os.walk()`) but it *is conditionally*
recursed into (unlike `os.walk()`).
A symlinked dir is only recursed into if it is to a deeper dir
within the same tree. This is my understanding of how `find -L
DIR` works.
TODO: put as a separate recipe
"""
import os
from os.path import join, isdir, islink, abspath
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.path.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
names = os.listdir(top)
except OSError, err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
if follow_symlinks:
for name in names:
if isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
else:
for name in names:
path = join(top, name)
if islink(path):
nondirs.append(name)
elif isdir(path):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
path = join(top, name)
if follow_symlinks and islink(path):
# Only walk this path if it links deeper in the same tree.
top_abs = abspath(top)
link_abs = abspath(join(top, os.readlink(path)))
if not link_abs.startswith(top_abs + os.sep):
continue
for x in _walk(path, topdown, onerror, follow_symlinks=follow_symlinks):
yield x
if not topdown:
yield top, dirs, nondirs
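# Hedged sketch (not part of the original recipe): minimal use of _walk()
# above, mirroring os.walk() but with the symlink behaviour described in its
# docstring. The starting directory is a hypothetical example.
def _example_walk(top="."):
    paths = []
    for dirpath, dirnames, filenames in _walk(top, follow_symlinks=False):
        for filename in filenames:
            paths.append(join(dirpath, filename))
    return paths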
_NOT_SPECIFIED = ("NOT", "SPECIFIED")
def _paths_from_path_patterns(path_patterns, files=True, dirs="never",
recursive=True, includes=[], excludes=[],
skip_dupe_dirs=False,
follow_symlinks=False,
on_error=_NOT_SPECIFIED):
"""_paths_from_path_patterns([<path-patterns>, ...]) -> file paths
Generate a list of paths (files and/or dirs) represented by the given path
patterns.
"path_patterns" is a list of paths optionally using the '*', '?' and
'[seq]' glob patterns.
"files" is boolean (default True) indicating if file paths
should be yielded
"dirs" is string indicating under what conditions dirs are
yielded. It must be one of:
never (default) never yield dirs
always yield all dirs matching given patterns
if-not-recursive only yield dirs for invocations when
recursive=False
See use cases below for more details.
"recursive" is boolean (default True) indicating if paths should
be recursively yielded under given dirs.
"includes" is a list of file patterns to include in recursive
searches.
"excludes" is a list of file and dir patterns to exclude.
(Note: This is slightly different than GNU grep's --exclude
option which only excludes *files*. I.e. you cannot exclude
a ".svn" dir.)
"skip_dupe_dirs" can be set True to watch for and skip
descending into a dir that has already been yielded. Note
that this currently does not dereference symlinks.
"follow_symlinks" is a boolean indicating whether to follow
symlinks (default False). To guard against infinite loops
with circular dir symlinks, only dir symlinks to *deeper*
dirs are followed.
"on_error" is an error callback called when a given path pattern
matches nothing:
on_error(PATH_PATTERN)
If not specified, the default is look for a "log" global and
call:
log.error("`%s': No such file or directory")
Specify None to do nothing.
Typically this is useful for a command-line tool that takes a list
of paths as arguments. (For Unix-heads: the shell on Windows does
NOT expand glob chars, that is left to the app.)
Use case #1: like `grep -r`
{files=True, dirs='never', recursive=(if '-r' in opts)}
script FILE # yield FILE, else call on_error(FILE)
script DIR # yield nothing
script PATH* # yield all files matching PATH*; if none,
# call on_error(PATH*) callback
script -r DIR # yield files (not dirs) recursively under DIR
script -r PATH* # yield files matching PATH* and files recursively
# under dirs matching PATH*; if none, call
# on_error(PATH*) callback
Use case #2: like `file -r` (if it had a recursive option)
{files=True, dirs='if-not-recursive', recursive=(if '-r' in opts)}
script FILE # yield FILE, else call on_error(FILE)
script DIR # yield DIR, else call on_error(DIR)
script PATH* # yield all files and dirs matching PATH*; if none,
# call on_error(PATH*) callback
script -r DIR # yield files (not dirs) recursively under DIR
script -r PATH* # yield files matching PATH* and files recursively
# under dirs matching PATH*; if none, call
# on_error(PATH*) callback
Use case #3: kind of like `find .`
{files=True, dirs='always', recursive=(if '-r' in opts)}
script FILE # yield FILE, else call on_error(FILE)
script DIR # yield DIR, else call on_error(DIR)
script PATH* # yield all files and dirs matching PATH*; if none,
# call on_error(PATH*) callback
script -r DIR # yield files and dirs recursively under DIR
# (including DIR)
script -r PATH* # yield files and dirs matching PATH* and recursively
# under dirs; if none, call on_error(PATH*)
# callback
TODO: perf improvements (profile, stat just once)
"""
from os.path import basename, exists, isdir, join, normpath, abspath, \
lexists, islink, realpath
from glob import glob
assert not isinstance(path_patterns, basestring), \
"'path_patterns' must be a sequence, not a string: %r" % path_patterns
GLOB_CHARS = '*?['
if skip_dupe_dirs:
searched_dirs = set()
for path_pattern in path_patterns:
# Determine the set of paths matching this path_pattern.
for glob_char in GLOB_CHARS:
if glob_char in path_pattern:
paths = glob(path_pattern)
break
else:
if follow_symlinks:
paths = exists(path_pattern) and [path_pattern] or []
else:
paths = lexists(path_pattern) and [path_pattern] or []
if not paths:
if on_error is None:
pass
elif on_error is _NOT_SPECIFIED:
try:
log.error("`%s': No such file or directory", path_pattern)
except (NameError, AttributeError):
pass
else:
on_error(path_pattern)
for path in paths:
if (follow_symlinks or not islink(path)) and isdir(path):
if skip_dupe_dirs:
canon_path = normpath(abspath(path))
if follow_symlinks:
canon_path = realpath(canon_path)
if canon_path in searched_dirs:
continue
else:
searched_dirs.add(canon_path)
# 'includes' SHOULD affect whether a dir is yielded.
if (dirs == "always"
or (dirs == "if-not-recursive" and not recursive)
) and _should_include_path(path, includes, excludes):
yield path
# However, if recursive, 'includes' should NOT affect
# whether a dir is recursed into. Otherwise you could
# not:
# script -r --include="*.py" DIR
if recursive and _should_include_path(path, [], excludes):
for dirpath, dirnames, filenames in _walk(path,
follow_symlinks=follow_symlinks):
dir_indeces_to_remove = []
for i, dirname in enumerate(dirnames):
d = join(dirpath, dirname)
if skip_dupe_dirs:
canon_d = normpath(abspath(d))
if follow_symlinks:
canon_d = realpath(canon_d)
if canon_d in searched_dirs:
dir_indeces_to_remove.append(i)
continue
else:
searched_dirs.add(canon_d)
if dirs == "always" \
and _should_include_path(d, includes, excludes):
yield d
if not _should_include_path(d, [], excludes):
dir_indeces_to_remove.append(i)
for i in reversed(dir_indeces_to_remove):
del dirnames[i]
if files:
for filename in sorted(filenames):
f = join(dirpath, filename)
if _should_include_path(f, includes, excludes):
yield f
elif files and _should_include_path(path, includes, excludes):
yield path
## end of http://code.activestate.com/recipes/577230/ }}}
_g_version = None
def _get_version():
global _g_version
if _g_version is None:
sys.path.insert(0, join(dirname(__file__), "lib"))
try:
import cmdln
_g_version = cmdln.__version__
finally:
del sys.path[0]
return _g_version
def _setup_command_prefix():
prefix = ""
if sys.platform == "darwin":
# http://forums.macosxhints.com/archive/index.php/t-43243.html
# This is an Apple customization to `tar` to avoid creating
# '._foo' files for extended-attributes for archived files.
prefix = "COPY_EXTENDED_ATTRIBUTES_DISABLE=1 "
return prefix
def _capture_stdout(argv):
import subprocess
p = subprocess.Popen(argv, stdout=subprocess.PIPE)
return p.communicate()[0]
|
|
'''Example script to run a full analysis with high resolution telescope data + a fast time reference plane + a small device under test.
The telescope consists of 6 Mimosa26 planes and one FE-I4 with a full size planar n-in-n sensor as a timing reference.
The device under test is a small passive sensor in LFoundry 150 nm CMOS process.
The Mimosa26 has an active area of 21.2mm x 10.6mm and the pixel matrix consists of 1152 columns and 576 rows (18.4um x 18.4um pixel size).
The total size of the chip is 21.5mm x 13.7mm x 0.036mm (radiation length 9.3660734 cm).
The matrix is divided into 4 areas. For each area the threshold can be set individually.
The quarters are columns 0-287, 288-575, 576-863 and 864-1151.
The timing reference is about 2 cm x 2 cm divided into 80 x 336 pixels. The time stamping happens with a 40 MHz clock (25 ns).
'''
import os
import logging
import numpy as np
from testbeam_analysis import hit_analysis
from testbeam_analysis import dut_alignment
from testbeam_analysis import track_analysis
from testbeam_analysis import result_analysis
from testbeam_analysis.tools import data_selection
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - [%(levelname)-8s] (%(threadName)-10s) %(message)s")
def run_analysis():
# The location of the example data files, one file per DUT
data_files = [r'H:\Testbeam_05052016_LFCMOS\Telescope_data\kartel-converted-synchronized_plane0.h5',
r'H:\Testbeam_05052016_LFCMOS\Telescope_data\kartel-converted-synchronized_plane1.h5',
r'H:\Testbeam_05052016_LFCMOS\Telescope_data\kartel-converted-synchronized_plane2.h5',
r'H:\Testbeam_05052016_LFCMOS\Telescope_data\fe_dut-converted-synchronized_plane0.h5',
r'H:\Testbeam_05052016_LFCMOS\Telescope_data\fe_dut-converted-synchronized_plane1.h5',
r'H:\Testbeam_05052016_LFCMOS\Telescope_data\kartel-converted-synchronized_plane3.h5',
r'H:\Testbeam_05052016_LFCMOS\Telescope_data\kartel-converted-synchronized_plane4.h5',
r'H:\Testbeam_05052016_LFCMOS\Telescope_data\kartel-converted-synchronized_plane5.h5'] # The first device is the reference for the coordinate system
    # Pixel dimensions and matrix size of the DUTs
pixel_size = [(18.4, 18.4), (18.4, 18.4), (18.4, 18.4), (250, 50), (250, 50), (18.4, 18.4), (18.4, 18.4), (18.4, 18.4)] # (Column, row) pixel pitch in um
n_pixels = [(1152, 576), (1152, 576), (1152, 576), (80, 336), (80, 336), (1152, 576), (1152, 576), (1152, 576)] # (Column, row) dimensions of the pixel matrix
z_positions = [0., 20000, 40000, 40000 + 101000, 40000 + 101000 + 23000, 247000, 267000, 287000] # in um
dut_names = ("Tel 0", "Tel 1", "Tel 2", "LFCMOS3", "FEI4 Reference", "Tel 3", "Tel 4", "Tel 5") # Friendly names for plotting
# Folder where all output data and plots are stored
output_folder = r'H:\Testbeam_05052016_LFCMOS\output'
# The following shows a complete test beam analysis by calling the separate functions in the correct order
# Generate noisy pixel mask for all DUTs
threshold = [2, 2, 2, 10, 10, 2, 2, 2]
for i, data_file in enumerate(data_files):
hit_analysis.generate_pixel_mask(input_hits_file=data_file,
n_pixel=n_pixels[i],
pixel_mask_name='NoisyPixelMask',
pixel_size=pixel_size[i],
threshold=threshold[i],
dut_name=dut_names[i])
# Cluster hits from all DUTs
column_cluster_distance = [3, 3, 3, 2, 2, 3, 3, 3]
row_cluster_distance = [3, 3, 3, 3, 3, 3, 3, 3]
frame_cluster_distance = [0, 0, 0, 0, 0, 0, 0, 0]
for i, data_file in enumerate(data_files):
hit_analysis.cluster_hits(input_hits_file=data_file,
input_noisy_pixel_mask_file=os.path.splitext(data_files[i])[0] + '_noisy_pixel_mask.h5',
min_hit_charge=0,
max_hit_charge=13,
column_cluster_distance=column_cluster_distance[i],
row_cluster_distance=row_cluster_distance[i],
frame_cluster_distance=frame_cluster_distance[i],
dut_name=dut_names[i])
# Generate filenames for cluster data
input_cluster_files = [os.path.splitext(data_file)[0] + '_clustered.h5'
for data_file in data_files]
# Correlate the row / column of each DUT
dut_alignment.correlate_cluster(input_cluster_files=input_cluster_files,
output_correlation_file=os.path.join(output_folder, 'Correlation.h5'),
n_pixels=n_pixels,
pixel_size=pixel_size,
dut_names=dut_names)
# Create prealignment relative to the first DUT from the correlation data
dut_alignment.prealignment(input_correlation_file=os.path.join(output_folder, 'Correlation.h5'),
output_alignment_file=os.path.join(output_folder, 'Alignment.h5'),
z_positions=z_positions,
pixel_size=pixel_size,
dut_names=dut_names,
fit_background=True,
non_interactive=False) # Tries to find cuts automatically; deactivate to do this manually
# Merge the cluster tables to one merged table aligned at the event number
dut_alignment.merge_cluster_data(input_cluster_files=input_cluster_files,
output_merged_file=os.path.join(output_folder, 'Merged.h5'),
pixel_size=pixel_size)
# Apply the prealignment to the merged cluster table to create tracklets
dut_alignment.apply_alignment(input_hit_file=os.path.join(output_folder, 'Merged.h5'),
input_alignment_file=os.path.join(output_folder, 'Alignment.h5'),
output_hit_file=os.path.join(output_folder, 'Tracklets_prealigned.h5'),
force_prealignment=True)
# Find tracks from the prealigned tracklets and store them with a quality indicator in the track candidates table
track_analysis.find_tracks(input_tracklets_file=os.path.join(output_folder, 'Tracklets_prealigned.h5'),
input_alignment_file=os.path.join(output_folder, 'Alignment.h5'),
output_track_candidates_file=os.path.join(output_folder, 'TrackCandidates_prealignment.h5'))
# Select tracks with a hit in the time reference (DUT 4) and in all position devices to speed up the analysis by reducing the data
data_selection.select_hits(hit_file=os.path.join(output_folder, 'TrackCandidates_prealignment.h5'),
track_quality=0b11110111,
track_quality_mask=0b11110111)
# Do an alignment step with the track candidates; this corrects rotations and is therefore much more precise than the simple prealignment
dut_alignment.alignment(input_track_candidates_file=os.path.join(output_folder, 'TrackCandidates_prealignment_reduced.h5'),
input_alignment_file=os.path.join(output_folder, 'Alignment.h5'),
# Order of combinations of planes to align; one should start with the high resolution planes (here: the telescope planes)
align_duts=[[0, 1, 2, 5, 6, 7], # align the telescope planes first
[4], # align the time reference after the telescope alignment
[3]], # align the DUT last and not with the reference since it is rather small and would make the time reference alignment worse
# The DUTs to be used in the fit; always just the high resolution Mimosa26 planes are used
selection_fit_duts=[0, 1, 2, 5, 6, 7],
# The DUTs to be required to have a hit for the alignment
selection_hit_duts=[[0, 1, 2, 4, 5, 6, 7], # Take tracks with time reference hit
[0, 1, 2, 4, 5, 6, 7], # Take tracks with time reference hit
[0, 1, 2, 3, 4, 5, 6, 7]], # Also require hit in the small DUT
# The required track quality per alignment step and DUT
selection_track_quality=[[1, 1, 1, 0, 1, 1, 1], # Do not require a good hit in the time reference
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 0, 1, 1, 1]], # Do not require a good hit in the small DUT
initial_rotation=[[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.],
# Devices 3 and 4 are heavily rotated (inverted); this is not handled automatically yet,
# so the correct rotation angles have to be set here manually
[np.pi - 0.05, -0.05, -0.005],
[np.pi - 0.01, -0.02, -0.0005],
[0., 0, 0.],
[0., 0, 0.],
[0., 0, 0.]],
initial_translation=[[0., 0, 0.],
[0., 0, 0.],
[0., 0, 0.],
# Devices 3 and 4 are heavily rotated (inverted); this is not handled automatically yet,
# so the correct positions have to be set here manually
[11540, 18791, 0.],
[710., 9851., 0.],
[0., 0, 0.],
[0., 0, 0.],
[0., 0, 0.]],
n_pixels=n_pixels,
use_n_tracks=200000, # Do the alignment only on a subset of data, needed for reasonable run time
pixel_size=pixel_size)
# Apply new alignment to data
# Revert alignment from track candidates. Usually one would just apply the alignment to the merged data.
# Due to the large beam angle track finding fails on aligned data. Thus rely on the found tracks from prealignment.
dut_alignment.apply_alignment(input_hit_file=os.path.join(output_folder, 'TrackCandidates_prealignment_reduced.h5'),
input_alignment_file=os.path.join(output_folder, 'Alignment.h5'),
output_hit_file=os.path.join(output_folder, 'Merged_small.h5'), # This is the new, not yet aligned but preselected, merged data file to apply the (pre-)alignment on
inverse=True,
force_prealignment=True)
# Apply the alignment to the merged cluster table to create tracklets
dut_alignment.apply_alignment(input_hit_file=os.path.join(output_folder, 'Merged_small.h5'),
input_alignment_file=os.path.join(output_folder, 'Alignment.h5'),
output_hit_file=os.path.join(output_folder, 'TrackCandidates.h5'))
# Fit track using alignment
track_analysis.fit_tracks(input_track_candidates_file=os.path.join(output_folder, 'TrackCandidates.h5'),
input_alignment_file=os.path.join(output_folder, 'Alignment.h5'),
output_tracks_file=os.path.join(output_folder, 'Tracks.h5'),
selection_hit_duts=[0, 1, 2, 4, 5, 6, 7],
selection_fit_duts=[0, 1, 2, 5, 6, 7],
selection_track_quality=1) # Take all tracks with good hits, do not care about time reference hit quality
# Create unconstrained residuals with aligned data
result_analysis.calculate_residuals(input_tracks_file=os.path.join(output_folder, 'Tracks.h5'),
input_alignment_file=os.path.join(output_folder, 'Alignment.h5'),
output_residuals_file=os.path.join(output_folder, 'Residuals.h5'),
n_pixels=n_pixels,
pixel_size=pixel_size)
# Calculate efficiency with aligned data
result_analysis.calculate_efficiency(input_tracks_file=os.path.join(output_folder, 'Tracks.h5'),
input_alignment_file=os.path.join(output_folder, 'Alignment.h5'),
output_efficiency_file=os.path.join(output_folder, 'Efficiency.h5'),
bin_size=(10, 10),
use_duts=[3],
sensor_size=[(20000, 10000),
(20000, 10000),
(20000, 10000),
(20000, 20000),
(20000, 10000),
(20000, 10000),
(20000, 10000)])
# Fit tracks using prealignment
track_analysis.fit_tracks(input_track_candidates_file=os.path.join(output_folder, 'TrackCandidates_prealignment_reduced.h5'),
input_alignment_file=os.path.join(output_folder, 'Alignment.h5'),
output_tracks_file=os.path.join(output_folder, 'Tracks_prealignment.h5'),
force_prealignment=True,
selection_hit_duts=[0, 1, 2, 4, 5, 6, 7],
selection_fit_duts=[0, 1, 2, 5, 6, 7],
selection_track_quality=1) # Take all tracks with good hits, do not care about time reference hit quality
# Create unconstrained residuals with prealigned data
result_analysis.calculate_residuals(input_tracks_file=os.path.join(output_folder, 'Tracks_prealignment.h5'),
input_alignment_file=os.path.join(output_folder, 'Alignment.h5'),
output_residuals_file=os.path.join(output_folder, 'Residuals_prealignment.h5'),
force_prealignment=True,
n_pixels=n_pixels,
pixel_size=pixel_size)
# Create efficiency plot with prealigned data
result_analysis.calculate_efficiency(input_tracks_file=os.path.join(output_folder, 'Tracks_prealignment.h5'),
input_alignment_file=os.path.join(output_folder, 'Alignment.h5'),
output_efficiency_file=os.path.join(output_folder, 'Efficiency_prealignment.h5'),
force_prealignment=True,
bin_size=(10, 10),
use_duts=[3],
sensor_size=[(20000, 10000),
(20000, 10000),
(20000, 10000),
(20000, 20000),
(20000, 10000),
(20000, 10000),
(20000, 10000)])
if __name__ == '__main__': # Main entry point is needed for multiprocessing under windows
run_analysis()
|
|
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import platform
import sys
import unittest
from webkitpy.common.system.executive import Executive
from webkitpy.common.system.executive_mock import MockExecutive, MockExecutive2
from webkitpy.common.system.filesystem import FileSystem
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.platforminfo import PlatformInfo
def fake_sys(platform_str='darwin', windows_version_tuple=None):
class FakeSysModule(object):
platform = platform_str
if windows_version_tuple:
getwindowsversion = lambda x: windows_version_tuple
return FakeSysModule()
def fake_platform(mac_version_string='10.6.3', release_string='bar', linux_version='trusty'):
class FakePlatformModule(object):
def mac_ver(self):
return tuple([mac_version_string, tuple(['', '', '']), 'i386'])
def linux_distribution(self):
return tuple([None, None, linux_version])
def platform(self):
return 'foo'
def release(self):
return release_string
return FakePlatformModule()
def fake_executive(output=None):
if output:
return MockExecutive2(output=output)
return MockExecutive2(exception=SystemError)
class TestPlatformInfo(unittest.TestCase):
def make_info(self, sys_module=None, platform_module=None, filesystem_module=None, executive=None):
return PlatformInfo(sys_module or fake_sys(), platform_module or fake_platform(), filesystem_module or MockFileSystem(), executive or fake_executive())
def test_real_code(self):
# This test makes sure the real (unmocked) code actually works.
info = PlatformInfo(sys, platform, FileSystem(), Executive())
self.assertNotEquals(info.os_name, '')
self.assertNotEquals(info.os_version, '')
self.assertNotEquals(info.display_name(), '')
self.assertTrue(info.is_mac() or info.is_win() or info.is_linux() or info.is_freebsd())
self.assertIsNotNone(info.terminal_width())
if info.is_linux():
self.assertIsNotNone(info.linux_distribution())
if info.is_mac():
self.assertTrue(info.total_bytes_memory() > 0)
else:
self.assertIsNone(info.total_bytes_memory())
def test_os_name_and_wrappers(self):
info = self.make_info(fake_sys('linux2'))
self.assertTrue(info.is_linux())
self.assertFalse(info.is_mac())
self.assertFalse(info.is_win())
self.assertFalse(info.is_freebsd())
info = self.make_info(fake_sys('linux3'))
self.assertTrue(info.is_linux())
self.assertFalse(info.is_mac())
self.assertFalse(info.is_win())
self.assertFalse(info.is_freebsd())
info = self.make_info(fake_sys('darwin'), fake_platform('10.6.3'))
self.assertEqual(info.os_name, 'mac')
self.assertFalse(info.is_linux())
self.assertTrue(info.is_mac())
self.assertFalse(info.is_win())
self.assertFalse(info.is_freebsd())
info = self.make_info(fake_sys('win32', tuple([6, 1, 7600])))
self.assertEqual(info.os_name, 'win')
self.assertFalse(info.is_linux())
self.assertFalse(info.is_mac())
self.assertTrue(info.is_win())
self.assertFalse(info.is_freebsd())
info = self.make_info(fake_sys('cygwin'), executive=fake_executive('6.1.7600'))
self.assertEqual(info.os_name, 'win')
self.assertFalse(info.is_linux())
self.assertFalse(info.is_mac())
self.assertTrue(info.is_win())
self.assertFalse(info.is_freebsd())
info = self.make_info(fake_sys('freebsd8'))
self.assertEqual(info.os_name, 'freebsd')
self.assertFalse(info.is_linux())
self.assertFalse(info.is_mac())
self.assertFalse(info.is_win())
self.assertTrue(info.is_freebsd())
self.assertRaises(AssertionError, self.make_info, fake_sys('vms'))
def test_os_version(self):
self.assertRaises(AssertionError, self.make_info, fake_sys('darwin'), fake_platform('10.4.3'))
self.assertEqual(self.make_info(fake_sys('darwin'), fake_platform('10.6.1')).os_version, 'snowleopard')
self.assertEqual(self.make_info(fake_sys('darwin'), fake_platform('10.7.1')).os_version, 'lion')
self.assertEqual(self.make_info(fake_sys('darwin'), fake_platform('10.8.1')).os_version, 'mountainlion')
self.assertEqual(self.make_info(fake_sys('darwin'), fake_platform('10.9.0')).os_version, 'mavericks')
self.assertEqual(self.make_info(fake_sys('darwin'), fake_platform('10.10.0')).os_version, 'mac10.10')
self.assertEqual(self.make_info(fake_sys('darwin'), fake_platform('10.11.0')).os_version, 'future')
self.assertEqual(self.make_info(fake_sys('linux2')).os_version, 'trusty')
info = self.make_info(fake_sys('linux2'), fake_platform(linux_version='precise'))
self.assertEqual(info.os_version, 'precise')
info = self.make_info(fake_sys('linux2'), fake_platform(linux_version='utopic'))
self.assertEqual(info.os_version, 'trusty')
self.assertEqual(self.make_info(fake_sys('freebsd8'), fake_platform('', '8.3-PRERELEASE')).os_version, '8.3-PRERELEASE')
self.assertEqual(self.make_info(fake_sys('freebsd9'), fake_platform('', '9.0-RELEASE')).os_version, '9.0-RELEASE')
self.assertRaises(AssertionError, self.make_info, fake_sys('win32', tuple([5, 0, 1234])))
self.assertRaises(AssertionError, self.make_info, fake_sys('win32', tuple([6, 1, 1234])))
self.assertEqual(self.make_info(fake_sys('win32', tuple([10, 1, 1234]))).os_version, 'future')
self.assertEqual(self.make_info(fake_sys('win32', tuple([10, 0, 1234]))).os_version, '10')
self.assertEqual(self.make_info(fake_sys('win32', tuple([6, 3, 1234]))).os_version, '8.1')
self.assertEqual(self.make_info(fake_sys('win32', tuple([6, 2, 1234]))).os_version, '8')
self.assertEqual(self.make_info(fake_sys('win32', tuple([6, 1, 7601]))).os_version, '7sp1')
self.assertEqual(self.make_info(fake_sys('win32', tuple([6, 1, 7600]))).os_version, '7sp0')
self.assertEqual(self.make_info(fake_sys('win32', tuple([6, 0, 1234]))).os_version, 'vista')
self.assertEqual(self.make_info(fake_sys('win32', tuple([5, 1, 1234]))).os_version, 'xp')
self.assertRaises(AssertionError, self.make_info, fake_sys('win32'),
executive=fake_executive('5.0.1234'))
self.assertRaises(AssertionError, self.make_info, fake_sys('win32'),
executive=fake_executive('6.1.1234'))
self.assertEqual(self.make_info(fake_sys('cygwin'), executive=fake_executive('10.1.1234')).os_version, 'future')
self.assertEqual(self.make_info(fake_sys('cygwin'), executive=fake_executive('10.0.1234')).os_version, '10')
self.assertEqual(self.make_info(fake_sys('cygwin'), executive=fake_executive('6.3.1234')).os_version, '8.1')
self.assertEqual(self.make_info(fake_sys('cygwin'), executive=fake_executive('6.2.1234')).os_version, '8')
self.assertEqual(self.make_info(fake_sys('cygwin'), executive=fake_executive('6.1.7601')).os_version, '7sp1')
self.assertEqual(self.make_info(fake_sys('cygwin'), executive=fake_executive('6.1.7600')).os_version, '7sp0')
self.assertEqual(self.make_info(fake_sys('cygwin'), executive=fake_executive('6.0.1234')).os_version, 'vista')
self.assertEqual(self.make_info(fake_sys('cygwin'), executive=fake_executive('5.1.1234')).os_version, 'xp')
def _assert_file_implies_linux_distribution(self, file, distribution):
info = self.make_info(sys_module=fake_sys('linux2'), filesystem_module=MockFileSystem({file: ''}))
self.assertEqual(info.linux_distribution(), distribution)
def test_linux_distro_detection(self):
self._assert_file_implies_linux_distribution('/etc/arch-release', 'arch')
self._assert_file_implies_linux_distribution('/etc/debian_version', 'debian')
self._assert_file_implies_linux_distribution('/etc/redhat-release', 'redhat')
self._assert_file_implies_linux_distribution('/etc/mock-release', 'unknown')
info = self.make_info(fake_sys('cygwin'), executive=fake_executive('6.1.7600'))
self.assertIsNone(info.linux_distribution())
def test_display_name(self):
info = self.make_info(fake_sys('darwin'))
self.assertNotEquals(info.display_name(), '')
info = self.make_info(fake_sys('win32', tuple([6, 1, 7600])))
self.assertNotEquals(info.display_name(), '')
info = self.make_info(fake_sys('linux2'))
self.assertNotEquals(info.display_name(), '')
info = self.make_info(fake_sys('freebsd9'))
self.assertNotEquals(info.display_name(), '')
def test_total_bytes_memory(self):
info = self.make_info(fake_sys('darwin'), fake_platform('10.6.3'), executive=fake_executive('1234'))
self.assertEqual(info.total_bytes_memory(), 1234)
info = self.make_info(fake_sys('win32', tuple([6, 1, 7600])))
self.assertIsNone(info.total_bytes_memory())
info = self.make_info(fake_sys('linux2'))
self.assertIsNone(info.total_bytes_memory())
info = self.make_info(fake_sys('freebsd9'))
self.assertIsNone(info.total_bytes_memory())
|
|
from django import forms
from django.forms import ModelForm, BaseModelFormSet
from django.forms.models import modelformset_factory
from django.db.models import Q
from django_select2.forms import (
ModelSelect2Widget,
Select2Widget,
ModelSelect2MultipleWidget,
Select2MultipleWidget,
)
from history.models import (
Award,
BackgroundCheck,
CommitteeMember,
MeetingMinutes,
NonEventProject,
NonEventProjectParticipant,
Officer,
ProjectReportHeader,
Publication,
WebsiteArticle,
Distinction,
GoverningDocument,
GoverningDocumentType,
)
from event_cal.models import EventPhoto
from mig_main.models import (
AcademicTerm,
MemberProfile,
OfficerPosition,
UserProfile,
)
from requirements.models import DistinctionType
class GoverningDocumentForm(forms.ModelForm):
"""" Form for updating the governing documents"""
class Meta:
model = GoverningDocument
exclude = ['active']
def save(self, commit=True):
gd = super(GoverningDocumentForm, self).save(commit=commit)
if commit:
gdt = gd.document_type
gds = GoverningDocument.objects.filter(document_type=gdt)
for gd_i in gds:
gd_i.active = False
gd_i.save()
gd.active = True
gd.save()
class GoverningDocumentTypeForm(forms.ModelForm):
"""" Form for adding a new type of governing documents"""
class Meta:
model = GoverningDocumentType
fields = ['name']
GoverningDocumentTypeFormset = modelformset_factory(
GoverningDocumentType,
form=GoverningDocumentTypeForm,
extra=1,
can_delete=True
)
class AwardForm(forms.ModelForm):
""" Form for giving out an award."""
recipient = forms.ModelChoiceField(
widget=Select2Widget(),
queryset=MemberProfile.get_members()
)
class Meta:
model = Award
fields = [
'award_type',
'term',
'recipient',
'comment'
]
class OfficerForm(forms.ModelForm):
""" Form for specifying an officer.
Excludes term, since that is specified externally.
"""
user = forms.ModelChoiceField(
widget=Select2Widget(),
queryset=MemberProfile.get_members(),
label='Member'
)
position = forms.ModelChoiceField(
widget=Select2Widget(),
queryset=OfficerPosition.get_current()
)
class Meta:
model = Officer
exclude = ['term']
class CommitteeMemberForm(forms.ModelForm):
""" Form for adding committee members for a given term."""
member = forms.ModelChoiceField(
widget=Select2Widget(),
queryset=MemberProfile.get_members(),
label='Member'
)
class Meta:
model = CommitteeMember
exclude = ['term']
class ArticleForm(forms.ModelForm):
""" Form for submitting printed articles (Publications)"""
class Meta:
model = Publication
fields = [
'date_published',
'volume_number',
'edition_number',
'name',
'type',
'pdf_file'
]
class WebArticleForm(forms.ModelForm):
""" Form for submitting website articles."""
TWEET_CHOICES = (
('N', 'No Tweet'),
('T', 'Tweet normally'),
('H', 'Tweet with #UmichEngin'),
)
tagged_members = forms.ModelMultipleChoiceField(
widget=Select2MultipleWidget(),
queryset=MemberProfile.get_members(),
required=False
)
tweet_option = forms.ChoiceField(choices=TWEET_CHOICES, initial='N')
class Meta:
model = WebsiteArticle
exclude = ['created_by', 'approved']
class MeetingMinutesForm(forms.ModelForm):
""" Form for submitting meeting minutes"""
semester = forms.ModelChoiceField(
widget=Select2Widget(),
queryset=AcademicTerm.get_rchron(),
initial=AcademicTerm.get_current_term()
)
class Meta:
model = MeetingMinutes
fields = [
'pdf_file',
'meeting_type',
'semester',
'meeting_name',
'display_order'
]
class ProjectDescriptionForm(forms.Form):
""" Form used to provide project description for a project report
during compilation.
"""
description = forms.CharField(widget=forms.Textarea)
class ProjectPhotoForm(forms.ModelForm):
""" Form for associating photos with project reports during compilation.
Overrides init to specify initial value for use_in_report based on whether
the photo is associated with the project report or the event.
Overrides save to associate or de-associate the photo with the project
report based on the submitted value of use_in_report
"""
use_in_report = forms.BooleanField(required=False)
class Meta:
model = EventPhoto
exclude = ['event', 'project_report']
def __init__(self, *args, **kwargs):
super(ProjectPhotoForm, self).__init__(*args, **kwargs)
if self.instance.project_report:
self.fields['use_in_report'].initial = True
else:
self.fields['use_in_report'].initial = False
def save(self, commit=True):
use_pic = self.cleaned_data.pop('use_in_report', False)
m = super(ProjectPhotoForm, self).save(commit=False)
if m.project_report and use_pic:
if commit:
m.save()
return m
elif m.project_report and not use_pic:
m.project_report = None
if commit:
m.save()
return m
if m.event:
m.project_report = m.event.project_report
if commit:
m.save()
return m
ProjectPhotoFormset = modelformset_factory(
EventPhoto,
form=ProjectPhotoForm,
extra=0
)
class BaseProjectReportHeaderForm(forms.ModelForm):
""" Form for starting the project report compilation.
"""
terms = forms.ModelMultipleChoiceField(
widget=Select2MultipleWidget(),
queryset=AcademicTerm.get_rchron()
)
preparer = forms.ModelChoiceField(
widget=Select2Widget(),
queryset=MemberProfile.get_actives()
)
class Meta:
model = ProjectReportHeader
exclude = [
'finished_processing',
'finished_photos',
'last_processed',
'last_photo'
]
class BaseNEPForm(forms.ModelForm):
""" Base form for filling out a non-event project summary.
"""
leaders = forms.ModelMultipleChoiceField(
widget=Select2MultipleWidget(),
queryset=MemberProfile.get_members()
)
term = forms.ModelChoiceField(
widget=Select2Widget(),
queryset=AcademicTerm.get_rchron(),
initial=AcademicTerm.get_current_term()
)
assoc_officer = forms.ModelChoiceField(
widget=Select2Widget(),
queryset=OfficerPosition.get_current(),
label='Associated Officer'
)
class Meta:
model = NonEventProject
fields = [
'name',
'description',
'leaders',
'assoc_officer',
'term',
'start_date',
'end_date',
'location'
]
class BaseNEPParticipantForm(forms.ModelForm):
""" Base form for adding participants to a non-event project."""
participant = forms.ModelChoiceField(
widget=Select2Widget(),
queryset=MemberProfile.get_members()
)
class Meta:
model = NonEventProjectParticipant
fields = ['project', 'participant', 'hours']
class BaseBackgroundCheckForm(forms.ModelForm):
""" Base form for adding member background checks.
"""
member = forms.ModelChoiceField(
widget=Select2Widget(),
queryset=UserProfile.objects.all().order_by('last_name')
)
class Meta:
model = BackgroundCheck
exclude = ['date_added']
class MassAddBackgroundCheckForm(forms.Form):
""" Form for quickly adding user background checks.
"""
uniqnames = forms.CharField(
widget=forms.Textarea,
help_text='Separate uniqnames with a newline'
)
check_type = forms.ChoiceField(
choices=BackgroundCheck.CHECK_CHOICES
)
def save(self):
""" Adds background checks for each uniqname in the list.
If there is no profile for that uniqname, marks it and continues.
"""
uniqnames = self.cleaned_data['uniqnames'].split('\n')
no_profiles = []
for uniqname in uniqnames:
u = UserProfile.objects.filter(uniqname=uniqname.strip())
if not u.exists():
no_profiles.append(uniqname.strip())
continue
else:
u = u[0]
b = BackgroundCheck(
member=u,
check_type=self.cleaned_data['check_type']
)
b.save()
if no_profiles:
return no_profiles
else:
return None
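# Hedged usage sketch (illustrative only; the surrounding view wiring is assumed and
# not part of this module): save() returns the uniqnames without profiles so the
# caller can report them, or None when every uniqname matched.
def _example_mass_add_background_checks(post_data):  # hypothetical helper
    form = MassAddBackgroundCheckForm(post_data)
    if not form.is_valid():
        return form.errors
    return form.save() or []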
class AddStatusForm(forms.ModelForm):
member = forms.ModelChoiceField(
widget=Select2Widget(),
queryset=MemberProfile.get_actives()
)
approve = forms.BooleanField(required=False)
class Meta:
model = Distinction
exclude= ('term',)
def save(self, commit=True):
approved = self.cleaned_data.pop('approve', False)
if approved:
return super(AddStatusForm, self).save(commit=commit)
else:
print('unapproved')
return None
class AddElecteeStatusForm(AddStatusForm):
member = forms.ModelChoiceField(
widget=Select2Widget(),
queryset=MemberProfile.get_electees()
)
class BaseAddStatusFormSet(BaseModelFormSet):
def __init__(self, *args, **kwargs):
super(BaseAddStatusFormSet,
self).__init__(*args, **kwargs)
self.queryset = Distinction.objects.none()
def save(self, term, commit=True):
instances = super(BaseAddStatusFormSet, self).save(commit=False)
if commit:
for obj in self.deleted_objects:
obj.delete()
for instance in self.new_objects:
if instance:
instance.term = term
if not Distinction.objects.filter(
member=instance.member,
distinction_type=instance.distinction_type,
term=term).exists():
instance.save()
return instances
class BaseAddActiveStatusFormSet(BaseAddStatusFormSet):
def __init__(self, *args, **kwargs):
term = kwargs.pop('term', AcademicTerm.get_current_term())
initial=[]
for distinction in DistinctionType.objects.filter(status_type__name='Active'):
actives_already_received_distinction = MemberProfile.objects.filter(
distinction__distinction_type=distinction,
distinction__term=term
)
actives = distinction.get_actives_with_status(term)
for active in actives:
if active in actives_already_received_distinction:
continue
if distinction.name == 'Active':
gift = 'N/A'
else:
gift = 'Not specified'
initial.append(
{
'member': active,
'distinction_type': distinction,
'gift': gift,
'approve': False
}
)
kwargs['initial'] = initial
super(BaseAddActiveStatusFormSet,
self).__init__(*args, **kwargs)
self.extra = len(initial)+1
self.form.base_fields['distinction_type'].queryset =\
DistinctionType.objects.filter(status_type__name='Active')
ManageElecteeDAPAFormSet = modelformset_factory(Distinction, form=AddElecteeStatusForm)
ManageElecteeDAPAFormSet.form.base_fields['distinction_type'].queryset = DistinctionType.objects.filter(status_type__name='Electee').filter(Q(name__contains='DA') | Q(name__contains='PA'))
#ManageElecteeDAPAFormSet.form.base_fields['member'].queryset = MemberProfile.get_electees()
ElecteeToActiveFormSet = modelformset_factory(Distinction, form=AddElecteeStatusForm)
ElecteeToActiveFormSet.form.base_fields['distinction_type'].queryset = DistinctionType.objects.filter(status_type__name='Electee').exclude(Q(name__contains='DA') | Q(name__contains='PA'))
#ElecteeToActiveFormSet.form.base_fields['member'].queryset = MemberProfile.get_electees()
ManageActiveCurrentStatusFormSet = modelformset_factory(
Distinction,
form=AddStatusForm,
formset=BaseAddActiveStatusFormSet
)
|
|
# -*- coding: utf-8 -*-
"""
httpbin.helpers
~~~~~~~~~~~~~~~
This module provides helper functions for httpbin.
"""
import json
import base64
import re
import time
import os
from hashlib import md5, sha256, sha512
from werkzeug.http import parse_authorization_header
from werkzeug.datastructures import WWWAuthenticate
from flask import request, make_response
from six.moves.urllib.parse import urlparse, urlunparse
from .structures import CaseInsensitiveDict
ASCII_ART = r"""
-=[ teapot ]=-
_...._
.' _ _ `.
| ."` ^ `". _,
\_;`"---"`|//
| ;/
\_ _/
`\"\"\"`
"""
REDIRECT_LOCATION = '/redirect/1'
ENV_HEADERS = (
'X-Varnish',
'X-Request-Start',
'X-Heroku-Queue-Depth',
'X-Real-Ip',
'X-Forwarded-Proto',
'X-Forwarded-Protocol',
'X-Forwarded-Ssl',
'X-Heroku-Queue-Wait-Time',
'X-Forwarded-For',
'X-Heroku-Dynos-In-Use',
'X-Forwarded-Protocol',
'X-Forwarded-Port',
'X-Request-Id',
'Via',
'Total-Route-Time',
'Connect-Time'
)
ROBOT_TXT = """User-agent: *
Disallow: /deny
"""
ACCEPTED_MEDIA_TYPES = [
'image/webp',
'image/svg+xml',
'image/jpeg',
'image/png',
'image/*'
]
ANGRY_ASCII = r"""
.-''''''-.
.' _ _ '.
/ O O \\
: :
| |
: __ :
\ .-"` `"-. /
'. .'
'-......-'
YOU SHOULDN'T BE HERE
"""
def json_safe(string, content_type='application/octet-stream'):
"""Returns JSON-safe version of `string`.
If `string` is a Unicode string or valid UTF-8, it is returned unmodified,
as it can safely be encoded to a JSON string.
If `string` contains raw/binary data, it is Base64-encoded, formatted and
returned according to the "data" URL scheme (RFC 2397). Since JSON is not
suitable for binary data, some additional encoding is necessary; the "data"
URL scheme was chosen for its simplicity.
"""
try:
string = string.decode('utf-8')
json.dumps(string)
return string
except (ValueError, TypeError):
return b''.join([
b'data:',
content_type.encode('utf-8'),
b';base64,',
base64.b64encode(string)
]).decode('utf-8')
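# Hedged usage sketch (illustrative only, never called by httpbin itself): valid UTF-8
# input passes through unchanged, while raw binary comes back as an RFC 2397 "data" URL.
def _json_safe_example():  # hypothetical helper, exists only to document json_safe()
    assert json_safe(b'hello') == 'hello'
    assert json_safe(b'\xff\x00') == 'data:application/octet-stream;base64,/wA='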
def get_files():
"""Returns files dict from request context."""
files = dict()
for k, v in request.files.items():
content_type = request.files[k].content_type or 'application/octet-stream'
val = json_safe(v.read(), content_type)
if files.get(k):
if not isinstance(files[k], list):
files[k] = [files[k]]
files[k].append(val)
else:
files[k] = val
return files
def get_headers(hide_env=True):
"""Returns headers dict from request context."""
headers = dict(request.headers.items())
if hide_env and ('show_env' not in request.args):
for key in ENV_HEADERS:
try:
del headers[key]
except KeyError:
pass
return CaseInsensitiveDict(headers.items())
def semiflatten(multi):
"""Convert a MutiDict into a regular dict. If there are more than one value
for a key, the result will have a list of values for the key. Otherwise it
will have the plain value."""
if multi:
result = multi.to_dict(flat=False)
for k, v in result.items():
if len(v) == 1:
result[k] = v[0]
return result
else:
return multi
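# Hedged example (illustrative only, not used by httpbin): repeated query keys become
# lists, single keys stay plain values.
def _semiflatten_example():  # hypothetical helper
    from werkzeug.datastructures import MultiDict
    multi = MultiDict([('a', '1'), ('a', '2'), ('b', '3')])
    assert semiflatten(multi) == {'a': ['1', '2'], 'b': '3'}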
def get_url(request):
"""
Since we might be hosted behind a proxy, we need to check the
X-Forwarded-Proto, X-Forwarded-Protocol, or X-Forwarded-SSL headers
to find out what protocol was used to access us.
"""
protocol = request.headers.get('X-Forwarded-Proto') or request.headers.get('X-Forwarded-Protocol')
if protocol is None and request.headers.get('X-Forwarded-Ssl') == 'on':
protocol = 'https'
if protocol is None:
return request.url
url = list(urlparse(request.url))
url[0] = protocol
return urlunparse(url)
def get_dict(*keys, **extras):
"""Returns request dict of given keys."""
_keys = ('url', 'args', 'form', 'data', 'origin', 'headers', 'files', 'json', 'method')
assert all(map(_keys.__contains__, keys))
data = request.data
form = semiflatten(request.form)
try:
_json = json.loads(data.decode('utf-8'))
except (ValueError, TypeError):
_json = None
d = dict(
url=get_url(request),
args=semiflatten(request.args),
form=form,
data=json_safe(data),
origin=request.headers.get('X-Forwarded-For', request.remote_addr),
headers=get_headers(),
files=get_files(),
json=_json,
method=request.method,
)
out_d = dict()
for key in keys:
out_d[key] = d.get(key)
out_d.update(extras)
return out_d
def status_code(code):
"""Returns response object of given status code."""
redirect = dict(headers=dict(location=REDIRECT_LOCATION))
code_map = {
301: redirect,
302: redirect,
303: redirect,
304: dict(data=''),
305: redirect,
307: redirect,
401: dict(headers={'WWW-Authenticate': 'Basic realm="Fake Realm"'}),
402: dict(
data='Fuck you, pay me!',
headers={
'x-more-info': 'http://vimeo.com/22053820'
}
),
406: dict(data=json.dumps({
'message': 'Client did not request a supported media type.',
'accept': ACCEPTED_MEDIA_TYPES
}),
headers={
'Content-Type': 'application/json'
}),
407: dict(headers={'Proxy-Authenticate': 'Basic realm="Fake Realm"'}),
418: dict( # I'm a teapot!
data=ASCII_ART,
headers={
'x-more-info': 'http://tools.ietf.org/html/rfc2324'
}
),
}
r = make_response()
r.status_code = code
if code in code_map:
m = code_map[code]
if 'data' in m:
r.data = m['data']
if 'headers' in m:
r.headers = m['headers']
return r
def check_basic_auth(user, passwd):
"""Checks user authentication using HTTP Basic Auth."""
auth = request.authorization
return auth and auth.username == user and auth.password == passwd
# Digest auth helpers
# qop is a quality of protection
def H(data, algorithm):
if algorithm == 'SHA-256':
return sha256(data).hexdigest()
elif algorithm == 'SHA-512':
return sha512(data).hexdigest()
else:
return md5(data).hexdigest()
def HA1(realm, username, password, algorithm):
"""Create HA1 hash by realm, username, password
HA1 = md5(A1) = MD5(username:realm:password)
"""
if not realm:
realm = u''
return H(b":".join([username.encode('utf-8'),
realm.encode('utf-8'),
password.encode('utf-8')]), algorithm)
def HA2(credentials, request, algorithm):
"""Create HA2 md5 hash
If the qop directive's value is "auth" or is unspecified, then HA2:
HA2 = md5(A2) = MD5(method:digestURI)
If the qop directive's value is "auth-int" , then HA2 is
HA2 = md5(A2) = MD5(method:digestURI:MD5(entityBody))
"""
if credentials.get("qop") == "auth" or credentials.get('qop') is None:
return H(b":".join([request['method'].encode('utf-8'), request['uri'].encode('utf-8')]), algorithm)
elif credentials.get("qop") == "auth-int":
for k in 'method', 'uri', 'body':
if k not in request:
raise ValueError("%s required" % k)
A2 = b":".join([request['method'].encode('utf-8'),
request['uri'].encode('utf-8'),
H(request['body'], algorithm).encode('utf-8')])
return H(A2, algorithm)
raise ValueError
def response(credentials, password, request):
"""Compile digest auth response
If the qop directive's value is "auth" or "auth-int" , then compute the response as follows:
RESPONSE = MD5(HA1:nonce:nonceCount:clientNonce:qop:HA2)
Else if the qop directive is unspecified, then compute the response as follows:
RESPONSE = MD5(HA1:nonce:HA2)
Arguments:
- `credentials`: credentials dict
- `password`: request user password
- `request`: request dict
"""
response = None
algorithm = credentials.get('algorithm')
HA1_value = HA1(
credentials.get('realm'),
credentials.get('username'),
password,
algorithm
)
HA2_value = HA2(credentials, request, algorithm)
if credentials.get('qop') is None:
response = H(b":".join([
HA1_value.encode('utf-8'),
credentials.get('nonce', '').encode('utf-8'),
HA2_value.encode('utf-8')
]), algorithm)
elif credentials.get('qop') == 'auth' or credentials.get('qop') == 'auth-int':
for k in 'nonce', 'nc', 'cnonce', 'qop':
if k not in credentials:
raise ValueError("%s required for response H" % k)
response = H(b":".join([HA1_value.encode('utf-8'),
credentials.get('nonce').encode('utf-8'),
credentials.get('nc').encode('utf-8'),
credentials.get('cnonce').encode('utf-8'),
credentials.get('qop').encode('utf-8'),
HA2_value.encode('utf-8')]), algorithm)
else:
raise ValueError("qop value are wrong")
return response
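# Hedged sketch (illustrative only, not used by httpbin): how HA1(), HA2() and
# response() compose into a digest-auth hash. All credential values below are made up
# for the example; only the realm string mirrors the one used further down.
def _digest_response_example():  # hypothetical helper
    credentials = {
        'username': 'user', 'realm': '[email protected]', 'algorithm': 'MD5',
        'nonce': 'abc123', 'nc': '00000001', 'cnonce': 'xyz', 'qop': 'auth',
    }
    fake_request = {'method': 'GET', 'uri': '/me', 'body': b''}
    # response() derives HA1 and HA2 internally and hashes them with the nonce data.
    return response(credentials, 'secret', fake_request)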
def check_digest_auth(user, passwd):
"""Check user authentication using HTTP Digest auth"""
if request.headers.get('Authorization'):
credentials = parse_authorization_header(request.headers.get('Authorization'))
if not credentials:
return
request_uri = request.script_root + request.path
if request.query_string:
request_uri += '?' + request.query_string
response_hash = response(credentials, passwd, dict(uri=request_uri,
body=request.data,
method=request.method))
if credentials.get('response') == response_hash:
return True
return False
def secure_cookie():
"""Return true if cookie should have secure attribute"""
return request.environ['wsgi.url_scheme'] == 'https'
def __parse_request_range(range_header_text):
""" Return a tuple describing the byte range requested in a GET request
If the range is open ended on the left or right side, then a value of None
will be set.
RFC7233: http://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7233.html#header.range
Examples:
Range : bytes=1024-
Range : bytes=10-20
Range : bytes=-999
"""
left = None
right = None
if not range_header_text:
return left, right
range_header_text = range_header_text.strip()
if not range_header_text.startswith('bytes'):
return left, right
components = range_header_text.split("=")
if len(components) != 2:
return left, right
components = components[1].split("-")
try:
right = int(components[1])
except (ValueError, IndexError):
pass
try:
left = int(components[0])
except ValueError:
pass
return left, right
def get_request_range(request_headers, upper_bound):
first_byte_pos, last_byte_pos = __parse_request_range(request_headers['range'])
if first_byte_pos is None and last_byte_pos is None:
# Request full range
first_byte_pos = 0
last_byte_pos = upper_bound - 1
elif first_byte_pos is None:
# Request the last X bytes
first_byte_pos = max(0, upper_bound - last_byte_pos)
last_byte_pos = upper_bound - 1
elif last_byte_pos is None:
# Request all bytes from first_byte_pos to the end
last_byte_pos = upper_bound - 1
return first_byte_pos, last_byte_pos
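# Hedged examples (illustrative only, not part of httpbin): how the three Range forms
# documented above map onto absolute byte positions for a hypothetical 100-byte resource.
def _request_range_examples():  # hypothetical helper
    assert get_request_range({'range': 'bytes=10-20'}, 100) == (10, 20)
    assert get_request_range({'range': 'bytes=10-'}, 100) == (10, 99)   # open right side
    assert get_request_range({'range': 'bytes=-25'}, 100) == (75, 99)   # last 25 bytes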
def parse_multi_value_header(header_str):
"""Break apart an HTTP header string that is potentially a quoted,
comma separated list as used in entity headers in RFC2616."""
parsed_parts = []
if header_str:
parts = header_str.split(',')
for part in parts:
match = re.search(r'\s*(W/)?\"?([^"]*)\"?\s*', part)
if match is not None:
parsed_parts.append(match.group(2))
return parsed_parts
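# Hedged example (illustrative only): an ETag-style header is split into its values,
# with surrounding quotes and weak-validator ("W/") prefixes stripped.
def _multi_value_header_example():  # hypothetical helper
    assert parse_multi_value_header('"xyzzy", W/"r2d2xxxx", c3piozzzz') == ['xyzzy', 'r2d2xxxx', 'c3piozzzz']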
def next_stale_after_value(stale_after):
try:
stale_after_count = int(stale_after) - 1
return str(stale_after_count)
except ValueError:
return 'never'
def digest_challenge_response(app, qop, algorithm, stale=False):
response = app.make_response('')
response.status_code = 401
# RFC2616 Section4.2: HTTP headers are ASCII. That means
# request.remote_addr was originally ASCII, so I should be able to
# encode it back to ascii. Also, RFC2617 says about nonces: "The
# contents of the nonce are implementation dependent"
nonce = H(b''.join([
getattr(request, 'remote_addr', u'').encode('ascii'),
b':',
str(time.time()).encode('ascii'),
b':',
os.urandom(10)
]), algorithm)
opaque = H(os.urandom(10), algorithm)
auth = WWWAuthenticate("digest")
auth.set_digest('[email protected]', nonce, opaque=opaque,
qop=('auth', 'auth-int') if qop is None else (qop,), algorithm=algorithm)
auth.stale = stale
response.headers['WWW-Authenticate'] = auth.to_header()
return response
|
|
# -*- coding: utf-8 -*-
import os
import time
import json
import logging
from random import random, randrange
from pokemongo_bot import inventory
from pokemongo_bot.base_task import BaseTask
from pokemongo_bot.human_behaviour import sleep, action_delay
from pokemongo_bot.inventory import Pokemon
from pokemongo_bot.worker_result import WorkerResult
from pokemongo_bot.datastore import Datastore
from pokemongo_bot.base_dir import _base_dir
from datetime import datetime, timedelta
CATCH_STATUS_SUCCESS = 1
CATCH_STATUS_FAILED = 2
CATCH_STATUS_VANISHED = 3
CATCH_STATUS_MISSED = 4
ENCOUNTER_STATUS_SUCCESS = 1
ENCOUNTER_STATUS_NOT_IN_RANGE = 5
ENCOUNTER_STATUS_POKEMON_INVENTORY_FULL = 7
ITEM_POKEBALL = 1
ITEM_GREATBALL = 2
ITEM_ULTRABALL = 3
ITEM_RAZZBERRY = 701
LOGIC_TO_FUNCTION = {
'or': lambda x, y: x or y,
'and': lambda x, y: x and y
}
class PokemonCatchWorker(Datastore, BaseTask):
def __init__(self, pokemon, bot, config):
self.pokemon = pokemon
super(PokemonCatchWorker, self).__init__(bot, config)
def initialize(self):
self.api = self.bot.api
self.position = self.bot.position
self.pokemon_list = self.bot.pokemon_list
self.inventory = inventory.items()
self.spawn_point_guid = ''
self.response_key = ''
self.response_status_key = ''
#Config
self.min_ultraball_to_keep = self.config.get('min_ultraball_to_keep', 10)
self.catch_throw_parameters = self.config.get('catch_throw_parameters', {})
self.catch_throw_parameters_spin_success_rate = self.catch_throw_parameters.get('spin_success_rate', 0.6)
self.catch_throw_parameters_excellent_rate = self.catch_throw_parameters.get('excellent_rate', 0.1)
self.catch_throw_parameters_great_rate = self.catch_throw_parameters.get('great_rate', 0.5)
self.catch_throw_parameters_nice_rate = self.catch_throw_parameters.get('nice_rate', 0.3)
self.catch_throw_parameters_normal_rate = self.catch_throw_parameters.get('normal_rate', 0.1)
self.catch_throw_parameters_hit_rate = self.catch_throw_parameters.get('hit_rate', 0.8)
self.catchsim_config = self.config.get('catch_simulation', {})
self.catchsim_catch_wait_min = self.catchsim_config.get('catch_wait_min', 2)
self.catchsim_catch_wait_max = self.catchsim_config.get('catch_wait_max', 6)
self.catchsim_flee_count = int(self.catchsim_config.get('flee_count', 3))
self.catchsim_flee_duration = self.catchsim_config.get('flee_duration', 2)
self.catchsim_berry_wait_min = self.catchsim_config.get('berry_wait_min', 2)
self.catchsim_berry_wait_max = self.catchsim_config.get('berry_wait_max', 3)
self.catchsim_changeball_wait_min = self.catchsim_config.get('changeball_wait_min', 2)
self.catchsim_changeball_wait_max = self.catchsim_config.get('changeball_wait_max', 3)
############################################################################
# public methods
############################################################################
def work(self, response_dict=None):
response_dict = response_dict or self.create_encounter_api_call()
# validate response
if not response_dict:
return WorkerResult.ERROR
try:
responses = response_dict['responses']
response = responses[self.response_key]
if response[self.response_status_key] != ENCOUNTER_STATUS_SUCCESS:
if response[self.response_status_key] == ENCOUNTER_STATUS_NOT_IN_RANGE:
self.emit_event('pokemon_not_in_range', formatted='Pokemon went out of range!')
elif response[self.response_status_key] == ENCOUNTER_STATUS_POKEMON_INVENTORY_FULL:
self.emit_event('pokemon_inventory_full', formatted='Your Pokemon inventory is full! Could not catch!')
return WorkerResult.ERROR
except KeyError:
return WorkerResult.ERROR
# get pokemon data
pokemon_data = response['wild_pokemon']['pokemon_data'] if 'wild_pokemon' in response else response['pokemon_data']
pokemon = Pokemon(pokemon_data)
# skip ignored pokemon
if not self._should_catch_pokemon(pokemon):
return WorkerResult.SUCCESS
is_vip = self._is_vip_pokemon(pokemon)
if inventory.items().get(ITEM_POKEBALL).count < 1:
if inventory.items().get(ITEM_GREATBALL).count < 1:
if inventory.items().get(ITEM_ULTRABALL).count < 1:
return WorkerResult.SUCCESS
# log encounter
self.emit_event(
'pokemon_appeared',
formatted='A wild {pokemon} appeared! [CP {cp}] [Potential {iv}] [A/D/S {iv_display}]',
data={
'pokemon': pokemon.name,
'cp': pokemon.cp,
'iv': pokemon.iv,
'iv_display': pokemon.iv_display,
'encounter_id': self.pokemon['encounter_id'],
'latitude': self.pokemon['latitude'],
'longitude': self.pokemon['longitude'],
'pokemon_id': pokemon.pokemon_id
}
)
# simulate app
time.sleep(3)
# check for VIP pokemon
if is_vip:
self.emit_event('vip_pokemon', formatted='This is a VIP pokemon. Catch!!!')
# check catch limits before catch
with self.bot.database as conn:
c = conn.cursor()
c.execute("SELECT DISTINCT COUNT(encounter_id) FROM catch_log WHERE dated >= datetime('now','-1 day')")
result = c.fetchone()
while True:
max_catch = self.bot.config.daily_catch_limit
if result[0] < max_catch:
# catch that pokemon!
encounter_id = self.pokemon['encounter_id']
catch_rate_by_ball = [0] + response['capture_probability']['capture_probability'] # offset so item ids match indices
self._do_catch(pokemon, encounter_id, catch_rate_by_ball, is_vip=is_vip)
break
else:
self.emit_event('catch_limit', formatted='WARNING! You have reached your daily catch limit')
break
# simulate app
time.sleep(5)
def create_encounter_api_call(self):
encounter_id = self.pokemon['encounter_id']
player_latitude = self.pokemon['latitude']
player_longitude = self.pokemon['longitude']
request = self.api.create_request()
if 'spawn_point_id' in self.pokemon:
spawn_point_id = self.pokemon['spawn_point_id']
self.spawn_point_guid = spawn_point_id
self.response_key = 'ENCOUNTER'
self.response_status_key = 'status'
request.encounter(
encounter_id=encounter_id,
spawn_point_id=spawn_point_id,
player_latitude=player_latitude,
player_longitude=player_longitude
)
else:
fort_id = self.pokemon['fort_id']
self.spawn_point_guid = fort_id
self.response_key = 'DISK_ENCOUNTER'
self.response_status_key = 'result'
request.disk_encounter(
encounter_id=encounter_id,
fort_id=fort_id,
player_latitude=player_latitude,
player_longitude=player_longitude
)
return request.call()
############################################################################
# helpers
############################################################################
def _pokemon_matches_config(self, config, pokemon, default_logic='and'):
pokemon_config = config.get(pokemon.name, config.get('any'))
if not pokemon_config:
return False
catch_results = {
'cp': False,
'iv': False,
}
if pokemon_config.get('never_catch', False):
return False
if pokemon_config.get('always_catch', False):
return True
catch_cp = pokemon_config.get('catch_above_cp', 0)
if pokemon.cp > catch_cp:
catch_results['cp'] = True
catch_iv = pokemon_config.get('catch_above_iv', 0)
if pokemon.iv > catch_iv:
catch_results['iv'] = True
return LOGIC_TO_FUNCTION[pokemon_config.get('logic', default_logic)](*catch_results.values())
def _should_catch_pokemon(self, pokemon):
return self._pokemon_matches_config(self.bot.config.catch, pokemon)
def _is_vip_pokemon(self, pokemon):
# having just a name present in the list makes them vip
if self.bot.config.vips.get(pokemon.name) == {}:
return True
return self._pokemon_matches_config(self.bot.config.vips, pokemon, default_logic='or')
def _pct(self, rate_by_ball):
return '{0:.2f}'.format(rate_by_ball * 100)
def _use_berry(self, berry_id, berry_count, encounter_id, catch_rate_by_ball, current_ball):
# Delay to simulate selecting berry
action_delay(self.catchsim_berry_wait_min, self.catchsim_berry_wait_max)
new_catch_rate_by_ball = []
self.emit_event(
'pokemon_catch_rate',
level='debug',
formatted='Catch rate of {catch_rate} with {ball_name} is low. Throwing {berry_name} (have {berry_count})',
data={
'catch_rate': self._pct(catch_rate_by_ball[current_ball]),
'ball_name': self.inventory.get(current_ball).name,
'berry_name': self.inventory.get(berry_id).name,
'berry_count': berry_count
}
)
response_dict = self.api.use_item_capture(
item_id=berry_id,
encounter_id=encounter_id,
spawn_point_id=self.spawn_point_guid
)
responses = response_dict['responses']
if response_dict and response_dict['status_code'] == 1:
# update catch rates using multiplier
if 'item_capture_mult' in responses['USE_ITEM_CAPTURE']:
for rate in catch_rate_by_ball:
new_catch_rate_by_ball.append(rate * responses['USE_ITEM_CAPTURE']['item_capture_mult'])
self.emit_event(
'threw_berry',
formatted="Threw a {berry_name}! Catch rate with {ball_name} is now: {new_catch_rate}",
data={
'berry_name': self.inventory.get(berry_id).name,
'ball_name': self.inventory.get(current_ball).name,
'new_catch_rate': self._pct(new_catch_rate_by_ball[current_ball])
}
)
# softban?
else:
new_catch_rate_by_ball = catch_rate_by_ball
self.bot.softban = True
self.emit_event(
'softban',
level='warning',
formatted='Failed to use berry. You may be softbanned.'
)
# unknown status code
else:
new_catch_rate_by_ball = catch_rate_by_ball
self.emit_event(
'threw_berry_failed',
formatted='Unknown response when throwing berry: {status_code}.',
data={
'status_code': response_dict['status_code']
}
)
return new_catch_rate_by_ball
def _do_catch(self, pokemon, encounter_id, catch_rate_by_ball, is_vip=False):
# settings that may be exposed at some point
"""
:type pokemon: Pokemon
"""
berry_id = ITEM_RAZZBERRY
maximum_ball = ITEM_ULTRABALL if is_vip else ITEM_GREATBALL
ideal_catch_rate_before_throw = 0.9 if is_vip else 0.35
berry_count = self.inventory.get(ITEM_RAZZBERRY).count
ball_count = {}
for ball_id in [ITEM_POKEBALL, ITEM_GREATBALL, ITEM_ULTRABALL]:
ball_count[ball_id] = self.inventory.get(ball_id).count
# use `min_ultraball_to_keep` from config if it is not None
min_ultraball_to_keep = ball_count[ITEM_ULTRABALL]
if self.min_ultraball_to_keep is not None:
if self.min_ultraball_to_keep >= 0 and self.min_ultraball_to_keep < min_ultraball_to_keep:
min_ultraball_to_keep = self.min_ultraball_to_keep
while True:
# find lowest available ball
current_ball = ITEM_POKEBALL
while ball_count[current_ball] == 0 and current_ball < maximum_ball:
current_ball += 1
if ball_count[current_ball] == 0:
self.emit_event('no_pokeballs', formatted='No usable pokeballs found!')
# use ultraball if there are no other balls, subject to the `min_ultraball_to_keep` constraint
if maximum_ball != ITEM_ULTRABALL and ball_count[ITEM_ULTRABALL] > min_ultraball_to_keep:
maximum_ball = ITEM_ULTRABALL
continue
else:
break
# check future ball count
num_next_balls = 0
next_ball = current_ball
while next_ball < maximum_ball:
next_ball += 1
num_next_balls += ball_count[next_ball]
# check if we've got berries to spare
berries_to_spare = berry_count > 0 if is_vip else berry_count > num_next_balls + 30
# use a berry if we are under our ideal rate and have berries to spare
used_berry = False
changed_ball = False
if catch_rate_by_ball[current_ball] < ideal_catch_rate_before_throw and berries_to_spare:
new_catch_rate_by_ball = self._use_berry(berry_id, berry_count, encounter_id, catch_rate_by_ball, current_ball)
if new_catch_rate_by_ball != catch_rate_by_ball:
catch_rate_by_ball = new_catch_rate_by_ball
self.inventory.get(ITEM_RAZZBERRY).remove(1)
berry_count -= 1
used_berry = True
# pick the best ball to catch with
best_ball = current_ball
while best_ball < maximum_ball:
best_ball += 1
if catch_rate_by_ball[current_ball] < ideal_catch_rate_before_throw and ball_count[best_ball] > 0:
# if the current ball's catch chance is under our ideal rate and the player has a better ball, use it
current_ball = best_ball
changed_ball = True
# if the rate is still low and we didn't throw a berry before, throw one
if catch_rate_by_ball[current_ball] < ideal_catch_rate_before_throw and berry_count > 0 and not used_berry:
new_catch_rate_by_ball = self._use_berry(berry_id, berry_count, encounter_id, catch_rate_by_ball, current_ball)
if new_catch_rate_by_ball != catch_rate_by_ball:
catch_rate_by_ball = new_catch_rate_by_ball
self.inventory.get(ITEM_RAZZBERRY).remove(1)
berry_count -= 1
used_berry = True
# If we change ball then wait to simulate user selecting it
if changed_ball:
action_delay(self.catchsim_changeball_wait_min, self.catchsim_changeball_wait_max)
# Randomize the quality of the throw
# Default structure
throw_parameters = {'normalized_reticle_size': 1.950,
'spin_modifier': 1.0,
'normalized_hit_position': 1.0,
'throw_type_label': 'Excellent'}
self.generate_spin_parameter(throw_parameters)
self.generate_throw_quality_parameters(throw_parameters)
# try to catch pokemon!
ball_count[current_ball] -= 1
self.inventory.get(current_ball).remove(1)
# Take some time to throw the ball from config options
action_delay(self.catchsim_catch_wait_min, self.catchsim_catch_wait_max)
self.emit_event(
'threw_pokeball',
formatted='{throw_type}{spin_label} throw! Used {ball_name}, with chance {success_percentage} ({count_left} left)',
data={
'throw_type': throw_parameters['throw_type_label'],
'spin_label': throw_parameters['spin_label'],
'ball_name': self.inventory.get(current_ball).name,
'success_percentage': self._pct(catch_rate_by_ball[current_ball]),
'count_left': ball_count[current_ball]
}
)
hit_pokemon = 1
if random() >= self.catch_throw_parameters_hit_rate:
hit_pokemon = 0
response_dict = self.api.catch_pokemon(
encounter_id=encounter_id,
pokeball=current_ball,
normalized_reticle_size=throw_parameters['normalized_reticle_size'],
spawn_point_id=self.spawn_point_guid,
hit_pokemon=hit_pokemon,
spin_modifier=throw_parameters['spin_modifier'],
normalized_hit_position=throw_parameters['normalized_hit_position']
)
try:
catch_pokemon_status = response_dict['responses']['CATCH_POKEMON']['status']
except KeyError:
break
# retry failed pokemon
if catch_pokemon_status == CATCH_STATUS_FAILED:
self.emit_event(
'pokemon_capture_failed',
formatted='{pokemon} capture failed.. trying again!',
data={'pokemon': pokemon.name}
)
# sleep according to flee_count and flee_duration config settings
# randomly chooses a number of times to 'show' wobble animation between 1 and flee_count
# multiplies this by flee_duration to get total sleep
if self.catchsim_flee_count:
sleep((randrange(self.catchsim_flee_count)+1) * self.catchsim_flee_duration)
continue
# abandon if pokemon vanished
elif catch_pokemon_status == CATCH_STATUS_VANISHED:
self.emit_event(
'pokemon_vanished',
formatted='{pokemon} vanished!',
data={
'pokemon': pokemon.name,
'encounter_id': self.pokemon['encounter_id'],
'latitude': self.pokemon['latitude'],
'longitude': self.pokemon['longitude'],
'pokemon_id': pokemon.pokemon_id
}
)
if float(self._pct(catch_rate_by_ball[current_ball])) == 100:  # _pct() returns a string, so compare numerically
self.bot.softban = True
# pokemon caught!
elif catch_pokemon_status == CATCH_STATUS_SUCCESS:
pokemon.unique_id = response_dict['responses']['CATCH_POKEMON']['captured_pokemon_id']
self.bot.metrics.captured_pokemon(pokemon.name, pokemon.cp, pokemon.iv_display, pokemon.iv)
try:
self.emit_event(
'pokemon_caught',
formatted='Captured {pokemon}! [CP {cp}] [Potential {iv}] [{iv_display}] [+{exp} exp]',
data={
'pokemon': pokemon.name,
'cp': pokemon.cp,
'iv': pokemon.iv,
'iv_display': pokemon.iv_display,
'exp': sum(response_dict['responses']['CATCH_POKEMON']['capture_award']['xp']),
'encounter_id': self.pokemon['encounter_id'],
'latitude': self.pokemon['latitude'],
'longitude': self.pokemon['longitude'],
'pokemon_id': pokemon.pokemon_id
}
)
with self.bot.database as conn:
conn.execute('''INSERT INTO catch_log (pokemon, cp, iv, encounter_id, pokemon_id) VALUES (?, ?, ?, ?, ?)''', (pokemon.name, pokemon.cp, pokemon.iv, str(encounter_id), pokemon.pokemon_id))
#conn.commit()
user_data_caught = os.path.join(_base_dir, 'data', 'caught-%s.json' % self.bot.config.username)
with open(user_data_caught, 'ab') as outfile:
outfile.write(str(datetime.now()))
json.dump({
'pokemon': pokemon.name,
'cp': pokemon.cp,
'iv': pokemon.iv,
'encounter_id': self.pokemon['encounter_id'],
'pokemon_id': pokemon.pokemon_id
}, outfile)
outfile.write('\n')
except IOError as e:
self.logger.info('[x] Error while opening location file: %s' % e)
candy = inventory.candies().get(pokemon.pokemon_id)
candy.add(self.get_candy_gained_count(response_dict))
self.emit_event(
'gained_candy',
formatted='You now have {quantity} {type} candy!',
data = {
'quantity': candy.quantity,
'type': candy.type,
},
)
self.bot.softban = False
elif catch_pokemon_status == CATCH_STATUS_MISSED:
self.emit_event(
'pokemon_capture_failed',
formatted='Pokeball thrown to {pokemon} missed.. trying again!',
data={'pokemon': pokemon.name}
)
# Take some time to throw the ball from config options
action_delay(self.catchsim_catch_wait_min, self.catchsim_catch_wait_max)
continue
break
def get_candy_gained_count(self, response_dict):
total_candy_gained = 0
for candy_gained in response_dict['responses']['CATCH_POKEMON']['capture_award']['candy']:
total_candy_gained += candy_gained
return total_candy_gained
def generate_spin_parameter(self, throw_parameters):
spin_success_rate = self.catch_throw_parameters_spin_success_rate
if random() <= spin_success_rate:
throw_parameters['spin_modifier'] = 0.5 + 0.5 * random()
throw_parameters['spin_label'] = ' Curveball'
else:
throw_parameters['spin_modifier'] = 0.499 * random()
throw_parameters['spin_label'] = ''
def generate_throw_quality_parameters(self, throw_parameters):
throw_excellent_chance = self.catch_throw_parameters_excellent_rate
throw_great_chance = self.catch_throw_parameters_great_rate
throw_nice_chance = self.catch_throw_parameters_nice_rate
throw_normal_throw_chance = self.catch_throw_parameters_normal_rate
# Sum all the chance weights, pick a random number in that range and check which type of throw we got
total_chances = throw_excellent_chance + throw_great_chance \
+ throw_nice_chance + throw_normal_throw_chance
random_throw = random() * total_chances
if random_throw <= throw_excellent_chance:
throw_parameters['normalized_reticle_size'] = 1.70 + 0.25 * random()
throw_parameters['normalized_hit_position'] = 1.0
throw_parameters['throw_type_label'] = 'Excellent'
return
random_throw -= throw_excellent_chance
if random_throw <= throw_great_chance:
throw_parameters['normalized_reticle_size'] = 1.30 + 0.399 * random()
throw_parameters['normalized_hit_position'] = 1.0
throw_parameters['throw_type_label'] = 'Great'
return
random_throw -= throw_great_chance
if random_throw <= throw_nice_chance:
throw_parameters['normalized_reticle_size'] = 1.00 + 0.299 * random()
throw_parameters['normalized_hit_position'] = 1.0
throw_parameters['throw_type_label'] = 'Nice'
return
# Not any kind of special throw, so throw a normal one
# Here the reticle size doesn't matter because the throw landed outside it
throw_parameters['normalized_reticle_size'] = 1.25 + 0.70 * random()
throw_parameters['normalized_hit_position'] = 0.0
throw_parameters['throw_type_label'] = 'OK'
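# The weighted throw-quality selection above can be hard to follow inline; the
# standalone sketch below (a hypothetical helper, not part of the original task
# class) shows the same "sum the weights, draw once, walk the buckets" technique
# in isolation.
def pick_weighted_label(weighted_labels):
    """Return one label from a {label: weight} dict using a single random draw.

    Mirrors the bucket-walking logic of generate_throw_quality_parameters above.
    """
    from random import random  # kept local so the sketch is self-contained
    if not weighted_labels:
        raise ValueError("weighted_labels must not be empty")
    draw = random() * sum(weighted_labels.values())
    chosen = None
    for label, weight in weighted_labels.items():
        chosen = label
        if draw <= weight:
            break
        draw -= weight
    return chosen

# Example (hypothetical weights):
#   pick_weighted_label({'Excellent': 0.1, 'Great': 0.5, 'Nice': 0.3, 'OK': 1.0})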
|
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
''' query_operators.py '''
import sys
import math
import tornado.httpclient
import tornado.gen
from heron.tools.tracker.src.python.metricstimeline import getMetricsTimeline
isPY3 = sys.version_info >= (3, 0, 0)
# helper method to support python 2 and 3
def is_str_instance(obj):
if isPY3:
return isinstance(obj, str)
return str(type(obj)) == "<type 'unicode'>" or str(type(obj)) == "<type 'str'>"
#####################################################################
# Data Structure for fetched Metrics
#####################################################################
class Metrics:
"""Represents a univariate timeseries.
Multivariate timeseries is simply a list of this."""
def __init__(self, componentName, metricName, instance, start, end, timeline):
"""Takes (componentName, metricname, instance, timeline)"""
self.componentName = componentName
self.metricName = metricName
self.instance = instance
self.start = start
self.end = end
self.timeline = self.floorTimestamps(start, end, timeline)
# pylint: disable=no-self-use
def floorTimestamps(self, start, end, timeline):
""" floor timestamp """
ret = {}
for timestamp, value in list(timeline.items()):
ts = timestamp // 60 * 60
if start <= ts <= end:
ret[ts] = value
return ret
def setDefault(self, constant, start, end):
""" set default time """
starttime = start // 60 * 60
if starttime < start:
starttime += 60
endtime = end // 60 * 60
while starttime <= endtime:
# STREAMCOMP-1559
# Second check is a workaround, because the response from tmaster
# contains value 0 if it is queried for the current timestamp,
# since the bucket is created in the tmaster, but is not filled
# by the metrics.
if starttime not in self.timeline or self.timeline[starttime] == 0:
self.timeline[starttime] = constant
starttime += 60
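# A minimal sketch (not part of the original module) of how Metrics buckets a
# raw timeline: timestamps are floored to the minute, and setDefault fills the
# remaining minute marks with a constant.
def _example_metrics_flooring():
    metric = Metrics("component", "metric", "instance_1", 120, 300,
                     {125: 1.0, 190: 2.0})
    # 125 -> 120 and 190 -> 180, both inside [120, 300]
    assert metric.timeline == {120: 1.0, 180: 2.0}
    metric.setDefault(0.0, 120, 300)
    # the empty minute marks 240 and 300 are filled with the constant
    assert metric.timeline == {120: 1.0, 180: 2.0, 240: 0.0, 300: 0.0}
    return metric.timeline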
################################################################
# All the Operators supported by query system.
################################################################
# pylint: disable=no-self-use
class Operator:
"""Base class for all operators"""
def __init__(self, _):
raise Exception("Not implemented exception")
# pylint: disable=unused-argument
@tornado.gen.coroutine
def execute(self, tracker, tmaster, start, end):
""" execute """
raise Exception("Not implemented exception")
def isOperator(self):
"""Returns True. This is just usefule for checking that an object is an operator or not."""
return True
class TS(Operator):
"""Time Series Operator. This is the basic operator that is
responsible for getting metrics from tmaster.
Accepts a list of 3 elements:
1. componentName
2. instance - can be "*" for all instances, or a single instance ID
3. metricName - Full metric name with stream id if applicable
Returns a list of Metrics objects, each representing single timeseries"""
# pylint: disable=super-init-not-called
def __init__(self, children):
if len(children) != 3:
raise Exception("TS format error, expects 3 arguments")
self.component = children[0]
if not is_str_instance(self.component):
raise Exception("TS expects component name as first argument")
# A '*' represents all instances, which is represented by empty array.
# Otherwise, it represents a single instance
self.instances = []
if children[1] != "*":
if not is_str_instance(children[1]):
raise Exception("Second argument of TS must be * or instance name")
self.instances.append(children[1])
self.metricName = children[2]
if not is_str_instance(self.metricName):
raise Exception("TS expects metric name as third argument")
@tornado.gen.coroutine
def execute(self, tracker, tmaster, start, end):
# Fetch metrics for start-60 to end+60 because the minute mark
# may be a little skewed. By getting a couple more values,
# we can then truncate based on the interval needed.
metrics = yield getMetricsTimeline(
tmaster, self.component, [self.metricName], self.instances,
start - 60, end + 60)
if not metrics:
return
if "message" in metrics:
raise Exception(metrics["message"])
# Put a blank timeline.
if "timeline" not in metrics or not metrics["timeline"]:
metrics["timeline"] = {
self.metricName: {}
}
timelines = metrics["timeline"][self.metricName]
allMetrics = []
for instance, timeline in list(timelines.items()):
toBeDeletedKeys = []
for key, value in list(timeline.items()):
floatValue = float(value)
# Check if the value is really float or not.
# In python, float("nan") returns "nan" which is actually a float value,
# but it is not what we required.
if math.isnan(floatValue):
toBeDeletedKeys.append(key)
continue
timeline[key] = floatValue
# Remove all keys whose value was NaN
for key in toBeDeletedKeys:
timeline.pop(key)
allMetrics.append(Metrics(self.component, self.metricName, instance, start, end, timeline))
raise tornado.gen.Return(allMetrics)
class Default(Operator):
"""Default Operator. This operator is responsible for filling
holes in the metrics timeline of its children.
Accepts a list of 2 elements:
1. constant to fill the holes with
2. Operator - can be any concrete subclass of Operator on which "execute" can
be called which returns a list of Metrics.
Returns a list of Metrics objects, each representing single timeseries"""
# pylint: disable=super-init-not-called
def __init__(self, children):
if len(children) != 2:
raise Exception("DEFAULT format error, expects 2 arguments")
if not isinstance(children[0], float):
raise Exception("First argument to DEFAULT must be a constant")
self.constant = children[0]
self.timeseries = children[1]
if not self.timeseries.isOperator():
raise Exception(
"Second argument to DEFAULT must be an operator, but is " + str(type(self.timeseries)))
@tornado.gen.coroutine
def execute(self, tracker, tmaster, start, end):
allMetrics = yield self.timeseries.execute(tracker, tmaster, start, end)
if is_str_instance(allMetrics):
raise Exception(allMetrics)
for metric in allMetrics:
metric.setDefault(self.constant, start, end)
raise tornado.gen.Return(allMetrics)
class Sum(Operator):
"""Sum Operator. This operator is used to take sum of all children timeseries.
Accepts a list of elements, all of which have to be either constant or Operators.
Note that the length of the children is unbounded.
1. constants will fill in the holes as well, if present in other timeseries
2. Operator - can be any concrete subclass of Operator on which "execute" can
be called which returns a list of Metrics.
Returns a list of only one Metrics object, representing sum of all timeseries"""
# pylint: disable=super-init-not-called
def __init__(self, children):
self.timeSeriesList = children
@tornado.gen.coroutine
def execute(self, tracker, tmaster, start, end):
# Initialize the metric to be returned with sum of all the constants.
retMetrics = Metrics(None, None, None, start, end, {})
constants = [ts for ts in self.timeSeriesList if isinstance(ts, float)]
retMetrics.setDefault(sum(constants), start, end)
leftOverTimeSeries = [ts for ts in self.timeSeriesList if not isinstance(ts, float)]
futureMetrics = []
for timeseries in leftOverTimeSeries:
futureMetrics.append(timeseries.execute(tracker, tmaster, start, end))
metrics = yield futureMetrics
# Get all the timeseries metrics
allMetrics = []
for met in metrics:
if is_str_instance(met):
raise Exception(met)
allMetrics.extend(met)
# Aggregate all of them
for metric in allMetrics:
for timestamp, value in list(metric.timeline.items()):
if timestamp in retMetrics.timeline:
retMetrics.timeline[timestamp] += value
raise tornado.gen.Return([retMetrics])
class Max(Operator):
"""Max Operator. This operator is used to find max of all children timeseries
for each individual timestamp.
Accepts a list of elements, all of which have to be either constant or Operators.
Note that the length of the children is unbounded.
1. constants will fill in the holes as well, if present in other timeseries
2. Operator - can be any concrete subclass of Operator on which "execute" can
be called which returns a list of Metrics.
Returns a list of only one Metrics object, representing max of all timeseries"""
# pylint: disable=super-init-not-called
def __init__(self, children):
if len(children) < 1:
raise Exception("MAX expects at least one operand.")
self.timeSeriesList = children
@tornado.gen.coroutine
def execute(self, tracker, tmaster, start, end):
# Initialize the metric to be returned with max of all the constants.
retMetrics = Metrics(None, None, None, start, end, {})
constants = [ts for ts in self.timeSeriesList if isinstance(ts, float)]
if constants:
retMetrics.setDefault(max(constants), start, end)
leftOverTimeSeries = [ts for ts in self.timeSeriesList if not isinstance(ts, float)]
futureMetrics = []
for timeseries in leftOverTimeSeries:
futureMetrics.append(timeseries.execute(tracker, tmaster, start, end))
metrics = yield futureMetrics
# Get all the timeseries metrics
allMetrics = []
for met in metrics:
if is_str_instance(met):
raise Exception(met)
allMetrics.extend(met)
# Aggregate all of them
for metric in allMetrics:
for timestamp, value in list(metric.timeline.items()):
if start <= timestamp <= end:
if timestamp not in retMetrics.timeline:
retMetrics.timeline[timestamp] = value
retMetrics.timeline[timestamp] = max(value, retMetrics.timeline[timestamp])
raise tornado.gen.Return([retMetrics])
class Percentile(Operator):
"""Percentile Operator. This operator is used to find a quantile of all children
timeseries for each individual timestamp. This is a more general type of query
than max. Percentile(100, TS...) is equivalent to Max(TS...).
Accepts a list of elements, all of which have to be either constant or Operators.
Note that the length of the children is unbounded.
First argument must always be the required Quantile.
1. Quantile - Required quantile. 100 percentile = max, 0 percentile = min.
2. constants will fill in the holes as well, if present in other timeseries
3. Operator - can be any concrete subclass of Operator on which "execute" can
be called which returns a list of Metrics.
Returns a list of only one Metrics object, representing quantile of all timeseries"""
# pylint: disable=super-init-not-called
def __init__(self, children):
if len(children) < 2:
raise Exception("PERCENTILE expects at least two operands.")
if not isinstance(children[0], float):
raise Exception("First argument to PERCENTILE must be a constant")
if not 0 <= children[0] <= 100:
raise Exception("Quantile must be between 0 and 100 inclusive.")
self.quantile = children[0]
self.timeSeriesList = children[1:]
@tornado.gen.coroutine
def execute(self, tracker, tmaster, start, end):
leftOverTimeSeries = [ts for ts in self.timeSeriesList if not isinstance(ts, float)]
futureMetrics = []
for timeseries in leftOverTimeSeries:
futureMetrics.append(timeseries.execute(tracker, tmaster, start, end))
metrics = yield futureMetrics
# Get all the timeseries metrics
allMetrics = []
for met in metrics:
if is_str_instance(met):
raise Exception(met)
allMetrics.extend(met)
# Keep all the values for a timestamp and we will later do
# a percentile on it
timeline = {}
# Aggregate all of them
for metric in allMetrics:
for timestamp, value in list(metric.timeline.items()):
if start <= timestamp <= end:
if timestamp not in timeline:
timeline[timestamp] = []
timeline[timestamp].append(value)
retTimeline = {}
for timestamp, values in list(timeline.items()):
if not values:
continue
index = int(self.quantile * 1.0 * (len(values) - 1) / 100.0)
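# e.g. quantile=90 over 5 values gives index int(90 * 4 / 100.0) = 3, i.e. the 4th smallest value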
retTimeline[timestamp] = sorted(values)[index]
retMetrics = Metrics(None, None, None, start, end, retTimeline)
raise tornado.gen.Return([retMetrics])
class Divide(Operator):
"""Divide Operator.
Accepts two arguments, both can be univariate or multivariate.
1. constant will be considered as a constant timeseries for all applicable timestamps
2. Operator - can be any concrete subclass of Operator on which "execute" can
be called which returns a list of Metrics.
Three main cases are:
1. When both operands are multivariate -
a. Divide operation will be done on matching data, that is, with same instance id.
b. If the instances in both the operands do not match, error is thrown.
c. Returns multivariate timeseries, each representing the result of division
on the two corresponding timeseries.
2. When one operand is univariate, and other is multivariate -
a. This includes division by constants as well.
b. The univariate operand will participate with all timeseries in multivariate.
c. The instance information of the multivariate timeseries will be preserved in the result.
d. Returns multivariate timeseries.
3. When both operands are univariate.
a. Instance information is ignored in this case
b. Returns univariate timeseries which is the result of division operation."""
# pylint: disable=super-init-not-called
def __init__(self, children):
if len(children) != 2:
raise Exception("DIVIDE expects exactly two arguments.")
self.timeSeries1 = children[0]
self.timeSeries2 = children[1]
# pylint: disable=too-many-branches, too-many-statements
@tornado.gen.coroutine
def execute(self, tracker, tmaster, start, end):
# Future metrics so as to execute them in parallel
futureMetrics = []
if not isinstance(self.timeSeries1, float):
futureMetrics.append(self.timeSeries1.execute(tracker, tmaster, start, end))
if not isinstance(self.timeSeries2, float):
futureMetrics.append(self.timeSeries2.execute(tracker, tmaster, start, end))
futureResolvedMetrics = yield futureMetrics
# Get first set of metrics
metrics = {}
if isinstance(self.timeSeries1, float):
met = Metrics(None, None, None, start, end, {})
met.setDefault(self.timeSeries1, start, end)
metrics[""] = met
else:
met = futureResolvedMetrics.pop(0)
if not met:
pass
elif len(met) == 1 and not met[0].instance:
# Only one timeseries and it has no instance, so treat it as univariate
metrics[""] = met[0]
else:
for m in met:
if not m.instance:
raise Exception("DIVIDE with multivariate requires instance based timeseries")
metrics[m.instance] = m
# Get second set of metrics
metrics2 = {}
if isinstance(self.timeSeries2, float):
if self.timeSeries2 == 0:
raise Exception("Divide by zero not allowed")
met = Metrics(None, None, None, start, end, {})
met.setDefault(self.timeSeries2, start, end)
metrics2[""] = met
else:
met = futureResolvedMetrics.pop(0)
if not met:
pass
elif len(met) == 1 and not met[0].instance:
# Only one timeseries and it has no instance, so treat it as univariate
metrics2[""] = met[0]
else:
for m in met:
if not m.instance:
raise Exception("DIVIDE with multivariate requires instance based timeseries")
metrics2[m.instance] = m
# In case both are multivariate, only equal instances will get operated on.
# pylint: disable=too-many-boolean-expressions
if ((len(metrics) > 1 or (len(metrics) == 1 and "" not in metrics))
and (len(metrics2) > 1 or (len(metrics2) == 1 and "" not in metrics2))):
allMetrics = []
for key in metrics:
if key not in metrics2:
continue
met = Metrics(None, None, key, start, end, {})
for timestamp in list(metrics[key].timeline.keys()):
if timestamp not in metrics2[key].timeline or metrics2[key].timeline[timestamp] == 0:
metrics[key].timeline.pop(timestamp)
else:
met.timeline[timestamp] = metrics[key].timeline[timestamp] / \
metrics2[key].timeline[timestamp]
allMetrics.append(met)
raise tornado.gen.Return(allMetrics)
# If first is univariate
if len(metrics) == 1 and "" in metrics:
allMetrics = []
for key, metric in list(metrics2.items()):
# Initialize with first metrics timeline, but second metric's instance
# because that is multivariate
met = Metrics(None, None, metric.instance, start, end, dict(metrics[""].timeline))
for timestamp in list(met.timeline.keys()):
if timestamp not in metric.timeline or metric.timeline[timestamp] == 0:
met.timeline.pop(timestamp)
else:
met.timeline[timestamp] /= metric.timeline[timestamp]
allMetrics.append(met)
raise tornado.gen.Return(allMetrics)
# If second is univariate
allMetrics = []
for key, metric in list(metrics.items()):
# Initialize with first metrics timeline and its instance
met = Metrics(None, None, metric.instance, start, end, dict(metric.timeline))
for timestamp in list(met.timeline.keys()):
if timestamp not in metrics2[""].timeline or metrics2[""].timeline[timestamp] == 0:
met.timeline.pop(timestamp)
else:
met.timeline[timestamp] /= metrics2[""].timeline[timestamp]
allMetrics.append(met)
raise tornado.gen.Return(allMetrics)
class Multiply(Operator):
"""Multiply Operator. Has same conditions as division operator.
This is to keep the API simple.
Accepts two arguments, both can be univariate or multivariate.
1. constant will be considered as a constant timeseries for all applicable timestamps
2. Operator - can be any concrete subclass of Operator on which "execute" can
be called which returns a list of Metrics.
Three main cases are:
1. When both operands are multivariate -
a. Multiply operation will be done on matching data, that is, with same instance id.
b. If the instances in both the operands do not match, error is thrown.
c. Returns multivariate timeseries, each representing the result of multiplication
on the two corresponding timeseries.
2. When one operand is univariate, and other is multivariate -
a. This includes multiplication by constants as well.
b. The univariate operand will participate with all timeseries in multivariate.
c. The instance information of the multivariate timeseries will be preserved in the result.
d. Returns multivariate timeseries.
3. When both operands are univariate.
a. Instance information is ignored in this case
b. Returns univariate timeseries which is the result of multiplication operation."""
# pylint: disable=super-init-not-called
def __init__(self, children):
if len(children) != 2:
raise Exception("MULTIPLY expects exactly two arguments.")
self.timeSeries1 = children[0]
self.timeSeries2 = children[1]
# pylint: disable=too-many-branches, too-many-statements
@tornado.gen.coroutine
def execute(self, tracker, tmaster, start, end):
# Future metrics so as to execute them in parallel
futureMetrics = []
if not isinstance(self.timeSeries1, float):
futureMetrics.append(self.timeSeries1.execute(tracker, tmaster, start, end))
if not isinstance(self.timeSeries2, float):
futureMetrics.append(self.timeSeries2.execute(tracker, tmaster, start, end))
futureResolvedMetrics = yield futureMetrics
# Get first set of metrics
metrics = {}
if isinstance(self.timeSeries1, float):
met = Metrics(None, None, None, start, end, {})
met.setDefault(self.timeSeries1, start, end)
metrics[""] = met
else:
met = futureResolvedMetrics.pop(0)
if not met:
pass
elif len(met) == 1 and not met[0].instance:
# Only one timeseries and it has no instance, so treat it as univariate
metrics[""] = met[0]
else:
for m in met:
if not m.instance:
raise Exception("MULTIPLY with multivariate requires instance based timeseries")
metrics[m.instance] = m
# Get second set of metrics
metrics2 = {}
if isinstance(self.timeSeries2, float):
met = Metrics(None, None, None, start, end, {})
met.setDefault(self.timeSeries2, start, end)
metrics2[""] = met
else:
met = futureResolvedMetrics.pop(0)
if not met:
pass
elif len(met) == 1 and not met[0].instance:
# Only one timeseries and it has no instance, so treat it as univariate
metrics2[""] = met[0]
else:
for m in met:
if not m.instance:
raise Exception("MULTIPLY with multivariate requires instance based timeseries")
metrics2[m.instance] = m
# In case both are multivariate, only equal instances will get operated on.
# pylint: disable=too-many-boolean-expressions
if ((len(metrics) > 1 or (len(metrics) == 1 and "" not in metrics))
and (len(metrics2) > 1 or (len(metrics2) == 1 and "" not in metrics2))):
allMetrics = []
for key in metrics:
if key not in metrics2:
continue
met = Metrics(None, None, key, start, end, {})
for timestamp in list(metrics[key].timeline.keys()):
if timestamp not in metrics2[key].timeline:
metrics[key].timeline.pop(timestamp)
else:
met.timeline[timestamp] = metrics[key].timeline[timestamp] * \
metrics2[key].timeline[timestamp]
allMetrics.append(met)
raise tornado.gen.Return(allMetrics)
# If first is univariate
if len(metrics) == 1 and "" in metrics:
allMetrics = []
for key, metric in list(metrics2.items()):
# Initialize with first metrics timeline, but second metric's instance
# because that is multivariate
met = Metrics(None, None, metric.instance, start, end, dict(metrics[""].timeline))
for timestamp in list(met.timeline.keys()):
if timestamp not in metric.timeline:
met.timeline.pop(timestamp)
else:
met.timeline[timestamp] *= metric.timeline[timestamp]
allMetrics.append(met)
raise tornado.gen.Return(allMetrics)
# If second is univariate
allMetrics = []
for key, metric in list(metrics.items()):
# Initialize with first metrics timeline and its instance
met = Metrics(None, None, metric.instance, start, end, dict(metric.timeline))
for timestamp in list(met.timeline.keys()):
if timestamp not in metrics2[""].timeline:
met.timeline.pop(timestamp)
else:
met.timeline[timestamp] *= metrics2[""].timeline[timestamp]
allMetrics.append(met)
raise tornado.gen.Return(allMetrics)
class Subtract(Operator):
"""Subtract Operator. Has same conditions as division operator.
This is to keep the API simple.
Accepts two arguments, both can be univariate or multivariate.
1. constant will be considered as a constant timeseries for all applicable timestamps
2. Operator - can be any concrete subclass of Operator on which "execute" can
be called which returns a list of Metrics.
Three main cases are:
1. When both operands are multivariate -
a. Subtract operation will be done on matching data, that is, with same instance id.
b. If the instances in both the operands do not match, error is thrown.
c. Returns multivariate timeseries, each representing the result of subtraction
on the two corresponding timeseries.
2. When one operand is univariate, and other is multivariate -
a. This includes subtraction by constants as well.
b. The univariate operand will participate with all timeseries in multivariate.
c. The instance information of the multivariate timeseries will be preserved in the result.
d. Returns multivariate timeseries.
3. When both operands are univariate.
a. Instance information is ignored in this case
b. Returns univariate timeseries which is the result of subtraction operation."""
# pylint: disable=super-init-not-called
def __init__(self, children):
if len(children) != 2:
raise Exception("SUBTRACT expects exactly two arguments.")
self.timeSeries1 = children[0]
self.timeSeries2 = children[1]
# pylint: disable=too-many-branches, too-many-statements
@tornado.gen.coroutine
def execute(self, tracker, tmaster, start, end):
# Future metrics so as to execute them in parallel
futureMetrics = []
if not isinstance(self.timeSeries1, float):
futureMetrics.append(self.timeSeries1.execute(tracker, tmaster, start, end))
if not isinstance(self.timeSeries2, float):
futureMetrics.append(self.timeSeries2.execute(tracker, tmaster, start, end))
futureResolvedMetrics = yield futureMetrics
# Get first set of metrics
metrics = {}
if isinstance(self.timeSeries1, float):
met = Metrics(None, None, None, start, end, {})
met.setDefault(self.timeSeries1, start, end)
metrics[""] = met
else:
met = futureResolvedMetrics.pop(0)
if not met:
pass
elif len(met) == 1 and not met[0].instance:
# Only one timeseries and it has no instance, so treat it as univariate
metrics[""] = met[0]
else:
for m in met:
if not m.instance:
raise Exception("SUBTRACT with multivariate requires instance based timeseries")
metrics[m.instance] = m
# Get second set of metrics
metrics2 = {}
if isinstance(self.timeSeries2, float):
met = Metrics(None, None, None, start, end, {})
met.setDefault(self.timeSeries2, start, end)
metrics2[""] = met
else:
met = futureResolvedMetrics.pop(0)
if not met:
pass
elif len(met) == 1 and not met[0].instance:
# Only one timeseries and it has no instance, so treat it as univariate
metrics2[""] = met[0]
else:
for m in met:
if not m.instance:
raise Exception("SUBTRACT with multivariate requires instance based timeseries")
metrics2[m.instance] = m
# In case both are multivariate, only equal instances will get operated on.
# pylint: disable=too-many-boolean-expressions
if ((len(metrics) > 1 or (len(metrics) == 1 and "" not in metrics))
    and (len(metrics2) > 1 or (len(metrics2) == 1 and "" not in metrics2))):
allMetrics = []
for key in metrics:
if key not in metrics2:
continue
met = Metrics(None, None, key, start, end, {})
for timestamp in list(metrics[key].timeline.keys()):
if timestamp not in metrics2[key].timeline:
metrics[key].timeline.pop(timestamp)
else:
met.timeline[timestamp] = metrics[key].timeline[timestamp] - \
metrics2[key].timeline[timestamp]
allMetrics.append(met)
raise tornado.gen.Return(allMetrics)
# If first is univariate
if len(metrics) == 1 and "" in metrics:
allMetrics = []
for key, metric in list(metrics2.items()):
# Initialize with first metrics timeline, but second metric's instance
# because that is multivariate
met = Metrics(None, None, metric.instance, start, end, dict(metrics[""].timeline))
for timestamp in list(met.timeline.keys()):
if timestamp not in metric.timeline:
met.timeline.pop(timestamp)
else:
met.timeline[timestamp] -= metric.timeline[timestamp]
allMetrics.append(met)
raise tornado.gen.Return(allMetrics)
# If second is univariate
allMetrics = []
for key, metric in list(metrics.items()):
# Initialize with first metrics timeline and its instance
met = Metrics(None, None, metric.instance, start, end, dict(metric.timeline))
for timestamp in list(met.timeline.keys()):
if timestamp not in metrics2[""].timeline:
met.timeline.pop(timestamp)
else:
met.timeline[timestamp] -= metrics2[""].timeline[timestamp]
allMetrics.append(met)
raise tornado.gen.Return(allMetrics)
class Rate(Operator):
"""Rate Operator. This operator is used to find rate of change for all timeseries.
Accepts a list of 1 element, which has to be a concrete subclass of Operators.
Returns a list of Metrics object, representing rate of all timeseries"""
# pylint: disable=super-init-not-called
def __init__(self, children):
if len(children) != 1:
raise Exception("RATE expects exactly one argument.")
if isinstance(children[0], float):
raise Exception("RATE requires a timeseries, not constant.")
self.timeSeries = children[0]
@tornado.gen.coroutine
def execute(self, tracker, tmaster, start, end):
# Get 1 previous data point to be able to apply rate on the first data
metrics = yield self.timeSeries.execute(tracker, tmaster, start-60, end)
# Apply rate on all of them
for metric in metrics:
timeline = {}
allTimestamps = sorted(metric.timeline.keys())
for i in range(1, len(allTimestamps)):
timestamp = allTimestamps[i]
prev = allTimestamps[i-1]
if start <= timestamp <= end and timestamp - prev == 60:
timeline[timestamp] = metric.timeline[timestamp] - metric.timeline[prev]
metric.timeline = timeline
raise tornado.gen.Return(metrics)
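# A minimal sketch (not part of the original module) of how these operators
# compose into a query tree. The component, instance, and metric names below are
# hypothetical; the tree is evaluated by yielding `op.execute(tracker, tmaster,
# start, end)` from inside a tornado coroutine.
def _example_compose_query():
    # DIVIDE(SUM(TS(spout, *, emit-count)), TS(spout, container_1_spout_1, ack-count))
    numerator = Sum([TS(["spout", "*", "__emit-count/default"])])
    denominator = TS(["spout", "container_1_spout_1", "__ack-count/default"])
    return Divide([numerator, denominator])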
|
|
"""
send documents representing object data to elasticsearch for supported file extensions.
note: we truncate outbound documents to DOC_SIZE_LIMIT characters
(to bound memory pressure and request size to elastic)
a little knowledge on deletes and delete markers:
if bucket versioning is on:
- `aws s3api delete-object (no --version-id)` or `aws s3 rm`
- push a new delete marker onto the stack with a version-id
- generate ObjectRemoved:DeleteMarkerCreated
if bucket versioning was on and is then turned off:
- `aws s3 rm` or `aws s3api delete-object (no --version-id)`
- replace event at top of stack
- if a versioned delete marker, push a new one on top of it
- if an un-versioned delete marker, replace that marker with new marker
with version "null" (ObjectCreate will similarly replace the same with an object
of version "null")
- if object, destroy object
- generate ObjectRemoved:DeleteMarkerCreated
- problem: no way of knowing if DeleteMarkerCreated destroyed bytes
or just created a DeleteMarker; this is usually given by the return
value of `delete-object` but the S3 event has no knowledge of the same
- `aws s3api delete-object --version-id VERSION`
- destroy corresponding delete marker or object; v may be null in which
case it will destroy the object with version null (occurs when adding
new objects to a bucket that was versioned but is no longer)
- generate ObjectRemoved:Deleted
if bucket versioning is off and has always been off:
- `aws s3 rm` or `aws s3api delete-object`
- destroy object
- generate a single ObjectRemoved:Deleted
counterintuitive things:
- turning off versioning doesn't mean version stack can't get deeper (by at
least 1) as indicated above in the case where a new marker is pushed onto
the version stack
- both creating a delete marker (soft delete) and hard deleting a delete marker
by providing its version-id will result in an eventType of DeleteObject
and $.detail.responseElements.x-amz-delete-marker = true; it is therefore
not possible to tell the difference between a new delete marker and the deletion
of an existing one
See docs/EventBridge.md for more
"""
import datetime
import json
import os
import pathlib
import re
from os.path import split
from typing import Optional
from urllib.parse import unquote_plus
import boto3
import botocore
import nbformat
from dateutil.tz import tzutc
from document_queue import (
EVENT_PREFIX,
MAX_RETRY,
DocTypes,
DocumentQueue,
get_content_index_bytes,
get_content_index_extensions,
)
from jsonschema import ValidationError, draft7_format_checker, validate
from pdfminer.high_level import extract_text as extract_pdf_text
from tenacity import (
retry,
retry_if_exception,
stop_after_attempt,
wait_exponential,
)
from t4_lambda_shared.preview import (
ELASTIC_LIMIT_LINES,
extract_excel,
extract_fcs,
extract_parquet,
get_bytes,
get_preview_lines,
trim_to_bytes,
)
from t4_lambda_shared.utils import (
MANIFEST_PREFIX_V1,
POINTER_PREFIX_V1,
get_available_memory,
get_quilt_logger,
query_manifest_content,
separated_env_to_iter,
)
# translate events to S3 native names
EVENTBRIDGE_TO_S3 = {
"PutObject": EVENT_PREFIX["Created"] + "Put",
"CopyObject": EVENT_PREFIX["Created"] + "Copy",
"CompleteMultipartUpload": EVENT_PREFIX["Created"] + "CompleteMultipartUpload",
# see map_event_name for complete logic
"DeleteObject": None,
# "DeleteObjects" is not handled since it does not contain enough information on
# which objects were deleted
}
# ensure that we process events of known and expected shape
EVENT_SCHEMA = {
'type': 'object',
'properties': {
'awsRegion': {
'type': 'string'
},
'eventName': {
'type': 'string'
},
'eventTime': {
'type': 'string',
'format': 'date-time'
},
's3': {
'type': 'object',
'properties': {
'bucket': {
'type': 'object',
'properties': {
'name': {
'type': 'string'
}
},
'required': ['name'],
'additionalProperties': True
},
'object': {
'type': 'object',
'properties': {
'eTag': {
'type': 'string'
},
'isDeleteMarker': {
'type': 'string'
},
'key': {
'type': 'string'
},
'versionId': {
'type': 'string'
}
},
'required': ['key'],
'additionalProperties': True
},
},
'required': ['bucket', 'object'],
'additionalProperties': True
},
},
'required': ['s3', 'eventName'],
'additionalProperties': True
}
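# For reference, a minimal event that satisfies EVENT_SCHEMA (bucket and key
# below are hypothetical; eTag, versionId, and isDeleteMarker are optional):
# {
#     "eventName": "ObjectCreated:Put",
#     "eventTime": "2021-01-01T00:00:00Z",
#     "s3": {
#         "bucket": {"name": "example-bucket"},
#         "object": {"key": "path/to/object.csv"}
#     }
# }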
# Max number of PDF pages to extract because it can be slow
MAX_PDF_PAGES = 100
# 10 MB, see https://amzn.to/2xJpngN
NB_VERSION = 4 # default notebook version for nbformat
# currently only affects .parquet, TODO: extend to other extensions
assert 'SKIP_ROWS_EXTS' in os.environ
SKIP_ROWS_EXTS = separated_env_to_iter('SKIP_ROWS_EXTS')
SELECT_PACKAGE_META = "SELECT * from S3Object o WHERE o.version IS NOT MISSING LIMIT 1"
# No WHERE clause needed for aggregations since S3 Select skips missing fields for aggs
SELECT_PACKAGE_STATS = (
"SELECT COALESCE(SUM(obj['size']), 0) as total_bytes,"
" COUNT(obj['size']) as total_files from S3Object obj"
)
TEST_EVENT = "s3:TestEvent"
# we need to filter out GetObject and HeadObject calls generated by the present
# lambda in order to display accurate analytics in the Quilt catalog
# a custom user agent enables said filtration
USER_AGENT_EXTRA = " quilt3-lambdas-es-indexer"
def now_like_boto3():
"""ensure timezone UTC for consistency with boto3:
Example of what boto3 returns on head_object:
'LastModified': datetime.datetime(2019, 11, 6, 3, 1, 16, tzinfo=tzutc()),
"""
return datetime.datetime.now(tz=tzutc())
def infer_extensions(key, ext):
"""guess extensions if possible"""
# Handle special case of hive partitions
# see https://www.qubole.com/blog/direct-writes-to-increase-spark-performance/
if (
re.fullmatch(r".c\d{3,5}", ext) or re.fullmatch(r".*-c\d{3,5}$", key)
or key.endswith("_0")
or ext == ".pq"
):
return ".parquet"
return ext
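# A few illustrative mappings for infer_extensions (sketch only; the keys below
# are hypothetical):
def _example_infer_extensions():
    assert infer_extensions("nightly/part-00007-abc-c000", "") == ".parquet"
    assert infer_extensions("warehouse/data_0", "") == ".parquet"
    assert infer_extensions("metrics.pq", ".pq") == ".parquet"
    # anything else keeps its original extension
    assert infer_extensions("report.csv", ".csv") == ".csv"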
def should_retry_exception(exception):
"""don't retry certain 40X errors"""
if hasattr(exception, 'response'):
error_code = exception.response.get('Error', {}).get('Code', 218)
return error_code not in ["402", "403", "404"]
return False
@retry(
stop=stop_after_attempt(MAX_RETRY),
wait=wait_exponential(multiplier=2, min=4, max=10),
retry=(retry_if_exception(should_retry_exception))
)
def select_manifest_meta(s3_client, bucket: str, key: str):
"""
wrapper for retry and returning a string
"""
try:
raw = query_manifest_content(
s3_client,
bucket=bucket,
key=key,
sql_stmt=SELECT_PACKAGE_META
)
return raw.read()
except botocore.exceptions.ClientError as cle:
print(f"Unable to S3 select manifest: {cle}")
return None
def do_index(
s3_client,
doc_queue: DocumentQueue,
event_type: str,
*,
bucket: str,
etag: str,
ext: str,
key: str,
last_modified: str,
text: str = '',
size: int = 0,
version_id: Optional[str] = None,
):
"""wrap dual indexing of packages and objects"""
logger_ = get_quilt_logger()
# index as object (always)
logger_.debug("%s to indexing queue (%s)", key, event_type)
doc_queue.append(
event_type,
DocTypes.OBJECT,
bucket=bucket,
ext=ext,
etag=etag,
key=key,
last_modified=last_modified,
size=size,
text=text,
version_id=version_id
)
# maybe index as package
if index_if_package(
s3_client,
doc_queue,
event_type,
bucket=bucket,
etag=etag,
ext=ext,
key=key,
last_modified=last_modified,
size=size,
version_id=version_id,
):
logger_.debug("%s indexed as package (%s)", key, event_type)
def index_if_package(
s3_client,
doc_queue: DocumentQueue,
event_type: str,
*,
bucket: str,
etag: str,
ext: str,
key: str,
last_modified: str,
version_id: Optional[str],
size: int
) -> bool:
"""index manifest pointer files as package documents in ES
Returns:
- True if pointer to manifest (and passes to doc_queue for indexing)
- False if not a manifest (no attempt at indexing)
"""
logger_ = get_quilt_logger()
pointer_prefix, pointer_file = split(key)
handle = pointer_prefix[len(POINTER_PREFIX_V1):]
if (
not pointer_file
or not pointer_prefix.startswith(POINTER_PREFIX_V1)
or len(handle) < 3
or '/' not in handle
):
logger_.debug("Not indexing as manifest file s3://%s/%s", bucket, key)
return False
try:
manifest_timestamp = int(pointer_file)
is_tag = False
if not 1451631600 <= manifest_timestamp <= 1767250800:
logger_.warning("Unexpected manifest timestamp s3://%s/%s", bucket, key)
return False
except ValueError as err:
is_tag = True
logger_.debug("Non-integer manifest pointer: s3://%s/%s, %s", bucket, key, err)
package_hash = ''
first_dict = {}
stats = None
# we only need to get manifest contents for proper create events (not latest pointers)
if event_type.startswith(EVENT_PREFIX["Created"]) and not is_tag:
package_hash = get_plain_text(
bucket,
key,
size,
None,
etag=etag,
s3_client=s3_client,
version_id=version_id,
).strip()
manifest_key = f'{MANIFEST_PREFIX_V1}{package_hash}'
first = select_manifest_meta(s3_client, bucket, manifest_key)
stats = select_package_stats(s3_client, bucket, manifest_key)
if not first:
logger_.error("S3 select failed %s %s", bucket, manifest_key)
return False
try:
first_dict = json.loads(first)
except (json.JSONDecodeError, botocore.exceptions.ClientError) as exc:
print(
f"{exc}\n"
f"\tFailed to select first line of manifest s3://{bucket}/{key}."
f"\tGot {first}."
)
return False
doc_queue.append(
event_type,
DocTypes.PACKAGE,
bucket=bucket,
etag=etag,
ext=ext,
handle=handle,
key=key,
last_modified=last_modified,
# if we don't have the hash, we're processing a tag
package_hash=(package_hash or pointer_file),
package_stats=stats,
pointer_file=pointer_file,
comment=str(first_dict.get("message", "")),
metadata=json.dumps(first_dict.get("user_meta", {})),
version_id=version_id,
)
return True
def select_package_stats(s3_client, bucket, manifest_key) -> Optional[dict]:
"""use s3 select to generate file stats for package"""
logger_ = get_quilt_logger()
try:
raw_stats = query_manifest_content(
s3_client,
bucket=bucket,
key=manifest_key,
sql_stmt=SELECT_PACKAGE_STATS
).read()
if raw_stats:
stats = json.loads(raw_stats)
assert isinstance(stats['total_bytes'], int)
assert isinstance(stats['total_files'], int)
return stats
except (
AssertionError,
botocore.exceptions.ClientError,
json.JSONDecodeError,
KeyError,
) as err:
logger_.error("Unable to compute package stats via S3 select: %s", err)
return None
def maybe_get_contents(bucket, key, ext, *, etag, version_id, s3_client, size):
"""get the byte contents of a file if it's a target for deep indexing"""
logger_ = get_quilt_logger()
if ext.endswith('.gz'):
compression = 'gz'
ext = ext[:-len('.gz')]
else:
compression = None
logger_.debug(
"Entering maybe_get_contents (could run out of mem.) %s %s %s", bucket, key, version_id
)
content = ""
inferred_ext = infer_extensions(key, ext)
if inferred_ext in get_content_index_extensions(bucket_name=bucket):
if inferred_ext == ".fcs":
obj = retry_s3(
"get",
bucket,
key,
size,
etag=etag,
s3_client=s3_client,
version_id=version_id
)
body, info = extract_fcs(get_bytes(obj["Body"], compression), as_html=False)
# be smart and just send column names to ES (instead of bloated full schema)
# if this is not an HTML/catalog preview
content = trim_to_bytes(f"{body}\n{info}", get_content_index_bytes(bucket_name=bucket))
elif inferred_ext == ".ipynb":
content = trim_to_bytes(
# we have no choice but to fetch the entire notebook, because we
# are going to parse it
# warning: huge notebooks could spike memory here
get_notebook_cells(
bucket,
key,
size,
compression,
etag=etag,
s3_client=s3_client,
version_id=version_id
),
get_content_index_bytes(bucket_name=bucket),
)
elif inferred_ext == ".parquet":
if size >= get_available_memory():
print(f"{bucket}/{key} too large to deserialize; skipping contents")
# at least index the key and other stats, but don't overrun memory
# and fail indexing altogether
return ""
obj = retry_s3(
"get",
bucket,
key,
size,
etag=etag,
s3_client=s3_client,
version_id=version_id
)
body, info = extract_parquet(
get_bytes(obj["Body"], compression),
as_html=False,
skip_rows=(inferred_ext in SKIP_ROWS_EXTS),
max_bytes=get_content_index_bytes(bucket_name=bucket),
)
# be smart and just send column names to ES (instead of bloated full schema)
# if this is not an HTML/catalog preview
columns = ','.join(list(info['schema']['names']))
content = trim_to_bytes(f"{columns}\n{body}", get_content_index_bytes(bucket_name=bucket))
elif inferred_ext == ".pdf":
obj = retry_s3(
"get",
bucket,
key,
size,
etag=etag,
s3_client=s3_client,
version_id=version_id
)
content = trim_to_bytes(
extract_pdf(get_bytes(obj["Body"], compression)),
get_content_index_bytes(bucket_name=bucket),
)
elif inferred_ext in (".xls", ".xlsx"):
obj = retry_s3(
"get",
bucket,
key,
size,
etag=etag,
s3_client=s3_client,
version_id=version_id
)
body, _ = extract_excel(get_bytes(obj["Body"], compression), as_html=False)
content = trim_to_bytes(
body,
get_content_index_bytes(bucket_name=bucket),
)
else:
content = get_plain_text(
bucket,
key,
size,
compression,
etag=etag,
s3_client=s3_client,
version_id=version_id
)
return content
def extract_pdf(file_):
"""Get plain text form PDF for searchability.
Args:
file_ - file-like object opened in binary mode, pointing to XLS or XLSX
Returns:
pdf text as a string
Warning:
This function can be slow. The 8-page test PDF takes ~10 sec to turn into a string.
"""
txt = extract_pdf_text(file_, maxpages=MAX_PDF_PAGES)
# crunch down space; extract_text inserts multiple spaces
# between words, literal newlines, etc.
return re.sub(r"\s+", " ", txt)
def extract_text(notebook_str):
""" Extract code and markdown
Args:
* notebook_str - notebook as a string
Returns:
* str - select code and markdown source (and outputs)
Pre:
* notebook is well-formed per notebook version 4
* "cell_type" is defined for all cells
* "source" defined for all "code" and "markdown" cells
Throws:
* Anything nbformat.reads() can throw :( which is diverse and poorly
documented, hence the `except Exception` in handler()
Notes:
* Deliberately decided not to index output streams and display strings
because they were noisy and low value
* Tested this code against ~6400 Jupyter notebooks in
s3://alpha-quilt-storage/tree/notebook-search/
* Might be useful to index "cell_type" : "raw" in the future
See also:
* Format reference https://nbformat.readthedocs.io/en/latest/format_description.html
"""
formatted = nbformat.reads(notebook_str, as_version=NB_VERSION)
text = []
for cell in formatted.get("cells", []):
if "source" in cell and cell.get("cell_type") in ("code", "markdown"):
text.append(cell["source"])
return "\n".join(text)
def get_notebook_cells(bucket, key, size, compression, *, etag, s3_client, version_id):
"""extract cells for ipynb notebooks for indexing"""
text = ""
try:
obj = retry_s3(
"get",
bucket,
key,
size,
etag=etag,
s3_client=s3_client,
version_id=version_id
)
data = get_bytes(obj["Body"], compression)
notebook = data.getvalue().decode("utf-8")
try:
text = extract_text(notebook)
except (json.JSONDecodeError, nbformat.reader.NotJSONError):
print(f"Invalid JSON in {key}.")
except (KeyError, AttributeError) as err:
print(f"Missing key in {key}: {err}")
# there might be more errors than covered by test_read_notebook
# better not to fail altogether
except Exception as exc: # pylint: disable=broad-except
print(f"Exception in file {key}: {exc}")
except UnicodeDecodeError as uni:
print(f"Unicode decode error in {key}: {uni}")
return text
def get_plain_text(
bucket,
key,
size,
compression,
*,
etag,
s3_client,
version_id
) -> str:
"""get plain text object contents"""
text = ""
try:
obj = retry_s3(
"get",
bucket,
key,
size,
etag=etag,
s3_client=s3_client,
limit=get_content_index_bytes(bucket_name=bucket),
version_id=version_id
)
lines = get_preview_lines(
obj["Body"],
compression,
ELASTIC_LIMIT_LINES,
get_content_index_bytes(bucket_name=bucket),
)
text = '\n'.join(lines)
except UnicodeDecodeError as ex:
print(f"Unicode decode error in {key}", ex)
return text
def make_s3_client():
"""make a client with a custom user agent string so that we can
filter the present lambda's requests to S3 from object analytics"""
configuration = botocore.config.Config(user_agent_extra=USER_AGENT_EXTRA)
return boto3.client("s3", config=configuration)
def map_event_name(event: dict):
"""transform eventbridge names into S3-like ones"""
input_ = event["eventName"]
if input_ in EVENTBRIDGE_TO_S3:
if input_ == "DeleteObject":
if event["s3"]["object"].get("isDeleteMarker"):
return EVENT_PREFIX["Removed"] + "DeleteMarkerCreated"
return EVENT_PREFIX["Removed"] + "Delete"
# all non-delete events just use the map
return EVENTBRIDGE_TO_S3[input_]
# leave event type unchanged if we don't recognize it
return input_
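# A minimal sketch (not part of the original module) of how DeleteObject events
# are translated; the event bodies below are trimmed to the fields map_event_name
# actually reads.
def _example_map_event_name():
    soft_delete = {"eventName": "DeleteObject",
                   "s3": {"object": {"isDeleteMarker": "true"}}}
    hard_delete = {"eventName": "DeleteObject", "s3": {"object": {}}}
    assert map_event_name(soft_delete) == EVENT_PREFIX["Removed"] + "DeleteMarkerCreated"
    assert map_event_name(hard_delete) == EVENT_PREFIX["Removed"] + "Delete"
    # unknown event names pass through unchanged
    assert map_event_name({"eventName": "SomethingElse"}) == "SomethingElse"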
def shape_event(event: dict):
"""check event schema, return None if schema check fails"""
logger_ = get_quilt_logger()
try:
validate(
instance=event,
schema=EVENT_SCHEMA,
# format_checker= required for format:date-time validation
# (we also need strict-rfc3339 in requirements.txt)
format_checker=draft7_format_checker,
)
except ValidationError as error:
logger_.error("Invalid event format: %s\n%s", error, event)
return None
# be a good citizen and don't modify params
return {
**event,
'eventName': map_event_name(event),
}
def handler(event, context):
"""enumerate S3 keys in event, extract relevant data, queue events, send to
elastic via bulk() API
"""
logger_ = get_quilt_logger()
# message is a proper SQS message, which either contains a single event
# (from the bucket notification system) or batch-many events as determined
# by enterprise/**/bulk_loader.py
# An exception that we'll want to re-raise after the batch sends
content_exception = None
batch_processor = DocumentQueue(context)
s3_client = make_s3_client()
for message in event["Records"]:
body = json.loads(message["body"])
body_message = json.loads(body["Message"])
if "Records" not in body_message:
# could be TEST_EVENT, or another unexpected event; skip it
logger_.error("No 'Records' key in message['body']: %s", message)
continue
events = body_message["Records"]
# event is a single S3 event
for event_ in events:
validated = shape_event(event_)
if not validated:
logger_.debug("Skipping invalid event %s", event_)
continue
event_ = validated
logger_.debug("Processing %s", event_)
try:
event_name = event_["eventName"]
# Process all Create:* and Remove:* events
if not any(event_name.startswith(n) for n in EVENT_PREFIX.values()):
logger_.warning("Skipping unknown event type: %s", event_name)
continue
bucket = event_["s3"]["bucket"]["name"]
# In the grand tradition of IE6, S3 events turn spaces into '+'
# TODO: check if eventbridge events do the same thing with +
key = unquote_plus(event_["s3"]["object"]["key"])
version_id = event_["s3"]["object"].get("versionId", None)
# ObjectRemoved:Delete does not include "eTag"
etag = event_["s3"]["object"].get("eTag", "")
# synthetic events from bulk scanner might define lastModified
last_modified = (
event_["s3"]["object"].get("lastModified") or event_["eventTime"]
)
# Get two levels of extensions to handle files like .csv.gz
path = pathlib.PurePosixPath(key)
ext1 = path.suffix
ext2 = path.with_suffix('').suffix
ext = (ext2 + ext1).lower()
# Handle delete and deletemarker first and then continue so that
# head_object and get_object (below) don't fail
if event_name.startswith(EVENT_PREFIX["Removed"]):
do_index(
s3_client,
batch_processor,
event_name,
bucket=bucket,
etag=etag,
ext=ext,
key=key,
last_modified=last_modified,
version_id=version_id
)
continue
try:
head = retry_s3(
"head",
bucket,
key,
s3_client=s3_client,
version_id=version_id,
etag=etag
)
except botocore.exceptions.ClientError as first:
logger_.warning("head_object error: %s", first)
# "null" version sometimes results in 403s for buckets
# that have changed versioning, retry without it
if (first.response.get('Error', {}).get('Code') == "403"
and version_id == "null"):
try:
head = retry_s3(
"head",
bucket,
key,
s3_client=s3_client,
version_id=None,
etag=etag
)
except botocore.exceptions.ClientError as second:
# this will bypass the DLQ but that's the right thing to do
# as some listed objects may NEVER succeed head requests
# (e.g. foreign owner) and there's no reason to torpedo
# the whole batch (which might include good files)
logger_.warning("Retried head_object error: %s", second)
logger_.error("Fatal head_object, skipping event: %s", event_)
continue
# backfill fields based on the head_object
size = head["ContentLength"]
last_modified = last_modified or head["LastModified"].isoformat()
etag = head.get("etag") or etag
version_id = head.get("VersionId") or version_id
try:
text = maybe_get_contents(
bucket,
key,
ext,
etag=etag,
version_id=version_id,
s3_client=s3_client,
size=size
)
# we still want an entry for this document in elastic so that, e.g.,
# the file counts from elastic are correct
# these exceptions can happen for a variety of reasons (e.g. glacier
# storage class, index event arrives after delete has occurred, etc.)
# given how common they are, we shouldn't fail the batch for this
except Exception as exc: # pylint: disable=broad-except
text = ""
logger_.warning("Content extraction failed %s %s %s", bucket, key, exc)
do_index(
s3_client,
batch_processor,
event_name,
bucket=bucket,
etag=etag,
ext=ext,
key=key,
last_modified=last_modified,
size=size,
text=text,
version_id=version_id
)
except botocore.exceptions.ClientError as boto_exc:
if not should_retry_exception(boto_exc):
logger_.warning("Skipping non-fatal exception: %s", boto_exc)
continue
logger_.critical("Failed record: %s, %s", event, boto_exc)
raise boto_exc
# flush the queue
batch_processor.send_all()
def retry_s3(
operation,
bucket,
key,
size=None,
limit=None,
*,
etag,
version_id,
s3_client
):
"""retry head or get operation to S3 with; stop before we run out of time.
retry is necessary since, due to eventual consistency, we may not
always get the required version of the object.
"""
logger_ = get_quilt_logger()
if operation == "head":
function_ = s3_client.head_object
elif operation == "get":
function_ = s3_client.get_object
else:
raise ValueError(f"unexpected operation: {operation}")
# Keyword arguments to function_
arguments = {
"Bucket": bucket,
"Key": key
}
if operation == 'get' and size and limit:
# can only request range if file is not empty
arguments['Range'] = f"bytes=0-{min(size, limit) - 1}"
if version_id:
arguments['VersionId'] = version_id
elif etag:
arguments['IfMatch'] = etag
logger_.debug("Entering @retry: %s, %s", operation, arguments)
@retry(
# debug
reraise=True,
stop=stop_after_attempt(MAX_RETRY),
wait=wait_exponential(multiplier=2, min=4, max=10),
retry=(retry_if_exception(should_retry_exception))
)
def call():
"""local function so we can set stop_after_delay dynamically"""
# TODO: remove all this, stop_after_delay is not dynamically loaded anymore
return function_(**arguments)
return call()
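# A usage sketch for retry_s3 (the bucket, key, and sizes below are hypothetical):
# fetch at most `limit` bytes of an object, retrying on transient S3 errors.
def _example_retry_s3(s3_client):
    obj = retry_s3(
        "get",
        "example-bucket",
        "path/to/report.csv",
        size=2048,
        limit=1024,
        etag="",
        version_id=None,
        s3_client=s3_client,
    )
    return obj["Body"].read()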
|
|
from datetime import datetime
from random import shuffle
import gridfs
import pymongo
import pytz
from bson.codec_options import CodecOptions
from bson.objectid import ObjectId
from langdetect import detect
from upol_search_engine import settings
from upol_search_engine.utils import document, urls
def create_client():
client = pymongo.MongoClient(
settings.CONFIG.get('General', 'mongo_db_server'),
settings.CONFIG.getint('General', 'mongo_db_port'),
username=settings.CONFIG.get('General', 'mongo_db_user'),
password=settings.CONFIG.get('General', 'mongo_db_password'),
authSource='admin',
authMechanism='SCRAM-SHA-1',
maxPoolSize=None)
return client
def get_database(limit_domain, client):
database_name = urls.domain_replace_dots(limit_domain)
database = client[database_name]
return database
def get_stats_database(client):
return client["stats"]
def drop_database(db_name):
client = create_client()
client.drop_database(db_name)
def init(db):
"""Database init, create indexes"""
db['Urls'].create_index('visited')
db['Urls'].create_index('indexed')
db['Urls'].create_index('noindex')
db['Urls'].create_index('file')
db['Urls'].create_index('file_type')
db['Urls'].create_index('invalid')
db['Urls'].create_index('queued')
db['Urls'].create_index('timeout')
db['Urls'].create_index('alias')
db['Urls'].create_index('canonical_group')
db['Limiter'].create_index('ip')
db['Limiter'].create_index('domain', unique=True)
db['PageRank'].create_index('to_hash')
db['PageRank'].create_index([('from_hash', pymongo.DESCENDING),
('to_hash', pymongo.DESCENDING)], unique=True)
def _prepare_url_object(url, visited, queued, depth):
"""Prepare url object before inserting into database"""
url_object = {'_id': urls.hash(url),
'url': url,
'domain': urls.domain(url),
'depth': depth,
'visited': visited,
'queued': queued,
'alias': False,
'invalid': False,
'file': False,
'progress': {'discovered': str(datetime.utcnow())}}
return url_object
def insert_url(db, url, visited, queued, depth):
"""Insert url into db"""
url_object = _prepare_url_object(url, visited, queued, depth)
try:
result = db['Urls'].insert_one(url_object).inserted_id
except pymongo.errors.DuplicateKeyError:
return False
return result
def batch_insert_url(db, urls_with_depths, visited, queued):
"""Inser batch of urls into db"""
url_documents = []
for url in urls_with_depths:
url_object = _prepare_url_object(url.get('url'),
visited,
queued,
url.get('depth'))
url_documents.append(url_object)
try:
result = db['Urls'].insert_many(url_documents, ordered=False)
except pymongo.errors.BulkWriteError:
result = None
return result
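# A minimal sketch (hypothetical URLs) of the shape batch_insert_url expects:
# each element carries the URL and the crawl depth it was discovered at.
def _example_batch_insert(db):
    seed_urls = [
        {'url': 'https://www.upol.cz/', 'depth': 0},
        {'url': 'https://www.upol.cz/en/', 'depth': 1},
    ]
    return batch_insert_url(db, seed_urls, visited=False, queued=True)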
def batch_insert_pagerank_outlinks(db, from_url, to_urls):
"""Inser batch of outlinks into database"""
url_documents = []
for to_url in to_urls:
to_url = to_url.get('url')
url_object = {'from_hash': urls.hash(from_url),
'to_hash': urls.hash(to_url)}
url_documents.append(url_object)
try:
result = db['PageRank'].insert_many(url_documents, ordered=False)
except pymongo.errors.BulkWriteError:
result = None
return result
def delete_pagerank_edge_to(db, to_hash):
"""Delete edge from pagerank"""
result = db['PageRank'].delete_many({'to_hash': to_hash})
return result.deleted_count > 0
def delete_url(db, url):
"""Try to delete url from db, returns True if case of success"""
result = db['Urls'].delete_one({'_id': urls.hash(url)})
return result.deleted_count > 0
def get_or_create_canonical_group(db, text_hash):
"""Try to get canonical group with given hash.
Create new canonical g roup in case of fail.
Canonical group groups url with same text hash, not HTML tags."""
# TODO - Possible chance of optimalization here
canonical_group = list(db['CanonicalGroups'].find(
{'text_hash': text_hash}).limit(1))
# Create new one
if len(canonical_group) == 0:
return db['CanonicalGroups'].insert_one({'text_hash': text_hash}).inserted_id
else:
return canonical_group[0].get('_id')
def get_url(db, url):
document = db['Urls'].find_one({'_id': urls.hash(url)})
return document
def get_document_by_id(db, document_id):
document = db['Urls'].find_one({'_id': document_id})
return document
def get_batch_by_id(db, id_list):
result = db['Urls'].find({'_id': {'$in': id_list}})
return result
def select_representative_for_canonical_group(db, canonical_group):
"""Return id of URL which is suitable
as representative of canonical group"""
urls_representatives = db['Urls'].find(
{'canonical_group': ObjectId(canonical_group),
'alias': False,
'invalid': False})
representatives = []
for url in urls_representatives:
representatives.append(url.get('url'))
# Return hash of the shortest url
return urls.hash(min(representatives, key=len))
def update_canonical_group_representative(db, canonical_group, representative):
"""Update representative url of canonical group"""
return db['CanonicalGroups'].find_one_and_update(
{'_id': ObjectId(canonical_group)},
{'$set': {'representative': representative}})
def set_alias_visited_url(db, url):
url_hash = urls.hash(url)
url_addition = {}
url_addition['visited'] = True
url_addition['queued'] = False
url_addition['alias'] = True
url_addition['progress.last_visited'] = str(datetime.utcnow())
result = db['Urls'].find_one_and_update({'_id': url_hash},
{'$set': url_addition})
return result is not None
def set_visited_invalid_url(db, url, response, reason, is_file=False):
url_hash = urls.hash(url)
url_addition = {}
url_addition['visited'] = True
url_addition['queued'] = False
url_addition['invalid'] = True
url_addition['file'] = is_file
url_addition['invalid_reason'] = reason
url_addition['progress.last_visited'] = str(datetime.utcnow())
result = db['Urls'].find_one_and_update({'_id': url_hash},
{'$set': url_addition})
return result is not None
def _determine_type_of_redirect(response):
is_permanent_redirect = False
for history in response.history:
if history.is_permanent_redirect:
is_permanent_redirect = True
break
is_redirect = False
for history in response.history:
if history.is_redirect:
is_redirect = True
break
return is_redirect, is_permanent_redirect
def set_canonical_group_to_alias(db, original_url, canonical_group):
"""If there was redirect, set the canonical group to
the orginal alias url"""
modification = {'canonical_group': canonical_group}
return db['Urls'].find_one_and_update(
{'_id': urls.hash(original_url)}, {'$set': modification})
def _update_representatives_of_canonical_groups(db, canonical_group):
"""If insertion was successful update representative of canonical group"""
representative = select_representative_for_canonical_group(db,
canonical_group)
return update_canonical_group_representative(db,
canonical_group,
representative)
def _format_response_header(response, url_addition):
for key, value in response.headers.items():
url_addition['response.' + str(key).replace('$', '')] = str(value)
return url_addition
def set_visited_file_url(db, url, response, original_url=None):
"""Save file into database and set is as visited"""
    content_type = response.headers.get('Content-Type', '')
if 'application/pdf' in content_type:
file_type = 'pdf'
elif 'text/plain' in content_type:
file_type = 'txt'
else:
file_type = None
url_hash = urls.hash(url)
is_redirect, is_permanent_redirect = _determine_type_of_redirect(response)
url_addition = {}
# Pairing url with canonical group id
content_hash = urls.hash_document(response.content)
url_addition['canonical_group'] = get_or_create_canonical_group(
db,
content_hash)
url_addition['visited'] = True
url_addition['queued'] = False
url_addition['indexed'] = False
url_addition['noindex'] = False
url_addition['file'] = True
url_addition['file_type'] = file_type
url_addition['progress.last_visited'] = str(datetime.utcnow())
# GridFS connection
fs = gridfs.GridFS(db)
file_id = fs.put(response.content)
url_addition['content.binary'] = file_id
url_addition['content.hashes.content'] = content_hash
url_addition['response.elapsed'] = str(response.elapsed)
url_addition['response.is_redirect'] = is_redirect
url_addition['response.is_permanent_redirect'] = is_permanent_redirect
url_addition['response.status_code'] = response.status_code
url_addition['response.reason'] = response.reason
url_addition = _format_response_header(response, url_addition)
result = db['Urls'].find_one_and_update({'_id': url_hash},
{'$set': url_addition})
    # If there was a redirect, set the canonical group to the original alias url
if original_url is not None:
set_canonical_group_to_alias(db,
original_url,
url_addition['canonical_group'])
# If insertion was successful update representative of canonical group
if result is not None:
_update_representatives_of_canonical_groups(
db,
url_addition['canonical_group'])
return result is not None
def set_visited_url(db, url, response, soup, noindex, original_url=None):
"""Try to set url to visited and update other important informations"""
url_hash = urls.hash(url)
is_redirect, is_permanent_redirect = _determine_type_of_redirect(response)
url_addition = {}
# Pairing url with canonical group id
# Remove script tags from soup
for script in soup('script'):
script.extract()
text = soup.getText(separator='\n')
try:
url_addition['language'] = detect(text)
    except Exception:
# Fallback language
url_addition['language'] = 'cs'
text_hash = document.hash_document(
document.extract_document_text_for_hash(soup))
url_addition['canonical_group'] = get_or_create_canonical_group(db,
text_hash)
url_addition['visited'] = True
url_addition['queued'] = False
url_addition['indexed'] = False
url_addition['noindex'] = noindex
url_addition['progress.last_visited'] = str(datetime.utcnow())
url_addition['content.binary'] = response.content
url_addition['content.hashes.text'] = text_hash
url_addition['content.encoding'] = response.encoding
# Later detect language
url_addition['response.elapsed'] = str(response.elapsed)
url_addition['response.is_redirect'] = is_redirect
url_addition['response.is_permanent_redirect'] = is_permanent_redirect
url_addition['response.status_code'] = response.status_code
url_addition['response.reason'] = response.reason
url_addition = _format_response_header(response, url_addition)
result = db['Urls'].find_one_and_update({'_id': url_hash},
{'$set': url_addition})
    # If there was a redirect, set the canonical group to the original alias url
if original_url is not None:
set_canonical_group_to_alias(db,
original_url,
url_addition['canonical_group'])
# If insertion was successful update representative of canonical group
if result is not None:
_update_representatives_of_canonical_groups(
db,
url_addition['canonical_group'])
return result is not None
def is_first_run(db):
result = db['Urls'].find_one({'visited': True})
return result is None
def reset_visited_for_fast_recrawl(db):
result = db['Urls'].update_many(
{'visited': True, 'alias': False, 'invalid': False},
{'$set': {'visited': False}})
return result is not None
def set_queued_batch(db, list_url_hash):
"""Try to set batch of urls to queued"""
result = db['Urls'].update_many({'_id': {'$in': list_url_hash}},
{'$set': {'queued': True}})
return result is not None
def set_url_for_recrawl(db, url):
"""Set url for recrawl later"""
url_hash = urls.hash(url)
result = db['Urls'].find_one_and_update({'_id': url_hash},
{'$set': {'queued': False,
'visited': False}})
return result is not None
def set_timeout_url(db, url):
"""Try to set url as timouted"""
url_hash = urls.hash(url)
result = db['Urls'].find_one_and_update(
{'_id': url_hash},
{'$set': {
'queued': False,
'timeout.timeout': True,
'timeout.last_timeout': str(datetime.utcnow())
}})
return result is not None
def get_batch_url_for_crawl(db, size):
"""Return batch of url from db for crawl"""
db_batch = list(db['Urls'].aggregate([{'$match':
{'$and': [
{'visited': False},
{'queued': False},
{'timeout': {
'$exists': False}}]}},
{'$sample': {'size': size}}]))
if len(db_batch) != 0:
batch = []
for field in db_batch:
url = {'_id': field.get('_id'),
'url': field.get('url'),
'depth': field.get('depth')}
batch.append(url)
shuffle(batch)
return batch
else:
return None
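# Illustrative sketch (not part of the original module): one way the batch
# helpers above can be combined by a crawler worker. The fetch/parse step is
# represented by a hypothetical `fetch_and_parse` callable; everything else
# uses functions defined in this module.
def _example_crawl_round(db, batch_size, fetch_and_parse):
    """Claim a random batch of unvisited urls, mark it queued and process it."""
    batch = get_batch_url_for_crawl(db, batch_size)
    if batch is None:
        return 0
    # Mark the whole batch as queued so other workers skip it
    set_queued_batch(db, [item['_id'] for item in batch])
    for item in batch:
        # fetch_and_parse is an assumed helper returning (response, soup, noindex)
        response, soup, noindex = fetch_and_parse(item['url'])
        set_visited_url(db, item['url'], response, soup, noindex)
    return len(batch)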
def exists_url(db, url):
"""Return if url is exists in db"""
url_hash = urls.hash(url)
result = db['Urls'].find_one({'_id': url_hash})
return result is not None
def is_queued(db, url):
    """Check if given url is queued"""
    result = db['Urls'].find_one({'_id': urls.hash(url), 'queued': True})
    return result is not None
def should_crawler_wait(db):
"""Check if crawler can terminate or not"""
result = db['Urls'].find_one({'$or': [
{'$and': [
{'visited': False},
{'queued': True}]},
{'$and': [
{'visited': False},
{'queued': False},
{'timeout': {'$exists': False}}]}]})
return not ((result is None) or (len(result) == 0))
def get_crawler_stats(db):
stats = {}
stats['urls_count'] = db['Urls'].count()
stats['files_count'] = db['Urls'].find({'file': True, 'invalid': False, 'alias': False}).count()
stats['invalid_count'] = db['Urls'].find(
{'invalid': True, 'alias': False}).count()
stats['aliases_count'] = db['Urls'].find({'alias': True}).count()
stats['timeout_count'] = db['Urls'].find({'timeout.timeout': True}).count()
stats['urls_visited'] = db['Urls'].find({'visited': True}).count()
stats['urls_queued'] = db['Urls'].find(
{'$and': [{'visited': False}, {'queued': True}]}).count()
stats['urls_not_queued'] = db['Urls'].find(
{'$and': [{'visited': False},
{'queued': False},
{'timeout': {'$exists': False}}]}).count()
stats['number_of_domains'] = get_number_of_domains(db)
stats['number_of_servers'] = get_number_of_servers(db)
return stats
def get_number_of_domains(db):
return len(db['Limiter'].distinct('domain'))
def get_number_of_servers(db):
return len(db['Limiter'].distinct('ip'))
def insert_engine_start(client, task_id, crawler_settings):
db_stats = get_stats_database(client)
start_time = datetime.utcnow()
stats_object = {
'task_id': task_id,
'progress': {'start': start_time,
'end': None,
'result': 'running',
'stage': 'loading'},
'crawler': {'result': None,
'start': None,
'end': None},
'pagerank': {'result': None,
'start': None,
'end': None},
'indexer': {'result': None,
'start': None,
'end': None},
'limit_domain': crawler_settings.get('limit_domain'),
}
return db_stats['Stats'].insert_one(stats_object)
def insert_engine_finish(client, task_id, reason):
db_stats = get_stats_database(client)
end_time = datetime.utcnow()
return db_stats['Stats'].find_one_and_update(
{'task_id': task_id},
{'$set': {'progress.end': end_time,
'progress.result': reason}})
def insert_sub_task_start(client, task_id, subtask_name):
db_stats = get_stats_database(client)
start_time = datetime.utcnow()
return db_stats['Stats'].find_one_and_update(
{'task_id': task_id},
{'$set': {subtask_name + '.start': start_time,
subtask_name + '.result': "running",
'progress.stage': subtask_name}})
def insert_sub_task_finish(client, task_id, subtask_name, reason):
db_stats = get_stats_database(client)
end_time = datetime.utcnow()
return db_stats['Stats'].find_one_and_update(
{'task_id': task_id},
{'$set': {subtask_name + '.end': end_time,
subtask_name + '.result': reason}})
def update_crawler_progress(client, db, task_id):
db_stats = get_stats_database(client)
stats = get_crawler_stats(db)
return db_stats['Stats'].find_one_and_update(
{'task_id': task_id},
{'$set': {'crawler.progress': stats}})
def update_pagerank_progress(client, task_id, stage):
db_stats = get_stats_database(client)
start_time = datetime.utcnow()
return db_stats['Stats'].find_one_and_update(
{'task_id': task_id},
{'$set': {'pagerank.progress.' + stage: start_time}})
def update_indexer_progress(client, task_id, progress):
db_stats = get_stats_database(client)
actual = db_stats['Stats'].find_one({'task_id': task_id})
if actual is None:
return
indexer_progress = actual.get('indexer').get('progress')
if indexer_progress is None:
new = int(progress)
else:
new = int(indexer_progress.get('progress')) + int(progress)
return db_stats['Stats'].find_one_and_update(
{'task_id': task_id},
{'$set': {'indexer.progress.progress': new}})
def get_latest_stats(client):
db_stats = get_stats_database(client)
aware_times = db_stats['Stats'].with_options(codec_options=CodecOptions(
tz_aware=True,
tzinfo=pytz.timezone('Europe/Prague')))
result = aware_times.find().sort('$natural', pymongo.DESCENDING).limit(1)
if result.count() == 0:
return None
else:
return result[0]
def insert_or_iterate_search_words(db, words):
    for word in words:
        # Upsert keeps one document per word and atomically increments its count
        db['SearchWordsStats'].update_one(
            {'word': word}, {'$inc': {'count': 1}}, upsert=True)
def insert_search_query(db, query, language):
    db['SearchStats'].insert_one(
{'query': query, 'language': language, 'date': str(datetime.utcnow())})
def get_count_of_not_indexed(db):
count = db['Urls'].find({
'page.visited': True,
'page.noindex': False,
'page.file': False, # Just for now
'page.invalid': False,
'page.response.status_code': 200,
'page.indexed': False
}).count()
return count
# DEPRECATED
def get_batch_for_indexer(db, size):
pipeline = [
{'$lookup': {
'from': 'Urls',
'localField': 'representative',
'foreignField': '_id',
'as': 'page'
}},
{'$unwind': '$page'},
{'$match': {
'page.visited': True,
'page.noindex': False,
'page.file': False, # Just for now
'page.invalid': False,
'page.response.status_code': 200,
'page.indexed': False
}},
{'$project': {'representative': 1,
'page.url': 1,
'page.depth': 1,
'page.file': 1,
'page.language': 1,
'page.content.binary': 1,
'page.pagerank': 1}},
{'$limit': size}
]
url_batch = db['CanonicalGroups'].aggregate(
pipeline, allowDiskUse=True)
return url_batch
def get_batch_of_ids_for_indexer(db, size):
pipeline = [
{'$lookup': {
'from': 'Urls',
'localField': 'representative',
'foreignField': '_id',
'as': 'page'
}},
{'$unwind': '$page'},
{'$match': {
'page.visited': True,
'page.noindex': False,
'page.invalid': False,
'page.response.status_code': 200,
'page.indexed': False
}},
{'$project': {'representative': 1}},
{'$limit': size}
]
url_batch = db['CanonicalGroups'].aggregate(
pipeline, allowDiskUse=True)
return url_batch
def set_documents_as_indexed(db, document_hashes):
requests = []
for url_hash in document_hashes:
requests.append(pymongo.UpdateOne(
{'_id': url_hash}, {'$set': {'indexed': True}}))
return db['Urls'].bulk_write(requests)
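# Illustrative sketch (not part of the original module): how the indexer-facing
# helpers above fit together. `index_documents` stands in for whatever search
# backend consumes the documents; it is an assumed callable, not defined here.
def _example_indexer_round(db, size, index_documents):
    """Pull one batch of representative pages, index them and mark them done."""
    id_batch = [group.get('representative')
                for group in get_batch_of_ids_for_indexer(db, size)]
    if not id_batch:
        return 0
    documents = list(get_batch_by_id(db, id_batch))
    index_documents(documents)  # assumed external indexing step
    set_documents_as_indexed(db, id_batch)
    return len(id_batch)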
|
|
#!/usr/bin/python
# EVRYTHNG API Python Wrapper v0.92 - Vlad Trifa
# Engine 1.17
# Import all basic libs needed
import simplejson as json
import httplib, urllib
import csv
import logging
# Import some tools to measure execution time
import time
import corestats
# Set to 1 to force HTTPS
SECURE=1
# Which API Endpoint
hostname="api.evrythng.com"
# The Various API Keys
apiKey="YOUR_OPERATOR_KEY_HERE"
appId="YOUR_APP_ID"
userId="YOUR_USER_ID"
inApp=0
logLevel=0
# FIXME: add other log levels here - 0 is debug, 1 is info, 2 is warning, 3 is errors
def setLogLevel(level):
global logLevel
if level:
logging.basicConfig(level=logging.INFO,
format='%(levelname)-8s: %(message)s'
)
logging.info("Simple Log mode")
else:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)-8s %(filename)s:%(lineno)-4d: %(message)s',
datefmt='%m-%d %H:%M',
)
logging.info("Full log mode")
logLevel=level
### This is to setup the environments / app / user contexts
def setDomain(domain):
global hostname
hostname = domain
logging.info("API Endpoint: " + hostname)
def setOperator(key):
global apiKey
apiKey = key
logging.info("Scope: OPERATOR key: " + apiKey)
headProducts()
# FIXME: add testing right here if works, otherwise return error
def setApp(key,id):
    global apiKey,appId,inApp
apiKey = key
appId = id
logging.info("Scope: APP id "+ appId +" (APP API KEY: " + apiKey +")")
inApp=1
headProducts()
# FIXME: add testing right here if works, otherwise return error
def setUser(key,id):
    global apiKey,userId,inApp
apiKey = key
userId = id
logging.info("Scope: USER id "+ userId +" (APP API KEY: " + apiKey +")")
inApp=1
# FIXME: add testing right here if works, otherwise return error
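# Illustrative usage sketch (not part of the original wrapper): the intended
# call order is to pick a log level, point the wrapper at an engine and select
# a scope before issuing requests. Key/id values below are placeholders and the
# module name is assumed.
#
#   import evrythng_wrapper as evt
#   evt.setLogLevel(1)                          # 1 = simple/info, 0 = full/debug
#   evt.setDomain("api.evrythng.com")
#   evt.setOperator("YOUR_OPERATOR_KEY_HERE")   # or setApp(key, id) / setUser(key, id)
#   response, rt = evt.getAllThngs()
#   evt.printResponse(response)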
# Print the response of the API
def printResponse(response):
logging.info(response['headers'])
# pretty print response body, if any
if response['body'] != '':
logging.info("Response Content Body: \n"+ json.dumps(json.loads(response['body']), indent=4))
else:
logging.info("No response body.")
# Validates if the API response is the one expected
def validateResponse(response,validCode,errorCode):
# Check if successful
if response[0]['status'] != validCode:
logging.error(errorCode + ". Status code: " + str(response[0]['status']))
logging.error(response[0]['headers'])
logging.error(response[0]['body'])
# Sends a request to the EVRYTHNG Engine
def sendRequest(method, url, body='', headers=None, domain=None):
    global hostname
    # By default for all calls
    if not headers:
        headers = {}
    headers['Authorization'] = apiKey
    # Optional per-call host override (e.g. the redirector domain)
    host = domain if domain else hostname
    # Use HTTP or HTTPS Connection
    if SECURE:
        port=443
        conn = httplib.HTTPSConnection(host=host, port=port)
    else:
        port=80
        conn = httplib.HTTPConnection(host=host, port=port)
json_body=json.dumps(body)
# Build the HTTP request with the body, headers & co
conn.request(
method=method,
url='%s' % url,
body=json_body,
headers=headers
)
# Send the request
logging.info("-> ### %s %s:%s%s " % (method,hostname,port,url))
logging.info("-> Headers: " + json.dumps(headers))
logging.info("-> Payload: " + json_body)
# Send the request and time it
start = time.time()
full_response = conn.getresponse()
rt = float("%.2f" % ((time.time() - start)*1000)) # Response Time RT (milliseconds)
# Parse the response
response={} # Parse the HTTP response
response['body']=full_response.read()
response['headers']=full_response.getheaders()
response['status']=full_response.status
response['reason']=full_response.reason
conn.close()
# And confirm
logging.info("<- ### %s %s ###" % (response['status'],response['reason']))
logging.info("<- ### RT %0.5f [ms] ###" % rt)
if logLevel==0:
printResponse(response)
return [response, rt]
#---- Implementation of a few endpoints in the engine
def headThngs():
headers = {"Accept": "application/json"}
response = sendRequest(
method="HEAD",
url="/thngs",
headers=headers
)
validateResponse(response,200,"Problem HEAD /thngs")
return response
# GET the list of all THNGS for a user
def getAllThngs(scope=''):
headers = {"Accept": "application/json"}
if scope=='all':
scopedUrl="/thngs?app=all"
elif scope=='':
scopedUrl="/thngs"
else:
scopedUrl="/thngs/?app=%s" % scope
response = sendRequest(
method="GET",
url=scopedUrl,
headers=headers
)
return response
# Gets list of thngs using a filter (filter should be human readable)
def getThngs(filterString):
headers = {"Accept": "application/json"}
response = sendRequest(
method="GET",
url="/thngs/?filter=%s" % urllib.quote_plus(filterString),
headers=headers
)
validateResponse(response,200,"Problem GET /thngs")
return response
# POST a new THNG
def createThng(thngDocument,scope=''):
headers = {"Content-Type": "application/json"}
if scope=='all':
scopedUrl="/thngs?app=all"
elif scope=='':
scopedUrl="/thngs"
else:
scopedUrl="/thngs/?app=%s" % scope
response = sendRequest(
method="POST",
url=scopedUrl,
body=thngDocument,
headers=headers
)
validateResponse(response,201,"Problem POST /thngs")
return response
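# Illustrative sketch (not part of the original wrapper): a minimal thng
# document for createThng(). Field names follow the EVRYTHNG Thng model
# (name/description/tags); treat the exact payload as an assumption and adapt
# it to your project's schema.
#
#   my_thng = {
#       "name": "Demo Thng",
#       "description": "Created from the Python wrapper",
#       "tags": ["demo", "python"]
#   }
#   response, rt = createThng(my_thng, scope='all')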
# GET a THNG
def getThng(thngID):
headers = {"Accept": "application/json"}
response = sendRequest(
method="GET",
url="/thngs/%s" % thngID,
headers=headers
)
validateResponse(response,200,"Problem GET /thngs")
return response
# DELETE a THNG
def deleteThng(thngID):
headers = {"Accept": "application/json"}
response = sendRequest(
method="DELETE",
url="/thngs/%s" % thngID,
headers=headers
)
validateResponse(response,200,"Problem DELETE /products/{id}")
return response
# UPDATE a THNG
def updateThng(thngID,propDocument):
headers = {"Content-Type": "application/json"}
response = sendRequest(
method="PUT",
url="/thngs/%s" % thngID,
        body=propDocument,
headers=headers
)
validateResponse(response,200,"Problem PUT /thngs/{id}")
return response
def headProducts():
headers = {"Accept": "application/json"}
response = sendRequest(
method="HEAD",
url="/products",
headers=headers
)
validateResponse(response,200,"Problem HEAD /products")
return response
# POST a new PRODUCT
def createProduct(productDocument,scope='all'):
headers = {"Content-Type": "application/json"}
if scope=='all':
scopedUrl="/products?app=all"
elif scope=='':
scopedUrl="/products"
else:
scopedUrl="/products/?app=%s" % scope
response = sendRequest(
method="POST",
url=scopedUrl,
body=productDocument,
headers=headers
)
validateResponse(response,201,"Problem POST /products")
return response
# GET the list of all PRODUCTS in the current scope
def getProducts(scope=''):
headers = {"Accept": "application/json"}
if scope=='all':
scopedUrl="/products?app=all"
elif scope=='':
scopedUrl="/products"
else:
scopedUrl="/products/?app=%s" % scope
response = sendRequest(
method="GET",
url=scopedUrl,
headers=headers
)
validateResponse(response,200,"Problem GET /products")
return response
# GET a PRODUCT
def getProduct(productID):
headers = {}
response = sendRequest(
method="GET",
url="/products/%s" % productID,
headers=headers
)
return response
# DELETE a PRODUCT
def deleteProduct(productID):
headers = {}
response = sendRequest(
method="DELETE",
url="/products/%s" % productID,
headers=headers
)
validateResponse(response,200,"Problem DELETE /products")
return response
# UPDATE a PRODUCT
def updateProduct(productID,productDocument):
headers = {"Content-Type": "application/json"}
response = sendRequest(
method="PUT",
url="/thngs/%s" % productID,
body=productDocument,
headers=headers
)
return response
# UPDATE PROPERTIES of an entity, by default a THNG
def updateProperties(entityID,propertyDocument,entityPath='/thngs'):
headers = {"Content-Type": "application/json"}
response = sendRequest(
method="PUT",
url="%s/%s/properties" % (entityPath,entityID),
body=propertyDocument,
headers=headers
)
return response
# GET LIST of PROPERTIES of a THNG
# Returns array of form ...
def getProperties(entityID,entityPath='/thngs'):
    headers = {"Accept": "application/json"}
    response = sendRequest(
method="GET",
url="%s/%s/properties" % (entityPath,entityID),
headers=headers
)
return response
# GET HISTORY of a single PROPERTY of a THNG
# Returns array of form ...
def getProperty(entityID,propertyID,entityPath='/thngs'):
headers = {"Accept": "application/json"}
response = sendRequest(
method="GET",
url="%s/%s/properties%s" % (entityPath,entityID,propertyID),
headers=headers
)
return response
# GET list of LOCATIONS of a THNG
# Location *must* be an object: location={'latitude': 'some', 'longitude': 'stuff', 'timestamp': 234234}
def getLocations(thngID):
    headers = {"Accept": "application/json"}
    call_url="/thngs/%s/location" % thngID
# if from !=''
# call_url += "?from=%s" % from
# if to !=''
# call_url += "to=%s" % from
response = sendRequest(
method="GET",
url=call_url,
headers=headers
)
return response
# UPDATE LOCATION of a THNG
# Location *must* be an object: location={'latitude': 'some', 'longitude': 'stuff', 'timestamp': 234234}
def updateLocation(thngID,location):
headers = {"Content-Type": "application/json"}
response = sendRequest(
method="PUT",
url="/thngs/%s/location" % thngID,
body=location,
headers=headers
)
return response
# CREATE COLLECTION
def createCollection(collDocument):
headers = {"Content-Type": "application/json"}
response = sendRequest(
method="POST",
url="/collections",
body=collDocument,
headers=headers
)
validateResponse(response,201,"Problem POST /collections")
return response
# UPDATE COLLECTION
def updateCollection(collId,collDocument):
headers = {"Content-Type": "application/json"}
response = sendRequest(
method="PUT",
url="/collections/%s" % collId,
body=collDocument,
headers=headers
)
return response
# GET COLLECTION
def getCollection(collId,headers=''):
headers = {"Content-Type": "application/json"}
response = sendRequest(
method="GET",
url="/collections/%s" % collId,
headers=headers
)
validateResponse(response,200,"Problem GET /collections")
return response
# DELETE COLLECTION
def deleteCollection(collId,headers=''):
headers = {"Content-Type": "application/json"}
response = sendRequest(
method="DELETE",
url="/collections/%s" % collId,
headers=headers
)
validateResponse(response,200,"Problem DELETE /collections")
return response
# ADD THNGS to COLLECTION
def addToCollection(collID,thngList):
headers = {"Content-Type": "application/json"}
response = sendRequest(
method="PUT",
url="/collections/%s/thngs" % collID,
body=thngList,
headers=headers
)
validateResponse(response,200,"Problem PUT /collections/")
return response
# POST a redirection
def createRedirection(thngID,redirectionDocument):
headers = {"Content-Type": "application/json"}
response = sendRequest(
method="POST",
url="/thngs/%s/redirector" % thngID,
body=redirectionDocument,
headers=headers
)
validateResponse(response,201,"Problem POST /redirector")
return response
# DELETE a redirection
def deleteRedirection(thngID):
headers = {"Content-Type": "application/json"}
response = sendRequest(
method="DELETE",
url="/thngs/%s/redirector" % thngID,
headers=headers
)
validateResponse(response,200,"Problem DELETE /redirector")
return response
# GET the QR code
def getQr(thngID,size,format):
headers = {"Accept": format}
response = sendRequest(
method="GET",
url="/thngs/%s/redirector/qr?h=%s&w=%s" % (thngID,size,size),
headers=headers
)
validateResponse(response,200,"Problem GET /redirector/qr")
return response
# GET the QR code
def getQrTemplated(shortID,size,format,template):
# The endpoint to generate QR codes with templates
headers = {"Accept": format}
response = sendRequest(
method="GET",
url="/redirections/%s.qr?h=%s&w=%s&tpl=%s" % (shortID,size,size,template),
headers=headers,
body='',
domain="tn.gg"
)
validateResponse(response,200,"Problem GET /redirector/qr")
return response
def storeCollectionThngsinCSV(collectionID):
# Do the stuff and let go
    return None
############## ACTIONS
### Action types
# GET all action types
# FIXME Allow to give as param the projectId, the thng, the product, the tag(s)
def getActionTypes(scope=''):
if scope=='all':
scopedUrl="/actions?app=all"
elif scope=='':
scopedUrl="/actions"
else:
scopedUrl="/actions?app=%s" % scope
headers = {"Accept": "application/json"}
response = sendRequest(
method="GET",
url=scopedUrl,
headers=headers
)
validateResponse(response,200,"Problem GET /actions")
return response
# POST a new action type
def createActionType(actionTypeDocument,scope=''):
if scope=='all':
scopedUrl="/actions?app=all"
elif scope=='':
scopedUrl="/actions"
else:
scopedUrl="/actions?app=%s" % scope
headers = {"Content-Type": "application/json"}
response = sendRequest(
method="POST",
url=scopedUrl,
body=actionTypeDocument,
headers=headers
)
validateResponse(response,201,"Problem POST /actions")
return response
# DELETE an action type
def deleteActionType(actionType):
headers = {"Content-Type": "application/json"}
response = sendRequest(
method="DELETE",
url="/actions/%s" % actionType,
headers=headers
)
validateResponse(response,200,"Problem DELETE /actions/{Type}")
return response
### Actions
def createAction(actionType,actionDocument):
headers = {"Content-Type": "application/json"}
response = sendRequest(
method="POST",
url="/actions/%s" % actionType,
body=actionDocument,
headers=headers
)
validateResponse(response,201,"Problem POST /actions/{Type}")
return response
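# Illustrative sketch (not part of the original wrapper): creating a custom
# action against a thng. The action type and document fields below are
# placeholders -- adapt them to the action types defined in your account.
#
#   scan_action = {"thng": "THNG_ID_HERE", "customFields": {"source": "python"}}
#   response, rt = createAction("scans", scan_action)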
# Note: this returns at most 100 actions
# Allow to give as param the projectId, the thng, the product, the tag(s)
def getActions(actionType):
headers = {"Accept": "application/json"}
response = sendRequest(
method="GET",
url="/actions/%s" % actionType,
headers=headers
)
validateResponse(response,200,"Problem GET /actions/{Type}")
return response
############## APPLICATIONS
# GET /applications --- Returns the list of all Applications
def getAllApplications():
headers = {"Accept": "application/json"}
response = sendRequest(
method="GET",
url="/applications",
headers=headers
)
validateResponse(response,200,"Problem GET /applications")
return response
# GET /applications --- Reads an existing application
def getApplication(appId):
headers = {"Accept": "application/json"}
response = sendRequest(
method="GET",
url="/applications/%s" % appId,
headers=headers
)
validateResponse(response,200,"Problem GET /applications/{id}")
return response
# POST /applications --- Create a new application
def createApplication(appDocument):
headers = {"Content-Type": "application/json"}
response = sendRequest(
method="POST",
url="/applications",
body=appDocument,
headers=headers
)
validateResponse(response,201,"Problem POST /applications")
return response
# PUT /applications --- Updates an existing application
def updateApplication(appId,appDocument):
headers = {"Content-Type": "application/json"}
response = sendRequest(
method="PUT",
url="/applications/%s" % appId,
body=appDocument,
headers=headers
)
validateResponse(response,200,"Problem PUT /applications/{id}")
return response
# DELETE /applications/{id} --- Deletes an existing application
def deleteApplication(appId):
headers = {"Accept": "application/json"}
response = sendRequest(
method="DELETE",
url="/applications/%s" % appId,
headers=headers
)
validateResponse(response,200,"Problem DELETE /applications")
return response
############## USERS
# POST /auth/evrythng/users --- Create a new EVRYTHNG Anonymous user in an APP
def createAnonUser(userDocument):
headers = {"Content-Type": "application/json"}
response = sendRequest(
method="POST",
url="/auth/evrythng/users?anonymous=true",
body=userDocument,
headers=headers
)
# FIXME use correct API endpoint & signature
validateResponse(response,201,"Problem POST /auth/evrythng/users")
return response
# POST /auth/evrythng/users --- Create a new EVRYTHNG user in an APP
def createEvtUser(userDocument):
headers = {"Content-Type": "application/json"}
response = sendRequest(
method="POST",
url="/auth/evrythng/users",
body=userDocument,
headers=headers
)
validateResponse(response,201,"Problem POST /auth/evrythng/users")
return response
# POST /auth/evrythng/users/{id}/validate --- Validates an EVRYTHNG user in an APP
def validateEvtUser(userId,validationDocument):
headers = {"Content-Type": "application/json"}
response = sendRequest(
method="POST",
url="/auth/evrythng/users/%s/validate" % userId,
body=validationDocument,
headers=headers
)
validateResponse(response,200,"Problem POST /auth/evrythng/users")
return response
# POST /auth/evrythng --- Logs in an EVRYTHNG user
# loginDocument={"email":"XXX","password":"YYY"}
def loginEvtUser(loginDocument):
headers = {"Content-Type": "application/json"}
response = sendRequest(
method="POST",
url="/auth/evrythng",
body=loginDocument,
headers=headers
)
validateResponse(response,201,"Problem POST /auth/evrythng/")
return response
# POST FB user
# {"access": {"expires" : <Timestamp>,"token"": <Facebook-Token>}}
def createFbUser(userDocument):
headers = {"Content-Type": "application/json"}
    response = sendRequest(
        method="POST",
        url="/auth/facebook",
        body=userDocument,
        headers=headers
    )
    validateResponse(response,201,"Problem POST /auth/facebook")
return response
# POST /logout -- Logs out the user, done using the user api key
def logoutUser():
headers = {"Content-Type": "application/json"}
response = sendRequest(
method="POST",
url="/auth/all/logout",
headers=headers
)
# FIXME PUT instead of POST, 200 instead of 201
validateResponse(response,201,"Problem POST /auth/all/logout")
return response
# GET /users/X -- reads data about 1 user
def getUser(userId):
headers = {"Content-Type": "application/json"}
response = sendRequest(
method="GET",
url="/users/%s" % userId,
headers=headers
)
validateResponse(response,200,"Problem GET /users/X")
return response
# GET /users/ -- reads all users in a given app (or all apps)
def getUsers(appId=0):
headers = {"Accept": "application/json"}
if appId == 0:
userScope="/users"
else:
userScope="/users/?app="+str(appId)
response = sendRequest(
method="GET",
url=userScope,
headers=headers
)
validateResponse(response,200,"Problem GET /users")
return response
# TOOLS
# Reads a product file
def importProducts(filename):
products=[]
data = csv.reader(open(filename, 'rb'), delimiter=',', quotechar='"')
fields = data.next()
for row in data:
        products.append(dict(zip(fields, row)))
logging.info("Imported " + str(len(products)) + " products from:" + filename)
return products
# Not Done Not Tested
def importCsvData(filename):
output=[]
data = csv.reader(open(filename, 'rb'), delimiter=',', quotechar='"')
    fields = data.next() # Gets the header row (field names)
for row in data:
output.append(dict(zip(fields, row)))
logging.info("Imported " + str(len(output)) + " entities from:" + filename)
return output
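# Illustrative sketch (not part of the original wrapper): bulk-creating
# products from a CSV file by combining importProducts() with createProduct().
# Assumes the CSV header row matches the product fields expected by the API
# (e.g. name, description); the filename is a placeholder.
def _example_bulk_create_products(filename, scope='all'):
    created = []
    for product in importProducts(filename):
        response, rt = createProduct(product, scope)
        created.append(response)
    logging.info("Created " + str(len(created)) + " products from: " + filename)
    return created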
|
|
#!/usr/bin/env python
import calendar
import datetime as dt
import fileinput
import re
import smtplib
import subprocess
import time
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import urllib2
from bs4 import BeautifulSoup
username = "[email protected]"
password = "password"
def send_notice(match_date):
msg = MIMEMultipart()
msg['Subject'] = 'Download Notice for Match xxx'
msg['From'] = username
msg['To'] = '[email protected]'
text = MIMEText('Farca match date ' + str(match_date) + ' successfully sent to download list')
msg.attach(text)
s = smtplib.SMTP('smtp.gmail.com', 587)
s.ehlo()
s.starttls()
s.ehlo()
s.login(username, password)
s.sendmail(username, msg['To'], msg.as_string())
print('Email notice sent!')
s.quit()
def create_download_job(links):
filename = 'myFile' + str(int(calendar.timegm(time.gmtime()))) + '.crawljob'
f = open(filename, 'a')
for link in links:
f.write('\ntext=' + link + '\nautoStart=TRUE\n')
f.close()
return filename
def update_matchlist(match_date):
for line in fileinput.input('matches.txt', inplace=1):
try:
            if match_date == dt.datetime.strptime(line.strip().split(',')[0], '%d/%m/%Y').date():
                # rstrip the newline so print() does not add a second one
                print(line.rstrip('\n').replace(',0', ',1'))
            else:
                print(line.rstrip('\n'))
except:
pass
to_dropbox('matches.txt', '/')
def get_matches():
try:
subprocess.check_call(['./dropbox_uploader.sh', 'download', 'matches.txt'])
except:
print('download fail')
match_file = open('matches.txt', "r")
match_list = []
for row in match_file:
match_list.append(row.strip().split(','))
match_file.close()
for match in match_list:
try:
if dt.datetime.strptime(match[0], '%d/%m/%Y').date() < dt.datetime.now().date():
if match[1] == '0':
print(match[0])
return match[0]
except:
pass
return False
def to_dropbox(filename, directory):
try:
subprocess.check_call(['./dropbox_uploader.sh', 'upload', filename, directory])
except:
print('upload fail')
def thread_scraper(url):
"""
scrape thread for Sky Sports HD links on ul.to
"""
ua = 'Mozilla/5.0 (X11; Linux x86_64; rv:2.0.1) Gecko/20110506 Firefox/4.0.1'
req = urllib2.Request(url)
req.add_header('User-Agent', ua)
try:
html = (urllib2.urlopen(req)).read()
except BaseException:
print('Failed to read URL.')
exit(1)
soup = BeautifulSoup(html)
search = soup.findAll('div', attrs={'class': 'postrow has_after_content'})
index = 1
links = False
keyword_list = ['Sky Sports', 'English', '720p']
for post in search:
print('-----------------POST START---------------')
if index == 1:
# skip first post since it usually just has quotes of future posts and is annoying to parse
pass
elif index > 1:
if all(keyword in post.renderContents() for keyword in keyword_list):
print('===============found keywords===========')
# found the post we're looking for
# print post number
# print 'Index:' + str(index)
raw_links = post.findAll('a', href=True, text=re.compile(r'(http://ul.to/)'))
links = [link.get('href') for link in raw_links]
if links:
return links
index += 1
return links
def forum_scraper(url, match_date):
"""
scrape forum index to find match thread for given match date (if it exists)
"""
ua = 'Mozilla/5.0 (X11; Linux x86_64; rv:2.0.1) Gecko/20110506 Firefox/4.0.1'
req = urllib2.Request(url)
req.add_header('User-Agent', ua)
try:
html = (urllib2.urlopen(req)).read()
except BaseException:
print('Failed to read URL.')
exit(1)
soup = BeautifulSoup(html)
search = soup.findAll('div', attrs={'class': 'inner'})
index = 1
keyword_list = ['La Liga', 'Copa', 'UEFA Champions', 'UCL']
found_thread = False
for base in search:
title = base.find('h3', attrs={'class': 'threadtitle'}).a.string
details = base.find('div', attrs={'class': 'author'}).span.a['title']
if title:
if title.startswith('FUTBOL'):
if any(keyword in title for keyword in keyword_list) and 'Barcelona' in title:
match = re.search(r'(\d{2}/\d{2}/\d{4})', title)
if match:
date = dt.datetime.strptime(match.group(1), '%d/%m/%Y').date()
if date == match_date:
print(title.encode('latin-1'))
found_thread = 'http://forum.rojadirecta.es/' + base.find('h3', attrs={
'class': 'threadtitle'}).a.get('href').encode('latin-1')
break
match = re.search(r'(\d{2}/\d{2}/\d{2})', title)
if match:
date = dt.datetime.strptime(match.group(1), '%d/%m/%y').date()
if date == match_date:
found_thread = 'http://forum.rojadirecta.es/' + base.find('h3', attrs={
'class': 'threadtitle'}).a.get('href').encode('latin-1')
break
index += 1
return found_thread
def print_start_message():
print('\n\t Parsing forum for farca')
print('\n')
if __name__ == '__main__':
print_start_message()
match_date = get_matches()
if match_date:
match_date = dt.datetime.strptime(match_date, '%d/%m/%Y').date()
found_thread = False
if match_date:
for n in range(1, 25):
found_thread = forum_scraper(
'http://forum.rojadirecta.es/forumdisplay.php?15-Partidos-en-descarga-(Full-matches)/page' + str(n),
match_date)
if found_thread:
print(found_thread)
break
found_links = False
if found_thread:
found_links = thread_scraper(found_thread)
if found_links:
filename = create_download_job(found_links)
to_dropbox(filename, '/fw')
update_matchlist(match_date)
send_notice(match_date)
elif not found_links:
print('found match thread but not links')
elif not found_thread:
print('no match thread found')
|
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from kombu import Connection
from oslo_config import cfg
from st2common import log as logging
from st2common.constants.action import LIVEACTION_STATUS_SUCCEEDED, LIVEACTION_STATUS_FAILED
from st2common.constants.triggers import INTERNAL_TRIGGER_TYPES
from st2common.models.api.trace import TraceContext
from st2common.models.db.liveaction import LiveActionDB
from st2common.persistence.action import Action
from st2common.persistence.policy import Policy
from st2common import policies
from st2common.models.system.common import ResourceReference
from st2common.persistence.execution import ActionExecution
from st2common.services import trace as trace_service
from st2common.transport import consumers, liveaction, publishers
from st2common.transport import utils as transport_utils
from st2common.transport.reactor import TriggerDispatcher
__all__ = [
'Notifier',
'get_notifier'
]
LOG = logging.getLogger(__name__)
ACTIONUPDATE_WORK_Q = liveaction.get_queue('st2.notifiers.work',
routing_key=publishers.UPDATE_RK)
ACTION_COMPLETE_STATES = [LIVEACTION_STATUS_FAILED, LIVEACTION_STATUS_SUCCEEDED]
ACTION_SENSOR_ENABLED = cfg.CONF.action_sensor.enable
# XXX: Fix this nasty positional dependency.
ACTION_TRIGGER_TYPE = INTERNAL_TRIGGER_TYPES['action'][0]
NOTIFY_TRIGGER_TYPE = INTERNAL_TRIGGER_TYPES['action'][1]
class Notifier(consumers.MessageHandler):
message_type = LiveActionDB
def __init__(self, connection, queues, trigger_dispatcher=None):
super(Notifier, self).__init__(connection, queues)
self._trigger_dispatcher = trigger_dispatcher
self._notify_trigger = ResourceReference.to_string_reference(
pack=NOTIFY_TRIGGER_TYPE['pack'],
name=NOTIFY_TRIGGER_TYPE['name'])
self._action_trigger = ResourceReference.to_string_reference(
pack=ACTION_TRIGGER_TYPE['pack'],
name=ACTION_TRIGGER_TYPE['name'])
def process(self, liveaction):
LOG.debug('Processing liveaction. %s', liveaction)
if liveaction.status not in ACTION_COMPLETE_STATES:
return
execution_id = self._get_execution_id_for_liveaction(liveaction)
if not execution_id:
LOG.exception('Execution object corresponding to LiveAction %s not found.',
str(liveaction.id))
return None
self._apply_post_run_policies(liveaction=liveaction, execution_id=execution_id)
if liveaction.notify is not None:
self._post_notify_triggers(liveaction=liveaction, execution_id=execution_id)
self._post_generic_trigger(liveaction=liveaction, execution_id=execution_id)
def _get_execution_id_for_liveaction(self, liveaction):
execution = ActionExecution.get(liveaction__id=str(liveaction.id))
if not execution:
return None
return str(execution.id)
def _post_notify_triggers(self, liveaction=None, execution_id=None):
notify = getattr(liveaction, 'notify', None)
if not notify:
return
if notify.on_complete:
self._post_notify_subsection_triggers(
liveaction=liveaction, execution_id=execution_id,
notify_subsection=notify.on_complete,
default_message_suffix='completed.')
if liveaction.status == LIVEACTION_STATUS_SUCCEEDED and notify.on_success:
self._post_notify_subsection_triggers(
liveaction=liveaction, execution_id=execution_id,
notify_subsection=notify.on_success,
default_message_suffix='succeeded.')
if liveaction.status == LIVEACTION_STATUS_FAILED and notify.on_failure:
self._post_notify_subsection_triggers(
liveaction=liveaction, execution_id=execution_id,
notify_subsection=notify.on_failure,
default_message_suffix='failed.')
def _post_notify_subsection_triggers(self, liveaction=None, execution_id=None,
notify_subsection=None,
default_message_suffix=None):
routes = (getattr(notify_subsection, 'routes') or
getattr(notify_subsection, 'channels', None))
if routes and len(routes) >= 1:
payload = {}
message = notify_subsection.message or (
'Action ' + liveaction.action + ' ' + default_message_suffix)
data = notify_subsection.data or {} # XXX: Handle Jinja
            # At this point convert result to a string. This restricts the rules
            # engine's ability to introspect the result. On the other hand, at least
            # a JSON-usable result is sent as part of the notification. If Jinja is
            # required to convert to a string representation it uses str(...), which
            # makes it impossible to parse the result as JSON any longer.
# TODO: Use to_serializable_dict
data['result'] = json.dumps(liveaction.result)
payload['message'] = message
payload['data'] = data
payload['execution_id'] = execution_id
payload['status'] = liveaction.status
payload['start_timestamp'] = str(liveaction.start_timestamp)
payload['end_timestamp'] = str(liveaction.end_timestamp)
payload['action_ref'] = liveaction.action
payload['runner_ref'] = self._get_runner_ref(liveaction.action)
trace_context = self._get_trace_context(execution_id=execution_id)
failed_routes = []
for route in routes:
try:
payload['route'] = route
# Deprecated. Only for backward compatibility reasons.
payload['channel'] = route
LOG.debug('POSTing %s for %s. Payload - %s.', NOTIFY_TRIGGER_TYPE['name'],
liveaction.id, payload)
self._trigger_dispatcher.dispatch(self._notify_trigger, payload=payload,
trace_context=trace_context)
                except Exception:
failed_routes.append(route)
if len(failed_routes) > 0:
raise Exception('Failed notifications to routes: %s' % ', '.join(failed_routes))
def _get_trace_context(self, execution_id):
trace_db = trace_service.get_trace_db_by_action_execution(
action_execution_id=execution_id)
if trace_db:
return TraceContext(id_=str(trace_db.id), trace_tag=trace_db.trace_tag)
# If no trace_context is found then do not create a new one here. If necessary
# it shall be created downstream. Sure this is impl leakage of some sort.
return None
def _post_generic_trigger(self, liveaction=None, execution_id=None):
if not ACTION_SENSOR_ENABLED:
LOG.debug('Action trigger is disabled, skipping trigger dispatch...')
return
payload = {'execution_id': execution_id,
'status': liveaction.status,
'start_timestamp': str(liveaction.start_timestamp),
# deprecate 'action_name' at some point and switch to 'action_ref'
'action_name': liveaction.action,
'action_ref': liveaction.action,
'runner_ref': self._get_runner_ref(liveaction.action),
'parameters': liveaction.get_masked_parameters(),
'result': liveaction.result}
# Use execution_id to extract trace rather than liveaction. execution_id
# will look-up an exact TraceDB while liveaction depending on context
# may not end up going to the DB.
trace_context = self._get_trace_context(execution_id=execution_id)
LOG.debug('POSTing %s for %s. Payload - %s. TraceContext - %s',
ACTION_TRIGGER_TYPE['name'], liveaction.id, payload, trace_context)
self._trigger_dispatcher.dispatch(self._action_trigger, payload=payload,
trace_context=trace_context)
def _apply_post_run_policies(self, liveaction=None, execution_id=None):
# Apply policies defined for the action.
for policy_db in Policy.query(resource_ref=liveaction.action):
driver = policies.get_driver(policy_db.ref,
policy_db.policy_type,
**policy_db.parameters)
try:
liveaction = driver.apply_after(liveaction)
            except Exception:
LOG.exception('An exception occurred while applying policy "%s".', policy_db.ref)
def _get_runner_ref(self, action_ref):
"""
Retrieve a runner reference for the provided action.
:rtype: ``str``
"""
action = Action.get_by_ref(action_ref)
return action['runner_type']['name']
def get_notifier():
with Connection(transport_utils.get_messaging_urls()) as conn:
return Notifier(conn, [ACTIONUPDATE_WORK_Q], trigger_dispatcher=TriggerDispatcher(LOG))
|
|
"""
====================================================================
K-means clustering and vector quantization (:mod:`scipy.cluster.vq`)
====================================================================
Provides routines for k-means clustering, generating code books
from k-means models, and quantizing vectors by comparing them with
centroids in a code book.
.. autosummary::
:toctree: generated/
whiten -- Normalize a group of observations so each feature has unit variance
vq -- Calculate code book membership of a set of observation vectors
kmeans -- Performs k-means on a set of observation vectors forming k clusters
kmeans2 -- A different implementation of k-means with more methods
-- for initializing centroids
Background information
======================
The k-means algorithm takes as input the number of clusters to
generate, k, and a set of observation vectors to cluster. It
returns a set of centroids, one for each of the k clusters. An
observation vector is classified with the cluster number or
centroid index of the centroid closest to it.
A vector v belongs to cluster i if it is closer to centroid i than
any other centroids. If v belongs to i, we say centroid i is the
dominating centroid of v. The k-means algorithm tries to
minimize distortion, which is defined as the sum of the squared distances
between each observation vector and its dominating centroid. Each
step of the k-means algorithm refines the choices of centroids to
reduce distortion. The change in distortion is used as a
stopping criterion: when the change is lower than a threshold, the
k-means algorithm is not making sufficient progress and
terminates. One can also define a maximum number of iterations.
Since vector quantization is a natural application for k-means,
information theory terminology is often used. The centroid index
or cluster index is also referred to as a "code" and the table
mapping codes to centroids and vice versa is often referred as a
"code book". The result of k-means, a set of centroids, can be
used to quantize vectors. Quantization aims to find an encoding of
vectors that reduces the expected distortion.
All routines expect obs to be a M by N array where the rows are
the observation vectors. The codebook is a k by N array where the
i'th row is the centroid of code word i. The observation vectors
and centroids have the same feature dimension.
As an example, suppose we wish to compress a 24-bit color image
(each pixel is represented by one byte for red, one for blue, and
one for green) before sending it over the web. By using a smaller
8-bit encoding, we can reduce the amount of data by two
thirds. Ideally, the colors for each of the 256 possible 8-bit
encoding values should be chosen to minimize distortion of the
color. Running k-means with k=256 generates a code book of 256
codes, which fills up all possible 8-bit sequences. Instead of
sending a 3-byte value for each pixel, the 8-bit centroid index
(or code word) of the dominating centroid is transmitted. The code
book is also sent over the wire so each 8-bit code can be
translated back to a 24-bit pixel value representation. If the
image of interest was of an ocean, we would expect many 24-bit
blues to be represented by 8-bit codes. If it was an image of a
human face, more flesh tone colors would be represented in the
code book.
"""
from __future__ import division, print_function, absolute_import
__docformat__ = 'restructuredtext'
__all__ = ['whiten', 'vq', 'kmeans', 'kmeans2']
# TODO:
# - implements high level method for running several times k-means with
# different initialization
# - warning: what happens if different number of clusters ? For now, emit a
# warning, but it is not great, because I am not sure it really make sense to
# succeed in this case (maybe an exception is better ?)
import warnings
import numpy as np
from scipy._lib._util import _asarray_validated
from scipy._lib import _numpy_compat
from . import _vq
class ClusterError(Exception):
pass
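# Illustrative sketch (not part of the original module): the typical pipeline
# described in the module docstring -- whiten the observations, build a code
# book with kmeans, then quantize with vq. Kept as a comment so nothing runs at
# import time; the feature array is made-up data.
#
#   >>> import numpy as np
#   >>> from scipy.cluster.vq import whiten, kmeans, vq
#   >>> features = np.random.rand(100, 3)        # 100 observations, 3 features
#   >>> whitened = whiten(features)              # unit variance per feature
#   >>> code_book, distortion = kmeans(whitened, 4)
#   >>> codes, dists = vq(whitened, code_book)   # nearest centroid per row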
def whiten(obs, check_finite=True):
"""
Normalize a group of observations on a per feature basis.
Before running k-means, it is beneficial to rescale each feature
dimension of the observation set with whitening. Each feature is
divided by its standard deviation across all observations to give
it unit variance.
Parameters
----------
obs : ndarray
Each row of the array is an observation. The
columns are the features seen during each observation.
>>> # f0 f1 f2
>>> obs = [[ 1., 1., 1.], #o0
... [ 2., 2., 2.], #o1
... [ 3., 3., 3.], #o2
... [ 4., 4., 4.]] #o3
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
Returns
-------
result : ndarray
Contains the values in `obs` scaled by the standard deviation
of each column.
Examples
--------
>>> from scipy.cluster.vq import whiten
>>> features = np.array([[1.9, 2.3, 1.7],
... [1.5, 2.5, 2.2],
... [0.8, 0.6, 1.7,]])
>>> whiten(features)
array([[ 4.17944278, 2.69811351, 7.21248917],
[ 3.29956009, 2.93273208, 9.33380951],
[ 1.75976538, 0.7038557 , 7.21248917]])
"""
obs = _asarray_validated(obs, check_finite=check_finite)
std_dev = np.std(obs, axis=0)
zero_std_mask = std_dev == 0
if zero_std_mask.any():
std_dev[zero_std_mask] = 1.0
warnings.warn("Some columns have standard deviation zero. "
"The values of these columns will not change.",
RuntimeWarning)
return obs / std_dev
def vq(obs, code_book, check_finite=True):
"""
Assign codes from a code book to observations.
Assigns a code from a code book to each observation. Each
observation vector in the 'M' by 'N' `obs` array is compared with the
centroids in the code book and assigned the code of the closest
centroid.
The features in `obs` should have unit variance, which can be
achieved by passing them through the whiten function. The code
book can be created with the k-means algorithm or a different
encoding algorithm.
Parameters
----------
obs : ndarray
Each row of the 'M' x 'N' array is an observation. The columns are
the "features" seen during each observation. The features must be
whitened first using the whiten function or something equivalent.
code_book : ndarray
The code book is usually generated using the k-means algorithm.
Each row of the array holds a different code, and the columns are
the features of the code.
>>> # f0 f1 f2 f3
>>> code_book = [
... [ 1., 2., 3., 4.], #c0
... [ 1., 2., 3., 4.], #c1
... [ 1., 2., 3., 4.]] #c2
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
Returns
-------
code : ndarray
A length M array holding the code book index for each observation.
dist : ndarray
The distortion (distance) between the observation and its nearest
code.
Examples
--------
>>> from numpy import array
>>> from scipy.cluster.vq import vq
>>> code_book = array([[1.,1.,1.],
... [2.,2.,2.]])
>>> features = array([[ 1.9,2.3,1.7],
... [ 1.5,2.5,2.2],
... [ 0.8,0.6,1.7]])
>>> vq(features,code_book)
(array([1, 1, 0],'i'), array([ 0.43588989, 0.73484692, 0.83066239]))
"""
obs = _asarray_validated(obs, check_finite=check_finite)
code_book = _asarray_validated(code_book, check_finite=check_finite)
ct = np.common_type(obs, code_book)
c_obs = obs.astype(ct, copy=False)
if code_book.dtype != ct:
c_code_book = code_book.astype(ct)
else:
c_code_book = code_book
if ct in (np.float32, np.float64):
results = _vq.vq(c_obs, c_code_book)
else:
results = py_vq(obs, code_book)
return results
def py_vq(obs, code_book, check_finite=True):
""" Python version of vq algorithm.
    The algorithm computes the Euclidean distance between each
observation and every frame in the code_book.
Parameters
----------
obs : ndarray
Expects a rank 2 array. Each row is one observation.
code_book : ndarray
        Code book to use. Same format as obs. Should have the same number of
        features (e.g. columns) as obs.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
Returns
-------
code : ndarray
        code[i] gives the label of the ith observation; that is, its code is
        code_book[code[i]].
    min_dist : ndarray
min_dist[i] gives the distance between the ith observation and its
corresponding code.
Notes
-----
This function is slower than the C version but works for
all input types. If the inputs have the wrong types for the
C versions of the function, this one is called as a last resort.
It is about 20 times slower than the C version.
"""
obs = _asarray_validated(obs, check_finite=check_finite)
code_book = _asarray_validated(code_book, check_finite=check_finite)
# n = number of observations
# d = number of features
if np.ndim(obs) == 1:
if not np.ndim(obs) == np.ndim(code_book):
raise ValueError(
"Observation and code_book should have the same rank")
else:
return _py_vq_1d(obs, code_book)
else:
(n, d) = np.shape(obs)
# code books and observations should have same number of features and same
# shape
if not np.ndim(obs) == np.ndim(code_book):
raise ValueError("Observation and code_book should have the same rank")
elif not d == code_book.shape[1]:
raise ValueError("Code book(%d) and obs(%d) should have the same "
"number of features (eg columns)""" %
(code_book.shape[1], d))
code = np.zeros(n, dtype=int)
min_dist = np.zeros(n)
for i in range(n):
dist = np.sum((obs[i] - code_book) ** 2, 1)
code[i] = np.argmin(dist)
min_dist[i] = dist[code[i]]
return code, np.sqrt(min_dist)
def _py_vq_1d(obs, code_book):
""" Python version of vq algorithm for rank 1 only.
Parameters
----------
obs : ndarray
Expects a rank 1 array. Each item is one observation.
code_book : ndarray
        Code book to use. Same format as obs. Should be rank 1 too.
Returns
-------
code : ndarray
        code[i] gives the label of the ith observation; that is, its code is
        code_book[code[i]].
    min_dist : ndarray
min_dist[i] gives the distance between the ith observation and its
corresponding code.
"""
raise RuntimeError("_py_vq_1d buggy, do not use rank 1 arrays for now")
n = obs.size
nc = code_book.size
dist = np.zeros((n, nc))
for i in range(nc):
dist[:, i] = np.sum(obs - code_book[i])
print(dist)
code = np.argmin(dist)
min_dist = dist[code]
return code, np.sqrt(min_dist)
def py_vq2(obs, code_book, check_finite=True):
"""2nd Python version of vq algorithm.
    The algorithm simply computes the Euclidean distance between each
    observation and every frame in the code_book.
Parameters
----------
obs : ndarray
Expect a rank 2 array. Each row is one observation.
code_book : ndarray
        Code book to use. Same format as obs. Should have the same number of
        features (e.g. columns) as obs.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
Returns
-------
code : ndarray
        code[i] gives the label of the ith observation; that is, its code is
        code_book[code[i]].
    min_dist : ndarray
min_dist[i] gives the distance between the ith observation and its
corresponding code.
Notes
-----
This could be faster when number of codebooks is small, but it
becomes a real memory hog when codebook is large. It requires
N by M by O storage where N=number of obs, M = number of
features, and O = number of codes.
"""
obs = _asarray_validated(obs, check_finite=check_finite)
code_book = _asarray_validated(code_book, check_finite=check_finite)
d = np.shape(obs)[1]
# code books and observations should have same number of features
if not d == code_book.shape[1]:
raise ValueError("""
code book(%d) and obs(%d) should have the same
number of features (eg columns)""" % (code_book.shape[1], d))
diff = obs[np.newaxis, :, :] - code_book[:,np.newaxis,:]
dist = np.sqrt(np.sum(diff * diff, -1))
code = np.argmin(dist, 0)
min_dist = np.minimum.reduce(dist, 0)
# The next line I think is equivalent and should be faster than the one
# above, but in practice didn't seem to make much difference:
# min_dist = choose(code,dist)
return code, min_dist
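# Shape sketch for the broadcasting used in py_vq2 above (a note, not executed
# code): with N observations, M features and O codes,
#     obs[np.newaxis, :, :]        -> shape (1, N, M)
#     code_book[:, np.newaxis, :]  -> shape (O, 1, M)
#     diff                         -> shape (O, N, M)
# which is the N by M by O intermediate storage mentioned in the Notes section.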
def _kmeans(obs, guess, thresh=1e-5):
""" "raw" version of k-means.
Returns
-------
code_book
the lowest distortion codebook found.
avg_dist
        the average distance an observation is from a code in the book.
Lower means the code_book matches the data better.
See Also
--------
kmeans : wrapper around k-means
Examples
--------
Note: not whitened in this example.
>>> from numpy import array
>>> from scipy.cluster.vq import _kmeans
>>> features = array([[ 1.9,2.3],
... [ 1.5,2.5],
... [ 0.8,0.6],
... [ 0.4,1.8],
... [ 1.0,1.0]])
>>> book = array((features[0],features[2]))
>>> _kmeans(features,book)
(array([[ 1.7 , 2.4 ],
[ 0.73333333, 1.13333333]]), 0.40563916697728591)
"""
code_book = np.array(guess, copy=True)
avg_dist = []
diff = np.inf
while diff > thresh:
nc = code_book.shape[0]
# compute membership and distances between obs and code_book
obs_code, distort = vq(obs, code_book)
avg_dist.append(np.mean(distort, axis=-1))
# recalc code_book as centroids of associated obs
if(diff > thresh):
code_book, has_members = _vq.update_cluster_means(obs, obs_code, nc)
code_book = code_book.compress(has_members, axis=0)
if len(avg_dist) > 1:
diff = avg_dist[-2] - avg_dist[-1]
return code_book, avg_dist[-1]
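# Convergence note for _kmeans above: avg_dist records the mean distortion
# after each pass, and the loop exits once the improvement
# avg_dist[-2] - avg_dist[-1] is no larger than thresh (diff starts at inf, so
# at least one pass always runs).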
def kmeans(obs, k_or_guess, iter=20, thresh=1e-5, check_finite=True):
"""
Performs k-means on a set of observation vectors forming k clusters.
The k-means algorithm adjusts the centroids until sufficient
progress cannot be made, i.e. the change in distortion since
the last iteration is less than some threshold. This yields
a code book mapping centroids to codes and vice versa.
Distortion is defined as the sum of the squared differences
between the observations and the corresponding centroid.
Parameters
----------
obs : ndarray
Each row of the M by N array is an observation vector. The
columns are the features seen during each observation.
The features must be whitened first with the `whiten` function.
k_or_guess : int or ndarray
The number of centroids to generate. A code is assigned to
each centroid, which is also the row index of the centroid
in the code_book matrix generated.
The initial k centroids are chosen by randomly selecting
observations from the observation matrix. Alternatively,
passing a k by N array specifies the initial k centroids.
iter : int, optional
The number of times to run k-means, returning the codebook
with the lowest distortion. This argument is ignored if
initial centroids are specified with an array for the
``k_or_guess`` parameter. This parameter does not represent the
number of iterations of the k-means algorithm.
thresh : float, optional
Terminates the k-means algorithm if the change in
distortion since the last k-means iteration is less than
or equal to thresh.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
Returns
-------
codebook : ndarray
A k by N array of k centroids. The i'th centroid
codebook[i] is represented with the code i. The centroids
and codes generated represent the lowest distortion seen,
not necessarily the globally minimal distortion.
distortion : float
The distortion between the observations passed and the
centroids generated.
See Also
--------
kmeans2 : a different implementation of k-means clustering
with more methods for generating initial centroids but without
using a distortion change threshold as a stopping criterion.
whiten : must be called prior to passing an observation matrix
to kmeans.
Examples
--------
>>> from numpy import array
>>> from scipy.cluster.vq import vq, kmeans, whiten
>>> import matplotlib.pyplot as plt
>>> features = array([[ 1.9,2.3],
... [ 1.5,2.5],
... [ 0.8,0.6],
... [ 0.4,1.8],
... [ 0.1,0.1],
... [ 0.2,1.8],
... [ 2.0,0.5],
... [ 0.3,1.5],
... [ 1.0,1.0]])
>>> whitened = whiten(features)
>>> book = np.array((whitened[0],whitened[2]))
>>> kmeans(whitened,book)
(array([[ 2.3110306 , 2.86287398], # random
[ 0.93218041, 1.24398691]]), 0.85684700941625547)
>>> from numpy import random
>>> random.seed((1000,2000))
>>> codes = 3
>>> kmeans(whitened,codes)
(array([[ 2.3110306 , 2.86287398], # random
[ 1.32544402, 0.65607529],
[ 0.40782893, 2.02786907]]), 0.5196582527686241)
>>> # Create 50 datapoints in two clusters a and b
>>> pts = 50
>>> a = np.random.multivariate_normal([0, 0], [[4, 1], [1, 4]], size=pts)
>>> b = np.random.multivariate_normal([30, 10],
... [[10, 2], [2, 1]],
... size=pts)
>>> features = np.concatenate((a, b))
>>> # Whiten data
>>> whitened = whiten(features)
>>> # Find 2 clusters in the data
>>> codebook, distortion = kmeans(whitened, 2)
>>> # Plot whitened data and cluster centers in red
>>> plt.scatter(whitened[:, 0], whitened[:, 1])
>>> plt.scatter(codebook[:, 0], codebook[:, 1], c='r')
>>> plt.show()
"""
obs = _asarray_validated(obs, check_finite=check_finite)
if int(iter) < 1:
raise ValueError('iter must be at least 1.')
# Determine whether a count (scalar) or an initial guess (array) was passed.
k = None
guess = None
try:
k = int(k_or_guess)
except TypeError:
guess = _asarray_validated(k_or_guess, check_finite=check_finite)
if guess is not None:
if guess.size < 1:
raise ValueError("Asked for 0 cluster ? initial book was %s" %
guess)
result = _kmeans(obs, guess, thresh=thresh)
else:
if k != k_or_guess:
raise ValueError('if k_or_guess is a scalar, it must be an integer')
# initialize best distance value to a large value
best_dist = np.inf
No = obs.shape[0]
k = k_or_guess
if k < 1:
raise ValueError("Asked for 0 cluster ? ")
for i in range(iter):
# the initial code book is randomly selected from observations
k_random_indices = np.random.randint(0, No, k)
if np.any(_numpy_compat.unique(k_random_indices,
return_counts=True)[1] > 1):
# randint can give duplicates, which is incorrect. Only fix
# the issue if it occurs, to not change results for users who
# use a random seed and get no duplicates.
k_random_indices = np.random.permutation(No)[:k]
guess = np.take(obs, k_random_indices, 0)
book, dist = _kmeans(obs, guess, thresh=thresh)
if dist < best_dist:
best_book = book
best_dist = dist
result = best_book, best_dist
return result
def _kpoints(data, k):
"""Pick k points at random in data (one row = one observation).
    This is done by taking the first k values of a random permutation of
    0..N-1, where N is the number of observations.
Parameters
----------
data : ndarray
        Expects a rank 1 or 2 array. Rank 1 arrays are assumed to describe
        one-dimensional data; rank 2 arrays describe multidimensional data,
        in which case one row is one observation.
k : int
Number of samples to generate.
"""
if data.ndim > 1:
n = data.shape[0]
else:
n = data.size
p = np.random.permutation(n)
    x = data[p[:k]].copy()  # row selection; also works for rank 1 data
return x
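# Illustrative note for _kpoints above (not executed): with data of shape
# (100, 3) and k = 2, np.random.permutation(100)[:2] yields two distinct row
# indices, so the two returned centroids are two distinct observations.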
def _krandinit(data, k):
"""Returns k samples of a random variable which parameters depend on data.
More precisely, it returns k observations sampled from a Gaussian random
variable which mean and covariances are the one estimated from data.
Parameters
----------
data : ndarray
        Expects a rank 1 or 2 array. Rank 1 arrays are assumed to describe
        one-dimensional data; rank 2 arrays describe multidimensional data,
        in which case one row is one observation.
k : int
Number of samples to generate.
"""
def init_rank1(data):
mu = np.mean(data)
cov = np.cov(data)
x = np.random.randn(k)
x *= np.sqrt(cov)
x += mu
return x
def init_rankn(data):
mu = np.mean(data, 0)
cov = np.atleast_2d(np.cov(data, rowvar=0))
# k rows, d cols (one row = one obs)
# Generate k sample of a random variable ~ Gaussian(mu, cov)
x = np.random.randn(k, mu.size)
x = np.dot(x, np.linalg.cholesky(cov).T) + mu
return x
def init_rank_def(data):
# initialize when the covariance matrix is rank deficient
mu = np.mean(data, axis=0)
_, s, vh = np.linalg.svd(data - mu, full_matrices=False)
x = np.random.randn(k, s.size)
sVh = s[:, None] * vh / np.sqrt(data.shape[0] - 1)
x = np.dot(x, sVh) + mu
return x
nd = np.ndim(data)
if nd == 1:
return init_rank1(data)
elif data.shape[1] > data.shape[0]:
return init_rank_def(data)
else:
return init_rankn(data)
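# Why the rank-deficient branch of _krandinit works (a sketch of the algebra,
# not executed code): for centered data A = data - mu with SVD A = U S Vh, the
# sample covariance is
#     A.T A / (n - 1) = Vh.T S**2 Vh / (n - 1) = sVh.T @ sVh
# with sVh = S Vh / sqrt(n - 1) as computed in init_rank_def. If x has iid
# standard-normal rows, x @ sVh therefore has covariance sVh.T @ sVh, i.e. the
# estimated covariance, even when np.cov(data) is singular and the Cholesky
# factorization used in init_rankn would fail.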
_valid_init_meth = {'random': _krandinit, 'points': _kpoints}
def _missing_warn():
"""Print a warning when called."""
warnings.warn("One of the clusters is empty. "
"Re-run kmean with a different initialization.")
def _missing_raise():
"""raise a ClusterError when called."""
raise ClusterError("One of the clusters is empty. "
"Re-run kmean with a different initialization.")
_valid_miss_meth = {'warn': _missing_warn, 'raise': _missing_raise}
def kmeans2(data, k, iter=10, thresh=1e-5, minit='random',
missing='warn', check_finite=True):
"""
Classify a set of observations into k clusters using the k-means algorithm.
    The algorithm attempts to minimize the Euclidean distance between
observations and centroids. Several initialization methods are
included.
Parameters
----------
data : ndarray
A 'M' by 'N' array of 'M' observations in 'N' dimensions or a length
'M' array of 'M' one-dimensional observations.
k : int or ndarray
        The number of clusters to form as well as the number of
        centroids to generate. If the `minit` initialization string is
        'matrix', or if an ndarray is given instead, it is
        interpreted as the initial cluster centroids to use.
iter : int, optional
        Number of iterations of the k-means algorithm to run. Note
        that this differs in meaning from the iter parameter of
        the kmeans function.
thresh : float, optional
(not used yet)
minit : str, optional
Method for initialization. Available methods are 'random',
'points', and 'matrix':
'random': generate k centroids from a Gaussian with mean and
variance estimated from the data.
'points': choose k observations (rows) at random from data for
the initial centroids.
'matrix': interpret the k parameter as a k by M (or length k
array for one-dimensional data) array of initial centroids.
missing : str, optional
Method to deal with empty clusters. Available methods are
'warn' and 'raise':
'warn': give a warning and continue.
        'raise': raise a ClusterError and terminate the algorithm.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
Returns
-------
centroid : ndarray
A 'k' by 'N' array of centroids found at the last iteration of
k-means.
label : ndarray
label[i] is the code or index of the centroid the
i'th observation is closest to.
"""
data = _asarray_validated(data, check_finite=check_finite)
if missing not in _valid_miss_meth:
raise ValueError("Unkown missing method: %s" % str(missing))
# If data is rank 1, then we have 1 dimension problem.
nd = np.ndim(data)
if nd == 1:
d = 1
# raise ValueError("Input of rank 1 not supported yet")
elif nd == 2:
d = data.shape[1]
else:
raise ValueError("Input of rank > 2 not supported")
if np.size(data) < 1:
raise ValueError("Input has 0 items.")
# If k is not a single value, then it should be compatible with data's
# shape
if np.size(k) > 1 or minit == 'matrix':
        if not nd == np.ndim(k):
            raise ValueError("k is not an int and does not have the same "
                             "rank as data")
if d == 1:
nc = len(k)
else:
(nc, dc) = k.shape
            if not dc == d:
                raise ValueError("k is not an int and does not have the same "
                                 "rank as data")
clusters = k.copy()
else:
try:
nc = int(k)
except TypeError:
raise ValueError("k (%s) could not be converted to an integer " % str(k))
if nc < 1:
raise ValueError("kmeans2 for 0 clusters ? (k was %s)" % str(k))
if not nc == k:
warnings.warn("k was not an integer, was converted.")
try:
init = _valid_init_meth[minit]
except KeyError:
raise ValueError("unknown init method %s" % str(minit))
clusters = init(data, k)
if int(iter) < 1:
raise ValueError("iter = %s is not valid. iter must be a positive integer." % iter)
return _kmeans2(data, clusters, iter, nc, _valid_miss_meth[missing])
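# A hedged usage sketch for kmeans2 above (illustrative only; the returned
# values depend on the random initialization, hence the doctest skip):
#
#     >>> data = np.random.randn(100, 2)
#     >>> centroids, labels = kmeans2(data, 3, minit='points')  # doctest: +SKIP
#     >>> centroids.shape, labels.shape                         # doctest: +SKIP
#     ((3, 2), (100,))
#
# labels[i] is the index of the centroid closest to data[i]; empty clusters
# trigger the 'warn' or 'raise' behaviour selected with the missing argument.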
def _kmeans2(data, code, niter, nc, missing):
""" "raw" version of kmeans2. Do not use directly.
Run k-means with a given initial codebook.
"""
for i in range(niter):
# Compute the nearest neighbour for each obs
# using the current code book
label = vq(data, code)[0]
# Update the code by computing centroids using the new code book
new_code, has_members = _vq.update_cluster_means(data, label, nc)
if not has_members.all():
missing()
# Set the empty clusters to their previous positions
new_code[~has_members] = code[~has_members]
code = new_code
return code, label
from __future__ import unicode_literals
from __future__ import print_function
import unittest
from moya.context import Context
from moya.context import dataindex
class TestDataIndex(unittest.TestCase):
def test_parse(self):
"""Test dataindex parse"""
tests = [
("", []),
(".", []),
('""', [""]),
("\\\\", ["\\"]),
("foo", ["foo"]),
("foo.bar", ["foo", "bar"]),
(".foo.bar", ["foo", "bar"]),
("foo.bar.baz", ["foo", "bar", "baz"]),
('"foo"', ["foo"]),
('"foo".bar', ["foo", "bar"]),
('"foo.bar"', ["foo.bar"]),
("foo\.bar", ["foo.bar"]),
("1", [1]),
('"1"', ["1"]),
("foo.2", ["foo", 2]),
]
for index, parsed in tests:
self.assertEqual(dataindex.parse(index), parsed)
def test_build(self):
"""Test encoding indices as a dataindex string"""
self.assertEqual(dataindex.build(["Hello", "World", 1]), "Hello.World.1")
self.assertEqual(dataindex.build(["Hello"]), "Hello")
def test_join(self):
"""Test joining of indices"""
self.assertEqual(dataindex.join("foo"), "foo")
self.assertEqual(dataindex.join("foo", "bar.baz"), "foo.bar.baz")
self.assertEqual(dataindex.join("foo", "bar\.baz"), 'foo."bar.baz"')
self.assertEqual(dataindex.join("foo", '"bar.baz"'), 'foo."bar.baz"')
self.assertEqual(dataindex.join("foo", "bar.baz.1:5"), "foo.bar.baz.1:5")
self.assertEqual(dataindex.join("foo", "bar", "baz"), "foo.bar.baz")
self.assertEqual(dataindex.join("foo", ["bar", "baz"]), "foo.bar.baz")
self.assertEqual(dataindex.join(".foo", "bar", "baz"), ".foo.bar.baz")
self.assertEqual(dataindex.join("foo", ".bar", "baz"), ".bar.baz")
def test_normalize(self):
"""Test normalizing indices"""
self.assertEqual(dataindex.normalize("foo"), "foo")
self.assertEqual(dataindex.normalize(r"\foo"), "foo")
self.assertEqual(dataindex.normalize(r"\f\o\o"), "foo")
self.assertEqual(dataindex.normalize('"foo"'), "foo")
def test_make_absolute(self):
"""Test making a data index absolute"""
self.assertEqual(dataindex.make_absolute("foo.bar"), ".foo.bar")
self.assertEqual(dataindex.make_absolute(".foo.bar"), ".foo.bar")
def test_iter_index(self):
"""Test iter_index method"""
self.assertEqual(
list(dataindex.iter_index("foo.bar.baz")),
[("foo", "foo"), ("bar", "foo.bar"), ("baz", "foo.bar.baz")],
)
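# Summary of the dataindex syntax exercised by the tests above (drawn from the
# test cases themselves, not from separate documentation): components are
# separated by dots, a leading dot marks an absolute index, double quotes or a
# backslash protect a literal dot ('"foo.bar"' and r"foo\.bar" both parse to
# ["foo.bar"]), and unquoted digits parse as integer components.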
class TestContext(unittest.TestCase):
def setUp(self):
pass
def test_basic_root(self):
"""Test basic operations from root"""
c = Context()
c["foo"] = "bar"
self.assert_("foo" in c)
self.assertEqual(c["foo"], "bar")
self.assertEqual(c.root["foo"], "bar")
c["fruit"] = "apple"
self.assert_("fruit" in c)
self.assertEqual(c["fruit"], "apple")
self.assertEqual(c.root["fruit"], "apple")
self.assertEqual(c.get("nothere", "missing"), "missing")
self.assertEqual(sorted(c.keys()), ["foo", "fruit"])
self.assertEqual(sorted(c.values()), ["apple", "bar"])
self.assertEqual(sorted(c.items()), [("foo", "bar"), ("fruit", "apple")])
def test_attr(self):
"""Test attribute / getitem distinction"""
class A(object):
foo = "buzz"
bar = "cantsee"
def __getitem__(self, key):
if key == "foo":
return "baz"
raise IndexError(key)
def __contains__(self, key):
return key == "foo"
c = Context()
c["a"] = A()
self.assertEqual(c["a.foo"], "baz")
self.assert_(c["a.bar"].moya_missing)
# self.assertRaises(errors.ContextKeyError, c.__getitem__, "a.bar")
self.assert_("a.bar" not in c)
self.assert_("a.foo" in c)
def test_get_root(self):
"""Test looking up root object"""
c = Context({"foo": [1, 2, 3]})
self.assertEqual(c[""], {"foo": [1, 2, 3]})
c.push_frame("foo")
self.assertEqual(c[""], [1, 2, 3])
c.push_frame(".foo")
self.assertEqual(c[""], [1, 2, 3])
c.push_frame(".")
self.assertEqual(c[""], {"foo": [1, 2, 3]})
def test_inspect(self):
"""Test keys/values/items"""
c = Context()
c["foo"] = dict(a=1, b=2, c=3)
c["bar"] = ["a", "b", "c"]
def compare(a, b):
a = sorted(a, key=lambda k: str(k.__class__.__name__))
b = sorted(b, key=lambda k: str(k.__class__.__name__))
for compare_a, compare_b in zip(a, b):
self.assertEqual(compare_a, compare_b)
self.assertEqual(sorted(c.keys()), ["bar", "foo"])
self.assertEqual(sorted(c.keys("foo")), ["a", "b", "c"])
self.assertEqual(sorted(c.keys("bar")), [0, 1, 2])
compare((c.values()), [dict(a=1, b=2, c=3), ["a", "b", "c"]])
self.assertEqual(sorted(c.values("foo")), [1, 2, 3])
self.assertEqual(sorted(c.values("bar")), ["a", "b", "c"])
compare(
sorted(c.items()),
sorted([("foo", dict(a=1, b=2, c=3)), ("bar", ["a", "b", "c"])]),
)
self.assertEqual(sorted(c.items("foo")), [("a", 1), ("b", 2), ("c", 3)])
self.assertEqual(sorted(c.items("bar")), [(0, "a"), (1, "b"), (2, "c")])
self.assertEqual(
sorted(c.all_keys()),
sorted(
["", "foo", "foo.a", "foo.c", "foo.b", "bar", "bar.0", "bar.1", "bar.2"]
),
)
def test_frame_stack(self):
"""Test push/pop frame operations"""
c = Context()
c["foo"] = {}
c.push_frame("foo")
self.assertEqual(c.get_frame(), ".foo")
c["bar"] = 1
self.assertEqual(c.root["foo"]["bar"], 1)
c.pop_frame()
self.assertEqual(c.get_frame(), ".")
c["baz"] = 2
self.assertEqual(c.root["baz"], 2)
def test_root_indices(self):
"""Test root indices"""
c = Context()
c["foo"] = {}
c["baz"] = 2
c.push_frame("foo") # In .foo
c["bar"] = 1
self.assertEqual(c[".baz"], 2)
self.assertEqual(c["bar"], 1)
c.push_frame(".") # In .
self.assertEqual(c["baz"], 2)
self.assertEqual(c["foo.bar"], 1)
c.pop_frame() # In .foo
self.assertEqual(c[".baz"], 2)
self.assertEqual(c["bar"], 1)
self.assertEqual(c[".foo.bar"], 1)
def test_expressions(self):
"""Test expression evaluation"""
c = Context()
c["foo"] = {}
c["baz"] = 2
c["foo.a"] = 10
c["foo.b"] = 20
c["foo.c"] = dict(inception="three levels")
c["word"] = "apples"
c["word2"] = c["word"]
c["lt"] = "less than"
class ChoiceTest(object):
def __init__(self):
self.choices = []
c["choicetest"] = ChoiceTest()
class Obj(object):
def __init__(self, id):
self.id = id
c["objects"] = [Obj(1), Obj(2), Obj(3)]
tests = [
("1", 1),
("123", 123),
('"1"', "1"),
("'1'", "1"),
('"\\""', '"'),
("'''1'''", "1"),
('"""1"""', "1"),
("100-5", 95),
("7//2", 3),
("1+1", 2),
("1+2+3", 6),
("2+3*2", 8),
("(2+3)*2", 10),
("foo.a", 10),
("$foo.a", 10),
("$lt", "less than"),
("foo.c.inception", "three levels"),
# ('foo.c.inception.:5 + " "+"little pigs"', "three little pigs"),
# ('foo.c.inception.::-1', "slevel eerht"),
("foo.a+foo.b", 30),
(".foo.a+.foo.b", 30),
("foo.a/2", 5),
("foo.a/4", 2.5),
("word*3", "applesapplesapples"),
("word.2*3", "ppp"),
("word+str:2", "apples2"),
('word^="a"', True),
('word^="app"', True),
('word^="ppa"', False),
('word$="les"', True),
('word$="s"', True),
("2!=3", True),
("2>1", True),
("1<2", True),
("1>2", False),
("3<1", False),
("1==1", True),
("10>=10", True),
("9.9<=10", True),
("foo.a==10", True),
('foo.a=="a"', False),
("foo.a=='a'", False),
("3*2>5", True),
("2 gt 1", True),
("1 lt 2", True),
("1 gt 2", False),
("3 lt 1", False),
("10 gte 10", True),
("9.9 lte 10", True),
("3*2 gt 5", True),
("None", None),
("True", True),
("False", False),
("yes", True),
("no", False),
('int:"3"', 3),
("str:50", "50"),
('float:"2.5"', 2.5),
('bool:"test"', True),
("bool:1", True),
('bool:""', False),
("isint:5", True),
('isint:"5"', False),
("isnumber:2", True),
("isnumber:2.5", True),
('isnumber:"a"', False),
("isfloat:1.0", True),
("isfloat:1", False),
("isstr:1", False),
('isstr:"a"', True),
("isbool:True", True),
("isbool:False", True),
("isbool:(2+1)", False),
("isbool:bool:1", True),
("isbool:bool:0", True),
("len:word", 6),
("True and True", True),
("False and False", False),
("True or False", True),
("False or False", False),
# ('2>1 and word.-1=="s"', True),
('word=="apples"', True),
('1==2 or word=="apples"', True),
("'a' in 'apples'", True),
("'ppl' in 'apples'", True),
("word.1==word.2", True),
("word is word2", True),
("'index.html' fnmatches '*.html'", True),
("'foo/index.html' fnmatches '*.html'", True),
("'index.html' fnmatches '*.py'", False),
("'index.html' fnmatches '*.h??l'", True),
("'hello, world' matches /.*world/", True),
("'hello, will' matches /.*world/", False),
("'hello, world' matches '.*world'", True),
("'hello, will' matches '.*world'", False),
("'inception' in foo['c']", True),
("'inception' in (foo['c'])", True),
("exists:foo", True),
("exists:baz", True),
("exists:asdfsadf", False),
("missing:foo", False),
("missing:nobodyherebutuschickens", True),
("missing:yesterday", True),
("missing:foo.bar.baz", True),
("missing:andrew", True),
("'1' instr [1,2,3,4]", True),
("'5' instr [1,2,3,4]", False),
("'1' not instr [1,2,3,4]", False),
("'5' not instr [1,2,3,4]", True),
("1 in None", False),
("1 instr None", False),
("a=1", {"a": 1}),
('{"a":1}', {"a": 1}),
("[1,2,3]", [1, 2, 3]),
("[1,2,3,[4,5,6]]", [1, 2, 3, [4, 5, 6]]),
("[1,2,3,[4,5,6,[7,8,9]]]", [1, 2, 3, [4, 5, 6, [7, 8, 9]]]),
("[1]", [1]),
("[]", []),
("d:'5'", 5),
("d:'5' + 1", 6),
("d:'5' + d:'1'", 6),
("debug:d:5", "d:'5'"),
("filesize:1024", "1.0 KB"),
("abs:-3.14", 3.14),
('basename:"/foo/bar/baz"', "baz"),
('bool:""', False),
('capitalize:"hello"', "Hello"),
("ceil:3.14", 4),
("choices:choicetest", []),
("chain:[[1, 2], [3, 4]]", [1, 2, 3, 4]),
("chr:65", "A"),
("collect:[['hello', 'world'], 0]", ["h", "w"]),
(
"sorted:items:collectmap:[['hello', 'world'], 0]",
[("h", "hello"), ("w", "world")],
),
("collectids:objects", [1, 2, 3]),
("commalist:['hello', 'world']", "hello,world"),
("commaspacelist:['hello', 'world']", "hello, world"),
("'hello\\nworld'", "hello\nworld"),
(r"'you can \"quote me\" on that'", 'you can "quote me" on that'),
("'\\\\'", "\\"),
("'helloworld'[1]", "e"),
("'helloworld'[-1]", "d"),
("'helloworld'[:2]", "he"),
("'helloworld'[2:4]", "ll"),
("'helloworld'[::-1]", "dlrowolleh"),
]
for expression, result in tests:
print(expression, result)
expression_result = c.eval(expression)
print("\t", expression_result)
self.assertEqual(expression_result, result)
def test_expression_index(self):
"""Test the index operator"""
c = Context()
c["foo"] = {}
c["baz"] = 2
c["foo.a"] = 10
c["foo.b"] = 20
c["foo.c"] = dict(inception="three levels")
c["word"] = "apples"
c["word2"] = c["word"]
c["lt"] = "less than"
class Obj(object):
def __init__(self):
self.n = 123
self.foo = ["Hello", "World", "!"]
c["o"] = Obj()
tests = [
('"apples"[0]', "a"),
('"apples"[1]', "p"),
('"apples"[1+2]', "l"),
('"apples"[-1]', "s"),
('foo["a"]', 10),
('foo["b"]', 20),
('foo["c"]', dict(inception="three levels")),
('foo["c"]["inception"]', "three levels"),
('foo.c["inception"]', "three levels"),
('foo.c["inception"][1]', "h"),
('o["n"]', 123),
('o["foo"][1]', "World"),
]
for expression, result in tests:
print(expression)
expression_result = c.eval(expression)
self.assertEqual(expression_result, result)
# expression_result_callable = c.compile(expression)
# self.assertEqual(expression_result_callable(), result)
def test_expression_filter(self):
"""Test filter evaluation"""
c = Context()
c["filter"] = dict(double=lambda v: v * 2, square=lambda v: v * v)
c["data"] = dict(a=1, b=10, c=123)
tests = [
("3|filter.double", 6),
("3|.filter.double", 6),
("data.a + data.b|filter.double", 22),
("(data.a + data.b)|filter.double", 22),
("3|filter.square", 9),
("3|filter.double|filter.square", 36),
]
for expression, result in tests:
print(expression)
expression_result = c.eval(expression)
self.assertEqual(expression_result, result)
# expression_result_callable = c.compile(expression)
# self.assertEqual(expression_result_callable(), result)
    def test_expressions_with_frame(self):
"""Test expression evaluation in a frame"""
c = Context()
c["foo"] = dict(a=1, b=2, bar="apples")
c["top"] = 10
c["r"] = list(range(10))
tests = [("a+b", 3), (".top", 10), ("a+.top", 11), (".r.4+.top", 14)]
with c.frame("foo"):
for expression, result in tests:
self.assertEqual(c.eval(expression), result)
def test_set_lazy(self):
"""Test lazy evaluation"""
c = Context()
evaluations = [0]
def add(a, b):
evaluations[0] += 1
return a + b
c.set_lazy("foo", add, 3, 4)
self.assertEqual(evaluations[0], 0)
self.assertEqual(c["foo"], 7)
self.assertEqual(evaluations[0], 1)
self.assertEqual(c["foo"], 7)
self.assertEqual(evaluations[0], 1)
c.set_lazy("bar", lambda: {})
self.assertEqual(c["bar"], {})
def test_set_async(self):
"""Test asyncronous evaluation"""
c = Context()
c.set_async("foo", lambda: "bar")
self.assertEqual(c["foo"], "bar")
self.assertEqual(c["foo"], "bar")
def waiter(wait_time, result):
import time
time.sleep(wait_time)
return result
c.set_async("bestthings", waiter, 0.1, "guiness")
self.assertEqual(c["bestthings"], "guiness")
self.assertEqual(c["bestthings"], "guiness")
def test_set_new(self):
"""Test setting values if not present"""
c = Context()
c.set_new("foo", {})
self.assertEqual(c["foo"], {})
c.set_new("foo", 100)
self.assertEqual(c["foo"], {})
def test_deleting(self):
"""Test deleting from context"""
c = Context()
c["foo"] = {}
c["foo.bar"] = 1
c["foo.baz"] = 2
self.assert_("foo" in c)
self.assert_("foo.bar" in c)
self.assert_("foo.baz" in c)
del c["foo.bar"]
self.assert_("foo" in c)
self.assert_("foo.bar" not in c)
self.assert_("foo.baz" in c)
del c["foo"]
self.assert_("foo" not in c)
self.assert_("foo.bar" not in c)
self.assert_("foo.baz" not in c)
def test_copy_move(self):
"""Test copying and moving values"""
c = Context()
c["foo"] = 123
c["bar"] = {}
c["bar.baz"] = 456
c.copy("foo", "foo2")
self.assertEqual(c["foo"], 123)
self.assertEqual(c["foo2"], 123)
with c.frame("bar"):
c.copy("baz", ".zab")
self.assertEqual(c["zab"], 456)
c = Context()
c["foo"] = 123
c["bar"] = {}
self.assert_("foo" in c)
c.move("foo", "bar.foo")
self.assert_("foo" not in c)
self.assert_("bar.foo" in c)
self.assertEqual(c["bar.foo"], 123)
def test_scope(self):
"""Test scope facility"""
c = Context()
c["foo"] = dict(a=1, b=2)
c["bar"] = {}
c.push_frame(".foo")
self.assertEqual(c["a"], 1)
self.assertEqual(c["b"], 2)
self.assert_("c" not in c)
c.push_scope(".bar")
c[".bar.c"] = 3
self.assert_("c" in c)
self.assertEqual(c["c"], 3)
c.pop_scope()
self.assert_("c" not in c)
self.assertEqual(c["a"], 1)
self.assertEqual(c["b"], 2)
def test_stack(self):
c = Context()
c.push_stack("content", "foo")
self.assertEqual(c[".content"], "foo")
c.push_stack("content", "bar")
self.assertEqual(c[".content"], "bar")
value = c.pop_stack("content")
self.assertEqual(value, "bar")
self.assertEqual(c[".content"], "foo")
value = c.pop_stack("content")
self.assertEqual(value, "foo")
self.assert_(c[".content"] is None)
# -*- coding: utf-8 -*-
import time
import mock
import unittest
from nose.tools import * # noqa
import webtest
import furl
import itsdangerous
from modularodm import storage
from framework.auth import cas
from framework.auth import signing
from framework.auth.core import Auth
from framework.exceptions import HTTPError
from framework.sessions.model import Session
from framework.mongo import set_up_storage
from website import settings
from website.files import models
from website.files.models.base import PROVIDER_MAP
from website.util import api_url_for, rubeus
from website.addons.base import GuidFile
from website.project import new_private_link
from website.project.views.node import _view_project as serialize_node
from website.addons.base import AddonConfig, AddonNodeSettingsBase, views
from website.addons.github.model import AddonGitHubOauthSettings
from tests.base import OsfTestCase
from tests.factories import AuthUserFactory, ProjectFactory
from website.addons.github.exceptions import ApiError
class DummyGuidFile(GuidFile):
file_name = 'foo.md'
name = 'bar.md'
@property
def provider(self):
return 'dummy'
@property
def version_identifier(self):
return 'versionidentifier'
@property
def unique_identifier(self):
return 'dummyid'
@property
def waterbutler_path(self):
return '/path/to/file/'
def enrich(self):
pass
class TestAddonConfig(unittest.TestCase):
def setUp(self):
self.addon_config = AddonConfig(
short_name='test', full_name='test', owners=['node'],
added_to={'node': False}, categories=[],
settings_model=AddonNodeSettingsBase,
)
def test_static_url_relative(self):
url = self.addon_config._static_url('foo')
assert_equal(
url,
'/static/addons/test/foo'
)
def test_deleted_defaults_to_false(self):
class MyAddonSettings(AddonNodeSettingsBase):
pass
config = MyAddonSettings()
assert_is(config.deleted, False)
def test_static_url_absolute(self):
url = self.addon_config._static_url('/foo')
assert_equal(
url,
'/foo'
)
class SetEnvironMiddleware(object):
def __init__(self, app, **kwargs):
self.app = app
self.kwargs = kwargs
def __call__(self, environ, start_response):
environ.update(self.kwargs)
return self.app(environ, start_response)
class TestAddonAuth(OsfTestCase):
def setUp(self):
super(TestAddonAuth, self).setUp()
self.flask_app = SetEnvironMiddleware(self.app.app, REMOTE_ADDR='127.0.0.1')
self.test_app = webtest.TestApp(self.flask_app)
self.user = AuthUserFactory()
self.auth_obj = Auth(user=self.user)
self.node = ProjectFactory(creator=self.user)
self.session = Session(data={'auth_user_id': self.user._id})
self.session.save()
self.cookie = itsdangerous.Signer(settings.SECRET_KEY).sign(self.session._id)
self.configure_addon()
def configure_addon(self):
self.user.add_addon('github')
self.user_addon = self.user.get_addon('github')
self.oauth_settings = AddonGitHubOauthSettings(github_user_id='john')
self.oauth_settings.save()
self.user_addon.oauth_settings = self.oauth_settings
self.user_addon.oauth_access_token = 'secret'
self.user_addon.save()
self.node.add_addon('github', self.auth_obj)
self.node_addon = self.node.get_addon('github')
self.node_addon.user = 'john'
self.node_addon.repo = 'youre-my-best-friend'
self.node_addon.user_settings = self.user_addon
self.node_addon.save()
def build_url(self, **kwargs):
options = dict(
action='download',
cookie=self.cookie,
nid=self.node._id,
provider=self.node_addon.config.short_name,
)
options.update(kwargs)
return api_url_for('get_auth', **options)
def test_auth_download(self):
url = self.build_url()
res = self.test_app.get(url)
assert_equal(res.json['auth'], views.make_auth(self.user))
assert_equal(res.json['credentials'], self.node_addon.serialize_waterbutler_credentials())
assert_equal(res.json['settings'], self.node_addon.serialize_waterbutler_settings())
expected_url = furl.furl(self.node.api_url_for('create_waterbutler_log', _absolute=True))
observed_url = furl.furl(res.json['callback_url'])
observed_url.port = expected_url.port
assert_equal(expected_url, observed_url)
def test_auth_missing_args(self):
url = self.build_url(cookie=None)
res = self.test_app.get(url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_auth_bad_cookie(self):
url = self.build_url(cookie=self.cookie[::-1])
res = self.test_app.get(url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_auth_missing_addon(self):
url = self.build_url(provider='queenhub')
res = self.test_app.get(url, expect_errors=True)
assert_equal(res.status_code, 400)
def test_auth_bad_ip(self):
flask_app = SetEnvironMiddleware(self.app.app, REMOTE_ADDR='192.168.1.1')
test_app = webtest.TestApp(flask_app)
url = self.build_url()
res = test_app.get(url, expect_errors=True)
assert_equal(res.status_code, 403)
@mock.patch('website.addons.base.views.cas.get_client')
def test_auth_bad_bearer_token(self, mock_cas_client):
mock_cas_client.return_value = mock.Mock(profile=mock.Mock(return_value=cas.CasResponse(authenticated=False)))
url = self.build_url()
res = self.test_app.get(url, headers={'Authorization': 'Bearer invalid_access_token'}, expect_errors=True)
assert_equal(res.status_code, 403)
class TestAddonLogs(OsfTestCase):
def setUp(self):
super(TestAddonLogs, self).setUp()
self.flask_app = SetEnvironMiddleware(self.app.app, REMOTE_ADDR='127.0.0.1')
self.test_app = webtest.TestApp(self.flask_app)
self.user = AuthUserFactory()
self.auth_obj = Auth(user=self.user)
self.node = ProjectFactory(creator=self.user)
self.session = Session(data={'auth_user_id': self.user._id})
self.session.save()
self.cookie = itsdangerous.Signer(settings.SECRET_KEY).sign(self.session._id)
self.configure_addon()
def configure_addon(self):
self.user.add_addon('github')
self.user_addon = self.user.get_addon('github')
self.oauth_settings = AddonGitHubOauthSettings(github_user_id='john')
self.oauth_settings.save()
self.user_addon.oauth_settings = self.oauth_settings
self.user_addon.oauth_access_token = 'secret'
self.user_addon.save()
self.node.add_addon('github', self.auth_obj)
self.node_addon = self.node.get_addon('github')
self.node_addon.user = 'john'
self.node_addon.repo = 'youre-my-best-friend'
self.node_addon.user_settings = self.user_addon
self.node_addon.save()
def build_payload(self, metadata, **kwargs):
options = dict(
auth={'id': self.user._id},
action='create',
provider=self.node_addon.config.short_name,
metadata=metadata,
time=time.time() + 1000,
)
options.update(kwargs)
options = {
key: value
for key, value in options.iteritems()
if value is not None
}
message, signature = signing.default_signer.sign_payload(options)
return {
'payload': message,
'signature': signature,
}
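    # Note on the helper above (a reading of this test module, not of the
    # server code): build_payload wraps the log options in the signed envelope
    # produced by signing.default_signer.sign_payload, returning a dict with
    # the serialized 'payload' and its 'signature', which is the shape fed to
    # the create_waterbutler_log endpoint in the tests below.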
@mock.patch('website.notifications.events.files.FileAdded.perform')
def test_add_log(self, mock_perform):
path = 'pizza'
url = self.node.api_url_for('create_waterbutler_log')
payload = self.build_payload(metadata={'path': path})
nlogs = len(self.node.logs)
self.test_app.put_json(url, payload, headers={'Content-Type': 'application/json'})
self.node.reload()
assert_equal(len(self.node.logs), nlogs + 1)
# # Mocking form_message and perform so that the payload need not be exact.
# assert_true(mock_form_message.called, "form_message not called")
assert_true(mock_perform.called, "perform not called")
def test_add_log_missing_args(self):
path = 'pizza'
url = self.node.api_url_for('create_waterbutler_log')
payload = self.build_payload(metadata={'path': path}, auth=None)
nlogs = len(self.node.logs)
res = self.test_app.put_json(
url,
payload,
headers={'Content-Type': 'application/json'},
expect_errors=True,
)
assert_equal(res.status_code, 400)
self.node.reload()
assert_equal(len(self.node.logs), nlogs)
def test_add_log_no_user(self):
path = 'pizza'
url = self.node.api_url_for('create_waterbutler_log')
payload = self.build_payload(metadata={'path': path}, auth={'id': None})
nlogs = len(self.node.logs)
res = self.test_app.put_json(
url,
payload,
headers={'Content-Type': 'application/json'},
expect_errors=True,
)
assert_equal(res.status_code, 400)
self.node.reload()
assert_equal(len(self.node.logs), nlogs)
def test_add_log_no_addon(self):
path = 'pizza'
node = ProjectFactory(creator=self.user)
url = node.api_url_for('create_waterbutler_log')
payload = self.build_payload(metadata={'path': path})
nlogs = len(node.logs)
res = self.test_app.put_json(
url,
payload,
headers={'Content-Type': 'application/json'},
expect_errors=True,
)
assert_equal(res.status_code, 400)
self.node.reload()
assert_equal(len(node.logs), nlogs)
def test_add_log_bad_action(self):
path = 'pizza'
url = self.node.api_url_for('create_waterbutler_log')
payload = self.build_payload(metadata={'path': path}, action='dance')
nlogs = len(self.node.logs)
res = self.test_app.put_json(
url,
payload,
headers={'Content-Type': 'application/json'},
expect_errors=True,
)
assert_equal(res.status_code, 400)
self.node.reload()
assert_equal(len(self.node.logs), nlogs)
def test_action_file_rename(self):
url = self.node.api_url_for('create_waterbutler_log')
payload = self.build_payload(
action='rename',
metadata={
'path': 'foo',
},
source={
'materialized': 'foo',
'provider': 'github',
'node': {'_id': self.node._id},
'name': 'new.txt',
'kind': 'file',
},
destination={
'path': 'foo',
'materialized': 'foo',
'provider': 'github',
'node': {'_id': self.node._id},
'name': 'old.txt',
'kind': 'file',
},
)
self.test_app.put_json(
url,
payload,
headers={'Content-Type': 'application/json'}
)
self.node.reload()
assert_equal(
self.node.logs[-1].action,
'github_addon_file_renamed',
)
class TestCheckAuth(OsfTestCase):
def setUp(self):
super(TestCheckAuth, self).setUp()
self.user = AuthUserFactory()
self.node = ProjectFactory(creator=self.user)
def test_has_permission(self):
res = views.check_access(self.node, Auth(user=self.user), 'upload', None)
assert_true(res)
def test_not_has_permission_read_public(self):
self.node.is_public = True
self.node.save()
res = views.check_access(self.node, Auth(), 'download', None)
def test_not_has_permission_read_has_link(self):
link = new_private_link('red-special', self.user, [self.node], anonymous=False)
res = views.check_access(self.node, Auth(private_key=link.key), 'download', None)
def test_not_has_permission_logged_in(self):
user2 = AuthUserFactory()
with assert_raises(HTTPError) as exc_info:
views.check_access(self.node, Auth(user=user2), 'download', None)
assert_equal(exc_info.exception.code, 403)
def test_not_has_permission_not_logged_in(self):
with assert_raises(HTTPError) as exc_info:
views.check_access(self.node, Auth(), 'download', None)
assert_equal(exc_info.exception.code, 401)
def test_has_permission_on_parent_node_copyto_pass_if_registration(self):
component_admin = AuthUserFactory()
component = ProjectFactory(creator=component_admin, parent=self.node)
component.is_registration = True
assert_false(component.has_permission(self.user, 'write'))
res = views.check_access(component, Auth(user=self.user), 'copyto', None)
assert_true(res)
def test_has_permission_on_parent_node_copyto_fail_if_not_registration(self):
component_admin = AuthUserFactory()
component = ProjectFactory(creator=component_admin, parent=self.node)
assert_false(component.has_permission(self.user, 'write'))
with assert_raises(HTTPError):
views.check_access(component, Auth(user=self.user), 'copyto', None)
def test_has_permission_on_parent_node_copyfrom(self):
component_admin = AuthUserFactory()
component = ProjectFactory(creator=component_admin, is_public=False, parent=self.node)
assert_false(component.has_permission(self.user, 'write'))
res = views.check_access(component, Auth(user=self.user), 'copyfrom', None)
assert_true(res)
class TestCheckOAuth(OsfTestCase):
def setUp(self):
super(TestCheckOAuth, self).setUp()
self.user = AuthUserFactory()
self.node = ProjectFactory(creator=self.user)
def test_has_permission_private_not_authenticated(self):
component_admin = AuthUserFactory()
component = ProjectFactory(creator=component_admin, is_public=False, parent=self.node)
cas_resp = cas.CasResponse(authenticated=False)
assert_false(component.has_permission(self.user, 'write'))
with assert_raises(HTTPError) as exc_info:
views.check_access(component, Auth(user=self.user), 'download', cas_resp)
assert_equal(exc_info.exception.code, 403)
def test_has_permission_private_no_scope_forbidden(self):
component_admin = AuthUserFactory()
component = ProjectFactory(creator=component_admin, is_public=False, parent=self.node)
cas_resp = cas.CasResponse(authenticated=True, status=None, user=self.user._id,
attributes={'accessTokenScope': {}})
assert_false(component.has_permission(self.user, 'write'))
with assert_raises(HTTPError) as exc_info:
views.check_access(component, Auth(user=self.user), 'download', cas_resp)
assert_equal(exc_info.exception.code, 403)
def test_has_permission_public_irrelevant_scope_allowed(self):
component_admin = AuthUserFactory()
component = ProjectFactory(creator=component_admin, is_public=True, parent=self.node)
cas_resp = cas.CasResponse(authenticated=True, status=None, user=self.user._id,
attributes={'accessTokenScope': {'osf.users.all+read'}})
assert_false(component.has_permission(self.user, 'write'))
res = views.check_access(component, Auth(user=self.user), 'download', cas_resp)
assert_true(res)
def test_has_permission_private_irrelevant_scope_forbidden(self):
component_admin = AuthUserFactory()
component = ProjectFactory(creator=component_admin, is_public=False, parent=self.node)
cas_resp = cas.CasResponse(authenticated=True, status=None, user=self.user._id,
attributes={'accessTokenScope': {'osf.users.all+read'}})
assert_false(component.has_permission(self.user, 'write'))
with assert_raises(HTTPError) as exc_info:
views.check_access(component, Auth(user=self.user), 'download', cas_resp)
assert_equal(exc_info.exception.code, 403)
def test_has_permission_decommissioned_scope_no_error(self):
component_admin = AuthUserFactory()
component = ProjectFactory(creator=component_admin, is_public=False, parent=self.node)
cas_resp = cas.CasResponse(authenticated=True, status=None, user=self.user._id,
attributes={'accessTokenScope': {
'decommissioned.scope+write',
'osf.nodes.data+read',
}})
assert_false(component.has_permission(self.user, 'write'))
res = views.check_access(component, Auth(user=self.user), 'download', cas_resp)
assert_true(res)
def test_has_permission_write_scope_read_action(self):
component_admin = AuthUserFactory()
component = ProjectFactory(creator=component_admin, is_public=False, parent=self.node)
cas_resp = cas.CasResponse(authenticated=True, status=None, user=self.user._id,
attributes={'accessTokenScope': {'osf.nodes.data+write'}})
assert_false(component.has_permission(self.user, 'write'))
res = views.check_access(component, Auth(user=self.user), 'download', cas_resp)
assert_true(res)
def test_has_permission_read_scope_write_action_forbidden(self):
component = ProjectFactory(creator=self.user, is_public=False, parent=self.node)
cas_resp = cas.CasResponse(authenticated=True, status=None, user=self.user._id,
attributes={'accessTokenScope': {'osf.nodes.data+read'}})
assert_true(component.has_permission(self.user, 'write'))
with assert_raises(HTTPError) as exc_info:
views.check_access(component, Auth(user=self.user), 'upload', cas_resp)
assert_equal(exc_info.exception.code, 403)
class OsfFileTestCase(OsfTestCase):
@classmethod
def setUpClass(cls):
super(OsfTestCase, cls).setUpClass()
set_up_storage([DummyGuidFile], storage.MongoStorage)
# class TestAddonFileViewHelpers(OsfFileTestCase):
# def test_key_error_raises_attr_error_for_name(self):
# class TestGuidFile(GuidFile):
# pass
# with assert_raises(AttributeError):
# TestGuidFile().name
# def test_getattrname_catches(self):
# class TestGuidFile(GuidFile):
# pass
# assert_equals(getattr(TestGuidFile(), 'name', 'foo'), 'foo')
# def test_getattrname(self):
# class TestGuidFile(GuidFile):
# pass
# guid = TestGuidFile()
# guid._metadata_cache = {'name': 'test'}
# assert_equals(getattr(guid, 'name', 'foo'), 'test')
def assert_urls_equal(url1, url2):
furl1 = furl.furl(url1)
furl2 = furl.furl(url2)
for attr in ['scheme', 'host', 'port']:
setattr(furl1, attr, None)
setattr(furl2, attr, None)
assert_equal(furl1, furl2)
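# assert_urls_equal above compares only the path and query of two URLs: scheme,
# host and port are blanked on both sides before comparison. A hedged, minimal
# illustration (the hostnames are arbitrary examples):
#
#     >>> assert_urls_equal('http://localhost:80/v1/x?a=1',
#     ...                   'https://example.org/v1/x?a=1')   # doctest: +SKIP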
class TestFileNode(models.FileNode):
provider = 'test_addons'
def touch(self, bearer, **kwargs):
return models.FileVersion()
class TestFile(TestFileNode, models.File):
pass
class TestFolder(TestFileNode, models.Folder):
pass
@mock.patch('website.addons.github.model.GitHub.repo', mock.Mock(side_effect=ApiError))
class TestAddonFileViews(OsfTestCase):
@classmethod
def setUpClass(cls):
super(TestAddonFileViews, cls).setUpClass()
PROVIDER_MAP['github'] = [TestFolder, TestFile, TestFileNode]
TestFileNode.provider = 'github'
def setUp(self):
super(TestAddonFileViews, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user)
self.user.add_addon('github')
self.project.add_addon('github', auth=Auth(self.user))
self.user_addon = self.user.get_addon('github')
self.node_addon = self.project.get_addon('github')
self.oauth = AddonGitHubOauthSettings(
github_user_id='denbarell',
oauth_access_token='Truthy'
)
self.oauth.save()
self.user_addon.oauth_settings = self.oauth
self.user_addon.save()
self.node_addon.user_settings = self.user_addon
self.node_addon.repo = 'Truth'
self.node_addon.user = 'E'
self.node_addon.save()
@classmethod
def tearDownClass(cls):
super(TestAddonFileViews, cls).tearDownClass()
PROVIDER_MAP['github'] = [models.GithubFolder, models.GithubFile, models.GithubFileNode]
del PROVIDER_MAP['test_addons']
def get_test_file(self):
ret = TestFile(
name='Test',
node=self.project,
path='/test/Test',
materialized_path='/test/Test'
)
ret.save()
return ret
def get_mako_return(self):
ret = serialize_node(self.project, Auth(self.user), primary=True)
ret.update({
'error': '',
'provider': '',
'file_path': '',
'sharejs_uuid': '',
'private': '',
'urls': {
'files': '',
'render': '',
'sharejs': '',
'mfr': '',
'gravatar': '',
'external': '',
},
'size': '',
'extra': '',
'file_name': '',
'materialized_path': '',
})
ret.update(rubeus.collect_addon_assets(self.project))
return ret
def test_redirects_to_guid(self):
file_node = self.get_test_file()
guid = file_node.get_guid(create=True)
resp = self.app.get(
self.project.web_url_for(
'addon_view_or_download_file',
path=file_node.path.strip('/'),
provider='github'
),
auth=self.user.auth
)
assert_equals(resp.status_code, 302)
assert_equals(resp.location, 'http://localhost:80/{}/'.format(guid._id))
def test_action_download_redirects_to_download(self):
file_node = self.get_test_file()
guid = file_node.get_guid(create=True)
resp = self.app.get('/{}/?action=download'.format(guid._id), auth=self.user.auth)
assert_equals(resp.status_code, 302)
location = furl.furl(resp.location)
assert_urls_equal(location.url, file_node.generate_waterbutler_url(action='download', direct=None))
@mock.patch('website.addons.base.views.addon_view_file')
def test_action_view_calls_view_file(self, mock_view_file):
self.user.reload()
self.project.reload()
file_node = self.get_test_file()
guid = file_node.get_guid(create=True)
mock_view_file.return_value = self.get_mako_return()
self.app.get('/{}/?action=view'.format(guid._id), auth=self.user.auth)
args, kwargs = mock_view_file.call_args
assert_equals(kwargs, {})
assert_equals(args[0].user, self.user)
assert_equals(args[1], self.project)
assert_equals(args[2], file_node)
assert_true(isinstance(args[3], file_node.touch(None).__class__))
@mock.patch('website.addons.base.views.addon_view_file')
def test_no_action_calls_view_file(self, mock_view_file):
self.user.reload()
self.project.reload()
file_node = self.get_test_file()
guid = file_node.get_guid(create=True)
mock_view_file.return_value = self.get_mako_return()
self.app.get('/{}/'.format(guid._id), auth=self.user.auth)
args, kwargs = mock_view_file.call_args
assert_equals(kwargs, {})
assert_equals(args[0].user, self.user)
assert_equals(args[1], self.project)
assert_equals(args[2], file_node)
assert_true(isinstance(args[3], file_node.touch(None).__class__))
def test_download_create_guid(self):
file_node = self.get_test_file()
assert_is(file_node.get_guid(), None)
self.app.get(
self.project.web_url_for(
'addon_view_or_download_file',
path=file_node.path.strip('/'),
provider='github',
),
auth=self.user.auth
)
assert_true(file_node.get_guid())
def test_unauthorized_addons_raise(self):
path = 'cloudfiles'
self.node_addon.user_settings = None
self.node_addon.save()
resp = self.app.get(
self.project.web_url_for(
'addon_view_or_download_file',
path=path,
provider='github',
action='download'
),
auth=self.user.auth,
expect_errors=True
)
assert_equals(resp.status_code, 401)
def test_nonstorage_addons_raise(self):
resp = self.app.get(
self.project.web_url_for(
'addon_view_or_download_file',
path='sillywiki',
provider='wiki',
action='download'
),
auth=self.user.auth,
expect_errors=True
)
assert_equals(resp.status_code, 400)
def test_head_returns_url(self):
file_node = self.get_test_file()
guid = file_node.get_guid(create=True)
resp = self.app.head('/{}/'.format(guid._id), auth=self.user.auth)
location = furl.furl(resp.location)
assert_urls_equal(location.url, file_node.generate_waterbutler_url(direct=None))
def test_nonexistent_addons_raise(self):
path = 'cloudfiles'
self.project.delete_addon('github', Auth(self.user))
self.project.save()
resp = self.app.get(
self.project.web_url_for(
'addon_view_or_download_file',
path=path,
provider='github',
action='download'
),
auth=self.user.auth,
expect_errors=True
)
assert_equals(resp.status_code, 400)
def test_unauth_addons_raise(self):
path = 'cloudfiles'
self.node_addon.user_settings = None
self.node_addon.save()
resp = self.app.get(
self.project.web_url_for(
'addon_view_or_download_file',
path=path,
provider='github',
action='download'
),
auth=self.user.auth,
expect_errors=True
)
assert_equals(resp.status_code, 401)
class TestLegacyViews(OsfTestCase):
def setUp(self):
super(TestLegacyViews, self).setUp()
self.path = 'mercury.png'
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user)
self.node_addon = self.project.get_addon('osfstorage')
file_record = self.node_addon.get_root().append_file(self.path)
self.expected_path = file_record._id
self.node_addon.save()
file_record.save()
def test_view_file_redirect(self):
url = '/{0}/osffiles/{1}/'.format(self.project._id, self.path)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 301)
expected_url = self.project.web_url_for(
'addon_view_or_download_file',
action='view',
path=self.expected_path,
provider='osfstorage',
)
assert_urls_equal(res.location, expected_url)
def test_download_file_redirect(self):
url = '/{0}/osffiles/{1}/download/'.format(self.project._id, self.path)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 301)
expected_url = self.project.web_url_for(
'addon_view_or_download_file',
path=self.expected_path,
action='download',
provider='osfstorage',
)
assert_urls_equal(res.location, expected_url)
def test_download_file_version_redirect(self):
url = '/{0}/osffiles/{1}/version/3/download/'.format(
self.project._id,
self.path,
)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 301)
expected_url = self.project.web_url_for(
'addon_view_or_download_file',
version=3,
path=self.expected_path,
action='download',
provider='osfstorage',
)
assert_urls_equal(res.location, expected_url)
def test_api_download_file_redirect(self):
url = '/api/v1/project/{0}/osffiles/{1}/'.format(self.project._id, self.path)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 301)
expected_url = self.project.web_url_for(
'addon_view_or_download_file',
path=self.expected_path,
action='download',
provider='osfstorage',
)
assert_urls_equal(res.location, expected_url)
def test_api_download_file_version_redirect(self):
url = '/api/v1/project/{0}/osffiles/{1}/version/3/'.format(
self.project._id,
self.path,
)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 301)
expected_url = self.project.web_url_for(
'addon_view_or_download_file',
version=3,
path=self.expected_path,
action='download',
provider='osfstorage',
)
assert_urls_equal(res.location, expected_url)
def test_no_provider_name(self):
url = '/{0}/files/{1}'.format(
self.project._id,
self.path,
)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 301)
expected_url = self.project.web_url_for(
'addon_view_or_download_file',
action='view',
path=self.expected_path,
provider='osfstorage',
)
assert_urls_equal(res.location, expected_url)
def test_action_as_param(self):
url = '/{}/osfstorage/files/{}/?action=download'.format(
self.project._id,
self.path,
)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 301)
expected_url = self.project.web_url_for(
'addon_view_or_download_file',
path=self.expected_path,
action='download',
provider='osfstorage',
)
assert_urls_equal(res.location, expected_url)
def test_other_addon_redirect(self):
url = '/project/{0}/mycooladdon/files/{1}/'.format(
self.project._id,
self.path,
)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 301)
expected_url = self.project.web_url_for(
'addon_view_or_download_file',
action='view',
path=self.path,
provider='mycooladdon',
)
assert_urls_equal(res.location, expected_url)
def test_other_addon_redirect_download(self):
url = '/project/{0}/mycooladdon/files/{1}/download/'.format(
self.project._id,
self.path,
)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 301)
expected_url = self.project.web_url_for(
'addon_view_or_download_file',
path=self.path,
action='download',
provider='mycooladdon',
)
assert_urls_equal(res.location, expected_url)