#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import datetime
import time
import copy
import argparse
import json
import ast
import base64
from functools import wraps
from decimal import Decimal
import util
from util import print_msg, format_satoshis, print_stderr
import bitcoin
from bitcoin import is_address, hash_160, COIN, TYPE_ADDRESS
from transaction import Transaction
import paymentrequest
from paymentrequest import PR_PAID, PR_UNPAID, PR_UNKNOWN, PR_EXPIRED
import contacts
known_commands = {}
class Command:
def __init__(self, func, s):
self.name = func.__name__
self.requires_network = 'n' in s
self.requires_wallet = 'w' in s
self.requires_password = 'p' in s
self.description = func.__doc__
self.help = self.description.split('.')[0] if self.description else None
varnames = func.func_code.co_varnames[1:func.func_code.co_argcount]
self.defaults = func.func_defaults
if self.defaults:
n = len(self.defaults)
self.params = list(varnames[:-n])
self.options = list(varnames[-n:])
else:
self.params = list(varnames)
self.options = []
self.defaults = []
def command(s):
def decorator(func):
global known_commands
name = func.__name__
known_commands[name] = Command(func, s)
@wraps(func)
def func_wrapper(*args, **kwargs):
return func(*args, **kwargs)
return func_wrapper
return decorator
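# Illustrative note (not from the original docs): the string passed to @command
# encodes what a command needs -- 'w' requires a wallet, 'n' the network and
# 'p' the wallet password (see Command.__init__ above).  A hypothetical command
# that signs data with a wallet key would therefore be declared roughly as:
#
#   @command('wp')
#   def examplesign(self, address, message):
#       """Sign 'message' with the key of 'address'."""
#       return self.wallet.sign_message(address, message, self._password)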
class Commands:
def __init__(self, config, wallet, network, callback = None, password=None, new_password=None):
self.config = config
self.wallet = wallet
self.network = network
self._callback = callback
self._password = password
self.new_password = new_password
self.contacts = contacts.Contacts(self.config)
def _run(self, method, args, password_getter):
cmd = known_commands[method]
if cmd.requires_password and self.wallet.has_password():
            self._password = password_getter()
if self._password is None:
return
f = getattr(self, method)
result = f(*args)
self._password = None
if self._callback:
            self._callback()
return result
@command('')
def commands(self):
"""List of commands"""
return ' '.join(sorted(known_commands.keys()))
@command('')
def create(self):
"""Create a new wallet"""
raise BaseException('Not a JSON-RPC command')
@command('wn')
def restore(self, text):
"""Restore a wallet from text. Text can be a seed phrase, a master
public key, a master private key, a list of bitcoin addresses
or bitcoin private keys. If you want to be prompted for your
seed, type '?' or ':' (concealed) """
raise BaseException('Not a JSON-RPC command')
@command('wp')
def password(self):
"""Change wallet password. """
self.wallet.update_password(self._password, self.new_password)
self.wallet.storage.write()
return {'password':self.wallet.use_encryption}
@command('')
def getconfig(self, key):
"""Return a configuration variable. """
return self.config.get(key)
@command('')
def setconfig(self, key, value):
"""Set a configuration variable. 'value' may be a string or a Python expression."""
try:
value = ast.literal_eval(value)
except:
pass
self.config.set_key(key, value)
return True
@command('')
def make_seed(self, nbits=128, entropy=1, language=None):
"""Create a seed"""
from mnemonic import Mnemonic
s = Mnemonic(language).make_seed(nbits, custom_entropy=entropy)
return s.encode('utf8')
@command('')
def check_seed(self, seed, entropy=1, language=None):
"""Check that a seed was generated with given entropy"""
from mnemonic import Mnemonic
return Mnemonic(language).check_seed(seed, entropy)
@command('n')
def getaddresshistory(self, address):
"""Return the transaction history of any address. Note: This is a
        walletless server query; results are not checked by SPV.
"""
return self.network.synchronous_get(('blockchain.address.get_history', [address]))
@command('w')
def listunspent(self):
"""List unspent outputs. Returns the list of unspent transaction
outputs in your wallet."""
l = copy.deepcopy(self.wallet.get_spendable_coins(exclude_frozen = False))
for i in l:
v = i["value"]
i["value"] = float(v)/COIN if v is not None else None
return l
@command('n')
def getaddressunspent(self, address):
"""Returns the UTXO list of any address. Note: This
        is a walletless server query; results are not checked by SPV.
"""
return self.network.synchronous_get(('blockchain.address.listunspent', [address]))
@command('n')
def getutxoaddress(self, txid, pos):
"""Get the address of a UTXO. Note: This is a walletless server query, results are
not checked by SPV.
"""
r = self.network.synchronous_get(('blockchain.utxo.get_address', [txid, pos]))
return {'address': r}
@command('')
def serialize(self, jsontx):
"""Create a transaction from json inputs. Inputs must have a redeemPubkey. Outputs must be a list of (address, value).
"""
keypairs = {}
inputs = jsontx.get('inputs')
outputs = jsontx.get('outputs')
locktime = jsontx.get('locktime', 0)
for txin in inputs:
if txin.get('output'):
prevout_hash, prevout_n = txin['output'].split(':')
txin['prevout_n'] = int(prevout_n)
txin['prevout_hash'] = prevout_hash
else:
raise BaseException('Output point missing', txin)
if txin.get('redeemPubkey'):
pubkey = txin['redeemPubkey']
txin['pubkeys'] = [pubkey]
txin['x_pubkeys'] = [pubkey]
txin['signatures'] = [None]
txin['num_sig'] = 1
privkey = txin.get('privkey')
if privkey:
keypairs[pubkey] = privkey
elif txin.get('redeemScript'):
raise BaseException('Not implemented')
else:
raise BaseException('No redeem script')
outputs = map(lambda x: (TYPE_ADDRESS, x[0], int(COIN*Decimal(x[1]))), outputs)
tx = Transaction.from_io(inputs, outputs, locktime=locktime)
tx.sign(keypairs)
return tx.as_dict()
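    # Illustrative input shape for serialize() -- the field values below are
    # placeholders, not real transactions or keys:
    #
    #   {"inputs": [{"output": "<prevout_txid>:<output_index>",
    #                "redeemPubkey": "<hex pubkey>",
    #                "privkey": "<optional private key>"}],
    #    "outputs": [["<address>", 0.001]],
    #    "locktime": 0}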
@command('wp')
def signtransaction(self, tx, privkey=None):
"""Sign a transaction. The wallet keys will be used unless a private key is provided."""
tx = Transaction(tx)
if privkey:
pubkey = bitcoin.public_key_from_private_key(privkey)
h160 = bitcoin.hash_160(pubkey.decode('hex'))
x_pubkey = 'fd' + (chr(0) + h160).encode('hex')
tx.sign({x_pubkey:privkey})
else:
self.wallet.sign_transaction(tx, self._password)
return tx.as_dict()
@command('')
def deserialize(self, tx):
"""Deserialize a serialized transaction"""
tx = Transaction(tx)
return tx.deserialize()
@command('n')
def broadcast(self, tx, timeout=30):
"""Broadcast a transaction to the network. """
tx = Transaction(tx)
return self.network.broadcast(tx, timeout)
@command('')
def createmultisig(self, num, pubkeys):
"""Create multisig address"""
assert isinstance(pubkeys, list), (type(num), type(pubkeys))
redeem_script = Transaction.multisig_script(pubkeys, num)
address = bitcoin.hash160_to_p2sh(hash_160(redeem_script.decode('hex')))
return {'address':address, 'redeemScript':redeem_script}
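    # Usage sketch (placeholder keys): createmultisig(2, ["<pubkey1>", "<pubkey2>",
    # "<pubkey3>"]) returns the 2-of-3 P2SH address together with its redeem script.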
@command('w')
def freeze(self, address):
"""Freeze address. Freeze the funds at one of your wallet\'s addresses"""
return self.wallet.set_frozen_state([address], True)
@command('w')
def unfreeze(self, address):
"""Unfreeze address. Unfreeze the funds at one of your wallet\'s address"""
return self.wallet.set_frozen_state([address], False)
@command('wp')
def getprivatekeys(self, address):
"""Get private keys of addresses. You may pass a single wallet address, or a list of wallet addresses."""
if is_address(address):
return self.wallet.get_private_key(address, self._password)
domain = json_loads(address)
return [self.wallet.get_private_key(address, self._password) for address in domain]
@command('w')
def ismine(self, address):
"""Check if address is in wallet. Return true if and only address is in wallet"""
return self.wallet.is_mine(address)
@command('')
def dumpprivkeys(self):
"""Deprecated."""
return "This command is deprecated. Use a pipe instead: 'electrum listaddresses | electrum getprivatekeys - '"
@command('')
def validateaddress(self, address):
"""Check that an address is valid. """
return is_address(address)
@command('w')
def getpubkeys(self, address):
"""Return the public keys for a wallet address. """
return self.wallet.get_public_keys(address)
@command('w')
def getbalance(self):
"""Return the balance of your wallet. """
c, u, x = self.wallet.get_balance()
out = {"confirmed": str(Decimal(c)/COIN)}
if u:
out["unconfirmed"] = str(Decimal(u)/COIN)
if x:
out["unmatured"] = str(Decimal(x)/COIN)
return out
@command('n')
def getaddressbalance(self, address):
"""Return the balance of any address. Note: This is a walletless
        server query; results are not checked by SPV.
"""
out = self.network.synchronous_get(('blockchain.address.get_balance', [address]))
out["confirmed"] = str(Decimal(out["confirmed"])/COIN)
out["unconfirmed"] = str(Decimal(out["unconfirmed"])/COIN)
return out
@command('n')
def getproof(self, address):
"""Get Merkle branch of an address in the UTXO set"""
p = self.network.synchronous_get(('blockchain.address.get_proof', [address]))
out = []
for i,s in p:
out.append(i)
return out
@command('n')
def getmerkle(self, txid, height):
"""Get Merkle branch of a transaction included in a block. Electrum
uses this to verify transactions (Simple Payment Verification)."""
return self.network.synchronous_get(('blockchain.transaction.get_merkle', [txid, int(height)]))
@command('n')
def getservers(self):
"""Return the list of available servers"""
return self.network.get_servers()
@command('')
def version(self):
"""Return the version of electrum."""
from version import ELECTRUM_VERSION
return ELECTRUM_VERSION
@command('w')
def getmpk(self):
"""Get master public key. Return your wallet\'s master public key"""
return self.wallet.get_master_public_key()
@command('wp')
def getmasterprivate(self):
"""Get master private key. Return your wallet\'s master private key"""
return str(self.wallet.keystore.get_master_private_key(self._password))
@command('wp')
def getseed(self):
"""Get seed phrase. Print the generation seed of your wallet."""
s = self.wallet.get_seed(self._password)
return s.encode('utf8')
@command('wp')
def importprivkey(self, privkey):
"""Import a private key. """
try:
addr = self.wallet.import_key(privkey, self._password)
out = "Keypair imported: " + addr
except BaseException as e:
out = "Error: " + str(e)
return out
def _resolver(self, x):
if x is None:
return None
out = self.contacts.resolve(x)
if out.get('type') == 'openalias' and self.nocheck is False and out.get('validated') is False:
raise BaseException('cannot verify alias', x)
return out['address']
@command('nw')
def sweep(self, privkey, destination, tx_fee=None, nocheck=False, imax=100):
"""Sweep private keys. Returns a transaction that spends UTXOs from
privkey to a destination address. The transaction is not
broadcasted."""
privkeys = privkey if type(privkey) is list else [privkey]
self.nocheck = nocheck
dest = self._resolver(destination)
tx = self.wallet.sweep(privkeys, self.network, self.config, dest, tx_fee, imax)
return tx.as_dict() if tx else None
@command('wp')
def signmessage(self, address, message):
"""Sign a message with a key. Use quotes if your message contains
whitespaces"""
sig = self.wallet.sign_message(address, message, self._password)
return base64.b64encode(sig)
@command('')
def verifymessage(self, address, signature, message):
"""Verify a signature."""
sig = base64.b64decode(signature)
return bitcoin.verify_message(address, sig, message)
def _mktx(self, outputs, fee, change_addr, domain, nocheck, unsigned, rbf):
self.nocheck = nocheck
change_addr = self._resolver(change_addr)
domain = None if domain is None else map(self._resolver, domain)
final_outputs = []
for address, amount in outputs:
address = self._resolver(address)
if amount != '!':
amount = int(COIN*Decimal(amount))
final_outputs.append((TYPE_ADDRESS, address, amount))
coins = self.wallet.get_spendable_coins(domain)
tx = self.wallet.make_unsigned_transaction(coins, final_outputs, self.config, fee, change_addr)
if rbf:
tx.set_sequence(0)
if not unsigned:
self.wallet.sign_transaction(tx, self._password)
return tx
@command('wp')
def payto(self, destination, amount, tx_fee=None, from_addr=None, change_addr=None, nocheck=False, unsigned=False, rbf=False):
"""Create a transaction. """
domain = [from_addr] if from_addr else None
tx = self._mktx([(destination, amount)], tx_fee, change_addr, domain, nocheck, unsigned, rbf)
return tx.as_dict()
@command('wp')
def paytomany(self, outputs, tx_fee=None, from_addr=None, change_addr=None, nocheck=False, unsigned=False, rbf=False):
"""Create a multi-output transaction. """
domain = [from_addr] if from_addr else None
tx = self._mktx(outputs, tx_fee, change_addr, domain, nocheck, unsigned, rbf)
return tx.as_dict()
@command('w')
def history(self):
"""Wallet history. Returns the transaction history of your wallet."""
balance = 0
out = []
for item in self.wallet.get_history():
tx_hash, height, conf, timestamp, value, balance = item
if timestamp:
date = datetime.datetime.fromtimestamp(timestamp).isoformat(' ')[:-3]
else:
date = "----"
label = self.wallet.get_label(tx_hash)
out.append({
'txid': tx_hash,
'timestamp': timestamp,
'date': date,
'label': label,
'value': float(value)/COIN if value is not None else None,
'height': height,
'confirmations': conf
})
return out
@command('w')
def setlabel(self, key, label):
"""Assign a label to an item. Item may be a bitcoin address or a
transaction ID"""
self.wallet.set_label(key, label)
@command('')
def listcontacts(self):
"""Show your list of contacts"""
return self.contacts
@command('')
def getalias(self, key):
"""Retrieve alias. Lookup in your list of contacts, and for an OpenAlias DNS record."""
return self.contacts.resolve(key)
@command('')
def searchcontacts(self, query):
"""Search through contacts, return matching entries. """
results = {}
for key, value in self.contacts.items():
if query.lower() in key.lower():
results[key] = value
return results
@command('w')
def listaddresses(self, receiving=False, change=False, show_labels=False, frozen=False, unused=False, funded=False, show_balance=False):
"""List wallet addresses. Returns the list of all addresses in your wallet. Use optional arguments to filter the results."""
out = []
for addr in self.wallet.get_addresses():
if frozen and not self.wallet.is_frozen(addr):
continue
if receiving and self.wallet.is_change(addr):
continue
if change and not self.wallet.is_change(addr):
continue
if unused and self.wallet.is_used(addr):
continue
if funded and self.wallet.is_empty(addr):
continue
item = addr
if show_balance:
item += ", "+ format_satoshis(sum(self.wallet.get_addr_balance(addr)))
if show_labels:
item += ', ' + repr(self.wallet.labels.get(addr, ''))
out.append(item)
return out
@command('w')
def gettransaction(self, txid):
"""Retrieve a transaction. """
tx = self.wallet.transactions.get(txid) if self.wallet else None
if tx is None and self.network:
raw = self.network.synchronous_get(('blockchain.transaction.get', [txid]))
if raw:
tx = Transaction(raw)
else:
raise BaseException("Unknown transaction")
return tx.as_dict()
@command('')
def encrypt(self, pubkey, message):
"""Encrypt a message with a public key. Use quotes if the message contains whitespaces."""
return bitcoin.encrypt_message(message, pubkey)
@command('wp')
def decrypt(self, pubkey, encrypted):
"""Decrypt a message encrypted with a public key."""
return self.wallet.decrypt_message(pubkey, encrypted, self._password)
def _format_request(self, out):
pr_str = {
PR_UNKNOWN: 'Unknown',
PR_UNPAID: 'Pending',
PR_PAID: 'Paid',
PR_EXPIRED: 'Expired',
}
out['amount (BTC)'] = format_satoshis(out.get('amount'))
out['status'] = pr_str[out.get('status', PR_UNKNOWN)]
return out
@command('w')
def getrequest(self, key):
"""Return a payment request"""
r = self.wallet.get_payment_request(key, self.config)
if not r:
raise BaseException("Request not found")
return self._format_request(r)
#@command('w')
#def ackrequest(self, serialized):
# """<Not implemented>"""
# pass
@command('w')
def listrequests(self, pending=False, expired=False, paid=False):
"""List the payment requests you made."""
out = self.wallet.get_sorted_requests(self.config)
if pending:
f = PR_UNPAID
elif expired:
f = PR_EXPIRED
elif paid:
f = PR_PAID
else:
f = None
if f is not None:
out = filter(lambda x: x.get('status')==f, out)
return map(self._format_request, out)
@command('w')
def addrequest(self, amount, memo='', expiration=None, force=False):
"""Create a payment request."""
addr = self.wallet.get_unused_address()
if addr is None:
if force:
addr = self.wallet.create_new_address(False)
else:
return False
amount = int(COIN*Decimal(amount))
expiration = int(expiration) if expiration else None
req = self.wallet.make_payment_request(addr, amount, memo, expiration)
self.wallet.add_payment_request(req, self.config)
out = self.wallet.get_payment_request(addr, self.config)
return self._format_request(out)
@command('wp')
def signrequest(self, address):
"Sign payment request with an OpenAlias"
alias = self.config.get('alias')
if not alias:
raise BaseException('No alias in your configuration')
alias_addr = self.contacts.resolve(alias)['address']
self.wallet.sign_payment_request(address, alias, alias_addr, self._password)
@command('w')
def rmrequest(self, address):
"""Remove a payment request"""
return self.wallet.remove_payment_request(address, self.config)
@command('w')
def clearrequests(self):
"""Remove all payment requests"""
for k in self.wallet.receive_requests.keys():
self.wallet.remove_payment_request(k, self.config)
@command('n')
def notify(self, address, URL):
"""Watch an address. Everytime the address changes, a http POST is sent to the URL."""
def callback(x):
import urllib2
headers = {'content-type':'application/json'}
data = {'address':address, 'status':x.get('result')}
try:
req = urllib2.Request(URL, json.dumps(data), headers)
response_stream = urllib2.urlopen(req)
util.print_error('Got Response for %s' % address)
except BaseException as e:
util.print_error(str(e))
self.network.send([('blockchain.address.subscribe', [address])], callback)
return True
@command('wn')
def is_synchronized(self):
""" return wallet synchronization status """
return self.wallet.is_up_to_date()
@command('')
def help(self):
# for the python console
return sorted(known_commands.keys())
param_descriptions = {
'privkey': 'Private key. Type \'?\' to get a prompt.',
'destination': 'Bitcoin address, contact or alias',
'address': 'Bitcoin address',
'seed': 'Seed phrase',
'txid': 'Transaction ID',
'pos': 'Position',
'height': 'Block height',
'tx': 'Serialized transaction (hexadecimal)',
'key': 'Variable name',
'pubkey': 'Public key',
'message': 'Clear text message. Use quotes if it contains spaces.',
'encrypted': 'Encrypted message',
'amount': 'Amount to be sent (in BTC). Type \'!\' to send the maximum available.',
'requested_amount': 'Requested amount (in BTC).',
'outputs': 'list of ["address", amount]',
}
command_options = {
'password': ("-W", "--password", "Password"),
'receiving': (None, "--receiving", "Show only receiving addresses"),
'change': (None, "--change", "Show only change addresses"),
'frozen': (None, "--frozen", "Show only frozen addresses"),
'unused': (None, "--unused", "Show only unused addresses"),
'funded': (None, "--funded", "Show only funded addresses"),
'show_balance':("-b", "--balance", "Show the balances of listed addresses"),
'show_labels': ("-l", "--labels", "Show the labels of listed addresses"),
'nocheck': (None, "--nocheck", "Do not verify aliases"),
'imax': (None, "--imax", "Maximum number of inputs"),
'tx_fee': ("-f", "--fee", "Transaction fee (in BTC)"),
'from_addr': ("-F", "--from", "Source address. If it isn't in the wallet, it will ask for the private key unless supplied in the format public_key:private_key. It's not saved in the wallet."),
'change_addr': ("-c", "--change", "Change address. Default is a spare address, or the source address if it's not in the wallet"),
'nbits': (None, "--nbits", "Number of bits of entropy"),
'entropy': (None, "--entropy", "Custom entropy"),
'language': ("-L", "--lang", "Default language for wordlist"),
'gap_limit': ("-G", "--gap", "Gap limit"),
'privkey': (None, "--privkey", "Private key. Set to '?' to get a prompt."),
'unsigned': ("-u", "--unsigned", "Do not sign transaction"),
'rbf': (None, "--rbf", "Replace-by-fee transaction"),
'domain': ("-D", "--domain", "List of addresses"),
'memo': ("-m", "--memo", "Description of the request"),
'expiration': (None, "--expiration", "Time in seconds"),
'timeout': (None, "--timeout", "Timeout in seconds"),
'force': (None, "--force", "Create new address beyong gap limit, if no more address is available."),
'pending': (None, "--pending", "Show only pending requests."),
'expired': (None, "--expired", "Show only expired requests."),
'paid': (None, "--paid", "Show only paid requests."),
}
# don't use floats because of rounding errors
from transaction import tx_from_str
json_loads = lambda x: json.loads(x, parse_float=lambda x: str(Decimal(x)))
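# Example of the Decimal-preserving parser above: json_loads('{"amount": 0.1}')
# maps the float token 0.1 to the string '0.1' (via Decimal) instead of a
# binary float, avoiding rounding errors.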
arg_types = {
'num': int,
'nbits': int,
'imax': int,
'entropy': long,
'tx': tx_from_str,
'pubkeys': json_loads,
'jsontx': json_loads,
'inputs': json_loads,
'outputs': json_loads,
'tx_fee': lambda x: int(COIN*Decimal(x)) if x is not None else None,
'amount': lambda x: str(Decimal(x)) if x!='!' else '!',
}
config_variables = {
'addrequest': {
'requests_dir': 'directory where a bip70 file will be written.',
'ssl_privkey': 'Path to your SSL private key, needed to sign the request.',
'ssl_chain': 'Chain of SSL certificates, needed for signed requests. Put your certificate at the top and the root CA at the end',
'url_rewrite': 'Parameters passed to str.replace(), in order to create the r= part of myriadcoin: URIs. Example: \"(\'file:///var/www/\',\'https://electrum.org/\')\"',
},
'listrequests':{
'url_rewrite': 'Parameters passed to str.replace(), in order to create the r= part of myriadcoin: URIs. Example: \"(\'file:///var/www/\',\'https://electrum.org/\')\"',
}
}
def set_default_subparser(self, name, args=None):
"""see http://stackoverflow.com/questions/5176691/argparse-how-to-specify-a-default-subcommand"""
subparser_found = False
for arg in sys.argv[1:]:
if arg in ['-h', '--help']: # global help if no subparser
break
else:
for x in self._subparsers._actions:
if not isinstance(x, argparse._SubParsersAction):
continue
for sp_name in x._name_parser_map.keys():
if sp_name in sys.argv[1:]:
subparser_found = True
if not subparser_found:
# insert default in first position, this implies no
# global options without a sub_parsers specified
if args is None:
sys.argv.insert(1, name)
else:
args.insert(0, name)
argparse.ArgumentParser.set_default_subparser = set_default_subparser
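# Effect of the monkey-patch above (illustrative): a bare `electrum` or
# `electrum -v` invocation is rewritten to `electrum gui` / `electrum gui -v`,
# because get_parser() below registers 'gui' as the default subcommand.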
def add_network_options(parser):
parser.add_argument("-1", "--oneserver", action="store_true", dest="oneserver", default=False, help="connect to one server only")
parser.add_argument("-s", "--server", dest="server", default=None, help="set server host:port:protocol, where protocol is either t (tcp) or s (ssl)")
parser.add_argument("-p", "--proxy", dest="proxy", default=None, help="set proxy [type:]host[:port], where type is socks4,socks5 or http")
from util import profiler
@profiler
def get_parser():
# parent parser, because set_default_subparser removes global options
parent_parser = argparse.ArgumentParser('parent', add_help=False)
group = parent_parser.add_argument_group('global options')
group.add_argument("-v", "--verbose", action="store_true", dest="verbose", default=False, help="Show debugging information")
group.add_argument("-D", "--dir", dest="electrum_path", help="electrum directory")
group.add_argument("-P", "--portable", action="store_true", dest="portable", default=False, help="Use local 'electrum_data' directory")
group.add_argument("-w", "--wallet", dest="wallet_path", help="wallet path")
group.add_argument("--testnet", action="store_true", dest="testnet", default=False, help="Use Testnet")
# create main parser
parser = argparse.ArgumentParser(
parents=[parent_parser],
epilog="Run 'electrum help <command>' to see the help for a command")
subparsers = parser.add_subparsers(dest='cmd', metavar='<command>')
# gui
parser_gui = subparsers.add_parser('gui', parents=[parent_parser], description="Run Electrum's Graphical User Interface.", help="Run GUI (default)")
parser_gui.add_argument("url", nargs='?', default=None, help="myriadcoin URI (or bip70 file)")
#parser_gui.set_defaults(func=run_gui)
parser_gui.add_argument("-g", "--gui", dest="gui", help="select graphical user interface", choices=['qt', 'kivy', 'text', 'stdio'])
parser_gui.add_argument("-o", "--offline", action="store_true", dest="offline", default=False, help="Run offline")
parser_gui.add_argument("-m", action="store_true", dest="hide_gui", default=False, help="hide GUI on startup")
parser_gui.add_argument("-L", "--lang", dest="language", default=None, help="default language used in GUI")
add_network_options(parser_gui)
# daemon
parser_daemon = subparsers.add_parser('daemon', parents=[parent_parser], help="Run Daemon")
parser_daemon.add_argument("subcommand", choices=['start', 'status', 'stop'], nargs='?')
#parser_daemon.set_defaults(func=run_daemon)
add_network_options(parser_daemon)
# commands
for cmdname in sorted(known_commands.keys()):
cmd = known_commands[cmdname]
p = subparsers.add_parser(cmdname, parents=[parent_parser], help=cmd.help, description=cmd.description)
if cmdname == 'restore':
p.add_argument("-o", "--offline", action="store_true", dest="offline", default=False, help="Run offline")
#p.set_defaults(func=run_cmdline)
if cmd.requires_password:
p.add_argument("-W", "--password", dest="password", default=None, help="password")
for optname, default in zip(cmd.options, cmd.defaults):
a, b, help = command_options[optname]
action = "store_true" if type(default) is bool else 'store'
args = (a, b) if a else (b,)
if action == 'store':
_type = arg_types.get(optname, str)
p.add_argument(*args, dest=optname, action=action, default=default, help=help, type=_type)
else:
p.add_argument(*args, dest=optname, action=action, default=default, help=help)
for param in cmd.params:
h = param_descriptions.get(param, '')
_type = arg_types.get(param, str)
p.add_argument(param, help=h, type=_type)
cvh = config_variables.get(cmdname)
if cvh:
group = p.add_argument_group('configuration variables', '(set with setconfig/getconfig)')
for k, v in cvh.items():
group.add_argument(k, nargs='?', help=v)
# 'gui' is the default command
parser.set_default_subparser('gui')
return parser
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import os
from StringIO import StringIO
import appengine_blobstore as blobstore
from appengine_url_fetcher import AppEngineUrlFetcher
from appengine_wrappers import GetAppVersion, urlfetch
from file_system import FileSystem, StatInfo
from future import Future
from object_store_creator import ObjectStoreCreator
import url_constants
from zipfile import ZipFile, BadZipfile
ZIP_KEY = 'zipball'
USERNAME = None
PASSWORD = None
def _MakeBlobstoreKey(version):
return ZIP_KEY + '.' + str(version)
class _AsyncFetchFutureZip(object):
def __init__(self,
fetcher,
username,
password,
blobstore,
key_to_set,
key_to_delete=None):
self._fetcher = fetcher
self._fetch = fetcher.FetchAsync(ZIP_KEY,
username=username,
password=password)
self._blobstore = blobstore
self._key_to_set = key_to_set
self._key_to_delete = key_to_delete
def Get(self):
try:
result = self._fetch.Get()
# Check if Github authentication failed.
if result.status_code == 401:
logging.error('Github authentication failed for %s, falling back to '
'unauthenticated.' % USERNAME)
blob = self._fetcher.Fetch(ZIP_KEY).content
else:
blob = result.content
except urlfetch.DownloadError as e:
logging.error('Bad github zip file: %s' % e)
return None
if self._key_to_delete is not None:
self._blobstore.Delete(_MakeBlobstoreKey(self._key_to_delete),
blobstore.BLOBSTORE_GITHUB)
try:
return_zip = ZipFile(StringIO(blob))
except BadZipfile as e:
logging.error('Bad github zip file: %s' % e)
return None
self._blobstore.Set(_MakeBlobstoreKey(self._key_to_set),
blob,
blobstore.BLOBSTORE_GITHUB)
return return_zip
class GithubFileSystem(FileSystem):
@staticmethod
def Create(object_store_creator):
return GithubFileSystem(
AppEngineUrlFetcher(url_constants.GITHUB_URL),
blobstore.AppEngineBlobstore(),
object_store_creator)
def __init__(self, fetcher, blobstore, object_store_creator):
# Password store doesn't depend on channel, and if we don't cancel the app
# version then the whole advantage of having it in the first place is
# greatly lessened (likewise it should always start populated).
password_store = object_store_creator.Create(
GithubFileSystem,
channel=None,
app_version=None,
category='password',
start_empty=False)
if USERNAME is None:
password_data = password_store.GetMulti(('username', 'password')).Get()
self._username, self._password = (password_data.get('username'),
password_data.get('password'))
else:
password_store.SetMulti({'username': USERNAME, 'password': PASSWORD})
self._username, self._password = (USERNAME, PASSWORD)
self._fetcher = fetcher
self._blobstore = blobstore
# Github has no knowledge of Chrome channels, set channel to None.
self._stat_object_store = object_store_creator.Create(
GithubFileSystem,
channel=None)
self._version = None
self._GetZip(self.Stat(ZIP_KEY).version)
def _GetZip(self, version):
blob = self._blobstore.Get(_MakeBlobstoreKey(version),
blobstore.BLOBSTORE_GITHUB)
if blob is not None:
try:
self._zip_file = Future(value=ZipFile(StringIO(blob)))
except BadZipfile as e:
self._blobstore.Delete(_MakeBlobstoreKey(version),
blobstore.BLOBSTORE_GITHUB)
logging.error('Bad github zip file: %s' % e)
self._zip_file = Future(value=None)
else:
self._zip_file = Future(
delegate=_AsyncFetchFutureZip(self._fetcher,
self._username,
self._password,
self._blobstore,
version,
key_to_delete=self._version))
self._version = version
def _ReadFile(self, path):
try:
zip_file = self._zip_file.Get()
except Exception as e:
logging.error('Github ReadFile error: %s' % e)
return ''
if zip_file is None:
logging.error('Bad github zip file.')
return ''
prefix = zip_file.namelist()[0][:-1]
return zip_file.read(prefix + path)
def _ListDir(self, path):
try:
zip_file = self._zip_file.Get()
except Exception as e:
logging.error('Github ListDir error: %s' % e)
return []
if zip_file is None:
logging.error('Bad github zip file.')
return []
filenames = zip_file.namelist()
# Take out parent directory name (GoogleChrome-chrome-app-samples-c78a30f)
filenames = [f[len(filenames[0]) - 1:] for f in filenames]
# Remove the path of the directory we're listing from the filenames.
filenames = [f[len(path):] for f in filenames
if f != path and f.startswith(path)]
# Remove all files not directly in this directory.
return [f for f in filenames if f[:-1].count('/') == 0]
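  # Illustrative walk-through with a hypothetical archive whose first namelist()
  # entry is 'owner-repo-c78a30f/': the member 'owner-repo-c78a30f/docs/a.html'
  # is first rewritten to '/docs/a.html'; _ListDir('/docs/') then strips the
  # path prefix and keeps only direct children such as 'a.html'.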
def Read(self, paths, binary=False):
version = self.Stat(ZIP_KEY).version
if version != self._version:
self._GetZip(version)
result = {}
for path in paths:
if path.endswith('/'):
result[path] = self._ListDir(path)
else:
result[path] = self._ReadFile(path)
return Future(value=result)
def _DefaultStat(self, path):
version = 0
# TODO(kalman): we should replace all of this by wrapping the
# GithubFileSystem in a CachingFileSystem. A lot of work has been put into
# CFS to be robust, and GFS is missing out.
# For example: the following line is wrong, but it could be moot.
self._stat_object_store.Set(path, version)
return StatInfo(version)
def Stat(self, path):
version = self._stat_object_store.Get(path).Get()
if version is not None:
return StatInfo(version)
try:
result = self._fetcher.Fetch('commits/HEAD',
username=USERNAME,
password=PASSWORD)
except urlfetch.DownloadError as e:
logging.error('GithubFileSystem Stat: %s' % e)
return self._DefaultStat(path)
# Check if Github authentication failed.
if result.status_code == 401:
logging.error('Github authentication failed for %s, falling back to '
'unauthenticated.' % USERNAME)
try:
result = self._fetcher.Fetch('commits/HEAD')
except urlfetch.DownloadError as e:
logging.error('GithubFileSystem Stat: %s' % e)
return self._DefaultStat(path)
version = (json.loads(result.content).get('commit', {})
.get('tree', {})
.get('sha', None))
# Check if the JSON was valid, and set to 0 if not.
if version is not None:
self._stat_object_store.Set(path, version)
else:
logging.warning('Problem fetching commit hash from github.')
return self._DefaultStat(path)
return StatInfo(version)
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
import glanceclient.v1.images
import routes
import webob
import webob.dec
import webob.request
from nova.api import auth as api_auth
from nova.api import openstack as openstack_api
from nova.api.openstack import auth
from nova.api.openstack import compute
from nova.api.openstack.compute import limits
from nova.api.openstack.compute import versions
from nova.api.openstack import urlmap
from nova.api.openstack import wsgi as os_wsgi
from nova.compute import api as compute_api
from nova.compute import instance_types
from nova.compute import vm_states
from nova import context
from nova.db.sqlalchemy import models
from nova import exception as exc
import nova.image.glance
from nova.network import api as network_api
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova import quota
from nova.tests import fake_network
from nova.tests.glance import stubs as glance_stubs
from nova import utils
from nova import wsgi
QUOTAS = quota.QUOTAS
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUIDS = {}
class Context(object):
pass
class FakeRouter(wsgi.Router):
def __init__(self, ext_mgr=None):
pass
@webob.dec.wsgify
def __call__(self, req):
res = webob.Response()
res.status = '200'
res.headers['X-Test-Success'] = 'True'
return res
@webob.dec.wsgify
def fake_wsgi(self, req):
return self.application
def wsgi_app(inner_app_v2=None, fake_auth_context=None,
use_no_auth=False, ext_mgr=None, init_only=None):
if not inner_app_v2:
inner_app_v2 = compute.APIRouter(ext_mgr, init_only)
if use_no_auth:
api_v2 = openstack_api.FaultWrapper(auth.NoAuthMiddleware(
limits.RateLimitingMiddleware(inner_app_v2)))
else:
if fake_auth_context is not None:
ctxt = fake_auth_context
else:
ctxt = context.RequestContext('fake', 'fake', auth_token=True)
api_v2 = openstack_api.FaultWrapper(api_auth.InjectContext(ctxt,
limits.RateLimitingMiddleware(inner_app_v2)))
mapper = urlmap.URLMap()
mapper['/v2'] = api_v2
mapper['/v1.1'] = api_v2
mapper['/'] = openstack_api.FaultWrapper(versions.Versions())
return mapper
def stub_out_key_pair_funcs(stubs, have_key_pair=True):
def key_pair(context, user_id):
return [dict(name='key', public_key='public_key')]
def one_key_pair(context, user_id, name):
if name == 'key':
return dict(name='key', public_key='public_key')
else:
raise exc.KeypairNotFound(user_id=user_id, name=name)
def no_key_pair(context, user_id):
return []
if have_key_pair:
stubs.Set(nova.db, 'key_pair_get_all_by_user', key_pair)
stubs.Set(nova.db, 'key_pair_get', one_key_pair)
else:
stubs.Set(nova.db, 'key_pair_get_all_by_user', no_key_pair)
def stub_out_rate_limiting(stubs):
def fake_rate_init(self, app):
super(limits.RateLimitingMiddleware, self).__init__(app)
self.application = app
stubs.Set(nova.api.openstack.compute.limits.RateLimitingMiddleware,
'__init__', fake_rate_init)
stubs.Set(nova.api.openstack.compute.limits.RateLimitingMiddleware,
'__call__', fake_wsgi)
def stub_out_instance_quota(stubs, allowed, quota, resource='instances'):
def fake_reserve(context, **deltas):
requested = deltas.pop(resource, 0)
if requested > allowed:
quotas = dict(instances=1, cores=1, ram=1)
quotas[resource] = quota
usages = dict(instances=dict(in_use=0, reserved=0),
cores=dict(in_use=0, reserved=0),
ram=dict(in_use=0, reserved=0))
usages[resource]['in_use'] = (quotas[resource] * 0.9 -
allowed)
usages[resource]['reserved'] = quotas[resource] * 0.1
raise exc.OverQuota(overs=[resource], quotas=quotas,
usages=usages)
stubs.Set(QUOTAS, 'reserve', fake_reserve)
def stub_out_networking(stubs):
def get_my_ip():
return '127.0.0.1'
stubs.Set(nova.netconf, '_get_my_ip', get_my_ip)
def stub_out_compute_api_snapshot(stubs):
def snapshot(self, context, instance, name, extra_properties=None):
return dict(id='123', status='ACTIVE', name=name,
properties=extra_properties)
stubs.Set(compute_api.API, 'snapshot', snapshot)
class stub_out_compute_api_backup(object):
def __init__(self, stubs):
self.stubs = stubs
self.extra_props_last_call = None
stubs.Set(compute_api.API, 'backup', self.backup)
def backup(self, context, instance, name, backup_type, rotation,
extra_properties=None):
self.extra_props_last_call = extra_properties
props = dict(backup_type=backup_type,
rotation=rotation)
props.update(extra_properties or {})
return dict(id='123', status='ACTIVE', name=name, properties=props)
def stub_out_nw_api_get_instance_nw_info(stubs, num_networks=1, func=None):
fake_network.stub_out_nw_api_get_instance_nw_info(stubs,
spectacular=True)
def stub_out_nw_api_get_floating_ips_by_fixed_address(stubs, func=None):
def get_floating_ips_by_fixed_address(self, context, fixed_ip):
return ['1.2.3.4']
if func is None:
func = get_floating_ips_by_fixed_address
stubs.Set(network_api.API, 'get_floating_ips_by_fixed_address', func)
def stub_out_nw_api(stubs, cls=None, private=None, publics=None):
if not private:
private = '192.168.0.3'
if not publics:
publics = ['1.2.3.4']
class Fake:
def get_instance_nw_info(*args, **kwargs):
pass
def get_floating_ips_by_fixed_address(*args, **kwargs):
return publics
if cls is None:
cls = Fake
stubs.Set(network_api, 'API', cls)
fake_network.stub_out_nw_api_get_instance_nw_info(stubs, spectacular=True)
def _make_image_fixtures():
NOW_GLANCE_FORMAT = "2010-10-11T10:30:22"
image_id = 123
fixtures = []
def add_fixture(**kwargs):
fixtures.append(kwargs)
# Public image
add_fixture(id=image_id, name='public image', is_public=True,
status='active', properties={'key1': 'value1'},
min_ram="128", min_disk="10", size='25165824')
image_id += 1
# Snapshot for User 1
uuid = 'aa640691-d1a7-4a67-9d3c-d35ee6b3cc74'
server_ref = 'http://localhost/v2/servers/' + uuid
snapshot_properties = {'instance_uuid': uuid, 'user_id': 'fake'}
for status in ('queued', 'saving', 'active', 'killed',
'deleted', 'pending_delete'):
deleted = False if status != 'deleted' else True
add_fixture(id=image_id, name='%s snapshot' % status,
is_public=False, status=status,
properties=snapshot_properties, size='25165824',
deleted=deleted)
image_id += 1
# Image without a name
add_fixture(id=image_id, is_public=True, status='active', properties={})
return fixtures
def stub_out_glanceclient_create(stubs, sent_to_glance):
"""
We return the metadata sent to glance by modifying the sent_to_glance dict
in place.
"""
orig_add_image = glanceclient.v1.images.ImageManager.create
def fake_create(context, metadata, data=None):
sent_to_glance['metadata'] = metadata
sent_to_glance['data'] = data
return orig_add_image(metadata, data)
stubs.Set(glanceclient.v1.images.ImageManager, 'create', fake_create)
def stub_out_glance(stubs):
def fake_get_remote_image_service():
client = glance_stubs.StubGlanceClient(_make_image_fixtures())
client_wrapper = nova.image.glance.GlanceClientWrapper()
client_wrapper.host = 'fake_host'
client_wrapper.port = 9292
client_wrapper.client = client
return nova.image.glance.GlanceImageService(client=client_wrapper)
stubs.Set(nova.image.glance,
'get_default_image_service',
fake_get_remote_image_service)
class FakeToken(object):
id_count = 0
def __getitem__(self, key):
return getattr(self, key)
def __init__(self, **kwargs):
FakeToken.id_count += 1
self.id = FakeToken.id_count
for k, v in kwargs.iteritems():
setattr(self, k, v)
class FakeRequestContext(context.RequestContext):
def __init__(self, *args, **kwargs):
kwargs['auth_token'] = kwargs.get('auth_token', 'fake_auth_token')
return super(FakeRequestContext, self).__init__(*args, **kwargs)
class HTTPRequest(os_wsgi.Request):
@classmethod
def blank(cls, *args, **kwargs):
kwargs['base_url'] = 'http://localhost/v2'
use_admin_context = kwargs.pop('use_admin_context', False)
out = os_wsgi.Request.blank(*args, **kwargs)
out.environ['nova.context'] = FakeRequestContext('fake_user', 'fake',
is_admin=use_admin_context)
return out
class TestRouter(wsgi.Router):
def __init__(self, controller):
mapper = routes.Mapper()
mapper.resource("test", "tests",
controller=os_wsgi.Resource(controller))
super(TestRouter, self).__init__(mapper)
class FakeAuthDatabase(object):
data = {}
@staticmethod
def auth_token_get(context, token_hash):
return FakeAuthDatabase.data.get(token_hash, None)
@staticmethod
def auth_token_create(context, token):
fake_token = FakeToken(created_at=timeutils.utcnow(), **token)
FakeAuthDatabase.data[fake_token.token_hash] = fake_token
FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token
return fake_token
@staticmethod
def auth_token_destroy(context, token_id):
token = FakeAuthDatabase.data.get('id_%i' % token_id)
if token and token.token_hash in FakeAuthDatabase.data:
del FakeAuthDatabase.data[token.token_hash]
del FakeAuthDatabase.data['id_%i' % token_id]
class FakeRateLimiter(object):
def __init__(self, application):
self.application = application
@webob.dec.wsgify
def __call__(self, req):
return self.application
def create_info_cache(nw_cache):
if nw_cache is None:
pub0 = ('192.168.1.100',)
pub1 = ('2001:db8:0:1::1',)
def _ip(ip):
return {'address': ip, 'type': 'fixed'}
nw_cache = [
{'address': 'aa:aa:aa:aa:aa:aa',
'id': 1,
'network': {'bridge': 'br0',
'id': 1,
'label': 'test1',
'subnets': [{'cidr': '192.168.1.0/24',
'ips': [_ip(ip) for ip in pub0]},
{'cidr': 'b33f::/64',
'ips': [_ip(ip) for ip in pub1]}]}}]
return {"info_cache": {"network_info": nw_cache}}
if not isinstance(nw_cache, basestring):
nw_cache = jsonutils.dumps(nw_cache)
return {"info_cache": {"network_info": nw_cache}}
def get_fake_uuid(token=0):
if token not in FAKE_UUIDS:
FAKE_UUIDS[token] = str(uuid.uuid4())
return FAKE_UUIDS[token]
def fake_instance_get(**kwargs):
def _return_server(context, uuid):
return stub_instance(1, **kwargs)
return _return_server
def fake_instance_get_all_by_filters(num_servers=5, **kwargs):
def _return_servers(context, *args, **kwargs):
servers_list = []
marker = None
limit = None
found_marker = False
if "marker" in kwargs:
marker = kwargs["marker"]
if "limit" in kwargs:
limit = kwargs["limit"]
for i in xrange(num_servers):
uuid = get_fake_uuid(i)
server = stub_instance(id=i + 1, uuid=uuid,
**kwargs)
servers_list.append(server)
if marker is not None and uuid == marker:
found_marker = True
servers_list = []
if marker is not None and not found_marker:
raise exc.MarkerNotFound(marker=marker)
if limit is not None:
servers_list = servers_list[:limit]
return servers_list
return _return_servers
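# Illustrative behaviour of the stub above (hypothetical values): with
# num_servers=5, passing marker=get_fake_uuid(2) discards servers 1-3 and
# returns servers 4-5; adding limit=1 then truncates that to a single server.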
def stub_instance(id, user_id=None, project_id=None, host=None,
node=None, vm_state=None, task_state=None,
reservation_id="", uuid=FAKE_UUID, image_ref="10",
flavor_id="1", name=None, key_name='',
access_ipv4=None, access_ipv6=None, progress=0,
auto_disk_config=False, display_name=None,
include_fake_metadata=True, config_drive=None,
power_state=None, nw_cache=None, metadata=None,
security_groups=None, root_device_name=None,
limit=None, marker=None):
if user_id is None:
user_id = 'fake_user'
if project_id is None:
project_id = 'fake_project'
if metadata:
metadata = [{'key': k, 'value': v} for k, v in metadata.items()]
elif include_fake_metadata:
metadata = [models.InstanceMetadata(key='seq', value=str(id))]
else:
metadata = []
inst_type = instance_types.get_instance_type_by_flavor_id(int(flavor_id))
sys_meta = instance_types.save_instance_type_info({}, inst_type)
if host is not None:
host = str(host)
if key_name:
key_data = 'FAKE'
else:
key_data = ''
if security_groups is None:
security_groups = [{"id": 1, "name": "test"}]
# ReservationID isn't sent back, hack it in there.
server_name = name or "server%s" % id
if reservation_id != "":
server_name = "reservation_%s" % (reservation_id, )
info_cache = create_info_cache(nw_cache)
instance = {
"id": int(id),
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"user_id": user_id,
"project_id": project_id,
"image_ref": image_ref,
"kernel_id": "",
"ramdisk_id": "",
"launch_index": 0,
"key_name": key_name,
"key_data": key_data,
"config_drive": config_drive,
"vm_state": vm_state or vm_states.BUILDING,
"task_state": task_state,
"power_state": power_state,
"memory_mb": 0,
"vcpus": 0,
"root_gb": 0,
"ephemeral_gb": 0,
"hostname": display_name or server_name,
"host": host,
"node": node,
"instance_type_id": 1,
"instance_type": dict(inst_type),
"user_data": "",
"reservation_id": reservation_id,
"mac_address": "",
"scheduled_at": timeutils.utcnow(),
"launched_at": timeutils.utcnow(),
"terminated_at": timeutils.utcnow(),
"availability_zone": "",
"display_name": display_name or server_name,
"display_description": "",
"locked": False,
"metadata": metadata,
"access_ip_v4": access_ipv4,
"access_ip_v6": access_ipv6,
"uuid": uuid,
"progress": progress,
"auto_disk_config": auto_disk_config,
"name": "instance-%s" % id,
"shutdown_terminate": True,
"disable_terminate": False,
"security_groups": security_groups,
"root_device_name": root_device_name,
"system_metadata": utils.dict_to_metadata(sys_meta)}
instance.update(info_cache)
return instance
def stub_volume(id, **kwargs):
volume = {
'id': id,
'user_id': 'fakeuser',
'project_id': 'fakeproject',
'host': 'fakehost',
'size': 1,
'availability_zone': 'fakeaz',
'instance_uuid': 'fakeuuid',
'mountpoint': '/',
'status': 'fakestatus',
'attach_status': 'attached',
'name': 'vol name',
'display_name': 'displayname',
'display_description': 'displaydesc',
'created_at': datetime.datetime(1999, 1, 1, 1, 1, 1),
'snapshot_id': None,
'volume_type_id': 'fakevoltype',
'volume_metadata': [],
'volume_type': {'name': 'vol_type_name'}}
volume.update(kwargs)
return volume
def stub_volume_create(self, context, size, name, description, snapshot,
**param):
vol = stub_volume('1')
vol['size'] = size
vol['display_name'] = name
vol['display_description'] = description
try:
vol['snapshot_id'] = snapshot['id']
except (KeyError, TypeError):
vol['snapshot_id'] = None
vol['availability_zone'] = param.get('availability_zone', 'fakeaz')
return vol
def stub_volume_create_from_image(self, context, size, name, description,
snapshot, volume_type, metadata,
availability_zone):
vol = stub_volume('1')
vol['status'] = 'creating'
vol['size'] = size
vol['display_name'] = name
vol['display_description'] = description
vol['availability_zone'] = 'nova'
return vol
def stub_volume_update(self, context, *args, **param):
pass
def stub_volume_delete(self, context, *args, **param):
pass
def stub_volume_get(self, context, volume_id):
return stub_volume(volume_id)
def stub_volume_get_notfound(self, context, volume_id):
raise exc.VolumeNotFound(volume_id=volume_id)
def stub_volume_get_all(context, search_opts=None):
return [stub_volume(100, project_id='fake'),
stub_volume(101, project_id='superfake'),
stub_volume(102, project_id='superduperfake')]
def stub_volume_get_all_by_project(self, context, search_opts=None):
return [stub_volume_get(self, context, '1')]
def stub_snapshot(id, **kwargs):
snapshot = {
'id': id,
'volume_id': 12,
'status': 'available',
'volume_size': 100,
'created_at': timeutils.utcnow(),
'display_name': 'Default name',
'display_description': 'Default description',
'project_id': 'fake'
}
snapshot.update(kwargs)
return snapshot
def stub_snapshot_create(self, context, volume_id, name, description):
return stub_snapshot(100, volume_id=volume_id, display_name=name,
display_description=description)
def stub_snapshot_delete(self, context, snapshot):
if snapshot['id'] == '-1':
raise exc.NotFound
def stub_snapshot_get(self, context, snapshot_id):
if snapshot_id == '-1':
raise exc.NotFound
return stub_snapshot(snapshot_id)
def stub_snapshot_get_all(self, context):
return [stub_snapshot(100, project_id='fake'),
stub_snapshot(101, project_id='superfake'),
stub_snapshot(102, project_id='superduperfake')]
#!/usr/bin/env python
"""
See https://edx-wiki.atlassian.net/wiki/display/ENG/PO+File+workflow
This task extracts all English strings from all source code
and produces three human-readable files:
conf/locale/en/LC_MESSAGES/django-partial.po
conf/locale/en/LC_MESSAGES/djangojs-partial.po
conf/locale/en/LC_MESSAGES/mako.po
This task will clobber any existing django.po file.
This is because django-admin.py makemessages hardcodes this filename
and it cannot be overridden.
"""
from datetime import datetime
import importlib
import os
import os.path
import logging
import sys
import polib
from path import Path
from i18n import config, Runner
from i18n.execute import execute
from i18n.segment import segment_pofiles
EDX_MARKER = "edX translation file"
LOG = logging.getLogger(__name__)
DEVNULL = open(os.devnull, 'wb')
def base(path1, *paths):
"""Return a relative path from config.BASE_DIR to path1 / paths[0] / ... """
return config.BASE_DIR.relpathto(path1.joinpath(*paths)) # pylint: disable=no-value-for-parameter
class Extract(Runner):
def add_args(self):
# pylint: disable=invalid-name
self.parser.description = __doc__
def rename_source_file(self, src, dst):
"""
Rename a file in the source directory.
"""
os.rename(self.source_msgs_dir.joinpath(src), self.source_msgs_dir.joinpath(dst))
def run(self, args):
"""
Main entry point of script
"""
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
config.LOCALE_DIR.parent.makedirs_p()
self.source_msgs_dir = config.CONFIGURATION.source_messages_dir
# The extraction process clobbers django.po and djangojs.po.
# Save them so that it won't do that.
self.rename_source_file('django.po', 'django-saved.po')
self.rename_source_file('djangojs.po', 'djangojs-saved.po')
# Extract strings from mako templates.
verbosity_map = {
0: "-q",
1: "",
2: "-v",
}
babel_verbosity = verbosity_map.get(args.verbose, "")
if args.verbose:
stderr = None
else:
stderr = DEVNULL
# --keyword informs Babel that `interpolate()` is an expected
# gettext function, which is necessary because the `tokenize` function
# in the `markey` module marks it as such and passes it to Babel.
# (These functions are called in the django-babel-underscore module.)
babel_cmd_template = (
'pybabel {verbosity} extract --mapping={config} '
'--add-comments="Translators:" --keyword="interpolate" '
'. --output={output}'
)
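        # With the default verbosity this expands to something like (paths are
        # illustrative and depend on the configuration):
        #   pybabel -q extract --mapping=conf/locale/babel_mako.cfg \
        #       --add-comments="Translators:" --keyword="interpolate" \
        #       . --output=conf/locale/en/LC_MESSAGES/mako.po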
babel_mako_cfg = base(config.LOCALE_DIR, 'babel_mako.cfg')
if babel_mako_cfg.exists():
babel_mako_cmd = babel_cmd_template.format(
verbosity=babel_verbosity,
config=babel_mako_cfg,
output=base(config.CONFIGURATION.source_messages_dir, 'mako.po'),
)
execute(babel_mako_cmd, working_directory=config.BASE_DIR, stderr=stderr)
babel_underscore_cfg = base(config.LOCALE_DIR, 'babel_underscore.cfg')
if babel_underscore_cfg.exists():
babel_underscore_cmd = babel_cmd_template.format(
verbosity=babel_verbosity,
config=babel_underscore_cfg,
output=base(config.CONFIGURATION.source_messages_dir, 'underscore.po'),
)
execute(babel_underscore_cmd, working_directory=config.BASE_DIR, stderr=stderr)
makemessages = "django-admin.py makemessages -l en -v{}".format(args.verbose)
ignores = " ".join('--ignore="{}/*"'.format(d) for d in config.CONFIGURATION.ignore_dirs)
if ignores:
makemessages += " " + ignores
# Extract strings from django source files (*.py, *.html, *.txt).
make_django_cmd = makemessages + ' -d django'
execute(make_django_cmd, working_directory=config.BASE_DIR, stderr=stderr)
# Extract strings from Javascript source files (*.js).
make_djangojs_cmd = makemessages + ' -d djangojs'
execute(make_djangojs_cmd, working_directory=config.BASE_DIR, stderr=stderr)
# makemessages creates 'django.po'. This filename is hardcoded.
# Rename it to django-partial.po to enable merging into django.po later.
self.rename_source_file('django.po', 'django-partial.po')
# makemessages creates 'djangojs.po'. This filename is hardcoded.
# Rename it to djangojs-partial.po to enable merging into djangojs.po later.
self.rename_source_file('djangojs.po', 'djangojs-partial.po')
files_to_clean = set()
# Extract strings from third-party applications.
for app_name in config.CONFIGURATION.third_party:
# Import the app to find out where it is. Then use pybabel to extract
# from that directory.
app_module = importlib.import_module(app_name)
app_dir = Path(app_module.__file__).dirname().dirname() # pylint: disable=no-value-for-parameter
output_file = self.source_msgs_dir / (app_name + ".po")
files_to_clean.add(output_file)
babel_cmd = 'pybabel {verbosity} extract -F {config} -c "Translators:" {app} -o {output}'
babel_cmd = babel_cmd.format(
verbosity=babel_verbosity,
config=config.LOCALE_DIR / 'babel_third_party.cfg',
app=app_name,
output=output_file,
)
execute(babel_cmd, working_directory=app_dir, stderr=stderr)
# Segment the generated files.
segmented_files = segment_pofiles("en")
files_to_clean.update(segmented_files)
# Finish each file.
for filename in files_to_clean:
LOG.info('Cleaning %s', filename)
pofile = polib.pofile(self.source_msgs_dir.joinpath(filename))
# replace default headers with edX headers
fix_header(pofile)
# replace default metadata with edX metadata
fix_metadata(pofile)
# remove key strings which belong in messages.po
strip_key_strings(pofile)
pofile.save()
# Restore the saved .po files.
self.rename_source_file('django-saved.po', 'django.po')
self.rename_source_file('djangojs-saved.po', 'djangojs.po')
def fix_header(pofile):
"""
Replace default headers with edX headers
"""
# By default, django-admin.py makemessages creates this header:
#
# SOME DESCRIPTIVE TITLE.
# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
# This file is distributed under the same license as the PACKAGE package.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
pofile.metadata_is_fuzzy = [] # remove [u'fuzzy']
header = pofile.header
fixes = (
('SOME DESCRIPTIVE TITLE', EDX_MARKER),
('Translations template for PROJECT.', EDX_MARKER),
('YEAR', str(datetime.utcnow().year)),
('ORGANIZATION', 'edX'),
("THE PACKAGE'S COPYRIGHT HOLDER", "EdX"),
(
'This file is distributed under the same license as the PROJECT project.',
'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.'
),
(
'This file is distributed under the same license as the PACKAGE package.',
'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.'
),
('FIRST AUTHOR <EMAIL@ADDRESS>', 'EdX Team <[email protected]>'),
)
for src, dest in fixes:
header = header.replace(src, dest)
pofile.header = header
def fix_metadata(pofile):
"""
Replace default metadata with edX metadata
"""
# By default, django-admin.py makemessages creates this metadata:
#
# {u'PO-Revision-Date': u'YEAR-MO-DA HO:MI+ZONE',
# u'Language': u'',
# u'Content-Transfer-Encoding': u'8bit',
# u'Project-Id-Version': u'PACKAGE VERSION',
# u'Report-Msgid-Bugs-To': u'',
# u'Last-Translator': u'FULL NAME <EMAIL@ADDRESS>',
# u'Language-Team': u'LANGUAGE <[email protected]>',
# u'POT-Creation-Date': u'2013-04-25 14:14-0400',
# u'Content-Type': u'text/plain; charset=UTF-8',
# u'MIME-Version': u'1.0'}
fixes = {
'PO-Revision-Date': datetime.utcnow(),
'Report-Msgid-Bugs-To': '[email protected]',
'Project-Id-Version': '0.1a',
'Language': 'en',
'Last-Translator': '',
'Language-Team': 'openedx-translation <[email protected]>',
}
pofile.metadata.update(fixes)
def strip_key_strings(pofile):
"""
Removes all entries in PO which are key strings.
These entries should appear only in messages.po, not in any other po files.
"""
newlist = [entry for entry in pofile if not is_key_string(entry.msgid)]
del pofile[:]
pofile += newlist
def is_key_string(string):
"""
    Returns True if `string` is a key string.
    Key strings begin with an underscore.
"""
return len(string) > 1 and string[0] == '_'
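# For illustration (assuming key strings follow the usual i18n-tools convention
# of a leading underscore followed by the English text), is_key_string behaves
# like this:
#
#   is_key_string('_This is a key string')   # True
#   is_key_string('This is a plain string')  # False
#   is_key_string('_')                        # False (too short)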
main = Extract() # pylint: disable=invalid-name
if __name__ == '__main__':
main()
|
|
# coding=utf-8
import os
import platform
import sys
from typing import Optional, Dict, Any, List
from dbt.logger import GLOBAL_LOGGER as logger
import dbt.clients.system
import dbt.config
import dbt.utils
import dbt.exceptions
from dbt.links import ProfileConfigDocs
from dbt.adapters.factory import get_adapter, register_adapter
from dbt.version import get_installed_version
from dbt.config import Project, Profile
from dbt.config.renderer import DbtProjectYamlRenderer, ProfileRenderer
from dbt.context.base import generate_base_context
from dbt.context.target import generate_target_context
from dbt.clients.yaml_helper import load_yaml_text
from dbt.ui.printer import green, red
from dbt.task.base import BaseTask, get_nearest_project_dir
PROFILE_DIR_MESSAGE = """To view your profiles.yml file, run:
{open_cmd} {profiles_dir}"""
ONLY_PROFILE_MESSAGE = '''
A `dbt_project.yml` file was not found in this directory.
Using the only profile `{}`.
'''.lstrip()
MULTIPLE_PROFILE_MESSAGE = '''
A `dbt_project.yml` file was not found in this directory.
dbt found the following profiles:
{}
To debug one of these profiles, run:
dbt debug --profile [profile-name]
'''.lstrip()
COULD_NOT_CONNECT_MESSAGE = '''
dbt was unable to connect to the specified database.
The database returned the following error:
>{err}
Check your database credentials and try again. For more information, visit:
{url}
'''.lstrip()
MISSING_PROFILE_MESSAGE = '''
dbt looked for a profiles.yml file in {path}, but did
not find one. For more information on configuring your profile, consult the
documentation:
{url}
'''.lstrip()
FILE_NOT_FOUND = 'file not found'
class QueryCommentedProfile(Profile):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.query_comment = None
class DebugTask(BaseTask):
def __init__(self, args, config):
super().__init__(args, config)
self.profiles_dir = getattr(self.args, 'profiles_dir',
dbt.config.PROFILES_DIR)
self.profile_path = os.path.join(self.profiles_dir, 'profiles.yml')
try:
self.project_dir = get_nearest_project_dir(self.args)
except dbt.exceptions.Exception:
# we probably couldn't find a project directory. Set project dir
# to whatever was given, or default to the current directory.
if args.project_dir:
self.project_dir = args.project_dir
else:
self.project_dir = os.getcwd()
self.project_path = os.path.join(self.project_dir, 'dbt_project.yml')
self.cli_vars = dbt.utils.parse_cli_vars(
getattr(self.args, 'vars', '{}')
)
# set by _load_*
self.profile: Optional[Profile] = None
self.profile_fail_details = ''
self.raw_profile_data: Optional[Dict[str, Any]] = None
self.profile_name: Optional[str] = None
self.project: Optional[Project] = None
self.project_fail_details = ''
self.messages: List[str] = []
@property
def project_profile(self):
if self.project is None:
return None
return self.project.profile_name
def path_info(self):
open_cmd = dbt.clients.system.open_dir_cmd()
message = PROFILE_DIR_MESSAGE.format(
open_cmd=open_cmd,
profiles_dir=self.profiles_dir
)
logger.info(message)
def run(self):
if self.args.config_dir:
self.path_info()
return
version = get_installed_version().to_version_string(skip_matcher=True)
print('dbt version: {}'.format(version))
print('python version: {}'.format(sys.version.split()[0]))
print('python path: {}'.format(sys.executable))
print('os info: {}'.format(platform.platform()))
print('Using profiles.yml file at {}'.format(self.profile_path))
print('Using dbt_project.yml file at {}'.format(self.project_path))
print('')
self.test_configuration()
self.test_dependencies()
self.test_connection()
for message in self.messages:
print(message)
print('')
def _load_project(self):
if not os.path.exists(self.project_path):
self.project_fail_details = FILE_NOT_FOUND
return red('ERROR not found')
if self.profile is None:
ctx = generate_base_context(self.cli_vars)
else:
ctx = generate_target_context(self.profile, self.cli_vars)
renderer = DbtProjectYamlRenderer(ctx)
try:
self.project = Project.from_project_root(
self.project_dir, renderer
)
except dbt.exceptions.DbtConfigError as exc:
self.project_fail_details = str(exc)
return red('ERROR invalid')
return green('OK found and valid')
def _profile_found(self):
if not self.raw_profile_data:
return red('ERROR not found')
assert self.raw_profile_data is not None
if self.profile_name in self.raw_profile_data:
return green('OK found')
else:
return red('ERROR not found')
def _target_found(self):
requirements = (self.raw_profile_data and self.profile_name and
self.target_name)
if not requirements:
return red('ERROR not found')
# mypy appeasement, we checked just above
assert self.raw_profile_data is not None
assert self.profile_name is not None
assert self.target_name is not None
if self.profile_name not in self.raw_profile_data:
return red('ERROR not found')
profiles = self.raw_profile_data[self.profile_name]['outputs']
if self.target_name not in profiles:
return red('ERROR not found')
return green('OK found')
def _choose_profile_names(self) -> Optional[List[str]]:
project_profile: Optional[str] = None
if os.path.exists(self.project_path):
try:
partial = Project.partial_load(
os.path.dirname(self.project_path)
)
renderer = DbtProjectYamlRenderer(
generate_base_context(self.cli_vars)
)
project_profile = partial.render_profile_name(renderer)
except dbt.exceptions.DbtProjectError:
pass
args_profile: Optional[str] = getattr(self.args, 'profile', None)
try:
return [Profile.pick_profile_name(args_profile, project_profile)]
except dbt.exceptions.DbtConfigError:
pass
# try to guess
profiles = []
if self.raw_profile_data:
profiles = [k for k in self.raw_profile_data if k != 'config']
if project_profile is None:
self.messages.append('Could not load dbt_project.yml')
elif len(profiles) == 0:
self.messages.append('The profiles.yml has no profiles')
elif len(profiles) == 1:
self.messages.append(ONLY_PROFILE_MESSAGE.format(profiles[0]))
else:
self.messages.append(MULTIPLE_PROFILE_MESSAGE.format(
'\n'.join(' - {}'.format(o) for o in profiles)
))
return profiles
def _choose_target_name(self, profile_name: str):
has_raw_profile = (
self.raw_profile_data is not None and
profile_name in self.raw_profile_data
)
if not has_raw_profile:
return None
# mypy appeasement, we checked just above
assert self.raw_profile_data is not None
raw_profile = self.raw_profile_data[profile_name]
renderer = ProfileRenderer(generate_base_context(self.cli_vars))
target_name, _ = Profile.render_profile(
raw_profile=raw_profile,
profile_name=profile_name,
target_override=getattr(self.args, 'target', None),
renderer=renderer
)
return target_name
def _load_profile(self):
if not os.path.exists(self.profile_path):
self.profile_fail_details = FILE_NOT_FOUND
self.messages.append(MISSING_PROFILE_MESSAGE.format(
path=self.profile_path, url=ProfileConfigDocs
))
return red('ERROR not found')
try:
raw_profile_data = load_yaml_text(
dbt.clients.system.load_file_contents(self.profile_path)
)
except Exception:
pass # we'll report this when we try to load the profile for real
else:
if isinstance(raw_profile_data, dict):
self.raw_profile_data = raw_profile_data
profile_errors = []
profile_names = self._choose_profile_names()
renderer = ProfileRenderer(generate_base_context(self.cli_vars))
for profile_name in profile_names:
try:
profile: Profile = QueryCommentedProfile.render_from_args(
self.args, renderer, profile_name
)
except dbt.exceptions.DbtConfigError as exc:
profile_errors.append(str(exc))
else:
if len(profile_names) == 1:
# if a profile was specified, set it on the task
self.target_name = self._choose_target_name(profile_name)
self.profile = profile
if profile_errors:
self.profile_fail_details = '\n\n'.join(profile_errors)
return red('ERROR invalid')
return green('OK found and valid')
def test_git(self):
try:
dbt.clients.system.run_cmd(os.getcwd(), ['git', '--help'])
except dbt.exceptions.ExecutableError as exc:
self.messages.append('Error from git --help: {!s}'.format(exc))
return red('ERROR')
return green('OK found')
def test_dependencies(self):
print('Required dependencies:')
print(' - git [{}]'.format(self.test_git()))
print('')
def test_configuration(self):
profile_status = self._load_profile()
project_status = self._load_project()
print('Configuration:')
print(' profiles.yml file [{}]'.format(profile_status))
print(' dbt_project.yml file [{}]'.format(project_status))
# skip profile stuff if we can't find a profile name
if self.profile_name is not None:
print(' profile: {} [{}]'.format(self.profile_name,
self._profile_found()))
print(' target: {} [{}]'.format(self.target_name,
self._target_found()))
print('')
self._log_project_fail()
self._log_profile_fail()
def _log_project_fail(self):
if not self.project_fail_details:
return
if self.project_fail_details == FILE_NOT_FOUND:
return
print('Project loading failed for the following reason:')
print(self.project_fail_details)
print('')
def _log_profile_fail(self):
if not self.profile_fail_details:
return
if self.profile_fail_details == FILE_NOT_FOUND:
return
print('Profile loading failed for the following reason:')
print(self.profile_fail_details)
print('')
@staticmethod
def attempt_connection(profile):
"""Return a string containing the error message, or None if there was
no error.
"""
register_adapter(profile)
adapter = get_adapter(profile)
try:
with adapter.connection_named('debug'):
adapter.execute('select 1 as id')
except Exception as exc:
return COULD_NOT_CONNECT_MESSAGE.format(
err=str(exc),
url=ProfileConfigDocs,
)
return None
def _connection_result(self):
result = self.attempt_connection(self.profile)
if result is not None:
self.messages.append(result)
return red('ERROR')
return green('OK connection ok')
def test_connection(self):
if not self.profile:
return
print('Connection:')
for k, v in self.profile.credentials.connection_info():
print(' {}: {}'.format(k, v))
print(' Connection test: {}'.format(self._connection_result()))
print('')
@classmethod
def validate_connection(cls, target_dict):
"""Validate a connection dictionary. On error, raises a DbtConfigError.
"""
target_name = 'test'
# make a fake profile that we can parse
profile_data = {
'outputs': {
target_name: target_dict,
},
}
# this will raise a DbtConfigError on failure
profile = Profile.from_raw_profile_info(
raw_profile=profile_data,
profile_name='',
target_override=target_name,
renderer=ProfileRenderer(generate_base_context({})),
)
result = cls.attempt_connection(profile)
if result is not None:
raise dbt.exceptions.DbtProfileError(
result,
result_type='connection_failure'
)
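# Example usage (a sketch; the target dict below is hypothetical and assumes the
# corresponding adapter plugin is installed):
#
#   target = {
#       'type': 'postgres', 'host': 'localhost', 'port': 5432,
#       'user': 'dbt', 'pass': 'dbt', 'dbname': 'analytics', 'schema': 'public',
#   }
#   DebugTask.validate_connection(target)  # raises DbtProfileError on failure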
|
|
from copy import copy
from sympy.tensor.array.dense_ndim_array import MutableDenseNDimArray
from sympy import Symbol, Rational, SparseMatrix
from sympy.matrices import Matrix
from sympy.tensor.array.sparse_ndim_array import MutableSparseNDimArray
def test_ndim_array_initiation():
arr_with_one_element = MutableDenseNDimArray([23])
assert len(arr_with_one_element) == 1
assert arr_with_one_element[0] == 23
assert arr_with_one_element.rank() == 1
arr_with_symbol_element = MutableDenseNDimArray([Symbol('x')])
assert len(arr_with_symbol_element) == 1
assert arr_with_symbol_element[0] == Symbol('x')
assert arr_with_symbol_element.rank() == 1
number5 = 5
vector = MutableDenseNDimArray.zeros(number5)
assert len(vector) == number5
assert vector.shape == (number5,)
assert vector.rank() == 1
vector = MutableSparseNDimArray.zeros(number5)
assert len(vector) == number5
assert vector.shape == (number5,)
assert vector._sparse_array == {}
assert vector.rank() == 1
n_dim_array = MutableDenseNDimArray(range(3**4), (3, 3, 3, 3,))
assert len(n_dim_array) == 3 * 3 * 3 * 3
assert n_dim_array.shape == (3, 3, 3, 3)
assert n_dim_array.rank() == 4
array_shape = (3, 3, 3, 3)
sparse_array = MutableSparseNDimArray.zeros(*array_shape)
assert len(sparse_array._sparse_array) == 0
assert len(sparse_array) == 3 * 3 * 3 * 3
assert n_dim_array.shape == array_shape
assert n_dim_array.rank() == 4
one_dim_array = MutableDenseNDimArray([2, 3, 1])
assert len(one_dim_array) == 3
assert one_dim_array.shape == (3,)
assert one_dim_array.rank() == 1
assert one_dim_array.tolist() == [2, 3, 1]
shape = (3, 3)
array_with_many_args = MutableSparseNDimArray.zeros(*shape)
assert len(array_with_many_args) == 3 * 3
assert array_with_many_args.shape == shape
assert array_with_many_args[0, 0] == 0
assert array_with_many_args.rank() == 2
def test_reshape():
array = MutableDenseNDimArray(range(50), 50)
assert array.shape == (50,)
assert array.rank() == 1
array = array.reshape(5, 5, 2)
assert array.shape == (5, 5, 2)
assert array.rank() == 3
assert len(array) == 50
def test_iterator():
array = MutableDenseNDimArray(range(4), (2, 2))
j = 0
for i in array:
assert i == j
j += 1
array = array.reshape(4)
j = 0
for i in array:
assert i == j
j += 1
def test_sparse():
sparse_array = MutableSparseNDimArray([0, 0, 0, 1], (2, 2))
assert len(sparse_array) == 2 * 2
    # the underlying dictionary stores only the non-zero entries:
assert len(sparse_array._sparse_array) == 1
assert list(sparse_array) == [0, 0, 0, 1]
for i, j in zip(sparse_array, [0, 0, 0, 1]):
assert i == j
sparse_array[0, 0] = 123
assert len(sparse_array._sparse_array) == 2
assert sparse_array[0, 0] == 123
    # when an element of the sparse array becomes zero it disappears from
    # the dictionary
sparse_array[0, 0] = 0
assert len(sparse_array._sparse_array) == 1
sparse_array[1, 1] = 0
assert len(sparse_array._sparse_array) == 0
assert sparse_array[0, 0] == 0
def test_calculation():
a = MutableDenseNDimArray([1]*9, (3, 3))
b = MutableDenseNDimArray([9]*9, (3, 3))
c = a + b
for i in c:
assert i == 10
assert c == MutableDenseNDimArray([10]*9, (3, 3))
assert c == MutableSparseNDimArray([10]*9, (3, 3))
c = b - a
for i in c:
assert i == 8
assert c == MutableDenseNDimArray([8]*9, (3, 3))
assert c == MutableSparseNDimArray([8]*9, (3, 3))
def test_ndim_array_converting():
dense_array = MutableDenseNDimArray([1, 2, 3, 4], (2, 2))
alist = dense_array.tolist()
    assert alist == [[1, 2], [3, 4]]
matrix = dense_array.tomatrix()
assert (isinstance(matrix, Matrix))
for i in range(len(dense_array)):
assert dense_array[i] == matrix[i]
assert matrix.shape == dense_array.shape
sparse_array = MutableSparseNDimArray([1, 2, 3, 4], (2, 2))
alist = sparse_array.tolist()
assert alist == [[1, 2], [3, 4]]
matrix = sparse_array.tomatrix()
assert(isinstance(matrix, SparseMatrix))
for i in range(len(sparse_array)):
assert sparse_array[i] == matrix[i]
assert matrix.shape == sparse_array.shape
def test_converting_functions():
arr_list = [1, 2, 3, 4]
arr_matrix = Matrix(((1, 2), (3, 4)))
# list
arr_ndim_array = MutableDenseNDimArray(arr_list, (2, 2))
assert (isinstance(arr_ndim_array, MutableDenseNDimArray))
assert arr_matrix.tolist() == arr_ndim_array.tolist()
# Matrix
arr_ndim_array = MutableDenseNDimArray(arr_matrix)
assert (isinstance(arr_ndim_array, MutableDenseNDimArray))
assert arr_matrix.tolist() == arr_ndim_array.tolist()
assert arr_matrix.shape == arr_ndim_array.shape
def test_equality():
first_list = [1, 2, 3, 4]
second_list = [1, 2, 3, 4]
third_list = [4, 3, 2, 1]
assert first_list == second_list
assert first_list != third_list
first_ndim_array = MutableDenseNDimArray(first_list, (2, 2))
second_ndim_array = MutableDenseNDimArray(second_list, (2, 2))
third_ndim_array = MutableDenseNDimArray(third_list, (2, 2))
fourth_ndim_array = MutableDenseNDimArray(first_list, (2, 2))
assert first_ndim_array == second_ndim_array
second_ndim_array[0, 0] = 0
assert first_ndim_array != second_ndim_array
assert first_ndim_array != third_ndim_array
assert first_ndim_array == fourth_ndim_array
def test_arithmetic():
a = MutableDenseNDimArray([3 for i in range(9)], (3, 3))
b = MutableDenseNDimArray([7 for i in range(9)], (3, 3))
c1 = a + b
c2 = b + a
assert c1 == c2
d1 = a - b
d2 = b - a
assert d1 == d2 * (-1)
e1 = a * 5
e2 = 5 * a
e3 = copy(a)
e3 *= 5
assert e1 == e2 == e3
f1 = a / 5
f2 = copy(a)
f2 /= 5
assert f1 == f2
assert f1[0, 0] == f1[0, 1] == f1[0, 2] == f1[1, 0] == f1[1, 1] == \
f1[1, 2] == f1[2, 0] == f1[2, 1] == f1[2, 2] == Rational(3, 5)
assert type(a) == type(b) == type(c1) == type(c2) == type(d1) == type(d2) \
== type(e1) == type(e2) == type(e3) == type(f1)
def test_higher_dimensions():
m3 = MutableDenseNDimArray(range(10, 34), (2, 3, 4))
assert m3.tolist() == [[[10, 11, 12, 13],
[14, 15, 16, 17],
[18, 19, 20, 21]],
[[22, 23, 24, 25],
[26, 27, 28, 29],
[30, 31, 32, 33]]]
assert m3._get_tuple_index(0) == (0, 0, 0)
assert m3._get_tuple_index(1) == (0, 0, 1)
assert m3._get_tuple_index(4) == (0, 1, 0)
assert m3._get_tuple_index(12) == (1, 0, 0)
assert str(m3) == '[[[10, 11, 12, 13], [14, 15, 16, 17], [18, 19, 20, 21]], [[22, 23, 24, 25], [26, 27, 28, 29], [30, 31, 32, 33]]]'
m3_rebuilt = MutableDenseNDimArray([[[10, 11, 12, 13], [14, 15, 16, 17], [18, 19, 20, 21]], [[22, 23, 24, 25], [26, 27, 28, 29], [30, 31, 32, 33]]])
assert m3 == m3_rebuilt
m3_other = MutableDenseNDimArray([[[10, 11, 12, 13], [14, 15, 16, 17], [18, 19, 20, 21]], [[22, 23, 24, 25], [26, 27, 28, 29], [30, 31, 32, 33]]], (2, 3, 4))
assert m3 == m3_other
def test_slices():
md = MutableDenseNDimArray(range(10, 34), (2, 3, 4))
assert md[:] == md._array
assert md[:, :, 0].tomatrix() == Matrix([[10, 14, 18], [22, 26, 30]])
assert md[0, 1:2, :].tomatrix() == Matrix([[14, 15, 16, 17]])
assert md[0, 1:3, :].tomatrix() == Matrix([[14, 15, 16, 17], [18, 19, 20, 21]])
assert md[:, :, :] == md
sd = MutableSparseNDimArray(range(10, 34), (2, 3, 4))
assert sd == MutableSparseNDimArray(md)
assert sd[:] == md._array
assert sd[:] == list(sd)
assert sd[:, :, 0].tomatrix() == Matrix([[10, 14, 18], [22, 26, 30]])
assert sd[0, 1:2, :].tomatrix() == Matrix([[14, 15, 16, 17]])
assert sd[0, 1:3, :].tomatrix() == Matrix([[14, 15, 16, 17], [18, 19, 20, 21]])
assert sd[:, :, :] == sd
def test_diff():
from sympy.abc import x, y, z
md = MutableDenseNDimArray([[x, y], [x*z, x*y*z]])
assert md.diff(x) == MutableDenseNDimArray([[1, 0], [z, y*z]])
sd = MutableSparseNDimArray(md)
assert sd == MutableSparseNDimArray([x, y, x*z, x*y*z], (2, 2))
assert sd.diff(x) == MutableSparseNDimArray([[1, 0], [z, y*z]])
|
|
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tokenizers."""
import lingvo.compat as tf
from lingvo.core import base_layer
from lingvo.core import ops
from lingvo.core import py_utils
from lingvo.core import wpm_encoder
import tensorflow_text as tf_text
class BaseTokenizer(base_layer.BaseLayer):
"""The base tokenizer."""
@classmethod
def Params(cls):
"""Defaults params for tokenizers."""
p = super().Params()
p.name = 'tokenizer'
    p.Define('vocab_size', 64, 'The size of the vocabulary.')
p.Define(
'append_eos', True, 'Whether to append </s> at the end and treat '
'it as a non-padded label.')
p.Define('pad_to_max_length', True,
'If True, output ids will be padded to max_length.')
# TODO(ciprianchelba): there should be a check in __init__ that the ids
# below are consistent with the ones assigned by the vocabulary.
p.Define('target_unk_id', 0, 'Target unknown token id.')
p.Define('target_sos_id', 1, 'Target start of sequence id.')
p.Define('target_eos_id', 2, 'Target end of sequence id.')
p.Define('target_wb_id', -1, 'Target word boundary id.')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
self.sos_id = p.target_sos_id # <S>
self.eos_id = p.target_eos_id # </S>
self.unk_id = p.target_unk_id # <UNK>
def Initialize(self, sess):
"""Run any tokenizer initialization that requires a session."""
pass
def StringsToIds(self,
strs,
max_length,
external_append_eos=None,
languages=None):
"""Tokenize strs into vocab ids.
Args:
strs: A vector of strings.
max_length: An int providing the max_length for strs.
external_append_eos: Bool or None. If None, will be ignored and
`params.append_eos` will be used. If bool, will determine if an eos
symbol will be added to tokens.
languages: A vector of strings with the same length as `strs`.
Returns:
A tuple (ids, labels, paddings) with the same shape [batch, maxlen].
- ids[i, j] is the input token id of i-th sample for j-th step.
- labels[i, j] is the target token id of i-th sample for j-th step.
- paddings[i, j] is 1 iff i-th sample's j-th step is padded.
Raises:
ValueError: If unknown token type.
"""
p = self.params
if external_append_eos is None:
append_eos = p.append_eos
else:
append_eos = external_append_eos
return self._StringsToIdsImpl(strs, max_length, append_eos, languages)
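  # For illustration (an assumed subclass with append_eos=True, max_length=5,
  # sos_id=1, eos_id=2, and "hi" tokenized to the ids [7, 8]):
  #   ids      ~ [[1, 7, 8, 2, ...]]      (inputs start with <S>)
  #   labels   ~ [[7, 8, 2, ...]]         (targets end with </S>)
  #   paddings ~ [[0., 0., 0., 1., 1.]]   (1 marks padded steps)
  # The exact padding values depend on the subclass implementation.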
def StringsToIdsWithOffsets(self,
strs,
max_length,
external_append_eos=None,
languages=None):
"""Tokenize strs into vocab ids, and also return byte-level offsets.
Args:
strs: A vector of strings.
max_length: An int providing the max_length for strs.
external_append_eos: Bool or None. If None, will be ignored and
`params.append_eos` will be used. If bool, will determine if an eos
symbol will be added to tokens.
languages: A vector of strings with the same length as `strs`.
Returns:
A tuple (ids, labels, paddings, start_offsets, end_offsets). Each tensor
has the same shape [batch, maxlen].
- ids[i, j] is the input token id of i-th sample for j-th step.
- labels[i, j] is the target token id of i-th sample for j-th step.
- paddings[i, j] is 1 iff i-th sample's j-th step is padded.
- start_offset[i, j] is the byte-level offset of the start of the j-th id
in the i-th original string
- end_offset[i, j] is the byte-level offset of the end of the j-th id
in the i-th original string
Raises:
ValueError: If unknown token type.
Exception: If the tokenizer does not support offsets.
"""
p = self.params
if external_append_eos is None:
append_eos = p.append_eos
else:
append_eos = external_append_eos
return self._StringsToIdsWithOffsetsImpl(strs, max_length, append_eos,
languages)
def _StringsToIdsImpl(self, strs, max_length, append_eos, languages):
raise NotImplementedError('Abstract method.')
def _StringsToIdsWithOffsetsImpl(self, strs, max_length, append_eos,
languages):
raise NotImplementedError('Abstract method.')
def IdsToStrings(self, ids, lens, languages=None):
"""Converts ids back to strings.
Args:
ids: A matrix of shape [batch, seqlen]. ids[i, :] is the i-th sample's
ids.
lens: A vector of shape [batch]. lens[i] is the sequence length of the
i-th sample. Only the first lens[i] tokens in ids[i, :] are valid tokens
for the i-th sequence.
languages: A vector of strings of shape [batch].
Returns:
sequences - A vector of shape [batch]. The converted string sequence.
Raises:
ValueError: If unknown token type.
"""
raise NotImplementedError('Abstract method.')
def IdsToTokens(self, ids, languages=None):
"""Converts ids back to tokens (as separate strings).
Args:
ids: A matrix of shape [batch, seqlen]. ids[i, :] is the i-th sample's
ids.
languages: A vector of strings of shape [batch].
Returns:
tokens - A matrix of shape [batch, seqlen] of bytes.
Raises:
ValueError: If unknown token type.
"""
raise NotImplementedError('Abstract method.')
class AsciiTokenizer(BaseTokenizer):
"""A simple grapheme tokenizer.
Maps a small vocabulary of character tokens for (lower case) letters, digits,
and punctuation symbols.
"""
def _StringsToIdsImpl(self, strs, max_length, append_eos, languages):
p = self.params
return ops.ascii_to_token_id(
strs,
maxlen=max_length,
pad_to_maxlen=p.pad_to_max_length,
append_eos=append_eos)
def IdsToStrings(self, ids, lens):
return ops.id_to_ascii(ids, lens)
class VocabFileTokenizer(BaseTokenizer):
"""Tokenizers that use vocab files for look-up."""
@classmethod
def Params(cls):
p = super().Params()
p.Define('token_vocab_filepath', None,
'If set, specifies a filepath to the token vocab file.')
p.Define('ngram_vocab_filepath', None,
'If set, specifies a filepath to the Ngram vocab file.')
p.Define('ngram_separator', '',
'string separator to use when joining ngrams.')
p.Define('tokens_delimiter', ' ',
'The delimiter to split a string to tokens with.')
p.Define(
'load_token_ids_from_vocab', True,
'Whether token ids are present in vocab (i.e. vocab contains two '
        'columns, one for IDs and one for words). If false, line numbers '
'are used.')
return p
@property
def _vocab_file_params(self):
return ['token_vocab_filepath', 'ngram_vocab_filepath']
def _CheckParams(self):
p = self.params
num_params_specified = sum(
[getattr(p, x) is not None for x in self._vocab_file_params])
if num_params_specified != 1:
raise ValueError('Exactly one vocab file should be specified!')
def _StringsToIdsImpl(self, strs, max_length, append_eos, languages):
self._CheckParams()
p = self.params
if p.token_vocab_filepath:
return ops.str_to_vocab_tokens(
strs,
maxlen=max_length,
pad_to_maxlen=p.pad_to_max_length,
append_eos=append_eos,
vocab_filepath=p.token_vocab_filepath,
load_token_ids_from_vocab=p.load_token_ids_from_vocab,
delimiter=p.tokens_delimiter)
elif p.ngram_vocab_filepath:
raise NotImplementedError('ngram vocab StringsToIds is not supported.')
def IdsToStrings(self, ids, lens):
self._CheckParams()
p = self.params
if p.token_vocab_filepath:
ngram_vocab_filepath = p.token_vocab_filepath
ngram_separator = p.tokens_delimiter
elif p.ngram_vocab_filepath:
ngram_vocab_filepath = p.ngram_vocab_filepath
ngram_separator = p.ngram_separator
return ops.ngram_id_to_token(
token_ids=ids,
seq_lengths=lens,
ngram_vocab_filepath=ngram_vocab_filepath,
ngram_separator=ngram_separator)
class BpeTokenizer(BaseTokenizer):
"""Tokenizers that use BPE vocab files and word to id lists for look-up."""
@classmethod
def Params(cls):
p = super().Params()
p.Define('codes_filepath', None,
'Specifies a filepath to the list of bpe codes vocab file.')
p.Define('words_to_ids_filepath', None,
'Specifies a filepath to the word bpe vocab file.')
return p
def _StringsToIdsImpl(self, strs, max_length, append_eos, languages):
p = self.params
return ops.bpe_words_to_ids(
strs,
maxlen=max_length,
append_eos=append_eos,
tokenization_filepath=p.words_to_ids_filepath)
def IdsToStrings(self, ids, lens):
p = self.params
return ops.bpe_ids_to_words(
token_ids=ids, seq_lengths=lens, vocab_filepath=p.codes_filepath)
class WpmTokenizer(BaseTokenizer):
"""Tokenizer for word-piece models."""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'vocab_filepath', None,
'Specifies a filepath to the WPM vocab. The vocab is sorted by '
'descending merge score.')
p.Define(
'merge_prob', 1.,
'Probability of merging WPMs. If less than 1, then decomposition '
'of words into wordpieces will no longer be deterministic, and '
'result in longer ID sequences. At 0, it will be graphemes.')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
self._wpm_encoder = wpm_encoder.WpmEncoder(p.vocab_filepath, p.merge_prob)
assert p.target_unk_id == self._wpm_encoder.unk_id
assert p.target_sos_id == self._wpm_encoder.sentence_start_id
assert p.target_eos_id == self._wpm_encoder.sentence_end_id
def _StringsToIdsImpl(self, strs, max_length, append_eos, languages):
"""Takes a tensor of strings and returns id/padding tensors.
This generates `token_ids`, `target_ids`, and `paddings` in the format that
is expected for tokenizers. This performs padding to a fixed length and
appends the end-of-sentence token as appropriate.
Args:
strs: a string Tensor.
max_length: a python integer. The second dimension of the returned arrays.
All sequences are padded or truncated to that length.
append_eos: a python bool. See `BaseTokenizer` for explanation.
languages: A vector of strings with the same length as `strs`.
Returns:
A tuple of 3 tensors:
- token_ids: a tensor of sequences of WPM ids starting with SOS. Sequences
always end with EOS unless the sequence exceeds the maximum length.
Always padded with EOS.
- target_ids: a tensor of sequences of WPM ids not starting with SOS
but ending with EOS. Always padded with EOS.
- paddings: a tensor of floats indicating, at each position, whether
the corresponding position is padded.
"""
p = self.params
if append_eos is None:
append_eos = p.append_eos
batch_size = py_utils.GetShape(strs)[0]
token_ids_ta = tf.TensorArray(tf.int32, batch_size)
target_ids_ta = tf.TensorArray(tf.int32, batch_size)
paddings_ta = tf.TensorArray(tf.float32, batch_size)
def _TokenizeOneSentence(i, strs, token_ids_ta, target_ids_ta, paddings_ta):
"""Tokenizes a single sentence."""
ids, _ = self._wpm_encoder.Encode(strs[i])
if append_eos:
ids = tf.concat([ids, [self.eos_id]], axis=0)
# This truncates after the eos is added, so some sentences might
# not have </s> at the end.
token_ids_ta = token_ids_ta.write(
i,
py_utils.PadOrTrimTo(
tf.concat([[self.sos_id], ids], axis=0), [max_length],
self.eos_id))
target_ids_ta = target_ids_ta.write(
i, py_utils.PadOrTrimTo(ids, [max_length], self.eos_id))
paddings_ta = paddings_ta.write(
i,
py_utils.PadOrTrimTo(
tf.zeros_like(ids, dtype=tf.float32), [max_length], 1.))
return i + 1, strs, token_ids_ta, target_ids_ta, paddings_ta
_, _, token_ids_ta, target_ids_ta, paddings_ta = tf.while_loop(
lambda i, *_: i < batch_size,
_TokenizeOneSentence,
loop_vars=(tf.constant(0, tf.int32), strs, token_ids_ta, target_ids_ta,
paddings_ta),
parallel_iterations=30,
back_prop=False)
token_ids = token_ids_ta.stack()
target_ids = target_ids_ta.stack()
paddings = paddings_ta.stack()
if not p.pad_to_max_length:
maxlen = tf.cast(
tf.round(tf.reduce_max(tf.reduce_sum(1.0 - paddings, axis=1))),
tf.int32)
token_ids = token_ids[:, :maxlen]
target_ids = target_ids[:, :maxlen]
paddings = paddings[:, :maxlen]
return token_ids, target_ids, paddings
def IdsToStrings(self, ids, lens):
"""Takes integer matrices and returns vectors of strings."""
ids = py_utils.with_dependencies([py_utils.assert_same_dim0([ids, lens])],
ids)
return tf.map_fn(
lambda inputs: self._wpm_encoder.Decode(inputs[0][:inputs[1]]),
(ids, lens),
dtype=tf.string,
parallel_iterations=30,
back_prop=False)
class SentencePieceTokenizer(BaseTokenizer):
"""SentencePiece tokenizer (https://arxiv.org/abs/1808.06226).
This is a wrapper around tf_text.SentencepieceTokenizer.
NOTE: this tokenizer is incompatible with GenericInput (b/191804185).
"""
@classmethod
def Params(cls):
p = super().Params()
p.vocab_size = 0
p.Define('spm_model', None, 'The file path to the SentencePiece model.')
p.Define('alpha', 1.0,
             'Inverse temperature for probability rescaling used in eval.')
p.Define(
        'nbest_size', 0, 'An integer parameter for sampling.'
' nbest_size = {0,1}: No sampling is performed.'
' nbest_size > 1: samples from the nbest_size results.'
        ' nbest_size < 0: assuming that nbest_size is infinite, samples from'
        ' all hypotheses (lattice) using the'
' forward-filtering-and-backward-sampling algorithm.')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
with tf.io.gfile.GFile(p.spm_model, 'rb') as f:
self._tokenizer = tf_text.SentencepieceTokenizer(
model=f.read(),
out_type=tf.int32,
nbest_size=p.nbest_size,
alpha=p.alpha,
reverse=False,
add_bos=False,
add_eos=p.append_eos)
def StringsToIds(self,
strs,
max_length,
external_append_eos=None,
languages=None):
"""Tokenize strs into vocab ids."""
if (external_append_eos is not None and
external_append_eos != self.params.append_eos):
raise ValueError('external_append_eos is not supported.')
if languages is not None:
raise ValueError('languages is not supported.')
return self._StringsToIdsImpl(
strs, max_length, append_eos=None, languages=None)
def _StringsToIdsImpl(self, strs, max_length, append_eos, languages):
del append_eos
del languages
p = self.params
tokens = self._tokenizer.tokenize(strs)
num_tokens = tokens.row_lengths(-1)
if max_length is None:
labels = tokens.to_tensor(default_value=p.target_unk_id)
else:
output_shape = tf.convert_to_tensor(strs).shape + [max_length]
labels = tokens.to_tensor(
default_value=p.target_unk_id, shape=output_shape)
num_tokens = tf.minimum(num_tokens, max_length)
ids = tf.concat([
tf.expand_dims(tf.ones_like(strs, tf.int32) * p.target_sos_id, -1),
labels[:, :-1]
], -1)
paddings = 1.0 - tf.sequence_mask(
num_tokens, maxlen=max_length, dtype=tf.float32)
return ids, labels, paddings
def IdsToStrings(self, ids, lens, languages=None):
return self._tokenizer.detokenize(tf.RaggedTensor.from_tensor(ids, lens))
def IdsToTokens(self, ids, languages=None):
return self._tokenizer.id_to_string(ids)
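# Example configuration (a sketch; the .model path is hypothetical):
#
#   p = SentencePieceTokenizer.Params().Set(
#       spm_model='/path/to/spm.model', vocab_size=32000, append_eos=True)
#   tokenizer = p.Instantiate()
#   ids, labels, paddings = tokenizer.StringsToIds(
#       tf.constant(['hello world']), max_length=16)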
|
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from .mininode import *
from .blockstore import BlockStore, TxStore
from .util import p2p_port
'''
This is a tool for comparing two or more bitcoinds to each other
using a script provided.
To use, create a class that implements get_tests(), and pass it in
as the test generator to TestManager. get_tests() should be a python
generator that returns TestInstance objects. See below for definition.
'''
# TestNode behaves as follows:
# Configure with a BlockStore and TxStore
# on_inv: log the message but don't request
# on_headers: log the chain tip
# on_pong: update ping response map (for synchronization)
# on_getheaders: provide headers via BlockStore
# on_getdata: provide blocks via BlockStore
global mininode_lock
def wait_until(predicate, attempts=float('inf'), timeout=float('inf')):
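    """Poll `predicate` (under mininode_lock) every 50ms until it returns True,
    or until either `attempts` polls or roughly `timeout` seconds have elapsed.
    Returns True on success, False on giving up."""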
attempt = 0
elapsed = 0
while attempt < attempts and elapsed < timeout:
with mininode_lock:
if predicate():
return True
attempt += 1
elapsed += 0.05
time.sleep(0.05)
return False
class RejectResult(object):
'''
Outcome that expects rejection of a transaction or block.
'''
def __init__(self, code, reason=b''):
self.code = code
self.reason = reason
def match(self, other):
if self.code != other.code:
return False
return other.reason.startswith(self.reason)
def __repr__(self):
        return '%i:%s' % (self.code, self.reason or '*')
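# For illustration: RejectResult(16, b'bad-txns') matches any received reject
# whose code is 16 and whose reason starts with b'bad-txns'; an empty reason
# matches any reason for that code.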
class TestNode(NodeConnCB):
def __init__(self, block_store, tx_store):
NodeConnCB.__init__(self)
self.conn = None
self.bestblockhash = None
self.block_store = block_store
self.block_request_map = {}
self.tx_store = tx_store
self.tx_request_map = {}
self.block_reject_map = {}
self.tx_reject_map = {}
self.inv_hash_ignore = []
# When the pingmap is non-empty we're waiting for
# a response
self.pingMap = {}
self.lastInv = []
self.closed = False
def on_close(self, conn):
self.closed = True
def add_connection(self, conn):
self.conn = conn
def on_headers(self, conn, message):
if len(message.headers) > 0:
best_header = message.headers[-1]
best_header.calc_sha256()
self.bestblockhash = best_header.sha256
def on_getheaders(self, conn, message):
response = self.block_store.headers_for(message.locator, message.hashstop)
if response is not None:
conn.send_message(response)
def on_getdata(self, conn, message):
for idx, i in enumerate(message.inv):
if i.hash in self.inv_hash_ignore:
del message.inv[idx]
elif i.type == 1:
self.tx_request_map[i.hash] = True
elif i.type == 2:
self.block_request_map[i.hash] = True
[conn.send_message(r) for r in self.block_store.get_blocks(message.inv)]
[conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)]
def on_inv(self, conn, message):
self.lastInv = [x.hash for x in message.inv]
def on_pong(self, conn, message):
try:
del self.pingMap[message.nonce]
except KeyError:
raise AssertionError("Got pong for unknown ping [%s]" % repr(message))
def on_reject(self, conn, message):
if message.message == b'tx':
self.tx_reject_map[message.data] = RejectResult(message.code, message.reason)
if message.message == b'block':
self.block_reject_map[message.data] = RejectResult(message.code, message.reason)
def send_inv(self, obj):
mtype = 2 if isinstance(obj, CBlock) else 1
self.conn.send_message(msg_inv([CInv(mtype, obj.sha256)]))
def send_getheaders(self):
# We ask for headers from their last tip.
m = msg_getheaders()
m.locator = self.block_store.get_locator(self.bestblockhash)
self.conn.send_message(m)
# This assumes BIP31
def send_ping(self, nonce):
self.pingMap[nonce] = True
self.conn.send_message(msg_ping(nonce))
def received_ping_response(self, nonce):
return nonce not in self.pingMap
def send_mempool(self):
self.lastInv = []
self.conn.send_message(msg_mempool())
# TestInstance:
#
# Instances of these are generated by the test generator, and fed into the
# comptool.
#
# "blocks_and_transactions" should be an array of
# [obj, True/False/None, hash/None]:
# - obj is either a CBlock, CBlockHeader, or a CTransaction, and
# - the second value indicates whether the object should be accepted
# into the blockchain or mempool (for tests where we expect a certain
# answer), or "None" if we don't expect a certain answer and are just
# comparing the behavior of the nodes being tested.
# - the third value is the hash to test the tip against (if None or omitted,
# use the hash of the block)
# - NOTE: if a block header, no test is performed; instead the header is
# just added to the block_store. This is to facilitate block delivery
# when communicating with headers-first clients (when withholding an
# intermediate block).
# sync_every_block: if True, then each block will be inv'ed, synced, and
# nodes will be tested based on the outcome for the block. If False,
# then inv's accumulate until all blocks are processed (or max inv size
# is reached) and then sent out in one inv message. Then the final block
# will be synced across all connections, and the outcome of the final
# block will be tested.
# sync_every_tx: analogous to behavior for sync_every_block, except if outcome
# on the final tx is None, then contents of entire mempool are compared
# across all connections. (If outcome of final tx is specified as true
# or false, then only the last tx is tested against outcome.)
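# Example (a sketch; next_block/create_tx are hypothetical helpers building a
# CBlock and a CTransaction):
#
#   class ExampleTest(object):
#       def get_tests(self):
#           block = self.next_block()
#           tx = self.create_tx()
#           # expect the block to be accepted and the tx rejected with code 16
#           yield TestInstance([[block, True], [tx, RejectResult(16)]])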
class TestInstance(object):
def __init__(self, objects=None, sync_every_block=True, sync_every_tx=False):
self.blocks_and_transactions = objects if objects else []
self.sync_every_block = sync_every_block
self.sync_every_tx = sync_every_tx
class TestManager(object):
def __init__(self, testgen, datadir):
self.test_generator = testgen
self.connections = []
self.test_nodes = []
self.block_store = BlockStore(datadir)
self.tx_store = TxStore(datadir)
self.ping_counter = 1
def add_all_connections(self, nodes):
for i in range(len(nodes)):
# Create a p2p connection to each node
test_node = TestNode(self.block_store, self.tx_store)
self.test_nodes.append(test_node)
self.connections.append(NodeConn('127.0.0.1', p2p_port(i), nodes[i], test_node))
# Make sure the TestNode (callback class) has a reference to its
# associated NodeConn
test_node.add_connection(self.connections[-1])
def wait_for_disconnections(self):
def disconnected():
return all(node.closed for node in self.test_nodes)
return wait_until(disconnected, timeout=10)
def clear_all_connections(self):
self.connections = []
self.test_nodes = []
def wait_for_verack(self):
def veracked():
return all(node.verack_received for node in self.test_nodes)
return wait_until(veracked, timeout=10)
def wait_for_pings(self, counter):
def received_pongs():
return all(node.received_ping_response(counter) for node in self.test_nodes)
return wait_until(received_pongs)
    # sync_blocks: Wait for all connections to request the given blockhash,
    # then send getheaders to learn the tip of each node, and synchronize
    # the responses by using a ping (and waiting for a pong with the same nonce).
def sync_blocks(self, blockhash, num_blocks):
def blocks_requested():
return all(
blockhash in node.block_request_map and node.block_request_map[blockhash]
for node in self.test_nodes
)
# --> error if not requested
if not wait_until(blocks_requested, attempts=60*num_blocks):
# print [ c.cb.block_request_map for c in self.connections ]
raise AssertionError("Not all nodes requested block")
# Send getheaders message
[ c.cb.send_getheaders() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
    # Analogous to sync_blocks (see above)
def sync_transaction(self, txhash, num_events):
# Wait for nodes to request transaction (50ms sleep * 20 tries * num_events)
def transaction_requested():
return all(
txhash in node.tx_request_map and node.tx_request_map[txhash]
for node in self.test_nodes
)
# --> error if not requested
if not wait_until(transaction_requested, attempts=20*num_events):
# print [ c.cb.tx_request_map for c in self.connections ]
raise AssertionError("Not all nodes requested transaction")
# Get the mempool
[ c.cb.send_mempool() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Sort inv responses from each node
with mininode_lock:
[ c.cb.lastInv.sort() for c in self.connections ]
# Verify that the tip of each connection all agree with each other, and
# with the expected outcome (if given)
def check_results(self, blockhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
if c.cb.bestblockhash != self.connections[0].cb.bestblockhash:
return False
elif isinstance(outcome, RejectResult): # Check that block was rejected w/ code
if c.cb.bestblockhash == blockhash:
return False
if blockhash not in c.cb.block_reject_map:
print('Block not in reject map: %064x' % (blockhash))
return False
if not outcome.match(c.cb.block_reject_map[blockhash]):
print('Block rejected with %s instead of expected %s: %064x' % (c.cb.block_reject_map[blockhash], outcome, blockhash))
return False
elif ((c.cb.bestblockhash == blockhash) != outcome):
# print c.cb.bestblockhash, blockhash, outcome
return False
return True
# Either check that the mempools all agree with each other, or that
# txhash's presence in the mempool matches the outcome specified.
# This is somewhat of a strange comparison, in that we're either comparing
# a particular tx to an outcome, or the entire mempools altogether;
# perhaps it would be useful to add the ability to check explicitly that
# a particular tx's existence in the mempool is the same across all nodes.
def check_mempool(self, txhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
# Make sure the mempools agree with each other
if c.cb.lastInv != self.connections[0].cb.lastInv:
# print c.rpc.getrawmempool()
return False
elif isinstance(outcome, RejectResult): # Check that tx was rejected w/ code
if txhash in c.cb.lastInv:
return False
if txhash not in c.cb.tx_reject_map:
print('Tx not in reject map: %064x' % (txhash))
return False
if not outcome.match(c.cb.tx_reject_map[txhash]):
print('Tx rejected with %s instead of expected %s: %064x' % (c.cb.tx_reject_map[txhash], outcome, txhash))
return False
elif ((txhash in c.cb.lastInv) != outcome):
# print c.rpc.getrawmempool(), c.cb.lastInv
return False
return True
def run(self):
# Wait until verack is received
self.wait_for_verack()
test_number = 1
for test_instance in self.test_generator.get_tests():
# We use these variables to keep track of the last block
# and last transaction in the tests, which are used
# if we're not syncing on every block or every tx.
[ block, block_outcome, tip ] = [ None, None, None ]
[ tx, tx_outcome ] = [ None, None ]
invqueue = []
for test_obj in test_instance.blocks_and_transactions:
b_or_t = test_obj[0]
outcome = test_obj[1]
# Determine if we're dealing with a block or tx
if isinstance(b_or_t, CBlock): # Block test runner
block = b_or_t
block_outcome = outcome
tip = block.sha256
# each test_obj can have an optional third argument
# to specify the tip we should compare with
# (default is to use the block being tested)
if len(test_obj) >= 3:
tip = test_obj[2]
# Add to shared block_store, set as current block
# If there was an open getdata request for the block
# previously, and we didn't have an entry in the
# block_store, then immediately deliver, because the
# node wouldn't send another getdata request while
# the earlier one is outstanding.
first_block_with_hash = True
if self.block_store.get(block.sha256) is not None:
first_block_with_hash = False
with mininode_lock:
self.block_store.add_block(block)
for c in self.connections:
if first_block_with_hash and block.sha256 in c.cb.block_request_map and c.cb.block_request_map[block.sha256] == True:
# There was a previous request for this block hash
# Most likely, we delivered a header for this block
# but never had the block to respond to the getdata
c.send_message(msg_block(block))
else:
c.cb.block_request_map[block.sha256] = False
# Either send inv's to each node and sync, or add
# to invqueue for later inv'ing.
if (test_instance.sync_every_block):
[ c.cb.send_inv(block) for c in self.connections ]
self.sync_blocks(block.sha256, 1)
if (not self.check_results(tip, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(2, block.sha256))
elif isinstance(b_or_t, CBlockHeader):
block_header = b_or_t
self.block_store.add_header(block_header)
else: # Tx test runner
assert(isinstance(b_or_t, CTransaction))
tx = b_or_t
tx_outcome = outcome
# Add to shared tx store and clear map entry
with mininode_lock:
self.tx_store.add_transaction(tx)
for c in self.connections:
c.cb.tx_request_map[tx.sha256] = False
# Again, either inv to all nodes or save for later
if (test_instance.sync_every_tx):
[ c.cb.send_inv(tx) for c in self.connections ]
self.sync_transaction(tx.sha256, 1)
if (not self.check_mempool(tx.sha256, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(1, tx.sha256))
# Ensure we're not overflowing the inv queue
if len(invqueue) == MAX_INV_SZ:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
# Do final sync if we weren't syncing on every block or every tx.
if (not test_instance.sync_every_block and block is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_blocks(block.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_results(tip, block_outcome)):
raise AssertionError("Block test failed at test %d" % test_number)
if (not test_instance.sync_every_tx and tx is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_transaction(tx.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_mempool(tx.sha256, tx_outcome)):
raise AssertionError("Mempool test failed at test %d" % test_number)
print("Test %d: PASS" % test_number, [ c.rpc.getblockcount() for c in self.connections ])
test_number += 1
[ c.disconnect_node() for c in self.connections ]
self.wait_for_disconnections()
self.block_store.close()
self.tx_store.close()
|
|
import csv
import datetime
from .constants import MAX_NUMBER, MIN_NUMBER, \
MAX_YEAR, MIN_YEAR, \
PERIODS_ABBR, \
START_DATE
class Params:
def __init__(self, year, month):
if year >= MIN_YEAR and year <= MAX_YEAR:
self.year = year
else:
raise ValueError('year must be between {} and {} inclusive: year={!r}'.format(MIN_YEAR, MAX_YEAR, year))
if month >= 1 and month <= 12:
self.month = month
else:
raise ValueError('month must be between 1 and 12 inclusive: month={!r}'.format(month))
self.yy = to_yy(year)
self.mmm = to_mmm(month)
def __repr__(self):
return '{}(year={!r}, yy={!r}, month={!r}, mmm={!r})'.format(self.__class__.__name__, self.year, self.yy, self.month, self.mmm)
def to_yy(year):
"""Returns the last 2 digits of the year."""
return str(year % 100).zfill(2)
MONTHS_ABBR = ['',
'Jan', 'Feb', 'Mar',
'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep',
'Oct', 'Nov', 'Dec'
]
def to_mmm(month):
"""Returns the first 3 letters of the month.
The first letter is capitalized.
"""
return MONTHS_ABBR[month]
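# For example, to_yy(2015) == '15', to_yy(2001) == '01', and to_mmm(7) == 'Jul'.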
class Settings:
DEFAULT_TIMEOUT = 5
DEFAULT_URL = 'http://nlcb.co.tt/app/index.php/pwresults/playwhemonthsum'
def __init__(self, timeout=DEFAULT_TIMEOUT, url=DEFAULT_URL):
self.timeout = timeout
self.url = url
def __repr__(self):
return '{}(timeout={!r}, url={!r})'.format(self.__class__.__name__, self.timeout, self.url)
class Result:
@classmethod
def from_csvline(cls, csvline, delimiter=','):
if isinstance(csvline, str):
csvline = csvline.split(delimiter)
else:
try:
csvline = list(map(str, csvline))
            except Exception:
csvline = []
line = csvline + ['', '', '', '']
draw = line[0]
year, month, day = (line[1].split('-') + ['', '', ''])[:3]
period = line[2]
number = line[3]
return cls(draw, year, month, day, period, number)
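    # For illustration (an assumed CSV line of the form draw,date,period,number,
    # where 'EM' stands in for one of PERIODS_ABBR):
    #   Result.from_csvline('1498,1994-07-04,EM,15')
    # would produce a Result with draw=1498, date=1994-07-04, period='EM',
    # number=15, provided each field passes validation in __init__.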
def __init__(self, draw, year, month, day, period, number):
original_args = {
'draw': draw,
'year': year,
'month': month,
'day': day,
'period': period,
'number': number
}
self.errors = errors = []
self.draw = None
self.date = None
self.period = None
self.number = None
# Clean and validate draw
draw = _parse_int(draw)
if draw < 1:
errors.append('draw must be a positive integer: draw={!r}'.format(original_args['draw']))
else:
self.draw = draw
# Clean and validate year, month, day
year = _parse_int(year)
month = _parse_int(month)
day = _parse_int(day)
try:
self.date = datetime.date(year, month, day)
except ValueError:
errors.append('year, month and day must represent a valid date: year={!r}, month={!r}, day={!r}'.format(
original_args['year'],
original_args['month'],
original_args['day'])
)
# Clean and validate period
period = _parse_str(period).upper()
if period not in PERIODS_ABBR:
errors.append('period must be one of {}: period={!r}'.format(', '.join(PERIODS_ABBR), original_args['period']))
else:
self.period = period
# Clean and validate number
number = _parse_int(number)
if number < MIN_NUMBER or number > MAX_NUMBER:
errors.append('number must be an integer between {} and {} inclusive: number={!r}'.format(MIN_NUMBER, MAX_NUMBER, original_args['number']))
else:
self.number = number
def __eq__(self, other):
return type(other) is type(self) and \
self.is_valid() and other.is_valid() and \
self.draw == other.draw and \
self.date == other.date and \
self.period == other.period and \
self.number == other.number
def is_valid(self):
return not self.errors
def full_error_message(self):
if hasattr(self, 'lineno'):
header = 'Line {}: {!r}'.format(self.lineno, self.line)
else:
header = '{!r}'.format(self)
reasons = '\n'.join(map(lambda e: ' ' + e, self.errors))
return header + '\n' + reasons
def __repr__(self):
return '{}(draw={!r}, date={!r}, period={!r}, number={!r})'.format(self.__class__.__name__, self.draw, self.date, self.period, self.number)
def _parse_int(x):
try:
return int(x)
    except Exception:
return 0
def _parse_str(x):
try:
return str(x)
    except Exception:
return ''
class Results(list):
@classmethod
def from_csvfile(cls, csvfile):
delimiter = csv.get_dialect('excel').delimiter
results = []
for lineno, line in enumerate(csv.reader(csvfile), start=1):
contents = delimiter.join(line)
if contents.strip():
result = Result.from_csvline(line, delimiter=delimiter)
# Track these values for error reporting purposes
result.lineno = lineno
result.line = contents
results.append(result)
return cls(results)
def __init__(self, results):
super().__init__()
self.invalid = []
for result in results:
if result.is_valid():
self.append(result)
else:
self.invalid.append(result)
def all_valid(self):
return not bool(self.invalid)
def full_error_messages(self):
messages = '\n'.join(map(lambda r: r.full_error_message(), self.invalid))
footer = 'Total errors = {}'.format(len(self.invalid))
return messages + '\n\n' + footer
def date_range(start_date=None, period=PERIODS_ABBR[0], today=datetime.date.today):
if start_date is None:
start_date = START_DATE
period = PERIODS_ABBR[0]
elif period == PERIODS_ABBR[-1]:
start_date += datetime.timedelta(days=1)
end_date = today()
start_year = start_date.year
end_year = end_date.year
for year in range(start_year, end_year + 1):
start_month = start_date.month if year == start_year else 1
end_month = end_date.month if year == end_year else 12
for month in range(start_month, end_month + 1):
yield year, month
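# For illustration (assuming today() returns datetime.date(1994, 9, 15) and
# START_DATE falls in July 1994):
#
#   list(date_range())  # -> [(1994, 7), (1994, 8), (1994, 9)]
#
# Passing the date and period of the most recent stored result resumes from
# that date, advancing to the next day when the given period is the last entry
# in PERIODS_ABBR.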
|
|
from rpython.jit.metainterp.walkvirtual import VirtualVisitor
from rpython.jit.metainterp.history import ConstInt, ConstPtr, ConstFloat
from rpython.jit.metainterp.optimizeopt.info import ArrayPtrInfo,\
ArrayStructInfo, AbstractStructPtrInfo
from rpython.jit.metainterp.optimizeopt.intutils import \
MININT, MAXINT, IntBound, IntLowerBound
from rpython.jit.metainterp.resoperation import rop, ResOperation, \
InputArgInt, InputArgRef, InputArgFloat
from rpython.rlib.debug import debug_print
LEVEL_UNKNOWN = '\x00'
LEVEL_NONNULL = '\x01'
LEVEL_KNOWNCLASS = '\x02'
LEVEL_CONSTANT = '\x03'
class VirtualStatesCantMatch(Exception):
def __init__(self, msg='?', state=None):
self.msg = msg
self.state = state
class GenerateGuardState(object):
def __init__(self, optimizer=None, guards=None, renum=None, bad=None, force_boxes=False):
self.optimizer = optimizer
self.cpu = optimizer.cpu
if guards is None:
guards = []
self.extra_guards = guards
if renum is None:
renum = {}
self.renum = renum
if bad is None:
bad = {}
self.bad = bad
self.force_boxes = force_boxes
def get_runtime_item(self, box, descr, i):
array = box.getref_base()
if descr.is_array_of_pointers():
return InputArgRef(self.cpu.bh_getarrayitem_gc_r(array, i, descr))
elif descr.is_array_of_floats():
return InputArgFloat(self.cpu.bh_getarrayitem_gc_f(array, i, descr))
else:
return InputArgInt(self.cpu.bh_getarrayitem_gc_i(array, i, descr))
def get_runtime_field(self, box, descr):
struct = box.getref_base()
if descr.is_pointer_field():
return InputArgRef(self.cpu.bh_getfield_gc_r(struct, descr))
elif descr.is_float_field():
return InputArgFloat(self.cpu.bh_getfield_gc_f(struct, descr))
else:
return InputArgInt(self.cpu.bh_getfield_gc_i(struct, descr))
def get_runtime_interiorfield(self, box, descr, i):
struct = box.getref_base()
if descr.is_pointer_field():
return InputArgRef(self.cpu.bh_getinteriorfield_gc_r(struct, i,
descr))
elif descr.is_float_field():
return InputArgFloat(self.cpu.bh_getinteriorfield_gc_f(struct, i,
descr))
else:
return InputArgInt(self.cpu.bh_getinteriorfield_gc_i(struct, i,
descr))
class AbstractVirtualStateInfo(object):
position = -1
def generate_guards(self, other, op, runtime_op, state):
""" generate guards (output in the list extra_guards) that make runtime
values of the shape other match the shape of self. if that's not
possible, VirtualStatesCantMatch is thrown and bad gets keys set which
parts of the state are the problem.
the function can peek into the information about the op, as well
as runtime value (passed in runtime_op)
as a guiding heuristic whether making such guards makes
sense. if None is passed in for op, no guard is ever generated, and
this function degenerates to a generalization check."""
assert self.position != -1
if self.position in state.renum:
if state.renum[self.position] != other.position:
state.bad[self] = state.bad[other] = None
raise VirtualStatesCantMatch(
'The numbering of the virtual states does not ' +
'match. This means that two virtual fields ' +
'have been set to the same Box in one of the ' +
'virtual states but not in the other.',
state)
else:
state.renum[self.position] = other.position
try:
self._generate_guards(other, op, runtime_op, state)
except VirtualStatesCantMatch as e:
state.bad[self] = state.bad[other] = None
if e.state is None:
e.state = state
raise e
def _generate_guards(self, other, box, runtime_box, state):
raise VirtualStatesCantMatch(
'Generating guards for making the VirtualStates ' +
            'at hand match has not been implemented')
def enum_forced_boxes(self, boxes, box, optimizer, force_boxes=False):
raise NotImplementedError
def enum(self, virtual_state):
if self.position != -1:
return
virtual_state.info_counter += 1
self.position = virtual_state.info_counter
self._enum(virtual_state)
def _enum(self, virtual_state):
raise NotImplementedError
def debug_print(self, indent, seen, bad, metainterp_sd):
mark = ''
if self in bad:
mark = '*'
self.debug_header(indent + mark)
if self not in seen:
seen[self] = True
for s in self.fieldstate:
s.debug_print(indent + " ", seen, bad, metainterp_sd)
else:
debug_print(indent + " ...")
def debug_header(self, indent):
raise NotImplementedError
class AbstractVirtualStructStateInfo(AbstractVirtualStateInfo):
def __init__(self, fielddescrs):
self.fielddescrs = fielddescrs
def _generate_guards(self, other, box, runtime_box, state):
if not self._generalization_of_structpart(other):
raise VirtualStatesCantMatch("different kinds of structs")
assert isinstance(other, AbstractVirtualStructStateInfo)
assert len(self.fielddescrs) == len(self.fieldstate)
assert len(other.fielddescrs) == len(other.fieldstate)
if runtime_box is not None:
opinfo = state.optimizer.getptrinfo(box)
assert opinfo.is_virtual()
assert isinstance(opinfo, AbstractStructPtrInfo)
else:
opinfo = None
if len(self.fielddescrs) != len(other.fielddescrs):
raise VirtualStatesCantMatch("field descrs don't match")
for i in range(len(self.fielddescrs)):
if other.fielddescrs[i] is not self.fielddescrs[i]:
raise VirtualStatesCantMatch("field descrs don't match")
if runtime_box is not None and opinfo is not None:
fieldbox = opinfo._fields[self.fielddescrs[i].get_index()]
if fieldbox is not None:
fieldbox_runtime = state.get_runtime_field(runtime_box,
self.fielddescrs[i])
else:
fieldbox_runtime = None
else:
fieldbox = None
fieldbox_runtime = None
if self.fieldstate[i] is not None:
if other.fieldstate[i] is None:
raise VirtualStatesCantMatch
self.fieldstate[i].generate_guards(other.fieldstate[i],
fieldbox,
fieldbox_runtime, state)
def _generalization_of_structpart(self, other):
raise NotImplementedError
def enum_forced_boxes(self, boxes, box, optimizer, force_boxes=False):
box = optimizer.get_box_replacement(box)
info = optimizer.getptrinfo(box)
if info is None or not info.is_virtual():
raise VirtualStatesCantMatch()
else:
assert isinstance(info, AbstractStructPtrInfo)
            # The min operation ensures we don't wander off either array, as not
            # all callers of make_inputargs have validated their inputs with
            # generate_guards.
for i in range(min(len(self.fielddescrs), len(info._fields))):
state = self.fieldstate[i]
if not state:
continue
if state.position > self.position:
fieldbox = info._fields[i]
state.enum_forced_boxes(boxes, fieldbox, optimizer, force_boxes)
def _enum(self, virtual_state):
for s in self.fieldstate:
if s:
s.enum(virtual_state)
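# VirtualStateInfo describes a virtual instance identified by a known class
# (vtable constant); VStructStateInfo below describes a virtual lltype struct
# identified by its typedescr.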
class VirtualStateInfo(AbstractVirtualStructStateInfo):
def is_virtual(self):
return True
def __init__(self, known_class, fielddescrs):
AbstractVirtualStructStateInfo.__init__(self, fielddescrs)
self.known_class = known_class
def _generalization_of_structpart(self, other):
return (isinstance(other, VirtualStateInfo) and
self.known_class.same_constant(other.known_class))
def debug_header(self, indent):
debug_print(indent + 'VirtualStateInfo(%d):' % self.position)
class VStructStateInfo(AbstractVirtualStructStateInfo):
def __init__(self, typedescr, fielddescrs):
AbstractVirtualStructStateInfo.__init__(self, fielddescrs)
self.typedescr = typedescr
def _generalization_of_structpart(self, other):
return (isinstance(other, VStructStateInfo) and
self.typedescr is other.typedescr)
def debug_header(self, indent):
debug_print(indent + 'VStructStateInfo(%d):' % self.position)
class VArrayStateInfo(AbstractVirtualStateInfo):
def __init__(self, arraydescr):
self.arraydescr = arraydescr
def _generate_guards(self, other, box, runtime_box, state):
if not isinstance(other, VArrayStateInfo):
raise VirtualStatesCantMatch("other is not an array")
if self.arraydescr is not other.arraydescr:
raise VirtualStatesCantMatch("other is a different kind of array")
if len(self.fieldstate) != len(other.fieldstate):
raise VirtualStatesCantMatch("other has a different length")
fieldbox = None
fieldbox_runtime = None
for i in range(len(self.fieldstate)):
if runtime_box is not None:
opinfo = state.optimizer.getptrinfo(box)
assert isinstance(opinfo, ArrayPtrInfo)
fieldbox = opinfo._items[i]
fieldbox_runtime = state.get_runtime_item(runtime_box,
self.arraydescr, i)
if self.fieldstate[i] is not None:
if other.fieldstate[i] is None:
raise VirtualStatesCantMatch
self.fieldstate[i].generate_guards(other.fieldstate[i],
fieldbox, fieldbox_runtime, state)
def enum_forced_boxes(self, boxes, box, optimizer, force_boxes=False):
box = optimizer.get_box_replacement(box)
info = optimizer.getptrinfo(box)
if info is None or not info.is_virtual():
raise VirtualStatesCantMatch()
if len(self.fieldstate) > info.getlength():
raise VirtualStatesCantMatch
for i in range(len(self.fieldstate)):
fieldbox = info.getitem(self.arraydescr, i)
s = self.fieldstate[i]
if s is not None:
if s.position > self.position:
s.enum_forced_boxes(boxes, fieldbox, optimizer, force_boxes)
def _enum(self, virtual_state):
for s in self.fieldstate:
if s:
s.enum(virtual_state)
def debug_header(self, indent):
debug_print(indent + 'VArrayStateInfo(%d):' % self.position)
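# VArrayStructStateInfo describes a virtual array of structs.  Its fieldstate
# list is laid out row-major: the state of field 'descr' of item 'i' lives at
# index i * len(fielddescrs) + descr.get_index().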
class VArrayStructStateInfo(AbstractVirtualStateInfo):
def __init__(self, arraydescr, fielddescrs, length):
self.arraydescr = arraydescr
self.fielddescrs = fielddescrs
self.length = length
def _generate_guards(self, other, box, runtime_box, state):
if not isinstance(other, VArrayStructStateInfo):
raise VirtualStatesCantMatch("other is not an VArrayStructStateInfo")
if self.arraydescr is not other.arraydescr:
raise VirtualStatesCantMatch("other is a different kind of array")
if len(self.fielddescrs) != len(other.fielddescrs):
raise VirtualStatesCantMatch("other has a different length")
        if self.length != other.length:
            raise VirtualStatesCantMatch("other has a different length")
for j, descr in enumerate(self.fielddescrs):
if descr is not other.fielddescrs[j]:
raise VirtualStatesCantMatch("other is a different kind of array")
fieldbox = None
fieldbox_runtime = None
if box is not None:
opinfo = state.optimizer.getptrinfo(box)
assert isinstance(opinfo, ArrayPtrInfo)
else:
opinfo = None
for i in range(self.length):
for descr in self.fielddescrs:
index = i * len(self.fielddescrs) + descr.get_index()
fieldstate = self.fieldstate[index]
if fieldstate is None:
continue
if other.fieldstate[index] is None:
raise VirtualStatesCantMatch
if box is not None and opinfo is not None:
fieldbox = opinfo._items[index]
fieldbox_runtime = state.get_runtime_interiorfield(
runtime_box, descr, i)
self.fieldstate[index].generate_guards(other.fieldstate[index],
fieldbox, fieldbox_runtime, state)
def _enum(self, virtual_state):
for s in self.fieldstate:
if s is not None:
s.enum(virtual_state)
def enum_forced_boxes(self, boxes, box, optimizer, force_boxes=False):
opinfo = optimizer.getptrinfo(box)
if not isinstance(opinfo, ArrayStructInfo):
raise VirtualStatesCantMatch
if not opinfo.is_virtual():
raise VirtualStatesCantMatch
#if len(self.fielddescrs) > len(value._items):
# raise VirtualStatesCantMatch
for i in range(self.length):
for descr in self.fielddescrs:
index = i * len(self.fielddescrs) + descr.get_index()
fieldstate = self.fieldstate[index]
itembox = opinfo._items[i * len(self.fielddescrs) +
descr.get_index()]
if fieldstate is None:
if itembox is not None:
raise VirtualStatesCantMatch
continue
# I think itembox must be present here
if fieldstate.position > self.position:
fieldstate.enum_forced_boxes(boxes, itembox, optimizer,
force_boxes)
def debug_header(self, indent):
debug_print(indent + 'VArrayStructStateInfo(%d):' % self.position)
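# Factory for the non-virtual case: dispatches on the one-letter type of the
# box ('i' for ints, 'r' for GC pointers, anything else falls back to the
# generic NotVirtualStateInfo, e.g. floats).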
def not_virtual(cpu, type, info):
if type == 'i':
return NotVirtualStateInfoInt(cpu, type, info)
if type == 'r':
return NotVirtualStateInfoPtr(cpu, type, info)
return NotVirtualStateInfo(cpu, type, info)
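# NotVirtualStateInfo describes a jump argument that is not a virtual.  Its
# 'level' records how much is known about the value: LEVEL_UNKNOWN,
# LEVEL_NONNULL, LEVEL_KNOWNCLASS (both set only by the pointer subclass) or
# LEVEL_CONSTANT.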
class NotVirtualStateInfo(AbstractVirtualStateInfo):
level = LEVEL_UNKNOWN
constbox = None
def __init__(self, cpu, type, info):
if info and info.is_constant():
self.level = LEVEL_CONSTANT
self.constbox = info.getconst()
def is_const(self):
return self.constbox is not None
def is_virtual(self):
return False
def _generate_guards(self, other, box, runtime_box, state):
# XXX This will always retrace instead of forcing anything which
# might be what we want sometimes?
extra_guards = state.extra_guards
if self.level == LEVEL_UNKNOWN:
            return self._generate_guards_unknown(other, box, runtime_box,
extra_guards,
state)
else:
if not isinstance(other, NotVirtualStateInfo):
raise VirtualStatesCantMatch(
'comparing a constant against something that is a virtual')
assert self.level == LEVEL_CONSTANT
if other.level == LEVEL_CONSTANT:
if self.constbox.same_constant(other.constbox):
return
raise VirtualStatesCantMatch("different constants")
if runtime_box is not None and self.constbox.same_constant(runtime_box.constbox()):
op = ResOperation(rop.GUARD_VALUE, [box, self.constbox])
extra_guards.append(op)
return
else:
raise VirtualStatesCantMatch("other not constant")
assert 0, "unreachable"
    def _generate_guards_unknown(self, other, box, runtime_box, extra_guards,
state):
return
def enum_forced_boxes(self, boxes, box, optimizer, force_boxes=False):
if self.level == LEVEL_CONSTANT:
return
assert 0 <= self.position_in_notvirtuals
if optimizer is not None:
box = optimizer.get_box_replacement(box)
if box.type == 'r':
info = optimizer.getptrinfo(box)
if info and info.is_virtual():
if force_boxes:
info.force_box(box, optimizer)
else:
raise VirtualStatesCantMatch
boxes[self.position_in_notvirtuals] = box
def _enum(self, virtual_state):
if self.level == LEVEL_CONSTANT:
return
self.position_in_notvirtuals = virtual_state.numnotvirtuals
virtual_state.numnotvirtuals += 1
def debug_print(self, indent, seen, bad, metainterp_sd=None):
mark = ''
if self in bad:
mark = '*'
if self.level == LEVEL_UNKNOWN:
l = "Unknown"
elif self.level == LEVEL_NONNULL:
l = "NonNull"
elif self.level == LEVEL_KNOWNCLASS:
addr = self.known_class.getaddr()
if metainterp_sd:
name = metainterp_sd.get_name_from_address(addr)
else:
name = "?"
l = "KnownClass(%s)" % name
else:
assert self.level == LEVEL_CONSTANT
const = self.constbox
if isinstance(const, ConstInt):
l = "ConstInt(%s)" % (const.value, )
elif isinstance(const, ConstPtr):
if const.value:
l = "ConstPtr"
else:
l = "ConstPtr(null)"
else:
assert isinstance(const, ConstFloat)
l = "ConstFloat(%s)" % const.getfloat()
lb = ''
if self.lenbound:
lb = ', ' + self.lenbound.bound.__repr__()
result = indent + mark + 'NotVirtualStateInfo(%d' % self.position + ', ' + l
extra = self._extra_repr()
if extra:
result += ', ' + extra
result += lb + ')'
debug_print(result)
class NotVirtualStateInfoInt(NotVirtualStateInfo):
intbound = None
def __init__(self, cpu, type, info):
NotVirtualStateInfo.__init__(self, cpu, type, info)
assert type == 'i'
if isinstance(info, IntBound):
if info.lower < MININT / 2:
info.lower = MININT
if info.upper > MAXINT / 2:
info.upper = MAXINT
self.intbound = info
    def _generate_guards_unknown(self, other, box, runtime_box, extra_guards,
state):
other_intbound = None
if isinstance(other, NotVirtualStateInfoInt):
other_intbound = other.intbound
if self.intbound is None:
return
if self.intbound.contains_bound(other_intbound):
return
if (runtime_box is not None and
self.intbound.contains(runtime_box.getint())):
# this may generate a few more guards than needed, but they are
# optimized away when emitting them
self.intbound.make_guards(box, extra_guards, state.optimizer)
return
raise VirtualStatesCantMatch("intbounds don't match")
def _extra_repr(self):
return self.intbound.__repr__()
class NotVirtualStateInfoPtr(NotVirtualStateInfo):
lenbound = None
known_class = None
def __init__(self, cpu, type, info):
if info:
self.known_class = info.get_known_class(cpu)
if self.known_class:
self.level = LEVEL_KNOWNCLASS
elif info.is_nonnull():
self.level = LEVEL_NONNULL
self.lenbound = info.getlenbound(None)
# might set it to LEVEL_CONSTANT
NotVirtualStateInfo.__init__(self, cpu, type, info)
def _generate_guards(self, other, box, runtime_box, state):
if state.force_boxes and isinstance(other, VirtualStateInfo):
return self._generate_virtual_guards(other, box, runtime_box, state)
if not isinstance(other, NotVirtualStateInfoPtr):
raise VirtualStatesCantMatch(
                'The VirtualStates do not match: a ' +
'virtual appears where a pointer is needed ' +
'and it is too late to force it.')
extra_guards = state.extra_guards
if self.lenbound:
if other.lenbound is None:
other_bound = IntLowerBound(0)
else:
other_bound = other.lenbound
if not self.lenbound.contains_bound(other_bound):
raise VirtualStatesCantMatch("length bound does not match")
if self.level == LEVEL_NONNULL:
return self._generate_guards_nonnull(other, box, runtime_box,
extra_guards,
state)
elif self.level == LEVEL_KNOWNCLASS:
return self._generate_guards_knownclass(other, box, runtime_box,
extra_guards,
state)
return NotVirtualStateInfo._generate_guards(self, other, box,
runtime_box, state)
# the following methods often peek into the runtime value that the
# box had when tracing. This value is only used as an educated guess.
# It is used here to choose between either emitting a guard and jumping
# to an existing compiled loop or retracing the loop. Both alternatives
# will always generate correct behaviour, but performance will differ.
def _generate_virtual_guards(self, other, box, runtime_box, state):
"""
Generate the guards and add state information for unifying a virtual
object with a non-virtual. This involves forcing the object in the
event that unification can succeed. Since virtual objects cannot be null,
this method need only check that the virtual object has the expected type.
"""
assert state.force_boxes and isinstance(other, VirtualStateInfo)
if self.level == LEVEL_CONSTANT:
raise VirtualStatesCantMatch(
"cannot unify a constant value with a virtual object")
if self.level == LEVEL_KNOWNCLASS:
if not self.known_class.same_constant(other.known_class):
raise VirtualStatesCantMatch("classes don't match")
def _generate_guards_nonnull(self, other, box, runtime_box, extra_guards,
state):
if not isinstance(other, NotVirtualStateInfoPtr):
raise VirtualStatesCantMatch('trying to match ptr with non-ptr??!')
if other.level == LEVEL_UNKNOWN:
if runtime_box is not None and runtime_box.nonnull():
op = ResOperation(rop.GUARD_NONNULL, [box])
extra_guards.append(op)
return
else:
raise VirtualStatesCantMatch("other not known to be nonnull")
elif other.level == LEVEL_NONNULL:
pass
elif other.level == LEVEL_KNOWNCLASS:
pass # implies nonnull
else:
assert other.level == LEVEL_CONSTANT
assert other.constbox
if not other.constbox.nonnull():
raise VirtualStatesCantMatch("constant is null")
def _generate_guards_knownclass(self, other, box, runtime_box, extra_guards,
state):
cpu = state.cpu
if not isinstance(other, NotVirtualStateInfoPtr):
raise VirtualStatesCantMatch('trying to match ptr with non-ptr??!')
if other.level == LEVEL_UNKNOWN:
if (runtime_box and runtime_box.nonnull() and
self.known_class.same_constant(cpu.ts.cls_of_box(runtime_box))):
op = ResOperation(rop.GUARD_NONNULL_CLASS, [box, self.known_class])
extra_guards.append(op)
else:
raise VirtualStatesCantMatch("other's class is unknown")
elif other.level == LEVEL_NONNULL:
if (runtime_box and self.known_class.same_constant(
cpu.ts.cls_of_box(runtime_box))):
op = ResOperation(rop.GUARD_CLASS, [box, self.known_class])
extra_guards.append(op)
else:
raise VirtualStatesCantMatch("other's class is unknown")
elif other.level == LEVEL_KNOWNCLASS:
if self.known_class.same_constant(other.known_class):
return
raise VirtualStatesCantMatch("classes don't match")
else:
assert other.level == LEVEL_CONSTANT
if (other.constbox.nonnull() and
self.known_class.same_constant(cpu.ts.cls_of_box(other.constbox))):
pass
else:
raise VirtualStatesCantMatch("classes don't match")
class VirtualState(object):
def __init__(self, state):
self.state = state
self.info_counter = -1
self.numnotvirtuals = 0
for s in state:
if s:
s.enum(self)
def generalization_of(self, other, optimizer):
state = GenerateGuardState(optimizer)
assert len(self.state) == len(other.state)
try:
for i in range(len(self.state)):
self.state[i].generate_guards(other.state[i], None, None, state)
except VirtualStatesCantMatch:
return False
return True
def generate_guards(self, other, boxes, runtime_boxes, optimizer, force_boxes=False):
assert (len(self.state) == len(other.state) == len(boxes) ==
len(runtime_boxes))
state = GenerateGuardState(optimizer, force_boxes=force_boxes)
for i in range(len(self.state)):
self.state[i].generate_guards(other.state[i], boxes[i],
runtime_boxes[i], state)
return state
def make_inputargs(self, inputargs, optimizer, force_boxes=False):
if optimizer.optearlyforce:
optimizer = optimizer.optearlyforce
assert len(inputargs) == len(self.state)
boxes = [None] * self.numnotvirtuals
# We try twice. The first time around we allow boxes to be forced
        # which might change the virtual state if the box appears in more
# than one place among the inputargs.
if force_boxes:
for i in range(len(inputargs)):
self.state[i].enum_forced_boxes(boxes, inputargs[i], optimizer,
True)
for i in range(len(inputargs)):
self.state[i].enum_forced_boxes(boxes, inputargs[i], optimizer)
return boxes
def make_inputargs_and_virtuals(self, inputargs, optimizer, force_boxes=False):
inpargs = self.make_inputargs(inputargs, optimizer, force_boxes)
# we append the virtuals here in case some stuff is proven
# to be not a virtual and there are getfields in the short preamble
# that will read items out of there
virtuals = []
for i in range(len(inputargs)):
if not isinstance(self.state[i], NotVirtualStateInfo):
virtuals.append(inputargs[i])
return inpargs, virtuals
def debug_print(self, hdr='', bad=None, metainterp_sd=None):
if bad is None:
bad = {}
debug_print(hdr + "VirtualState():")
seen = {}
for s in self.state:
s.debug_print(" ", seen, bad, metainterp_sd)
class VirtualStateConstructor(VirtualVisitor):
def __init__(self, optimizer):
self.fieldboxes = {}
self.optimizer = optimizer
self.info = {}
def register_virtual_fields(self, keybox, fieldboxes):
self.fieldboxes[keybox] = fieldboxes
def already_seen_virtual(self, keybox):
return keybox in self.fieldboxes
def create_state_or_none(self, box, opt):
if box is None:
return None
return self.create_state(box, opt)
def create_state(self, box, opt):
box = opt.get_box_replacement(box)
try:
return self.info[box]
except KeyError:
pass
if box.type == 'r':
info = opt.getptrinfo(box)
if info is not None and info.is_virtual():
result = info.visitor_dispatch_virtual_type(self)
self.info[box] = result
info.visitor_walk_recursive(box, self, opt)
result.fieldstate = [self.create_state_or_none(b, opt)
for b in self.fieldboxes[box]]
else:
result = self.visit_not_virtual(box)
self.info[box] = result
elif box.type == 'i' or box.type == 'f':
result = self.visit_not_virtual(box)
self.info[box] = result
else:
assert False
return result
def get_virtual_state(self, jump_args):
if self.optimizer.optearlyforce:
opt = self.optimizer.optearlyforce
else:
opt = self.optimizer
state = []
self.info = {}
for box in jump_args:
state.append(self.create_state(box, opt))
return VirtualState(state)
def visit_not_virtual(self, box):
return not_virtual(self.optimizer.cpu, box.type,
self.optimizer.getinfo(box))
def visit_virtual(self, descr, fielddescrs):
known_class = ConstInt(descr.get_vtable())
return VirtualStateInfo(known_class, fielddescrs)
def visit_vstruct(self, typedescr, fielddescrs):
return VStructStateInfo(typedescr, fielddescrs)
def visit_varray(self, arraydescr, clear):
# 'clear' is ignored here. I *think* it is correct, because so
# far in force_at_end_of_preamble() we force all array values
# to be non-None, so clearing is not important any more
return VArrayStateInfo(arraydescr)
def visit_varraystruct(self, arraydescr, length, fielddescrs):
return VArrayStructStateInfo(arraydescr, fielddescrs, length)
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wx
from wx.lib.agw import customtreectrl
from wx.lib.mixins import treemixin
from robotide.controller.ui.treecontroller import TreeController, \
TestSelectionController
from robotide.context import IS_WINDOWS
from robotide.action.actioninfo import ActionInfo
from robotide.controller.filecontrollers import ResourceFileController
from robotide.publish.messages import RideTestRunning, RideTestPassed, \
RideTestFailed, RideTestExecutionStarted, RideImportSetting, \
RideExcludesChanged, RideIncludesChanged, RideOpenSuite, RideNewProject
from robotide.ui.images import RUNNING_IMAGE_INDEX, PASSED_IMAGE_INDEX, \
FAILED_IMAGE_INDEX, ROBOT_IMAGE_INDEX
from robotide.ui.treenodehandlers import TestCaseHandler
from robotide.publish import PUBLISHER, RideTreeSelection, RideFileNameChanged,\
RideItem, RideUserKeywordAdded, RideTestCaseAdded, RideUserKeywordRemoved,\
RideTestCaseRemoved, RideDataFileRemoved, RideDataChangedToDirty,\
RideDataDirtyCleared, RideVariableRemoved, RideVariableAdded,\
RideVariableMovedUp, RideVariableMovedDown, RideVariableUpdated,\
RideOpenResource, RideSuiteAdded, RideSelectResource, RideDataFileSet
from robotide.controller.commands import MoveTo
from robotide.widgets import PopupCreator
from robotide import utils
from .treenodehandlers import ResourceRootHandler, action_handler_class,\
ResourceFileHandler
from .images import TreeImageList
_TREE_ARGS = {'style': wx.TR_DEFAULT_STYLE}
if wx.VERSION_STRING >= '2.8.11.0':
_TREE_ARGS['agwStyle'] = \
customtreectrl.TR_DEFAULT_STYLE | customtreectrl.TR_HIDE_ROOT | \
customtreectrl.TR_EDIT_LABELS
if IS_WINDOWS:
_TREE_ARGS['style'] |= wx.TR_EDIT_LABELS
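# Tree is RIDE's suite/resource tree.  It combines the drag-and-drop mixin
# with CustomTreeCtrl, renders one node per data file, test, keyword and
# variable, and keeps itself in sync by subscribing to the Ride* messages
# listed in _subscribe_to_messages().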
class Tree(treemixin.DragAndDrop, customtreectrl.CustomTreeCtrl,
utils.RideEventHandler):
_RESOURCES_NODE_LABEL = 'External Resources'
def __init__(self, parent, action_registerer, settings=None):
self._checkboxes_for_tests = False
self._test_selection_controller = self._create_test_selection_controller()
self._controller = TreeController(
self, action_registerer, settings=settings,
test_selection=self._test_selection_controller)
treemixin.DragAndDrop.__init__(self, parent, **_TREE_ARGS)
self._controller.register_tree_actions()
self._bind_tree_events()
self._images = TreeImageList()
self._silent_mode = False
self.SetImageList(self._images)
self.label_editor = TreeLabelEditListener(self, action_registerer)
self._controller.bind_keys()
self._subscribe_to_messages()
self._popup_creator = PopupCreator()
self._dragging = False
self._clear_tree_data()
self._editor = None
self._execution_results = None
if not hasattr(self, 'OnCancelEdit'):
self.OnCancelEdit = self._on_cancel_edit
def _create_test_selection_controller(self):
tsc = TestSelectionController()
PUBLISHER.subscribe(tsc.clear_all, RideOpenSuite)
PUBLISHER.subscribe(tsc.clear_all, RideNewProject)
return tsc
def _on_cancel_edit(self, item):
le = customtreectrl.TreeEvent(customtreectrl.wxEVT_TREE_END_LABEL_EDIT, self.GetId())
le._item = item
le.SetEventObject(self)
le._label = ""
le._editCancelled = True
self.GetEventHandler().ProcessEvent(le)
def _bind_tree_events(self):
self.Bind(wx.EVT_LEFT_DCLICK, self.OnDoubleClick)
self.Bind(wx.EVT_TREE_SEL_CHANGED, self.OnSelChanged)
self.Bind(wx.EVT_TREE_ITEM_EXPANDING, self.OnTreeItemExpanding)
self.Bind(wx.EVT_TREE_ITEM_RIGHT_CLICK, self.OnRightClick)
self.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.OnItemActivated)
self.Bind(customtreectrl.EVT_TREE_ITEM_CHECKED, self.OnTreeItemChecked)
def OnDoubleClick(self, event):
item, pos = self.HitTest(self.ScreenToClient(wx.GetMousePosition()))
if item:
handler = self._controller.get_handler(item)
handler.double_clicked()
event.Skip()
def set_editor(self, editor):
self._editor = editor
def StartDragging(self):
self._dragging = True
treemixin.DragAndDrop.StartDragging(self)
def OnEndDrag(self, event):
self._dragging = False
treemixin.DragAndDrop.OnEndDrag(self, event)
def register_context_menu_hook(self, callable):
self._popup_creator.add_hook(callable)
def unregister_context_menu_hook(self, callable):
self._popup_creator.remove_hook(callable)
def _subscribe_to_messages(self):
subscriptions = [
(self._item_changed, RideItem),
(self._resource_added, RideOpenResource),
(self._select_resource, RideSelectResource),
(self._suite_added, RideSuiteAdded),
(self._keyword_added, RideUserKeywordAdded),
(self._test_added, RideTestCaseAdded),
(self._variable_added, RideVariableAdded),
(self._leaf_item_removed, RideUserKeywordRemoved),
(self._leaf_item_removed, RideTestCaseRemoved),
(self._leaf_item_removed, RideVariableRemoved),
(self._datafile_removed, RideDataFileRemoved),
(self._datafile_set, RideDataFileSet),
(self._data_dirty, RideDataChangedToDirty),
(self._data_undirty, RideDataDirtyCleared),
(self._variable_moved_up, RideVariableMovedUp),
(self._variable_moved_down, RideVariableMovedDown),
(self._variable_updated, RideVariableUpdated),
(self._filename_changed, RideFileNameChanged),
(self._testing_started, RideTestExecutionStarted),
(self._test_result, RideTestRunning),
(self._test_result, RideTestPassed),
(self._test_result, RideTestFailed),
(self._handle_import_setting_message, RideImportSetting),
(self._mark_excludes, RideExcludesChanged),
(self._mark_excludes, RideIncludesChanged),
]
for listener, topic in subscriptions:
PUBLISHER.subscribe(listener, topic)
def _mark_excludes(self, message):
tree = self._controller.find_node_by_controller(message.old_controller)
self._render_datafile(self.GetItemParent(tree), message.new_controller)
self._remove_datafile_node(tree)
def _set_item_excluded(self, node):
self.SetItemTextColour(node, 'gray')
self.SetItemItalic(node, True)
self.SetItemText(node, "%s (excluded)" % self.GetItemText(node))
def _handle_import_setting_message(self, message):
if message.is_resource():
self._set_resource_color(message.import_controller.get_imported_controller())
self._set_resource_color(message.import_controller.get_previous_imported_controller())
def _set_resource_color(self, resource_controller):
if not resource_controller:
return
node = self._controller.find_node_by_controller(resource_controller)
if node:
self.SetItemTextColour(node, self._get_resource_text_color(resource_controller))
def _get_resource_text_color(self, resource_controller):
return self.GetDefaultAttributes().colFg if resource_controller.is_used() else wx.LIGHT_GREY
def _testing_started(self, message):
self._for_all_drawn_tests(self._root, lambda t: self.SetItemImage(t, ROBOT_IMAGE_INDEX))
self._execution_results = message.results
self._images.set_execution_results(message.results)
def _test_result(self, message):
wx.CallAfter(self._set_icon_from_execution_results, message.item)
def _set_icon_from_execution_results(self, controller):
node = self._controller.find_node_by_controller(controller)
if not node:
return
self.SetItemImage(node, self._get_icon_index_for(controller))
def _get_icon_index_for(self, controller):
if not self._execution_results:
return ROBOT_IMAGE_INDEX
if self._execution_results.is_running(controller):
return RUNNING_IMAGE_INDEX
if self._execution_results.has_passed(controller):
return PASSED_IMAGE_INDEX
if self._execution_results.has_failed(controller):
return FAILED_IMAGE_INDEX
return ROBOT_IMAGE_INDEX
def populate(self, model):
self._clear_tree_data()
self._populate_model(model)
self._refresh_view()
self.SetFocus() # Needed for keyboard shortcuts
def _clear_tree_data(self):
self.DeleteAllItems()
self._root = self.AddRoot('')
self._resource_root = self._create_resource_root()
self._datafile_nodes = []
def _create_resource_root(self):
return self._create_node(self._root, self._RESOURCES_NODE_LABEL,
self._images.directory)
def _populate_model(self, model):
handler = ResourceRootHandler(model, self, self._resource_root,
self._controller.settings)
self.SetPyData(self._resource_root, handler)
if model.data:
self._render_datafile(self._root, model.data, 0)
for res in model.external_resources:
if not res.parent:
self._render_datafile(self._resource_root, res)
def _resource_added(self, message):
ctrl = message.datafile
if self._controller.find_node_by_controller(ctrl):
return
parent = self._get_dir_node(ctrl.parent) if ctrl.parent else self._resource_root
self._render_datafile(parent, ctrl)
def _get_dir_node(self, ctrl):
if ctrl is None:
return self._root
dir_node = self._get_datafile_node(ctrl.data)
if dir_node is None:
parent = self._get_dir_node(ctrl.parent)
self._render_datafile(parent, ctrl)
dir_node = self._get_datafile_node(ctrl.data)
return dir_node
def _select_resource(self, message):
self.select_controller_node(message.item)
def select_controller_node(self, controller):
self.SelectItem(self._controller.find_node_by_controller(controller))
def _suite_added(self, message):
self.add_datafile(message.parent, message.suite)
def _refresh_view(self):
self.Refresh()
if self._resource_root:
self.Expand(self._resource_root)
if self._datafile_nodes:
self.SelectItem(self._datafile_nodes[0])
self._expand_and_render_children(self._datafile_nodes[0])
def _render_datafile(self, parent_node, controller, index=None):
node = self._create_node_with_handler(parent_node, controller, index)
if controller.dirty:
self._controller.mark_node_dirty(node)
self._datafile_nodes.append(node)
self.SetItemHasChildren(node, True)
for child in controller.children:
self._render_datafile(node, child)
return node
def _create_node_with_handler(self, parent_node, controller, index=None):
handler_class = action_handler_class(controller)
node = self._create_node(parent_node, controller.display_name, self._images[controller],
index, with_checkbox=(handler_class == TestCaseHandler and self._checkboxes_for_tests))
if isinstance(controller, ResourceFileController):
if not controller.is_used():
self.SetItemTextColour(node, wx.ColorRGB(0xA9A9A9))
self.SetPyData(node, handler_class(controller, self, node, self._controller.settings))
if controller.is_excluded():
self._set_item_excluded(node)
return node
def set_checkboxes_for_tests(self):
self._checkboxes_for_tests = True
def _expand_and_render_children(self, node):
assert node is not None
self._render_children(node)
self.Expand(node)
def _render_children(self, node):
handler = self._controller.get_handler(node)
if not handler or not handler.can_be_rendered:
return
self._create_child_nodes(node, handler, lambda item: item.is_test_suite)
handler.set_rendered()
def _create_child_nodes(self, node, handler, predicate):
for childitem in self._children_of(handler):
index = self._get_insertion_index(node, predicate)
self._create_node_with_handler(node, childitem, index)
def _children_of(self, handler):
return [v for v in handler.variables if v.has_data()] + list(handler.tests) + \
list(handler.keywords)
def _create_node(self, parent_node, label, img, index=None, with_checkbox=False):
node = self._wx_node(parent_node, index, label, with_checkbox)
self.SetItemImage(node, img.normal, wx.TreeItemIcon_Normal)
self.SetItemImage(node, img.expanded, wx.TreeItemIcon_Expanded)
return node
def _wx_node(self, parent_node, index, label, with_checkbox):
ct_type = 1 if with_checkbox else 0
if index is not None:
# blame wxPython for this ugliness
if isinstance(index, int):
return self.InsertItemByIndex(parent_node, index, label, ct_type=ct_type)
else:
return self.InsertItem(parent_node, index, label, ct_type=ct_type)
return self.AppendItem(parent_node, label, ct_type=ct_type)
def add_datafile(self, parent, suite):
snode = self._render_datafile(self._get_datafile_node(parent.data), suite)
self.SelectItem(snode)
def add_test(self, parent_node, test):
self._add_dataitem(parent_node, test, lambda item: item.is_user_keyword)
def add_keyword(self, parent_node, kw):
self._add_dataitem(parent_node, kw, lambda item: item.is_test_suite)
def _add_dataitem(self, parent_node, dataitem, predicate):
node = self._get_or_create_node(parent_node, dataitem, predicate)
self._select(node)
self._controller.mark_node_dirty(parent_node)
def _get_or_create_node(self, parent_node, dataitem, predicate):
if not self.IsExpanded(parent_node):
self._expand_and_render_children(parent_node)
return self._controller.find_node_with_label(parent_node, dataitem.display_name)
index = self._get_insertion_index(parent_node, predicate)
return self._create_node_with_handler(parent_node, dataitem, index)
def _select(self, node):
if node:
wx.CallAfter(self.SelectItem, node)
def _get_insertion_index(self, parent_node, predicate):
if not predicate:
return None
item, cookie = self.GetFirstChild(parent_node)
while item:
if predicate(self._controller.get_handler(item)):
index = self.GetPrevSibling(item)
if not index:
index = 0
return index
item, cookie = self.GetNextChild(parent_node, cookie)
return None
def _keyword_added(self, message):
self.add_keyword(self._get_datafile_node(self.get_selected_datafile()),
message.item)
def _variable_added(self, message):
self._get_or_create_node(self._get_datafile_node(self.get_selected_datafile()),
message.item, lambda item: not item.is_variable or item.index > message.index)
def _leaf_item_removed(self, message):
node = self._controller.find_node_by_controller(message.item)
self.delete_node(node)
def _test_added(self, message):
self.add_test(self._get_datafile_node(self.get_selected_datafile()),
message.item)
def _datafile_removed(self, message):
dfnode = self._get_datafile_node(message.datafile.data)
self._datafile_nodes.remove(dfnode)
self.DeleteChildren(dfnode)
self.Delete(dfnode)
def _datafile_set(self, message):
wx.CallAfter(self._refresh_datafile_when_file_set, message.item)
def _filename_changed(self, message):
df = message.datafile
node = self._controller.find_node_by_controller(df)
if not node:
raise AssertionError('No node found with controller "%s"' % df)
wx.CallAfter(self.SetItemText, node, df.display_name)
def add_keyword_controller(self, controller):
parent = self._get_datafile_node(self.get_selected_datafile())
self.add_keyword(parent, controller)
def delete_node(self, node):
if node is None:
return
parent = self.GetItemParent(node)
self._controller.mark_node_dirty(parent)
if self.IsSelected(node):
wx.CallAfter(self.SelectItem, parent)
wx.CallAfter(self.Delete, node)
def _data_dirty(self, message):
self._controller.mark_controller_dirty(message.datafile)
def _data_undirty(self, message):
self.unset_dirty()
def unset_dirty(self):
for node in self._datafile_nodes:
text = self.GetItemText(node)
handler = self._controller.get_handler(node)
if text.startswith('*') and not handler.controller.dirty:
self.SetItemText(node, text[1:])
def select_node_by_data(self, controller):
'''Find and select the tree item associated with the given controller.
Controller can be any of the controllers that are represented in the tree.'''
parent_node = self._get_datafile_node(controller.datafile)
if not parent_node:
return None
if not self.IsExpanded(parent_node):
self._expand_and_render_children(parent_node)
node = self._controller.find_node_by_controller(controller)
if node != self.GetSelection():
self.SelectItem(node)
return node
def select_user_keyword_node(self, uk):
parent_node = self._get_datafile_node(uk.parent.parent)
if not parent_node:
return
if not self.IsExpanded(parent_node):
self._expand_and_render_children(parent_node)
node = self._controller.find_node_with_label(parent_node, utils.normalize(uk.name))
if node != self.GetSelection():
self.SelectItem(node)
def _get_datafile_node(self, datafile):
for node in self._datafile_nodes:
if self._controller.get_handler(node).item == datafile:
return node
return None
def get_selected_datafile(self):
"""Returns currently selected data file.
If a test or user keyword node is selected, returns parent of that item.
"""
datafile = self._get_selected_datafile_node()
if not datafile:
return None
return self._controller.get_handler(datafile).item
def get_selected_datafile_controller(self):
"""Returns controller associated with currently active data file.
If a test or user keyword node is selected, returns parent of that item.
"""
dfnode = self._get_selected_datafile_node()
return self._controller.get_handler(dfnode).controller if dfnode else None
def _get_selected_datafile_node(self):
node = self.GetSelection()
if not node or node in (self._resource_root, self._root):
return None
while node not in self._datafile_nodes:
node = self.GetItemParent(node)
return node
def get_selected_item(self):
"""Returns model object associated with currently selected tree node."""
selection = self.GetSelection()
if not selection:
return None
handler = self._controller.get_handler(selection)
return handler and handler.controller or None
def move_up(self, node):
prev = self.GetPrevSibling(node)
if prev.IsOk():
self._switch_items(prev, node, node)
def move_down(self, node):
next = self.GetNextSibling(node)
if next.IsOk():
self._switch_items(node, next, node)
def _switch_items(self, first, second, currently_selected):
"""Changes the order of given items, first is expected to be directly above the second"""
selection = self.GetItemPyData(currently_selected).controller
controller = self._controller.get_handler(first).controller
self.Delete(first)
self._create_node_with_handler(self.GetItemParent(second),
controller, second)
self.select_node_by_data(selection)
def _refresh_datafile_when_file_set(self, controller):
self._start_silent_mode() #Prevent tab selections based on tree item selected events
current = self.get_selected_datafile_controller()
if not current: # If tree is not yet in use - do not expand anything.
self._end_silent_mode()
return
self._uncheck_tests(current)
item = self.GetSelection()
current_txt = self.GetItemText(item) if item.IsOk() else ''
# after refresh current and current_txt might have been changed
node = self._refresh_datafile(controller)
if node is None:
#TODO: Find out why this sometimes happens
return
self._expand_and_render_children(node)
if current == controller:
wx.CallAfter(self.SelectItem, self._controller.find_node_with_label(node, current_txt) or node)
wx.CallAfter(self._end_silent_mode)
else:
self._end_silent_mode()
def _uncheck_tests(self, controller):
self._test_selection_controller.unselect_all(controller.tests)
def _start_silent_mode(self):
self._silent_mode = True
def _end_silent_mode(self):
self._silent_mode = False
def refresh_datafile(self, controller, event):
to_be_selected = self._get_pending_selection(event)
new_node = self._refresh_datafile(controller)
self._handle_pending_selection(to_be_selected, new_node)
def _refresh_datafile(self, controller):
self._uncheck_tests(controller)
orig_node = self._get_data_controller_node(controller)
if orig_node is not None:
insertion_index = self._get_datafile_index(orig_node)
parent = self._get_parent(orig_node)
self._remove_datafile_node(orig_node)
return self._render_datafile(parent, controller, insertion_index)
def _get_pending_selection(self, event):
if hasattr(event, 'Item'):
item = event.GetItem()
event.Veto()
elif hasattr(event, 'Position'):
item, flags = self.HitTest(event.Position)
if not self._click_on_item(item, flags):
return
else:
return
return self.GetItemText(item)
def _get_data_controller_node(self, controller):
for node in self._datafile_nodes:
if self.GetItemPyData(node).controller == controller:
return node
return None
def _click_on_item(self, item, flags):
return item is not None and item.IsOk() and \
flags & wx.TREE_HITTEST_ONITEM
def _get_datafile_index(self, node):
insertion_index = self.GetPrevSibling(node)
if not insertion_index:
insertion_index = 0
return insertion_index
def _get_parent(self, node):
return self.GetItemParent(node)
def _remove_datafile_node(self, node):
for child in self.GetItemChildren(node):
if child in self._datafile_nodes:
self._remove_datafile_node(child)
self._datafile_nodes.remove(node)
self.Delete(node)
def _handle_pending_selection(self, to_be_selected, parent_node):
if to_be_selected:
self._expand_and_render_children(parent_node)
wx.CallAfter(self.SelectItem,
self._controller.find_node_with_label(parent_node, to_be_selected))
def OnSelChanged(self, event):
node = event.GetItem()
if not node.IsOk() or self._dragging:
event.Skip()
return
self._controller.add_to_history(node)
handler = self._controller.get_handler(node)
if handler and handler.item:
RideTreeSelection(node=node, item=handler.controller, silent=self._silent_mode).publish()
self.SetFocus()
def OnTreeItemExpanding(self, event):
node = event.GetItem()
if node.IsOk():
self._render_children(node)
def SelectAllTests(self, item):
self._for_all_tests(item, lambda t: self.CheckItem(t))
def SelectTests(self, tests):
def foo(t):
if self.GetPyData(t).controller in tests:
self.CheckItem(t)
self._for_all_tests(self._root, foo)
def ExpandAllSubNodes(self, item):
self._expand_or_collapse_nodes(item, self.Expand)
def CollapseAllSubNodes(self, item):
self._expand_or_collapse_nodes(item, self.Collapse)
def _expand_or_collapse_nodes(self, item, callback):
if not self.HasAGWFlag(customtreectrl.TR_HIDE_ROOT) or item != self.GetRootItem():
callback(item)
for child in item.GetChildren():
self._expand_or_collapse_nodes(child, callback)
def _for_all_tests(self, item, func):
if not self.HasAGWFlag(customtreectrl.TR_HIDE_ROOT) or item != self.GetRootItem():
            if isinstance(item.GetData(), (ResourceRootHandler, ResourceFileHandler)):
return
self.Expand(item)
if self._is_test_node(item):
func(item)
if not self.IsExpanded(item):
return
for child in item.GetChildren():
self._for_all_tests(child, func)
def _for_all_drawn_tests(self, item, func):
if self._is_test_node(item):
func(item)
for child in item.GetChildren():
self._for_all_drawn_tests(child, func)
def _is_test_node(self, node):
return node.GetType() == 1
def DeselectAllTests(self, item):
self._for_all_tests(item, lambda t: self.CheckItem(t, checked=False))
def DeselectTests(self, tests):
def foo(t):
if self.GetPyData(t).controller in tests:
self.CheckItem(t, checked=False)
self._for_all_tests(self._root, foo)
def SelectFailedTests(self, item):
def func(t):
# FIXME: This information should be in domain model!
self.CheckItem(t, checked=(self.GetItemImage(t) == FAILED_IMAGE_INDEX))
self._for_all_tests(item, func)
def SelectPassedTests(self, item):
def func(t):
self.CheckItem(t, checked=(self.GetItemImage(t) == PASSED_IMAGE_INDEX))
self._for_all_tests(item, func)
def OnTreeItemChecked(self, event):
node = event.GetItem()
handler = self._controller.get_handler(node=node)
self._test_selection_controller.select(handler.controller, node.IsChecked())
def OnItemActivated(self, event):
node = event.GetItem()
if self.IsExpanded(node):
self.Collapse(node)
elif self.ItemHasChildren(node):
self._expand_and_render_children(node)
def OnLeftArrow(self, event):
node = self.GetSelection()
if self.IsExpanded(node):
self.Collapse(node)
else:
event.Skip()
def OnRightClick(self, event):
handler = self._controller.get_handler(event.GetItem() if hasattr(event, 'GetItem') else None)
if handler:
if not self.IsExpanded(handler.node):
self.Expand(handler.node)
handler.show_popup()
def OnNewTestCase(self, event):
handler = self._controller.get_handler()
if handler:
handler.OnNewTestCase(event)
def OnDrop(self, target, dragged):
dragged = self._controller.get_handler(dragged)
target = self._controller.get_handler(target)
if target and target.accepts_drag(dragged):
dragged.controller.execute(MoveTo(target.controller))
else:
self.Refresh()
def IsValidDragItem(self, item):
return self._controller.get_handler(item).is_draggable
def OnMoveUp(self, event):
handler = self._controller.get_handler()
if handler.is_draggable:
handler.OnMoveUp(event)
def OnMoveDown(self, event):
handler = self._controller.get_handler()
if handler.is_draggable:
handler.OnMoveDown(event)
def _item_changed(self, data):
controller = data.item
node = self._controller.find_node_by_controller(controller)
if node:
self.SetItemText(node, data.item.name)
self._test_selection_controller.send_selection_changed_message()
if controller.dirty:
self._controller.mark_node_dirty(self._get_datafile_node(controller.datafile))
def _variable_moved_up(self, data):
if self._should_update_variable_positions(data):
self._do_action_if_datafile_node_is_expanded(self.move_up, data)
def _variable_moved_down(self, data):
if self._should_update_variable_positions(data):
self._do_action_if_datafile_node_is_expanded(self.move_down, data)
def _should_update_variable_positions(self, message):
return message.item != message.other and message.item.has_data() and message.other.has_data()
def _do_action_if_datafile_node_is_expanded(self, action, data):
if self.IsExpanded(self._get_datafile_node(data.item.datafile)):
node = self._controller.find_node_by_controller(data.item)
action(node)
def _variable_updated(self, data):
self._item_changed(data)
def highlight(self, data, text):
self.select_node_by_data(data)
self._editor.highlight(text)
def node_is_resource_file(self, node):
return self._controller.get_handler(node).__class__ == ResourceFileHandler
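# TreeLabelEditListener handles in-place label editing of tree nodes,
# including the Windows-specific workarounds for issue 756 and the Delete
# shortcut registration.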
class TreeLabelEditListener(object):
def __init__(self, tree, action_registerer):
self._tree = tree
tree.Bind(wx.EVT_TREE_BEGIN_LABEL_EDIT, self.OnBeginLabelEdit)
tree.Bind(wx.EVT_TREE_END_LABEL_EDIT, self.OnLabelEdited)
tree.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
if IS_WINDOWS:
#Delete key does not work in windows without registration
action_registerer.register_shortcut(ActionInfo(None, None, action=self.OnDelete, shortcut='Del'))
self._editing_label = False
self._on_label_edit_called = False
def OnBeginLabelEdit(self, event):
#See http://code.google.com/p/robotframework-ride/issues/detail?id=756
self._editing_label = True
if not self._on_label_edit_called:
self.OnLabelEdit()
event.Veto() # On windows CustomTreeCtrl will create Editor component
# And we want this to be done by the handler -- as it knows if there should be one or not
# And because this will make it work the same way as when pressing F2
# .. so in other words there is a bug if we don't Veto this event
def OnLabelEdit(self, event=None):
if not self._on_label_edit_called:
self._on_label_edit_called = True
handler = self._tree._controller.get_handler()
if handler and not handler.begin_label_edit():
self._on_label_edit_called = False
self._editing_label = False
def OnLabelEdited(self, event):
self._editing_label = False
self._on_label_edit_called = False
self._tree._controller.get_handler(event.GetItem()).end_label_edit(event)
# Reset edit control as it doesn't seem to reset it in case the focus goes directly
# away from the tree control
# Use CallAfter to prevent messing up the current end label edit
# .. and the another CallAfter because of
# customtreectrl.TreeTextCtrl#OnChar will call CallAfter(self.Finish) when Enter is pressed
# --> Results in PyDeadObject if called after ResetEditControl..
wx.CallAfter(wx.CallAfter, self._stop_editing)
def _stop_editing(self):
control = self._tree.GetEditControl()
if control:
control.StopEditing()
def OnDelete(self, event):
editor = self._tree.GetEditControl()
if editor and wx.Window.FindFocus() == editor:
start, end = editor.GetSelection()
editor.Remove(start, max(end, start+1))
def OnLeftDown(self, event):
#See http://code.google.com/p/robotframework-ride/issues/detail?id=756
if IS_WINDOWS and self._editing_label:
# This method works only on Windows, luckily the issue 756 exists
# only on Windows
self._tree.OnCancelEdit(self._tree.GetSelection())
event.Skip()
    def _get_handler(self, item=None):
        return self._tree._controller.get_handler(item)
"""
Support for MQTT message handling.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/mqtt/
"""
import asyncio
import logging
import os
import socket
import time
import ssl
import re
import requests.certs
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.setup import async_prepare_setup_platform
from homeassistant.config import load_yaml_config_file
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import template, config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect, dispatcher_send)
from homeassistant.util.async import (
run_coroutine_threadsafe, run_callback_threadsafe)
from homeassistant.const import (
EVENT_HOMEASSISTANT_STOP, CONF_VALUE_TEMPLATE, CONF_USERNAME,
CONF_PASSWORD, CONF_PORT, CONF_PROTOCOL, CONF_PAYLOAD)
from homeassistant.components.mqtt.server import HBMQTT_CONFIG_SCHEMA
REQUIREMENTS = ['paho-mqtt==1.2.3']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'mqtt'
DATA_MQTT = 'mqtt'
SERVICE_PUBLISH = 'publish'
SIGNAL_MQTT_MESSAGE_RECEIVED = 'mqtt_message_received'
CONF_EMBEDDED = 'embedded'
CONF_BROKER = 'broker'
CONF_CLIENT_ID = 'client_id'
CONF_DISCOVERY = 'discovery'
CONF_DISCOVERY_PREFIX = 'discovery_prefix'
CONF_KEEPALIVE = 'keepalive'
CONF_CERTIFICATE = 'certificate'
CONF_CLIENT_KEY = 'client_key'
CONF_CLIENT_CERT = 'client_cert'
CONF_TLS_INSECURE = 'tls_insecure'
CONF_TLS_VERSION = 'tls_version'
CONF_BIRTH_MESSAGE = 'birth_message'
CONF_WILL_MESSAGE = 'will_message'
CONF_STATE_TOPIC = 'state_topic'
CONF_COMMAND_TOPIC = 'command_topic'
CONF_QOS = 'qos'
CONF_RETAIN = 'retain'
PROTOCOL_31 = '3.1'
PROTOCOL_311 = '3.1.1'
DEFAULT_PORT = 1883
DEFAULT_KEEPALIVE = 60
DEFAULT_QOS = 0
DEFAULT_RETAIN = False
DEFAULT_PROTOCOL = PROTOCOL_311
DEFAULT_DISCOVERY = False
DEFAULT_DISCOVERY_PREFIX = 'homeassistant'
DEFAULT_TLS_PROTOCOL = 'auto'
ATTR_TOPIC = 'topic'
ATTR_PAYLOAD = 'payload'
ATTR_PAYLOAD_TEMPLATE = 'payload_template'
ATTR_QOS = CONF_QOS
ATTR_RETAIN = CONF_RETAIN
MAX_RECONNECT_WAIT = 300 # seconds
def valid_subscribe_topic(value, invalid_chars='\0'):
"""Validate that we can subscribe using this MQTT topic."""
value = cv.string(value)
if all(c not in value for c in invalid_chars):
return vol.Length(min=1, max=65535)(value)
raise vol.Invalid('Invalid MQTT topic name')
def valid_publish_topic(value):
"""Validate that we can publish using this MQTT topic."""
return valid_subscribe_topic(value, invalid_chars='#+\0')
def valid_discovery_topic(value):
"""Validate a discovery topic."""
return valid_subscribe_topic(value, invalid_chars='#+\0/')
_VALID_QOS_SCHEMA = vol.All(vol.Coerce(int), vol.In([0, 1, 2]))
CLIENT_KEY_AUTH_MSG = 'client_key and client_cert must both be present in ' \
'the mqtt broker config'
MQTT_WILL_BIRTH_SCHEMA = vol.Schema({
vol.Required(ATTR_TOPIC): valid_publish_topic,
vol.Required(ATTR_PAYLOAD, CONF_PAYLOAD): cv.string,
vol.Optional(ATTR_QOS, default=DEFAULT_QOS): _VALID_QOS_SCHEMA,
vol.Optional(ATTR_RETAIN, default=DEFAULT_RETAIN): cv.boolean,
}, required=True)
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_CLIENT_ID): cv.string,
vol.Optional(CONF_KEEPALIVE, default=DEFAULT_KEEPALIVE):
vol.All(vol.Coerce(int), vol.Range(min=15)),
vol.Optional(CONF_BROKER): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_CERTIFICATE): vol.Any('auto', cv.isfile),
vol.Inclusive(CONF_CLIENT_KEY, 'client_key_auth',
msg=CLIENT_KEY_AUTH_MSG): cv.isfile,
vol.Inclusive(CONF_CLIENT_CERT, 'client_key_auth',
msg=CLIENT_KEY_AUTH_MSG): cv.isfile,
vol.Optional(CONF_TLS_INSECURE): cv.boolean,
vol.Optional(CONF_TLS_VERSION,
default=DEFAULT_TLS_PROTOCOL): vol.Any('auto', '1.0',
'1.1', '1.2'),
vol.Optional(CONF_PROTOCOL, default=DEFAULT_PROTOCOL):
vol.All(cv.string, vol.In([PROTOCOL_31, PROTOCOL_311])),
vol.Optional(CONF_EMBEDDED): HBMQTT_CONFIG_SCHEMA,
vol.Optional(CONF_WILL_MESSAGE): MQTT_WILL_BIRTH_SCHEMA,
vol.Optional(CONF_BIRTH_MESSAGE): MQTT_WILL_BIRTH_SCHEMA,
vol.Optional(CONF_DISCOVERY, default=DEFAULT_DISCOVERY): cv.boolean,
vol.Optional(CONF_DISCOVERY_PREFIX,
default=DEFAULT_DISCOVERY_PREFIX): valid_discovery_topic,
}),
}, extra=vol.ALLOW_EXTRA)
SCHEMA_BASE = {
vol.Optional(CONF_QOS, default=DEFAULT_QOS): _VALID_QOS_SCHEMA,
}
MQTT_BASE_PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend(SCHEMA_BASE)
# Sensor type platforms subscribe to MQTT events
MQTT_RO_PLATFORM_SCHEMA = MQTT_BASE_PLATFORM_SCHEMA.extend({
vol.Required(CONF_STATE_TOPIC): valid_subscribe_topic,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
})
# Switch type platforms publish to MQTT and may subscribe
MQTT_RW_PLATFORM_SCHEMA = MQTT_BASE_PLATFORM_SCHEMA.extend({
vol.Required(CONF_COMMAND_TOPIC): valid_publish_topic,
vol.Optional(CONF_RETAIN, default=DEFAULT_RETAIN): cv.boolean,
vol.Optional(CONF_STATE_TOPIC): valid_subscribe_topic,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
})
# Service call validation schema
MQTT_PUBLISH_SCHEMA = vol.Schema({
vol.Required(ATTR_TOPIC): valid_publish_topic,
vol.Exclusive(ATTR_PAYLOAD, CONF_PAYLOAD): object,
vol.Exclusive(ATTR_PAYLOAD_TEMPLATE, CONF_PAYLOAD): cv.string,
vol.Optional(ATTR_QOS, default=DEFAULT_QOS): _VALID_QOS_SCHEMA,
vol.Optional(ATTR_RETAIN, default=DEFAULT_RETAIN): cv.boolean,
}, required=True)
def _build_publish_data(topic, qos, retain):
"""Build the arguments for the publish service without the payload."""
data = {ATTR_TOPIC: topic}
if qos is not None:
data[ATTR_QOS] = qos
if retain is not None:
data[ATTR_RETAIN] = retain
return data
def publish(hass, topic, payload, qos=None, retain=None):
"""Publish message to an MQTT topic."""
hass.add_job(async_publish, hass, topic, payload, qos, retain)
@callback
def async_publish(hass, topic, payload, qos=None, retain=None):
"""Publish message to an MQTT topic."""
data = _build_publish_data(topic, qos, retain)
data[ATTR_PAYLOAD] = payload
hass.async_add_job(hass.services.async_call(DOMAIN, SERVICE_PUBLISH, data))
def publish_template(hass, topic, payload_template, qos=None, retain=None):
"""Publish message to an MQTT topic using a template payload."""
data = _build_publish_data(topic, qos, retain)
data[ATTR_PAYLOAD_TEMPLATE] = payload_template
hass.services.call(DOMAIN, SERVICE_PUBLISH, data)
@asyncio.coroutine
def async_subscribe(hass, topic, msg_callback, qos=DEFAULT_QOS,
encoding='utf-8'):
"""Subscribe to an MQTT topic."""
@callback
def async_mqtt_topic_subscriber(dp_topic, dp_payload, dp_qos):
"""Match subscribed MQTT topic."""
if not _match_topic(topic, dp_topic):
return
if encoding is not None:
try:
payload = dp_payload.decode(encoding)
_LOGGER.debug("Received message on %s: %s", dp_topic, payload)
except (AttributeError, UnicodeDecodeError):
_LOGGER.error("Illegal payload encoding %s from "
"MQTT topic: %s, Payload: %s",
encoding, dp_topic, dp_payload)
return
else:
_LOGGER.debug("Received binary message on %s", dp_topic)
payload = dp_payload
hass.async_run_job(msg_callback, dp_topic, payload, dp_qos)
async_remove = async_dispatcher_connect(
hass, SIGNAL_MQTT_MESSAGE_RECEIVED, async_mqtt_topic_subscriber)
yield from hass.data[DATA_MQTT].async_subscribe(topic, qos)
return async_remove
def subscribe(hass, topic, msg_callback, qos=DEFAULT_QOS,
encoding='utf-8'):
"""Subscribe to an MQTT topic."""
async_remove = run_coroutine_threadsafe(
async_subscribe(hass, topic, msg_callback,
qos, encoding),
hass.loop
).result()
def remove():
"""Remove listener convert."""
run_callback_threadsafe(hass.loop, async_remove).result()
return remove
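# Illustrative usage of the helpers above (sketch only; the topic, callback
# and entity id are made up for the example):
#     def message_received(topic, payload, qos):
#         """Handle a new MQTT message."""
#         hass.states.set('sensor.last_mqtt_payload', payload)
#     unsubscribe = subscribe(hass, 'home/+/status', message_received)
#     publish(hass, 'home/kitchen/command', 'ON', qos=1, retain=True)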
@asyncio.coroutine
def _async_setup_server(hass, config):
"""Try to start embedded MQTT broker.
This method is a coroutine.
"""
conf = config.get(DOMAIN, {})
server = yield from async_prepare_setup_platform(
hass, config, DOMAIN, 'server')
if server is None:
_LOGGER.error("Unable to load embedded server")
return None
success, broker_config = \
yield from server.async_start(hass, conf.get(CONF_EMBEDDED))
return success and broker_config
@asyncio.coroutine
def _async_setup_discovery(hass, config):
"""Try to start the discovery of MQTT devices.
This method is a coroutine.
"""
conf = config.get(DOMAIN, {})
discovery = yield from async_prepare_setup_platform(
hass, config, DOMAIN, 'discovery')
if discovery is None:
_LOGGER.error("Unable to load MQTT discovery")
return None
success = yield from discovery.async_start(
hass, conf[CONF_DISCOVERY_PREFIX], config)
return success
@asyncio.coroutine
def async_setup(hass, config):
"""Start the MQTT protocol service."""
conf = config.get(DOMAIN)
if conf is None:
conf = CONFIG_SCHEMA({DOMAIN: {}})[DOMAIN]
client_id = conf.get(CONF_CLIENT_ID)
keepalive = conf.get(CONF_KEEPALIVE)
# Only setup if embedded config passed in or no broker specified
if CONF_EMBEDDED not in conf and CONF_BROKER in conf:
broker_config = None
else:
broker_config = yield from _async_setup_server(hass, config)
if CONF_BROKER in conf:
broker = conf[CONF_BROKER]
port = conf[CONF_PORT]
username = conf.get(CONF_USERNAME)
password = conf.get(CONF_PASSWORD)
certificate = conf.get(CONF_CERTIFICATE)
client_key = conf.get(CONF_CLIENT_KEY)
client_cert = conf.get(CONF_CLIENT_CERT)
tls_insecure = conf.get(CONF_TLS_INSECURE)
protocol = conf[CONF_PROTOCOL]
elif broker_config:
# If no broker passed in, auto config to internal server
broker, port, username, password, certificate, protocol = broker_config
# Embedded broker doesn't have some ssl variables
client_key, client_cert, tls_insecure = None, None, None
else:
err = "Unable to start MQTT broker."
if conf.get(CONF_EMBEDDED) is not None:
# Explicit embedded config, requires explicit broker config
err += " (Broker configuration required.)"
_LOGGER.error(err)
return False
# For cloudmqtt.com, secured connection, auto fill in certificate
if certificate is None and 19999 < port < 30000 and \
broker.endswith('.cloudmqtt.com'):
certificate = os.path.join(os.path.dirname(__file__),
'addtrustexternalcaroot.crt')
# When the certificate is set to auto, use bundled certs from requests
if certificate == 'auto':
certificate = requests.certs.where()
will_message = conf.get(CONF_WILL_MESSAGE)
birth_message = conf.get(CONF_BIRTH_MESSAGE)
    # Allow the TLS protocol version to be overridden; without an explicit
    # setting, Python 3.6+ negotiates the highest supported version.
conf_tls_version = conf.get(CONF_TLS_VERSION)
if conf_tls_version == '1.2':
tls_version = ssl.PROTOCOL_TLSv1_2
elif conf_tls_version == '1.1':
tls_version = ssl.PROTOCOL_TLSv1_1
elif conf_tls_version == '1.0':
tls_version = ssl.PROTOCOL_TLSv1
else:
import sys
# Python3.6 supports automatic negotiation of highest TLS version
if sys.hexversion >= 0x03060000:
tls_version = ssl.PROTOCOL_TLS # pylint: disable=no-member
else:
tls_version = ssl.PROTOCOL_TLSv1
try:
hass.data[DATA_MQTT] = MQTT(
hass, broker, port, client_id, keepalive, username, password,
certificate, client_key, client_cert, tls_insecure, protocol,
will_message, birth_message, tls_version)
except socket.error:
_LOGGER.exception("Can't connect to the broker. "
"Please check your settings and the broker itself")
return False
@asyncio.coroutine
def async_stop_mqtt(event):
"""Stop MQTT component."""
yield from hass.data[DATA_MQTT].async_disconnect()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_stop_mqtt)
success = yield from hass.data[DATA_MQTT].async_connect()
if not success:
return False
@asyncio.coroutine
def async_publish_service(call):
"""Handle MQTT publish service calls."""
msg_topic = call.data[ATTR_TOPIC]
payload = call.data.get(ATTR_PAYLOAD)
payload_template = call.data.get(ATTR_PAYLOAD_TEMPLATE)
qos = call.data[ATTR_QOS]
retain = call.data[ATTR_RETAIN]
if payload_template is not None:
try:
payload = \
template.Template(payload_template, hass).async_render()
except template.jinja2.TemplateError as exc:
_LOGGER.error(
"Unable to publish to '%s': rendering payload template of "
"'%s' failed because %s",
msg_topic, payload_template, exc)
return
yield from hass.data[DATA_MQTT].async_publish(
msg_topic, payload, qos, retain)
descriptions = yield from hass.loop.run_in_executor(
None, load_yaml_config_file, os.path.join(
os.path.dirname(__file__), 'services.yaml'))
hass.services.async_register(
DOMAIN, SERVICE_PUBLISH, async_publish_service,
descriptions.get(SERVICE_PUBLISH), schema=MQTT_PUBLISH_SCHEMA)
if conf.get(CONF_DISCOVERY):
yield from _async_setup_discovery(hass, config)
return True
class MQTT(object):
"""Home Assistant MQTT client."""
def __init__(self, hass, broker, port, client_id, keepalive, username,
password, certificate, client_key, client_cert,
tls_insecure, protocol, will_message, birth_message,
tls_version):
"""Initialize Home Assistant MQTT client."""
import paho.mqtt.client as mqtt
self.hass = hass
self.broker = broker
self.port = port
self.keepalive = keepalive
self.topics = {}
self.progress = {}
self.birth_message = birth_message
self._mqttc = None
self._paho_lock = asyncio.Lock(loop=hass.loop)
if protocol == PROTOCOL_31:
proto = mqtt.MQTTv31
else:
proto = mqtt.MQTTv311
if client_id is None:
self._mqttc = mqtt.Client(protocol=proto)
else:
self._mqttc = mqtt.Client(client_id, protocol=proto)
if username is not None:
self._mqttc.username_pw_set(username, password)
if certificate is not None:
self._mqttc.tls_set(
certificate, certfile=client_cert,
keyfile=client_key, tls_version=tls_version)
if tls_insecure is not None:
self._mqttc.tls_insecure_set(tls_insecure)
self._mqttc.on_subscribe = self._mqtt_on_subscribe
self._mqttc.on_unsubscribe = self._mqtt_on_unsubscribe
self._mqttc.on_connect = self._mqtt_on_connect
self._mqttc.on_disconnect = self._mqtt_on_disconnect
self._mqttc.on_message = self._mqtt_on_message
if will_message:
self._mqttc.will_set(will_message.get(ATTR_TOPIC),
will_message.get(ATTR_PAYLOAD),
will_message.get(ATTR_QOS),
will_message.get(ATTR_RETAIN))
@asyncio.coroutine
def async_publish(self, topic, payload, qos, retain):
"""Publish a MQTT message.
This method must be run in the event loop and returns a coroutine.
"""
with (yield from self._paho_lock):
yield from self.hass.loop.run_in_executor(
None, self._mqttc.publish, topic, payload, qos, retain)
@asyncio.coroutine
def async_connect(self):
"""Connect to the host. Does process messages yet.
This method is a coroutine.
"""
result = yield from self.hass.loop.run_in_executor(
None, self._mqttc.connect, self.broker, self.port, self.keepalive)
if result != 0:
import paho.mqtt.client as mqtt
_LOGGER.error('Failed to connect: %s', mqtt.error_string(result))
else:
self._mqttc.loop_start()
return not result
def async_disconnect(self):
"""Stop the MQTT client.
This method must be run in the event loop and returns a coroutine.
"""
def stop():
"""Stop the MQTT client."""
self._mqttc.disconnect()
self._mqttc.loop_stop()
return self.hass.loop.run_in_executor(None, stop)
@asyncio.coroutine
def async_subscribe(self, topic, qos):
"""Subscribe to a topic.
This method is a coroutine.
"""
if not isinstance(topic, str):
            raise HomeAssistantError("topic needs to be a string!")
with (yield from self._paho_lock):
if topic in self.topics:
return
result, mid = yield from self.hass.loop.run_in_executor(
None, self._mqttc.subscribe, topic, qos)
_raise_on_error(result)
self.progress[mid] = topic
self.topics[topic] = None
@asyncio.coroutine
def async_unsubscribe(self, topic):
"""Unsubscribe from topic.
This method is a coroutine.
"""
result, mid = yield from self.hass.loop.run_in_executor(
None, self._mqttc.unsubscribe, topic)
_raise_on_error(result)
self.progress[mid] = topic
def _mqtt_on_connect(self, _mqttc, _userdata, _flags, result_code):
"""On connect callback.
Resubscribe to all topics we were subscribed to and publish birth
message.
"""
import paho.mqtt.client as mqtt
if result_code != mqtt.CONNACK_ACCEPTED:
_LOGGER.error('Unable to connect to the MQTT broker: %s',
mqtt.connack_string(result_code))
self._mqttc.disconnect()
return
old_topics = self.topics
self.topics = {key: value for key, value in self.topics.items()
if value is None}
for topic, qos in old_topics.items():
# qos is None if we were in process of subscribing
if qos is not None:
self.hass.add_job(self.async_subscribe, topic, qos)
if self.birth_message:
self.hass.add_job(self.async_publish(
self.birth_message.get(ATTR_TOPIC),
self.birth_message.get(ATTR_PAYLOAD),
self.birth_message.get(ATTR_QOS),
self.birth_message.get(ATTR_RETAIN)))
def _mqtt_on_subscribe(self, _mqttc, _userdata, mid, granted_qos):
"""Subscribe successful callback."""
topic = self.progress.pop(mid, None)
if topic is None:
return
self.topics[topic] = granted_qos[0]
def _mqtt_on_message(self, _mqttc, _userdata, msg):
"""Message received callback."""
dispatcher_send(
self.hass, SIGNAL_MQTT_MESSAGE_RECEIVED, msg.topic, msg.payload,
msg.qos
)
def _mqtt_on_unsubscribe(self, _mqttc, _userdata, mid, granted_qos):
"""Unsubscribe successful callback."""
topic = self.progress.pop(mid, None)
if topic is None:
return
self.topics.pop(topic, None)
def _mqtt_on_disconnect(self, _mqttc, _userdata, result_code):
"""Disconnected callback."""
        self.progress = {}
        # Keep only fully subscribed topics; drop in-flight (None) entries.
        self.topics = {key: value for key, value in self.topics.items()
                       if value is not None}
# When disconnected because of calling disconnect()
if result_code == 0:
return
tries = 0
wait_time = 0
while True:
try:
if self._mqttc.reconnect() == 0:
_LOGGER.info("Successfully reconnected to the MQTT server")
break
except socket.error:
pass
wait_time = min(2**tries, MAX_RECONNECT_WAIT)
_LOGGER.warning(
"Disconnected from MQTT (%s). Trying to reconnect in %s s",
result_code, wait_time)
# It is ok to sleep here as we are in the MQTT thread.
time.sleep(wait_time)
tries += 1
def _raise_on_error(result):
"""Raise error if error result."""
if result != 0:
import paho.mqtt.client as mqtt
raise HomeAssistantError(
'Error talking to MQTT: {}'.format(mqtt.error_string(result)))
def _match_topic(subscription, topic):
"""Test if topic matches subscription."""
reg_ex_parts = []
suffix = ""
if subscription.endswith('#'):
subscription = subscription[:-2]
suffix = "(.*)"
sub_parts = subscription.split('/')
for sub_part in sub_parts:
if sub_part == "+":
reg_ex_parts.append(r"([^\/]+)")
else:
reg_ex_parts.append(sub_part)
reg_ex = "^" + (r'\/'.join(reg_ex_parts)) + suffix + "$"
reg = re.compile(reg_ex)
return reg.match(topic) is not None
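# Illustrative sketch of the wildcard translation above (topics are made up):
# '+' matches exactly one level, '#' matches all remaining levels.
def _example_match_topic():
    assert _match_topic('sensor/+/state', 'sensor/kitchen/state')
    assert _match_topic('sensor/#', 'sensor/kitchen/temperature')
    assert not _match_topic('sensor/+/state', 'sensor/kitchen/temperature')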
|
|
import base64
import calendar
import datetime
import re
import unicodedata
import warnings
from binascii import Error as BinasciiError
from email.utils import formatdate
from urllib.parse import (
ParseResult, SplitResult, _coerce_args, _splitnetloc, _splitparams, quote,
quote_plus, scheme_chars, unquote, unquote_plus,
urlencode as original_urlencode, uses_params,
)
from django.core.exceptions import TooManyFieldsSent
from django.utils.datastructures import MultiValueDict
from django.utils.deprecation import RemovedInDjango40Warning
from django.utils.functional import keep_lazy_text
from django.utils.regex_helper import _lazy_re_compile
# based on RFC 7232, Appendix C
ETAG_MATCH = _lazy_re_compile(r'''
\A( # start of string and capture group
(?:W/)? # optional weak indicator
" # opening quote
[^"]* # any sequence of non-quote characters
" # end quote
)\Z # end of string and capture group
''', re.X)
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = _lazy_re_compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = _lazy_re_compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = _lazy_re_compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
RFC3986_GENDELIMS = ":/?#[]@"
RFC3986_SUBDELIMS = "!$&'()*+,;="
FIELDS_MATCH = _lazy_re_compile('[&;]')
@keep_lazy_text
def urlquote(url, safe='/'):
"""
A legacy compatibility wrapper to Python's urllib.parse.quote() function.
(was used for unicode handling on Python 2)
"""
warnings.warn(
'django.utils.http.urlquote() is deprecated in favor of '
'urllib.parse.quote().',
RemovedInDjango40Warning, stacklevel=2,
)
return quote(url, safe)
@keep_lazy_text
def urlquote_plus(url, safe=''):
"""
A legacy compatibility wrapper to Python's urllib.parse.quote_plus()
function. (was used for unicode handling on Python 2)
"""
warnings.warn(
'django.utils.http.urlquote_plus() is deprecated in favor of '
        'urllib.parse.quote_plus().',
RemovedInDjango40Warning, stacklevel=2,
)
return quote_plus(url, safe)
@keep_lazy_text
def urlunquote(quoted_url):
"""
A legacy compatibility wrapper to Python's urllib.parse.unquote() function.
(was used for unicode handling on Python 2)
"""
warnings.warn(
'django.utils.http.urlunquote() is deprecated in favor of '
'urllib.parse.unquote().',
RemovedInDjango40Warning, stacklevel=2,
)
return unquote(quoted_url)
@keep_lazy_text
def urlunquote_plus(quoted_url):
"""
A legacy compatibility wrapper to Python's urllib.parse.unquote_plus()
function. (was used for unicode handling on Python 2)
"""
warnings.warn(
'django.utils.http.urlunquote_plus() is deprecated in favor of '
'urllib.parse.unquote_plus().',
RemovedInDjango40Warning, stacklevel=2,
)
return unquote_plus(quoted_url)
def urlencode(query, doseq=False):
"""
A version of Python's urllib.parse.urlencode() function that can operate on
MultiValueDict and non-string values.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
query_params = []
for key, value in query:
if value is None:
raise TypeError(
"Cannot encode None for key '%s' in a query string. Did you "
"mean to pass an empty string or omit the value?" % key
)
elif not doseq or isinstance(value, (str, bytes)):
query_val = value
else:
try:
itr = iter(value)
except TypeError:
query_val = value
else:
# Consume generators and iterators, when doseq=True, to
# work around https://bugs.python.org/issue31706.
query_val = []
for item in itr:
if item is None:
raise TypeError(
"Cannot encode None for key '%s' in a query "
"string. Did you mean to pass an empty string or "
"omit the value?" % key
)
elif not isinstance(item, bytes):
item = str(item)
query_val.append(item)
query_params.append((key, query_val))
return original_urlencode(query_params, doseq)
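# Illustrative sketch (hypothetical data): with doseq=True, iterable values are
# expanded into repeated keys by the iterator-consuming branch above.
def _example_urlencode():
    assert urlencode({'a': 1, 'b': 2}) == 'a=1&b=2'
    assert urlencode({'a': [1, 2]}, doseq=True) == 'a=1&a=2'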
def http_date(epoch_seconds=None):
"""
Format the time to match the RFC1123 date format as specified by HTTP
RFC7231 section 7.1.1.1.
`epoch_seconds` is a floating point number expressed in seconds since the
epoch, in UTC - such as that outputted by time.time(). If set to None, it
defaults to the current time.
Output a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
return formatdate(epoch_seconds, usegmt=True)
def parse_http_date(date):
"""
Parse a date format as specified by HTTP RFC7231 section 7.1.1.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Return an integer expressed in seconds since the epoch, in UTC.
"""
# email.utils.parsedate() does the job for RFC1123 dates; unfortunately
# RFC7231 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m.group('year'))
if year < 100:
current_year = datetime.datetime.utcnow().year
current_century = current_year - (current_year % 100)
if year - (current_year % 100) > 50:
                # Years that appear to be more than 50 years in the future are
                # interpreted as representing the past.
year += current_century - 100
else:
year += current_century
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception as exc:
raise ValueError("%r is not a valid date" % date) from exc
def parse_http_date_safe(date):
"""
Same as parse_http_date, but return None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
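# Illustrative sketch: http_date() and parse_http_date() are inverses for
# whole-second timestamps. The constant below is the RFC 7231 example date.
def _example_http_date_roundtrip():
    timestamp = 784111777  # Sun, 06 Nov 1994 08:49:37 GMT
    assert http_date(timestamp) == 'Sun, 06 Nov 1994 08:49:37 GMT'
    assert parse_http_date(http_date(timestamp)) == timestamp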
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
Convert a base 36 string to an int. Raise ValueError if the input won't fit
into an int.
"""
# To prevent overconsumption of server resources, reject any
# base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
return int(s, 36)
def int_to_base36(i):
"""Convert an integer to a base36 string."""
char_set = '0123456789abcdefghijklmnopqrstuvwxyz'
if i < 0:
raise ValueError("Negative base36 conversion input.")
if i < 36:
return char_set[i]
b36 = ''
while i != 0:
i, n = divmod(i, 36)
b36 = char_set[n] + b36
return b36
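# Illustrative sketch (arbitrary values): the two base 36 helpers above invert
# each other for non-negative integers within the 13-digit limit.
def _example_base36_roundtrip():
    assert int_to_base36(35) == 'z'
    assert base36_to_int('z') == 35
    assert base36_to_int(int_to_base36(123456789)) == 123456789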
def urlsafe_base64_encode(s):
"""
Encode a bytestring to a base64 string for use in URLs. Strip any trailing
equal signs.
"""
return base64.urlsafe_b64encode(s).rstrip(b'\n=').decode('ascii')
def urlsafe_base64_decode(s):
"""
Decode a base64 encoded string. Add back any trailing equal signs that
might have been stripped.
"""
s = s.encode()
try:
return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b'='))
except (LookupError, BinasciiError) as e:
raise ValueError(e)
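# Illustrative sketch (arbitrary bytestring): encoding strips the '=' padding
# that decoding adds back before calling base64.
def _example_urlsafe_base64_roundtrip():
    encoded = urlsafe_base64_encode(b'django')
    assert encoded == 'ZGphbmdv'
    assert urlsafe_base64_decode(encoded) == b'django'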
def parse_etags(etag_str):
"""
Parse a string of ETags given in an If-None-Match or If-Match header as
defined by RFC 7232. Return a list of quoted ETags, or ['*'] if all ETags
should be matched.
"""
if etag_str.strip() == '*':
return ['*']
else:
# Parse each ETag individually, and return any that are valid.
etag_matches = (ETAG_MATCH.match(etag.strip()) for etag in etag_str.split(','))
return [match.group(1) for match in etag_matches if match]
def quote_etag(etag_str):
"""
If the provided string is already a quoted ETag, return it. Otherwise, wrap
the string in quotes, making it a strong ETag.
"""
if ETAG_MATCH.match(etag_str):
return etag_str
else:
return '"%s"' % etag_str
def is_same_domain(host, pattern):
"""
Return ``True`` if the host is either an exact match or a match
to the wildcard pattern.
Any pattern beginning with a period matches a domain and all of its
subdomains. (e.g. ``.example.com`` matches ``example.com`` and
``foo.example.com``). Anything else is an exact string match.
"""
if not pattern:
return False
pattern = pattern.lower()
return (
pattern[0] == '.' and (host.endswith(pattern) or host == pattern[1:]) or
pattern == host
)
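# Illustrative sketch (example.com is a placeholder): a leading dot in the
# pattern matches the domain and all of its subdomains.
def _example_is_same_domain():
    assert is_same_domain('example.com', 'example.com')
    assert is_same_domain('foo.example.com', '.example.com')
    assert not is_same_domain('foo.example.com', 'example.com')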
def url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False):
"""
Return ``True`` if the url uses an allowed host and a safe scheme.
Always return ``False`` on an empty url.
If ``require_https`` is ``True``, only 'https' will be considered a valid
scheme, as opposed to 'http' and 'https' with the default, ``False``.
Note: "True" doesn't entail that a URL is "safe". It may still be e.g.
quoted incorrectly. Ensure to also use django.utils.encoding.iri_to_uri()
on the path component of untrusted URLs.
"""
if url is not None:
url = url.strip()
if not url:
return False
if allowed_hosts is None:
allowed_hosts = set()
elif isinstance(allowed_hosts, str):
allowed_hosts = {allowed_hosts}
# Chrome treats \ completely as / in paths but it could be part of some
# basic auth credentials so we need to check both URLs.
return (
_url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=require_https) and
_url_has_allowed_host_and_scheme(url.replace('\\', '/'), allowed_hosts, require_https=require_https)
)
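# Illustrative sketch (hosts and URLs are made up): relative paths and allowed
# hosts pass, while foreign hosts and non-HTTP schemes are rejected.
def _example_url_has_allowed_host_and_scheme():
    allowed = {'example.com'}
    assert url_has_allowed_host_and_scheme('/next/page', allowed)
    assert url_has_allowed_host_and_scheme('https://example.com/next', allowed)
    assert not url_has_allowed_host_and_scheme('https://evil.com/next', allowed)
    assert not url_has_allowed_host_and_scheme('javascript:alert(1)', allowed)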
def is_safe_url(url, allowed_hosts, require_https=False):
warnings.warn(
'django.utils.http.is_safe_url() is deprecated in favor of '
'url_has_allowed_host_and_scheme().',
RemovedInDjango40Warning, stacklevel=2,
)
return url_has_allowed_host_and_scheme(url, allowed_hosts, require_https)
# Copied from urllib.parse.urlparse() but uses fixed urlsplit() function.
def _urlparse(url, scheme='', allow_fragments=True):
"""Parse a URL into 6 components:
<scheme>://<netloc>/<path>;<params>?<query>#<fragment>
Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
splitresult = _urlsplit(url, scheme, allow_fragments)
scheme, netloc, url, query, fragment = splitresult
if scheme in uses_params and ';' in url:
url, params = _splitparams(url)
else:
params = ''
result = ParseResult(scheme, netloc, url, params, query, fragment)
return _coerce_result(result)
# Copied from urllib.parse.urlsplit() with
# https://github.com/python/cpython/pull/661 applied.
def _urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
Return a 5-tuple: (scheme, netloc, path, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
netloc = query = fragment = ''
i = url.find(':')
if i > 0:
for c in url[:i]:
if c not in scheme_chars:
break
else:
scheme, url = url[:i].lower(), url[i + 1:]
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
return _coerce_result(v)
def _url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False):
# Chrome considers any URL with more than two slashes to be absolute, but
# urlparse is not so flexible. Treat any url with three slashes as unsafe.
if url.startswith('///'):
return False
try:
url_info = _urlparse(url)
except ValueError: # e.g. invalid IPv6 addresses
return False
# Forbid URLs like http:///example.com - with a scheme, but without a hostname.
# In that URL, example.com is not the hostname but, a path component. However,
# Chrome will still consider example.com to be the hostname, so we must not
# allow this syntax.
if not url_info.netloc and url_info.scheme:
return False
# Forbid URLs that start with control characters. Some browsers (like
# Chrome) ignore quite a few control characters at the start of a
# URL and might consider the URL as scheme relative.
if unicodedata.category(url[0])[0] == 'C':
return False
scheme = url_info.scheme
# Consider URLs without a scheme (e.g. //example.com/p) to be http.
if not url_info.scheme and url_info.netloc:
scheme = 'http'
valid_schemes = ['https'] if require_https else ['http', 'https']
return ((not url_info.netloc or url_info.netloc in allowed_hosts) and
(not scheme or scheme in valid_schemes))
def limited_parse_qsl(qs, keep_blank_values=False, encoding='utf-8',
errors='replace', fields_limit=None):
"""
Return a list of key/value tuples parsed from query string.
Copied from urlparse with an additional "fields_limit" argument.
Copyright (C) 2013 Python Software Foundation (see LICENSE.python).
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings. A
true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
encoding and errors: specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
fields_limit: maximum number of fields parsed or an exception
is raised. None means no limit and is the default.
"""
if fields_limit:
pairs = FIELDS_MATCH.split(qs, fields_limit)
if len(pairs) > fields_limit:
raise TooManyFieldsSent(
'The number of GET/POST parameters exceeded '
'settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.'
)
else:
pairs = FIELDS_MATCH.split(qs)
r = []
for name_value in pairs:
if not name_value:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if nv[1] or keep_blank_values:
name = nv[0].replace('+', ' ')
name = unquote(name, encoding=encoding, errors=errors)
value = nv[1].replace('+', ' ')
value = unquote(value, encoding=encoding, errors=errors)
r.append((name, value))
return r
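# Illustrative sketch (made-up query string): fields_limit bounds the number of
# parsed pairs and TooManyFieldsSent is raised when it is exceeded.
def _example_limited_parse_qsl():
    assert limited_parse_qsl('a=1&b=2') == [('a', '1'), ('b', '2')]
    try:
        limited_parse_qsl('a=1&b=2&c=3', fields_limit=2)
    except TooManyFieldsSent:
        pass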
def escape_leading_slashes(url):
"""
If redirecting to an absolute path (two leading slashes), a slash must be
escaped to prevent browsers from handling the path as schemaless and
redirecting to another host.
"""
if url.startswith('//'):
url = '/%2F{}'.format(url[2:])
return url
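# Illustrative sketch (made-up redirect targets): only protocol-relative URLs
# get their second leading slash escaped.
def _example_escape_leading_slashes():
    assert escape_leading_slashes('//evil.com/path') == '/%2Fevil.com/path'
    assert escape_leading_slashes('/normal/path') == '/normal/path'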
|
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Provides interfaces to various longitudinal commands provided by freesurfer
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
__docformat__ = 'restructuredtext'
import os
import os.path
from ... utils.filemanip import split_filename, copyfile
from ..freesurfer.base import (Info, FSCommand, FSTraitedSpec,
FSScriptCommand,
FSScriptOutputSpec,
FSCommandOpenMP,
FSTraitedSpecOpenMP)
from ..base import (isdefined, TraitedSpec, File, traits, Directory)
from ... import logging
iflogger = logging.getLogger('interface')
class MPRtoMNI305InputSpec(FSTraitedSpec):
# environment variables, required
# usedefault=True is hack for on_trait_change in __init__
reference_dir = Directory(
"", exists=True, mandatory=True, usedefault=True, desc="TODO")
target = traits.String(
"", mandatory=True, usedefault=True, desc="input atlas file")
# required
in_file = File(argstr='%s', usedefault=True,
desc="the input file prefix for MPRtoMNI305")
class MPRtoMNI305OutputSpec(FSScriptOutputSpec):
out_file = File(
exists=False, desc="The output file '<in_file>_to_<target>_t4_vox2vox.txt'")
class MPRtoMNI305(FSScriptCommand):
"""
For complete details, see FreeSurfer documentation
Examples
========
>>> from nipype.interfaces.freesurfer import MPRtoMNI305, Info
>>> mprtomni305 = MPRtoMNI305()
>>> mprtomni305.inputs.target = 'structural.nii'
>>> mprtomni305.inputs.reference_dir = '.' # doctest: +SKIP
>>> mprtomni305.cmdline # doctest: +SKIP
'mpr2mni305 output'
>>> mprtomni305.inputs.out_file = 'struct_out' # doctest: +SKIP
>>> mprtomni305.cmdline # doctest: +SKIP
'mpr2mni305 struct_out' # doctest: +SKIP
>>> mprtomni305.inputs.environ['REFDIR'] == os.path.join(Info.home(), 'average') # doctest: +SKIP
True
>>> mprtomni305.inputs.environ['MPR2MNI305_TARGET'] # doctest: +SKIP
'structural'
>>> mprtomni305.run() # doctest: +SKIP
"""
_cmd = "mpr2mni305"
input_spec = MPRtoMNI305InputSpec
output_spec = MPRtoMNI305OutputSpec
def __init__(self, **inputs):
super(MPRtoMNI305, self).__init__(**inputs)
self.inputs.on_trait_change(self._environ_update, 'target')
self.inputs.on_trait_change(self._environ_update, 'reference_dir')
def _format_arg(self, opt, spec, val):
if opt in ['target', 'reference_dir']:
return ""
elif opt == 'in_file':
_, retval, ext = split_filename(val)
# Need to copy file to working cache directory!
copyfile(val, os.path.abspath(retval + ext),
copy=True, hashmethod='content')
return retval
return super(MPRtoMNI305, self)._format_arg(opt, spec, val)
def _environ_update(self):
# refdir = os.path.join(Info.home(), val)
refdir = self.inputs.reference_dir
target = self.inputs.target
self.inputs.environ['MPR2MNI305_TARGET'] = target
self.inputs.environ["REFDIR"] = refdir
def _get_fname(self, fname):
return split_filename(fname)[1]
def _list_outputs(self):
outputs = super(MPRtoMNI305, self)._list_outputs()
fullname = "_".join([self._get_fname(self.inputs.in_file), "to",
self.inputs.target, "t4", "vox2vox.txt"])
outputs['out_file'] = os.path.abspath(fullname)
return outputs
class RegisterAVItoTalairachInputSpec(FSTraitedSpec):
in_file = File(argstr='%s', exists=True, mandatory=True,
position=0, desc="The input file")
target = File(argstr='%s', exists=True, mandatory=True,
position=1, desc="The target file")
vox2vox = File(argstr='%s', exists=True, mandatory=True,
position=2, desc="The vox2vox file")
out_file = File(argstr='%s', mandatory=False, genfile=True,
position=3, desc="The transform output")
class RegisterAVItoTalairachOutputSpec(FSScriptOutputSpec):
out_file = traits.File(
exists=False, desc="The output file for RegisterAVItoTalairach")
class RegisterAVItoTalairach(FSScriptCommand):
"""
    Converts the vox2vox from talairach_avi to a talairach.xfm file.
    It is meant to replace the following command line:
tkregister2_cmdl \
--mov $InVol \
--targ $FREESURFER_HOME/average/mni305.cor.mgz \
--xfmout ${XFM} \
--vox2vox talsrcimg_to_${target}_t4_vox2vox.txt \
--noedit \
--reg talsrcimg.reg.tmp.dat
set targ = $FREESURFER_HOME/average/mni305.cor.mgz
set subject = mgh-02407836-v2
set InVol = $SUBJECTS_DIR/$subject/mri/orig.mgz
set vox2vox = $SUBJECTS_DIR/$subject/mri/transforms/talsrcimg_to_711-2C_as_mni_average_305_t4_vox2vox.txt
Examples
========
>>> from nipype.interfaces.freesurfer import RegisterAVItoTalairach
>>> register = RegisterAVItoTalairach()
>>> register.inputs.in_file = 'structural.mgz' # doctest: +SKIP
>>> register.inputs.target = 'mni305.cor.mgz' # doctest: +SKIP
>>> register.inputs.vox2vox = 'talsrcimg_to_structural_t4_vox2vox.txt' # doctest: +SKIP
>>> register.cmdline # doctest: +SKIP
'avi2talxfm structural.mgz mni305.cor.mgz talsrcimg_to_structural_t4_vox2vox.txt talairach.auto.xfm'
>>> register.run() # doctest: +SKIP
"""
_cmd = "avi2talxfm"
input_spec = RegisterAVItoTalairachInputSpec
output_spec = RegisterAVItoTalairachOutputSpec
def _gen_filename(self, name):
if name == 'out_file':
return self._list_outputs()[name]
return None
def _list_outputs(self):
outputs = super(RegisterAVItoTalairach, self)._list_outputs()
# outputs = self.output_spec().get()
if isdefined(self.inputs.out_file):
outputs['out_file'] = os.path.abspath(self.inputs.out_file)
else:
outputs['out_file'] = 'talairach.auto.xfm'
return outputs
class EMRegisterInputSpec(FSTraitedSpecOpenMP):
# required
in_file = File(argstr="%s", exists=True, mandatory=True,
position=-3, desc="in brain volume")
template = File(argstr="%s", exists=True, mandatory=True,
position=-2, desc="template gca")
out_file = File(argstr="%s", exists=False, mandatory=True,
position=-1, genfile=True, desc="output transform")
# optional
skull = traits.Bool(
argstr="-skull", desc="align to atlas containing skull (uns=5)")
mask = File(argstr="-mask %s", exists=True,
mandatory=False, desc="use volume as a mask")
nbrspacing = traits.Int(argstr="-uns %d", mandatory=False,
desc="align to atlas containing skull setting unknown_nbr_spacing = nbrspacing")
transform = File(argstr="-t %s", exists=True, mandatory=False,
desc="Previously computed transform")
class EMRegisterOutputSpec(TraitedSpec):
out_file = File(exists=False, desc="output transform")
class EMRegister(FSCommandOpenMP):
""" This program creates a tranform in lta format
Examples
========
>>> from nipype.interfaces.freesurfer import EMRegister
>>> register = EMRegister()
>>> register.inputs.in_file = 'norm.mgz'
>>> register.inputs.template = 'aseg.mgz'
>>> register.inputs.out_file = 'talairach_with_skull.lta'
>>> register.inputs.skull = True
>>> register.inputs.nbrspacing = 9
>>> register.cmdline
'mri_em_register -uns 9 -skull norm.mgz aseg.mgz talairach_with_skull.lta'
"""
_cmd = 'mri_em_register'
input_spec = EMRegisterInputSpec
output_spec = EMRegisterOutputSpec
def _gen_filename(self, name):
if name == 'out_file':
return self._list_outputs()[name]
return None
def _list_outputs(self):
outputs = self.output_spec().get()
if isdefined(self.inputs.out_file):
outputs['out_file'] = os.path.abspath(self.inputs.out_file)
else:
outputs['out_file'] = 'talairach_with_skull.lta'
return outputs
class RegisterInputSpec(FSTraitedSpec):
# required
in_surf = File(argstr="%s", exists=True, mandatory=True, position=-3,
desc="Surface to register, often {hemi}.sphere")
target = File(argstr="%s", exists=True, mandatory=True, position=-2,
desc="The data to register to. In normal recon-all usage, this is a template file for average surface.")
in_smoothwm = File(exists=True, mandatory=True,
desc="Undocumented mandatory input file ${SUBJECTS_DIR}/surf/{hemisphere}.smoothwm ")
in_sulc = File(exists=True, mandatory=True,
desc="Undocumented mandatory input file ${SUBJECTS_DIR}/surf/{hemisphere}.sulc ")
# optional
curv = File(argstr="-curv", mandatory=False, exists=True,
desc="Use smoothwm curvature for final alignment")
out_file = File(argstr="%s", exists=False, position=-1, genfile=True,
desc="Output surface file to capture registration")
class RegisterOutputSpec(TraitedSpec):
out_file = File(
exists=False, desc="Output surface file to capture registration")
class Register(FSCommand):
""" This program registers a surface to an average surface template.
Examples
========
>>> from nipype.interfaces.freesurfer import Register
>>> register = Register()
>>> register.inputs.in_surf = 'lh.pial'
>>> register.inputs.in_smoothwm = 'lh.pial'
>>> register.inputs.in_sulc = 'lh.pial'
>>> register.inputs.target = 'aseg.mgz'
>>> register.inputs.out_file = 'lh.sphere.reg'
>>> register.cmdline
'mris_register lh.pial aseg.mgz lh.sphere.reg'
"""
_cmd = 'mris_register'
input_spec = RegisterInputSpec
output_spec = RegisterOutputSpec
def _format_arg(self, opt, spec, val):
if opt == 'curv':
return spec.argstr
return super(Register, self)._format_arg(opt, spec, val)
def _gen_filename(self, name):
if name == 'out_file':
return self._list_outputs()[name]
return None
def _list_outputs(self):
outputs = self.output_spec().get()
if isdefined(self.inputs.out_file):
outputs['out_file'] = os.path.abspath(self.inputs.out_file)
else:
outputs['out_file'] = os.path.abspath(self.inputs.in_surf) + '.reg'
return outputs
class PaintInputSpec(FSTraitedSpec):
# required
in_surf = File(argstr="%s", exists=True, mandatory=True, position=-2,
desc="Surface file with grid (vertices) onto which the template data is to be sampled or 'painted'")
template = File(argstr="%s", exists=True, mandatory=True, position=-3,
desc="Template file")
# optional
template_param = traits.Int(
mandatory=False, desc="Frame number of the input template")
averages = traits.Int(argstr="-a %d", mandatory=False,
desc="Average curvature patterns")
out_file = File(argstr="%s", exists=False, position=-1, genfile=True,
desc="File containing a surface-worth of per-vertex values, saved in 'curvature' format.")
class PaintOutputSpec(TraitedSpec):
out_file = File(exists=False,
desc="File containing a surface-worth of per-vertex values, saved in 'curvature' format.")
class Paint(FSCommand):
"""
This program is useful for extracting one of the arrays ("a variable")
from a surface-registration template file. The output is a file
containing a surface-worth of per-vertex values, saved in "curvature"
format. Because the template data is sampled to a particular surface
mesh, this conjures the idea of "painting to a surface".
Examples
========
>>> from nipype.interfaces.freesurfer import Paint
>>> paint = Paint()
>>> paint.inputs.in_surf = 'lh.pial'
>>> paint.inputs.template = 'aseg.mgz'
>>> paint.inputs.averages = 5
>>> paint.inputs.out_file = 'lh.avg_curv'
>>> paint.cmdline
'mrisp_paint -a 5 aseg.mgz lh.pial lh.avg_curv'
"""
_cmd = 'mrisp_paint'
input_spec = PaintInputSpec
output_spec = PaintOutputSpec
def _format_arg(self, opt, spec, val):
if opt == 'template':
if isdefined(self.inputs.template_param):
return spec.argstr % (val + '#' + str(self.inputs.template_param))
return super(Paint, self)._format_arg(opt, spec, val)
def _gen_filename(self, name):
if name == 'out_file':
return self._list_outputs()[name]
return None
def _list_outputs(self):
outputs = self.output_spec().get()
if isdefined(self.inputs.out_file):
outputs['out_file'] = os.path.abspath(self.inputs.out_file)
else:
head, tail = os.path.split(self.inputs.in_surf)
hemisphere = tail.split('.')[0]
filename = hemisphere + '.avg_curv'
outputs['out_file'] = os.path.join(head, filename)
return outputs
|
|
'''
Brute Force Agent Actions.
'''
import sys,os
import re, random
from furl import *
from urllib.parse import urlparse
import time, signal
from multiprocessing import Process
import stomp
import re
from daemonize import Daemonize
from os.path import basename
current_dir = os.path.basename(os.getcwd())
if current_dir == "agents":
sys.path.append('../')
if current_dir == "Kurgan-Framework":
sys.path.append('./')
from libs.STOMP import STOMP_Connector
from libs.FIPA import FIPAMessage
from libs.Transport import Transport
import libs.Utils as utl
import libs.Target as target
import config as cf
import libs.BruteForceGeneric as bruteforce
import libs.BruteForceHeadless as bruteforceheadless
AGENT_NAME="AgentBruteForce"
AGENT_ID="5"
ALL_AGENTS = "All"
startTime = time.time()
class BruteForceAction():
mAgent = ''
available_agents = []
msg_id=[]
baseUrlTarget = ''
content = ''
is_running = False
urlTarget = ''
agent_can_run = True
accounts_discovered = []
def set_agent_can_run(self, val):
self.agent_can_run = val
def get_agent_can_run(self):
return self.agent_can_run
def set_accounts_discovered(self, val):
self.accounts_discovered.append(val)
def get_accounts_discovered(self):
return self.accounts_discovered
    def zera_accounts_discovered(self):
        # Clear the list of previously discovered accounts in place.
        del self.accounts_discovered[:]
def set_mAgent(self, val):
self.mAgent = val
def set_baseUrlTarget(self, val):
self.baseUrlTarget = val
def get_baseUrlTarget(self):
return self.baseUrlTarget
def set_UrlTarget(self, val):
self.urlTarget = val
def get_UrlTarget(self):
return self.urlTarget
def registerAgent(self):
performative = "subscribe"
toAgent = ALL_AGENTS
content = ("Register Agent (= (agent-name) (" + AGENT_NAME + "))\n")
reply_with = utl.id_generator()
conversation_id = utl.id_gen()
msg = self.mAgent.set_data_to_agent(performative,AGENT_NAME, toAgent, content, reply_with, conversation_id)
ret = self.mAgent.send_data_to_agent(msg)
def deregister(self):
performative = "subscribe"
toAgent = ALL_AGENTS
content = ("Deregister Agent (= (agent-name) (" + AGENT_NAME + "))\n")
reply_with = utl.id_generator()
conversation_id = utl.id_gen()
msg = self.mAgent.set_data_to_agent(performative,AGENT_NAME, toAgent, content, reply_with, conversation_id)
ret = self.mAgent.send_data_to_agent(msg)
def cfp(self,reqfunction,values):
performative = "cfp"
toAgent = ALL_AGENTS
content = ("Call For Propose (= (" + reqfunction + ") (" + values + "))\n")
reply_with = utl.id_generator()
conversation_id = utl.id_gen()
msg = self.mAgent.set_data_to_agent(performative,AGENT_NAME, toAgent, content, reply_with, conversation_id)
ret = self.mAgent.send_data_to_agent(msg)
def requestInfo(self,performative, toAgent, reqfunction,values):
content = ("Request Information (= (" + reqfunction + ") (" + values + "))\n")
reply_with = utl.id_generator()
conversation_id = utl.id_gen()
msg = self.mAgent.set_data_to_agent(performative,AGENT_NAME, toAgent, content, reply_with, conversation_id)
ret = self.mAgent.send_data_to_agent(msg)
def responseInfo(self,performative, toAgent, reply_to, reqfunction,values):
content = ("Response (= (" + reqfunction + ") (" + values + "))\n")
conversation_id = utl.id_gen()
msg = self.mAgent.set_response_to_agent(performative,AGENT_NAME, toAgent, content, reply_to, conversation_id)
ret = self.mAgent.send_data_to_agent(msg)
return ret
def registerUrl(self, url, toAgent):
performative = "inform"
reply_with = utl.id_generator()
conversation_id = utl.id_gen()
content = ("Register urlTarget (= (url-target) (" + url + "))\n")
msg = self.mAgent.set_data_to_agent(performative,AGENT_NAME, toAgent, content, reply_with, conversation_id)
ret = self.mAgent.send_data_to_agent(msg)
def set_content(self, val):
self.content = val
def get_content(self):
return self.content
def run_bf_target(self, toAgent):
self.zera_accounts_discovered()
bf = bruteforce.BruteForceGeneric()
self.accounts_discovered = bf.runBF(self.baseUrlTarget)
self.accounts_discovered = bf.get_accounts_discovered()
body = ''
for i in self.accounts_discovered:
body = body + i + "\n"
performative = "inform"
reply_with = utl.id_generator()
conversation_id = utl.id_gen()
content = ("Response brute force (= (run-brute-force) (" + body + "))\n")
msg = self.mAgent.set_data_to_agent(performative,AGENT_NAME, toAgent, content, reply_with, conversation_id)
ret = self.mAgent.send_data_to_agent(msg)
self.is_running = False
def runBruteForce(self, toAgent):
if self.is_running is True:
performative = "inform"
reply_with = utl.id_generator()
conversation_id = utl.id_gen()
body = "Brute Force in execution..."
content = ("Response From Brute Force (= (run-brute-force) ("
+ body +
"))\n")
msg = self.mAgent.set_data_to_agent(performative,AGENT_NAME, toAgent, content, reply_with, conversation_id)
ret = self.mAgent.send_data_to_agent(msg)
return ret
else:
self.is_running = True
            # Pass the callable and its arguments so the work runs in the child process.
            p = Process(target=self.run_bf_target, args=(toAgent,))
p.start()
def run_bf_headless_target(self, toAgent):
self.zera_accounts_discovered()
bf = bruteforceheadless.BruteForceHeadless()
bf.set_urlTarget(self.baseUrlTarget)
self.accounts_discovered = bf.runBF()
self.accounts_discovered = bf.get_accounts_discovered()
body = ''
for i in self.accounts_discovered:
body = body + i + "\n"
performative = "inform"
reply_with = utl.id_generator()
conversation_id = utl.id_gen()
content = ("Response brute force Headless (= (run-brute-force-headless) (" + body + "))\n")
msg = self.mAgent.set_data_to_agent(performative,AGENT_NAME, toAgent, content, reply_with, conversation_id)
ret = self.mAgent.send_data_to_agent(msg)
self.is_running = False
def runBruteForceHeadless(self, toAgent):
if self.is_running is True:
performative = "inform"
reply_with = utl.id_generator()
conversation_id = utl.id_gen()
body = "Brute Force Headless in execution..."
content = ("Response From Brute Force Headless(= (run-brute-force-headless) ("
+ body +
"))\n")
msg = self.mAgent.set_data_to_agent(performative,AGENT_NAME, toAgent, content, reply_with, conversation_id)
ret = self.mAgent.send_data_to_agent(msg)
return ret
else:
self.is_running = True
            # Pass the callable and its arguments so the work runs in the child process.
            p = Process(target=self.run_bf_headless_target, args=(toAgent,))
p.start()
def agentStatus(self, toAgent):
status = "UP"
performative = "inform"
reply_with = utl.id_generator()
conversation_id = utl.id_gen()
uptime = time.time() - startTime
content = ("Response agent-status (= (agent-status) ("
"AgentName: " + AGENT_NAME + "\n"
"Agend_id: " + AGENT_ID + "\n"
"Uptime: %0.2f " % uptime + "\n"
"))\n")
msg = self.mAgent.set_data_to_agent(performative,AGENT_NAME, toAgent, content, reply_with, conversation_id)
ret = self.mAgent.send_data_to_agent(msg)
return ret
def add_available_agent(self, agent_id):
self.available_agents.append(agent_id)
    def del_available_agent(self, agent_id):
        if agent_id in self.available_agents:
            self.available_agents.remove(agent_id)
def get_available_agents(self):
return self.available_agents
def parse_action(self, fm):
performative = fm.get_performative()
action_function = fm.get_fname()
description = fm.get_fdescription()
values = fm.get_fvalues()
toAgent = fm.get_sender()
reply_with = fm.get_reply_with()
mAgent = Transport()
self.set_mAgent(mAgent)
if action_function == "set-run-brute-force" and performative=='inform':
if values == "True":
self.agent_can_run = True
else:
if values == "False":
self.agent_can_run = False
else:
self.agent_can_run = False #check this
if action_function == "run-brute-force" and performative=='request':
if self.agent_can_run is True:
print ("Running Brute Force...")
ret = self.runBruteForce(toAgent)
else:
values = "False"
reply_to = reply_with
self.responseInfo('inform', toAgent, reply_to, "run-brute-force", values)
if action_function == "run-brute-force-headless" and performative=='request':
if self.agent_can_run is True:
print ("Running Brute Force Headless...")
ret = self.runBruteForceHeadless(toAgent)
else:
values = "False"
reply_to = reply_with
self.responseInfo('inform', toAgent, reply_to, "run-brute-force-headless", values)
if action_function == "brute-force-get-accounts" and performative == "request":
print ("Sending Accounts Discovery to: " , toAgent)
values = str(self.accounts_discovered)
reply_to = reply_with
self.responseInfo('inform', toAgent, reply_to, "brute-force-get-accounts", values)
if action_function == "url-target":
print ("Sending url-target to " , toAgent)
            ret = self.registerUrl(self.urlTarget, toAgent)
if action_function == "agent-status":
print ("Sending agent-up to " , toAgent)
ret = self.agentStatus(toAgent)
if action_function == "base-url-target":
if performative == 'request':
print ("Sending base-url-target to: " , toAgent)
values = self.get_baseUrlTarget()
reply_to = reply_with
self.responseInfo('inform', toAgent, reply_to, "base-url-target", values)
elif performative == 'inform':
self.baseUrlTarget = values
def receive_pkg(self, mAgent):
fm = FIPAMessage()
while True:
time.sleep(1)
rcv = mAgent.receive_data_from_agents()
if not len(rcv) == 0:
fm.parse_pkg(rcv)
match = re.search("message-id:(.\w+\-\w+)", rcv)
if match:
message_id = match.group(1).lstrip()
if message_id in self.msg_id:
continue
else:
self.msg_id.append(message_id)
mAgent.zera_buff()
receiver = fm.get_receiver()
sender = fm.get_sender()
if receiver == ALL_AGENTS or receiver == AGENT_NAME:
if sender != AGENT_NAME:
self.parse_action(fm)
#break
else:
continue
#print(rcv)
#break
else:
continue
#print(rcv)
break
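# Hypothetical wiring sketch (not part of the framework's own runner): it uses
# only calls that already appear in this module, with a made-up target URL.
def _example_main():
    action = BruteForceAction()
    transport = Transport()
    action.set_mAgent(transport)
    action.set_baseUrlTarget("http://target.example/")
    action.registerAgent()
    action.receive_pkg(transport)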
|
|
from cgitb import small
__author__ = 'Shailesh'
import copy
from collections import defaultdict
from nltk.tokenize import wordpunct_tokenize
from termcolor import colored
from nltk.corpus import stopwords,names
import math
from Sentiment import Sentiment
from PredictionFunctions import PredictionFunctions
class Angel:
stopWords = set(stopwords.words())
engNames = set(names.words())
GlobalAdjList = {"disappointing", "disappointed", "disappointment", "fresh", "freshly", "tasty", "delicious",
"poor", "badly", "sadly", "sucks", "sucked", "crispy", "yelled", "love", "loved", "loving",
"poorly", "underwhelming"}
def __init__(self, lexicon, debug=False, smallReviews=False):
self.lexicon = copy.deepcopy(lexicon)
self.debug = debug
self.DumpRequested = self.DefaultDumpFunction
self.smallReviews = smallReviews
def SetDumpParameters(self, positiveThreshold=1, negativeThreshold=-1):
self.positiveThreshold = positiveThreshold
self.negativeThreshold = negativeThreshold
def PredictBase(self, adjectives):
"""
Predict the base of the multiplier using the number of polar adjectives.
The values have been determined experimentally to maximize results.
"""
# Get the list of Adjectives which have sentiment polarity greater than 0.1
if self.smallReviews:
return 1.0
PolarAdjList = [l for l in adjectives if l in self.lexicon and math.fabs(float(self.lexicon[l])) > 0.1]
if len(PolarAdjList) > 0:
return 12.0/len(PolarAdjList)
# elif len(list2) < 8:
# return 2.0
else:
return 1.0
def PredictMultiplier(self, word, dependencies, words, i):
"""
Given a word, calculate how other words affect the polarity of this word.
E.g: "not good" will change the polarity of "good" to negative.
Returns a multiplier for the word.
"""
# return PredictionFunctions.DependencyFunction(self.lexicon, word, dependencies, words, i)
# return PredictionFunctions.RelativeFunction(self.lexicon, word, dependencies, words, i)
return PredictionFunctions.CombinedFunction(self.lexicon, word, dependencies, words, i)
def CalculateNotScore(self, notCount):
"""
Method to calculate the reduction in overall score based on the negativity in the review.
The values have been determined experimentally.
Ideally, this method should not be used, as it is against the objective of this program.
Usage will be removed later.
"""
if notCount >= 10:
notScore = 4
elif notCount >= 7:
notScore = 2
elif notCount >= 4:
notScore = 1
else:
notScore = 0
return notScore
def ExtractSentDetails(self, sentence):
"""
        Method to extract properties from a sentence object.
:param sentence: The dictionary of sentence properties extracted from the JSON/XML sentence element
"""
if "Adjectives" in sentence:
adjList = [w.lower() for w in sentence["Adjectives"] if w.lower() not in Angel.stopWords and w.lower() not in Angel.engNames]
adjectives = set(adjList)
else:
adjectives = set()
dependencies = defaultdict(dict)
if "Dependencies" in sentence:
if not isinstance(sentence["Dependencies"],list):
sentence["Dependencies"] = [sentence["Dependencies"]]
for dep in sentence["Dependencies"]:
line = dep.split(',')
if len(line) != 3:
continue
relation, adj, other = line
adj, other = adj.lower(), other.lower()
if relation in {'amod', 'acomp', 'ccomp', 'pobj', 'dep'}:
adj, other = other, adj
if relation not in dependencies[adj]:
dependencies[adj][relation] = set()
dependencies[adj][relation].add(other)
if relation == 'conj':
adjectives.add(other)
dictconj, other = defaultdict(dict), None
for adj in dependencies:
if 'conj' in dependencies[adj]:
for other in dependencies[adj]['conj']:
dictconj[other] = copy.deepcopy(dependencies[adj])
for adj in dictconj:
for relation in dictconj[adj]:
if relation not in dependencies[adj]:
dependencies[adj][relation] = set()
dependencies[adj][relation] |= dictconj[adj][relation]
return adjectives, dependencies
def DumpDetails(self, sentences, label="N.A."):
"""
        This method uses the same logic as PredictReviewScore to rate a review,
        but prints all the intermediate details of the calculation.
"""
AdjR = 0.0
adjAll = []
for sentence in sentences:
# if sentence["Text"].startswith("Joanie is not helpful"):
# x = 1
adjectives, dependencies = self.ExtractSentDetails(sentence)
adjAll.extend(adjectives)
allAdjectives = adjectives | Angel.GlobalAdjList
AdjS = 0.0
words = wordpunct_tokenize(sentence["Text"])
if len(words) <= 3:
allAdjectives |= set([x.lower() for x in words])
for i in range(len(words)):
word = words[i].lower()
if word in {"but", "if"}:
AdjS = 0.0
print words[i],
elif word in allAdjectives and word in self.lexicon:
multiplier = self.PredictMultiplier(word, dependencies[word], words, i)
score = float(self.lexicon[word]) * multiplier
if multiplier < 1:
colortext = colored(words[i] + " (" + '{:.3}'.format(score) + ")", 'red',None,['underline'])
elif multiplier > 1:
colortext = colored(words[i] + " (" + '{:.3}'.format(score) + ")", 'red',None,['bold'])
else:
colortext = colored(words[i] + " (" + '{:.3}'.format(score) + ")", 'red')
AdjS += score
print colortext,
else:
print words[i],
print
colortext = colored("Adjectives: " + '{:.3}'.format(AdjS),'red')
print colortext
AdjR += AdjS
print
print "Label:", label
base = self.PredictBase(adjAll)
colortext = colored("Adjectives: " + str(AdjR) + "*" + str(base) + " = " + str(AdjR*base),'red')
print colortext
def PredictReviewScore(self, sentences, label=0):
"""
This method gives a score to a review.
"""
AdjR = 0.0
# if text.startswith("For more photos and reviews do check out fourleggedfoodies"):
# x = 1
adjAll = []
for sentence in sentences:
adjectives, dependencies = self.ExtractSentDetails(sentence)
adjAll.extend(adjectives)
allAdjectives = adjectives | Angel.GlobalAdjList
AdjS = 0.0
words = wordpunct_tokenize(sentence["Text"])
if len(words) <= 3:
allAdjectives |= set([x.lower() for x in words])
for i in range(len(words)):
word = words[i].lower()
if word in {"but", "if"}:
AdjS = 0.0
elif word in allAdjectives and word in self.lexicon:
AdjS += float(self.lexicon[word]) * self.PredictMultiplier(word, dependencies[word], words, i)
AdjR += AdjS
AdjR *= self.PredictBase(adjAll)
finalScore = AdjR
if self.DumpRequested(finalScore, label):
self.DumpDetails(sentences, label)
return finalScore
def DefaultDumpFunction(self, score, label):
if not self.debug:
return False
if label == Sentiment.NEGATIVE and score > self.positiveThreshold:
return True
elif label == Sentiment.POSITIVE and score < self.negativeThreshold:
return True
return False
def GetImpact(self, sentences):
impactTable = dict()
adjAll = []
totalImpact = 0.0
for sentence in sentences:
adjectives, dependencies = self.ExtractSentDetails(sentence)
adjAll.extend(adjectives)
allAdjectives = adjectives | Angel.GlobalAdjList
sentenceImpact = 0.0
words = wordpunct_tokenize(sentence["Text"])
if len(words) <= 3:
allAdjectives |= set([x.lower() for x in words])
for i in range(len(words)):
word = words[i].lower()
if word in allAdjectives and word in self.lexicon:
score = float(self.lexicon[word])
multiplier = self.PredictMultiplier(word, dependencies[word], words, i)
if multiplier != 0:
impactTable[word] = (math.fabs(score*multiplier), multiplier)
sentenceImpact += math.fabs(score * multiplier)
totalImpact += sentenceImpact
return totalImpact, impactTable
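# Minimal usage sketch (hypothetical lexicon and review): the sentence
# dictionaries mirror the keys ExtractSentDetails expects ("Text",
# "Adjectives" and, optionally, "Dependencies").
def _example_score():
    lexicon = {"tasty": "0.8", "poor": "-0.7"}
    angel = Angel(lexicon, smallReviews=True)
    review = [{"Text": "The food was tasty but the service was poor",
               "Adjectives": ["tasty", "poor"]}]
    return angel.PredictReviewScore(review)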
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from django.core.urlresolvers import reverse
from django import http
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.dashboards.project.stacks import forms
from openstack_dashboard.dashboards.project.stacks import mappings
INDEX_URL = reverse('horizon:project:stacks:index')
class MockResource(object):
def __init__(self, resource_type, physical_resource_id):
self.resource_type = resource_type
self.physical_resource_id = physical_resource_id
class MappingsTests(test.TestCase):
def test_mappings(self):
def assertMappingUrl(url, resource_type, physical_resource_id):
mock = MockResource(resource_type, physical_resource_id)
mock_url = mappings.resource_to_url(mock)
self.assertEqual(url, mock_url)
assertMappingUrl(
'/project/networks/subnets/aaa/detail',
'OS::Quantum::Subnet',
'aaa')
assertMappingUrl(
None,
'OS::Quantum::Subnet',
None)
assertMappingUrl(
None,
None,
None)
assertMappingUrl(
None,
'AWS::AutoScaling::LaunchConfiguration',
'aaa')
assertMappingUrl(
'/project/instances/aaa/',
'AWS::EC2::Instance',
'aaa')
assertMappingUrl(
'/project/containers/aaa/',
'OS::Swift::Container',
'aaa')
assertMappingUrl(
None,
'Foo::Bar::Baz',
'aaa')
def test_stack_output(self):
self.assertEqual(u'foo', mappings.stack_output('foo'))
self.assertEqual(u'', mappings.stack_output(None))
self.assertEqual(
u'<pre>[\n "one", \n "two", \n "three"\n]</pre>',
mappings.stack_output(['one', 'two', 'three']))
self.assertEqual(
u'<pre>{\n "foo": "bar"\n}</pre>',
mappings.stack_output({'foo': 'bar'}))
self.assertEqual(
u'<a href="http://www.example.com/foo" target="_blank">'
'http://www.example.com/foo</a>',
mappings.stack_output('http://www.example.com/foo'))
class StackTests(test.TestCase):
@test.create_stubs({api.heat: ('stacks_list',)})
def test_index(self):
stacks = self.stacks.list()
api.heat.stacks_list(IsA(http.HttpRequest)) \
.AndReturn(stacks)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/stacks/index.html')
self.assertIn('table', res.context)
resp_stacks = res.context['table'].data
self.assertEqual(len(resp_stacks), len(stacks))
@test.create_stubs({api.heat: ('stack_create', 'template_validate')})
def test_launch_stack(self):
template = self.stack_templates.first()
stack = self.stacks.first()
api.heat.template_validate(IsA(http.HttpRequest),
template=template.data) \
.AndReturn(json.loads(template.validate))
api.heat.stack_create(IsA(http.HttpRequest),
stack_name=stack.stack_name,
timeout_mins=60,
disable_rollback=True,
template=template.data,
parameters=IsA(dict),
password='password')
self.mox.ReplayAll()
url = reverse('horizon:project:stacks:select_template')
res = self.client.get(url)
self.assertTemplateUsed(res, 'project/stacks/select_template.html')
form_data = {'template_source': 'raw',
'template_data': template.data,
'method': forms.TemplateForm.__name__}
res = self.client.post(url, form_data)
self.assertTemplateUsed(res, 'project/stacks/create.html')
url = reverse('horizon:project:stacks:launch')
form_data = {'template_source': 'raw',
'template_data': template.data,
'password': 'password',
'parameters': template.validate,
'stack_name': stack.stack_name,
"timeout_mins": 60,
"disable_rollback": True,
"__param_DBUsername": "admin",
"__param_LinuxDistribution": "F17",
"__param_InstanceType": "m1.small",
"__param_KeyName": "test",
"__param_DBPassword": "admin",
"__param_DBRootPassword": "admin",
"__param_DBName": "wordpress",
'method': forms.StackCreateForm.__name__}
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_launch_stack_form_invalid_names_fail(self):
self._test_launch_stack_invalid_name('2_StartWithDigit')
self._test_launch_stack_invalid_name('_StartWithUnderscore')
self._test_launch_stack_invalid_name('.StartWithPoint')
def _test_launch_stack_invalid_name(self, name):
template = self.stack_templates.first()
url = reverse('horizon:project:stacks:launch')
form_data = {'template_source': 'raw',
'template_data': template.data,
'password': 'password',
'parameters': template.validate,
'stack_name': name,
"timeout_mins": 60,
"disable_rollback": True,
"__param_DBUsername": "admin",
"__param_LinuxDistribution": "F17",
"__param_InstanceType": "m1.small",
"__param_KeyName": "test",
"__param_DBPassword": "admin",
"__param_DBRootPassword": "admin",
"__param_DBName": "wordpress",
'method': forms.StackCreateForm.__name__}
res = self.client.post(url, form_data)
error = ('Name must start with a letter and may only contain letters, '
'numbers, underscores, periods and hyphens.')
self.assertFormErrors(res, 1)
self.assertFormError(res, "form", 'stack_name', error)
class TemplateFormTests(test.TestCase):
def test_exception_to_validation(self):
json_error = """{
"code": 400,
"error": {
"message": "The Key (none) could not be found.",
"traceback": "<Traceback>",
"type": "StackValidationFailed"
},
"explanation": "The server could not comply with the request",
"title": "Bad Request"
}"""
msg = forms.exception_to_validation_msg(json_error)
self.assertEqual(msg, "The Key (none) could not be found.")
def test_exception_to_validation_legacy(self):
json_error = """400 Bad Request
The server could not comply with the request since it is either \
malformed or otherwise incorrect.
Remote error: StackValidationFailed The Key (none) could not be found. \
[u'<Traceback>']."""
msg = forms.exception_to_validation_msg(json_error)
self.assertEqual(msg, "The Key (none) could not be found.")
def test_exception_to_validation_malformed(self):
json_error = """{
"code": 400,
"error": {
"traceback": "<Traceback>",
"type": "StackValidationFailed"
},
"explanation": "The server could not comply with the request",
"title": "Bad Request"
}"""
msg = forms.exception_to_validation_msg(json_error)
self.assertEqual(msg, None)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Grid Dynamics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import fixtures
from oslo.config import cfg
from inspect import getargspec
from nova import exception
from nova.openstack.common import uuidutils
from nova import test
from nova.tests import fake_processutils
from nova.tests.virt.libvirt import fake_libvirt_utils
from nova import unit
from nova.virt.libvirt import imagebackend
CONF = cfg.CONF
class _ImageTestCase(object):
INSTANCES_PATH = '/instances_path'
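    # Replace Image.create_image with a stub that simply invokes the fetch
    # function against the template path, so the cache() tests below only
    # exercise the path-existence logic.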
def mock_create_image(self, image):
def create_image(fn, base, size, *args, **kwargs):
fn(target=base, *args, **kwargs)
image.create_image = create_image
def setUp(self):
super(_ImageTestCase, self).setUp()
self.flags(disable_process_locking=True,
instances_path=self.INSTANCES_PATH)
self.INSTANCE = {'name': 'instance',
'uuid': uuidutils.generate_uuid()}
self.NAME = 'fake.vm'
self.TEMPLATE = 'template'
self.OLD_STYLE_INSTANCE_PATH = \
fake_libvirt_utils.get_instance_path(self.INSTANCE, forceold=True)
self.PATH = os.path.join(
fake_libvirt_utils.get_instance_path(self.INSTANCE), self.NAME)
# TODO(mikal): rename template_dir to base_dir and template_path
# to cached_image_path. This will be less confusing.
self.TEMPLATE_DIR = os.path.join(CONF.instances_path, '_base')
self.TEMPLATE_PATH = os.path.join(self.TEMPLATE_DIR, 'template')
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.imagebackend.libvirt_utils',
fake_libvirt_utils))
def test_cache(self):
self.mox.StubOutWithMock(os.path, 'exists')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
os.path.exists(self.PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
fn = self.mox.CreateMockAnything()
fn(target=self.TEMPLATE_PATH)
self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
self.mox.VerifyAll()
def test_cache_image_exists(self):
self.mox.StubOutWithMock(os.path, 'exists')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
os.path.exists(self.PATH).AndReturn(True)
os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.cache(None, self.TEMPLATE)
self.mox.VerifyAll()
def test_cache_base_dir_exists(self):
self.mox.StubOutWithMock(os.path, 'exists')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
os.path.exists(self.PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
fn = self.mox.CreateMockAnything()
fn(target=self.TEMPLATE_PATH)
self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
self.mox.VerifyAll()
def test_cache_template_exists(self):
self.mox.StubOutWithMock(os.path, 'exists')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
os.path.exists(self.PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
fn = self.mox.CreateMockAnything()
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
self.mox.VerifyAll()
def test_prealloc_image(self):
CONF.set_override('preallocate_images', 'space')
fake_processutils.fake_execute_clear_log()
fake_processutils.stub_out_processutils_execute(self.stubs)
image = self.image_class(self.INSTANCE, self.NAME)
def fake_fetch(target, *args, **kwargs):
return
self.stubs.Set(os.path, 'exists', lambda _: True)
self.stubs.Set(os, 'access', lambda p, w: True)
# Call twice to verify testing fallocate is only called once.
image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
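        # The capability probe (.fallocate_test) should be logged only once,
        # while the real fallocate runs for each cache() call.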
self.assertEqual(fake_processutils.fake_execute_get_log(),
['fallocate -n -l 1 %s.fallocate_test' % self.PATH,
'fallocate -n -l %s %s' % (self.SIZE, self.PATH),
'fallocate -n -l %s %s' % (self.SIZE, self.PATH)])
def test_prealloc_image_without_write_access(self):
CONF.set_override('preallocate_images', 'space')
fake_processutils.fake_execute_clear_log()
fake_processutils.stub_out_processutils_execute(self.stubs)
image = self.image_class(self.INSTANCE, self.NAME)
def fake_fetch(target, *args, **kwargs):
return
self.stubs.Set(image, 'check_image_exists', lambda: True)
self.stubs.Set(image, '_can_fallocate', lambda: True)
self.stubs.Set(os.path, 'exists', lambda _: True)
self.stubs.Set(os, 'access', lambda p, w: False)
# Testing fallocate is only called when user has write access.
image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
self.assertEqual(fake_processutils.fake_execute_get_log(), [])
class RawTestCase(_ImageTestCase, test.NoDBTestCase):
SIZE = 1024
def setUp(self):
self.image_class = imagebackend.Raw
super(RawTestCase, self).setUp()
self.stubs.Set(imagebackend.Raw, 'correct_format', lambda _: None)
def prepare_mocks(self):
fn = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(imagebackend.utils.synchronized,
'__call__')
self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image')
self.mox.StubOutWithMock(imagebackend.disk, 'extend')
return fn
def test_create_image(self):
fn = self.prepare_mocks()
fn(target=self.TEMPLATE_PATH, max_size=None, image_id=None)
imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, self.PATH)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, None, image_id=None)
self.mox.VerifyAll()
def test_create_image_generated(self):
fn = self.prepare_mocks()
fn(target=self.PATH)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, None)
self.mox.VerifyAll()
def test_create_image_extend(self):
fn = self.prepare_mocks()
fn(max_size=self.SIZE, target=self.TEMPLATE_PATH, image_id=None)
imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, self.PATH)
imagebackend.disk.extend(self.PATH, self.SIZE, use_cow=False)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, self.SIZE, image_id=None)
self.mox.VerifyAll()
def test_correct_format(self):
        self.stubs.UnsetAll()
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(imagebackend.images, 'qemu_img_info')
os.path.exists(self.PATH).AndReturn(True)
info = self.mox.CreateMockAnything()
info.file_format = 'foo'
imagebackend.images.qemu_img_info(self.PATH).AndReturn(info)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME, path=self.PATH)
self.assertEqual(image.driver_format, 'foo')
self.mox.VerifyAll()
class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
SIZE = unit.Gi
def setUp(self):
self.image_class = imagebackend.Qcow2
super(Qcow2TestCase, self).setUp()
self.QCOW2_BASE = (self.TEMPLATE_PATH +
'_%d' % (self.SIZE / unit.Gi))
def prepare_mocks(self):
fn = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(imagebackend.utils.synchronized,
'__call__')
self.mox.StubOutWithMock(imagebackend.libvirt_utils,
'create_cow_image')
self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image')
self.mox.StubOutWithMock(imagebackend.disk, 'extend')
return fn
def test_create_image(self):
fn = self.prepare_mocks()
fn(max_size=None, target=self.TEMPLATE_PATH)
imagebackend.libvirt_utils.create_cow_image(self.TEMPLATE_PATH,
self.PATH)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, None)
self.mox.VerifyAll()
def test_create_image_with_size(self):
fn = self.prepare_mocks()
fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
self.mox.StubOutWithMock(os.path, 'exists')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
os.path.exists(self.PATH).AndReturn(False)
os.path.exists(self.PATH).AndReturn(False)
imagebackend.libvirt_utils.create_cow_image(self.TEMPLATE_PATH,
self.PATH)
imagebackend.disk.extend(self.PATH, self.SIZE, use_cow=True)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
self.mox.VerifyAll()
def test_create_image_too_small(self):
fn = self.prepare_mocks()
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(imagebackend.disk, 'get_disk_size')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
imagebackend.disk.get_disk_size(self.TEMPLATE_PATH
).AndReturn(self.SIZE)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
self.assertRaises(exception.InstanceTypeDiskTooSmall,
image.create_image, fn, self.TEMPLATE_PATH, 1)
self.mox.VerifyAll()
def test_generate_resized_backing_files(self):
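        # Scenario: the qcow2 disk exists but its backing file is missing
        # from the cache, so it must be copied back from the template and
        # extended to the requested size.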
fn = self.prepare_mocks()
fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(imagebackend.libvirt_utils,
'get_disk_backing_file')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
os.path.exists(self.PATH).AndReturn(True)
imagebackend.libvirt_utils.get_disk_backing_file(self.PATH)\
.AndReturn(self.QCOW2_BASE)
os.path.exists(self.QCOW2_BASE).AndReturn(False)
imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH,
self.QCOW2_BASE)
imagebackend.disk.extend(self.QCOW2_BASE, self.SIZE, use_cow=True)
os.path.exists(self.PATH).AndReturn(True)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
self.mox.VerifyAll()
def test_qcow2_exists_and_has_no_backing_file(self):
fn = self.prepare_mocks()
fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(imagebackend.libvirt_utils,
'get_disk_backing_file')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
os.path.exists(self.PATH).AndReturn(True)
imagebackend.libvirt_utils.get_disk_backing_file(self.PATH)\
.AndReturn(None)
os.path.exists(self.PATH).AndReturn(True)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
self.mox.VerifyAll()
class LvmTestCase(_ImageTestCase, test.NoDBTestCase):
VG = 'FakeVG'
TEMPLATE_SIZE = 512
SIZE = 1024
def setUp(self):
self.image_class = imagebackend.Lvm
super(LvmTestCase, self).setUp()
self.flags(libvirt_images_volume_group=self.VG)
self.LV = '%s_%s' % (self.INSTANCE['name'], self.NAME)
self.OLD_STYLE_INSTANCE_PATH = None
self.PATH = os.path.join('/dev', self.VG, self.LV)
self.disk = imagebackend.disk
self.utils = imagebackend.utils
self.libvirt_utils = imagebackend.libvirt_utils
def prepare_mocks(self):
fn = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(self.disk, 'resize2fs')
self.mox.StubOutWithMock(self.libvirt_utils, 'create_lvm_image')
self.mox.StubOutWithMock(self.disk, 'get_disk_size')
self.mox.StubOutWithMock(self.utils, 'execute')
return fn
def _create_image(self, sparse):
fn = self.prepare_mocks()
fn(max_size=None, target=self.TEMPLATE_PATH)
self.libvirt_utils.create_lvm_image(self.VG,
self.LV,
self.TEMPLATE_SIZE,
sparse=sparse)
self.disk.get_disk_size(self.TEMPLATE_PATH
).AndReturn(self.TEMPLATE_SIZE)
cmd = ('qemu-img', 'convert', '-O', 'raw', self.TEMPLATE_PATH,
self.PATH)
self.utils.execute(*cmd, run_as_root=True)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, None)
self.mox.VerifyAll()
def _create_image_generated(self, sparse):
fn = self.prepare_mocks()
self.libvirt_utils.create_lvm_image(self.VG, self.LV,
self.SIZE, sparse=sparse)
fn(target=self.PATH, ephemeral_size=None)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH,
self.SIZE, ephemeral_size=None)
self.mox.VerifyAll()
def _create_image_resize(self, sparse):
fn = self.prepare_mocks()
fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
self.libvirt_utils.create_lvm_image(self.VG, self.LV,
self.SIZE, sparse=sparse)
self.disk.get_disk_size(self.TEMPLATE_PATH
).AndReturn(self.TEMPLATE_SIZE)
cmd = ('qemu-img', 'convert', '-O', 'raw', self.TEMPLATE_PATH,
self.PATH)
self.utils.execute(*cmd, run_as_root=True)
self.disk.resize2fs(self.PATH, run_as_root=True)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
self.mox.VerifyAll()
def test_create_image(self):
self._create_image(False)
def test_create_image_sparsed(self):
self.flags(libvirt_sparse_logical_volumes=True)
self._create_image(True)
def test_create_image_generated(self):
self._create_image_generated(False)
def test_create_image_generated_sparsed(self):
self.flags(libvirt_sparse_logical_volumes=True)
self._create_image_generated(True)
def test_create_image_resize(self):
self._create_image_resize(False)
def test_create_image_resize_sparsed(self):
self.flags(libvirt_sparse_logical_volumes=True)
self._create_image_resize(True)
def test_create_image_negative(self):
fn = self.prepare_mocks()
fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
self.libvirt_utils.create_lvm_image(self.VG,
self.LV,
self.SIZE,
sparse=False
).AndRaise(RuntimeError())
self.disk.get_disk_size(self.TEMPLATE_PATH
).AndReturn(self.TEMPLATE_SIZE)
self.mox.StubOutWithMock(self.libvirt_utils, 'remove_logical_volumes')
self.libvirt_utils.remove_logical_volumes(self.PATH)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
self.assertRaises(RuntimeError, image.create_image, fn,
self.TEMPLATE_PATH, self.SIZE)
self.mox.VerifyAll()
def test_create_image_generated_negative(self):
fn = self.prepare_mocks()
fn(target=self.PATH,
ephemeral_size=None).AndRaise(RuntimeError())
self.libvirt_utils.create_lvm_image(self.VG,
self.LV,
self.SIZE,
sparse=False)
self.mox.StubOutWithMock(self.libvirt_utils, 'remove_logical_volumes')
self.libvirt_utils.remove_logical_volumes(self.PATH)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
self.assertRaises(RuntimeError, image.create_image, fn,
self.TEMPLATE_PATH, self.SIZE,
ephemeral_size=None)
self.mox.VerifyAll()
def test_prealloc_image(self):
CONF.set_override('preallocate_images', 'space')
fake_processutils.fake_execute_clear_log()
fake_processutils.stub_out_processutils_execute(self.stubs)
image = self.image_class(self.INSTANCE, self.NAME)
def fake_fetch(target, *args, **kwargs):
return
self.stubs.Set(os.path, 'exists', lambda _: True)
self.stubs.Set(image, 'check_image_exists', lambda: True)
image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
self.assertEqual(fake_processutils.fake_execute_get_log(), [])
class RbdTestCase(_ImageTestCase, test.NoDBTestCase):
POOL = "FakePool"
USER = "FakeUser"
CONF = "FakeConf"
SIZE = 1024
def setUp(self):
self.image_class = imagebackend.Rbd
super(RbdTestCase, self).setUp()
self.flags(libvirt_images_rbd_pool=self.POOL)
self.flags(rbd_user=self.USER)
self.flags(libvirt_images_rbd_ceph_conf=self.CONF)
self.libvirt_utils = imagebackend.libvirt_utils
self.utils = imagebackend.utils
self.rbd = self.mox.CreateMockAnything()
self.rados = self.mox.CreateMockAnything()
def prepare_mocks(self):
fn = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(imagebackend, 'rbd')
self.mox.StubOutWithMock(imagebackend, 'rados')
return fn
def test_cache(self):
image = self.image_class(self.INSTANCE, self.NAME)
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(image, 'check_image_exists')
os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
image.check_image_exists().AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
fn = self.mox.CreateMockAnything()
fn(target=self.TEMPLATE_PATH)
self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
self.mox.ReplayAll()
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
self.mox.VerifyAll()
def test_cache_base_dir_exists(self):
image = self.image_class(self.INSTANCE, self.NAME)
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(image, 'check_image_exists')
os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
image.check_image_exists().AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
fn = self.mox.CreateMockAnything()
fn(target=self.TEMPLATE_PATH)
self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
self.mox.ReplayAll()
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
self.mox.VerifyAll()
def test_cache_image_exists(self):
image = self.image_class(self.INSTANCE, self.NAME)
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(image, 'check_image_exists')
os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
image.check_image_exists().AndReturn(True)
os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
self.mox.ReplayAll()
image.cache(None, self.TEMPLATE)
self.mox.VerifyAll()
def test_cache_template_exists(self):
image = self.image_class(self.INSTANCE, self.NAME)
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(image, 'check_image_exists')
os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
image.check_image_exists().AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
fn = self.mox.CreateMockAnything()
self.mox.ReplayAll()
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
self.mox.VerifyAll()
def test_create_image(self):
fn = self.prepare_mocks()
fn(max_size=None, rbd=self.rbd, target=self.TEMPLATE_PATH)
self.rbd.RBD_FEATURE_LAYERING = 1
self.mox.StubOutWithMock(imagebackend.disk, 'get_disk_size')
imagebackend.disk.get_disk_size(self.TEMPLATE_PATH
).AndReturn(self.SIZE)
rbd_name = "%s/%s" % (self.INSTANCE['name'], self.NAME)
cmd = ('--pool', self.POOL, self.TEMPLATE_PATH,
rbd_name, '--new-format', '--id', self.USER,
'--conf', self.CONF)
self.libvirt_utils.import_rbd_image(self.TEMPLATE_PATH, *cmd)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, None, rbd=self.rbd)
self.mox.VerifyAll()
def test_prealloc_image(self):
CONF.set_override('preallocate_images', 'space')
fake_processutils.fake_execute_clear_log()
fake_processutils.stub_out_processutils_execute(self.stubs)
self.mox.StubOutWithMock(imagebackend, 'rbd')
self.mox.StubOutWithMock(imagebackend, 'rados')
image = self.image_class(self.INSTANCE, self.NAME)
def fake_fetch(target, *args, **kwargs):
return
def fake_resize(rbd_name, size):
return
self.stubs.Set(os.path, 'exists', lambda _: True)
self.stubs.Set(image, 'check_image_exists', lambda: True)
image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
self.assertEqual(fake_processutils.fake_execute_get_log(), [])
def test_parent_compatible(self):
self.assertEqual(getargspec(imagebackend.Image.libvirt_info),
getargspec(self.image_class.libvirt_info))
class BackendTestCase(test.NoDBTestCase):
INSTANCE = {'name': 'fake-instance',
'uuid': uuidutils.generate_uuid()}
NAME = 'fake-name.suffix'
def get_image(self, use_cow, image_type):
return imagebackend.Backend(use_cow).image(self.INSTANCE,
self.NAME,
image_type)
def _test_image(self, image_type, image_not_cow, image_cow):
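        # image_not_cow is the backend class expected for use_cow=False,
        # image_cow the class expected for use_cow=True.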
image1 = self.get_image(False, image_type)
image2 = self.get_image(True, image_type)
def assertIsInstance(instance, class_object):
failure = ('Expected %s,' +
' but got %s.') % (class_object.__name__,
instance.__class__.__name__)
self.assertIsInstance(instance, class_object, msg=failure)
assertIsInstance(image1, image_not_cow)
assertIsInstance(image2, image_cow)
def test_image_raw(self):
self._test_image('raw', imagebackend.Raw, imagebackend.Raw)
def test_image_qcow2(self):
self._test_image('qcow2', imagebackend.Qcow2, imagebackend.Qcow2)
def test_image_lvm(self):
self.flags(libvirt_images_volume_group='FakeVG')
self._test_image('lvm', imagebackend.Lvm, imagebackend.Lvm)
def test_image_rbd(self):
conf = "FakeConf"
pool = "FakePool"
self.flags(libvirt_images_rbd_pool=pool)
self.flags(libvirt_images_rbd_ceph_conf=conf)
self._test_image('rbd', imagebackend.Rbd, imagebackend.Rbd)
def test_image_default(self):
self._test_image('default', imagebackend.Raw, imagebackend.Qcow2)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for 3d convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class BetaincTest(test.TestCase):
def _testBetaInc(self, a_s, b_s, x_s, dtype):
try:
from scipy import special # pylint: disable=g-import-not-at-top
np_dt = dtype.as_numpy_dtype
# Test random values
a_s = a_s.astype(np_dt) # in (0, infty)
b_s = b_s.astype(np_dt) # in (0, infty)
x_s = x_s.astype(np_dt) # in (0, 1)
tf_a_s = constant_op.constant(a_s, dtype=dtype)
tf_b_s = constant_op.constant(b_s, dtype=dtype)
tf_x_s = constant_op.constant(x_s, dtype=dtype)
tf_out_t = math_ops.betainc(tf_a_s, tf_b_s, tf_x_s)
with self.cached_session():
tf_out = self.evaluate(tf_out_t)
scipy_out = special.betainc(a_s, b_s, x_s, dtype=np_dt)
# the scipy version of betainc uses a double-only implementation.
# TODO(ebrevdo): identify reasons for (sometime) precision loss
# with doubles
rtol = 1e-4
atol = 1e-5
self.assertAllCloseAccordingToType(
scipy_out, tf_out, rtol=rtol, atol=atol)
# Test out-of-range values (most should return nan output)
combinations = list(itertools.product([-1, 0, 0.5, 1.0, 1.5], repeat=3))
a_comb, b_comb, x_comb = np.asarray(list(zip(*combinations)), dtype=np_dt)
with self.cached_session():
tf_comb = math_ops.betainc(a_comb, b_comb, x_comb).eval()
scipy_comb = special.betainc(a_comb, b_comb, x_comb, dtype=np_dt)
self.assertAllCloseAccordingToType(
scipy_comb, tf_comb, rtol=rtol, atol=atol)
# Test broadcasting between scalars and other shapes
with self.cached_session():
self.assertAllCloseAccordingToType(
special.betainc(0.1, b_s, x_s, dtype=np_dt),
math_ops.betainc(0.1, b_s, x_s).eval(),
rtol=rtol,
atol=atol)
self.assertAllCloseAccordingToType(
special.betainc(a_s, 0.1, x_s, dtype=np_dt),
math_ops.betainc(a_s, 0.1, x_s).eval(),
rtol=rtol,
atol=atol)
self.assertAllCloseAccordingToType(
special.betainc(a_s, b_s, 0.1, dtype=np_dt),
math_ops.betainc(a_s, b_s, 0.1).eval(),
rtol=rtol,
atol=atol)
self.assertAllCloseAccordingToType(
special.betainc(0.1, b_s, 0.1, dtype=np_dt),
math_ops.betainc(0.1, b_s, 0.1).eval(),
rtol=rtol,
atol=atol)
self.assertAllCloseAccordingToType(
special.betainc(0.1, 0.1, 0.1, dtype=np_dt),
math_ops.betainc(0.1, 0.1, 0.1).eval(),
rtol=rtol,
atol=atol)
with self.assertRaisesRegex(ValueError, "must be equal"):
math_ops.betainc(0.5, [0.5], [[0.5]])
with self.cached_session():
with self.assertRaisesOpError("Shapes of .* are inconsistent"):
a_p = array_ops.placeholder(dtype)
b_p = array_ops.placeholder(dtype)
x_p = array_ops.placeholder(dtype)
math_ops.betainc(a_p, b_p, x_p).eval(
feed_dict={a_p: 0.5,
b_p: [0.5],
x_p: [[0.5]]})
except ImportError as e:
tf_logging.warn("Cannot test special functions: %s" % str(e))
@test_util.run_deprecated_v1
def testBetaIncFloat(self):
a_s = np.abs(np.random.randn(10, 10) * 30) # in (0, infty)
b_s = np.abs(np.random.randn(10, 10) * 30) # in (0, infty)
x_s = np.random.rand(10, 10) # in (0, 1)
self._testBetaInc(a_s, b_s, x_s, dtypes.float32)
@test_util.run_deprecated_v1
def testBetaIncDouble(self):
a_s = np.abs(np.random.randn(10, 10) * 30) # in (0, infty)
b_s = np.abs(np.random.randn(10, 10) * 30) # in (0, infty)
x_s = np.random.rand(10, 10) # in (0, 1)
self._testBetaInc(a_s, b_s, x_s, dtypes.float64)
@test_util.run_deprecated_v1
def testBetaIncDoubleVeryLargeValues(self):
a_s = np.abs(np.random.randn(10, 10) * 1e15) # in (0, infty)
b_s = np.abs(np.random.randn(10, 10) * 1e15) # in (0, infty)
x_s = np.random.rand(10, 10) # in (0, 1)
self._testBetaInc(a_s, b_s, x_s, dtypes.float64)
@test_util.run_deprecated_v1
@test_util.disable_xla("b/178338235")
def testBetaIncDoubleVerySmallValues(self):
a_s = np.abs(np.random.randn(10, 10) * 1e-16) # in (0, infty)
b_s = np.abs(np.random.randn(10, 10) * 1e-16) # in (0, infty)
x_s = np.random.rand(10, 10) # in (0, 1)
self._testBetaInc(a_s, b_s, x_s, dtypes.float64)
@test_util.run_deprecated_v1
@test_util.disable_xla("b/178338235")
def testBetaIncFloatVerySmallValues(self):
a_s = np.abs(np.random.randn(10, 10) * 1e-8) # in (0, infty)
b_s = np.abs(np.random.randn(10, 10) * 1e-8) # in (0, infty)
x_s = np.random.rand(10, 10) # in (0, 1)
self._testBetaInc(a_s, b_s, x_s, dtypes.float32)
@test_util.run_deprecated_v1
def testBetaIncFpropAndBpropAreNeverNAN(self):
with self.cached_session() as sess:
space = np.logspace(-8, 5).tolist()
space_x = np.linspace(1e-16, 1 - 1e-16).tolist()
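      # Dense grid of (a, b, x) triples: a and b span many orders of
      # magnitude, while x stays strictly inside (0, 1).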
ga_s, gb_s, gx_s = zip(*list(itertools.product(space, space, space_x)))
# Test grads are never nan
ga_s_t = constant_op.constant(ga_s, dtype=dtypes.float32)
gb_s_t = constant_op.constant(gb_s, dtype=dtypes.float32)
gx_s_t = constant_op.constant(gx_s, dtype=dtypes.float32)
tf_gout_t = math_ops.betainc(ga_s_t, gb_s_t, gx_s_t)
tf_gout, grads_x = sess.run(
[tf_gout_t,
gradients_impl.gradients(tf_gout_t, [ga_s_t, gb_s_t, gx_s_t])[2]])
# Equivalent to `assertAllFalse` (if it existed).
self.assertAllEqual(
np.zeros_like(grads_x).astype(np.bool_), np.isnan(tf_gout))
self.assertAllEqual(
np.zeros_like(grads_x).astype(np.bool_), np.isnan(grads_x))
@test_util.run_deprecated_v1
def testBetaIncGrads(self):
err_tolerance = 1e-3
with self.cached_session():
# Test gradient
ga_s = np.abs(np.random.randn(2, 2) * 30) # in (0, infty)
gb_s = np.abs(np.random.randn(2, 2) * 30) # in (0, infty)
gx_s = np.random.rand(2, 2) # in (0, 1)
tf_ga_s = constant_op.constant(ga_s, dtype=dtypes.float64)
tf_gb_s = constant_op.constant(gb_s, dtype=dtypes.float64)
tf_gx_s = constant_op.constant(gx_s, dtype=dtypes.float64)
tf_gout_t = math_ops.betainc(tf_ga_s, tf_gb_s, tf_gx_s)
err = gradient_checker.compute_gradient_error(
[tf_gx_s], [gx_s.shape], tf_gout_t, gx_s.shape)
tf_logging.info("betainc gradient err = %g " % err)
self.assertLess(err, err_tolerance)
# Test broadcast gradient
gx_s = np.random.rand() # in (0, 1)
tf_gx_s = constant_op.constant(gx_s, dtype=dtypes.float64)
tf_gout_t = math_ops.betainc(tf_ga_s, tf_gb_s, tf_gx_s)
err = gradient_checker.compute_gradient_error(
[tf_gx_s], [()], tf_gout_t, ga_s.shape)
tf_logging.info("betainc gradient err = %g " % err)
self.assertLess(err, err_tolerance)
if __name__ == "__main__":
test.main()
|
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for hook customization."""
import stevedore
from nova import hooks
from nova import test
class SampleHookA(object):
name = "a"
def _add_called(self, op, kwargs):
called = kwargs.get('called', None)
if called is not None:
called.append(op + self.name)
def pre(self, *args, **kwargs):
self._add_called("pre", kwargs)
class SampleHookB(SampleHookA):
name = "b"
def post(self, rv, *args, **kwargs):
self._add_called("post", kwargs)
class SampleHookC(SampleHookA):
name = "c"
def pre(self, f, *args, **kwargs):
self._add_called("pre" + f.__name__, kwargs)
def post(self, f, rv, *args, **kwargs):
self._add_called("post" + f.__name__, kwargs)
class SampleHookExceptionPre(SampleHookA):
name = "epre"
exception = Exception()
def pre(self, f, *args, **kwargs):
raise self.exception
class SampleHookExceptionPost(SampleHookA):
name = "epost"
exception = Exception()
def post(self, f, rv, *args, **kwargs):
raise self.exception
class MockEntryPoint(object):
def __init__(self, cls):
self.cls = cls
def load(self):
return self.cls
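# The test cases below replace stevedore's entry point discovery with canned
# extension lists, so the hooks under test are fully controlled in-process.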
class MockedHookTestCase(test.BaseHookTestCase):
def _mock_load_plugins(self, iload, *iargs, **ikwargs):
return []
def setUp(self):
super(MockedHookTestCase, self).setUp()
hooks.reset()
self.stubs.Set(stevedore.extension.ExtensionManager, '_load_plugins',
self._mock_load_plugins)
class HookTestCase(MockedHookTestCase):
def _mock_load_plugins(self, iload, *iargs, **ikwargs):
return [
stevedore.extension.Extension('test_hook',
MockEntryPoint(SampleHookA), SampleHookA, SampleHookA()),
stevedore.extension.Extension('test_hook',
MockEntryPoint(SampleHookB), SampleHookB, SampleHookB()),
]
def setUp(self):
super(HookTestCase, self).setUp()
hooks.reset()
self.stubs.Set(stevedore.extension.ExtensionManager, '_load_plugins',
self._mock_load_plugins)
@hooks.add_hook('test_hook')
def _hooked(self, a, b=1, c=2, called=None):
return 42
def test_basic(self):
self.assertEqual(42, self._hooked(1))
mgr = hooks._HOOKS['test_hook']
self.assert_has_hook('test_hook', self._hooked)
self.assertEqual(2, len(mgr.extensions))
self.assertEqual(SampleHookA, mgr.extensions[0].plugin)
self.assertEqual(SampleHookB, mgr.extensions[1].plugin)
def test_order_of_execution(self):
called_order = []
self._hooked(42, called=called_order)
self.assertEqual(['prea', 'preb', 'postb'], called_order)
class HookTestCaseWithFunction(MockedHookTestCase):
def _mock_load_plugins(self, iload, *iargs, **ikwargs):
return [
stevedore.extension.Extension('function_hook',
MockEntryPoint(SampleHookC), SampleHookC, SampleHookC()),
]
@hooks.add_hook('function_hook', pass_function=True)
def _hooked(self, a, b=1, c=2, called=None):
return 42
def test_basic(self):
self.assertEqual(42, self._hooked(1))
mgr = hooks._HOOKS['function_hook']
self.assert_has_hook('function_hook', self._hooked)
self.assertEqual(1, len(mgr.extensions))
self.assertEqual(SampleHookC, mgr.extensions[0].plugin)
def test_order_of_execution(self):
called_order = []
self._hooked(42, called=called_order)
self.assertEqual(['pre_hookedc', 'post_hookedc'], called_order)
class HookFailPreTestCase(MockedHookTestCase):
def _mock_load_plugins(self, iload, *iargs, **ikwargs):
return [
stevedore.extension.Extension('fail_pre',
MockEntryPoint(SampleHookExceptionPre),
SampleHookExceptionPre, SampleHookExceptionPre()),
]
@hooks.add_hook('fail_pre', pass_function=True)
def _hooked(self, a, b=1, c=2, called=None):
return 42
def test_hook_fail_should_still_return(self):
self.assertEqual(42, self._hooked(1))
mgr = hooks._HOOKS['fail_pre']
self.assert_has_hook('fail_pre', self._hooked)
self.assertEqual(1, len(mgr.extensions))
self.assertEqual(SampleHookExceptionPre, mgr.extensions[0].plugin)
def test_hook_fail_should_raise_fatal(self):
self.stubs.Set(SampleHookExceptionPre, 'exception',
hooks.FatalHookException())
self.assertRaises(hooks.FatalHookException,
self._hooked, 1)
class HookFailPostTestCase(MockedHookTestCase):
def _mock_load_plugins(self, iload, *iargs, **ikwargs):
return [
stevedore.extension.Extension('fail_post',
MockEntryPoint(SampleHookExceptionPost),
SampleHookExceptionPost, SampleHookExceptionPost()),
]
@hooks.add_hook('fail_post', pass_function=True)
def _hooked(self, a, b=1, c=2, called=None):
return 42
def test_hook_fail_should_still_return(self):
self.assertEqual(42, self._hooked(1))
mgr = hooks._HOOKS['fail_post']
self.assert_has_hook('fail_post', self._hooked)
self.assertEqual(1, len(mgr.extensions))
self.assertEqual(SampleHookExceptionPost, mgr.extensions[0].plugin)
def test_hook_fail_should_raise_fatal(self):
self.stubs.Set(SampleHookExceptionPost, 'exception',
hooks.FatalHookException())
self.assertRaises(hooks.FatalHookException,
self._hooked, 1)
|
|
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fractal demo."""
from __future__ import with_statement
__author__ = '[email protected] (Kathryn Hurley)'
import json
import logging
import os
import time
import lib_path
import google_cloud.gce as gce
import google_cloud.gce_appengine as gce_appengine
import google_cloud.oauth as oauth
import jinja2
import oauth2client.appengine as oauth2client
import user_data
import webapp2
from google.appengine.api import urlfetch
DEMO_NAME = 'fractal'
CUSTOM_IMAGE = 'fractal-demo-image'
MACHINE_TYPE = 'n1-highcpu-2'
FIREWALL = 'www-fractal'
FIREWALL_DESCRIPTION = 'Fractal Demo Firewall'
GCE_SCOPE = 'https://www.googleapis.com/auth/compute'
HEALTH_CHECK_TIMEOUT = 1
VM_FILES = os.path.join(os.path.dirname(__file__), 'vm_files')
STARTUP_SCRIPT = os.path.join(VM_FILES, 'startup.sh')
GO_PROGRAM = os.path.join(VM_FILES, 'mandelbrot.go')
GO_ARGS = '--portBase=80 --numPorts=1'
GO_TILESERVER_FLAG = '--tileServers='
# TODO: Update these values with your project and LB IP/destinations.
LB_PROJECTS = {
'your-project': ['a.b.c.d'],
}
jinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader(''))
oauth_decorator = oauth.decorator
parameters = [
user_data.DEFAULTS[user_data.GCE_PROJECT_ID],
user_data.DEFAULTS[user_data.GCE_ZONE_NAME]
]
data_handler = user_data.DataHandler(DEMO_NAME, parameters)
class ServerVarsAggregator(object):
"""Aggregate stats across multiple servers and produce a summary."""
def __init__(self):
"""Constructor for ServerVarsAggregator."""
# A map of tile-size -> count
self.tile_counts = {}
# A map of tile-size -> time
self.tile_times = {}
# The uptime of the server that has been up and running the longest.
self.max_uptime = 0
def aggregate_vars(self, instance_vars):
"""Integrate instance_vars into the running aggregates.
Args:
      instance_vars: A parsed JSON object returned from /debug/vars.
"""
self._aggregate_map(instance_vars['tileCount'], self.tile_counts)
self._aggregate_map(instance_vars['tileTime'], self.tile_times)
self.max_uptime = max(self.max_uptime, instance_vars['uptime'])
def _aggregate_map(self, src_map, dest_map):
"""Aggregate one map from src_map into dest_map."""
for k, v in src_map.items():
dest_map[k] = dest_map.get(k, 0L) + long(v)
def get_aggregate(self):
"""Get the overall aggregate, including derived values."""
tile_time_avg = {}
result = {
'tileCount': self.tile_counts.copy(),
'tileTime': self.tile_times.copy(),
'tileTimeAvgMs': tile_time_avg,
'maxUptime': self.max_uptime,
}
for size, count in self.tile_counts.items():
time = self.tile_times.get(size, 0)
if time and count:
# Compute average tile time in milliseconds. The raw time is in
# nanoseconds.
tile_time_avg[size] = float(time / count) / float(1000*1000)
        logging.debug('tile-size: %s count: %d time: %d avg: %.2f',
                      size, count, time, tile_time_avg[size])
return result
class Fractal(webapp2.RequestHandler):
"""Fractal demo."""
@oauth_decorator.oauth_required
@data_handler.data_required
def get(self):
"""Show main page of Fractal demo."""
template = jinja_environment.get_template(
'demos/%s/templates/index.html' % DEMO_NAME)
gce_project_id = data_handler.stored_user_data[user_data.GCE_PROJECT_ID]
self.response.out.write(template.render({
'demo_name': DEMO_NAME,
'lb_enabled': gce_project_id in LB_PROJECTS,
}))
@oauth_decorator.oauth_required
@data_handler.data_required
def get_instances(self):
"""List instances.
Uses app engine app identity to retrieve an access token for the app
engine service account. No client OAuth required. External IP is used
to determine if the instance is actually running.
"""
gce_project = self._create_gce()
instances = gce_appengine.GceAppEngine().run_gce_request(
self,
gce_project.list_instances,
'Error listing instances: ',
filter='name eq ^%s-.*' % self.instance_prefix())
# A map of instanceName -> (ip, RPC)
health_rpcs = {}
# Convert instance info to dict and check server status.
num_running = 0
instance_dict = {}
if instances:
for instance in instances:
instance_record = {}
instance_dict[instance.name] = instance_record
if instance.status:
instance_record['status'] = instance.status
else:
instance_record['status'] = 'OTHER'
ip = None
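        # Find the first NAT'ed external IP across the instance's
        # network interfaces.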
for interface in instance.network_interfaces:
for config in interface.get('accessConfigs', []):
if 'natIP' in config:
ip = config['natIP']
instance_record['externalIp'] = ip
break
if ip: break
# Ping the instance server. Grab stats from /debug/vars.
if ip and instance.status == 'RUNNING':
num_running += 1
health_url = 'http://%s/debug/vars?t=%d' % (ip, int(time.time()))
logging.debug('Health checking %s', health_url)
          rpc = urlfetch.create_rpc(deadline=HEALTH_CHECK_TIMEOUT)
urlfetch.make_fetch_call(rpc, url=health_url)
health_rpcs[instance.name] = rpc
    # Ping through the LBs too. Only if we get success there do we know we are
# really serving.
loadbalancers = []
lb_rpcs = {}
if instances and len(instances) > 1:
loadbalancers = self._get_lb_servers(gce_project)
if num_running > 0 and loadbalancers:
for lb in loadbalancers:
health_url = 'http://%s/health?t=%d' % (lb, int(time.time()))
logging.debug('Health checking %s', health_url)
        rpc = urlfetch.create_rpc(deadline=HEALTH_CHECK_TIMEOUT)
urlfetch.make_fetch_call(rpc, url=health_url)
lb_rpcs[lb] = rpc
# wait for RPCs to complete and update dict as necessary
vars_aggregator = ServerVarsAggregator()
# TODO: there is significant duplication here. Refactor.
for (instance_name, rpc) in health_rpcs.items():
result = None
instance_record = instance_dict[instance_name]
try:
result = rpc.get_result()
if result and "memstats" in result.content:
logging.debug('%s healthy!', instance_name)
instance_record['status'] = 'SERVING'
instance_vars = {}
try:
instance_vars = json.loads(result.content)
instance_record['vars'] = instance_vars
vars_aggregator.aggregate_vars(instance_vars)
except ValueError as error:
logging.error('Error decoding vars json for %s: %s', instance_name, error)
else:
logging.debug('%s unhealthy. Content: %s', instance_name, result.content)
except urlfetch.Error as error:
logging.debug('%s unhealthy: %s', instance_name, str(error))
# Check health status through the load balancer.
loadbalancer_healthy = bool(lb_rpcs)
for (lb, lb_rpc) in lb_rpcs.items():
result = None
try:
result = lb_rpc.get_result()
if result and "ok" in result.content:
logging.info('LB %s healthy: %s\n%s', lb, result.headers, result.content)
else:
logging.info('LB %s result not okay: %s, %s', lb, result.status_code, result.content)
loadbalancer_healthy = False
break
except urlfetch.Error as error:
logging.info('LB %s fetch error: %s', lb, str(error))
loadbalancer_healthy = False
break
response_dict = {
'instances': instance_dict,
'vars': vars_aggregator.get_aggregate(),
'loadbalancers': loadbalancers,
'loadbalancer_healthy': loadbalancer_healthy,
}
self.response.headers['Content-Type'] = 'application/json'
self.response.out.write(json.dumps(response_dict))
@oauth_decorator.oauth_required
@data_handler.data_required
def set_instances(self):
"""Start/stop instances so we have the requested number running."""
gce_project = self._create_gce()
self._setup_firewall(gce_project)
image = self._get_image(gce_project)
disks = self._get_disks(gce_project)
# Get the list of instances to insert.
num_instances = int(self.request.get('num_instances'))
target = self._get_instance_list(
gce_project, num_instances, image, disks)
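    # Reconcile by instance name: diff the desired (target) set against the
    # currently running set, then bulk-insert what is missing and bulk-delete
    # what is no longer wanted.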
target_set = set()
target_map = {}
for instance in target:
target_set.add(instance.name)
target_map[instance.name] = instance
# Get the list of instances running
current = gce_appengine.GceAppEngine().run_gce_request(
self,
gce_project.list_instances,
'Error listing instances: ',
filter='name eq ^%s-.*' % self.instance_prefix())
current_set = set()
current_map = {}
for instance in current:
current_set.add(instance.name)
current_map[instance.name] = instance
# Add the new instances
to_add_set = target_set - current_set
to_add = [target_map[name] for name in to_add_set]
if to_add:
gce_appengine.GceAppEngine().run_gce_request(
self,
gce_project.bulk_insert,
'Error inserting instances: ',
resources=to_add)
# Remove the old instances
to_remove_set = current_set - target_set
to_remove = [current_map[name] for name in to_remove_set]
if to_remove:
gce_appengine.GceAppEngine().run_gce_request(
self,
gce_project.bulk_delete,
'Error deleting instances: ',
resources=to_remove)
logging.info("current_set: %s", current_set)
logging.info("target_set: %s", target_set)
logging.info("to_add_set: %s", to_add_set)
logging.info("to_remove_set: %s", to_remove_set)
@oauth_decorator.oauth_required
@data_handler.data_required
def cleanup(self):
"""Stop instances using the gce_appengine helper class."""
gce_project = self._create_gce()
gce_appengine.GceAppEngine().delete_demo_instances(
self, gce_project, self.instance_prefix())
def _get_lb_servers(self, gce_project):
return LB_PROJECTS.get(gce_project.project_id, [])
def instance_prefix(self):
"""Return a prefix based on a request/query params."""
tag = self.request.get('tag')
prefix = DEMO_NAME
if tag:
prefix = prefix + '-' + tag
return prefix
def _create_gce(self):
gce_project_id = data_handler.stored_user_data[user_data.GCE_PROJECT_ID]
gce_zone_name = data_handler.stored_user_data[user_data.GCE_ZONE_NAME]
return gce.GceProject(oauth_decorator.credentials,
project_id=gce_project_id,
zone_name=gce_zone_name)
def _setup_firewall(self, gce_project):
"Create the firewall if it doesn't exist."
firewalls = gce_project.list_firewalls()
firewall_names = [firewall.name for firewall in firewalls]
    if FIREWALL not in firewall_names:
firewall = gce.Firewall(
name=FIREWALL,
target_tags=[DEMO_NAME],
description=FIREWALL_DESCRIPTION)
gce_project.insert(firewall)
def _get_image(self, gce_project):
"""Returns the appropriate image to use. def _has_custom_image(self, gce_project):
Args:
gce_project: An instance of gce.GceProject
Returns: (project, image_name) for the image to use.
"""
images = gce_project.list_images(filter='name eq ^%s$' % CUSTOM_IMAGE)
if images:
return (gce_project.project_id, CUSTOM_IMAGE)
return ('google', None)
def _get_disks(self, gce_project):
"""Get boot disks for VMs."""
disks_array = gce_project.list_disks(
filter='name eq ^boot-%s-.*' % self.instance_prefix())
disks = {}
for d in disks_array:
disks[d.name] = d
return disks
def _get_instance_metadata(self, gce_project, instance_names):
"""The metadata values to pass into the instance."""
inline_values = {
'goargs': GO_ARGS,
}
file_values = {
'startup-script': STARTUP_SCRIPT,
'goprog': GO_PROGRAM,
}
# Try and use LBs if we have any. But only do that if we have more than one
# instance.
if instance_names:
tile_servers = ''
if len(instance_names) > 1:
tile_servers = self._get_lb_servers(gce_project)
if not tile_servers:
tile_servers = instance_names
tile_servers = ','.join(tile_servers)
      inline_values['goargs'] += ' %s%s' % (GO_TILESERVER_FLAG, tile_servers)
metadata = []
for k, v in inline_values.items():
metadata.append({'key': k, 'value': v})
for k, fv in file_values.items():
      with open(fv, 'r') as f:
        v = f.read()
metadata.append({'key': k, 'value': v})
return metadata
def _get_instance_list(self, gce_project, num_instances, image, disks):
"""Get a list of instances to start.
Args:
gce_project: An instance of gce.GceProject.
num_instances: The number of instances to start.
image: tuple with (project_name, image_name) for the image to use.
disks: A dictionary of disk_name -> disk resources
Returns:
A list of gce.Instances.
"""
instance_names = []
for i in range(num_instances):
instance_names.append('%s-%02d' % (self.instance_prefix(), i))
instance_list = []
for instance_name in instance_names:
disk_name = 'boot-%s' % instance_name
disk = disks.get(disk_name, None)
disk_mounts = []
image_project_id = None
image_name = None
kernel = None
if disk:
dm = gce.DiskMount(disk=disk, boot=True)
kernel = gce_project.settings['compute']['kernel']
disk_mounts.append(dm)
else:
image_project_id, image_name = image
instance = gce.Instance(
name=instance_name,
machine_type_name=MACHINE_TYPE,
image_name=image_name,
image_project_id=image_project_id,
disk_mounts=disk_mounts,
kernel=kernel,
tags=[DEMO_NAME, self.instance_prefix()],
metadata=self._get_instance_metadata(gce_project, instance_names),
service_accounts=gce_project.settings['cloud_service_account'])
instance_list.append(instance)
return instance_list
app = webapp2.WSGIApplication(
[
('/%s' % DEMO_NAME, Fractal),
webapp2.Route('/%s/instance' % DEMO_NAME,
handler=Fractal, handler_method='get_instances',
methods=['GET']),
webapp2.Route('/%s/instance' % DEMO_NAME,
handler=Fractal, handler_method='set_instances',
methods=['POST']),
webapp2.Route('/%s/cleanup' % DEMO_NAME,
handler=Fractal, handler_method='cleanup',
methods=['POST']),
(data_handler.url_path, data_handler.data_handler),
], debug=True)
|
|
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Swaminathan Vasudevan, Hewlett-Packard.
import netaddr
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from neutron.common import constants as n_constants
from neutron.db import api as qdbapi
from neutron.db import db_base_plugin_v2 as base_db
from neutron.db import l3_agentschedulers_db as l3_agent_db
from neutron.db import l3_db
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import vpnaas
from neutron import manager
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron.plugins.common import utils
LOG = logging.getLogger(__name__)
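# Minimum MTUs required by the IP specifications: 68 bytes for IPv4 (RFC 791)
# and 1280 bytes for IPv6 (RFC 2460).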
IP_MIN_MTU = {4: 68, 6: 1280}
class IPsecPeerCidr(model_base.BASEV2):
"""Internal representation of a IPsec Peer Cidrs."""
cidr = sa.Column(sa.String(32), nullable=False, primary_key=True)
ipsec_site_connection_id = sa.Column(
sa.String(36),
sa.ForeignKey('ipsec_site_connections.id',
ondelete="CASCADE"),
primary_key=True)
class IPsecPolicy(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 IPsecPolicy Object."""
__tablename__ = 'ipsecpolicies'
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
transform_protocol = sa.Column(sa.Enum("esp", "ah", "ah-esp",
name="ipsec_transform_protocols"),
nullable=False)
auth_algorithm = sa.Column(sa.Enum("sha1",
name="vpn_auth_algorithms"),
nullable=False)
encryption_algorithm = sa.Column(sa.Enum("3des", "aes-128",
"aes-256", "aes-192",
name="vpn_encrypt_algorithms"),
nullable=False)
encapsulation_mode = sa.Column(sa.Enum("tunnel", "transport",
name="ipsec_encapsulations"),
nullable=False)
lifetime_units = sa.Column(sa.Enum("seconds", "kilobytes",
name="vpn_lifetime_units"),
nullable=False)
lifetime_value = sa.Column(sa.Integer, nullable=False)
pfs = sa.Column(sa.Enum("group2", "group5", "group14",
name="vpn_pfs"), nullable=False)
class IKEPolicy(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 IKEPolicy Object."""
__tablename__ = 'ikepolicies'
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
auth_algorithm = sa.Column(sa.Enum("sha1",
name="vpn_auth_algorithms"),
nullable=False)
encryption_algorithm = sa.Column(sa.Enum("3des", "aes-128",
"aes-256", "aes-192",
name="vpn_encrypt_algorithms"),
nullable=False)
phase1_negotiation_mode = sa.Column(sa.Enum("main",
name="ike_phase1_mode"),
nullable=False)
lifetime_units = sa.Column(sa.Enum("seconds", "kilobytes",
name="vpn_lifetime_units"),
nullable=False)
lifetime_value = sa.Column(sa.Integer, nullable=False)
ike_version = sa.Column(sa.Enum("v1", "v2", name="ike_versions"),
nullable=False)
pfs = sa.Column(sa.Enum("group2", "group5", "group14",
name="vpn_pfs"), nullable=False)
class IPsecSiteConnection(model_base.BASEV2,
models_v2.HasId, models_v2.HasTenant):
"""Represents a IPsecSiteConnection Object."""
__tablename__ = 'ipsec_site_connections'
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
peer_address = sa.Column(sa.String(255), nullable=False)
peer_id = sa.Column(sa.String(255), nullable=False)
route_mode = sa.Column(sa.String(8), nullable=False)
mtu = sa.Column(sa.Integer, nullable=False)
initiator = sa.Column(sa.Enum("bi-directional", "response-only",
name="vpn_initiators"), nullable=False)
auth_mode = sa.Column(sa.String(16), nullable=False)
psk = sa.Column(sa.String(255), nullable=False)
dpd_action = sa.Column(sa.Enum("hold", "clear",
"restart", "disabled",
"restart-by-peer", name="vpn_dpd_actions"),
nullable=False)
dpd_interval = sa.Column(sa.Integer, nullable=False)
dpd_timeout = sa.Column(sa.Integer, nullable=False)
status = sa.Column(sa.String(16), nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
vpnservice_id = sa.Column(sa.String(36),
sa.ForeignKey('vpnservices.id'),
nullable=False)
ipsecpolicy_id = sa.Column(sa.String(36),
sa.ForeignKey('ipsecpolicies.id'),
nullable=False)
ikepolicy_id = sa.Column(sa.String(36),
sa.ForeignKey('ikepolicies.id'),
nullable=False)
ipsecpolicy = orm.relationship(
IPsecPolicy, backref='ipsec_site_connection')
ikepolicy = orm.relationship(IKEPolicy, backref='ipsec_site_connection')
peer_cidrs = orm.relationship(IPsecPeerCidr,
backref='ipsec_site_connection',
lazy='joined',
cascade='all, delete, delete-orphan')
class VPNService(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 VPNService Object."""
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
status = sa.Column(sa.String(16), nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id'),
nullable=False)
router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id'),
nullable=False)
subnet = orm.relationship(models_v2.Subnet)
router = orm.relationship(l3_db.Router)
ipsec_site_connections = orm.relationship(
IPsecSiteConnection,
backref='vpnservice',
cascade="all, delete-orphan")
class VPNPluginDb(vpnaas.VPNPluginBase, base_db.CommonDbMixin):
"""VPN plugin database class using SQLAlchemy models."""
def __init__(self):
"""Do the initialization for the vpn service plugin here."""
qdbapi.register_models()
def update_status(self, context, model, v_id, status):
with context.session.begin(subtransactions=True):
v_db = self._get_resource(context, model, v_id)
v_db.update({'status': status})
def _get_resource(self, context, model, v_id):
try:
r = self._get_by_id(context, model, v_id)
except exc.NoResultFound:
with excutils.save_and_reraise_exception(reraise=False) as ctx:
if issubclass(model, IPsecSiteConnection):
raise vpnaas.IPsecSiteConnectionNotFound(
ipsec_site_conn_id=v_id
)
elif issubclass(model, IKEPolicy):
raise vpnaas.IKEPolicyNotFound(ikepolicy_id=v_id)
elif issubclass(model, IPsecPolicy):
raise vpnaas.IPsecPolicyNotFound(ipsecpolicy_id=v_id)
elif issubclass(model, VPNService):
raise vpnaas.VPNServiceNotFound(vpnservice_id=v_id)
ctx.reraise = True
return r
def assert_update_allowed(self, obj):
status = getattr(obj, 'status', None)
_id = getattr(obj, 'id', None)
if utils.in_pending_status(status):
raise vpnaas.VPNStateInvalidToUpdate(id=_id, state=status)
def _make_ipsec_site_connection_dict(self, ipsec_site_conn, fields=None):
res = {'id': ipsec_site_conn['id'],
'tenant_id': ipsec_site_conn['tenant_id'],
'name': ipsec_site_conn['name'],
'description': ipsec_site_conn['description'],
'peer_address': ipsec_site_conn['peer_address'],
'peer_id': ipsec_site_conn['peer_id'],
'route_mode': ipsec_site_conn['route_mode'],
'mtu': ipsec_site_conn['mtu'],
'auth_mode': ipsec_site_conn['auth_mode'],
'psk': ipsec_site_conn['psk'],
'initiator': ipsec_site_conn['initiator'],
'dpd': {
'action': ipsec_site_conn['dpd_action'],
'interval': ipsec_site_conn['dpd_interval'],
'timeout': ipsec_site_conn['dpd_timeout']
},
'admin_state_up': ipsec_site_conn['admin_state_up'],
'status': ipsec_site_conn['status'],
'vpnservice_id': ipsec_site_conn['vpnservice_id'],
'ikepolicy_id': ipsec_site_conn['ikepolicy_id'],
'ipsecpolicy_id': ipsec_site_conn['ipsecpolicy_id'],
'peer_cidrs': [pcidr['cidr']
for pcidr in ipsec_site_conn['peer_cidrs']]
}
return self._fields(res, fields)
def create_ipsec_site_connection(self, context, ipsec_site_connection):
ipsec_sitecon = ipsec_site_connection['ipsec_site_connection']
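        # Flatten the nested 'dpd' dict into the flat column names used by
        # the model, applying defaults when the caller omits a value.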
dpd = ipsec_sitecon['dpd']
ipsec_sitecon['dpd_action'] = dpd.get('action', 'hold')
ipsec_sitecon['dpd_interval'] = dpd.get('interval', 30)
ipsec_sitecon['dpd_timeout'] = dpd.get('timeout', 120)
tenant_id = self._get_tenant_id_for_create(context, ipsec_sitecon)
self._check_dpd(ipsec_sitecon)
with context.session.begin(subtransactions=True):
            # Check permissions
self._get_resource(context,
VPNService,
ipsec_sitecon['vpnservice_id'])
self._get_resource(context,
IKEPolicy,
ipsec_sitecon['ikepolicy_id'])
self._get_resource(context,
IPsecPolicy,
ipsec_sitecon['ipsecpolicy_id'])
self._check_mtu(context,
ipsec_sitecon['mtu'],
ipsec_sitecon['vpnservice_id'])
ipsec_site_conn_db = IPsecSiteConnection(
id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=ipsec_sitecon['name'],
description=ipsec_sitecon['description'],
peer_address=ipsec_sitecon['peer_address'],
peer_id=ipsec_sitecon['peer_id'],
route_mode='static',
mtu=ipsec_sitecon['mtu'],
auth_mode='psk',
psk=ipsec_sitecon['psk'],
initiator=ipsec_sitecon['initiator'],
dpd_action=ipsec_sitecon['dpd_action'],
dpd_interval=ipsec_sitecon['dpd_interval'],
dpd_timeout=ipsec_sitecon['dpd_timeout'],
admin_state_up=ipsec_sitecon['admin_state_up'],
status=constants.PENDING_CREATE,
vpnservice_id=ipsec_sitecon['vpnservice_id'],
ikepolicy_id=ipsec_sitecon['ikepolicy_id'],
ipsecpolicy_id=ipsec_sitecon['ipsecpolicy_id']
)
context.session.add(ipsec_site_conn_db)
for cidr in ipsec_sitecon['peer_cidrs']:
peer_cidr_db = IPsecPeerCidr(
cidr=cidr,
ipsec_site_connection_id=ipsec_site_conn_db['id']
)
context.session.add(peer_cidr_db)
return self._make_ipsec_site_connection_dict(ipsec_site_conn_db)
def _check_dpd(self, ipsec_sitecon):
if ipsec_sitecon['dpd_timeout'] <= ipsec_sitecon['dpd_interval']:
raise vpnaas.IPsecSiteConnectionDpdIntervalValueError(
attr='dpd_timeout')
def _check_mtu(self, context, mtu, vpnservice_id):
vpn_service_db = self._get_vpnservice(context, vpnservice_id)
subnet = vpn_service_db.subnet['cidr']
version = netaddr.IPNetwork(subnet).version
if mtu < IP_MIN_MTU[version]:
raise vpnaas.IPsecSiteConnectionMtuError(mtu=mtu, version=version)
def update_ipsec_site_connection(
self, context,
ipsec_site_conn_id, ipsec_site_connection):
conn = ipsec_site_connection['ipsec_site_connection']
changed_peer_cidrs = False
with context.session.begin(subtransactions=True):
ipsec_site_conn_db = self._get_resource(
context,
IPsecSiteConnection,
ipsec_site_conn_id)
dpd = conn.get('dpd', {})
if dpd.get('action'):
conn['dpd_action'] = dpd.get('action')
if dpd.get('interval') or dpd.get('timeout'):
conn['dpd_interval'] = dpd.get(
'interval', ipsec_site_conn_db.dpd_interval)
conn['dpd_timeout'] = dpd.get(
'timeout', ipsec_site_conn_db.dpd_timeout)
self._check_dpd(conn)
if 'mtu' in conn:
self._check_mtu(context,
conn['mtu'],
ipsec_site_conn_db.vpnservice_id)
self.assert_update_allowed(ipsec_site_conn_db)
if "peer_cidrs" in conn:
changed_peer_cidrs = True
old_peer_cidr_list = ipsec_site_conn_db['peer_cidrs']
old_peer_cidr_dict = dict(
(peer_cidr['cidr'], peer_cidr)
for peer_cidr in old_peer_cidr_list)
new_peer_cidr_set = set(conn["peer_cidrs"])
old_peer_cidr_set = set(old_peer_cidr_dict)
new_peer_cidrs = list(new_peer_cidr_set)
for peer_cidr in old_peer_cidr_set - new_peer_cidr_set:
context.session.delete(old_peer_cidr_dict[peer_cidr])
for peer_cidr in new_peer_cidr_set - old_peer_cidr_set:
pcidr = IPsecPeerCidr(
cidr=peer_cidr,
ipsec_site_connection_id=ipsec_site_conn_id)
context.session.add(pcidr)
del conn["peer_cidrs"]
if conn:
ipsec_site_conn_db.update(conn)
result = self._make_ipsec_site_connection_dict(ipsec_site_conn_db)
if changed_peer_cidrs:
result['peer_cidrs'] = new_peer_cidrs
return result
def delete_ipsec_site_connection(self, context, ipsec_site_conn_id):
with context.session.begin(subtransactions=True):
ipsec_site_conn_db = self._get_resource(
context, IPsecSiteConnection, ipsec_site_conn_id
)
context.session.delete(ipsec_site_conn_db)
def _get_ipsec_site_connection(
self, context, ipsec_site_conn_id):
return self._get_resource(
context, IPsecSiteConnection, ipsec_site_conn_id)
def get_ipsec_site_connection(self, context,
ipsec_site_conn_id, fields=None):
ipsec_site_conn_db = self._get_ipsec_site_connection(
context, ipsec_site_conn_id)
return self._make_ipsec_site_connection_dict(
ipsec_site_conn_db, fields)
def get_ipsec_site_connections(self, context, filters=None, fields=None):
return self._get_collection(context, IPsecSiteConnection,
self._make_ipsec_site_connection_dict,
filters=filters, fields=fields)
def update_ipsec_site_conn_status(self, context, conn_id, new_status):
with context.session.begin():
self._update_connection_status(context, conn_id, new_status, True)
def _update_connection_status(self, context, conn_id, new_status,
updated_pending):
"""Update the connection status, if changed.
        If the connection is not in a pending state, update the status
        unconditionally. If it is in a pending state, update it only when the
        caller indicates that a pending status has been processed
        (updated_pending is True).
"""
try:
conn_db = self._get_ipsec_site_connection(context, conn_id)
except vpnaas.IPsecSiteConnectionNotFound:
return
if not utils.in_pending_status(conn_db.status) or updated_pending:
conn_db.status = new_status
def _make_ikepolicy_dict(self, ikepolicy, fields=None):
res = {'id': ikepolicy['id'],
'tenant_id': ikepolicy['tenant_id'],
'name': ikepolicy['name'],
'description': ikepolicy['description'],
'auth_algorithm': ikepolicy['auth_algorithm'],
'encryption_algorithm': ikepolicy['encryption_algorithm'],
'phase1_negotiation_mode': ikepolicy['phase1_negotiation_mode'],
'lifetime': {
'units': ikepolicy['lifetime_units'],
'value': ikepolicy['lifetime_value'],
},
'ike_version': ikepolicy['ike_version'],
'pfs': ikepolicy['pfs']
}
return self._fields(res, fields)
def create_ikepolicy(self, context, ikepolicy):
ike = ikepolicy['ikepolicy']
tenant_id = self._get_tenant_id_for_create(context, ike)
        lifetime_info = ike.get('lifetime', {})
lifetime_units = lifetime_info.get('units', 'seconds')
lifetime_value = lifetime_info.get('value', 3600)
with context.session.begin(subtransactions=True):
ike_db = IKEPolicy(
id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=ike['name'],
description=ike['description'],
auth_algorithm=ike['auth_algorithm'],
encryption_algorithm=ike['encryption_algorithm'],
phase1_negotiation_mode=ike['phase1_negotiation_mode'],
lifetime_units=lifetime_units,
lifetime_value=lifetime_value,
ike_version=ike['ike_version'],
pfs=ike['pfs']
)
context.session.add(ike_db)
return self._make_ikepolicy_dict(ike_db)
def update_ikepolicy(self, context, ikepolicy_id, ikepolicy):
ike = ikepolicy['ikepolicy']
with context.session.begin(subtransactions=True):
ikepolicy = context.session.query(IPsecSiteConnection).filter_by(
ikepolicy_id=ikepolicy_id).first()
if ikepolicy:
raise vpnaas.IKEPolicyInUse(ikepolicy_id=ikepolicy_id)
ike_db = self._get_resource(context, IKEPolicy, ikepolicy_id)
if ike:
lifetime_info = ike.get('lifetime')
if lifetime_info:
if lifetime_info.get('units'):
ike['lifetime_units'] = lifetime_info['units']
if lifetime_info.get('value'):
ike['lifetime_value'] = lifetime_info['value']
ike_db.update(ike)
return self._make_ikepolicy_dict(ike_db)
def delete_ikepolicy(self, context, ikepolicy_id):
with context.session.begin(subtransactions=True):
ikepolicy = context.session.query(IPsecSiteConnection).filter_by(
ikepolicy_id=ikepolicy_id).first()
if ikepolicy:
raise vpnaas.IKEPolicyInUse(ikepolicy_id=ikepolicy_id)
ike_db = self._get_resource(context, IKEPolicy, ikepolicy_id)
context.session.delete(ike_db)
def get_ikepolicy(self, context, ikepolicy_id, fields=None):
ike_db = self._get_resource(context, IKEPolicy, ikepolicy_id)
return self._make_ikepolicy_dict(ike_db, fields)
def get_ikepolicies(self, context, filters=None, fields=None):
return self._get_collection(context, IKEPolicy,
self._make_ikepolicy_dict,
filters=filters, fields=fields)
def _make_ipsecpolicy_dict(self, ipsecpolicy, fields=None):
res = {'id': ipsecpolicy['id'],
'tenant_id': ipsecpolicy['tenant_id'],
'name': ipsecpolicy['name'],
'description': ipsecpolicy['description'],
'transform_protocol': ipsecpolicy['transform_protocol'],
'auth_algorithm': ipsecpolicy['auth_algorithm'],
'encryption_algorithm': ipsecpolicy['encryption_algorithm'],
'encapsulation_mode': ipsecpolicy['encapsulation_mode'],
'lifetime': {
'units': ipsecpolicy['lifetime_units'],
'value': ipsecpolicy['lifetime_value'],
},
'pfs': ipsecpolicy['pfs']
}
return self._fields(res, fields)
def create_ipsecpolicy(self, context, ipsecpolicy):
ipsecp = ipsecpolicy['ipsecpolicy']
tenant_id = self._get_tenant_id_for_create(context, ipsecp)
lifetime_info = ipsecp['lifetime']
lifetime_units = lifetime_info.get('units', 'seconds')
lifetime_value = lifetime_info.get('value', 3600)
with context.session.begin(subtransactions=True):
ipsecp_db = IPsecPolicy(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=ipsecp['name'],
description=ipsecp['description'],
transform_protocol=ipsecp['transform_'
'protocol'],
auth_algorithm=ipsecp['auth_algorithm'],
encryption_algorithm=ipsecp['encryption_'
'algorithm'],
encapsulation_mode=ipsecp['encapsulation_'
'mode'],
lifetime_units=lifetime_units,
lifetime_value=lifetime_value,
pfs=ipsecp['pfs'])
context.session.add(ipsecp_db)
return self._make_ipsecpolicy_dict(ipsecp_db)
def update_ipsecpolicy(self, context, ipsecpolicy_id, ipsecpolicy):
ipsecp = ipsecpolicy['ipsecpolicy']
with context.session.begin(subtransactions=True):
ipsecpolicy = context.session.query(IPsecSiteConnection).filter_by(
ipsecpolicy_id=ipsecpolicy_id).first()
if ipsecpolicy:
raise vpnaas.IPsecPolicyInUse(ipsecpolicy_id=ipsecpolicy_id)
ipsecp_db = self._get_resource(context,
IPsecPolicy,
ipsecpolicy_id)
if ipsecp:
lifetime_info = ipsecp.get('lifetime')
if lifetime_info:
if lifetime_info.get('units'):
ipsecp['lifetime_units'] = lifetime_info['units']
if lifetime_info.get('value'):
ipsecp['lifetime_value'] = lifetime_info['value']
ipsecp_db.update(ipsecp)
return self._make_ipsecpolicy_dict(ipsecp_db)
def delete_ipsecpolicy(self, context, ipsecpolicy_id):
with context.session.begin(subtransactions=True):
ipsecpolicy = context.session.query(IPsecSiteConnection).filter_by(
ipsecpolicy_id=ipsecpolicy_id).first()
if ipsecpolicy:
raise vpnaas.IPsecPolicyInUse(ipsecpolicy_id=ipsecpolicy_id)
ipsec_db = self._get_resource(context, IPsecPolicy, ipsecpolicy_id)
context.session.delete(ipsec_db)
def get_ipsecpolicy(self, context, ipsecpolicy_id, fields=None):
ipsec_db = self._get_resource(context, IPsecPolicy, ipsecpolicy_id)
return self._make_ipsecpolicy_dict(ipsec_db, fields)
def get_ipsecpolicies(self, context, filters=None, fields=None):
return self._get_collection(context, IPsecPolicy,
self._make_ipsecpolicy_dict,
filters=filters, fields=fields)
def _make_vpnservice_dict(self, vpnservice, fields=None):
res = {'id': vpnservice['id'],
'name': vpnservice['name'],
'description': vpnservice['description'],
'tenant_id': vpnservice['tenant_id'],
'subnet_id': vpnservice['subnet_id'],
'router_id': vpnservice['router_id'],
'admin_state_up': vpnservice['admin_state_up'],
'status': vpnservice['status']}
return self._fields(res, fields)
def _check_router(self, context, router_id):
l3_plugin = manager.NeutronManager.get_service_plugins().get(
constants.L3_ROUTER_NAT)
router = l3_plugin.get_router(context, router_id)
if not router.get(l3_db.EXTERNAL_GW_INFO):
raise vpnaas.RouterIsNotExternal(router_id=router_id)
def _check_subnet_id(self, context, router_id, subnet_id):
core_plugin = manager.NeutronManager.get_plugin()
ports = core_plugin.get_ports(
context,
filters={
'fixed_ips': {'subnet_id': [subnet_id]},
'device_id': [router_id]})
if not ports:
raise vpnaas.SubnetIsNotConnectedToRouter(
subnet_id=subnet_id,
router_id=router_id)
def create_vpnservice(self, context, vpnservice):
vpns = vpnservice['vpnservice']
tenant_id = self._get_tenant_id_for_create(context, vpns)
self._check_router(context, vpns['router_id'])
self._check_subnet_id(context, vpns['router_id'], vpns['subnet_id'])
with context.session.begin(subtransactions=True):
vpnservice_db = VPNService(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=vpns['name'],
description=vpns['description'],
subnet_id=vpns['subnet_id'],
router_id=vpns['router_id'],
admin_state_up=vpns['admin_state_up'],
status=constants.PENDING_CREATE)
context.session.add(vpnservice_db)
return self._make_vpnservice_dict(vpnservice_db)
def update_vpnservice(self, context, vpnservice_id, vpnservice):
vpns = vpnservice['vpnservice']
with context.session.begin(subtransactions=True):
vpns_db = self._get_resource(context, VPNService, vpnservice_id)
self.assert_update_allowed(vpns_db)
if vpns:
vpns_db.update(vpns)
return self._make_vpnservice_dict(vpns_db)
def delete_vpnservice(self, context, vpnservice_id):
with context.session.begin(subtransactions=True):
if context.session.query(IPsecSiteConnection).filter_by(
vpnservice_id=vpnservice_id
).first():
raise vpnaas.VPNServiceInUse(vpnservice_id=vpnservice_id)
vpns_db = self._get_resource(context, VPNService, vpnservice_id)
context.session.delete(vpns_db)
def _get_vpnservice(self, context, vpnservice_id):
return self._get_resource(context, VPNService, vpnservice_id)
def get_vpnservice(self, context, vpnservice_id, fields=None):
vpns_db = self._get_resource(context, VPNService, vpnservice_id)
return self._make_vpnservice_dict(vpns_db, fields)
def get_vpnservices(self, context, filters=None, fields=None):
return self._get_collection(context, VPNService,
self._make_vpnservice_dict,
filters=filters, fields=fields)
def check_router_in_use(self, context, router_id):
vpnservices = self.get_vpnservices(
context, filters={'router_id': [router_id]})
if vpnservices:
raise vpnaas.RouterInUseByVPNService(
router_id=router_id,
vpnservice_id=vpnservices[0]['id'])
class VPNPluginRpcDbMixin():
def _get_agent_hosting_vpn_services(self, context, host):
plugin = manager.NeutronManager.get_plugin()
agent = plugin._get_agent_by_type_and_host(
context, n_constants.AGENT_TYPE_L3, host)
if not agent.admin_state_up:
return []
query = context.session.query(VPNService)
query = query.join(IPsecSiteConnection)
query = query.join(IKEPolicy)
query = query.join(IPsecPolicy)
query = query.join(IPsecPeerCidr)
query = query.join(l3_agent_db.RouterL3AgentBinding,
l3_agent_db.RouterL3AgentBinding.router_id ==
VPNService.router_id)
query = query.filter(
l3_agent_db.RouterL3AgentBinding.l3_agent_id == agent.id)
return query
def update_status_by_agent(self, context, service_status_info_list):
"""Updating vpnservice and vpnconnection status.
:param context: context variable
:param service_status_info_list: list of status
The structure is
[{id: vpnservice_id,
status: ACTIVE|DOWN|ERROR,
updated_pending_status: True|False
ipsec_site_connections: {
ipsec_site_connection_id: {
status: ACTIVE|DOWN|ERROR,
updated_pending_status: True|False
}
}]
The agent will set updated_pending_status as True,
when agent update any pending status.
"""
with context.session.begin(subtransactions=True):
for vpnservice in service_status_info_list:
try:
vpnservice_db = self._get_vpnservice(
context, vpnservice['id'])
except vpnaas.VPNServiceNotFound:
LOG.warn(_('vpnservice %s in db is already deleted'),
vpnservice['id'])
continue
if (not utils.in_pending_status(vpnservice_db.status)
or vpnservice['updated_pending_status']):
vpnservice_db.status = vpnservice['status']
for conn_id, conn in vpnservice[
'ipsec_site_connections'].items():
self._update_connection_status(
context, conn_id, conn['status'],
conn['updated_pending_status'])
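# Illustrative sketch (not part of the original module): an example of the
# service_status_info_list payload that update_status_by_agent() above expects
# from the agent. All ids and status values here are made-up.
_EXAMPLE_SERVICE_STATUS_INFO_LIST = [{
    'id': 'aaaaaaaa-1111-2222-3333-444444444444',
    'status': 'ACTIVE',
    'updated_pending_status': True,
    'ipsec_site_connections': {
        'bbbbbbbb-5555-6666-7777-888888888888': {
            'status': 'DOWN',
            'updated_pending_status': False,
        },
    },
}]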
|
|
#!python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
from __future__ import print_function
__author__ = 'Alexei Ardyakov'
__version__ = '0.05'
__license__ = 'MIT'
import codecs
import os
import re
import sys
from functools import partial
from pyproj import Proj, transform
from scipy.optimize import leastsq
# osgeo package is optional (used for WKT output and projstring normalization)
try:
from osgeo import osr
except ImportError:
osr = False
PY3 = sys.version_info[0] >= 3
def to_str(s):
"""Converts byte or unicode string to bytes type assuming UTF-8 encoding"""
if s is None:
return None
if isinstance(s, str):
return s
if PY3 and isinstance(s, bytes):
return s.decode('utf-8')
elif not PY3 and isinstance(s, unicode):
return s.encode('utf-8')
raise ValueError('Cannot convert {0} to str'.format(s))
def refine_projstring(projstring):
"""Refines projstring using osgeo package"""
if osr:
srs = osr.SpatialReference()
srs.ImportFromProj4(to_str(projstring))
return srs.ExportToProj4()
return projstring
def target_func_template(points, src_proj, tgt_template, params):
"""Target function template (the real target function is a result
of partial application of the template with first 3 arguments known)
"""
tgt_proj = tgt_template.format(*params)
p1 = Proj(to_str(src_proj))
p2 = Proj(to_str(tgt_proj))
result = []
for pt in points:
if len(pt[0]) == 2:
tpt = transform(p1, p2, pt[0][0], pt[0][1])
elif len(pt[0]) == 3:
tpt = transform(p1, p2, pt[0][0], pt[0][1], pt[0][2])
else:
raise ValueError('Two or three coordinates expected')
result.append(pt[1][0] - tpt[0])
result.append(pt[1][1] - tpt[1])
if len(pt[0]) == 3 and len(pt[1]) == 3:
result.append(pt[0][2] - tpt[2])
return result
def find_params(src_proj, tgt_known, tgt_unknown, points):
"""Finds unknown params of target projection
using least squares method
"""
# Sorting params (some of them can have dot separated index)
param_list = []
for param_dict, is_known in ((tgt_known, True), (tgt_unknown, False)):
for k in param_dict.keys():
if '.' in k:
k1, k2 = k.split('.')
k2 = int(k2)
else:
k1, k2 = k, 0
param_list.append((k1, k2, param_dict[k], is_known))
param_list.sort()
# Constructing target projection template
start_values, var_names = [], []
tgt_template = ''
var_index = 0
for p in param_list:
if p[1] == 0:
tgt_template += ' +{0}'.format(p[0])
else:
tgt_template += ','
if p[3]: # Known value
if p[2] is not None:
if p[1] == 0:
tgt_template += '={0}'.format(p[2])
else:
tgt_template += '{0}'.format(p[2])
else: # Unknown value
start_values.append(p[2])
if p[1] == 0:
var_names.append(p[0])
tgt_template += '='
else:
var_names.append('{0}.{1}'.format(p[0], p[1]))
tgt_template += '{' + str(var_index) + '}'
var_index += 1
tgt_template = tgt_template.strip()
# Creating target function
tgt_func = partial(target_func_template,
points, src_proj, tgt_template)
# Solving the problem
x, cov_x, infodict, mesg, ier = leastsq(
tgt_func, start_values, ftol=1e-12, full_output=True)
# Formatting outputs
if ier not in (1, 2, 3, 4):
return None, None, None
result_projstring = refine_projstring(tgt_template.format(*x))
result_dict = dict(zip(var_names, x))
fvec = infodict['fvec']
residuals = []
i = 0
for pt in points:
if len(pt[0]) == 3 and len(pt[1]) == 3:
residuals.append(tuple(fvec[i:i + 3]))
i += 3
else:
residuals.append(tuple(fvec[i:i + 2]))
i += 2
return result_projstring, result_dict, residuals
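# Illustrative sketch (an addition, not part of the original script): how
# find_params() above is typically driven. The control points, the known
# target parameters and the initial guesses for the unknown ones are all
# hypothetical values chosen only to show the expected data shapes.
def _example_find_params():
    src_proj = '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'
    # Each point is ((source coords), (target coords), optional name).
    points = [
        ((30.30, 59.95), (310000.0, 6650000.0), 'pt1'),
        ((30.40, 59.90), (315500.0, 6644500.0), 'pt2'),
        ((30.50, 60.00), (321000.0, 6655500.0), 'pt3'),
    ]
    tgt_known = {'proj': 'tmerc', 'ellps': 'krass', 'units': 'm'}
    # Unknown parameters map to their initial guesses for leastsq().
    tgt_unknown = {'lon_0': 30.0, 'lat_0': 0.0, 'x_0': 300000.0, 'y_0': 0.0}
    return find_params(src_proj, tgt_known, tgt_unknown, points)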
def parse_arguments(argv):
"""Parses command line arguments of the program"""
src_params = []
known, unknown, options = {}, {}, {}
filename = None
parsing_target = False
for arg in argv[1:]:
if arg.startswith('-'):
splitarg = arg.split('=', 1)
if len(splitarg) == 2:
options[splitarg[0]] = splitarg[1]
else:
options[arg] = True
elif parsing_target:
if arg.startswith('+'):
param_re = re.compile(r'^\+([0-9a-zA-Z_]+)([=~].*)?$')
m = param_re.match(arg)
if not m:
raise ValueError('Invalid parameter: {0}'.format(arg))
pname, pvalue = m.groups()
if not pvalue:
known[pname] = None
else:
subvalues = pvalue.split(',')
for i, sv in enumerate(subvalues):
extpname = pname + ('.' + str(i) if i else '')
if sv.startswith('~'):
unknown[extpname] = float(sv[1:])
elif sv.startswith('=~'):
unknown[extpname] = float(sv[2:])
elif sv.startswith('='):
known[extpname] = sv[1:]
else:
known[extpname] = sv
else:
if filename:
raise ValueError('Multiple input files are not supported')
filename = arg
else:
if arg == '+to':
parsing_target = True
elif arg.startswith('+'):
src_params.append(arg)
else:
raise ValueError('Unexpected token: {0}'.format(arg))
if src_params:
src_proj = ' '.join(src_params)
else:
src_proj = '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'
return (src_proj, known, unknown, options, filename)
def parse_coord(s):
"""Parses a value of coordinate in decimal or DMS format"""
if s is None:
raise TypeError('Coordinate value is None')
ss = to_str(s).replace(',', '.')
try:
f = float(ss)
    except ValueError:
dms_re = re.compile(r'^([+-])?'
r'(?:(\d{0,3}(?:\.\d*)?)?d)?'
r"(?:(\d{0,2}(?:\.\d*)?)?')?"
r'(?:(\d{0,2}(?:\.\d*)?)?")?$')
m = dms_re.match(ss)
if not m:
raise ValueError('`{0}` is not a valid coordinate value'.format(s))
g = m.groups()
if g[1] in ('', None) and g[2] in ('', None) and g[3] in ('', None):
raise ValueError('`{0}` is not a valid coordinate value'.format(s))
f = 0
if g[1]:
f += float(g[1])
if g[2]:
mf = float(g[2])
if mf >= 60:
raise ValueError('Invalid value for minutes: {0}'.format(mf))
f += mf / 60.0
if g[3]:
sf = float(g[3])
if sf >= 60:
            raise ValueError('Invalid value for seconds: {0}'.format(sf))
f += sf / 3600.0
if g[0] == '-':
f = -f
return f
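# A few hand-checked examples for parse_coord() above; the helper below is an
# addition for documentation purposes only and is not called by the script.
#   parse_coord('59.95')        -> 59.95
#   parse_coord("30d15'")       -> 30.25 (30 degrees 15 minutes)
#   parse_coord('-55d45\'30"')  -> -(55 + 45/60 + 30/3600)
def _parse_coord_examples():
    assert abs(parse_coord('59.95') - 59.95) < 1e-12
    assert abs(parse_coord("30d15'") - 30.25) < 1e-12
    assert abs(parse_coord('-55d45\'30"') + 55 + 45 / 60.0 + 30 / 3600.0) < 1e-12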
def read_points(filename, encoding='utf-8'):
"""Reads points from a file"""
points = []
with codecs.open(filename, 'r', encoding) as fp:
for line in fp:
tokens = line.strip().split()
if not tokens or not tokens[0] or tokens[0].startswith('#'):
continue
number_count = len(tokens)
for i, t in enumerate(tokens):
try:
d = parse_coord(t)
                except ValueError:
number_count = i
break
number_count = min((number_count, 6))
if number_count == 5:
number_count = 4
if number_count < 4:
                raise ValueError(
                    'Not enough coordinate values in line: {0}'.format(
                        line.strip()))
tokens = line.strip().split(None, number_count)
if number_count == 4:
points.append((
tuple(map(parse_coord, tokens[0:2])),
tuple(map(parse_coord, tokens[2:4])),
tokens[4] if len(tokens) > 4 else '',
))
elif number_count == 6:
points.append((
tuple(map(parse_coord, tokens[0:3])),
tuple(map(parse_coord, tokens[3:6])),
tokens[6] if len(tokens) > 6 else '',
))
return points
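# Illustrative sketch of the input file format read_points() above expects
# (the numbers and names are hypothetical). Each non-comment line holds either
# two source + two target coordinates or three + three of them, optionally
# followed by a free-form point name; lines starting with '#' are skipped.
_EXAMPLE_POINTS_FILE = """\
# src_lon  src_lat  tgt_x      tgt_y      name
30.30      59.95    310000.0   6650000.0  pt1
30d24'     59d54'   315500.0   6644500.0  pt2 (DMS source coordinates)
"""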
def usage_help(program_name):
"""Returns usage help string"""
return ('Usage: {0} [--opts] +src_opts[=arg,] '
'+to +tgt_opts[=[~]arg,] filename'.format(program_name))
def format_residuals(points, residuals):
"""Returns the residuals as a text string"""
s = 'Residuals:\n'
for i, pt in enumerate(points):
r = residuals[i]
if len(r) == 2:
s += '{0}\t{1}\t\t{2}\n'.format(r[0], r[1], pt[2])
else:
s += '{0}\t{1}\t{2}\t{3}\n'.format(r[0], r[1], r[2], pt[2])
return s
def to_wkt(projstring, esri=False, pretty=False):
"""Returns projection parameters as well-known text"""
if not osr:
raise ImportError('Package GDAL not found')
srs = osr.SpatialReference()
srs.ImportFromProj4(to_str(projstring))
if esri:
srs.MorphToESRI()
return srs.ExportToPrettyWkt() if pretty else srs.ExportToWkt()
def generate_output(outfile, result_projstring, options, points, residuals):
"""Outputs results in specified format"""
if '--proj' in options or '--proj4' in options:
outfile.write(result_projstring)
elif '--wkt' in options:
outfile.write(to_wkt(result_projstring, pretty='--pretty' in options))
elif '--esri' in options:
outfile.write(
to_wkt(result_projstring, esri=True, pretty='--pretty' in options))
else:
outfile.write(result_projstring)
outfile.write('\n')
outfile.write(format_residuals(points, residuals))
outfile.write('\n')
def arg_main(argv, outfile):
"""The variant of main() that expects sys.argv and sys.stdout
as function arguments (for use in tests or wrapper scripts)
"""
src_proj, known, unknown, options, filename = parse_arguments(argv)
if len(unknown) == 0 or '-h' in options or '--help' in options:
outfile.write(usage_help(argv[0]))
outfile.write('\n')
return 0
encoding = options.get('--encoding', 'utf-8')
points = read_points(filename, encoding)
result_projstring, result_dict, residuals = find_params(
src_proj, known, unknown, points)
if result_projstring:
generate_output(outfile, result_projstring, options, points, residuals)
return 0
else:
if not(set(options.keys()) &
set(['--proj', '--proj4', '--wkt', '--esri',])):
outfile.write('Solution not found\n')
return 1
def main():
"""The script entry point used in setup.py"""
return arg_main(sys.argv, sys.stdout)
if __name__ == '__main__':
sys.exit(main())
|
|
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
import json
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from mcfw.properties import long_list_property, typed_property, bool_property, long_property, unicode_property, \
unicode_list_property, azzert
from mcfw.serialization import s_long, ds_long, register, s_bool, s_unicode, s_unicode_list, ds_bool, ds_unicode, \
ds_unicode_list, get_list_serializer, get_list_deserializer, s_long_list, ds_long_list
from rogerthat.dal.profile import get_user_profile
from rogerthat.models import UserProfile
from rogerthat.models.news import NewsItem
from rogerthat.models.properties.news import NewsItemStatistics
from rogerthat.rpc import users
from rogerthat.to import BaseButtonTO, TO
from rogerthat.to import KeyValueLongTO
from rogerthat.utils.app import get_app_user_tuple, get_app_id_from_app_user
from rogerthat.utils.service import remove_slash_default
class NewsSenderTO(object):
email = unicode_property('1')
name = unicode_property('2')
avatar_id = long_property('3')
def __init__(self, email=None, name=None, avatar_id=None):
self.email = email
self.name = name
self.avatar_id = avatar_id
def _serialize_news_sender(stream, sender):
s_long(stream, 1) # version
s_unicode(stream, sender.email)
s_unicode(stream, sender.name)
s_long(stream, sender.avatar_id)
def _deserialize_news_sender(stream):
_ = ds_long(stream) # version
sender = NewsSenderTO()
sender.email = ds_unicode(stream)
sender.name = ds_unicode(stream)
sender.avatar_id = ds_long(stream)
return sender
class NewsActionButtonTO(BaseButtonTO):
flow_params = unicode_property('101')
def __init__(self, id_=None, caption=None, action=None, flow_params=None):
super(NewsActionButtonTO, self).__init__(id_, caption, action)
self.flow_params = flow_params
@classmethod
def from_model(cls, model):
return cls(model.id, model.caption, model.action, model.flow_params)
def _serialize_news_button(stream, b):
s_unicode(stream, b.id)
s_unicode(stream, b.caption)
s_unicode(stream, b.action)
s_unicode(stream, b.flow_params)
def _deserialize_news_button(stream, version):
b = NewsActionButtonTO()
b.id = ds_unicode(stream)
b.caption = ds_unicode(stream)
b.action = ds_unicode(stream)
b.flow_params = ds_unicode(stream) if version > 1 else None
return b
_serialize_news_button_list = get_list_serializer(_serialize_news_button)
_deserialize_news_button_list = get_list_deserializer(_deserialize_news_button, True)
def _serialize_news_buttons(stream, buttons):
s_long(stream, 2) # version
_serialize_news_button_list(stream, buttons)
def _deserialize_news_buttons(stream):
version = ds_long(stream)
return _deserialize_news_button_list(stream, version)
class AppNewsInfoTO(object):
id = long_property('1')
version = long_property('2')
sort_timestamp = long_property('3')
sort_priority = long_property('4')
sender_email = unicode_property('5', default=None)
broadcast_type = unicode_property('6', default=None)
feed_name = unicode_property('7', default=None)
class NewsItemStatisticsTimeTO(TO):
timestamp = long_property('1')
amount = long_property('2')
def __init__(self, timestamp, amount):
self.timestamp = timestamp
self.amount = amount
class NewsItemStatisticsDetailsTO(TO):
age = typed_property('1', KeyValueLongTO, True) # key: age (e.g. 10 - 15), value: amount
gender = typed_property('2', KeyValueLongTO, True) # key: gender, value: amount
time = typed_property('3', NewsItemStatisticsTimeTO, True)
total = long_property('4')
def __init__(self, age=None, gender=None, time=None, total=0):
if age is None:
age = []
if gender is None:
gender = []
if time is None:
time = []
self.age = age
self.gender = gender
self.time = time
self.total = total
@classmethod
def from_model(cls, model, news_type, news_item_creation_timestamp):
"""
Args:
model (NewsItemStatistics)
news_type (unicode)
news_item_creation_timestamp (long)
"""
age = []
gender = []
time_to = []
for i, _ in enumerate(NewsItemStatistics.default_age_stats()):
start_age = i * 5
end_age = start_age + 5
age_label = u'%s - %s' % (start_age, end_age)
age_value = getattr(model, '%s_age' % news_type)[i]
age.append(KeyValueLongTO(age_label, age_value))
for i, _ in enumerate(NewsItemStatistics.default_gender_stats()):
gender_label = NewsItemStatistics.gender_translation_key(i)
gender_value = getattr(model, '%s_gender' % news_type)[i]
gender.append(KeyValueLongTO(gender_label, gender_value))
time_values = getattr(model, '%s_time' % news_type, [])
for hours_from_creation, time_value in enumerate(time_values):
dt = datetime.utcfromtimestamp(news_item_creation_timestamp) + relativedelta(hours=hours_from_creation)
timestamp = long(time.mktime(dt.utctimetuple()))
time_to.append(NewsItemStatisticsTimeTO(timestamp, time_value))
return cls(age, gender, time_to, getattr(model, '%s_total' % news_type))
class NewsItemStatisticsTO(TO):
app_id = unicode_property('1')
# following dicts could have type NewsItemStatisticsDetailsTO, but are dictionaries for enhanced performance
reached = typed_property('2', dict, False) # type: dict
rogered = typed_property('3', dict, False) # type: dict
action = typed_property('4', dict, False) # type: dict
followed = typed_property('5', dict, False) # type: dict
@classmethod
def from_model(cls, app_id, statistics, news_item_creation_timestamp):
"""
Args:
statistics (rogerthat.models.properties.news.NewsItemStatistics)
news_item_creation_timestamp (long)
"""
reached = NewsItemStatisticsDetailsTO.from_model(statistics, u'reached', news_item_creation_timestamp).to_dict()
rogered = NewsItemStatisticsDetailsTO.from_model(statistics, u'rogered', news_item_creation_timestamp).to_dict()
action = NewsItemStatisticsDetailsTO.from_model(statistics, u'action', news_item_creation_timestamp).to_dict()
followed = NewsItemStatisticsDetailsTO.from_model(statistics, u'followed',
news_item_creation_timestamp).to_dict()
return cls(app_id=app_id, reached=reached, rogered=rogered, action=action, followed=followed)
class NewsItemInternalStatistics(TO):
users_that_rogered = unicode_list_property('users_that_rogered')
total_reached = long_property('total_reached')
total_action = long_property('total_action')
total_followed = long_property('total_followed')
details = typed_property('details', NewsItemStatisticsTO, True)
def __init__(self, total_reached=0, users_that_rogered=None, total_action=0, total_followed=0, details=None):
super(NewsItemInternalStatistics, self).__init__(
users_that_rogered=users_that_rogered or [],
total_reached=total_reached,
total_action=total_action,
total_followed=total_followed,
details=details or []
)
class NewsTargetAudienceTO(object):
min_age = long_property('1', default=0)
max_age = long_property('2', default=200)
gender = long_property('3', default=UserProfile.GENDER_MALE_OR_FEMALE)
connected_users_only = bool_property('4', default=False)
class NewsFeedNameTO(TO):
app_id = unicode_property('1')
name = unicode_property('2')
def __init__(self, app_id=None, name=None):
self.app_id = app_id
self.name = name
@classmethod
def from_model(cls, model):
return cls(model.app_id, model.name)
class BaseNewsItemTO(object):
id = long_property('1')
sender = typed_property('2', NewsSenderTO, False)
title = unicode_property('3')
message = unicode_property('4')
image_url = unicode_property('5')
broadcast_type = unicode_property('6')
reach = long_property('7')
users_that_rogered = unicode_list_property('8')
buttons = typed_property('9', NewsActionButtonTO, True)
qr_code_content = unicode_property('10')
qr_code_caption = unicode_property('11')
version = long_property('12')
timestamp = long_property('13')
flags = long_property('14')
type = long_property('15')
def __init__(self, news_id=0, sender_email=None, sender_name=None, sender_avatar_id=0, title=None,
message=None, image_url=None, broadcast_type=None, reach=0, users_that_rogered=None, buttons=None,
qr_code_content=None, qr_code_caption=None, version=0, timestamp=0, flags=0, news_type=1):
if users_that_rogered is None:
users_that_rogered = []
if buttons is None:
buttons = []
self.id = news_id
if sender_email:
sender_email = remove_slash_default(users.User(sender_email)).email()
self.sender = NewsSenderTO(sender_email, sender_name, sender_avatar_id)
self.title = title
self.message = message
self.image_url = image_url
self.broadcast_type = broadcast_type
self.reach = reach
self.users_that_rogered = users_that_rogered
self.buttons = [NewsActionButtonTO.from_model(button) for button in buttons]
self.qr_code_content = qr_code_content
self.qr_code_caption = qr_code_caption
self.version = version
self.timestamp = timestamp
self.flags = flags
self.type = news_type
@classmethod
def from_model(cls, model, base_url, statistics):
# type: (NewsItem, unicode, NewsItemInternalStatistics) -> BaseNewsItemTO
from rogerthat.dal.service import get_service_identity
si = get_service_identity(model.sender)
return cls(model.id, si.service_identity_user.email(), si.name, si.avatarId, model.title, model.message,
model.image_url(base_url), model.broadcast_type, statistics.total_reached,
statistics.users_that_rogered, model.buttons, model.qr_code_content, model.qr_code_caption,
model.version, model.timestamp, model.flags, model.type)
class AppNewsItemTO(BaseNewsItemTO):
sort_timestamp = long_property('101')
sort_priority = long_property('102')
feed_name = unicode_property('103', default=None)
@classmethod
def from_news_item_to(cls, news_item_to, connections, app_user):
"""
Args:
news_item_to (NewsItemTO)
connections (tuple of (list of users.User))
app_user (users.User)
Returns:
app_news_item_to (AppNewsItemTO)
"""
to = cls()
to.id = news_item_to.id
to.sender = NewsSenderTO(news_item_to.sender.email, news_item_to.sender.name, news_item_to.sender.avatar_id)
to.title = news_item_to.title
to.message = news_item_to.message
to.image_url = news_item_to.image_url
to.broadcast_type = news_item_to.broadcast_type
to.reach = news_item_to.reach
to.users_that_rogered = []
for friend in connections[0]:
if friend.email() in news_item_to.users_that_rogered:
human_friend, _ = get_app_user_tuple(friend)
to.users_that_rogered.append(human_friend.email())
to.buttons = news_item_to.buttons
if news_item_to.qr_code_content:
try:
content = json.loads(news_item_to.qr_code_content)
content['u'] = app_user.email()
to.qr_code_content = u'%s' % json.dumps(content)
            except (ValueError, TypeError):
to.qr_code_content = news_item_to.qr_code_content
else:
to.qr_code_content = news_item_to.qr_code_content
to.qr_code_caption = news_item_to.qr_code_caption
to.version = news_item_to.version
to.timestamp = news_item_to.timestamp
to.flags = news_item_to.flags
to.type = news_item_to.type
app_id = get_app_id_from_app_user(app_user)
for feed_name in news_item_to.feed_names:
if feed_name.app_id == app_id:
to.feed_name = feed_name.name
break
else:
to.feed_name = None
to.sort_timestamp = news_item_to.sticky_until if news_item_to.sticky else news_item_to.timestamp
if news_item_to.sticky:
to.sort_priority = 10
else:
user_profile = get_user_profile(app_user)
if not NewsItem.match_target_audience_of_item(user_profile, news_item_to):
to.sort_priority = 45
elif to.users_that_rogered:
to.sort_priority = 20
elif users.User(news_item_to.sender.email) in connections[1]:
to.sort_priority = 30
else:
to.sort_priority = 40
return to
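# Summary of the sort_priority tiers assigned above: 10 for sticky items;
# otherwise 45 when the target audience does not match the user profile,
# 20 when one of the user's friends rogered the item, 30 when the item's
# sender is in the user's connections, and 40 in all other cases.
# NewsIdsListResultTO further below uses 50 for a user's very first fetch.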
class NewsItemTO(BaseNewsItemTO):
TYPE_NORMAL = u'NORMAL'
TYPE_QR_CODE = u'QR_CODE'
TYPES = (TYPE_NORMAL, TYPE_QR_CODE)
sticky = bool_property('101')
sticky_until = long_property('102')
app_ids = unicode_list_property('103')
rogered = bool_property('104')
scheduled_at = long_property('105')
published = bool_property('106')
statistics = typed_property('107', NewsItemStatisticsTO, True)
action_count = long_property('108')
follow_count = long_property('109')
target_audience = typed_property('110', NewsTargetAudienceTO, False)
role_ids = long_list_property('111', default=[])
tags = unicode_list_property('112')
feed_names = typed_property('113', NewsFeedNameTO, True)
def __init__(self, news_id=0, sender_email=None, sender_name=None, sender_avatar_id=0, title=None,
message=None, image_url=None, broadcast_type=None, reach=0, users_that_rogered=None, buttons=None,
qr_code_content=None, qr_code_caption=None, version=0, timestamp=0, flags=0, news_type=1,
sticky=False, sticky_until=0, app_ids=None, rogered=False, scheduled_at=0, published=False,
statistics=None, action_count=-1, follow_count=-1, target_audience=None, role_ids=None,
tags=None, feed_names=None):
if app_ids is None:
app_ids = []
if users_that_rogered is None:
users_that_rogered = []
if buttons is None:
buttons = []
if statistics is None:
statistics = []
if role_ids is None:
role_ids = []
if tags is None:
tags = []
if feed_names is None:
feed_names = []
super(NewsItemTO, self).__init__(news_id, sender_email, sender_name, sender_avatar_id, title,
message, image_url, broadcast_type, reach, users_that_rogered, buttons,
qr_code_content, qr_code_caption, version, timestamp, flags, news_type)
self.sticky = sticky
self.sticky_until = sticky_until
self.app_ids = app_ids
self.rogered = rogered
self.scheduled_at = scheduled_at
if scheduled_at:
self.timestamp = scheduled_at
self.published = published
self.statistics = statistics
self.action_count = action_count
self.follow_count = follow_count
self.target_audience = target_audience
self.role_ids = role_ids
self.tags = tags
self.feed_names = feed_names
def has_roles(self):
"""Check if this news item has any assigned roles."""
return len(self.role_ids) > 0
@classmethod
def from_model(cls, model, base_url, statistics=None):
# type: (NewsItem, unicode, NewsItemInternalStatistics) -> NewsItemTO
from rogerthat.dal.service import get_service_identity
si = get_service_identity(model.sender)
buttons = model.buttons.values() if model.buttons else []
if not statistics:
statistics = NewsItemInternalStatistics()
feed_names = []
if model.feeds:
for feed in model.feeds.values():
feed_name = NewsFeedNameTO.from_model(feed)
feed_names.append(feed_name)
# set the target audience
if model.target_audience_enabled:
target_audience = NewsTargetAudienceTO()
target_audience.min_age = model.target_audience_min_age
target_audience.max_age = model.target_audience_max_age
target_audience.gender = model.target_audience_gender
target_audience.connected_users_only = model.connected_users_only
else:
target_audience = None
sender_email = model.sender.email()
sender_name = si.name if si else u""
sender_avatar_id = si.avatarId if si else -1
return cls(model.id, sender_email, sender_name, sender_avatar_id, model.title, model.message,
model.image_url(base_url), model.broadcast_type, statistics.total_reached,
statistics.users_that_rogered, buttons, model.qr_code_content, model.qr_code_caption, model.version,
model.timestamp, model.flags, model.type, model.sticky, model.sticky_until, model.app_ids,
True if statistics.users_that_rogered else False, model.scheduled_at, model.published,
statistics.details, statistics.total_action, statistics.total_followed, target_audience,
model.role_ids, model.tags, feed_names)
def _serialize_news_target_audience(stream, target_audience, version):
if target_audience:
s_bool(stream, True)
s_long(stream, target_audience.min_age)
s_long(stream, target_audience.max_age)
s_long(stream, target_audience.gender)
if version >= 6:
s_bool(stream, target_audience.connected_users_only)
else:
s_bool(stream, False)
def _deserialize_news_target_audience(stream, version):
target_audience = None
if ds_bool(stream): # target audience enabled
target_audience = NewsTargetAudienceTO()
target_audience.min_age = ds_long(stream)
target_audience.max_age = ds_long(stream)
target_audience.gender = ds_long(stream)
if version >= 6:
target_audience.connected_users_only = ds_bool(stream)
return target_audience
def _serialize_news_item(stream, news_item):
version = 7
s_long(stream, version)
s_bool(stream, news_item.sticky)
s_long(stream, news_item.sticky_until)
_serialize_news_sender(stream, news_item.sender)
s_unicode_list(stream, news_item.app_ids)
s_long(stream, news_item.timestamp)
s_unicode(stream, news_item.title)
s_unicode(stream, news_item.message)
s_unicode(stream, news_item.image_url)
s_long(stream, news_item.type)
s_unicode(stream, news_item.broadcast_type)
s_long(stream, news_item.reach)
s_bool(stream, news_item.rogered)
s_unicode_list(stream, news_item.users_that_rogered)
_serialize_news_buttons(stream, news_item.buttons)
s_long(stream, news_item.id)
s_unicode(stream, news_item.qr_code_content)
s_unicode(stream, news_item.qr_code_caption)
s_long(stream, news_item.version) # this is different from the version above
s_long(stream, news_item.flags)
s_long(stream, news_item.scheduled_at)
s_bool(stream, news_item.published)
s_long(stream, news_item.action_count)
s_long(stream, news_item.follow_count)
_serialize_news_target_audience(stream, news_item.target_audience, version)
s_long_list(stream, news_item.role_ids)
s_unicode_list(stream, news_item.tags)
def _deserialize_news_item(stream):
version = ds_long(stream)
news_item = NewsItemTO()
news_item.sticky = ds_bool(stream)
news_item.sticky_until = ds_long(stream)
news_item.sender = _deserialize_news_sender(stream)
news_item.app_ids = ds_unicode_list(stream)
news_item.timestamp = ds_long(stream)
news_item.title = ds_unicode(stream)
news_item.message = ds_unicode(stream)
news_item.image_url = ds_unicode(stream)
news_item.type = ds_long(stream)
news_item.broadcast_type = ds_unicode(stream)
news_item.reach = ds_long(stream)
news_item.rogered = ds_bool(stream)
news_item.users_that_rogered = ds_unicode_list(stream)
news_item.buttons = _deserialize_news_buttons(stream)
news_item.id = ds_long(stream)
news_item.qr_code_content = ds_unicode(stream)
news_item.qr_code_caption = ds_unicode(stream)
news_item.version = ds_long(stream)
news_item.flags = ds_long(stream)
news_item.scheduled_at = ds_long(stream)
news_item.published = ds_bool(stream)
if version > 1:
news_item.action_count = ds_long(stream)
news_item.follow_count = ds_long(stream)
if version in (2, 3):
if ds_bool(stream):
news_item.flags |= NewsItem.FLAG_SILENT
if version >= 5:
news_item.target_audience = _deserialize_news_target_audience(stream, version)
news_item.role_ids = ds_long_list(stream)
if version >= 7:
news_item.tags = ds_unicode_list(stream)
return news_item
class NewsItemListResultTO(object):
result = typed_property('1', NewsItemTO, True)
cursor = unicode_property('2')
def __init__(self, news_items=None, cursor=None, base_url=None, statistics=None):
# type: (list[NewsItem], unicode, unicode, dict[int, NewsItemInternalStatistics]) -> None
if news_items is None:
news_items = []
if statistics is None:
statistics = {}
self.cursor = cursor
results = []
for news_item in news_items:
stats = statistics.get(news_item.id) or NewsItemInternalStatistics()
results.append(NewsItemTO.from_model(news_item, base_url, stats))
self.result = results
class NewsIdsListResultTO(object):
cursor = unicode_property('1')
result = typed_property('2', AppNewsInfoTO, True)
def __init__(self, cursor=None, news_items=None, connections=None, first_time=False, profile=None,
users_that_rogered=None):
# type: (unicode, list[NewsItem], tuple[list, list], bool, UserProfile, dict[int, list[unicode]]) -> None
if news_items is None:
news_items = []
if connections is None:
connections = ([], [])
if users_that_rogered is None:
users_that_rogered = {}
if cursor:
self.cursor = cursor if isinstance(cursor, unicode) else cursor.decode('utf-8')
else:
self.cursor = None
self.result = []
for news_item in news_items:
to = AppNewsInfoTO()
to.id = news_item.id
to.version = news_item.version
to.sort_timestamp = news_item.sort_timestamp
to.sort_priority = 50 if first_time else news_item.sort_priority(connections, profile,
users_that_rogered.get(news_item.id, []))
to.sender_email = remove_slash_default(news_item.sender).email()
to.broadcast_type = news_item.broadcast_type
to.feed_name = news_item.feed_name(get_app_id_from_app_user(profile.user))
self.result.append(to)
register(NewsSenderTO, _serialize_news_sender, _deserialize_news_sender)
register(NewsItemTO, _serialize_news_item, _deserialize_news_item)
class NewNewsRequestTO(object):
news_item = typed_property('1', AppNewsItemTO, False)
class NewNewsResponseTO(object):
pass
class DisableNewsRequestTO(object):
news_id = long_property('1')
def __init__(self, news_id=None):
self.news_id = news_id
class DisableNewsResponseTO(object):
pass
class GetNewsRequestTO(object):
cursor = unicode_property('1')
updated_since = long_property('2')
class GetNewsResponseTO(NewsIdsListResultTO):
pass
class GetNewsItemsRequestTO(object):
ids = long_list_property('1')
class GetNewsItemsResponseTO(object):
items = typed_property('1', AppNewsItemTO, True)
class SaveNewsStatisticsRequestTO(object):
news_ids = long_list_property('1')
type = unicode_property('2')
class SaveNewsStatisticsResponseTO(object):
pass
class NewsReadInfoTO(TO):
news_id = long_property('1')
app_ids = unicode_list_property('2')
read_count = long_property('3')
users_that_rogered = unicode_list_property('4')
@classmethod
def from_news_model(cls, model, statistics=None):
# type: (NewsItem, NewsItemInternalStatistics) -> NewsReadInfoTO
if not statistics:
statistics = NewsItemInternalStatistics()
return cls(news_id=model.id,
app_ids=model.app_ids or [],
read_count=statistics.total_reached,
users_that_rogered=statistics.users_that_rogered)
class NewsMobileConfigTO(object):
ip = unicode_property('1')
port = long_property('2')
@classmethod
def from_string(cls, settings_string):
to = cls()
parts = settings_string.split(":")
part_ip = [int(p) for p in parts[0].split('.')]
azzert(len(part_ip) == 4)
to.ip = parts[0].decode('unicode-escape')
to.port = int(parts[1])
return to
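# Example for from_string() above (the address is hypothetical):
#   NewsMobileConfigTO.from_string('192.168.1.10:8443')
# yields a config with ip == u'192.168.1.10' and port == 8443.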
|
|
"""Utilities shared by tests."""
import cgi
import contextlib
import gc
import email.parser
import http.client
import http.server
import json
import logging
import io
import os
import re
import ssl
import sys
import threading
import traceback
import urllib.parse
import asyncio
import aiohttp
from aiohttp import server
from aiohttp import helpers
def run_briefly(loop):
@asyncio.coroutine
def once():
pass
t = asyncio.Task(once(), loop=loop)
loop.run_until_complete(t)
@contextlib.contextmanager
def run_server(loop, *, listen_addr=('127.0.0.1', 0),
use_ssl=False, router=None):
properties = {}
transports = []
class HttpRequestHandler:
def __init__(self, addr):
if isinstance(addr, tuple):
host, port = addr
self.host = host
self.port = port
else:
self.host = host = 'localhost'
self.port = port = 0
self.address = addr
self._url = '{}://{}:{}'.format(
'https' if use_ssl else 'http', host, port)
def __getitem__(self, key):
return properties[key]
def __setitem__(self, key, value):
properties[key] = value
def url(self, *suffix):
return urllib.parse.urljoin(
self._url, '/'.join(str(s) for s in suffix))
class TestHttpServer(server.ServerHttpProtocol):
def connection_made(self, transport):
transports.append(transport)
super().connection_made(transport)
def handle_request(self, message, payload):
if properties.get('close', False):
return
if properties.get('noresponse', False):
yield from asyncio.sleep(99999)
for hdr, val in message.headers.items():
if (hdr == 'EXPECT') and (val == '100-continue'):
self.transport.write(b'HTTP/1.0 100 Continue\r\n\r\n')
break
if router is not None:
body = yield from payload.read()
rob = router(
self, properties, self.transport, message, body)
rob.dispatch()
else:
response = aiohttp.Response(self.writer, 200, message.version)
text = b'Test message'
response.add_header('Content-type', 'text/plain')
response.add_header('Content-length', str(len(text)))
response.send_headers()
response.write(text)
response.write_eof()
if use_ssl:
here = os.path.join(os.path.dirname(__file__), '..', 'tests')
keyfile = os.path.join(here, 'sample.key')
certfile = os.path.join(here, 'sample.crt')
sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext.load_cert_chain(certfile, keyfile)
else:
sslcontext = None
def run(loop, fut):
thread_loop = asyncio.new_event_loop()
asyncio.set_event_loop(thread_loop)
if isinstance(listen_addr, tuple):
host, port = listen_addr
server_coroutine = thread_loop.create_server(
lambda: TestHttpServer(keep_alive=0.5),
host, port, ssl=sslcontext)
else:
try:
os.unlink(listen_addr)
except FileNotFoundError:
pass
server_coroutine = thread_loop.create_unix_server(
lambda: TestHttpServer(keep_alive=0.5),
listen_addr, ssl=sslcontext)
server = thread_loop.run_until_complete(server_coroutine)
waiter = asyncio.Future(loop=thread_loop)
loop.call_soon_threadsafe(
fut.set_result, (thread_loop, waiter,
server.sockets[0].getsockname()))
try:
thread_loop.run_until_complete(waiter)
finally:
# call pending connection_made if present
run_briefly(thread_loop)
# close opened transports
for tr in transports:
tr.close()
run_briefly(thread_loop) # call close callbacks
server.close()
thread_loop.stop()
thread_loop.close()
gc.collect()
fut = asyncio.Future(loop=loop)
server_thread = threading.Thread(target=run, args=(loop, fut))
server_thread.start()
thread_loop, waiter, addr = loop.run_until_complete(fut)
try:
yield HttpRequestHandler(addr)
finally:
thread_loop.call_soon_threadsafe(waiter.set_result, None)
server_thread.join()
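def _example_run_server_usage(loop):
    """Illustrative sketch only (an addition, not used by the tests): drives
    the run_server() context manager defined above. The '/method/queue' path
    is hypothetical and assumes a Router subclass that registered it via
    Router.define(), such as the _ExampleRouter sketch further below."""
    with run_server(loop, router=_ExampleRouter) as httpd:
        url = httpd.url('method', 'queue')  # e.g. http://127.0.0.1:<port>/method/queue
        httpd['close'] = True               # shared properties dict seen by the handler
        return url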
class Router:
_response_version = "1.1"
_responses = http.server.BaseHTTPRequestHandler.responses
def __init__(self, srv, props, transport, message, payload):
# headers
self._headers = http.client.HTTPMessage()
for hdr, val in message.headers.items():
self._headers.add_header(hdr, val)
self._srv = srv
self._props = props
self._transport = transport
self._method = message.method
self._uri = message.path
self._version = message.version
self._compression = message.compression
self._body = payload
url = urllib.parse.urlsplit(self._uri)
self._path = url.path
self._query = url.query
@staticmethod
def define(rmatch):
def wrapper(fn):
f_locals = sys._getframe(1).f_locals
mapping = f_locals.setdefault('_mapping', [])
mapping.append((re.compile(rmatch), fn.__name__))
return fn
return wrapper
def dispatch(self): # pragma: no cover
for route, fn in self._mapping:
match = route.match(self._path)
if match is not None:
try:
return getattr(self, fn)(match)
except Exception:
out = io.StringIO()
traceback.print_exc(file=out)
self._response(500, out.getvalue())
return
return self._response(self._start_response(404))
def _start_response(self, code):
return aiohttp.Response(self._srv.writer, code)
def _response(self, response, body=None,
headers=None, chunked=False, write_body=None):
r_headers = {}
for key, val in self._headers.items():
key = '-'.join(p.capitalize() for p in key.split('-'))
r_headers[key] = val
encoding = self._headers.get('content-encoding', '').lower()
if 'gzip' in encoding: # pragma: no cover
cmod = 'gzip'
elif 'deflate' in encoding:
cmod = 'deflate'
else:
cmod = ''
resp = {
'method': self._method,
'version': '%s.%s' % self._version,
'path': self._uri,
'headers': r_headers,
'origin': self._transport.get_extra_info('addr', ' ')[0],
'query': self._query,
'form': {},
'compression': cmod,
'multipart-data': []
}
if body: # pragma: no cover
resp['content'] = body
else:
resp['content'] = self._body.decode('utf-8', 'ignore')
ct = self._headers.get('content-type', '').lower()
# application/x-www-form-urlencoded
if ct == 'application/x-www-form-urlencoded':
resp['form'] = urllib.parse.parse_qs(self._body.decode('latin1'))
# multipart/form-data
elif ct.startswith('multipart/form-data'): # pragma: no cover
out = io.BytesIO()
for key, val in self._headers.items():
out.write(bytes('{}: {}\r\n'.format(key, val), 'latin1'))
out.write(b'\r\n')
out.write(self._body)
out.write(b'\r\n')
out.seek(0)
message = email.parser.BytesParser().parse(out)
if message.is_multipart():
for msg in message.get_payload():
if msg.is_multipart():
logging.warning('multipart msg is not expected')
else:
key, params = cgi.parse_header(
msg.get('content-disposition', ''))
params['data'] = msg.get_payload()
params['content-type'] = msg.get_content_type()
cte = msg.get('content-transfer-encoding')
if cte is not None:
resp['content-transfer-encoding'] = cte
resp['multipart-data'].append(params)
body = json.dumps(resp, indent=4, sort_keys=True)
# default headers
hdrs = [('Connection', 'close'),
('Content-Type', 'application/json')]
if chunked:
hdrs.append(('Transfer-Encoding', 'chunked'))
else:
hdrs.append(('Content-Length', str(len(body))))
# extra headers
if headers:
hdrs.extend(headers.items())
if chunked:
response.enable_chunked_encoding()
# headers
response.add_headers(*hdrs)
response.send_headers()
# write payload
if write_body:
try:
write_body(response, body)
            except Exception:
return
else:
response.write(helpers.str_to_bytes(body))
response.write_eof()
# keep-alive
if response.keep_alive():
self._srv.keep_alive(True)
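# Illustrative sketch (an addition, not one of the original test routers):
# a minimal Router subclass showing how handlers are registered with the
# define() decorator and resolved by dispatch(). The route path is made up.
class _ExampleRouter(Router):
    @Router.define('^/method/([A-Za-z_]+)$')
    def echo_method(self, match):
        # Reply with the JSON request description that _response() builds.
        self._response(self._start_response(200))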
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import functools
import numpy as np
from collections import Counter
from federatedml.feature.instance import Instance
from federatedml.util import LOGGER
from federatedml.util import consts
def get_features_shape(data_instances):
    one_feature = data_instances.first()
    if one_feature is None:
        return None
    instance = one_feature[1]
    if instance is None:
        return None
    if type(instance.features).__name__ == consts.SPARSE_VECTOR:
        return instance.features.get_shape()
    else:
        return instance.features.shape[0]
def get_instance_shape(instance):
if instance is None:
return None
if type(instance.features).__name__ == consts.SPARSE_VECTOR:
return instance.features.get_shape()
else:
return instance.features.shape[0]
def max_abs_sample_weight_map_func(kv_iter):
max_weight = -1
for k, inst in kv_iter:
if np.abs(inst.weight) > max_weight:
max_weight = np.abs(inst.weight)
return max_weight
def max_sample_weight_cmp(v1, v2):
return v1 if v1 > v2 else v2
def get_max_sample_weight(data_inst_with_weight):
inter_rs = data_inst_with_weight.applyPartitions(max_abs_sample_weight_map_func)
max_weight = inter_rs.reduce(max_sample_weight_cmp)
return max_weight
def check_negative_sample_weight(kv_iterator):
for k, v in kv_iterator:
if isinstance(v, Instance) and v.weight is not None:
if v.weight < 0:
return True
return False
def header_alignment(data_instances, pre_header):
header = data_instances.schema["header"]
if len((set(header) & set(pre_header))) != len(pre_header):
raise ValueError("fit & transform data' header should be same")
if pre_header == header:
return data_instances
if len(pre_header) != len(header):
        LOGGER.warning(
            "header in prediction stage is a super-set of the training-stage header; "
            "prediction header size is {}, training header size is {}".format(
                len(header), len(pre_header)))
    else:
        LOGGER.warning("header in prediction stage will be re-ordered to match the header of the training stage")
header_idx_mapping = dict(zip(pre_header, [i for i in range(len(pre_header))]))
header_correct = {}
for i in range(len(header)):
col = header[i]
if col not in header_idx_mapping:
continue
header_correct[i] = header_idx_mapping[col]
def align_header(inst, header_pos=None):
if type(inst.features).__name__ == consts.SPARSE_VECTOR:
shape = len(header_pos)
new_data = {}
for k, v in inst.features.get_all_data():
if k not in header_pos:
continue
new_data[header_pos.get(k)] = v
inst_new = copy.deepcopy(inst)
inst_new.features.set_shape(shape)
inst_new.features.set_sparse_vector(new_data)
else:
col_order = [None] * len(header_pos)
for k, v in header_pos.items():
col_order[v] = k
inst_new = copy.deepcopy(inst)
inst_new.features = inst.features[col_order]
return inst_new
correct_schema = data_instances.schema
correct_schema["header"] = pre_header
data_instances = data_instances.mapValues(lambda inst: align_header(inst, header_pos=header_correct))
data_instances.schema = correct_schema
return data_instances
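# Illustrative example of the index mapping built by header_alignment() above
# (column names are hypothetical): with pre_header = ['x1', 'x2', 'x3'] and a
# prediction header ['x3', 'x1', 'x2', 'x9'], header_correct becomes
# {0: 2, 1: 0, 2: 1}; column 'x9' is dropped and every instance's features are
# re-ordered back to the training order ['x1', 'x2', 'x3'].
def _example_header_correct(pre_header, header):
    """Standalone sketch (added for illustration, not used by the code above)
    of the {prediction_index: training_index} mapping computed there."""
    header_idx_mapping = dict(zip(pre_header, range(len(pre_header))))
    return {i: header_idx_mapping[col]
            for i, col in enumerate(header) if col in header_idx_mapping}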
def get_data_shape(data):
one_feature = data.first()
if one_feature is not None:
return len(list(one_feature[1]))
else:
return None
def get_header(data_instances):
header = data_instances.schema.get('header') # ['x1', 'x2', 'x3' ... ]
return header
def is_empty_feature(data_instances):
shape_of_feature = get_features_shape(data_instances)
if shape_of_feature is None or shape_of_feature == 0:
return True
return False
def is_sparse_data(data_instance):
first_data = data_instance.first()
if type(first_data[1]).__name__ in ['ndarray', 'list', 'tuple']:
return False
data_feature = first_data[1].features
if type(data_feature).__name__ == "ndarray":
return False
else:
return True
def count_labels(data_instance):
def _count_labels(instances):
labels = set()
for idx, instance in instances:
label = instance.label
labels.add(label)
return labels
label_set = data_instance.applyPartitions(_count_labels)
label_set = label_set.reduce(lambda x1, x2: x1.union(x2))
return len(label_set)
# if len(label_set) != 2:
# return False
# return True
def with_weight(data_instances):
first_entry = data_instances.first()[1]
if isinstance(first_entry, Instance) and first_entry.weight is not None:
return True
return False
def get_class_dict(kv_iterator):
class_dict = {}
for _, inst in kv_iterator:
count = class_dict.get(inst.label, 0)
class_dict[inst.label] = count + 1
if len(class_dict.keys()) > consts.MAX_CLASSNUM:
raise ValueError("In Classify Task, max dif classes should be no more than %d" % (consts.MAX_CLASSNUM))
return class_dict
def get_label_count(data_instances):
class_weight = data_instances.mapPartitions(get_class_dict).reduce(
lambda x, y: dict(Counter(x) + Counter(y)))
return class_weight
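# A tiny, self-contained illustration (not part of FATE) of how the
# per-partition class dicts returned by get_class_dict are merged in
# get_label_count above: Counter addition sums the counts key by key.
def _demo_label_count_merge():
    partition_a = {0: 10, 1: 3}
    partition_b = {1: 5, 2: 1}
    merged = dict(Counter(partition_a) + Counter(partition_b))
    assert merged == {0: 10, 1: 8, 2: 1}
    return merged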
def get_predict_result_labels(data):
def _get_labels(score_inst):
labels = set()
for idx, result in score_inst:
true_label = result.features[0]
predict_label = result.features[1]
labels.add(true_label)
labels.add(predict_label)
return labels
label_set = data.applyPartitions(_get_labels)
label_set = label_set.reduce(lambda x1, x2: x1.union(x2))
if len(label_set) > consts.MAX_CLASSNUM:
raise ValueError("In Classify Task, max dif classes should be no more than %d" % (consts.MAX_CLASSNUM))
return label_set
def rubbish_clear(rubbish_list):
"""
    Temporary processing for resource recovery. This will be discarded in the next version because of our new resource recovery plan.
    Parameters
    ----------
    rubbish_list: list of Table, each Table in this list will be destroyed
"""
for r in rubbish_list:
try:
if r is None:
continue
r.destroy()
except Exception as e:
LOGGER.warning("destroy table error,:{}, but this can be ignored sometimes".format(e))
def check_with_inst_id(data_instances):
instance = data_instances.first()[1]
if isinstance(instance, Instance) and instance.with_inst_id:
return True
return False
def scale_sample_weight(data_instances):
data_count = data_instances.count()
def _sum_all_weight(kv_iterator):
weight_sum = 0
for _, v in kv_iterator:
weight_sum += v.weight
return weight_sum
total_weight = data_instances.mapPartitions(_sum_all_weight).reduce(lambda x, y: x + y)
# LOGGER.debug(f"weight_sum is : {total_weight}")
scale_factor = data_count / total_weight
# LOGGER.debug(f"scale factor is : {total_weight}")
def _replace_weight(instance):
new_weight = instance.weight * scale_factor
instance.set_weight(new_weight)
return instance
scaled_instances = data_instances.mapValues(lambda v: _replace_weight(v))
return scaled_instances
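# Minimal sketch (plain Python, no FATE table) of the rescaling rule used by
# scale_sample_weight above: after multiplying by count / sum(weights), the
# weights sum to the number of samples, i.e. their mean becomes 1.
def _demo_scale_sample_weight():
    weights = [0.5, 1.5, 2.0]
    scale_factor = len(weights) / sum(weights)
    scaled = [w * scale_factor for w in weights]
    assert abs(sum(scaled) - len(weights)) < 1e-12
    return scaled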
class DataStatistics(object):
def __init__(self):
self.multivariate_statistic_obj = None
def static_all_values(self, data_instances, static_col_indexes, is_sparse: bool = False):
if not is_sparse:
f = functools.partial(self.__dense_values_set,
static_col_indexes=static_col_indexes)
else:
f = functools.partial(self.__sparse_values_set,
static_col_indexes=static_col_indexes)
result_sets = data_instances.applyPartitions(f).reduce(self.__reduce_set_results)
result = [sorted(list(x)) for x in result_sets]
return result
@staticmethod
def __dense_values_set(instances, static_col_indexes: list):
result = [set() for _ in static_col_indexes]
for _, instance in instances:
for idx, col_index in enumerate(static_col_indexes):
value_set = result[idx]
value_set.add(instance.features[col_index])
return result
@staticmethod
def __sparse_values_set(instances, static_col_indexes: list):
tmp_result = {idx: set() for idx in static_col_indexes}
for _, instance in instances:
data_generator = instance.features.get_all_data()
for idx, value in data_generator:
if idx not in tmp_result:
continue
tmp_result[idx].add(value)
result = [tmp_result[x] for x in static_col_indexes]
return result
@staticmethod
def __reduce_set_results(result_set_a, result_set_b):
final_result_sets = []
for set_a, set_b in zip(result_set_a, result_set_b):
final_result_sets.append(set_a.union(set_b))
return final_result_sets
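# Hypothetical illustration (not part of FATE) of the reduce step used by
# DataStatistics.static_all_values: per-partition value sets are unioned
# column by column and then sorted.
def _demo_static_value_sets():
    partition_a = [{1.0, 2.0}, {0.0}]       # one set per statistic column
    partition_b = [{2.0, 3.0}, {1.0}]
    merged = [a.union(b) for a, b in zip(partition_a, partition_b)]
    result = [sorted(list(x)) for x in merged]
    assert result == [[1.0, 2.0, 3.0], [0.0, 1.0]]
    return result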
|
|
# Copyright 2016 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License'): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
A simple thread-safe timer queue implementation with O(log n) time complexity.
'''
import threading
import Queue
import traceback
import logging
from time import time
import sortedcontainers as sc
__all__ = ['Timer',
'TimerQueueStruct',
'TimerQueue']
class Timer(object):
'''Timer wraps the callback and timestamp related attributes.
:param callback: Arbitrary callable object.
:type callback: ``callable object``
:param when: The first expiration time, seconds since epoch.
:type when: ``integer``
:param interval: Timer interval, if equals 0, one time timer, otherwise
the timer will be periodically executed
:type interval: ``integer``
:param ident: (optional) Timer identity.
:type ident: ``integer``
'''
_ident = 0
_lock = threading.Lock()
def __init__(self, callback, when, interval, ident=None):
self._callback = callback
self.when = when
self.interval = interval
if ident is not None:
self.ident = ident
else:
with Timer._lock:
self.ident = Timer._ident + 1
Timer._ident = Timer._ident + 1
def update_expiration(self):
self.when += self.interval
def __cmp__(self, other):
if other is None:
return 1
self_k = (self.when, self.ident)
other_k = (other.when, other.ident)
if self_k == other_k:
return 0
elif self_k < other_k:
return -1
else:
return 1
def __eq__(self, other):
return isinstance(other, Timer) and (self.ident == other.ident)
def __call__(self):
self._callback()
TEARDOWN_SENTINEL = None
class TimerQueueStruct(object):
'''
The underlying data structure for TimerQueue
'''
def __init__(self):
self._timers = sc.SortedSet()
self._cancelling_timers = {}
def add_timer(self, callback, when, interval, ident):
''' Add timer to the data structure.
:param callback: Arbitrary callable object.
:type callback: ``callable object``
:param when: The first expiration time, seconds since epoch.
:type when: ``integer``
:param interval: Timer interval, if equals 0, one time timer, otherwise
the timer will be periodically executed
:type interval: ``integer``
:param ident: (optional) Timer identity.
:type ident: ``integer``
:returns: A timer object which should not be manipulated directly by
clients. Used to delete/update the timer
:rtype: ``solnlib.timer_queue.Timer``
'''
timer = Timer(callback, when, interval, ident)
self._timers.add(timer)
return timer
def remove_timer(self, timer):
''' Remove timer from data structure.
:param timer: Timer object which is returned by ``TimerQueueStruct.add_timer``.
:type timer: ``Timer``
'''
try:
self._timers.remove(timer)
except ValueError:
logging.info('Timer=%s is not in queue, move it to cancelling '
'list', timer.ident)
else:
self._cancelling_timers[timer.ident] = timer
def get_expired_timers(self):
''' Get a list of expired timers.
:returns: a list of ``Timer``, empty list if there is no expired
timers.
:rtype: ``list``
'''
next_expired_time = 0
now = time()
expired_timers = []
for timer in self._timers:
if timer.when <= now:
expired_timers.append(timer)
if expired_timers:
del self._timers[:len(expired_timers)]
if self._timers:
next_expired_time = self._timers[0].when
return (next_expired_time, expired_timers)
def reset_timers(self, expired_timers):
''' Re-add the expired periodical timers to data structure for next
round scheduling.
:returns: True if there are timers added, False otherwise.
:rtype: ``bool``
'''
has_new_timer = False
cancelling_timers = self._cancelling_timers
for timer in expired_timers:
if timer.ident in cancelling_timers:
                logging.info('Timer=%s has been cancelled', timer.ident)
continue
elif timer.interval:
# Repeated timer
timer.update_expiration()
self._timers.add(timer)
has_new_timer = True
cancelling_timers.clear()
return has_new_timer
def check_and_execute(self):
''' Get expired timers and execute callbacks for the timers.
:returns: duration of next expired timer.
:rtype: ``float``
'''
(next_expired_time, expired_timers) = self.get_expired_timers()
for timer in expired_timers:
try:
timer()
except Exception:
logging.error(traceback.format_exc())
self.reset_timers(expired_timers)
return _calc_sleep_time(next_expired_time)
class TimerQueue(object):
'''A simple timer queue implementation.
    It runs a separate thread to handle timers. Note: to use this timer queue
    effectively, the timer callback should be short, otherwise it will delay
    the execution of other timers. A typical production scenario is that the
    timers are just simple functions which inject themselves into a task
    queue and are then picked up by a threading/process pool for execution,
    as shown below:
Timers --enqueue---> TimerQueue --------expiration-----------
|
|
\|/
Threading/Process Pool <---- TaskQueue <--enqueue-- Timers' callback (nonblocking)
Usage::
>>> from solnlib import time_queue
>>> tq = time_queue.TimerQueue()
>>> tq.start()
>>> t = tq.add_timer(my_func, time.time(), 10)
>>> # do other stuff
>>> tq.stop()
'''
def __init__(self):
self._timers = TimerQueueStruct()
self._lock = threading.Lock()
self._wakeup_queue = Queue.Queue()
self._thr = threading.Thread(target=self._check_and_execute)
self._thr.daemon = True
self._started = False
def start(self):
'''Start the timer queue.
'''
if self._started:
return
self._started = True
self._thr.start()
logging.info('TimerQueue started.')
def stop(self):
'''Stop the timer queue.
'''
if not self._started:
return
        self._started = False
self._wakeup(TEARDOWN_SENTINEL)
self._thr.join()
def add_timer(self, callback, when, interval, ident=None):
''' Add timer to the queue.
:param callback: Arbitrary callable object.
:type callback: ``callable object``
:param when: The first expiration time, seconds since epoch.
:type when: ``integer``
:param interval: Timer interval, if equals 0, one time timer, otherwise
the timer will be periodically executed
:type interval: ``integer``
:param ident: (optional) Timer identity.
:type ident: ``integer``
:returns: A timer object which should not be manipulated directly by
clients. Used to delete/update the timer
'''
with self._lock:
timer = self._timers.add_timer(callback, when, interval, ident)
self._wakeup()
return timer
def remove_timer(self, timer):
''' Remove timer from the queue.
:param timer: Timer object which is returned by ``TimerQueue.add_timer``.
:type timer: ``Timer``
'''
with self._lock:
self._timers.remove_timer(timer)
def _check_and_execute(self):
wakeup_queue = self._wakeup_queue
while 1:
(next_expired_time, expired_timers) = self._get_expired_timers()
for timer in expired_timers:
try:
# Note, please make timer callback effective/short
timer()
except Exception:
logging.error(traceback.format_exc())
self._reset_timers(expired_timers)
sleep_time = _calc_sleep_time(next_expired_time)
try:
wakeup = wakeup_queue.get(timeout=sleep_time)
if wakeup is TEARDOWN_SENTINEL:
break
except Queue.Empty:
pass
logging.info('TimerQueue stopped.')
def _get_expired_timers(self):
with self._lock:
return self._timers.get_expired_timers()
def _reset_timers(self, expired_timers):
with self._lock:
has_new_timer = self._timers.reset_timers(expired_timers)
if has_new_timer:
self._wakeup()
def _wakeup(self, something='not_None'):
self._wakeup_queue.put(something)
def _calc_sleep_time(next_expired_time):
if next_expired_time:
now = time()
if now < next_expired_time:
sleep_time = next_expired_time - now
else:
sleep_time = 0.1
else:
sleep_time = 1
return sleep_time
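# A small, hypothetical sketch (not part of solnlib) showing how the sorted
# (when, ident) ordering makes expired timers sit at the front of the set.
# It uses the classes defined above and assumes a Python 2 runtime with
# sortedcontainers installed, matching the `Queue` import at the top.
def _demo_expired_timer_extraction():
    import time as _time
    struct = TimerQueueStruct()
    later = struct.add_timer(lambda: None, _time.time() + 60, 0, ident=2)
    due = struct.add_timer(lambda: None, _time.time() - 1, 0, ident=1)
    next_when, expired = struct.get_expired_timers()
    assert expired == [due] and next_when == later.when
    struct.reset_timers(expired)   # one-shot timer (interval 0) is not re-added
    return expired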
|
|
#!/usr/bin/env python3
"""
Without argument: run all the regression tests.
About the tests:
* They are stored as python files in the subdirectories.
* The first line must be an explanation of the test.
* Errors (must be True) defines an Error that must be corrected.
* Warning (must be True) defines something that should be corrected;
  once corrected, it must be redefined as an Error.
"""
import os, sys, re, tempfile, subprocess, json
import wsgiref, wsgiref.simple_server
sys.path.append('../pythonjs')
import typedpython
if 'NODE_PATH' not in os.environ:
os.environ['NODE_PATH'] = '/usr/local/lib/node_modules/'
tmpname = os.path.join(tempfile.gettempdir(), "xxx_regtest")
print("Temporary files are stored into '%s...'" % tmpname)
print()
show_details = len(sys.argv) > 1
# List of valid filenames in the parameters
argv = [os.path.abspath(name)
for name in sys.argv[1:]
if os.path.exists(name)
]
__sandbox = {
'mycollection' : range(10)
}
__clients = {} ## keeps track of iterator indices
def httpd_reply( env, start_response ):
path = env['PATH_INFO']
host = env['HTTP_HOST']
client = env['REMOTE_ADDR']
arg = env['QUERY_STRING']
if client not in __clients:
__clients[ client ] = {}
length = 0
if 'CONTENT_LENGTH' in env: length = int(env['CONTENT_LENGTH'])
data = env['wsgi.input'].read( length ).decode('utf-8')
#print('http_reply ->', path, host, client, arg, data)
msg = json.loads( data )
res = ''
if 'call' in msg:
assert 'args' in msg
if msg['call'] == 'concat':
res = ''.join( msg['args'] )
elif msg['call'] == 'add':
res = msg['args'][0] + msg['args'][1]
else:
raise NotImplementedError( msg )
elif 'iter' in msg:
name = msg['iter']
assert name in __sandbox
if name not in __clients[ client ]:
__clients[ client ][name] = 0
index = __clients[ client ][name]
iterable = __sandbox[name]
if index == len(iterable):
index = 0
res = '__STOP_ITERATION__'
else:
res = iterable[ index ]
index += 1
__clients[ client ][name] = index
elif 'set' in msg:
__sandbox[ msg['set'] ] = msg['value']
elif 'get' in msg:
res = __sandbox[ msg['get'] ]
else:
raise NotImplementedError( msg )
start_response( '200 OK', [] )
return [ json.dumps(res).encode('utf-8') ]
httpd = wsgiref.simple_server.make_server( 'localhost', 8080, httpd_reply )
import threading
thread_id = threading._start_new_thread( httpd.serve_forever, ())
def runnable(command):
## this fails with lua5.1 "lua -v"
#"""Returns True is the standard out of the command display something"""
#f = os.popen(command, "r")
#output = f.read()
#f.close()
#return output != ''
try:
subprocess.check_call( command.split() )
return True
except OSError:
return False
def run_pypy_test_on(filename):
"""PyPy"""
write("%s.py" % tmpname, patch_python(filename, python='PYPY'))
return run_command("%s %s.py %s" % (pypy_exe, tmpname, display_errors))
def run_old_pypy_test_on(filename):
"""PyPy 1.9"""
write("%s.py" % tmpname, patch_python(filename, python='PYPY'))
return run_command("%s %s.py %s" % (old_pypy_exe, tmpname, display_errors))
old_pypy_runnable = pypy_runnable = False
old_pypy_exe = pypy_exe = None
if os.path.isfile( os.path.expanduser('~/pypy-2.3.1-linux64/bin/pypy') ):
pypy_runnable = True
pypy_exe = os.path.expanduser('~/pypy-2.3.1-linux64/bin/pypy')
run_pypy_test_on.__doc__ = 'PyPy 2.3.1'
elif os.path.isfile( os.path.expanduser('~/pypy-2.2-linux64/bin/pypy') ):
pypy_runnable = True
pypy_exe = os.path.expanduser('~/pypy-2.2-linux64/bin/pypy')
run_pypy_test_on.__doc__ = 'PyPy 2.2'
elif runnable( 'pypy --help' ):
pypy_runnable = True
pypy_exe = 'pypy'
if os.path.isfile( os.path.expanduser('~/pypy-1.9/bin/pypy') ) and '--old-pypy' in sys.argv:
old_pypy_runnable = True
old_pypy_exe = os.path.expanduser('~/pypy-1.9/bin/pypy')
webclgl = []
if os.path.isdir( os.path.expanduser('~/webclgl') ):
#webclgl.append( open( os.path.expanduser('~/webclgl/WebCLGL_2.0.Min.class.js'), 'rb').read().decode('utf-8') )
webclgl.append( open( os.path.expanduser('~/webclgl/WebCLGLUtils.class.js'), 'rb').read().decode('utf-8') )
webclgl.append( open( os.path.expanduser('~/webclgl/WebCLGLBuffer.class.js'), 'rb').read().decode('utf-8') )
webclgl.append( open( os.path.expanduser('~/webclgl/WebCLGLKernel.class.js'), 'rb').read().decode('utf-8') )
webclgl.append( open( os.path.expanduser('~/webclgl/WebCLGL.class.js'), 'rb').read().decode('utf-8') )
## rhino is not run by default because it simply freezes up on maximum callstack errors
rhino_runnable = '--rhino' in sys.argv and runnable("rhino -e 'quit()'")
node_runnable = runnable("node --help")
## sudo npm install nodewebkit -g
## nodewebkit npm package is broken? https://github.com/shama/nodewebkit/issues/31
#nodewebkit = '/usr/local/lib/node_modules/nodewebkit/bin/nodewebkit'
## download https://github.com/rogerwang/node-webkit/releases/tag/nw-v0.9.2
## and extract to your home directory.
nodewebkit_runnable = False
nodewebkit = os.path.expanduser('~/node-webkit-v0.10.0-rc1-linux-x64/nw')
if os.path.isfile( nodewebkit ): nodewebkit_runnable = True
else:
nodewebkit = os.path.expanduser('~/node-webkit-v0.9.2-linux-x64/nw')
if os.path.isfile( nodewebkit ): nodewebkit_runnable = True
else:
nodewebkit = os.path.expanduser('~/node-webkit-v0.9.1-linux-x64/nw')
if os.path.isfile( nodewebkit ): nodewebkit_runnable = True
else:
nodewebkit = os.path.expanduser('~/node-webkit-v0.8.4-linux-x64/nw')
if os.path.isfile( nodewebkit ): nodewebkit_runnable = True
if not show_details or '--no-nodewebkit' in sys.argv:
nodewebkit_runnable = False
#dart2js = os.path.expanduser( '~/dart-sdk-1.0/dart-sdk/bin/dart2js') ## TODO support dart-sdk-1.3+
dart2js = os.path.expanduser( '~/dart-sdk/bin/dart2js') # tested with dart 1.3
dart2js_runnable = runnable( dart2js + ' -h' ) and '--dart2js' in sys.argv
dart_exe = os.path.expanduser( '~/dart-sdk/bin/dart')
dart_runnable = os.path.isfile( dart_exe )
coffee_runnable = runnable( "coffee -v" ) and '--coffee' in sys.argv
lua_runnable = runnable( "lua -v" ) and '--lua' in sys.argv
luajit_runnable = runnable( "luajit -v" ) and '--luajit' in sys.argv
lua2js = os.path.abspath( '../external/lua.js/lua2js' )
luajs_runnable = os.path.isfile( lua2js ) and '--lua2js' in sys.argv
go_runnable = runnable( 'go version')
gopherjs_runnable = runnable( 'gopherjs')
assert rhino_runnable or node_runnable
if show_details:
display_errors = ""
else:
display_errors = "2>/dev/null"
def files():
"""returns all the filenames of the regression tests.
this also needs to copy all the original python files to /tmp
because `from xxx import *` syntax will trigger the translator
to read files from the same directory and insert them.
"""
tests = []
html_tests = []
benchmarks = []
mods = []
for dirpath, dirnames, filenames in os.walk('.'):
if dirpath == '.':
continue
for filename in filenames:
a = dirpath + os.path.sep + filename
if filename.endswith(".py"):
if 'bench' in dirpath:
benchmarks.append( a )
else:
tests.append( a )
elif 'html' in dirpath:
if filename.endswith(".html"):
html_tests.append( a )
elif filename.endswith('.py'): ## these are modules
mods.append( filename )
tmpdir = tempfile.gettempdir()
for mod in mods+tests:
data = open(mod,'rb').read()
name = os.path.split(mod)[-1]
open(os.path.join(tmpdir, name), 'wb').write( data )
tests.extend( html_tests )
tests.extend( benchmarks )
return tests
def read(filename):
"""Returns the file content as a string"""
f = open(filename)
content = f.read()
f.close()
return content
def write(filename, content):
"""Write the content into the file"""
f = open(filename, "w")
f.write(content)
f.close()
def run_command(command, returns_stdout_stderr=False, nodewebkit_workaround=False):
"""Returns the number of problems"""
if os.path.isfile("%s.errors" % tmpname):
os.unlink("%s.errors" % tmpname)
f = os.popen(command + " 2>%s.errors" % tmpname, 'r')
killed = False
try:
stdout = f.read().strip()
except KeyboardInterrupt:
stdout = f.read().strip()
killed = True
f.close()
stderr = read("%s.errors" % tmpname)
if nodewebkit_workaround:
stdout = stderr
stderr = ''
a = []
for line in stdout.splitlines():
if 'INFO:CONSOLE' in line:
line = line.replace('\\n', '\n')
line = line.replace('\\u003C', '<')
start = line.index('"')
end = line.rindex('"')
a.append( line[start+1:end] )
stdout = '\n'.join(a)
if stderr:
if show_details:
print('TEST ERROR!')
print(stderr)
if killed:
print(stdout)
sys.exit()
if returns_stdout_stderr:
return stdout, stderr
#########################
if show_details and stdout:
print(stdout)
unknown = []
for line in stdout.splitlines():
if _benchmark:
if line.startswith('#'):
_benchmark.append( line )
else:
#exe = command.split()[0]
_benchmark.append( _test_description + ' ' + line )
else:
unknown.append(line)
errors = '\n'.join(unknown) + stderr
d = {}
x = errors.count("Error fail")
if x:
d['Error'] = x
x = errors.count("Warning fail")
if x:
d['Warning'] = x
if len(d) == 0 and errors != '':
if '.py", line' in errors:
d["Syntax Error Python"] = 1
else:
d["?"] = 1
return d
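# Minimal, hypothetical illustration of the classification step above (not a
# separate feature of the runner): the "Error fail" / "Warning fail" markers
# emitted by the patched TestError/TestWarning helpers are simply counted.
def _demo_classify_output():
    errors = 'a.py:3 Error fail x == 1\na.py:9 Warning fail y == 2\n'
    d = {}
    if errors.count("Error fail"):
        d['Error'] = errors.count("Error fail")
    if errors.count("Warning fail"):
        d['Warning'] = errors.count("Warning fail")
    assert d == {'Error': 1, 'Warning': 1}
    return d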
_benchmark = None
def start_benchmark( name ):
print('starting benchmark:', name)
global _benchmark
_benchmark = [
'font=Helvetica',
'fontsz=12',
'=color_per_datum',
'yformat=%g',
'ylabel=seconds'
]
def end_benchmark( name ):
print('ending benchmark:', name)
global _benchmark
path = '/tmp/%s.perf' %name
f = open( path, 'wb' )
data = '\n'.join( _benchmark )
f.write( data.encode('utf-8') )
f.close()
os.system( './bargraph.pl -eps %s > /tmp/%s.eps' %(path,name))
_benchmark = None
def patch_assert(filename):
"""Patch the regression tests to add information into asserts"""
out = []
for i, line in enumerate(read(filename).split('\n')):
out.append(re.sub("(TestError|TestWarning)\((.*)\)",
r'\1("%s",%d,\2,u"""\2""")' % (filename, i),
line)
)
return '\n'.join(out)
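# Illustrative only (not part of the test runner): a minimal demonstration of
# what the regular expression in patch_assert does to a single line -- it
# injects the file name, line number and the test source into the call.
# The "demo.py" name and line number 3 are made-up values.
def _demo_patch_assert_rewrite():
    line = "TestError( x == 1 )"
    rewritten = re.sub(r"(TestError|TestWarning)\((.*)\)",
                       r'\1("%s",%d,\2,u"""\2""")' % ("demo.py", 3),
                       line)
    assert rewritten == 'TestError("demo.py",3, x == 1 ,u""" x == 1 """)'
    return rewritten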
_patch_header = """# -*- coding: utf-8 -*-
def TestError(file, line, result, test):
if result == False:
print(file + ":" + str(line) + " Error fail " + test)
def TestWarning(file, line, result, test):
if result == False:
print(file + ":" + str(line) + " Warning fail " + test)
"""
_patch_header_go = """# -*- coding: utf-8 -*-
def TestError(file:string, line:int, result:bool, test:string):
if result == False:
print(file + ":" + str(line) + " Error fail " + test)
"""
_python_only_extra_header = """
try:
import threading
threading.start_webworker = lambda f,a: threading._start_new_thread(f,a)
threading.start_new_thread = threading._start_new_thread
except ImportError:
pass
class __faker__(object):
def __enter__(self, *args): pass
def __exit__(self, *args): pass
def __call__(self, *args, **kw):
return lambda f: f
def vectorize(self, f):
return f
def main(self, f):
return f
def object(self, f):
return f
def method(self, f):
return f
webworker = __faker__()
glsl = __faker__()
gpu = __faker__()
returns = __faker__()
typedef = __faker__()
vec2 = None
mat4 = None
def int16(a): return int(a)
try:
import numpy
except ImportError:
try:
import numpypy as numpy
except ImportError:
pass
from math import isnan as isNaN
"""
def patch_python(filename, dart=False, python='PYTHONJS', backend=None):
"""Rewrite the Python code"""
code = patch_assert(filename)
## a main function can not be simply injected like this for dart,
## because dart has special rules about what can be created outside
## of the main function at the module level.
#if dart:
# out = []
# main_inserted = False
# for line in code.splitlines():
# if line.startswith('TestError') or line.startswith('TestWarning'):
# if not main_inserted:
# out.append('def main():')
# main_inserted = True
# out.append( '\t'+line )
# else:
# out.append( line )
# code = '\n'.join( out )
a = [
'PYTHON="%s"'%python,
'BACKEND="%s"'%backend,
]
if backend == 'GO':
a.append(_patch_header_go)
else:
a.append(_patch_header)
if python != 'PYTHONJS':
code = typedpython.transform_source( code, strip=True )
a.append( _python_only_extra_header )
a.append( code )
if not dart and python != 'PYTHONJS':
a.append( 'main()' )
return '\n'.join( a )
def run_python_test_on(filename):
"""Python2"""
write("%s.py" % tmpname, patch_python(filename, python='PYTHON2'))
return run_command("python %s.py %s" % (tmpname, display_errors))
def run_python3_test_on(filename):
"""Python3"""
write("%s.py" % tmpname, patch_python(filename, python='PYTHON3'))
return run_command("python3 %s.py %s" % (tmpname, display_errors))
def translate_js(filename, javascript=False, dart=False, coffee=False, lua=False, luajs=False, go=False, gopherjs=False, multioutput=False, requirejs=True):
global tmpname
tmpname = os.path.join(
tempfile.gettempdir(),
#'test-%s-js=%s-dart=%s-lua=%s' %(filename.split('/')[-1], javascript, dart, lua)
'regtest-%s'%filename.split('/')[-1]
)
output_name = "%s.py" % tmpname
if javascript:
content = 'pythonjs.configure(javascript=True)\n' + patch_python(filename, backend='JAVASCRIPT')
elif dart:
source = [
'pythonjs.configure(dart=True)',
open('../pythonjs/runtime/dart_builtins.py', 'rb').read().decode('utf-8'),
patch_python(filename, dart=True, backend='DART')
]
content = '\n'.join( source )
elif coffee:
source = [
'pythonjs.configure(coffee=True)',
patch_python(filename, backend='COFFEE')
]
content = '\n'.join( source )
elif lua or luajs:
source = [
'pythonjs.configure(lua=True)',
read('../pythonjs/runtime/lua_builtins.py'),
patch_python(filename, backend='LUA')
]
content = '\n'.join( source )
elif go or gopherjs:
content = patch_python(filename, backend='GO')
else:
content = patch_python(filename)
code = '\n'.join(
[
'# -*- coding: utf-8 -*-',
'pythonjs.configure(runtime_exceptions=False)',
content
]
)
write(output_name, code)
cmd = [
os.path.join("..", "pythonjs", "translator.py"),
output_name,
'--debug'
]
if dart:
cmd.append( '--dart' )
elif coffee:
cmd.append( '--coffee')
elif lua:
cmd.append( '--lua')
elif luajs:
cmd.append( '--luajs')
elif go:
cmd.append( '--go' )
elif gopherjs:
cmd.append( '--gopherjs' )
if not requirejs:
cmd.append( '--no-wrapper' )
stdout, stderr = run_command(' '.join(cmd), returns_stdout_stderr=True)
if stderr:
return ''
else:
#jsheader = 'if (typeof(process) != "undefined") { var requirejs = require("requirejs"); }'
jsheader = ''
if multioutput or (stdout.startswith("{") and stdout.endswith("}")):
d = json.loads( stdout )
stdout = d.pop('main')
#builtins = read(os.path.join("../pythonjs", "pythonjs.js"))
for jsfile in d:
if not jsfile.startswith('/'):
stdout = stdout.replace('"%s"' %jsfile, '"/tmp/%s"' %jsfile)
write(
os.path.join('/tmp', jsfile),
'\n'.join( [jsheader, d[jsfile]] )
)
if dart:
if os.path.isfile('/tmp/dart2js-output.js'):
os.unlink('/tmp/dart2js-output.js')
dart_input = '/tmp/dart2js-input.dart'
open( dart_input, 'wb').write( stdout.encode('utf-8') )
cmd = [
dart2js,
'-o', '/tmp/dart2js-output.js',
dart_input
]
if show_details:
subprocess.call( cmd )
else:
sout, serr = run_command(' '.join(cmd), returns_stdout_stderr=True)
if os.path.isfile('/tmp/dart2js-output.js'):
return open('/tmp/dart2js-output.js', 'rb').read().decode('utf-8')
else:
return ''
elif coffee:
coffee_input = '/tmp/coffee-input.coffee'
open( coffee_input, 'wb').write( stdout.encode('utf-8') )
cmd = [
'coffee',
'--print', # print js to stdout
coffee_input
]
#subprocess.call( cmd )
sout, serr = run_command(' '.join(cmd), returns_stdout_stderr=True)
if serr:
return ''
elif sout:
builtins = read(os.path.join("../pythonjs", "pythonjs.js"))
open('/tmp/coffee-output.js', 'wb').write( (builtins+'\n'+sout).encode('utf-8') )
return sout
else:
return ''
elif luajs:
lua2js_input = '/tmp/lua2js-input.lua'
lua2js_output = '/tmp/lua2js-output.js'
open( lua2js_input, 'wb').write( stdout.encode('utf-8') )
cmd = [
lua2js,
lua2js_input,
lua2js_output
]
try:
subprocess.check_call( cmd )
except subprocess.CalledProcessError:
return ''
return open( lua2js_output, 'rb' ).read().decode('utf-8')
else:
return '\n'.join( [jsheader, stdout] )
def run_if_no_error(function):
"""Run the function if the JS code is not empty"""
global js
if js:
return function(js)
else:
return {'Translation error':1}
def run_pythonjs_test_on(dummy_filename):
"""JS PythonJS tests"""
return run_if_no_error(run_js_rhino)
def run_pythonjsjs_test_on(filename):
"""JSJS PythonJS with javascript tests"""
return run_pythonjs_test_on(filename)
def run_js_rhino(content):
"""Run Javascript using Rhino"""
builtins = read(os.path.join("../pythonjs", "pythonjs.js"))
# Patch in order to run Rhino
builtins = builtins.replace('Object.create(null)', '{}', 1)
# Add the program to test
content = builtins + content
    # Remove documentation strings from JavaScript (Rhino doesn't like them)
content = re.sub('^ *".*" *$', '', content)
# Add the console for Rhino
content = '''
console = { log: print } ;
process = { title:"", version:"" } ;
''' + content
write("%s.js" % tmpname, content)
return run_command("rhino -O -1 %s.js" % tmpname)
def run_pythonjs_test_on_node(dummy_filename):
"""PythonJS (normal)"""
return run_if_no_error(run_js_node)
def run_pythonjsjs_test_on_node(filename):
"""PythonJS (fast backend)"""
return run_pythonjs_test_on_node(filename)
def run_js_node(content):
"""Run Javascript using Node"""
#builtins = read(os.path.join("../pythonjs", "pythonjs.js"))
write("/tmp/mymodule.js", content)
lines = [
"var requirejs = require('requirejs')",
"var module = requirejs('mymodule')",
"module.main()"
]
write("%s.js" % tmpname, '\n'.join(lines))
return run_command("node %s.js" % tmpname)
def run_pythonjs_test_on_nodewebkit(dummy_filename):
"""PythonJS (normal) - NodeWebkit"""
return run_if_no_error(run_js_nodewebkit)
def run_pythonjsjs_test_on_nodewebkit(filename):
"""PythonJS (fast backend) - NodeWebkit"""
return run_pythonjs_test_on_nodewebkit(filename)
def run_js_nodewebkit(content):
"""Run Javascript using NodeWebkit"""
## there is likely a bug in requirejs and/or nodewebkit that prevents WebWorkers from working,
    ## `workerjs` for node also seems like it's incompatible with nodewebkit and requirejs,
## as a quick workaround simply strip away the wrapper function from the javascript.
code = '\n'.join( content.strip().splitlines()[1:-2] )
write("/tmp/package.json", '{"name":"test", "main":"test.html"}')
#write("/tmp/mymodule.js", content)
lines = [
"var __nw = require('nw.gui')",
"var requirejs = require('requirejs')",
#"var module = requirejs('mymodule')",
#"module.main()",
code,
"main()",
"__nw.App.quit()"
]
html = ['<html>']
if webclgl:
for data in webclgl:
html.append('<script>')
html.append( data )
html.append('</script>')
html.append('<script>')
html.extend( lines )
html.append('</script>')
html.append('</html>')
write("/tmp/test.html", '\n'.join(html))
#write("%s.js" % tmpname, '\n'.join(lines))
#return run_command("node %s.js" % tmpname)
return run_command("%s /tmp" %nodewebkit, nodewebkit_workaround=True)
def run_pythonjs_dart_test_on_node(dummy_filename):
"""PythonJS (Dart backend - dart2js)"""
return run_if_no_error(run_dart2js_node)
def run_dart2js_node(content):
"""Run Dart2js using Node"""
write("%s.js" % tmpname, content)
return run_command("node %s.js" % tmpname)
def run_pythonjs_dart_test_on_dart(dummy_filename):
"""PythonJS (Dart backend - Dart VM)"""
return run_if_no_error(run_dart)
def run_dart(content):
"""Run Dart2js using Node"""
#write("%s.js" % tmpname, content)
return run_command("%s %s" % (dart_exe, "/tmp/dart2js-input.dart"))
def run_pythonjs_coffee_test_on_node(dummy_filename):
"""PythonJS (CoffeeScript)"""
return run_if_no_error(run_coffee_node)
def run_coffee_node(content):
"""Run CoffeeScript using Node"""
#builtins = read(os.path.join("../pythonjs", "pythonjs.js"))
write("%s.js" % tmpname, content)
return run_command("node %s.js" % tmpname)
def run_pythonjs_lua_test_on_lua(dummy_filename):
"""PythonJS (Lua) on Lua"""
return run_if_no_error(run_lua_lua)
def run_lua_lua(content):
"""Run Lua using Lua"""
write("%s.lua" % tmpname, content)
return run_command("lua %s.lua" % tmpname)
def run_pythonjs_lua_test_on_luajit(dummy_filename):
"""PythonJS (LuaJIT backend)"""
return run_if_no_error(run_lua_luajit)
def run_lua_luajit(content):
"""Run Lua using LuaJIT"""
write("%s.lua" % tmpname, content)
return run_command("luajit %s.lua" % tmpname)
def run_pythonjs_luajs_test_on_node(dummy_filename):
"""PythonJS (Lua.js)"""
return run_if_no_error(run_luajs_node)
def run_luajs_node(content):
"""Run Lua.js using Node"""
builtins = read(os.path.join("../external/lua.js", "lua.js"))
write("%s.js" % tmpname, builtins + '\n' + content)
return run_command("node %s.js" % tmpname)
def run_pythonjs_go_test(dummy_filename):
"""PythonJS (Go backend)"""
return run_if_no_error(run_go)
def run_go(content):
"""compile and run go program"""
write("%s.go" % tmpname, content)
errors = run_command("go build -o /tmp/regtest-go %s.go" % tmpname)
if errors:
return errors
else:
return run_command( '/tmp/regtest-go' )
def run_pythonjs_gopherjs_test(dummy_filename):
"""PythonJS (Gopherjs)"""
return run_if_no_error(run_gopherjs_node)
def run_gopherjs_node(content):
"""Run Gopherjs using Node"""
write("%s.js" % tmpname, content)
return run_command("node %s.js" % tmpname)
def run_html_test( filename, sum_errors ):
lines = open(filename, 'rb').read().decode('utf-8').splitlines()
filename = os.path.split(filename)[-1]
doc = []; script = None
for line in lines:
if line.strip().startswith('<link') and 'stylesheet' in line and '~/' in line:
doc.append('<style>')
css = line.split('href=')[-1].split()[0][1:-1]
print('css', css)
assert css.startswith('~/')
assert css.endswith('.css')
assert os.path.isfile( os.path.expanduser(css) )
doc.append( open(os.path.expanduser(css), 'rb').read().decode('utf-8') )
doc.append('</style>')
elif line.strip().startswith('<script'):
if 'type="text/python"' in line:
doc.append( '<script type="text/javascript">')
script = list()
elif 'src=' in line and '~/' in line: ## external javascripts installed in users home folder
x = line.split('src="')[-1].split('"')[0]
if os.path.isfile(os.path.expanduser(x)):
doc.append( '<script type="text/javascript">' )
doc.append( open(os.path.expanduser(x), 'rb').read().decode('utf-8') )
doc.append( '</script>')
else:
doc.append( line )
elif line.strip() == '</script>':
if script:
open('/tmp/%s.js'%filename, 'wb').write( ('\n'.join(script)).encode('utf-8') )
js = translate_js( '/tmp/%s.js'%filename, requirejs=False ) ## inserts TestError and others
doc.append( js )
doc.append( line )
script = None
elif isinstance( script, list ):
script.append( line )
else:
doc.append( line )
html = '\n'.join(doc)
open('/tmp/%s.html'%filename, 'wb').write( html.encode('utf-8') )
if '--nodewebkit' in sys.argv:
## nodewebkit can bypass all cross origin browser-side security
cfg = '{"name":"test", "main":"%s.html", "window":{"width":1200, "height":700}}' %filename
write("/tmp/package.json", cfg)
run_command("%s /tmp" %nodewebkit, nodewebkit_workaround=True)
else:
## chrome-extension that won't force you to close your browser windows when deving: `Allow-Control-Allow-Origin:*`
## this still fails with iframes that do not allow cross origin.
cmd = [
'google-chrome',
'--app=file:///tmp/%s.html'%filename,
            '--allow-file-access-from-files', ## only takes effect if chrome is closed
            '--allow-file-access', ## only takes effect if chrome is closed
            '--disable-web-security' ## only takes effect if chrome is closed
]
## non-blocking, TODO check for chrome extension that allows output of console.log to stdout
subprocess.check_call(cmd)
table_header = "%-12.12s %-28.28s"
table_cell = '%-6.6s'
def run_test_on(filename):
"""run one test and returns the number of errors"""
if not show_details:
f = open(filename)
comment = f.readline().strip(" \n\"'")
f.close()
print(table_header % (filename[2:-3], comment), end='')
sum_errors = {}
if filename.endswith('.html'):
run_html_test( filename, sum_errors )
return sum_errors
def display(function):
global _test_description
_test_description = function.__doc__
if show_details:
print('\n<%s>\n' % function.__doc__)
errors = function(filename)
if errors:
if not show_details:
print(table_cell % ''.join('%s%d' % (k[0], v)
for k, v in errors.items()),
end='')
else:
if not show_details:
print(table_cell % 'OK', end='')
sys.stdout.flush()
for k, v in errors.items():
sum_errors[k] = sum_errors.get(k, 0) + v
if show_details:
print('-'*77)
if 'requirejs' not in filename and not filename.startswith('./go/'):
display(run_python_test_on)
display(run_python3_test_on)
if pypy_runnable:
display(run_pypy_test_on)
if old_pypy_runnable:
display(run_old_pypy_test_on)
global js
if not filename.startswith('./go/'):
js = translate_js(
filename,
javascript=False,
            multioutput=filename.startswith('./threads/') or filename.startswith('./bench/webworker')
)
if rhino_runnable:
display(run_pythonjs_test_on)
if node_runnable:
display(run_pythonjs_test_on_node)
if nodewebkit_runnable:
display(run_pythonjs_test_on_nodewebkit)
if '--no-javascript-mode' not in sys.argv and not filename.startswith('./go/'):
        js = translate_js(filename, javascript=True, multioutput=filename.startswith('./threads/') or filename.startswith('./bench/webworker'))
if rhino_runnable:
display(run_pythonjsjs_test_on)
if node_runnable:
display(run_pythonjsjs_test_on_node)
if nodewebkit_runnable:
display(run_pythonjsjs_test_on_nodewebkit)
if 'requirejs' not in filename:
if dart_runnable:
js = translate_js(filename, javascript=False, dart=True)
display(run_pythonjs_dart_test_on_dart)
if dart2js_runnable and node_runnable:
js = translate_js(filename, javascript=False, dart=True)
display(run_pythonjs_dart_test_on_node)
if coffee_runnable and node_runnable:
js = translate_js(filename, javascript=False, dart=False, coffee=True)
display(run_pythonjs_coffee_test_on_node)
if luajs_runnable and node_runnable:
js = translate_js(filename, luajs=True)
display(run_pythonjs_luajs_test_on_node)
if lua_runnable:
js = translate_js(filename, lua=True)
display(run_pythonjs_lua_test_on_lua)
if luajit_runnable:
js = translate_js(filename, lua=True)
display(run_pythonjs_lua_test_on_luajit)
if go_runnable:
js = translate_js(filename, go=True)
display(run_pythonjs_go_test)
if gopherjs_runnable:
js = translate_js(filename, gopherjs=True)
display(run_pythonjs_gopherjs_test)
print()
return sum_errors
def run():
"""Run all the tests or the selected ones"""
if not show_details:
headers = ["Py-\nthon2", "Py-\nthon3"]
if pypy_runnable:
headers.append("PyPy\n")
if old_pypy_runnable:
headers.append("PyPy\n1.9")
if rhino_runnable:
headers.append("JS\nRhino")
if node_runnable:
headers.append("JS\nNode")
if nodewebkit_runnable:
headers.append("JS\nWebkit")
if rhino_runnable:
headers.append("JSJS\nRhino")
if node_runnable:
headers.append("JSJS\nNode")
if nodewebkit_runnable:
headers.append("JSJS\nWebkit")
if dart_runnable:
headers.append("Dart\nDart")
if node_runnable:
if dart2js_runnable:
headers.append("Dart\nNode")
if coffee_runnable:
headers.append("Coffe\nNode")
if luajs_runnable:
headers.append("LuaJS\nNode")
if lua_runnable:
headers.append("Lua\nLua")
if luajit_runnable:
headers.append("Lua\nJIT")
if go_runnable:
headers.append("Go\n-")
print(table_header % ("", "Regtest run on")
+ ''.join(table_cell % i.split('\n')[0]
for i in headers)
)
print(table_header % ("", "")
+ ''.join(table_cell % i.split('\n')[1]
for i in headers
)
)
errors = []
total_errors = {}
for filename in files():
if filename.startswith('./bench/'):
start_benchmark( os.path.split(filename)[-1] )
if show_details:
if os.path.abspath(filename) not in argv:
continue
print('*'*77)
print(filename)
sum_errors = run_test_on(filename)
if sum_errors:
errors.append(filename)
for k, v in sum_errors.items():
total_errors[k] = total_errors.get(k, 0) + v
if filename.startswith('./bench/'):
end_benchmark( os.path.split(filename)[-1] )
print()
if errors:
nr_errors = 0
if not show_details:
print("To see details about errors, run the commands:")
for i in errors:
print('\t%s %s' % (sys.argv[0], i))
print("\nSummary of errors:")
for k, v in total_errors.items():
print('\t%d %s' % (v, k))
if k in ('Error', 'Translation error'):
nr_errors += v
if nr_errors == 0:
print("\nRegression tests run fine but with warnings")
sys.exit(nr_errors)
else:
print("Regression tests run fine")
sys.exit(0)
run()
|
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: RemoteDebugger.py
"""Support for remote Python debugging.
Some ASCII art to describe the structure:
       IN PYTHON SUBPROCESS          #             IN IDLE PROCESS
                                     #
                                     #        oid='gui_adapter'
                 +----------+        #       +------------+          +-----+
                 | GUIProxy |--remote#call-->| GUIAdapter |--calls-->| GUI |
+-----+--calls-->+----------+        #       +------------+          +-----+
| Idb |                               #                             /
+-----+<-calls--+------------+        #      +----------+<--calls-/
                | IdbAdapter |<--remote#call--| IdbProxy |
                +------------+        #      +----------+
                oid='idb_adapter'     #
The purpose of the Proxy and Adapter classes is to translate certain
arguments and return values that cannot be transported through the RPC
barrier, in particular frame and traceback objects.
"""
import types
from idlelib import rpc
from idlelib import Debugger
debugging = 0
idb_adap_oid = 'idb_adapter'
gui_adap_oid = 'gui_adapter'
frametable = {}
dicttable = {}
codetable = {}
tracebacktable = {}
def wrap_frame(frame):
fid = id(frame)
frametable[fid] = frame
return fid
def wrap_info(info):
"""replace info[2], a traceback instance, by its ID"""
if info is None:
return
else:
traceback = info[2]
traceback_id = id(traceback)
tracebacktable[traceback_id] = traceback
modified_info = (info[0], info[1], traceback_id)
return modified_info
class GUIProxy:
def __init__(self, conn, gui_adap_oid):
self.conn = conn
self.oid = gui_adap_oid
def interaction(self, message, frame, info=None):
self.conn.remotecall(self.oid, 'interaction', (
message, wrap_frame(frame), wrap_info(info)), {})
class IdbAdapter:
def __init__(self, idb):
self.idb = idb
def set_step(self):
self.idb.set_step()
def set_quit(self):
self.idb.set_quit()
def set_continue(self):
self.idb.set_continue()
def set_next(self, fid):
frame = frametable[fid]
self.idb.set_next(frame)
def set_return(self, fid):
frame = frametable[fid]
self.idb.set_return(frame)
def get_stack(self, fid, tbid):
frame = frametable[fid]
if tbid is None:
tb = None
else:
tb = tracebacktable[tbid]
stack, i = self.idb.get_stack(frame, tb)
stack = [ (wrap_frame(frame), k) for frame, k in stack ]
return (
stack, i)
def run(self, cmd):
import __main__
self.idb.run(cmd, __main__.__dict__)
def set_break(self, filename, lineno):
msg = self.idb.set_break(filename, lineno)
return msg
def clear_break(self, filename, lineno):
msg = self.idb.clear_break(filename, lineno)
return msg
def clear_all_file_breaks(self, filename):
msg = self.idb.clear_all_file_breaks(filename)
return msg
def frame_attr(self, fid, name):
frame = frametable[fid]
return getattr(frame, name)
def frame_globals(self, fid):
frame = frametable[fid]
dict = frame.f_globals
did = id(dict)
dicttable[did] = dict
return did
def frame_locals(self, fid):
frame = frametable[fid]
dict = frame.f_locals
did = id(dict)
dicttable[did] = dict
return did
def frame_code(self, fid):
frame = frametable[fid]
code = frame.f_code
cid = id(code)
codetable[cid] = code
return cid
def code_name(self, cid):
code = codetable[cid]
return code.co_name
def code_filename(self, cid):
code = codetable[cid]
return code.co_filename
def dict_keys(self, did):
dict = dicttable[did]
return dict.keys()
def dict_item(self, did, key):
dict = dicttable[did]
value = dict[key]
value = repr(value)
return value
def start_debugger(rpchandler, gui_adap_oid):
"""Start the debugger and its RPC link in the Python subprocess
Start the subprocess side of the split debugger and set up that side of the
RPC link by instantiating the GUIProxy, Idb debugger, and IdbAdapter
objects and linking them together. Register the IdbAdapter with the
RPCServer to handle RPC requests from the split debugger GUI via the
IdbProxy.
"""
global idb_adap_oid
gui_proxy = GUIProxy(rpchandler, gui_adap_oid)
idb = Debugger.Idb(gui_proxy)
idb_adap = IdbAdapter(idb)
rpchandler.register(idb_adap_oid, idb_adap)
return idb_adap_oid
class FrameProxy:
def __init__(self, conn, fid):
self._conn = conn
self._fid = fid
self._oid = 'idb_adapter'
self._dictcache = {}
def __getattr__(self, name):
if name[:1] == '_':
raise AttributeError, name
if name == 'f_code':
return self._get_f_code()
if name == 'f_globals':
return self._get_f_globals()
if name == 'f_locals':
return self._get_f_locals()
return self._conn.remotecall(self._oid, 'frame_attr', (
self._fid, name), {})
def _get_f_code(self):
cid = self._conn.remotecall(self._oid, 'frame_code', (self._fid,), {})
return CodeProxy(self._conn, self._oid, cid)
def _get_f_globals(self):
did = self._conn.remotecall(self._oid, 'frame_globals', (
self._fid,), {})
return self._get_dict_proxy(did)
def _get_f_locals(self):
did = self._conn.remotecall(self._oid, 'frame_locals', (
self._fid,), {})
return self._get_dict_proxy(did)
def _get_dict_proxy(self, did):
if did in self._dictcache:
return self._dictcache[did]
dp = DictProxy(self._conn, self._oid, did)
self._dictcache[did] = dp
return dp
class CodeProxy:
def __init__(self, conn, oid, cid):
self._conn = conn
self._oid = oid
self._cid = cid
def __getattr__(self, name):
if name == 'co_name':
return self._conn.remotecall(self._oid, 'code_name', (
self._cid,), {})
if name == 'co_filename':
return self._conn.remotecall(self._oid, 'code_filename', (
self._cid,), {})
class DictProxy:
def __init__(self, conn, oid, did):
self._conn = conn
self._oid = oid
self._did = did
def keys(self):
return self._conn.remotecall(self._oid, 'dict_keys', (self._did,), {})
def __getitem__(self, key):
return self._conn.remotecall(self._oid, 'dict_item', (
self._did, key), {})
def __getattr__(self, name):
raise AttributeError, name
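# Hypothetical sketch (not part of idlelib; written for the Python 2 runtime
# this decompiled module targets): a fake RPC connection makes the proxy
# indirection visible -- attribute access on FrameProxy is turned into a
# remotecall to the 'idb_adapter' object on the other side of the RPC link.
class _FakeDebuggerConn:
    def __init__(self):
        self.calls = []
    def remotecall(self, oid, methodname, args, kwargs):
        # Record the call instead of sending it over a real RPC link.
        self.calls.append((oid, methodname, args))
        return None
def _demo_frame_proxy_indirection():
    conn = _FakeDebuggerConn()
    proxy = FrameProxy(conn, 1)
    proxy.f_lineno
    return conn.calls  # [('idb_adapter', 'frame_attr', (1, 'f_lineno'))]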
class GUIAdapter:
def __init__(self, conn, gui):
self.conn = conn
self.gui = gui
def interaction(self, message, fid, modified_info):
frame = FrameProxy(self.conn, fid)
self.gui.interaction(message, frame, modified_info)
class IdbProxy:
def __init__(self, conn, shell, oid):
self.oid = oid
self.conn = conn
self.shell = shell
def call(self, methodname, *args, **kwargs):
value = self.conn.remotecall(self.oid, methodname, args, kwargs)
return value
def run(self, cmd, locals):
seq = self.conn.asyncqueue(self.oid, 'run', (cmd,), {})
self.shell.interp.active_seq = seq
def get_stack(self, frame, tbid):
stack, i = self.call('get_stack', frame._fid, tbid)
stack = [ (FrameProxy(self.conn, fid), k) for fid, k in stack ]
return (
stack, i)
def set_continue(self):
self.call('set_continue')
def set_step(self):
self.call('set_step')
def set_next(self, frame):
self.call('set_next', frame._fid)
def set_return(self, frame):
self.call('set_return', frame._fid)
def set_quit(self):
self.call('set_quit')
def set_break(self, filename, lineno):
msg = self.call('set_break', filename, lineno)
return msg
def clear_break(self, filename, lineno):
msg = self.call('clear_break', filename, lineno)
return msg
def clear_all_file_breaks(self, filename):
msg = self.call('clear_all_file_breaks', filename)
return msg
def start_remote_debugger(rpcclt, pyshell):
"""Start the subprocess debugger, initialize the debugger GUI and RPC link
Request the RPCServer start the Python subprocess debugger and link. Set
up the Idle side of the split debugger by instantiating the IdbProxy,
debugger GUI, and debugger GUIAdapter objects and linking them together.
Register the GUIAdapter with the RPCClient to handle debugger GUI
interaction requests coming from the subprocess debugger via the GUIProxy.
The IdbAdapter will pass execution and environment requests coming from the
Idle debugger GUI to the subprocess debugger via the IdbProxy.
"""
global idb_adap_oid
idb_adap_oid = rpcclt.remotecall('exec', 'start_the_debugger', (
gui_adap_oid,), {})
idb_proxy = IdbProxy(rpcclt, pyshell, idb_adap_oid)
gui = Debugger.Debugger(pyshell, idb_proxy)
gui_adap = GUIAdapter(rpcclt, gui)
rpcclt.register(gui_adap_oid, gui_adap)
return gui
def close_remote_debugger(rpcclt):
"""Shut down subprocess debugger and Idle side of debugger RPC link
Request that the RPCServer shut down the subprocess debugger and link.
Unregister the GUIAdapter, which will cause a GC on the Idle process
debugger and RPC link objects. (The second reference to the debugger GUI
is deleted in PyShell.close_remote_debugger().)
"""
close_subprocess_debugger(rpcclt)
rpcclt.unregister(gui_adap_oid)
def close_subprocess_debugger(rpcclt):
rpcclt.remotecall('exec', 'stop_the_debugger', (idb_adap_oid,), {})
def restart_subprocess_debugger(rpcclt):
idb_adap_oid_ret = rpcclt.remotecall('exec', 'start_the_debugger', (
gui_adap_oid,), {})
|
|
#!/usr/bin/env python
"""ATS input converter from 0.87 to 0.88"""
import sys, os
try:
amanzi_xml = os.path.join(os.environ["AMANZI_SRC_DIR"], "tools","amanzi_xml")
except KeyError:
pass
else:
if amanzi_xml not in sys.path:
sys.path.append(amanzi_xml)
from amanzi_xml.utils import search as asearch
from amanzi_xml.utils import io as aio
from amanzi_xml.utils import errors as aerrors
from amanzi_xml.common import parameter
def fixEvaluator(xml, name, newname):
try:
pd = asearch.find_path(xml, ["state","field evaluators",name])
except aerrors.MissingXMLError:
pass
else:
pd.setName(newname)
def linear_operator(xml):
"""Changes any instances of "linear operator" to "linear solver",
which is now standard across all PKs."""
pks = asearch.child_by_name(xml, "PKs")
for pk in pks:
try:
lin_op = asearch.child_by_name(pk, "linear operator")
except aerrors.MissingXMLError:
pass
else:
lin_op.setName("linear solver")
def max_valid_change(xml):
"""Adds options for max valid change, which aren't required, but are strongly suggested."""
pks = asearch.child_by_name(xml, "PKs")
for pk in pks:
pk_type = asearch.child_by_name(pk, "PK type")
if pk_type.get('value') == 'permafrost flow':
try:
pk.getElement("max valid change in saturation in a time step [-]")
except aerrors.MissingXMLError:
pk.append(parameter.DoubleParameter("max valid change in saturation in a time step [-]", 0.1))
try:
pk.getElement("max valid change in ice saturation in a time step [-]")
except aerrors.MissingXMLError:
pk.append(parameter.DoubleParameter("max valid change in ice saturation in a time step [-]", 0.1))
def bad_spinup_longwave(xml):
"""One spinup file commonly used includes a longwave radiation
value that is totally wrong. Not many runs actually used it.
Some runs even had a spec for it in their file, but didn't include
the necessary flag to use it. So this just removes it to avoid
confusion."""
evals = asearch.find_path(xml, ["state","field evaluators"])
try:
lw = evals.getElement("surface-incoming_longwave_radiation")
except aerrors.MissingXMLError:
pass
else:
try:
filename = asearch.find_path(lw, ["function","domain","function","function-tabular","file"])
except aerrors.MissingXMLError:
pass
else:
if "spinup-10yr.h5" in filename.getValue():
evals.pop("surface-incoming_longwave_radiation")
def sources(xml):
"""Can turn off derivative of source terms"""
pks = asearch.child_by_name(xml, "PKs")
for pk in pks:
try:
source_term = pk.getElement("mass source key")
except aerrors.MissingXMLError:
pass
else:
source_term.setName('source key')
try:
source_term = pk.getElement("energy source key")
except aerrors.MissingXMLError:
pass
else:
source_term.setName('source key')
try:
source_term = pk.getElement("source term")
except aerrors.MissingXMLError:
pass
else:
if source_term.getValue():
try:
source_is_diff = pk.getElement("source term is differentiable")
except aerrors.MissingXMLError:
pk.append(parameter.BoolParameter("source term is differentiable", True))
def snow_distribution(xml):
try:
snow_dist_pk = asearch.find_path(xml, ["PKs","snow distribution"])
except aerrors.MissingXMLError:
pass
else:
if snow_dist_pk.isElement("primary variable key") and \
asearch.child_by_name(snow_dist_pk,"primary variable key").getValue() == "surface-precipitation_snow":
asearch.child_by_name(snow_dist_pk,"primary variable key").setValue("snow-precipitation")
if snow_dist_pk.isElement("conserved quantity key") and \
asearch.child_by_name(snow_dist_pk,"conserved quantity key").getValue() == "surface-precipitation_snow":
asearch.child_by_name(snow_dist_pk,"conserved quantity key").setValue("snow-precipitation")
if snow_dist_pk.isElement("domain name") and \
asearch.child_by_name(snow_dist_pk,"domain name").getValue() == "surface":
asearch.child_by_name(snow_dist_pk,"domain name").setValue("snow")
try:
ssk = asearch.find_path(xml, ["state","field evaluators","snow-conductivity"])
except aerrors.MissingXMLError:
pass
else:
if ssk.isElement("height key"):
asearch.child_by_name(ssk, "height key").setValue("snow-precipitation")
def end_time_units(xml):
"""yr --> y"""
try:
end_time = asearch.find_path(xml, ["cycle driver","end time units"])
except aerrors.MissingXMLError:
pass
else:
if end_time.getValue() == "yr":
end_time.setValue("y")
def surface_rel_perm_one(xml):
"""Add units, changed to pressure."""
for surf_rel_perm in asearch.findall_name(xml, "surface rel perm model"):
pres_above = None
if surf_rel_perm.isElement("unfrozen rel perm cutoff depth"):
height_el = surf_rel_perm.pop("unfrozen rel perm cutoff height")
pres_above = height_el.getValue() * 1000 * 10
if surf_rel_perm.isElement("unfrozen rel pres cutoff pressure"):
pres_el = surf_rel_perm.pop("unfrozen rel perm cutoff pressure")
pres_above = pres_el.getValue()
if surf_rel_perm.isElement("unfrozen rel pres cutoff pressure [Pa]"):
continue
else:
if pres_above is not None:
surf_rel_perm.append(parameter.DoubleParameter("unfrozen rel pres cutoff pressure [Pa]", pres_above))
def update(xml):
linear_operator(xml)
max_valid_change(xml)
bad_spinup_longwave(xml)
sources(xml)
pks = asearch.child_by_name(xml, "PKs")
for pk in pks:
pk_type = asearch.child_by_name(pk, "PK type")
if pk_type.get('value') == 'surface balance implicit':
            print('updating seb monolithic')
import seb_monolithic_to_evals
seb_monolithic_to_evals.update_seb(xml)
fixEvaluator(xml, "surface-snow_skin_potential", "snow-skin_potential")
fixEvaluator(xml, "surface-snow_conductivity", "snow-conductivity")
snow_distribution(xml)
end_time_units(xml)
import verbose_object
verbose_object.fixVerboseObject(xml)
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser(description="Fix a number of changes from ATS input spec 0.87 to 0.88")
parser.add_argument("infile", help="input filename")
group = parser.add_mutually_exclusive_group()
group.add_argument("-i", "--inplace", action="store_true", help="fix file in place")
group.add_argument("-o", "--outfile", help="output filename")
args = parser.parse_args()
print("Converting file: %s", args.infile)
xml = aio.fromFile(args.infile, True)
update(xml)
if args.inplace:
aio.toFile(xml, args.infile)
else:
aio.toFile(xml, args.outfile)
sys.exit(0)
|
|
# Copyright 2011 OpenStack Foundation
# Copyright 2012 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from mox3 import mox
from oslo_config import cfg
from oslo_serialization import jsonutils
import webob
from nova.api.openstack.compute.contrib import security_groups as secgroups_v2
from nova.api.openstack.compute.plugins.v3 import security_groups as \
secgroups_v21
from nova import compute
from nova.compute import power_state
from nova import context as context_maker
import nova.db
from nova import exception
from nova import objects
from nova.objects import instance as instance_obj
from nova import quota
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
CONF = cfg.CONF
FAKE_UUID1 = 'a47ae74e-ab08-447f-8eee-ffd43fc46c16'
FAKE_UUID2 = 'c6e6430a-6563-4efa-9542-5e93c9e97d18'
class AttrDict(dict):
def __getattr__(self, k):
return self[k]
def security_group_template(**kwargs):
sg = kwargs.copy()
sg.setdefault('tenant_id', '123')
sg.setdefault('name', 'test')
sg.setdefault('description', 'test-description')
return sg
def security_group_db(security_group, id=None):
attrs = security_group.copy()
if 'tenant_id' in attrs:
attrs['project_id'] = attrs.pop('tenant_id')
if id is not None:
attrs['id'] = id
attrs.setdefault('rules', [])
attrs.setdefault('instances', [])
return AttrDict(attrs)
def security_group_rule_template(**kwargs):
rule = kwargs.copy()
rule.setdefault('ip_protocol', 'tcp')
rule.setdefault('from_port', 22)
rule.setdefault('to_port', 22)
rule.setdefault('parent_group_id', 2)
return rule
def security_group_rule_db(rule, id=None):
attrs = rule.copy()
if 'ip_protocol' in attrs:
attrs['protocol'] = attrs.pop('ip_protocol')
return AttrDict(attrs)
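# How the helpers above fit together (values are illustrative): the
# *_template() functions build request-shaped dicts for the API, while the
# *_db() functions convert them into the AttrDict rows a fake database layer
# would return (renaming 'tenant_id' -> 'project_id' and
# 'ip_protocol' -> 'protocol' along the way).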
def return_server(context, server_id,
columns_to_join=None, use_slave=False):
return fake_instance.fake_db_instance(
**{'id': int(server_id),
'power_state': 0x01,
'host': "localhost",
'uuid': FAKE_UUID1,
'name': 'asdf'})
def return_server_by_uuid(context, server_uuid,
columns_to_join=None,
use_slave=False):
return fake_instance.fake_db_instance(
**{'id': 1,
'power_state': 0x01,
'host': "localhost",
'uuid': server_uuid,
'name': 'asdf'})
def return_non_running_server(context, server_id, columns_to_join=None):
return fake_instance.fake_db_instance(
**{'id': server_id, 'power_state': power_state.SHUTDOWN,
'uuid': FAKE_UUID1, 'host': "localhost", 'name': 'asdf'})
def return_security_group_by_name(context, project_id, group_name):
return {'id': 1, 'name': group_name,
"instances": [{'id': 1, 'uuid': FAKE_UUID1}]}
def return_security_group_without_instances(context, project_id, group_name):
return {'id': 1, 'name': group_name}
def return_server_nonexistent(context, server_id, columns_to_join=None):
raise exception.InstanceNotFound(instance_id=server_id)
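# The return_* stubs above stand in for nova.db calls; the tests below install
# them with self.stubs.Set() so the controllers never touch a real database.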
class TestSecurityGroupsV21(test.TestCase):
secgrp_ctl_cls = secgroups_v21.SecurityGroupController
server_secgrp_ctl_cls = secgroups_v21.ServerSecurityGroupController
secgrp_act_ctl_cls = secgroups_v21.SecurityGroupActionController
def setUp(self):
super(TestSecurityGroupsV21, self).setUp()
self.controller = self.secgrp_ctl_cls()
self.server_controller = self.server_secgrp_ctl_cls()
self.manager = self.secgrp_act_ctl_cls()
        # fake_id has to be chosen here, in setUp(), rather than at class
        # level: a derived class may provide a controller whose
        # 'security_group_api' differs, and that is only known once
        # self.controller has been built above.
if self.controller.security_group_api.id_is_uuid:
self.fake_id = '11111111-1111-1111-1111-111111111111'
else:
self.fake_id = '11111111'
def _assert_no_security_groups_reserved(self, context):
"""Check that no reservations are leaked during tests."""
result = quota.QUOTAS.get_project_quotas(context, context.project_id)
self.assertEqual(result['security_groups']['reserved'], 0)
def _assert_security_groups_in_use(self, project_id, user_id, in_use):
context = context_maker.get_admin_context()
result = quota.QUOTAS.get_user_quotas(context, project_id, user_id)
self.assertEqual(result['security_groups']['in_use'], in_use)
def test_create_security_group(self):
sg = security_group_template()
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
res_dict = self.controller.create(req, {'security_group': sg})
self.assertEqual(res_dict['security_group']['name'], 'test')
self.assertEqual(res_dict['security_group']['description'],
'test-description')
def test_create_security_group_with_no_name(self):
sg = security_group_template()
del sg['name']
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, sg)
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_no_description(self):
sg = security_group_template()
del sg['description']
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_empty_description(self):
sg = security_group_template()
sg['description'] = ""
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
try:
self.controller.create(req, {'security_group': sg})
self.fail('Should have raised BadRequest exception')
except webob.exc.HTTPBadRequest as exc:
self.assertEqual('description has a minimum character requirement'
' of 1.', exc.explanation)
except exception.InvalidInput:
            self.fail('Should have raised BadRequest exception instead of'
                      ' InvalidInput')
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_blank_name(self):
sg = security_group_template(name='')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_whitespace_name(self):
sg = security_group_template(name=' ')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_blank_description(self):
sg = security_group_template(description='')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_whitespace_description(self):
sg = security_group_template(description=' ')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_duplicate_name(self):
sg = security_group_template()
# FIXME: Stub out _get instead of creating twice
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.controller.create(req, {'security_group': sg})
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_no_body(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, None)
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_no_security_group(self):
body = {'no-securityGroup': None}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, body)
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_above_255_characters_name(self):
sg = security_group_template(name='1234567890' * 26)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_above_255_characters_description(self):
sg = security_group_template(description='1234567890' * 26)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_non_string_name(self):
sg = security_group_template(name=12)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_non_string_description(self):
sg = security_group_template(description=12)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_quota_limit(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
for num in range(1, CONF.quota_security_groups):
name = 'test%s' % num
sg = security_group_template(name=name)
res_dict = self.controller.create(req, {'security_group': sg})
self.assertEqual(res_dict['security_group']['name'], name)
sg = security_group_template()
self.assertRaises(webob.exc.HTTPForbidden, self.controller.create,
req, {'security_group': sg})
def test_get_security_group_list(self):
groups = []
for i, name in enumerate(['default', 'test']):
sg = security_group_template(id=i + 1,
name=name,
description=name + '-desc',
rules=[])
groups.append(sg)
expected = {'security_groups': groups}
def return_security_groups(context, project_id):
return [security_group_db(sg) for sg in groups]
self.stubs.Set(nova.db, 'security_group_get_by_project',
return_security_groups)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
res_dict = self.controller.index(req)
self.assertEqual(res_dict, expected)
def test_get_security_group_list_missing_group_id_rule(self):
groups = []
rule1 = security_group_rule_template(cidr='10.2.3.124/24',
parent_group_id=1,
group_id={}, id=88,
protocol='TCP')
rule2 = security_group_rule_template(cidr='10.2.3.125/24',
parent_group_id=1,
id=99, protocol=88,
group_id='HAS_BEEN_DELETED')
sg = security_group_template(id=1,
name='test',
description='test-desc',
rules=[rule1, rule2])
groups.append(sg)
        # An expected rule has to be built here because the API response uses
        # different attribute names for a rule than the request does.
        # For example:
        # "cidr": "0.0.0.0/0" -> "ip_range": {"cidr": "0.0.0.0/0"}
expected_rule = security_group_rule_template(
ip_range={'cidr': '10.2.3.124/24'}, parent_group_id=1,
group={}, id=88, ip_protocol='TCP')
expected = security_group_template(id=1,
name='test',
description='test-desc',
rules=[expected_rule])
expected = {'security_groups': [expected]}
def return_security_groups(context, project, search_opts):
return [security_group_db(sg) for sg in groups]
self.stubs.Set(self.controller.security_group_api, 'list',
return_security_groups)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
res_dict = self.controller.index(req)
self.assertEqual(res_dict, expected)
def test_get_security_group_list_all_tenants(self):
all_groups = []
tenant_groups = []
for i, name in enumerate(['default', 'test']):
sg = security_group_template(id=i + 1,
name=name,
description=name + '-desc',
rules=[])
all_groups.append(sg)
if name == 'default':
tenant_groups.append(sg)
all = {'security_groups': all_groups}
tenant_specific = {'security_groups': tenant_groups}
def return_all_security_groups(context):
return [security_group_db(sg) for sg in all_groups]
self.stubs.Set(nova.db, 'security_group_get_all',
return_all_security_groups)
def return_tenant_security_groups(context, project_id):
return [security_group_db(sg) for sg in tenant_groups]
self.stubs.Set(nova.db, 'security_group_get_by_project',
return_tenant_security_groups)
path = '/v2/fake/os-security-groups'
req = fakes.HTTPRequest.blank(path, use_admin_context=True)
res_dict = self.controller.index(req)
self.assertEqual(res_dict, tenant_specific)
req = fakes.HTTPRequest.blank('%s?all_tenants=1' % path,
use_admin_context=True)
res_dict = self.controller.index(req)
self.assertEqual(res_dict, all)
def test_get_security_group_by_instance(self):
groups = []
for i, name in enumerate(['default', 'test']):
sg = security_group_template(id=i + 1,
name=name,
description=name + '-desc',
rules=[])
groups.append(sg)
expected = {'security_groups': groups}
def return_instance(context, server_id,
columns_to_join=None, use_slave=False):
self.assertEqual(server_id, FAKE_UUID1)
return return_server_by_uuid(context, server_id)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_instance)
def return_security_groups(context, instance_uuid):
self.assertEqual(instance_uuid, FAKE_UUID1)
return [security_group_db(sg) for sg in groups]
self.stubs.Set(nova.db, 'security_group_get_by_instance',
return_security_groups)
req = fakes.HTTPRequest.blank('/v2/%s/servers/%s/os-security-groups' %
('fake', FAKE_UUID1))
res_dict = self.server_controller.index(req, FAKE_UUID1)
self.assertEqual(res_dict, expected)
@mock.patch('nova.db.instance_get_by_uuid')
@mock.patch('nova.db.security_group_get_by_instance', return_value=[])
def test_get_security_group_empty_for_instance(self, mock_sec_group,
mock_db_get_ins):
expected = {'security_groups': []}
def return_instance(context, server_id,
columns_to_join=None, use_slave=False):
self.assertEqual(server_id, FAKE_UUID1)
return return_server_by_uuid(context, server_id)
mock_db_get_ins.side_effect = return_instance
req = fakes.HTTPRequest.blank('/v2/%s/servers/%s/os-security-groups' %
('fake', FAKE_UUID1))
res_dict = self.server_controller.index(req, FAKE_UUID1)
self.assertEqual(expected, res_dict)
mock_sec_group.assert_called_once_with(req.environ['nova.context'],
FAKE_UUID1)
def test_get_security_group_by_instance_non_existing(self):
self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_nonexistent)
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/os-security-groups')
self.assertRaises(webob.exc.HTTPNotFound,
self.server_controller.index, req, '1')
def test_get_security_group_by_instance_invalid_id(self):
req = fakes.HTTPRequest.blank(
'/v2/fake/servers/invalid/os-security-groups')
self.assertRaises(webob.exc.HTTPNotFound,
self.server_controller.index, req, 'invalid')
def test_get_security_group_by_id(self):
sg = security_group_template(id=2, rules=[])
def return_security_group(context, group_id):
self.assertEqual(sg['id'], group_id)
return security_group_db(sg)
self.stubs.Set(nova.db, 'security_group_get',
return_security_group)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/2')
res_dict = self.controller.show(req, '2')
expected = {'security_group': sg}
self.assertEqual(res_dict, expected)
def test_get_security_group_by_invalid_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/invalid')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, 'invalid')
def test_get_security_group_by_non_existing_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s' %
self.fake_id)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, self.fake_id)
def test_update_security_group(self):
sg = security_group_template(id=2, rules=[])
sg_update = security_group_template(id=2, rules=[],
name='update_name', description='update_desc')
def return_security_group(context, group_id):
self.assertEqual(sg['id'], group_id)
return security_group_db(sg)
def return_update_security_group(context, group_id, values,
columns_to_join=None):
self.assertEqual(sg_update['id'], group_id)
self.assertEqual(sg_update['name'], values['name'])
self.assertEqual(sg_update['description'], values['description'])
return security_group_db(sg_update)
self.stubs.Set(nova.db, 'security_group_update',
return_update_security_group)
self.stubs.Set(nova.db, 'security_group_get',
return_security_group)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/2')
res_dict = self.controller.update(req, '2',
{'security_group': sg_update})
expected = {'security_group': sg_update}
self.assertEqual(res_dict, expected)
def test_update_security_group_name_to_default(self):
sg = security_group_template(id=2, rules=[], name='default')
def return_security_group(context, group_id):
self.assertEqual(sg['id'], group_id)
return security_group_db(sg)
self.stubs.Set(nova.db, 'security_group_get',
return_security_group)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/2')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, '2', {'security_group': sg})
def test_update_default_security_group_fail(self):
sg = security_group_template()
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/1')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, '1', {'security_group': sg})
def test_delete_security_group_by_id(self):
sg = security_group_template(id=1, project_id='fake_project',
user_id='fake_user', rules=[])
self.called = False
def security_group_destroy(context, id):
self.called = True
def return_security_group(context, group_id):
self.assertEqual(sg['id'], group_id)
return security_group_db(sg)
self.stubs.Set(nova.db, 'security_group_destroy',
security_group_destroy)
self.stubs.Set(nova.db, 'security_group_get',
return_security_group)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/1')
self.controller.delete(req, '1')
self.assertTrue(self.called)
def test_delete_security_group_by_admin(self):
sg = security_group_template(id=2, rules=[])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.controller.create(req, {'security_group': sg})
context = req.environ['nova.context']
# Ensure quota usage for security group is correct.
self._assert_security_groups_in_use(context.project_id,
context.user_id, 2)
# Delete the security group by admin.
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/2',
use_admin_context=True)
self.controller.delete(req, '2')
# Ensure quota for security group in use is released.
self._assert_security_groups_in_use(context.project_id,
context.user_id, 1)
def test_delete_security_group_by_invalid_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/invalid')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, 'invalid')
def test_delete_security_group_by_non_existing_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s'
% self.fake_id)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, self.fake_id)
def test_delete_security_group_in_use(self):
sg = security_group_template(id=1, rules=[])
def security_group_in_use(context, id):
return True
def return_security_group(context, group_id):
self.assertEqual(sg['id'], group_id)
return security_group_db(sg)
self.stubs.Set(nova.db, 'security_group_in_use',
security_group_in_use)
self.stubs.Set(nova.db, 'security_group_get',
return_security_group)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/1')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, '1')
def test_associate_by_non_existing_security_group_name(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
self.assertEqual(return_server(None, '1'),
nova.db.instance_get(None, '1'))
body = dict(addSecurityGroup=dict(name='non-existing'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._addSecurityGroup, req, '1', body)
def test_associate_by_invalid_server_id(self):
body = dict(addSecurityGroup=dict(name='test'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/invalid/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._addSecurityGroup, req, 'invalid', body)
def test_associate_without_body(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(addSecurityGroup=None)
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._addSecurityGroup, req, '1', body)
def test_associate_no_security_group_name(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(addSecurityGroup=dict())
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._addSecurityGroup, req, '1', body)
def test_associate_security_group_name_with_whitespaces(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(addSecurityGroup=dict(name=" "))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._addSecurityGroup, req, '1', body)
def test_associate_non_existing_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_nonexistent)
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._addSecurityGroup, req, '1', body)
def test_associate_non_running_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_non_running_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_non_running_server)
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_without_instances)
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.manager._addSecurityGroup(req, '1', body)
def test_associate_already_associated_security_group_to_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_by_uuid)
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_by_name)
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._addSecurityGroup, req, '1', body)
def test_associate(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_by_uuid)
self.mox.StubOutWithMock(nova.db, 'instance_add_security_group')
nova.db.instance_add_security_group(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_without_instances)
self.mox.ReplayAll()
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.manager._addSecurityGroup(req, '1', body)
def test_disassociate_by_non_existing_security_group_name(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
self.assertEqual(return_server(None, '1'),
nova.db.instance_get(None, '1'))
body = dict(removeSecurityGroup=dict(name='non-existing'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._removeSecurityGroup, req, '1', body)
def test_disassociate_by_invalid_server_id(self):
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_by_name)
body = dict(removeSecurityGroup=dict(name='test'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/invalid/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._removeSecurityGroup, req, 'invalid',
body)
def test_disassociate_without_body(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(removeSecurityGroup=None)
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._removeSecurityGroup, req, '1', body)
def test_disassociate_no_security_group_name(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(removeSecurityGroup=dict())
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._removeSecurityGroup, req, '1', body)
def test_disassociate_security_group_name_with_whitespaces(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(removeSecurityGroup=dict(name=" "))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._removeSecurityGroup, req, '1', body)
def test_disassociate_non_existing_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_by_name)
body = dict(removeSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._removeSecurityGroup, req, '1', body)
def test_disassociate_non_running_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_non_running_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_non_running_server)
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_by_name)
body = dict(removeSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.manager._removeSecurityGroup(req, '1', body)
def test_disassociate_already_associated_security_group_to_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_by_uuid)
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_without_instances)
body = dict(removeSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._removeSecurityGroup, req, '1', body)
def test_disassociate(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_by_uuid)
self.mox.StubOutWithMock(nova.db, 'instance_remove_security_group')
nova.db.instance_remove_security_group(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_by_name)
self.mox.ReplayAll()
body = dict(removeSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.manager._removeSecurityGroup(req, '1', body)
class TestSecurityGroupsV2(TestSecurityGroupsV21):
secgrp_ctl_cls = secgroups_v2.SecurityGroupController
server_secgrp_ctl_cls = secgroups_v2.ServerSecurityGroupController
secgrp_act_ctl_cls = secgroups_v2.SecurityGroupActionController
class TestSecurityGroupRulesV21(test.TestCase):
secgrp_ctl_cls = secgroups_v21.SecurityGroupRulesController
def setUp(self):
super(TestSecurityGroupRulesV21, self).setUp()
self.controller = self.secgrp_ctl_cls()
if self.controller.security_group_api.id_is_uuid:
id1 = '11111111-1111-1111-1111-111111111111'
id2 = '22222222-2222-2222-2222-222222222222'
self.invalid_id = '33333333-3333-3333-3333-333333333333'
else:
id1 = 1
id2 = 2
self.invalid_id = '33333333'
self.sg1 = security_group_template(id=id1)
self.sg2 = security_group_template(
id=id2, name='authorize_revoke',
description='authorize-revoke testing')
db1 = security_group_db(self.sg1)
db2 = security_group_db(self.sg2)
def return_security_group(context, group_id, columns_to_join=None):
if group_id == db1['id']:
return db1
if group_id == db2['id']:
return db2
raise exception.SecurityGroupNotFound(security_group_id=group_id)
self.stubs.Set(nova.db, 'security_group_get',
return_security_group)
self.parent_security_group = db2
def test_create_by_cidr(self):
rule = security_group_rule_template(cidr='10.2.3.124/24',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.sg2['id'])
self.assertEqual(security_group_rule['ip_range']['cidr'],
"10.2.3.124/24")
def test_create_by_group_id(self):
rule = security_group_rule_template(group_id=self.sg1['id'],
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.sg2['id'])
def test_create_by_same_group_id(self):
rule1 = security_group_rule_template(group_id=self.sg1['id'],
from_port=80, to_port=80,
parent_group_id=self.sg2['id'])
self.parent_security_group['rules'] = [security_group_rule_db(rule1)]
rule2 = security_group_rule_template(group_id=self.sg1['id'],
from_port=81, to_port=81,
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule2})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.sg2['id'])
self.assertEqual(security_group_rule['from_port'], 81)
self.assertEqual(security_group_rule['to_port'], 81)
def test_create_none_value_from_to_port(self):
rule = {'parent_group_id': self.sg1['id'],
'group_id': self.sg1['id']}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertIsNone(security_group_rule['from_port'])
self.assertIsNone(security_group_rule['to_port'])
self.assertEqual(security_group_rule['group']['name'], 'test')
self.assertEqual(security_group_rule['parent_group_id'],
self.sg1['id'])
def test_create_none_value_from_to_port_icmp(self):
rule = {'parent_group_id': self.sg1['id'],
'group_id': self.sg1['id'],
'ip_protocol': 'ICMP'}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertEqual(security_group_rule['ip_protocol'], 'ICMP')
self.assertEqual(security_group_rule['from_port'], -1)
self.assertEqual(security_group_rule['to_port'], -1)
self.assertEqual(security_group_rule['group']['name'], 'test')
self.assertEqual(security_group_rule['parent_group_id'],
self.sg1['id'])
def test_create_none_value_from_to_port_tcp(self):
rule = {'parent_group_id': self.sg1['id'],
'group_id': self.sg1['id'],
'ip_protocol': 'TCP'}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertEqual(security_group_rule['ip_protocol'], 'TCP')
self.assertEqual(security_group_rule['from_port'], 1)
self.assertEqual(security_group_rule['to_port'], 65535)
self.assertEqual(security_group_rule['group']['name'], 'test')
self.assertEqual(security_group_rule['parent_group_id'],
self.sg1['id'])
def test_create_by_invalid_cidr_json(self):
rule = security_group_rule_template(
ip_protocol="tcp",
from_port=22,
to_port=22,
parent_group_id=self.sg2['id'],
cidr="10.2.3.124/2433")
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_by_invalid_tcp_port_json(self):
rule = security_group_rule_template(
ip_protocol="tcp",
from_port=75534,
to_port=22,
parent_group_id=self.sg2['id'],
cidr="10.2.3.124/24")
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_by_invalid_icmp_port_json(self):
rule = security_group_rule_template(
ip_protocol="icmp",
from_port=1,
to_port=256,
parent_group_id=self.sg2['id'],
cidr="10.2.3.124/24")
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_add_existing_rules_by_cidr(self):
rule = security_group_rule_template(cidr='10.0.0.0/24',
parent_group_id=self.sg2['id'])
self.parent_security_group['rules'] = [security_group_rule_db(rule)]
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_add_existing_rules_by_group_id(self):
rule = security_group_rule_template(group_id=1)
self.parent_security_group['rules'] = [security_group_rule_db(rule)]
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_no_body(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, None)
def test_create_with_no_security_group_rule_in_body(self):
rules = {'test': 'test'}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, rules)
def test_create_with_invalid_parent_group_id(self):
rule = security_group_rule_template(parent_group_id='invalid')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_non_existing_parent_group_id(self):
rule = security_group_rule_template(group_id=None,
parent_group_id=self.invalid_id)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_non_existing_group_id(self):
rule = security_group_rule_template(group_id='invalid',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_invalid_protocol(self):
rule = security_group_rule_template(ip_protocol='invalid-protocol',
cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_no_protocol(self):
rule = security_group_rule_template(cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
del rule['ip_protocol']
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_invalid_from_port(self):
rule = security_group_rule_template(from_port='666666',
cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_invalid_to_port(self):
rule = security_group_rule_template(to_port='666666',
cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_non_numerical_from_port(self):
rule = security_group_rule_template(from_port='invalid',
cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_non_numerical_to_port(self):
rule = security_group_rule_template(to_port='invalid',
cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_no_from_port(self):
rule = security_group_rule_template(cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
del rule['from_port']
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_no_to_port(self):
rule = security_group_rule_template(cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
del rule['to_port']
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_invalid_cidr(self):
rule = security_group_rule_template(cidr='10.2.2222.0/24',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_no_cidr_group(self):
rule = security_group_rule_template(parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.parent_security_group['id'])
self.assertEqual(security_group_rule['ip_range']['cidr'],
"0.0.0.0/0")
def test_create_with_invalid_group_id(self):
rule = security_group_rule_template(group_id='invalid',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_empty_group_id(self):
rule = security_group_rule_template(group_id='',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_nonexist_group_id(self):
rule = security_group_rule_template(group_id=self.invalid_id,
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_same_group_parent_id_and_group_id(self):
rule = security_group_rule_template(group_id=self.sg1['id'],
parent_group_id=self.sg1['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.sg1['id'])
self.assertEqual(security_group_rule['group']['name'],
self.sg1['name'])
def _test_create_with_no_ports_and_no_group(self, proto):
rule = {'ip_protocol': proto, 'parent_group_id': self.sg2['id']}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def _test_create_with_no_ports(self, proto):
rule = {'ip_protocol': proto, 'parent_group_id': self.sg2['id'],
'group_id': self.sg1['id']}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
expected_rule = {
'from_port': 1, 'group': {'tenant_id': '123', 'name': 'test'},
'ip_protocol': proto, 'to_port': 65535, 'parent_group_id':
self.sg2['id'], 'ip_range': {}, 'id': security_group_rule['id']
}
if proto == 'icmp':
expected_rule['to_port'] = -1
expected_rule['from_port'] = -1
self.assertEqual(expected_rule, security_group_rule)
def test_create_with_no_ports_icmp(self):
self._test_create_with_no_ports_and_no_group('icmp')
self._test_create_with_no_ports('icmp')
def test_create_with_no_ports_tcp(self):
self._test_create_with_no_ports_and_no_group('tcp')
self._test_create_with_no_ports('tcp')
def test_create_with_no_ports_udp(self):
self._test_create_with_no_ports_and_no_group('udp')
self._test_create_with_no_ports('udp')
def _test_create_with_ports(self, proto, from_port, to_port):
rule = {
'ip_protocol': proto, 'from_port': from_port, 'to_port': to_port,
'parent_group_id': self.sg2['id'], 'group_id': self.sg1['id']
}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
expected_rule = {
'from_port': from_port,
'group': {'tenant_id': '123', 'name': 'test'},
'ip_protocol': proto, 'to_port': to_port, 'parent_group_id':
self.sg2['id'], 'ip_range': {}, 'id': security_group_rule['id']
}
self.assertEqual(proto, security_group_rule['ip_protocol'])
self.assertEqual(from_port, security_group_rule['from_port'])
self.assertEqual(to_port, security_group_rule['to_port'])
self.assertEqual(expected_rule, security_group_rule)
def test_create_with_ports_icmp(self):
self._test_create_with_ports('icmp', 0, 1)
self._test_create_with_ports('icmp', 0, 0)
self._test_create_with_ports('icmp', 1, 0)
def test_create_with_ports_tcp(self):
self._test_create_with_ports('tcp', 1, 1)
self._test_create_with_ports('tcp', 1, 65535)
self._test_create_with_ports('tcp', 65535, 65535)
def test_create_with_ports_udp(self):
self._test_create_with_ports('udp', 1, 1)
self._test_create_with_ports('udp', 1, 65535)
self._test_create_with_ports('udp', 65535, 65535)
def test_delete(self):
rule = security_group_rule_template(id=self.sg2['id'],
parent_group_id=self.sg2['id'])
def security_group_rule_get(context, id):
return security_group_rule_db(rule)
def security_group_rule_destroy(context, id):
pass
self.stubs.Set(nova.db, 'security_group_rule_get',
security_group_rule_get)
self.stubs.Set(nova.db, 'security_group_rule_destroy',
security_group_rule_destroy)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules/%s'
% self.sg2['id'])
self.controller.delete(req, self.sg2['id'])
def test_delete_invalid_rule_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules' +
'/invalid')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, 'invalid')
def test_delete_non_existing_rule_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules/%s'
% self.invalid_id)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, self.invalid_id)
def test_create_rule_quota_limit(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
for num in range(100, 100 + CONF.quota_security_group_rules):
rule = {
'ip_protocol': 'tcp', 'from_port': num,
'to_port': num, 'parent_group_id': self.sg2['id'],
'group_id': self.sg1['id']
}
self.controller.create(req, {'security_group_rule': rule})
rule = {
'ip_protocol': 'tcp', 'from_port': '121', 'to_port': '121',
'parent_group_id': self.sg2['id'], 'group_id': self.sg1['id']
}
self.assertRaises(webob.exc.HTTPForbidden, self.controller.create,
req, {'security_group_rule': rule})
def test_create_rule_cidr_allow_all(self):
rule = security_group_rule_template(cidr='0.0.0.0/0',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.parent_security_group['id'])
self.assertEqual(security_group_rule['ip_range']['cidr'],
"0.0.0.0/0")
def test_create_rule_cidr_ipv6_allow_all(self):
rule = security_group_rule_template(cidr='::/0',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.parent_security_group['id'])
self.assertEqual(security_group_rule['ip_range']['cidr'],
"::/0")
def test_create_rule_cidr_allow_some(self):
rule = security_group_rule_template(cidr='15.0.0.0/8',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.parent_security_group['id'])
self.assertEqual(security_group_rule['ip_range']['cidr'],
"15.0.0.0/8")
def test_create_rule_cidr_bad_netmask(self):
rule = security_group_rule_template(cidr='15.0.0.0/0')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
class TestSecurityGroupRulesV2(TestSecurityGroupRulesV21):
secgrp_ctl_cls = secgroups_v2.SecurityGroupRulesController
UUID1 = '00000000-0000-0000-0000-000000000001'
UUID2 = '00000000-0000-0000-0000-000000000002'
UUID3 = '00000000-0000-0000-0000-000000000003'
def fake_compute_get_all(*args, **kwargs):
base = {'id': 1, 'description': 'foo', 'user_id': 'bar',
'project_id': 'baz', 'deleted': False, 'deleted_at': None,
'updated_at': None, 'created_at': None}
db_list = [
fakes.stub_instance(
1, uuid=UUID1,
security_groups=[dict(base, **{'name': 'fake-0-0'}),
dict(base, **{'name': 'fake-0-1'})]),
fakes.stub_instance(
2, uuid=UUID2,
security_groups=[dict(base, **{'name': 'fake-1-0'}),
dict(base, **{'name': 'fake-1-1'})])
]
return instance_obj._make_instance_list(args[1],
objects.InstanceList(),
db_list,
['metadata', 'system_metadata',
'security_groups', 'info_cache'])
def fake_compute_get(*args, **kwargs):
inst = fakes.stub_instance(1, uuid=UUID3,
security_groups=[{'name': 'fake-2-0'},
{'name': 'fake-2-1'}])
return fake_instance.fake_instance_obj(args[1],
expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS, **inst)
def fake_compute_create(*args, **kwargs):
return ([fake_compute_get(*args, **kwargs)], '')
def fake_get_instances_security_groups_bindings(inst, context, servers):
groups = {UUID1: [{'name': 'fake-0-0'}, {'name': 'fake-0-1'}],
UUID2: [{'name': 'fake-1-0'}, {'name': 'fake-1-1'}],
UUID3: [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]}
result = {}
for server in servers:
result[server['id']] = groups.get(server['id'])
return result
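# These module-level fakes back the SecurityGroupsOutput* tests below: the
# compute API is stubbed so every server carries predictable 'fake-<i>-<j>'
# security group names for the response assertions to iterate over.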
class SecurityGroupsOutputTestV21(test.TestCase):
base_url = '/v2/fake/servers'
content_type = 'application/json'
def setUp(self):
super(SecurityGroupsOutputTestV21, self).setUp()
fakes.stub_out_nw_api(self.stubs)
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
self.stubs.Set(compute.api.API, 'create', fake_compute_create)
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Security_groups'])
self.app = self._setup_app()
def _setup_app(self):
return fakes.wsgi_app_v21(init_only=('os-security-groups', 'servers'))
def _make_request(self, url, body=None):
req = webob.Request.blank(url)
if body:
req.method = 'POST'
req.body = self._encode_body(body)
req.content_type = self.content_type
req.headers['Accept'] = self.content_type
res = req.get_response(self.app)
return res
def _encode_body(self, body):
return jsonutils.dumps(body)
def _get_server(self, body):
return jsonutils.loads(body).get('server')
def _get_servers(self, body):
return jsonutils.loads(body).get('servers')
def _get_groups(self, server):
return server.get('security_groups')
def test_create(self):
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
res = self._make_request(self.base_url, {'server': server})
self.assertEqual(res.status_int, 202)
server = self._get_server(res.body)
for i, group in enumerate(self._get_groups(server)):
name = 'fake-2-%s' % i
self.assertEqual(group.get('name'), name)
def test_show(self):
url = self.base_url + '/' + UUID3
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
server = self._get_server(res.body)
for i, group in enumerate(self._get_groups(server)):
name = 'fake-2-%s' % i
self.assertEqual(group.get('name'), name)
def test_detail(self):
url = self.base_url + '/detail'
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
for i, server in enumerate(self._get_servers(res.body)):
for j, group in enumerate(self._get_groups(server)):
name = 'fake-%s-%s' % (i, j)
self.assertEqual(group.get('name'), name)
def test_no_instance_passthrough_404(self):
def fake_compute_get(*args, **kwargs):
raise exception.InstanceNotFound(instance_id='fake')
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
url = self.base_url + '/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
res = self._make_request(url)
self.assertEqual(res.status_int, 404)
class SecurityGroupsOutputTestV2(SecurityGroupsOutputTestV21):
def _setup_app(self):
return fakes.wsgi_app(init_only=('servers',))
class SecurityGroupsOutputPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(SecurityGroupsOutputPolicyEnforcementV21, self).setUp()
self.controller = secgroups_v21.SecurityGroupsOutputController()
self.req = fakes.HTTPRequest.blank('')
self.rule_name = "os_compute_api:os-security-groups"
self.rule = {self.rule_name: "project:non_fake"}
self.policy.set_rules(self.rule)
def test_show_policy_failed(self):
self.controller.show(self.req, None, FAKE_UUID1)
def test_create_policy_failed(self):
self.controller.create(self.req, None, {})
def test_detail_policy_failed(self):
self.controller.detail(self.req, None)
class PolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(PolicyEnforcementV21, self).setUp()
self.req = fakes.HTTPRequest.blank('')
self.rule_name = "os_compute_api:os-security-groups"
self.rule = {self.rule_name: "project:non_fake"}
def _common_policy_check(self, func, *arg, **kwarg):
self.policy.set_rules(self.rule)
exc = self.assertRaises(
exception.PolicyNotAuthorized, func, *arg, **kwarg)
self.assertEqual(
"Policy doesn't allow %s to be performed." % self.rule_name,
exc.format_message())
class SecurityGroupPolicyEnforcementV21(PolicyEnforcementV21):
def setUp(self):
super(SecurityGroupPolicyEnforcementV21, self).setUp()
self.controller = secgroups_v21.SecurityGroupController()
def test_create_policy_failed(self):
self._common_policy_check(self.controller.create, self.req, {})
def test_show_policy_failed(self):
self._common_policy_check(self.controller.show, self.req, FAKE_UUID1)
def test_delete_policy_failed(self):
self._common_policy_check(self.controller.delete, self.req, FAKE_UUID1)
def test_index_policy_failed(self):
self._common_policy_check(self.controller.index, self.req)
def test_update_policy_failed(self):
self._common_policy_check(
self.controller.update, self.req, FAKE_UUID1, {})
class ServerSecurityGroupPolicyEnforcementV21(PolicyEnforcementV21):
def setUp(self):
super(ServerSecurityGroupPolicyEnforcementV21, self).setUp()
self.controller = secgroups_v21.ServerSecurityGroupController()
def test_index_policy_failed(self):
self._common_policy_check(self.controller.index, self.req, FAKE_UUID1)
class SecurityGroupRulesPolicyEnforcementV21(PolicyEnforcementV21):
def setUp(self):
super(SecurityGroupRulesPolicyEnforcementV21, self).setUp()
self.controller = secgroups_v21.SecurityGroupRulesController()
def test_create_policy_failed(self):
self._common_policy_check(self.controller.create, self.req, {})
def test_delete_policy_failed(self):
self._common_policy_check(self.controller.delete, self.req, FAKE_UUID1)
class SecurityGroupActionPolicyEnforcementV21(PolicyEnforcementV21):
def setUp(self):
super(SecurityGroupActionPolicyEnforcementV21, self).setUp()
self.controller = secgroups_v21.SecurityGroupActionController()
def test_add_security_group_policy_failed(self):
self._common_policy_check(
self.controller._addSecurityGroup, self.req, FAKE_UUID1, {})
def test_remove_security_group_policy_failed(self):
self._common_policy_check(
self.controller._removeSecurityGroup, self.req, FAKE_UUID1, {})
|
|
"""Event loop using a selector and related classes.
A selector is a "notify-when-ready" multiplexer. For a subclass which
also includes support for signal handling, see the unix_events sub-module.
"""
__all__ = ['BaseSelectorEventLoop']
import collections
import errno
import functools
import socket
import sys
try:
import ssl
from .py3_ssl import (wrap_ssl_error, SSLContext, SSLWantReadError,
SSLWantWriteError)
except ImportError: # pragma: no cover
ssl = None
from . import base_events
from . import constants
from . import events
from . import futures
from . import selectors
from . import sslproto
from . import transports
from .compat import flatten_bytes
from .log import logger
from .py33_exceptions import (wrap_error,
BlockingIOError, InterruptedError, ConnectionAbortedError, BrokenPipeError,
ConnectionResetError)
# On Mac OS 10.6 with Python 2.6.1 or OpenIndiana 148 with Python 2.6.4,
# _SelectorSslTransport._read_ready() hangs if the socket has no data.
# Example: test_events.test_create_server_ssl()
_SSL_REQUIRES_SELECT = (sys.version_info < (2, 6, 6))
if _SSL_REQUIRES_SELECT:
import select
def _get_socket_error(sock, address):
err = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
        # Raise so the caller's except clause handles the failed connect.
raise OSError(err, 'Connect call failed %s' % (address,))
def _test_selector_event(selector, fd, event):
# Test if the selector is monitoring 'event' events
# for the file descriptor 'fd'.
try:
key = selector.get_key(fd)
except KeyError:
return False
else:
return bool(key.events & event)
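# For example, _test_selector_event(loop._selector, sock.fileno(),
# selectors.EVENT_READ) answers "is this fd currently registered for read
# events?" without touching the handles stored in key.data.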
class BaseSelectorEventLoop(base_events.BaseEventLoop):
"""Selector event loop.
See events.EventLoop for API specification.
"""
def __init__(self, selector=None):
super(BaseSelectorEventLoop, self).__init__()
if selector is None:
selector = selectors.DefaultSelector()
logger.debug('Using selector: %s', selector.__class__.__name__)
self._selector = selector
self._make_self_pipe()
def _make_socket_transport(self, sock, protocol, waiter=None,
extra=None, server=None):
return _SelectorSocketTransport(self, sock, protocol, waiter,
extra, server)
def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter=None,
server_side=False, server_hostname=None,
extra=None, server=None):
if not sslproto._is_sslproto_available():
return self._make_legacy_ssl_transport(
rawsock, protocol, sslcontext, waiter,
server_side=server_side, server_hostname=server_hostname,
extra=extra, server=server)
ssl_protocol = sslproto.SSLProtocol(self, protocol, sslcontext, waiter,
server_side, server_hostname)
_SelectorSocketTransport(self, rawsock, ssl_protocol,
extra=extra, server=server)
return ssl_protocol._app_transport
def _make_legacy_ssl_transport(self, rawsock, protocol, sslcontext,
waiter, server_side=False,
server_hostname=None, extra=None,
server=None):
# Use the legacy API: SSL_write, SSL_read, etc. The legacy API is used
# on Python 3.4 and older, when ssl.MemoryBIO is not available.
return _SelectorSslTransport(
self, rawsock, protocol, sslcontext, waiter,
server_side, server_hostname, extra, server)
def _make_datagram_transport(self, sock, protocol,
address=None, waiter=None, extra=None):
return _SelectorDatagramTransport(self, sock, protocol,
address, waiter, extra)
def close(self):
if self.is_running():
raise RuntimeError("Cannot close a running event loop")
if self.is_closed():
return
self._close_self_pipe()
super(BaseSelectorEventLoop, self).close()
if self._selector is not None:
self._selector.close()
self._selector = None
def _socketpair(self):
raise NotImplementedError
def _close_self_pipe(self):
self.remove_reader(self._ssock.fileno())
self._ssock.close()
self._ssock = None
self._csock.close()
self._csock = None
self._internal_fds -= 1
def _make_self_pipe(self):
# A self-socket, really. :-)
self._ssock, self._csock = self._socketpair()
self._ssock.setblocking(False)
self._csock.setblocking(False)
self._internal_fds += 1
self.add_reader(self._ssock.fileno(), self._read_from_self)
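    # The self-pipe built above is how code running outside the loop thread
    # (e.g. call_soon_threadsafe() in the base class, or signal handlers in
    # the unix_events subclass) wakes the selector: writing a byte to _csock
    # makes _ssock readable, so the blocking select() call returns promptly.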
def _process_self_data(self, data):
pass
def _read_from_self(self):
while True:
try:
data = wrap_error(self._ssock.recv, 4096)
if not data:
break
self._process_self_data(data)
except InterruptedError:
continue
except BlockingIOError:
break
def _write_to_self(self):
# This may be called from a different thread, possibly after
# _close_self_pipe() has been called or even while it is
# running. Guard for self._csock being None or closed. When
# a socket is closed, send() raises OSError (with errno set to
# EBADF, but let's not rely on the exact error code).
csock = self._csock
if csock is not None:
try:
wrap_error(csock.send, b'\0')
except OSError:
if self._debug:
logger.debug("Fail to write a null byte into the "
"self-pipe socket",
exc_info=True)
def _start_serving(self, protocol_factory, sock,
sslcontext=None, server=None):
self.add_reader(sock.fileno(), self._accept_connection,
protocol_factory, sock, sslcontext, server)
def _accept_connection(self, protocol_factory, sock,
sslcontext=None, server=None):
try:
conn, addr = wrap_error(sock.accept)
if self._debug:
logger.debug("%r got a new connection from %r: %r",
server, addr, conn)
conn.setblocking(False)
except (BlockingIOError, InterruptedError, ConnectionAbortedError):
pass # False alarm.
except socket.error as exc:
# There's nowhere to send the error, so just log it.
if exc.errno in (errno.EMFILE, errno.ENFILE,
errno.ENOBUFS, errno.ENOMEM):
                # Some platforms (e.g. Linux) keep reporting the FD as
                # ready, so we remove the read handler temporarily.
# We'll try again in a while.
self.call_exception_handler({
'message': 'socket.accept() out of system resource',
'exception': exc,
'socket': sock,
})
self.remove_reader(sock.fileno())
self.call_later(constants.ACCEPT_RETRY_DELAY,
self._start_serving,
protocol_factory, sock, sslcontext, server)
else:
raise # The event loop will catch, log and ignore it.
else:
protocol = protocol_factory()
if sslcontext:
self._make_ssl_transport(
conn, protocol, sslcontext,
server_side=True, extra={'peername': addr}, server=server)
else:
self._make_socket_transport(
conn, protocol, extra={'peername': addr},
server=server)
# It's now up to the protocol to handle the connection.
def add_reader(self, fd, callback, *args):
"""Add a reader callback."""
self._check_closed()
handle = events.Handle(callback, args, self)
try:
key = self._selector.get_key(fd)
except KeyError:
self._selector.register(fd, selectors.EVENT_READ,
(handle, None))
else:
mask, (reader, writer) = key.events, key.data
self._selector.modify(fd, mask | selectors.EVENT_READ,
(handle, writer))
if reader is not None:
reader.cancel()
def remove_reader(self, fd):
"""Remove a reader callback."""
if self.is_closed():
return False
try:
key = self._selector.get_key(fd)
except KeyError:
return False
else:
mask, (reader, writer) = key.events, key.data
mask &= ~selectors.EVENT_READ
if not mask:
self._selector.unregister(fd)
else:
self._selector.modify(fd, mask, (None, writer))
if reader is not None:
reader.cancel()
return True
else:
return False
def add_writer(self, fd, callback, *args):
"""Add a writer callback.."""
self._check_closed()
handle = events.Handle(callback, args, self)
try:
key = self._selector.get_key(fd)
except KeyError:
self._selector.register(fd, selectors.EVENT_WRITE,
(None, handle))
else:
mask, (reader, writer) = key.events, key.data
self._selector.modify(fd, mask | selectors.EVENT_WRITE,
(reader, handle))
if writer is not None:
writer.cancel()
def remove_writer(self, fd):
"""Remove a writer callback."""
if self.is_closed():
return False
try:
key = self._selector.get_key(fd)
except KeyError:
return False
else:
mask, (reader, writer) = key.events, key.data
# Remove both writer and connector.
mask &= ~selectors.EVENT_WRITE
if not mask:
self._selector.unregister(fd)
else:
self._selector.modify(fd, mask, (reader, None))
if writer is not None:
writer.cancel()
return True
else:
return False
def sock_recv(self, sock, n):
"""Receive data from the socket.
The return value is a bytes object representing the data received.
The maximum amount of data to be received at once is specified by n.
This method is a coroutine.
"""
if self.get_debug() and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
fut = futures.Future(loop=self)
self._sock_recv(fut, False, sock, n)
return fut
def _sock_recv(self, fut, registered, sock, n):
# _sock_recv() can add itself as an I/O callback if the operation can't
# be done immediately. Don't use it directly, call sock_recv().
fd = sock.fileno()
if registered:
# Remove the callback early. It should be rare that the
# selector says the fd is ready but the call still returns
# EAGAIN, and I am willing to take a hit in that case in
# order to simplify the common case.
self.remove_reader(fd)
if fut.cancelled():
return
try:
data = wrap_error(sock.recv, n)
except (BlockingIOError, InterruptedError):
self.add_reader(fd, self._sock_recv, fut, True, sock, n)
except Exception as exc:
fut.set_exception(exc)
else:
fut.set_result(data)
def sock_sendall(self, sock, data):
"""Send data to the socket.
The socket must be connected to a remote socket. This method continues
to send the given data until either all of it has been sent or an
error occurs. None is returned on success. On error, an exception is
raised, and there is no way to determine how much data, if any, was
successfully processed by the receiving end of the connection.
This method is a coroutine.
"""
if self.get_debug() and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
fut = futures.Future(loop=self)
if data:
self._sock_sendall(fut, False, sock, data)
else:
fut.set_result(None)
return fut
def _sock_sendall(self, fut, registered, sock, data):
fd = sock.fileno()
if registered:
self.remove_writer(fd)
if fut.cancelled():
return
try:
n = wrap_error(sock.send, data)
except (BlockingIOError, InterruptedError):
n = 0
except Exception as exc:
fut.set_exception(exc)
return
if n == len(data):
fut.set_result(None)
else:
if n:
data = data[n:]
self.add_writer(fd, self._sock_sendall, fut, True, sock, data)
def sock_connect(self, sock, address):
"""Connect to a remote socket at address.
The address must be already resolved to avoid the trap of hanging the
entire event loop when the address requires doing a DNS lookup. For
example, it must be an IP address, not a hostname, for AF_INET and
AF_INET6 address families. Use getaddrinfo() to resolve the hostname
asynchronously.
This method is a coroutine.
"""
if self.get_debug() and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
fut = futures.Future(loop=self)
try:
base_events._check_resolved_address(sock, address)
except ValueError as err:
fut.set_exception(err)
else:
self._sock_connect(fut, sock, address)
return fut
def _sock_connect(self, fut, sock, address):
fd = sock.fileno()
try:
while True:
try:
wrap_error(sock.connect, address)
except InterruptedError:
continue
else:
break
except BlockingIOError:
fut.add_done_callback(functools.partial(self._sock_connect_done,
fd))
self.add_writer(fd, self._sock_connect_cb, fut, sock, address)
except Exception as exc:
fut.set_exception(exc)
else:
fut.set_result(None)
def _sock_connect_done(self, fd, fut):
self.remove_writer(fd)
def _sock_connect_cb(self, fut, sock, address):
if fut.cancelled():
return
try:
wrap_error(_get_socket_error, sock, address)
except (BlockingIOError, InterruptedError):
# socket is still registered, the callback will be retried later
pass
except Exception as exc:
fut.set_exception(exc)
else:
fut.set_result(None)
def sock_accept(self, sock):
"""Accept a connection.
The socket must be bound to an address and listening for connections.
The return value is a pair (conn, address) where conn is a new socket
object usable to send and receive data on the connection, and address
is the address bound to the socket on the other end of the connection.
This method is a coroutine.
"""
if self.get_debug() and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
fut = futures.Future(loop=self)
self._sock_accept(fut, False, sock)
return fut
def _sock_accept(self, fut, registered, sock):
fd = sock.fileno()
if registered:
self.remove_reader(fd)
if fut.cancelled():
return
try:
conn, address = wrap_error(sock.accept)
conn.setblocking(False)
except (BlockingIOError, InterruptedError):
self.add_reader(fd, self._sock_accept, fut, True, sock)
except Exception as exc:
fut.set_exception(exc)
else:
fut.set_result((conn, address))
def _process_events(self, event_list):
for key, mask in event_list:
fileobj, (reader, writer) = key.fileobj, key.data
if mask & selectors.EVENT_READ and reader is not None:
if reader._cancelled:
self.remove_reader(fileobj)
else:
self._add_callback(reader)
if mask & selectors.EVENT_WRITE and writer is not None:
if writer._cancelled:
self.remove_writer(fileobj)
else:
self._add_callback(writer)
def _stop_serving(self, sock):
self.remove_reader(sock.fileno())
sock.close()
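# --- Illustrative example (not part of the original module) -----------------
# A hedged sketch of how the future-returning socket helpers defined above
# (sock_connect, sock_sendall, sock_recv) can be chained with plain callbacks.
# It assumes `loop` is a concrete, running event loop built on
# BaseSelectorEventLoop and `address` is an already-resolved (host, port)
# pair; the function and callback names are hypothetical.
def _example_echo_request(loop, address, payload=b'ping'):
    """Connect, send ``payload``, print the first reply, then close."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setblocking(False)

    def on_reply(fut):
        print('received: %r' % (fut.result(),))
        sock.close()

    def on_sent(fut):
        fut.result()  # re-raise any send error
        loop.sock_recv(sock, 4096).add_done_callback(on_reply)

    def on_connected(fut):
        fut.result()  # re-raise any connect error
        loop.sock_sendall(sock, payload).add_done_callback(on_sent)

    loop.sock_connect(sock, address).add_done_callback(on_connected)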
class _SelectorTransport(transports._FlowControlMixin,
transports.Transport):
max_size = 256 * 1024 # Buffer size passed to recv().
_buffer_factory = bytearray # Constructs initial value for self._buffer.
def __init__(self, loop, sock, protocol, extra, server=None):
super(_SelectorTransport, self).__init__(extra, loop)
self._extra['socket'] = sock
self._extra['sockname'] = sock.getsockname()
if 'peername' not in self._extra:
try:
self._extra['peername'] = sock.getpeername()
except socket.error:
self._extra['peername'] = None
self._sock = sock
self._sock_fd = sock.fileno()
self._protocol = protocol
self._server = server
self._buffer = self._buffer_factory()
self._conn_lost = 0 # Set when call to connection_lost scheduled.
self._closing = False # Set when close() called.
if self._server is not None:
self._server._attach()
def __repr__(self):
info = [self.__class__.__name__]
if self._sock is None:
info.append('closed')
elif self._closing:
info.append('closing')
info.append('fd=%s' % self._sock_fd)
# test if the transport was closed
if self._loop is not None:
polling = _test_selector_event(self._loop._selector,
self._sock_fd, selectors.EVENT_READ)
if polling:
info.append('read=polling')
else:
info.append('read=idle')
polling = _test_selector_event(self._loop._selector,
self._sock_fd,
selectors.EVENT_WRITE)
if polling:
state = 'polling'
else:
state = 'idle'
bufsize = self.get_write_buffer_size()
info.append('write=<%s, bufsize=%s>' % (state, bufsize))
return '<%s>' % ' '.join(info)
def abort(self):
self._force_close(None)
def close(self):
if self._closing:
return
self._closing = True
self._loop.remove_reader(self._sock_fd)
if not self._buffer:
self._conn_lost += 1
self._loop.call_soon(self._call_connection_lost, None)
def _fatal_error(self, exc, message='Fatal error on transport'):
# Should be called from exception handler only.
if isinstance(exc, (BrokenPipeError,
ConnectionResetError, ConnectionAbortedError)):
if self._loop.get_debug():
logger.debug("%r: %s", self, message, exc_info=True)
else:
self._loop.call_exception_handler({
'message': message,
'exception': exc,
'transport': self,
'protocol': self._protocol,
})
self._force_close(exc)
def _force_close(self, exc):
if self._conn_lost:
return
if self._buffer:
del self._buffer[:]
self._loop.remove_writer(self._sock_fd)
if not self._closing:
self._closing = True
self._loop.remove_reader(self._sock_fd)
self._conn_lost += 1
self._loop.call_soon(self._call_connection_lost, exc)
def _call_connection_lost(self, exc):
try:
self._protocol.connection_lost(exc)
finally:
self._sock.close()
self._sock = None
self._protocol = None
self._loop = None
server = self._server
if server is not None:
server._detach()
self._server = None
def get_write_buffer_size(self):
return len(self._buffer)
class _SelectorSocketTransport(_SelectorTransport):
def __init__(self, loop, sock, protocol, waiter=None,
extra=None, server=None):
super(_SelectorSocketTransport, self).__init__(loop, sock, protocol, extra, server)
self._eof = False
self._paused = False
self._loop.add_reader(self._sock_fd, self._read_ready)
self._loop.call_soon(self._protocol.connection_made, self)
if waiter is not None:
# wait until protocol.connection_made() has been called
self._loop.call_soon(waiter._set_result_unless_cancelled, None)
def pause_reading(self):
if self._closing:
raise RuntimeError('Cannot pause_reading() when closing')
if self._paused:
raise RuntimeError('Already paused')
self._paused = True
self._loop.remove_reader(self._sock_fd)
if self._loop.get_debug():
logger.debug("%r pauses reading", self)
def resume_reading(self):
if not self._paused:
raise RuntimeError('Not paused')
self._paused = False
if self._closing:
return
self._loop.add_reader(self._sock_fd, self._read_ready)
if self._loop.get_debug():
logger.debug("%r resumes reading", self)
def _read_ready(self):
try:
data = wrap_error(self._sock.recv, self.max_size)
except (BlockingIOError, InterruptedError):
pass
except Exception as exc:
self._fatal_error(exc, 'Fatal read error on socket transport')
else:
if data:
self._protocol.data_received(data)
else:
if self._loop.get_debug():
logger.debug("%r received EOF", self)
keep_open = self._protocol.eof_received()
if keep_open:
# We're keeping the connection open so the
# protocol can write more, but we still can't
# receive more, so remove the reader callback.
self._loop.remove_reader(self._sock_fd)
else:
self.close()
def write(self, data):
data = flatten_bytes(data)
if self._eof:
raise RuntimeError('Cannot call write() after write_eof()')
if not data:
return
if self._conn_lost:
if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
logger.warning('socket.send() raised exception.')
self._conn_lost += 1
return
if not self._buffer:
# Optimization: try to send now.
try:
n = wrap_error(self._sock.send, data)
except (BlockingIOError, InterruptedError):
pass
except Exception as exc:
self._fatal_error(exc, 'Fatal write error on socket transport')
return
else:
data = data[n:]
if not data:
return
# Not all was written; register write handler.
self._loop.add_writer(self._sock_fd, self._write_ready)
# Add it to the buffer.
self._buffer.extend(data)
self._maybe_pause_protocol()
def _write_ready(self):
assert self._buffer, 'Data should not be empty'
data = flatten_bytes(self._buffer)
try:
n = wrap_error(self._sock.send, data)
except (BlockingIOError, InterruptedError):
pass
except Exception as exc:
self._loop.remove_writer(self._sock_fd)
del self._buffer[:]
self._fatal_error(exc, 'Fatal write error on socket transport')
else:
if n:
del self._buffer[:n]
self._maybe_resume_protocol() # May append to buffer.
if not self._buffer:
self._loop.remove_writer(self._sock_fd)
if self._closing:
self._call_connection_lost(None)
elif self._eof:
self._sock.shutdown(socket.SHUT_WR)
def write_eof(self):
if self._eof:
return
self._eof = True
if not self._buffer:
self._sock.shutdown(socket.SHUT_WR)
def can_write_eof(self):
return True
class _SelectorSslTransport(_SelectorTransport):
_buffer_factory = bytearray
def __init__(self, loop, rawsock, protocol, sslcontext, waiter=None,
server_side=False, server_hostname=None,
extra=None, server=None):
if ssl is None:
raise RuntimeError('stdlib ssl module not available')
if not sslcontext:
sslcontext = sslproto._create_transport_context(server_side,
server_hostname)
wrap_kwargs = {
'server_side': server_side,
'do_handshake_on_connect': False,
}
if server_hostname and not server_side:
wrap_kwargs['server_hostname'] = server_hostname
sslsock = sslcontext.wrap_socket(rawsock, **wrap_kwargs)
super(_SelectorSslTransport, self).__init__(loop, sslsock, protocol, extra, server)
self._server_hostname = server_hostname
self._waiter = waiter
self._sslcontext = sslcontext
self._paused = False
# SSL-specific extra info. (peercert is set later)
self._extra.update(sslcontext=sslcontext)
if self._loop.get_debug():
logger.debug("%r starts SSL handshake", self)
start_time = self._loop.time()
else:
start_time = None
self._on_handshake(start_time)
def _on_handshake(self, start_time):
try:
wrap_ssl_error(self._sock.do_handshake)
except SSLWantReadError:
self._loop.add_reader(self._sock_fd,
self._on_handshake, start_time)
return
except SSLWantWriteError:
self._loop.add_writer(self._sock_fd,
self._on_handshake, start_time)
return
except BaseException as exc:
if self._loop.get_debug():
logger.warning("%r: SSL handshake failed",
self, exc_info=True)
self._loop.remove_reader(self._sock_fd)
self._loop.remove_writer(self._sock_fd)
self._sock.close()
if self._waiter is not None and not self._waiter.cancelled():
self._waiter.set_exception(exc)
if isinstance(exc, Exception):
return
else:
raise
self._loop.remove_reader(self._sock_fd)
self._loop.remove_writer(self._sock_fd)
peercert = self._sock.getpeercert()
if not hasattr(self._sslcontext, 'check_hostname'):
# Verify hostname if requested, Python 3.4+ uses check_hostname
# and checks the hostname in do_handshake()
if (self._server_hostname and
self._sslcontext.verify_mode != ssl.CERT_NONE):
try:
ssl.match_hostname(peercert, self._server_hostname)
except Exception as exc:
if self._loop.get_debug():
logger.warning("%r: SSL handshake failed "
"on matching the hostname",
self, exc_info=True)
self._sock.close()
if (self._waiter is not None
and not self._waiter.cancelled()):
self._waiter.set_exception(exc)
return
# Add extra info that becomes available after handshake.
self._extra.update(peercert=peercert,
cipher=self._sock.cipher(),
)
if hasattr(self._sock, 'compression'):
self._extra['compression'] = self._sock.compression()
self._read_wants_write = False
self._write_wants_read = False
self._loop.add_reader(self._sock_fd, self._read_ready)
self._loop.call_soon(self._protocol.connection_made, self)
if self._waiter is not None:
# wait until protocol.connection_made() has been called
self._loop.call_soon(self._waiter._set_result_unless_cancelled,
None)
if self._loop.get_debug():
dt = self._loop.time() - start_time
logger.debug("%r: SSL handshake took %.1f ms", self, dt * 1e3)
def pause_reading(self):
# XXX This is a bit icky, given the comment at the top of
# _read_ready(). Is it possible to evoke a deadlock? I don't
# know, although it doesn't look like it; write() will still
# accept more data for the buffer and eventually the app will
# call resume_reading() again, and things will flow again.
if self._closing:
raise RuntimeError('Cannot pause_reading() when closing')
if self._paused:
raise RuntimeError('Already paused')
self._paused = True
self._loop.remove_reader(self._sock_fd)
if self._loop.get_debug():
logger.debug("%r pauses reading", self)
def resume_reading(self):
if not self._paused:
raise RuntimeError('Not paused')
self._paused = False
if self._closing:
return
self._loop.add_reader(self._sock_fd, self._read_ready)
if self._loop.get_debug():
logger.debug("%r resumes reading", self)
def _sock_recv(self):
return wrap_ssl_error(self._sock.recv, self.max_size)
def _read_ready(self):
if self._write_wants_read:
self._write_wants_read = False
self._write_ready()
if self._buffer:
self._loop.add_writer(self._sock_fd, self._write_ready)
try:
if _SSL_REQUIRES_SELECT:
rfds = (self._sock.fileno(),)
rfds = select.select(rfds, (), (), 0.0)[0]
if not rfds:
# False alarm.
return
data = wrap_error(self._sock_recv)
except (BlockingIOError, InterruptedError, SSLWantReadError):
pass
except SSLWantWriteError:
self._read_wants_write = True
self._loop.remove_reader(self._sock_fd)
self._loop.add_writer(self._sock_fd, self._write_ready)
except Exception as exc:
self._fatal_error(exc, 'Fatal read error on SSL transport')
else:
if data:
self._protocol.data_received(data)
else:
try:
if self._loop.get_debug():
logger.debug("%r received EOF", self)
keep_open = self._protocol.eof_received()
if keep_open:
logger.warning('returning true from eof_received() '
'has no effect when using ssl')
finally:
self.close()
def _write_ready(self):
if self._read_wants_write:
self._read_wants_write = False
self._read_ready()
if not (self._paused or self._closing):
self._loop.add_reader(self._sock_fd, self._read_ready)
if self._buffer:
data = flatten_bytes(self._buffer)
try:
n = wrap_error(self._sock.send, data)
except (BlockingIOError, InterruptedError, SSLWantWriteError):
n = 0
except SSLWantReadError:
n = 0
self._loop.remove_writer(self._sock_fd)
self._write_wants_read = True
except Exception as exc:
self._loop.remove_writer(self._sock_fd)
del self._buffer[:]
self._fatal_error(exc, 'Fatal write error on SSL transport')
return
if n:
del self._buffer[:n]
self._maybe_resume_protocol() # May append to buffer.
if not self._buffer:
self._loop.remove_writer(self._sock_fd)
if self._closing:
self._call_connection_lost(None)
def write(self, data):
data = flatten_bytes(data)
if not data:
return
if self._conn_lost:
if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
logger.warning('socket.send() raised exception.')
self._conn_lost += 1
return
if not self._buffer:
self._loop.add_writer(self._sock_fd, self._write_ready)
# Add it to the buffer.
self._buffer.extend(data)
self._maybe_pause_protocol()
def can_write_eof(self):
return False
class _SelectorDatagramTransport(_SelectorTransport):
_buffer_factory = collections.deque
def __init__(self, loop, sock, protocol, address=None,
waiter=None, extra=None):
super(_SelectorDatagramTransport, self).__init__(loop, sock,
protocol, extra)
self._address = address
self._loop.add_reader(self._sock_fd, self._read_ready)
self._loop.call_soon(self._protocol.connection_made, self)
if waiter is not None:
# wait until protocol.connection_made() has been called
self._loop.call_soon(waiter._set_result_unless_cancelled, None)
def get_write_buffer_size(self):
return sum(len(data) for data, _ in self._buffer)
def _read_ready(self):
try:
data, addr = wrap_error(self._sock.recvfrom, self.max_size)
except (BlockingIOError, InterruptedError):
pass
except OSError as exc:
self._protocol.error_received(exc)
except Exception as exc:
self._fatal_error(exc, 'Fatal read error on datagram transport')
else:
self._protocol.datagram_received(data, addr)
def sendto(self, data, addr=None):
data = flatten_bytes(data)
if not data:
return
if self._address and addr not in (None, self._address):
raise ValueError('Invalid address: must be None or %s' %
(self._address,))
if self._conn_lost and self._address:
if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
logger.warning('socket.send() raised exception.')
self._conn_lost += 1
return
if not self._buffer:
# Attempt to send it right away first.
try:
if self._address:
wrap_error(self._sock.send, data)
else:
wrap_error(self._sock.sendto, data, addr)
return
except (BlockingIOError, InterruptedError):
self._loop.add_writer(self._sock_fd, self._sendto_ready)
except OSError as exc:
self._protocol.error_received(exc)
return
except Exception as exc:
self._fatal_error(exc,
'Fatal write error on datagram transport')
return
# Ensure that what we buffer is immutable.
self._buffer.append((bytes(data), addr))
self._maybe_pause_protocol()
def _sendto_ready(self):
while self._buffer:
data, addr = self._buffer.popleft()
try:
if self._address:
wrap_error(self._sock.send, data)
else:
wrap_error(self._sock.sendto, data, addr)
except (BlockingIOError, InterruptedError):
self._buffer.appendleft((data, addr)) # Try again later.
break
except OSError as exc:
self._protocol.error_received(exc)
return
except Exception as exc:
self._fatal_error(exc,
'Fatal write error on datagram transport')
return
self._maybe_resume_protocol() # May append to buffer.
if not self._buffer:
self._loop.remove_writer(self._sock_fd)
if self._closing:
self._call_connection_lost(None)
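# --- Illustrative example (not part of the original module) -----------------
# A hedged sketch of the protocol contract the stream transports above rely
# on: _SelectorSocketTransport and _SelectorSslTransport call
# connection_made(), data_received(), eof_received() and connection_lost() on
# the protocol object they were given. The class name is hypothetical; real
# code would normally subclass the library's Protocol base class, which also
# provides the flow-control callbacks (pause_writing/resume_writing).
class _ExampleEchoProtocol(object):
    """Echo every received chunk back to the peer."""
    def connection_made(self, transport):
        self._transport = transport

    def data_received(self, data):
        # Invoked by the transport's _read_ready() for each chunk read.
        self._transport.write(data)

    def eof_received(self):
        # Returning a falsy value lets the transport close the connection.
        return False

    def connection_lost(self, exc):
        self._transport = None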
|
|
#!/usr/bin/env python
import sys, os, json
import shutil
from setuptools import setup, find_packages
from setuptools.command.develop import develop
from setuptools.command.install import install
from setuptools.command.sdist import sdist
import pyct.build
###############
### autover ###
def get_setup_version(reponame):
"""
Helper to get the current version from either git describe or the
.version file (if available).
"""
basepath = os.path.split(__file__)[0]
version_file_path = os.path.join(basepath, reponame, '.version')
try:
from param import version
except:
version = None
if version is not None:
return version.Version.setup_version(basepath, reponame, archive_commit="$Format:%h$")
else:
print("WARNING: param>=1.6.0 unavailable. If you are installing a package, this warning can safely be ignored. If you are creating a package or otherwise operating in a git repository, you should install param>=1.6.0.")
return json.load(open(version_file_path, 'r'))['version_string']
#######################
### bokeh extension ###
def _build_geoviewsjs():
from bokeh.ext import build
print("Building custom models:")
geoviews_dir = os.path.join(os.path.dirname(__file__), "geoviews")
build(geoviews_dir)
class CustomDevelopCommand(develop):
"""Custom installation for development mode."""
def run(self):
_build_geoviewsjs()
develop.run(self)
class CustomInstallCommand(install):
"""Custom installation for install mode."""
def run(self):
_build_geoviewsjs()
install.run(self)
class CustomSdistCommand(sdist):
"""Custom installation for sdist mode."""
def run(self):
_build_geoviewsjs()
sdist.run(self)
_COMMANDS = {
'develop': CustomDevelopCommand,
'install': CustomInstallCommand,
'sdist': CustomSdistCommand,
}
try:
from wheel.bdist_wheel import bdist_wheel
class CustomBdistWheelCommand(bdist_wheel):
"""Custom bdist_wheel command to force cancelling qiskit-terra wheel
creation."""
def run(self):
"""Do nothing so the command intentionally fails."""
_build_geoviewsjs()
bdist_wheel.run(self)
_COMMANDS['bdist_wheel'] = CustomBdistWheelCommand
except:
pass
####################
### dependencies ###
_required = [
'bokeh >=2.3.0,<2.4.0',
'cartopy >=0.18.0',
'holoviews >=1.14.2'
]
_recommended = [
'datashader',
'geopandas',
'gdal',
'netcdf4',
'jupyter',
'matplotlib>2.2',
'pandas',
'pyct',
'scipy',
'shapely',
'xarray',
]
# can only currently run all examples with packages from conda-forge
_examples_extra = _recommended + [
'iris',
'xesmf',
'mock'
]
extras_require={
'recommended': _recommended,
'examples_extra': _examples_extra,
'doc': _examples_extra + [
'nbsite >=0.6.1',
'sphinx_holoviz_theme',
'selenium',
],
'tests': [
'pytest-cov',
'codecov',
'flake8',
'nbsmoke >=0.2.0',
'ipython >=7.0',
'nose',
'pytest'
],
}
extras_require['all'] = sorted(set(sum(extras_require.values(), [])))
# until pyproject.toml/equivalent is widely supported; meanwhile
# setup_requires doesn't work well with pip. Note: deliberately omitted from all.
extras_require['build'] = [
'param >=1.9.2',
'pyct >=0.4.4',
'bokeh >=2.3.0,<2.4.0',
'pyviz_comms >=0.6.0'
]
########################
### package metadata ###
setup_args = dict(
name='geoviews',
version=get_setup_version("geoviews"),
python_requires = '>=3.6',
install_requires = _required,
extras_require = extras_require,
tests_require = extras_require['tests'],
description='GeoViews is a Python library that makes it easy to explore and visualize geographical, meteorological, and oceanographic datasets, such as those used in weather, climate, and remote sensing research.',
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
platforms=['Windows', 'Mac OS X', 'Linux'],
license='BSD 3-Clause',
url='https://geoviews.org',
cmdclass=_COMMANDS,
packages=find_packages(),
include_package_data=True,
entry_points={
'console_scripts': [
'geoviews = geoviews.__main__:main'
]
},
classifiers = [
"License :: OSI Approved :: BSD License",
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Operating System :: OS Independent",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Natural Language :: English",
"Framework :: Matplotlib",
"Topic :: Scientific/Engineering",
"Topic :: Software Development :: Libraries"
]
)
if __name__=="__main__":
example_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'geoviews','examples')
if 'develop' not in sys.argv and 'egg_info' not in sys.argv:
pyct.build.examples(example_path, __file__, force=True)
setup(**setup_args)
if os.path.isdir(example_path):
shutil.rmtree(example_path)
|
|
import base64
import os.path
from django.conf import settings
from django.core import mail
import mock
from nose import SkipTest
from nose.tools import eq_, ok_
import mkt
from mkt.comm.models import CommunicationThread, CommunicationThreadToken
from mkt.comm.tests.test_views import CommTestMixin
from mkt.comm.utils import create_comm_note
from mkt.comm.utils_mail import CommEmailParser, save_from_email_reply
from mkt.constants import comm
from mkt.site.fixtures import fixture
from mkt.site.tests import TestCase, user_factory
from mkt.site.utils import app_factory, extension_factory
from mkt.users.models import UserProfile
sample_email = os.path.join(settings.ROOT, 'mkt', 'comm', 'tests', 'emails',
'email.txt')
multi_email = os.path.join(settings.ROOT, 'mkt', 'comm', 'tests', 'emails',
'email_multipart.txt')
quopri_email = os.path.join(settings.ROOT, 'mkt', 'comm', 'tests', 'emails',
'email_quoted_printable.txt')
attach_email = os.path.join(settings.ROOT, 'mkt', 'comm', 'tests', 'emails',
'email_attachment.txt')
attach_email2 = os.path.join(settings.ROOT, 'mkt', 'comm', 'tests', 'emails',
'email_attachment2.txt')
class TestSendMailComm(TestCase, CommTestMixin):
def setUp(self):
self.developer = user_factory()
self.mozilla_contact = user_factory()
self.reviewer = user_factory()
self.senior_reviewer = user_factory()
self.grant_permission(self.senior_reviewer, '*:*',
'Senior App Reviewers')
self.app = app_factory()
self.app.addonuser_set.create(user=self.developer)
self.app.update(mozilla_contact=self.mozilla_contact.email)
def _create(self, note_type, author=None):
author = author or self.reviewer
return create_comm_note(self.app, self.app.current_version, author,
'Test Comment', note_type=note_type)
def _recipients(self, email_mock):
recipients = []
for call in email_mock.call_args_list:
recipients += call[1]['recipient_list']
return recipients
def _check_template(self, call, template):
eq_(call[0][1], 'comm/emails/%s.html' % template)
@mock.patch('mkt.comm.utils_mail.send_mail_jinja')
def test_approval(self, email):
self._create(comm.APPROVAL)
eq_(email.call_count, 2)
recipients = self._recipients(email)
assert self.developer.email in recipients
assert self.mozilla_contact.email in recipients
self._check_template(email.call_args, 'approval')
@mock.patch('mkt.comm.utils_mail.send_mail_jinja')
def test_rejection(self, email):
self._create(comm.REJECTION)
eq_(email.call_count, 2)
recipients = self._recipients(email)
assert self.developer.email in recipients
assert self.mozilla_contact.email in recipients
self._check_template(email.call_args, 'rejection')
@mock.patch('mkt.comm.utils_mail.send_mail_jinja')
def test_escalation(self, email):
self._create(comm.ESCALATION)
eq_(email.call_count, 2)
recipients = self._recipients(email)
assert self.developer.email in recipients
assert self.senior_reviewer.email in recipients
self._check_template(email.call_args_list[0],
'escalation_senior_reviewer')
self._check_template(email.call_args_list[1],
'escalation_developer')
@mock.patch('mkt.comm.utils_mail.send_mail_jinja')
def test_escalation_vip_app(self, email):
self._create(comm.ESCALATION_VIP_APP)
eq_(email.call_count, 1)
recipients = self._recipients(email)
assert self.senior_reviewer.email in recipients
self._check_template(email.call_args,
'escalation_vip')
@mock.patch('mkt.comm.utils_mail.send_mail_jinja')
def test_escalation_prerelease_app(self, email):
self._create(comm.ESCALATION_PRERELEASE_APP)
eq_(email.call_count, 1)
recipients = self._recipients(email)
assert self.senior_reviewer.email in recipients
self._check_template(email.call_args,
'escalation_prerelease_app')
@mock.patch('mkt.comm.utils_mail.send_mail_jinja')
def test_reviewer_comment(self, email):
another_reviewer = user_factory()
self._create(comm.REVIEWER_COMMENT, author=self.reviewer)
self._create(comm.REVIEWER_COMMENT, author=another_reviewer)
eq_(email.call_count, 3)
recipients = self._recipients(email)
assert self.reviewer.email in recipients
assert self.mozilla_contact.email in recipients
assert self.developer.email not in recipients
self._check_template(email.call_args, 'generic')
@mock.patch('mkt.comm.utils_mail.send_mail_jinja')
def test_developer_comment(self, email):
self._create(comm.REVIEWER_COMMENT)
self._create(comm.DEVELOPER_COMMENT, author=self.developer)
eq_(email.call_count, 4)
recipients = self._recipients(email)
assert self.mozilla_contact.email in recipients
assert self.reviewer.email in recipients
assert self.developer.email not in recipients
assert settings.MKT_REVIEWS_EMAIL in recipients
self._check_template(email.call_args, 'generic')
@mock.patch('mkt.comm.utils_mail.send_mail_jinja')
def test_additional_review(self, email):
self._create(comm.ADDITIONAL_REVIEW_PASSED)
eq_(email.call_count, 2)
recipients = self._recipients(email)
assert self.mozilla_contact.email in recipients
assert self.developer.email in recipients
self._check_template(email.call_args, 'tarako')
def test_mail_templates_exist(self):
for note_type in comm.COMM_MAIL_MAP:
self._create(note_type)
for note_type in comm.EMAIL_SENIOR_REVIEWERS_AND_DEV:
self._create(note_type)
self._create(comm.NO_ACTION)
def test_email_formatting(self):
"""
Run this test manually if you want to spot-check that every email is
formatted nicely and consistently. It prints each note type's email
once.
"""
raise SkipTest
for note_type in comm.COMM_MAIL_MAP:
self._create(note_type)
email_subjects = []
for email in mail.outbox:
if email.subject in email_subjects:
continue
email_subjects.append(email.subject)
print '##### %s #####' % email.subject
print email.body
@mock.patch('mkt.comm.utils_mail.send_mail_jinja')
def test_reply_to(self, email):
note, thread = self._create(comm.APPROVAL)
reply_to = email.call_args_list[1][1]['headers']['Reply-To']
ok_(reply_to.startswith('commreply+'))
ok_(reply_to.endswith('marketplace.firefox.com'))
class TestSendMailCommExtensions(TestCase, CommTestMixin):
def setUp(self):
self.developer = user_factory()
self.reviewer = user_factory()
self.extension = extension_factory()
self.developer.extension_set.add(self.extension)
def _create(self, note_type, author=None):
author = author or self.reviewer
return create_comm_note(
self.extension, self.extension.latest_version, author,
'Test Comment', note_type=note_type)
def _recipients(self, email_mock):
recipients = []
for call in email_mock.call_args_list:
recipients += call[1]['recipient_list']
return recipients
def _check_template(self, call, template):
eq_(call[0][1], 'comm/emails/%s.html' % template)
@mock.patch('mkt.comm.utils_mail.send_mail_jinja')
def test_approval(self, email):
self._create(comm.APPROVAL)
eq_(email.call_count, 1)
recipients = self._recipients(email)
assert self.developer.email in recipients
self._check_template(email.call_args, 'approval')
@mock.patch('mkt.comm.utils_mail.send_mail_jinja')
def test_rejection(self, email):
self._create(comm.REJECTION)
eq_(email.call_count, 1)
recipients = self._recipients(email)
assert self.developer.email in recipients
self._check_template(email.call_args, 'rejection')
@mock.patch('mkt.comm.utils_mail.send_mail_jinja')
def test_reviewer_comment(self, email):
another_reviewer = user_factory()
self._create(comm.REVIEWER_COMMENT, author=self.reviewer)
self._create(comm.REVIEWER_COMMENT, author=another_reviewer)
eq_(email.call_count, 1)
recipients = self._recipients(email)
assert self.reviewer.email in recipients
assert self.developer.email not in recipients
self._check_template(email.call_args, 'generic')
@mock.patch('mkt.comm.utils_mail.send_mail_jinja')
def test_developer_comment(self, email):
self._create(comm.REVIEWER_COMMENT)
self._create(comm.DEVELOPER_COMMENT, author=self.developer)
eq_(email.call_count, 2)
recipients = self._recipients(email)
assert self.reviewer.email in recipients
assert settings.MKT_REVIEWS_EMAIL in recipients
assert self.developer.email not in recipients
self._check_template(email.call_args, 'generic')
@mock.patch('mkt.comm.utils_mail.send_mail_jinja')
def test_reply_to(self, email):
note, thread = self._create(comm.APPROVAL)
reply_to = email.call_args_list[0][1]['headers']['Reply-To']
ok_(reply_to.startswith('commreply+'))
ok_(reply_to.endswith('marketplace.firefox.com'))
class TestEmailReplySaving(TestCase):
fixtures = fixture('user_999')
def setUp(self):
self.app = app_factory(name='Antelope', status=mkt.STATUS_PENDING)
self.profile = UserProfile.objects.get(pk=999)
t = CommunicationThread.objects.create(
_addon=self.app, _version=self.app.current_version,
read_permission_reviewer=True)
self.token = CommunicationThreadToken.objects.create(
thread=t, user=self.profile)
self.token.update(uuid='5a0b8a83d501412589cc5d562334b46b')
self.email_base64 = open(sample_email).read()
self.grant_permission(self.profile, 'Apps:Review')
def test_successful_save(self):
note = save_from_email_reply(self.email_base64)
eq_(note.body, 'test note 5\n')
def test_developer_comment(self):
self.profile.addonuser_set.create(addon=self.app)
note = save_from_email_reply(self.email_base64)
eq_(note.note_type, comm.DEVELOPER_COMMENT)
def test_reviewer_comment(self):
self.grant_permission(self.profile, 'Apps:Review')
note = save_from_email_reply(self.email_base64)
eq_(note.note_type, comm.REVIEWER_COMMENT)
def test_with_max_count_token(self):
# Test with an invalid token.
self.token.update(use_count=comm.MAX_TOKEN_USE_COUNT + 1)
assert not save_from_email_reply(self.email_base64)
def test_with_unpermitted_token(self):
"""Test when the token's user does not have a permission on thread."""
self.profile.groupuser_set.filter(
group__rules__contains='Apps:Review').delete()
assert not save_from_email_reply(self.email_base64)
def test_non_existent_token(self):
self.token.update(uuid='youtube?v=wn4RP57Y7bw')
assert not save_from_email_reply(self.email_base64)
def test_with_invalid_msg(self):
assert not save_from_email_reply('youtube?v=WwJjts9FzxE')
class TestEmailReplySavingExtensions(TestCase):
fixtures = fixture('user_999')
def setUp(self):
self.extension = extension_factory()
self.profile = UserProfile.objects.get(pk=999)
t = CommunicationThread.objects.create(
_extension=self.extension,
_extension_version=self.extension.latest_version,
read_permission_reviewer=True)
self.token = CommunicationThreadToken.objects.create(
thread=t, user=self.profile)
self.token.update(uuid='5a0b8a83d501412589cc5d562334b46b')
self.email_base64 = open(sample_email).read()
self.grant_permission(self.profile, 'Apps:Review')
def test_successful_save(self):
note = save_from_email_reply(self.email_base64)
eq_(note.body, 'test note 5\n')
def test_developer_comment(self):
self.profile.extension_set.add(self.extension)
note = save_from_email_reply(self.email_base64)
eq_(note.note_type, comm.DEVELOPER_COMMENT)
def test_reviewer_comment(self):
self.grant_permission(self.profile, 'Apps:Review')
note = save_from_email_reply(self.email_base64)
eq_(note.note_type, comm.REVIEWER_COMMENT)
def test_with_max_count_token(self):
# Test with an invalid token.
self.token.update(use_count=comm.MAX_TOKEN_USE_COUNT + 1)
assert not save_from_email_reply(self.email_base64)
def test_with_unpermitted_token(self):
"""Test when the token's user does not have a permission on thread."""
self.profile.groupuser_set.filter(
group__rules__contains='Apps:Review').delete()
assert not save_from_email_reply(self.email_base64)
def test_non_existent_token(self):
self.token.update(uuid='youtube?v=wn4RP57Y7bw')
assert not save_from_email_reply(self.email_base64)
def test_with_invalid_msg(self):
assert not save_from_email_reply('youtube?v=WwJjts9FzxE')
class TestEmailParser(TestCase):
def test_basic_email(self):
email_text = open(sample_email).read()
parser = CommEmailParser(email_text)
eq_(parser.get_uuid(), '5a0b8a83d501412589cc5d562334b46b')
eq_(parser.get_body(), 'test note 5\n')
def test_multipart(self):
email = open(multi_email).read()
payload = base64.standard_b64encode(email)
parser = CommEmailParser(payload)
eq_(parser.get_body(), 'this is the body text\n')
eq_(parser.get_uuid(), 'abc123')
def test_quoted_printable(self):
email = open(quopri_email).read()
payload = base64.standard_b64encode(email)
parser = CommEmailParser(payload)
body = parser.get_body()
ok_('Yo,\n\nas it is open source' in body)
ok_('=20' not in body)
ok_('[email protected]' not in body)
def test_with_attachments(self):
for email_path in (attach_email, attach_email2):
email = open(email_path).read()
payload = base64.standard_b64encode(email)
parser = CommEmailParser(payload)
body = parser.get_body()
ok_('Body inspection' in body)
eq_(parser.get_uuid(), 'abc123')
class TestEmailNonUsers(TestCase, CommTestMixin):
def setUp(self):
self.app = app_factory(
mozilla_contact='[email protected], [email protected],')
self.author = user_factory()
def _create(self):
return create_comm_note(self.app, self.app.current_version,
self.author, '@ngokevin_')
def _recipients(self, email_mock):
recipients = []
for call in email_mock.call_args_list:
recipients += call[1]['recipient_list']
return recipients
@mock.patch('mkt.comm.utils_mail.send_mail_jinja')
def test_basic(self, email):
thread, note = self._create()
# One for Tobias, one for Maebe.
eq_(email.call_count, 2)
eq_(thread.thread_cc.count(), 1)
recipients = self._recipients(email)
assert self.author.email not in recipients
assert '[email protected]' in recipients
assert '[email protected]' in recipients
for call in email.call_args_list:
ok_('Reply-To' not in call[1]['headers'])
|
|
# -*- coding: utf-8 -*-
# Authors: Eric Larson <[email protected]>
#
# License: BSD (3-clause)
from datetime import datetime, timezone, timedelta
import json
import sys
import tempfile
from shutil import rmtree
from os import path as op
import numpy as np
# Adapted from six
PY3 = sys.version_info[0] == 3
text_type = str if PY3 else unicode # noqa
string_types = str if PY3 else basestring # noqa
special_chars = {'{FWDSLASH}': '/'}
tab_str = '----'
def _import_sparse():
try:
from scipy import sparse
except ImportError:
sparse = None
return sparse
##############################################################################
# WRITING
def _check_h5py():
"""Helper to check if h5py is installed"""
try:
import h5py
except ImportError:
raise ImportError('the h5py module is required to use HDF5 I/O')
return h5py
def _create_titled_group(root, key, title):
"""Helper to create a titled group in h5py"""
out = root.create_group(key)
out.attrs['TITLE'] = title
return out
def _create_titled_dataset(root, key, title, data, comp_kw=None):
"""Helper to create a titled dataset in h5py"""
comp_kw = {} if comp_kw is None else comp_kw
out = root.create_dataset(key, data=data, **comp_kw)
out.attrs['TITLE'] = title
return out
def _create_pandas_dataset(fname, root, key, title, data):
h5py = _check_h5py()
rootpath = '/'.join([root, key])
data.to_hdf(fname, rootpath)
with h5py.File(fname, mode='a') as fid:
fid[rootpath].attrs['TITLE'] = 'pd_dataframe'
def write_hdf5(fname, data, overwrite=False, compression=4,
title='h5io', slash='error', use_json=False):
"""Write python object to HDF5 format using h5py.
Parameters
----------
fname : str
Filename to use.
data : object
Object to write. Can be of any of these types:
{ndarray, dict, list, tuple, int, float, str, Datetime}
Note that dict objects must only have ``str`` keys. It is recommended
to use ndarrays where possible, as they are handled most efficiently.
overwrite : True | False | 'update'
If True, overwrite file (if it exists). If 'update', appends the title
to the file (or replace value if title exists).
compression : int
Compression level to use (0-9) to compress data using gzip.
title : str
The top-level directory name to use. Typically it is useful to make
this your package name, e.g. ``'mnepython'``.
slash : 'error' | 'replace'
Whether to replace forward-slashes ('/') in any key found nested within
keys in data. This does not apply to the top level name (title).
If 'error', '/' is not allowed in any lower-level keys.
use_json : bool
If True, small dictionaries and lists are serialized to JSON and stored
as strings, which accelerates read and write performance.
"""
h5py = _check_h5py()
mode = 'w'
if op.isfile(fname):
if isinstance(overwrite, string_types):
if overwrite != 'update':
raise ValueError('overwrite must be "update" or a bool')
mode = 'a'
elif not overwrite:
raise IOError('file "%s" exists, use overwrite=True to overwrite'
% fname)
if not isinstance(title, string_types):
raise ValueError('title must be a string')
comp_kw = dict()
if compression > 0:
comp_kw = dict(compression='gzip', compression_opts=compression)
with h5py.File(fname, mode=mode) as fid:
if title in fid:
del fid[title]
cleanup_data = []
_triage_write(title, data, fid, comp_kw, str(type(data)),
cleanup_data, slash=slash, title=title,
use_json=use_json)
# Will not be empty if any extra data to be written
for data in cleanup_data:
# In case different extra I/O needs different inputs
title = list(data.keys())[0]
if title in ['pd_dataframe', 'pd_series']:
rootname, key, value = data[title]
_create_pandas_dataset(fname, rootname, key, title, value)
def _triage_write(key, value, root, comp_kw, where,
cleanup_data, slash='error', title=None,
use_json=False):
sparse = _import_sparse()
if key != title and '/' in key:
if slash == 'error':
raise ValueError('Found a key with "/", '
'this is not allowed if slash == error')
elif slash == 'replace':
# Auto-replace keys with proper values
for key_spec, val_spec in special_chars.items():
key = key.replace(val_spec, key_spec)
else:
raise ValueError("slash must be one of ['error', 'replace'")
if use_json and isinstance(value, (list, dict)) and \
_json_compatible(value, slash=slash):
value = np.frombuffer(json.dumps(value).encode('utf-8'), np.uint8)
_create_titled_dataset(root, key, 'json', value, comp_kw)
elif isinstance(value, dict):
sub_root = _create_titled_group(root, key, 'dict')
for key, sub_value in value.items():
if not isinstance(key, string_types):
raise TypeError('All dict keys must be strings')
_triage_write(
'key_{0}'.format(key), sub_value, sub_root, comp_kw,
where + '["%s"]' % key, cleanup_data=cleanup_data, slash=slash)
elif isinstance(value, (list, tuple)):
title = 'list' if isinstance(value, list) else 'tuple'
sub_root = _create_titled_group(root, key, title)
for vi, sub_value in enumerate(value):
_triage_write(
'idx_{0}'.format(vi), sub_value, sub_root, comp_kw,
where + '[%s]' % vi, cleanup_data=cleanup_data, slash=slash)
elif isinstance(value, type(None)):
_create_titled_dataset(root, key, 'None', [False])
elif isinstance(value, (int, float)):
if isinstance(value, int):
title = 'int'
else: # isinstance(value, float):
title = 'float'
_create_titled_dataset(root, key, title, np.atleast_1d(value))
elif isinstance(value, datetime):
title = 'datetime'
value = np.frombuffer(value.isoformat().encode('utf-8'), np.uint8)
_create_titled_dataset(root, key, title, value)
elif isinstance(value, (np.integer, np.floating, np.bool_)):
title = 'np_{0}'.format(value.__class__.__name__)
_create_titled_dataset(root, key, title, np.atleast_1d(value))
elif isinstance(value, string_types):
if isinstance(value, text_type): # unicode
value = np.frombuffer(value.encode('utf-8'), np.uint8)
title = 'unicode'
else:
value = np.frombuffer(value.encode('ASCII'), np.uint8)
title = 'ascii'
_create_titled_dataset(root, key, title, value, comp_kw)
elif isinstance(value, np.ndarray):
if not (value.dtype == np.dtype('object') and
len(set([sub.dtype for sub in value])) == 1):
_create_titled_dataset(root, key, 'ndarray', value)
else:
ma_index, ma_data = multiarray_dump(value)
sub_root = _create_titled_group(root, key, 'multiarray')
_create_titled_dataset(sub_root, 'index', 'ndarray', ma_index)
_create_titled_dataset(sub_root, 'data', 'ndarray', ma_data)
elif sparse is not None and isinstance(value, sparse.csc_matrix):
sub_root = _create_titled_group(root, key, 'csc_matrix')
_triage_write('data', value.data, sub_root, comp_kw,
where + '.csc_matrix_data', cleanup_data=cleanup_data,
slash=slash)
_triage_write('indices', value.indices, sub_root, comp_kw,
where + '.csc_matrix_indices', cleanup_data=cleanup_data,
slash=slash)
_triage_write('indptr', value.indptr, sub_root, comp_kw,
where + '.csc_matrix_indptr', cleanup_data=cleanup_data,
slash=slash)
elif sparse is not None and isinstance(value, sparse.csr_matrix):
sub_root = _create_titled_group(root, key, 'csr_matrix')
_triage_write('data', value.data, sub_root, comp_kw,
where + '.csr_matrix_data', cleanup_data=cleanup_data,
slash=slash)
_triage_write('indices', value.indices, sub_root, comp_kw,
where + '.csr_matrix_indices', cleanup_data=cleanup_data,
slash=slash)
_triage_write('indptr', value.indptr, sub_root, comp_kw,
where + '.csr_matrix_indptr', cleanup_data=cleanup_data,
slash=slash)
_triage_write('shape', value.shape, sub_root, comp_kw,
where + '.csr_matrix_shape', cleanup_data=cleanup_data,
slash=slash)
else:
try:
from pandas import DataFrame, Series
except ImportError:
pass
else:
if isinstance(value, (DataFrame, Series)):
if isinstance(value, DataFrame):
title = 'pd_dataframe'
else:
title = 'pd_series'
rootname = root.name
cleanup_data.append({title: (rootname, key, value)})
return
err_str = 'unsupported type %s (in %s)' % (type(value), where)
raise TypeError(err_str)
##############################################################################
# READING
def read_hdf5(fname, title='h5io', slash='ignore'):
"""Read python object from HDF5 format using h5py
Parameters
----------
fname : str
File to load.
title : str
The top-level directory name to use. Typically it is useful to make
this your package name, e.g. ``'mnepython'``.
slash : 'ignore' | 'replace'
Whether to replace the string {FWDSLASH} with the value /. This does
not apply to the top level name (title). If 'ignore', nothing will be
replaced.
Returns
-------
data : object
The loaded data. Can be of any type supported by ``write_hdf5``.
"""
h5py = _check_h5py()
if not op.isfile(fname):
raise IOError('file "%s" not found' % fname)
if not isinstance(title, string_types):
raise ValueError('title must be a string')
with h5py.File(fname, mode='r') as fid:
if title not in fid:
raise ValueError('no "%s" data found' % title)
if isinstance(fid[title], h5py.Group):
if 'TITLE' not in fid[title].attrs:
raise ValueError('no "%s" data found' % title)
data = _triage_read(fid[title], slash=slash)
return data
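# --- Illustrative example (not part of the original module) -----------------
# A hedged sketch of a write_hdf5()/read_hdf5() round trip. The file name and
# payload are arbitrary, h5py must be installed, and the helper name is
# hypothetical.
def _example_hdf5_roundtrip(fname='example_h5io.h5'):
    payload = {'values': np.arange(5.0), 'meta': {'name': 'demo', 'count': 3}}
    write_hdf5(fname, payload, overwrite=True, title='h5io')
    restored = read_hdf5(fname, title='h5io')
    # object_diff() (defined below) returns an empty string when the two
    # structures are identical.
    assert object_diff(payload, restored) == ''
    return restored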
def _triage_read(node, slash='ignore'):
if slash not in ['ignore', 'replace']:
raise ValueError("slash must be one of 'replace', 'ignore'")
h5py = _check_h5py()
sparse = _import_sparse()
type_str = node.attrs['TITLE']
if isinstance(type_str, bytes):
type_str = type_str.decode()
if isinstance(node, h5py.Group):
if type_str == 'dict':
data = dict()
for key, subnode in node.items():
if slash == 'replace':
for key_spec, val_spec in special_chars.items():
key = key.replace(key_spec, val_spec)
data[key[4:]] = _triage_read(subnode, slash=slash)
elif type_str in ['list', 'tuple']:
data = list()
ii = 0
while True:
subnode = node.get('idx_{0}'.format(ii), None)
if subnode is None:
break
data.append(_triage_read(subnode, slash=slash))
ii += 1
assert len(data) == ii
data = tuple(data) if type_str == 'tuple' else data
return data
elif type_str == 'csc_matrix':
if sparse is None:
raise RuntimeError('scipy must be installed to read this data')
data = sparse.csc_matrix((_triage_read(node['data'], slash=slash),
_triage_read(node['indices'],
slash=slash),
_triage_read(node['indptr'],
slash=slash)))
elif type_str == 'csr_matrix':
if sparse is None:
raise RuntimeError('scipy must be installed to read this data')
data = sparse.csr_matrix((_triage_read(node['data'], slash=slash),
_triage_read(node['indices'],
slash=slash),
_triage_read(node['indptr'],
slash=slash)),
shape=_triage_read(node['shape']))
elif type_str in ['pd_dataframe', 'pd_series']:
from pandas import read_hdf, HDFStore
rootname = node.name
filename = node.file.filename
with HDFStore(filename, 'r') as tmpf:
data = read_hdf(tmpf, rootname)
elif type_str == 'multiarray':
ma_index = _triage_read(node.get('index', None), slash=slash)
ma_data = _triage_read(node.get('data', None), slash=slash)
data = multiarray_load(ma_index, ma_data)
else:
raise NotImplementedError('Unknown group type: {0}'
''.format(type_str))
elif type_str == 'ndarray':
data = np.array(node)
elif type_str in ('int', 'float'):
cast = int if type_str == 'int' else float
data = cast(np.array(node)[0])
elif type_str == 'datetime':
data = text_type(np.array(node).tobytes().decode('utf-8'))
data = fromisoformat(data)
elif type_str.startswith('np_'):
np_type = type_str.split('_')[1]
cast = getattr(np, np_type)
data = cast(np.array(node)[0])
elif type_str in ('unicode', 'ascii', 'str'): # 'str' for backward compat
decoder = 'utf-8' if type_str == 'unicode' else 'ASCII'
cast = text_type if type_str == 'unicode' else str
data = cast(np.array(node).tobytes().decode(decoder))
elif type_str == 'json':
node_unicode = str(np.array(node).tobytes().decode('utf-8'))
data = json.loads(node_unicode)
elif type_str == 'None':
data = None
else:
raise TypeError('Unknown node type: {0}'.format(type_str))
return data
# ############################################################################
# UTILITIES
def _sort_keys(x):
"""Sort and return keys of dict"""
keys = list(x.keys()) # note: not thread-safe
idx = np.argsort([str(k) for k in keys])
keys = [keys[ii] for ii in idx]
return keys
def object_diff(a, b, pre=''):
"""Compute all differences between two python variables
Parameters
----------
a : object
Currently supported: dict, list, tuple, ndarray, int, str, bytes,
float.
b : object
Must be the same type as ``a``.
pre : str
String to prepend to each line.
Returns
-------
diffs : str
A string representation of the differences.
"""
sparse = _import_sparse()
try:
from pandas import DataFrame, Series
except ImportError:
DataFrame = Series = type(None)
out = ''
if type(a) != type(b):
out += pre + ' type mismatch (%s, %s)\n' % (type(a), type(b))
elif isinstance(a, dict):
k1s = _sort_keys(a)
k2s = _sort_keys(b)
m1 = set(k2s) - set(k1s)
if len(m1):
out += pre + ' x1 missing keys %s\n' % (m1)
for key in k1s:
if key not in k2s:
out += pre + ' x2 missing key %s\n' % key
else:
out += object_diff(a[key], b[key], pre + 'd1[%s]' % repr(key))
elif isinstance(a, (list, tuple)):
if len(a) != len(b):
out += pre + ' length mismatch (%s, %s)\n' % (len(a), len(b))
else:
for xx1, xx2 in zip(a, b):
out += object_diff(xx1, xx2, pre='')
elif isinstance(a, (string_types, int, float, bytes)):
if a != b:
out += pre + ' value mismatch (%s, %s)\n' % (a, b)
elif a is None:
pass # b must be None due to our type checking
elif isinstance(a, np.ndarray):
if not np.array_equal(a, b):
out += pre + ' array mismatch\n'
elif sparse is not None and sparse.isspmatrix(a):
# sparsity and sparse type of b vs a already checked above by type()
if b.shape != a.shape:
out += pre + (' sparse matrix a and b shape mismatch'
'(%s vs %s)' % (a.shape, b.shape))
else:
c = a - b
c.eliminate_zeros()
if c.nnz > 0:
out += pre + (' sparse matrix a and b differ on %s '
'elements' % c.nnz)
elif isinstance(a, (DataFrame, Series)):
if b.shape != a.shape:
out += pre + (' pandas values a and b shape mismatch'
'(%s vs %s)' % (a.shape, b.shape))
else:
c = a.values - b.values
nzeros = np.sum(c != 0)
if nzeros > 0:
out += pre + (' pandas values a and b differ on %s '
'elements' % nzeros)
else:
raise RuntimeError(pre + ': unsupported type %s (%s)' % (type(a), a))
return out
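# --- Illustrative example (not part of the original module) -----------------
# A hedged sketch of the kind of report object_diff() produces; the data is
# arbitrary and the helper name is hypothetical.
def _example_object_diff():
    a = {'x': np.zeros(3), 'y': [1, 2, 3]}
    b = {'x': np.ones(3), 'y': [1, 2, 3]}
    diff = object_diff(a, b)
    # Non-empty string flagging the array mismatch under key 'x',
    # e.g. "d1['x'] array mismatch".
    return diff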
class _TempDir(str):
"""Class for creating and auto-destroying temp dir
This is designed to be used with testing modules. Instances should be
defined inside test functions. Instances defined at module level can not
guarantee proper destruction of the temporary directory.
When used at module level, the current use of the __del__() method for
cleanup can fail because the rmtree function may be cleaned up before this
object (an alternative could be using the atexit module instead).
"""
def __new__(self):
new = str.__new__(self, tempfile.mkdtemp())
return new
def __init__(self):
self._path = self.__str__()
def __del__(self):
rmtree(self._path, ignore_errors=True)
def _list_file_contents(h5file):
if 'h5io' not in h5file.keys():
raise ValueError('h5file must contain h5io data')
# Set up useful variables for later
h5file = h5file['h5io']
root_title = h5file.attrs['TITLE']
n_space = np.max([(len(key), len(val.attrs['TITLE']))
for key, val in h5file.items()]) + 2
# Create print strings
strs = ['Root type: %s | Items: %s\n' % (root_title, len(h5file))]
for key, data in h5file.items():
type_str = data.attrs['TITLE']
str_format = '%%-%ss' % n_space
if type_str == 'ndarray':
desc = 'Shape: %s'
desc_val = data.shape
elif type_str in ['pd_dataframe', 'pd_series']:
desc = 'Shape: %s'
desc_val = data['values'].shape
elif type_str in ('unicode', 'ascii', 'str'):
desc = 'Text: %s'
decoder = 'utf-8' if type_str == 'unicode' else 'ASCII'
cast = text_type if type_str == 'unicode' else str
data = cast(np.array(data).tobytes().decode(decoder))
desc_val = data[:10] + '...' if len(data) > 10 else data
else:
desc = 'Items: %s'
desc_val = len(data)
this_str = ('%%s Key: %s | Type: %s | ' + desc) % (
str_format, str_format, str_format)
this_str = this_str % (tab_str, key, type_str, desc_val)
strs.append(this_str)
out_str = '\n'.join(strs)
print(out_str)
def list_file_contents(h5file):
"""List the contents of an h5io file.
This will list the root and one-level-deep contents of the file.
Parameters
----------
h5file : str
The path to an h5io hdf5 file.
"""
h5py = _check_h5py()
err = 'h5file must be an h5py File object, not {0}'
if isinstance(h5file, str):
with h5py.File(h5file, 'r') as f:
_list_file_contents(f)
else:
if not isinstance(h5file, h5py.File):
raise TypeError(err.format(type(h5file)))
_list_file_contents(h5file)
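# --- Illustrative example (not part of the original module) -----------------
# A hedged sketch of inspecting a file produced by write_hdf5() with
# list_file_contents(); the file name is arbitrary and the helper name is
# hypothetical.
def _example_list_contents(fname='example_h5io.h5'):
    write_hdf5(fname, {'values': np.arange(5.0), 'name': 'demo'},
               overwrite=True)
    list_file_contents(fname)  # prints one summary line per top-level key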
def _json_compatible(obj, slash='error'):
if isinstance(obj, (string_types, int, float, bool, type(None))):
return True
elif isinstance(obj, list):
return all([_json_compatible(item) for item in obj])
elif isinstance(obj, dict):
_check_keys_in_dict(obj, slash=slash)
return all([_json_compatible(item) for item in obj.values()])
else:
return False
def _check_keys_in_dict(obj, slash='error'):
repl = list()
for key in obj.keys():
if '/' in key:
key_prev = key
if slash == 'error':
raise ValueError('Found a key with "/", '
'this is not allowed if slash == error')
elif slash == 'replace':
# Auto-replace keys with proper values
for key_spec, val_spec in special_chars.items():
key = key.replace(val_spec, key_spec)
repl.append((key, key_prev))
else:
raise ValueError("slash must be one of ['error', 'replace'")
for key, key_prev in repl:
obj[key] = obj.pop(key_prev)
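# Added sketch of the key validation above (illustrative only; the
# `special_chars` mapping used in 'replace' mode is defined elsewhere in
# this module): keys containing '/' are rejected in 'error' mode.
def _example_check_keys():  # pragma: no cover - illustrative only
    assert _json_compatible({'a': 1, 'nested': {'b': 2.0}})
    try:
        _check_keys_in_dict({'a/b': 1}, slash='error')
    except ValueError:
        pass  # '/' in a key is not representable and raises by default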
##############################################################################
# Arrays with mixed dimensions
def _validate_object_array(array):
if not (array.dtype == np.dtype('object') and
len(set([sub.dtype for sub in array])) == 1):
raise TypeError('unsupported array type')
def _shape_list(array):
return [np.shape(sub) for sub in array]
def _validate_sub_shapes(shape_lst):
if not all([shape_lst[0][1:] == t[1:] for t in shape_lst]):
raise ValueError('shape does not match!')
def _array_index(shape_lst):
return [t[0] for t in shape_lst]
def _index_sum(index_lst):
index_sum_lst = []
for step in index_lst:
if index_sum_lst != []:
index_sum_lst.append(index_sum_lst[-1] + step)
else:
index_sum_lst.append(step)
return index_sum_lst
def _merge_array(array):
merged_lst = []
for sub in array:
merged_lst += sub.tolist()
return np.array(merged_lst)
def multiarray_dump(array):
_validate_object_array(array)
shape_lst = _shape_list(array)
_validate_sub_shapes(shape_lst=shape_lst)
index_sum = _index_sum(index_lst=_array_index(shape_lst=shape_lst))
return index_sum, _merge_array(array=array)
def multiarray_load(index, array_merged):
array_restore = []
i_prev = 0
for i in index[:-1]:
array_restore.append(array_merged[i_prev:i])
i_prev = i
array_restore.append(array_merged[i_prev:])
return np.array(array_restore)
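# Added dump sketch for the helpers above (illustrative only): an object
# array whose sub-arrays differ only in their first dimension is flattened
# into one contiguous array plus the split indices needed to restore it.
def _example_multiarray_dump():  # pragma: no cover - illustrative only
    ragged = np.array([np.zeros((2, 3)), np.ones((4, 3))], dtype=object)
    index, merged = multiarray_dump(ragged)
    # index == [2, 6] and merged.shape == (6, 3); multiarray_load(index,
    # merged) splits merged back into the original (2, 3) and (4, 3) blocks.
    return index, merged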
###############################################################################
# BACKPORTS
try:
fromisoformat = datetime.fromisoformat
except AttributeError: # Python < 3.7
# Code adapted from CPython
# https://github.com/python/cpython/blob/master/Lib/datetime.py
def _parse_hh_mm_ss_ff(tstr):
# Parses things of the form HH[:MM[:SS[.fff[fff]]]]
len_str = len(tstr)
time_comps = [0, 0, 0, 0]
pos = 0
for comp in range(0, 3):
if (len_str - pos) < 2:
raise ValueError('Incomplete time component')
time_comps[comp] = int(tstr[pos:pos + 2])
pos += 2
next_char = tstr[pos:pos + 1]
if not next_char or comp >= 2:
break
if next_char != ':':
raise ValueError('Invalid time separator: %c' % next_char)
pos += 1
if pos < len_str:
if tstr[pos] != '.':
raise ValueError('Invalid microsecond component')
else:
pos += 1
len_remainder = len_str - pos
if len_remainder not in (3, 6):
raise ValueError('Invalid microsecond component')
time_comps[3] = int(tstr[pos:])
if len_remainder == 3:
time_comps[3] *= 1000
return time_comps
def fromisoformat(date_string):
"""Construct a datetime from the output of datetime.isoformat()."""
if not isinstance(date_string, str):
raise TypeError('fromisoformat: argument must be str')
# Split this at the separator
dstr = date_string[0:10]
tstr = date_string[11:]
try:
date_components = _parse_isoformat_date(dstr)
except ValueError:
raise ValueError(
'Invalid isoformat string: {!r}'.format(date_string))
if tstr:
try:
time_components = _parse_isoformat_time(tstr)
except ValueError:
raise ValueError(
'Invalid isoformat string: {!r}'.format(date_string))
else:
time_components = [0, 0, 0, 0, None]
return datetime(*(date_components + time_components))
def _parse_isoformat_date(dtstr):
# It is assumed that this function will only be called with a
# string of length exactly 10, and (though this is not used) ASCII-only
year = int(dtstr[0:4])
if dtstr[4] != '-':
raise ValueError('Invalid date separator: %s' % dtstr[4])
month = int(dtstr[5:7])
if dtstr[7] != '-':
raise ValueError('Invalid date separator')
day = int(dtstr[8:10])
return [year, month, day]
def _parse_isoformat_time(tstr):
# Format supported is HH[:MM[:SS[.fff[fff]]]][+HH:MM[:SS[.ffffff]]]
len_str = len(tstr)
if len_str < 2:
raise ValueError('Isoformat time too short')
# This is equivalent to re.search('[+-]', tstr), but faster
tz_pos = (tstr.find('-') + 1 or tstr.find('+') + 1)
timestr = tstr[:tz_pos - 1] if tz_pos > 0 else tstr
time_comps = _parse_hh_mm_ss_ff(timestr)
tzi = None
if tz_pos > 0:
tzstr = tstr[tz_pos:]
# Valid time zone strings are:
# HH:MM len: 5
# HH:MM:SS len: 8
# HH:MM:SS.ffffff len: 15
if len(tzstr) not in (5, 8, 15):
raise ValueError('Malformed time zone string')
tz_comps = _parse_hh_mm_ss_ff(tzstr)
if all(x == 0 for x in tz_comps):
tzi = timezone.utc
else:
tzsign = -1 if tstr[tz_pos - 1] == '-' else 1
td = timedelta(hours=tz_comps[0], minutes=tz_comps[1],
seconds=tz_comps[2], microseconds=tz_comps[3])
tzi = timezone(tzsign * td)
time_comps.append(tzi)
return time_comps
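# Added usage sketch (illustrative only): both the stdlib implementation and
# the backport above accept strings produced by datetime.isoformat().
def _example_fromisoformat():  # pragma: no cover - illustrative only
    dt = fromisoformat('2020-01-02T03:04:05.123456+00:00')
    assert (dt.year, dt.month, dt.day) == (2020, 1, 2)
    return dt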
|
|
"""
Technical Analysis Factors
--------------------------
"""
from numbers import Number
from numpy import (
abs,
arange,
average,
clip,
diff,
exp,
fmax,
full,
inf,
isnan,
log,
NINF,
sqrt,
sum as np_sum,
)
from numexpr import evaluate
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.mixins import SingleInputMixin
from zipline.utils.numpy_utils import ignore_nanwarnings
from zipline.utils.input_validation import expect_types
from zipline.utils.math_utils import (
nanargmax,
nanmax,
nanmean,
nansum,
)
from .factor import CustomFactor
class Returns(CustomFactor):
"""
Calculates the percent change in close price over the given window_length.
**Default Inputs**: [USEquityPricing.close]
"""
inputs = [USEquityPricing.close]
def compute(self, today, assets, out, close):
out[:] = (close[-1] - close[0]) / close[0]
class RSI(CustomFactor, SingleInputMixin):
"""
Relative Strength Index
**Default Inputs**: [USEquityPricing.close]
**Default Window Length**: 15
"""
window_length = 15
inputs = (USEquityPricing.close,)
def compute(self, today, assets, out, closes):
diffs = diff(closes, axis=0)
ups = nanmean(clip(diffs, 0, inf), axis=0)
downs = abs(nanmean(clip(diffs, -inf, 0), axis=0))
return evaluate(
"100 - (100 / (1 + (ups / downs)))",
local_dict={'ups': ups, 'downs': downs},
global_dict={},
out=out,
)
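# Added plain-NumPy restatement of the RSI arithmetic above (illustrative
# only, for a single window of closes): mean gains and mean losses feed the
# classic 100 - 100 / (1 + RS) formula.
def _example_rsi(closes):  # pragma: no cover - illustrative only
    diffs = diff(closes, axis=0)
    ups = nanmean(clip(diffs, 0, inf), axis=0)
    downs = abs(nanmean(clip(diffs, -inf, 0), axis=0))
    return 100.0 - (100.0 / (1.0 + ups / downs))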
class SimpleMovingAverage(CustomFactor, SingleInputMixin):
"""
Average Value of an arbitrary column
**Default Inputs**: None
**Default Window Length**: None
"""
    # numpy's nan functions throw warnings when passed an array containing
    # only nans, but they still return the desired value (nan), so we ignore
    # the warning.
ctx = ignore_nanwarnings()
def compute(self, today, assets, out, data):
out[:] = nanmean(data, axis=0)
class WeightedAverageValue(CustomFactor):
"""
Helper for VWAP-like computations.
**Default Inputs:** None
**Default Window Length:** None
"""
def compute(self, today, assets, out, base, weight):
out[:] = nansum(base * weight, axis=0) / nansum(weight, axis=0)
class VWAP(WeightedAverageValue):
"""
Volume Weighted Average Price
**Default Inputs:** [USEquityPricing.close, USEquityPricing.volume]
**Default Window Length:** None
"""
inputs = (USEquityPricing.close, USEquityPricing.volume)
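# Added sketch of the weighted average used by VWAP above (illustrative
# only): each close is weighted by the volume traded on that day.
def _example_vwap(closes, volumes):  # pragma: no cover - illustrative only
    return nansum(closes * volumes, axis=0) / nansum(volumes, axis=0)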
class MaxDrawdown(CustomFactor, SingleInputMixin):
"""
Max Drawdown
**Default Inputs:** None
**Default Window Length:** None
"""
ctx = ignore_nanwarnings()
def compute(self, today, assets, out, data):
drawdowns = fmax.accumulate(data, axis=0) - data
drawdowns[isnan(drawdowns)] = NINF
drawdown_ends = nanargmax(drawdowns, axis=0)
# TODO: Accelerate this loop in Cython or Numba.
for i, end in enumerate(drawdown_ends):
peak = nanmax(data[:end + 1, i])
out[i] = (peak - data[end, i]) / data[end, i]
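# Added sketch of the drawdown logic above for a single 1-D price series
# (illustrative only): the running peak minus the price gives the drawdown
# at each point, and the factor reports the largest drawdown relative to
# the trough price.
def _example_max_drawdown(prices):  # pragma: no cover - illustrative only
    drawdowns = fmax.accumulate(prices) - prices
    end = nanargmax(drawdowns)
    peak = nanmax(prices[:end + 1])
    return (peak - prices[end]) / prices[end]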
class AverageDollarVolume(CustomFactor):
"""
Average Daily Dollar Volume
**Default Inputs:** [USEquityPricing.close, USEquityPricing.volume]
**Default Window Length:** None
"""
inputs = [USEquityPricing.close, USEquityPricing.volume]
def compute(self, today, assets, out, close, volume):
out[:] = nanmean(close * volume, axis=0)
class _ExponentialWeightedFactor(SingleInputMixin, CustomFactor):
"""
Base class for factors implementing exponential-weighted operations.
**Default Inputs:** None
**Default Window Length:** None
Parameters
----------
inputs : length-1 list or tuple of BoundColumn
The expression over which to compute the average.
window_length : int > 0
Length of the lookback window over which to compute the average.
decay_rate : float, 0 < decay_rate <= 1
Weighting factor by which to discount past observations.
When calculating historical averages, rows are multiplied by the
sequence::
decay_rate, decay_rate ** 2, decay_rate ** 3, ...
Methods
-------
weights
from_span
from_halflife
from_center_of_mass
"""
params = ('decay_rate',)
@staticmethod
def weights(length, decay_rate):
"""
Return weighting vector for an exponential moving statistic on `length`
rows with a decay rate of `decay_rate`.
"""
return full(length, decay_rate, float) ** arange(length + 1, 1, -1)
@classmethod
@expect_types(span=Number)
def from_span(cls, inputs, window_length, span):
"""
Convenience constructor for passing `decay_rate` in terms of `span`.
Forwards `decay_rate` as `1 - (2.0 / (1 + span))`. This provides the
behavior equivalent to passing `span` to pandas.ewma.
Example
-------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[USEquityPricing.close],
# window_length=30,
# decay_rate=(1 - (2.0 / (1 + 15.0))),
# )
my_ewma = EWMA.from_span(
inputs=[USEquityPricing.close],
window_length=30,
span=15,
)
Note
----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
"""
if span <= 1:
            raise ValueError(
                "`span` must be greater than 1. %s was passed." % span
)
decay_rate = (1.0 - (2.0 / (1.0 + span)))
assert 0.0 < decay_rate <= 1.0
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=decay_rate,
)
@classmethod
@expect_types(halflife=Number)
def from_halflife(cls, inputs, window_length, halflife):
"""
Convenience constructor for passing ``decay_rate`` in terms of half
life.
Forwards ``decay_rate`` as ``exp(log(.5) / halflife)``. This provides
the behavior equivalent to passing `halflife` to pandas.ewma.
Example
-------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[USEquityPricing.close],
# window_length=30,
# decay_rate=np.exp(np.log(0.5) / 15),
# )
my_ewma = EWMA.from_halflife(
inputs=[USEquityPricing.close],
window_length=30,
halflife=15,
)
Note
----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
"""
if halflife <= 0:
raise ValueError(
"`span` must be a positive number. %s was passed." % halflife
)
decay_rate = exp(log(.5) / halflife)
assert 0.0 < decay_rate <= 1.0
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=decay_rate,
)
@classmethod
def from_center_of_mass(cls, inputs, window_length, center_of_mass):
"""
Convenience constructor for passing `decay_rate` in terms of center of
mass.
        Forwards `decay_rate` as `1 - (1 / (1 + center_of_mass))`. This provides
behavior equivalent to passing `center_of_mass` to pandas.ewma.
Example
-------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[USEquityPricing.close],
# window_length=30,
            #    decay_rate=(1 - (1 / 16.0)),
# )
my_ewma = EWMA.from_center_of_mass(
inputs=[USEquityPricing.close],
window_length=30,
center_of_mass=15,
)
Note
----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
"""
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=(1.0 - (1.0 / (1.0 + center_of_mass))),
)
class ExponentialWeightedMovingAverage(_ExponentialWeightedFactor):
"""
Exponentially Weighted Moving Average
**Default Inputs:** None
**Default Window Length:** None
Parameters
----------
inputs : length-1 list/tuple of BoundColumn
The expression over which to compute the average.
window_length : int > 0
Length of the lookback window over which to compute the average.
decay_rate : float, 0 < decay_rate <= 1
Weighting factor by which to discount past observations.
When calculating historical averages, rows are multiplied by the
sequence::
decay_rate, decay_rate ** 2, decay_rate ** 3, ...
Notes
-----
- This class can also be imported under the name ``EWMA``.
See Also
--------
:func:`pandas.ewma`
"""
def compute(self, today, assets, out, data, decay_rate):
out[:] = average(
data,
axis=0,
weights=self.weights(len(data), decay_rate),
)
class ExponentialWeightedMovingStdDev(_ExponentialWeightedFactor):
"""
Exponentially Weighted Moving Standard Deviation
**Default Inputs:** None
**Default Window Length:** None
Parameters
----------
inputs : length-1 list/tuple of BoundColumn
The expression over which to compute the average.
window_length : int > 0
Length of the lookback window over which to compute the average.
decay_rate : float, 0 < decay_rate <= 1
Weighting factor by which to discount past observations.
When calculating historical averages, rows are multiplied by the
sequence::
decay_rate, decay_rate ** 2, decay_rate ** 3, ...
Notes
-----
- This class can also be imported under the name ``EWMSTD``.
See Also
--------
:func:`pandas.ewmstd`
"""
def compute(self, today, assets, out, data, decay_rate):
weights = self.weights(len(data), decay_rate)
mean = average(data, axis=0, weights=weights)
variance = average((data - mean) ** 2, axis=0, weights=weights)
squared_weight_sum = (np_sum(weights) ** 2)
bias_correction = (
squared_weight_sum / (squared_weight_sum - np_sum(weights ** 2))
)
out[:] = sqrt(variance * bias_correction)
# Convenience aliases.
EWMA = ExponentialWeightedMovingAverage
EWMSTD = ExponentialWeightedMovingStdDev
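# Added illustration (not part of zipline) of how the convenience
# constructors above map their arguments onto a decay_rate, using the
# hypothetical value 15 for each parameterization:
def _example_decay_rates():  # pragma: no cover - illustrative only
    from_span = 1.0 - (2.0 / (1.0 + 15.0))              # 0.875
    from_halflife = exp(log(0.5) / 15.0)                # ~0.9548
    from_center_of_mass = 1.0 - (1.0 / (1.0 + 15.0))    # 0.9375
    return from_span, from_halflife, from_center_of_mass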
|
|
# markdown/searializers.py
#
# Add x/html serialization to Elementree
# Taken from ElementTree 1.3 preview with slight modifications
#
# Copyright (c) 1999-2007 by Fredrik Lundh. All rights reserved.
#
# [email protected]
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2007 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
from . import util
ElementTree = util.etree.ElementTree
QName = util.etree.QName
if hasattr(util.etree, 'test_comment'):
Comment = util.etree.test_comment
else:
Comment = util.etree.Comment
PI = util.etree.PI
ProcessingInstruction = util.etree.ProcessingInstruction
__all__ = ['to_html_string', 'to_xhtml_string']
HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
"img", "input", "isindex", "link", "meta" "param")
try:
HTML_EMPTY = set(HTML_EMPTY)
except NameError:
pass
_namespace_map = {
# "well-known" namespace prefixes
"http://www.w3.org/XML/1998/namespace": "xml",
"http://www.w3.org/1999/xhtml": "html",
"http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
"http://schemas.xmlsoap.org/wsdl/": "wsdl",
# xml schema
"http://www.w3.org/2001/XMLSchema": "xs",
"http://www.w3.org/2001/XMLSchema-instance": "xsi",
    # dublin core
"http://purl.org/dc/elements/1.1/": "dc",
}
def _raise_serialization_error(text):
raise TypeError(
"cannot serialize %r (type %s)" % (text, type(text).__name__)
)
def _encode(text, encoding):
try:
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_cdata(text):
    # escape character data
    try:
        # it's worth avoiding do-nothing calls for strings that are
        # shorter than 500 characters, or so. assume that's, by far,
        # the most common case in most applications.
        if "&" in text:
            text = text.replace("&", "&amp;")
        if "<" in text:
            text = text.replace("<", "&lt;")
        if ">" in text:
            text = text.replace(">", "&gt;")
        return text
    except (TypeError, AttributeError):
        _raise_serialization_error(text)
def _escape_attrib(text):
    # escape attribute value
    try:
        if "&" in text:
            text = text.replace("&", "&amp;")
        if "<" in text:
            text = text.replace("<", "&lt;")
        if ">" in text:
            text = text.replace(">", "&gt;")
        if "\"" in text:
            text = text.replace("\"", "&quot;")
        if "\n" in text:
            text = text.replace("\n", "&#10;")
        return text
    except (TypeError, AttributeError):
        _raise_serialization_error(text)
def _escape_attrib_html(text):
    # escape attribute value
    try:
        if "&" in text:
            text = text.replace("&", "&amp;")
        if "<" in text:
            text = text.replace("<", "&lt;")
        if ">" in text:
            text = text.replace(">", "&gt;")
        if "\"" in text:
            text = text.replace("\"", "&quot;")
        return text
    except (TypeError, AttributeError):
        _raise_serialization_error(text)
def _serialize_html(write, elem, qnames, namespaces, format):
tag = elem.tag
text = elem.text
if tag is Comment:
write("<!--%s-->" % _escape_cdata(text))
elif tag is ProcessingInstruction:
write("<?%s?>" % _escape_cdata(text))
else:
tag = qnames[tag]
if tag is None:
if text:
write(_escape_cdata(text))
for e in elem:
_serialize_html(write, e, qnames, None, format)
else:
write("<" + tag)
items = list(elem.items())
if items or namespaces:
items.sort() # lexical order
for k, v in items:
if isinstance(k, QName):
k = k.text
if isinstance(v, QName):
v = qnames[v.text]
else:
v = _escape_attrib_html(v)
if qnames[k] == v and format == 'html':
# handle boolean attributes
write(" %s" % v)
else:
write(" %s=\"%s\"" % (qnames[k], v))
if namespaces:
items = list(namespaces.items())
items.sort(key=lambda x: x[1]) # sort on prefix
for v, k in items:
if k:
k = ":" + k
write(" xmlns%s=\"%s\"" % (k, _escape_attrib(v)))
if format == "xhtml" and tag in HTML_EMPTY:
write(" />")
else:
write(">")
tag = tag.lower()
if text:
if tag == "script" or tag == "style":
write(text)
else:
write(_escape_cdata(text))
for e in elem:
_serialize_html(write, e, qnames, None, format)
if tag not in HTML_EMPTY:
write("</" + tag + ">")
if elem.tail:
write(_escape_cdata(elem.tail))
def _write_html(root,
encoding=None,
default_namespace=None,
format="html"):
assert root is not None
data = []
write = data.append
qnames, namespaces = _namespaces(root, default_namespace)
_serialize_html(write, root, qnames, namespaces, format)
if encoding is None:
return "".join(data)
else:
        return _encode("".join(data), encoding)
# --------------------------------------------------------------------
# serialization support
def _namespaces(elem, default_namespace=None):
# identify namespaces used in this tree
# maps qnames to *encoded* prefix:local names
qnames = {None: None}
# maps uri:s to prefixes
namespaces = {}
if default_namespace:
namespaces[default_namespace] = ""
def add_qname(qname):
# calculate serialized qname representation
try:
if qname[:1] == "{":
uri, tag = qname[1:].split("}", 1)
prefix = namespaces.get(uri)
if prefix is None:
prefix = _namespace_map.get(uri)
if prefix is None:
prefix = "ns%d" % len(namespaces)
if prefix != "xml":
namespaces[uri] = prefix
if prefix:
qnames[qname] = "%s:%s" % (prefix, tag)
else:
qnames[qname] = tag # default element
else:
if default_namespace:
raise ValueError(
"cannot use non-qualified names with "
"default_namespace option"
)
qnames[qname] = qname
except TypeError:
_raise_serialization_error(qname)
# populate qname and namespaces table
try:
iterate = elem.iter
except AttributeError:
iterate = elem.getiterator # cET compatibility
for elem in iterate():
tag = elem.tag
if isinstance(tag, QName) and tag.text not in qnames:
add_qname(tag.text)
elif isinstance(tag, str):
if tag not in qnames:
add_qname(tag)
elif tag is not None and tag is not Comment and tag is not PI:
_raise_serialization_error(tag)
for key, value in list(elem.items()):
if isinstance(key, QName):
key = key.text
if key not in qnames:
add_qname(key)
if isinstance(value, QName) and value.text not in qnames:
add_qname(value.text)
text = elem.text
if isinstance(text, QName) and text.text not in qnames:
add_qname(text.text)
return qnames, namespaces
def to_html_string(element):
return _write_html(ElementTree(element).getroot(), format="html")
def to_xhtml_string(element):
return _write_html(ElementTree(element).getroot(), format="xhtml")
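# Added usage sketch (illustrative only, assuming util.etree exposes the
# standard ElementTree API): the two serializers differ only in how they
# close empty elements.
def _example_serialize():  # pragma: no cover - illustrative only
    p = util.etree.Element('p')
    p.text = 'a < b'
    p.append(util.etree.Element('br'))
    html = to_html_string(p)    # '<p>a &lt; b<br></p>'
    xhtml = to_xhtml_string(p)  # '<p>a &lt; b<br /></p>'
    return html, xhtml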
|
|
# Copyright 2014 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
#
# author: Steven Czerwinski <[email protected]>
from __future__ import unicode_literals
from __future__ import absolute_import
__author__ = "[email protected]"
import sys
from io import BytesIO
import mock
import time
from scalyr_agent.__scalyr__ import SCALYR_VERSION
from scalyr_agent import scalyr_client
from scalyr_agent import util as scalyr_util
from scalyr_agent.scalyr_client import (
AddEventsRequest,
PostFixBuffer,
EventSequencer,
Event,
ScalyrClientSession,
MAX_REQUEST_BODY_SIZE_LOG_MSG_LIMIT,
)
from scalyr_agent.test_base import ScalyrTestCase
from scalyr_agent.test_base import BaseScalyrLogCaptureTestCase
from scalyr_agent.test_base import skipIf
import scalyr_agent.test_util as test_util
import scalyr_agent.scalyr_client
class AddEventsRequestTest(ScalyrTestCase):
def setUp(self):
super(AddEventsRequestTest, self).setUp()
self.__body = {"token": "fakeToken"}
def test_basic_case(self):
request = AddEventsRequest(self.__body)
request.set_client_time(1)
self.assertEquals(request.total_events, 0)
self.assertTrue(
request.add_event(Event().set_message(b"eventOne"), timestamp=1)
)
self.assertTrue(
request.add_event(Event().set_message(b"eventTwo"), timestamp=2)
)
self.assertEquals(
request.get_payload(),
b"""{"token":"fakeToken", events: [{attrs:{message:`s\x00\x00\x00\x08eventOne},ts:"1"},{attrs:{message:`s\x00\x00\x00\x08eventTwo},ts:"2"}]"""
b""", logs: [], threads: [], client_time: 1 }""",
)
self.assertEquals(request.total_events, 2)
request.close()
def test_multiple_calls_to_get_payload(self):
request = AddEventsRequest(self.__body)
request.set_client_time(1)
self.assertTrue(
request.add_event(Event().set_message(b"eventOne"), timestamp=1)
)
self.assertTrue(
request.add_event(Event().set_message(b"eventTwo"), timestamp=2)
)
self.assertEquals(request.get_payload(), request.get_payload())
request.close()
def test_add_log_and_thread(self):
request = AddEventsRequest(self.__body)
request.set_client_time(1)
self.assertEquals(request.total_events, 0)
self.assertTrue(
request.add_event(Event().set_message(b"eventOne"), timestamp=1)
)
self.assertTrue(request.add_log_and_thread("t1", "n1", {"l1": "L1"}))
self.assertTrue(
request.add_event(Event().set_message(b"eventTwo"), timestamp=2)
)
self.assertTrue(request.add_log_and_thread("t2", "n2", {"l2": "L2"}))
self.assertEquals(
request.get_payload(),
b"""{"token":"fakeToken", events: [{attrs:{message:`s\x00\x00\x00\x08eventOne},ts:"1"},{attrs:{message:`s\x00\x00\x00\x08eventTwo},ts:"2"}]"""
b""", logs: [{"attrs":{"l1":"L1"},"id":"t1"},{"attrs":{"l2":"L2"},"id":"t2"}], threads: [{"id":"t1","name":"n1"},{"id":"t2","name":"n2"}], client_time: 1 }""",
)
self.assertEquals(request.total_events, 2)
request.close()
def test_maximum_bytes_exceeded(self):
request = AddEventsRequest(self.__body, max_size=112)
request.set_client_time(1)
self.assertTrue(request.add_event(Event().set_message("eventOne"), timestamp=1))
self.assertFalse(
request.add_event(Event().set_message("eventTwo"), timestamp=2)
)
self.assertEquals(
request.get_payload(),
b"""{"token":"fakeToken", events: [{attrs:{message:`s\x00\x00\x00\x08eventOne},ts:"1"}], logs: [], threads: [], client_time: 1 }""",
)
request.close()
def test_maximum_bytes_exceeded_from_logs_and_threads(self):
request = AddEventsRequest(self.__body, max_size=131)
request.set_client_time(1)
self.assertTrue(request.add_log_and_thread("t1", "name1", {}))
self.assertFalse(request.add_log_and_thread("t2", "name2", {}))
self.assertEquals(
request.get_payload(),
b"""{"token":"fakeToken", events: [], logs: [{"attrs":{},"id":"t1"}], threads: [{"id":"t1","name":"name1"}], client_time: 1 }""",
)
request.close()
def test_set_position(self):
request = AddEventsRequest(self.__body)
request.set_client_time(1)
position = request.position()
self.assertTrue(
request.add_event(Event().set_message(b"eventOne"), timestamp=1)
)
self.assertTrue(
request.add_event(Event().set_message(b"eventTwo"), timestamp=2)
)
request.set_position(position)
self.assertTrue(
request.add_event(Event().set_message(b"eventThree"), timestamp=3)
)
self.assertEquals(
request.get_payload(),
b"""{"token":"fakeToken", events: [{attrs:{message:`s\x00\x00\x00\neventThree},ts:"3"}], logs: [], threads: [], client_time: 1 }""",
)
request.close()
def test_set_position_with_log_and_thread(self):
request = AddEventsRequest(self.__body)
request.set_client_time(1)
position = request.position()
request.add_log_and_thread("log1", "Hi there", {})
self.assertTrue(request.add_event(Event().set_message("eventOne"), timestamp=1))
self.assertTrue(request.add_event(Event().set_message("eventTwo"), timestamp=2))
request.set_position(position)
self.assertTrue(request.add_log_and_thread("log2", "Log two", {}))
self.assertTrue(
request.add_event(Event().set_message("eventThree"), timestamp=3)
)
self.assertEquals(
request.get_payload(),
b"""{"token":"fakeToken", events: [{attrs:{message:`s\x00\x00\x00\neventThree},ts:"3"}], """
b"""logs: [{"attrs":{},"id":"log2"}], threads: [{"id":"log2","name":"Log two"}], client_time: 1 }""",
)
request.close()
def test_set_log_line_attributes(self):
request = AddEventsRequest(self.__body)
request.set_client_time(1)
request.add_log_and_thread("log2", "Log two", {})
event_one = Event().set_message("eventOne")
event_one.add_attributes({"source": "stdout"}, overwrite_existing=True)
self.assertTrue(request.add_event(event_one, timestamp=1))
self.assertEquals(
request.get_payload(),
b"""{"token":"fakeToken", events: [{attrs:{"source":"stdout",message:`s\x00\x00\x00\x08eventOne},ts:"1"}], """
b"""logs: [{"attrs":{},"id":"log2"}], threads: [{"id":"log2","name":"Log two"}], client_time: 1 }""",
)
request.close()
def test_set_log_line_attributes_with_base_attributes(self):
request = AddEventsRequest(self.__body)
request.set_client_time(1)
request.add_log_and_thread("log2", "Log two", {})
event_base = Event()
event_base.add_attributes(
{"source": "stdout", "base": "base"}, overwrite_existing=False
)
event_one = Event(base=event_base)
event_one.set_message("eventOne")
event_one.add_attributes(
{"source": "stdin", "event": "event"}, overwrite_existing=True
)
self.assertTrue(request.add_event(event_one, timestamp=1))
self.assertEquals(
request.get_payload(),
b"""{"token":"fakeToken", events: [{attrs:{"event":"event","source":"stdin",message:`s\x00\x00\x00\x08eventOne},ts:"1"}], """
b"""logs: [{"attrs":{},"id":"log2"}], threads: [{"id":"log2","name":"Log two"}], client_time: 1 }""",
)
request.close()
def test_set_client_time(self):
request = AddEventsRequest(self.__body)
request.set_client_time(100)
self.assertTrue(request.add_event(Event().set_message("eventOne"), timestamp=1))
self.assertTrue(request.add_event(Event().set_message("eventTwo"), timestamp=2))
self.assertEquals(
request.get_payload(),
b"""{"token":"fakeToken", events: [{attrs:{message:`s\x00\x00\x00\x08eventOne},ts:"1"},{attrs:{message:`s\x00\x00\x00\x08eventTwo},ts:"2"}]"""
b""", logs: [], threads: [], client_time: 100 }""",
)
request.set_client_time(2)
self.assertEquals(
request.get_payload(),
b"""{"token":"fakeToken", events: [{attrs:{message:`s\x00\x00\x00\x08eventOne},ts:"1"},{attrs:{message:`s\x00\x00\x00\x08eventTwo},ts:"2"}]"""
b""", logs: [], threads: [], client_time: 2 }""",
)
request.close()
def test_monotonically_increasing_timestamp(self):
request = AddEventsRequest(self.__body, enforce_monotonic_timestamps=True)
scalyr_client._set_last_timestamp(0)
ts = 2000
expected = str(ts + 1)
self.assertTrue(
request.add_event(Event().set_message("eventOne"), timestamp=ts)
)
self.assertTrue(request.add_event(Event().set_message("eventTwo"), timestamp=1))
json = test_util.parse_scalyr_request(request.get_payload())
event = json["events"][1]
self.assertEquals(event["ts"], expected)
def test_no_monotonically_increasing_timestamp(self):
request = AddEventsRequest(self.__body)
ts = 2000
self.assertTrue(
request.add_event(Event().set_message("eventOne"), timestamp=ts)
)
self.assertTrue(request.add_event(Event().set_message("eventTwo"), timestamp=1))
json = test_util.parse_scalyr_request(request.get_payload())
event = json["events"][1]
self.assertEquals(event["ts"], "1")
def test_timestamp_none(self):
request = AddEventsRequest(self.__body)
request.set_client_time(100)
ts = int(time.time() * 1e9)
self.assertTrue(
request.add_event(Event().set_message("eventOne"), timestamp=None)
)
json = test_util.parse_scalyr_request(request.get_payload())
event = json["events"][0]
event_ts = int(event["ts"])
threshold = abs(event_ts - ts)
        # allow a threshold of 1 second to have elapsed between reading
        # time.time() and setting the event timestamp in add_event
self.assertTrue(threshold < 1e9)
def test_sequence_id_but_no_number(self):
request = AddEventsRequest(self.__body)
request.set_client_time(1)
self.assertTrue(
request.add_event(
Event().set_message("eventOne"), timestamp=1, sequence_id=1234
)
)
self.assertEquals(request.total_events, 1)
json = test_util.parse_scalyr_request(request.get_payload())
event = json["events"][0]
self.assertFalse("si" in event)
self.assertFalse("sn" in event)
self.assertFalse("sd" in event)
request.close()
def test_sequence_number_but_no_id(self):
request = AddEventsRequest(self.__body)
request.set_client_time(1)
self.assertTrue(
request.add_event(
Event().set_message("eventOne"), timestamp=1, sequence_number=1234
)
)
self.assertEquals(request.total_events, 1)
json = test_util.parse_scalyr_request(request.get_payload())
event = json["events"][0]
self.assertFalse("si" in event)
self.assertFalse("sn" in event)
self.assertFalse("sd" in event)
request.close()
def test_sequence_id_and_number(self):
expected_id = "1234"
expected_number = 1234
request = AddEventsRequest(self.__body)
request.set_client_time(1)
self.assertTrue(
request.add_event(
Event().set_message("eventOne"),
timestamp=1,
sequence_id=expected_id,
sequence_number=expected_number,
)
)
self.assertEquals(request.total_events, 1)
json = test_util.parse_scalyr_request(request.get_payload())
event = json["events"][0]
self.assertEquals(expected_id, event["si"])
self.assertEquals(expected_number, event["sn"])
self.assertFalse("sd" in event)
request.close()
def test_same_sequence_id(self):
expected_id = b"1234"
expected_number = 1234
expected_delta = 1
request = AddEventsRequest(self.__body)
request.set_client_time(1)
self.assertTrue(
request.add_event(
Event().set_message("eventOne"),
timestamp=1,
sequence_id=expected_id,
sequence_number=expected_number,
)
)
self.assertTrue(
request.add_event(
Event().set_message("eventTwo"),
timestamp=2,
sequence_id=expected_id,
sequence_number=expected_number + expected_delta,
)
)
self.assertEquals(request.total_events, 2)
json = test_util.parse_scalyr_request(request.get_payload())
event = json["events"][1]
self.assertFalse("si" in event)
self.assertFalse("sn" in event)
self.assertEquals(expected_delta, event["sd"])
request.close()
def test_different_sequence_id(self):
first_id = "1234"
second_id = "1235"
first_number = 1234
second_number = 1234
request = AddEventsRequest(self.__body)
request.set_client_time(1)
self.assertTrue(
request.add_event(
Event().set_message("eventOne"),
timestamp=1,
sequence_id=first_id,
sequence_number=first_number,
)
)
self.assertTrue(
request.add_event(
Event().set_message("eventTwo"),
timestamp=2,
sequence_id=first_id,
sequence_number=first_number + 1,
)
)
self.assertTrue(
request.add_event(
Event().set_message("eventThree"),
timestamp=3,
sequence_id=second_id,
sequence_number=second_number,
)
)
self.assertEquals(request.total_events, 3)
json = test_util.parse_scalyr_request(request.get_payload())
event = json["events"][2]
self.assertEquals(second_id, event["si"])
self.assertEquals(second_number, event["sn"])
self.assertFalse("sd" in event)
request.close()
def test_exceeds_size_doesnt_effect_sequence(self):
first_id = "1234"
second_id = "1235"
first_number = 1234
second_number = 4321
expected_delta = 10
request = AddEventsRequest(self.__body, max_size=180)
request.set_client_time(1)
self.assertTrue(
request.add_event(
Event().set_message("eventOne"),
timestamp=1,
sequence_id=first_id,
sequence_number=first_number,
)
)
self.assertFalse(
request.add_event(
Event(
attrs={"name": "eventTwo", "long": "some really long text"}
).set_message(b"eventTwo"),
timestamp=2,
sequence_id=second_id,
sequence_number=second_number,
)
)
self.assertTrue(
request.add_event(
Event().set_message(b"eventThree"),
timestamp=3,
sequence_id=first_id,
sequence_number=first_number + expected_delta,
)
)
self.assertEquals(request.total_events, 2)
json = test_util.parse_scalyr_request(request.get_payload())
event = json["events"][1]
self.assertFalse("si" in event)
self.assertEquals(expected_delta, event["sd"])
self.assertFalse("sn" in event)
request.close()
def test_set_position_resets_sequence_compression(self):
first_id = "1234"
first_number = 1234
second_number = 4321
expected_delta = 10
request = AddEventsRequest(self.__body)
request.set_client_time(1)
self.assertTrue(
request.add_event(
Event().set_message("eventOne"),
timestamp=1,
sequence_id=first_id,
sequence_number=first_number,
)
)
position = request.position()
self.assertTrue(
request.add_event(
Event().set_message("eventTwo"),
timestamp=2,
sequence_id=first_id,
sequence_number=first_number + expected_delta,
)
)
request.set_position(position)
self.assertTrue(
request.add_event(
Event().set_message("eventThree"),
timestamp=3,
sequence_id=first_id,
sequence_number=second_number,
)
)
self.assertEquals(request.total_events, 2)
json = test_util.parse_scalyr_request(request.get_payload())
event = json["events"][1]
self.assertEquals(second_number, event["sn"])
self.assertFalse("sd" in event)
request.close()
def test_timing_data(self):
request = AddEventsRequest(self.__body)
request.increment_timing_data(**{"foo": 1, "bar": 2})
request.increment_timing_data(foo=5)
# can't rely on stable order in "get_timing_data()" return value
self.assertEquals(sorted(request.get_timing_data()), sorted("foo=6.0 bar=2.0"))
class EventTest(ScalyrTestCase):
def test_all_fields(self):
x = Event(thread_id="foo", attrs={"parser": "bar"})
x.set_message(b"my_message")
x.set_sampling_rate(0.5)
x.set_sequence_id(1)
x.set_sequence_number(2)
x.set_sequence_number_delta(3)
x.set_timestamp(42)
output_buffer = BytesIO()
x.serialize(output_buffer)
self.assertEquals(
b'{thread:"foo", log:"foo", attrs:{"parser":"bar",message:`s\x00\x00\x00\nmy_message,sample_rate:0.5},ts:"42",si:"1",sn:2,sd:3}',
output_buffer.getvalue(),
)
self.assertEquals(
{
"log": "foo",
"thread": "foo",
"ts": "42",
"si": "1",
"sn": 2,
"sd": 3,
"attrs": {"parser": "bar", "message": "my_message", "sample_rate": 0.5},
},
test_util.parse_scalyr_request(output_buffer.getvalue()),
)
def test_fast_path_fields(self):
x = Event(thread_id="foo", attrs={"parser": "bar"})
x.set_message(b"my_message")
x.set_sequence_number_delta(3)
x.set_timestamp(42)
output_buffer = BytesIO()
x.serialize(output_buffer)
self.assertEquals(
b'{thread:"foo", log:"foo", attrs:{"parser":"bar",message:`s\x00\x00\x00\nmy_message},sd:3,ts:"42"}',
output_buffer.getvalue(),
)
def test_individual_fields(self):
# snd
x = Event(thread_id="foo", attrs={"parser": "bar"})
x.set_message(b"my_message")
x.set_sequence_number_delta(3)
output_buffer = BytesIO()
x.serialize(output_buffer)
self.assertEquals(
b'{thread:"foo", log:"foo", attrs:{"parser":"bar",message:`s\x00\x00\x00\nmy_message},sd:3}',
output_buffer.getvalue(),
)
# timestamp
x = Event(thread_id="foo", attrs={"parser": "bar"})
x.set_message("my_message")
x.set_timestamp(42)
output_buffer = BytesIO()
x.serialize(output_buffer)
self.assertEquals(
b'{thread:"foo", log:"foo", attrs:{"parser":"bar",message:`s\x00\x00\x00\nmy_message},ts:"42"}',
output_buffer.getvalue(),
)
# sampling_rate
x = Event(thread_id="foo", attrs={"parser": "bar"})
x.set_message("my_message")
x.set_sampling_rate(0.5)
output_buffer = BytesIO()
x.serialize(output_buffer)
self.assertEquals(
b'{thread:"foo", log:"foo", attrs:{"parser":"bar",message:`s\x00\x00\x00\nmy_message,sample_rate:0.5}}',
output_buffer.getvalue(),
)
# sid
x = Event(thread_id="foo", attrs={"parser": "bar"})
x.set_message("my_message")
x.set_sequence_id("hi")
output_buffer = BytesIO()
x.serialize(output_buffer)
self.assertEquals(
b'{thread:"foo", log:"foo", attrs:{"parser":"bar",message:`s\x00\x00\x00\nmy_message},si:"hi"}',
output_buffer.getvalue(),
)
# seq num
x = Event(thread_id="foo", attrs={"parser": "bar"})
x.set_message("my_message")
x.set_sequence_number(5)
output_buffer = BytesIO()
x.serialize(output_buffer)
self.assertEquals(
b'{thread:"foo", log:"foo", attrs:{"parser":"bar",message:`s\x00\x00\x00\nmy_message},sn:5}',
output_buffer.getvalue(),
)
def test_only_message(self):
x = Event()
x.set_message("my_message")
output_buffer = BytesIO()
x.serialize(output_buffer)
self.assertEquals(
b"{attrs:{message:`s\x00\x00\x00\nmy_message}}", output_buffer.getvalue()
)
def test_no_thread_id(self):
x = Event(attrs={"parser": "bar"})
x.set_message("my_message")
x.set_sampling_rate(0.5)
x.set_sequence_id(1)
x.set_sequence_number(2)
x.set_sequence_number_delta(3)
x.set_timestamp(42)
output_buffer = BytesIO()
x.serialize(output_buffer)
self.assertEquals(
b'{attrs:{"parser":"bar",message:`s\x00\x00\x00\nmy_message,sample_rate:0.5},ts:"42",si:"1",sn:2,sd:3}',
output_buffer.getvalue(),
)
def test_no_attrs(self):
x = Event(thread_id="biz")
x.set_message("my_message")
x.set_sampling_rate(0.5)
x.set_sequence_id(1)
x.set_sequence_number(2)
x.set_sequence_number_delta(3)
x.set_timestamp(42)
output_buffer = BytesIO()
x.serialize(output_buffer)
self.assertEquals(
b'{thread:"biz", log:"biz", attrs:{message:`s\x00\x00\x00\nmy_message,sample_rate:0.5},ts:"42",si:"1",sn:2,sd:3}',
output_buffer.getvalue(),
)
def test_create_from_template(self):
x = Event(thread_id="foo", attrs={"parser": "bar"})
x = Event(base=x)
x.set_message("my_message")
x.set_sampling_rate(0.5)
x.set_sequence_id(1)
x.set_sequence_number(2)
x.set_sequence_number_delta(3)
x.set_timestamp(42)
output_buffer = BytesIO()
x.serialize(output_buffer)
self.assertEquals(
b'{thread:"foo", log:"foo", attrs:{"parser":"bar",message:`s\x00\x00\x00\nmy_message,sample_rate:0.5},ts:"42",si:"1",sn:2,sd:3}',
output_buffer.getvalue(),
)
def test_create_from_template_with_add_attributes(self):
x = Event(thread_id="foo", attrs={"parser": "bar"})
x = Event(base=x)
x.set_message("my_message")
x.set_sampling_rate(0.5)
x.set_sequence_id(1)
x.set_sequence_number(2)
x.set_sequence_number_delta(3)
x.set_timestamp(42)
x.add_attributes({"trigger_update": "yes"})
output_buffer = BytesIO()
x.serialize(output_buffer)
self.assertEquals(
b'{thread:"foo", log:"foo", attrs:{"trigger_update":"yes",message:`s\x00\x00\x00\nmy_message,sample_rate:0.5},ts:"42",si:"1",sn:2,sd:3}',
output_buffer.getvalue(),
)
class EventSequencerTest(ScalyrTestCase):
def setUp(self):
super(EventSequencerTest, self).setUp()
self.event_sequencer = EventSequencer()
def test_sequence_id_but_no_number(self):
event = Event()
self.event_sequencer.add_sequence_fields(event, "1234", None)
self.assertIsNone(event.sequence_id)
self.assertIsNone(event.sequence_number)
self.assertIsNone(event.sequence_number_delta)
def test_sequence_number_but_no_id(self):
event = Event()
self.event_sequencer.add_sequence_fields(event, None, 1234)
self.assertIsNone(event.sequence_id)
self.assertIsNone(event.sequence_number)
self.assertIsNone(event.sequence_number_delta)
def test_sequence_id_and_number(self):
expected_id = "1234"
expected_number = 1234
event = Event()
self.event_sequencer.add_sequence_fields(event, expected_id, expected_number)
self.assertEquals(expected_id.encode("utf-8"), event.sequence_id)
self.assertEquals(expected_number, event.sequence_number)
self.assertIsNone(event.sequence_number_delta)
def test_same_sequence_id(self):
expected_id = "1234"
expected_number = 1234
expected_delta = 1
event = Event()
self.event_sequencer.add_sequence_fields(event, expected_id, expected_number)
event = Event()
self.event_sequencer.add_sequence_fields(
event, expected_id, expected_number + expected_delta
)
self.assertIsNone(event.sequence_id)
self.assertIsNone(event.sequence_number)
self.assertEqual(expected_delta, event.sequence_number_delta)
def test_different_sequence_id(self):
first_id = "1234"
second_id = "1235"
first_number = 1234
second_number = 1234
event = Event()
self.event_sequencer.add_sequence_fields(event, first_id, first_number)
event = Event()
self.event_sequencer.add_sequence_fields(event, first_id, first_number + 1)
event = Event()
self.event_sequencer.add_sequence_fields(event, second_id, second_number)
self.assertEquals(second_id.encode("utf-8"), event.sequence_id)
self.assertEquals(second_number, event.sequence_number)
self.assertIsNone(event.sequence_number_delta)
def test_memento(self):
first_id = "1234"
second_id = "1235"
first_number = 1234
second_number = 1234
event = Event()
self.event_sequencer.add_sequence_fields(event, first_id, first_number)
memento = self.event_sequencer.get_memento()
event = Event()
self.event_sequencer.add_sequence_fields(event, second_id, second_number)
self.assertIsNotNone(event.sequence_id)
self.assertIsNotNone(event.sequence_number)
self.assertIsNone(event.sequence_number_delta)
self.event_sequencer.restore_from_memento(memento)
event = Event()
self.event_sequencer.add_sequence_fields(event, first_id, first_number + 1)
self.assertIsNone(event.sequence_id)
self.assertIsNone(event.sequence_number)
self.assertIsNotNone(event.sequence_number_delta)
def test_reset(self):
expected_id = "1234"
expected_number = 1234
expected_delta = 1
event = Event()
self.event_sequencer.add_sequence_fields(event, expected_id, expected_number)
self.event_sequencer.reset()
event = Event()
self.event_sequencer.add_sequence_fields(
event, expected_id, expected_number + expected_delta
)
self.assertEqual(expected_id.encode("utf-8"), event.sequence_id)
self.assertEqual(expected_number + expected_delta, event.sequence_number)
self.assertIsNone(event.sequence_number_delta)
class PostFixBufferTest(ScalyrTestCase):
def setUp(self):
super(PostFixBufferTest, self).setUp()
self.__format = b"], logs: LOGS, threads: THREADS, client_time: TIMESTAMP }"
def test_basic_case(self):
test_buffer = PostFixBuffer(self.__format)
test_buffer.set_client_timestamp(1)
test_buffer.add_log_and_thread_entry("log_5", "histogram_builder", {})
self.assertEquals(
test_buffer.content(),
b"""], logs: [{"attrs":{},"id":"log_5"}], threads: [{"id":"log_5","name":"histogram_builder"}], client_time: 1 }""",
)
def test_set_client_time(self):
test_buffer = PostFixBuffer(self.__format)
test_buffer.set_client_timestamp(1)
self.assertTrue(test_buffer.set_client_timestamp(433423))
expected_length = test_buffer.length
content = test_buffer.content(cache_size=False)
self.assertEquals(content, b"], logs: [], threads: [], client_time: 433423 }")
self.assertEquals(expected_length, len(content))
def test_set_client_time_fail(self):
test_buffer = PostFixBuffer(self.__format)
self.assertTrue(
test_buffer.set_client_timestamp(1, fail_if_buffer_exceeds=1000000)
)
self.assertFalse(
test_buffer.set_client_timestamp(
433423, fail_if_buffer_exceeds=test_buffer.length + 3
)
)
expected_length = test_buffer.length
content = test_buffer.content(cache_size=False)
self.assertEquals(content, b"], logs: [], threads: [], client_time: 1 }")
self.assertEquals(expected_length, len(content))
def test_add_thread(self):
test_buffer = PostFixBuffer(self.__format)
test_buffer.set_client_timestamp(1)
self.assertTrue(
test_buffer.add_log_and_thread_entry("log_5", "histogram_builder", {})
)
# NOTE: Order is important since .length call depends on the cached size
content = test_buffer.content(cache_size=True)
length = test_buffer.length
self.assertEquals(
length,
len(content),
"Buffer content: %s" % (test_buffer.content(cache_size=False)),
)
self.assertTrue(
test_buffer.add_log_and_thread_entry("log_12", "ok_builder", {})
)
content = test_buffer.content(cache_size=True)
length = test_buffer.length
self.assertEquals(
length,
len(content),
"Buffer content: %s" % (test_buffer.content(cache_size=False)),
)
self.assertTrue(
test_buffer.add_log_and_thread_entry("log", "histogram_builder_foo", {})
)
content = test_buffer.content(cache_size=True)
length = test_buffer.length
self.assertEquals(
length,
len(content),
"Buffer content: %s" % (test_buffer.content(cache_size=False)),
)
self.assertEquals(
test_buffer.content(),
b"""], logs: [{"attrs":{},"id":"log_5"},{"attrs":{},"id":"log_12"},{"attrs":{},"id":"log"}], """
b"""threads: [{"id":"log_5","name":"histogram_builder"},"""
b"""{"id":"log_12","name":"ok_builder"},"""
b"""{"id":"log","name":"histogram_builder_foo"}], client_time: 1 }""",
)
def test_add_thread_fail(self):
test_buffer = PostFixBuffer(self.__format)
test_buffer.set_client_timestamp(1)
# NOTE: Order is important since .length call depends on the cached size
self.assertTrue(
test_buffer.add_log_and_thread_entry(
"log_5", "histogram_builder", {}, fail_if_buffer_exceeds=1000000
)
)
content = test_buffer.content(cache_size=True)
length = test_buffer.length
self.assertEquals(
length,
len(content),
"Buffer content: %s" % (test_buffer.content(cache_size=False)),
)
self.assertFalse(
test_buffer.add_log_and_thread_entry(
"log_6", "histogram", {}, fail_if_buffer_exceeds=10
)
)
content = test_buffer.content(cache_size=True)
length = test_buffer.length
self.assertEquals(
length,
len(content),
"Buffer content: %s" % (test_buffer.content(cache_size=False)),
)
self.assertEquals(
test_buffer.content(),
b"""], logs: [{"attrs":{},"id":"log_5"}], """
b"""threads: [{"id":"log_5","name":"histogram_builder"}], client_time: 1 }""",
)
def test_set_position(self):
test_buffer = PostFixBuffer(self.__format)
test_buffer.set_client_timestamp(1)
self.assertTrue(
test_buffer.add_log_and_thread_entry("log_5", "histogram_builder", {})
)
position = test_buffer.position
self.assertTrue(
test_buffer.add_log_and_thread_entry("log_6", "histogram2_builder", {})
)
test_buffer.set_position(position)
content = test_buffer.content(cache_size=True)
length = test_buffer.length
self.assertEquals(
length,
len(content),
"Buffer content: %s" % (test_buffer.content(cache_size=False)),
)
self.assertEquals(
test_buffer.content(),
b"""], logs: [{"attrs":{},"id":"log_5"}], threads: [{"id":"log_5","name":"histogram_builder"}], """
b"""client_time: 1 }""",
)
class ClientSessionTest(BaseScalyrLogCaptureTestCase):
def test_user_agent_callback(self):
session = ScalyrClientSession(
"https://dummserver.com", "DUMMY API KEY", SCALYR_VERSION
)
def get_user_agent():
return session._ScalyrClientSession__standard_headers["User-Agent"]
base_ua = get_user_agent()
frags = ["frag1", "frag2", "frag3"]
session.augment_user_agent(frags)
self.assertEquals(get_user_agent(), base_ua + ";" + ";".join(frags))
def test_get_user_agent_includes_requests_version(self):
scalyr_agent.scalyr_client.ssl.OPENSSL_VERSION_INFO = (1, 0, 2, 13, 13)
# without requests
session = ScalyrClientSession(
"https://dummserver.com", "DUMMY API KEY", SCALYR_VERSION
)
user_agent = session._ScalyrClientSession__standard_headers["User-Agent"]
split = user_agent.split(";")
self.assertEqual(split[-3], "o-1.0.2-13")
self.assertTrue(split[1].startswith("python-"))
# with requests
session = ScalyrClientSession(
"https://dummserver.com",
"DUMMY API KEY",
SCALYR_VERSION,
use_requests_lib=True,
)
user_agent = session._ScalyrClientSession__standard_headers["User-Agent"]
split = user_agent.split(";")
self.assertEqual(split[-1], "requests-2.15.1")
self.assertEqual(split[-4], "o-1.0.2-13")
self.assertTrue(split[1].startswith("python-"))
@skipIf(sys.platform.startswith("win"), "Skipping test on Windows")
@mock.patch("scalyr_agent.platform_controller.PlatformController.new_platform")
def test_get_user_agent_string_run_as_admin(self, mock_new_platform):
mock_platform = mock.Mock()
mock_platform.get_current_user.return_value = "nobody"
mock_new_platform.return_value = mock_platform
session = ScalyrClientSession(
"https://dummserver.com", "DUMMY API KEY", SCALYR_VERSION
)
user_agent = session._ScalyrClientSession__standard_headers["User-Agent"]
split = user_agent.split(";")
self.assertTrue("a-0" in split)
mock_platform = mock.Mock()
mock_platform.get_current_user.return_value = "User"
mock_new_platform.return_value = mock_platform
session = ScalyrClientSession(
"https://dummserver.com", "DUMMY API KEY", SCALYR_VERSION
)
user_agent = session._ScalyrClientSession__standard_headers["User-Agent"]
split = user_agent.split(";")
self.assertFalse("" in split)
mock_platform = mock.Mock()
mock_platform.get_current_user.return_value = "root"
mock_new_platform.return_value = mock_platform
session = ScalyrClientSession(
"https://dummserver.com", "DUMMY API KEY", SCALYR_VERSION
)
user_agent = session._ScalyrClientSession__standard_headers["User-Agent"]
split = user_agent.split(";")
self.assertTrue("a-1" in split)
mock_platform = mock.Mock()
mock_platform.get_current_user.return_value = "MyDomain\\Administrators"
mock_new_platform.return_value = mock_platform
session = ScalyrClientSession(
"https://dummserver.com", "DUMMY API KEY", SCALYR_VERSION
)
user_agent = session._ScalyrClientSession__standard_headers["User-Agent"]
split = user_agent.split(";")
self.assertTrue("a-1" in split)
@skipIf(sys.platform.startswith("win"), "Skipping test on Windows")
def test_get_user_agent_worker_and_api_key_info(self):
session = ScalyrClientSession(
"https://dummserver.com",
"DUMMY API KEY",
SCALYR_VERSION,
sessions_api_keys_tuple=None,
)
user_agent = session._ScalyrClientSession__standard_headers["User-Agent"]
split = user_agent.split(";")
self.assertTrue("mw-0" in split)
session = ScalyrClientSession(
"https://dummserver.com",
"DUMMY API KEY",
SCALYR_VERSION,
sessions_api_keys_tuple=("threaded", 1, 1),
)
user_agent = session._ScalyrClientSession__standard_headers["User-Agent"]
split = user_agent.split(";")
self.assertTrue("mw-0" in split)
session = ScalyrClientSession(
"https://dummserver.com",
"DUMMY API KEY",
SCALYR_VERSION,
sessions_api_keys_tuple=("threaded", 3, 2),
)
user_agent = session._ScalyrClientSession__standard_headers["User-Agent"]
split = user_agent.split(";")
self.assertTrue("mw-1|3|2" in split)
session = ScalyrClientSession(
"https://dummserver.com",
"DUMMY API KEY",
SCALYR_VERSION,
sessions_api_keys_tuple=("multiprocess", 4, 3),
)
user_agent = session._ScalyrClientSession__standard_headers["User-Agent"]
split = user_agent.split(";")
self.assertTrue("mw-2|4|3" in split)
@mock.patch("scalyr_agent.scalyr_client.time.time", mock.Mock(return_value=0))
def test_send_request_body_is_logged_raw_uncompressed(self):
"""
        When sending a request with compression available / enabled, the raw
        (uncompressed) request body (payload) should be logged at DEBUG level.
"""
session = ScalyrClientSession(
"https://dummserver.com", "DUMMY API KEY", SCALYR_VERSION
)
session._ScalyrClientSession__connection = mock.Mock()
session._ScalyrClientSession__receive_response = mock.Mock()
session._ScalyrClientSession__compress = mock.Mock(return_value="compressed")
add_events_request = AddEventsRequest({"foo": "bar"})
event1 = Event(thread_id="foo1", attrs={"parser": "bar1"}).set_message(
"eventOne"
)
event2 = Event(thread_id="foo2", attrs={"parser": "bar2"}).set_message(
"eventTwo"
)
add_events_request.add_event(event=event1, timestamp=1)
add_events_request.add_event(event=event2, timestamp=2)
session.send(add_events_request=add_events_request)
# Should log raw (uncompressed) request body / payload
expected_body = r'{"foo":"bar", events: \[{thread:"foo1", .*'
self.assertLogFileContainsRegex(
expected_body, file_path=self.agent_debug_log_path
)
expected_body = r'.*,{thread:"foo2", log:"foo2", attrs:{"parser":"bar2",.*'
self.assertLogFileContainsRegex(
expected_body, file_path=self.agent_debug_log_path
)
# Verify that the compression was indeed enabled since that's the scenario we are testing
call_kwargs = session._ScalyrClientSession__connection.post.call_args_list[0][1]
self.assertEqual(call_kwargs["body"], "compressed")
@mock.patch("scalyr_agent.scalyr_client.time.time", mock.Mock(return_value=0))
def test_send_request_timestamp_doesnt_increases_monotonically(self):
session = ScalyrClientSession(
"https://dummserver.com", "DUMMY API KEY", SCALYR_VERSION,
)
session._ScalyrClientSession__connection = mock.Mock()
session._ScalyrClientSession__receive_response = mock.Mock()
add_events_request = session.add_events_request()
ts = 2000
add_events_request.add_event(Event().set_message("eventOne"), timestamp=ts)
add_events_request.add_event(Event().set_message("eventTwo"), timestamp=1)
json = test_util.parse_scalyr_request(add_events_request.get_payload())
event = json["events"][1]
self.assertEquals(event["ts"], "1")
@mock.patch("scalyr_agent.scalyr_client.time.time", mock.Mock(return_value=0))
def test_send_request_timestamp_increases_monotonically(self):
session = ScalyrClientSession(
"https://dummserver.com",
"DUMMY API KEY",
SCALYR_VERSION,
enforce_monotonic_timestamps=True,
)
session._ScalyrClientSession__connection = mock.Mock()
session._ScalyrClientSession__receive_response = mock.Mock()
scalyr_client._set_last_timestamp(0)
add_events_request = session.add_events_request()
ts = 2000
expected = str(ts + 1)
add_events_request.add_event(Event().set_message("eventOne"), timestamp=ts)
add_events_request.add_event(Event().set_message("eventTwo"), timestamp=1)
json = test_util.parse_scalyr_request(add_events_request.get_payload())
event = json["events"][1]
self.assertEquals(event["ts"], expected)
@mock.patch("scalyr_agent.scalyr_client.time.time", mock.Mock(return_value=0))
def test_send_request_body_is_logged_raw_uncompressed_long_body_is_truncated(self):
# Verify that very large bodies are truncated to avoid increased memory usage issues under
# Python 2.7
session = ScalyrClientSession(
"https://dummserver.com", "DUMMY API KEY", SCALYR_VERSION
)
session._ScalyrClientSession__connection = mock.Mock()
session._ScalyrClientSession__receive_response = mock.Mock()
session._ScalyrClientSession__compress = mock.Mock(return_value="compressed")
add_events_request = AddEventsRequest({"bar": "baz"})
event1 = Event(thread_id="foo4", attrs={"parser": "bar2"}).set_message(
"a" * (MAX_REQUEST_BODY_SIZE_LOG_MSG_LIMIT + 1)
)
add_events_request.add_event(event=event1, timestamp=1)
session.send(add_events_request=add_events_request)
# Should log raw (uncompressed) request body / payload
expected_body = (
r'Sending POST /addEvents with body "{"bar":"baz".*\.\.\. \[body truncated to %s chars\] \.\.\.'
% (MAX_REQUEST_BODY_SIZE_LOG_MSG_LIMIT)
)
self.assertLogFileContainsRegex(
expected_body, file_path=self.agent_debug_log_path
)
@mock.patch("scalyr_agent.scalyr_client.time.time", mock.Mock(return_value=0))
def test_send_request_body_compression(self):
add_events_request = AddEventsRequest({"bar": "baz"})
event1 = Event(thread_id="foo4", attrs={"parser": "bar2"}).set_message(
"test message 1"
)
event2 = Event(thread_id="foo5", attrs={"parser": "bar2"}).set_message(
"test message 2"
)
add_events_request.add_event(event=event1, timestamp=1)
add_events_request.add_event(event=event2, timestamp=2)
serialized_data = add_events_request.get_payload()
if sys.version_info < (2, 7, 0):
            # the lz4 and zstandard Python packages are not available for Python 2.6
compression_types = scalyr_util.COMPRESSION_TYPE_TO_DEFAULT_LEVEL.copy()
del compression_types["zstandard"]
del compression_types["lz4"]
else:
compression_types = scalyr_util.COMPRESSION_TYPE_TO_DEFAULT_LEVEL
for compression_type in compression_types:
session = ScalyrClientSession(
"https://dummserver.com",
"DUMMY API KEY",
SCALYR_VERSION,
compression_type=compression_type,
)
session._ScalyrClientSession__connection = mock.Mock()
session._ScalyrClientSession__receive_response = mock.Mock()
session.send(add_events_request=add_events_request)
(
path,
request,
) = session._ScalyrClientSession__connection.post.call_args_list[0]
_, decompress_func = scalyr_util.get_compress_and_decompress_func(
compression_type
)
self.assertEqual(path[0], "/addEvents")
if compression_type == "none":
self.assertTrue(
"Content-Encoding"
not in session._ScalyrClientSession__standard_headers
)
self.assertTrue(b"test message 1" in request["body"])
self.assertTrue(b"test message 2" in request["body"])
else:
self.assertEqual(
session._ScalyrClientSession__standard_headers["Content-Encoding"],
compression_type,
)
# Verify decompressed data matches the raw body
self.assertTrue(b"test message 1" not in request["body"])
self.assertTrue(b"test message 2" not in request["body"])
self.assertFalse(serialized_data == request["body"])
self.assertEqual(serialized_data, decompress_func(request["body"]))
# Compression is disabled
session = ScalyrClientSession(
"https://dummserver.com",
"DUMMY API KEY",
SCALYR_VERSION,
compression_type=None,
)
session._ScalyrClientSession__connection = mock.Mock()
session._ScalyrClientSession__receive_response = mock.Mock()
session.send(add_events_request=add_events_request)
serialized_data = add_events_request.get_payload()
(path, request,) = session._ScalyrClientSession__connection.post.call_args_list[
0
]
_, decompress_func = scalyr_util.get_compress_and_decompress_func(
compression_type
)
self.assertEqual(path[0], "/addEvents")
self.assertTrue(b"test message 1" in request["body"])
self.assertTrue(b"test message 2" in request["body"])
self.assertEqual(serialized_data, request["body"])
self.assertTrue(
"Content-Encoding" not in session._ScalyrClientSession__standard_headers
)
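# The two monotonic-timestamp tests above exercise the rule that, with
# enforce_monotonic_timestamps=True, each event timestamp must be strictly greater
# than the previously emitted one (which is why the second event's "ts" becomes
# ts + 1). The helper below is a minimal, hypothetical sketch of that clamping rule,
# not the agent's actual implementation -- it only illustrates the property the
# tests assert.
def _monotonic_clamp_sketch(timestamps):
    """Return the timestamps adjusted so that each value is strictly increasing."""
    clamped = []
    last = None
    for ts in timestamps:
        if last is not None and ts <= last:
            ts = last + 1  # bump to one past the previous timestamp
        clamped.append(ts)
        last = ts
    return clamped
# Example mirroring test_send_request_timestamp_increases_monotonically:
#   _monotonic_clamp_sketch([2000, 1]) == [2000, 2001]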
|
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import re
import six
from collections.abc import Iterable
from st2common.util import schema as util_schema
from st2common.constants.pack import MANIFEST_FILE_NAME
from st2common.constants.pack import PACK_REF_WHITELIST_REGEX
from st2common.content.loader import MetaLoader
from st2common.persistence.pack import Pack
from st2common.exceptions.apivalidation import ValueValidationException
from st2common.util import jinja as jinja_utils
__all__ = [
"get_pack_ref_from_metadata",
"get_pack_metadata",
"get_pack_warnings",
"get_pack_common_libs_path_for_pack_ref",
"get_pack_common_libs_path_for_pack_db",
"validate_config_against_schema",
"normalize_pack_version",
]
# Common format for python 2.7 warning
if six.PY2:
PACK_PYTHON2_WARNING = (
"DEPRECATION WARNING: Pack %s only supports Python 2.x. "
"Python 2 support will be dropped in future releases. "
"Please consider updating your packs to work with Python 3.x"
)
else:
PACK_PYTHON2_WARNING = (
"DEPRECATION WARNING: Pack %s only supports Python 2.x. "
"Python 2 support has been removed since st2 v3.4.0. "
"Please update your packs to work with Python 3.x"
)
def get_pack_ref_from_metadata(metadata, pack_directory_name=None):
"""
Utility function which retrieves pack "ref" attribute from the pack metadata file.
If this attribute is not provided, an attempt is made to infer "ref" from the "name" attribute.
:rtype: ``str``
"""
pack_ref = None
    # The rules for the pack ref are as follows:
    # 1. If the "ref" attribute is available, we use that
    # 2. If pack_directory_name is available, we use that (this only applies to packs
    #    which are in sub-directories)
    # 3. If neither is available but the pack name is and it meets the valid name
    #    criteria, we use that
if metadata.get("ref", None):
pack_ref = metadata["ref"]
elif pack_directory_name and re.match(
PACK_REF_WHITELIST_REGEX, pack_directory_name
):
pack_ref = pack_directory_name
else:
if re.match(PACK_REF_WHITELIST_REGEX, metadata["name"]):
pack_ref = metadata["name"]
else:
msg = (
                'Pack name "%s" contains invalid characters and "ref" attribute is not '
                'available. You either need to add a "ref" attribute which contains only '
                "word characters to the pack metadata file or update the name attribute "
                "to contain only word characters."
)
raise ValueError(msg % (metadata["name"]))
return pack_ref
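# A minimal sketch of how the rules above resolve, using hypothetical metadata dicts
# (names are invented for illustration and assume PACK_REF_WHITELIST_REGEX accepts
# only word characters, as the error message above suggests):
#   get_pack_ref_from_metadata({"ref": "my_pack", "name": "My Pack!"})
#       -> "my_pack"            # rule 1: an explicit "ref" wins
#   get_pack_ref_from_metadata({"name": "My Pack!"}, pack_directory_name="my_pack")
#       -> "my_pack"            # rule 2: a valid directory name is used
#   get_pack_ref_from_metadata({"name": "my_pack"})
#       -> "my_pack"            # rule 3: a valid "name" is used
#   get_pack_ref_from_metadata({"name": "My Pack!"})
#       -> raises ValueError    # nothing satisfies the whitelist regex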
def get_pack_metadata(pack_dir):
"""
Return parsed metadata for a particular pack directory.
:rtype: ``dict``
"""
manifest_path = os.path.join(pack_dir, MANIFEST_FILE_NAME)
if not os.path.isfile(manifest_path):
raise ValueError(
'Pack "%s" is missing %s file' % (pack_dir, MANIFEST_FILE_NAME)
)
meta_loader = MetaLoader()
content = meta_loader.load(manifest_path)
if not content:
raise ValueError('Pack "%s" metadata file is empty' % (pack_dir))
return content
def get_pack_warnings(pack_metadata):
"""
Return warning string if pack metadata indicates only python 2 is supported
:rtype: ``str``
"""
warning = None
versions = pack_metadata.get("python_versions", None)
pack_name = pack_metadata.get("name", None)
if versions and set(versions) == set(["2"]):
warning = PACK_PYTHON2_WARNING % pack_name
return warning
def validate_config_against_schema(
config_schema, config_object, config_path, pack_name=None
):
"""
Validate provided config dictionary against the provided config schema
dictionary.
"""
    # NOTE: Lazy import to avoid performance overhead of importing this module when it's not used
import jsonschema
pack_name = pack_name or "unknown"
schema = util_schema.get_schema_for_resource_parameters(
parameters_schema=config_schema, allow_additional_properties=True
)
instance = config_object
try:
cleaned = util_schema.validate(
instance=instance,
schema=schema,
cls=util_schema.CustomValidator,
use_default=True,
allow_default_none=True,
)
for key in cleaned:
if (
jinja_utils.is_jinja_expression(value=cleaned.get(key))
and "decrypt_kv" in cleaned.get(key)
and config_schema.get(key).get("secret")
):
raise ValueValidationException(
'Values specified as "secret: True" in config '
"schema are automatically decrypted by default. Use "
'of "decrypt_kv" jinja filter is not allowed for '
"such values. Please check the specified values in "
"the config or the default values in the schema."
)
except jsonschema.ValidationError as e:
attribute = getattr(e, "path", [])
if isinstance(attribute, (tuple, list, Iterable)):
attribute = [str(item) for item in attribute]
attribute = ".".join(attribute)
else:
attribute = str(attribute)
msg = 'Failed validating attribute "%s" in config for pack "%s" (%s): %s' % (
attribute,
pack_name,
config_path,
six.text_type(e),
)
raise jsonschema.ValidationError(msg)
return cleaned
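# Illustrative (hypothetical) usage of validate_config_against_schema; the schema and
# config below are invented for the example, but follow the shape the function expects:
#   schema = {"api_key": {"type": "string", "secret": True, "required": True}}
#   config = {"api_key": "{{ st2kv.system.api_key | decrypt_kv }}"}
#   validate_config_against_schema(config_schema=schema,
#                                  config_object=config,
#                                  config_path="/opt/stackstorm/configs/my_pack.yaml",
#                                  pack_name="my_pack")
#   # -> raises ValueValidationException, because "decrypt_kv" is used on a value the
#   #    schema marks as secret; a plain static value would validate and be returned
#   #    (with schema defaults applied).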
def get_pack_common_libs_path_for_pack_ref(pack_ref):
pack_db = Pack.get_by_ref(pack_ref)
pack_common_libs_path = get_pack_common_libs_path_for_pack_db(pack_db=pack_db)
return pack_common_libs_path
def get_pack_common_libs_path_for_pack_db(pack_db):
"""
Return the pack's common lib path. This is the path where common code for sensors
and actions are placed.
For example, if the pack is at /opt/stackstorm/packs/my_pack, you can place
common library code for actions and sensors in /opt/stackstorm/packs/my_pack/lib/.
This common library code is only available for python sensors and actions. The lib
structure also needs to follow a python convention with a __init__.py file.
:param pack_db: Pack DB model
:type pack_db: :class:`PackDB`
:rtype: ``str``
"""
pack_dir = getattr(pack_db, "path", None)
if not pack_dir:
return None
libs_path = os.path.join(pack_dir, "lib")
return libs_path
def normalize_pack_version(version):
"""
    Normalize an old, pre-StackStorm v2.1 invalid semver version string (e.g. 0.2) to a valid
semver version string (0.2.0).
:rtype: ``str``
"""
version = str(version)
    version_separator_count = version.count(".")
    if version_separator_count == 1:
version = version + ".0"
return version
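# Quick examples of the normalization above:
#   normalize_pack_version("0.2")    -> "0.2.0"
#   normalize_pack_version("0.2.1")  -> "0.2.1"   # already valid semver, unchanged
#   normalize_pack_version(1.0)      -> "1.0.0"   # non-string input is coerced via str() first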
|
|
import sys
import getopt
import struct
from functools import partial
import operator
import array
import copy
import time
import re
sys.path.append("../shell")
import swapforth
def truth(pred):
return [0, -1][pred]
def setimmediate(func):
func.is_immediate = True
return func
def ba(x):
return array.array('B', x)
class ForthException(Exception):
def __init__(self, value):
self.value = value
class SwapForth:
def __init__(self, CELL = 4, ENDIAN = '<'):
self.d = [] # data stack
self.r = [] # return stack
self.dict = {} # the dictionary
self.xts = [] # execution token (xt) table
self.ip = 0 # instruction pointer for inner interpreter
self.loopC = 0 # loop count
self.loopL = 0 # loop limit
self.leaves = [] # tracking LEAVEs from DO..LOOP
self.ram = array.array('B') # memory
self.out = sys.stdout.write # default console output
self.CELL = CELL
self.CSIGN = (256 ** self.CELL) >> 1 # Sign bit mask
self.CMASK = (256 ** self.CELL) - 1 # Cell mask
self.cellfmt = ENDIAN + {2: 'h', 4: 'i', 8: 'q'}[self.CELL]
def allot(n, d):
r = partial(self.lit, len(self.ram))
r.__doc__ = d
self.ram.extend([0] * n)
return r
self.tib = allot(256, "TIB")
self.sourcea = allot(self.CELL, "SOURCEA")
self.sourcec = allot(self.CELL, "SOURCEC")
self.sourceid = allot(self.CELL, "SOURCEID")
self.to_in = allot(self.CELL, ">IN")
self.base = allot(self.CELL, "BASE")
self.state = allot(self.CELL, "STATE")
# Run through own bound methods, adding each to the dict
isforth = re.compile(r"[A-Z0-9<>=\-\[\],@!:;+?/*]+$")
for name in dir(self):
o = getattr(self, name)
if not isforth.match(name) and o.__doc__:
# name was not a valid Forth name; try start of the docstring
name = o.__doc__.split()[0]
if callable(o) and isforth.match(name):
self.dict[name] = o
self.DECIMAL()
def u32(self, x):
return x & self.CMASK
def w32(self, x):
x += self.CSIGN
x &= self.CMASK
x -= self.CSIGN
return x
def lit(self, n):
""" push literal N on the stack """
self.d.append(n)
def popn(self, n):
r = self.d[-n:]
self.d = self.d[:-n]
return r
def q(self, s):
for w in s.split():
if w in self.dict:
self.dict[w]()
else:
self.lit(int(w))
def binary(self, op):
b = self.d.pop()
self.d[-1] = self.w32(op(self.d[-1], b))
def dpop(self):
r = self.d.pop() << (8 * self.CELL)
r += self.d.pop() & self.CMASK
return r
def dlit(self, d):
self.lit(self.w32(d & self.CMASK))
self.lit(self.w32(d >> (8 * self.CELL)))
def pops(self):
n = self.d.pop()
a = self.d.pop()
return self.ram[a:a+n].tostring()
# Start of Forth words
#
# If the word is a legal Python identifier, then
# use that name. Otherwise (e.g. '+') the Forth name is in
# the docstring.
def HERE(self):
self.lit(len(self.ram))
def THROW(self):
e = self.d.pop()
if e:
raise ForthException(e)
def CATCH(self):
self.q('SOURCEA @ SOURCEC @ >IN @')
self.q('SOURCEID @ >R')
source_spec = self.popn(3)
(ds,rs,ip) = (len(self.d) - 1, len(self.r), self.ip)
try:
self.EXECUTE()
except ForthException as e:
if len(self.d) > ds:
self.d = self.d[:ds]
else:
self.d = self.d + [0] * (ds - len(self.d))
self.r = self.r[:rs]
self.ip = ip
self.lit(source_spec[0])
self.lit(source_spec[1])
self.lit(source_spec[2])
self.q('R> SOURCEID !')
self.q('>IN ! SOURCEC ! SOURCEA !')
self.lit(e.value)
else:
self.lit(0)
def cell_plus(self):
""" CELL+ """
self.d[-1] += self.CELL
def DEPTH(self):
self.lit(len(self.d))
def SOURCE(self):
self.sourcea()
self.fetch()
self.sourcec()
self.fetch()
def fetch(self):
""" @ """
a = self.d.pop()
self.lit(*struct.unpack(self.cellfmt, self.ram[a:a + self.CELL]))
def c_fetch(self):
""" C@ """
a = self.d.pop()
self.lit(self.ram[a])
def store(self):
""" ! """
a = self.d.pop()
x = self.d.pop()
self.ram[a:a + self.CELL] = array.array('B', struct.pack(self.cellfmt, x))
def c_store(self):
""" C! """
a = self.d.pop()
x = self.d.pop()
self.ram[a] = x & 0xff
def comma(self):
""" , """
self.ram.extend(ba(struct.pack(self.cellfmt, self.d.pop())))
def c_comma(self):
""" C, """
self.ram.extend([self.d.pop()])
def slash_string(self):
""" /STRING """
n = self.d.pop()
self.d[-2] += n
self.d[-1] -= n
def PARSE(self):
delim = self.d.pop()
self.q('SOURCE >IN @ /STRING')
self.q('OVER >R')
while True:
if self.d[-1] == 0:
break
if (self.ram[self.d[-2]]) == delim:
break
self.lit(1)
self.slash_string()
self.q('2DUP 1 MIN + SOURCE DROP - >IN !')
self.q('DROP R> TUCK -')
def parse_name(self):
""" PARSE-NAME """
self.q('SOURCE >IN @ /STRING')
def skip(pred):
while True:
if self.d[-1] == 0:
break
if not pred(self.ram[self.d[-2]]):
break
self.lit(1)
self.slash_string()
skip(lambda x: x == 32)
self.q('OVER >R')
skip(lambda x: x != 32)
self.q('2DUP 1 MIN + SOURCE DROP - >IN !')
self.q('DROP R> TUCK -')
def DUP(self):
self.d.append(self.d[-1])
def DROP(self):
self.d.pop()
def NIP(self):
self.d.pop(-2)
def two_drop(self):
""" 2DROP """
self.d.pop()
self.d.pop()
def SWAP(self):
(self.d[-2], self.d[-1]) = (self.d[-1], self.d[-2])
def two_swap(self):
""" 2SWAP """
(self.d[-4], self.d[-3], self.d[-2], self.d[-1]) = (self.d[-2], self.d[-1], self.d[-4], self.d[-3])
def two_over(self):
""" 2OVER """
self.lit(self.d[-4])
self.lit(self.d[-4])
def OVER(self):
self.lit(self.d[-2])
def TUCK(self):
self.SWAP()
self.OVER()
def two_dup(self):
""" 2DUP """
self.d += self.d[-2:]
def to_r(self):
""" >R """
self.r.append(self.d.pop())
def r_from(self):
""" R> """
self.d.append(self.r.pop())
def r_fetch(self):
""" R@ """
self.d.append(self.r[-1])
def n_to_r(self):
""" N>R """
n = self.d.pop()
if n:
self.r += self.d[-n:]
self.d = self.d[:-n]
self.r.append(n)
def n_r_from(self):
""" NR> """
n = self.r.pop()
if n:
self.d += self.r[-n:]
self.r = self.r[:-n]
self.lit(n)
def plus(self):
""" + """
self.binary(operator.__add__)
def minus(self):
""" - """
self.binary(operator.__sub__)
def _and(self):
""" AND """
self.binary(operator.__and__)
def _or(self):
""" OR """
self.binary(operator.__or__)
def _xor(self):
""" XOR """
self.binary(operator.__xor__)
def LSHIFT(self):
self.binary(operator.__lshift__)
def RSHIFT(self):
self.binary(lambda a, b: (a & self.CMASK) >> b)
def two_slash(self):
""" 2/ """
self.d[-1] >>= 1
def equal(self):
""" = """
self.binary(lambda a, b: truth(a == b))
def less_than(self):
""" < """
self.binary(lambda a, b: truth(a < b))
def u_less_than(self):
""" U< """
self.binary(lambda a, b: truth((a & self.CMASK) < (b & self.CMASK)))
def NEGATE(self):
self.d[-1] = self.w32(-self.d[-1])
def INVERT(self):
self.d[-1] = self.w32(self.d[-1] ^ self.CMASK)
def MIN(self):
self.lit(min(self.d.pop(), self.d.pop()))
def MAX(self):
self.lit(max(self.d.pop(), self.d.pop()))
def dplus(self):
""" D+ """
self.dlit(self.dpop() + self.dpop())
def u_m_star(self):
""" UM* """
self.dlit(self.u32(self.d.pop()) * self.u32(self.d.pop()))
def star(self):
""" * """
self.binary(operator.__mul__)
def u_m_slash_mod(self):
""" UM/MOD """
u1 = self.u32(self.d.pop())
ud = self.dpop() & (65536**self.CELL - 1)
self.lit(self.w32(ud % u1))
self.lit(self.w32(ud / u1))
def MS(self):
time.sleep(0.001 * self.d.pop())
def EMIT(self):
self.out(chr(self.d.pop()))
def CR(self):
self.lit(ord('\n'))
self.EMIT()
def SPACE(self):
self.lit(ord(' '))
self.EMIT()
def BL(self):
self.lit(ord(' '))
def WORDS(self):
self.out(" ".join(self.dict))
def xt(self, c):
if not c in self.xts:
self.xts.append(c)
return self.xts.index(c) + 1000
def SFIND(self):
(a, n) = self.d[-2:]
s = self.ram[a:a+n].tostring().upper()
if s in self.dict:
x = self.dict[s]
self.d[-2] = self.xt(x)
if hasattr(x, 'is_immediate'):
self.d[-1] = 1
else:
self.d[-1] = -1
else:
self.lit(0)
def EXECUTE(self):
x = self.d.pop()
self.xts[x - 1000]()
@setimmediate
def left_paren(self):
""" [ """
self.lit(0)
self.state()
self.store()
def right_paren(self):
""" ] """
self.lit(1)
self.state()
self.store()
def inner(self, code):
save = self.ip
self.ip = 0
while self.ip < len(code):
c = code[self.ip]
self.ip += 1
c()
self.ip = save
def MARKER(self):
self.parse_name()
name = self.pops().upper()
def restore(here, dict):
del self.ram[here:]
self.dict = dict
self.dict[name] = partial(restore, len(self.ram), copy.copy(self.dict))
def mkheader(self):
self.parse_name()
self.code = []
self.defining = self.pops().upper()
def colon(self):
""" : """
self.mkheader()
self.right_paren()
def endcolon():
self.lastword = partial(self.inner, self.code)
if self.defining in self.dict:
                print 'warning: redefining %s' % self.defining
self.dict[self.defining] = self.lastword
self.dosemi = endcolon
@setimmediate
def semicolon(self):
""" ; """
self.dosemi()
self.left_paren()
@setimmediate
def RECURSE(self):
self.code.append(partial(self.inner, self.code))
def noname(self):
""" :NONAME """
self.code = []
self.right_paren()
def endnoname():
self.lit(self.xt(partial(self.inner, self.code)))
self.dosemi = endnoname
def IMMEDIATE(self):
setattr(self.lastword, 'is_immediate', True)
@setimmediate
def does(self):
""" DOES> """
def dodoes(code):
del self.code[1:]
self.code.append(partial(self.inner, code))
dobody = []
self.code.append(partial(dodoes, dobody))
self.semicolon()
self.right_paren()
self.code = dobody
self.dosemi = lambda: 0
def to_body(self):
""" >BODY """
code = self.xts[self.d.pop() - 1000].args[0]
code0 = code[0]
self.inner([code0])
def ALLOT(self):
self.ram.extend(ba(chr(0) * self.d.pop()))
@setimmediate
def POSTPONE(self):
self.parse_name()
self.SFIND()
if self.d[-1] == 0:
self.DROP()
assert 0, "Bad postpone %s" % self.pops()
if self.d.pop() < 0:
self.LITERAL()
self.lit(self.xt(self.compile_comma))
self.compile_comma()
def EXIT(self):
self.ip = 99999999;
def ACCEPT(self):
(a, n) = self.popn(2)
s = raw_input()[:n]
ns = len(s)
self.ram[a:a + ns] = s
self.lit(ns)
def to_number(self, base = None):
""" >NUMBER """
if base is None:
self.base()
self.fetch()
base = self.d.pop()
(a, n) = self.popn(2)
ud2 = self.dpop()
try:
while n:
ud2 = base * ud2 + int(chr(self.ram[a]), base)
a += 1
n -= 1
except ValueError:
pass
self.dlit(ud2)
self.lit(a)
self.lit(n)
def DECIMAL(self):
self.lit(10)
self.base()
self.store()
def compile_comma(self):
""" COMPILE, """
self.code.append(self.xts[self.d.pop() - 1000])
def branch(self, x):
self.ip = x
def zbranch(self, x):
if self.d.pop() == 0:
self.ip = x
@setimmediate
def BEGIN(self):
self.lit(len(self.code))
@setimmediate
def AGAIN(self):
self.code.append(partial(self.branch, self.d.pop()))
@setimmediate
def AHEAD(self):
self.lit(len(self.code))
self.code.append(self.branch)
@setimmediate
def m_if(self):
""" IF """
self.lit(len(self.code))
self.code.append(self.zbranch)
@setimmediate
def THEN(self):
p = self.d.pop()
self.code[p] = partial(self.code[p], len(self.code))
@setimmediate
def UNTIL(self):
self.code.append(partial(self.zbranch, self.d.pop()))
@setimmediate
def LITERAL(self):
self.code.append(partial(self.lit, self.d.pop()))
def dodo(self):
self.r.append(self.loopC)
self.r.append(self.loopL)
self.loopC = self.d.pop()
self.loopL = self.d.pop()
def qdodo(self):
self.r.append(self.loopC)
self.r.append(self.loopL)
self.loopC = self.d[-1]
self.loopL = self.d[-2]
self._xor()
def doloop(self):
before = self.w32(self.loopC - self.loopL) < 0
inc = self.d.pop()
self.loopC = self.w32(self.loopC + inc)
after = self.w32(self.loopC - self.loopL) < 0
if inc > 0:
finish = before > after
else:
finish = before < after
self.lit(finish)
@setimmediate
def DO(self):
self.leaves.append([])
self.code.append(self.dodo)
self.lit(len(self.code))
@setimmediate
def LOOP(self):
self.lit(1)
self.LITERAL()
self.plus_loop()
@setimmediate
def plus_loop(self):
""" +LOOP """
self.code.append(self.doloop)
self.UNTIL()
leaves = self.leaves.pop()
for p in leaves:
self.code[p] = partial(self.code[p], len(self.code))
self.code.append(self.UNLOOP)
@setimmediate
def question_do(self):
""" ?DO """
self.code.append(self.qdodo)
self.leaves.append([len(self.code)])
self.code.append(self.zbranch)
self.lit(len(self.code))
return
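        # NOTE: the early return above makes the remaining statements of ?DO
        # unreachable; they look like an alternative compilation strategy that was
        # left in place rather than deleted.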
self.code.append(self.two_dup)
self.code.append(self.equal)
self.leaves.append([len(self.code)])
self.code.append(self.zbranch)
self.code.append(self.dodo)
self.lit(len(self.code))
def I(self):
self.lit(self.loopC)
def J(self):
self.lit(self.r[-2])
def UNLOOP(self):
self.loopL = self.r.pop()
self.loopC = self.r.pop()
def QUIT(self):
print 'QUIT'
raise swapforth.Bye
@setimmediate
def LEAVE(self):
self.leaves[-1].append(len(self.code))
self.code.append(self.branch)
def EVALUATE(self):
self.q('SOURCE >R >R >IN @ >R')
self.q('SOURCEID @ >R -1 SOURCEID !')
self.q('SOURCEC ! SOURCEA ! 0 >IN !')
self.interpret()
self.q('R> SOURCEID !')
self.q('R> >IN ! R> SOURCEA ! R> SOURCEC !')
def source_id(self):
""" SOURCE-ID """
self.q('SOURCEID @')
def interpret(self):
def consume1(c):
if self.d[-1] != 0:
r = self.ram[self.d[-2]] == c
else:
r = 0
if r:
self.lit(1)
self.slash_string()
return r
def da():
self.two_dup()
was = self.pops()
if len(was) == 3 and was[0] == "'" and was[2] == "'":
self.two_drop()
self.lit(ord(was[1]))
self.lit(1)
return
self.dlit(0)
self.two_swap()
if consume1(ord('$')):
base = 16
elif consume1(ord('#')):
base = 10
elif consume1(ord('%')):
base = 2
else:
base = None
neg = consume1(ord('-'))
self.to_number(base)
double = consume1(ord('.'))
if self.d.pop() != 0:
self.lit(-13)
self.THROW()
self.DROP()
if double:
if neg:
self.q('DNEGATE')
self.lit(2)
else:
self.DROP()
if neg:
self.NEGATE()
self.lit(1)
def doubleAlso():
da()
self.DROP()
def doubleAlso_comma():
da()
if self.d.pop() == 2:
self.SWAP()
self.LITERAL()
self.LITERAL()
while True:
self.parse_name()
if self.d[-1] == 0:
break
self.SFIND()
i = self.d.pop() + 1
self.state()
self.fetch()
i += 3 * self.d.pop()
[ # nonimmediate number immediate
# ------------ ------ ---------
self.EXECUTE, doubleAlso, self.EXECUTE, # interpretation
self.compile_comma, doubleAlso_comma, self.EXECUTE # compilation
][i]()
self.two_drop()
def REFILL(self):
self.q('SOURCEID @')
if self.d.pop() == 0:
self.tib()
self.lit(256)
self.ACCEPT()
self.q('SOURCEC !')
self.q('TIB SOURCEA !')
self.q('0 >IN !')
self.lit(-1)
else:
self.lit(0)
def putcmd(self, cmd):
if cmd.endswith('\r'):
cmd = cmd[:-1]
self.tib()
tib = self.d.pop()
for i,c in enumerate(cmd):
self.ram[tib + i] = ord(c)
self.q('TIB SOURCEA !')
self.lit(len(cmd))
self.q('SOURCEC !')
self.q('0 >IN !')
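# A minimal sketch of driving SwapForth directly from Python 2 (the dialect this
# module is written in): feed a line into the terminal input buffer with putcmd()
# and run interpret(); numbers end up on the data stack `d` and words are looked up
# in `dict`. This is only an illustration of the class API, not part of the module:
#   sf = SwapForth()
#   sf.putcmd("1 2 +")
#   sf.interpret()
#   print sf.d          # -> [3]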
import threading
import Queue
class AsyncSwapForth(SwapForth):
def __init__(self, cmdq, ready, *options):
SwapForth.__init__(self, *options)
self.cmdq = cmdq
self.ready = ready
while True:
self.REFILL()
if not self.d.pop():
assert 0, "REFILL failed"
self.lit(self.xt(self.interpret))
self.CATCH()
e = self.d.pop()
if e:
codes = {
-1 : ": aborted",
-4 : ": stack underflow",
-9 : ": invalid memory address",
-13 : ": undefined word",
-14 : ": interpreting a compile-only word",
-28 : ": user interrupt"}
self.out('error: %d%s\n' % (e, codes.get(e, "")))
else:
self.out(' ok\r\n')
def ACCEPT(self):
(a, n) = self.popn(2)
self.ready.set()
        (self.out, s) = self.cmdq.get()
        s = s[:n]
ns = len(s)
self.ram[a:a + ns] = ba(s)
self.lit(ns)
class Tethered(swapforth.TetheredTarget):
def __init__(self, *options):
self.searchpath = ['.']
self.log = open("log", "w")
self.ser = None
self.verbose = False
self.interpreting = False
self.ready = threading.Event()
self.cmdq = Queue.Queue()
self.t = threading.Thread(target = AsyncSwapForth, args = (self.cmdq, self.ready) + options)
self.t.setDaemon(True)
self.t.start()
self.ready.wait()
def issue(self, writer, cmd):
assert self.ready.is_set()
self.ready.clear()
self.cmdq.put((writer, cmd))
self.ready.wait()
def interactive_command(self, cmd):
self.issue(sys.stdout.write, cmd)
def command_response(self, cmd):
r = []
self.issue(lambda c: r.append(c), cmd)
return "".join(r)
if __name__ == '__main__':
cellsize = 4
endian = '<'
try:
options,args = getopt.getopt(sys.argv[1:], 'c:b')
optdict = dict(options)
if '-c' in optdict:
cellsize = int(optdict['-c'])
if '-b' in optdict:
endian = '>'
except getopt.GetoptError:
print "usage:"
print " -c N cell size, one of 2,4 or 8"
print " -b big-endian. Default is little-endian"
sys.exit(1)
dpans = {}
allw = set()
t = Tethered(cellsize, endian)
t.searchpath += ['../anstests', '../common']
# print set(t.sf.dict.keys()) - dpans['CORE']
try:
t.include('swapforth.fs')
[t.include(a) for a in args]
except swapforth.Bye:
pass
if 0:
words = set(t.command_response('words').split())
missing = dpans['CORE'] - words
print(len(missing), "MISSING CORE", " ".join(sorted(missing)))
print words - allw
t.shell()
|
|
# -*- coding: utf-8 -*-
"""
vcin_hypervisors_backup.py -- Backup the vCenter Integration Node (VCIN) hypervisor configuration for each vCenter to a CSV file.
--- CSV Structure ---
A CSV file for each vCenter will be created as a backup of the information in VCIN. This file will have the following
structure. This structure is compatible with the vcin_vdt_configure_from_vsphere.py script in the VSPK-Examples Github
repository (https://github.com/nuagenetworks/vspk-examples)
"<IP>/<FQDN>",
"[name]",
"[hypervisor user]",
"[hypervisor password]",
"[management network portgroup]",
"[data network portgroup]",
"[vm network portgroup]",
"[multicast sourece portgroup]",
"[use management DHCP (True|False)]",
"[management IP]",
"[management netmask (octet structure)]",
"[management gateway]",
"[management DNS 1]",
"[management DNS 2]",
"[separate data network (True|False)]",
"[use data DHCP (True|False)]",
"[data IP]",
"[data netmask (octet structure)]",
"[data gateway]",
"[data DNS 1]",
"[data DNS 2]",
"[MTU]",
"[require metadata (True|False)]",
"[generic split activation (True|False)]",
"[multi VM support (True|False)]",
"[DHCP relay server (IP)]",
"[flow eviction threshold]",
"[datapath sync timeout]",
"[network uplink interface]",
"[network uplink IP]",
"[network uplink netmask (octet structure)]",
"[network uplink gateway]",
"[script URL]",
"[personality]",
"[site ID]",
"[NFS server address (IP)]",
"[NFS mount path]",
"[primay Nuage controller (IP)]",
"[secondary Nuage controller (IP)]",
"[primary NTP server (IP)]",
"[secondary NTP server (IP)]",
"[static route target IP]",
"[static route netmask (octet structure)]",
"[static route next-hop gateway]",
"[multicast send interface]",
"[multicast send IP]",
"[multicast send netmask (octet structure)]",
"[multicast receive IP]",
"[multicast receive netmask (octet structure)]",
"[Host Agent VM Port Group]",
"[Host Agent VM Datastore]"
--- Version history ---
2016-10-12 - 1.0 - Initial release
2016-10-13 - 1.1 - Updated to support Generic split activation.
2020-07-06 - 1.2 - Migrate to v6 API
--- Usage ---
Run 'python vcin_hypervisors_backup.py -h' for an overview
--- Author ---
Philippe Dellaert <[email protected]>
"""
import argparse
import csv
import getpass
import logging
import os.path
from vspk import v6 as vsdk
def get_args():
"""
Supports the command-line arguments listed below.
"""
parser = argparse.ArgumentParser(description="A tool to backup your configuration in VCIN to a CSV file which can be used with the vcin_vdt_configure_from_vsphere.py script.")
parser.add_argument('-d', '--debug', required=False, help='Enable debug output', dest='debug', action='store_true')
parser.add_argument('-l', '--log-file', required=False, help='File to log to (default = stdout)', dest='logfile', type=str)
parser.add_argument('-E', '--nuage-enterprise', required=True, help='The enterprise with which to connect to the Nuage VSD/SDK host', dest='nuage_enterprise', type=str)
parser.add_argument('-H', '--nuage-host', required=True, help='The Nuage VSD/SDK endpoint to connect to', dest='nuage_host', type=str)
parser.add_argument('-P', '--nuage-port', required=False, help='The Nuage VSD/SDK server port to connect to (default = 8443)', dest='nuage_port', type=int, default=8443)
parser.add_argument('-p', '--nuage-password', required=False, help='The password with which to connect to the Nuage VSD/SDK host. If not specified, the user is prompted at runtime for a password', dest='nuage_password', type=str)
parser.add_argument('-u', '--nuage-user', required=True, help='The username with which to connect to the Nuage VSD/SDK host', dest='nuage_username', type=str)
parser.add_argument('-o', '--output-folder', required=True, help='The folder to where to write the output to, a file per vCenter will be created', dest='output_folder', type=str)
parser.add_argument('-S', '--disable-SSL-certificate-verification', required=False, help='Disable SSL certificate verification on connect (deprecated)', dest='nosslcheck', action='store_true')
parser.add_argument('-v', '--verbose', required=False, help='Enable verbose output', dest='verbose', action='store_true')
args = parser.parse_args()
return args
def main():
"""
Backup the vCenter Integration Node configuration
"""
# Handling arguments
args = get_args()
debug = args.debug
log_file = None
if args.logfile:
log_file = args.logfile
nuage_enterprise = args.nuage_enterprise
nuage_host = args.nuage_host
nuage_port = args.nuage_port
nuage_password = None
if args.nuage_password:
nuage_password = args.nuage_password
nuage_username = args.nuage_username
output_folder = args.output_folder
# nosslcheck = args.nosslcheck
verbose = args.verbose
# Logging settings
if debug:
log_level = logging.DEBUG
elif verbose:
log_level = logging.INFO
else:
log_level = logging.WARNING
logging.basicConfig(filename=log_file, format='%(asctime)s %(levelname)s %(message)s', level=log_level)
logger = logging.getLogger(__name__)
# Checking if folder is writeable
if not os.access(output_folder, os.W_OK):
logger.critical('Folder {0:s} is not writable, exiting.'.format(output_folder))
return 1
# Getting user password for Nuage connection
if nuage_password is None:
logger.debug('No command line Nuage password received, requesting Nuage password from user')
nuage_password = getpass.getpass(
prompt='Enter password for Nuage host {0:s} for user {1:s}: '.format(nuage_host, nuage_username))
nc = None
# Connecting to Nuage
try:
logger.info('Connecting to Nuage server {0:s}:{1:d} with username {2:s}'.format(nuage_host, nuage_port, nuage_username))
nc = vsdk.NUVSDSession(username=nuage_username, password=nuage_password, enterprise=nuage_enterprise,
api_url="https://{0:s}:{1:d}".format(nuage_host, nuage_port))
nc.start()
except IOError:
pass
if not nc or not nc.is_current_session():
logger.error(
'Could not connect to Nuage host {0:s} with user {1:s} and specified password'.format(nuage_host, nuage_username))
return 1
logger.info('Connected to Nuage')
# Run through each vCenter
for nvc in nc.user.vcenters.get():
logger.debug('Running for vCenter {0:s}'.format(nvc.name))
hosts = []
for ndc in nvc.vcenter_data_centers.get():
logger.debug('Running for DC {0:s}'.format(ndc.name))
for ncl in ndc.vcenter_clusters.get():
logger.debug('Running for cluster {0:s}'.format(ncl.name))
for host in ncl.vcenter_hypervisors.get():
logger.debug('Handling host {0:s}'.format(host.name))
host = [
host.hypervisor_ip,
host.name,
host.hypervisor_user,
host.hypervisor_password,
host.mgmt_network_portgroup,
host.data_network_portgroup,
host.vm_network_portgroup,
host.multicast_source_portgroup,
host.allow_mgmt_dhcp,
host.mgmt_ip_address,
host.mgmt_netmask,
host.mgmt_gateway,
host.mgmt_dns1,
host.mgmt_dns2,
host.separate_data_network,
host.allow_data_dhcp,
host.data_ip_address,
host.data_netmask,
host.data_gateway,
host.data_dns1,
host.data_dns2,
host.mtu,
host.v_require_nuage_metadata,
host.generic_split_activation,
host.multi_vmssupport,
host.dhcp_relay_server,
host.flow_eviction_threshold,
host.datapath_sync_timeout,
host.network_uplink_interface,
host.network_uplink_interface_ip,
host.network_uplink_interface_netmask,
host.network_uplink_interface_gateway,
host.customized_script_url,
host.personality,
host.site_id,
host.nfs_log_server,
host.nfs_mount_path,
host.primary_nuage_controller,
host.secondary_nuage_controller,
host.ntp_server1,
host.ntp_server2,
host.static_route,
host.static_route_netmask,
host.static_route_gateway,
host.multicast_send_interface,
host.multicast_send_interface_ip,
host.multicast_send_interface_netmask,
host.multicast_receive_interface_ip,
host.multicast_receive_interface_netmask,
'',
''
]
hosts.append(host)
logger.debug('Writing CSV for vCenter {0:s}'.format(nvc.name))
with open('{0:s}/{1:s}.csv'.format(output_folder, nvc.name), 'w') as hostlist:
writer = csv.writer(hostlist, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
writer.writerows(hosts)
logger.info('Completed all tasks.')
return 0
# Start program
if __name__ == "__main__":
main()
|
|
r"""
One-dimensional IMEX acoustic-advection
=======================================
Integrate the linear 1D acoustic-advection problem:
.. math::
u_t + U u_x + c p_x & = 0 \\
p_t + U p_x + c u_x & = 0.
"""
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as LA
from pySDC.Problem import ptype
from pySDC.datatype_classes.mesh import mesh, rhs_imex_mesh
# Sharpclaw imports
from clawpack import pyclaw
from clawpack import riemann
from getFDMatrix import getFDMatrix
def u_initial(x):
return np.sin(2.0*np.pi*x)
# return np.exp(-0.5*(x-0.5)**2/0.1**2)
class acoustic_1d_imex(ptype):
"""
    Example implementing the linear 1D acoustic-advection equation with periodic BC in [0,1]
Attributes:
solver: Sharpclaw solver
        state: Sharpclaw state
domain: Sharpclaw domain
"""
def __init__(self, cparams, dtype_u, dtype_f):
"""
Initialization routine
Args:
cparams: custom parameters for the example
dtype_u: particle data type (will be passed parent class)
dtype_f: acceleration data type (will be passed parent class)
"""
# these parameters will be used later, so assert their existence
assert 'nvars' in cparams
assert 'cs' in cparams
assert 'cadv' in cparams
assert 'order_adv' in cparams
# add parameters as attributes for further reference
for k,v in cparams.items():
setattr(self,k,v)
# invoke super init, passing number of dofs, dtype_u and dtype_f
super(acoustic_1d_imex,self).__init__(self.nvars,dtype_u,dtype_f)
riemann_solver = riemann.advection_1D # NOTE: This uses the FORTRAN kernels of clawpack
self.solver = pyclaw.SharpClawSolver1D(riemann_solver)
self.solver.weno_order = 5
self.solver.time_integrator = 'Euler' # Remove later
self.solver.kernel_language = 'Fortran'
self.solver.bc_lower[0] = pyclaw.BC.periodic
self.solver.bc_upper[0] = pyclaw.BC.periodic
self.solver.cfl_max = 1.0
assert self.solver.is_valid()
x = pyclaw.Dimension(0.0, 1.0, self.nvars[1], name='x')
self.domain = pyclaw.Domain(x)
self.state = pyclaw.State(self.domain, self.solver.num_eqn)
self.mesh = self.state.grid.x.centers
self.dx = self.mesh[1] - self.mesh[0]
self.A = -self.cs*getFDMatrix(self.nvars[1], self.order_adv, self.dx)
self.state.problem_data['u'] = self.cadv
solution = pyclaw.Solution(self.state, self.domain)
self.solver.setup(solution)
def solve_system(self,rhs,factor,u0,t):
"""
Simple linear solver for (I-dtA)u = rhs
Args:
rhs: right-hand side for the nonlinear system
factor: abbrev. for the node-to-node stepsize (or any other factor required)
u0: initial guess for the iterative solver (not used here so far)
t: current time (e.g. for time-dependent BCs)
Returns:
solution as mesh
"""
M1 = sp.hstack( (sp.eye(self.nvars[1]), -factor*self.A) )
M2 = sp.hstack( (-factor*self.A, sp.eye(self.nvars[1])) )
M = sp.vstack( (M1, M2) )
b = np.concatenate( (rhs.values[0,:], rhs.values[1,:]) )
sol = LA.spsolve(M, b)
me = mesh(self.nvars)
me.values[0,:], me.values[1,:] = np.split(sol, 2)
return me
def __eval_fexpl(self,u,t):
"""
Helper routine to evaluate the explicit part of the RHS
Args:
u: current values (not used here)
t: current time
Returns:
explicit part of RHS
"""
fexpl = mesh(self.nvars)
# Copy values of u into pyClaw state object
self.state.q[0,:] = u.values[0,:]
# Evaluate right hand side
tmp = self.solver.dqdt(self.state)
fexpl.values[0,:] = tmp.reshape(self.nvars[1:])
# Copy values of u into pyClaw state object
self.state.q[0,:] = u.values[1,:]
# Evaluate right hand side
tmp = self.solver.dqdt(self.state)
fexpl.values[1,:] = tmp.reshape(self.nvars[1:])
# DEBUGGING
# fexpl.values[0,:] = 0.0*self.mesh
# fexpl.values[1,:] = 0.0*self.mesh
return fexpl
def __eval_fimpl(self,u,t):
"""
Helper routine to evaluate the implicit part of the RHS
Args:
u: current values
t: current time (not used here)
Returns:
implicit part of RHS
"""
fimpl = mesh(self.nvars,val=0)
fimpl.values[0,:] = self.A.dot(u.values[1,:])
fimpl.values[1,:] = self.A.dot(u.values[0,:])
return fimpl
def eval_f(self,u,t):
"""
Routine to evaluate both parts of the RHS
Args:
u: current values
t: current time
Returns:
the RHS divided into two parts
"""
f = rhs_imex_mesh(self.nvars)
f.impl = self.__eval_fimpl(u,t)
f.expl = self.__eval_fexpl(u,t)
return f
def u_exact(self,t):
"""
Routine to compute the exact solution at time t
Args:
t: current time
Returns:
exact solution
"""
me = mesh(self.nvars)
me.values[0,:] = 0.5*u_initial(self.mesh - (self.cadv + self.cs)*t) + 0.5*u_initial(self.mesh - (self.cadv - self.cs)*t)
me.values[1,:] = 0.5*u_initial(self.mesh - (self.cadv + self.cs)*t) - 0.5*u_initial(self.mesh - (self.cadv - self.cs)*t)
return me
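# A short sketch of where the formulas in u_exact come from (assuming, as u_exact
# does, the initial condition u(x,0) = u_0(x), p(x,0) = 0): adding and subtracting
# the two equations diagonalizes the system into characteristic variables
#   w_+ = u + p,  (w_+)_t + (U + c) (w_+)_x = 0
#   w_- = u - p,  (w_-)_t + (U - c) (w_-)_x = 0
# so w_+(x,t) = u_0(x - (U + c) t) and w_-(x,t) = u_0(x - (U - c) t), and therefore
#   u = (w_+ + w_-) / 2,   p = (w_+ - w_-) / 2,
# which is exactly what u_exact evaluates with U = cadv and c = cs.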
|
|
#!/usr/bin/env python
import os, shutil, subprocess, time, pipes, traceback, xlrd, sys
from threading import Timer  # Timer(seconds, callback) is used below to enforce a per-run timeout
from subprocess import Popen, PIPE
home_dir_list = []
home_dir_list.append(os.getcwd())
print "\n"
print "\n"
print "\n"
print "\n"
print "\n"
print "\n"
print "\n"
print "\n"
print "\n"
print "\n"
print "\n"
print "\n"
print "\n"
print "\n"
print "\n"
print "_________________________________________"
def initialization():
print "AutoHeFESTo" + "\n"
print "Welcome to AutoHeFESTo..." + "\n"
try:
if "main" in os.listdir(home_dir_list[0]):
print "'main' detected in the working directory. HeFESTo is ready!" + "\n"
else:
print "'main' is NOT detected in the working directory! HeFESTo is NOT ready and this script will NOT function properly!"
pass
except:
print "\n" + "***" + "'main' is NOT detected in the working directory! HeFESTo is NOT ready and this script will NOT function properly!" + "***" + "\n"
pass
print "Type 'bsp' to start the automation process or anything else to exit script..."
print "***bsp = bulk silicate planet///morb = mid ocean ridge basalt***" + "\n"
wait_for_begin = raw_input(">>> Please type 'bsp' or 'morb'... ")
if wait_for_begin == 'bsp':
print "\n" + "Performing BSP Calculations..." + "\n"
makethedirs_bsp()
elif wait_for_begin == 'morb':
print "\n" + "Performing MORB Calculations..." + "\n"
makethedirs_morb()
else:
print "Oops! That's not a valid command!" + "\n"
initialization()
#_____________________________________________________________________________________________MAKE DIRECTORIES
def makethedirs_bsp():
if not os.path.exists(home_dir_list[0] + "/BSP_Control_Files"):
print home_dir_list[0] + "/BSP_Control_Files' path not detected. Creating..."
os.makedirs(home_dir_list[0] + "/BSP_Control_Files")
else:
print home_dir_list[0] + "/BSP_Control_Files' path exists. Deleting and recreating..."
shutil.rmtree(home_dir_list[0] + "/BSP_Control_Files")
os.makedirs(home_dir_list[0] + "/BSP_Control_Files")
if not os.path.exists(home_dir_list[0] + "/BSP_Output_Files"):
print home_dir_list[0] + "/BSP_Output_Files' path not detected. Creating..."
os.makedirs(home_dir_list[0] + "/BSP_Output_Files")
else:
print home_dir_list[0] + "/BSP_Output_Files' path exists. Deleting and recreating..."
shutil.rmtree(home_dir_list[0] + "/BSP_Output_Files")
os.makedirs(home_dir_list[0] + "/BSP_Output_Files")
if not os.path.exists(home_dir_list[0] + "/BSP_Output_Files/fort.66_files"):
print home_dir_list[0] + "/BSP_Output_Files/fort.66_files' path not detected. Creating..."
os.makedirs(home_dir_list[0] + "/BSP_Output_Files/fort.66_files")
else:
print home_dir_list[0] + "/BSP_Output_Files/fort.66_files' path exists. Deleting and recreating..."
shutil.rmtree(home_dir_list[0] + "/BSP_Output_Files/fort.66_files")
os.makedirs(home_dir_list[0] + "/BSP_Output_Files/fort.66_files")
if not os.path.exists(home_dir_list[0] + "/BSP_Output_Files/fort.58_files"):
print home_dir_list[0] + "/BSP_Output_Files/fort.58_files' path not detected. Creating..."
os.makedirs(home_dir_list[0] + "/BSP_Output_Files/fort.58_files")
else:
print home_dir_list[0] + "/BSP_Output_Files/fort.58_files' path exists. Deleting and recreating..."
shutil.rmtree(home_dir_list[0] + "/BSP_Output_Files/fort.58_files")
os.makedirs(home_dir_list[0] + "/BSP_Output_Files/fort.58_files")
if not os.path.exists(home_dir_list[0] + "/BSP_Output_Files/fort.59_files"):
print home_dir_list[0] + "/BSP_Output_Files/fort.59_files' path not detected. Creating..."
os.makedirs(home_dir_list[0] + "/BSP_Output_Files/fort.59_files")
else:
print home_dir_list[0] + "/BSP_Output_Files/fort.59_files' path exists. Deleting and recreating..."
shutil.rmtree(home_dir_list[0] + "/BSP_Output_Files/fort.59_files")
os.makedirs(home_dir_list[0] + "/BSP_Output_Files/fort.59_files")
print "Moving on to input file creation..." + "\n"
writeinputfiles_bsp()
def makethedirs_morb():
if not os.path.exists(home_dir_list[0] + "/MORB_Control_Files"):
print home_dir_list[0] + "/MORB_Control_Files' path not detected. Creating..."
os.makedirs(home_dir_list[0] + "/MORB_Control_Files")
else:
print home_dir_list[0] + "/MORB_Control_Files' path exists. Deleting and recreating..."
shutil.rmtree(home_dir_list[0] + "/MORB_Control_Files")
os.makedirs(home_dir_list[0] + "/MORB_Control_Files")
print "Moving on to input file creation..." + "\n"
if not os.path.exists(home_dir_list[0] + "/MORB_Output_Files"):
print home_dir_list[0] + "/MORB_Output_Files' path not detected. Creating..."
os.makedirs(home_dir_list[0] + "/MORB_Output_Files")
else:
print home_dir_list[0] + "/MORB_Output_Files' path exists. Deleting and recreating..."
shutil.rmtree(home_dir_list[0] + "/MORB_Output_Files")
os.makedirs(home_dir_list[0] + "/MORB_Output_Files")
if not os.path.exists(home_dir_list[0] + "/MORB_Output_Files/fort.66_files"):
print home_dir_list[0] + "/MORB_Output_Files/fort.66_files' path not detected. Creating..."
os.makedirs(home_dir_list[0] + "/MORB_Output_Files/fort.66_files")
else:
print home_dir_list[0] + "/MORB_Output_Files/fort.66_files' path exists. Deleting and recreating..."
shutil.rmtree(home_dir_list[0] + "/MORB_Output_Files/fort.66_files")
os.makedirs(home_dir_list[0] + "/MORB_Output_Files/fort.66_files")
if not os.path.exists(home_dir_list[0] + "/MORB_Output_Files/fort.58_files"):
print home_dir_list[0] + "/MORB_Output_Files/fort.58_files' path not detected. Creating..."
os.makedirs(home_dir_list[0] + "/MORB_Output_Files/fort.58_files")
else:
print home_dir_list[0] + "/MORB_Output_Files/fort.58_files' path exists. Deleting and recreating..."
shutil.rmtree(home_dir_list[0] + "/MORB_Output_Files/fort.58_files")
os.makedirs(home_dir_list[0] + "/MORB_Output_Files/fort.58_files")
if not os.path.exists(home_dir_list[0] + "/MORB_Output_Files/fort.59_files"):
print home_dir_list[0] + "/MORB_Output_Files/fort.59_files' path not detected. Creating..."
os.makedirs(home_dir_list[0] + "/MORB_Output_Files/fort.59_files")
else:
print home_dir_list[0] + "/MORB_Output_Files/fort.59_files' path exists. Deleting and recreating..."
shutil.rmtree(home_dir_list[0] + "/MORB_Output_Files/fort.59_files")
os.makedirs(home_dir_list[0] + "/MORB_Output_Files/fort.59_files")
print "\n" + "Moving on to input file creation..." + "\n"
writeinputfiles_morb()
#_______________________________________________________________________________________________________WRITE CONTROL FILES
def writeinputfiles_bsp():
xl_workbook = xlrd.open_workbook(raw_input(">>>Please enter your workbook name: "), 'rb')
print "\n" + "Opening workbook..." + "\n"
xl_sheet = xl_workbook.sheet_by_index(0)
print ('Sheet name: %s' % xl_sheet.name)
print "\n"
num_cols = xl_sheet.ncols
print "Writing BSP HeFESTo control files..." + "\n"
for j in range(xl_sheet.nrows):
row = xl_sheet.row(j)
file_name = str(row[0].value)
print "~Writing HeFESTo control file: " + str(file_name) + " ..." + "\n"
control_file = open('control.' +file_name.rstrip() + '_bsp' + ".txt", 'w')
for i in range(1,num_cols):
num = row[i].value
if i <=11:
control_file.write(str(row[i].value)+'\n')
else:
#print num
test = list(str(num))[0]
#print test
if test.isalpha() == True:
control_file.write(str(row[i].value)+'\n')
else:
output = int(row[i].value)
control_file.write(str(output)+'\n')
control_file.close()
filename = 'control.' +file_name.rstrip() + '_bsp' + ".txt"
fdir = home_dir_list[0] + "/" + filename
tdir = home_dir_list[0] + "/BSP_Control_Files/" + filename
shutil.move(fdir, tdir)
else:
print "BSP HeFESTo control files written..." + "\n"
os.chdir(home_dir_list[0])
if "fort.66" in os.listdir(home_dir_list[0]):
os.remove("fort.66")
else:
pass
if "fort.58" in os.listdir(home_dir_list[0]):
os.remove("fort.58")
else:
pass
if "fort.59" in os.listdir(home_dir_list[0]):
os.remove("fort.59")
else:
pass
if "control" in os.listdir(home_dir_list[0]):
os.remove("control")
else:
pass
run_hefesto_bsp()
def writeinputfiles_morb():
xl_workbook = xlrd.open_workbook(raw_input(">>>Please enter your workbook name: "), 'rb')
print "\n" + "Opening workbook..." + "\n"
xl_sheet = xl_workbook.sheet_by_index(0)
print ('Sheet name: %s' % xl_sheet.name)
print "\n"
num_cols = xl_sheet.ncols
print "Writing MORB HeFESTo control files..." + "\n"
for j in range(xl_sheet.nrows):
row = xl_sheet.row(j)
file_name = str(row[0].value)
print "~Writing HeFESTo control file: " + str(file_name) + " ..." + "\n"
control_file = open('control.' +file_name.rstrip() + '_morb' + ".txt", 'w')
for i in range(1,num_cols):
num = row[i].value
if i <=11:
control_file.write(str(row[i].value)+'\n')
else:
#print num
test = list(str(num))[0]
#print test
if test.isalpha() == True:
control_file.write(str(row[i].value)+'\n')
else:
output = int(row[i].value)
control_file.write(str(output)+'\n')
control_file.close()
filename = 'control.' +file_name.rstrip() + '_morb' + ".txt"
fdir = home_dir_list[0] + "/" + filename
tdir = home_dir_list[0] + "/MORB_Control_Files/" + filename
shutil.move(fdir, tdir)
else:
print "MORB HeFESTo control files written..." + "\n"
os.chdir(home_dir_list[0])
if "fort.66" in os.listdir(home_dir_list[0]):
os.remove("fort.66")
else:
pass
if "fort.58" in os.listdir(home_dir_list[0]):
os.remove("fort.58")
else:
pass
if "fort.59" in os.listdir(home_dir_list[0]):
os.remove("fort.59")
else:
pass
if "control" in os.listdir(home_dir_list[0]):
os.remove("control")
else:
pass
run_hefesto_morb()
#_____________________________________________________________________________________________________RUN HEFESTO
def run_hefesto_bsp():
for thing in os.listdir(home_dir_list[0] + "/BSP_Control_Files"):
print "\n" + "Opening HeFESTo for " + str(thing) + "\n"
time.sleep(2)
if "control" in os.listdir(home_dir_list[0]):
os.remove(home_dir_list[0] + "/control")
else:
pass
if "fort.59" in os.listdir(home_dir_list[0]):
os.remove(home_dir_list[0] + "/fort.59")
else:
pass
if "fort.58" in os.listdir(home_dir_list[0]):
os.remove(home_dir_list[0] + "/fort.58")
else:
pass
if "fort.66" in os.listdir(home_dir_list[0]):
os.remove(home_dir_list[0] + "/fort.66")
else:
pass
os.chdir(home_dir_list[0] + "/BSP_Control_Files")
print "Copying" + str(thing) + " to path" + home_dir_list[0] + "..." + "\n"
todir = home_dir_list[0] + "/" + "control"
copyfromdir = home_dir_list[0] + "/BSP_Control_Files/" + str(thing)
shutil.copy(copyfromdir, todir)
os.chdir(home_dir_list[0])
#src = str(thing)
#drc = "control"
#os.rename(src, drc)
print("Performing calculations on {thing!r} ...".format(**vars()))
print "\n"
print "\n" + "Opening HeFESTo for calculations on " + str(thing) + " ..." + "\n"
print "\n"
#working_dir = os.curdir()
#Popen(["main"], cwd=working_dir, stdin=PIPE)
argz = home_dir_list[0] + "/main"
p = subprocess.Popen(argz, stdin=None, stdout=None)
t = Timer(800, p.kill)
print "\n" + "Timeout timer started. 800 seconds until the process is terminated and the loop continues..." + "\n"
t.start()
        p.communicate()  # wait for HeFESTo to finish; the Timer above kills it after 800 seconds
t.cancel()
print "\n" + "Copying output files to " + home_dir_list[0] + "/BSP_Output_Files directory..." + "\n"
try:
os.remove("control")
except:
print "\n" + "Control file not found!" + "\n"
pass
if "fort.66" in os.listdir(home_dir_list[0]):
print "\n" + "fort.66 found!" + "\n"
theoutputfile66 = home_dir_list[0] + "/" + "fort.66"
outputtodir66 = home_dir_list[0] + "/BSP_Output_Files/fort.66_files/" + "fort.66."+str(thing)+"_bsp"
shutil.move(theoutputfile66, outputtodir66)
else:
print "fort.66." + str(thing) + " not found!"
pass
if "fort.58" in os.listdir(home_dir_list[0]):
print "\n" + "fort.58 found!" + "\n"
theoutputfile58 = home_dir_list[0] + "/" + "fort.58"
outputtodir58 = home_dir_list[0] + "/BSP_Output_Files/fort.58_files/" + "fort.58."+str(thing)+"_bsp"
shutil.move(theoutputfile58, outputtodir58)
else:
print "fort.58." + str(thing) + " not found!"
pass
if "fort.59" in os.listdir(home_dir_list[0]):
print "\n" + "fort.59 found!" + "\n"
theoutputfile59 = home_dir_list[0] + "/" + "fort.59"
outputtodir59 = home_dir_list[0] + "/BSP_Output_Files/fort.59_files/" + "fort.59."+str(thing)+"_bsp"
shutil.move(theoutputfile59, outputtodir59)
else:
print "fort.59." + str(thing) + " not found!"
pass
print "LOOP FINISHED FOR " + str(thing)
time.sleep(2)
#except Exception:
# traceback.print_exc()
# print "\n"
# print "Calculation failure for " + str(thing) + ". Moving on..."
# print "\n"
else:
print "\n"
print "Done with BSP HeFESTo calculations. Exiting script..." + "\n\n\n\n"
print "___________________________________________________________"
print "\n"
# copydirs_bsp()
def run_hefesto_morb():
for thing in os.listdir(home_dir_list[0] + "/MORB_Control_Files"):
print "\n" + "Opening HeFESTo for " + str(thing) + "\n"
time.sleep(2)
if "control" in os.listdir(home_dir_list[0]):
os.remove(home_dir_list[0] + "/control")
else:
pass
os.chdir(home_dir_list[0] + "/MORB_Control_Files")
print "Copying" + str(thing) + " to path " + home_dir_list[0] + "..." + "\n"
todir = home_dir_list[0] + "/" + "control"
copyfromdir = home_dir_list[0] + "/MORB_Control_Files/" + str(thing)
shutil.copy(copyfromdir, todir)
os.chdir(home_dir_list[0])
#src = str(thing)
#drc = "control"
#os.rename(src, drc)
print("Performing calculations on {thing!r} ...".format(**vars()))
print "\n"
print "\n" + "Opening HeFESTo for calculations on " + str(thing) + " ..." + "\n"
print "\n"
#working_dir = os.curdir()
#Popen(["main"], cwd=working_dir, stdin=PIPE)
argz = home_dir_list[0] + "/main"
p = subprocess.Popen(argz, stdin=None, stdout=None)
t = Timer(800, p.kill)
print "\n" + "Timeout timer started. 800 seconds until the process is terminated and the loop continues..." + "\n"
t.start()
        p.communicate()  # wait for HeFESTo to finish; the Timer above kills it after 800 seconds
t.cancel()
print "\n" + "Copying output files to" + home_dir_list[0]+ "/MORB_Output_Files' directory..." + "\n"
try:
os.remove("control")
except:
print "\n" + "Control file not found!" + "\n"
pass
if "fort.66" in os.listdir(home_dir_list[0]):
print "\n" + "fort.66 found!" + "\n"
theoutputfile66 = home_dir_list[0] + "/" + "fort.66"
outputtodir66 = home_dir_list[0] + "/MORB_Output_Files/fort.66_files/" + "fort.66."+str(thing)+"_morb"
shutil.move(theoutputfile66, outputtodir66)
else:
print "fort.66." + str(thing) + " not found!"
pass
if "fort.58" in os.listdir(home_dir_list[0]):
print "\n" + "fort.58 found!" + "\n"
theoutputfile58 = home_dir_list[0] + "/" + "fort.58"
outputtodir58 = home_dir_list[0] + "/MORB_Output_Files/fort.58_files/" + "fort.58."+str(thing)+"_morb"
shutil.move(theoutputfile58, outputtodir58)
else:
print "fort.58." + str(thing) + " not found!"
pass
if "fort.59" in os.listdir(home_dir_list[0]):
print "\n" + "fort.59 found!" + "\n"
theoutputfile59 = home_dir_list[0] + "/" + "fort.59"
outputtodir59 = home_dir_list[0] + "/MORB_Output_Files/fort.59_files/" + "fort.59."+str(thing)+"_morb"
shutil.move(theoutputfile59, outputtodir59)
else:
print "fort.59." + str(thing) + " not found!"
pass
print "LOOP FINISHED FOR " + str(thing)
time.sleep(2)
#except Exception:
# traceback.print_exc()
# print "\n"
# print "Calculation failure for " + str(thing) + ". Moving on..."
# print "\n"
else:
print "\n"
print "Done with MORB HeFESTo calculations. Exiting script..." + "\n\n\n\n"
print "___________________________________________________________"
print "\n"
initialization()
|
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for snapshot model."""
from ggrc.app import app
from ggrc.models import all_models
from ggrc.snapshotter.rules import Types
from integration.ggrc import TestCase
from integration.ggrc.models import factories
def get_snapshottable_models():
return {getattr(all_models, stype) for stype in Types.all}
class TestSnapshot(TestCase):
"""Basic tests for /query api."""
IGNORE_KEYS = {
# currently not working fields:
"audit_duration",
"audit_duration_id",
"audit_frequency",
"audit_frequency_id",
"directive",
"directive_id",
"kind",
"kind_id",
"means",
"means_id",
"meta_kind",
"network_zone",
"network_zone_id",
"verify_frequency",
"verify_frequency_id",
"assertions",
"categories",
"categorizations",
"categorized_assertions",
# special fields not needed for snapshots.
"display_name",
"preconditions_failed",
"type",
"workflow_state",
"selfLink",
"viewLink",
# relationships and mappings
"audit_objects",
"audits",
"controls",
"object_owners",
"object_people",
"objects",
"people",
"related_destinations",
"related_sources",
"risk_objects",
"risks",
"task_group_objects",
"task_group_tasks",
"task_groups",
"children",
"parent",
"parent_id",
# we don't need context for snapshots since they are all under an audit.
"context",
"context_id",
# obsolete fields that will be removed
"custom_attributes",
      # The following fields have been handled in fields without the _id suffix. That
      # means that "contact" fields should exist and have correct values.
"contact_id",
"secondary_contact_id",
"principal_assessor_id",
"secondary_assessor_id",
"modified_by_id",
"attribute_object_id",
}
def setUp(self):
"""Set up test cases for all tests."""
super(TestSnapshot, self).setUp()
self._create_cas()
response = self._import_file("all_snapshottable_objects.csv")
self._check_csv_response(response, {})
@staticmethod
def _create_cas():
"""Create custom attribute definitions."""
ca_model_names = [
"facility",
"control",
"market",
"section",
"threat",
"access_group",
"data_asset"
]
ca_args = [
{"title": "CA text", "attribute_type": "Text"},
{"title": "CA rich text", "attribute_type": "Rich Text"},
{"title": "CA date", "attribute_type": "Date"},
{"title": "CA checkbox", "attribute_type": "Checkbox"},
{"title": "CA person", "attribute_type": "Map:Person"},
{"title": "CA dropdown", "attribute_type": "Dropdown",
"multi_choice_options": "one,two,three,four,five"},
]
for type_ in ca_model_names:
with app.app_context():
for args in ca_args:
factories.CustomAttributeDefinitionFactory(
definition_type=type_,
**args
)
def test_revision_content(self):
"""Test that revision contains all content needed."""
facility_revision = all_models.Revision.query.filter(
all_models.Revision.resource_type == "Facility").order_by(
all_models.Revision.id.desc()).first()
self.assertIn("custom_attribute_values", facility_revision.content)
self.assertNotEqual(facility_revision.content[
"custom_attribute_values"], [])
def _get_object(self, obj):
return self.client.get(
"/api/{}/{}".format(obj._inflector.table_plural, obj.id) # noqa # pylint: disable=protected-access
).json[obj._inflector.table_singular] # noqa # pylint: disable=protected-access
def _clean_json(self, content):
"""Remove ignored items from JSON content.
This function removes all ignored items from dicts, changes dates to
isoformat changes values to int or unicode, so that the end result is a
dict that can be compared with the JSON dict that was received from the
server.
Args:
      content: object that we want to clean; it can be a dict, a list, or a value.
Returns:
content with all values cleaned up
"""
if isinstance(content, list):
return sorted(self._clean_json(value) for value in content)
if hasattr(content, 'isoformat'):
return unicode(content.isoformat())
if isinstance(content, int):
# We convert all numbers to the same type so that the diff of a failed
      # test looks nicer. This conversion does not affect the test results, just
      # the output.
return long(content)
if not isinstance(content, dict):
return content
clean = {}
for key, value in content.items():
if key not in self.IGNORE_KEYS:
clean[str(key)] = self._clean_json(value)
return clean
def test_snapshot_content(self):
"""Test the content of stored revisions
    The content in the revision (that is set by log_json) must closely match
    what the api returns for a get request. This ensures that when a model is
    created from a snapshot on the frontend, it will have all the needed fields.
"""
self.client.get("/login")
test_models = get_snapshottable_models()
for model in test_models:
obj = model.eager_query().first()
generated_json = self._clean_json(obj.log_json())
expected_json = self._clean_json(self._get_object(obj))
self.assertEqual(expected_json, generated_json)
|
|
from __future__ import absolute_import
from django.db import connection, transaction, DatabaseError
from django.test import TransactionTestCase
from django.utils import six
from django.utils.unittest import skipIf, skipUnless
from .models import Reporter
from .signal_testing import (
AtomicBlockReceiver,
create_model_atomic_signal_call_sequence,
enter_block_atomic_signal_call_sequence,
leave_block_atomic_signal_call_sequence,
)
@skipUnless(connection.features.uses_savepoints,
"'atomic' requires transactions and savepoints.")
class AtomicTests(TransactionTestCase):
"""
Tests for the atomic decorator and context manager.
The tests make assertions on internal attributes because there isn't a
robust way to ask the database for its current transaction state.
Since the decorator syntax is converted into a context manager (see the
implementation), there are only a few basic tests with the decorator
syntax and the bulk of the tests use the context manager syntax.
"""
def setUp(self):
super(AtomicTests, self).setUp()
self.atomic_block_receiver = AtomicBlockReceiver()
self.atomic_block_receiver.connect()
def tearDown(self):
self.atomic_block_receiver.disconnect()
super(AtomicTests, self).tearDown()
def assertAtomicSignalCalls(self, calls):
"""Assert a certain order of atomic signal calls.
"""
self.assertListEqual(self.atomic_block_receiver.calls, calls)
def assertAtomicSignalCallsForCommit(self):
self.assertAtomicSignalCalls(
# Enter atomic transaction block.
enter_block_atomic_signal_call_sequence(True) +
# Create Reporter.
create_model_atomic_signal_call_sequence() +
# Leave atomic transaction block.
leave_block_atomic_signal_call_sequence(True, True)
)
def assertAtomicSignalCallsForRollback(self):
self.assertAtomicSignalCalls(
# Enter atomic transaction block.
enter_block_atomic_signal_call_sequence(True) +
# Create Reporter.
create_model_atomic_signal_call_sequence() +
# Leave atomic transaction block.
leave_block_atomic_signal_call_sequence(True, False)
)
def test_decorator_syntax_commit(self):
@transaction.atomic
def make_reporter():
Reporter.objects.create(first_name="Tintin")
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(),
['<Reporter: Tintin>'])
self.assertAtomicSignalCallsForCommit()
def test_decorator_syntax_rollback(self):
@transaction.atomic
def make_reporter():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
with six.assertRaisesRegex(self, Exception, "Oops"):
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), [])
self.assertAtomicSignalCallsForRollback()
def test_alternate_decorator_syntax_commit(self):
@transaction.atomic()
def make_reporter():
Reporter.objects.create(first_name="Tintin")
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(),
['<Reporter: Tintin>'])
self.assertAtomicSignalCallsForCommit()
def test_alternate_decorator_syntax_rollback(self):
@transaction.atomic()
def make_reporter():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
with six.assertRaisesRegex(self, Exception, "Oops"):
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), [])
self.assertAtomicSignalCallsForRollback()
def test_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
self.assertQuerysetEqual(Reporter.objects.all(),
['<Reporter: Tintin>'])
self.assertAtomicSignalCallsForCommit()
def test_rollback(self):
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
self.assertAtomicSignalCallsForRollback()
def test_nested_commit_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic():
Reporter.objects.create(first_name="Archibald",
last_name="Haddock")
self.assertQuerysetEqual(Reporter.objects.all(),
['<Reporter: Archibald Haddock>',
'<Reporter: Tintin>'])
self.assertAtomicSignalCalls(
# Enter atomic transaction block.
enter_block_atomic_signal_call_sequence(True) +
# Create Reporter.
create_model_atomic_signal_call_sequence() +
# Enter nested atomic transaction block.
enter_block_atomic_signal_call_sequence(False) +
# Create Reporter.
create_model_atomic_signal_call_sequence() +
# Leave nested atomic transaction block.
leave_block_atomic_signal_call_sequence(False, True) +
# Leave atomic transaction block.
leave_block_atomic_signal_call_sequence(True, True)
)
def test_nested_commit_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(),
['<Reporter: Tintin>'])
self.assertAtomicSignalCalls(
# Enter atomic transaction block.
enter_block_atomic_signal_call_sequence(True) +
# Create Reporter.
create_model_atomic_signal_call_sequence() +
# Enter nested atomic transaction block.
enter_block_atomic_signal_call_sequence(False) +
# Create Reporter.
create_model_atomic_signal_call_sequence() +
# Leave nested atomic transaction block with exception.
leave_block_atomic_signal_call_sequence(False, False) +
# Leave atomic transaction block with exception caught.
leave_block_atomic_signal_call_sequence(True, True)
)
def test_nested_rollback_commit(self):
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with transaction.atomic():
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
self.assertAtomicSignalCalls(
# Enter atomic transaction block.
enter_block_atomic_signal_call_sequence(True) +
# Create Reporter.
create_model_atomic_signal_call_sequence() +
# Enter nested atomic transaction block.
enter_block_atomic_signal_call_sequence(False) +
# Create Reporter.
create_model_atomic_signal_call_sequence() +
# Leave nested atomic transaction block.
leave_block_atomic_signal_call_sequence(False, True) +
# Leave atomic transaction block with exception.
leave_block_atomic_signal_call_sequence(True, False)
)
def test_nested_rollback_rollback(self):
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
self.assertAtomicSignalCalls(
# Enter atomic transaction block.
enter_block_atomic_signal_call_sequence(True) +
# Create Reporter.
create_model_atomic_signal_call_sequence() +
# Enter nested atomic transaction block.
enter_block_atomic_signal_call_sequence(False) +
# Create Reporter.
create_model_atomic_signal_call_sequence() +
# Leave nested atomic transaction block with caught exception.
leave_block_atomic_signal_call_sequence(False, True) +
# Leave atomic transaction block with exception.
leave_block_atomic_signal_call_sequence(True, False)
)
def test_merged_commit_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Archibald",
last_name="Haddock")
self.assertQuerysetEqual(Reporter.objects.all(),
['<Reporter: Archibald Haddock>',
'<Reporter: Tintin>'])
self.assertAtomicSignalCalls(
# Enter atomic transaction block.
enter_block_atomic_signal_call_sequence(True) +
# Create Reporter.
create_model_atomic_signal_call_sequence() +
# Enter nested atomic transaction block.
enter_block_atomic_signal_call_sequence(False, savepoint=False) +
# Create Reporter.
create_model_atomic_signal_call_sequence() +
# Leave nested atomic transaction block.
leave_block_atomic_signal_call_sequence(False, True,
savepoint=False) +
# Leave atomic transaction block.
leave_block_atomic_signal_call_sequence(True, True)
)
def test_merged_commit_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
# Writes in the outer block are rolled back too.
self.assertQuerysetEqual(Reporter.objects.all(), [])
self.assertAtomicSignalCalls(
# Enter atomic transaction block.
enter_block_atomic_signal_call_sequence(True) +
# Create Reporter.
create_model_atomic_signal_call_sequence() +
# Enter nested atomic transaction block.
enter_block_atomic_signal_call_sequence(False, savepoint=False) +
# Create Reporter.
create_model_atomic_signal_call_sequence() +
# Leave nested atomic transaction block.
leave_block_atomic_signal_call_sequence(False, False,
savepoint=False) +
# Leave atomic transaction block.
leave_block_atomic_signal_call_sequence(True, False)
)
def test_merged_rollback_commit(self):
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
self.assertAtomicSignalCalls(
# Enter atomic transaction block.
enter_block_atomic_signal_call_sequence(True) +
# Create Reporter.
create_model_atomic_signal_call_sequence() +
# Enter nested atomic transaction block.
enter_block_atomic_signal_call_sequence(False, savepoint=False) +
# Create Reporter.
create_model_atomic_signal_call_sequence() +
# Leave nested atomic transaction block.
leave_block_atomic_signal_call_sequence(False, True,
savepoint=False) +
# Leave atomic transaction block.
leave_block_atomic_signal_call_sequence(True, False)
)
def test_merged_rollback_rollback(self):
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
self.assertAtomicSignalCalls(
# Enter atomic transaction block.
enter_block_atomic_signal_call_sequence(True) +
# Create Reporter.
create_model_atomic_signal_call_sequence() +
# Enter nested atomic transaction block.
enter_block_atomic_signal_call_sequence(False, savepoint=False) +
# Create Reporter.
create_model_atomic_signal_call_sequence() +
# Leave nested atomic transaction block.
leave_block_atomic_signal_call_sequence(False, True,
savepoint=False) +
# Leave atomic transaction block.
leave_block_atomic_signal_call_sequence(True, False)
)
def test_reuse_commit_commit(self):
atomic = transaction.atomic()
with atomic:
Reporter.objects.create(first_name="Tintin")
with atomic:
Reporter.objects.create(first_name="Archibald",
last_name="Haddock")
self.assertQuerysetEqual(Reporter.objects.all(),
['<Reporter: Archibald Haddock>',
'<Reporter: Tintin>'])
self.assertAtomicSignalCalls(
# Enter atomic transaction block.
enter_block_atomic_signal_call_sequence(True) +
# Create Reporter.
create_model_atomic_signal_call_sequence() +
# Enter nested atomic transaction block.
enter_block_atomic_signal_call_sequence(False) +
# Create Reporter.
create_model_atomic_signal_call_sequence() +
# Leave nested atomic transaction block.
leave_block_atomic_signal_call_sequence(False, True) +
# Leave atomic transaction block.
leave_block_atomic_signal_call_sequence(True, True)
)
def test_reuse_commit_rollback(self):
atomic = transaction.atomic()
with atomic:
Reporter.objects.create(first_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with atomic:
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(),
['<Reporter: Tintin>'])
self.assertAtomicSignalCalls(
# Enter atomic transaction block.
enter_block_atomic_signal_call_sequence(True) +
# Create Reporter.
create_model_atomic_signal_call_sequence() +
# Enter nested atomic transaction block.
enter_block_atomic_signal_call_sequence(False) +
# Create Reporter.
create_model_atomic_signal_call_sequence() +
# Leave nested atomic transaction block with exception.
leave_block_atomic_signal_call_sequence(False, False) +
# Leave atomic transaction block with exception caught.
leave_block_atomic_signal_call_sequence(True, True)
)
def test_reuse_rollback_commit(self):
atomic = transaction.atomic()
with six.assertRaisesRegex(self, Exception, "Oops"):
with atomic:
Reporter.objects.create(last_name="Tintin")
with atomic:
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
self.assertAtomicSignalCalls(
# Enter atomic transaction block.
enter_block_atomic_signal_call_sequence(True) +
# Create Reporter.
create_model_atomic_signal_call_sequence() +
# Enter nested atomic transaction block.
enter_block_atomic_signal_call_sequence(False) +
# Create Reporter.
create_model_atomic_signal_call_sequence() +
# Leave nested atomic transaction block.
leave_block_atomic_signal_call_sequence(False, True) +
# Leave atomic transaction block with exception.
leave_block_atomic_signal_call_sequence(True, False)
)
def test_reuse_rollback_rollback(self):
atomic = transaction.atomic()
with six.assertRaisesRegex(self, Exception, "Oops"):
with atomic:
Reporter.objects.create(last_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with atomic:
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
self.assertAtomicSignalCalls(
# Enter atomic transaction block.
enter_block_atomic_signal_call_sequence(True) +
# Create Reporter.
create_model_atomic_signal_call_sequence() +
# Enter nested atomic transaction block.
enter_block_atomic_signal_call_sequence(False) +
# Create Reporter.
create_model_atomic_signal_call_sequence() +
# Leave nested atomic transaction block with caught exception.
leave_block_atomic_signal_call_sequence(False, True) +
# Leave atomic transaction block with exception.
leave_block_atomic_signal_call_sequence(True, False)
)
def test_force_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
# atomic block shouldn't rollback, but force it.
self.assertFalse(transaction.get_rollback())
transaction.set_rollback(True)
self.assertQuerysetEqual(Reporter.objects.all(), [])
self.assertAtomicSignalCalls(
# Enter atomic transaction block.
enter_block_atomic_signal_call_sequence(True) +
# Create Reporter.
create_model_atomic_signal_call_sequence() +
# Leave atomic transaction with forced rollback.
leave_block_atomic_signal_call_sequence(True, False)
)
def test_prevent_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
sid = transaction.savepoint()
# trigger a database error inside an inner atomic without savepoint
with self.assertRaises(DatabaseError):
with transaction.atomic(savepoint=False):
connection.cursor().execute(
"SELECT no_such_col FROM transactions_reporter"
)
# prevent atomic from rolling back since we're recovering manually
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
transaction.savepoint_rollback(sid)
self.assertQuerysetEqual(Reporter.objects.all(),
['<Reporter: Tintin>'])
self.assertAtomicSignalCalls(
# Enter atomic transaction block.
enter_block_atomic_signal_call_sequence(True) +
# Create Reporter.
create_model_atomic_signal_call_sequence() +
# Enter and leave atomic transaction block.
enter_block_atomic_signal_call_sequence(False, savepoint=False) +
leave_block_atomic_signal_call_sequence(False, False,
savepoint=False) +
# Leave atomic transaction with recovered rollback.
leave_block_atomic_signal_call_sequence(True, True)
)
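# A minimal standalone sketch (not part of the original tests; the helper name
# is illustrative) of the recovery pattern exercised by test_prevent_rollback
# above: trigger a database error inside a savepoint-less atomic block, then
# clear the rollback flag and roll back to an explicit savepoint so the outer
# block can still commit.
def _savepoint_recovery_sketch():
    with transaction.atomic():
        Reporter.objects.create(first_name="Tintin")
        sid = transaction.savepoint()
        try:
            with transaction.atomic(savepoint=False):
                connection.cursor().execute(
                    "SELECT no_such_col FROM transactions_reporter")
        except DatabaseError:
            # The savepoint-less inner block marked the whole transaction for
            # rollback; undo that and roll back to the explicit savepoint
            # instead, keeping the Reporter created above.
            transaction.set_rollback(False)
            transaction.savepoint_rollback(sid)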
class AtomicInsideTransactionTests(AtomicTests):
def setUp(self):
super(AtomicInsideTransactionTests, self).setUp()
def tearDown(self):
super(AtomicInsideTransactionTests, self).tearDown()
@skipIf(connection.features.autocommits_when_autocommit_is_off,
"This test requires a non-autocommit mode that doesn't autocommit.")
class AtomicWithoutAutocommitTests(AtomicTests):
def setUp(self):
super(AtomicWithoutAutocommitTests, self).setUp()
transaction.set_autocommit(False)
def tearDown(self):
# The tests access the database after exercising 'atomic', initiating
# a transaction; a rollback is required before restoring autocommit.
transaction.rollback()
transaction.set_autocommit(True)
super(AtomicWithoutAutocommitTests, self).tearDown()
@skipIf(connection.features.autocommits_when_autocommit_is_off,
"This test requires a non-autocommit mode that doesn't autocommit.")
class AtomicInsideLegacyTransactionManagementTests(AtomicTests):
def setUp(self):
super(AtomicInsideLegacyTransactionManagementTests, self).setUp()
transaction.enter_transaction_management()
def tearDown(self):
# The tests access the database after exercising 'atomic', making the
# connection dirty; a rollback is required to make it clean.
transaction.rollback()
transaction.leave_transaction_management()
super(AtomicInsideLegacyTransactionManagementTests, self).tearDown()
|
|
import re
import socket
import time
import thread
import Queue
from ssl import wrap_socket, CERT_NONE, CERT_REQUIRED, SSLError
def decode(txt):
for codec in ('utf-8', 'iso-8859-1', 'shift_jis', 'cp1252'):
try:
return txt.decode(codec)
except UnicodeDecodeError:
continue
return txt.decode('utf-8', 'ignore')
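# Quick illustration of the fallback chain above (byte strings are
# illustrative, not real traffic):
#   decode('caf\xc3\xa9')  -> u'caf\xe9'  (valid utf-8)
#   decode('caf\xe9')      -> u'caf\xe9'  (utf-8 fails, iso-8859-1 succeeds)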
def censor(text):
text = text.replace('\n', '').replace('\r', '')
replacement = '[censored]'
if 'censored_strings' in bot.config:
words = map(re.escape, bot.config['censored_strings'])
regex = re.compile('(%s)' % "|".join(words))
text = regex.sub(replacement, text)
return text
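# Note: `bot` is not defined or imported in this file; it is assumed to be
# injected into this module's global namespace by the bot core at runtime,
# which is why censor() can read bot.config here.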
class crlf_tcp(object):
"Handles tcp connections that consist of utf-8 lines ending with crlf"
def __init__(self, host, port, timeout=300):
self.ibuffer = ""
self.obuffer = ""
self.oqueue = Queue.Queue() # lines to be sent out
self.iqueue = Queue.Queue() # lines that were received
self.socket = self.create_socket()
self.host = host
self.port = port
self.timeout = timeout
def create_socket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def run(self):
self.socket.connect((self.host, self.port))
thread.start_new_thread(self.recv_loop, ())
thread.start_new_thread(self.send_loop, ())
def recv_from_socket(self, nbytes):
return self.socket.recv(nbytes)
def get_timeout_exception_type(self):
return socket.timeout
def handle_receive_exception(self, error, last_timestamp):
if time.time() - last_timestamp > self.timeout:
self.iqueue.put(StopIteration)
self.socket.close()
return True
return False
def recv_loop(self):
last_timestamp = time.time()
while True:
try:
data = self.recv_from_socket(4096)
self.ibuffer += data
if data:
last_timestamp = time.time()
else:
if time.time() - last_timestamp > self.timeout:
self.iqueue.put(StopIteration)
self.socket.close()
return
time.sleep(1)
except (self.get_timeout_exception_type(), socket.error) as e:
if self.handle_receive_exception(e, last_timestamp):
return
continue
while '\r\n' in self.ibuffer:
line, self.ibuffer = self.ibuffer.split('\r\n', 1)
self.iqueue.put(decode(line))
def send_loop(self):
while True:
line = self.oqueue.get().splitlines()[0][:500]
print ">>> %r" % line
self.obuffer += line.encode('utf-8', 'replace') + '\r\n'
while self.obuffer:
sent = self.socket.send(self.obuffer)
self.obuffer = self.obuffer[sent:]
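# Minimal usage sketch for crlf_tcp (host/port are placeholders):
#   conn = crlf_tcp("irc.example.net", 6667)
#   conn.run()                       # connects and starts recv/send threads
#   conn.oqueue.put(u"NICK skybot")  # queued line is sent with trailing CRLF
#   line = conn.iqueue.get()         # blocks until a full CRLF line arrives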
class crlf_ssl_tcp(crlf_tcp):
"Handles ssl tcp connetions that consist of utf-8 lines ending with crlf"
def __init__(self, host, port, ignore_cert_errors, timeout=300):
self.ignore_cert_errors = ignore_cert_errors
crlf_tcp.__init__(self, host, port, timeout)
def create_socket(self):
return wrap_socket(crlf_tcp.create_socket(self), server_side=False,
cert_reqs=CERT_NONE if self.ignore_cert_errors else
CERT_REQUIRED)
def recv_from_socket(self, nbytes):
return self.socket.read(nbytes)
def get_timeout_exception_type(self):
return SSLError
def handle_receive_exception(self, error, last_timestamp):
# this is terrible
if not "timed out" in error.args[0]:
raise
return crlf_tcp.handle_receive_exception(self, error, last_timestamp)
irc_prefix_rem = re.compile(r'(.*?) (.*?) (.*)').match
irc_noprefix_rem = re.compile(r'()(.*?) (.*)').match
irc_netmask_rem = re.compile(r':?([^!@]*)!?([^@]*)@?(.*)').match
irc_param_ref = re.compile(r'(?:^|(?<= ))(:.*|[^ ]+)').findall
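# Example of how the regexes above decompose a raw IRC line (values are
# illustrative):
#   ":nick!user@host PRIVMSG #chan :hello there"
#     irc_prefix_rem  -> prefix=":nick!user@host", command="PRIVMSG",
#                        params="#chan :hello there"
#     irc_netmask_rem -> nick="nick", user="user", host="host"
#     irc_param_ref   -> ["#chan", ":hello there"]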
class IRC(object):
"handles the IRC protocol"
# see the docs/ folder for more information on the protocol
def __init__(self, conf):
self.set_conf(conf)
self.out = Queue.Queue() # responses from the server are placed here
# format: [rawline, prefix, command, params,
# nick, user, host, paramlist, msg]
self.connect()
thread.start_new_thread(self.parse_loop, ())
def set_conf(self, conf):
self.conf = conf
self.nick = self.conf['nick']
self.server = self.conf['server']
def create_connection(self):
return crlf_tcp(self.server, self.conf.get('port', 6667))
def connect(self):
self.conn = self.create_connection()
thread.start_new_thread(self.conn.run, ())
self.cmd("NICK", [self.nick])
self.cmd("USER",
[self.conf.get('user', 'skybot'), "3", "*", self.conf.get('realname',
'Python bot - http://github.com/rmmh/skybot')])
if 'server_password' in self.conf:
self.cmd("PASS", [self.conf['server_password']])
def parse_loop(self):
while True:
msg = self.conn.iqueue.get()
if msg == StopIteration:
self.connect()
continue
if msg.startswith(":"): # has a prefix
prefix, command, params = irc_prefix_rem(msg).groups()
else:
prefix, command, params = irc_noprefix_rem(msg).groups()
nick, user, host = irc_netmask_rem(prefix).groups()
paramlist = irc_param_ref(params)
lastparam = ""
if paramlist:
if paramlist[-1].startswith(':'):
paramlist[-1] = paramlist[-1][1:]
lastparam = paramlist[-1]
self.out.put([msg, prefix, command, params, nick, user, host,
paramlist, lastparam])
if command == "PING":
self.cmd("PONG", paramlist)
def join(self, channel):
self.cmd("JOIN", channel.split(" ")) # [chan, password]
def msg(self, target, text):
self.cmd("PRIVMSG", [target, text])
def cmd(self, command, params=None):
if params:
params[-1] = ':' + params[-1]
self.send(command + ' ' + ' '.join(map(censor, params)))
else:
self.send(command)
def send(self, line):
self.conn.oqueue.put(line)
class FakeIRC(IRC):
def __init__(self, conf, fn):
self.set_conf(conf)
self.out = Queue.Queue() # responses from the server are placed here
self.f = open(fn, 'rb')
thread.start_new_thread(self.parse_loop, ())
def parse_loop(self):
while True:
msg = decode(self.f.readline()[9:])
if msg == '':
print "!!!!DONE READING FILE!!!!"
return
if msg.startswith(":"): # has a prefix
prefix, command, params = irc_prefix_rem(msg).groups()
else:
prefix, command, params = irc_noprefix_rem(msg).groups()
nick, user, host = irc_netmask_rem(prefix).groups()
paramlist = irc_param_ref(params)
lastparam = ""
if paramlist:
if paramlist[-1].startswith(':'):
paramlist[-1] = paramlist[-1][1:]
lastparam = paramlist[-1]
self.out.put([msg, prefix, command, params, nick, user, host,
paramlist, lastparam])
if command == "PING":
self.cmd("PONG", [params])
def cmd(self, command, params=None):
pass
class SSLIRC(IRC):
def create_connection(self):
return crlf_ssl_tcp(self.server, self.conf.get('port', 6697), self.conf.get('ignore_cert', True))
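# Example configuration sketch (keys inferred from set_conf/connect above;
# values are placeholders):
#   conf = {"nick": "skybot", "server": "irc.example.net", "port": 6667}
#   irc = IRC(conf)
#   irc.join("#bots")
#   irc.msg("#bots", "hello")
#   parsed = irc.out.get()  # [raw, prefix, command, params, nick, user, ...]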
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.dataqna_v1alpha.types import question
from google.cloud.dataqna_v1alpha.types import question as gcd_question
from google.cloud.dataqna_v1alpha.types import question_service
from google.cloud.dataqna_v1alpha.types import user_feedback
from google.cloud.dataqna_v1alpha.types import user_feedback as gcd_user_feedback
from google.protobuf import any_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import QuestionServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import QuestionServiceGrpcTransport
from .transports.grpc_asyncio import QuestionServiceGrpcAsyncIOTransport
class QuestionServiceClientMeta(type):
"""Metaclass for the QuestionService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[QuestionServiceTransport]]
_transport_registry["grpc"] = QuestionServiceGrpcTransport
_transport_registry["grpc_asyncio"] = QuestionServiceGrpcAsyncIOTransport
def get_transport_class(cls, label: str = None,) -> Type[QuestionServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class QuestionServiceClient(metaclass=QuestionServiceClientMeta):
"""Service to interpret natural language queries. The service allows to
create ``Question`` resources that are interpreted and are filled
with one or more interpretations if the question could be
interpreted. Once a ``Question`` resource is created and has at
least one interpretation, an interpretation can be chosen for
execution, which triggers a query to the backend (for BigQuery, it
will create a job). Upon successful execution of that
interpretation, backend specific information will be returned so
that the client can retrieve the results from the backend.
The ``Question`` resources are named
``projects/*/locations/*/questions/*``.
The ``Question`` resource has a singleton sub-resource
``UserFeedback`` named
``projects/*/locations/*/questions/*/userFeedback``, which allows
access to user feedback.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
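# Illustrative conversions performed by the regex above (endpoints are the
# client defaults, not live configuration):
#   _get_default_mtls_endpoint("dataqna.googleapis.com")
#       -> "dataqna.mtls.googleapis.com"
#   _get_default_mtls_endpoint("dataqna.sandbox.googleapis.com")
#       -> "dataqna.mtls.sandbox.googleapis.com"
#   _get_default_mtls_endpoint("dataqna.mtls.googleapis.com")  # already mTLS
#       -> "dataqna.mtls.googleapis.com"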
DEFAULT_ENDPOINT = "dataqna.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
QuestionServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
QuestionServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> QuestionServiceTransport:
"""Returns the transport used by the client instance.
Returns:
QuestionServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def question_path(project: str, location: str, question: str,) -> str:
"""Returns a fully-qualified question string."""
return "projects/{project}/locations/{location}/questions/{question}".format(
project=project, location=location, question=question,
)
@staticmethod
def parse_question_path(path: str) -> Dict[str, str]:
"""Parses a question path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/questions/(?P<question>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def user_feedback_path(project: str, location: str, question: str,) -> str:
"""Returns a fully-qualified user_feedback string."""
return "projects/{project}/locations/{location}/questions/{question}/userFeedback".format(
project=project, location=location, question=question,
)
@staticmethod
def parse_user_feedback_path(path: str) -> Dict[str, str]:
"""Parses a user_feedback path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/questions/(?P<question>.+?)/userFeedback$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
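# Illustrative round trip for the path helpers above (values are placeholders):
#   QuestionServiceClient.question_path("my-proj", "us", "1234")
#       -> "projects/my-proj/locations/us/questions/1234"
#   QuestionServiceClient.parse_question_path(
#       "projects/my-proj/locations/us/questions/1234")
#       -> {"project": "my-proj", "location": "us", "question": "1234"}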
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
(1) if `client_options.api_endpoint` is provided, use the provided one.
(2) if the `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_client_cert not in ("true", "false"):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
# Figure out the client cert source to use.
client_cert_source = None
if use_client_cert == "true":
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
client_cert_source = mtls.default_client_cert_source()
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
elif use_mtls_endpoint == "always" or (
use_mtls_endpoint == "auto" and client_cert_source
):
api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = cls.DEFAULT_ENDPOINT
return api_endpoint, client_cert_source
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, QuestionServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the question service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, QuestionServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
client_options
)
api_key_value = getattr(client_options, "api_key", None)
if api_key_value and credentials:
raise ValueError(
"client_options.api_key and credentials are mutually exclusive"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, QuestionServiceTransport):
# transport is a QuestionServiceTransport instance.
if credentials or client_options.credentials_file or api_key_value:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
import google.auth._default # type: ignore
if api_key_value and hasattr(
google.auth._default, "get_api_key_credentials"
):
credentials = google.auth._default.get_api_key_credentials(
api_key_value
)
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def get_question(
self,
request: Union[question_service.GetQuestionRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> question.Question:
r"""Gets a previously created question.
.. code-block:: python
from google.cloud import dataqna_v1alpha
def sample_get_question():
# Create a client
client = dataqna_v1alpha.QuestionServiceClient()
# Initialize request argument(s)
request = dataqna_v1alpha.GetQuestionRequest(
name="name_value",
)
# Make the request
response = client.get_question(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.dataqna_v1alpha.types.GetQuestionRequest, dict]):
The request object. A request to get a previously
created question.
name (str):
Required. The unique identifier for the question.
Example: ``projects/foo/locations/bar/questions/1234``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dataqna_v1alpha.types.Question:
The question resource represents a
natural language query, its settings,
understanding generated by the system,
and answer retrieval status. A question
cannot be modified.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a question_service.GetQuestionRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, question_service.GetQuestionRequest):
request = question_service.GetQuestionRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_question]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def create_question(
self,
request: Union[question_service.CreateQuestionRequest, dict] = None,
*,
parent: str = None,
question: gcd_question.Question = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcd_question.Question:
r"""Creates a question.
.. code-block:: python
from google.cloud import dataqna_v1alpha
def sample_create_question():
# Create a client
client = dataqna_v1alpha.QuestionServiceClient()
# Initialize request argument(s)
question = dataqna_v1alpha.Question()
question.scopes = ['scopes_value_1', 'scopes_value_2']
question.query = "query_value"
request = dataqna_v1alpha.CreateQuestionRequest(
parent="parent_value",
question=question,
)
# Make the request
response = client.create_question(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.dataqna_v1alpha.types.CreateQuestionRequest, dict]):
The request object. Request to create a question
resource.
parent (str):
Required. The name of the project this data source
reference belongs to. Example:
``projects/foo/locations/bar``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
question (google.cloud.dataqna_v1alpha.types.Question):
Required. The question to create.
This corresponds to the ``question`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dataqna_v1alpha.types.Question:
The question resource represents a
natural language query, its settings,
understanding generated by the system,
and answer retrieval status. A question
cannot be modified.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, question])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a question_service.CreateQuestionRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, question_service.CreateQuestionRequest):
request = question_service.CreateQuestionRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if question is not None:
request.question = question
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_question]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def execute_question(
self,
request: Union[question_service.ExecuteQuestionRequest, dict] = None,
*,
name: str = None,
interpretation_index: int = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> question.Question:
r"""Executes an interpretation.
.. code-block:: python
from google.cloud import dataqna_v1alpha
def sample_execute_question():
# Create a client
client = dataqna_v1alpha.QuestionServiceClient()
# Initialize request argument(s)
request = dataqna_v1alpha.ExecuteQuestionRequest(
name="name_value",
interpretation_index=2159,
)
# Make the request
response = client.execute_question(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.dataqna_v1alpha.types.ExecuteQuestionRequest, dict]):
The request object. Request to execute an
interpretation.
name (str):
Required. The unique identifier for the question.
Example: ``projects/foo/locations/bar/questions/1234``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
interpretation_index (int):
Required. Index of the interpretation
to execute.
This corresponds to the ``interpretation_index`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dataqna_v1alpha.types.Question:
The question resource represents a
natural language query, its settings,
understanding generated by the system,
and answer retrieval status. A question
cannot be modified.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, interpretation_index])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a question_service.ExecuteQuestionRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, question_service.ExecuteQuestionRequest):
request = question_service.ExecuteQuestionRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if interpretation_index is not None:
request.interpretation_index = interpretation_index
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.execute_question]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def get_user_feedback(
self,
request: Union[question_service.GetUserFeedbackRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> user_feedback.UserFeedback:
r"""Gets previously created user feedback.
.. code-block:: python
from google.cloud import dataqna_v1alpha
def sample_get_user_feedback():
# Create a client
client = dataqna_v1alpha.QuestionServiceClient()
# Initialize request argument(s)
request = dataqna_v1alpha.GetUserFeedbackRequest(
name="name_value",
)
# Make the request
response = client.get_user_feedback(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.dataqna_v1alpha.types.GetUserFeedbackRequest, dict]):
The request object. Request to get user feedback.
name (str):
Required. The unique identifier for the user feedback.
User feedback is a singleton resource on a Question.
Example:
``projects/foo/locations/bar/questions/1234/userFeedback``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dataqna_v1alpha.types.UserFeedback:
Feedback provided by a user.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a question_service.GetUserFeedbackRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, question_service.GetUserFeedbackRequest):
request = question_service.GetUserFeedbackRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_user_feedback]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def update_user_feedback(
self,
request: Union[question_service.UpdateUserFeedbackRequest, dict] = None,
*,
user_feedback: gcd_user_feedback.UserFeedback = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcd_user_feedback.UserFeedback:
r"""Updates user feedback. This creates user feedback if
there was none before (upsert).
.. code-block:: python
from google.cloud import dataqna_v1alpha
def sample_update_user_feedback():
# Create a client
client = dataqna_v1alpha.QuestionServiceClient()
# Initialize request argument(s)
user_feedback = dataqna_v1alpha.UserFeedback()
user_feedback.name = "name_value"
request = dataqna_v1alpha.UpdateUserFeedbackRequest(
user_feedback=user_feedback,
)
# Make the request
response = client.update_user_feedback(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.dataqna_v1alpha.types.UpdateUserFeedbackRequest, dict]):
The request object. Request to update user feedback.
user_feedback (google.cloud.dataqna_v1alpha.types.UserFeedback):
Required. The user feedback to
update. This can be called even if there
is no user feedback so far. The
feedback's name field is used to
identify the user feedback (and the
corresponding question) to update.
This corresponds to the ``user_feedback`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
The list of fields to be updated.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dataqna_v1alpha.types.UserFeedback:
Feedback provided by a user.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([user_feedback, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a question_service.UpdateUserFeedbackRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, question_service.UpdateUserFeedbackRequest):
request = question_service.UpdateUserFeedbackRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if user_feedback is not None:
request.user_feedback = user_feedback
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_user_feedback]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("user_feedback.name", request.user_feedback.name),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-dataqna",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("QuestionServiceClient",)
|
|
import numbers
import numpy as np
from pandas._libs import lib
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender
from pandas.util._validators import validate_fillna_kwargs
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
from pandas.core.dtypes.inference import is_array_like, is_list_like
from pandas import compat
from pandas.core import nanops
from pandas.core.algorithms import searchsorted
from pandas.core.missing import backfill_1d, pad_1d
from .base import ExtensionArray, ExtensionOpsMixin
class PandasDtype(ExtensionDtype):
"""
A Pandas ExtensionDtype for NumPy dtypes.
.. versionadded:: 0.24.0
This is mostly for internal compatibility, and is not especially
useful on its own.
Parameters
----------
dtype : numpy.dtype
"""
_metadata = ('_dtype',)
def __init__(self, dtype):
dtype = np.dtype(dtype)
self._dtype = dtype
self._name = dtype.name
self._type = dtype.type
def __repr__(self):
return "PandasDtype({!r})".format(self.name)
@property
def numpy_dtype(self):
"""The NumPy dtype this PandasDtype wraps."""
return self._dtype
@property
def name(self):
return self._name
@property
def type(self):
return self._type
@property
def _is_numeric(self):
# exclude object, str, unicode, void.
return self.kind in set('biufc')
@property
def _is_boolean(self):
return self.kind == 'b'
@classmethod
def construct_from_string(cls, string):
return cls(np.dtype(string))
    @classmethod
    def construct_array_type(cls):
return PandasArray
@property
def kind(self):
return self._dtype.kind
@property
def itemsize(self):
"""The element size of this data-type object."""
return self._dtype.itemsize
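# Illustrative sketch (an assumption, not part of pandas): PandasDtype is a thin
# wrapper around a NumPy dtype, e.g.
#     PandasDtype('int64').numpy_dtype == np.dtype('int64')
#     PandasDtype('float64').kind == 'f'          # hence _is_numeric is True
#     PandasDtype(np.dtype('int64')).itemsize == 8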
# TODO(NumPy1.13): remove this
# Compat for NumPy 1.12, which doesn't provide NDArrayOperatorsMixin
# or __array_ufunc__, so those operations won't be available to people
# on older NumPys.
#
# We would normally write this as bases=(...), then "class Foo(*bases):
# but Python2 doesn't allow unpacking tuples in the class statement.
# So, we fall back to "object", to avoid writing a metaclass.
try:
from numpy.lib.mixins import NDArrayOperatorsMixin
except ImportError:
NDArrayOperatorsMixin = object
class PandasArray(ExtensionArray, ExtensionOpsMixin, NDArrayOperatorsMixin):
"""
A pandas ExtensionArray for NumPy data.
    .. versionadded:: 0.24.0
This is mostly for internal compatibility, and is not especially
useful on its own.
Parameters
----------
values : ndarray
The NumPy ndarray to wrap. Must be 1-dimensional.
copy : bool, default False
Whether to copy `values`.
Notes
-----
    Operations like ``+`` and applying ufuncs require NumPy>=1.13.
"""
# If you're wondering why pd.Series(cls) doesn't put the array in an
# ExtensionBlock, search for `ABCPandasArray`. We check for
    # that _typ to ensure that users don't unnecessarily use EAs inside
# pandas internals, which turns off things like block consolidation.
_typ = "npy_extension"
__array_priority__ = 1000
# ------------------------------------------------------------------------
# Constructors
def __init__(self, values, copy=False):
if isinstance(values, type(self)):
values = values._ndarray
if not isinstance(values, np.ndarray):
raise ValueError("'values' must be a NumPy array.")
if values.ndim != 1:
raise ValueError("PandasArray must be 1-dimensional.")
if copy:
values = values.copy()
self._ndarray = values
self._dtype = PandasDtype(values.dtype)
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
if isinstance(dtype, PandasDtype):
dtype = dtype._dtype
result = np.asarray(scalars, dtype=dtype)
if copy and result is scalars:
result = result.copy()
return cls(result)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
@classmethod
def _concat_same_type(cls, to_concat):
return cls(np.concatenate(to_concat))
# ------------------------------------------------------------------------
# Data
@property
def dtype(self):
return self._dtype
# ------------------------------------------------------------------------
# NumPy Array Interface
def __array__(self, dtype=None):
return np.asarray(self._ndarray, dtype=dtype)
_HANDLED_TYPES = (np.ndarray, numbers.Number)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
# Lightly modified version of
# https://docs.scipy.org/doc/numpy-1.15.1/reference/generated/\
# numpy.lib.mixins.NDArrayOperatorsMixin.html
# The primary modification is not boxing scalar return values
# in PandasArray, since pandas' ExtensionArrays are 1-d.
out = kwargs.get('out', ())
for x in inputs + out:
# Only support operations with instances of _HANDLED_TYPES.
# Use PandasArray instead of type(self) for isinstance to
# allow subclasses that don't override __array_ufunc__ to
# handle PandasArray objects.
if not isinstance(x, self._HANDLED_TYPES + (PandasArray,)):
return NotImplemented
# Defer to the implementation of the ufunc on unwrapped values.
inputs = tuple(x._ndarray if isinstance(x, PandasArray) else x
for x in inputs)
if out:
kwargs['out'] = tuple(
x._ndarray if isinstance(x, PandasArray) else x
for x in out)
result = getattr(ufunc, method)(*inputs, **kwargs)
if type(result) is tuple and len(result):
# multiple return values
if not lib.is_scalar(result[0]):
# re-box array-like results
return tuple(type(self)(x) for x in result)
else:
# but not scalar reductions
return result
elif method == 'at':
# no return value
return None
else:
# one return value
if not lib.is_scalar(result):
# re-box array-like results, but not scalar reductions
result = type(self)(result)
return result
# ------------------------------------------------------------------------
# Pandas ExtensionArray Interface
def __getitem__(self, item):
if isinstance(item, type(self)):
item = item._ndarray
result = self._ndarray[item]
if not lib.is_scalar(item):
result = type(self)(result)
return result
def __setitem__(self, key, value):
from pandas.core.internals.arrays import extract_array
value = extract_array(value, extract_numpy=True)
if not lib.is_scalar(key) and is_list_like(key):
key = np.asarray(key)
if not lib.is_scalar(value):
value = np.asarray(value)
values = self._ndarray
t = np.result_type(value, values)
if t != self._ndarray.dtype:
values = values.astype(t, casting='safe')
values[key] = value
self._dtype = PandasDtype(t)
self._ndarray = values
else:
self._ndarray[key] = value
def __len__(self):
return len(self._ndarray)
@property
def nbytes(self):
return self._ndarray.nbytes
def isna(self):
from pandas import isna
return isna(self._ndarray)
def fillna(self, value=None, method=None, limit=None):
# TODO(_values_for_fillna): remove this
value, method = validate_fillna_kwargs(value, method)
mask = self.isna()
if is_array_like(value):
if len(value) != len(self):
                raise ValueError("Length of 'value' does not match. Got ({}) "
                                 "expected {}".format(len(value), len(self)))
value = value[mask]
if mask.any():
if method is not None:
func = pad_1d if method == 'pad' else backfill_1d
new_values = func(self._ndarray, limit=limit,
mask=mask)
new_values = self._from_sequence(new_values, dtype=self.dtype)
else:
# fill with value
new_values = self.copy()
new_values[mask] = value
else:
new_values = self.copy()
return new_values
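    # Illustrative fillna behaviour (an assumed example, not pandas source):
    #     PandasArray(np.array([1.0, np.nan, 3.0])).fillna(0.0)
    #         -> values [1.0, 0.0, 3.0] via the value-fill branch above
    #     PandasArray(np.array([1.0, np.nan, 3.0])).fillna(method='pad')
    #         -> values [1.0, 1.0, 3.0] via pad_1d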
def take(self, indices, allow_fill=False, fill_value=None):
from pandas.core.algorithms import take
result = take(self._ndarray, indices, allow_fill=allow_fill,
fill_value=fill_value)
return type(self)(result)
def copy(self, deep=False):
return type(self)(self._ndarray.copy())
def _values_for_argsort(self):
return self._ndarray
def _values_for_factorize(self):
return self._ndarray, -1
def unique(self):
from pandas import unique
return type(self)(unique(self._ndarray))
# ------------------------------------------------------------------------
# Reductions
def _reduce(self, name, skipna=True, **kwargs):
meth = getattr(self, name, None)
if meth:
return meth(skipna=skipna, **kwargs)
else:
msg = (
"'{}' does not implement reduction '{}'"
)
raise TypeError(msg.format(type(self).__name__, name))
def any(self, axis=None, out=None, keepdims=False, skipna=True):
nv.validate_any((), dict(out=out, keepdims=keepdims))
return nanops.nanany(self._ndarray, axis=axis, skipna=skipna)
def all(self, axis=None, out=None, keepdims=False, skipna=True):
nv.validate_all((), dict(out=out, keepdims=keepdims))
return nanops.nanall(self._ndarray, axis=axis, skipna=skipna)
def min(self, axis=None, out=None, keepdims=False, skipna=True):
nv.validate_min((), dict(out=out, keepdims=keepdims))
return nanops.nanmin(self._ndarray, axis=axis, skipna=skipna)
def max(self, axis=None, out=None, keepdims=False, skipna=True):
nv.validate_max((), dict(out=out, keepdims=keepdims))
return nanops.nanmax(self._ndarray, axis=axis, skipna=skipna)
def sum(self, axis=None, dtype=None, out=None, keepdims=False,
initial=None, skipna=True, min_count=0):
nv.validate_sum((), dict(dtype=dtype, out=out, keepdims=keepdims,
initial=initial))
return nanops.nansum(self._ndarray, axis=axis, skipna=skipna,
min_count=min_count)
def prod(self, axis=None, dtype=None, out=None, keepdims=False,
initial=None, skipna=True, min_count=0):
nv.validate_prod((), dict(dtype=dtype, out=out, keepdims=keepdims,
initial=initial))
return nanops.nanprod(self._ndarray, axis=axis, skipna=skipna,
min_count=min_count)
def mean(self, axis=None, dtype=None, out=None, keepdims=False,
skipna=True):
nv.validate_mean((), dict(dtype=dtype, out=out, keepdims=keepdims))
return nanops.nanmean(self._ndarray, axis=axis, skipna=skipna)
def median(self, axis=None, out=None, overwrite_input=False,
keepdims=False, skipna=True):
nv.validate_median((), dict(out=out, overwrite_input=overwrite_input,
keepdims=keepdims))
return nanops.nanmedian(self._ndarray, axis=axis, skipna=skipna)
def std(self, axis=None, dtype=None, out=None, ddof=1, keepdims=False,
skipna=True):
nv.validate_stat_ddof_func((), dict(dtype=dtype, out=out,
keepdims=keepdims),
fname='std')
return nanops.nanstd(self._ndarray, axis=axis, skipna=skipna,
ddof=ddof)
def var(self, axis=None, dtype=None, out=None, ddof=1, keepdims=False,
skipna=True):
nv.validate_stat_ddof_func((), dict(dtype=dtype, out=out,
keepdims=keepdims),
fname='var')
return nanops.nanvar(self._ndarray, axis=axis, skipna=skipna,
ddof=ddof)
def sem(self, axis=None, dtype=None, out=None, ddof=1, keepdims=False,
skipna=True):
nv.validate_stat_ddof_func((), dict(dtype=dtype, out=out,
keepdims=keepdims),
fname='sem')
return nanops.nansem(self._ndarray, axis=axis, skipna=skipna,
ddof=ddof)
def kurt(self, axis=None, dtype=None, out=None, keepdims=False,
skipna=True):
nv.validate_stat_ddof_func((), dict(dtype=dtype, out=out,
keepdims=keepdims),
fname='kurt')
return nanops.nankurt(self._ndarray, axis=axis, skipna=skipna)
def skew(self, axis=None, dtype=None, out=None, keepdims=False,
skipna=True):
nv.validate_stat_ddof_func((), dict(dtype=dtype, out=out,
keepdims=keepdims),
fname='skew')
return nanops.nanskew(self._ndarray, axis=axis, skipna=skipna)
# ------------------------------------------------------------------------
# Additional Methods
def to_numpy(self, dtype=None, copy=False):
"""
Convert the PandasArray to a :class:`numpy.ndarray`.
By default, this requires no coercion or copying of data.
Parameters
----------
dtype : numpy.dtype
The NumPy dtype to pass to :func:`numpy.asarray`.
copy : bool, default False
Whether to copy the underlying data.
Returns
-------
ndarray
"""
result = np.asarray(self._ndarray, dtype=dtype)
if copy and result is self._ndarray:
result = result.copy()
return result
@Appender(ExtensionArray.searchsorted.__doc__)
def searchsorted(self, value, side='left', sorter=None):
return searchsorted(self.to_numpy(), value,
side=side, sorter=sorter)
# ------------------------------------------------------------------------
# Ops
def __invert__(self):
return type(self)(~self._ndarray)
@classmethod
def _create_arithmetic_method(cls, op):
def arithmetic_method(self, other):
if isinstance(other, (ABCIndexClass, ABCSeries)):
return NotImplemented
elif isinstance(other, cls):
other = other._ndarray
with np.errstate(all="ignore"):
result = op(self._ndarray, other)
if op is divmod:
a, b = result
return cls(a), cls(b)
return cls(result)
return compat.set_function_name(arithmetic_method,
"__{}__".format(op.__name__),
cls)
_create_comparison_method = _create_arithmetic_method
PandasArray._add_arithmetic_ops()
PandasArray._add_comparison_ops()
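# Illustrative usage sketch (an assumption, not part of the pandas source above):
# a small, uncalled demo of wrapping, reductions and ufunc re-boxing.
def _demo_pandas_array():
    arr = PandasArray(np.array([1.0, np.nan, 3.0]))
    assert isinstance(arr.dtype, PandasDtype)
    assert arr.sum(skipna=True) == 4.0                 # NaN skipped via nanops
    filled = arr.fillna(0.0)                           # value-fill path
    assert filled.to_numpy().tolist() == [1.0, 0.0, 3.0]
    assert isinstance(np.add(arr, 1), PandasArray)     # array results are re-boxed
    return arr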
|
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
from twilio.rest.verify.v2.service.entity.challenge import ChallengeList
from twilio.rest.verify.v2.service.entity.factor import FactorList
from twilio.rest.verify.v2.service.entity.new_factor import NewFactorList
class EntityList(ListResource):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
def __init__(self, version, service_sid):
"""
Initialize the EntityList
:param Version version: Version that contains the resource
:param service_sid: Service Sid.
:returns: twilio.rest.verify.v2.service.entity.EntityList
:rtype: twilio.rest.verify.v2.service.entity.EntityList
"""
super(EntityList, self).__init__(version)
# Path Solution
self._solution = {'service_sid': service_sid, }
self._uri = '/Services/{service_sid}/Entities'.format(**self._solution)
def create(self, identity):
"""
Create the EntityInstance
:param unicode identity: Unique external identifier of the Entity
:returns: The created EntityInstance
:rtype: twilio.rest.verify.v2.service.entity.EntityInstance
"""
data = values.of({'Identity': identity, })
payload = self._version.create(method='POST', uri=self._uri, data=data, )
return EntityInstance(self._version, payload, service_sid=self._solution['service_sid'], )
def stream(self, limit=None, page_size=None):
"""
Streams EntityInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.verify.v2.service.entity.EntityInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'])
def list(self, limit=None, page_size=None):
"""
Lists EntityInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
        :returns: List containing up to limit results
:rtype: list[twilio.rest.verify.v2.service.entity.EntityInstance]
"""
return list(self.stream(limit=limit, page_size=page_size, ))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of EntityInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of EntityInstance
:rtype: twilio.rest.verify.v2.service.entity.EntityPage
"""
data = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })
response = self._version.page(method='GET', uri=self._uri, params=data, )
return EntityPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of EntityInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of EntityInstance
:rtype: twilio.rest.verify.v2.service.entity.EntityPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return EntityPage(self._version, response, self._solution)
def get(self, identity):
"""
        Constructs an EntityContext
:param identity: Unique external identifier of the Entity
:returns: twilio.rest.verify.v2.service.entity.EntityContext
:rtype: twilio.rest.verify.v2.service.entity.EntityContext
"""
return EntityContext(self._version, service_sid=self._solution['service_sid'], identity=identity, )
def __call__(self, identity):
"""
        Constructs an EntityContext
:param identity: Unique external identifier of the Entity
:returns: twilio.rest.verify.v2.service.entity.EntityContext
:rtype: twilio.rest.verify.v2.service.entity.EntityContext
"""
return EntityContext(self._version, service_sid=self._solution['service_sid'], identity=identity, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Verify.V2.EntityList>'
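# Illustrative usage sketch (an assumption, not part of the generated binding):
# stream() pages through Entities lazily while list() loads them eagerly, e.g.
#     for entity in entity_list.stream(limit=20, page_size=20):
#         print(entity.identity)
#     all_entities = entity_list.list(limit=100)
#     first_page = entity_list.page(page_size=50)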
class EntityPage(Page):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
def __init__(self, version, response, solution):
"""
Initialize the EntityPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param service_sid: Service Sid.
:returns: twilio.rest.verify.v2.service.entity.EntityPage
:rtype: twilio.rest.verify.v2.service.entity.EntityPage
"""
super(EntityPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of EntityInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.verify.v2.service.entity.EntityInstance
:rtype: twilio.rest.verify.v2.service.entity.EntityInstance
"""
return EntityInstance(self._version, payload, service_sid=self._solution['service_sid'], )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Verify.V2.EntityPage>'
class EntityContext(InstanceContext):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
def __init__(self, version, service_sid, identity):
"""
Initialize the EntityContext
:param Version version: Version that contains the resource
:param service_sid: Service Sid.
:param identity: Unique external identifier of the Entity
:returns: twilio.rest.verify.v2.service.entity.EntityContext
:rtype: twilio.rest.verify.v2.service.entity.EntityContext
"""
super(EntityContext, self).__init__(version)
# Path Solution
self._solution = {'service_sid': service_sid, 'identity': identity, }
self._uri = '/Services/{service_sid}/Entities/{identity}'.format(**self._solution)
# Dependents
self._factors = None
self._new_factors = None
self._challenges = None
def delete(self):
"""
Deletes the EntityInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete(method='DELETE', uri=self._uri, )
def fetch(self):
"""
Fetch the EntityInstance
:returns: The fetched EntityInstance
:rtype: twilio.rest.verify.v2.service.entity.EntityInstance
"""
payload = self._version.fetch(method='GET', uri=self._uri, )
return EntityInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
identity=self._solution['identity'],
)
@property
def factors(self):
"""
Access the factors
:returns: twilio.rest.verify.v2.service.entity.factor.FactorList
:rtype: twilio.rest.verify.v2.service.entity.factor.FactorList
"""
if self._factors is None:
self._factors = FactorList(
self._version,
service_sid=self._solution['service_sid'],
identity=self._solution['identity'],
)
return self._factors
@property
def new_factors(self):
"""
Access the new_factors
:returns: twilio.rest.verify.v2.service.entity.new_factor.NewFactorList
:rtype: twilio.rest.verify.v2.service.entity.new_factor.NewFactorList
"""
if self._new_factors is None:
self._new_factors = NewFactorList(
self._version,
service_sid=self._solution['service_sid'],
identity=self._solution['identity'],
)
return self._new_factors
@property
def challenges(self):
"""
Access the challenges
:returns: twilio.rest.verify.v2.service.entity.challenge.ChallengeList
:rtype: twilio.rest.verify.v2.service.entity.challenge.ChallengeList
"""
if self._challenges is None:
self._challenges = ChallengeList(
self._version,
service_sid=self._solution['service_sid'],
identity=self._solution['identity'],
)
return self._challenges
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Verify.V2.EntityContext {}>'.format(context)
class EntityInstance(InstanceResource):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
def __init__(self, version, payload, service_sid, identity=None):
"""
Initialize the EntityInstance
:returns: twilio.rest.verify.v2.service.entity.EntityInstance
:rtype: twilio.rest.verify.v2.service.entity.EntityInstance
"""
super(EntityInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'sid': payload.get('sid'),
'identity': payload.get('identity'),
'account_sid': payload.get('account_sid'),
'service_sid': payload.get('service_sid'),
'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
'url': payload.get('url'),
'links': payload.get('links'),
}
# Context
self._context = None
self._solution = {'service_sid': service_sid, 'identity': identity or self._properties['identity'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: EntityContext for this EntityInstance
:rtype: twilio.rest.verify.v2.service.entity.EntityContext
"""
if self._context is None:
self._context = EntityContext(
self._version,
service_sid=self._solution['service_sid'],
identity=self._solution['identity'],
)
return self._context
@property
def sid(self):
"""
:returns: A string that uniquely identifies this Entity.
:rtype: unicode
"""
return self._properties['sid']
@property
def identity(self):
"""
:returns: Unique external identifier of the Entity
:rtype: unicode
"""
return self._properties['identity']
@property
def account_sid(self):
"""
:returns: Account Sid.
:rtype: unicode
"""
return self._properties['account_sid']
@property
def service_sid(self):
"""
:returns: Service Sid.
:rtype: unicode
"""
return self._properties['service_sid']
@property
def date_created(self):
"""
:returns: The date this Entity was created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date this Entity was updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def url(self):
"""
:returns: The URL of this resource.
:rtype: unicode
"""
return self._properties['url']
@property
def links(self):
"""
:returns: Nested resource URLs.
:rtype: unicode
"""
return self._properties['links']
def delete(self):
"""
Deletes the EntityInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def fetch(self):
"""
Fetch the EntityInstance
:returns: The fetched EntityInstance
:rtype: twilio.rest.verify.v2.service.entity.EntityInstance
"""
return self._proxy.fetch()
@property
def factors(self):
"""
Access the factors
:returns: twilio.rest.verify.v2.service.entity.factor.FactorList
:rtype: twilio.rest.verify.v2.service.entity.factor.FactorList
"""
return self._proxy.factors
@property
def new_factors(self):
"""
Access the new_factors
:returns: twilio.rest.verify.v2.service.entity.new_factor.NewFactorList
:rtype: twilio.rest.verify.v2.service.entity.new_factor.NewFactorList
"""
return self._proxy.new_factors
@property
def challenges(self):
"""
Access the challenges
:returns: twilio.rest.verify.v2.service.entity.challenge.ChallengeList
:rtype: twilio.rest.verify.v2.service.entity.challenge.ChallengeList
"""
return self._proxy.challenges
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Verify.V2.EntityInstance {}>'.format(context)
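# Illustrative end-to-end sketch (an assumption, not generated code); the SIDs
# and identity below are placeholders:
#     from twilio.rest import Client
#     client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')
#     entities = client.verify.v2.services('VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX').entities
#     entity = entities.create(identity='ff483d1ff591898a9942916050d2ca3f')
#     entity.challenges.list(limit=20)   # nested ChallengeList reached via the context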
|
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'BlockObjectAssociation'
db.create_table('simple_cms_relatedblock', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
('block', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['simple_cms.Block'])),
('group', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['simple_cms.BlockGroup'], null=True, blank=True)),
('order', self.gf('django.db.models.fields.IntegerField')(default=-1)),
))
db.send_create_signal('simple_cms', ['BlockObjectAssociation'])
def backwards(self, orm):
# Deleting model 'BlockObjectAssociation'
db.delete_table('simple_cms_relatedblock')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'simple_cms.article': {
'Meta': {'ordering': "['-post_date']", 'object_name': 'Article'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'allow_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'articles'", 'blank': 'True', 'to': "orm['simple_cms.Category']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'display_image': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_title': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'format': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key_image': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'post_date': ('django.db.models.fields.DateTimeField', [], {}),
'render_as_template': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'target': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
},
'simple_cms.block': {
'Meta': {'ordering': "('-updated_at', '-created_at')", 'object_name': 'Block'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'format': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'render_as_template': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'target': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
},
'simple_cms.blockgroup': {
'Meta': {'ordering': "('title',)", 'object_name': 'BlockGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'simple_cms.relatedblock': {
'Meta': {'ordering': "['order']", 'object_name': 'RelatedBlock'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'block': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['simple_cms.Block']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['simple_cms.BlockGroup']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'simple_cms.category': {
'Meta': {'ordering': "['title']", 'object_name': 'Category'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['simple_cms.Category']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'simple_cms.navigation': {
'Meta': {'ordering': "['site', 'title']", 'unique_together': "(('site', 'slug', 'parent'),)", 'object_name': 'Navigation'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'format': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['simple_cms.NavigationGroup']", 'null': 'True', 'blank': 'True'}),
'homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inherit_blocks': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'page_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['simple_cms.Navigation']"}),
'redirect_permanent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'redirect_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'render_as_template': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pages'", 'to': "orm['sites.Site']"}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'target': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'view': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
},
'simple_cms.navigationblocks': {
'Meta': {'ordering': "['order']", 'object_name': 'NavigationBlocks'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'block': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['simple_cms.Block']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['simple_cms.BlockGroup']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'navigation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['simple_cms.Navigation']"}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'simple_cms.navigationgroup': {
'Meta': {'ordering': "('title',)", 'object_name': 'NavigationGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'simple_cms.seo': {
'Meta': {'unique_together': "(['content_type', 'object_id'],)", 'object_name': 'Seo'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keywords': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
}
}
complete_apps = ['simple_cms']
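# Illustrative usage note (an assumption, not part of the migration itself): with
# South installed this schema change would typically be applied with
#     ./manage.py migrate simple_cms
# and rolled back to the previous migration with
#     ./manage.py migrate simple_cms <previous_migration_number>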
|
|
from Quartz import *
from Cocoa import *
import Utilities
import QuartzTextDrawing
import sys
import objc
def getTextString():
# These unicode values are the characters: Q, u, a, r, t, z,
# eighthnote, floral heart, black chess queen, and two CJK characters.
# Note: Create an NSString, because we'll use NSString-specific API's, otherwise
# we could just have used a python unicode object
return NSString.stringWithString_(u'\u0051\u0075\u0061\u0072\u0074\u007A\u266A\u2766\u265B\u3042\u304E')
doPointDrawing=1
def drawNSStringWithAttributes():
textString = getTextString()
if doPointDrawing:
context = NSGraphicsContext.currentContext().graphicsPort()
# Text Line 1. Draw with default attributes.
p = NSMakePoint(20.0, 400.0)
# Draw text with default text attributes. The point supplied is
# not the text baseline but rather the lower-left corner of the box
# which bounds the text.
textString.drawAtPoint_withAttributes_(p, None)
if doPointDrawing:
Utilities.drawPoint(context, p)
# Text Line 2. Draw with a specific font and color.
# Position the text 50 units below the previous text.
p.y -= 50
# Set attributes to use when drawing the string.
stringAttributes = {
# Use the font with the PostScript name "Times-Roman" at 40 point.
NSFontAttributeName: NSFont.fontWithName_size_("Times-Roman", 40),
# Set the color attribute to an opaque red.
NSForegroundColorAttributeName: NSColor.colorWithCalibratedRed_green_blue_alpha_(0.663, 0, 0.031, 1.0)
}
# Draw the text.
textString.drawAtPoint_withAttributes_(p, stringAttributes)
if doPointDrawing:
Utilities.drawPoint(context, p)
# Text Line 3. Draw stroked text.
# Position the text 50 units below the previous text.
p.y -= 50
# Panther and later support stroke attributes. A positive value
# of the stroke width attribute produces text that is stroked rather
# than filled.
stringAttributes[NSStrokeWidthAttributeName] = 3.0
textString.drawAtPoint_withAttributes_(p, stringAttributes)
if doPointDrawing:
Utilities.drawPoint(context, p)
# Text Line 4. Draw with fill and stroke.
p.y -= 50
# Panther and later support stroke attributes. A negative value
# of the stroke width attribute results in text that is both filled
# and stroked.
stringAttributes[NSStrokeWidthAttributeName] = -3.0
# Set the stroke color attribute to black.
stringAttributes[NSStrokeColorAttributeName] = NSColor.colorWithCalibratedRed_green_blue_alpha_(0, 0, 0, 1.0)
textString.drawAtPoint_withAttributes_(p, stringAttributes)
if doPointDrawing:
Utilities.drawPoint(context, p)
# Text Line 5. Draw at baseline.
# Tiger and later support the drawWithRect method which allows
# string text drawing from a point on the text baseline.
p.y -= 50
rect = NSRect(
origin=p,
size=NSSize(0,0),
)
textString.drawWithRect_options_attributes_(
rect, NSStringDrawingDisableScreenFontSubstitution,
stringAttributes)
if doPointDrawing:
Utilities.drawPoint(context, p)
_myLayout = None
_textStorage = None
_myTextRange = None
def drawWithNSLayout():
global _myLayout, _textStorage, _myTextRange
if _myLayout is None:
# Initialize the text storage with the string to draw.
_textStorage = NSTextStorage.alloc().initWithString_(getTextString())
# Initialize the layout manager to use with the text storage.
_myLayout = NSLayoutManager.alloc().init()
# Allocate and initialize a text container object.
textContainer = NSTextContainer.alloc().init()
# Add the text container to the layout.
_myLayout.addTextContainer_(textContainer)
# Release the text container since the layout retains it and
# this code no longer needs it.
del textContainer
# Add the layout to the text storage.
_textStorage.addLayoutManager_(_myLayout)
# Set attributes to use when drawing the string.
stringAttributes = {
# Use the font with the PostScript name "Times-Roman" at 40 point.
NSFontAttributeName: NSFont.fontWithName_size_("Times-Roman", 40),
# Set the text color attribute to an opaque red.
NSForegroundColorAttributeName: NSColor.colorWithCalibratedRed_green_blue_alpha_(0.663, 0, 0.031, 1.0),
}
# Create the range of text for the entire length of text
# in the textStorage object.
_myTextRange = NSMakeRange(0, _textStorage.length())
# Set the attributes on the entire range of text.
_textStorage.setAttributes_range_(stringAttributes, _myTextRange)
# Set the point for drawing the layout.
p = NSMakePoint(20.0, 400.0)
# Draw the text range at the point.
_myLayout.drawGlyphsForGlyphRange_atPoint_(_myTextRange, p)
if doPointDrawing:
context = NSGraphicsContext.currentContext().graphicsPort()
Utilities.drawPoint(context, p)
# The interface to the NSLayoutManager subclass.
class MyNSLayoutManager (NSLayoutManager):
# The extra instance variables for this subclass.
_textMode = objc.ivar()
_fColor = objc.ivar()
_sColor = objc.ivar()
_yStartPosition = objc.ivar()
_lineWidth = objc.ivar()
_clippingDrawProc = objc.ivar()
_clippingInfo = objc.ivar()
# Public methods to set the special attributes
# of the MyNSLayoutManager instance.
def setTextMode_(self, textMode):
self._textMode = textMode
def setFillColor_(self, color):
self._fColor = color
def setStrokeColor_(self, color):
self._sColor = color
def setTextLineWidth_(self, width):
self._lineWidth = width
def setClippingDrawProc_withInfo_(self, clippingDrawProc, info):
self._clippingDrawProc = clippingDrawProc
self._clippingInfo = info
def init(self):
self = super(MyNSLayoutManager, self).init()
if self is None:
return None
# Initialize the custom instance variables.
self._textMode = kCGTextFill
self._fColor = None
self._sColor = None
self._yStartPosition = 0
self._lineWidth = 1
self._clippingDrawProc = None
self._clippingInfo = None
return self
# This code overrides this method to record the y coordinate
# to use as the True baseline for the text drawing.
def drawGlyphsForGlyphRange_atPoint_(self, glyphsToShow, origin):
self._yStartPosition = origin.y
super(MyNSLayoutManager, self).drawGlyphsForGlyphRange_atPoint_(glyphsToShow, origin)
# This is the rendering method of NSLayoutManager that the
# code overrides to perform its custom rendering.
def showPackedGlyphs_length_glyphRange_atPoint_font_color_printAdjustment_(
self, glyphs, glyphLen, glyphRange, point, font, color, printingAdjustment):
# Obtain the destination drawing context.
context = NSGraphicsContext.currentContext().graphicsPort()
# Adjust start position y value based on the adjusted y coordinate.
# This ensures the text baseline is at the starting position
# passed to drawGlyphsForGlyphRange. This technique won't work
        # for superscripts, subscripts, or underlines but that's OK for this example.
        point.y = self._yStartPosition
# The Quartz graphics state should be preserved by showPackedGlyphs.
CGContextSaveGState(context)
# Set the desired text drawing mode.
CGContextSetTextDrawingMode(context, self._textMode)
# Set the fill color if needed.
        if (self._textMode == kCGTextFill or self._textMode == kCGTextFillStroke or
            self._textMode == kCGTextFillClip or self._textMode == kCGTextFillStrokeClip):
if self._fColor is not None:
CGContextSetFillColorWithColor(context, self._fColor)
# Set the line width and the stroke color if needed.
if (self._textMode == kCGTextStroke or self._textMode == kCGTextFillStroke or
self._textMode == kCGTextStrokeClip or self._textMode == kCGTextFillStrokeClip):
CGContextSetLineWidth(context, self._lineWidth)
if self._sColor is not None:
CGContextSetStrokeColorWithColor(context, self._sColor)
# Now draw the text. Check whether to adjust for printing widths
# and if needed adjust extra character spacing accordingly.
if printingAdjustment.width != 0.0:
# If printingAdjustment width is non-zero then the text
# needs to be adjusted. printingAdjustment is the per character
# adjustment required for this piece of text. Because
# the Quartz text character spacing set is transformed by
# the text matrix, this code needs to factor out that effect
# prior to setting it. Cocoa sets the text matrix to account
# for the point size of the font so we factor that out of the
# per character width supplied here.
charAdjust = printingAdjustment.width / font.pointSize()
CGContextSetCharacterSpacing(context, charAdjust)
else:
CGContextSetCharacterSpacing(context, 0.0)
# Draw the glyphs. The total number of glyphs is the length
# of the glyphs string passed to showPackedGlyphs, divided by 2
# since there are two bytes per glyph.
CGContextShowGlyphsAtPoint(context, point.x, point.y, glyphs, glyphLen/2)
# If the text drawing mode requires clipping and there is
# a custom clipping proc, call it. This allows drawing through
# clipped text before the graphics state is restored.
if (self._textMode == kCGTextClip or self._textMode == kCGTextFillClip or
self._textMode == kCGTextStrokeClip or
self._textMode == kCGTextFillStrokeClip) and self._clippingDrawProc is not None:
self._clippingDrawProc(context, point.x, point.y, self._clippingInfo)
CGContextRestoreGState(context)
def MyClipProc(c, x, y, info):
CGContextTranslateCTM(c, x, y)
CGContextSetStrokeColorWithColor(c, Utilities.getRGBOpaqueBlackColor())
# Draw a grid of lines through the clip.
    QuartzTextDrawing.drawGridLines(c)
_myLayout = None
_textStorage = None
_myTextRange = None
def drawWithCustomNSLayout():
global _myLayout, _textStorage, _myTextRange
if _myLayout is None:
textContainer = NSTextContainer.alloc().init()
_textStorage = NSTextStorage.alloc().initWithString_(getTextString())
# Create an instance of the MyNSLayoutManager subclass of NSLayoutManager.
_myLayout = MyNSLayoutManager.alloc().init()
_myLayout.addTextContainer_(textContainer)
# The layout retains the text container so this code can release it.
del textContainer
_textStorage.addLayoutManager_(_myLayout)
# Set attributes to use when drawing the string.
stringAttributes = {
# Use the font with the PostScript name "Times-Roman" at 40 point.
NSFontAttributeName: NSFont.fontWithName_size_("Times-Roman", 40),
}
# Create the range.
_myTextRange = NSMakeRange(0, _textStorage.length())
# Set the attributes on the entire range of text.
_textStorage.setAttributes_range_(stringAttributes, _myTextRange)
p = NSMakePoint(20.0, 400.0)
# Set the custom attributes of the layout subclass so that
# the text will be filled with black.
_myLayout.setTextMode_(kCGTextFill)
_myLayout.setFillColor_(Utilities.getRGBOpaqueBlackColor())
# Draw text line 1.
_myLayout.drawGlyphsForGlyphRange_atPoint_(_myTextRange, p)
if doPointDrawing:
context = NSGraphicsContext.currentContext().graphicsPort()
Utilities.drawPoint(context, p)
# Set the custom attributes of the layout subclass so that
# the text will be stroked with black.
_myLayout.setTextMode_(kCGTextStroke)
_myLayout.setStrokeColor_(Utilities.getRGBOpaqueBlackColor())
_myLayout.setTextLineWidth_(2)
# Draw text line 2.
    p.y -= 50
_myLayout.drawGlyphsForGlyphRange_atPoint_(_myTextRange, p)
if doPointDrawing:
Utilities.drawPoint(context, p)
    p.y -= 50
# Set the custom attributes of the layout subclass so that
# the text will be filled and stroked and the fill color
# will be red. Since the stroke color hasn't changed it
# will be stroked with black.
_myLayout.setTextMode_(kCGTextFillStroke)
_myLayout.setFillColor_(Utilities.getRGBOpaqueRedColor())
# Draw text line 3.
_myLayout.drawGlyphsForGlyphRange_atPoint_(_myTextRange, p)
if doPointDrawing:
Utilities.drawPoint(context, p)
    p.y -= 50
# Set the custom attributes of the layout subclass so that
# the text will be filled, stroked, then clipped.
_myLayout.setTextMode_(kCGTextFillStrokeClip)
# Set the clipping proc to MyClipProc which requires
# no info data.
_myLayout.setClippingDrawProc_withInfo_(MyClipProc, None)
# Draw text line 4.
_myLayout.drawGlyphsForGlyphRange_atPoint_(_myTextRange, p)
if doPointDrawing:
Utilities.drawPoint(context, p)
# Set the clipping proc to None for future drawing.
_myLayout.setClippingDrawProc_withInfo_(None, None)
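# Illustrative sketch (an assumption, not part of the sample): the drawing
# functions above expect a current Cocoa graphics context, e.g. when called
# from an NSView subclass's drawRect_. The class name here is hypothetical.
class _DemoTextView(NSView):
    def drawRect_(self, rect):
        # Any of the three drawing routines defined above could be exercised here.
        drawNSStringWithAttributes()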
|
|
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for operations that can be applied to the server.
Contains classes and utilities for creating operations that are to be
applied on the server.
"""
import errors
import random
import util
import sys
PROTOCOL_VERSION = '0.21'
# Operation Types
WAVELET_APPEND_BLIP = 'wavelet.appendBlip'
WAVELET_SET_TITLE = 'wavelet.setTitle'
WAVELET_ADD_PARTICIPANT = 'wavelet.participant.add'
WAVELET_DATADOC_SET = 'wavelet.datadoc.set'
WAVELET_MODIFY_TAG = 'wavelet.modifyTag'
WAVELET_MODIFY_PARTICIPANT_ROLE = 'wavelet.modifyParticipantRole'
BLIP_CREATE_CHILD = 'blip.createChild'
BLIP_DELETE = 'blip.delete'
DOCUMENT_APPEND_MARKUP = 'document.appendMarkup'
DOCUMENT_INLINE_BLIP_INSERT = 'document.inlineBlip.insert'
DOCUMENT_MODIFY = 'document.modify'
ROBOT_CREATE_WAVELET = 'robot.createWavelet'
ROBOT_FETCH_WAVE = 'robot.fetchWave'
ROBOT_NOTIFY_CAPABILITIES_HASH = 'robot.notifyCapabilitiesHash'
class Operation(object):
"""Represents a generic operation applied on the server.
This operation class contains data that is filled in depending on the
operation type.
It can be used directly, but doing so will not result
in local, transient reflection of state on the blips. In other words,
creating a 'delete blip' operation will not remove the blip from the local
context for the duration of this session. It is better to use the OpBased
model classes directly instead.
"""
def __init__(self, method, opid, params):
"""Initializes this operation with contextual data.
Args:
method: Method to call or type of operation.
opid: The id of the operation. Any callbacks will refer to these.
params: An operation type dependent dictionary
"""
self.method = method
self.id = opid
self.params = params
def __str__(self):
return '%s[%s]%s' % (self.method, self.id, str(self.params))
def set_param(self, param, value):
self.params[param] = value
return self
def serialize(self, method_prefix=''):
"""Serialize the operation.
Args:
method_prefix: prefixed for each method name to allow for specifying
a namespace.
Returns:
a dict representation of the operation.
"""
if method_prefix and not method_prefix.endswith('.'):
method_prefix += '.'
return {'method': method_prefix + self.method,
'id': self.id,
'params': util.serialize(self.params)}
def set_optional(self, param, value):
"""Sets an optional parameter.
If value is None or "", this is a no op. Otherwise it calls
set_param.
"""
if value == '' or value is None:
return self
else:
return self.set_param(param, value)
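# Illustrative sketch (an assumption, not part of the library): serializing a
# single operation with a namespace prefix; ids and wave names are placeholders.
#     op = Operation(WAVELET_SET_TITLE, 'op1',
#                    {'waveId': 'example.com!w+abc',
#                     'waveletId': 'example.com!conv+root',
#                     'waveletTitle': 'Hello'})
#     op.serialize(method_prefix='wave')
#     # -> {'method': 'wave.wavelet.setTitle', 'id': 'op1', 'params': {...}}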
class OperationQueue(object):
"""Wraps the queuing of operations using easily callable functions.
The operation queue wraps single operations as functions and queues the
resulting operations in-order. Typically there shouldn't be a need to
call this directly unless operations are needed on entities outside
of the scope of the robot. For example, to modify a blip that
does not exist in the current context, you might specify the wave, wavelet
and blip id to generate an operation.
Any calls to this will not be reflected in the robot in any way.
For example, calling wavelet_append_blip will not result in a new blip
being added to the robot, only an operation to be applied on the
server.
"""
# Some class global counters:
_next_operation_id = 1
def __init__(self, proxy_for_id=None):
self.__pending = []
self._capability_hash = 0
self._proxy_for_id = proxy_for_id
def _new_blipdata(self, wave_id, wavelet_id, initial_content='',
parent_blip_id=None):
"""Creates JSON of the blip used for this session."""
temp_blip_id = 'TBD_%s_%s' % (wavelet_id,
hex(random.randint(0, sys.maxint)))
return {'waveId': wave_id,
'waveletId': wavelet_id,
'blipId': temp_blip_id,
'content': initial_content,
'parentBlipId': parent_blip_id}
def _new_waveletdata(self, domain, participants):
"""Creates an ephemeral WaveletData instance used for this session.
Args:
domain: the domain to create the data for.
      participants: initial participants on the wavelet.
Returns:
Blipdata (for the rootblip), WaveletData.
"""
wave_id = domain + '!TBD_%s' % hex(random.randint(0, sys.maxint))
wavelet_id = domain + '!conv+root'
root_blip_data = self._new_blipdata(wave_id, wavelet_id)
participants = set(participants)
wavelet_data = {'waveId': wave_id,
'waveletId': wavelet_id,
'rootBlipId': root_blip_data['blipId'],
'participants': participants}
return root_blip_data, wavelet_data
def __len__(self):
return len(self.__pending)
def __iter__(self):
return self.__pending.__iter__()
def clear(self):
self.__pending = []
def proxy_for(self, proxy):
"""Return a view of this operation queue with the proxying for set to proxy.
This method returns a new instance of an operation queue that shares the
operation list, but has a different proxying_for_id set so the robot using
this new queue will send out operations with the proxying_for field set.
"""
res = OperationQueue()
res.__pending = self.__pending
res._capability_hash = self._capability_hash
res._proxy_for_id = proxy
return res
def set_capability_hash(self, capability_hash):
self._capability_hash = capability_hash
def serialize(self):
first = Operation(ROBOT_NOTIFY_CAPABILITIES_HASH,
'0',
{'capabilitiesHash': self._capability_hash,
'protocolVersion': PROTOCOL_VERSION})
operations = [first] + self.__pending
res = util.serialize(operations)
return res
def copy_operations(self, other_queue):
"""Copy the pending operations from other_queue into this one."""
for op in other_queue:
self.__pending.append(op)
def new_operation(self, method, wave_id, wavelet_id, props=None, **kwprops):
"""Creates and adds a new operation to the operation list."""
if props is None:
props = {}
props.update(kwprops)
props['waveId'] = wave_id
props['waveletId'] = wavelet_id
if self._proxy_for_id:
props['proxyingFor'] = self._proxy_for_id
operation = Operation(method,
'op%s' % OperationQueue._next_operation_id,
props)
self.__pending.append(operation)
OperationQueue._next_operation_id += 1
return operation
def wavelet_append_blip(self, wave_id, wavelet_id, initial_content=''):
"""Appends a blip to a wavelet.
Args:
wave_id: The wave id owning the containing wavelet.
wavelet_id: The wavelet id that this blip should be appended to.
initial_content: optionally the content to start with
Returns:
JSON representing the information of the new blip.
"""
blip_data = self._new_blipdata(wave_id, wavelet_id, initial_content)
self.new_operation(WAVELET_APPEND_BLIP, wave_id,
wavelet_id, blipData=blip_data)
return blip_data
def wavelet_add_participant(self, wave_id, wavelet_id, participant_id):
"""Adds a participant to a wavelet.
Args:
wave_id: The wave id owning that this operation is applied to.
wavelet_id: The wavelet id that this operation is applied to.
participant_id: Id of the participant to add.
Returns:
The operation created.
"""
return self.new_operation(WAVELET_ADD_PARTICIPANT, wave_id, wavelet_id,
participantId=participant_id)
def wavelet_datadoc_set(self, wave_id, wavelet_id, name, data):
"""Sets a key/value pair on the data document of a wavelet.
Args:
wave_id: The wave id owning that this operation is applied to.
wavelet_id: The wavelet id that this operation is applied to.
name: The key name for this data.
data: The value of the data to set.
Returns:
The operation created.
"""
return self.new_operation(WAVELET_DATADOC_SET, wave_id, wavelet_id,
datadocName=name, datadocValue=data)
def robot_create_wavelet(self, domain, participants=None, message=''):
"""Creates a new wavelet.
Args:
domain: the domain to create the wave in
participants: initial participants on this wavelet or None if none
message: an optional payload that is returned with the corresponding
event.
Returns:
data for the root_blip, wavelet
"""
if participants is None:
participants = []
blip_data, wavelet_data = self._new_waveletdata(domain, participants)
op = self.new_operation(ROBOT_CREATE_WAVELET,
wave_id=wavelet_data['waveId'],
wavelet_id=wavelet_data['waveletId'],
waveletData=wavelet_data)
op.set_optional('message', message)
return blip_data, wavelet_data
def robot_fetch_wave(self, wave_id, wavelet_id):
"""Requests a snapshot of the specified wave.
Args:
wave_id: The wave id owning that this operation is applied to.
wavelet_id: The wavelet id that this operation is applied to.
Returns:
The operation created.
"""
return self.new_operation(ROBOT_FETCH_WAVE, wave_id, wavelet_id)
def wavelet_set_title(self, wave_id, wavelet_id, title):
"""Sets the title of a wavelet.
Args:
wave_id: The wave id owning that this operation is applied to.
wavelet_id: The wavelet id that this operation is applied to.
title: The title to set.
Returns:
The operation created.
"""
return self.new_operation(WAVELET_SET_TITLE, wave_id, wavelet_id,
waveletTitle=title)
def wavelet_modify_participant_role(
self, wave_id, wavelet_id, participant_id, role):
"""Modify the role of a participant on a wavelet.
Args:
wave_id: The wave id owning that this operation is applied to.
wavelet_id: The wavelet id that this operation is applied to.
participant_id: Id of the participant whose role is modified.
role: the new role.
Returns:
The operation created.
"""
return self.new_operation(WAVELET_MODIFY_PARTICIPANT_ROLE, wave_id,
wavelet_id, participantId=participant_id,
participantRole=role)
def wavelet_modify_tag(self, wave_id, wavelet_id, tag, modify_how=None):
"""Modifies a tag in a wavelet.
Args:
wave_id: The wave id owning that this operation is applied to.
wavelet_id: The wavelet id that this operation is applied to.
tag: The tag (a string).
modify_how: (optional) how to apply the tag. The default is to add
the tag. Specify 'remove' to remove. Specify None or 'add' to
add.
Returns:
The operation created.
"""
return self.new_operation(WAVELET_MODIFY_TAG, wave_id, wavelet_id,
name=tag).set_optional("modify_how", modify_how)
def blip_create_child(self, wave_id, wavelet_id, blip_id):
"""Creates a child blip of another blip.
Args:
wave_id: The wave id owning that this operation is applied to.
wavelet_id: The wavelet id that this operation is applied to.
blip_id: The blip id that this operation is applied to.
Returns:
JSON of blip for which further operations can be applied.
"""
blip_data = self._new_blipdata(wave_id, wavelet_id, parent_blip_id=blip_id)
self.new_operation(BLIP_CREATE_CHILD, wave_id, wavelet_id,
blipId=blip_id,
blipData=blip_data)
return blip_data
def blip_delete(self, wave_id, wavelet_id, blip_id):
"""Deletes the specified blip.
Args:
wave_id: The wave id owning that this operation is applied to.
wavelet_id: The wavelet id that this operation is applied to.
blip_id: The blip id that this operation is applied to.
Returns:
The operation created.
"""
return self.new_operation(BLIP_DELETE, wave_id, wavelet_id, blipId=blip_id)
def document_append_markup(self, wave_id, wavelet_id, blip_id, content):
"""Appends content with markup to a document.
Args:
wave_id: The wave id owning that this operation is applied to.
wavelet_id: The wavelet id that this operation is applied to.
blip_id: The blip id that this operation is applied to.
content: The markup content to append.
Returns:
The operation created.
"""
return self.new_operation(DOCUMENT_APPEND_MARKUP, wave_id, wavelet_id,
blipId=blip_id, content=content)
def document_modify(self, wave_id, wavelet_id, blip_id):
"""Creates and queues a document modify operation
The returned operation still needs to be filled with details before
it makes sense.
Args:
wave_id: The wave id owning that this operation is applied to.
wavelet_id: The wavelet id that this operation is applied to.
blip_id: The blip id that this operation is applied to.
Returns:
The operation created.
"""
return self.new_operation(DOCUMENT_MODIFY,
wave_id,
wavelet_id,
blipId=blip_id)
def document_inline_blip_insert(self, wave_id, wavelet_id, blip_id, position):
"""Inserts an inline blip at a specific location.
Args:
wave_id: The wave id owning that this operation is applied to.
wavelet_id: The wavelet id that this operation is applied to.
blip_id: The blip id that this operation is applied to.
position: The position in the document to insert the blip.
Returns:
JSON data for the blip that was created for further operations.
"""
inline_blip_data = self._new_blipdata(wave_id, wavelet_id)
inline_blip_data['parentBlipId'] = blip_id
self.new_operation(DOCUMENT_INLINE_BLIP_INSERT, wave_id, wavelet_id,
blipId=blip_id,
index=position,
blipData=inline_blip_data)
return inline_blip_data
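# Hedged usage sketch (not part of the original API): shows how an
# OperationQueue accumulates operations locally before the robot
# infrastructure serializes and posts them. It assumes the module-level
# operation constants (WAVELET_APPEND_BLIP, WAVELET_ADD_PARTICIPANT, ...)
# and the random/sys imports used by _new_blipdata are available, as in the
# rest of this module; the wave/wavelet ids below are placeholders.
if __name__ == '__main__':
    queue = OperationQueue()
    blip = queue.wavelet_append_blip('example.com!w+demo',
                                     'example.com!conv+root',
                                     initial_content='Hello from a robot')
    queue.wavelet_add_participant('example.com!w+demo',
                                  'example.com!conv+root',
                                  'robot@appspot.com')
    # Nothing is applied yet; the queue only records the pending operations.
    print('%d pending operations, new blip id: %s'
          % (len(queue), blip['blipId']))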
|
|
# Copyright 2008 The RE2 Authors. All Rights Reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Parser for Unicode data files (as distributed by unicode.org)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
from six.moves import urllib
# Directory or URL where Unicode tables reside.
_UNICODE_DIR = "https://www.unicode.org/Public/14.0.0/ucd"
# Largest valid Unicode code value.
_RUNE_MAX = 0x10FFFF
class Error(Exception):
"""Unicode error base class."""
class InputError(Error):
"""Unicode input error class. Raised on invalid input."""
def _UInt(s):
"""Converts string to Unicode code point ('263A' => 0x263a).
Args:
s: string to convert
Returns:
Unicode code point
Raises:
InputError: the string is not a valid Unicode value.
"""
try:
v = int(s, 16)
except ValueError:
v = -1
if len(s) < 4 or len(s) > 6 or v < 0 or v > _RUNE_MAX:
raise InputError("invalid Unicode value %s" % (s,))
return v
def _URange(s):
"""Converts string to Unicode range.
'0001..0003' => [1, 2, 3].
'0001' => [1].
Args:
s: string to convert
Returns:
Unicode range
Raises:
InputError: the string is not a valid Unicode range.
"""
a = s.split("..")
if len(a) == 1:
return [_UInt(a[0])]
if len(a) == 2:
lo = _UInt(a[0])
hi = _UInt(a[1])
if lo < hi:
return range(lo, hi + 1)
raise InputError("invalid Unicode range %s" % (s,))
def _UStr(v):
"""Converts Unicode code point to hex string.
0x263a => '0x263A'.
Args:
v: code point to convert
Returns:
Unicode string
Raises:
InputError: the argument is not a valid Unicode value.
"""
if v < 0 or v > _RUNE_MAX:
raise InputError("invalid Unicode value %s" % (v,))
return "0x%04X" % (v,)
def _ParseContinue(s):
"""Parses a Unicode continuation field.
These are of the form '<Name, First>' or '<Name, Last>'.
Instead of giving an explicit range in a single table entry,
some Unicode tables use two entries, one for the first
code value in the range and one for the last.
The first entry's description is '<Name, First>' instead of 'Name'
and the second is '<Name, Last>'.
'<Name, First>' => ('Name', 'First')
'<Name, Last>' => ('Name', 'Last')
'Anything else' => ('Anything else', None)
Args:
s: continuation field string
Returns:
pair: name and ('First', 'Last', or None)
"""
match = re.match("<(.*), (First|Last)>", s)
if match is not None:
return match.groups()
return (s, None)
def ReadUnicodeTable(filename, nfields, doline):
"""Generic Unicode table text file reader.
The reader takes care of stripping out comments and also
parsing the two different ways that the Unicode tables specify
code ranges (using the .. notation and splitting the range across
multiple lines).
Each non-comment line in the table is expected to have the given
number of fields. The first field is known to be the Unicode value
and the second field its description.
The reader calls doline(codes, fields) for each entry in the table.
If doline raises an exception, the reader prints that exception,
prefixed with the file name and line number, and re-raises it.
Arguments:
filename: the Unicode data file to read, or a file-like object.
nfields: the number of expected fields per line in that file.
doline: the function to call for each table entry.
Raises:
InputError: nfields is invalid (must be >= 2).
"""
if nfields < 2:
raise InputError("invalid number of fields %d" % (nfields,))
if type(filename) == str:
if filename.startswith("https://"):
fil = urllib.request.urlopen(filename)
else:
fil = open(filename, "rb")
else:
fil = filename
first = None # first code in multiline range
expect_last = None # tag expected for "Last" line in multiline range
lineno = 0 # current line number
for line in fil:
lineno += 1
try:
line = line.decode('latin1')
# Chop # comments and white space; ignore empty lines.
sharp = line.find("#")
if sharp >= 0:
line = line[:sharp]
line = line.strip()
if not line:
continue
# Split fields on ";", chop more white space.
# Must have the expected number of fields.
fields = [s.strip() for s in line.split(";")]
if len(fields) != nfields:
raise InputError("wrong number of fields %d %d - %s" %
(len(fields), nfields, line))
# The Unicode text files have two different ways
# to list a Unicode range. Either the first field is
# itself a range (0000..FFFF), or the range is split
# across two lines, with the second field noting
# the continuation.
codes = _URange(fields[0])
(name, cont) = _ParseContinue(fields[1])
if expect_last is not None:
# If the last line gave the First code in a range,
# this one had better give the Last one.
if (len(codes) != 1 or codes[0] <= first or
cont != "Last" or name != expect_last):
raise InputError("expected Last line for %s" %
(expect_last,))
codes = range(first, codes[0] + 1)
first = None
expect_last = None
fields[0] = "%04X..%04X" % (codes[0], codes[-1])
fields[1] = name
elif cont == "First":
# Otherwise, if this is the First code in a range,
# remember it and go to the next line.
if len(codes) != 1:
raise InputError("bad First line: range given")
expect_last = name
first = codes[0]
continue
doline(codes, fields)
except Exception as e:
print("%s:%d: %s" % (filename, lineno, e))
raise
if expect_last is not None:
raise InputError("expected Last line for %s; got EOF" %
(expect_last,))
def CaseGroups(unicode_dir=_UNICODE_DIR):
"""Returns list of Unicode code groups equivalent under case folding.
Each group is a sorted list of code points,
and the list of groups is sorted by first code point
in the group.
Args:
unicode_dir: Unicode data directory
Returns:
list of Unicode code groups
"""
# Dict mapping lowercase code point to fold-equivalent group.
togroup = {}
def DoLine(codes, fields):
"""Process single CaseFolding.txt line, updating togroup."""
(_, foldtype, lower, _) = fields
if foldtype not in ("C", "S"):
return
lower = _UInt(lower)
togroup.setdefault(lower, [lower]).extend(codes)
ReadUnicodeTable(unicode_dir+"/CaseFolding.txt", 4, DoLine)
groups = list(togroup.values())
for g in groups:
g.sort()
groups.sort()
return groups
def Scripts(unicode_dir=_UNICODE_DIR):
"""Returns dict mapping script names to code lists.
Args:
unicode_dir: Unicode data directory
Returns:
dict mapping script names to code lists
"""
scripts = {}
def DoLine(codes, fields):
"""Process single Scripts.txt line, updating scripts."""
(_, name) = fields
scripts.setdefault(name, []).extend(codes)
ReadUnicodeTable(unicode_dir+"/Scripts.txt", 2, DoLine)
return scripts
def Categories(unicode_dir=_UNICODE_DIR):
"""Returns dict mapping category names to code lists.
Args:
unicode_dir: Unicode data directory
Returns:
dict mapping category names to code lists
"""
categories = {}
def DoLine(codes, fields):
"""Process single UnicodeData.txt line, updating categories."""
category = fields[2]
categories.setdefault(category, []).extend(codes)
# Add codes from Lu into L, etc.
if len(category) > 1:
short = category[0]
categories.setdefault(short, []).extend(codes)
ReadUnicodeTable(unicode_dir+"/UnicodeData.txt", 15, DoLine)
return categories
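# Hedged self-check sketch (not part of the original RE2 script): exercises
# the pure helpers above using the values from their docstrings; no network
# access or Unicode data files are required.
if __name__ == '__main__':
    assert _UInt('263A') == 0x263A
    assert list(_URange('0001..0003')) == [1, 2, 3]
    assert _ParseContinue('<CJK Ideograph, First>') == ('CJK Ideograph', 'First')
    assert _ParseContinue('LATIN SMALL LETTER A') == ('LATIN SMALL LETTER A', None)
    print('unicode helper self-checks passed')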
|
|
# Copyright 2013-2021 Aerospike, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from mock import patch
from lib.view import templates
from lib.view.view import CliView
from lib.view.sheet.const import SheetStyle
from lib.live_cluster.client.node import ASInfoError
class CliViewTest(unittest.TestCase):
def setUp(self) -> None:
self.cluster_mock = patch(
"lib.live_cluster.live_cluster_root_controller.Cluster"
).start()
self.sheet_mock = patch("lib.view.sheet.render").start()
self.addCleanup(patch.stopall)
def test_show_roster(self):
roster_data = {
"1.1.1.1": {
"test": {
"observed_nodes": [
"BB9070016AE4202",
"BB9060016AE4202",
"BB9050016AE4202",
"BB9040016AE4202",
"BB9020016AE4202",
],
"ns": "test",
"pending_roster": ["null"],
"roster": ["null"],
}
}
}
node_names = {"1.1.1.1": "1.1.1.1 is my name"}
node_ids = {"1.1.1.1": "ABCD"}
principal = "test-principal"
common = {"principal": principal}
self.cluster_mock.get_node_names.return_value = node_names
self.cluster_mock.get_node_ids.return_value = node_ids
self.cluster_mock.get_expected_principal.return_value = principal
sources = {"node_names": node_names, "node_ids": node_ids, "data": roster_data}
CliView.show_roster(
roster_data, self.cluster_mock, flip=False, timestamp="test-stamp", **{}
)
self.sheet_mock.assert_called_with(
templates.show_roster,
"Roster (test-stamp)",
sources,
common=common,
style=SheetStyle.columns,
dynamic_diff=False,
)
def test_show_roster_with_mods(self):
roster_data = {
"1.1.1.1": {
"test": {
"observed_nodes": [
"BB9070016AE4202",
"BB9060016AE4202",
"BB9050016AE4202",
"BB9040016AE4202",
"BB9020016AE4202",
],
"ns": "test",
"pending_roster": ["null"],
"roster": ["null"],
},
"bar": {
"observed_nodes": [
"BB90120016AE4202",
"BB90110016AE4202",
"BB90100016AE4202",
"BB9090016AE4202",
"BB9080016AE4202",
],
"ns": "bar",
"pending_roster": ["null"],
"roster": ["null"],
},
},
"2.2.2.2": {
"test": {
"observed_nodes": [
"BB9070016AE4202",
"BB9060016AE4202",
"BB9050016AE4202",
"BB9040016AE4202",
"BB9020016AE4202",
],
"ns": "test",
"pending_roster": ["null"],
"roster": ["null"],
},
"bar": {
"observed_nodes": [
"BB90120016AE4202",
"BB90110016AE4202",
"BB90100016AE4202",
"BB9090016AE4202",
"BB9080016AE4202",
],
"ns": "bar",
"pending_roster": ["null"],
"roster": ["null"],
},
},
}
filtered_data = {
"1.1.1.1": {
"bar": {
"observed_nodes": [
"BB90120016AE4202",
"BB90110016AE4202",
"BB90100016AE4202",
"BB9090016AE4202",
"BB9080016AE4202",
],
"ns": "bar",
"pending_roster": ["null"],
"roster": ["null"],
},
},
"2.2.2.2": {
"bar": {
"observed_nodes": [
"BB90120016AE4202",
"BB90110016AE4202",
"BB90100016AE4202",
"BB9090016AE4202",
"BB9080016AE4202",
],
"ns": "bar",
"pending_roster": ["null"],
"roster": ["null"],
},
},
}
node_names = {"1.1.1.1": "1.1.1.1 is my name", "2.2.2.2": "2.2.2.2 is my name"}
node_ids = {"1.1.1.1": "ABCD", "2.2.2.2": "EFGH"}
principal = "test-principal"
common = {"principal": principal}
self.cluster_mock.get_node_names.return_value = node_names
self.cluster_mock.get_node_ids.return_value = node_ids
self.cluster_mock.get_expected_principal.return_value = principal
sources = {
"node_names": node_names,
"node_ids": node_ids,
"data": filtered_data,
}
CliView.show_roster(
roster_data,
self.cluster_mock,
flip=True,
timestamp="test-stamp",
**{"for": "ba", "with": ["foo"]}
)
self.cluster_mock.get_node_names.assert_called_with(["foo"])
self.cluster_mock.get_node_ids.assert_called_with(["foo"])
self.sheet_mock.assert_called_with(
templates.show_roster,
"Roster (test-stamp)",
sources,
common=common,
style=SheetStyle.rows,
dynamic_diff=False,
)
def test_show_best_practices(self):
failed_practices = "foo"
timestamp = "timestamp"
self.cluster_mock.get_node_names.return_value = "node_names"
self.cluster_mock.get_node_ids.return_value = "node_ids"
self.cluster_mock.get_expected_principal.return_value = "principal"
sources = {
"data": failed_practices,
"node_names": "node_names",
"node_ids": "node_ids",
}
common = {"principal": "principal"}
CliView.show_best_practices(
self.cluster_mock,
failed_practices,
timestamp=timestamp,
**{"with": ["bar"]}
)
self.cluster_mock.get_node_names.assert_called_with(["bar"])
self.cluster_mock.get_node_ids.assert_called_with(["bar"])
self.sheet_mock.assert_called_with(
templates.show_best_practices,
"Best Practices (timestamp)",
sources,
common=common,
)
def test_show_jobs(self):
jobs_data = {
"1.1.1.1": {"1": "1 data", "2": "2 data"},
"2.2.2.2": {"3": "3 data", "4": "4 data"},
"3.3.3.3": {"5": "5 data", "6": "6 data"},
"4.4.4.4": ASInfoError("test", "error"),
}
filtered_data = {
"1.1.1.1": {"1": "1 data"},
"2.2.2.2": {"3": "3 data"},
"3.3.3.3": {"5": "5 data"},
}
timestamp = "timestamp"
self.cluster_mock.get_node_names.return_value = "node_names"
self.cluster_mock.get_node_ids.return_value = "node_ids"
self.cluster_mock.get_expected_principal.return_value = "principal"
sources = {
"data": filtered_data,
"node_names": "node_names",
"node_ids": "node_ids",
}
common = {"principal": "principal"}
CliView.show_jobs(
"Jobs",
self.cluster_mock,
jobs_data,
timestamp=timestamp,
**{"trid": ["1", "3", "5"], "like": ["foo"], "with": ["bar"]}
)
self.cluster_mock.get_node_names.assert_called_with(["bar"])
self.cluster_mock.get_node_ids.assert_called_with(["bar"])
self.sheet_mock.assert_called_with(
templates.show_jobs,
"Jobs (timestamp)",
sources,
common=common,
selectors=["foo"],
)
def test_show_racks(self):
racks_data = {
"1.1.1.1": {
"test": {
"0": {
"rack-id": "0",
"nodes": [
"BB9060016AE4202",
"BB9050016AE4202",
"BB9040016AE4202",
],
}
}
}
}
sources = {
"data": {
"1.1.1.1": {
("test", "0"): {
"rack-id": "0",
"nodes": [
"BB9060016AE4202",
"BB9050016AE4202",
"BB9040016AE4202",
],
}
}
}
}
CliView.show_racks(racks_data, timestamp="test-stamp", **{})
self.sheet_mock.assert_called_with(
templates.show_racks,
"Racks (test-stamp)",
sources,
)
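# Hedged addition: standard entry point so the tests above can be run
# directly with `python <this file>`; any project-specific runner setup is
# assumed to happen elsewhere.
if __name__ == '__main__':
    unittest.main()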
|
|
import uuid
import django_filters
import taggit
from django import forms
from django.conf import settings as django_settings
from django.contrib import auth
from django.contrib.contenttypes.models import ContentType
from django.core import exceptions
from django.db.models import Q
from django.db.models.functions import Concat
from django.utils import timezone
from django_filters.widgets import BooleanWidget
from rest_framework.filters import BaseFilterBackend
from waldur_core.core import filters as core_filters
from waldur_core.core import models as core_models
from waldur_core.core.filters import ExternalFilterBackend
from waldur_core.core.utils import get_ordering, is_uuid_like, order_with_nulls
from waldur_core.structure import models
from waldur_core.structure.managers import filter_queryset_for_user
from waldur_core.structure.registry import SupportedServices
User = auth.get_user_model()
class NameFilterSet(django_filters.FilterSet):
name = django_filters.CharFilter(lookup_expr='icontains')
name_exact = django_filters.CharFilter(field_name='name', lookup_expr='exact')
class ScopeTypeFilterBackend(BaseFilterBackend):
""" Scope filters:
* ?scope = ``URL``
* ?scope_type = ``string`` (can be list)
"""
content_type_field = 'content_type'
scope_param = 'scope_type'
scope_models = {
'customer': models.Customer,
'project': models.Project,
'resource': models.BaseResource,
}
@classmethod
def get_scope_type(cls, model):
for scope_type, scope_model in cls.scope_models.items():
if issubclass(model, scope_model):
return scope_type
@classmethod
def _get_scope_models(cls, types):
for scope_type, scope_model in cls.scope_models.items():
if scope_type in types:
try:
for submodel in scope_model.get_all_models():
yield submodel
except AttributeError:
yield scope_model
@classmethod
def _get_scope_content_types(cls, types):
return ContentType.objects.get_for_models(
*cls._get_scope_models(types)
).values()
def filter_queryset(self, request, queryset, view):
if self.scope_param in request.query_params:
content_types = self._get_scope_content_types(
request.query_params.getlist(self.scope_param)
)
return queryset.filter(
**{'%s__in' % self.content_type_field: content_types}
)
return queryset
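# Illustrative request handled by ScopeTypeFilterBackend (URL is a placeholder):
#   GET /api/some-list/?scope_type=project&scope_type=customer
# keeps only rows whose `content_type` points at a Project or Customer model
# (or one of their registered submodels via get_all_models()).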
class GenericRoleFilter(BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
return filter_queryset_for_user(queryset, request.user)
class GenericUserFilter(BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
user_uuid = request.query_params.get('user_uuid')
if not user_uuid:
return queryset
try:
uuid.UUID(user_uuid)
except ValueError:
return queryset.none()
try:
user = User.objects.get(uuid=user_uuid)
except User.DoesNotExist:
return queryset.none()
return filter_queryset_for_user(queryset, user)
class CustomerFilter(NameFilterSet):
query = django_filters.CharFilter(method='filter_query')
native_name = django_filters.CharFilter(lookup_expr='icontains')
abbreviation = django_filters.CharFilter(lookup_expr='icontains')
contact_details = django_filters.CharFilter(lookup_expr='icontains')
division_uuid = django_filters.ModelMultipleChoiceFilter(
field_name='division__uuid',
label='division_uuid',
to_field_name='uuid',
queryset=models.Division.objects.all(),
)
division_name = django_filters.CharFilter(
field_name='division__name', lookup_expr='icontains'
)
division_type_uuid = django_filters.ModelMultipleChoiceFilter(
field_name='division__type__uuid',
label='division_type_uuid',
to_field_name='uuid',
queryset=models.DivisionType.objects.all(),
)
division_type_name = django_filters.CharFilter(
field_name='division__type__name', lookup_expr='icontains'
)
class Meta:
model = models.Customer
fields = [
'name',
'abbreviation',
'contact_details',
'native_name',
'registration_code',
'agreement_number',
'backend_id',
]
def filter_query(self, queryset, name, value):
if value:
return queryset.filter(
Q(name__icontains=value)
| Q(native_name__icontains=value)
| Q(abbreviation__icontains=value)
| Q(domain__icontains=value)
| Q(uuid__icontains=value)
| Q(registration_code__icontains=value)
| Q(agreement_number__contains=value)
)
return queryset
class OwnedByCurrentUserFilterBackend(BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
value = request.query_params.get('owned_by_current_user')
boolean_field = forms.NullBooleanField()
try:
value = boolean_field.to_python(value)
except exceptions.ValidationError:
value = None
if value:
return queryset.filter(
permissions__user=request.user,
permissions__is_active=True,
permissions__role=models.CustomerRole.OWNER,
)
return queryset
class ExternalCustomerFilterBackend(ExternalFilterBackend):
pass
class AccountingStartDateFilter(BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
query = Q(accounting_start_date__gt=timezone.now())
return filter_by_accounting_is_running(request, queryset, query)
class CustomerAccountingStartDateFilter(BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
query = Q(customer__accounting_start_date__gt=timezone.now())
return filter_by_accounting_is_running(request, queryset, query)
def filter_by_accounting_is_running(request, queryset, query):
if not django_settings.WALDUR_CORE['ENABLE_ACCOUNTING_START_DATE']:
return queryset
value = request.query_params.get('accounting_is_running')
boolean_field = forms.NullBooleanField()
try:
value = boolean_field.to_python(value)
except exceptions.ValidationError:
value = None
if value is None:
return queryset
if value:
return queryset.exclude(query)
else:
return queryset.filter(query)
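# Illustrative behaviour of the helper above: ?accounting_is_running=true keeps
# objects whose accounting_start_date is already in the past, while
# ?accounting_is_running=false keeps those whose accounting has not started yet.
# The filter is a no-op unless WALDUR_CORE['ENABLE_ACCOUNTING_START_DATE'] is on.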
class ProjectTypeFilter(NameFilterSet):
class Meta:
model = models.ProjectType
fields = ['name']
class ProjectFilter(NameFilterSet):
customer = django_filters.UUIDFilter(field_name='customer__uuid', distinct=True,)
customer_name = django_filters.CharFilter(
field_name='customer__name', distinct=True, lookup_expr='icontains'
)
customer_native_name = django_filters.CharFilter(
field_name='customer__native_name', distinct=True, lookup_expr='icontains'
)
customer_abbreviation = django_filters.CharFilter(
field_name='customer__abbreviation', distinct=True, lookup_expr='icontains'
)
description = django_filters.CharFilter(lookup_expr='icontains')
query = django_filters.CharFilter(method='filter_query')
o = django_filters.OrderingFilter(
fields=(
('name', 'name'),
('created', 'created'),
('customer__name', 'customer_name'),
('customer__native_name', 'customer_native_name'),
('customer__abbreviation', 'customer_abbreviation'),
)
)
class Meta:
model = models.Project
fields = [
'name',
'customer',
'customer_name',
'customer_native_name',
'customer_abbreviation',
'description',
'created',
'query',
'backend_id',
]
def filter_query(self, queryset, name, value):
if is_uuid_like(value):
return queryset.filter(uuid=value)
else:
return queryset.filter(name__icontains=value)
class CustomerUserFilter(BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
customer_uuid = request.query_params.get('customer_uuid')
if not customer_uuid:
return queryset
try:
uuid.UUID(customer_uuid)
except ValueError:
return queryset.none()
return queryset.filter(
Q(
customerpermission__customer__uuid=customer_uuid,
customerpermission__is_active=True,
)
| Q(
projectpermission__project__customer__uuid=customer_uuid,
projectpermission__is_active=True,
)
).distinct()
class ProjectUserFilter(BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
project_uuid = request.query_params.get('project_uuid')
if not project_uuid:
return queryset
try:
uuid.UUID(project_uuid)
except ValueError:
return queryset.none()
return queryset.filter(
projectpermission__project__uuid=project_uuid,
projectpermission__is_active=True,
).distinct()
def filter_visible_users(queryset, user, extra=None):
connected_customers_query = models.Customer.objects.all()
if not (user.is_staff or user.is_support):
connected_customers_query = connected_customers_query.filter(
Q(permissions__user=user, permissions__is_active=True)
| Q(projects__permissions__user=user, projects__permissions__is_active=True)
).distinct()
connected_customers = list(connected_customers_query.all())
subquery = Q(
customerpermission__customer__in=connected_customers,
customerpermission__is_active=True,
) | Q(
projectpermission__project__customer__in=connected_customers,
projectpermission__is_active=True,
)
queryset = queryset.filter(subquery | Q(uuid=user.uuid) | (extra or Q())).distinct()
if not (user.is_staff or user.is_support):
queryset = queryset.filter(is_active=True, is_staff=False)
return queryset
class UserFilterBackend(BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
user = request.user
if not django_settings.WALDUR_CORE.get('SHOW_ALL_USERS', False) and not (
user.is_staff or user.is_support
):
queryset = filter_visible_users(queryset, user, self.get_extra_q(user))
return queryset.order_by('username')
_extra_query = []
@classmethod
def register_extra_query(cls, func_get_query):
"""
Add extra Q for user list queryset
:param func_get_query: a function that takes User object and returns Q object
:return: None
"""
cls._extra_query.append(func_get_query)
@classmethod
def get_extra_q(cls, user):
result = Q()
for q in cls._extra_query:
result = result | q(user)
return result
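# Hedged illustration of the extension hook documented above: an extra-Q
# callable that a plugin could pass to UserFilterBackend.register_extra_query()
# at application start-up. The function name is hypothetical and it is not
# registered here, so importing this module keeps existing behaviour unchanged.
def _example_show_self(user):
    """Return a Q object that always keeps `user` itself in the listing."""
    return Q(uuid=user.uuid)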
class BaseUserFilter(django_filters.FilterSet):
full_name = django_filters.CharFilter(
method='filter_by_full_name', label='Full name'
)
username = django_filters.CharFilter()
native_name = django_filters.CharFilter(lookup_expr='icontains')
organization = django_filters.CharFilter(lookup_expr='icontains')
job_title = django_filters.CharFilter(lookup_expr='icontains')
email = django_filters.CharFilter(lookup_expr='icontains')
is_active = django_filters.BooleanFilter(widget=BooleanWidget)
def filter_by_full_name(self, queryset, name, value):
return core_filters.filter_by_full_name(queryset, value)
class Meta:
model = User
fields = [
'full_name',
'native_name',
'organization',
'email',
'phone_number',
'description',
'job_title',
'username',
'civil_number',
'is_active',
'registration_method',
]
class UserFilter(BaseUserFilter):
is_staff = django_filters.BooleanFilter(widget=BooleanWidget)
is_support = django_filters.BooleanFilter(widget=BooleanWidget)
o = core_filters.ExtendedOrderingFilter(
fields=(
(('first_name', 'last_name'), 'full_name'),
'native_name',
'email',
'phone_number',
'description',
'organization',
'job_title',
'username',
'is_active',
'registration_method',
'is_staff',
'is_support',
)
)
class UserConcatenatedNameOrderingBackend(BaseFilterBackend):
""" Filter user by concatenated first_name + last_name + username with ?o=concatenated_name """
def filter_queryset(self, request, queryset, view):
queryset = self._filter_queryset(request, queryset, view)
return BaseUserFilter(
request.query_params, queryset=queryset, request=request
).qs
def _filter_queryset(self, request, queryset, view):
if 'o' not in request.query_params:
return queryset
if request.query_params['o'] == 'concatenated_name':
order_by = 'concatenated_name'
elif request.query_params['o'] == '-concatenated_name':
order_by = '-concatenated_name'
else:
return queryset
return queryset.annotate(
concatenated_name=Concat('first_name', 'last_name', 'username')
).order_by(order_by)
class UserPermissionFilter(django_filters.FilterSet):
user = django_filters.UUIDFilter(field_name='user__uuid')
user_url = core_filters.URLFilter(view_name='user-detail', field_name='user__uuid',)
username = django_filters.CharFilter(
field_name='user__username', lookup_expr='exact',
)
full_name = django_filters.CharFilter(
method='filter_by_full_name', label='User full name contains'
)
native_name = django_filters.CharFilter(
field_name='user__native_name', lookup_expr='icontains',
)
def filter_by_full_name(self, queryset, name, value):
return core_filters.filter_by_full_name(queryset, value, 'user')
o = core_filters.ExtendedOrderingFilter(
fields=(
('user__username', 'username'),
(('user__first_name', 'user__last_name'), 'full_name'),
('user__native_name', 'native_name'),
('user__email', 'email'),
('expiration_time', 'expiration_time'),
('created', 'created'),
('role', 'role'),
)
)
class ProjectPermissionFilter(UserPermissionFilter):
class Meta:
fields = ['role']
model = models.ProjectPermission
customer = django_filters.UUIDFilter(field_name='project__customer__uuid',)
project = django_filters.UUIDFilter(field_name='project__uuid',)
project_url = core_filters.URLFilter(
view_name='project-detail', field_name='project__uuid',
)
class CustomerPermissionFilter(UserPermissionFilter):
class Meta:
fields = ['role']
model = models.CustomerPermission
customer = django_filters.UUIDFilter(field_name='customer__uuid',)
customer_url = core_filters.URLFilter(
view_name='customer-detail', field_name='customer__uuid',
)
class CustomerPermissionReviewFilter(django_filters.FilterSet):
customer_uuid = django_filters.UUIDFilter(field_name='customer__uuid')
reviewer_uuid = django_filters.UUIDFilter(field_name='reviewer__uuid')
o = django_filters.OrderingFilter(fields=('created', 'closed'))
class Meta:
model = models.CustomerPermissionReview
fields = [
'is_pending',
]
class SshKeyFilter(NameFilterSet):
uuid = django_filters.UUIDFilter()
user_uuid = django_filters.UUIDFilter(field_name='user__uuid')
o = django_filters.OrderingFilter(fields=('name',))
class Meta:
model = core_models.SshPublicKey
fields = [
'name',
'fingerprint',
'uuid',
'user_uuid',
'is_shared',
]
class ServiceTypeFilter(django_filters.Filter):
def filter(self, qs, value):
value = SupportedServices.get_filter_mapping().get(value)
return super(ServiceTypeFilter, self).filter(qs, value)
class ServiceSettingsFilter(NameFilterSet):
type = ServiceTypeFilter()
state = core_filters.StateFilter()
customer = django_filters.UUIDFilter(field_name='customer__uuid')
customer_uuid = django_filters.UUIDFilter(field_name='customer__uuid')
class Meta:
model = models.ServiceSettings
fields = ('name', 'type', 'state', 'shared')
class ServiceSettingsScopeFilterBackend(core_filters.GenericKeyFilterBackend):
def get_related_models(self):
return models.BaseResource.get_all_models()
def get_field_name(self):
return 'scope'
class BaseResourceFilter(NameFilterSet):
def __init__(self, *args, **kwargs):
super(BaseResourceFilter, self).__init__(*args, **kwargs)
self.filters['o'] = django_filters.OrderingFilter(fields=self.ORDERING_FIELDS)
# customer
customer = django_filters.UUIDFilter(field_name='project__customer__uuid')
customer_uuid = django_filters.UUIDFilter(field_name='project__customer__uuid')
customer_name = django_filters.CharFilter(
field_name='project__customer__name', lookup_expr='icontains',
)
customer_native_name = django_filters.CharFilter(
field_name='project__customer__native_name', lookup_expr='icontains',
)
customer_abbreviation = django_filters.CharFilter(
field_name='project__customer__abbreviation', lookup_expr='icontains',
)
# project
project = django_filters.UUIDFilter(field_name='project__uuid')
project_uuid = django_filters.UUIDFilter(field_name='project__uuid')
project_name = django_filters.CharFilter(
field_name='project__name', lookup_expr='icontains'
)
# service settings
service_settings_uuid = django_filters.UUIDFilter(
field_name='service_settings__uuid'
)
service_settings_name = django_filters.CharFilter(
field_name='service_settings__name', lookup_expr='icontains',
)
# resource
description = django_filters.CharFilter(lookup_expr='icontains')
state = core_filters.MappedMultipleChoiceFilter(
choices=[
(representation, representation)
for db_value, representation in core_models.StateMixin.States.CHOICES
],
choice_mappings={
representation: db_value
for db_value, representation in core_models.StateMixin.States.CHOICES
},
)
uuid = django_filters.UUIDFilter(lookup_expr='exact')
backend_id = django_filters.CharFilter(field_name='backend_id', lookup_expr='exact')
tag = django_filters.ModelMultipleChoiceFilter(
field_name='tags__name',
label='tag',
to_field_name='name',
queryset=taggit.models.Tag.objects.all(),
)
rtag = django_filters.ModelMultipleChoiceFilter(
field_name='tags__name',
label='rtag',
to_field_name='name',
queryset=taggit.models.Tag.objects.all(),
conjoined=True,
)
external_ip = core_filters.EmptyFilter()
ORDERING_FIELDS = (
('name', 'name'),
('state', 'state'),
('project__customer__name', 'customer_name'),
('project__customer__native_name', 'customer_native_name',),
('project__customer__abbreviation', 'customer_abbreviation',),
('project__name', 'project_name'),
('service_settings__name', 'service_name'),
('created', 'created'),
)
class Meta:
model = models.BaseResource
fields = (
# customer
'customer',
'customer_uuid',
'customer_name',
'customer_native_name',
'customer_abbreviation',
# project
'project',
'project_uuid',
'project_name',
# service settings
'service_settings_name',
'service_settings_uuid',
# resource
'name',
'name_exact',
'description',
'state',
'uuid',
'backend_id',
'tag',
'rtag',
)
class TagsFilter(BaseFilterBackend):
""" Tags ordering. Filtering for complex tags.
Example:
?tag__license-os=centos7 - will filter objects with tag "license-os:centos7".
The view may define the following attributes:
- tags_filter_db_field - name of tags field in database. Default: tags.
- tags_filter_request_field - name of tags in request. Default: tag.
"""
def filter_queryset(self, request, queryset, view):
self.db_field = getattr(view, 'tags_filter_db_field', 'tags')
self.request_field = getattr(view, 'tags_filter_request_field', 'tag')
queryset = self._filter(request, queryset)
queryset = self._order(request, queryset)
return queryset
def _filter(self, request, queryset):
for key in request.query_params.keys():
item_name = self._get_item_name(key)
if item_name:
value = request.query_params.get(key)
filter_kwargs = {
self.db_field + '__name__startswith': item_name,
self.db_field + '__name__icontains': value,
}
queryset = queryset.filter(**filter_kwargs)
return queryset
def _order(self, request, queryset):
order_by = get_ordering(request)
item_name = self._get_item_name(order_by)
if item_name:
filter_kwargs = {self.db_field + '__name__startswith': item_name}
queryset = queryset.filter(**filter_kwargs).order_by(
self.db_field + '__name'
)
return queryset
def _get_item_name(self, key):
prefix = self.request_field + '__'
if key and key.startswith(prefix):
return key[len(prefix) :]
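# Illustrative use of TagsFilter with its default view attributes
# (tags_filter_db_field='tags', tags_filter_request_field='tag'):
#   ?tag__license-os=centos7  ->  .filter(tags__name__startswith='license-os',
#                                         tags__name__icontains='centos7')
#   ?o=tag__license-os        ->  same startswith filter, ordered by tags__name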
class StartTimeFilter(BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
order_by = get_ordering(request)
if order_by not in ('start_time', '-start_time'):
return queryset
return order_with_nulls(queryset, order_by)
class BaseServicePropertyFilter(NameFilterSet):
class Meta:
fields = ('name', 'name_exact')
class ServicePropertySettingsFilter(BaseServicePropertyFilter):
settings_uuid = django_filters.UUIDFilter(field_name='settings__uuid')
settings = core_filters.URLFilter(
view_name='servicesettings-detail', field_name='settings__uuid', distinct=True
)
class Meta(BaseServicePropertyFilter.Meta):
fields = BaseServicePropertyFilter.Meta.fields + ('settings_uuid', 'settings')
class DivisionFilter(NameFilterSet):
type = django_filters.CharFilter(field_name='type__name', lookup_expr='iexact')
type_uuid = django_filters.UUIDFilter(field_name='type__uuid')
type_url = core_filters.URLFilter(
view_name='division-type-detail', field_name='type__uuid',
)
parent = django_filters.UUIDFilter(field_name='parent__uuid')
class Meta:
model = models.Division
fields = [
'name',
]
class DivisionTypesFilter(NameFilterSet):
class Meta:
model = models.DivisionType
fields = [
'name',
]
class UserRolesFilter(BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
customer = view.get_object()
project_roles = request.query_params.getlist('project_role')
organization_roles = request.query_params.getlist('organization_role')
query = Q()
if project_roles:
# Filter project permissions by current customer
query = query | Q(
projectpermission__role__in=project_roles,
projectpermission__project__customer_id=customer.id,
)
if organization_roles:
# Filter customer permissions by current customer
query = query | Q(
customerpermission__role__in=organization_roles,
customerpermission__customer_id=customer.id,
)
return queryset.filter(query)
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function
import os
import copy
import shutil
import logging
import yaml
import json
from collections import OrderedDict
from multiprocessing import Pool
from functools import partial
import sys
import numpy as np
import fermipy.config as config
import fermipy.utils as utils
import fermipy.gtutils as gtutils
import fermipy.roi_model as roi_model
import fermipy.gtanalysis
from fermipy import defaults
from fermipy import fits_utils
from fermipy.config import ConfigSchema
from fermipy.gtutils import FreeParameterState
import pyLikelihood as pyLike
from astropy.io import fits
from astropy.time import Time
from astropy.table import Table, Column
def _fit_lc(gta, name, **kwargs):
# lightcurve fitting routine-
# 1.) start by freeing target and provided list of
# sources, fix all else- if fit fails, fix all pars
# except norm and try again
# 2.) if that fails to converge then try fixing low TS
# (<4) sources and then refit
# 3.) if that fails to converge then try fixing low-moderate TS (<9) sources and then refit
# 4.) if that fails then fix sources out to 1dg away from center of ROI
# 5.) if that fails set values to 0 in output and print warning message
free_sources = kwargs.get('free_sources', [])
free_background = kwargs.get('free_background', False)
free_params = kwargs.get('free_params', None)
shape_ts_threshold = kwargs.get('shape_ts_threshold', 16)
max_free_sources = kwargs.get('max_free_sources', 5)
if name in free_sources:
free_sources.remove(name)
free_state = FreeParameterState(gta)
gta.free_sources(free=False)
gta.free_sources_by_name(free_sources + [name], pars='norm')
gta.fit()
free_sources = sorted(free_sources,
key=lambda t: gta.roi[t]['ts']
if np.isfinite(gta.roi[t]['ts']) else -100.,
reverse=True)
free_sources = free_sources[:max_free_sources]
free_sources_norm = free_sources + [name]
free_sources_shape = []
for t in free_sources_norm:
if gta.roi[t]['ts'] > shape_ts_threshold:
free_sources_shape += [t]
gta.free_sources(free=False)
gta.logger.debug('Free Sources Norm: %s', free_sources_norm)
gta.logger.debug('Free Sources Shape: %s', free_sources_shape)
for niter in range(5):
if free_background:
free_state.restore()
if free_params:
gta.free_source(name, pars=free_params)
if niter == 0:
gta.free_sources_by_name(free_sources_norm, pars='norm')
gta.free_sources_by_name(free_sources_shape, pars='shape')
elif niter == 1:
gta.logger.info('Fit Failed. Retrying with free '
'normalizations.')
gta.free_sources_by_name(free_sources, False)
gta.free_sources_by_name(free_sources_norm, pars='norm')
elif niter == 2:
gta.logger.info('Fit Failed with User Supplied List of '
'Free/Fixed Sources.....Lets try '
'fixing TS<4 sources')
gta.free_sources_by_name(free_sources, False)
gta.free_sources_by_name(free_sources_norm, pars='norm')
gta.free_sources(minmax_ts=[None, 4], free=False, exclude=[name])
elif niter == 3:
gta.logger.info('Fit Failed with User Supplied List of '
'Free/Fixed Sources.....Lets try '
'fixing TS<9 sources')
gta.free_sources_by_name(free_sources, False)
gta.free_sources_by_name(free_sources_norm, pars='norm')
gta.free_sources(minmax_ts=[None, 9], free=False, exclude=[name])
elif niter == 4:
gta.logger.info('Fit still did not converge, lets try fixing the '
'sources up to 1dg out from ROI')
gta.free_sources_by_name(free_sources, False)
for s in free_sources:
src = gta.roi.get_source_by_name(s)
if src['offset'] < 1.0:
gta.free_source(s, pars='norm')
gta.free_sources(minmax_ts=[None, 9], free=False, exclude=[name])
else:
gta.logger.error('Fit still did not converge; please examine this data '
'point, setting output to 0')
break
fit_results = gta.fit()
if fit_results['fit_success'] is True:
break
return fit_results
def _process_lc_bin(itime, name, config, basedir, workdir, diff_sources, const_spectrum, roi, lck_params,
**kwargs):
i, time = itime
roi = copy.deepcopy(roi)
config = copy.deepcopy(config)
config['selection']['tmin'] = time[0]
config['selection']['tmax'] = time[1]
# create output directories labeled in MET vals
outdir = basedir + 'lightcurve_%.0f_%.0f' % (time[0], time[1])
config['fileio']['outdir'] = os.path.join(workdir, outdir)
config['logging']['prefix'] = 'lightcurve_%.0f_%.0f ' % (time[0], time[1])
config['fileio']['logfile'] = os.path.join(config['fileio']['outdir'],
'fermipy.log')
utils.mkdir(config['fileio']['outdir'])
yaml.dump(utils.tolist(config),
open(os.path.join(config['fileio']['outdir'],
'config.yaml'), 'w'))
xmlfile = os.path.join(config['fileio']['outdir'], 'base.xml')
try:
from fermipy.gtanalysis import GTAnalysis
gta = GTAnalysis(config, roi, loglevel=logging.DEBUG)
gta.logger.info('Fitting time range %i %i' % (time[0], time[1]))
gta.setup()
except Exception:
print('Analysis failed in time range %i %i' %
(time[0], time[1]))
print(sys.exc_info()[0])
return {}
gta._lck_params = lck_params
# Recompute source map for source of interest and sources within 3 deg
if gta.config['gtlike']['use_scaled_srcmap']:
names = [s.name for s in
gta.roi.get_sources(distance=3.0, skydir=gta.roi[name].skydir)
if not s.diffuse]
gta.reload_sources(names)
# Write the current model
gta.write_xml(xmlfile)
# Optimize the model
gta.optimize(skip=diff_sources,
shape_ts_threshold=kwargs.get('shape_ts_threshold'))
fit_results = _fit_lc(gta, name, **kwargs)
gta.write_xml('fit_model_final.xml')
srcmodel = copy.deepcopy(gta.get_src_model(name))
numfree = gta.get_free_param_vector().count(True)
const_srcmodel = gta.get_src_model(name).copy()
fixed_fit_results = fit_results.copy()
fixed_srcmodel = gta.get_src_model(name).copy()
fixed_fit_results['fit_success'], fixed_srcmodel['fit_success'] = False, False
fixed_fit_results['fit_quality'], fixed_srcmodel['fit_quality'] = 0, 0
max_ts_thresholds = [None, 4, 9, 16, 25]
for max_ts in max_ts_thresholds:
if max_ts is not None:
gta.free_sources(minmax_ts=[None, max_ts], free=False, exclude=[name])
# rerun fit using params from full time (constant) fit using same
# param vector as the successful fit to get loglike
specname, spectrum = const_spectrum
gta.set_source_spectrum(name, spectrum_type=specname,
spectrum_pars=spectrum,
update_source=False)
gta.free_source(name, free=False)
const_fit_results = gta.fit()
if not const_fit_results['fit_success']:
continue
const_srcmodel = gta.get_src_model(name)
# rerun using shape fixed to full time fit
# for the fixed-shape lightcurve
gta.free_source(name, pars='norm')
fixed_fit_results = gta.fit()
if not fixed_fit_results['fit_success']:
continue
fixed_srcmodel = gta.get_src_model(name)
break
# special lc output
o = {'flux_const': const_srcmodel['flux'],
'loglike_const': const_fit_results['loglike'],
'fit_success': fit_results['fit_success'],
'fit_success_fixed': fixed_fit_results['fit_success'],
'fit_quality': fit_results['fit_quality'],
'fit_status': fit_results['fit_status'],
'num_free_params': numfree,
'config': config}
# full flux output
if fit_results['fit_success'] == 1:
for k in defaults.source_flux_output.keys():
if not k in srcmodel:
continue
o[k] = srcmodel[k]
o[k+'_fixed'] = fixed_srcmodel[k]
gta.logger.info('Finished time range %i %i' % (time[0], time[1]))
return o
def calcTS_var(loglike, loglike_const, flux_err, flux_const, systematic, fit_success):
# calculates variability according to Eq. 4 in 2FGL
# including correction using non-numbered Eq. following Eq. 4
# first, remove failed bins
loglike = [elm for elm,success in zip(loglike,fit_success) if success]
loglike_const = [
elm for elm,success in zip(loglike_const,fit_success) if success]
flux_err = [elm for elm,success in zip(flux_err,fit_success) if success]
v_sqs = [loglike[i] - loglike_const[i] for i in range(len(loglike))]
factors = [flux_err[i]**2 / (flux_err[i]**2 + systematic**2 * flux_const**2)
for i in range(len(flux_err))]
return 2. * np.sum([a * b for a, b in zip(factors, v_sqs)])
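# The value returned above follows 2FGL Eq. 4 with the systematic correction
# from the un-numbered equation that follows it:
#   TS_var = 2 * sum_i w_i * (logL_i - logL_const_i)
#   w_i    = sigma_i**2 / (sigma_i**2 + f_sys**2 * F_const**2)
# where sigma_i is the flux error in bin i, F_const the flux of the constant
# model and f_sys the relative systematic error (0.02 by default in _make_lc).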
class LightCurve(object):
def lightcurve(self, name, **kwargs):
"""Generate a lightcurve for the named source. The function will
complete the basic analysis steps for each bin and perform a
likelihood fit for each bin. Extracted values (along with
errors) are Integral Flux, spectral model, Spectral index, TS
value, pred. # of photons. Note: successful calculation of
TS:subscript:`var` requires at least one free background
parameter and a previously optimized ROI model.
Parameters
---------
name: str
source name
{options}
Returns
---------
LightCurve : dict
Dictionary containing output of the LC analysis
"""
name = self.roi.get_source_by_name(name).name
# Create schema for method configuration
schema = ConfigSchema(self.defaults['lightcurve'],
optimizer=self.defaults['optimizer'])
schema.add_option('prefix', '')
config = utils.create_dict(self.config['lightcurve'],
optimizer=self.config['optimizer'])
config = schema.create_config(config, **kwargs)
self.logger.info('Computing Lightcurve for %s' % name)
o = self._make_lc(name, **config)
filename = utils.format_filename(self.workdir, 'lightcurve',
prefix=[config['prefix'],
name.lower().replace(' ', '_')])
o['file'] = None
if config['write_fits']:
o['file'] = os.path.basename(filename) + '.fits'
self._make_lc_fits(o, filename + '.fits', **config)
if config['write_npy']:
np.save(filename + '.npy', o)
self.logger.info('Finished Lightcurve')
return o
def _make_lc_fits(self, lc, filename, **kwargs):
# produce columns in fits file
cols = OrderedDict()
cols['tmin'] = Column(name='tmin', dtype='f8',
data=lc['tmin'], unit='s')
cols['tmax'] = Column(name='tmax', dtype='f8',
data=lc['tmax'], unit='s')
cols['tmin_mjd'] = Column(name='tmin_mjd', dtype='f8',
data=lc['tmin_mjd'], unit='day')
cols['tmax_mjd'] = Column(name='tmax_mjd', dtype='f8',
data=lc['tmax_mjd'], unit='day')
# add in columns for model parameters
for k, v in lc.items():
if k in cols:
continue
if isinstance(v, np.ndarray):
cols[k] = Column(name=k, data=v, dtype=v.dtype)
tab = Table(cols)
hdu_lc = fits.table_to_hdu(tab)
hdu_lc.name = 'LIGHTCURVE'
hdus = [fits.PrimaryHDU(), hdu_lc]
keywords = {'SRCNAME': lc['name'],
'CONFIG': json.dumps(lc['config'])}
fits_utils.write_hdus(hdus, filename, keywords=keywords)
def _create_lc_dict(self, name, times):
# Output Dictionary
o = {}
o['name'] = name
o['tmin'] = times[:-1]
o['tmax'] = times[1:]
o['tmin_mjd'] = utils.met_to_mjd(o['tmin'])
o['tmax_mjd'] = utils.met_to_mjd(o['tmax'])
o['loglike_const'] = np.nan * np.ones(o['tmin'].shape)
o['flux_const'] = np.nan * np.ones(o['tmin'].shape)
o['fit_success'] = np.zeros(o['tmin'].shape, dtype=bool)
o['fit_success_fixed'] = np.zeros(o['tmin'].shape, dtype=bool)
o['fit_status'] = np.zeros(o['tmin'].shape, dtype=int)
o['fit_quality'] = np.zeros(o['tmin'].shape, dtype=int)
o['num_free_params'] = np.zeros(o['tmin'].shape, dtype=int)
for k, v in defaults.source_flux_output.items():
if not k in self.roi[name]:
continue
v = self.roi[name][k]
if isinstance(v, np.ndarray) and v.dtype.kind in ['S', 'U']:
o[k] = np.zeros(o['tmin'].shape + v.shape, dtype=v.dtype)
o[k+'_fixed'] = copy.deepcopy(o[k])
elif isinstance(v, np.ndarray):
o[k] = np.nan * np.ones(o['tmin'].shape + v.shape)
o[k+'_fixed'] = copy.deepcopy(o[k])
elif isinstance(v, float):  # np.float was a plain alias of float (removed in NumPy 1.24)
o[k] = np.nan * np.ones(o['tmin'].shape)
o[k+'_fixed'] = copy.deepcopy(o[k])
return o
def _make_lc(self, name, **kwargs):
# make array of time values in MET
if kwargs['time_bins']:
times = np.array(kwargs['time_bins'])
elif kwargs['nbins']:
times = np.linspace(self.tmin, self.tmax,
kwargs['nbins'] + 1)
else:
times = np.arange(self.tmin, self.tmax,
kwargs['binsz'])
diff_sources = [s.name for s in self.roi.sources if s.diffuse]
skydir = self.roi[name].skydir
if kwargs.get('free_radius', None) is not None:
kwargs['free_sources'] += [
s.name for s in self.roi.get_sources(skydir=skydir,
distance=kwargs['free_radius'],
exclude=diff_sources)]
# save params from full time fit
spectrum = self.like[name].src.spectrum()
specname = spectrum.genericName()
const_spectrum = (specname, gtutils.get_function_pars_dict(spectrum))
# Create Configurations
lck_params = copy.deepcopy(self._lck_params)
config = copy.deepcopy(self.config)
config['ltcube']['use_local_ltcube'] = kwargs['use_local_ltcube']
config['gtlike']['use_scaled_srcmap'] = kwargs['use_scaled_srcmap']
config['model']['diffuse_dir'] = [self.workdir]
config['selection']['filter'] = None
if config['components'] is None:
config['components'] = []
for j, c in enumerate(self.components):
if len(config['components']) <= j:
config['components'] += [{}]
data_cfg = {'evfile': c.files['ft1'],
'scfile': c.data_files['scfile'],
'ltcube': None}
gtlike_cfg = {}
if config['gtlike']['use_scaled_srcmap']:
gtlike_cfg['bexpmap_base'] = c.files['bexpmap']
gtlike_cfg['bexpmap_roi_base'] = c.files['bexpmap_roi']
gtlike_cfg['srcmap_base'] = c.files['srcmap']
config['components'][j] = \
utils.merge_dict(config['components'][j],
{'data': data_cfg, 'gtlike': gtlike_cfg},
add_new_keys=True)
config.setdefault('selection', {})
config['selection']['filter'] = None
outdir = kwargs.get('outdir', None)
basedir = outdir + '/' if outdir is not None else ''
wrap = partial(_process_lc_bin, name=name, config=config,
basedir=basedir, workdir=self.workdir, diff_sources=diff_sources,
const_spectrum=const_spectrum, roi=self.roi, lck_params=lck_params, **kwargs)
itimes = enumerate(zip(times[:-1], times[1:]))
if kwargs.get('multithread', False):
p = Pool(processes=kwargs.get('nthread', None))
mapo = p.imap(wrap, itimes)
p.close()
else:
mapo = map(wrap, itimes)
# Materialize the per-bin results up front; the optional cleanup loop
# below would otherwise exhaust the map()/imap() iterator before the
# per-bin values are collected in the loop further down.
mapo = list(mapo)
if not kwargs.get('save_bin_data', False):
for m in mapo:
shutil.rmtree(m['config']['fileio']['outdir'])
o = self._create_lc_dict(name, times)
o['config'] = kwargs
flux_const = None
for i, time in enumerate(zip(times[:-1], times[1:])):
next_fit = mapo[i]
if not next_fit['fit_success']:
self.logger.error(
'Fit failed in bin %d in range %i %i.' % (i, time[0], time[1]))
continue
if flux_const is None:
flux_const = next_fit['flux_const']
for k in o.keys():
if k == 'config':
continue
if not k in next_fit:
continue
# if (isinstance(o[k], np.ndarray) and
# o[k][i].shape != mapo[i][k].shape):
# gta.logger.warning('Incompatible shape for column %s', k)
# continue
try:
o[k][i] = next_fit[k]
except Exception:
pass
systematic = kwargs.get('systematic', 0.02)
o['ts_var'] = calcTS_var(loglike=o['loglike_fixed'],
loglike_const=o['loglike_const'],
flux_err=o['flux_err_fixed'],
flux_const=flux_const,
systematic=systematic,
fit_success=o['fit_success_fixed'])
return o
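# Hedged usage sketch (requires a fully configured fermipy analysis; the
# config file name and source name below are placeholders):
#
#     from fermipy.gtanalysis import GTAnalysis
#     gta = GTAnalysis('config.yaml')
#     gta.setup()
#     gta.optimize()
#     lc = gta.lightcurve('3FGL J2253.9+1609', nbins=12,
#                         free_radius=3.0, multithread=False)
#     print(lc['ts_var'], lc['fit_success'])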
|
|
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import functools
import os
import glance_store as store_api
from glance_store import backend
from glance_store import exceptions as store_exceptions
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import timeutils
from oslo_utils import units
import taskflow
from taskflow.patterns import linear_flow as lf
from taskflow import retry
from taskflow import task
from glance.api import common as api_common
import glance.async_.flows._internal_plugins as internal_plugins
import glance.async_.flows.plugins as import_plugins
from glance.common import exception
from glance.common.scripts.image_import import main as image_import
from glance.common.scripts import utils as script_utils
from glance.common import store_utils
from glance.i18n import _, _LE, _LI
from glance.quota import keystone as ks_quota
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
api_import_opts = [
cfg.ListOpt('image_import_plugins',
item_type=cfg.types.String(quotes=True),
bounds=True,
sample_default='[no_op]',
default=[],
help=_("""
Image import plugins to be enabled for task processing.
Provide a list of strings reflecting the task Objects
that should be included in the Image Import flow. The
task objects need to be defined in 'glance/async/
flows/plugins/*' and may be implemented by the OpenStack
Glance project team, a deployer or a 3rd party.
By default no plugins are enabled; to take advantage
of the plugin model, the list of plugins must be set
explicitly in the glance-image-import.conf file.
The allowed value for this option is a comma separated
list of object names between ``[`` and ``]``.
Possible values:
* no_op (only logs a debug level message that the
plugin has been executed)
* Any provided Task object name to be included
in the flow.
""")),
]
CONF.register_opts(api_import_opts, group='image_import_opts')
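# Hedged illustration (not part of the original module): based on the help
# text and sample_default above, enabling the bundled no_op plugin in
# glance-image-import.conf is assumed to look roughly like:
#
#     [image_import_opts]
#     image_import_plugins = [no_op]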
# TODO(jokke): We should refactor the task implementations so that we do not
# need to duplicate what we have already for example in base_import.py.
class _NoStoresSucceeded(exception.GlanceException):
def __init__(self, message):
super(_NoStoresSucceeded, self).__init__(message)
class ImportActionWrapper(object):
"""Wrapper for all the image metadata operations we do during an import.
This is used to consolidate the changes we make to image metadata during
an import operation, and can be used with an admin-capable repo to
enable non-owner controlled modification of that data if desired.
Use this as a context manager to make multiple changes followed by
a save of the image in one operation. An _ImportActions object is
yielded from the context manager, which defines the available operations.
:param image_repo: The ImageRepo we should use to fetch/save the image
    :param image_id: The ID of the image we should be altering
"""
def __init__(self, image_repo, image_id, task_id):
self._image_repo = image_repo
self._image_id = image_id
self._task_id = task_id
def __enter__(self):
self._image = self._image_repo.get(self._image_id)
self._image_previous_status = self._image.status
self._assert_task_lock(self._image)
return _ImportActions(self._image)
def __exit__(self, type, value, traceback):
if type is not None:
# NOTE(danms): Do not save the image if we raised in context
return
# NOTE(danms): If we were in the middle of a long-running
# set_data() where someone else stole our lock, we may race
# with them to update image locations and erase one that
# someone else is working on. Checking the task lock here
# again is not perfect exclusion, but in lieu of actual
# thread-safe location updating, this at least reduces the
# likelihood of that happening.
self.assert_task_lock()
if self._image_previous_status != self._image.status:
LOG.debug('Image %(image_id)s status changing from '
'%(old_status)s to %(new_status)s',
{'image_id': self._image_id,
'old_status': self._image_previous_status,
'new_status': self._image.status})
self._image_repo.save(self._image, self._image_previous_status)
@property
def image_id(self):
return self._image_id
def drop_lock_for_task(self):
"""Delete the import lock for our task.
This is an atomic operation and thus does not require a context
for the image save. Note that after calling this method, no
further actions will be allowed on the image.
:raises: NotFound if the image was not locked by the expected task.
"""
image = self._image_repo.get(self._image_id)
self._image_repo.delete_property_atomic(image,
'os_glance_import_task',
self._task_id)
def _assert_task_lock(self, image):
task_lock = image.extra_properties.get('os_glance_import_task')
if task_lock != self._task_id:
LOG.error('Image %(image)s import task %(task)s attempted to '
'take action on image, but other task %(other)s holds '
'the lock; Aborting.',
{'image': self._image_id,
'task': self._task_id,
'other': task_lock})
raise exception.TaskAbortedError()
def assert_task_lock(self):
"""Assert that we own the task lock on the image.
:raises: TaskAbortedError if we do not
"""
image = self._image_repo.get(self._image_id)
self._assert_task_lock(image)
class _ImportActions(object):
"""Actions available for being performed on an image during import.
This defines the available actions that can be performed on an image
during import, which may be done with an image owned by another user.
Do not instantiate this object directly, get it from ImportActionWrapper.
"""
IMPORTING_STORES_KEY = 'os_glance_importing_to_stores'
IMPORT_FAILED_KEY = 'os_glance_failed_import'
def __init__(self, image):
self._image = image
@property
def image_id(self):
return self._image.image_id
@property
def image_size(self):
return self._image.size
@property
def image_locations(self):
# Return a copy of this complex structure to make sure we do
# not allow the plugin to mutate this underneath us for our
# later save. If this needs to be a thing in the future, we
# should have moderated access like all the other things here.
return copy.deepcopy(self._image.locations)
@property
def image_status(self):
return self._image.status
def merge_store_list(self, list_key, stores, subtract=False):
stores = set([store for store in stores if store])
existing = set(
self._image.extra_properties.get(list_key, '').split(','))
if subtract:
if stores - existing:
LOG.debug('Stores %(stores)s not in %(key)s for '
'image %(image_id)s',
{'stores': ','.join(sorted(stores - existing)),
'key': list_key,
'image_id': self.image_id})
merged_stores = existing - stores
else:
merged_stores = existing | stores
stores_list = ','.join(sorted((store for store in
merged_stores if store)))
self._image.extra_properties[list_key] = stores_list
LOG.debug('Image %(image_id)s %(key)s=%(stores)s',
{'image_id': self.image_id,
'key': list_key,
'stores': stores_list})
def add_importing_stores(self, stores):
"""Add a list of stores to the importing list.
Add stores to os_glance_importing_to_stores
:param stores: A list of store names
"""
self.merge_store_list(self.IMPORTING_STORES_KEY, stores)
def remove_importing_stores(self, stores):
"""Remove a list of stores from the importing list.
Remove stores from os_glance_importing_to_stores
:param stores: A list of store names
"""
self.merge_store_list(self.IMPORTING_STORES_KEY, stores, subtract=True)
def add_failed_stores(self, stores):
"""Add a list of stores to the failed list.
Add stores to os_glance_failed_import
:param stores: A list of store names
"""
self.merge_store_list(self.IMPORT_FAILED_KEY, stores)
def remove_failed_stores(self, stores):
"""Remove a list of stores from the failed list.
Remove stores from os_glance_failed_import
:param stores: A list of store names
"""
self.merge_store_list(self.IMPORT_FAILED_KEY, stores, subtract=True)
def set_image_data(self, uri, task_id, backend, set_active,
callback=None):
"""Populate image with data on a specific backend.
This is used during an image import operation to populate the data
in a given store for the image. If this object wraps an admin-capable
image_repo, then this will be done with admin credentials on behalf
of a user already determined to be able to perform this operation
(such as a copy-image import of an existing image owned by another
user).
:param uri: Source URL for image data
:param task_id: The task responsible for this operation
:param backend: The backend store to target the data
:param set_active: Whether or not to set the image to 'active'
state after the operation completes
:param callback: A callback function with signature:
fn(action, chunk_bytes, total_bytes)
which should be called while processing the image
approximately every minute.
"""
if callback:
callback = functools.partial(callback, self)
return image_import.set_image_data(self._image, uri, task_id,
backend=backend,
set_active=set_active,
callback=callback)
def set_image_attribute(self, **attrs):
"""Set an image attribute.
This allows setting various image attributes which will be saved
upon exiting the ImportActionWrapper context.
:param attrs: kwarg list of attributes to set on the image
:raises: AttributeError if an attribute outside the set of allowed
ones is present in attrs.
"""
allowed = ['status', 'disk_format', 'container_format',
'virtual_size', 'size']
for attr, value in attrs.items():
if attr not in allowed:
raise AttributeError('Setting %s is not allowed' % attr)
setattr(self._image, attr, value)
def set_image_extra_properties(self, properties):
"""Merge values into image extra_properties.
This allows a plugin to set additional properties on the image,
as long as those are outside the reserved namespace. Any keys
in the internal namespace will be dropped (and logged).
:param properties: A dict of properties to be merged in
"""
for key, value in properties.items():
if key.startswith(api_common.GLANCE_RESERVED_NS):
LOG.warning(('Dropping %(key)s=%(val)s during metadata '
'injection for %(image)s'),
{'key': key, 'val': value,
'image': self.image_id})
else:
self._image.extra_properties[key] = value
def remove_location_for_store(self, backend):
"""Remove a location from an image given a backend store.
Given a backend store, remove the corresponding location from the
image's set of locations. If the last location is removed, remove
the image checksum, hash information, and size.
:param backend: The backend store to remove from the image
"""
for i, location in enumerate(self._image.locations):
if location.get('metadata', {}).get('store') == backend:
try:
self._image.locations.pop(i)
except (store_exceptions.NotFound,
store_exceptions.Forbidden):
msg = (_("Error deleting from store %(store)s when "
"reverting.") % {'store': backend})
LOG.warning(msg)
                # NOTE(yebinama): Some store drivers don't document which
                # exceptions they raise.
except Exception:
msg = (_("Unexpected exception when deleting from store "
"%(store)s.") % {'store': backend})
LOG.warning(msg)
else:
if len(self._image.locations) == 0:
self._image.checksum = None
self._image.os_hash_algo = None
self._image.os_hash_value = None
self._image.size = None
break
def pop_extra_property(self, name):
"""Delete the named extra_properties value, if present.
If the image.extra_properties dict contains the named key,
delete it.
:param name: The key to delete.
"""
self._image.extra_properties.pop(name, None)
class _DeleteFromFS(task.Task):
def __init__(self, task_id, task_type):
self.task_id = task_id
self.task_type = task_type
super(_DeleteFromFS, self).__init__(
name='%s-DeleteFromFS-%s' % (task_type, task_id))
def execute(self, file_path):
"""Remove file from the backend
:param file_path: path to the file being deleted
"""
if CONF.enabled_backends:
try:
store_api.delete(file_path, 'os_glance_staging_store')
except store_api.exceptions.NotFound as e:
LOG.error(_("After upload to backend, deletion of staged "
"image data from %(fn)s has failed because "
"%(em)s"), {'fn': file_path,
'em': e.message})
else:
# TODO(abhishekk): After removal of backend module from
# glance_store need to change this to use multi_backend
# module.
file_path = file_path[7:]
if os.path.exists(file_path):
try:
LOG.debug(_("After upload to the backend, deleting staged "
"image data from %(fn)s"), {'fn': file_path})
os.unlink(file_path)
except OSError as e:
LOG.error(_("After upload to backend, deletion of staged "
"image data from %(fn)s has failed because "
"[Errno %(en)d]"), {'fn': file_path,
'en': e.errno})
else:
LOG.warning(_("After upload to backend, deletion of staged "
"image data has failed because "
"it cannot be found at %(fn)s"), {
'fn': file_path})
class _ImageLock(task.Task):
def __init__(self, task_id, task_type, action_wrapper):
self.task_id = task_id
self.task_type = task_type
self.action_wrapper = action_wrapper
super(_ImageLock, self).__init__(
name='%s-ImageLock-%s' % (task_type, task_id))
def execute(self):
self.action_wrapper.assert_task_lock()
LOG.debug('Image %(image)s import task %(task)s lock confirmed',
{'image': self.action_wrapper.image_id,
'task': self.task_id})
def revert(self, result, **kwargs):
"""Drop our claim on the image.
If we have failed, we need to drop our import_task lock on the image
so that something else can have a try. Note that we may have been
preempted so we should only drop *our* lock.
"""
try:
self.action_wrapper.drop_lock_for_task()
except exception.NotFound:
LOG.warning('Image %(image)s import task %(task)s lost its '
'lock during execution!',
{'image': self.action_wrapper.image_id,
'task': self.task_id})
else:
LOG.debug('Image %(image)s import task %(task)s dropped '
'its lock after failure',
{'image': self.action_wrapper.image_id,
'task': self.task_id})
class _VerifyStaging(task.Task):
# NOTE(jokke): This could be also for example "staging_path" but to
# keep this compatible with other flows we want to stay consistent
# with base_import
default_provides = 'file_path'
def __init__(self, task_id, task_type, task_repo, uri):
self.task_id = task_id
self.task_type = task_type
self.task_repo = task_repo
self.uri = uri
super(_VerifyStaging, self).__init__(
name='%s-ConfigureStaging-%s' % (task_type, task_id))
# NOTE(jokke): If we want to use other than 'file' store in the
# future, this is one thing that needs to change.
try:
uri.index('file:///', 0)
except ValueError:
            msg = (_("%(task_id)s of %(task_type)s not configured "
                     "properly. Value of node_staging_uri must be "
                     "in the format 'file://<absolute-path>'") %
{'task_id': self.task_id,
'task_type': self.task_type})
raise exception.BadTaskConfiguration(msg)
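        # Hedged example (not from the original source): an accepted setting
        # would look like node_staging_uri = file:///var/lib/glance/staging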
if not CONF.enabled_backends:
            # NOTE(jokke): We really don't need the store for anything, but
            # verifying that we can actually build the store allows us to
            # fail the flow early with a clear message about why it happens.
self._build_store()
def _build_store(self):
# TODO(abhishekk): After removal of backend module from glance_store
# need to change this to use multi_backend module.
# NOTE(jokke): If we want to use some other store for staging, we can
# implement the logic more general here. For now this should do.
# NOTE(flaper87): Due to the nice glance_store api (#sarcasm), we're
# forced to build our own config object, register the required options
# (and by required I mean *ALL* of them, even the ones we don't want),
# and create our own store instance by calling a private function.
# This is certainly unfortunate but it's the best we can do until the
# glance_store refactor is done. A good thing is that glance_store is
# under our team's management and it gates on Glance so changes to
# this API will (should?) break task's tests.
conf = cfg.ConfigOpts()
try:
backend.register_opts(conf)
except cfg.DuplicateOptError:
pass
conf.set_override('filesystem_store_datadir',
CONF.node_staging_uri[7:],
group='glance_store')
# NOTE(flaper87): Do not even try to judge me for this... :(
# With the glance_store refactor, this code will change, until
# that happens, we don't have a better option and this is the
# least worst one, IMHO.
store = backend._load_store(conf, 'file')
try:
store.configure()
except AttributeError:
msg = (_("%(task_id)s of %(task_type)s not configured "
"properly. Could not load the filesystem store") %
{'task_id': self.task_id, 'task_type': self.task_type})
raise exception.BadTaskConfiguration(msg)
def execute(self):
"""Test the backend store and return the 'file_path'"""
return self.uri
class _ImportToStore(task.Task):
def __init__(self, task_id, task_type, task_repo, action_wrapper, uri,
backend, all_stores_must_succeed, set_active):
self.task_id = task_id
self.task_type = task_type
self.task_repo = task_repo
self.action_wrapper = action_wrapper
self.uri = uri
self.backend = backend
self.all_stores_must_succeed = all_stores_must_succeed
self.set_active = set_active
self.last_status = 0
super(_ImportToStore, self).__init__(
name='%s-ImportToStore-%s' % (task_type, task_id))
    def execute(self, file_path=None):
        """Bring the imported image data into the backend store
:param file_path: path to the image file
"""
# NOTE(flaper87): Let's dance... and fall
#
# Unfortunately, because of the way our domain layers work and
# the checks done in the FS store, we can't simply rename the file
# and set the location. To do that, we'd have to duplicate the logic
# of every and each of the domain factories (quota, location, etc)
# and we'd also need to hack the FS store to prevent it from raising
# a "duplication path" error. I'd rather have this task copying the
# image bits one more time than duplicating all that logic.
#
# Since I don't think this should be the definitive solution, I'm
# leaving the code below as a reference for what should happen here
# once the FS store and domain code will be able to handle this case.
#
# if file_path is None:
# image_import.set_image_data(image, self.uri, None)
# return
# NOTE(flaper87): Don't assume the image was stored in the
# work_dir. Think in the case this path was provided by another task.
# Also, lets try to neither assume things nor create "logic"
# dependencies between this task and `_ImportToFS`
#
# base_path = os.path.dirname(file_path.split("file://")[-1])
# NOTE(flaper87): Hopefully just scenarios #3 and #4. I say
# hopefully because nothing prevents the user to use the same
# FS store path as a work dir
#
# image_path = os.path.join(base_path, image_id)
#
# if (base_path == CONF.glance_store.filesystem_store_datadir or
# base_path in CONF.glance_store.filesystem_store_datadirs):
# os.rename(file_path, image_path)
#
# image_import.set_image_data(image, image_path, None)
# NOTE(jokke): The different options here are kind of pointless as we
# will need the file path anyways for our delete workflow for now.
# For future proofing keeping this as is.
with self.action_wrapper as action:
self._execute(action, file_path)
def _execute(self, action, file_path):
self.last_status = timeutils.now()
if action.image_status == "deleted":
raise exception.ImportTaskError("Image has been deleted, aborting"
" import.")
try:
action.set_image_data(file_path or self.uri,
self.task_id, backend=self.backend,
set_active=self.set_active,
callback=self._status_callback)
# NOTE(yebinama): set_image_data catches Exception and raises from
# them. Can't be more specific on exceptions caught.
except Exception:
if self.all_stores_must_succeed:
raise
msg = (_("%(task_id)s of %(task_type)s failed but since "
"all_stores_must_succeed is set to false, continue.") %
{'task_id': self.task_id, 'task_type': self.task_type})
LOG.warning(msg)
if self.backend is not None:
action.add_failed_stores([self.backend])
if self.backend is not None:
action.remove_importing_stores([self.backend])
def _status_callback(self, action, chunk_bytes, total_bytes):
# NOTE(danms): Only log status every five minutes
if timeutils.now() - self.last_status > 300:
LOG.debug('Image import %(image_id)s copied %(copied)i MiB',
{'image_id': action.image_id,
'copied': total_bytes // units.Mi})
self.last_status = timeutils.now()
task = script_utils.get_task(self.task_repo, self.task_id)
if task is None:
LOG.error(
'Status callback for task %(task)s found no task object!',
{'task': self.task_id})
raise exception.TaskNotFound(self.task_id)
if task.status != 'processing':
            LOG.error('Task %(task)s expected "processing" status, '
                      'but found "%(status)s"; aborting.',
                      {'task': self.task_id, 'status': task.status})
raise exception.TaskAbortedError()
task.message = _('Copied %i MiB') % (total_bytes // units.Mi)
self.task_repo.save(task)
def revert(self, result, **kwargs):
"""
Remove location from image in case of failure
:param result: taskflow result object
"""
with self.action_wrapper as action:
action.remove_location_for_store(self.backend)
action.remove_importing_stores([self.backend])
if isinstance(result, taskflow.types.failure.Failure):
# We are the store that failed, so add us to the failed list
action.add_failed_stores([self.backend])
class _VerifyImageState(task.Task):
def __init__(self, task_id, task_type, action_wrapper, import_method):
self.task_id = task_id
self.task_type = task_type
self.action_wrapper = action_wrapper
self.import_method = import_method
super(_VerifyImageState, self).__init__(
name='%s-VerifyImageState-%s' % (task_type, task_id))
    def execute(self):
        """Verify that we have an active image"""
with self.action_wrapper as action:
if action.image_status != 'active':
raise _NoStoresSucceeded(_('None of the uploads finished!'))
def revert(self, result, **kwargs):
"""Set back to queued if this wasn't copy-image job."""
with self.action_wrapper as action:
if self.import_method != 'copy-image':
action.set_image_attribute(status='queued')
class _CompleteTask(task.Task):
def __init__(self, task_id, task_type, task_repo, action_wrapper):
self.task_id = task_id
self.task_type = task_type
self.task_repo = task_repo
self.action_wrapper = action_wrapper
super(_CompleteTask, self).__init__(
name='%s-CompleteTask-%s' % (task_type, task_id))
def _finish_task(self, task):
try:
task.succeed({'image_id': self.action_wrapper.image_id})
except Exception as e:
# Note: The message string contains Error in it to indicate
# in the task.message that it's a error message for the user.
# TODO(nikhil): need to bring back save_and_reraise_exception when
# necessary
log_msg = _LE("Task ID %(task_id)s failed. Error: %(exc_type)s: "
"%(e)s")
LOG.exception(log_msg, {'exc_type': str(type(e)),
'e': encodeutils.exception_to_unicode(e),
'task_id': task.task_id})
err_msg = _("Error: %(exc_type)s: %(e)s")
task.fail(err_msg % {'exc_type': str(type(e)),
'e': encodeutils.exception_to_unicode(e)})
finally:
self.task_repo.save(task)
def _drop_lock(self):
try:
self.action_wrapper.drop_lock_for_task()
except exception.NotFound:
# NOTE(danms): This would be really bad, but there is probably
# not much point in reverting all the way back if we got this
# far. Log the carnage for forensics.
LOG.error('Image %(image)s import task %(task)s did not hold the '
'lock upon completion!',
{'image': self.action_wrapper.image_id,
'task': self.task_id})
    def execute(self):
        """Finish the task flow"""
task = script_utils.get_task(self.task_repo, self.task_id)
if task is not None:
self._finish_task(task)
self._drop_lock()
LOG.info(_LI("%(task_id)s of %(task_type)s completed"),
{'task_id': self.task_id, 'task_type': self.task_type})
def assert_quota(context, task_repo, task_id, stores,
action_wrapper, enforce_quota_fn,
**enforce_kwargs):
try:
enforce_quota_fn(context, context.owner, **enforce_kwargs)
except exception.LimitExceeded as e:
with excutils.save_and_reraise_exception():
with action_wrapper as action:
action.remove_importing_stores(stores)
if action.image_status == 'importing':
action.set_image_attribute(status='queued')
action_wrapper.drop_lock_for_task()
task = script_utils.get_task(task_repo, task_id)
if task is None:
LOG.error(_LE('Failed to find task %r to update after '
'quota failure'), task_id)
else:
task.fail(str(e))
task_repo.save(task)
def get_flow(**kwargs):
    """Return task flow
    :param task_id: Task ID
    :param task_type: Type of the task
    :param task_repo: Task repo
    :param image_repo: Image repository used
    :param admin_repo: Optional admin-capable image repository
    :param image_id: ID of the Image to be processed
    :param import_req: The import request, carrying the import method name,
                       an optional uri for the image file and the
                       all_stores_must_succeed flag
    :param backend: Optional list of store names to import into
    :param context: The request context used for quota enforcement
    """
task_id = kwargs.get('task_id')
task_type = kwargs.get('task_type')
task_repo = kwargs.get('task_repo')
image_repo = kwargs.get('image_repo')
admin_repo = kwargs.get('admin_repo')
image_id = kwargs.get('image_id')
import_method = kwargs.get('import_req')['method']['name']
uri = kwargs.get('import_req')['method'].get('uri')
stores = kwargs.get('backend', [None])
all_stores_must_succeed = kwargs.get('import_req').get(
'all_stores_must_succeed', True)
separator = ''
if not CONF.enabled_backends and not CONF.node_staging_uri.endswith('/'):
separator = '/'
# Instantiate an action wrapper with the admin repo if we got one,
# otherwise with the regular repo.
action_wrapper = ImportActionWrapper(admin_repo or image_repo, image_id,
task_id)
kwargs['action_wrapper'] = action_wrapper
if not uri and import_method in ['glance-direct', 'copy-image']:
if CONF.enabled_backends:
separator, staging_dir = store_utils.get_dir_separator()
uri = separator.join((staging_dir, str(image_id)))
else:
uri = separator.join((CONF.node_staging_uri, str(image_id)))
flow = lf.Flow(task_type, retry=retry.AlwaysRevert())
flow.add(_ImageLock(task_id, task_type, action_wrapper))
if import_method in ['web-download', 'copy-image']:
internal_plugin = internal_plugins.get_import_plugin(**kwargs)
flow.add(internal_plugin)
if CONF.enabled_backends:
separator, staging_dir = store_utils.get_dir_separator()
file_uri = separator.join((staging_dir, str(image_id)))
else:
file_uri = separator.join((CONF.node_staging_uri, str(image_id)))
else:
file_uri = uri
flow.add(_VerifyStaging(task_id, task_type, task_repo, file_uri))
# Note(jokke): The plugins were designed to act on the image data or
# metadata during the import process before the image goes active. It
# does not make sense to try to execute them during 'copy-image'.
if import_method != 'copy-image':
for plugin in import_plugins.get_import_plugins(**kwargs):
flow.add(plugin)
else:
LOG.debug("Skipping plugins on 'copy-image' job.")
for idx, store in enumerate(stores, 1):
set_active = (not all_stores_must_succeed) or (idx == len(stores))
if import_method == 'copy-image':
set_active = False
task_name = task_type + "-" + (store or "")
import_task = lf.Flow(task_name)
import_to_store = _ImportToStore(task_id,
task_name,
task_repo,
action_wrapper,
file_uri,
store,
all_stores_must_succeed,
set_active)
import_task.add(import_to_store)
flow.add(import_task)
delete_task = lf.Flow(task_type).add(_DeleteFromFS(task_id, task_type))
flow.add(delete_task)
verify_task = _VerifyImageState(task_id,
task_type,
action_wrapper,
import_method)
flow.add(verify_task)
complete_task = _CompleteTask(task_id,
task_type,
task_repo,
action_wrapper)
flow.add(complete_task)
with action_wrapper as action:
if import_method != 'copy-image':
action.set_image_attribute(status='importing')
image_size = (action.image_size or 0) // units.Mi
action.add_importing_stores(stores)
action.remove_failed_stores(stores)
action.pop_extra_property('os_glance_stage_host')
# After we have marked the image as intended, check quota to make
# sure we are not over a limit, otherwise we roll back.
if import_method == 'glance-direct':
# We know the size of the image in staging, so we can check
# against available image_size_total quota.
assert_quota(kwargs['context'], task_repo, task_id,
stores, action_wrapper,
ks_quota.enforce_image_size_total,
delta=image_size)
elif import_method in ('copy-image', 'web-download'):
# The copy-image and web-download methods will use staging space to
# do their work, so check that quota.
assert_quota(kwargs['context'], task_repo, task_id,
stores, action_wrapper,
ks_quota.enforce_image_staging_total,
delta=image_size)
assert_quota(kwargs['context'], task_repo, task_id,
stores, action_wrapper,
ks_quota.enforce_image_count_uploading)
return flow
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
class Order(resource.Resource):
support_status = support.SupportStatus(version='2014.2')
default_client_name = 'barbican'
PROPERTIES = (
NAME, PAYLOAD_CONTENT_TYPE, MODE, EXPIRATION,
ALGORITHM, BIT_LENGTH, TYPE, REQUEST_TYPE, SUBJECT_DN,
SOURCE_CONTAINER_REF, CA_ID, PROFILE, REQUEST_DATA,
PASS_PHRASE
) = (
'name', 'payload_content_type', 'mode', 'expiration',
'algorithm', 'bit_length', 'type', 'request_type', 'subject_dn',
'source_container_ref', 'ca_id', 'profile', 'request_data',
'pass_phrase'
)
ATTRIBUTES = (
STATUS, ORDER_REF, SECRET_REF, PUBLIC_KEY, PRIVATE_KEY,
CERTIFICATE, INTERMEDIATES, CONTAINER_REF
) = (
'status', 'order_ref', 'secret_ref', 'public_key', 'private_key',
'certificate', 'intermediates', 'container_ref'
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Human readable name for the secret.'),
),
PAYLOAD_CONTENT_TYPE: properties.Schema(
properties.Schema.STRING,
_('The type/format the secret data is provided in.'),
),
EXPIRATION: properties.Schema(
properties.Schema.STRING,
_('The expiration date for the secret in ISO-8601 format.'),
constraints=[
constraints.CustomConstraint('iso_8601'),
],
),
ALGORITHM: properties.Schema(
properties.Schema.STRING,
_('The algorithm type used to generate the secret.'),
),
BIT_LENGTH: properties.Schema(
properties.Schema.INTEGER,
_('The bit-length of the secret.'),
),
MODE: properties.Schema(
properties.Schema.STRING,
_('The type/mode of the algorithm associated with the secret '
'information.'),
),
TYPE: properties.Schema(
properties.Schema.STRING,
_('The type of the order.'),
constraints=[
constraints.AllowedValues([
'key', 'asymmetric', 'certificate'
]),
],
support_status=support.SupportStatus(version='5.0.0'),
),
REQUEST_TYPE: properties.Schema(
properties.Schema.STRING,
_('The type of the certificate request.'),
support_status=support.SupportStatus(version='5.0.0'),
),
SUBJECT_DN: properties.Schema(
properties.Schema.STRING,
_('The subject of the certificate request.'),
support_status=support.SupportStatus(version='5.0.0'),
),
SOURCE_CONTAINER_REF: properties.Schema(
properties.Schema.STRING,
            _('The source of the certificate request.'),
support_status=support.SupportStatus(version='5.0.0'),
),
CA_ID: properties.Schema(
properties.Schema.STRING,
_('The identifier of the CA to use.'),
support_status=support.SupportStatus(version='5.0.0'),
),
PROFILE: properties.Schema(
properties.Schema.STRING,
_('The profile of certificate to use.'),
support_status=support.SupportStatus(version='5.0.0'),
),
REQUEST_DATA: properties.Schema(
properties.Schema.STRING,
_('The content of the CSR.'),
support_status=support.SupportStatus(version='5.0.0'),
),
PASS_PHRASE: properties.Schema(
properties.Schema.STRING,
            _('The passphrase of the created key.'),
support_status=support.SupportStatus(version='5.0.0'),
),
}
attributes_schema = {
STATUS: attributes.Schema(
_('The status of the order.'),
type=attributes.Schema.STRING
),
ORDER_REF: attributes.Schema(
_('The URI to the order.'),
type=attributes.Schema.STRING
),
SECRET_REF: attributes.Schema(
_('The URI to the created secret.'),
type=attributes.Schema.STRING
),
CONTAINER_REF: attributes.Schema(
_('The URI to the created container.'),
support_status=support.SupportStatus(version='5.0.0'),
type=attributes.Schema.STRING
),
PUBLIC_KEY: attributes.Schema(
_('The payload of the created public key, if available.'),
support_status=support.SupportStatus(version='5.0.0'),
type=attributes.Schema.STRING
),
PRIVATE_KEY: attributes.Schema(
_('The payload of the created private key, if available.'),
support_status=support.SupportStatus(version='5.0.0'),
type=attributes.Schema.STRING
),
CERTIFICATE: attributes.Schema(
_('The payload of the created certificate, if available.'),
support_status=support.SupportStatus(version='5.0.0'),
type=attributes.Schema.STRING
),
INTERMEDIATES: attributes.Schema(
_('The payload of the created intermediates, if available.'),
support_status=support.SupportStatus(version='5.0.0'),
type=attributes.Schema.STRING
),
}
def handle_create(self):
info = dict((k, v) for k, v in self.properties.items()
if v is not None)
order = self.client().orders.create(**info)
order_ref = order.submit()
self.resource_id_set(order_ref)
        # NOTE(pshchelo): order_ref is a HATEOAS reference, i.e. a string,
        # so it does not need to be fixed; see LP bug #1393268
return order_ref
def check_create_complete(self, order_href):
order = self.client().orders.get(order_href)
if order.status == 'ERROR':
reason = order.error_reason
code = order.error_status_code
msg = (_("Order '%(name)s' failed: %(code)s - %(reason)s")
% {'name': self.name, 'code': code, 'reason': reason})
raise exception.Error(msg)
return order.status == 'ACTIVE'
def handle_delete(self):
if not self.resource_id:
return
client = self.client()
try:
client.orders.delete(self.resource_id)
except Exception as exc:
# This is the only exception the client raises
# Inspecting the message to see if it's a 'Not Found'
if 'Not Found' not in six.text_type(exc):
raise
def _resolve_attribute(self, name):
client = self.client()
order = client.orders.get(self.resource_id)
if name in (
self.PUBLIC_KEY, self.PRIVATE_KEY, self.CERTIFICATE,
self.INTERMEDIATES):
container = client.containers.get(order.container_ref)
secret = getattr(container, name)
return secret.payload
return getattr(order, name)
def resource_mapping():
return {
'OS::Barbican::Order': Order,
}
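# Hedged usage sketch (not part of the original file): in a Heat template the
# resource registered above would be declared roughly as below, using only
# properties defined in properties_schema; the concrete values are invented.
#
#     resources:
#       key_order:
#         type: OS::Barbican::Order
#         properties:
#           name: my-key-order
#           type: key
#           algorithm: aes
#           bit_length: 256
#           mode: cbc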
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from collections import defaultdict
from twitter.common.collections import OrderedSet
class RootedProducts(object):
"""Products of a build that have a concept of a 'root' directory.
E.g., classfiles, under a root package directory."""
def __init__(self, root):
self._root = root
self._rel_paths = OrderedSet()
def add_abs_paths(self, abs_paths):
for abs_path in abs_paths:
if not abs_path.startswith(self._root):
raise Exception('%s is not under %s' % (abs_path, self._root))
self._rel_paths.add(os.path.relpath(abs_path, self._root))
def add_rel_paths(self, rel_paths):
self._rel_paths.update(rel_paths)
def root(self):
return self._root
def rel_paths(self):
return self._rel_paths
def abs_paths(self):
for relpath in self._rel_paths:
yield os.path.join(self._root, relpath)
class MultipleRootedProducts(object):
"""A product consisting of multiple roots, with associated products."""
def __init__(self):
self._rooted_products_by_root = {}
def add_rel_paths(self, root, rel_paths):
self._get_products_for_root(root).add_rel_paths(rel_paths)
def add_abs_paths(self, root, abs_paths):
self._get_products_for_root(root).add_abs_paths(abs_paths)
def rel_paths(self):
for root, products in self._rooted_products_by_root.items():
yield root, products.rel_paths()
def abs_paths(self):
for root, products in self._rooted_products_by_root.items():
yield root, products.abs_paths()
def _get_products_for_root(self, root):
return self._rooted_products_by_root.setdefault(root, RootedProducts(root))
class Products(object):
"""An out-of-band 'dropbox' where tasks can place build product information for later tasks to use.
Historically, the only type of product was a ProductMapping. However this had some issues, as not
all products fit into the (basedir, [files-under-basedir]) paradigm. Also, ProductMapping docs
and varnames refer to targets, and implicitly expect the mappings to be keyed by a target, however
we sometimes also need to map sources to products.
So in practice we ended up abusing this in several ways:
1) Using fake basedirs when we didn't have a basedir concept.
2) Using objects other than strings as 'product paths' when we had a need to.
3) Using things other than targets as keys.
Right now this class is in an intermediate stage, as we transition to a more robust Products concept.
The abuses have been switched to use 'data_products' (see below) which is just a dictionary
of product type (e.g., 'classes_by_target') to arbitrary payload. That payload can be anything,
but the MultipleRootedProducts class is useful for products that do happen to fit into the
(basedir, [files-under-basedir]) paradigm.
The long-term future of Products is TBD. But we do want to make it easier to reason about
which tasks produce which products and which tasks consume them. Currently it's quite difficult
to match up 'requires' calls to the producers of those requirements, especially when the 'typename'
is in a variable, not a literal.
"""
class ProductMapping(object):
"""Maps products of a given type by target. Each product is a map from basedir to a list of
files in that dir.
"""
def __init__(self, typename):
self.typename = typename
self.by_target = defaultdict(lambda: defaultdict(list))
def empty(self):
return len(self.by_target) == 0
def add(self, target, basedir, product_paths=None):
"""
Adds a mapping of products for the given target, basedir pair.
      If product_paths are specified, they are appended to the existing mapping for this
      target and basedir.
If product_paths is omitted, the current mutable list of mapped products for this target
and basedir is returned for appending.
"""
if product_paths is not None:
self.by_target[target][basedir].extend(product_paths)
else:
return self.by_target[target][basedir]
def has(self, target):
"""Returns whether we have a mapping for the specified target."""
return target in self.by_target
def get(self, target):
"""
Returns the product mapping for the given target as a tuple of (basedir, products list).
Can return None if there is no mapping for the given target.
"""
return self.by_target.get(target)
def __getitem__(self, target):
"""
Support for subscripting into this mapping. Returns the product mapping for the given target
as a map of <basedir> -> <products list>.
If no mapping exists, returns an empty map whose values default to empty lists. So you
can use the result without checking for None.
"""
return self.by_target[target]
def itermappings(self):
"""
Returns an iterable over all pairs (target, product) in this mapping.
Each product is itself a map of <basedir> -> <products list>.
"""
return self.by_target.iteritems()
def keys_for(self, basedir, product):
"""Returns the set of keys the given mapped product is registered under."""
keys = set()
for key, mappings in self.by_target.items():
for mapped in mappings.get(basedir, []):
if product == mapped:
keys.add(key)
break
return keys
def __repr__(self):
return 'ProductMapping(%s) {\n %s\n}' % (self.typename, '\n '.join(
'%s => %s\n %s' % (str(target), basedir, outputs)
for target, outputs_by_basedir in self.by_target.items()
for basedir, outputs in outputs_by_basedir.items()))
def __init__(self):
self.products = {} # type -> ProductMapping instance.
self.predicates_for_type = defaultdict(list)
self.data_products = {} # type -> arbitrary object.
self.required_data_products = set()
  def require(self, typename, predicate=None):
    """Registers a requirement that file products of the given type be mapped.
If target predicates are supplied, only targets matching at least one of the predicates are
mapped.
"""
# TODO(John Sirois): This is a broken API. If one client does a require with no predicate and
# another requires with a predicate, the producer will only produce for the latter. The former
# presumably intended to have all products of this type mapped. Kill the predicate portion of
# the api by moving to the new tuple-based engine where all tasks require data for a specific
# set of targets.
self.predicates_for_type[typename].append(predicate or (lambda target: False))
def isrequired(self, typename):
"""Returns a predicate selecting targets required for the given type if mappings are required.
Otherwise returns None.
"""
predicates = self.predicates_for_type[typename]
if not predicates:
return None
def combine(first, second):
return lambda target: first(target) or second(target)
return reduce(combine, predicates, lambda target: False)
def get(self, typename):
"""Returns a ProductMapping for the given type name."""
return self.products.setdefault(typename, Products.ProductMapping(typename))
  def require_data(self, typename):
    """ Registers a requirement that a data product of the given type be produced.
typename: the name of a data product that should be generated.
"""
self.required_data_products.add(typename)
def is_required_data(self, typename):
""" Checks if a particular data product is required by any tasks."""
return typename in self.required_data_products
def safe_create_data(self, typename, init_func):
"""Ensures that a data item is created if it doesn't already exist."""
# Basically just an alias for readability.
self.get_data(typename, init_func)
def get_data(self, typename, init_func=None):
""" Returns a data product.
If the product isn't found, returns None, unless init_func is set, in which case the product's
value is set to the return value of init_func(), and returned."""
if typename not in self.data_products:
if not init_func:
return None
self.data_products[typename] = init_func()
return self.data_products.get(typename)
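# Hedged, illustrative sketch (not part of the original module) showing how
# the pieces above compose; the type names, target key and paths are invented
# for the example and carry no special meaning.
def _example_products_usage():
  products = Products()
  # File-style products keyed by a (hypothetical) target key.
  mapping = products.get('classes')
  mapping.add('example-target', '/tmp/out').append('com/foo/Bar.class')
  # Root-relative data products stored under an arbitrary data product name.
  by_root = products.get_data('classes_by_target', init_func=MultipleRootedProducts)
  by_root.add_rel_paths('/tmp/out/classes', ['com/foo/Bar.class'])
  for root, rel_paths in by_root.rel_paths():
    print(root, list(rel_paths))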
|
|
#!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
import sys
from settings import *
OUTPUT_DIR = path.join(PROJECT_DIR, 'build', 'tests')
parser = argparse.ArgumentParser()
parser.add_argument('--toolchain', action='store', default='', help='Add toolchain file')
parser.add_argument('--buildoptions', action='store', default='', help='Add a comma separated list of extra build options to each test')
parser.add_argument('--outdir', action='store', default=OUTPUT_DIR, help='Specify output directory (default: %(default)s)')
parser.add_argument('--check-signed-off', action='store_true', default=False, help='Run signed-off check')
parser.add_argument('--check-signed-off-tolerant', action='store_true', default=False, help='Run signed-off check in tolerant mode')
parser.add_argument('--check-signed-off-travis', action='store_true', default=False, help='Run signed-off check in tolerant mode if on Travis CI and not checking a pull request')
parser.add_argument('--check-cppcheck', action='store_true', default=False, help='Run cppcheck')
parser.add_argument('--check-doxygen', action='store_true', default=False, help='Run doxygen')
parser.add_argument('--check-vera', action='store_true', default=False, help='Run vera check')
parser.add_argument('--check-license', action='store_true', default=False, help='Run license check')
parser.add_argument('--buildoption-test', action='store_true', default=False, help='Run buildoption-test')
parser.add_argument('--jerry-debugger', action='store_true', default=False, help='Run jerry-debugger tests')
parser.add_argument('--jerry-tests', action='store_true', default=False, help='Run jerry-tests')
parser.add_argument('--jerry-test-suite', action='store_true', default=False, help='Run jerry-test-suite')
parser.add_argument('--unittests', action='store_true', default=False, help='Run unittests')
parser.add_argument('--precommit', action='store_true', default=False, dest='all', help='Run all tests')
parser.add_argument('--test262', action='store_true', default=False, help='Run test262')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
script_args = parser.parse_args()
if path.isabs(script_args.outdir):
OUTPUT_DIR = script_args.outdir
else:
OUTPUT_DIR = path.join(PROJECT_DIR, script_args.outdir)
class Options:
def __init__(self, name = '', build_args = None, test_args = None):
if build_args is None:
build_args = []
if test_args is None:
test_args = []
self.out_dir = path.join(OUTPUT_DIR, name)
self.build_args = build_args
self.build_args.append('--builddir=%s' % self.out_dir)
self.test_args = test_args
# Test options for unittests
jerry_unittests_options = [
Options('unittests', ['--unittests', '--error-messages=on', '--snapshot-save=on', '--snapshot-exec=on']),
Options('unittests-debug', ['--unittests', '--debug', '--error-messages=on', '--snapshot-save=on', '--snapshot-exec=on']),
]
# Test options for jerry-tests
jerry_tests_options = [
Options('jerry_tests'),
Options('jerry_tests-debug', ['--debug']),
Options('jerry_tests-debug', ['--debug', '--cpointer-32bit=on', '--mem-heap=1024']),
Options('jerry_tests-snapshot', ['--snapshot-save=on', '--snapshot-exec=on'], ['--snapshot']),
Options('jerry_tests-debug-snapshot', ['--debug', '--snapshot-save=on', '--snapshot-exec=on'], ['--snapshot']),
]
# Test options for jerry-test-suite
jerry_test_suite_options = jerry_tests_options[:]
jerry_test_suite_options.append(Options('jerry_test_suite-minimal', ['--profile=minimal']))
jerry_test_suite_options.append(Options('jerry_test_suite-minimal-snapshot', ['--profile=minimal', '--snapshot-save=on', '--snapshot-exec=on'], ['--snapshot']))
jerry_test_suite_options.append(Options('jerry_test_suite-minimal-debug', ['--debug', '--profile=minimal']))
jerry_test_suite_options.append(Options('jerry_test_suite-minimal-debug-snapshot', ['--debug', '--profile=minimal', '--snapshot-save=on', '--snapshot-exec=on'], ['--snapshot']))
jerry_test_suite_options.append(Options('jerry_test_suite-es2015-subset', ['--profile=es2015-subset']))
jerry_test_suite_options.append(Options('jerry_test_suite-es2015-subset-snapshot', ['--profile=es2015-subset', '--snapshot-save=on', '--snapshot-exec=on'], ['--snapshot']))
jerry_test_suite_options.append(Options('jerry_test_suite-es2015-subset-debug', ['--debug', '--profile=es2015-subset']))
jerry_test_suite_options.append(Options('jerry_test_suite-es2015-subset-debug-snapshot', ['--debug', '--profile=es2015-subset', '--snapshot-save=on', '--snapshot-exec=on'], ['--snapshot']))
# Test options for test262
test262_test_suite_options = [
Options('test262_tests'),
]
# Test options for jerry-debugger
debugger_test_options = [
Options('jerry_debugger_tests', ['--debug', '--jerry-debugger=on', '--jerry-libc=off']),
]
# Test options for buildoption-test
jerry_buildoptions = [
Options('buildoption_test-lto', ['--lto=on']),
Options('buildoption_test-error_messages', ['--error-messages=on']),
Options('buildoption_test-all_in_one', ['--all-in-one=on']),
Options('buildoption_test-valgrind', ['--valgrind=on']),
Options('buildoption_test-valgrind_freya', ['--valgrind-freya=on']),
Options('buildoption_test-mem_stats', ['--mem-stats=on']),
Options('buildoption_test-show_opcodes', ['--show-opcodes=on']),
Options('buildoption_test-show_regexp_opcodes', ['--show-regexp-opcodes=on']),
Options('buildoption_test-compiler_default_libc', ['--jerry-libc=off']),
Options('buildoption_test-cpointer_32bit', ['--jerry-libc=off', '--compile-flag=-m32', '--cpointer-32bit=on', '--system-allocator=on']),
]
def get_bin_dir_path(out_dir):
return path.join(out_dir, 'bin')
def get_binary_path(out_dir):
return path.join(get_bin_dir_path(out_dir), 'jerry')
def create_binary(buildoptions):
build_cmd = [BUILD_SCRIPT]
build_cmd.extend(buildoptions)
if script_args.toolchain:
build_cmd.append('--toolchain=%s' % script_args.toolchain)
if script_args.buildoptions:
build_cmd.extend(script_args.buildoptions.split(','))
sys.stderr.write('Build command: %s\n' % ' '.join(build_cmd))
try:
script_output = subprocess.check_output(build_cmd)
except subprocess.CalledProcessError as e:
return e.returncode
return 0
def run_check(runnable):
sys.stderr.write('Test command: %s\n' % ' '.join(runnable))
try:
ret = subprocess.check_call(runnable)
except subprocess.CalledProcessError as e:
return e.returncode
return ret
def run_jerry_debugger_tests():
ret_build = ret_test = 0
for job in debugger_test_options:
ret_build = create_binary(job.build_args)
if ret_build:
break
for file in os.listdir(DEBUGGER_TESTS_DIR):
if file.endswith(".js"):
test_case, _ = os.path.splitext(file)
test_case_path = os.path.join (DEBUGGER_TESTS_DIR, test_case)
test_cmd = [
DEBUGGER_TEST_RUNNER_SCRIPT,
get_binary_path(job.out_dir),
DEBUGGER_CLIENT_SCRIPT,
os.path.relpath (test_case_path, PROJECT_DIR),
]
if job.test_args:
test_cmd.extend(job.test_args)
ret_test |= run_check(test_cmd)
return ret_build | ret_test
def run_jerry_tests():
ret_build = ret_test = 0
for job in jerry_tests_options:
ret_build = create_binary(job.build_args)
if ret_build:
break
test_cmd = [TEST_RUNNER_SCRIPT, get_binary_path(job.out_dir), JERRY_TESTS_DIR]
if job.test_args:
test_cmd.extend(job.test_args)
ret_test |= run_check(test_cmd)
return ret_build | ret_test
def run_jerry_test_suite():
ret_build = ret_test = 0
for job in jerry_test_suite_options:
ret_build = create_binary(job.build_args)
if ret_build:
break
test_cmd = [TEST_RUNNER_SCRIPT, get_binary_path(job.out_dir)]
if '--profile=minimal' in job.build_args:
test_cmd.append(JERRY_TEST_SUITE_MINIMAL_LIST)
elif '--profile=es2015-subset' in job.build_args:
test_cmd.append(JERRY_TEST_SUITE_DIR)
else:
test_cmd.append(JERRY_TEST_SUITE_ES51_LIST)
if job.test_args:
test_cmd.extend(job.test_args)
ret_test |= run_check(test_cmd)
return ret_build | ret_test
def run_test262_test_suite():
ret_build = ret_test = 0
for job in test262_test_suite_options:
ret_build = create_binary(job.build_args)
if ret_build:
break
test_cmd = [TEST262_RUNNER_SCRIPT, get_binary_path(job.out_dir), TEST262_TEST_SUITE_DIR]
if job.test_args:
test_cmd.extend(job.test_args)
ret_test |= run_check(test_cmd)
return ret_build | ret_test
def run_unittests():
ret_build = ret_test = 0
for job in jerry_unittests_options:
ret_build = create_binary(job.build_args)
if ret_build:
break
ret_test |= run_check([UNITTEST_RUNNER_SCRIPT, get_bin_dir_path(job.out_dir)])
return ret_build | ret_test
def run_buildoption_test():
    ret = 0
    for job in jerry_buildoptions:
ret = create_binary(job.build_args)
if ret:
break
return ret
def main():
ret = 0
if script_args.check_signed_off_tolerant:
ret = run_check([SIGNED_OFF_SCRIPT, '--tolerant'])
if not ret and script_args.check_signed_off_travis:
ret = run_check([SIGNED_OFF_SCRIPT, '--travis'])
if not ret and (script_args.all or script_args.check_signed_off):
ret = run_check([SIGNED_OFF_SCRIPT])
if not ret and (script_args.all or script_args.check_cppcheck):
ret = run_check([CPPCHECK_SCRIPT])
if not ret and (script_args.all or script_args.check_doxygen):
ret = run_check([DOXYGEN_SCRIPT])
if not ret and (script_args.all or script_args.check_vera):
ret = run_check([VERA_SCRIPT])
if not ret and (script_args.all or script_args.check_license):
ret = run_check([LICENSE_SCRIPT])
if not ret and (script_args.all or script_args.jerry_debugger):
ret = run_jerry_debugger_tests()
if not ret and (script_args.all or script_args.jerry_tests):
ret = run_jerry_tests()
if not ret and (script_args.all or script_args.jerry_test_suite):
ret = run_jerry_test_suite()
if not ret and (script_args.all or script_args.test262):
ret = run_test262_test_suite()
if not ret and (script_args.all or script_args.unittests):
ret = run_unittests()
if not ret and (script_args.all or script_args.buildoption_test):
ret = run_buildoption_test()
sys.exit(ret)
if __name__ == "__main__":
main()
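# Hedged usage note (not part of the original script): based on the argparse
# options above, typical invocations of this script are assumed to look like
#
#     <this-script> --unittests
#     <this-script> --jerry-tests --buildoptions=--lto=on
#
# with --toolchain and --outdir available to redirect the build.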
|
|
from __future__ import unicode_literals
from __future__ import absolute_import
from . import util
from . import odict
from . import inlinepatterns
def build_treeprocessors(md_instance, **kwargs):
""" Build the default treeprocessors for Markdown. """
treeprocessors = odict.OrderedDict()
treeprocessors["inline"] = InlineProcessor(md_instance)
treeprocessors["prettify"] = PrettifyTreeprocessor(md_instance)
return treeprocessors
def isString(s):
    """ Check if it's a plain string (and not an AtomicString) """
if not isinstance(s, util.AtomicString):
return isinstance(s, util.string_type)
return False
class Treeprocessor(util.Processor):
"""
Treeprocessors are run on the ElementTree object before serialization.
Each Treeprocessor implements a "run" method that takes a pointer to an
ElementTree, modifies it as necessary and returns an ElementTree
object.
Treeprocessors must extend markdown.Treeprocessor.
"""
def run(self, root):
"""
Subclasses of Treeprocessor should implement a `run` method, which
takes a root ElementTree. This method can return another ElementTree
object, and the existing root ElementTree will be replaced, or it can
modify the current tree and return None.
"""
pass # pragma: no cover
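# Hedged illustration (not part of this module): a minimal Treeprocessor
# subclass honouring the contract described above might look like
#
#     class UpperCaseTreeprocessor(Treeprocessor):
#         def run(self, root):
#             for el in root.iter():
#                 if el.text and not isinstance(el.text, util.AtomicString):
#                     el.text = el.text.upper()
#             # returning None keeps the existing root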
class InlineProcessor(Treeprocessor):
"""
A Treeprocessor that traverses a tree, applying inline patterns.
"""
def __init__(self, md):
self.__placeholder_prefix = util.INLINE_PLACEHOLDER_PREFIX
self.__placeholder_suffix = util.ETX
self.__placeholder_length = 4 + len(self.__placeholder_prefix) \
+ len(self.__placeholder_suffix)
self.__placeholder_re = util.INLINE_PLACEHOLDER_RE
self.markdown = md
self.inlinePatterns = md.inlinePatterns
def __makePlaceholder(self, type):
""" Generate a placeholder """
id = "%04d" % len(self.stashed_nodes)
hash = util.INLINE_PLACEHOLDER % id
return hash, id
def __findPlaceholder(self, data, index):
"""
Extract id from data string, start from index
Keyword arguments:
* data: string
* index: index, from which we start search
Returns: placeholder id and string index, after the found placeholder.
"""
m = self.__placeholder_re.search(data, index)
if m:
return m.group(1), m.end()
else:
return None, index + 1
def __stashNode(self, node, type):
""" Add node to stash """
placeholder, id = self.__makePlaceholder(type)
self.stashed_nodes[id] = node
return placeholder
def __handleInline(self, data, patternIndex=0):
"""
Process string with inline patterns and replace it
with placeholders
Keyword arguments:
* data: A line of Markdown text
* patternIndex: The index of the inlinePattern to start with
Returns: String with placeholders.
"""
if not isinstance(data, util.AtomicString):
startIndex = 0
while patternIndex < len(self.inlinePatterns):
data, matched, startIndex = self.__applyPattern(
self.inlinePatterns.value_for_index(patternIndex),
data, patternIndex, startIndex)
if not matched:
patternIndex += 1
return data
def __processElementText(self, node, subnode, isText=True):
"""
Process placeholders in Element.text or Element.tail
of Elements popped from self.stashed_nodes.
Keywords arguments:
* node: parent node
* subnode: processing node
* isText: bool variable, True - it's text, False - it's tail
Returns: None
"""
if isText:
text = subnode.text
subnode.text = None
else:
text = subnode.tail
subnode.tail = None
childResult = self.__processPlaceholders(text, subnode, isText)
if not isText and node is not subnode:
pos = list(node).index(subnode) + 1
else:
pos = 0
childResult.reverse()
for newChild in childResult:
node.insert(pos, newChild)
def __processPlaceholders(self, data, parent, isText=True):
"""
Process string with placeholders and generate ElementTree tree.
Keyword arguments:
* data: string with placeholders instead of ElementTree elements.
* parent: Element, which contains processing inline data
Returns: list with ElementTree elements with applied inline patterns.
"""
def linkText(text):
if text:
if result:
if result[-1].tail:
result[-1].tail += text
else:
result[-1].tail = text
elif not isText:
if parent.tail:
parent.tail += text
else:
parent.tail = text
else:
if parent.text:
parent.text += text
else:
parent.text = text
result = []
strartIndex = 0
while data:
index = data.find(self.__placeholder_prefix, strartIndex)
if index != -1:
id, phEndIndex = self.__findPlaceholder(data, index)
if id in self.stashed_nodes:
node = self.stashed_nodes.get(id)
if index > 0:
text = data[strartIndex:index]
linkText(text)
if not isString(node): # it's Element
for child in [node] + list(node):
if child.tail:
if child.tail.strip():
self.__processElementText(
node, child, False
)
if child.text:
if child.text.strip():
self.__processElementText(child, child)
else: # it's just a string
linkText(node)
strartIndex = phEndIndex
continue
strartIndex = phEndIndex
result.append(node)
else: # wrong placeholder
end = index + len(self.__placeholder_prefix)
linkText(data[strartIndex:end])
strartIndex = end
else:
text = data[strartIndex:]
if isinstance(data, util.AtomicString):
                    # We don't want to lose the AtomicString
text = util.AtomicString(text)
linkText(text)
data = ""
return result
def __applyPattern(self, pattern, data, patternIndex, startIndex=0):
"""
Check if the line fits the pattern, create the necessary
elements, add it to stashed_nodes.
Keyword arguments:
* data: the text to be processed
* pattern: the pattern to be checked
* patternIndex: index of current pattern
* startIndex: string index, from which we start searching
Returns: String with placeholders instead of ElementTree elements.
"""
match = pattern.getCompiledRegExp().match(data[startIndex:])
leftData = data[:startIndex]
if not match:
return data, False, 0
node = pattern.handleMatch(match)
if node is None:
return data, True, len(leftData)+match.span(len(match.groups()))[0]
if not isString(node):
if not isinstance(node.text, util.AtomicString):
# We need to process current node too
for child in [node] + list(node):
if not isString(node):
if child.text:
child.text = self.__handleInline(
child.text, patternIndex + 1
)
if child.tail:
child.tail = self.__handleInline(
child.tail, patternIndex
)
placeholder = self.__stashNode(node, pattern.type())
return "%s%s%s%s" % (leftData,
match.group(1),
placeholder, match.groups()[-1]), True, 0
def run(self, tree):
"""Apply inline patterns to a parsed Markdown tree.
Iterate over ElementTree, find elements with inline tag, apply inline
patterns and append newly created Elements to the tree. If you don't
want your data to be processed with inline patterns, use the
AtomicString subclass instead of a normal string:
node.text = markdown.AtomicString("This will not be processed.")
Arguments:
* tree: ElementTree object, representing Markdown tree.
Returns: ElementTree object with applied inline patterns.
"""
self.stashed_nodes = {}
stack = [tree]
while stack:
currElement = stack.pop()
insertQueue = []
for child in currElement:
if child.text and not isinstance(
child.text, util.AtomicString
):
text = child.text
child.text = None
lst = self.__processPlaceholders(
self.__handleInline(text), child
)
stack += lst
insertQueue.append((child, lst))
if child.tail:
tail = self.__handleInline(child.tail)
dumby = util.etree.Element('d')
child.tail = None
tailResult = self.__processPlaceholders(tail, dumby, False)
if dumby.tail:
child.tail = dumby.tail
pos = list(currElement).index(child) + 1
tailResult.reverse()
for newChild in tailResult:
currElement.insert(pos, newChild)
if len(child):
stack.append(child)
for element, lst in insertQueue:
if self.markdown.enable_attributes:
if element.text and isString(element.text):
element.text = inlinepatterns.handleAttributes(
element.text, element
)
i = 0
for newChild in lst:
if self.markdown.enable_attributes:
# Processing attributes
if newChild.tail and isString(newChild.tail):
newChild.tail = inlinepatterns.handleAttributes(
newChild.tail, element
)
if newChild.text and isString(newChild.text):
newChild.text = inlinepatterns.handleAttributes(
newChild.text, newChild
)
element.insert(i, newChild)
i += 1
return tree
class PrettifyTreeprocessor(Treeprocessor):
""" Add linebreaks to the html document. """
def _prettifyETree(self, elem):
""" Recursively add linebreaks to ElementTree children. """
i = "\n"
if util.isBlockLevel(elem.tag) and elem.tag not in ['code', 'pre']:
if (not elem.text or not elem.text.strip()) \
and len(elem) and util.isBlockLevel(elem[0].tag):
elem.text = i
for e in elem:
if util.isBlockLevel(e.tag):
self._prettifyETree(e)
if not elem.tail or not elem.tail.strip():
elem.tail = i
if not elem.tail or not elem.tail.strip():
elem.tail = i
def run(self, root):
""" Add linebreaks to ElementTree root object. """
self._prettifyETree(root)
# Do <br />'s separately as they are often in the middle of
# inline content and missed by _prettifyETree.
brs = root.iter('br')
for br in brs:
if not br.tail or not br.tail.strip():
br.tail = '\n'
else:
br.tail = '\n%s' % br.tail
# Clean up extra empty lines at end of code blocks.
pres = root.iter('pre')
for pre in pres:
if len(pre) and pre[0].tag == 'code':
pre[0].text = util.AtomicString(pre[0].text.rstrip() + '\n')
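# Illustrative sketch (not part of the original module): the run() docstring
# above notes that AtomicString text is skipped by the inline-pattern pass.
# The hypothetical treeprocessor below demonstrates that; it is not registered
# with any Markdown instance by default.
class _RawFooterTreeprocessor(Treeprocessor):
    def run(self, root):
        footer = util.etree.SubElement(root, 'p')
        # AtomicString keeps the asterisks from being turned into <em>.
        footer.text = util.AtomicString('*rendered literally, no emphasis*')
        return root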
|
|
#
# WARNING: some of the classes below are manually implemented
#
import sys
import traceback
from pysnmp.smi.indices import OidOrderedDict
from pysnmp.smi import exval, error
from pysnmp.proto import rfc1902
from pysnmp import cache, debug
from pyasn1.error import PyAsn1Error
( Integer, ObjectIdentifier ) = mibBuilder.importSymbols(
"ASN1", "Integer", "ObjectIdentifier"
)
( ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint,
ValueRangeConstraint, ValueSizeConstraint, ) = mibBuilder.importSymbols(
"ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion",
"SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint"
)
# syntax of objects
OctetString = rfc1902.OctetString
Bits = rfc1902.Bits
Integer32 = rfc1902.Integer32
IpAddress = rfc1902.IpAddress
Counter32 = rfc1902.Counter32
Gauge32 = rfc1902.Gauge32
Unsigned32 = rfc1902.Unsigned32
TimeTicks = rfc1902.TimeTicks
Opaque = rfc1902.Opaque
Counter64 = rfc1902.Counter64
class ExtUTCTime(OctetString):
subtypeSpec = OctetString.subtypeSpec+ConstraintsUnion(ValueSizeConstraint(11,11), ValueSizeConstraint(13,13))
# MIB tree foundation class
class MibNode:
label = ''
def __init__(self, name):
self.name = name
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.name)
def getName(self): return self.name
def getLabel(self): return self.label
def setLabel(self, label):
self.label = label
return self
def clone(self, name=None):
myClone = self.__class__(self.name)
if name is not None:
myClone.name = name
if self.label is not None:
myClone.label = self.label
return myClone
# definitions for information modules
class ModuleIdentity(MibNode):
def getLastUpdated(self):
return getattr(self, 'lastUpdated', '')
def setLastUpdated(self, v):
self.lastUpdated = v
return self
def getOrganization(self):
return getattr(self, 'organization', '')
def setOrganization(self, v):
self.organization = v
return self
def getContactInfo(self):
return getattr(self, 'contactInfo', '')
def setContactInfo(self, v):
self.contactInfo = v
return self
def getDescription(self):
return getattr(self, 'description', '')
def setDescription(self, v):
self.description = v
return self
def getRevisions(self):
return getattr(self, 'revisions', ())
def setRevisions(self, args):
self.revisions = args
return self
def asn1Print(self):
return '\
MODULE-IDENTITY\n\
LAST-UPDATED %s\n\
ORGANIZATION \"%s\"\n\
CONTACT-INFO \"%s\"\n\
DESCRIPTION \"%s\"\n\
%s\
' % (self.getLastUpdated(),
self.getOrganization(),
self.getContactInfo(),
self.getDescription(),
''.join([ "REVISION \"%s\"\n" % x for x in self.getRevisions() ]))
class ObjectIdentity(MibNode):
def getStatus(self):
return getattr(self, 'status', 'current')
def setStatus(self, v):
self.status = v
return self
def getDescription(self):
return getattr(self, 'description', '')
def setDescription(self, v):
self.description = v
return self
def getReference(self):
return getattr(self, 'reference', '')
def setReference(self, v):
self.reference = v
return self
def asn1Print(self):
return '\
OBJECT-IDENTITY\n\
STATUS %s\n\
DESCRIPTION \"%s\"\n\
REFERENCE \"%s\"\
' % (self.getStatus(),
self.getDescription(),
self.getReference())
# definition for objects
class NotificationType(MibNode):
def getObjects(self):
return getattr(self, 'objects', ())
def setObjects(self, *args):
self.objects = args
return self
def getStatus(self):
return getattr(self, 'status', 'current')
def setStatus(self, v):
self.status = v
return self
def getDescription(self):
return getattr(self, 'description', '')
def setDescription(self, v):
self.description = v
return self
def getRevisions(self):
return getattr(self, 'revisions', ())
def setRevisions(self, args):
self.revisions = args
return self
def asn1Print(self):
return '\
NOTIFICATION-TYPE\n\
OBJECTS { %s }\n\
STATUS %s\n\
DESCRIPTION \"%s\"\n\
%s\
' % (', '.join([ x for x in self.getObjects() ]),
self.getStatus(),
self.getDescription(),
''.join([ "REVISION \"%s\"\n" % x for x in self.getRevisions() ]))
class MibIdentifier(MibNode):
def asn1Print(self):
return 'OBJECT IDENTIFIER'
class ObjectType(MibNode):
maxAccess = None
def __init__(self, name, syntax=None):
MibNode.__init__(self, name)
self.syntax = syntax
# XXX
def __eq__(self, other): return self.syntax == other
def __ne__(self, other): return self.syntax != other
def __lt__(self, other): return self.syntax < other
def __le__(self, other): return self.syntax <= other
def __gt__(self, other): return self.syntax > other
def __ge__(self, other): return self.syntax >= other
def __repr__(self):
return '%s(%r, %r)' % (
self.__class__.__name__, self.name, self.syntax
)
def getSyntax(self):
return self.syntax
def setSyntax(self, v):
self.syntax = v
return self
def getUnits(self):
return getattr(self, 'units', '')
def setUnits(self, v):
self.units = v
return self
def getMaxAccess(self):
return getattr(self, 'maxAccess', 'not-accessible')
def setMaxAccess(self, v):
self.maxAccess = v
return self
def getStatus(self):
return getattr(self, 'status', 'current')
def setStatus(self, v):
self.status = v
return self
def getDescription(self):
return getattr(self, 'description', '')
def setDescription(self, v):
self.description = v
return self
def getReference(self):
return getattr(self, 'reference', '')
def setReference(self, v):
self.reference = v
return self
def asn1Print(self):
return '\
OBJECT-TYPE\n\
SYNTAX %s\n\
UNITS \"%s\"\n\
MAX-ACCESS %s\n\
STATUS %s\n\
DESCRIPTION \"%s\"\n\
REFERENCE \"%s\"\
' % (self.getSyntax().__class__.__name__,
self.getUnits(),
self.getMaxAccess(),
self.getStatus(),
self.getDescription(),
self.getReference())
class MibTree(ObjectType):
branchVersionId = 0 # changes on tree structure change
maxAccess = 'not-accessible'
def __init__(self, name, syntax=None):
ObjectType.__init__(self, name, syntax)
self._vars = OidOrderedDict()
# Subtrees registration
def registerSubtrees(self, *subTrees):
self.branchVersionId += 1
for subTree in subTrees:
if subTree.name in self._vars:
raise error.SmiError(
'MIB subtree %s already registered at %s' % (subTree.name, self)
)
self._vars[subTree.name] = subTree
def unregisterSubtrees(self, *names):
self.branchVersionId += 1
for name in names:
# This may fail if you fill a table by exporting MibScalarInstances
# but later drop them through SNMP.
if name not in self._vars:
raise error.SmiError(
'MIB subtree %s not registered at %s' % (name, self)
)
del self._vars[name]
#
# Tree traversal
#
# Missing branches are indicated by the NoSuchObjectError exception,
# though subtrees may report their missing branches with the
# NoSuchInstanceError exception instead.
#
def getBranch(self, name, idx):
"""Return a branch of this tree where the 'name' OID may reside"""
for keyLen in self._vars.getKeysLens():
subName = name[:keyLen]
if subName in self._vars:
return self._vars[subName]
raise error.NoSuchObjectError(name=name, idx=idx)
def getNextBranch(self, name, idx=None):
# Start from the beginning
if self._vars: first = list(self._vars.keys())[0]
if self._vars and name < first:
return self._vars[first]
else:
try:
return self._vars[self._vars.nextKey(name)]
except KeyError:
raise error.NoSuchObjectError(idx=idx, name=name)
def getNode(self, name, idx=None):
"""Return tree node found by name"""
if name == self.name:
return self
else:
return self.getBranch(name, idx).getNode(name, idx)
def getNextNode(self, name, idx=None):
"""Return tree node next to name"""
try:
nextNode = self.getBranch(name, idx)
except (error.NoSuchInstanceError, error.NoSuchObjectError):
return self.getNextBranch(name, idx)
else:
try:
return nextNode.getNextNode(name, idx)
except (error.NoSuchInstanceError, error.NoSuchObjectError):
try:
return self._vars[self._vars.nextKey(nextNode.name)]
except KeyError:
raise error.NoSuchObjectError(idx=idx, name=name)
# MIB instrumentation
# Read operation
def readTest(self, name, val, idx, acInfo):
(acFun, acCtx) = acInfo
if name == self.name:
if acFun:
if self.maxAccess not in (
'readonly', 'readwrite','readcreate'
) or acFun(name, self.syntax, idx, 'read', acCtx):
raise error.NoAccessError(idx=idx, name=name)
else:
try:
node = self.getBranch(name, idx)
except (error.NoSuchInstanceError, error.NoSuchObjectError):
return # missing object is not an error here
else:
node.readTest(name, val, idx, acInfo)
def readGet(self, name, val, idx, acInfo):
try:
node = self.getBranch(name, idx)
except (error.NoSuchInstanceError, error.NoSuchObjectError):
return name, exval.noSuchObject
else:
return node.readGet(name, val, idx, acInfo)
# Read next operation is subtree-specific
depthFirst, breadthFirst = 0, 1
def readTestNext(self, name, val, idx, acInfo, oName=None):
if oName is None:
oName = name
topOfTheMib = True
else:
topOfTheMib = False
nextName = name
direction = self.depthFirst
while 1: # XXX linear search here
if direction == self.depthFirst:
direction = self.breadthFirst
try:
node = self.getBranch(nextName, idx)
except (error.NoSuchInstanceError, error.NoSuchObjectError):
continue
else:
try:
node = self.getNextBranch(nextName, idx)
except (error.NoSuchInstanceError, error.NoSuchObjectError):
if topOfTheMib:
return
raise
direction = self.depthFirst
nextName = node.name
try:
return node.readTestNext(nextName, val, idx, acInfo, oName)
except (error.NoAccessError, error.NoSuchInstanceError, error.NoSuchObjectError):
pass
def readGetNext(self, name, val, idx, acInfo, oName=None):
if oName is None:
oName = name
topOfTheMib = True
else:
topOfTheMib = False
nextName = name
direction = self.depthFirst
while 1: # XXX linear search here
if direction == self.depthFirst:
direction = self.breadthFirst
try:
node = self.getBranch(nextName, idx)
except (error.NoSuchInstanceError, error.NoSuchObjectError):
continue
else:
try:
node = self.getNextBranch(nextName, idx)
except (error.NoSuchInstanceError, error.NoSuchObjectError):
if topOfTheMib:
return name, exval.endOfMib
raise
direction = self.depthFirst
nextName = node.name
try:
return node.readGetNext(nextName, val, idx, acInfo, oName)
except (error.NoAccessError, error.NoSuchInstanceError, error.NoSuchObjectError):
pass
# Write operation
def writeTest(self, name, val, idx, acInfo):
(acFun, acCtx) = acInfo
if name == self.name:
# Make sure variable is writable
if acFun:
if self.maxAccess not in ('readwrite', 'readcreate') or \
acFun(name, self.syntax, idx, 'write', acCtx):
raise error.NotWritableError(idx=idx, name=name)
else:
node = self.getBranch(name, idx)
node.writeTest(name, val, idx, acInfo)
def writeCommit(self, name, val, idx, acInfo):
self.getBranch(name, idx).writeCommit(name, val, idx, acInfo)
def writeCleanup(self, name, val, idx, acInfo):
self.branchVersionId += 1
self.getBranch(name, idx).writeCleanup(name, val, idx, acInfo)
def writeUndo(self, name, val, idx, acInfo):
self.getBranch(name, idx).writeUndo(name, val, idx, acInfo)
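# Illustrative sketch (not part of the original module): the traversal
# convention documented above, using hypothetical OIDs. Registered sub-OIDs
# are resolved by getNode(); missing ones surface as NoSuchObjectError.
def _exampleMibTreeTraversal():
    root = MibTree((1, 3, 6, 1))
    leaf = MibTree((1, 3, 6, 1, 2))
    root.registerSubtrees(leaf)
    assert root.getNode((1, 3, 6, 1, 2)) is leaf
    try:
        root.getNode((1, 3, 6, 1, 9))
    except error.NoSuchObjectError:
        pass  # unknown branch, as described in the traversal notes above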
class MibScalar(MibTree):
"""Scalar MIB variable. Implements access control checking."""
maxAccess = 'readonly'
#
# Subtree traversal
#
# Missing branches are indicated by the NoSuchInstanceError exception.
#
def getBranch(self, name, idx):
try:
return MibTree.getBranch(self, name, idx)
except (error.NoSuchInstanceError, error.NoSuchObjectError):
raise error.NoSuchInstanceError(idx=idx, name=name)
def getNextBranch(self, name, idx=None):
try:
return MibTree.getNextBranch(self, name, idx)
except (error.NoSuchInstanceError, error.NoSuchObjectError):
raise error.NoSuchInstanceError(idx=idx, name=name)
def getNode(self, name, idx=None):
try:
return MibTree.getNode(self, name, idx)
except (error.NoSuchInstanceError, error.NoSuchObjectError):
raise error.NoSuchInstanceError(idx=idx, name=name)
def getNextNode(self, name, idx=None):
try:
return MibTree.getNextNode(self, name, idx)
except (error.NoSuchInstanceError, error.NoSuchObjectError):
raise error.NoSuchInstanceError(idx=idx, name=name)
# MIB instrumentation methods
# Read operation
def readTest(self, name, val, idx, acInfo):
(acFun, acCtx) = acInfo
if name == self.name:
raise error.NoAccessError(idx=idx, name=name)
if acFun:
if self.maxAccess not in (
'readonly', 'readwrite', 'readcreate'
) or acFun(name, self.syntax, idx, 'read', acCtx):
raise error.NoAccessError(idx=idx, name=name)
MibTree.readTest(self, name, val, idx, acInfo)
def readGet(self, name, val, idx, acInfo):
try:
node = self.getBranch(name, idx)
except error.NoSuchInstanceError:
return name, exval.noSuchInstance
else:
return node.readGet(name, val, idx, acInfo)
def readTestNext(self, name, val, idx, acInfo, oName=None):
(acFun, acCtx) = acInfo
if acFun:
if self.maxAccess not in (
'readonly', 'readwrite', 'readcreate'
) or acFun(name, self.syntax, idx, 'read', acCtx):
raise error.NoAccessError(idx=idx, name=name)
MibTree.readTestNext(self, name, val, idx, acInfo, oName)
def readGetNext(self, name, val, idx, acInfo, oName=None):
(acFun, acCtx) = acInfo
# Have to duplicate the access-control check here, as the *Next code above
# treats noAccess as noSuchObject at the Test stage and then goes on
# to the Read stage.
if acFun:
if self.maxAccess not in (
'readonly', 'readwrite', 'readcreate'
) or acFun(name, self.syntax, idx, 'read', acCtx):
raise error.NoAccessError(idx=idx, name=name)
return MibTree.readGetNext(self, name, val, idx, acInfo, oName)
# Two-phase commit implementation
def writeTest(self, name, val, idx, acInfo):
(acFun, acCtx) = acInfo
if name == self.name:
raise error.NoAccessError(idx=idx, name=name)
if acFun:
if self.maxAccess not in ('readwrite', 'readcreate') or \
acFun(name, self.syntax, idx, 'write', acCtx):
raise error.NotWritableError(idx=idx, name=name)
MibTree.writeTest(self, name, val, idx, acInfo)
class MibScalarInstance(MibTree):
"""Scalar MIB variable instance. Implements read/write operations."""
def __init__(self, typeName, instId, syntax):
MibTree.__init__(self, typeName+instId, syntax)
self.typeName = typeName
self.instId = instId
self.__oldSyntax = None
#
# Managed object value access methods
#
def getValue(self, name, idx):
debug.logger & debug.flagIns and debug.logger('getValue: returning %r for %s' % (self.syntax, self.name))
return self.syntax.clone()
def setValue(self, value, name, idx):
try:
if hasattr(self.syntax, 'setValue'):
return self.syntax.setValue(value)
else:
return self.syntax.clone(value)
except PyAsn1Error:
exc_t, exc_v, exc_tb = sys.exc_info()
debug.logger & debug.flagIns and debug.logger('setValue: %s=%r failed with traceback %s' % (self.name, value, traceback.format_exception(exc_t, exc_v, exc_tb)))
if isinstance(exc_v, error.TableRowManagement):
raise exc_v
else:
raise error.WrongValueError(idx=idx, name=name, msg=exc_v)
#
# Subtree traversal
#
# Missing branches are indicated by the NoSuchInstanceError exception.
#
def getBranch(self, name, idx):
try:
return MibTree.getBranch(self, name, idx)
except (error.NoSuchInstanceError, error.NoSuchObjectError):
raise error.NoSuchInstanceError(idx=idx, name=name)
def getNextBranch(self, name, idx=None):
try:
return MibTree.getNextBranch(self, name, idx)
except (error.NoSuchInstanceError, error.NoSuchObjectError):
raise error.NoSuchInstanceError(idx=idx, name=name)
def getNode(self, name, idx=None):
# Recursion terminator
if name == self.name:
return self
raise error.NoSuchInstanceError(idx=idx, name=name)
def getNextNode(self, name, idx=None):
raise error.NoSuchInstanceError(idx=idx, name=name)
# MIB instrumentation methods
# Read operation
def readTest(self, name, val, idx, acInfo):
if name != self.name:
raise error.NoSuchInstanceError(idx=idx, name=name)
def readGet(self, name, val, idx, acInfo):
# Return current variable (name, value).
if name == self.name:
debug.logger & debug.flagIns and debug.logger('readGet: %s=%r' % (self.name, self.syntax))
return self.name, self.getValue(name, idx)
else:
raise error.NoSuchInstanceError(idx=idx, name=name)
def readTestNext(self, name, val, idx, acInfo, oName=None):
if name != self.name or name <= oName:
raise error.NoSuchInstanceError(idx=idx, name=name)
def readGetNext(self, name, val, idx, acInfo, oName=None):
if name == self.name and name > oName:
debug.logger & debug.flagIns and debug.logger('readGetNext: %s=%r' % (self.name, self.syntax))
return self.readGet(name, val, idx, acInfo)
else:
raise error.NoSuchInstanceError(idx=idx, name=name)
# Write operation: two-phase commit
def writeTest(self, name, val, idx, acInfo):
# Make sure write's allowed
if name == self.name:
try:
self.__newSyntax = self.setValue(val, name, idx)
except error.MibOperationError:
# SMI exceptions may carry additional content
why = sys.exc_info()[1]
if 'syntax' in why:
self.__newSyntax = why['syntax']
raise why
else:
raise error.WrongValueError(idx=idx, name=name, msg=sys.exc_info()[1])
else:
raise error.NoSuchInstanceError(idx=idx, name=name)
def writeCommit(self, name, val, idx, acInfo):
# Backup original value
if self.__oldSyntax is None:
self.__oldSyntax = self.syntax
# Commit new value
self.syntax = self.__newSyntax
def writeCleanup(self, name, val, idx, acInfo):
self.branchVersionId += 1
debug.logger & debug.flagIns and debug.logger('writeCleanup: %s=%r' % (name, val))
# Drop previous value
self.__newSyntax = self.__oldSyntax = None
def writeUndo(self, name, val, idx, acInfo):
# Revive previous value
self.syntax = self.__oldSyntax
self.__newSyntax = self.__oldSyntax = None
# Table column instance specifics
# Create operation
def createTest(self, name, val, idx, acInfo):
if name == self.name:
try:
self.__newSyntax = self.setValue(val, name, idx)
except error.MibOperationError:
# SMI exceptions may carry additional content
why = sys.exc_info()[1]
if 'syntax' in why:
self.__newSyntax = why['syntax']
else:
raise error.WrongValueError(idx=idx, name=name, msg=sys.exc_info()[1])
else:
raise error.NoSuchInstanceError(idx=idx, name=name)
def createCommit(self, name, val, idx, acInfo):
if val is not None:
self.writeCommit(name, val, idx, acInfo)
def createCleanup(self, name, val, idx, acInfo):
self.branchVersionId += 1
debug.logger & debug.flagIns and debug.logger('createCleanup: %s=%r' % (name, val))
if val is not None:
self.writeCleanup(name, val, idx, acInfo)
def createUndo(self, name, val, idx, acInfo):
if val is not None:
self.writeUndo(name, val, idx, acInfo)
# Destroy operation
def destroyTest(self, name, val, idx, acInfo):
if name == self.name:
try:
self.__newSyntax = self.setValue(val, name, idx)
except error.MibOperationError:
# SMI exceptions may carry additional content
why = sys.exc_info()[1]
if 'syntax' in why:
self.__newSyntax = why['syntax']
else:
raise error.NoSuchInstanceError(idx=idx, name=name)
def destroyCommit(self, name, val, idx, acInfo): pass
def destroyCleanup(self, name, val, idx, acInfo):
self.branchVersionId += 1
def destroyUndo(self, name, val, idx, acInfo): pass
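# Illustrative sketch (not part of the original module): the two-phase commit
# sequence implemented by MibScalarInstance above. The OID and values are
# hypothetical; acInfo is empty because no access-control callback is used.
def _exampleScalarInstanceWrite():
    inst = MibScalarInstance((1, 3, 6, 1, 2), (0,), Integer32(1))
    acInfo = (None, None)
    name = inst.name
    inst.writeTest(name, Integer32(42), 0, acInfo)     # phase one: validate
    inst.writeCommit(name, Integer32(42), 0, acInfo)   # phase two: apply
    inst.writeCleanup(name, Integer32(42), 0, acInfo)  # drop the backup value
    return inst.readGet(name, None, 0, acInfo)         # (name, Integer32(42))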
# Conceptual table classes
class MibTableColumn(MibScalar):
"""MIB table column. Manages a set of column instance variables"""
protoInstance = MibScalarInstance
def __init__(self, name, syntax):
MibScalar.__init__(self, name, syntax)
self.__createdInstances = {}; self.__destroyedInstances = {}
self.__rowOpWanted = {}
#
# Subtree traversal
#
# Missing leaves are indicated by the NoSuchInstanceError exception.
#
def getBranch(self, name, idx):
if name in self._vars:
return self._vars[name]
raise error.NoSuchInstanceError(name=name, idx=idx)
def setProtoInstance(self, protoInstance):
self.protoInstance = protoInstance
# Column creation (this should probably be converted into some state
# machine for clarity). Also, it might be a good idea to indicate
# defaulted column creation in a clearer way than just val == None.
def createTest(self, name, val, idx, acInfo):
(acFun, acCtx) = acInfo
# Make sure creation allowed, create a new column instance but
# do not replace the old one
if name == self.name:
raise error.NoAccessError(idx=idx, name=name)
if acFun:
if val is not None and self.maxAccess != 'readcreate' or \
acFun(name, self.syntax, idx, 'write', acCtx):
debug.logger & debug.flagACL and debug.logger('createTest: %s=%r %s at %s' % (name, val, self.maxAccess, self.name))
raise error.NoCreationError(idx=idx, name=name)
# Create instances if either it does not yet exist (row creation)
# or a value is passed (multiple OIDs in SET PDU)
if val is None and name in self.__createdInstances:
return
self.__createdInstances[name] = self.protoInstance(
self.name, name[len(self.name):], self.syntax.clone()
)
self.__createdInstances[name].createTest(name, val, idx, acInfo)
def createCommit(self, name, val, idx, acInfo):
# Commit new instance value
if name in self._vars: # XXX
if name in self.__createdInstances:
self._vars[name].createCommit(name, val, idx, acInfo)
return
self.__createdInstances[name].createCommit(name, val, idx, acInfo)
# ...commit new column instance
self._vars[name], self.__createdInstances[name] = \
self.__createdInstances[name], self._vars.get(name)
def createCleanup(self, name, val, idx, acInfo):
# Drop previous column instance
self.branchVersionId += 1
if name in self.__createdInstances:
if self.__createdInstances[name] is not None:
self.__createdInstances[name].createCleanup(
name, val, idx, acInfo
)
del self.__createdInstances[name]
elif name in self._vars:
self._vars[name].createCleanup(name, val, idx, acInfo)
def createUndo(self, name, val, idx, acInfo):
# Set back previous column instance, drop the new one
if name in self.__createdInstances:
self._vars[name] = self.__createdInstances[name]
del self.__createdInstances[name]
# Remove new instance on rollback
if self._vars[name] is None:
del self._vars[name]
else:
# Catch half-created instances (hackerish)
try:
self._vars[name] == 0
except PyAsn1Error:
del self._vars[name]
else:
self._vars[name].createUndo(name, val, idx, acInfo)
# Column destruction
def destroyTest(self, name, val, idx, acInfo):
(acFun, acCtx) = acInfo
# Make sure destruction is allowed
if name == self.name:
raise error.NoAccessError(idx=idx, name=name)
if name not in self._vars:
return
if acFun:
if val is not None and self.maxAccess != 'readcreate' or \
acFun(name, self.syntax, idx, 'write', acCtx):
raise error.NoAccessError(idx=idx, name=name)
self._vars[name].destroyTest(name, val, idx, acInfo)
def destroyCommit(self, name, val, idx, acInfo):
# Make a copy of column instance and take it off the tree
if name in self._vars:
self._vars[name].destroyCommit(name, val, idx, acInfo)
self.__destroyedInstances[name] = self._vars[name]
del self._vars[name]
def destroyCleanup(self, name, val, idx, acInfo):
# Drop instance copy
self.branchVersionId += 1
if name in self.__destroyedInstances:
self.__destroyedInstances[name].destroyCleanup(
name, val, idx, acInfo
)
debug.logger & debug.flagIns and debug.logger('destroyCleanup: %s=%r' % (name, val))
del self.__destroyedInstances[name]
def destroyUndo(self, name, val, idx, acInfo):
# Set back column instance
if name in self.__destroyedInstances:
self._vars[name] = self.__destroyedInstances[name]
self._vars[name].destroyUndo(
name, val, idx, acInfo
)
del self.__destroyedInstances[name]
# Set/modify column
def writeTest(self, name, val, idx, acInfo):
# Besides common checks, request row creation on no-instance
try:
# First try the instance
MibScalar.writeTest(
self, name, val, idx, acInfo
)
# ...otherwise proceed with creating new column
except (error.NoSuchInstanceError, error.RowCreationWanted):
self.__rowOpWanted[name] = error.RowCreationWanted()
self.createTest(name, val, idx, acInfo)
except error.RowDestructionWanted:
self.__rowOpWanted[name] = error.RowDestructionWanted()
self.destroyTest(name, val, idx, acInfo)
if name in self.__rowOpWanted:
debug.logger & debug.flagIns and debug.logger('%s flagged by %s=%r, exception %s' % (self.__rowOpWanted[name], name, val, sys.exc_info()[1]))
raise self.__rowOpWanted[name]
def __delegateWrite(self, subAction, name, val, idx, acInfo):
if name not in self.__rowOpWanted:
getattr(MibScalar, 'write'+subAction)(
self, name, val, idx, acInfo
)
return
if isinstance(self.__rowOpWanted[name], error.RowCreationWanted):
getattr(self, 'create'+subAction)(
name, val, idx, acInfo
)
if isinstance(self.__rowOpWanted[name], error.RowDestructionWanted):
getattr(self, 'destroy'+subAction)(
name, val, idx, acInfo
)
def writeCommit(self, name, val, idx, acInfo):
self.__delegateWrite(
'Commit', name, val, idx, acInfo
)
if name in self.__rowOpWanted:
raise self.__rowOpWanted[name]
def writeCleanup(self, name, val, idx, acInfo):
self.branchVersionId += 1
self.__delegateWrite(
'Cleanup', name, val, idx, acInfo
)
if name in self.__rowOpWanted:
e = self.__rowOpWanted[name]
del self.__rowOpWanted[name]
debug.logger & debug.flagIns and debug.logger('%s dropped by %s=%r' % (e, name, val))
raise e
def writeUndo(self, name, val, idx, acInfo):
self.__delegateWrite(
'Undo', name, val, idx, acInfo
)
if name in self.__rowOpWanted:
e = self.__rowOpWanted[name]
del self.__rowOpWanted[name]
debug.logger & debug.flagIns and debug.logger('%s dropped by %s=%r' % (e, name, val))
raise e
class MibTableRow(MibTree):
"""MIB table row (SMI 'Entry'). Manages a set of table columns.
Implements row creation/destruction.
"""
def __init__(self, name):
MibTree.__init__(self, name)
self.__idToIdxCache = cache.Cache()
self.__idxToIdCache = cache.Cache()
self.indexNames = ()
self.augmentingRows = {}
# Table indices resolution. Handle almost all possible rfc1902 types
# explicitly rather than by means of the isSuperTypeOf() method, because
# some subtypes may be implicitly tagged, which renders the base tag
# unavailable. (A round-trip sketch follows this class definition.)
__intBaseTag = Integer.tagSet.getBaseTag()
__strBaseTag = OctetString.tagSet.getBaseTag()
__oidBaseTag = ObjectIdentifier.tagSet.getBaseTag()
__ipaddrTagSet = IpAddress.tagSet
__bitsBaseTag = Bits.tagSet.getBaseTag()
def setFromName(self, obj, value, impliedFlag=None):
if not value:
raise error.SmiError('Short OID for index %r' % (obj,))
value = tuple(value) # possible ObjectIdentifiers
baseTag = obj.getTagSet().getBaseTag()
if baseTag == self.__intBaseTag:
return obj.clone(value[0]), value[1:]
elif self.__ipaddrTagSet.isSuperTagSetOf(obj.getTagSet()):
return obj.clone('.'.join([str(x) for x in value[:4]])), value[4:]
elif baseTag == self.__strBaseTag:
# rfc1902, 7.7
if impliedFlag:
return obj.clone(value), ()
elif obj.isFixedLength():
l = obj.getFixedLength()
return obj.clone(value[:l]), value[l:]
else:
return obj.clone(value[1:value[0]+1]), value[value[0]+1:]
elif baseTag == self.__oidBaseTag:
if impliedFlag:
return obj.clone(value), ()
else:
return obj.clone(value[1:value[0]+1]), value[value[0]+1:]
# rfc2578, 7.1
elif baseTag == self.__bitsBaseTag:
return obj.clone(value[1:value[0]+1]), value[value[0]+1:]
else:
raise error.SmiError('Unknown value type for index %r' % (obj,))
def getAsName(self, obj, impliedFlag=None):
baseTag = obj.getTagSet().getBaseTag()
if baseTag == self.__intBaseTag:
return (int(obj),)
elif self.__ipaddrTagSet.isSuperTagSetOf(obj.getTagSet()):
return obj.asNumbers()
elif baseTag == self.__strBaseTag:
if impliedFlag or obj.isFixedLength():
initial = ()
else:
initial = (len(obj),)
return initial + obj.asNumbers()
elif baseTag == self.__oidBaseTag:
if impliedFlag:
return tuple(obj)
else:
return (len(self.name),) + tuple(obj)
# rfc2578, 7.1
elif baseTag == self.__bitsBaseTag:
return ( len(obj), ) + obj.asNumbers()
else:
raise error.SmiError('Unknown value type for index %r' % (obj,))
# Fate sharing mechanics
def announceManagementEvent(self, action, name, val, idx, acInfo):
# Convert OID suffix into index vals
instId = name[len(self.name)+1:]
baseIndices = []
for impliedFlag, modName, symName in self.indexNames:
mibObj, = mibBuilder.importSymbols(modName, symName)
syntax, instId = self.setFromName(
mibObj.syntax, instId, impliedFlag
)
if self.name == mibObj.name[:-1]:
baseIndices.append((mibObj.name, syntax))
if instId:
raise error.SmiError(
'Excessive instance identifier sub-OIDs left at %s: %s' %
(self, instId)
)
if not baseIndices:
return
for modName, mibSym in self.augmentingRows.keys():
mibObj, = mibBuilder.importSymbols(modName, mibSym)
debug.logger & debug.flagIns and debug.logger('announceManagementEvent %s to %s' % (action, mibObj))
mibObj.receiveManagementEvent(
action, baseIndices, val, idx, acInfo
)
def receiveManagementEvent(self, action, baseIndices, val, idx, acInfo):
# The default implementation supports one-to-one rows dependency
newSuffix = ()
# Resolve indices intersection
for impliedFlag, modName, symName in self.indexNames:
mibObj, = mibBuilder.importSymbols(modName, symName)
for name, syntax in baseIndices:
if name == mibObj.name:
newSuffix = newSuffix + self.getAsName(syntax, impliedFlag)
if newSuffix:
debug.logger & debug.flagIns and debug.logger('receiveManagementEvent %s for suffix %s' % (action, newSuffix))
self.__manageColumns(action, (), newSuffix, val, idx, acInfo)
def registerAugmentions(self, *names):
for modName, symName in names:
if (modName, symName) in self.augmentingRows:
raise error.SmiError(
'Row %s already augmented by %s::%s' % \
(self.name, modName, symName)
)
self.augmentingRows[(modName, symName)] = 1
return self
def setIndexNames(self, *names):
for name in names:
self.indexNames = self.indexNames + (name,)
return self
def getIndexNames(self):
return self.indexNames
def __manageColumns(self, action, excludeName, nameSuffix,
val, idx, acInfo):
# Build a map of index names and values for automatic initialization
indexVals = {}; instId = nameSuffix
for impliedFlag, modName, symName in self.indexNames:
mibObj, = mibBuilder.importSymbols(modName, symName)
syntax, instId = self.setFromName(
mibObj.syntax, instId, impliedFlag
)
indexVals[mibObj.name] = syntax
for name, var in self._vars.items():
if name == excludeName:
continue
if name in indexVals:
getattr(var, action)(name + nameSuffix, indexVals[name], idx,
(None, None))
else:
getattr(var, action)(name + nameSuffix, val, idx, acInfo)
debug.logger & debug.flagIns and debug.logger('__manageColumns: action %s name %s suffix %s %svalue %r' % (action, name, nameSuffix, name in indexVals and "index " or "", indexVals.get(name, val)))
def __delegate(self, subAction, name, val, idx, acInfo):
# Relay operation request to column, expect row operation request.
try:
getattr(self.getBranch(name, idx), 'write'+subAction)(
name, val, idx, acInfo
)
except error.RowCreationWanted:
self.__manageColumns(
'create'+subAction, name[:len(self.name)+1],
name[len(self.name)+1:], None, idx, acInfo
)
self.announceManagementEvent(
'create'+subAction, name, None, idx, acInfo
)
except error.RowDestructionWanted:
self.__manageColumns(
'destroy'+subAction, name[:len(self.name)+1],
name[len(self.name)+1:], None, idx, acInfo
)
self.announceManagementEvent(
'destroy'+subAction, name, None, idx, acInfo
)
def writeTest(self, name, val, idx, acInfo):
self.__delegate('Test', name, val, idx, acInfo)
def writeCommit(self, name, val, idx, acInfo):
self.__delegate('Commit', name, val, idx, acInfo)
def writeCleanup(self, name, val, idx, acInfo):
self.branchVersionId += 1
self.__delegate('Cleanup', name, val, idx, acInfo)
def writeUndo(self, name, val, idx, acInfo):
self.__delegate('Undo', name, val, idx, acInfo)
# Table row management
# Table row access by instance name
def getInstName(self, colId, instId):
return self.name + (colId,) + instId
# Table index management
def getIndicesFromInstId(self, instId):
"""Return index values for instance identification"""
if instId in self.__idToIdxCache:
return self.__idToIdxCache[instId]
indices = []
for impliedFlag, modName, symName in self.indexNames:
mibObj, = mibBuilder.importSymbols(modName, symName)
syntax, instId = self.setFromName(mibObj.syntax, instId, impliedFlag)
indices.append(syntax) # to avoid cyclic refs
if instId:
raise error.SmiError(
'Excessive instance identifier sub-OIDs left at %s: %s' %
(self, instId)
)
indices = tuple(indices)
self.__idToIdxCache[instId] = indices
return indices
def getInstIdFromIndices(self, *indices):
"""Return column instance identification from indices"""
if indices in self.__idxToIdCache:
return self.__idxToIdCache[indices]
idx = 0; idxLen = len(indices); instId = ()
for impliedFlag, modName, symName in self.indexNames:
mibObj, = mibBuilder.importSymbols(modName, symName)
if idx < idxLen:
instId = instId + self.getAsName(
mibObj.syntax.clone(indices[idx]), impliedFlag
)
else:
break
idx = idx + 1
self.__idxToIdCache[indices] = instId
return instId
# Table access by index
def getInstNameByIndex(self, colId, *indices):
"""Build column instance name from components"""
return self.name + (colId,) + self.getInstIdFromIndices(*indices)
def getInstNamesByIndex(self, *indices):
"""Build column instance names from indices"""
instNames = []
for columnName in self._vars.keys():
instNames.append(
self.getInstNameByIndex(*(columnName[-1],) + indices)
)
return tuple(instNames)
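# Illustrative sketch (not part of the original module): getAsName() and
# setFromName() above round-trip a variable-length string index following the
# OID encoding rules they reference. The row OID and index value are
# hypothetical.
def _exampleIndexRoundTrip():
    row = MibTableRow((1, 3, 6, 1, 2, 1, 99, 1))
    suffix = row.getAsName(OctetString('ab'))      # (2, 97, 98): length + octets
    value, rest = row.setFromName(OctetString(), suffix)
    return str(value), rest                        # ('ab', ())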
class MibTable(MibTree):
"""MIB table. Manages a set of TableRow's"""
def __init__(self, name):
MibTree.__init__(self, name)
zeroDotZero = ObjectIdentity((0,0))
#dot = MibTree()
iso = MibTree((1,))
org = MibIdentifier(iso.name + (3,))
dod = MibIdentifier(org.name + (6,))
internet = MibIdentifier(dod.name + (1,))
directory = MibIdentifier(internet.name + (1,))
mgmt = MibIdentifier(internet.name + (2,))
mib_2 = MibIdentifier(mgmt.name + (1,)).setLabel('mib-2')
transmission = MibIdentifier(mib_2.name + (10,))
experimental = MibIdentifier(internet.name + (3,))
private = MibIdentifier(internet.name + (4,))
enterprises = MibIdentifier(private.name + (1,))
security = MibIdentifier(internet.name + (5,))
snmpV2 = MibIdentifier(internet.name + (6,))
snmpDomains = MibIdentifier(snmpV2.name + (1,))
snmpProxys = MibIdentifier(snmpV2.name +(2,))
snmpModules = MibIdentifier(snmpV2.name +(3,))
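# Illustrative sanity check (not part of the original module): the well-known
# OID skeleton defined above resolves to the familiar numeric prefixes.
def _exampleWellKnownOids():
    assert internet.name == (1, 3, 6, 1)
    assert mib_2.name == (1, 3, 6, 1, 2, 1)
    assert enterprises.name == (1, 3, 6, 1, 4, 1)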
mibBuilder.exportSymbols(
'SNMPv2-SMI', MibNode=MibNode,
Integer32=Integer32, Bits=Bits, IpAddress=IpAddress,
Counter32=Counter32, Gauge32=Gauge32, Unsigned32=Unsigned32,
TimeTicks=TimeTicks, Opaque=Opaque, Counter64=Counter64,
ExtUTCTime=ExtUTCTime,
ModuleIdentity=ModuleIdentity, ObjectIdentity=ObjectIdentity,
NotificationType=NotificationType, MibScalar=MibScalar,
MibScalarInstance=MibScalarInstance,
MibIdentifier=MibIdentifier, MibTree=MibTree,
MibTableColumn=MibTableColumn, MibTableRow=MibTableRow,
MibTable=MibTable, zeroDotZero=zeroDotZero,
iso=iso, org=org, dod=dod, internet=internet,
directory=directory, mgmt=mgmt, mib_2=mib_2, transmission=transmission,
experimental=experimental, private=private, enterprises=enterprises,
security=security, snmpV2=snmpV2, snmpDomains=snmpDomains,
snmpProxys=snmpProxys, snmpModules=snmpModules
)
# XXX
# getAsName/setFromName goes out of MibRow?
# revisit getNextNode() -- needs optimization
|
|
""" Properties for modeling Chart inputs, constraints, and dependencies.
selection spec:
[['x'], ['x', 'y']]
[{'x': categorical, 'y': numerical}]
"""
from __future__ import absolute_import
import numpy as np
import pandas as pd
from bokeh.properties import (HasProps, Either, String, Int, List, Bool,
PrimitiveProperty, bokeh_integer_types, Array)
from .utils import special_columns
class Column(Array):
def _is_seq(self, value):
is_array = super(Column, self)._is_seq(value)
return isinstance(value, pd.Series) or isinstance(value, list) or is_array
def _new_instance(self, value):
return pd.Series(value)
def transform(self, value):
if value is None:
return None
if isinstance(value, pd.Series):
arr = value.values
else:
arr = value
trans_array = super(Column, self).transform(arr)
try:
return pd.Series(trans_array)
except ValueError:
raise ValueError("Could not transform %r" % value)
class Logical(Bool):
"""A boolean like data type."""
def validate(self, value):
try:
super(Logical, self).validate(value)
except ValueError:
if isinstance(value, list):
value = np.array(value)
# If not a Bool, then look for pseudo-logical types
if isinstance(value, np.ndarray):
values = np.unique(value)
if len(values) == 2:
return
raise ValueError('expected a Bool or array with 2 unique values, got %s' % value)
class ColumnLabel(Either):
"""Specify a column by name or index."""
def __init__(self, columns=None, default=None, help=None):
# ToDo: make sure we can select by integer
types = (String,
Int)
self.columns = columns
super(ColumnLabel, self).__init__(*types, default=default, help=help)
def validate(self, value):
"""If we are given a column list, make sure that the column provided is valid."""
super(ColumnLabel, self).validate(value)
if self.columns:
if type(value) in bokeh_integer_types:
if len(self.columns) > value:
return
else:
raise ValueError("Not a valid column selection.")
else:
if value not in self.columns:
raise ValueError("Column provided is not in the list of valid columns: %s" % self.columns)
def __str__(self):
return "Column Name or Column String"
class Dimension(HasProps):
"""Configures valid Chart column selections.
A dimension is a Chart property that is assigned one or more column names or
indices. Each column can match one or more column types, which matter to
charts because the type of column selection can greatly affect the behavior
of generalized Charts.
The Dimension also provides convenient utilities for accessing information
about the current provided configuration at the global, non-grouped level.
"""
name = String()
alt_names = Either(String, List(String), default=None)
columns = Either(ColumnLabel, List(ColumnLabel), default=None)
valid = Either(PrimitiveProperty, List(PrimitiveProperty), default=None)
invalid = Either(PrimitiveProperty, List(PrimitiveProperty), default=None)
selection = Either(ColumnLabel, List(ColumnLabel), default=None)
def __init__(self, name, **properties):
properties['name'] = name
super(Dimension, self).__init__(**properties)
self._data = pd.DataFrame()
self._chart_source = None
def get_valid_types(self, col_data):
"""Returns all property types that are matched."""
valid_types = list(self.valid)
matches = []
# validate each type on the provided column
for valid_type in valid_types:
prop = valid_type()
# if valid, append to the output
try:
prop.validate(col_data)
matches.append(valid_type)
except ValueError:
pass
return matches
@property
def data(self):
"""The data selected for the Dimension.
Returns pd.Series(1) if the data is empty or there is no selection.
"""
if self._data.empty or self.selection is None:
return pd.Series(1)
else:
# return special column type if available
if self.selection in list(special_columns.keys()):
return special_columns[self.selection](self._data)
return self._data[self.selection]
def set_data(self, data):
"""Builder must provide data so that builder has access to configuration metadata."""
self.selection = data[self.name]
self._chart_source = data
self._data = data.df
self.columns = list(self._data.columns.values)
@property
def min(self):
"""The minimum of one to many column selections."""
if isinstance(self.data, pd.Series):
return self.data.min()
else:
return self.data.min(axis=1).min()
@property
def max(self):
"""The maximum of one to many column selections."""
if isinstance(self.data, pd.Series):
return self.data.max()
else:
return self.data.max(axis=1).max()
@property
def dtype(self):
if isinstance(self.data, pd.DataFrame):
return self.data.dtypes[self.selection[0]]
else:
return self.data.dtype
@property
def computed(self):
if self._chart_source is None:
return False
else:
return self._chart_source.is_computed(self.selection)
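# Illustrative sketch (not part of the original module): until a builder calls
# set_data(), a Dimension falls back to a scalar pd.Series(1), as described in
# the data property docstring above.
def _example_dimension_default():
    dim = Dimension('x')
    return dim.data   # pd.Series(1) while no data/selection is configured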
class EitherColumn(Either):
"""Allow providing option of column types."""
# ToDo: incorporate fix into Either
def matches(self, new, old):
comparison = super(EitherColumn, self).matches(new, old)
if isinstance(comparison, bool):
return comparison
elif isinstance(comparison, pd.Series):
return comparison.all()
else:
raise ValueError('Failed when comparing Columns')
|
|
'''
Python module dependencies:
biopython==1.63
fastcluster==1.1.13
numpy==1.7.1
python-Levenshtein==0.11.2
scipy==0.12.0
Under Ubuntu, scipy, numpy and biopython can be installed as:
sudo apt-get install python-biopython python-numpy python-scipy
fastcluster and python-Levenshtein can be installed using pip:
pip install fastcluster python-Levenshtein
'''
from __future__ import print_function
import time
import math
import json
import numpy as np
from multiprocessing import Pool, cpu_count
import fastcluster as fc
from Bio import pairwise2
from Levenshtein import distance
from scipy.cluster.hierarchy import fcluster
import subprocess
import os
import resource
default_dtype = 'f4'
distance_cutoff = 0.32
class Seq(object):
"""Contains genetic characteristics for a single sequence.
Input:
data = a MongoDB result (dict-like) containing the following fields:
[seq_id, v_gene, j_gene, <junc_query>, var_muts_nt]
where <junc_query> is the sequence of the nucleotide or AA junction.
junc_query = either 'junc_aa' or 'junc_nt' for amino-acid or nucleotide junctions, respectively.
"""
def __init__(self, data, junc_query):
self.id = data['seq_id']
self.v_fam = data['v_gene']['fam']
self.v_gene = data['v_gene']['gene']
self.v_all = data['v_gene']['all']
self.j_gene = data['j_gene']['gene']
self.j_all = data['j_gene']['all']
self.junc = data[junc_query]
self.junc_len = len(self.junc)
self.muts = []
if 'var_muts_nt' in data.keys():
self.muts = data['var_muts_nt']
def v_gene_string(self):
return 'v{0}-{1}'.format(self.v_fam, self.v_gene)
def v_fam_string(self):
return 'v{0}'.format(self.v_fam)
def get_LD(i, j):
'''Calculate sequence distance between a pair of Seq objects'''
# pairwise2 is used to force 'gapless' distance when sequence pair is of the same length
if i.junc_len == j.junc_len:
identity = pairwise2.align.globalms(i.junc, j.junc, 1, 0, -50, -50, score_only=True, one_alignment_only=True)
return i.junc_len - identity
# Levenshtein distance is used for sequence pairs of different lengths
else:
return distance(i.junc, j.junc)
def vCompare(i, j):
'''Calculate penalty for mismatches in Variable segment.'''
if i.v_gene != j.v_gene:
return 8
if i.v_all != j.v_all:
return 1
return 0
def jCompare(i, j):
'''Calculate penalty for mismatches in Joining segment.'''
if i.j_gene != j.j_gene:
return 8
if i.j_all != j.j_all:
return 1
return 0
def sharedMuts(i, j):
'''Calculate bonus for shared mutations.'''
if i.id == j.id:
return 0.0
bonus = 0.0
for mut in i.muts:
if mut == '':
continue
if mut in j.muts:
bonus += 0.35
return bonus
def get_score(i, j=None):
if j is None:
i, j = i
if i.id == j.id:
return 0.0
LD = get_LD(i, j)
vPenalty = vCompare(i, j)
jPenalty = jCompare(i, j)
lenPenalty = math.fabs(i.junc_len - j.junc_len) * 2
editLength = min(i.junc_len, j.junc_len)
mutBonus = sharedMuts(i, j)
if mutBonus > (LD + vPenalty + jPenalty):
mutBonus = (LD + vPenalty + jPenalty - 0.001) # distance values can't be negative
return (LD + vPenalty + jPenalty + lenPenalty - mutBonus) / editLength
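# Illustrative worked example (not part of the original script): two
# hypothetical sequences with identical V/J calls, equal-length junctions
# differing at a single position, and one shared mutation.
def _example_get_score():
    a = Seq({'seq_id': 'a', 'v_gene': {'fam': '3', 'gene': '23', 'all': '01'},
             'j_gene': {'gene': '4', 'all': '02'}, 'junc_aa': 'CARDYW',
             'var_muts_nt': ['A123G']}, 'junc_aa')
    b = Seq({'seq_id': 'b', 'v_gene': {'fam': '3', 'gene': '23', 'all': '01'},
             'j_gene': {'gene': '4', 'all': '02'}, 'junc_aa': 'CARDFW',
             'var_muts_nt': ['A123G']}, 'junc_aa')
    # LD = 1, no V/J/length penalties, shared-mutation bonus = 0.35,
    # edit length = 6, so the score is (1 - 0.35) / 6, roughly 0.108
    return get_score(a, b)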
def make_iter(seqs, mode=1):
for i, seq_i in enumerate(seqs):
if mode == 1:
for seq_j in seqs[i + 1:]:
yield (seq_i, seq_j)
else:
yield (seq_i, seqs[i + 1:])
def get_scores_one_row(args):
(seq_i, row_j) = args
return np.array([get_score(seq_i, seq_j) for seq_j in row_j], dtype=default_dtype)
def build_condensed_matrix(seqs, mode=2):
result = np.array([], dtype=default_dtype)
p = Pool(processes=cpu_count())
if mode == 1:
n = len(seqs)
#chunksize = 500000
chunksize = int(n * (n - 1) / 2 / cpu_count() / 2)
result_one = p.imap(get_score, make_iter(seqs, mode=1), chunksize=chunksize)
result = np.array(list(result_one), dtype=default_dtype)
else:
result_one_row = p.imap(get_scores_one_row, make_iter(seqs, mode=2), chunksize=100)
result = np.concatenate(list(result_one_row))
#p.close()
#p.join()
return result
def build_cluster_dict(flatCluster):
clusters = {}
for i, c in enumerate(flatCluster):
if c in clusters:
clusters[c].append(i)
else:
clusters[c] = [i]
return clusters
def make_clusters(con_distMatrix):
linkageMatrix = fc.linkage(con_distMatrix, method='average', preserve_input=False)
del con_distMatrix
flatCluster = fcluster(linkageMatrix, distance_cutoff, criterion='distance')
del linkageMatrix
return flatCluster
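# Illustrative sketch (not part of the original script): make_clusters() on a
# tiny condensed distance matrix for three items, where only the first two are
# within distance_cutoff of each other.
def _example_make_clusters():
    con = np.array([0.1, 0.9, 0.9], dtype=default_dtype)
    return make_clusters(con)   # e.g. array([1, 1, 2]): items 0 and 1 co-cluster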
def write_output(outfile, clusters, seqs, vh='v0'):
with open(outfile, 'w') as out_f:
for c in clusters.keys():
if len(clusters[c]) < 2:
continue
rString = "#lineage_{0}_{1}\n".format(vh, str(c))
for seq_idx in clusters[c]:
seq = seqs[seq_idx]
rString += '>{0}\n{1}\n'.format(seq.id, seq.junc)
rString += '\n'
out_f.write(rString)
def get_memory_usage():
rss = subprocess.check_output('ps -p {} u'.format(os.getpid()), shell=True).decode('utf-8').split('\n')[1].split()[5]
max_rss = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
print('current_rss: {}\tmax_rss: {}'.format(rss, max_rss))
def analyze(infile, outfile=None, n=None, output_format='cluster_only', memory_usage=False):
if memory_usage:
get_memory_usage()
t00 = time.time()
print("Loading input sequences...", end='')
with open(infile) as in_f:
seqs = json.load(in_f)
if n:
seqs = seqs[:n]
seqs = [Seq(s, 'junc_aa') for s in seqs]
print("done. [{}, {:.2f}s]".format(len(seqs), time.time() - t00))
if memory_usage:
get_memory_usage()
t0 = time.time()
print("Calculating condensed distance matrix...", end='')
con_distMatrix = build_condensed_matrix(seqs, mode=2) # ####
print("done. [{}, {:.2f}s]".format(con_distMatrix.shape, time.time() - t0))
print("\tmin: {}, max: {}".format(con_distMatrix.min(), con_distMatrix.max()))
if memory_usage:
get_memory_usage()
t0 = time.time()
print("Calculating clusters...", end='')
clusters = make_clusters(con_distMatrix)
print("done. [{}, {:.2f}s]".format(clusters.max(), time.time() - t0))
if memory_usage:
get_memory_usage()
t0 = time.time()
print("Outputting clusters...", end='')
if output_format == 'seqs':
clusters = build_cluster_dict(clusters)
write_output(outfile, clusters, seqs)
else:
np.savetxt(outfile, clusters, fmt='%d')
print("done. {:.2f}s".format(time.time() - t0))
print('=' * 20)
print("Finished! Total time= {:.2f}s".format(time.time() - t00))
if memory_usage:
get_memory_usage()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Clonify script.')
parser.add_argument('infile', action="store", help='input sequence file')
parser.add_argument('outfile', action="store", help='output file')
parser.add_argument('-n', action="store", dest="n", type=int,
help='maximum number of sequences to process from input file')
parser.add_argument('-f', action='store', dest='output_format', default='cluster_only',
help='output format: cluster_only | seqs.')
parser.add_argument('-m', action='store_true', dest='memory_usage',
help='print out memory usage')
args = parser.parse_args()
analyze(args.infile, args.outfile, n=args.n,
output_format=args.output_format,
memory_usage=args.memory_usage)
|
|
from nose.tools import eq_
import hashlib
import json
import nose
from js_helper import _do_real_test_raw as _js_test
from validator.testcases.markup.markuptester import MarkupParser
import validator.testcases.jetpack as jetpack
from validator.errorbundler import ErrorBundle
from validator.xpi import XPIManager
def _do_test(xpi_package, allow_old_sdk=True, compat=False):
err = ErrorBundle()
if compat:
err.save_resource('is_compat_test', True)
jetpack.inspect_jetpack(err, xpi_package, allow_old_sdk=allow_old_sdk)
return err
class MockXPI(object):
def __init__(self, resources):
self.resources = resources
def read(self, name):
if isinstance(self.resources[name], bool):
return ''
return self.resources[name]
def __iter__(self):
for name in self.resources.keys():
yield name
def __contains__(self, name):
return name in self.resources
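# Illustrative sketch (not part of the original tests): MockXPI mimics just the
# parts of the XPI interface that inspect_jetpack touches -- read(), iteration
# and membership checks -- so tests can feed it in-memory "files".
def _example_mock_xpi():
    xpi = MockXPI({'bootstrap.js': 'var x = 1;', 'package.json': '{}'})
    return sorted(xpi), 'package.json' in xpi, xpi.read('bootstrap.js')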
def test_not_jetpack():
"""Test that add-ons which do not match the Jetpack pattern are ignored."""
err = _do_test(MockXPI({'foo': True, 'bar': True}))
assert not err.errors
assert not err.warnings
assert not err.notices
eq_(err.metadata.get('is_jetpack', False), False)
def test_package_json_jetpack():
"""Test that add-ons with the new package.json are treated as jetpack."""
err = _do_test(MockXPI({'bootstrap.js': '', 'package.json': ''}))
assert not err.errors
assert not err.warnings
assert not err.notices
eq_(err.metadata.get('is_jetpack'), True)
def test_package_json_pass_jetpack():
"""Test that a minimalistic package.json Jetpack setup will pass."""
with open('tests/resources/bootstrap.js') as bootstrap_file:
bootstrap = bootstrap_file.read()
err = _do_test(MockXPI({'bootstrap.js': bootstrap,
'package.json': '{}'}))
print err.print_summary(verbose=True)
assert not err.failed()
assert 'is_jetpack' in err.metadata and err.metadata['is_jetpack']
# Test that all files are marked as pretested.
pretested_files = err.get_resource('pretested_files')
assert pretested_files
assert 'bootstrap.js' in pretested_files
def test_package_json_different_bootstrap():
"""Test that a minimalistic package.json Jetpack setup will pass."""
err = _do_test(MockXPI({'bootstrap.js': "var foo = 'bar';",
'package.json': '{}'}))
print err.print_summary(verbose=True)
assert not err.failed()
assert 'is_jetpack' in err.metadata and err.metadata['is_jetpack']
# Test that all files are not marked as pretested.
pretested_files = err.get_resource('pretested_files')
assert not pretested_files
assert 'bootstrap.js' not in pretested_files
def test_mismatched_db_hash():
"""
Test that a file whose hash doesn't exist in the Jetpack known file
database is reported in jetpack_unknown_files rather than failing validation.
"""
with open('tests/resources/bootstrap.js') as bootstrap_file:
bootstrap = bootstrap_file.read()
# Break the hash with this.
bootstrap = 'function() {}; %s' % bootstrap
err = _do_test(MockXPI({'bootstrap.js': bootstrap,
'package.json': '{}'}))
print err.print_summary(verbose=True)
assert not err.failed()
assert 'jetpack_identified_files' in err.metadata
assert 'jetpack_unknown_files' in err.metadata
unknown_files = err.metadata['jetpack_unknown_files']
nose.tools.eq_(len(unknown_files), 2)
nose.tools.ok_('bootstrap.js' in unknown_files)
def test_new_module_location_spec():
"""
Tests that we don't fail for missing modules in add-ons generated with
newer versions of the SDK.
"""
xpi = XPIManager('tests/resources/jetpack/jetpack-1.14.xpi')
err = _do_test(xpi)
assert not any(w['id'][2] == 'missing_jetpack_module'
for w in err.warnings)
def test_components_flagged():
"""Test that `Components` is flagged in Jetpack."""
js = """
var x = Components.services.foo.bar;
"""
assert not _js_test(js).failed()
assert _js_test(js, jetpack=True).failed()
def test_safe_require():
"""Test that requiring an innocuous module does not add the
requires_chrome flag."""
def base_case():
err = _js_test("""var foo = require("bar");""",
jetpack=True)
eq_(err.metadata['requires_chrome'], False)
yield base_case
def test_unsafe_safe_require():
"""Test that requiring low-level modules does add the requires_chrome
flag."""
interfaces = ['chrome', 'window-utils', 'observer-service']
def interface_cases(interface):
err = _js_test("""var {cc, ci} = require("%s")""" % interface,
jetpack=True)
print err.print_summary(verbose=True)
first_message = err.warnings[0]['message']
assert 'non-SDK interface' in first_message, ('unexpected: %s' %
first_message)
assert 'requires_chrome' in err.metadata, \
'unexpected: "requires_chrome" should be in metadata'
eq_(err.metadata['requires_chrome'], True)
for case in interfaces:
yield interface_cases, case
def test_absolute_uris_in_js():
"""
Test that a warning is thrown for absolute URIs within JS files.
"""
bad_js = 'alert("resource://foo-data/bar/zap.png");'
assert not _js_test(bad_js).failed()
err = _js_test(bad_js, jetpack=True)
assert err.failed()
assert err.compat_summary['errors']
# Test that literals are inspected even if they're the result of an
# operation.
bad_js = 'alert("resou" + "rce://foo-" + "data/bar/zap.png");'
assert not _js_test(bad_js).failed()
err = _js_test(bad_js, jetpack=True)
assert err.failed()
assert err.compat_summary['errors']
def test_observer_service_flagged():
assert _js_test("""
var {Ci} = require("chrome");
thing.QueryInterface(Ci.nsIObserverService);
""", jetpack=True).failed()
assert not _js_test("""
thing.QueryInterface(Ci.nsIObserverService);
""").failed()
def test_absolute_uris_in_markup():
"""
Test that a warning is thrown for absolute URIs within markup files.
"""
err = ErrorBundle()
bad_html = '<foo><bar src="resource://foo-data/bar/zap.png" /></foo>'
parser = MarkupParser(err)
parser.process('foo.html', bad_html, 'html')
assert not err.failed()
err.metadata['is_jetpack'] = True
parser = MarkupParser(err)
parser.process('foo.html', bad_html, 'html')
assert err.failed()
assert err.compat_summary['errors']
def test_fail_on_cfx():
"""
Test that we fail for add-ons built with 'cfx'.
"""
with open('tests/resources/bootstrap.js') as bootstrap_file:
bootstrap = bootstrap_file.read()
bootstrap_hash = hashlib.sha256(bootstrap).hexdigest()
harnessoptions = {
'jetpackID': 'foobar',
'sdkVersion': '1.17',
'manifest': {
'bootstrap.js':
{'requirements': {},
'packageName': 'addon-kit',
'sectionName': 'lib',
'moduleName': 'drawing',
'jsSHA256': bootstrap_hash,
'docsSHA256': bootstrap_hash}}}
err = _do_test(MockXPI({'bootstrap.js': bootstrap,
'harness-options.json':
json.dumps(harnessoptions)}))
print err.print_summary(verbose=True)
assert err.failed() and err.errors
def test_pass_cfx_for_compat():
"""
Test that add-ons built with 'cfx' do not fail when running compatibility tests.
"""
with open('tests/resources/bootstrap.js') as bootstrap_file:
bootstrap = bootstrap_file.read()
bootstrap_hash = hashlib.sha256(bootstrap).hexdigest()
harnessoptions = {
'jetpackID': 'foobar',
'sdkVersion': '1.17',
'manifest': {
'bootstrap.js':
{'requirements': {},
'packageName': 'addon-kit',
'sectionName': 'lib',
'moduleName': 'drawing',
'jsSHA256': bootstrap_hash,
'docsSHA256': bootstrap_hash}}}
err = _do_test(MockXPI({'bootstrap.js': bootstrap,
'harness-options.json':
json.dumps(harnessoptions)}),
compat=True)
print err.print_summary(verbose=True)
assert not err.failed() and not err.errors
|
|
from __future__ import unicode_literals
from collections import defaultdict
from django.conf import settings
from django.contrib import messages
from django.utils.translation import ugettext as _
from django.utils.encoding import force_text
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.http import HttpResponseRedirect
class FSMTransitionMixin(object):
"""
Mixin to use with `admin.ModelAdmin` to support transitioning
a model from one state to another (workflow style).
* The change_form.html must be overridden to use the custom submit
row template (on a model or global level).
{% load fsm_admin %}
{% block submit_buttons_bottom %}{% fsm_submit_row %}{% endblock %}
* To optionally display hints to the user about what's needed
to transition to other states that aren't available due to unmet
pre-conditions, add this to the change_form as well:
{% block after_field_sets %}
{{ block.super }}
{% fsm_transition_hints %}
{% endblock %}
* There must be one and only one FSMField on the model.
* There must be a corresponding model function to run the transition,
generally decorated with the transition decorator. This is what
determines the available transitions. Without a function, the action
in the submit row will not be available.
* In the absence of specific transition permissions, the user must
have change permission for the model.
"""
# Each transition input is named with the state field and transition.
# e.g. _fsmtransition-publish_state-publish
# _fsmtransition-revision_state-delete
fsm_input_prefix = '_fsmtransition'
# The name of one or more FSMFields on the model to transition
fsm_field = ['state',]
change_form_template = 'fsm_admin/change_form.html'
default_disallow_transition = not getattr(settings, 'FSM_ADMIN_FORCE_PERMIT', False)
def _fsm_get_transitions(self, obj, request, perms=None):
"""
Gets a list of transitions available to the user.
Available state transitions are provided by django-fsm
following the pattern get_available_FIELD_transitions
"""
user = request.user
fsm_fields = self._get_fsm_field_list()
transitions = {}
for field in fsm_fields:
transitions_func = 'get_available_user_{0}_transitions'.format(field)
transitions_generator = getattr(obj, transitions_func)(user) if obj else []
transitions[field] = self._filter_admin_transitions(transitions_generator)
return transitions
def get_redirect_url(self, request, obj):
"""
Hook to adjust the redirect post-save.
"""
return request.path
def fsm_field_instance(self, fsm_field_name):
"""
Returns the actual state field instance, as opposed to
fsm_field attribute representing just the field name.
"""
return self.model._meta.get_field_by_name(fsm_field_name)[0]
def display_fsm_field(self, obj, fsm_field_name):
"""
Makes sure get_FOO_display() is used for choices-based FSM fields.
"""
field_instance = self.fsm_field_instance(fsm_field_name)
if field_instance and field_instance.choices:
return getattr(obj, 'get_%s_display' % fsm_field_name)()
else:
return getattr(obj, fsm_field_name)
def response_change(self, request, obj):
"""
Override of `ModelAdmin.response_change` to detect the FSM button
that was clicked in the submit row and perform the state transition.
"""
if not getattr(obj, '_fsmtransition_results', None):
return super(FSMTransitionMixin, self).response_change(request, obj)
if obj._fsmtransition_results['status'] == messages.SUCCESS:
msg = _('%(obj)s successfully set to %(new_state)s') % obj._fsmtransition_results
else:
msg = _('Error! %(obj)s failed to %(transition)s') % obj._fsmtransition_results
self.message_user(request, msg, obj._fsmtransition_results['status'])
opts = self.model._meta
redirect_url = self.get_redirect_url(request=request, obj=obj)
preserved_filters = self.get_preserved_filters(request)
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
def _is_transition_available(self, obj, transition, request):
"""
Checks if the requested transition is available
"""
transitions = []
for field, field_transitions in iter(self._fsm_get_transitions(obj, request).items()):
transitions += [t.name for t in field_transitions]
return transitions
def _filter_admin_transitions(self, transitions_generator):
"""
Filter the given list of transitions, if their transition methods are declared as admin
transitions. To allow a transition inside fsm_admin, add the parameter
`admin=True` to the transition decorator, for example:
```
@transition(field='state', source=['startstate'], target='finalstate', custom=dict(admin=True))
def do_something(self):
...
```
If the configuration setting `FSM_ADMIN_FORCE_PERMIT = True`, then only transitions with
`custom=dict(admin=True)` are allowed. Otherwise, if `FSM_ADMIN_FORCE_PERMIT = False` or
unset, all transitions are allowed except those explicitly marked with `custom=dict(admin=False)`.
"""
for transition in transitions_generator:
if transition.custom.get('admin', self.default_disallow_transition):
yield transition
def _get_requested_transition(self, request):
"""
Extracts the name of the transition requested by user
"""
for key in request.POST.keys():
if key.startswith(self.fsm_input_prefix):
fsm_input = key.split('-')
return (fsm_input[1], fsm_input[2])
return None, None
def _do_transition(self, transition, request, obj, form, fsm_field_name):
original_state = self.display_fsm_field(obj, fsm_field_name)
msg_dict = {
'obj': force_text(obj),
'transition': transition,
'original_state': original_state,
}
# Ensure the requested transition is available
available = self._is_transition_available(obj, transition, request)
trans_func = getattr(obj, transition, None)
if available and trans_func:
# Run the transition
try:
# Attempt to pass in the by argument if using django-fsm-log
trans_func(by=request.user)
except TypeError:
# If the function does not have a by attribute, just call with no arguments
trans_func()
new_state = self.display_fsm_field(obj, fsm_field_name)
# Mark the fsm_field as changed in the form so it will be
# picked up when the change message is constructed
form.changed_data.append(fsm_field_name)
msg_dict.update({'new_state': new_state, 'status': messages.SUCCESS})
else:
msg_dict.update({'status': messages.ERROR})
# Attach the results of our transition attempt
setattr(obj, '_fsmtransition_results', msg_dict)
def save_model(self, request, obj, form, change):
fsm_field, transition = self._get_requested_transition(request)
if transition:
self._do_transition(transition, request, obj, form, fsm_field)
super(FSMTransitionMixin, self).save_model(request, obj, form, change)
def get_transition_hints(self, obj):
"""
See `fsm_transition_hints` templatetag.
"""
hints = defaultdict(list)
transitions = self._get_possible_transitions(obj)
# Step through the conditions needed to accomplish the legal state
# transitions, and alert the user of any missing condition.
# TODO?: find a cleaner way to enumerate conditions methods?
for transition in transitions:
for condition in transition.conditions:
# If the condition is valid, then we don't need the hint
if condition(obj):
continue
hint = getattr(condition, 'hint', '')
if hint:
hints[transition.name].append(hint)
return dict(hints)
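# Hedged sketch (assumption, not part of this module): for a hint to show
# up via `get_transition_hints`, the condition callable attached to the
# transition needs a `hint` attribute, e.g. on a hypothetical model:
#
#     def is_reviewed(instance):
#         return instance.reviewed
#     is_reviewed.hint = 'The article must be reviewed before publishing.'
#
#     @transition(field='state', source='draft', target='published',
#                 conditions=[is_reviewed], custom=dict(admin=True))
#     def publish(self):
#         ...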
def _get_possible_transitions(self, obj):
"""
Get valid state transitions from the current state of `obj`
"""
fsm_fields = self._get_fsm_field_list()
for field in fsm_fields:
fsmfield = obj._meta.get_field_by_name(field)[0]
transitions = fsmfield.get_all_transitions(self.model)
for transition in transitions:
if transition.source in [getattr(obj, field), '*']:
yield transition
def _get_fsm_field_list(self):
"""
Ensure backward compatibility by converting a single fsm field to
a list. While we are guaranteeing compatibility we should use
this method to retrieve the fsm field rather than directly
accessing the property.
"""
if not isinstance(self.fsm_field, (list, tuple,)):
return [self.fsm_field,]
return self.fsm_field
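# ----------------------------------------------------------------------
# Hedged usage sketch, assuming a hypothetical `Article` model with a
# single FSMField named 'state' and transition methods decorated as shown
# in `_filter_admin_transitions` above. Not part of this module.
#
#     from django.contrib import admin
#     from myapp.models import Article   # hypothetical
#
#     class ArticleAdmin(FSMTransitionMixin, admin.ModelAdmin):
#         fsm_field = ['state']
#         readonly_fields = ('state',)   # keep the state out of the form
#
#     admin.site.register(Article, ArticleAdmin)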
|
|
"""
Some utility functions.
Miscellaneous utilities
* list2set
* first
* uniq
* more_than
Term characterisation and generation
* to_term
* from_n3
Date/time utilities
* date_time
* parse_date_time
Statement and component type checkers
* check_context
* check_subject
* check_predicate
* check_object
* check_statement
* check_pattern
"""
from calendar import timegm
from time import altzone
# from time import daylight
from time import gmtime
from time import localtime
from time import time
from time import timezone
from os.path import splitext
from StringIO import StringIO
from rdflib.exceptions import ContextTypeError
from rdflib.exceptions import ObjectTypeError
from rdflib.exceptions import PredicateTypeError
from rdflib.exceptions import SubjectTypeError
from rdflib.graph import Graph
from rdflib.graph import QuotedGraph
from rdflib.namespace import Namespace
from rdflib.namespace import NamespaceManager
from rdflib.term import BNode
from rdflib.term import Literal
from rdflib.term import URIRef
from rdflib.py3compat import sign
__all__ = [
'list2set', 'first', 'uniq', 'more_than', 'to_term', 'from_n3',
'date_time', 'parse_date_time', 'check_context', 'check_subject',
'check_predicate', 'check_object', 'check_statement', 'check_pattern',
'guess_format', 'find_roots', 'get_tree']
def list2set(seq):
"""
Return a new list without duplicates.
Preserves the order, unlike set(seq)
"""
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
def first(seq):
"""
return the first element in a python sequence
for graphs, use graph.value instead
"""
for result in seq:
return result
return None
def uniq(sequence, strip=0):
"""removes duplicate strings from the sequence."""
if strip:
return set(s.strip() for s in sequence)
else:
return set(sequence)
def more_than(sequence, number):
"Returns 1 if sequence has more items than number and 0 if not."
i = 0
for item in sequence:
i += 1
if i > number:
return 1
return 0
def to_term(s, default=None):
"""
Creates and returns an Identifier of type corresponding
to the pattern of the given positional argument string ``s``:
'' returns the ``default`` keyword argument value or ``None``
'<s>' returns ``URIRef(s)`` (i.e. without angle brackets)
'"s"' returns ``Literal(s)`` (i.e. without doublequotes)
'_s' returns ``BNode(s)`` (i.e. without leading underscore)
"""
if not s:
return default
elif s.startswith("<") and s.endswith(">"):
return URIRef(s[1:-1])
elif s.startswith('"') and s.endswith('"'):
return Literal(s[1:-1])
elif s.startswith("_"):
return BNode(s)
else:
msg = "Unrecognised term syntax: '%s'" % s
raise Exception(msg)
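def _example_to_term():
    """
    Hedged sketch (not part of the public rdflib API): exercises the
    patterns described in the `to_term` docstring above.
    """
    assert to_term('') is None
    assert to_term('<http://example.org/a>') == URIRef('http://example.org/a')
    assert to_term('"hello"') == Literal('hello')
    assert isinstance(to_term('_b1'), BNode)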
def from_n3(s, default=None, backend=None, nsm=None):
r'''
Creates the Identifier corresponding to the given n3 string.
>>> from_n3('<http://ex.com/foo>') == URIRef('http://ex.com/foo')
True
>>> from_n3('"foo"@de') == Literal('foo', lang='de')
True
>>> from_n3('"""multi\nline\nstring"""@en') == Literal(
... 'multi\nline\nstring', lang='en')
True
>>> from_n3('42') == Literal(42)
True
>>> from_n3(Literal(42).n3()) == Literal(42)
True
>>> from_n3('"42"^^xsd:integer') == Literal(42)
True
>>> from rdflib import RDFS
>>> from_n3('rdfs:label') == RDFS['label']
True
>>> nsm = NamespaceManager(Graph())
>>> nsm.bind('dbpedia', 'http://dbpedia.org/resource/')
>>> berlin = URIRef('http://dbpedia.org/resource/Berlin')
>>> from_n3('dbpedia:Berlin', nsm=nsm) == berlin
True
'''
if not s:
return default
if s.startswith('<'):
return URIRef(s[1:-1])
elif s.startswith('"'):
if s.startswith('"""'):
quotes = '"""'
else:
quotes = '"'
value, rest = s.rsplit(quotes, 1)
value = value[len(quotes):] # strip leading quotes
datatype = None
language = None
# as a given datatype overrules lang-tag check for it first
dtoffset = rest.rfind('^^')
if dtoffset >= 0:
# found a datatype
# datatype has to come after lang-tag so ignore everything before
# see: http://www.w3.org/TR/2011/WD-turtle-20110809/
# #prod-turtle2-RDFLiteral
datatype = from_n3(rest[dtoffset + 2:], default, backend, nsm)
else:
if rest.startswith("@"):
language = rest[1:] # strip leading at sign
value = value.replace(r'\"', '"')
# Hack: this should correctly handle strings with either native unicode
# characters, or \u1234 unicode escapes.
value = value.encode("raw-unicode-escape").decode("unicode-escape")
return Literal(value, language, datatype)
elif s == 'true' or s == 'false':
return Literal(s == 'true')
elif s.isdigit():
return Literal(int(s))
elif s.startswith('{'):
identifier = from_n3(s[1:-1])
return QuotedGraph(backend, identifier)
elif s.startswith('['):
identifier = from_n3(s[1:-1])
return Graph(backend, identifier)
elif s.startswith("_:"):
return BNode(s[2:])
elif ':' in s:
if nsm is None:
# instantiate default NamespaceManager and rely on its defaults
nsm = NamespaceManager(Graph())
prefix, last_part = s.split(':', 1)
ns = dict(nsm.namespaces())[prefix]
return Namespace(ns)[last_part]
else:
return BNode(s)
def check_context(c):
if not (isinstance(c, URIRef) or
isinstance(c, BNode)):
raise ContextTypeError("%s:%s" % (c, type(c)))
def check_subject(s):
""" Test that s is a valid subject identifier."""
if not (isinstance(s, URIRef) or isinstance(s, BNode)):
raise SubjectTypeError(s)
def check_predicate(p):
""" Test that p is a valid predicate identifier."""
if not isinstance(p, URIRef):
raise PredicateTypeError(p)
def check_object(o):
""" Test that o is a valid object identifier."""
if not (isinstance(o, URIRef) or
isinstance(o, Literal) or
isinstance(o, BNode)):
raise ObjectTypeError(o)
def check_statement(triple):
(s, p, o) = triple
if not (isinstance(s, URIRef) or isinstance(s, BNode)):
raise SubjectTypeError(s)
if not isinstance(p, URIRef):
raise PredicateTypeError(p)
if not (isinstance(o, URIRef) or
isinstance(o, Literal) or
isinstance(o, BNode)):
raise ObjectTypeError(o)
def check_pattern(triple):
(s, p, o) = triple
if s and not (isinstance(s, URIRef) or isinstance(s, BNode)):
raise SubjectTypeError(s)
if p and not isinstance(p, URIRef):
raise PredicateTypeError(p)
if o and not (isinstance(o, URIRef) or
isinstance(o, Literal) or
isinstance(o, BNode)):
raise ObjectTypeError(o)
def date_time(t=None, local_time_zone=False):
"""http://www.w3.org/TR/NOTE-datetime ex: 1997-07-16T19:20:30Z
>>> date_time(1126482850)
'2005-09-11T23:54:10Z'
@@ this will change depending on where it is run
#>>> date_time(1126482850, local_time_zone=True)
#'2005-09-11T19:54:10-04:00'
>>> date_time(1)
'1970-01-01T00:00:01Z'
>>> date_time(0)
'1970-01-01T00:00:00Z'
"""
if t is None:
t = time()
if local_time_zone:
time_tuple = localtime(t)
if time_tuple[8]:
tz_mins = altzone // 60
else:
tz_mins = timezone // 60
tzd = "-%02d:%02d" % (tz_mins // 60, tz_mins % 60)
else:
time_tuple = gmtime(t)
tzd = "Z"
year, month, day, hh, mm, ss, wd, y, z = time_tuple
s = "%0004d-%02d-%02dT%02d:%02d:%02d%s" % (
year, month, day, hh, mm, ss, tzd)
return s
def parse_date_time(val):
"""always returns seconds in UTC
# tests are written like this to make any errors easier to understand
>>> parse_date_time('2005-09-11T23:54:10Z') - 1126482850.0
0.0
>>> parse_date_time('2005-09-11T16:54:10-07:00') - 1126482850.0
0.0
>>> parse_date_time('1970-01-01T00:00:01Z') - 1.0
0.0
>>> parse_date_time('1970-01-01T00:00:00Z') - 0.0
0.0
>>> parse_date_time("2005-09-05T10:42:00") - 1125916920.0
0.0
"""
if "T" not in val:
val += "T00:00:00Z"
ymd, time = val.split("T")
hms, tz_str = time[0:8], time[8:]
if not tz_str or tz_str == "Z":
time = time[:-1]
tz_offset = 0
else:
signed_hrs = int(tz_str[:3])
mins = int(tz_str[4:6])
secs = (sign(signed_hrs) * mins + signed_hrs * 60) * 60
tz_offset = -secs
year, month, day = ymd.split("-")
hour, minute, second = hms.split(":")
t = timegm((int(year), int(month), int(day), int(hour),
int(minute), int(second), 0, 0, 0))
t = t + tz_offset
return t
SUFFIX_FORMAT_MAP = {
'rdf': 'xml',
'rdfs': 'xml',
'owl': 'xml',
'n3': 'n3',
'ttl': 'turtle',
'nt': 'nt',
'trix': 'trix',
'xhtml': 'rdfa',
'html': 'rdfa',
'svg': 'rdfa',
'nq': 'nquads',
'trig': 'trig'
}
def guess_format(fpath, fmap=None):
"""
Guess RDF serialization based on file suffix. Uses
``SUFFIX_FORMAT_MAP`` unless ``fmap`` is provided. Examples:
>>> guess_format('path/to/file.rdf')
'xml'
>>> guess_format('path/to/file.owl')
'xml'
>>> guess_format('path/to/file.ttl')
'turtle'
>>> guess_format('path/to/file.xhtml')
'rdfa'
>>> guess_format('path/to/file.svg')
'rdfa'
>>> guess_format('path/to/file.xhtml', {'xhtml': 'grddl'})
'grddl'
This also works with just the suffixes, with or without leading dot, and
regardless of letter case::
>>> guess_format('.rdf')
'xml'
>>> guess_format('rdf')
'xml'
>>> guess_format('RDF')
'xml'
"""
fmap = fmap or SUFFIX_FORMAT_MAP
return fmap.get(_get_ext(fpath)) or fmap.get(fpath.lower())
def _get_ext(fpath, lower=True):
"""
Gets the file extension from a file(path); stripped of leading '.' and in
lower case. Examples:
>>> _get_ext("path/to/file.txt")
'txt'
>>> _get_ext("OTHER.PDF")
'pdf'
>>> _get_ext("noext")
''
>>> _get_ext(".rdf")
'rdf'
"""
ext = splitext(fpath)[-1]
if ext == '' and fpath.startswith("."):
ext = fpath
if lower:
ext = ext.lower()
if ext.startswith('.'):
ext = ext[1:]
return ext
def find_roots(graph, prop, roots=None):
"""
Find the roots in some sort of transitive hierarchy.
find_roots(graph, rdflib.RDFS.subClassOf)
will return a set of all roots of the sub-class hierarchy
Assumes triple of the form (child, prop, parent), i.e. the direction of
RDFS.subClassOf or SKOS.broader
"""
non_roots = set()
if roots is None:
roots = set()
for x, y in graph.subject_objects(prop):
non_roots.add(x)
if x in roots:
roots.remove(x)
if y not in non_roots:
roots.add(y)
return roots
def get_tree(graph,
root,
prop,
mapper=lambda x: x,
sortkey=None,
done=None,
dir='down'):
"""
Return a nested list/tuple structure representing the tree
built by the transitive property given, starting from the root given
i.e.
get_tree(graph,
rdflib.URIRef("http://xmlns.com/foaf/0.1/Person"),
rdflib.RDFS.subClassOf)
will return the structure for the subClassTree below person.
dir='down' assumes triple of the form (child, prop, parent),
i.e. the direction of RDFS.subClassOf or SKOS.broader
Any other dir traverses in the other direction
"""
if done is None:
done = set()
if root in done:
return
done.add(root)
tree = []
if dir == 'down':
branches = graph.subjects(prop, root)
else:
branches = graph.objects(root, prop)
for branch in branches:
t = get_tree(graph, branch, prop, mapper, sortkey, done, dir)
if t:
tree.append(t)
return (mapper(root), sorted(tree, key=sortkey))
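def _example_class_hierarchy():
    """
    Hedged sketch showing `find_roots` and `get_tree` on a tiny, made-up
    class hierarchy; the URIs below are placeholders.
    """
    from rdflib.namespace import RDFS
    ex = Namespace("http://example.org/")
    g = Graph()
    g.add((ex.Dog, RDFS.subClassOf, ex.Animal))
    g.add((ex.Cat, RDFS.subClassOf, ex.Animal))
    roots = find_roots(g, RDFS.subClassOf)          # {ex.Animal}
    tree = get_tree(g, ex.Animal, RDFS.subClassOf)  # (Animal, [(Cat, []), (Dog, [])])
    return roots, tree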
def test():
import doctest
doctest.testmod()
if __name__ == "__main__":
# try to make the tests work outside of the time zone they were written in
# import os, time
# os.environ['TZ'] = 'US/Pacific'
# try:
# time.tzset()
# except AttributeError, e:
# print e
# pass
# tzset missing! see
# http://mail.python.org/pipermail/python-dev/2003-April/034480.html
test() # pragma: no cover
|
|
'''
Module docstring goes here.
'''
from __future__ import print_function
import numpy as np
import desc.slcosmo
c = 3.00e5  # speed of light in km/s
class SLCosmo(object):
'''
Master class for handling cosmological parameter inference given
strong lens time delay and Fermat potential data.
In the TDC2 scheme (and perhaps others), we will need to read in one
file for each system, that contains the time delay posterior samples
for that lens. The header of that file should be the same as the
TDC2 data file, and contain the measured values of the Fermat
potential differences, along with their uncertainties.
A joint likelihood function will then compute the log likelihood of
H0 given the Fermat potential differences, for each sample time
delay in each lens, and combine them into a single log likelihood
value.
The H0 values will be drawn from a suitable prior, and each assigned
a log likelihood value, which is finally converted to a posterior
weight.
Use cases:
1. Make a set of mock TDC2 sample ensembles, and analyze them as if
they were real, aiming to recover the "true" cosmological
parameters.
2. Analyze a set of TDC2 sample files, reading them in and inferring
the cosmological parameters.
'''
def __init__(self):
self.cosmopars = {'H0':[]}
self.cosmotruth = {'H0':None}
self.H0_prior_mean = 70.0
self.H0_prior_width = 7.0
self.Npriorsamples = None
self.Nlenses = 0
self.lenses = None
self.lcdatafiles = []
self.tdc2samplefiles = []
self.log_likelihoods = None
self.weights = None
self.mock_files = []
return
def make_some_mock_data(self, Nlenses=100, Nsamples=1000,
percentage_dfp_err=4.0, dt_sigma=2.0,
quad_fraction=0.17, stem='mock'):
'''
Make a mock dataset of any number of lens systems, and write it
out in a set of correctly formatted files.
Parameters:
-----------
Nlenses : integer
The number of lenses worth of time delay data to
simulate.
Nsamples : integer
The number of posterior sample time delays to generate.
percentage_dfp_err : float
The percentage uncertainty in each Fermat potential
difference; the differences are assumed to be independent. A very simple approximation.
dt_sigma : float
The absolute uncertainty in each and every time delay,
in days. Another simple approximation.
quad_fraction : float
The fraction of lenses that have 4 images.
Notes:
------
True time delays and Fermat potentials are drawn randomly from
plausible Gaussian distributions.
Possible failure modes: 1. Simulated posterior time delays have
incorrect width
'''
assert Nlenses > 0
assert Nsamples > 1
assert percentage_dfp_err > 0.0
assert dt_sigma > 0.0
self.Nlenses = Nlenses
self.lenses = []
self.mock_files = []
self.cosmotruth['H0'] = 72.3
for k in range(self.Nlenses):
# How many images does this lens have?
if np.random.rand() < quad_fraction:
Nim = 4
else:
Nim = 2
Ndt = Nim - 1
# What are its true time delays?
dt_true = 20.0 + 2.0 * np.random.randn(Ndt)
# What is its Q value, relating H0 to time delay distance?
Q = 4e5 + 0.5e5 * np.random.randn()
# What are its true Fermat potential differences?
DeltaFP_true = (c * dt_true * self.cosmotruth['H0'] / Q)
# What are its observed Fermat potential differences?
DeltaFP_err = DeltaFP_true * percentage_dfp_err / 100.0
DeltaFP_obs = DeltaFP_true + \
DeltaFP_err * np.random.randn(Ndt)
# What are its posterior sample time delays?
dt_sigma_array = dt_sigma * np.ones(Ndt)
dt_obs = dt_true + \
dt_sigma_array * np.random.randn(Nsamples, Ndt)
# Create a TDC2 ensemble object and have it write
# itself out:
filename = stem+'_time_delays_'+str(k)+'.txt'
self.lenses.append(desc.slcosmo.TDC2ensemble())
self.lenses[k].Nsamples = Nsamples
self.lenses[k].Nim = Nim
self.lenses[k].dt_obs = dt_obs
self.lenses[k].DeltaFP_obs = DeltaFP_obs
self.lenses[k].DeltaFP_err = DeltaFP_err
self.lenses[k].Q = Q
self.lenses[k].write_out_to(filename)
self.mock_files.append(filename)
return
def read_in_time_delay_samples_from(self, paths):
'''
Ingest time delay data from a number of TDC2 submission files,
storing it in a list of `TDC2ensemble` objects, one for each
lens (and overwriting any existing list).
Parameters:
-----------
paths : [list of] string[s]
A list of the files to be read from, or a string containing wildcards.
Notes:
------
Each tdc2samplefile is a multi-column plain text file, with a
header marked by '#' marks at the start of each line and
containing a set of Fermat potential information that we need.
'''
if type(paths) is str:
import glob
tdc2samplefiles = glob.glob(paths)
else:
tdc2samplefiles = paths
self.Nlenses = len(tdc2samplefiles)
self.lenses = [] # trashing any existing data we may have had.
quad_count = 0
for tdc2samplefile in tdc2samplefiles:
self.lenses.append(desc.slcosmo.TDC2ensemble.read_in_from(tdc2samplefile))
if self.lenses[-1].Nim == 4:
quad_count += 1
print("Read in", self.Nlenses, "lenses, quad fraction =",
np.round(float(quad_count)/float(self.Nlenses), 2))
return
def draw_some_prior_samples(self, Npriorsamples=1000):
'''
In simple Monte Carlo, we generate a large number of samples
from the prior for the cosmological parameters, so that we can
then evaluate their likelihood weights.
Parameters:
-----------
Npriorsamples : integer
The number of prior samples to draw.
Notes:
------
The cosmological parameter samples are stored in a numpy array,
which this method initializes.
'''
assert Npriorsamples > 20
self.Npriorsamples = Npriorsamples
self.cosmopars['H0'] = self.H0_prior_mean + \
self.H0_prior_width * np.random.randn(self.Npriorsamples)
return
def compute_the_joint_log_likelihood(self):
'''
Compute the joint log likelihood of the cosmological parameters
given a set of time delays and the measured Fermat potential
differences.
Notes:
------
The calculation is a sum of log likelihoods over the ensemble
of lenses, each of which has to first be computed. We also
compute the importance weights, rescaling and exponentiating.
'''
import time as wallclock
start = wallclock.time()
# Compute likelihoods, looping over lenses and summing
# over samples:
self.log_likelihoods = np.zeros(self.Npriorsamples)
# Loop over sampled values of H0
for k in range(self.Npriorsamples):
H0 = self.cosmopars['H0'][k]
jointlogL = np.array([])
for lens in self.lenses:
jointlogL = np.append(jointlogL,
lens.log_likelihood(H0))
self.log_likelihoods[k] = np.sum(jointlogL)
# Compute normalized importance weights:
logLmax = np.max(self.log_likelihoods)
self.weights = np.exp(self.log_likelihoods - logLmax)
# How long did that take?
end = wallclock.time()
print("Wallclock time spent characterizing posterior = ",
round(end-start), "seconds")
return
def estimate_H0(self):
'''
For this we need the posterior weight for each prior sample, so
that we can compute the posterior mean and standard deviation
for each cosmological parameter.
Notes:
------
Should probably be a static function.
'''
H0_sum = np.sum(self.weights * self.cosmopars['H0'])
H0_sumsq = np.sum(self.weights * self.cosmopars['H0']**2)
H0_N = np.sum(self.weights)
H0_mean = H0_sum / H0_N
H0_stdv = np.sqrt((H0_sumsq - H0_N*H0_mean**2)/H0_N)
return H0_mean, H0_stdv
def report_the_inferred_cosmological_parameters(self):
'''
For this we need the posterior weight for each prior sample, so
that we can compute the posterior mean and standard deviation
for each cosmological parameter.
Returns:
--------
(H0, sigma) : Tuple of floats
An estimate of H0, and its uncertainty.
Notes:
------
Convenience function.
'''
estimate, uncertainty = self.estimate_H0()
kmsMpc = "km/s/Mpc"
print("H0 =", round(estimate,1), "+/-",
round(uncertainty,1), kmsMpc)
if self.cosmotruth['H0'] is not None:
print("True H0 =", self.cosmotruth['H0'], kmsMpc)
return
def plot_the_inferred_cosmological_parameters(self):
'''
Make a nice plot of the histogram of posterior H0 samples,
overlaying the assumed prior PDF and the underlying true value,
if known.
'''
import pylab as plt
# Start figure:
fig = plt.figure(figsize=(8,5))
# Set font sizes:
params = {'axes.labelsize': 20,
'font.size': 20,
'legend.fontsize': 20,
'xtick.labelsize': 14,
'ytick.labelsize': 14}
plt.rcParams.update(params)
# Linear axes for the histogram:
hax = fig.add_axes([0.15,0.15,0.85,0.80])
H0min, H0max = 60.0, 80.0
hax.set_xlim(H0min, H0max)
for label in hax.get_yticklabels():
label.set_visible(False)
for tick in hax.yaxis.get_ticklines():
tick.set_visible(False)
# Plot the posterior histogram:
Nbins = int(0.1*self.Npriorsamples)
bins = np.linspace(H0min, H0max, Nbins, endpoint=True)
plt.hist(self.cosmopars['H0'], weights=self.weights,
bins=bins, histtype='stepfilled', normed=True,
color='red', edgecolor='red', alpha=0.5,
label='Posterior PDF')
# Overlay Gaussian approximation to the posterior:
mu, sigma = self.estimate_H0()
value = str(round(mu,1))+' +/- '+str(round(sigma,1))
x = np.linspace(H0min, H0max, 1000, endpoint=True)
y = np.exp(-0.5*((x-mu)**2)/sigma**2) / \
np.sqrt(2*np.pi*sigma**2)
plt.plot(x, y, linewidth=2, color='red',
label='Posterior estimate: '+value)
# Overlay Gaussian prior:
mu, sigma = self.H0_prior_mean, self.H0_prior_width
assumption = str(round(mu,1))+' +/- '+str(round(sigma,1))
x = np.linspace(H0min, H0max, 1000, endpoint=True)
y = np.exp(-0.5*((x-mu)**2)/sigma**2) / \
np.sqrt(2*np.pi*sigma**2)
plt.plot(x, y, linestyle='dotted', linewidth=2, color='gray',
label='Prior PDF: '+assumption)
# Overlay true value:
value = self.cosmotruth['H0']
plt.axvline(x=value,
color='black', linestyle='dashed', linewidth=2,
label='Truth: '+str(value))
# Label axes of course:
plt.xlabel("$H_0 / {\\rm km s}^{-1}{\\rm Mpc}^{-1}$")
plt.ylabel("${\\rm Pr}(H_0 \\vert \\Delta t_{\\rm obs} )$")
plt.legend(prop={'size':10}, framealpha=1.0, loc=0)
# Write out to file and report:
filename = "H0posterior.pdf"
plt.savefig(filename, dpi=300)
print("Plot saved to",filename)
return
# ======================================================================
if __name__ == '__main__':
Lets = desc.slcosmo.SLCosmo()
Lets.make_some_mock_data()
Lets.draw_some_prior_samples()
Lets.compute_the_joint_log_likelihood()
Lets.report_the_inferred_cosmological_parameters()
Lets.plot_the_inferred_cosmological_parameters()
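# ----------------------------------------------------------------------
# Hedged sketch of "use case 2" from the class docstring: analysing a set
# of real TDC2 sample files instead of mocks. The wildcard pattern below
# is a placeholder, not a real path.
#
#     Lets = desc.slcosmo.SLCosmo()
#     Lets.read_in_time_delay_samples_from('tdc2_time_delays_*.txt')
#     Lets.draw_some_prior_samples(Npriorsamples=5000)
#     Lets.compute_the_joint_log_likelihood()
#     Lets.report_the_inferred_cosmological_parameters()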
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for TensorFlow's "Eager" mode of execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import random
import threading
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import device as pydev
from tensorflow.python.util import compat
from tensorflow.python.util import is_in_graph_mode
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
GRAPH_MODE = 0
EAGER_MODE = 1
# Default execution mode.
default_execution_mode = GRAPH_MODE
# Cache from (old_device_name, partial_new_device_name) -> (new_device_name,
# new_device_spec).
# Note that we do not protect this with a lock and instead rely on python's GIL
# and the idempotent nature of writes to provide thread safety.
_device_parsing_cache = {}
_MAXINT32 = 2**31 - 1
DEVICE_PLACEMENT_EXPLICIT = pywrap_tensorflow.TFE_DEVICE_PLACEMENT_EXPLICIT
DEVICE_PLACEMENT_WARN = pywrap_tensorflow.TFE_DEVICE_PLACEMENT_WARN
DEVICE_PLACEMENT_SILENT = pywrap_tensorflow.TFE_DEVICE_PLACEMENT_SILENT
DEVICE_PLACEMENT_SILENT_FOR_INT32 = (
pywrap_tensorflow.TFE_DEVICE_PLACEMENT_SILENT_FOR_INT32)
SYNC = 0
ASYNC = 1
class _EagerTensorCache(object):
"""Simple cache which evicts items based on length in a FIFO manner."""
def __init__(self, max_items=256, max_tensor_size=10000):
self._data = collections.OrderedDict()
self._max_items = max_items
self._max_tensor_size = max_tensor_size
def put(self, key, value):
if value._num_elements() > self._max_tensor_size: # pylint: disable=protected-access
return
self._data[key] = value
if len(self._data) > self._max_items:
self._data.popitem(last=False)
def get(self, key):
return self._data.get(key, None)
def flush(self):
self._data = {}
# TODO(agarwal): better name ?
class _EagerContext(threading.local):
"""Thread local eager context."""
def __init__(self, config=None):
super(_EagerContext, self).__init__()
self.device_spec = pydev.DeviceSpec.from_string("")
self.device_name = self.device_spec.to_string()
self.mode = default_execution_mode
self.is_eager = default_execution_mode == EAGER_MODE
self.scope_name = ""
self.recording_summaries = False
self.summary_writer_resource = None
self.scalar_cache = {}
self.ones_rank_cache = _EagerTensorCache()
self.zeros_cache = _EagerTensorCache()
self.execution_mode = None
self.rewriter_config = None
if config is not None and config.HasField(
"graph_options") and config.graph_options.HasField("rewrite_options"):
self.rewriter_config = (
config.graph_options.rewrite_options.SerializeToString())
ContextSwitch = collections.namedtuple(
"ContextSwitch", ["is_building_function", "enter_context_fn"])
# `_ContextSwitchStack` is a `threading.local` to match the semantics of
# `_DefaultGraphStack`, which is also a `threading.local`.
class _ContextSwitchStack(threading.local):
"""A thread-local stack of context switches."""
def __init__(self, eager):
super(_ContextSwitchStack, self).__init__()
self.stack = []
if eager:
# Initialize the stack with a pointer to enter the eager context; this
# ensures that the fact that eager execution was enabled is propagated
# across threads, since (1) `enable_eager_execution` modifies a
# process-level flag (`default_execution_mode`) and (2) `__init__` is
# called each time a threading.local object is used in a separate thread.
self.push(is_building_function=False, enter_context_fn=eager_mode)
def push(self, is_building_function, enter_context_fn):
"""Push metadata about a context switch onto the stack.
A context switch can take one of two forms: installing a graph as the
default graph, or entering the eager context. For each context switch,
we record whether or not the entered context is building a function.
Args:
is_building_function: (bool.) Whether the context is building a function.
enter_context_fn: (function.) A callable that executes the context switch.
For example, `graph.as_default` or `eager_mode`.
"""
self.stack.append(
ContextSwitch(is_building_function, enter_context_fn))
def pop(self):
"""Pop the stack."""
self.stack.pop()
# TODO(agarwal): rename to EagerContext / EagerRuntime ?
# TODO(agarwal): consider keeping the corresponding Graph here.
class Context(object):
"""Environment in which eager operations execute."""
# TODO(agarwal): create and link in some documentation for `execution_mode`.
# pylint: disable=redefined-outer-name
def __init__(self,
config=None,
device_policy=None,
execution_mode=None,
server_def=None):
"""Creates a new Context.
Args:
config: (Optional.) A `ConfigProto` protocol buffer with configuration
options for the Context. Note that a lot of these options may be
currently unimplemented or irrelevant when eager execution is enabled.
device_policy: (Optional.) What policy to use when trying to run an
operation on a device with inputs which are not on that device.
When set to None, an appropriate value will be picked automatically.
The value picked may change between TensorFlow releases.
Defaults to tf.contrib.eager.DEVICE_PLACEMENT_SILENT_FOR_INT32.
Valid values:
- tfe.DEVICE_PLACEMENT_EXPLICIT: raises an error if the placement is
not correct.
- tfe.DEVICE_PLACEMENT_WARN: copies the tensors which are not on the
right device but raises a warning.
- tfe.DEVICE_PLACEMENT_SILENT: silently copies the tensors. This might
hide performance problems.
- tfe.DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies int32 tensors,
raising errors on the other ones.
execution_mode: (Optional.) Policy controlling how operations dispatched
are actually executed. When set to None, an appropriate value will be
picked automatically. The value picked may change between TensorFlow
releases.
Valid values:
- tf.contrib.eager.SYNC: executes each operation synchronously.
- tf.contrib.eager.ASYNC: executes each operation asynchronously. These
operations may return "non-ready" handles.
server_def: (Optional.) A tensorflow::ServerDef proto.
Enables execution on remote devices. GrpcServers need to be started by
creating an identical server_def to this, and setting the appropriate
task_indexes, so that the servers can communicate. It will then be
possible to execute operations on remote devices.
Raises:
ValueError: If execution_mode is not valid.
"""
self._eager_context = _EagerContext(config)
self._context_switches = _ContextSwitchStack(self.executing_eagerly())
self._context_handle = None
self._context_devices = None
self._post_execution_callbacks = []
self._config = config
self._seed = None
self._initialize_lock = threading.Lock()
self._device_policy = device_policy
if execution_mode not in (None, SYNC, ASYNC):
raise ValueError(
"execution_mode should be None/SYNC/ASYNC. Got %s" % execution_mode)
if execution_mode is None:
execution_mode = SYNC
self._execution_mode = execution_mode
self._server_def = server_def
# pylint: enable=redefined-outer-name
def _set_global_seed(self, seed):
"""Set a global eager mode seed for random ops."""
self._seed = seed
self._rng = random.Random(self._seed)
# Also clear the kernel cache, to reset any existing seeds
if self._context_handle is not None:
pywrap_tensorflow.TFE_ContextClearCaches(self._context_handle)
def _internal_operation_seed(self):
"""Returns a fake operation seed.
In eager mode, user shouldn't set or depend on operation seed.
Here, we generate a random seed based on global seed to make
operation's randomness different and depend on the global seed.
Returns:
A fake operation seed based on global seed.
"""
return self._rng.randint(0, _MAXINT32)
def _initialize_devices(self):
"""Helper to initialize devices."""
# Store list of devices
self._context_devices = []
device_list = pywrap_tensorflow.TFE_ContextListDevices(
self._context_handle)
try:
self._num_gpus = 0
for i in range(pywrap_tensorflow.TF_DeviceListCount(device_list)):
dev_name = pywrap_tensorflow.TF_DeviceListName(device_list, i)
self._context_devices.append(pydev.canonical_name(dev_name))
dev_type = pywrap_tensorflow.TF_DeviceListType(device_list, i)
if dev_type == "GPU":
self._num_gpus += 1
finally:
pywrap_tensorflow.TF_DeleteDeviceList(device_list)
def _initialize_handle_and_devices(self):
"""Initialize handle and devices."""
with self._initialize_lock:
if self._context_handle is not None:
return
assert self._context_devices is None
opts = pywrap_tensorflow.TFE_NewContextOptions()
try:
if self._config is not None:
config_str = self._config.SerializeToString()
pywrap_tensorflow.TFE_ContextOptionsSetConfig(opts, config_str)
if self._device_policy is not None:
pywrap_tensorflow.TFE_ContextOptionsSetDevicePlacementPolicy(
opts, self._device_policy)
if self._execution_mode == ASYNC:
pywrap_tensorflow.TFE_ContextOptionsSetAsync(opts, True)
self._context_handle = pywrap_tensorflow.TFE_NewContext(opts)
finally:
pywrap_tensorflow.TFE_DeleteContextOptions(opts)
if self._server_def is not None:
server_def_str = self._server_def.SerializeToString()
pywrap_tensorflow.TFE_ContextSetServerDef(self._context_handle, 600,
server_def_str)
self._initialize_devices()
def _clear_caches(self):
self.scalar_cache().clear()
self.ones_rank_cache().flush()
self.zeros_cache().flush()
def set_server_def(self, server_def, keep_alive_secs=600):
"""Allow setting a server_def on the context.
When a server def is replaced, it effectively clears a bunch of caches
within the context. If you attempt to use a tensor object that was pointing
to a tensor on the remote device, it will raise an error.
Args:
server_def: A tensorflow::ServerDef proto.
Enables execution on remote devices.
keep_alive_secs: Num. seconds after which the remote end will hang up.
As long as the client is still alive, the server state for the context
will be kept alive. If the client is killed (or there is some failure),
the server will clean up its context keep_alive_secs after the final RPC
it receives.
Raises:
ValueError: if server_def is None.
"""
if not server_def:
raise ValueError("server_def is None.")
if not self._context_handle:
self._server_def = server_def
else:
server_def_str = server_def.SerializeToString()
pywrap_tensorflow.TFE_ContextSetServerDef(self._context_handle,
keep_alive_secs, server_def_str)
# Clear all the caches in case there are remote tensors in them.
self._clear_caches()
self._initialize_devices()
@property
def _handle(self):
ctx = self._context_handle
if ctx is None:
self._initialize_handle_and_devices()
return self._context_handle
else:
return ctx
@property
def _devices(self):
devices = self._context_devices
if devices is None:
self._initialize_handle_and_devices()
return self._context_devices
else:
return devices
def __str__(self):
if self._context_handle is None:
return "Eager TensorFlow Context. Devices currently uninitialized."
else:
devices = self._devices
lines = ["Eager TensorFlow Context with %d devices" % (len(devices))]
for i, d in enumerate(devices):
lines.append(" Device %d: %s" % (i, d))
return "\n".join(lines)
@tf_contextlib.contextmanager
def _mode(self, mode):
"""A context manager to allow setting the mode to EAGER/GRAPH."""
ctx = self._eager_context
old_mode = ctx.mode
old_is_eager = ctx.is_eager
ctx.mode = mode
ctx.is_eager = mode == EAGER_MODE
if mode == EAGER_MODE:
# Entering graph mode does not provide us with sufficient information to
# record a context switch; graph-based context switches are only logged
# when a graph is registered as the default graph.
self.context_switches.push(False, eager_mode)
try:
yield
finally:
ctx.is_eager = old_is_eager
ctx.mode = old_mode
if mode == EAGER_MODE:
self.context_switches.pop()
@tf_contextlib.contextmanager
def rewriter_config(self, rewriter_config_=None):
"""A context manager to allow setting the grappler rewrite options.
Args:
rewriter_config_: A tensorflow.RewriterConfig proto object.
Yields:
Nothing.
Raises:
ValueError: if rewriter_config is not a tensorflow.RewriterConfig proto.
"""
if rewriter_config_ is None or not isinstance(
rewriter_config_, rewriter_config_pb2.RewriterConfig):
raise ValueError("Must pass a rewriter_config proto")
ctx = self._eager_context
old_rewriter_config = ctx.rewriter_config
ctx.rewriter_config = rewriter_config_.SerializeToString()
try:
yield
finally:
ctx.rewriter_config = old_rewriter_config
@property
def rewriter_config_string(self):
"""Returns the serialized rewriter_config for the current thread."""
return self._eager_context.rewriter_config
def executing_eagerly(self):
"""Returns True if current thread has eager executing enabled."""
return self._eager_context.is_eager
def scalar_cache(self):
"""Per-device cache for scalars."""
return self._eager_context.scalar_cache
def ones_rank_cache(self):
"""Per-device cache for scalars."""
return self._eager_context.ones_rank_cache
def zeros_cache(self):
"""Per-device cache for scalars."""
return self._eager_context.zeros_cache
@property
def scope_name(self):
"""Returns scope name for the current thread."""
return self._eager_context.scope_name
@scope_name.setter
def scope_name(self, s):
"""Sets scope name for the current thread."""
self._eager_context.scope_name = s
@property
def summary_writer_resource(self):
"""Returns summary writer resource."""
return self._eager_context.summary_writer_resource
@summary_writer_resource.setter
def summary_writer_resource(self, resource):
"""Sets summary writer resource."""
self._eager_context.summary_writer_resource = resource
@property
def device_name(self):
"""Returns the device name for the current thread."""
return self._eager_context.device_name
@property
def device_spec(self):
"""Returns the device spec for the current thread."""
return self._eager_context.device_spec
@tf_contextlib.contextmanager
def device(self, name):
"""Context-manager to force placement of operations and Tensors on a device.
Args:
name: Name of the device or None to get default placement.
Yields:
Nothing.
Raises:
ValueError: If name is not a string or is an invalid device name.
"""
devices = self._context_devices
if devices is None:
self._initialize_handle_and_devices()
devices = self._context_devices
eager_context = self._eager_context
old_device_name = eager_context.device_name
old_device_spec = eager_context.device_spec
cache_key = (old_device_name, name)
try:
new_device_name, new_device_spec = _device_parsing_cache[cache_key]
except TypeError:
# Error while trying to compute the cache key.
raise ValueError("Expecting a string device name. Got %s(%s)" %
(type(name), name))
except KeyError:
# Handle a cache miss.
if name is not None:
if not isinstance(name, str):
raise ValueError("Expecting a string device name. Got %s(%s)" %
(type(name), name))
device_spec = pydev.DeviceSpec.from_string(name)
if old_device_name:
new_device_spec = copy.copy(old_device_spec)
else:
new_device_spec = pydev.DeviceSpec.from_string(devices[0])
new_device_spec.merge_from(device_spec)
else:
new_device_spec = pydev.DeviceSpec.from_string("")
new_device_name = new_device_spec.to_string()
_device_parsing_cache[cache_key] = (new_device_name, new_device_spec)
try:
eager_context.device_name = new_device_name
eager_context.device_spec = new_device_spec
yield
finally:
eager_context.device_name = old_device_name
eager_context.device_spec = old_device_spec
def devices(self):
"""List of the names of devices available to execute operations."""
return self._devices
def get_execution_mode(self):
mode = self._eager_context.execution_mode
if mode is None:
mode = self._execution_mode
return mode
def set_execution_mode(self, mode):
"""Sets execution mode for current thread."""
if mode not in (None, SYNC, ASYNC):
raise ValueError(
"Execution mode should be None/SYNC/ASYNC. Got %s" % mode)
if mode is None:
mode = SYNC
self._eager_context.execution_mode = mode
pywrap_tensorflow.TFE_ContextSetAsyncForThread(self._handle, mode == ASYNC)
@tf_contextlib.contextmanager
def execution_mode(self, mode):
"""Context manager for setting execution mode for current thread."""
old_mode = self.get_execution_mode()
try:
self.set_execution_mode(mode)
yield
finally:
self.set_execution_mode(old_mode)
def async_wait(self):
"""Waits for ops dispatched in ASYNC mode to finish."""
pywrap_tensorflow.TFE_ContextAsyncWait(self._handle)
def async_clear_error(self):
"""Clears errors raised during ASYNC execution."""
pywrap_tensorflow.TFE_ContextAsyncClearError(self._handle)
def num_gpus(self):
"""The number of GPUs available to execute operations."""
self._initialize_handle_and_devices()
return self._num_gpus
def add_function(self, fn):
"""Add a function definition to the context.
Once added, the function (identified by its name) can be executed like any
other operation.
Args:
fn: A wrapped TF_Function (returned from TF_GraphToFunction_wrapper).
"""
pywrap_tensorflow.TFE_ContextAddFunction(self._handle, fn)
def add_function_def(self, fdef):
"""Add a function definition to the context.
Once added, the function (identified by its name) can be executed like any
other operation.
Args:
fdef: A FunctionDef protocol buffer message.
"""
fdef_string = fdef.SerializeToString()
pywrap_tensorflow.TFE_ContextAddFunctionDef(
self._handle, fdef_string, len(fdef_string))
def add_post_execution_callback(self, callback):
"""Add a post-execution callback to the context.
A post-execution callback is invoked immediately after an eager operation or
function has finished execution, providing access to the op's type, name,
input and output tensors. Multiple execution callbacks can be added, in
which case the callbacks will be invoked in the order in which they are
added.
Args:
callback: a callable of the signature
`f(op_type, op_name, attrs, inputs, outputs)`.
`op_type` is the type of the operation that was just executed (e.g.,
`MatMul`).
`op_name` is the name of the operation that was just executed. This
name is set by the client who created the operation and can be `None` if
it is unset.
`attrs` contains the attributes of the operation as a `tuple` of
alternating attribute names and attribute values.
`inputs` is the `list` of input `Tensor`(s) to the op.
`outputs` is the `list` of output `Tensor`(s) from the op.
Return value(s) from the callback are ignored.
"""
# TODO(cais): (b/64674139) Allow access to function-internal operations.
self._post_execution_callbacks.append(callback)
def clear_post_execution_callbacks(self):
"""Clear all post-execution callbacks added to the context."""
del self._post_execution_callbacks[:]
@property
def post_execution_callbacks(self):
"""Get the list of post-execution callbacks added to the context."""
return self._post_execution_callbacks
def enable_run_metadata(self):
"""Enables tracing of op execution via RunMetadata.
To retrieve the accumulated metadata call context.export_run_metadata()
and to stop tracing call context.disable_run_metadata().
"""
pywrap_tensorflow.TFE_ContextEnableRunMetadata(self._handle)
@tf_contextlib.contextmanager
def device_policy(self, policy):
handle = self._handle
old = pywrap_tensorflow.TFE_ContextGetDevicePlacementPolicy(handle)
pywrap_tensorflow.TFE_ContextSetThreadLocalDevicePlacementPolicy(
handle, policy)
try:
yield
finally:
pywrap_tensorflow.TFE_ContextSetThreadLocalDevicePlacementPolicy(
handle, old)
def disable_run_metadata(self):
"""Disables tracing of op execution via RunMetadata."""
if not self._context_handle:
return
pywrap_tensorflow.TFE_ContextDisableRunMetadata(self._context_handle)
def export_run_metadata(self):
"""Returns a RunMetadata proto with accumulated information.
The returned protocol buffer contains information since the most recent call
to either enable_run_metadata or export_run_metadata.
Returns:
A RunMetadata protocol buffer. Or None if not enabled.
"""
if not self._context_handle:
return None
with c_api_util.tf_buffer() as buffer_:
pywrap_tensorflow.TFE_ContextExportRunMetadata(
self._context_handle, buffer_)
proto_data = pywrap_tensorflow.TF_GetBuffer(buffer_)
run_metadata = config_pb2.RunMetadata()
run_metadata.ParseFromString(compat.as_bytes(proto_data))
return run_metadata
@property
def context_switches(self):
"""Returns a stack of context switches."""
return self._context_switches
def start_step(self):
pywrap_tensorflow.TFE_ContextStartStep(self._handle)
def end_step(self):
pywrap_tensorflow.TFE_ContextEndStep(self._handle)
_context = None
_context_lock = threading.Lock()
def _initialize_context():
global _context
with _context_lock:
if _context is None:
_context = Context()
def context():
"""Returns a singleton context object."""
if _context is None:
_initialize_context()
return _context
def context_safe():
"""Returns current context (or None if one hasn't been initialized)."""
return _context
def set_global_seed(seed):
"""Sets the eager mode seed."""
context()._set_global_seed(seed) # pylint: disable=protected-access
def global_seed():
"""Returns the eager mode seed."""
return context()._seed # pylint: disable=protected-access
def internal_operation_seed():
"""Returns the operation seed generated based on global seed."""
return context()._internal_operation_seed() # pylint: disable=protected-access
@tf_export("executing_eagerly")
def executing_eagerly():
"""Returns True if the current thread has eager execution enabled.
Eager execution is typically enabled via `tf.enable_eager_execution`,
but may also be enabled within the context of a Python function via
tf.contrib.eager.py_func.
"""
return context().executing_eagerly()
def in_eager_mode():
"""Use executing_eagerly() instead. This function will be removed."""
return executing_eagerly()
def graph_mode():
"""Context-manager to disable eager execution for the current thread."""
return context()._mode(GRAPH_MODE) # pylint: disable=protected-access
def eager_mode():
"""Context-manager to enable eager execution for the current thread."""
return context()._mode(EAGER_MODE) # pylint: disable=protected-access
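def _example_mode_toggle():
  """Hedged sketch (not part of the public API): shows how `eager_mode` and
  `graph_mode` flip what `executing_eagerly` reports for the current thread.
  """
  with eager_mode():
    assert executing_eagerly()
    with graph_mode():
      # Nested graph mode disables eager execution for this thread only.
      assert not executing_eagerly()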
# TODO(agarwal): get rid of this and use ops.name_scope instead.
@contextlib.contextmanager
def namescope(name):
"""ContextManager for creating hierarchical name scopes."""
ctx = context()
old_name = ctx.scope_name
ctx.scope_name = "%s/%s" % (old_name, name) if old_name else name
try:
yield
finally:
ctx.scope_name = old_name
def scope_name():
"""Name of the current scope."""
return context().scope_name
def device(name):
"""Context-manager to force placement of operations and Tensors on a device.
Example:
```python
with tfe.device('gpu:0'):
with tfe.device('cpu:0'):
shape = tf.constant([], dtype=tf.int32)
x = tf.truncated_normal(shape, tf.float32)
```
will ensure that the `shape` Tensor is on CPU but the `truncated_normal`
operation runs on GPU 0.
Args:
name: Name of the device (see context().devices()), or None to
perform automatic placement.
Returns:
Context manager for setting the device.
"""
return context().device(name)
def list_devices():
"""List the names of the available devices.
Returns:
Names of the available devices, as a `list`.
"""
return context().devices()
def set_execution_mode(mode):
"""Sets execution mode for the current thread."""
context().set_execution_mode(mode)
def execution_mode(mode):
"""Context manager for setting execution mode for current thread."""
return context().execution_mode(mode)
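def _example_async_dispatch():
  """Hedged sketch: dispatch ops asynchronously for the current thread,
  then block until they have all finished. Assumes eager execution is
  already enabled; `tf.matmul` here merely stands in for any eager op.
  """
  import tensorflow as tf  # assumption: used only to provide a sample op
  with execution_mode(ASYNC):
    x = tf.matmul([[2.0]], [[3.0]])  # may return a "non-ready" handle
    async_wait()                     # block until dispatched ops finish
  return x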
def async_wait():
"""Waits for ops dispatched in ASYNC mode to finish."""
return context().async_wait()
def async_clear_error():
"""Clears errors raised during ASYNC execution mode."""
return context().async_clear_error()
def num_gpus():
"""Get the number of available GPU devices.
Returns:
The number of available GPU devices.
"""
return context().num_gpus()
def enable_run_metadata():
"""Enables tracing of op execution via RunMetadata.
To retrieve the accumulated metadata call context.export_run_metadata()
and to stop tracing call context.disable_run_metadata().
"""
context().enable_run_metadata()
def disable_run_metadata():
"""Disables tracing of op execution via RunMetadata."""
context().disable_run_metadata()
def export_run_metadata():
"""Returns a RunMetadata proto with accumulated information.
The returned protocol buffer contains information since the most recent call
to either enable_run_metadata or export_run_metadata.
Returns:
A RunMetadata protocol buffer.
"""
return context().export_run_metadata()
def rewriter_config(rewriter_config_):
"""Context manager for setting the grappler rewrite config."""
return context().rewriter_config(rewriter_config_)
def set_server_def(server_def):
context().set_server_def(server_def)
# Not every user creates a Context via context.context()
# (for example, enable_eager_execution in python/framework/ops.py),
# but they do all import this file. Note that IS_IN_GRAPH_MODE and
# in_graph_mode are both parameterless functions.
def _tmp_in_graph_mode():
return not executing_eagerly()
is_in_graph_mode.IS_IN_GRAPH_MODE = _tmp_in_graph_mode
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""Hook for winrm remote execution."""
from typing import Optional
from winrm.protocol import Protocol
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
try:
from airflow.utils.platform import getuser
except ImportError:
from getpass import getuser
# TODO: Fixme please - I have too complex implementation
class WinRMHook(BaseHook):
"""
Hook for winrm remote execution using pywinrm.
:seealso: https://github.com/diyan/pywinrm/blob/master/winrm/protocol.py
:param ssh_conn_id: connection id from airflow Connections from where
all the required parameters can be fetched like username and password,
though priority is given to the params passed during init.
:type ssh_conn_id: str
:param endpoint: When not set, endpoint will be constructed like this:
'http://{remote_host}:{remote_port}/wsman'
:type endpoint: str
:param remote_host: Remote host to connect to. Ignored if `endpoint` is set.
:type remote_host: str
:param remote_port: Remote port to connect to. Ignored if `endpoint` is set.
:type remote_port: int
:param transport: transport type, one of 'plaintext' (default), 'kerberos', 'ssl', 'ntlm', 'credssp'
:type transport: str
:param username: username to connect to the remote_host
:type username: str
:param password: password of the username to connect to the remote_host
:type password: str
:param service: the service name, default is HTTP
:type service: str
:param keytab: the path to a keytab file if you are using one
:type keytab: str
:param ca_trust_path: Certification Authority trust path
:type ca_trust_path: str
:param cert_pem: client authentication certificate file path in PEM format
:type cert_pem: str
:param cert_key_pem: client authentication certificate key file path in PEM format
:type cert_key_pem: str
:param server_cert_validation: whether server certificate should be validated on
Python versions that support it; one of 'validate' (default), 'ignore'
:type server_cert_validation: str
:param kerberos_delegation: if True, TGT is sent to target server to
allow multiple hops
:type kerberos_delegation: bool
:param read_timeout_sec: maximum seconds to wait before an HTTP connect/read times out (default 30).
This value should be slightly higher than operation_timeout_sec,
as the server can block *at least* that long.
:type read_timeout_sec: int
:param operation_timeout_sec: maximum allowed time in seconds for any single wsman
HTTP operation (default 20). Note that operation timeouts while receiving output
(the only wsman operation that should take any significant time,
and where these timeouts are expected) will be silently retried indefinitely.
:type operation_timeout_sec: int
:param kerberos_hostname_override: the hostname to use for the kerberos exchange
(defaults to the hostname in the endpoint URL)
:type kerberos_hostname_override: str
:param message_encryption: Will encrypt the WinRM messages if set
and the transport auth supports message encryption. (Default 'auto')
:type message_encryption: str
:param credssp_disable_tlsv1_2: Whether to disable TLSv1.2 support and work with older
protocols like TLSv1.0, default is False
:type credssp_disable_tlsv1_2: bool
:param send_cbt: Will send the channel bindings over a HTTPS channel (Default: True)
:type send_cbt: bool
"""
def __init__(
self,
ssh_conn_id: Optional[str] = None,
endpoint: Optional[str] = None,
remote_host: Optional[str] = None,
remote_port: int = 5985,
transport: str = 'plaintext',
username: Optional[str] = None,
password: Optional[str] = None,
service: str = 'HTTP',
keytab: Optional[str] = None,
ca_trust_path: Optional[str] = None,
cert_pem: Optional[str] = None,
cert_key_pem: Optional[str] = None,
server_cert_validation: str = 'validate',
kerberos_delegation: bool = False,
read_timeout_sec: int = 30,
operation_timeout_sec: int = 20,
kerberos_hostname_override: Optional[str] = None,
message_encryption: Optional[str] = 'auto',
credssp_disable_tlsv1_2: bool = False,
send_cbt: bool = True,
) -> None:
super().__init__()
self.ssh_conn_id = ssh_conn_id
self.endpoint = endpoint
self.remote_host = remote_host
self.remote_port = remote_port
self.transport = transport
self.username = username
self.password = password
self.service = service
self.keytab = keytab
self.ca_trust_path = ca_trust_path
self.cert_pem = cert_pem
self.cert_key_pem = cert_key_pem
self.server_cert_validation = server_cert_validation
self.kerberos_delegation = kerberos_delegation
self.read_timeout_sec = read_timeout_sec
self.operation_timeout_sec = operation_timeout_sec
self.kerberos_hostname_override = kerberos_hostname_override
self.message_encryption = message_encryption
self.credssp_disable_tlsv1_2 = credssp_disable_tlsv1_2
self.send_cbt = send_cbt
self.client = None
self.winrm_protocol = None
def get_conn(self):
if self.client:
return self.client
self.log.debug('Creating WinRM client for conn_id: %s', self.ssh_conn_id)
if self.ssh_conn_id is not None:
conn = self.get_connection(self.ssh_conn_id)
if self.username is None:
self.username = conn.login
if self.password is None:
self.password = conn.password
if self.remote_host is None:
self.remote_host = conn.host
if conn.extra is not None:
extra_options = conn.extra_dejson
if "endpoint" in extra_options:
self.endpoint = str(extra_options["endpoint"])
if "remote_port" in extra_options:
self.remote_port = int(extra_options["remote_port"])
if "transport" in extra_options:
self.transport = str(extra_options["transport"])
if "service" in extra_options:
self.service = str(extra_options["service"])
if "keytab" in extra_options:
self.keytab = str(extra_options["keytab"])
if "ca_trust_path" in extra_options:
self.ca_trust_path = str(extra_options["ca_trust_path"])
if "cert_pem" in extra_options:
self.cert_pem = str(extra_options["cert_pem"])
if "cert_key_pem" in extra_options:
self.cert_key_pem = str(extra_options["cert_key_pem"])
if "server_cert_validation" in extra_options:
self.server_cert_validation = str(extra_options["server_cert_validation"])
if "kerberos_delegation" in extra_options:
self.kerberos_delegation = str(extra_options["kerberos_delegation"]).lower() == 'true'
if "read_timeout_sec" in extra_options:
self.read_timeout_sec = int(extra_options["read_timeout_sec"])
if "operation_timeout_sec" in extra_options:
self.operation_timeout_sec = int(extra_options["operation_timeout_sec"])
if "kerberos_hostname_override" in extra_options:
self.kerberos_hostname_override = str(extra_options["kerberos_hostname_override"])
if "message_encryption" in extra_options:
self.message_encryption = str(extra_options["message_encryption"])
if "credssp_disable_tlsv1_2" in extra_options:
self.credssp_disable_tlsv1_2 = (
str(extra_options["credssp_disable_tlsv1_2"]).lower() == 'true'
)
if "send_cbt" in extra_options:
self.send_cbt = str(extra_options["send_cbt"]).lower() == 'true'
if not self.remote_host:
raise AirflowException("Missing required param: remote_host")
        # Auto-detect the username from the system when it is not specified
if not self.username:
self.log.debug(
"username to WinRM to host: %s is not specified for connection id"
" %s. Using system's default provided by getpass.getuser()",
self.remote_host,
self.ssh_conn_id,
)
self.username = getuser()
# If endpoint is not set, then build a standard wsman endpoint from host and port.
if not self.endpoint:
self.endpoint = f'http://{self.remote_host}:{self.remote_port}/wsman'
try:
if self.password and self.password.strip():
self.winrm_protocol = Protocol(
endpoint=self.endpoint,
transport=self.transport,
username=self.username,
password=self.password,
service=self.service,
keytab=self.keytab,
ca_trust_path=self.ca_trust_path,
cert_pem=self.cert_pem,
cert_key_pem=self.cert_key_pem,
server_cert_validation=self.server_cert_validation,
kerberos_delegation=self.kerberos_delegation,
read_timeout_sec=self.read_timeout_sec,
operation_timeout_sec=self.operation_timeout_sec,
kerberos_hostname_override=self.kerberos_hostname_override,
message_encryption=self.message_encryption,
credssp_disable_tlsv1_2=self.credssp_disable_tlsv1_2,
send_cbt=self.send_cbt,
)
self.log.info("Establishing WinRM connection to host: %s", self.remote_host)
self.client = self.winrm_protocol.open_shell()
except Exception as error:
error_msg = f"Error connecting to host: {self.remote_host}, error: {error}"
self.log.error(error_msg)
            raise AirflowException(error_msg) from error
return self.client
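# Illustrative usage sketch (not part of the original hook). It assumes a
# password-based Airflow connection named 'winrm_default'; the connection id,
# transport and command are placeholders, and the pywinrm Protocol calls are
# used as documented upstream.
def _example_winrm_usage():
    hook = WinRMHook(ssh_conn_id='winrm_default', transport='ntlm')
    shell_id = hook.get_conn()  # opens a WinRM shell and returns its id
    command_id = hook.winrm_protocol.run_command(shell_id, 'ipconfig', ['/all'])
    stdout, stderr, return_code = hook.winrm_protocol.get_command_output(shell_id, command_id)
    hook.winrm_protocol.cleanup_command(shell_id, command_id)
    hook.winrm_protocol.close_shell(shell_id)
    return return_code, stdout, stderr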
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._virtual_network_rules_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_list_by_server_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualNetworkRulesOperations:
"""VirtualNetworkRulesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.sql.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def get(
self,
resource_group_name: str,
server_name: str,
virtual_network_rule_name: str,
**kwargs: Any
) -> "_models.VirtualNetworkRule":
"""Gets a virtual network rule.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param virtual_network_rule_name: The name of the virtual network rule.
:type virtual_network_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualNetworkRule, or the result of cls(response)
:rtype: ~azure.mgmt.sql.models.VirtualNetworkRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
server_name=server_name,
virtual_network_rule_name=virtual_network_rule_name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualNetworkRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/virtualNetworkRules/{virtualNetworkRuleName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
server_name: str,
virtual_network_rule_name: str,
parameters: "_models.VirtualNetworkRule",
**kwargs: Any
) -> Optional["_models.VirtualNetworkRule"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VirtualNetworkRule"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'VirtualNetworkRule')
request = build_create_or_update_request_initial(
resource_group_name=resource_group_name,
server_name=server_name,
virtual_network_rule_name=virtual_network_rule_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetworkRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/virtualNetworkRules/{virtualNetworkRuleName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
server_name: str,
virtual_network_rule_name: str,
parameters: "_models.VirtualNetworkRule",
**kwargs: Any
) -> AsyncLROPoller["_models.VirtualNetworkRule"]:
"""Creates or updates an existing virtual network rule.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param virtual_network_rule_name: The name of the virtual network rule.
:type virtual_network_rule_name: str
:param parameters: The requested virtual Network Rule Resource state.
:type parameters: ~azure.mgmt.sql.models.VirtualNetworkRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualNetworkRule or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.sql.models.VirtualNetworkRule]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
server_name=server_name,
virtual_network_rule_name=virtual_network_rule_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('VirtualNetworkRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/virtualNetworkRules/{virtualNetworkRuleName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
server_name: str,
virtual_network_rule_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
resource_group_name=resource_group_name,
server_name=server_name,
virtual_network_rule_name=virtual_network_rule_name,
subscription_id=self._config.subscription_id,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/virtualNetworkRules/{virtualNetworkRuleName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
server_name: str,
virtual_network_rule_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the virtual network rule with the given name.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param virtual_network_rule_name: The name of the virtual network rule.
:type virtual_network_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
server_name=server_name,
virtual_network_rule_name=virtual_network_rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/virtualNetworkRules/{virtualNetworkRuleName}'} # type: ignore
@distributed_trace
def list_by_server(
self,
resource_group_name: str,
server_name: str,
**kwargs: Any
) -> AsyncIterable["_models.VirtualNetworkRuleListResult"]:
"""Gets a list of virtual network rules in a server.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkRuleListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.sql.models.VirtualNetworkRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_server_request(
resource_group_name=resource_group_name,
server_name=server_name,
subscription_id=self._config.subscription_id,
template_url=self.list_by_server.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_server_request(
resource_group_name=resource_group_name,
server_name=server_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("VirtualNetworkRuleListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_server.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/virtualNetworkRules'} # type: ignore
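# Illustrative usage sketch (not part of the generated code). It assumes the
# async SqlManagementClient from azure.mgmt.sql.aio and azure-identity for
# credentials; the subscription id, resource names and subnet id are
# placeholders, and the model field name is taken from the service schema.
async def _example_virtual_network_rules_usage():
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.sql.aio import SqlManagementClient

    async with SqlManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
        poller = await client.virtual_network_rules.begin_create_or_update(
            "my-resource-group",
            "my-server",
            "my-vnet-rule",
            _models.VirtualNetworkRule(virtual_network_subnet_id="<subnet-resource-id>"),
        )
        rule = await poller.result()
        print(rule.name)
        async for existing_rule in client.virtual_network_rules.list_by_server(
            "my-resource-group", "my-server"
        ):
            print(existing_rule.name)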
|
|
# Copyright 2011 Denali Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume interface (1.1 extension).
"""
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
import six
from cinderclient import base
class Volume(base.Resource):
"""A volume is an extra block level storage to the OpenStack instances."""
def __repr__(self):
return "<Volume: %s>" % self.id
def delete(self):
"""Delete this volume."""
self.manager.delete(self)
def update(self, **kwargs):
"""Update the display_name or display_description for this volume."""
self.manager.update(self, **kwargs)
def attach(self, instance_uuid, mountpoint, mode='rw'):
"""Set attachment metadata.
:param instance_uuid: uuid of the attaching instance.
:param mountpoint: mountpoint on the attaching instance.
:param mode: the access mode
"""
return self.manager.attach(self, instance_uuid, mountpoint, mode)
def detach(self):
"""Clear attachment metadata."""
return self.manager.detach(self)
def reserve(self, volume):
"""Reserve this volume."""
return self.manager.reserve(self)
def unreserve(self, volume):
"""Unreserve this volume."""
return self.manager.unreserve(self)
def begin_detaching(self, volume):
"""Begin detaching volume."""
return self.manager.begin_detaching(self)
def roll_detaching(self, volume):
"""Roll detaching volume."""
return self.manager.roll_detaching(self)
def initialize_connection(self, volume, connector):
"""Initialize a volume connection.
:param connector: connector dict from nova.
"""
return self.manager.initialize_connection(self, connector)
def terminate_connection(self, volume, connector):
"""Terminate a volume connection.
:param connector: connector dict from nova.
"""
return self.manager.terminate_connection(self, connector)
def set_metadata(self, volume, metadata):
"""Set or Append metadata to a volume.
        :param volume: The :class:`Volume` to set metadata on.
        :param metadata: A dict of key/value pairs to set.
"""
return self.manager.set_metadata(self, metadata)
def upload_to_image(self, force, image_name, container_format,
disk_format):
"""Upload a volume to image service as an image."""
return self.manager.upload_to_image(self, force, image_name,
container_format, disk_format)
def force_delete(self):
"""Delete the specified volume ignoring its current state.
:param volume: The UUID of the volume to force-delete.
"""
self.manager.force_delete(self)
def reset_state(self, state):
"""Update the volume with the provided state."""
self.manager.reset_state(self, state)
def extend(self, volume, new_size):
"""Extend the size of the specified volume.
:param volume: The UUID of the volume to extend.
:param new_size: The desired size to extend volume to.
"""
self.manager.extend(self, new_size)
def migrate_volume(self, host, force_host_copy):
"""Migrate the volume to a new host."""
self.manager.migrate_volume(self, host, force_host_copy)
# def migrate_volume_completion(self, old_volume, new_volume, error):
# """Complete the migration of the volume."""
# self.manager.migrate_volume_completion(self, old_volume,
# new_volume, error)
def update_all_metadata(self, metadata):
"""Update all metadata of this volume."""
return self.manager.update_all_metadata(self, metadata)
def update_readonly_flag(self, volume, read_only):
"""Update the read-only access mode flag of the specified volume.
:param volume: The UUID of the volume to update.
:param read_only: The value to indicate whether to update volume to
read-only access mode.
"""
self.manager.update_readonly_flag(self, read_only)
class VolumeManager(base.ManagerWithFind):
"""
Manage :class:`Volume` resources.
"""
resource_class = Volume
def create(self, size, snapshot_id=None, source_volid=None,
display_name=None, display_description=None,
volume_type=None, user_id=None,
project_id=None, availability_zone=None,
metadata=None, imageRef=None, shareable=False):
"""
Creates a volume.
:param size: Size of volume in GB
:param snapshot_id: ID of the snapshot
:param display_name: Name of the volume
:param display_description: Description of the volume
:param volume_type: Type of volume
:param user_id: User id derived from context
:param project_id: Project id derived from context
:param availability_zone: Availability Zone to use
:param metadata: Optional metadata to set on volume creation
:param imageRef: reference to an image stored in glance
:param source_volid: ID of source volume to clone from
        :param shareable: Whether the volume can be attached more than once.
:rtype: :class:`Volume`
"""
if metadata is None:
volume_metadata = {}
else:
volume_metadata = metadata
body = {'volume': {'size': size,
'snapshot_id': snapshot_id,
'display_name': display_name,
'display_description': display_description,
'volume_type': volume_type,
'user_id': user_id,
'project_id': project_id,
'availability_zone': availability_zone,
'status': "creating",
'attach_status': "detached",
'metadata': volume_metadata,
'imageRef': imageRef,
'source_volid': source_volid,
'shareable': shareable,
}}
return self._create('/volumes', body, 'volume')
def get(self, volume_id):
"""
Get a volume.
:param volume_id: The ID of the volume to get.
:rtype: :class:`Volume`
"""
return self._get("/volumes/%s" % volume_id, "volume")
def list(self, detailed=True, search_opts=None):
"""
Get a list of all volumes.
:rtype: list of :class:`Volume`
"""
if search_opts is None:
search_opts = {}
qparams = {}
for opt, val in six.iteritems(search_opts):
if val:
qparams[opt] = val
query_string = "?%s" % urlencode(qparams) if qparams else ""
detail = ""
if detailed:
detail = "/detail"
return self._list("/volumes%s%s" % (detail, query_string),
"volumes")
def delete(self, volume):
"""
Delete a volume.
:param volume: The :class:`Volume` to delete.
"""
self._delete("/volumes/%s" % base.getid(volume))
def update(self, volume, **kwargs):
"""
Update the display_name or display_description for a volume.
:param volume: The :class:`Volume` to update.
"""
if not kwargs:
return
body = {"volume": kwargs}
self._update("/volumes/%s" % base.getid(volume), body)
def _action(self, action, volume, info=None, **kwargs):
"""
Perform a volume "action."
"""
body = {action: info}
self.run_hooks('modify_body_for_action', body, **kwargs)
url = '/volumes/%s/action' % base.getid(volume)
return self.api.client.post(url, body=body)
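    # Illustrative note (not part of the original module): _action() wraps the
    # volume-actions API by POSTing a single-key body to /volumes/<id>/action,
    # e.g. self._action('os-extend', volume, {'new_size': 2}) sends
    # {"os-extend": {"new_size": 2}}.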
def attach(self, volume, instance_uuid, mountpoint, mode='rw'):
"""
Set attachment metadata.
:param volume: The :class:`Volume` (or its ID)
you would like to attach.
:param instance_uuid: uuid of the attaching instance.
:param mountpoint: mountpoint on the attaching instance.
:param mode: the access mode.
"""
return self._action('os-attach',
volume,
{'instance_uuid': instance_uuid,
'mountpoint': mountpoint,
'mode': mode})
def detach(self, volume, attachment_uuid):
"""
Clear attachment metadata.
:param volume: The :class:`Volume` (or its ID)
you would like to detach.
:param attachment_uuid: uuid of the volume attachment.
"""
return self._action('os-detach', volume,
{'attachment_id': attachment_uuid})
def reserve(self, volume):
"""
Reserve this volume.
:param volume: The :class:`Volume` (or its ID)
you would like to reserve.
"""
return self._action('os-reserve', volume)
def unreserve(self, volume):
"""
Unreserve this volume.
:param volume: The :class:`Volume` (or its ID)
you would like to unreserve.
"""
return self._action('os-unreserve', volume)
def begin_detaching(self, volume):
"""
Begin detaching this volume.
:param volume: The :class:`Volume` (or its ID)
you would like to detach.
"""
return self._action('os-begin_detaching', volume)
def roll_detaching(self, volume):
"""
Roll detaching this volume.
:param volume: The :class:`Volume` (or its ID)
you would like to roll detaching.
"""
return self._action('os-roll_detaching', volume)
def initialize_connection(self, volume, connector):
"""
Initialize a volume connection.
:param volume: The :class:`Volume` (or its ID).
:param connector: connector dict from nova.
"""
return self._action('os-initialize_connection', volume,
{'connector': connector})[1]['connection_info']
def terminate_connection(self, volume, connector):
"""
Terminate a volume connection.
:param volume: The :class:`Volume` (or its ID).
:param connector: connector dict from nova.
"""
self._action('os-terminate_connection', volume,
{'connector': connector})
def set_metadata(self, volume, metadata):
"""
        Update or set a volume's metadata.
        :param volume: The :class:`Volume`.
        :param metadata: A dict of key/value pairs to set.
"""
body = {'metadata': metadata}
return self._create("/volumes/%s/metadata" % base.getid(volume),
body, "metadata")
def delete_metadata(self, volume, keys):
"""
        Delete the specified keys from a volume's metadata.
:param volume: The :class:`Volume`.
:param keys: A list of keys to be removed.
"""
for k in keys:
self._delete("/volumes/%s/metadata/%s" % (base.getid(volume), k))
def upload_to_image(self, volume, force, image_name, container_format,
disk_format):
"""
Upload volume to image service as image.
:param volume: The :class:`Volume` to upload.
"""
return self._action('os-volume_upload_image',
volume,
{'force': force,
'image_name': image_name,
'container_format': container_format,
'disk_format': disk_format})
def force_delete(self, volume):
return self._action('os-force_delete', base.getid(volume))
def reset_state(self, volume, state):
"""Update the provided volume with the provided state."""
return self._action('os-reset_status', volume, {'status': state})
def extend(self, volume, new_size):
return self._action('os-extend',
base.getid(volume),
{'new_size': new_size})
def get_encryption_metadata(self, volume_id):
"""
Retrieve the encryption metadata from the desired volume.
:param volume_id: the id of the volume to query
:return: a dictionary of volume encryption metadata
"""
return self._get("/volumes/%s/encryption" % volume_id)._info
def migrate_volume(self, volume, host, force_host_copy):
"""Migrate volume to new host.
:param volume: The :class:`Volume` to migrate
:param host: The destination host
:param force_host_copy: Skip driver optimizations
"""
return self._action('os-migrate_volume',
volume,
{'host': host, 'force_host_copy': force_host_copy})
def migrate_volume_completion(self, old_volume, new_volume, error):
"""Complete the migration from the old volume to the temp new one.
:param old_volume: The original :class:`Volume` in the migration
:param new_volume: The new temporary :class:`Volume` in the migration
:param error: Inform of an error to cause migration cleanup
"""
new_volume_id = base.getid(new_volume)
return self._action('os-migrate_volume_completion',
old_volume,
{'new_volume': new_volume_id, 'error': error})[1]
def update_all_metadata(self, volume, metadata):
"""Update all metadata of a volume.
:param volume: The :class:`Volume`.
        :param metadata: A dict of key/value pairs to update.
"""
body = {'metadata': metadata}
return self._update("/volumes/%s/metadata" % base.getid(volume),
body)
def update_readonly_flag(self, volume, flag):
return self._action('os-update_readonly_flag',
base.getid(volume),
{'readonly': flag})
def set_bootable(self, volume, flag):
return self._action('os-set_bootable',
base.getid(volume),
{'bootable': flag})
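# Illustrative usage sketch (not part of the original module). Credentials, the
# auth URL and the instance/attachment ids are placeholders, and the v1-style
# client constructor arguments are assumed.
def _example_volume_usage():
    from cinderclient import client

    cinder = client.Client('1', 'user', 'password', 'project',
                           'http://keystone.example.com:5000/v2.0')
    vol = cinder.volumes.create(size=1, display_name='example-volume')
    cinder.volumes.attach(vol, instance_uuid='<instance-uuid>', mountpoint='/dev/vdb')
    cinder.volumes.detach(vol, attachment_uuid='<attachment-uuid>')
    cinder.volumes.delete(vol)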
|
|
import copy
import json
import multiprocessing
import os
import random
import shutil
import string
import tempfile
from contextlib import contextmanager
from os import chdir, getcwd, mkdir
from os.path import exists
from subprocess import CalledProcessError, check_call, check_output
import pkgpanda.build.constants
import pkgpanda.build.src_fetchers
from pkgpanda import expand_require as expand_require_exceptions
from pkgpanda import Install, PackageId, Repository
from pkgpanda.actions import add_package_file
from pkgpanda.constants import install_root, PKG_DIR, RESERVED_UNIT_NAMES
from pkgpanda.exceptions import FetchError, PackageError, ValidationError
from pkgpanda.util import (check_forbidden_services, download_atomic,
hash_checkout, is_windows, load_json, load_string, logger,
make_directory, make_file, make_tar, remove_directory, rewrite_symlinks, write_json,
write_string)
class BuildError(Exception):
"""An error while building something."""
def __init__(self, msg: str):
self.msg = msg
def __str__(self):
return self.msg
class DockerCmd:
def __init__(self):
self.volumes = dict()
self.environment = dict()
self.container = str()
def run(self, name, cmd):
container_name = "{}-{}".format(
name, ''.join(
random.choice(string.ascii_lowercase) for _ in range(10)
)
)
docker = ["docker", "run", "--name={}".format(container_name)]
if is_windows:
# Default number of processes on Windows is 1, so bumping up to use all of them.
            # The default memory allowed on Windows is 1GB. Some packages (mesos is an example)
            # need about 3.5GB to compile a single file, therefore we allow about 4GB per CPU.
numprocs = os.environ.get('NUMBER_OF_PROCESSORS')
docker += ["-m", "{0}gb".format(int(numprocs) * 4), "--cpu-count", numprocs]
for host_path, container_path in self.volumes.items():
docker += ["-v", "{0}:{1}".format(host_path, container_path)]
for k, v in self.environment.items():
docker += ["-e", "{0}={1}".format(k, v)]
docker.append(self.container)
docker += cmd
check_call(docker)
DockerCmd.clean(container_name)
@staticmethod
def clean(name):
"""Cleans up the specified container"""
check_call(["docker", "rm", "-v", name])
def get_variants_from_filesystem(directory, extension):
results = set()
for filename in os.listdir(directory):
# Skip things that don't end in the extension
if not filename.endswith(extension):
continue
variant = filename[:-len(extension)]
# Empty name variant shouldn't have a `.` following it
if variant == '.':
raise BuildError("Invalid filename {}. The \"default\" variant file should be just {}".format(
filename, extension))
# Empty / default variant is represented as 'None'.
if variant == '':
variant = None
else:
            # The remaining name should end with '.' since we've stripped the extension.
if variant[-1] != '.':
raise BuildError("Invalid variant filename {}. Expected a '.' separating the "
"variant name and extension '{}'.".format(filename, extension))
variant = variant[:-1]
results.add(variant)
return results
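# Illustrative note (not part of the original module): for a directory containing
# "treeinfo.json" and "downstream.treeinfo.json", calling
# get_variants_from_filesystem(directory, 'treeinfo.json') returns {None, 'downstream'};
# the default, prefix-less variant is represented as None.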
def get_src_fetcher(src_info, cache_dir, working_directory):
try:
kind = src_info['kind']
if kind not in pkgpanda.build.src_fetchers.all_fetchers:
raise ValidationError("No known way to catch src with kind '{}'. Known kinds: {}".format(
kind,
pkgpanda.src_fetchers.all_fetchers.keys()))
args = {
'src_info': src_info,
'cache_dir': cache_dir
}
if src_info['kind'] in ['git_local', 'url', 'url_extract']:
args['working_directory'] = working_directory
return pkgpanda.build.src_fetchers.all_fetchers[kind](**args)
except ValidationError as ex:
raise BuildError("Validation error when fetching sources for package: {}".format(ex))
class TreeInfo:
ALLOWED_TREEINFO_KEYS = {'exclude', 'variants', 'core_package_list', 'bootstrap_package_list'}
def __init__(self, treeinfo_dict):
        # Reject any treeinfo keys outside of the allowed set.
        if not treeinfo_dict.keys() <= self.ALLOWED_TREEINFO_KEYS:
raise BuildError(
"treeinfo can only include the keys {}. Found {}".format(
self.ALLOWED_TREEINFO_KEYS, treeinfo_dict.keys()))
self.excludes = set(self._get_package_list(treeinfo_dict, 'exclude'))
self.core_package_list = set(self._get_package_list(treeinfo_dict, 'core_package_list', self.excludes))
self.bootstrap_package_list = set(self._get_package_list(
treeinfo_dict,
'bootstrap_package_list',
self.excludes))
# List of mandatory package variants to include in the buildinfo.
self.variants = treeinfo_dict.get('variants', dict())
if not isinstance(self.variants, dict):
raise BuildError("treeinfo variants must be a dictionary of package name to variant name")
@staticmethod
def _get_package_list(treeinfo_dict, key, excludes=None):
"""Return a list of package name strings from treeinfo_dict by key.
If key isn't present in treeinfo_dict, an empty list is returned.
"""
excludes = excludes or list()
package_list = treeinfo_dict.get(key, list())
# Validate package list.
if not isinstance(package_list, list):
raise BuildError("{} must be either null (meaning don't use) or a list of package names.".format(key))
for package_name in package_list:
if not isinstance(package_name, str):
raise BuildError("{} must be a list of strings. Found a {} with the value: {}".format(
key, type(package_name), package_name))
try:
PackageId.validate_name(package_name)
except ValidationError as ex:
raise BuildError("Invalid package name in {}: {}".format(key, package_name)) from ex
if package_name in excludes:
raise BuildError("Package found in both exclude and {}: {}".format(key, package_name))
return package_list
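# Illustrative note (not part of the original module), with hypothetical package
# names: TreeInfo({'exclude': ['legacy-pkg'], 'core_package_list': ['pkg-a', 'pkg-b'],
# 'variants': {'pkg-a': 'downstream'}}) yields excludes == {'legacy-pkg'},
# core_package_list == {'pkg-a', 'pkg-b'}, an empty bootstrap_package_list and
# variants == {'pkg-a': 'downstream'}.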
class PackageSet:
def __init__(self, variant, treeinfo, package_store):
self.variant = variant
self.all_packages = self.package_tuples_with_dependencies(
# If core_package_list is empty, default to all non-excluded packages.
treeinfo.core_package_list or (package_store.packages_by_name.keys() - treeinfo.excludes),
treeinfo,
package_store
)
self.validate_package_tuples(self.all_packages, treeinfo, package_store)
if treeinfo.bootstrap_package_list:
self.bootstrap_packages = self.package_tuples_with_dependencies(
treeinfo.bootstrap_package_list,
treeinfo,
package_store
)
self.validate_package_tuples(self.bootstrap_packages, treeinfo, package_store)
else:
self.bootstrap_packages = self.all_packages
# Validate bootstrap packages are a subset of all packages.
for package_name, variant in self.bootstrap_packages:
if (package_name, variant) not in self.all_packages:
raise BuildError("Bootstrap package {} (variant {}) not found in set of all packages".format(
package_name, pkgpanda.util.variant_name(variant)))
@staticmethod
def package_tuples_with_dependencies(package_names, treeinfo, package_store):
package_tuples = set((name, treeinfo.variants.get(name)) for name in set(package_names))
to_visit = list(package_tuples)
while to_visit:
package_tuple = to_visit.pop()
for require in package_store.get_buildinfo(*package_tuple)['requires']:
require_tuple = expand_require(require)
if require_tuple not in package_tuples:
to_visit.append(require_tuple)
package_tuples.add(require_tuple)
return package_tuples
@staticmethod
def validate_package_tuples(package_tuples, treeinfo, package_store):
# Validate that all packages have the variant specified in treeinfo.
for package_name, variant in package_tuples:
treeinfo_variant = treeinfo.variants.get(package_name)
if variant != treeinfo_variant:
raise BuildError(
"package {} is supposed to have variant {} included in the tree according to the treeinfo, "
"but variant {} was found.".format(
package_name,
pkgpanda.util.variant_name(treeinfo_variant),
pkgpanda.util.variant_name(variant),
)
)
# Validate that all needed packages are built and not excluded by treeinfo.
for package_name, variant in package_tuples:
if (package_name, variant) not in package_store.packages:
raise BuildError(
"package {} variant {} is needed (explicitly requested or as a requires) "
"but is not in the set of built packages.".format(
package_name,
pkgpanda.util.variant_name(variant),
)
)
if package_name in treeinfo.excludes:
raise BuildError("package {} is needed (explicitly requested or as a requires) "
"but is excluded according to the treeinfo.json.".format(package_name))
class PackageStore:
def __init__(self, packages_dir, repository_url):
self._builders = {}
self._repository_url = repository_url.rstrip('/') if repository_url is not None else None
self._packages_dir = packages_dir.rstrip('/')
# Load all possible packages, making a dictionary from (name, variant) -> buildinfo
self._packages = dict()
self._packages_by_name = dict()
self._package_folders = dict()
# Load an upstream if one exists
# TODO(cmaloney): Allow upstreams to have upstreams
self._package_cache_dir = self._packages_dir + "/cache/packages"
self._upstream_dir = self._packages_dir + "/cache/upstream/checkout"
self._upstream = None
self._upstream_package_dir = self._upstream_dir + "/packages"
# TODO(cmaloney): Make it so the upstream directory can be kept around
remove_directory(self._upstream_dir)
upstream_config = self._packages_dir + '/upstream.json'
if os.path.exists(upstream_config):
try:
self._upstream = get_src_fetcher(
load_optional_json(upstream_config),
self._packages_dir + '/cache/upstream',
packages_dir)
self._upstream.checkout_to(self._upstream_dir)
if os.path.exists(self._upstream_package_dir + "/upstream.json"):
raise Exception("Support for upstreams which have upstreams is not currently implemented")
except Exception as ex:
raise BuildError("Error fetching upstream: {}".format(ex))
# Iterate through the packages directory finding all packages. Note this package dir comes
# first, then we ignore duplicate definitions of the same package
package_dirs = [self._packages_dir]
if self._upstream:
package_dirs.append(self._upstream_package_dir)
for directory in package_dirs:
for name in os.listdir(directory):
package_folder = directory + '/' + name
# Ignore files / non-directories
if not os.path.isdir(package_folder):
continue
# If we've already found this package, it means 1+ versions have been defined. Use
# those and ignore everything in the upstreams.
if name in self._packages_by_name:
continue
if is_windows:
builder_folder = os.path.join(directory, name, 'docker.windows')
else:
builder_folder = os.path.join(directory, name, 'docker')
if os.path.exists(builder_folder):
self._builders[name] = builder_folder
# Search the directory for buildinfo.json files, record the variants
for variant in get_variants_from_filesystem(package_folder, 'buildinfo.json'):
# Only adding the default dictionary once we know we have a package.
self._packages_by_name.setdefault(name, dict())
buildinfo = load_buildinfo(package_folder, variant)
self._packages[(name, variant)] = buildinfo
self._packages_by_name[name][variant] = buildinfo
if name in self._package_folders:
assert self._package_folders[name] == package_folder
else:
self._package_folders[name] = package_folder
def get_package_folder(self, name):
return self._package_folders[name]
def get_bootstrap_cache_dir(self):
return self._packages_dir + "/cache/bootstrap"
def get_complete_cache_dir(self):
return self._packages_dir + "/cache/complete"
def get_buildinfo(self, name, variant):
return self._packages[(name, variant)]
def get_last_complete_set(self, variants):
def get_last_complete(variant):
complete_latest = (
self.get_complete_cache_dir() + '/' + pkgpanda.util.variant_prefix(variant) + 'complete.latest.json')
if not os.path.exists(complete_latest):
raise BuildError("No last complete found for variant {}. Expected to find {} to match "
"{}".format(pkgpanda.util.variant_name(variant), complete_latest,
pkgpanda.util.variant_prefix(variant) + 'treeinfo.json'))
return load_json(complete_latest)
result = {}
if variants is None:
# Get all defined variants.
requested_variants = self.list_trees()
else:
requested_variants = variants
for variant in requested_variants:
result[variant] = get_last_complete(variant)
return result
def get_last_build_filename(self, name, variant):
return self.get_package_cache_folder(name) + '/{}latest'.format(pkgpanda.util.variant_prefix(variant))
def get_package_path(self, pkg_id):
return self.get_package_cache_folder(pkg_id.name) + '/{}.tar.xz'.format(pkg_id)
def get_package_cache_folder(self, name):
directory = self._package_cache_dir + '/' + name
make_directory(directory)
return directory
def list_trees(self):
return get_variants_from_filesystem(self._packages_dir, 'treeinfo.json')
def get_package_set(self, variant):
return PackageSet(variant, TreeInfo(load_config_variant(self._packages_dir, variant, 'treeinfo.json')), self)
def get_all_package_sets(self):
return [self.get_package_set(variant) for variant in sorted(self.list_trees(), key=pkgpanda.util.variant_str)]
@property
def packages(self):
return self._packages
@property
def builders(self):
return self._builders.copy()
@property
def packages_by_name(self):
return self._packages_by_name
@property
def packages_dir(self):
return self._packages_dir
def try_fetch_by_id(self, pkg_id: PackageId):
if self._repository_url is None:
return False
# TODO(cmaloney): Use storage providers to download instead of open coding.
pkg_path = "{}.tar.xz".format(pkg_id)
url = self._repository_url + '/packages/{0}/{1}'.format(pkg_id.name, pkg_path)
try:
directory = self.get_package_cache_folder(pkg_id.name)
# TODO(cmaloney): Move to some sort of logging mechanism?
print("Attempting to download", pkg_id, "from", url, "to", directory)
download_atomic(directory + '/' + pkg_path, url, directory)
assert os.path.exists(directory + '/' + pkg_path)
return directory + '/' + pkg_path
except FetchError:
return False
def try_fetch_bootstrap_and_active(self, bootstrap_id):
if self._repository_url is None:
return False
try:
bootstrap_name = '{}.bootstrap.tar.xz'.format(bootstrap_id)
active_name = '{}.active.json'.format(bootstrap_id)
# TODO(cmaloney): Use storage providers to download instead of open coding.
bootstrap_url = self._repository_url + '/bootstrap/' + bootstrap_name
active_url = self._repository_url + '/bootstrap/' + active_name
print("Attempting to download", bootstrap_name, "from", bootstrap_url)
dest_dir = self.get_bootstrap_cache_dir()
# Normalize to no trailing slash for repository_url
download_atomic(dest_dir + '/' + bootstrap_name, bootstrap_url, self._packages_dir)
print("Attempting to download", active_name, "from", active_url)
download_atomic(dest_dir + '/' + active_name, active_url, self._packages_dir)
return True
except FetchError:
return False
def expand_require(require):
try:
return expand_require_exceptions(require)
except ValidationError as ex:
raise BuildError(str(ex)) from ex
def get_docker_id(docker_name):
return check_output(["docker", "inspect", "-f", "{{ .Id }}", docker_name]).decode('utf-8').strip()
def hash_files_in_folder(directory):
"""Given a relative path, hashes all files inside that folder and subfolders
Returns a dictionary from filename to the hash of that file. If that whole
dictionary is hashed, you get a hash of all the contents of the folder.
This is split out from calculating the whole folder hash so that the
behavior in different walking corner cases can be more easily tested.
"""
assert not directory.startswith('/'), \
"For the hash to be reproducible on other machines relative paths must always be used. " \
"Got path: {}".format(directory)
directory = directory.rstrip('/')
file_hash_dict = {}
# TODO(cmaloney): Disallow symlinks as they're hard to hash, people can symlink / copy in their
# build steps if needed.
for root, dirs, filenames in os.walk(directory):
assert not root.startswith('/')
for name in filenames:
path = root + '/' + name
base = path[len(directory) + 1:]
file_hash_dict[base] = pkgpanda.util.sha1(path)
        # If the directory has files or folders inside of it, it is picked up implicitly through
        # them. If it contains nothing it would otherwise not be recorded, but its existence is
        # important, so add it with a hash value that sha1 can never produce (the empty string).
if len(filenames) == 0 and len(dirs) == 0:
path = root[len(directory) + 1:]
# Empty path means it is the root directory, in which case we want no entries, not a
# single entry "": ""
if path:
file_hash_dict[root[len(directory) + 1:]] = ""
return file_hash_dict
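# Illustrative note (not part of the original module): for a relative directory
# laid out as pkg/buildinfo.json plus an empty subdirectory pkg/extra/,
# hash_files_in_folder('pkg') returns {'buildinfo.json': '<sha1 of the file>',
# 'extra': ''}: files map to their sha1 and empty directories to the empty string.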
@contextmanager
def as_cwd(path):
start_dir = getcwd()
chdir(path)
yield
chdir(start_dir)
def hash_folder_abs(directory, work_dir):
assert directory.startswith(work_dir), "directory must be inside work_dir: {} {}".format(directory, work_dir)
assert not work_dir[-1] == '/', "This code assumes no trailing slash on the work_dir"
with as_cwd(work_dir):
return hash_folder(directory[len(work_dir) + 1:])
def hash_folder(directory):
return hash_checkout(hash_files_in_folder(directory))
# Try to read json from the given file. If it is an empty file, then return an
# empty json dictionary.
def load_optional_json(filename):
try:
with open(filename) as f:
text = f.read().strip()
if text:
return json.loads(text)
return {}
except OSError as ex:
raise BuildError("Failed to open JSON file {}: {}".format(filename, ex))
except ValueError as ex:
raise BuildError("Unable to parse json in {}: {}".format(filename, ex))
def load_config_variant(directory, variant, extension):
assert directory[-1] != '/'
return load_optional_json(directory + '/' + pkgpanda.util.variant_prefix(variant) + extension)
def load_buildinfo(path, variant):
buildinfo = load_config_variant(path, variant, 'buildinfo.json')
# Fill in default / guaranteed members so code everywhere doesn't have to guard around it.
default_build_script = 'build'
if is_windows:
default_build_script = 'build.ps1'
buildinfo.setdefault('build_script', pkgpanda.util.variant_prefix(variant) + default_build_script)
buildinfo.setdefault('docker', 'dcos/dcos-builder:dcos-builder_dockerdir-latest')
buildinfo.setdefault('environment', dict())
buildinfo.setdefault('requires', list())
buildinfo.setdefault('state_directory', False)
return buildinfo
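# Illustrative note (not part of the original module): on a non-Windows host,
# load_buildinfo('packages/pkg-a', None) with an empty buildinfo.json returns the
# defaults above: {'build_script': 'build',
# 'docker': 'dcos/dcos-builder:dcos-builder_dockerdir-latest', 'environment': {},
# 'requires': [], 'state_directory': False}. The 'packages/pkg-a' path is a placeholder.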
def make_bootstrap_tarball(package_store, packages, variant):
# Convert filenames to package ids
pkg_ids = list()
for pkg_path in packages:
# Get the package id from the given package path
filename = os.path.basename(pkg_path)
if not filename.endswith(".tar.xz"):
raise BuildError("Packages must be packaged / end with a .tar.xz. Got {}".format(filename))
pkg_id = filename[:-len(".tar.xz")]
pkg_ids.append(pkg_id)
bootstrap_cache_dir = package_store.get_bootstrap_cache_dir()
    # Output filenames are <bootstrap_id>.bootstrap.tar.xz and <bootstrap_id>.active.json,
    # where bootstrap_id is the hash of the package id list.
bootstrap_id = hash_checkout(pkg_ids)
latest_name = "{}/{}bootstrap.latest".format(bootstrap_cache_dir, pkgpanda.util.variant_prefix(variant))
output_name = bootstrap_cache_dir + '/' + bootstrap_id + '.'
# bootstrap tarball = <sha1 of packages in tarball>.bootstrap.tar.xz
bootstrap_name = "{}bootstrap.tar.xz".format(output_name)
active_name = "{}active.json".format(output_name)
def mark_latest():
# Ensure latest is always written
write_string(latest_name, bootstrap_id)
print("bootstrap: {}".format(bootstrap_name))
print("active: {}".format(active_name))
print("latest: {}".format(latest_name))
return bootstrap_id
    if os.path.exists(bootstrap_name):
print("Bootstrap already up to date, not recreating")
return mark_latest()
make_directory(bootstrap_cache_dir)
# Try downloading.
if package_store.try_fetch_bootstrap_and_active(bootstrap_id):
print("Bootstrap already up to date, Not recreating. Downloaded from repository-url.")
return mark_latest()
print("Unable to download from cache. Building.")
print("Creating bootstrap tarball for variant {}".format(variant))
work_dir = tempfile.mkdtemp(prefix='mkpanda_bootstrap_tmp')
def make_abs(path):
return os.path.join(work_dir, path)
pkgpanda_root = make_abs("opt/mesosphere")
repository = Repository(os.path.join(pkgpanda_root, "packages"))
# Fetch all the packages to the root
for pkg_path in packages:
filename = os.path.basename(pkg_path)
pkg_id = filename[:-len(".tar.xz")]
def local_fetcher(id, target):
shutil.unpack_archive(pkg_path, target, "gztar")
repository.add(local_fetcher, pkg_id, False)
# Activate the packages inside the repository.
# Do generate dcos.target.wants inside the root so that we don't
# try messing with /etc/systemd/system.
install = Install(
root=pkgpanda_root,
config_dir=None,
rooted_systemd=True,
manage_systemd=False,
block_systemd=True,
fake_path=True,
skip_systemd_dirs=True,
manage_users=False,
manage_state_dir=False)
install.activate(repository.load_packages(pkg_ids))
# Mark the tarball as a bootstrap tarball/filesystem so that
# dcos-setup.service will fire.
make_file(make_abs("opt/mesosphere/bootstrap"))
# Write out an active.json for the bootstrap tarball
write_json(active_name, pkg_ids)
# Rewrite all the symlinks to point to /opt/mesosphere
rewrite_symlinks(work_dir, work_dir, "/")
make_tar(bootstrap_name, pkgpanda_root)
remove_directory(work_dir)
# Update latest last so that we don't ever use partially-built things.
write_string(latest_name, bootstrap_id)
print("Built bootstrap")
return mark_latest()
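# Illustrative note (not part of the original module), with a hypothetical
# bootstrap id: for the default (None) variant make_bootstrap_tarball() writes
#   <bootstrap cache dir>/<bootstrap_id>.bootstrap.tar.xz
#   <bootstrap cache dir>/<bootstrap_id>.active.json
#   <bootstrap cache dir>/bootstrap.latest        (contains the bootstrap id)
# A named variant only prefixes the latest marker, e.g. downstream.bootstrap.latest.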
def build_tree_variants(package_store, mkbootstrap):
""" Builds all possible tree variants in a given package store
"""
result = dict()
tree_variants = get_variants_from_filesystem(package_store.packages_dir, 'treeinfo.json')
if len(tree_variants) == 0:
raise Exception('No treeinfo.json can be found in {}'.format(package_store.packages_dir))
for variant in tree_variants:
result[variant] = pkgpanda.build.build_tree(package_store, mkbootstrap, variant)
return result
def build_tree(package_store, mkbootstrap, tree_variants):
"""Build packages and bootstrap tarballs for one or all tree variants.
Returns a dict mapping tree variants to bootstrap IDs.
    If tree_variants is None or empty, builds all available tree variants.
    """
    # TODO(cmaloney): Add support for circular dependencies. They are doable as
    # long as there is a pre-built version of enough of the packages.
    # TODO(cmaloney): Make it so when we're building a treeinfo which has an
    # explicit package list we don't build all the other packages.
build_order = list()
visited = set()
built = set()
def visit(pkg_tuple: tuple):
"""Add a package and its requires to the build order.
Raises AssertionError if pkg_tuple is in the set of visited packages.
If the package has any requires, they're recursively visited and added
to the build order depth-first. Then the package itself is added.
"""
# Visit the node for the first (and only) time.
assert pkg_tuple not in visited
visited.add(pkg_tuple)
# Ensure all dependencies are built. Sorted for stability.
# Requirements may be either strings or dicts, so we convert them all to (name, variant) tuples before sorting.
for require_tuple in sorted(expand_require(r) for r in package_store.packages[pkg_tuple]['requires']):
# If the dependency has already been built, we can move on.
if require_tuple in built:
continue
# If the dependency has not been built but has been visited, then
# there's a cycle in the dependency graph.
if require_tuple in visited:
raise BuildError("Circular dependency. Circular link {0} -> {1}".format(pkg_tuple, require_tuple))
if PackageId.is_id(require_tuple[0]):
raise BuildError("Depending on a specific package id is not supported. Package {} "
"depends on {}".format(pkg_tuple, require_tuple))
if require_tuple not in package_store.packages:
                raise BuildError("Package {0} requires {1}, which is not buildable from the tree.".format(
                    pkg_tuple, require_tuple))
# Add the dependency (after its dependencies, if any) to the build
# order.
visit(require_tuple)
build_order.append(pkg_tuple)
built.add(pkg_tuple)
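    # For example: if package A requires B and B requires C, visit((A, None))
    # appends (C, None), then (B, None), then (A, None) to build_order, so every
    # package is built after the packages it depends on.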
    # Can't compare None with a string, so sort on (name, variant-is-None, variant-or-""):
    # the boolean flag handles the None case and "" stands in for None in the last field.
def key_func(elem):
return elem[0], elem[1] is None, elem[1] or ""
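    # For example: sorted([("pkg", None), ("pkg", "foo")], key=key_func) yields
    # [("pkg", "foo"), ("pkg", None)], i.e. named variants of a package sort
    # before its <default> (None) variant.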
def visit_packages(package_tuples):
for pkg_tuple in sorted(package_tuples, key=key_func):
if pkg_tuple in visited:
continue
visit(pkg_tuple)
if tree_variants:
package_sets = [package_store.get_package_set(v) for v in tree_variants]
else:
package_sets = package_store.get_all_package_sets()
with logger.scope("resolve package graph"):
# Build all required packages for all tree variants.
for package_set in package_sets:
visit_packages(package_set.all_packages)
built_packages = dict()
for (name, variant) in build_order:
built_packages.setdefault(name, dict())
# Run the build, store the built package path for later use.
# TODO(cmaloney): Only build the requested variants, rather than all variants.
built_packages[name][variant] = build(
package_store,
name,
variant,
True)
# Build bootstrap tarballs for all tree variants.
def make_bootstrap(package_set):
with logger.scope("Making bootstrap variant: {}".format(pkgpanda.util.variant_name(package_set.variant))):
package_paths = list()
for name, pkg_variant in package_set.bootstrap_packages:
package_paths.append(built_packages[name][pkg_variant])
if mkbootstrap:
return make_bootstrap_tarball(
package_store,
list(sorted(package_paths)),
package_set.variant)
    # Build bootstraps and package lists for all variants.
# TODO(cmaloney): Allow distinguishing between "build all" and "build the default one".
complete_cache_dir = package_store.get_complete_cache_dir()
make_directory(complete_cache_dir)
results = {}
for package_set in package_sets:
info = {
'bootstrap': make_bootstrap(package_set),
'packages': sorted(
load_string(package_store.get_last_build_filename(*pkg_tuple))
for pkg_tuple in package_set.all_packages)}
write_json(
complete_cache_dir + '/' + pkgpanda.util.variant_prefix(package_set.variant) + 'complete.latest.json',
info)
results[package_set.variant] = info
return results
def assert_no_duplicate_keys(lhs, rhs):
if len(lhs.keys() & rhs.keys()) != 0:
print("ASSERTION FAILED: Duplicate keys between {} and {}".format(lhs, rhs))
assert len(lhs.keys() & rhs.keys()) == 0
# Find all build variants and build them
def build_package_variants(package_store, name, clean_after_build=True, recursive=False):
# Find the packages dir / root of the packages tree, and create a PackageStore
results = dict()
for variant in package_store.packages_by_name[name].keys():
results[variant] = build(
package_store,
name,
variant,
clean_after_build=clean_after_build,
recursive=recursive)
return results
class IdBuilder():
def __init__(self, buildinfo):
self._start_keys = set(buildinfo.keys())
self._buildinfo = copy.deepcopy(buildinfo)
self._taken = set()
def _check_no_key(self, field):
if field in self._buildinfo:
raise BuildError("Key {} shouldn't be in buildinfo, but was".format(field))
def add(self, field, value):
self._check_no_key(field)
self._buildinfo[field] = value
def has(self, field):
return field in self._buildinfo
def take(self, field):
self._taken.add(field)
return self._buildinfo[field]
def replace(self, taken_field, new_field, new_value):
assert taken_field in self._buildinfo
self._check_no_key(new_field)
del self._buildinfo[taken_field]
self._buildinfo[new_field] = new_value
self._taken.add(new_field)
def update(self, field, new_value):
assert field in self._buildinfo
self._buildinfo[field] = new_value
def get_build_ids(self):
# If any keys are left in the buildinfo, error that there were unused keys
remaining_keys = self._start_keys - self._taken
if remaining_keys:
raise BuildError("ERROR: Unknown keys {} in buildinfo.json".format(remaining_keys))
return self._buildinfo
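# A minimal usage sketch for IdBuilder (illustrative only; the real buildinfo
# handling happens in _build() below):
#
#   builder = IdBuilder({"docker": "ubuntu:14.04.4"})
#   builder.add("name", "mypkg")           # new key, must not already exist
#   docker_name = builder.take("docker")   # read a key and mark it consumed
#   builder.update("docker", docker_id)    # overwrite it with the resolved id
#   build_ids = builder.get_build_ids()    # raises BuildError on unconsumed keys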
def build(package_store: PackageStore, name: str, variant, clean_after_build, recursive=False):
msg = "Building package {} variant {}".format(name, pkgpanda.util.variant_name(variant))
with logger.scope(msg):
return _build(package_store, name, variant, clean_after_build, recursive)
def _build(package_store, name, variant, clean_after_build, recursive):
assert isinstance(package_store, PackageStore)
tmpdir = tempfile.TemporaryDirectory(prefix="pkgpanda_repo")
repository = Repository(tmpdir.name)
package_dir = package_store.get_package_folder(name)
def src_abs(name):
return package_dir + '/' + name
def cache_abs(filename):
return package_store.get_package_cache_folder(name) + '/' + filename
# Build pkginfo over time, translating fields from buildinfo.
pkginfo = {}
# Build up the docker command arguments over time, translating fields as needed.
cmd = DockerCmd()
assert (name, variant) in package_store.packages, \
"Programming error: name, variant should have been validated to be valid before calling build()."
builder = IdBuilder(package_store.get_buildinfo(name, variant))
final_buildinfo = dict()
builder.add('name', name)
builder.add('variant', pkgpanda.util.variant_str(variant))
# Convert single_source -> sources
if builder.has('sources'):
if builder.has('single_source'):
raise BuildError('Both sources and single_source cannot be specified at the same time')
sources = builder.take('sources')
elif builder.has('single_source'):
sources = {name: builder.take('single_source')}
builder.replace('single_source', 'sources', sources)
else:
builder.add('sources', {})
sources = dict()
print("NOTICE: No sources specified")
final_buildinfo['sources'] = sources
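    # Illustrative example: a buildinfo.json containing
    #   {"single_source": {...}}
    # has been rewritten above into the equivalent
    #   {"sources": {"<package name>": {...}}}
    # so the rest of the build only has to handle the 'sources' form.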
# Construct the source fetchers, gather the checkout ids from them
checkout_ids = dict()
fetchers = dict()
try:
for src_name, src_info in sorted(sources.items()):
# TODO(cmaloney): Switch to a unified top level cache directory shared by all packages
cache_dir = package_store.get_package_cache_folder(name) + '/' + src_name
make_directory(cache_dir)
fetcher = get_src_fetcher(src_info, cache_dir, package_dir)
fetchers[src_name] = fetcher
checkout_ids[src_name] = fetcher.get_id()
except ValidationError as ex:
raise BuildError("Validation error when fetching sources for package: {}".format(ex))
for src_name, checkout_id in checkout_ids.items():
# NOTE: single_source buildinfo was expanded above so the src_name is
# always correct here.
        # Make sure we never accidentally overwrite something which might be
        # important. Fields should match if specified (and that should be
        # tested at some point). For now, disallowing identical keys saves hassle.
assert_no_duplicate_keys(checkout_id, final_buildinfo['sources'][src_name])
final_buildinfo['sources'][src_name].update(checkout_id)
# Add the sha1 of the buildinfo.json + build file to the build ids
builder.update('sources', checkout_ids)
build_script_file = builder.take('build_script')
# TODO(cmaloney): Change dest name to build_script_sha1
builder.replace('build_script', 'build', pkgpanda.util.sha1(src_abs(build_script_file)))
builder.add('pkgpanda_version', pkgpanda.build.constants.version)
extra_dir = src_abs("extra")
# Add the "extra" folder inside the package as an additional source if it
# exists
if os.path.exists(extra_dir):
extra_id = hash_folder_abs(extra_dir, package_dir)
builder.add('extra_source', extra_id)
final_buildinfo['extra_source'] = extra_id
# Figure out the docker name.
docker_name = builder.take('docker')
cmd.container = docker_name
# Add the id of the docker build environment to the build_ids.
try:
docker_id = get_docker_id(docker_name)
except CalledProcessError:
# docker pull the container and try again
check_call(['docker', 'pull', docker_name])
docker_id = get_docker_id(docker_name)
builder.update('docker', docker_id)
# TODO(cmaloney): The environment variables should be generated during build
# not live in buildinfo.json.
pkginfo['environment'] = builder.take('environment')
    # Whether pkgpanda should make sure a `/var/lib` state directory is available on the host.
pkginfo['state_directory'] = builder.take('state_directory')
if pkginfo['state_directory'] not in [True, False]:
raise BuildError("state_directory in buildinfo.json must be a boolean `true` or `false`")
username = None
if builder.has('username'):
username = builder.take('username')
if not isinstance(username, str):
raise BuildError("username in buildinfo.json must be either not set (no user for this"
" package), or a user name string")
try:
pkgpanda.UserManagement.validate_username(username)
except ValidationError as ex:
raise BuildError("username in buildinfo.json didn't meet the validation rules. {}".format(ex))
pkginfo['username'] = username
group = None
if builder.has('group'):
group = builder.take('group')
if not isinstance(group, str):
raise BuildError("group in buildinfo.json must be either not set (use default group for this user)"
", or group must be a string")
try:
pkgpanda.UserManagement.validate_group_name(group)
except ValidationError as ex:
raise BuildError("group in buildinfo.json didn't meet the validation rules. {}".format(ex))
pkginfo['group'] = group
# Packages need directories inside the fake install root (otherwise docker
# will try making the directories on a readonly filesystem), so build the
# install root now, and make the package directories in it as we go.
install_dir = tempfile.mkdtemp(prefix="pkgpanda-")
active_packages = list()
active_package_ids = set()
active_package_variants = dict()
auto_deps = set()
# Final package has the same requires as the build.
requires = builder.take('requires')
pkginfo['requires'] = requires
if builder.has("sysctl"):
pkginfo["sysctl"] = builder.take("sysctl")
    # TODO(cmaloney): Pull generating the full set of requires out into a function.
to_check = copy.deepcopy(requires)
if type(to_check) != list:
raise BuildError("`requires` in buildinfo.json must be an array of dependencies.")
while to_check:
requires_info = to_check.pop(0)
requires_name, requires_variant = expand_require(requires_info)
if requires_name in active_package_variants:
# TODO(cmaloney): If one package depends on the <default>
# variant of a package and 1+ others depends on a non-<default>
# variant then update the dependency to the non-default variant
# rather than erroring.
if requires_variant != active_package_variants[requires_name]:
# TODO(cmaloney): Make this contain the chains of
# dependencies which contain the conflicting packages.
# a -> b -> c -> d {foo}
# e {bar} -> d {baz}
                raise BuildError(
                    "Dependency on multiple variants of the same package {}. variants: {} {}".format(
                        requires_name,
                        requires_variant,
                        active_package_variants[requires_name]))
            # The package {requires_name, requires_variant} is already a
            # dependency; don't process it again, move on to the next one.
continue
active_package_variants[requires_name] = requires_variant
# Figure out the last build of the dependency, add that as the
# fully expanded dependency.
requires_last_build = package_store.get_last_build_filename(requires_name, requires_variant)
if not os.path.exists(requires_last_build):
if recursive:
# Build the dependency
build(package_store, requires_name, requires_variant, clean_after_build, recursive)
else:
raise BuildError("No last build file found for dependency {} variant {}. Rebuild "
"the dependency".format(requires_name, requires_variant))
try:
pkg_id_str = load_string(requires_last_build)
auto_deps.add(pkg_id_str)
pkg_buildinfo = package_store.get_buildinfo(requires_name, requires_variant)
pkg_requires = pkg_buildinfo['requires']
pkg_path = repository.package_path(pkg_id_str)
pkg_tar = pkg_id_str + '.tar.xz'
if not os.path.exists(package_store.get_package_cache_folder(requires_name) + '/' + pkg_tar):
                raise BuildError(
                    "The build tarball {} referred to by the last_build file of the dependency {} "
                    "variant {} doesn't exist. Rebuild the dependency.".format(
                        pkg_tar,
                        requires_name,
                        requires_variant))
active_package_ids.add(pkg_id_str)
# Mount the package into the docker container.
cmd.volumes[pkg_path] = install_root + "/packages/{}:ro".format(pkg_id_str)
os.makedirs(os.path.join(install_dir, "packages/{}".format(pkg_id_str)))
# Add the dependencies of the package to the set which will be
# activated.
# TODO(cmaloney): All these 'transitive' dependencies shouldn't
# be available to the package being built, only what depends on
# them directly.
to_check += pkg_requires
except ValidationError as ex:
raise BuildError("validating package needed as dependency {0}: {1}".format(requires_name, ex)) from ex
except PackageError as ex:
raise BuildError("loading package needed as dependency {0}: {1}".format(requires_name, ex)) from ex
# Add requires to the package id, calculate the final package id.
# NOTE: active_packages isn't fully constructed here since we lazily load
# packages not already in the repository.
builder.update('requires', list(active_package_ids))
version_extra = None
if builder.has('version_extra'):
version_extra = builder.take('version_extra')
build_ids = builder.get_build_ids()
version_base = hash_checkout(build_ids)
version = None
if builder.has('version_extra'):
version = "{0}-{1}".format(version_extra, version_base)
else:
version = version_base
pkg_id = PackageId.from_parts(name, version)
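    # Illustrative example (assuming pkgpanda's "<name>--<version>" id format):
    # with version_extra "1.2.3" and a version_base hash of "abc123...", the
    # version becomes "1.2.3-abc123..." and the package id "mypkg--1.2.3-abc123...".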
    # Everything must have been extracted by now. If it wasn't, get_build_ids()
    # would have raised a hard error about the unused key, and the value would
    # not have been included in the calculation of the PackageId.
builder = None
    # Save the build_ids. Useful for verifying exactly what went into the
    # package build hash.
final_buildinfo['build_ids'] = build_ids
final_buildinfo['package_version'] = version
# Save the package name and variant. The variant is used when installing
# packages to validate dependencies.
final_buildinfo['name'] = name
final_buildinfo['variant'] = variant
# If the package is already built, don't do anything.
pkg_path = package_store.get_package_cache_folder(name) + '/{}.tar.xz'.format(pkg_id)
# Done if it exists locally
if exists(pkg_path):
print("Package up to date. Not re-building.")
# TODO(cmaloney): Updating / filling last_build should be moved out of
# the build function.
write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))
return pkg_path
# Try downloading.
dl_path = package_store.try_fetch_by_id(pkg_id)
if dl_path:
print("Package up to date. Not re-building. Downloaded from repository-url.")
# TODO(cmaloney): Updating / filling last_build should be moved out of
# the build function.
write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))
print(dl_path, pkg_path)
assert dl_path == pkg_path
return pkg_path
# Fall out and do the build since it couldn't be downloaded
print("Unable to download from cache. Proceeding to build")
print("Building package {} with buildinfo: {}".format(
pkg_id,
json.dumps(final_buildinfo, indent=2, sort_keys=True)))
# Clean out src, result so later steps can use them freely for building.
def clean():
# Run a docker container to remove src/ and result/
cmd = DockerCmd()
cmd.volumes = {
package_store.get_package_cache_folder(name): PKG_DIR + "/:rw",
}
if is_windows:
cmd.container = "microsoft/windowsservercore:1709"
filename = PKG_DIR + "\\src"
cmd.run("package-cleaner",
["cmd.exe", "/c", "if", "exist", filename, "rmdir", "/s", "/q", filename])
filename = PKG_DIR + "\\result"
cmd.run("package-cleaner",
["cmd.exe", "/c", "if", "exist", filename, "rmdir", "/s", "/q", filename])
else:
cmd.container = "ubuntu:14.04.4"
cmd.run("package-cleaner", ["rm", "-rf", PKG_DIR + "/src", PKG_DIR + "/result"])
clean()
# Only fresh builds are allowed which don't overlap existing artifacts.
result_dir = cache_abs("result")
if exists(result_dir):
raise BuildError("result folder must not exist. It will be made when the package is "
"built. {}".format(result_dir))
# 'mkpanda add' all implicit dependencies since we actually need to build.
for dep in auto_deps:
print("Auto-adding dependency: {}".format(dep))
# NOTE: Not using the name pkg_id because that overrides the outer one.
id_obj = PackageId(dep)
add_package_file(repository, package_store.get_package_path(id_obj))
package = repository.load(dep)
active_packages.append(package)
    # Check out all the sources into their respective 'src/' folders.
try:
src_dir = cache_abs('src')
if os.path.exists(src_dir):
raise ValidationError(
"'src' directory already exists, did you have a previous build? " +
"Currently all builds must be from scratch. Support should be " +
"added for re-using a src directory when possible. src={}".format(src_dir))
os.mkdir(src_dir)
for src_name, fetcher in sorted(fetchers.items()):
root = cache_abs('src/' + src_name)
os.mkdir(root)
fetcher.checkout_to(root)
except ValidationError as ex:
raise BuildError("Validation error when fetching sources for package: {}".format(ex))
# Activate the packages so that we have a proper path, environment
# variables.
    # TODO(cmaloney): RAII-type thing for the temporary directory so that if we
    # don't get all the way through, things will be cleaned up?
install = Install(
root=install_dir,
config_dir=None,
rooted_systemd=True,
manage_systemd=False,
block_systemd=True,
fake_path=True,
manage_users=False,
manage_state_dir=False)
install.activate(active_packages)
# Rewrite all the symlinks inside the active path because we will
# be mounting the folder into a docker container, and the absolute
# paths to the packages will change.
# TODO(cmaloney): This isn't very clean, it would be much nicer to
# just run pkgpanda inside the package.
rewrite_symlinks(install_dir, repository.path, install_root + "/packages/")
print("Building package in docker")
# TODO(cmaloney): Run as a specific non-root user, make it possible
# for non-root to cleanup afterwards.
# Run the build, prepping the environment as necessary.
mkdir(cache_abs("result"))
# Copy the build info to the resulting tarball
write_json(cache_abs("src/buildinfo.full.json"), final_buildinfo)
write_json(cache_abs("result/buildinfo.full.json"), final_buildinfo)
write_json(cache_abs("result/pkginfo.json"), pkginfo)
# Make the folder for the package we are building. If docker does it, it
# gets auto-created with root permissions and we can't actually delete it.
os.makedirs(os.path.join(install_dir, "packages", str(pkg_id)))
    # TODO(cmaloney): Disallow writing to well-known files and directories?
# Source we checked out
cmd.volumes.update({
# TODO(cmaloney): src should be read only...
# Source directory
cache_abs("src"): PKG_DIR + "/src:rw",
# Getting the result out
cache_abs("result"): install_root + "/packages/{}:rw".format(pkg_id),
# The build script directory
package_dir: PKG_DIR + "/build:ro"
})
if is_windows:
cmd.volumes.update({
            # TODO: This is a temporary workaround until Windows RS4, which has a fix
            # allowing overlapping mount directories, is released. We should not apply
            # this on Linux as well, since it would probably break a bunch of stuff
            # unnecessarily and would only need to be undone in the future.
install_dir: install_root + "/install_dir:ro"
})
else:
cmd.volumes.update({
install_dir: install_root + ":ro"
})
if os.path.exists(extra_dir):
cmd.volumes[extra_dir] = PKG_DIR + "/extra:ro"
cmd.environment = {
"PKG_VERSION": version,
"PKG_NAME": name,
"PKG_ID": pkg_id,
"PKG_PATH": install_root + "/packages/{}".format(pkg_id),
"PKG_VARIANT": variant if variant is not None else "<default>",
"NUM_CORES": multiprocessing.cpu_count()
}
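    # These variables are what the package's build script sees inside the build
    # container; for example, a build script would typically install into
    # "$PKG_PATH" and use "$NUM_CORES" for parallel builds.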
try:
# TODO(cmaloney): Run a wrapper which sources
# /opt/mesosphere/environment then runs a build. Also should fix
# ownership of /opt/mesosphere/packages/{pkg_id} post build.
command = [PKG_DIR + "/build/" + build_script_file]
cmd.run("package-builder", command)
except CalledProcessError as ex:
raise BuildError("docker exited non-zero: {}\nCommand: {}".format(ex.returncode, ' '.join(ex.cmd)))
# Clean up the temporary install dir used for dependencies.
# TODO(cmaloney): Move to an RAII wrapper.
remove_directory(install_dir)
with logger.scope("Build package tarball"):
# Check for forbidden services before packaging the tarball:
try:
check_forbidden_services(cache_abs("result"), RESERVED_UNIT_NAMES)
except ValidationError as ex:
raise BuildError("Package validation failed: {}".format(ex))
# TODO(cmaloney): Updating / filling last_build should be moved out of
# the build function.
write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))
# Bundle the artifacts into the pkgpanda package
tmp_name = pkg_path + "-tmp.tar.xz"
make_tar(tmp_name, cache_abs("result"))
os.replace(tmp_name, pkg_path)
print("Package built.")
if clean_after_build:
clean()
return pkg_path
|
|
from .sum import copy
from .edge import Edge
from .variable import Variable
from .constant import Constant
from .vstack import split
from ..utils.cuda_codegen import CudaSubGraph, gpuarray
from .vstack import vstack
from proximal.utils.timings_log import TimingsLog
import copy as cp
from collections import defaultdict
import numpy as np
from scipy.sparse.linalg import LinearOperator, eigs
class CompGraph(object):
"""A computation graph representing a composite lin op.
"""
instanceCnt = 0
def __init__(self, end, implem=None):
self.instanceID = CompGraph.instanceCnt
CompGraph.instanceCnt += 1
self.orig_end = end
self.end = cp.copy(end)
self.end.orig_node = end.orig_node
self.shape = self.end.shape
# Construct via graph traversal.
self.nodes = []
self.edges = []
self.constants = []
self.input_edges = {}
self.output_edges = {}
new_vars = []
# Assumes all nodes have at most one output.
ready = [self.end]
done = []
node_to_copies = {}
self.split_nodes = {}
while len(ready) > 0:
curr = ready.pop(0)
done.append(curr)
if isinstance(curr, Variable):
# new_vars may contain specific variables more than once
new_vars.append(curr)
# Zero out constants.
self.nodes.append(curr)
input_edges = []
for node in curr.input_nodes:
# Zero out constants. Constants are handled in absorb_offset
if isinstance(node, Constant):
node = Constant(np.zeros(curr.shape))
node.orig_node = None
self.constants.append(node)
else:
# avoid copying too many nodes
                    if node not in node_to_copies:
                        cnode = cp.copy(node)
                        cnode.orig_node = node.orig_node
                        node_to_copies[node] = cnode
else:
self.split_nodes[node_to_copies[node]] = True
node = node_to_copies[node]
# Default implementation.
if implem is not None:
node.implem = implem
if not node in ready and not node in done:
ready.append(node)
edge = Edge(node, curr, node.shape)
input_edges.append(edge)
if not node in self.output_edges:
self.output_edges[node] = [edge]
else:
self.output_edges[node].append(edge)
self.edges += input_edges
self.input_edges[curr] = input_edges
# replace the split nodes with copy nodes
for n in self.split_nodes.keys():
outedges = self.output_edges[n]
outnodes = [e.end for e in outedges]
copy_node = copy(n, implem=implem)
copy_node.input_nodes += [n]
self.output_edges[n] = [Edge(n, copy_node, n.shape)]
self.input_edges[copy_node] = self.output_edges[n]
self.output_edges[copy_node] = []
self.nodes.append(copy_node)
for ns in outnodes:
inedges = self.input_edges[ns]
newinedges = []
for e in inedges:
if e.start is n:
e = Edge(copy_node, e.end, copy_node.shape)
newinedges.append( e )
self.output_edges[copy_node].append(e)
else:
newinedges.append( e )
self.input_edges[ns] = newinedges
# Make copy node for each variable.
old_vars = self.orig_end.variables()
id2copy = {}
copy_nodes = []
self.var_info = {}
offset = 0
for var in old_vars:
copy_node = copy(var.shape, implem=implem)
copy_node.orig_node = None
id2copy[var.uuid] = copy_node
copy_nodes.append(copy_node)
self.var_info[var.uuid] = offset
offset += copy_node.size
self.output_edges[copy_node] = []
self.nodes.append(copy_node)
# Replace variables with copy nodes in graph.
for var in new_vars:
copy_node = id2copy[var.uuid]
for output_edge in self.output_edges[var]:
output_node = output_edge.end
edge = Edge(copy_node, output_node, var.shape)
self.edges.append(edge)
self.output_edges[copy_node].append(edge)
idx = self.input_edges[output_node].index(output_edge)
#print("Variable %s(%s): idx=%d" % (var.varname, var.uuid, idx))
self.input_edges[output_node][idx] = edge
# Record information about variables.
self.input_size = sum([var.size for var in old_vars])
self.output_size = self.end.size
self.start = split(copy_nodes, implem=implem)
self.start.orig_node = None
self.nodes.append(self.start)
split_outputs = []
for copy_node in copy_nodes:
edge = Edge(self.start, copy_node, copy_node.shape)
split_outputs.append(edge)
self.input_edges[copy_node] = [edge]
self.edges += split_outputs
self.output_edges[self.start] = split_outputs
self.cuda_forward_subgraphs = None
self.cuda_adjoint_subgraphs = None
# A record of timings.
self.forward_log = TimingsLog(self.nodes + [self])
self.adjoint_log = TimingsLog(self.nodes + [self])
def input_nodes(self, node):
return list([e.start for e in self.input_edges[node]])
def output_nodes(self, node):
return list([e.end for e in self.output_edges[node]])
def get_inputs(self, node):
"""Returns the input data for a node.
"""
return [e.data for e in self.input_edges[node]]
def get_outputs(self, node):
"""Returns the output data for a node.
"""
return [e.data for e in self.output_edges[node]]
def gen_cuda_code(self):
        # The basic idea is to generate a single cuda kernel for the whole graph.
        # This is done by calling the output node (self.end for the forward direction,
        # self.start for the adjoint direction); these nodes recursively generate
        # the kernel operations for their input nodes as well.
        #
        # There are certain nodes in the graph which either have not yet been ported
        # to cuda or don't fit efficiently into the above scheme. For example, it is
        # not efficient to perform a convolution operation in the middle of a long
        # linear graph (because the preceding image values would have to be
        # calculated size(conv_kernel) times). Therefore we need a way to split the
        # graph into subgraphs and calculate individual nodes on their own.
        #
        # Nodes that want to be isolated can either not implement the
        # forward_cuda_kernel function or override cuda_kernel_available(self) to
        # return False.
# forward direction
self.cuda_forward_subgraphs = CudaSubGraph(self.input_nodes, self.output_nodes, self.end)
self.cuda_forward_subgraphs.gen_code("forward_cuda_kernel")
#print("Forward subgraphs:")
#self.cuda_forward_subgraphs.visualize()
self.cuda_adjoint_subgraphs = CudaSubGraph(self.output_nodes, self.input_nodes, self.start)
self.cuda_adjoint_subgraphs.gen_code("adjoint_cuda_kernel")
#print("Adjoint subgraphs:")
#self.cuda_adjoint_subgraphs.visualize()
def forward_cuda(self, x, y, printt=False):
if 0:
needcopy = False
if type(x) is gpuarray.GPUArray:
x = x.get()
if type(y) is gpuarray.GPUArray:
needcopy = True
yorig = y
y = y.get()
self.forward(x, y)
if needcopy:
yorig[:] = gpuarray.to_gpu(y)
else:
if self.cuda_forward_subgraphs is None:
self.gen_cuda_code()
if not type(x) is gpuarray.GPUArray:
x = gpuarray.to_gpu(x.astype(np.float32))
if not type(y) is gpuarray.GPUArray:
y = gpuarray.to_gpu(y.astype(np.float32))
                print("Warning: result y is not a GPU array.")
self.forward_log[self].tic()
t = self.cuda_forward_subgraphs.apply(x, y)
self.forward_log[self].toc()
if printt: print(t)
return y
def adjoint_cuda(self, y, x, printt=False):
if 0:
needcopy = False
if type(x) is gpuarray.GPUArray:
needcopy = True
xorig = x
x = x.get()
if type(y) is gpuarray.GPUArray:
y = y.get()
self.adjoint(y, x)
if needcopy:
xorig[:] = gpuarray.to_gpu(x)
else:
if self.cuda_adjoint_subgraphs is None:
self.gen_cuda_code()
if not type(x) is gpuarray.GPUArray:
x = gpuarray.to_gpu(x.astype(np.float32))
                print("Warning: result x is not a GPU array.")
if not type(y) is gpuarray.GPUArray:
y = gpuarray.to_gpu(y.astype(np.float32))
self.adjoint_log[self].tic()
t = self.cuda_adjoint_subgraphs.apply(y, x)
self.adjoint_log[self].toc()
if printt: print(t)
return x
def forward(self, x, y):
"""Evaluates the forward composition.
Reads from x and writes to y.
"""
def forward_eval(node):
if node is self.start:
inputs = [x]
else:
inputs = self.get_inputs(node)
if node is self.end:
outputs = [y]
else:
outputs = self.get_outputs(node)
# Run forward op and time it.
self.forward_log[node].tic()
node.forward(inputs, outputs)
#if node in self.split_nodes:
# for io in range(1,len(outputs)):
# np.copyto(outputs[io], outputs[0])
self.forward_log[node].toc()
self.forward_log[self].tic()
# Evaluate forward graph and time it.
self.traverse_graph(forward_eval, True)
self.forward_log[self].toc()
return y
def adjoint(self, u, v):
"""Evaluates the adjoint composition.
Reads from u and writes to v.
"""
def adjoint_eval(node):
if node is self.end:
outputs = [u]
else:
outputs = self.get_outputs(node)
if node is self.start:
inputs = [v]
else:
inputs = self.get_inputs(node)
# Run adjoint op and time it.
self.adjoint_log[node].tic()
#if node in self.split_nodes:
# for io in range(len(outputs)):
# node.adjoint([outputs[io]], inputs)
# assert(len(inputs) == 1)
# if io == 0:
# res = inputs[0].copy()
# else:
# res += inputs[0]
# np.copyto(inputs[0], res)
#else:
if 1:
node.adjoint(outputs, inputs)
self.adjoint_log[node].toc()
# Evaluate adjoint graph and time it.
self.adjoint_log[self].tic()
self.traverse_graph(adjoint_eval, False)
self.adjoint_log[self].toc()
return v
def traverse_graph(self, node_fn, forward):
"""Traverse the graph and apply the given function at each node.
forward: Traverse in standard or reverse order?
node_fn: Function to evaluate on each node.
"""
ready = []
eval_map = defaultdict(int)
if forward:
ready.append(self.start)
# Constant nodes are leaves as well.
ready += self.constants
else:
ready.append(self.end)
while len(ready) > 0:
curr = ready.pop()
# Evaluate the given function on curr.
node_fn(curr)
eval_map[curr] += 1
if forward:
child_edges = self.output_edges.get(curr, [])
else:
child_edges = self.input_edges.get(curr, [])
# If each input has visited the node, it is ready.
for edge in child_edges:
if forward:
node = edge.end
else:
node = edge.start
eval_map[node] += 1
if forward:
node_inputs_count = len(self.input_edges[node])
else:
node_inputs_count = len(self.output_edges[node])
if (eval_map[node] == node_inputs_count):
ready.append(node)
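    # Note: this is essentially a Kahn-style traversal; a node is appended to
    # `ready` only once eval_map[node] equals the number of its incoming edges
    # in the chosen direction, i.e. after all of its producers (forward) or
    # consumers (adjoint) have been evaluated.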
def norm_bound(self, final_output_mags):
"""Returns fast upper bound on ||K||.
Parameters
----------
final_output_mags : list
Place to store final output magnitudes.
"""
def node_norm_bound(node):
# Read input magnitudes off edges.
if node is self.start:
input_mags = [1]
else:
input_mags = [e.mag for e in self.input_edges[node]]
# If a node doesn't support norm_bound, propagate that.
if NotImplemented in input_mags:
output_mag = NotImplemented
else:
output_mag = node.norm_bound(input_mags)
if node is self.end:
final_output_mags[0] = output_mag
else:
for idx, e in enumerate(self.output_edges[node]):
e.mag = output_mag
self.traverse_graph(node_norm_bound, True)
def update_vars(self, val):
"""Map sections of val to variables.
"""
for var in self.orig_end.variables():
offset = self.var_info[var.uuid]
var.value = np.reshape(val[offset:offset + var.size], var.shape)
offset += var.size
def x0(self):
res = np.zeros(self.input_size)
for var in self.orig_end.variables():
if var.initval is not None:
offset = self.var_info[var.uuid]
res[offset:offset + var.size] = np.ravel(var.initval)
return res
def __str__(self):
return self.__class__.__name__
def est_CompGraph_norm(K, tol=1e-3, try_fast_norm=True):
"""Estimates operator norm for L = ||K||.
Parameters
----------
tol : float
Accuracy of estimate if not trying for upper bound.
try_fast_norm : bool
Whether to try for a fast upper bound.
Returns
-------
float
Estimate of ||K||.
"""
if try_fast_norm:
output_mags = [NotImplemented]
K.norm_bound(output_mags)
if NotImplemented not in output_mags:
return output_mags[0]
input_data = np.zeros(K.input_size)
output_data = np.zeros(K.output_size)
def KtK(x):
K.forward(x, output_data)
K.adjoint(output_data, input_data)
return input_data
# Define linear operator
A = LinearOperator((K.input_size, K.input_size),
KtK, KtK)
Knorm = np.sqrt(eigs(A, k=1, M=None, sigma=None, which='LM', tol=tol)[0].real)
    return float(Knorm)
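# Illustrative usage (assuming `K` is a CompGraph built elsewhere):
#   L = est_CompGraph_norm(K, try_fast_norm=False)
# Solvers typically use this estimate to pick step sizes, e.g. choosing tau and
# sigma with tau * sigma * L**2 < 1 in primal-dual methods.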
|
|
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for bert_sngp."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
import uncertainty_baselines as ub
from uncertainty_baselines.models import bert_sngp
from official.nlp.bert import configs as bert_configs
SNFeedforward = bert_sngp.SpectralNormalizedFeedforwardLayer
SNAttention = bert_sngp.SpectralNormalizedMultiHeadAttention
SNTransformer = bert_sngp.SpectralNormalizedTransformer
def _compute_spectral_norm(weight):
"""Computes the spectral norm for a numpy weight matrix."""
# TODO(b/165683434): Support different re-shaping options.
if weight.ndim > 2:
# Reshape weight to a 2D matrix.
weight_shape = weight.shape
weight = weight.reshape((-1, weight_shape[-1]))
return np.max(np.linalg.svd(weight, compute_uv=False))
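# For a 2D weight matrix this is simply the largest singular value, e.g.
# _compute_spectral_norm(np.eye(3)) == 1.0, and scaling a matrix by a factor c
# scales its spectral norm by |c|.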
def _compute_layer_spectral_norms(layer):
"""Computes the spectral norm for all kernels in a layer."""
return [
_compute_spectral_norm(weight.numpy())
for weight in layer.weights
if 'kernel' in weight.name
]
class SngpModelTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super().setUp()
self.random_seed = 42
self.num_classes = 10
self.batch_size = 4
self.seq_length = 4
self.hidden_dim = 8
self.num_heads = 2
self.key_dim = self.hidden_dim // self.num_heads
self.bert_test_config = bert_configs.BertConfig(
attention_probs_dropout_prob=0.12,
hidden_dropout_prob=0.34,
hidden_act='gelu',
hidden_size=self.hidden_dim,
initializer_range=0.02,
intermediate_size=self.hidden_dim,
max_position_embeddings=self.seq_length,
num_attention_heads=self.num_heads,
num_hidden_layers=2,
type_vocab_size=2,
vocab_size=128)
self.input_shape_3d = tf.TensorShape(
(self.batch_size, self.seq_length, self.hidden_dim))
self.input_shape_4d = tf.TensorShape(
(self.batch_size, self.seq_length, self.num_heads, self.key_dim))
# Layer arguments.
self.sn_norm_multiplier = 0.05
self.spec_norm_kwargs = dict(
iteration=1000, norm_multiplier=self.sn_norm_multiplier)
self.attention_kwargs = dict(num_heads=self.num_heads, key_dim=self.key_dim)
self.feedforward_kwargs = dict(
intermediate_size=128,
intermediate_activation='gelu',
dropout=0.1,
use_layer_norm=True)
self.gp_layer_kwargs = dict(
num_inducing=32, gp_cov_momentum=0.999, gp_cov_ridge_penalty=1e-6)
  def test_make_spec_norm_dense_layer(self):
    """Tests if the weights of spec_norm_dense_layer are correctly normalized."""
    # For an input sequence tensor [batch_size, a, b], defines a matrix
    # multiplication op (along the hidden dimension b) in einsum notation.
    einsum_equation = 'abc,cd->abd'
    einsum_layer_class = bert_sngp.make_spec_norm_dense_layer(
        **self.spec_norm_kwargs)
    dense_layer = einsum_layer_class(
output_shape=(self.seq_length, 10),
equation=einsum_equation,
activation='relu')
# Perform normalization.
dense_layer.build(self.input_shape_3d)
dense_layer.update_weights()
normalized_kernel = dense_layer.layer.kernel.numpy()
spectral_norm_computed = _compute_spectral_norm(normalized_kernel)
self.assertAllClose(
spectral_norm_computed, self.sn_norm_multiplier, atol=1e-3)
@parameterized.named_parameters(('feedforward', False), ('attention', True))
def test_layer_spectral_normalization(self, test_attention):
"""Tests if the layer weights can be correctly normalized."""
layer_class = SNAttention if test_attention else SNFeedforward
input_shape = self.input_shape_4d if test_attention else self.input_shape_3d
kwargs = self.attention_kwargs if test_attention else self.feedforward_kwargs
# Create input data.
tf.random.set_seed(self.random_seed)
random_data = tf.random.normal(input_shape)
input_tensors = (random_data,) * 2 if test_attention else (random_data,)
layer_instance = layer_class(
use_spec_norm=True, spec_norm_kwargs=self.spec_norm_kwargs, **kwargs)
# Invoke spectral normalization via model call.
_ = layer_instance(*input_tensors)
spec_norm_list_observed = _compute_layer_spectral_norms(layer_instance)
if test_attention:
# Remove the key, query and value layers from comparison since they are
# not normalized.
spec_norm_list_observed = spec_norm_list_observed[3:]
spec_norm_list_expected = [self.sn_norm_multiplier
] * len(spec_norm_list_observed)
self.assertAllClose(spec_norm_list_observed, spec_norm_list_expected,
atol=1e-3)
@parameterized.named_parameters(('att_and_ffn', True, True),
('att_only', False, True),
('ffn_only', True, False))
def test_transformer_spectral_normalization(self, use_spec_norm_att,
use_spec_norm_ffn):
"""Tests if the transformer weights can be correctly normalized."""
tf.random.set_seed(self.random_seed)
input_tensor = tf.random.normal(self.input_shape_3d)
transformer_model = SNTransformer(
num_attention_heads=self.num_heads,
intermediate_size=self.hidden_dim,
intermediate_activation='gelu',
use_layer_norm_att=False,
use_layer_norm_ffn=False,
use_spec_norm_att=use_spec_norm_att,
use_spec_norm_ffn=use_spec_norm_ffn,
spec_norm_kwargs=self.spec_norm_kwargs)
_ = transformer_model(input_tensor)
spec_norm_list_all = _compute_layer_spectral_norms(transformer_model)
# Collect spectral norms of the normalized kernel matrices.
spec_norm_list_observed = []
if use_spec_norm_att:
# Collect the output layers.
spec_norm_list_observed += spec_norm_list_all[3:4]
if use_spec_norm_ffn:
# Collect the last two feedforward layers.
spec_norm_list_observed += spec_norm_list_all[-2:]
spec_norm_list_expected = [self.sn_norm_multiplier
] * len(spec_norm_list_observed)
self.assertAllClose(
spec_norm_list_observed, spec_norm_list_expected, atol=1e-3)
def test_transformer_encoder_spectral_normalization(self):
"""Tests if the transformer encoder weights are correctly normalized."""
input_ids = tf.ones((self.batch_size, self.seq_length), dtype=tf.int32)
input_tensors = [input_ids, input_ids, input_ids]
transformer_encoder = (
bert_sngp.get_spectral_normalized_transformer_encoder(
bert_config=self.bert_test_config,
spec_norm_kwargs=self.spec_norm_kwargs,
use_layer_norm_att=True,
use_layer_norm_ffn=True,
use_spec_norm_att=True,
use_spec_norm_ffn=True))
_ = transformer_encoder(input_tensors)
# Currently the model does not apply spectral normalization to the
# key and query layers. Remove them from evaluation.
spec_norm_list_observed = _compute_layer_spectral_norms(transformer_encoder)
spec_norm_list_observed = (
spec_norm_list_observed[3:5] + spec_norm_list_observed[9:10])
spec_norm_list_expected = [self.sn_norm_multiplier
] * len(spec_norm_list_observed)
self.assertAllClose(
spec_norm_list_observed, spec_norm_list_expected, atol=1e-3)
def test_bert_gp_classifier(self):
"""Tests if BertGaussianProcessClassifier can be compiled successfully."""
# Compile a mock input model
inputs = tf.keras.Input(shape=self.seq_length, batch_size=self.batch_size)
outputs = tf.keras.layers.Lambda(lambda x: x)(inputs)
network = tf.keras.Model(inputs=inputs, outputs=[outputs, outputs])
# Compiles classifier model.
model = bert_sngp.BertGaussianProcessClassifier(
network,
num_classes=self.num_classes,
dropout_rate=0.1,
use_gp_layer=True,
gp_layer_kwargs=self.gp_layer_kwargs)
# Computes output.
tf.random.set_seed(self.random_seed)
inputs_tensor = tf.random.normal((self.batch_size, self.seq_length))
logits, stddev = model(inputs_tensor, training=False)
# Check if output tensors have correct shapes.
logits_shape_observed = logits.shape.as_list()
stddev_shape_observed = stddev.shape.as_list()
logits_shape_expected = [self.batch_size, self.num_classes]
stddev_shape_expected = [self.batch_size, self.batch_size]
self.assertEqual(logits_shape_observed, logits_shape_expected)
self.assertEqual(stddev_shape_observed, stddev_shape_expected)
def test_create_model(self):
"""Integration test for create_model."""
# Set iteration to 1 to avoid long waiting time.
spec_norm_kwargs = dict(iteration=1,
norm_multiplier=self.sn_norm_multiplier)
bert_model, bert_encoder = ub.models.bert_sngp_model(
num_classes=10,
bert_config=self.bert_test_config,
gp_layer_kwargs=self.gp_layer_kwargs,
spec_norm_kwargs=spec_norm_kwargs,
use_gp_layer=True,
use_spec_norm_att=True,
use_spec_norm_ffn=True,
use_layer_norm_att=False,
use_layer_norm_ffn=False)
self.assertIsInstance(bert_model,
bert_sngp.BertGaussianProcessClassifier)
self.assertIsInstance(bert_encoder,
bert_sngp.SpectralNormalizedTransformerEncoder)
if __name__ == '__main__':
tf.test.main()
|
|
import functools
import collections
import logging
import threading
from mopidy import audio, backend
import spotify
logger = logging.getLogger(__name__)
# These GStreamer caps match the audio data provided by libspotify
GST_CAPS = "audio/x-raw,format=S16LE,rate=44100,channels=2,layout=interleaved"
# Extra log level with lower importance than DEBUG=10 for noisy debug logging
TRACE_LOG_LEVEL = 5
class SpotifyPlaybackProvider(backend.PlaybackProvider):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._timeout = self.backend._config["spotify"]["timeout"]
self._buffer_timestamp = BufferTimestamp(0)
self._seeking_event = threading.Event()
self._first_seek = False
self._push_audio_data_event = threading.Event()
self._push_audio_data_event.set()
self._end_of_track_event = threading.Event()
self._events_connected = False
# libspotify sends a single empty buffer at the end of each track which
# must be discarded to ensure a gapless track transition. We delay using
        # each buffer until we receive the next one, up until we change track
# and clear everything, therefore dropping the unwanted last buffer.
self._held_buffers = collections.deque()
def _connect_events(self):
if not self._events_connected:
self._events_connected = True
self.backend._session.on(
spotify.SessionEvent.MUSIC_DELIVERY,
music_delivery_callback,
self.audio,
self._seeking_event,
self._push_audio_data_event,
self._buffer_timestamp,
self._held_buffers,
)
self.backend._session.on(
spotify.SessionEvent.END_OF_TRACK,
end_of_track_callback,
self._end_of_track_event,
self.audio,
)
def change_track(self, track):
self._connect_events()
if track.uri is None:
return False
logger.debug(
"Audio requested change of track; "
"loading and starting Spotify player"
)
need_data_callback_bound = functools.partial(
need_data_callback, self._push_audio_data_event
)
enough_data_callback_bound = functools.partial(
enough_data_callback, self._push_audio_data_event
)
seek_data_callback_bound = functools.partial(
seek_data_callback, self._seeking_event, self.backend._actor_proxy
)
self._buffer_timestamp.set(0)
self._first_seek = True
self._end_of_track_event.clear()
# Discard held buffers
self._held_buffers.clear()
try:
sp_track = self.backend._session.get_track(track.uri)
sp_track.load(self._timeout)
self.backend._session.player.load(sp_track)
self.backend._session.player.play()
future = self.audio.set_appsrc(
GST_CAPS,
need_data=need_data_callback_bound,
enough_data=enough_data_callback_bound,
seek_data=seek_data_callback_bound,
)
self.audio.set_metadata(track)
# Gapless playback requires that we block until URI change in
# mopidy.audio has completed before we return from change_track().
future.get()
return True
except spotify.Error as exc:
logger.info(f"Playback of {track.uri} failed: {exc}")
return False
def resume(self):
logger.debug("Audio requested resume; starting Spotify player")
self.backend._session.player.play()
return super().resume()
def stop(self):
logger.debug("Audio requested stop; pausing Spotify player")
self.backend._session.player.pause()
return super().stop()
def pause(self):
logger.debug("Audio requested pause; pausing Spotify player")
self.backend._session.player.pause()
return super().pause()
def on_seek_data(self, time_position):
logger.debug(f"Audio requested seek to {time_position}")
if time_position == 0 and self._first_seek:
self._seeking_event.clear()
self._first_seek = False
logger.debug("Skipping seek due to issue mopidy/mopidy#300")
return
# After seeking any data buffered so far will be stale, so clear it.
#
# This also seems to fix intermittent soft failures of the player after
# seeking (especially backwards), i.e. it pretends to be playing music,
# but doesn't.
self._held_buffers.clear()
self._buffer_timestamp.set(
audio.millisecond_to_clocktime(time_position)
)
self.backend._session.player.seek(time_position)
def need_data_callback(push_audio_data_event, length_hint):
# This callback is called from GStreamer/the GObject event loop.
logger.log(
TRACE_LOG_LEVEL,
f"Audio requested more data (hint={length_hint}); "
"accepting deliveries",
)
push_audio_data_event.set()
def enough_data_callback(push_audio_data_event):
# This callback is called from GStreamer/the GObject event loop.
logger.log(TRACE_LOG_LEVEL, "Audio has enough data; rejecting deliveries")
push_audio_data_event.clear()
def seek_data_callback(seeking_event, spotify_backend, time_position):
# This callback is called from GStreamer/the GObject event loop.
# It forwards the call to the backend actor.
seeking_event.set()
spotify_backend.playback.on_seek_data(time_position)
def music_delivery_callback(
session,
audio_format,
frames,
num_frames,
audio_actor,
seeking_event,
push_audio_data_event,
buffer_timestamp,
held_buffers,
):
# This is called from an internal libspotify thread.
# Ideally, nothing here should block.
if seeking_event.is_set():
# A seek has happened, but libspotify hasn't confirmed yet, so
# we're dropping all audio data from libspotify.
if num_frames == 0:
# libspotify signals that it has completed the seek. We'll accept
# the next audio data delivery.
seeking_event.clear()
return num_frames
if not push_audio_data_event.is_set():
return 0 # Reject the audio data. It will be redelivered later.
if not frames:
return 0 # No audio data; return immediately.
known_format = (
audio_format.sample_type == spotify.SampleType.INT16_NATIVE_ENDIAN
)
assert known_format, "Expects 16-bit signed integer samples"
duration = audio.calculate_duration(num_frames, audio_format.sample_rate)
buffer_ = audio.create_buffer(
bytes(frames), timestamp=buffer_timestamp.get(), duration=duration
)
# Try to consume any held buffers.
if held_buffers:
while held_buffers:
buf = held_buffers.popleft()
consumed = audio_actor.emit_data(buf).get()
if not consumed:
held_buffers.appendleft(buf)
break
else:
# No held buffer, don't apply back-pressure
consumed = True
if consumed:
# Consumed all held buffers so take the new one libspotify delivered us.
held_buffers.append(buffer_)
buffer_timestamp.increase(duration)
return num_frames
else:
# Pass back-pressure on to libspotify, next buffer will be redelivered.
return 0
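# Summary of the delivery protocol above: returning num_frames tells libspotify
# that the frames were consumed, while returning 0 asks libspotify to redeliver
# the same frames later, which is how back-pressure from GStreamer is propagated.
# During a pending seek the frames are instead accepted and silently dropped.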
def end_of_track_callback(session, end_of_track_event, audio_actor):
# This callback is called from the pyspotify event loop.
if end_of_track_event.is_set():
logger.debug("End of track already received; ignoring callback")
return
logger.debug("End of track reached")
end_of_track_event.set()
audio_actor.emit_data(None)
# Stop the track to prevent receiving empty audio data
session.player.unload()
class BufferTimestamp:
"""Wrapper around an int to serialize access by multiple threads.
The value is used both from the backend actor and callbacks called by
internal libspotify threads.
"""
def __init__(self, value):
self._value = value
self._lock = threading.RLock()
def get(self):
with self._lock:
return self._value
def set(self, value):
with self._lock:
self._value = value
def increase(self, value):
with self._lock:
self._value += value
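# Example of how this is used above: change_track() resets the timestamp to 0,
# on_seek_data() sets it to the seek position, and music_delivery_callback()
# advances it by each buffer's duration, so buffers handed to GStreamer carry the
# correct running timestamps even though the callbacks run on libspotify's
# internal threads.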
|
|
# This file is part of Viper - https://github.com/botherder/viper
# See the file 'LICENSE' for copying permission.
import argparse
import os
import time
import fnmatch
import tempfile
import shutil
from zipfile import ZipFile
from viper.common.out import *
from viper.common.objects import File
from viper.common.network import download
from viper.core.session import __sessions__
from viper.core.project import __project__
from viper.core.plugins import __modules__
from viper.core.database import Database
from viper.core.storage import store_sample, get_sample_path
class Commands(object):
output = []
def __init__(self):
# Open connection to the database.
self.db = Database()
# Map commands to their related functions.
self.commands = dict(
help=dict(obj=self.cmd_help, description="Show this help message"),
open=dict(obj=self.cmd_open, description="Open a file"),
close=dict(obj=self.cmd_close, description="Close the current session"),
info=dict(obj=self.cmd_info, description="Show information on the opened file"),
notes=dict(obj=self.cmd_notes, description="View, add and edit notes on the opened file"),
clear=dict(obj=self.cmd_clear, description="Clear the console"),
store=dict(obj=self.cmd_store, description="Store the opened file to the local repository"),
delete=dict(obj=self.cmd_delete, description="Delete the opened file"),
find=dict(obj=self.cmd_find, description="Find a file"),
tags=dict(obj=self.cmd_tags, description="Modify tags of the opened file"),
sessions=dict(obj=self.cmd_sessions, description="List or switch sessions"),
projects=dict(obj=self.cmd_projects, description="List or switch existing projects"),
export=dict(obj=self.cmd_export, description="Export the current session to file or zip"),
)
# Output Logging
def log(self, event_type, event_data):
self.output.append(dict(
type=event_type,
data=event_data
))
##
# CLEAR
#
# This command simply clears the shell.
def cmd_clear(self, *args):
os.system('clear')
##
# HELP
#
# This command simply prints the help message.
# It lists both embedded commands and loaded modules.
def cmd_help(self, *args):
self.log("info", "Commands")
rows = []
for command_name, command_item in self.commands.items():
rows.append([command_name, command_item['description']])
rows.append(["exit, quit", "Exit Viper"])
rows = sorted(rows, key=lambda entry: entry[0])
self.log('table', dict(header=['Command', 'Description'], rows=rows))
self.log("info", "Modules")
rows = []
for module_name, module_item in __modules__.items():
rows.append([module_name, module_item['description']])
rows = sorted(rows, key=lambda entry: entry[0])
self.log('table', dict(header=['Command', 'Description'], rows=rows))
##
# OPEN
#
# This command is used to open a session on a given file.
# It either can be an external file path, or a SHA256 hash of a file which
# has been previously imported and stored.
# While the session is active, every operation and module executed will be
# run against the file specified.
def cmd_open(self, *args):
parser = argparse.ArgumentParser(prog="open", description="Open a file", epilog="You can also specify a MD5 or SHA256 hash to a previously stored file in order to open a session on it.")
group = parser.add_mutually_exclusive_group()
group.add_argument('-f', '--file', action="store_true", help="target is a file")
group.add_argument('-u', '--url', action="store_true", help="target is a URL")
group.add_argument('-l', '--last', action="store_true", help="target is the entry number from the last find command's results")
parser.add_argument('-t', '--tor', action="store_true", help="Download the file through Tor")
parser.add_argument("value", metavar='Path, URL, hash or ID', nargs='*', help="Target to open. Hash can be md5 or sha256. ID has to be from the last search.")
try:
args = parser.parse_args(args)
except:
return
target = " ".join(args.value)
if not args.last and target is None:
parser.print_usage()
return
# If it's a file path, open a session on it.
if args.file:
target = os.path.expanduser(target)
if not os.path.exists(target) or not os.path.isfile(target):
self.log("error", "File not found: {0}".format(target))
return
__sessions__.new(target)
# If it's a URL, download it and open a session on the temporary file.
elif args.url:
data = download(url=target, tor=args.tor)
if data:
tmp = tempfile.NamedTemporaryFile(delete=False)
tmp.write(data)
tmp.close()
__sessions__.new(tmp.name)
# Try to open the specified file from the list of results from
# the last find command.
elif args.last:
if __sessions__.find:
count = 1
for item in __sessions__.find:
if count == int(target):
__sessions__.new(get_sample_path(item.sha256))
break
count += 1
else:
self.log("warning", "You haven't performed a find yet")
        # Otherwise we assume it's a hash of a previously stored sample.
else:
target = target.strip().lower()
if len(target) == 32:
key = 'md5'
elif len(target) == 64:
key = 'sha256'
else:
parser.print_usage()
return
rows = self.db.find(key=key, value=target)
if not rows:
self.log("warning", "No file found with the given hash {0}".format(target))
return
path = get_sample_path(rows[0].sha256)
if path:
__sessions__.new(path)
##
# CLOSE
#
# This command resets the open session.
# After that, all handles to the opened file should be closed and the
# shell should be restored to the default prompt.
def cmd_close(self, *args):
__sessions__.close()
##
# INFO
#
# This command returns information on the open session. It returns details
# on the file (e.g. hashes) and other information that might available from
# the database.
def cmd_info(self, *args):
if __sessions__.is_set():
self.log('table', dict(
header=['Key', 'Value'],
rows=[
['Name', __sessions__.current.file.name],
['Tags', __sessions__.current.file.tags],
['Path', __sessions__.current.file.path],
['Size', __sessions__.current.file.size],
['Type', __sessions__.current.file.type],
['Mime', __sessions__.current.file.mime],
['MD5', __sessions__.current.file.md5],
['SHA1', __sessions__.current.file.sha1],
['SHA256', __sessions__.current.file.sha256],
['SHA512', __sessions__.current.file.sha512],
['SSdeep', __sessions__.current.file.ssdeep],
['CRC32', __sessions__.current.file.crc32]
]
))
##
# NOTES
#
# This command allows you to view, add, modify and delete notes associated
# with the currently opened file.
def cmd_notes(self, *args):
parser = argparse.ArgumentParser(prog="notes", description="Show information on the opened file")
group = parser.add_mutually_exclusive_group()
group.add_argument('-l', '--list', action="store_true", help="List all notes available for the current file")
group.add_argument('-a', '--add', action="store_true", help="Add a new note to the current file")
group.add_argument('-v', '--view', metavar='note_id', type=int, help="View the specified note")
group.add_argument('-e', '--edit', metavar='note_id', type=int, help="Edit an existing note")
group.add_argument('-d', '--delete', metavar='note_id', type=int, help="Delete an existing note")
try:
args = parser.parse_args(args)
except:
return
if not __sessions__.is_set():
self.log("error", "No session opened")
return
if args.list:
# Retrieve all notes for the currently opened file.
malware = Database().find(key='sha256', value=__sessions__.current.file.sha256)
if not malware:
self.log("error", "The opened file doesn't appear to be in the database, have you stored it yet?")
return
notes = malware[0].note
if not notes:
self.log("info", "No notes available for this file yet")
return
# Populate table rows.
rows = [[note.id, note.title] for note in notes]
# Display list of existing notes.
self.log('table', dict(header=['ID', 'Title'], rows=rows))
elif args.add:
title = raw_input("Enter a title for the new note: ")
# Create a new temporary file.
tmp = tempfile.NamedTemporaryFile(delete=False)
# Open the temporary file with the default editor, or with nano.
os.system('"${EDITOR:-nano}" ' + tmp.name)
# Once the user is done editing, we need to read the content and
# store it in the database.
body = tmp.read()
Database().add_note(__sessions__.current.file.sha256, title, body)
# Finally, remove the temporary file.
os.remove(tmp.name)
self.log("info", "New note with title \"{0}\" added to the current file".format(bold(title)))
elif args.view:
            # Retrieve the note with the specified ID and print it.
note = Database().get_note(args.view)
if note:
self.log("info", bold('Title: ') + note.title)
self.log("info", bold('Body:'))
print(note.body)
else:
self.log("info", "There is no note with ID {0}".format(args.view))
elif args.edit:
# Retrieve note with the specified ID.
note = Database().get_note(args.edit)
if note:
# Create a new temporary file.
tmp = tempfile.NamedTemporaryFile(delete=False)
# Write the old body to the temporary file.
tmp.write(note.body)
tmp.close()
# Open the old body with the text editor.
os.system('"${EDITOR:-nano}" ' + tmp.name)
# Read the new body from the temporary file.
body = open(tmp.name, 'r').read()
# Update the note entry with the new body.
Database().edit_note(args.edit, body)
# Remove the temporary file.
os.remove(tmp.name)
self.log("info", "Updated note with ID {0}".format(args.edit))
elif args.delete:
# Delete the note with the specified ID.
Database().delete_note(args.delete)
else:
parser.print_usage()
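    # A minimal usage sketch for the notes workflow above (hypothetical values,
    # assuming a Commands instance `cmd` and an open session on a stored file):
    #
    #     cmd.cmd_notes('--list')           # show existing notes (ID, Title)
    #     cmd.cmd_notes('--add')            # prompts for a title, opens $EDITOR
    #     cmd.cmd_notes('--view', '5')      # print the note with ID 5
    #     cmd.cmd_notes('--edit', '5')      # re-open note 5 in $EDITOR
    #     cmd.cmd_notes('--delete', '5')    # remove note 5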
##
# STORE
#
# This command stores the opened file in the local repository and tries
# to store details in the database.
def cmd_store(self, *args):
parser = argparse.ArgumentParser(prog="store", description="Store the opened file to the local repository")
parser.add_argument('-d', '--delete', action="store_true", help="Delete the original file")
parser.add_argument('-f', '--folder', type=str, nargs='+', help="Specify a folder to import")
parser.add_argument('-s', '--file-size', type=int, help="Specify a maximum file size")
parser.add_argument('-y', '--file-type', type=str, help="Specify a file type pattern")
parser.add_argument('-n', '--file-name', type=str, help="Specify a file name pattern")
parser.add_argument('-t', '--tags', type=str, nargs='+', help="Specify a list of comma-separated tags")
try:
args = parser.parse_args(args)
except:
return
if args.folder is not None:
            # Allow spaces in the path.
args.folder = " ".join(args.folder)
if args.tags is not None:
# Remove the spaces in the list of tags
args.tags = "".join(args.tags)
def add_file(obj, tags=None):
if get_sample_path(obj.sha256):
self.log("warning", "Skip, file \"{0}\" appears to be already stored".format(obj.name))
return False
# Try to store file object into database.
status = self.db.add(obj=obj, tags=tags)
if status:
                # If it succeeds, also store the file in the local repository.
                # If something fails in the database (for example with unicode
                # strings), we don't want the binary lying in the repository
                # with no associated database record.
new_path = store_sample(obj)
self.log("success", "Stored file \"{0}\" to {1}".format(obj.name, new_path))
else:
return False
# Delete the file if requested to do so.
if args.delete:
try:
os.unlink(obj.path)
except Exception as e:
self.log("warning", "Failed deleting file: {0}".format(e))
return True
        # If the user specified the --folder flag, we walk the folder recursively
        # and try to add all contained files to the local repository.
        # This is not going to open a new session.
        # TODO: perhaps disable recursion or make it optional?
if args.folder is not None:
# Check if the specified folder is valid.
if os.path.isdir(args.folder):
# Walk through the folder and subfolders.
for dir_name, dir_names, file_names in os.walk(args.folder):
# Add each collected file.
for file_name in file_names:
file_path = os.path.join(dir_name, file_name)
if not os.path.exists(file_path):
continue
                    # Skip files that are empty (zero size).
if not os.path.getsize(file_path) > 0:
continue
# Check if the file name matches the provided pattern.
if args.file_name:
if not fnmatch.fnmatch(file_name, args.file_name):
# self.log("warning", "Skip, file \"{0}\" doesn't match the file name pattern".format(file_path))
continue
# Check if the file type matches the provided pattern.
if args.file_type:
if args.file_type not in File(file_path).type:
# self.log("warning", "Skip, file \"{0}\" doesn't match the file type".format(file_path))
continue
# Check if file exceeds maximum size limit.
if args.file_size:
# Obtain file size.
if os.path.getsize(file_path) > args.file_size:
self.log("warning", "Skip, file \"{0}\" is too big".format(file_path))
continue
file_obj = File(file_path)
# Add file.
add_file(file_obj, args.tags)
else:
self.log("error", "You specified an invalid folder: {0}".format(args.folder))
# Otherwise we try to store the currently opened file, if there is any.
else:
if __sessions__.is_set():
if __sessions__.current.file.size == 0:
self.log("warning", "Skip, file \"{0}\" appears to be empty".format(__sessions__.current.file.name))
return False
# Add file.
if add_file(__sessions__.current.file, args.tags):
# Open session to the new file.
self.cmd_open(*[__sessions__.current.file.sha256])
else:
self.log("error", "No session opened")
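    # A hedged usage sketch for the store command above (assumes a Commands
    # instance `cmd`; the flags map to the argparse options defined in cmd_store):
    #
    #     # Store the currently opened file and tag it:
    #     cmd.cmd_store('--tags', 'dropper,apt')
    #     # Recursively import a folder, keeping only PE files under 5 MB:
    #     cmd.cmd_store('--folder', '/tmp/samples', '--file-type', 'PE32',
    #                   '--file-size', '5242880')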
##
# DELETE
#
    # This command deletes the currently opened file (only if it's stored in
    # the local repository) and removes its details from the database.
def cmd_delete(self, *args):
if __sessions__.is_set():
while True:
choice = raw_input("Are you sure you want to delete this binary? Can't be reverted! [y/n] ")
if choice == 'y':
break
elif choice == 'n':
return
rows = self.db.find('sha256', __sessions__.current.file.sha256)
if rows:
malware_id = rows[0].id
if self.db.delete(malware_id):
self.log("success", "File deleted")
else:
self.log("error", "Unable to delete file")
os.remove(__sessions__.current.file.path)
__sessions__.close()
else:
self.log("error", "No session opened")
##
# FIND
#
# This command is used to search for files in the database.
def cmd_find(self, *args):
parser = argparse.ArgumentParser(prog="find", description="Find a file")
group = parser.add_mutually_exclusive_group()
group.add_argument('-t', '--tags', action="store_true", help="List available tags and quit")
group.add_argument('type', nargs='?', choices=["all", "latest", "name", "type", "mime", "md5", "sha256", "tag", "note"], help="Where to search.")
parser.add_argument("value", nargs='?', help="String to search.")
try:
args = parser.parse_args(args)
except:
return
# One of the most useful search terms is by tag. With the --tags
# argument we first retrieve a list of existing tags and the count
# of files associated with each of them.
if args.tags:
# Retrieve list of tags.
tags = self.db.list_tags()
if tags:
rows = []
# For each tag, retrieve the count of files associated with it.
for tag in tags:
count = len(self.db.find('tag', tag.tag))
rows.append([tag.tag, count])
# Generate the table with the results.
header = ['Tag', '# Entries']
rows.sort(key=lambda x: x[1], reverse=True)
self.log('table', dict(header=header, rows=rows))
else:
self.log("warning", "No tags available")
return
# At this point, if there are no search terms specified, return.
if args.type is None:
parser.print_usage()
return
        key = args.type
        if key != 'all' and key != 'latest':
            # The second argument is the search value.
            value = args.value
            if value is None:
                self.log("error", "You need to include a search term.")
                return
        else:
            value = None
# Search all the files matching the given parameters.
items = self.db.find(key, value)
if not items:
return
# Populate the list of search results.
rows = []
count = 1
for item in items:
tag = ', '.join([t.tag for t in item.tag if t.tag])
row = [count, item.name, item.mime, item.md5, tag]
if key == 'latest':
row.append(item.created_at)
rows.append(row)
count += 1
# Update find results in current session.
__sessions__.find = items
# Generate a table with the results.
header = ['#', 'Name', 'Mime', 'MD5', 'Tags']
if key == 'latest':
header.append('Created At')
self.log("table", dict(header=header, rows=rows))
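    # Illustrative sketch of typical find queries (assumes a Commands instance
    # `cmd`; the search keys correspond to the choices declared above):
    #
    #     cmd.cmd_find('--tags')           # list available tags with counts
    #     cmd.cmd_find('tag', 'banker')    # all files tagged "banker"
    #     cmd.cmd_find('latest')           # most recently added files
    #     cmd.cmd_find('name', 'invoice')  # match on the stored file name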
##
# TAGS
#
# This command is used to modify the tags of the opened file.
def cmd_tags(self, *args):
parser = argparse.ArgumentParser(prog="tags", description="Modify tags of the opened file")
parser.add_argument('-a', '--add', help="Add tags to the opened file (comma separated)")
parser.add_argument('-d', '--delete', help="Delete a tag from the opened file")
try:
args = parser.parse_args(args)
except:
return
# This command requires a session to be opened.
if not __sessions__.is_set():
self.log("error", "No session opened")
parser.print_usage()
return
# If no arguments are specified, there's not much to do.
# However, it could make sense to also retrieve a list of existing
# tags from this command, and not just from the "find" command alone.
if args.add is None and args.delete is None:
parser.print_usage()
return
# TODO: handle situation where addition or deletion of a tag fail.
if args.add:
# Add specified tags to the database's entry belonging to
# the opened file.
db = Database()
db.add_tags(__sessions__.current.file.sha256, args.add)
self.log("info", "Tags added to the currently opened file")
# We refresh the opened session to update the attributes.
# Namely, the list of tags returned by the "info" command
# needs to be re-generated, or it wouldn't show the new tags
            # until the existing session is closed and a new one is opened.
self.log("info", "Refreshing session to update attributes...")
__sessions__.new(__sessions__.current.file.path)
if args.delete:
# Delete the tag from the database.
Database().delete_tag(args.delete)
# Refresh the session so that the attributes of the file are
# updated.
self.log("info", "Refreshing session to update attributes...")
__sessions__.new(__sessions__.current.file.path)
###
# SESSION
#
# This command is used to list and switch across all the opened sessions.
def cmd_sessions(self, *args):
parser = argparse.ArgumentParser(prog="sessions", description="Open a file", epilog="List or switch sessions")
group = parser.add_mutually_exclusive_group()
group.add_argument('-l', '--list', action="store_true", help="List all existing sessions")
group.add_argument('-s', '--switch', type=int, help="Switch to the specified session")
try:
args = parser.parse_args(args)
except:
return
if args.list:
if not __sessions__.sessions:
self.log("info", "There are no opened sessions")
return
rows = []
for session in __sessions__.sessions:
current = ''
if session == __sessions__.current:
current = 'Yes'
rows.append([
session.id,
session.file.name,
session.file.md5,
session.created_at,
current
])
self.log("info", "Opened Sessions:")
self.log("table", dict(header=['#', 'Name', 'MD5', 'Created At', 'Current'], rows=rows))
elif args.switch:
for session in __sessions__.sessions:
if args.switch == session.id:
__sessions__.switch(session)
return
self.log("warning", "The specified session ID doesn't seem to exist")
else:
parser.print_usage()
##
# PROJECTS
#
# This command retrieves a list of all projects.
# You can also switch to a different project.
def cmd_projects(self, *args):
parser = argparse.ArgumentParser(prog="projects", description="Open a file", epilog="List or switch existing projects")
group = parser.add_mutually_exclusive_group()
group.add_argument('-l', '--list', action="store_true", help="List all existing projects")
group.add_argument('-s', '--switch', metavar='project_name', help="Switch to the specified project")
try:
args = parser.parse_args(args)
except:
return
projects_path = os.path.join(os.getcwd(), 'projects')
if not os.path.exists(projects_path):
self.log("info", "The projects directory does not exist yet")
return
if args.list:
self.log("info", "Projects Available:")
rows = []
for project in os.listdir(projects_path):
project_path = os.path.join(projects_path, project)
if os.path.isdir(project_path):
current = ''
if __project__.name and project == __project__.name:
current = 'Yes'
rows.append([project, time.ctime(os.path.getctime(project_path)), current])
self.log('table', dict(header=['Project Name', 'Creation Time', 'Current'], rows=rows))
elif args.switch:
if __sessions__.is_set():
__sessions__.close()
self.log("info", "Closed opened session")
__project__.open(args.switch)
self.log("info", "Switched to project {0}".format(bold(args.switch)))
# Need to re-initialize the Database to open the new SQLite file.
self.db = Database()
else:
            parser.print_usage()
##
# EXPORT
#
# This command will export the current session to file or zip.
def cmd_export(self, *args):
parser = argparse.ArgumentParser(prog="export", description="Export the current session to file or zip")
parser.add_argument('-z', '--zip', action="store_true", help="Export session in a zip archive")
parser.add_argument('value', help="path or archive name")
try:
args = parser.parse_args(args)
except:
return
# This command requires a session to be opened.
if not __sessions__.is_set():
self.log("error", "No session opened")
parser.print_usage()
return
# Check for valid export path.
if args.value is None:
parser.print_usage()
return
# TODO: having for one a folder and for the other a full
# target path can be confusing. We should perhaps standardize this.
# Abort if the specified path already exists.
if os.path.isfile(args.value):
self.log("error", "File at path \"{0}\" already exists, abort".format(args.value))
return
        # If the user chose so, archive the file when exporting it.
# TODO: perhaps add an option to use a password for the archive
# and default it to "infected".
if args.zip:
try:
with ZipFile(args.value, 'w') as export_zip:
export_zip.write(__sessions__.current.file.path, arcname=__sessions__.current.file.name)
except IOError as e:
self.log("error", "Unable to export file: {0}".format(e))
else:
self.log("info", "File archived and exported to {0}".format(args.value))
# Otherwise just dump it to the given directory.
else:
# XXX: Export file with the original file name.
store_path = os.path.join(args.value, __sessions__.current.file.name)
try:
shutil.copyfile(__sessions__.current.file.path, store_path)
except IOError as e:
self.log("error", "Unable to export file: {0}".format(e))
else:
self.log("info", "File exported to {0}".format(store_path))
|
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Configuration base class and utilities."""
import copy
import json
import os
import re
import warnings
from typing import Any, Dict, List, Optional, Tuple, Union
from packaging import version
from requests import HTTPError
from . import __version__
from .dynamic_module_utils import custom_object_save
from .file_utils import (
CONFIG_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_path,
copy_func,
hf_bucket_url,
is_offline_mode,
is_remote_url,
is_torch_available,
)
from .utils import logging
logger = logging.get_logger(__name__)
_re_configuration_file = re.compile(r"config\.(.*)\.json")
class PretrainedConfig(PushToHubMixin):
r"""
Base class for all configuration classes. Handles a few parameters common to all models' configurations as well as
methods for loading/downloading/saving configurations.
<Tip>
A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to
initialize a model does **not** load the model weights. It only affects the model's configuration.
</Tip>
Class attributes (overridden by derived classes):
- **model_type** (`str`) -- An identifier for the model type, serialized into the JSON file, and used to recreate
the correct object in [`~transformers.AutoConfig`].
- **is_composition** (`bool`) -- Whether the config class is composed of multiple sub-configs. In this case the
config has to be initialized from two or more configs of type [`~transformers.PretrainedConfig`] like:
[`~transformers.EncoderDecoderConfig`] or [`~RagConfig`].
- **keys_to_ignore_at_inference** (`List[str]`) -- A list of keys to ignore by default when looking at dictionary
outputs of the model during inference.
- **attribute_map** (`Dict[str, str]`) -- A dict that maps model specific attribute names to the standardized
naming of attributes.
Common attributes (present in all subclasses):
- **vocab_size** (`int`) -- The number of tokens in the vocabulary, which is also the first dimension of the
embeddings matrix (this attribute may be missing for models that don't have a text modality like ViT).
- **hidden_size** (`int`) -- The hidden size of the model.
- **num_attention_heads** (`int`) -- The number of attention heads used in the multi-head attention layers of the
model.
- **num_hidden_layers** (`int`) -- The number of blocks in the model.
    Args:
name_or_path (`str`, *optional*, defaults to `""`):
Store the string that was passed to [`PreTrainedModel.from_pretrained`] or
[`TFPreTrainedModel.from_pretrained`] as `pretrained_model_name_or_path` if the configuration was created
with such a method.
output_hidden_states (`bool`, *optional*, defaults to `False`):
Whether or not the model should return all hidden-states.
output_attentions (`bool`, *optional*, defaults to `False`):
            Whether or not the model should return all attentions.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not the model should return a [`~transformers.file_utils.ModelOutput`] instead of a plain tuple.
is_encoder_decoder (`bool`, *optional*, defaults to `False`):
Whether the model is used as an encoder/decoder or not.
is_decoder (`bool`, *optional*, defaults to `False`):
Whether the model is used as decoder or not (in which case it's used as an encoder).
        cross_attention_hidden_size (`int`, *optional*):
The hidden size of the cross-attention layer in case the model is used as a decoder in an encoder-decoder
setting and the cross-attention hidden dimension differs from `self.config.hidden_size`.
add_cross_attention (`bool`, *optional*, defaults to `False`):
Whether cross-attention layers should be added to the model. Note, this option is only relevant for models
that can be used as decoder models within the [`EncoderDecoderModel`] class, which consists of all models
in `AUTO_MODELS_FOR_CAUSAL_LM`.
tie_encoder_decoder (`bool`, *optional*, defaults to `False`):
Whether all encoder weights should be tied to their equivalent decoder weights. This requires the encoder
and decoder model to have the exact same parameter names.
prune_heads (`Dict[int, List[int]]`, *optional*, defaults to `{}`):
Pruned heads of the model. The keys are the selected layer indices and the associated values, the list of
heads to prune in said layer.
For instance `{1: [0, 2], 2: [2, 3]}` will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.
chunk_size_feed_forward (`int`, *optional*, defaults to `0`):
The chunk size of all feed forward layers in the residual attention blocks. A chunk size of `0` means that
the feed forward layer is not chunked. A chunk size of n means that the feed forward layer processes `n` <
sequence_length embeddings at a time. For more information on feed forward chunking, see [How does Feed
Forward Chunking work?](../glossary.html#feed-forward-chunking).
> Parameters for sequence generation
max_length (`int`, *optional*, defaults to 20):
Maximum length that will be used by default in the `generate` method of the model.
        min_length (`int`, *optional*, defaults to 0):
Minimum length that will be used by default in the `generate` method of the model.
do_sample (`bool`, *optional*, defaults to `False`):
            Flag that will be used by default in the `generate` method of the model. Whether or not to use sampling;
use greedy decoding otherwise.
early_stopping (`bool`, *optional*, defaults to `False`):
Flag that will be used by default in the `generate` method of the model. Whether to stop the beam search
when at least `num_beams` sentences are finished per batch or not.
num_beams (`int`, *optional*, defaults to 1):
Number of beams for beam search that will be used by default in the `generate` method of the model. 1 means
no beam search.
num_beam_groups (`int`, *optional*, defaults to 1):
Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams
that will be used by default in the `generate` method of the model. 1 means no group beam search.
diversity_penalty (`float`, *optional*, defaults to 0.0):
            Value to control diversity for group beam search that will be used by default in the `generate` method of
the model. 0 means no diversity penalty. The higher the penalty, the more diverse are the outputs.
temperature (`float`, *optional*, defaults to 1):
            The value used to modulate the next token probabilities that will be used by default in the `generate` method
of the model. Must be strictly positive.
top_k (`int`, *optional*, defaults to 50):
Number of highest probability vocabulary tokens to keep for top-k-filtering that will be used by default in
the `generate` method of the model.
top_p (`float`, *optional*, defaults to 1):
Value that will be used by default in the `generate` method of the model for `top_p`. If set to float < 1,
only the most probable tokens with probabilities that add up to `top_p` or higher are kept for generation.
repetition_penalty (`float`, *optional*, defaults to 1):
Parameter for repetition penalty that will be used by default in the `generate` method of the model. 1.0
means no penalty.
length_penalty (`float`, *optional*, defaults to 1):
Exponential penalty to the length that will be used by default in the `generate` method of the model.
        no_repeat_ngram_size (`int`, *optional*, defaults to 0):
            Value that will be used by default in the `generate` method of the model for `no_repeat_ngram_size`. If
            set to int > 0, all ngrams of that size can only occur once.
        encoder_no_repeat_ngram_size (`int`, *optional*, defaults to 0):
            Value that will be used by default in the `generate` method of the model for
            `encoder_no_repeat_ngram_size`. If set to int > 0, all ngrams of that size that occur in the
            `encoder_input_ids` cannot occur in the `decoder_input_ids`.
bad_words_ids (`List[int]`, *optional*):
List of token ids that are not allowed to be generated that will be used by default in the `generate`
method of the model. In order to get the tokens of the words that should not appear in the generated text,
use `tokenizer.encode(bad_word, add_prefix_space=True)`.
num_return_sequences (`int`, *optional*, defaults to 1):
Number of independently computed returned sequences for each element in the batch that will be used by
default in the `generate` method of the model.
output_scores (`bool`, *optional*, defaults to `False`):
Whether the model should return the logits when used for generation.
return_dict_in_generate (`bool`, *optional*, defaults to `False`):
Whether the model should return a [`~transformers.file_utils.ModelOutput`] instead of a `torch.LongTensor`.
forced_bos_token_id (`int`, *optional*):
The id of the token to force as the first generated token after the `decoder_start_token_id`. Useful for
multilingual models like [mBART](../model_doc/mbart) where the first generated token needs to be the target
language token.
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached.
remove_invalid_values (`bool`, *optional*):
            Whether to remove possible _nan_ and _inf_ outputs of the model to prevent the generation method from crashing.
Note that using `remove_invalid_values` can slow down generation.
> Parameters for fine-tuning tasks
architectures (`List[str]`, *optional*):
Model architectures that can be used with the model pretrained weights.
finetuning_task (`str`, *optional*):
Name of the task used to fine-tune the model. This can be used when converting from an original (TensorFlow
or PyTorch) checkpoint.
id2label (`Dict[int, str]`, *optional*):
A map from index (for instance prediction index, or target index) to label.
label2id (`Dict[str, int]`, *optional*): A map from label to index for the model.
num_labels (`int`, *optional*):
Number of labels to use in the last layer added to the model, typically for a classification task.
task_specific_params (`Dict[str, Any]`, *optional*):
Additional keyword arguments to store for the current task.
problem_type (`str`, *optional*):
Problem type for `XxxForSequenceClassification` models. Can be one of `"regression"`,
`"single_label_classification"` or `"multi_label_classification"`.
> Parameters linked to the tokenizer
tokenizer_class (`str`, *optional*):
The name of the associated tokenizer class to use (if none is set, will use the tokenizer associated to the
model by default).
prefix (`str`, *optional*):
A specific prompt that should be added at the beginning of each text before calling the model.
bos_token_id (`int`, *optional*): The id of the _beginning-of-stream_ token.
pad_token_id (`int`, *optional*): The id of the _padding_ token.
eos_token_id (`int`, *optional*): The id of the _end-of-stream_ token.
decoder_start_token_id (`int`, *optional*):
If an encoder-decoder model starts decoding with a different token than _bos_, the id of that token.
sep_token_id (`int`, *optional*): The id of the _separation_ token.
> PyTorch specific parameters
torchscript (`bool`, *optional*, defaults to `False`):
Whether or not the model should be used with Torchscript.
tie_word_embeddings (`bool`, *optional*, defaults to `True`):
Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the
            model has an output word embedding layer.
torch_dtype (`str`, *optional*):
The `dtype` of the weights. This attribute can be used to initialize the model to a non-default `dtype`
(which is normally `float32`) and thus allow for optimal storage allocation. For example, if the saved
model is `float16`, ideally we want to load it back using the minimal amount of memory needed to load
`float16` weights. Since the config object is stored in plain text, this attribute contains just the
            floating type string without the `torch.` prefix. For example, for `torch.float16`, `torch_dtype` is
            the `"float16"` string.
            This attribute is currently not used at model loading time, but this may change in future versions.
            We can already start preparing for that by saving the dtype with `save_pretrained`.
> TensorFlow specific parameters
use_bfloat16 (`bool`, *optional*, defaults to `False`):
Whether or not the model should use BFloat16 scalars (only used by some TensorFlow models).
"""
model_type: str = ""
is_composition: bool = False
attribute_map: Dict[str, str] = {}
_auto_class: Optional[str] = None
def __setattr__(self, key, value):
if key in super().__getattribute__("attribute_map"):
key = super().__getattribute__("attribute_map")[key]
super().__setattr__(key, value)
def __getattribute__(self, key):
if key != "attribute_map" and key in super().__getattribute__("attribute_map"):
key = super().__getattribute__("attribute_map")[key]
return super().__getattribute__(key)
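    # A minimal sketch of how `attribute_map` interacts with the two overrides
    # above (hypothetical subclass and mapping, shown only for illustration):
    #
    #     class MyConfig(PretrainedConfig):
    #         attribute_map = {"n_embd": "hidden_size"}
    #
    #         def __init__(self, hidden_size=768, **kwargs):
    #             super().__init__(**kwargs)
    #             self.hidden_size = hidden_size
    #
    #     cfg = MyConfig()
    #     cfg.n_embd = 1024        # routed to cfg.hidden_size by __setattr__
    #     assert cfg.hidden_size == 1024 and cfg.n_embd == 1024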
def __init__(self, **kwargs):
# Attributes with defaults
self.return_dict = kwargs.pop("return_dict", True)
self.output_hidden_states = kwargs.pop("output_hidden_states", False)
self.output_attentions = kwargs.pop("output_attentions", False)
self.torchscript = kwargs.pop("torchscript", False) # Only used by PyTorch models
self.torch_dtype = kwargs.pop("torch_dtype", None) # Only used by PyTorch models
self.use_bfloat16 = kwargs.pop("use_bfloat16", False)
self.pruned_heads = kwargs.pop("pruned_heads", {})
self.tie_word_embeddings = kwargs.pop(
"tie_word_embeddings", True
) # Whether input and output word embeddings should be tied for all MLM, LM and Seq2Seq models.
        # `is_decoder` is used in encoder-decoder models to differentiate the encoder from the decoder
self.is_encoder_decoder = kwargs.pop("is_encoder_decoder", False)
self.is_decoder = kwargs.pop("is_decoder", False)
self.cross_attention_hidden_size = kwargs.pop("cross_attention_hidden_size", None)
self.add_cross_attention = kwargs.pop("add_cross_attention", False)
self.tie_encoder_decoder = kwargs.pop("tie_encoder_decoder", False)
# Parameters for sequence generation
self.max_length = kwargs.pop("max_length", 20)
self.min_length = kwargs.pop("min_length", 0)
self.do_sample = kwargs.pop("do_sample", False)
self.early_stopping = kwargs.pop("early_stopping", False)
self.num_beams = kwargs.pop("num_beams", 1)
self.num_beam_groups = kwargs.pop("num_beam_groups", 1)
self.diversity_penalty = kwargs.pop("diversity_penalty", 0.0)
self.temperature = kwargs.pop("temperature", 1.0)
self.top_k = kwargs.pop("top_k", 50)
self.top_p = kwargs.pop("top_p", 1.0)
self.typical_p = kwargs.pop("typical_p", 1.0)
self.repetition_penalty = kwargs.pop("repetition_penalty", 1.0)
self.length_penalty = kwargs.pop("length_penalty", 1.0)
self.no_repeat_ngram_size = kwargs.pop("no_repeat_ngram_size", 0)
self.encoder_no_repeat_ngram_size = kwargs.pop("encoder_no_repeat_ngram_size", 0)
self.bad_words_ids = kwargs.pop("bad_words_ids", None)
self.num_return_sequences = kwargs.pop("num_return_sequences", 1)
self.chunk_size_feed_forward = kwargs.pop("chunk_size_feed_forward", 0)
self.output_scores = kwargs.pop("output_scores", False)
self.return_dict_in_generate = kwargs.pop("return_dict_in_generate", False)
self.forced_bos_token_id = kwargs.pop("forced_bos_token_id", None)
self.forced_eos_token_id = kwargs.pop("forced_eos_token_id", None)
self.remove_invalid_values = kwargs.pop("remove_invalid_values", False)
# Fine-tuning task arguments
self.architectures = kwargs.pop("architectures", None)
self.finetuning_task = kwargs.pop("finetuning_task", None)
self.id2label = kwargs.pop("id2label", None)
self.label2id = kwargs.pop("label2id", None)
if self.id2label is not None:
kwargs.pop("num_labels", None)
            # Keys are always strings in JSON, so convert the ids to int here.
            self.id2label = dict((int(key), value) for key, value in self.id2label.items())
else:
self.num_labels = kwargs.pop("num_labels", 2)
if self.torch_dtype is not None and isinstance(self.torch_dtype, str):
# we will start using self.torch_dtype in v5, but to be consistent with
# from_pretrained's torch_dtype arg convert it to an actual torch.dtype object
if is_torch_available():
import torch
self.torch_dtype = getattr(torch, self.torch_dtype)
# Tokenizer arguments TODO: eventually tokenizer and models should share the same config
self.tokenizer_class = kwargs.pop("tokenizer_class", None)
self.prefix = kwargs.pop("prefix", None)
self.bos_token_id = kwargs.pop("bos_token_id", None)
self.pad_token_id = kwargs.pop("pad_token_id", None)
self.eos_token_id = kwargs.pop("eos_token_id", None)
self.sep_token_id = kwargs.pop("sep_token_id", None)
self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None)
# task specific arguments
self.task_specific_params = kwargs.pop("task_specific_params", None)
# regression / multi-label classification
self.problem_type = kwargs.pop("problem_type", None)
allowed_problem_types = ("regression", "single_label_classification", "multi_label_classification")
if self.problem_type is not None and self.problem_type not in allowed_problem_types:
raise ValueError(
f"The config parameter `problem_type` was not understood: received {self.problem_type} "
"but only 'regression', 'single_label_classification' and 'multi_label_classification' are valid."
)
# TPU arguments
if kwargs.pop("xla_device", None) is not None:
logger.warning(
"The `xla_device` argument has been deprecated in v4.4.0 of Transformers. It is ignored and you can "
"safely remove it from your `config.json` file."
)
# Name or path to the pretrained checkpoint
self._name_or_path = str(kwargs.pop("name_or_path", ""))
# Drop the transformers version info
self.transformers_version = kwargs.pop("transformers_version", None)
# Deal with gradient checkpointing
if kwargs.get("gradient_checkpointing", False):
            warnings.warn(
                "Passing `gradient_checkpointing` to a config initialization is deprecated and will be removed in v5 "
                "of Transformers. Use `model.gradient_checkpointing_enable()` instead, or, if you are using the "
                "`Trainer` API, pass `gradient_checkpointing=True` in your `TrainingArguments`."
)
# Additional attributes without default values
for key, value in kwargs.items():
try:
setattr(self, key, value)
except AttributeError as err:
logger.error(f"Can't set {key} with value {value} for {self}")
raise err
@property
def name_or_path(self) -> str:
return getattr(self, "_name_or_path", None)
@name_or_path.setter
def name_or_path(self, value):
self._name_or_path = str(value) # Make sure that name_or_path is a string (for JSON encoding)
@property
def use_return_dict(self) -> bool:
"""
        `bool`: Whether or not to return [`~file_utils.ModelOutput`] instead of tuples.
"""
# If torchscript is set, force `return_dict=False` to avoid jit errors
return self.return_dict and not self.torchscript
@property
def num_labels(self) -> int:
"""
`int`: The number of labels for classification models.
"""
return len(self.id2label)
@num_labels.setter
def num_labels(self, num_labels: int):
if not hasattr(self, "id2label") or self.id2label is None or len(self.id2label) != num_labels:
self.id2label = {i: f"LABEL_{i}" for i in range(num_labels)}
self.label2id = dict(zip(self.id2label.values(), self.id2label.keys()))
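    # Illustrative sketch of the num_labels / id2label / label2id interplay
    # implemented by the property and setter above (example labels only):
    #
    #     config = PretrainedConfig(num_labels=3)
    #     config.id2label    # {0: "LABEL_0", 1: "LABEL_1", 2: "LABEL_2"}
    #     config.label2id    # {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
    #     config.num_labels = 2   # regenerates both mappings with two entries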
def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
"""
Save a configuration object to the directory `save_directory`, so that it can be re-loaded using the
[`~PretrainedConfig.from_pretrained`] class method.
Args:
save_directory (`str` or `os.PathLike`):
Directory where the configuration JSON file will be saved (will be created if it does not exist).
push_to_hub (`bool`, *optional*, defaults to `False`):
Whether or not to push your model to the Hugging Face model hub after saving it.
<Tip warning={true}>
Using `push_to_hub=True` will synchronize the repository you are pushing to with `save_directory`,
which requires `save_directory` to be a local clone of the repo you are pushing to if it's an existing
folder. Pass along `temp_dir=True` to use a temporary directory instead.
</Tip>
kwargs:
                Additional keyword arguments passed along to the [`~file_utils.PushToHubMixin.push_to_hub`] method.
"""
if os.path.isfile(save_directory):
raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
if push_to_hub:
commit_message = kwargs.pop("commit_message", None)
repo = self._create_or_get_repo(save_directory, **kwargs)
os.makedirs(save_directory, exist_ok=True)
# If we have a custom config, we copy the file defining it in the folder and set the attributes so it can be
# loaded from the Hub.
if self._auto_class is not None:
custom_object_save(self, save_directory, config=self)
# If we save using the predefined names, we can load using `from_pretrained`
output_config_file = os.path.join(save_directory, CONFIG_NAME)
self.to_json_file(output_config_file, use_diff=True)
logger.info(f"Configuration saved in {output_config_file}")
if push_to_hub:
url = self._push_to_hub(repo, commit_message=commit_message)
logger.info(f"Configuration pushed to the hub in this commit: {url}")
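    # Hedged round-trip sketch for save_pretrained / from_pretrained
    # (uses a hypothetical local directory; no Hub access involved):
    #
    #     config = PretrainedConfig(num_beams=4)
    #     config.save_pretrained("./my_config_dir")          # writes config.json
    #     reloaded = PretrainedConfig.from_pretrained("./my_config_dir")
    #     assert reloaded.num_beams == 4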
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
r"""
Instantiate a [`PretrainedConfig`] (or a derived class) from a pretrained model configuration.
Args:
pretrained_model_name_or_path (`str` or `os.PathLike`):
This can be either:
- a string, the *model id* of a pretrained model configuration hosted inside a model repo on
huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or
namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`.
- a path to a *directory* containing a configuration file saved using the
[`~PretrainedConfig.save_pretrained`] method, e.g., `./my_model_directory/`.
- a path or url to a saved configuration JSON *file*, e.g., `./my_model_directory/configuration.json`.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force (re-)downloading the configuration files and overriding the cached versions if
they exist.
resume_download (`bool`, *optional*, defaults to `False`):
                Whether or not to delete an incompletely received file. Attempts to resume the download if such a file
exists.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
use_auth_token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `transformers-cli login` (stored in `~/.huggingface`).
revision(`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
return_unused_kwargs (`bool`, *optional*, defaults to `False`):
If `False`, then this function returns just the final configuration object.
                If `True`, then this function returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a
dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the
part of `kwargs` which has not been used to update `config` and is otherwise ignored.
kwargs (`Dict[str, Any]`, *optional*):
The values in kwargs of any keys which are configuration attributes will be used to override the loaded
values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled
by the `return_unused_kwargs` keyword parameter.
<Tip>
Passing `use_auth_token=True` is required when you want to use a private model.
</Tip>
Returns:
[`PretrainedConfig`]: The configuration object instantiated from this pretrained model.
Examples:
```python
# We can't instantiate directly the base class *PretrainedConfig* so let's show the examples on a
# derived class: BertConfig
config = BertConfig.from_pretrained(
"bert-base-uncased"
) # Download configuration from huggingface.co and cache.
config = BertConfig.from_pretrained(
"./test/saved_model/"
) # E.g. config (or model) was saved using *save_pretrained('./test/saved_model/')*
config = BertConfig.from_pretrained("./test/saved_model/my_configuration.json")
config = BertConfig.from_pretrained("bert-base-uncased", output_attentions=True, foo=False)
assert config.output_attentions == True
config, unused_kwargs = BertConfig.from_pretrained(
"bert-base-uncased", output_attentions=True, foo=False, return_unused_kwargs=True
)
assert config.output_attentions == True
assert unused_kwargs == {"foo": False}
```"""
config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
)
return cls.from_dict(config_dict, **kwargs)
@classmethod
def get_config_dict(
cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""
From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a
[`PretrainedConfig`] using `from_dict`.
Parameters:
pretrained_model_name_or_path (`str` or `os.PathLike`):
The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
Returns:
`Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the configuration object.
"""
original_kwargs = copy.deepcopy(kwargs)
# Get config dict associated with the base config file
config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
# That config file may point us toward another config file to use.
if "configuration_files" in config_dict:
configuration_file = get_configuration_file(config_dict["configuration_files"])
config_dict, kwargs = cls._get_config_dict(
pretrained_model_name_or_path, _configuration_file=configuration_file, **original_kwargs
)
return config_dict, kwargs
@classmethod
def _get_config_dict(
cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
cache_dir = kwargs.pop("cache_dir", None)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
use_auth_token = kwargs.pop("use_auth_token", None)
local_files_only = kwargs.pop("local_files_only", False)
revision = kwargs.pop("revision", None)
from_pipeline = kwargs.pop("_from_pipeline", None)
from_auto_class = kwargs.pop("_from_auto", False)
user_agent = {"file_type": "config", "from_auto_class": from_auto_class}
if from_pipeline is not None:
user_agent["using_pipeline"] = from_pipeline
if is_offline_mode() and not local_files_only:
logger.info("Offline mode: forcing local_files_only=True")
local_files_only = True
pretrained_model_name_or_path = str(pretrained_model_name_or_path)
if os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
config_file = pretrained_model_name_or_path
else:
configuration_file = kwargs.pop("_configuration_file", CONFIG_NAME)
if os.path.isdir(pretrained_model_name_or_path):
config_file = os.path.join(pretrained_model_name_or_path, configuration_file)
else:
config_file = hf_bucket_url(
pretrained_model_name_or_path, filename=configuration_file, revision=revision, mirror=None
)
try:
# Load from URL or cache if already cached
resolved_config_file = cached_path(
config_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
user_agent=user_agent,
)
except RepositoryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier listed on "
"'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having "
"permission to this repo with `use_auth_token` or log in with `huggingface-cli login` and pass "
"`use_auth_token=True`."
)
except RevisionNotFoundError:
raise EnvironmentError(
f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for this "
f"model name. Check the model page at 'https://huggingface.co/{pretrained_model_name_or_path}' for "
"available revisions."
)
except EntryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} does not appear to have a file named {configuration_file}."
)
        except HTTPError:
            raise EnvironmentError(
                "We couldn't connect to 'https://huggingface.co/' to load this model and it looks like "
                f"{pretrained_model_name_or_path} is not the path to a directory containing a {configuration_file} "
                "file.\nCheck your internet connection or see how to run the library in offline mode at "
"'https://huggingface.co/docs/transformers/installation#offline-mode'."
)
except EnvironmentError:
raise EnvironmentError(
f"Can't load config for '{pretrained_model_name_or_path}'. If you were trying to load it from "
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
f"containing a {configuration_file} file"
)
try:
# Load config dict
config_dict = cls._dict_from_json_file(resolved_config_file)
except (json.JSONDecodeError, UnicodeDecodeError):
raise EnvironmentError(
f"It looks like the config file at '{resolved_config_file}' is not a valid JSON file."
)
if resolved_config_file == config_file:
logger.info(f"loading configuration file {config_file}")
else:
logger.info(f"loading configuration file {config_file} from cache at {resolved_config_file}")
return config_dict, kwargs
@classmethod
def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> "PretrainedConfig":
"""
Instantiates a [`PretrainedConfig`] from a Python dictionary of parameters.
Args:
config_dict (`Dict[str, Any]`):
Dictionary that will be used to instantiate the configuration object. Such a dictionary can be
retrieved from a pretrained checkpoint by leveraging the [`~PretrainedConfig.get_config_dict`] method.
kwargs (`Dict[str, Any]`):
Additional parameters from which to initialize the configuration object.
Returns:
[`PretrainedConfig`]: The configuration object instantiated from those parameters.
"""
return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
config = cls(**config_dict)
if hasattr(config, "pruned_heads"):
config.pruned_heads = dict((int(key), value) for key, value in config.pruned_heads.items())
# Update config with kwargs if needed
to_remove = []
for key, value in kwargs.items():
if hasattr(config, key):
setattr(config, key, value)
if key != "torch_dtype":
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
logger.info(f"Model config {config}")
if return_unused_kwargs:
return config, kwargs
else:
return config
@classmethod
def from_json_file(cls, json_file: Union[str, os.PathLike]) -> "PretrainedConfig":
"""
Instantiates a [`PretrainedConfig`] from the path to a JSON file of parameters.
Args:
json_file (`str` or `os.PathLike`):
Path to the JSON file containing the parameters.
Returns:
[`PretrainedConfig`]: The configuration object instantiated from that JSON file.
"""
config_dict = cls._dict_from_json_file(json_file)
return cls(**config_dict)
@classmethod
def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return json.loads(text)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return f"{self.__class__.__name__} {self.to_json_string()}"
def to_diff_dict(self) -> Dict[str, Any]:
"""
        Removes all attributes from the config which correspond to the default config attributes for better
        readability, and serializes to a Python dictionary.
        Returns:
            `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
"""
config_dict = self.to_dict()
# get the default config dict
default_config_dict = PretrainedConfig().to_dict()
# get class specific config dict
class_config_dict = self.__class__().to_dict() if not self.is_composition else {}
serializable_config_dict = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if (
key not in default_config_dict
or key == "transformers_version"
or value != default_config_dict[key]
or (key in class_config_dict and value != class_config_dict[key])
):
serializable_config_dict[key] = value
self.dict_torch_dtype_to_str(serializable_config_dict)
return serializable_config_dict
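    # Hedged example of the diff-based serialization above: only keys whose
    # values differ from the defaults (plus `transformers_version`) appear in
    # the result.
    #
    #     config = PretrainedConfig(num_beams=5)
    #     diff = config.to_diff_dict()
    #     assert diff["num_beams"] == 5
    #     assert "temperature" not in diff   # still at its default of 1.0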
def to_dict(self) -> Dict[str, Any]:
"""
Serializes this instance to a Python dictionary.
Returns:
`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
"""
output = copy.deepcopy(self.__dict__)
if hasattr(self.__class__, "model_type"):
output["model_type"] = self.__class__.model_type
if "_auto_class" in output:
del output["_auto_class"]
# Transformers version when serializing the model
output["transformers_version"] = __version__
self.dict_torch_dtype_to_str(output)
return output
def to_json_string(self, use_diff: bool = True) -> str:
"""
Serializes this instance to a JSON string.
Args:
use_diff (`bool`, *optional*, defaults to `True`):
If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`
is serialized to JSON string.
Returns:
`str`: String containing all the attributes that make up this configuration instance in JSON format.
"""
if use_diff is True:
config_dict = self.to_diff_dict()
else:
config_dict = self.to_dict()
return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool = True):
"""
Save this instance to a JSON file.
Args:
json_file_path (`str` or `os.PathLike`):
Path to the JSON file in which this configuration instance's parameters will be saved.
use_diff (`bool`, *optional*, defaults to `True`):
If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`
is serialized to JSON file.
"""
with open(json_file_path, "w", encoding="utf-8") as writer:
writer.write(self.to_json_string(use_diff=use_diff))
def update(self, config_dict: Dict[str, Any]):
"""
Updates attributes of this class with attributes from `config_dict`.
Args:
config_dict (`Dict[str, Any]`): Dictionary of attributes that should be updated for this class.
"""
for key, value in config_dict.items():
setattr(self, key, value)
def update_from_string(self, update_str: str):
"""
Updates attributes of this class with attributes from `update_str`.
The expected format is ints, floats and strings as is, and for booleans use `true` or `false`. For example:
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
The keys to change have to already exist in the config object.
Args:
update_str (`str`): String with attributes that should be updated for this class.
"""
d = dict(x.split("=") for x in update_str.split(","))
for k, v in d.items():
if not hasattr(self, k):
raise ValueError(f"key {k} isn't in the original config dict")
old_v = getattr(self, k)
if isinstance(old_v, bool):
if v.lower() in ["true", "1", "y", "yes"]:
v = True
elif v.lower() in ["false", "0", "n", "no"]:
v = False
else:
raise ValueError(f"can't derive true or false from {v} (key {k})")
elif isinstance(old_v, int):
v = int(v)
elif isinstance(old_v, float):
v = float(v)
elif not isinstance(old_v, str):
raise ValueError(
f"You can only update int, float, bool or string values in the config, got {v} for key {k}"
)
setattr(self, k, v)
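    # Minimal sketch of update_from_string with the format described above
    # (example values; the keys must already exist on the config):
    #
    #     config = PretrainedConfig()
    #     config.update_from_string("num_beams=4,do_sample=true,temperature=0.7")
    #     assert config.num_beams == 4
    #     assert config.do_sample is True
    #     assert config.temperature == 0.7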
def dict_torch_dtype_to_str(self, d: Dict[str, Any]) -> None:
"""
Checks whether the passed dictionary has a *torch_dtype* key and if it's not None, converts torch.dtype to a
        string of just the type. For example, `torch.float32` gets converted into the *"float32"* string, which can then be
stored in the json format.
"""
if d.get("torch_dtype", None) is not None and not isinstance(d["torch_dtype"], str):
d["torch_dtype"] = str(d["torch_dtype"]).split(".")[1]
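    # Example of the conversion performed above (assumes torch is installed):
    #
    #     import torch
    #     d = {"torch_dtype": torch.float16}
    #     PretrainedConfig().dict_torch_dtype_to_str(d)
    #     assert d["torch_dtype"] == "float16"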
@classmethod
def register_for_auto_class(cls, auto_class="AutoConfig"):
"""
Register this class with a given auto class. This should only be used for custom configurations as the ones in
the library are already mapped with `AutoConfig`.
<Tip warning={true}>
This API is experimental and may have some slight breaking changes in the next releases.
</Tip>
Args:
auto_class (`str` or `type`, *optional*, defaults to `"AutoConfig"`):
The auto class to register this new configuration with.
"""
if not isinstance(auto_class, str):
auto_class = auto_class.__name__
import transformers.models.auto as auto_module
if not hasattr(auto_module, auto_class):
raise ValueError(f"{auto_class} is not a valid auto class.")
cls._auto_class = auto_class
def get_configuration_file(configuration_files: List[str]) -> str:
"""
Get the configuration file to use for this version of transformers.
Args:
configuration_files (`List[str]`): The list of available configuration files.
Returns:
`str`: The configuration file to use.
"""
configuration_files_map = {}
for file_name in configuration_files:
search = _re_configuration_file.search(file_name)
if search is not None:
v = search.groups()[0]
configuration_files_map[v] = file_name
available_versions = sorted(configuration_files_map.keys())
    # Defaults to CONFIG_NAME and then tries to look at some newer versions.
configuration_file = CONFIG_NAME
transformers_version = version.parse(__version__)
for v in available_versions:
if version.parse(v) <= transformers_version:
configuration_file = configuration_files_map[v]
else:
# No point going further since the versions are sorted.
break
return configuration_file
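# Illustrative sketch of the version selection performed by
# get_configuration_file (file names are hypothetical): the newest
# configuration file whose version is still <= the installed library version
# wins, otherwise CONFIG_NAME is used.
#
#     get_configuration_file(["config.4.0.0.json", "config.4.12.0.json"])
#     # -> "config.4.0.0.json" when running transformers 4.5.0
#     # -> "config.4.12.0.json" when running transformers 4.12.0 or newer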
PretrainedConfig.push_to_hub = copy_func(PretrainedConfig.push_to_hub)
PretrainedConfig.push_to_hub.__doc__ = PretrainedConfig.push_to_hub.__doc__.format(
object="config", object_class="AutoConfig", object_files="configuration file"
)
|
|
"""
In this module, the similarity classes of the item-based approach are defined.
The most important classes are the CombinedRecordSimilarity, the
CollaborativeRecordSimilarity and the ContentRecordSimilarity. The first one
combines the record similarity values of two underlying item based similarity
classes. The second similarity class calculates the similarity of two records
by using a similarity metric, e.g., cosine similarity, on their preference
vectors. Finally, ContentRecordSimilarity retrieves the imported content-based
similarity from the database and stores it in local memory.
"""
from .. import queries
from search_rex.models import ActionType
from ..refreshable import Refreshable
from ..refreshable import RefreshHelper
class Preference(object):
"""
An entry of the session-record matrix consisting of a value and a
preference time
"""
def __init__(self, value, preference_time):
"""
:param value: the value of the preference
:param preference_time: the time at which the preference was committed
"""
self.value = value
self.preference_time = preference_time
class AbstractRecordDataModel(Refreshable):
"""
The repository for the session-record matrix
"""
def get_records(self):
"""
Gets an iterator over all the records
"""
raise NotImplementedError()
def get_preferences_of_session(self, session_id):
"""
Retrieves the preferences of the session
"""
raise NotImplementedError()
def get_preferences_for_record(self, record_id):
"""
Retrieves the preferences for the record
"""
raise NotImplementedError()
def get_preferences_for_records(self):
"""
Retrieves the preference columns of all records
"""
raise NotImplementedError()
class PersistentRecordDataModel(AbstractRecordDataModel):
"""
This repository works directly on the database. It includes the variable
include_internal_records that indicates if this repository includes
internal records
"""
def __init__(
self, include_internal_records, copy_action_weight=2.0,
view_action_weight=1.0):
"""
:param include_internal_records: indicates if this repository includes
internal records
:param copy_action_weight: the preference value of a copy action
:param view_action_weight: the preference value of a view action
"""
self.include_internal_records = include_internal_records
self.view_action_weight = view_action_weight
self.copy_action_weight = copy_action_weight
def get_records(self):
"""
Gets an iterator over all the records
"""
return queries.get_records(self.include_internal_records)
def __get_preferences_from_actions(self, actions, key_func):
preferences = {}
for action in actions:
key = key_func(action)
if key not in preferences:
pref_value = 0.0
if action.action_type == ActionType.view:
pref_value = self.view_action_weight
elif action.action_type == ActionType.copy:
pref_value = self.copy_action_weight
preferences[key] = Preference(
value=pref_value, preference_time=action.time_created)
elif action.action_type == ActionType.copy:
preferences[key].value = self.copy_action_weight
preferences[key].preference_time = action.time_created
return preferences
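    # Sketch of the weighting rule implemented above (illustrative timeline):
    # a view action yields view_action_weight; a later copy action upgrades the
    # preference to copy_action_weight and refreshes its timestamp; further
    # views never downgrade an existing copy preference.
    #
    #     actions: view(record=7) at t1, copy(record=7) at t2, view(record=7) at t3
    #     result:  {7: Preference(value=copy_action_weight, preference_time=t2)}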
def get_preferences_of_session(self, session_id):
"""
Retrieves the preferences of the session
"""
actions = queries.get_actions_of_session(session_id)
preferences = self.__get_preferences_from_actions(
actions, lambda action: action.record_id)
return preferences
def get_preferences_for_record(self, record_id):
"""
Retrieves the preferences for the record
"""
actions = queries.get_actions_on_record(record_id)
preferences = self.__get_preferences_from_actions(
actions, lambda action: action.session_id)
return preferences
def get_preferences_for_records(self):
"""
Retrieves the preference columns of all records
"""
for record_id, actions in queries.get_actions_on_records(
self.include_internal_records):
preferences = self.__get_preferences_from_actions(
actions, lambda action: action.session_id)
yield (record_id, preferences)
def refresh(self, refreshed_components):
"""
No refresh needed as the class works directly on the database
"""
refreshed_components.add(self)
class InMemoryRecordDataModel(AbstractRecordDataModel):
"""
    This data model retrieves the data from an underlying data model and stores
    it in a dictionary. Calling refresh reloads the data.
"""
def __init__(self, data_model):
self.data_model = data_model
self.record_session_mat = {}
self.refresh_helper = RefreshHelper(
target_refresh_function=self.init_model)
self.refresh_helper.add_dependency(data_model)
self.init_model()
def init_model(self):
record_session_mat = {}
for record_id, preferences in\
self.data_model.get_preferences_for_records():
record_session_mat[record_id] = preferences
self.record_session_mat = record_session_mat
def get_records(self):
"""
Gets an iterator over all the records
"""
return self.record_session_mat.keys()
def get_preferences_of_session(self, session_id):
"""
Retrieves the preferences of the session
"""
preferences = {}
for record_id, rec_prefs in self.record_session_mat.iteritems():
if session_id in rec_prefs:
preferences[record_id] = rec_prefs[session_id]
return preferences
def get_preferences_for_record(self, record_id):
"""
Retrieves the preferences for the record
"""
if record_id not in self.record_session_mat:
return {}
return self.record_session_mat[record_id]
def get_preferences_for_records(self):
"""
Retrieves the preference columns of all records
"""
for record_id, preferences in self.record_session_mat.iteritems():
yield record_id, preferences
def refresh(self, refreshed_components):
"""
        Reloads the cached data via the refresh helper, refreshing the
        underlying data model first if needed
"""
self.refresh_helper.refresh(refreshed_components)
refreshed_components.add(self)
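# Illustrative sketch (not part of the original module): how the two data
# models above are typically composed. PersistentRecordDataModel recomputes
# preferences from the database on every call, while InMemoryRecordDataModel
# caches the whole record/session matrix and only goes back to the database
# when refresh() is called. The session id used here is hypothetical.
def _example_record_data_models():
    persistent = PersistentRecordDataModel(
        include_internal_records=False,
        copy_action_weight=2.0, view_action_weight=1.0)
    cached = InMemoryRecordDataModel(persistent)
    prefs = cached.get_preferences_of_session(session_id=42)
    cached.refresh(refreshed_components=set())  # reload from the database
    return prefs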
|
|
# The help text for various thresholding options whose code resides here is in modules/identify.py
from __future__ import absolute_import
from __future__ import division
import inspect
import math
import numpy as np
import scipy.ndimage
import scipy.sparse
import scipy.interpolate
import scipy.stats  # used by get_mog_threshold (scipy.stats.norm)
from .otsu import otsu, entropy, otsu3, entropy3
from .smooth import smooth_with_noise
from .filter import stretch, unstretch
from six.moves import range
from six.moves import zip
TM_OTSU = "Otsu"
TM_OTSU_GLOBAL = "Otsu Global"
TM_OTSU_ADAPTIVE = "Otsu Adaptive"
TM_OTSU_PER_OBJECT = "Otsu PerObject"
TM_MOG = "MoG"
TM_MOG_GLOBAL = "MoG Global"
TM_MOG_ADAPTIVE = "MoG Adaptive"
TM_MOG_PER_OBJECT = "MoG PerObject"
TM_BACKGROUND = "Background"
TM_BACKGROUND_GLOBAL = "Background Global"
TM_BACKGROUND_ADAPTIVE = "Background Adaptive"
TM_BACKGROUND_PER_OBJECT = "Background PerObject"
TM_ROBUST_BACKGROUND = "RobustBackground"
TM_ROBUST_BACKGROUND_GLOBAL = "RobustBackground Global"
TM_ROBUST_BACKGROUND_ADAPTIVE = "RobustBackground Adaptive"
TM_ROBUST_BACKGROUND_PER_OBJECT = "RobustBackground PerObject"
TM_RIDLER_CALVARD = "RidlerCalvard"
TM_RIDLER_CALVARD_GLOBAL = "RidlerCalvard Global"
TM_RIDLER_CALVARD_ADAPTIVE = "RidlerCalvard Adaptive"
TM_RIDLER_CALVARD_PER_OBJECT = "RidlerCalvard PerObject"
TM_KAPUR = "Kapur"
TM_KAPUR_GLOBAL = "Kapur Global"
TM_KAPUR_ADAPTIVE = "Kapur Adaptive"
TM_KAPUR_PER_OBJECT = "Kapur PerObject"
TM_MCT = "MCT"
TM_MCT_GLOBAL = "MCT Global"
TM_MCT_ADAPTIVE = "MCT Adaptive"
TM_MCT_PER_OBJECT = "MCT PerObject"
TM_MANUAL = "Manual"
TM_MEASUREMENT = "Measurement"
TM_BINARY_IMAGE = "Binary image"
"""Compute a single threshold for the entire image"""
TM_GLOBAL = "Global"
"""Compute a local thresholding matrix of the same size as the image"""
TM_ADAPTIVE = "Adaptive"
"""Compute a threshold for each labeled object in the image"""
TM_PER_OBJECT = "PerObject"
TM_METHODS = [
TM_OTSU,
TM_MOG,
TM_BACKGROUND,
TM_ROBUST_BACKGROUND,
TM_RIDLER_CALVARD,
TM_KAPUR,
TM_MCT,
]
TM_GLOBAL_METHODS = [" ".join((x, TM_GLOBAL)) for x in TM_METHODS]
def get_threshold(
threshold_method,
threshold_modifier,
image,
mask=None,
labels=None,
threshold_range_min=None,
threshold_range_max=None,
threshold_correction_factor=1.0,
adaptive_window_size=10,
**kwargs
):
"""Compute a threshold for an image
threshold_method - one of the TM_ methods above
threshold_modifier - TM_GLOBAL to calculate one threshold over entire image
TM_ADAPTIVE to calculate a per-pixel threshold
TM_PER_OBJECT to calculate a different threshold for
each object
image - a NxM numpy array of the image data
Returns a tuple of local_threshold and global_threshold where:
* global_threshold is the single number calculated using the threshold
method over the whole image
* local_threshold is the global_threshold for global methods. For adaptive
and per-object thresholding, local_threshold is a matrix of threshold
values representing the threshold to be applied at each pixel of the
image.
Different methods have optional and required parameters:
Required:
TM_PER_OBJECT:
labels - a labels matrix that defines the extents of the individual objects
to be thresholded separately.
Optional:
All:
mask - a mask of the significant pixels in the image
    threshold_range_min, threshold_range_max - constrain the calculated
        threshold to lie between these limits
threshold_correction_factor - the calculated threshold is multiplied
by this number to get the final threshold
TM_MOG (mixture of Gaussians):
object_fraction - fraction of image expected to be occupied by objects
(pixels that are above the threshold)
TM_OTSU - We have algorithms derived from Otsu. There is a three-class
version of Otsu in addition to the two class. There is also
an entropy measure in addition to the weighted variance.
two_class_otsu - assume that the distribution represents
two intensity classes if true, three if false.
use_weighted_variance - use Otsu's weighted variance if true,
an entropy measure if false
assign_middle_to_foreground - assign pixels in the middle class
in a three-class Otsu to the foreground if true
or the background if false.
"""
global_threshold = get_global_threshold(threshold_method, image, mask, **kwargs)
global_threshold *= threshold_correction_factor
if not threshold_range_min is None:
global_threshold = max(global_threshold, threshold_range_min)
if not threshold_range_max is None:
global_threshold = min(global_threshold, threshold_range_max)
if threshold_modifier == TM_GLOBAL:
local_threshold = global_threshold
elif threshold_modifier == TM_ADAPTIVE:
local_threshold = get_adaptive_threshold(
threshold_method,
image,
global_threshold,
mask,
adaptive_window_size=adaptive_window_size,
**kwargs
)
local_threshold = local_threshold * threshold_correction_factor
elif threshold_modifier == TM_PER_OBJECT:
local_threshold = get_per_object_threshold(
threshold_method,
image,
global_threshold,
mask,
labels,
threshold_range_min,
threshold_range_max,
**kwargs
)
local_threshold = local_threshold * threshold_correction_factor
else:
raise NotImplementedError(
"%s thresholding is not implemented" % (threshold_modifier)
)
if isinstance(local_threshold, np.ndarray):
#
# Constrain thresholds to within .7 to 1.5 of the global threshold.
#
threshold_range_min = max(threshold_range_min, global_threshold * 0.7)
threshold_range_max = min(threshold_range_max, global_threshold * 1.5)
if not threshold_range_min is None:
local_threshold[local_threshold < threshold_range_min] = threshold_range_min
if not threshold_range_max is None:
local_threshold[local_threshold > threshold_range_max] = threshold_range_max
if (threshold_modifier == TM_PER_OBJECT) and (labels is not None):
local_threshold[labels == 0] = 1.0
else:
if not threshold_range_min is None:
local_threshold = max(local_threshold, threshold_range_min)
if not threshold_range_max is None:
local_threshold = min(local_threshold, threshold_range_max)
return local_threshold, global_threshold
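# Illustrative sketch (not part of the original module): computing a global
# Otsu threshold and an adaptive Otsu threshold for a synthetic image. Image
# values are assumed to be floats in [0, 1], as elsewhere in this file.
def _example_get_threshold():
    rng = np.random.RandomState(0)
    img = rng.uniform(size=(128, 128))
    # one scalar threshold for the whole image
    local_t, global_t = get_threshold(TM_OTSU, TM_GLOBAL, img)
    # a per-pixel threshold matrix, blended from per-block thresholds
    local_map, _ = get_threshold(
        TM_OTSU, TM_ADAPTIVE, img, adaptive_window_size=32)
    return global_t, local_map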
def get_global_threshold(threshold_method, image, mask=None, **kwargs):
"""Compute a single threshold over the whole image"""
if mask is not None and not np.any(mask):
return 1
if threshold_method == TM_OTSU:
fn = get_otsu_threshold
elif threshold_method == TM_MOG:
fn = get_mog_threshold
elif threshold_method == TM_BACKGROUND:
fn = get_background_threshold
elif threshold_method == TM_ROBUST_BACKGROUND:
fn = get_robust_background_threshold
elif threshold_method == TM_RIDLER_CALVARD:
fn = get_ridler_calvard_threshold
elif threshold_method == TM_KAPUR:
fn = get_kapur_threshold
elif threshold_method == TM_MCT:
fn = get_maximum_correlation_threshold
else:
raise NotImplementedError("%s algorithm not implemented" % (threshold_method))
kwargs = dict([(k, v) for k, v in kwargs.items() if k in fn.args])
return fn(image, mask, **kwargs)
def get_adaptive_threshold(
threshold_method, image, threshold, mask=None, adaptive_window_size=10, **kwargs
):
"""Given a global threshold, compute a threshold per pixel
    Break the image into blocks (image_size // adaptive_window_size blocks
    per dimension, each roughly adaptive_window_size pixels across), compute
    a global threshold within each block, then blend the per-block thresholds
    across the image with a bivariate spline. The caller (get_threshold)
    subsequently constrains the result to .7 T < t < 1.5 T around the global
    threshold T.
"""
# for the X and Y direction, find the # of blocks, given the
# size constraints
image_size = np.array(image.shape[:2], dtype=int)
nblocks = image_size // adaptive_window_size
if any(n < 2 for n in nblocks):
raise ValueError(
"Adaptive window cannot exceed 50%% of an image dimension.\n"
"Window of %dpx is too large for a %sx%s image" % (
adaptive_window_size, image_size[1], image_size[0]
)
)
#
# Use a floating point block size to apportion the roundoff
# roughly equally to each block
#
increment = np.array(image_size, dtype=float) / np.array(nblocks, dtype=float)
#
# Put the answer here
#
thresh_out = np.zeros(image_size, image.dtype)
#
# Loop once per block, computing the "global" threshold within the
# block.
#
block_threshold = np.zeros([nblocks[0], nblocks[1]])
for i in range(nblocks[0]):
i0 = int(i * increment[0])
i1 = int((i + 1) * increment[0])
for j in range(nblocks[1]):
j0 = int(j * increment[1])
j1 = int((j + 1) * increment[1])
block = image[i0:i1, j0:j1]
block_mask = None if mask is None else mask[i0:i1, j0:j1]
block_threshold[i, j] = get_global_threshold(
threshold_method, block, mask=block_mask, **kwargs
)
#
# Use a cubic spline to blend the thresholds across the image to avoid image artifacts
#
spline_order = min(3, np.min(nblocks) - 1)
xStart = int(increment[0] / 2)
xEnd = int((nblocks[0] - 0.5) * increment[0])
yStart = int(increment[1] / 2)
yEnd = int((nblocks[1] - 0.5) * increment[1])
xtStart = 0.5
xtEnd = image.shape[0] - 0.5
ytStart = 0.5
ytEnd = image.shape[1] - 0.5
block_x_coords = np.linspace(xStart, xEnd, nblocks[0])
block_y_coords = np.linspace(yStart, yEnd, nblocks[1])
adaptive_interpolation = scipy.interpolate.RectBivariateSpline(
block_x_coords,
block_y_coords,
block_threshold,
bbox=(xtStart, xtEnd, ytStart, ytEnd),
kx=spline_order,
ky=spline_order,
)
thresh_out_x_coords = np.linspace(
0.5, int(nblocks[0] * increment[0]) - 0.5, thresh_out.shape[0]
)
thresh_out_y_coords = np.linspace(
0.5, int(nblocks[1] * increment[1]) - 0.5, thresh_out.shape[1]
)
thresh_out = adaptive_interpolation(thresh_out_x_coords, thresh_out_y_coords)
return thresh_out
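# Illustrative sketch (not part of the original module): how the floating
# point block increment above spreads the rounding remainder. For a 105-pixel
# axis split into 10 blocks the increment is 10.5, so successive block edges
# land at 0, 10, 21, 31, 42, ... instead of leaving one oversized block at
# the end of the axis.
def _example_block_edges(axis_size=105, nblocks=10):
    increment = float(axis_size) / nblocks
    return [int(i * increment) for i in range(nblocks + 1)]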
def get_per_object_threshold(
method,
image,
threshold,
mask=None,
labels=None,
threshold_range_min=None,
threshold_range_max=None,
**kwargs
):
"""Return a matrix giving threshold per pixel calculated per-object
image - image to be thresholded
mask - mask out "don't care" pixels
labels - a label mask indicating object boundaries
threshold - the global threshold
"""
if labels is None:
labels = np.ones(image.shape, int)
if not mask is None:
labels[np.logical_not(mask)] = 0
label_extents = scipy.ndimage.find_objects(labels, np.max(labels))
local_threshold = np.ones(image.shape, image.dtype)
for i, extent in enumerate(label_extents, start=1):
label_mask = labels[extent] == i
if not mask is None:
label_mask = np.logical_and(mask[extent], label_mask)
values = image[extent]
per_object_threshold = get_global_threshold(
method, values, mask=label_mask, **kwargs
)
local_threshold[extent][label_mask] = per_object_threshold
return local_threshold
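# Illustrative sketch (not part of the original module): per-object Otsu
# thresholds for two labeled regions. Pixels outside every object keep a
# threshold of 1, so nothing outside an object is classified as foreground.
def _example_per_object_threshold():
    rng = np.random.RandomState(0)
    img = rng.uniform(size=(64, 64))
    labels = np.zeros((64, 64), int)
    labels[4:28, 4:28] = 1    # object 1
    labels[36:60, 36:60] = 2  # object 2
    return get_per_object_threshold(
        TM_OTSU, img,
        0.5,  # global threshold (not consulted by the code above)
        labels=labels)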
def get_otsu_threshold(
image,
mask=None,
two_class_otsu=True,
use_weighted_variance=True,
assign_middle_to_foreground=True,
):
if not mask is None:
image = image[mask]
else:
image = np.array(image.flat)
image = image[image >= 0]
if len(image) == 0:
return 1
image, d = log_transform(image)
if two_class_otsu:
if use_weighted_variance:
threshold = otsu(image)
else:
threshold = entropy(image)
else:
if use_weighted_variance:
t1, t2 = otsu3(image)
else:
t1, t2 = entropy3(image)
threshold = t1 if assign_middle_to_foreground else t2
threshold = inverse_log_transform(threshold, d)
return threshold
get_otsu_threshold.args = inspect.getargspec(get_otsu_threshold).args
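# Illustrative sketch (not part of the original module): the four Otsu
# variants selected by the keyword arguments above on the same pixel data.
def _example_otsu_variants(image, mask=None):
    return {
        "otsu2_weighted_variance": get_otsu_threshold(image, mask),
        "otsu2_entropy": get_otsu_threshold(
            image, mask, use_weighted_variance=False),
        "otsu3_middle_is_foreground": get_otsu_threshold(
            image, mask, two_class_otsu=False),
        "otsu3_middle_is_background": get_otsu_threshold(
            image, mask, two_class_otsu=False,
            assign_middle_to_foreground=False),
    }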
def get_mog_threshold(image, mask=None, object_fraction=0.2):
"""Compute a background using a mixture of gaussians
This function finds a suitable
threshold for the input image Block. It assumes that the pixels in the
image belong to either a background class or an object class. 'pObject'
is an initial guess of the prior probability of an object pixel, or
equivalently, the fraction of the image that is covered by objects.
Essentially, there are two steps. First, a number of Gaussian
distributions are estimated to match the distribution of pixel
intensities in OrigImage. Currently 3 Gaussian distributions are
fitted, one corresponding to a background class, one corresponding to
an object class, and one distribution for an intermediate class. The
distributions are fitted using the Expectation-Maximization (EM)
algorithm, a procedure referred to as Mixture of Gaussians modeling.
When the 3 Gaussian distributions have been fitted, it's decided
whether the intermediate class models background pixels or object
pixels based on the probability of an object pixel 'pObject' given by
the user.
"""
cropped_image = np.array(image.flat) if mask is None else image[mask]
pixel_count = np.product(cropped_image.shape)
max_count = 512 ** 2 # maximum # of pixels analyzed
#
# We need at least 3 pixels to keep from crashing because the highest
# and lowest are chopped out below.
#
object_fraction = float(object_fraction)
background_fraction = 1.0 - object_fraction
if pixel_count < 3 / min(object_fraction, background_fraction):
return 1
if np.max(cropped_image) == np.min(cropped_image):
return cropped_image[0]
number_of_classes = 3
if pixel_count > max_count:
np.random.seed(0)
pixel_indices = np.random.permutation(pixel_count)[:max_count]
cropped_image = cropped_image[pixel_indices]
# Initialize mean and standard deviations of the three Gaussian
# distributions by looking at the pixel intensities in the original
# image and by considering the percentage of the image that is
# covered by object pixels. Class 1 is the background class and Class
# 3 is the object class. Class 2 is an intermediate class and we will
# decide later if it encodes background or object pixels. Also, for
    # robustness we remove 1% of the smallest and highest intensities
# in case there are any quantization effects that have resulted in
# unnaturally many 0:s or 1:s in the image.
cropped_image.sort()
one_percent = (np.product(cropped_image.shape) + 99) // 100
cropped_image = cropped_image[one_percent:-one_percent]
pixel_count = np.product(cropped_image.shape)
# Guess at the class means for the 3 classes: background,
# in-between and object
bg_pixel = cropped_image[int(round(pixel_count * background_fraction / 2.0))]
fg_pixel = cropped_image[int(round(pixel_count * (1 - object_fraction / 2)))]
class_mean = np.array([bg_pixel, (bg_pixel + fg_pixel) / 2, fg_pixel])
class_std = np.ones((3,)) * 0.15
# Initialize prior probabilities of a pixel belonging to each class.
# The intermediate class steals some probability from the background
# and object classes.
class_prob = np.array(
[3.0 / 4.0 * background_fraction, 1.0 / 4.0, 3.0 / 4.0 * object_fraction]
)
# Expectation-Maximization algorithm for fitting the three Gaussian
# distributions/classes to the data. Note, the code below is general
# and works for any number of classes. Iterate until parameters don't
# change anymore.
class_count = np.prod(class_mean.shape)
#
# Do a coarse iteration on subsampled data and a fine iteration on the real
# data
#
r = np.random.RandomState()
r.seed(np.frombuffer(cropped_image[:100].data, np.uint8).tolist())
for data in (
r.permutation(cropped_image)[0 : (len(cropped_image) // 10)],
cropped_image,
):
delta = 1
pixel_count = len(data)
while delta > 0.001:
old_class_mean = class_mean.copy()
# Update probabilities of a pixel belonging to the background or
# object1 or object2
pixel_class_prob = np.ndarray((pixel_count, class_count))
for k in range(class_count):
norm = scipy.stats.norm(class_mean[k], class_std[k])
pixel_class_prob[:, k] = class_prob[k] * norm.pdf(data)
pixel_class_normalizer = np.sum(pixel_class_prob, 1) + 0.000000000001
for k in range(class_count):
pixel_class_prob[:, k] = pixel_class_prob[:, k] / pixel_class_normalizer
# Update parameters in Gaussian distributions
class_prob[k] = np.mean(pixel_class_prob[:, k])
class_mean[k] = np.sum(pixel_class_prob[:, k] * data) / (
class_prob[k] * pixel_count
)
class_std[k] = (
math.sqrt(
np.sum(pixel_class_prob[:, k] * (data - class_mean[k]) ** 2)
/ (pixel_count * class_prob[k])
)
+ 0.000001
)
delta = np.sum(np.abs(old_class_mean - class_mean))
# Now the Gaussian distributions are fitted and we can describe the
# histogram of the pixel intensities as the sum of these Gaussian
# distributions. To find a threshold we first have to decide if the
# intermediate class 2 encodes background or object pixels. This is
# done by choosing the combination of class probabilities "class_prob"
# that best matches the user input "object_fraction".
# Construct an equally spaced array of values between the background
# and object mean
ndivisions = 10000
level = (
np.arange(ndivisions) * ((class_mean[2] - class_mean[0]) / ndivisions)
+ class_mean[0]
)
class_gaussian = np.ndarray((ndivisions, class_count))
for k in range(class_count):
norm = scipy.stats.norm(class_mean[k], class_std[k])
class_gaussian[:, k] = class_prob[k] * norm.pdf(level)
if abs(class_prob[1] + class_prob[2] - object_fraction) < abs(
class_prob[2] - object_fraction
):
# classifying the intermediate as object more closely models
# the user's desired object fraction
background_distribution = class_gaussian[:, 0]
object_distribution = class_gaussian[:, 1] + class_gaussian[:, 2]
else:
background_distribution = class_gaussian[:, 0] + class_gaussian[:, 1]
object_distribution = class_gaussian[:, 2]
# Now, find the threshold at the intersection of the background
# distribution and the object distribution.
index = np.argmin(np.abs(background_distribution - object_distribution))
return level[index]
get_mog_threshold.args = inspect.getargspec(get_mog_threshold).args
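# Illustrative sketch (not part of the original module): the MoG threshold
# needs a prior guess of the fraction of the image covered by objects; 0.2
# (the default above) means roughly 20% foreground is expected. The synthetic
# bimodal image below is only for demonstration.
def _example_mog_threshold():
    rng = np.random.RandomState(0)
    background = rng.normal(0.2, 0.05, size=8000)
    foreground = rng.normal(0.7, 0.05, size=2000)
    img = np.clip(np.concatenate([background, foreground]), 0, 1)
    return get_mog_threshold(img.reshape(100, 100), object_fraction=0.2)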
def get_background_threshold(image, mask=None):
"""Get threshold based on the mode of the image
    The threshold is the mode of the histogram multiplied by
2 (an arbitrary empirical factor). The user will presumably adjust the
multiplication factor as needed."""
cropped_image = np.array(image.flat) if mask is None else image[mask]
if np.product(cropped_image.shape) == 0:
return 0
img_min = np.min(cropped_image)
img_max = np.max(cropped_image)
if img_min == img_max:
return cropped_image[0]
# Only do the histogram between values a bit removed from saturation
robust_min = 0.02 * (img_max - img_min) + img_min
robust_max = 0.98 * (img_max - img_min) + img_min
nbins = 256
cropped_image = cropped_image[
np.logical_and(cropped_image > robust_min, cropped_image < robust_max)
]
if len(cropped_image) == 0:
return robust_min
h = scipy.ndimage.histogram(cropped_image, robust_min, robust_max, nbins)
index = np.argmax(h)
cutoff = float(index) / float(nbins - 1)
#
# If we have a low (or almost no) background, the cutoff will be
# zero since the background falls into the lowest bin. We want to
# offset by the robust cutoff factor of .02. We rescale by 1.04
# to account for the 0.02 at the top and bottom.
#
cutoff = (cutoff + 0.02) / 1.04
return img_min + cutoff * 2 * (img_max - img_min)
get_background_threshold.args = inspect.getargspec(get_background_threshold).args
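# Illustrative sketch (not part of the original module): the arithmetic of
# the mode-times-two heuristic above. For an image spanning [0.0, 1.0] whose
# histogram peak falls in bin 25 of 256, the normalized cutoff is
# 25/255 ~= 0.098, shifted and rescaled to (0.098 + 0.02)/1.04 ~= 0.113, so
# the returned threshold is 0.0 + 0.113 * 2 * (1.0 - 0.0) ~= 0.227.
def _example_background_threshold():
    rng = np.random.RandomState(0)
    img = np.clip(rng.normal(0.1, 0.03, size=(100, 100)), 0, 1)
    return get_background_threshold(img)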
def get_robust_background_threshold(
image,
mask=None,
lower_outlier_fraction=0.05,
upper_outlier_fraction=0.05,
deviations_above_average=2.0,
average_fn=np.mean,
variance_fn=np.std,
):
"""Calculate threshold based on mean & standard deviation
The threshold is calculated by trimming the top and bottom 5% of
pixels off the image, then calculating the mean and standard deviation
of the remaining image. The threshold is then set at 2 (empirical
value) standard deviations above the mean.
image - the image to threshold
mask - mask of pixels to consider (default = all pixels)
lower_outlier_fraction - after ordering the pixels by intensity, remove
the pixels from 0 to len(image) * lower_outlier_fraction from
the threshold calculation (default = .05).
upper_outlier_fraction - remove the pixels from
len(image) * (1 - upper_outlier_fraction) to len(image) from
consideration (default = .05).
deviations_above_average - calculate the standard deviation or MAD and
multiply by this number and add to the average to get the final
threshold (default = 2)
average_fn - function used to calculate the average intensity (e.g.
np.mean, np.median or some sort of mode function). Default = np.mean
variance_fn - function used to calculate the amount of variance.
        Default = np.std
"""
cropped_image = np.array(image.flat) if mask is None else image[mask]
n_pixels = np.product(cropped_image.shape)
if n_pixels < 3:
return 0
cropped_image.sort()
if cropped_image[0] == cropped_image[-1]:
return cropped_image[0]
low_chop = int(round(n_pixels * lower_outlier_fraction))
hi_chop = n_pixels - int(round(n_pixels * upper_outlier_fraction))
im = cropped_image if low_chop == 0 else cropped_image[low_chop:hi_chop]
mean = average_fn(im)
sd = variance_fn(im)
return mean + sd * deviations_above_average
get_robust_background_threshold.args = inspect.getargspec(
get_robust_background_threshold
).args
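# Illustrative sketch (not part of the original module): a more
# outlier-resistant variant of the call above, using the median for the
# average and the median absolute deviation (mad, defined below) for the
# spread instead of mean and standard deviation.
def _example_robust_background_threshold(image, mask=None):
    return get_robust_background_threshold(
        image, mask=mask,
        lower_outlier_fraction=0.05, upper_outlier_fraction=0.05,
        deviations_above_average=2.0,
        average_fn=np.median, variance_fn=mad)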
def mad(a):
"""Calculate the median absolute deviation of a sample
a - a numpy array-like collection of values
returns the median of the deviation of a from its median.
"""
a = np.asfarray(a).flatten()
return np.median(np.abs(a - np.median(a)))
def binned_mode(a):
"""Calculate a binned mode of a sample
a - array of values
This routine bins the sample into np.sqrt(len(a)) bins. This is a
number that is a compromise between fineness of measurement and
the stochastic nature of counting which roughly scales as the
square root of the sample size.
"""
a = np.asarray(a).flatten()
a_min = np.min(a)
a_max = np.max(a)
n_bins = np.ceil(np.sqrt(len(a)))
b = ((a - a_min) / (a_max - a_min) * n_bins).astype(int)
idx = np.argmax(np.bincount(b))
return np.percentile(a, 100 * float(idx + 0.5) / n_bins)
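# Illustrative sketch (not part of the original module): mad() and
# binned_mode() on a small sample. For [1, 1, 2, 2, 2, 3, 9] the median is 2
# and the absolute deviations are [1, 1, 0, 0, 0, 1, 7], so mad() returns 1;
# binned_mode() bins the sample into ceil(sqrt(7)) = 3 bins and reports a
# representative value drawn from the most populated bin.
def _example_mad_and_mode():
    sample = np.array([1, 1, 2, 2, 2, 3, 9], float)
    return mad(sample), binned_mode(sample)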
def get_ridler_calvard_threshold(image, mask=None):
"""Find a threshold using the method of Ridler and Calvard
The reference for this method is:
"Picture Thresholding Using an Iterative Selection Method"
by T. Ridler and S. Calvard, in IEEE Transactions on Systems, Man and
Cybernetics, vol. 8, no. 8, August 1978.
"""
cropped_image = np.array(image.flat) if mask is None else image[mask]
if np.product(cropped_image.shape) < 3:
return 0
if np.min(cropped_image) == np.max(cropped_image):
return cropped_image[0]
# We want to limit the dynamic range of the image to 256. Otherwise,
# an image with almost all values near zero can give a bad result.
min_val = np.max(cropped_image) / 256
cropped_image[cropped_image < min_val] = min_val
im = np.log(cropped_image)
min_val = np.min(im)
max_val = np.max(im)
im = (im - min_val) / (max_val - min_val)
pre_thresh = 0
# This method needs an initial value to start iterating. Using
# graythresh (Otsu's method) is probably not the best, because the
# Ridler Calvard threshold ends up being too close to this one and in
# most cases has the same exact value.
new_thresh = otsu(im)
delta = 0.00001
while abs(pre_thresh - new_thresh) > delta:
pre_thresh = new_thresh
mean1 = np.mean(im[im < pre_thresh])
mean2 = np.mean(im[im >= pre_thresh])
new_thresh = np.mean([mean1, mean2])
return math.exp(min_val + (max_val - min_val) * new_thresh)
get_ridler_calvard_threshold.args = inspect.getargspec(
get_ridler_calvard_threshold
).args
def get_kapur_threshold(image, mask=None):
"""The Kapur, Sahoo, & Wong method of thresholding, adapted to log-space."""
cropped_image = np.array(image.flat) if mask is None else image[mask]
if np.product(cropped_image.shape) < 3:
return 0
if np.min(cropped_image) == np.max(cropped_image):
return cropped_image[0]
log_image = np.log2(smooth_with_noise(cropped_image, 8))
min_log_image = np.min(log_image)
max_log_image = np.max(log_image)
histogram = scipy.ndimage.histogram(log_image, min_log_image, max_log_image, 256)
histogram_values = (
min_log_image
+ (max_log_image - min_log_image) * np.arange(256, dtype=float) / 255
)
# drop any zero bins
keep = histogram != 0
histogram = histogram[keep]
histogram_values = histogram_values[keep]
# check for corner cases
    if np.product(histogram_values.shape) == 1:  # only one nonzero bin left
return 2 ** histogram_values[0]
# Normalize to probabilities
p = histogram.astype(float) / float(np.sum(histogram))
# Find the probabilities totals up to and above each possible threshold.
lo_sum = np.cumsum(p)
hi_sum = lo_sum[-1] - lo_sum
lo_e = np.cumsum(p * np.log2(p))
hi_e = lo_e[-1] - lo_e
# compute the entropies
lo_entropy = lo_e / lo_sum - np.log2(lo_sum)
hi_entropy = hi_e / hi_sum - np.log2(hi_sum)
sum_entropy = lo_entropy[:-1] + hi_entropy[:-1]
sum_entropy[np.logical_not(np.isfinite(sum_entropy))] = np.Inf
entry = np.argmin(sum_entropy)
return 2 ** ((histogram_values[entry] + histogram_values[entry + 1]) / 2)
get_kapur_threshold.args = inspect.getargspec(get_kapur_threshold).args
def get_maximum_correlation_threshold(image, mask=None, bins=256):
"""Return the maximum correlation threshold of the image
image - image to be thresholded
mask - mask of relevant pixels
bins - # of value bins to use
This is an implementation of the maximum correlation threshold as
described in Padmanabhan, "A novel algorithm for optimal image thresholding
of biological data", Journal of Neuroscience Methods 193 (2010) p 380-384
"""
if mask is not None:
image = image[mask]
image = image.ravel()
nm = len(image)
if nm == 0:
return 0
#
# Bin the image
#
min_value = np.min(image)
max_value = np.max(image)
if min_value == max_value:
return min_value
image = ((image - min_value) * (bins - 1) / (max_value - min_value)).astype(int)
histogram = np.bincount(image)
#
# Compute (j - mean) and (j - mean) **2
mean_value = np.mean(image)
diff = np.arange(len(histogram)) - mean_value
diff2 = diff * diff
ndiff = histogram * diff
ndiff2 = histogram * diff2
#
# This is the sum over all j of (j-mean)**2. It's a constant that could
# be factored out, but I follow the method and use it anyway.
#
sndiff2 = np.sum(ndiff2)
#
# Compute the cumulative sum from i to m which is the cumsum at m
# minus the cumsum at i-1
cndiff = np.cumsum(ndiff)
numerator = np.hstack([[cndiff[-1]], cndiff[-1] - cndiff[:-1]])
#
# For the bottom, we need (Nm - Ni) * Ni / Nm
#
ni = nm - np.hstack([[0], np.cumsum(histogram[:-1])]) # number of pixels above i-1
denominator = np.sqrt(sndiff2 * (nm - ni) * ni / nm)
#
mct = numerator / denominator
mct[denominator == 0] = 0
my_bin = np.argmax(mct) - 1
return min_value + my_bin * (max_value - min_value) / (bins - 1)
get_maximum_correlation_threshold.args = inspect.getargspec(
get_maximum_correlation_threshold
).args
def weighted_variance(image, mask, binary_image):
"""Compute the log-transformed variance of foreground and background
image - intensity image used for thresholding
mask - mask of ignored pixels
binary_image - binary image marking foreground and background
"""
if not np.any(mask):
return 0
#
# Clamp the dynamic range of the foreground
#
minval = np.max(image[mask]) / 256
if minval == 0:
return 0
fg = np.log2(np.maximum(image[binary_image & mask], minval))
bg = np.log2(np.maximum(image[(~binary_image) & mask], minval))
nfg = np.product(fg.shape)
nbg = np.product(bg.shape)
if nfg == 0:
return np.var(bg)
elif nbg == 0:
return np.var(fg)
else:
return (np.var(fg) * nfg + np.var(bg) * nbg) / (nfg + nbg)
def sum_of_entropies(image, mask, binary_image):
"""Bin the foreground and background pixels and compute the entropy
of the distribution of points among the bins
"""
mask = mask.copy()
mask[np.isnan(image)] = False
if not np.any(mask):
return 0
#
# Clamp the dynamic range of the foreground
#
minval = np.max(image[mask]) / 256
if minval == 0:
return 0
clamped_image = image.copy()
clamped_image[clamped_image < minval] = minval
#
# Smooth image with -8 bits of noise
#
image = smooth_with_noise(clamped_image, 8)
im_min = np.min(image)
im_max = np.max(image)
#
# Figure out the bounds for the histogram
#
upper = np.log2(im_max)
lower = np.log2(im_min)
if upper == lower:
# All values are the same, answer is log2 of # of pixels
return math.log(np.sum(mask), 2)
#
# Create log-transformed lists of points in the foreground and background
#
fg = image[binary_image & mask]
bg = image[(~binary_image) & mask]
if len(fg) == 0 or len(bg) == 0:
return 0
log_fg = np.log2(fg)
log_bg = np.log2(bg)
#
# Make these into histograms
hfg = np.histogram(log_fg, 256, range=(lower, upper), normed=False, weights=None)[0]
hbg = np.histogram(log_bg, 256, range=(lower, upper), normed=False, weights=None)[0]
# hfg = scipy.ndimage.histogram(log_fg,lower,upper,256)
# hbg = scipy.ndimage.histogram(log_bg,lower,upper,256)
#
# Drop empty bins
#
hfg = hfg[hfg > 0]
hbg = hbg[hbg > 0]
if np.product(hfg.shape) == 0:
hfg = np.ones((1,), int)
if np.product(hbg.shape) == 0:
hbg = np.ones((1,), int)
#
# Normalize
#
hfg = hfg.astype(float) / float(np.sum(hfg))
hbg = hbg.astype(float) / float(np.sum(hbg))
#
# Compute sum of entropies
#
return np.sum(hfg * np.log2(hfg)) + np.sum(hbg * np.log2(hbg))
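# Illustrative sketch (not part of the original module): evaluating a binary
# segmentation with the two metrics above. weighted_variance is the
# log-space within-class variance of the split; sum_of_entropies sums
# p * log2(p) over the foreground and background histograms.
def _example_threshold_quality():
    rng = np.random.RandomState(0)
    img = rng.uniform(0.01, 1.0, size=(64, 64))
    mask = np.ones(img.shape, bool)
    binary = img > get_global_threshold(TM_OTSU, img, mask)
    return weighted_variance(img, mask, binary), \
        sum_of_entropies(img, mask, binary)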
def log_transform(image):
"""Renormalize image intensities to log space
Returns a tuple of transformed image and a dictionary to be passed into
inverse_log_transform. The minimum and maximum from the dictionary
can be applied to an image by the inverse_log_transform to
convert it back to its former intensity values.
"""
orig_min, orig_max = scipy.ndimage.extrema(image)[:2]
#
# We add 1/2 bit noise to an 8 bit image to give the log a bottom
#
limage = image.copy()
noise_min = orig_min + (orig_max - orig_min) / 256.0 + np.finfo(image.dtype).eps
limage[limage < noise_min] = noise_min
d = {"noise_min": noise_min}
limage = np.log(limage)
log_min, log_max = scipy.ndimage.extrema(limage)[:2]
d["log_min"] = log_min
d["log_max"] = log_max
return stretch(limage), d
def inverse_log_transform(image, d):
"""Convert the values in image back to the scale prior to log_transform
image - an image or value or values similarly scaled to image
d - object returned by log_transform
"""
return np.exp(unstretch(image, d["log_min"], d["log_max"]))
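# Illustrative sketch (not part of the original module): log_transform and
# inverse_log_transform round-trip. Values below the noise floor are clamped
# on the way in, so the round trip is exact only above that floor.
def _example_log_roundtrip():
    rng = np.random.RandomState(0)
    img = rng.uniform(0.05, 1.0, size=(32, 32))
    limg, d = log_transform(img)
    restored = inverse_log_transform(limg, d)
    return np.max(np.abs(restored - img))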
|
|
from __future__ import absolute_import
import six
from sentry.models import ExternalIssue, GroupLink, Integration
from sentry.testutils import APITestCase
from sentry.utils.http import absolute_uri
class GroupIntegrationDetailsTest(APITestCase):
def test_simple_get_link(self):
self.login_as(user=self.user)
org = self.organization
group = self.create_group()
integration = Integration.objects.create(
provider='example',
name='Example',
)
integration.add_organization(org.id)
path = '/api/0/issues/{}/integrations/{}/?action=link'.format(group.id, integration.id)
with self.feature('organizations:integrations-issue-basic'):
response = self.client.get(path)
provider = integration.get_provider()
assert response.data == {
'id': six.text_type(integration.id),
'name': integration.name,
'icon': integration.metadata.get('icon'),
'domainName': integration.metadata.get('domain_name'),
'accountType': integration.metadata.get('account_type'),
'status': integration.get_status_display(),
'provider': {
'key': provider.key,
'name': provider.name,
'canAdd': provider.can_add,
'canDisable': provider.can_disable,
'features': [f.value for f in provider.features],
'aspects': provider.metadata.aspects,
},
'linkIssueConfig': [{
'default': '',
'type': 'string',
'name': 'externalIssue',
'label': 'Issue',
}]
}
def test_simple_get_create(self):
self.login_as(user=self.user)
org = self.organization
group = self.create_group()
self.create_event(group=group)
integration = Integration.objects.create(
provider='example',
name='Example',
)
integration.add_organization(org.id)
path = '/api/0/issues/{}/integrations/{}/?action=create'.format(group.id, integration.id)
with self.feature('organizations:integrations-issue-basic'):
response = self.client.get(path)
provider = integration.get_provider()
assert response.data == {
'id': six.text_type(integration.id),
'name': integration.name,
'icon': integration.metadata.get('icon'),
'domainName': integration.metadata.get('domain_name'),
'accountType': integration.metadata.get('account_type'),
'status': integration.get_status_display(),
'provider': {
'key': provider.key,
'name': provider.name,
'canAdd': provider.can_add,
'canDisable': provider.can_disable,
'features': [f.value for f in provider.features],
'aspects': provider.metadata.aspects,
},
'createIssueConfig': [
{
'default': 'message',
'type': 'string',
'name': 'title',
'label': 'Title',
'required': True,
}, {
'default': ('Sentry Issue: [%s](%s)\n\n```\n'
'Stacktrace (most recent call last):\n\n '
'File "sentry/models/foo.py", line 29, in build_msg\n '
'string_max_length=self.string_max_length)\n\nmessage\n```'
) % (group.qualified_short_id, absolute_uri(group.get_absolute_url())),
'type': 'textarea',
'name': 'description',
'label': 'Description',
'autosize': True,
'maxRows': 10,
}
]
}
def test_get_feature_disabled(self):
self.login_as(user=self.user)
org = self.organization
group = self.create_group()
self.create_event(group=group)
integration = Integration.objects.create(
provider='example',
name='Example',
)
integration.add_organization(org.id)
path = '/api/0/issues/{}/integrations/{}/?action=create'.format(group.id, integration.id)
response = self.client.get(path)
assert response.status_code == 400
assert response.data['detail'] == 'Your organization does not have access to this feature.'
def test_simple_put(self):
self.login_as(user=self.user)
org = self.organization
group = self.create_group()
integration = Integration.objects.create(
provider='example',
name='Example',
)
integration.add_organization(org.id)
path = '/api/0/issues/{}/integrations/{}/'.format(group.id, integration.id)
with self.feature('organizations:integrations-issue-basic'):
response = self.client.put(path, data={
'externalIssue': 'APP-123'
})
assert response.status_code == 201
external_issue = ExternalIssue.objects.get(
key='APP-123',
integration_id=integration.id,
organization_id=org.id,
)
assert external_issue.title == 'This is a test external issue title'
assert external_issue.description == 'This is a test external issue description'
assert GroupLink.objects.filter(
linked_type=GroupLink.LinkedType.issue,
group_id=group.id,
linked_id=external_issue.id,
).exists()
def test_put_feature_disabled(self):
self.login_as(user=self.user)
org = self.organization
group = self.create_group()
integration = Integration.objects.create(
provider='example',
name='Example',
)
integration.add_organization(org.id)
path = '/api/0/issues/{}/integrations/{}/'.format(group.id, integration.id)
response = self.client.put(path, data={
'externalIssue': 'APP-123'
})
assert response.status_code == 400
assert response.data['detail'] == 'Your organization does not have access to this feature.'
def test_simple_post(self):
self.login_as(user=self.user)
org = self.organization
group = self.create_group()
integration = Integration.objects.create(
provider='example',
name='Example',
)
integration.add_organization(org.id)
path = '/api/0/issues/{}/integrations/{}/'.format(group.id, integration.id)
with self.feature('organizations:integrations-issue-basic'):
response = self.client.post(path, data={})
assert response.status_code == 400
assert response.data['non_field_errors'] == ['Assignee is required']
response = self.client.post(path, data={'assignee': '[email protected]'})
assert response.status_code == 201
external_issue = ExternalIssue.objects.get(
key='APP-123',
integration_id=integration.id,
organization_id=org.id,
)
assert external_issue.description == u'This is a test external issue description'
assert external_issue.title == u'This is a test external issue title'
assert GroupLink.objects.filter(
linked_type=GroupLink.LinkedType.issue,
group_id=group.id,
linked_id=external_issue.id,
).exists()
def test_post_feature_disabled(self):
self.login_as(user=self.user)
org = self.organization
group = self.create_group()
integration = Integration.objects.create(
provider='example',
name='Example',
)
integration.add_organization(org.id)
path = '/api/0/issues/{}/integrations/{}/'.format(group.id, integration.id)
response = self.client.post(path, data={})
assert response.status_code == 400
assert response.data['detail'] == 'Your organization does not have access to this feature.'
def test_simple_delete(self):
self.login_as(user=self.user)
org = self.organization
group = self.create_group()
integration = Integration.objects.create(
provider='example',
name='Example',
)
integration.add_organization(org.id)
external_issue = ExternalIssue.objects.get_or_create(
organization_id=org.id,
integration_id=integration.id,
key='APP-123',
)[0]
group_link = GroupLink.objects.get_or_create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.issue,
linked_id=external_issue.id,
relationship=GroupLink.Relationship.references,
)[0]
path = '/api/0/issues/{}/integrations/{}/?externalIssue={}'.format(
group.id, integration.id, external_issue.id,
)
with self.feature('organizations:integrations-issue-basic'):
response = self.client.delete(path)
assert response.status_code == 204
assert not ExternalIssue.objects.filter(id=external_issue.id).exists()
assert not GroupLink.objects.filter(id=group_link.id).exists()
def test_delete_feature_disabled(self):
self.login_as(user=self.user)
org = self.organization
group = self.create_group()
integration = Integration.objects.create(
provider='example',
name='Example',
)
integration.add_organization(org.id)
external_issue = ExternalIssue.objects.get_or_create(
organization_id=org.id,
integration_id=integration.id,
key='APP-123',
)[0]
GroupLink.objects.get_or_create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.issue,
linked_id=external_issue.id,
relationship=GroupLink.Relationship.references,
)[0]
path = '/api/0/issues/{}/integrations/{}/?externalIssue={}'.format(
group.id, integration.id, external_issue.id,
)
response = self.client.delete(path)
assert response.status_code == 400
assert response.data['detail'] == 'Your organization does not have access to this feature.'
|
|
# Copyright (c) 2012 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for NetApp volume driver
"""
import BaseHTTPServer
import httplib
from lxml import etree
import mock
import six
from cinder import exception
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.netapp.api import NaApiError
from cinder.volume.drivers.netapp.api import NaElement
from cinder.volume.drivers.netapp.api import NaServer
from cinder.volume.drivers.netapp import common
from cinder.volume.drivers.netapp import iscsi
from cinder.volume.drivers.netapp.options import netapp_7mode_opts
from cinder.volume.drivers.netapp.options import netapp_basicauth_opts
from cinder.volume.drivers.netapp.options import netapp_cluster_opts
from cinder.volume.drivers.netapp.options import netapp_connection_opts
from cinder.volume.drivers.netapp.options import netapp_provisioning_opts
from cinder.volume.drivers.netapp.options import netapp_transport_opts
from cinder.volume.drivers.netapp import ssc_utils
def create_configuration():
configuration = conf.Configuration(None)
configuration.append_config_values(netapp_connection_opts)
configuration.append_config_values(netapp_transport_opts)
configuration.append_config_values(netapp_basicauth_opts)
configuration.append_config_values(netapp_cluster_opts)
configuration.append_config_values(netapp_7mode_opts)
configuration.append_config_values(netapp_provisioning_opts)
return configuration
class FakeHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""HTTP handler that doesn't spam the log."""
def log_message(self, format, *args):
pass
class FakeHttplibSocket(object):
"""A fake socket implementation for httplib.HTTPResponse."""
def __init__(self, value):
self._rbuffer = six.StringIO(value)
self._wbuffer = six.StringIO('')
oldclose = self._wbuffer.close
def newclose():
self.result = self._wbuffer.getvalue()
oldclose()
self._wbuffer.close = newclose
def makefile(self, mode, _other):
"""Returns the socket's internal buffer"""
if mode == 'r' or mode == 'rb':
return self._rbuffer
if mode == 'w' or mode == 'wb':
return self._wbuffer
RESPONSE_PREFIX_DIRECT_CMODE = """<?xml version='1.0' encoding='UTF-8' ?>
<!DOCTYPE netapp SYSTEM 'file:/etc/netapp_gx.dtd'>"""
RESPONSE_PREFIX_DIRECT_7MODE = """<?xml version='1.0' encoding='UTF-8' ?>
<!DOCTYPE netapp SYSTEM "/na_admin/netapp_filer.dtd">"""
RESPONSE_PREFIX_DIRECT = """
<netapp version='1.15' xmlns='http://www.netapp.com/filer/admin'>"""
RESPONSE_SUFFIX_DIRECT = """</netapp>"""
class FakeDirectCMODEServerHandler(FakeHTTPRequestHandler):
"""HTTP handler that fakes enough stuff to allow the driver to run."""
def do_GET(s):
"""Respond to a GET request."""
if '/servlets/netapp.servlets.admin.XMLrequest_filer' not in s.path:
s.send_response(404)
            s.end_headers()
return
s.send_response(200)
s.send_header("Content-Type", "text/xml; charset=utf-8")
s.end_headers()
out = s.wfile
out.write('<netapp version="1.15">'
'<results reason="Not supported method type"'
' status="failed" errno="Not_Allowed"/></netapp>')
def do_POST(s):
"""Respond to a POST request."""
if '/servlets/netapp.servlets.admin.XMLrequest_filer' not in s.path:
s.send_response(404)
            s.end_headers()
return
request_xml = s.rfile.read(int(s.headers['Content-Length']))
root = etree.fromstring(request_xml)
body = [x for x in root.iterchildren()]
request = body[0]
tag = request.tag
api = etree.QName(tag).localname or tag
if 'lun-get-iter' == api:
tag = \
FakeDirectCMODEServerHandler._get_child_by_name(request, 'tag')
if tag is None:
body = """<results status="passed"><attributes-list>
<lun-info>
<alignment>indeterminate</alignment>
<block-size>512</block-size>
<comment></comment><creation-timestamp>1354536362
</creation-timestamp>
<is-space-alloc-enabled>false</is-space-alloc-enabled>
<is-space-reservation-enabled>true
</is-space-reservation-enabled>
<mapped>false</mapped><multiprotocol-type>linux
</multiprotocol-type>
<online>true</online><path>/vol/navneet/lun1</path>
<prefix-size>0</prefix-size><qtree></qtree><read-only>
false</read-only><serial-number>2FfGI$APyN68</serial-number>
<share-state>none</share-state><size>20971520</size>
<size-used>0</size-used><staging>false</staging>
<suffix-size>0</suffix-size>
<uuid>cec1f3d7-3d41-11e2-9cf4-123478563412</uuid>
<volume>navneet</volume><vserver>ben_vserver</vserver>
</lun-info></attributes-list>
<next-tag><lun-get-iter-key-td>
<key-0>ben_vserver</key-0>
<key-1>/vol/navneet/lun2</key-1>
<key-2>navneet</key-2>
<key-3></key-3>
<key-4>lun2</key-4>
</lun-get-iter-key-td>
</next-tag><num-records>1</num-records></results>"""
else:
body = """<results status="passed"><attributes-list>
<lun-info>
<alignment>indeterminate</alignment>
<block-size>512</block-size>
<comment></comment><creation-timestamp>1354536362
</creation-timestamp>
<is-space-alloc-enabled>false</is-space-alloc-enabled>
<is-space-reservation-enabled>true
</is-space-reservation-enabled>
<mapped>false</mapped><multiprotocol-type>linux
</multiprotocol-type>
<online>true</online><path>/vol/navneet/lun3</path>
<prefix-size>0</prefix-size><qtree></qtree><read-only>
false</read-only><serial-number>2FfGI$APyN68
</serial-number>
<share-state>none</share-state><size>20971520</size>
<size-used>0</size-used><staging>false</staging>
<suffix-size>0</suffix-size>
<uuid>cec1f3d7-3d41-11e2-9cf4-123478563412</uuid>
<volume>navneet</volume><vserver>ben_vserver</vserver>
</lun-info></attributes-list>
<num-records>1</num-records></results>"""
elif 'volume-get-iter' == api:
tag = \
FakeDirectCMODEServerHandler._get_child_by_name(request, 'tag')
if tag is None:
body = """<results status="passed"><attributes-list>
<volume-attributes>
<volume-id-attributes><name>iscsi</name>
<owning-vserver-name>Openstack</owning-vserver-name>
</volume-id-attributes>
<volume-space-attributes>
<size-available>214748364</size-available>
</volume-space-attributes>
<volume-state-attributes><is-cluster-volume>true
</is-cluster-volume>
<is-vserver-root>false</is-vserver-root><state>online</state>
</volume-state-attributes></volume-attributes>
<volume-attributes>
<volume-id-attributes><name>nfsvol</name>
<owning-vserver-name>openstack</owning-vserver-name>
</volume-id-attributes>
<volume-space-attributes>
<size-available>247483648</size-available>
</volume-space-attributes>
<volume-state-attributes><is-cluster-volume>true
</is-cluster-volume>
<is-vserver-root>false</is-vserver-root><state>online</state>
</volume-state-attributes></volume-attributes>
</attributes-list>
<next-tag><volume-get-iter-key-td>
<key-0>openstack</key-0>
<key-1>nfsvol</key-1>
</volume-get-iter-key-td>
</next-tag><num-records>2</num-records></results>"""
else:
body = """<results status="passed"><attributes-list>
<volume-attributes>
<volume-id-attributes><name>iscsi</name>
<owning-vserver-name>Openstack</owning-vserver-name>
</volume-id-attributes>
<volume-space-attributes>
<size-available>4147483648</size-available>
</volume-space-attributes>
<volume-state-attributes><is-cluster-volume>true
</is-cluster-volume>
<is-vserver-root>false</is-vserver-root><state>online</state>
</volume-state-attributes></volume-attributes>
<volume-attributes>
<volume-id-attributes><name>nfsvol</name>
<owning-vserver-name>openstack</owning-vserver-name>
</volume-id-attributes>
<volume-space-attributes>
<size-available>8147483648</size-available>
</volume-space-attributes>
<volume-state-attributes><is-cluster-volume>true
</is-cluster-volume>
<is-vserver-root>false</is-vserver-root><state>online</state>
</volume-state-attributes></volume-attributes>
</attributes-list>
<num-records>2</num-records></results>"""
elif 'lun-create-by-size' == api:
body = """<results status="passed">
<actual-size>22020096</actual-size></results>"""
elif 'lun-destroy' == api:
body = """<results status="passed"/>"""
elif 'igroup-get-iter' == api:
init_found = True
query = FakeDirectCMODEServerHandler._get_child_by_name(request,
'query')
if query is not None:
igroup_info = FakeDirectCMODEServerHandler._get_child_by_name(
query, 'initiator-group-info')
if igroup_info is not None:
inits = FakeDirectCMODEServerHandler._get_child_by_name(
igroup_info, 'initiators')
if inits is not None:
init_info = \
FakeDirectCMODEServerHandler._get_child_by_name(
inits, 'initiator-info')
init_name = \
FakeDirectCMODEServerHandler._get_child_content(
init_info,
'initiator-name')
if init_name == 'iqn.1993-08.org.debian:01:10':
init_found = True
else:
init_found = False
if init_found:
tag = \
FakeDirectCMODEServerHandler._get_child_by_name(
request, 'tag')
if tag is None:
body = """<results status="passed"><attributes-list>
<initiator-group-info><initiator-group-name>
openstack-01f5297b-00f7-4170-bf30-69b1314b2118
</initiator-group-name>
<initiator-group-os-type>windows</initiator-group-os-type>
<initiator-group-type>iscsi</initiator-group-type>
<initiators>
<initiator-info>
<initiator-name>iqn.1993-08.org.debian:01:10</initiator-name>
</initiator-info></initiators>
<vserver>openstack</vserver></initiator-group-info>
</attributes-list><next-tag>
<igroup-get-iter-key-td>
<key-0>openstack</key-0>
<key-1>
openstack-01f5297b-00f7-4170-bf30-69b1314b2118<
/key-1>
</igroup-get-iter-key-td>
</next-tag><num-records>1</num-records></results>"""
else:
body = """<results status="passed"><attributes-list>
<initiator-group-info><initiator-group-name>
openstack-01f5297b-00f7-4170-bf30-69b1314b2118
</initiator-group-name>
<initiator-group-os-type>linux</initiator-group-os-type>
<initiator-group-type>iscsi</initiator-group-type>
<initiators>
<initiator-info>
<initiator-name>iqn.1993-08.org.debian:01:10</initiator-name>
</initiator-info></initiators>
<vserver>openstack</vserver></initiator-group-info>
</attributes-list><num-records>1</num-records></results>"""
else:
body = """<results status="passed">
<num-records>0</num-records>
</results>"""
elif 'lun-map-get-iter' == api:
tag = \
FakeDirectCMODEServerHandler._get_child_by_name(request, 'tag')
if tag is None:
body = """<results status="passed"><attributes-list>
<lun-map-info>
<initiator-group>openstack-44c5e7e1-3306-4800-9623-259e57d56a83
</initiator-group>
<initiator-group-uuid>948ae304-06e9-11e2</initiator-group-uuid>
<lun-id>0</lun-id>
<lun-uuid>5587e563-06e9-11e2-9cf4-123478563412</lun-uuid>
<path>/vol/openvol/lun1</path>
<vserver>openstack</vserver>
</lun-map-info></attributes-list>
<next-tag>
<lun-map-get-iter-key-td>
<key-0>openstack</key-0>
<key-1>openstack-01f5297b-00f7-4170-bf30-69b1314b2118<
/key-1>
</lun-map-get-iter-key-td>
</next-tag>
<num-records>1</num-records>
</results>"""
else:
body = """<results status="passed"><attributes-list>
<lun-map-info>
<initiator-group>openstack-44c5e7e1-3306-4800-9623-259e57d56a83
</initiator-group>
<initiator-group-uuid>948ae304-06e9-11e2</initiator-group-uuid>
<lun-id>0</lun-id>
<lun-uuid>5587e563-06e9-11e2-9cf4-123478563412</lun-uuid>
<path>/vol/openvol/lun1</path>
<vserver>openstack</vserver>
</lun-map-info></attributes-list><num-records>1</num-records>
</results>"""
elif 'lun-map' == api:
body = """<results status="passed"><lun-id-assigned>1
</lun-id-assigned>
</results>"""
elif 'lun-get-geometry' == api:
body = """<results status="passed"><bytes-per-sector>256
</bytes-per-sector><cylinders>512</cylinders><max-resize-size>
3221225472</max-resize-size><sectors-per-track>512
</sectors-per-track><size>2147483648</size>
<tracks-per-cylinder>256</tracks-per-cylinder></results>"""
elif 'iscsi-service-get-iter' == api:
body = """<results status="passed"><attributes-list>
<iscsi-service-info>
<alias-name>openstack</alias-name>
<is-available>true</is-available>
<node-name>iqn.1992-08.com.netapp:sn.fa9:vs.105</node-name>
<vserver>openstack</vserver></iscsi-service-info>
</attributes-list><num-records>1</num-records></results>"""
elif 'iscsi-interface-get-iter' == api:
body = """<results status="passed"><attributes-list>
<iscsi-interface-list-entry-info><current-node>
fas3170rre-cmode-01
</current-node><current-port>e1b-1165</current-port>
<interface-name>
iscsi_data_if</interface-name>
<ip-address>10.63.165.216</ip-address>
<ip-port>3260</ip-port><is-interface-enabled>true
</is-interface-enabled>
<relative-port-id>5</relative-port-id>
<tpgroup-name>iscsi_data_if</tpgroup-name>
<tpgroup-tag>1038</tpgroup-tag><vserver>
openstack</vserver>
</iscsi-interface-list-entry-info></attributes-list>
<num-records>1</num-records></results>"""
elif 'igroup-create' == api:
body = """<results status="passed"/>"""
elif 'igroup-add' == api:
body = """<results status="passed"/>"""
elif 'clone-create' == api:
body = """<results status="passed"/>"""
elif 'lun-unmap' == api:
body = """<results status="passed"/>"""
elif 'system-get-ontapi-version' == api:
body = """<results status="passed">
<major-version>1</major-version>
<minor-version>19</minor-version>
</results>"""
elif 'vserver-get-iter' == api:
body = """<results status="passed"><attributes-list>
<vserver-info>
<vserver-name>vserver</vserver-name>
<vserver-type>node</vserver-type>
</vserver-info>
</attributes-list>
<num-records>1</num-records></results>"""
elif 'ems-autosupport-log' == api:
body = """<results status="passed"/>"""
elif 'lun-resize' == api:
body = """<results status="passed"/>"""
elif 'lun-get-geometry' == api:
body = """<results status="passed">
<size>1</size>
<bytes-per-sector>2</bytes-per-sector>
<sectors-per-track>8</sectors-per-track>
<tracks-per-cylinder>2</tracks-per-cylinder>
<cylinders>4</cylinders>
<max-resize-size>5</max-resize-size>
</results>"""
elif 'volume-options-list-info' == api:
body = """<results status="passed">
<options>
<option>
<name>compression</name>
<value>off</value>
</option>
</options>
</results>"""
elif 'lun-move' == api:
body = """<results status="passed"/>"""
else:
# Unknown API
s.send_response(500)
            s.end_headers()
return
s.send_response(200)
s.send_header("Content-Type", "text/xml; charset=utf-8")
s.end_headers()
s.wfile.write(RESPONSE_PREFIX_DIRECT_CMODE)
s.wfile.write(RESPONSE_PREFIX_DIRECT)
s.wfile.write(body)
s.wfile.write(RESPONSE_SUFFIX_DIRECT)
@staticmethod
def _get_child_by_name(self, name):
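        # Note: despite the conventional name, ``self`` here is the lxml
        # element to search; the helper is invoked as a plain function, e.g.
        # FakeDirectCMODEServerHandler._get_child_by_name(request, 'tag').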
for child in self.iterchildren():
if child.tag == name or etree.QName(child.tag).localname == name:
return child
return None
@staticmethod
def _get_child_content(self, name):
"""Get the content of the child."""
for child in self.iterchildren():
if child.tag == name or etree.QName(child.tag).localname == name:
return child.text
return None
class FakeDirectCmodeHTTPConnection(object):
"""A fake httplib.HTTPConnection for netapp tests
Requests made via this connection actually get translated and routed into
    the fake direct handler above; we then turn the response into
the httplib.HTTPResponse that the caller expects.
"""
def __init__(self, host, timeout=None):
self.host = host
def request(self, method, path, data=None, headers=None):
if not headers:
headers = {}
req_str = '%s %s HTTP/1.1\r\n' % (method, path)
for key, value in headers.iteritems():
req_str += "%s: %s\r\n" % (key, value)
if data:
req_str += '\r\n%s' % data
# NOTE(vish): normally the http transport normalizes from unicode
sock = FakeHttplibSocket(req_str.decode("latin-1").encode("utf-8"))
# NOTE(vish): stop the server from trying to look up address from
# the fake socket
FakeDirectCMODEServerHandler.address_string = lambda x: '127.0.0.1'
self.app = FakeDirectCMODEServerHandler(sock, '127.0.0.1:80', None)
self.sock = FakeHttplibSocket(sock.result)
self.http_response = httplib.HTTPResponse(self.sock)
def set_debuglevel(self, level):
pass
def getresponse(self):
self.http_response.begin()
return self.http_response
def getresponsebody(self):
return self.sock.result
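# Note (added for clarity, not part of the original tests): the driver under
# test is pointed at this fake transport by replacing the standard library
# class, i.e. stubbing httplib.HTTPConnection with
# FakeDirectCmodeHTTPConnection, as done in _custom_setup() below.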
class NetAppDirectCmodeISCSIDriverTestCase(test.TestCase):
"""Test case for NetAppISCSIDriver"""
volume = {'name': 'lun1', 'size': 2, 'volume_name': 'lun1',
'os_type': 'linux', 'provider_location': 'lun1',
'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
'display_name': None, 'display_description': 'lun1',
'volume_type_id': None}
snapshot = {'name': 'snapshot1', 'size': 2, 'volume_name': 'lun1',
'volume_size': 2, 'project_id': 'project',
'display_name': None, 'display_description': 'lun1',
'volume_type_id': None}
snapshot_fail = {'name': 'snapshot2', 'size': 2, 'volume_name': 'lun1',
'volume_size': 1, 'project_id': 'project'}
volume_sec = {'name': 'vol_snapshot', 'size': 2, 'volume_name': 'lun1',
'os_type': 'linux', 'provider_location': 'lun1',
'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
'display_name': None, 'display_description': 'lun1',
'volume_type_id': None}
volume_clone = {'name': 'cl_sm', 'size': 3, 'volume_name': 'lun1',
'os_type': 'linux', 'provider_location': 'cl_sm',
'id': 'lun1', 'provider_auth': None,
'project_id': 'project', 'display_name': None,
'display_description': 'lun1',
'volume_type_id': None}
volume_clone_large = {'name': 'cl_lg', 'size': 6, 'volume_name': 'lun1',
'os_type': 'linux', 'provider_location': 'cl_lg',
'id': 'lun1', 'provider_auth': None,
'project_id': 'project', 'display_name': None,
'display_description': 'lun1',
'volume_type_id': None}
connector = {'initiator': 'iqn.1993-08.org.debian:01:10'}
vol_fail = {'name': 'lun_fail', 'size': 10000, 'volume_name': 'lun1',
'os_type': 'linux', 'provider_location': 'lun1',
'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
'display_name': None, 'display_description': 'lun1',
'volume_type_id': None}
vol1 = ssc_utils.NetAppVolume('lun1', 'openstack')
vol1.state['vserver_root'] = False
vol1.state['status'] = 'online'
vol1.state['junction_active'] = True
vol1.space['size_avl_bytes'] = '4000000000'
vol1.space['size_total_bytes'] = '5000000000'
vol1.space['space-guarantee-enabled'] = False
vol1.space['space-guarantee'] = 'file'
vol1.space['thin_provisioned'] = True
vol1.mirror['mirrored'] = True
vol1.qos['qos_policy_group'] = None
vol1.aggr['name'] = 'aggr1'
vol1.aggr['junction'] = '/vola'
vol1.sis['dedup'] = True
vol1.sis['compression'] = True
vol1.aggr['raid_type'] = 'raiddp'
vol1.aggr['ha_policy'] = 'cfo'
vol1.aggr['disk_type'] = 'SSD'
ssc_map = {'mirrored': set([vol1]), 'dedup': set([vol1]),
'compression': set([vol1]),
'thin': set([vol1]), 'all': set([vol1])}
def setUp(self):
super(NetAppDirectCmodeISCSIDriverTestCase, self).setUp()
self._custom_setup()
def _custom_setup(self):
self.stubs.Set(
ssc_utils, 'refresh_cluster_ssc',
lambda a, b, c, synchronous: None)
configuration = self._set_config(create_configuration())
driver = common.NetAppDriver(configuration=configuration)
self.stubs.Set(httplib, 'HTTPConnection',
FakeDirectCmodeHTTPConnection)
driver.do_setup(context='')
client = driver.client
client.set_api_version(1, 15)
self.driver = driver
self.driver.ssc_vols = self.ssc_map
def _set_config(self, configuration):
configuration.netapp_storage_protocol = 'iscsi'
configuration.netapp_login = 'admin'
configuration.netapp_password = 'pass'
configuration.netapp_server_hostname = '127.0.0.1'
configuration.netapp_transport_type = 'http'
configuration.netapp_server_port = '80'
configuration.netapp_vserver = 'openstack'
return configuration
def test_connect(self):
self.driver.check_for_setup_error()
def test_create_destroy(self):
self.driver.create_volume(self.volume)
self.driver.delete_volume(self.volume)
def test_create_vol_snapshot_destroy(self):
self.driver.create_volume(self.volume)
self.driver.create_snapshot(self.snapshot)
self.driver.create_volume_from_snapshot(self.volume_sec, self.snapshot)
self.driver.delete_snapshot(self.snapshot)
self.driver.delete_volume(self.volume)
def test_map_unmap(self):
self.driver.create_volume(self.volume)
updates = self.driver.create_export(None, self.volume)
self.assertTrue(updates['provider_location'])
self.volume['provider_location'] = updates['provider_location']
connection_info = self.driver.initialize_connection(self.volume,
self.connector)
self.assertEqual(connection_info['driver_volume_type'], 'iscsi')
properties = connection_info['data']
if not properties:
raise AssertionError('Target portal is none')
self.driver.terminate_connection(self.volume, self.connector)
self.driver.delete_volume(self.volume)
def test_cloned_volume_destroy(self):
self.driver.create_volume(self.volume)
self.driver.create_cloned_volume(self.snapshot, self.volume)
self.driver.delete_volume(self.snapshot)
self.driver.delete_volume(self.volume)
def test_map_by_creating_igroup(self):
self.driver.create_volume(self.volume)
updates = self.driver.create_export(None, self.volume)
self.assertTrue(updates['provider_location'])
self.volume['provider_location'] = updates['provider_location']
connector_new = {'initiator': 'iqn.1993-08.org.debian:01:1001'}
connection_info = self.driver.initialize_connection(self.volume,
connector_new)
self.assertEqual(connection_info['driver_volume_type'], 'iscsi')
properties = connection_info['data']
if not properties:
raise AssertionError('Target portal is none')
def test_fail_create_vol(self):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume, self.vol_fail)
def test_vol_stats(self):
self.driver.get_volume_stats(refresh=True)
def test_create_vol_snapshot_diff_size_resize(self):
self.driver.create_volume(self.volume)
self.driver.create_snapshot(self.snapshot)
self.driver.create_volume_from_snapshot(
self.volume_clone, self.snapshot)
self.driver.delete_snapshot(self.snapshot)
self.driver.delete_volume(self.volume)
def test_create_vol_snapshot_diff_size_subclone(self):
self.driver.create_volume(self.volume)
self.driver.create_snapshot(self.snapshot)
self.driver.create_volume_from_snapshot(
self.volume_clone_large, self.snapshot)
self.driver.delete_snapshot(self.snapshot)
self.driver.delete_volume(self.volume)
def test_extend_vol_same_size(self):
self.driver.create_volume(self.volume)
self.driver.extend_volume(self.volume, self.volume['size'])
def test_extend_vol_direct_resize(self):
self.driver.create_volume(self.volume)
self.driver.extend_volume(self.volume, 3)
def test_extend_vol_sub_lun_clone(self):
self.driver.create_volume(self.volume)
self.driver.extend_volume(self.volume, 4)
@mock.patch.object(iscsi.LOG, 'error')
def test_na_api_error_in_create_lun_on_eligible_vol(self, mock_log):
drv = self.driver.driver
vol_name = 'fake_lun_vol'
lun_name = 'lun1'
size = '1'
metadata = {'OSType': 'linux', 'SpaceReserved': 'true'}
path = '/vol/%(vol_name)s/%(lun_name)s' % {'vol_name': vol_name,
'lun_name': lun_name}
metadata_out = {'Path': path,
'Qtree': None,
'OSType': 'linux',
'SpaceReserved': 'true',
'Volume': 'lun1'}
extra_specs = {}
available_vol = ssc_utils.NetAppVolume(vol_name)
with mock.patch.object(drv, '_get_avl_volumes',
return_value=[available_vol]):
with mock.patch.object(drv, 'create_lun', side_effect=NaApiError):
self.assertRaises(exception.VolumeBackendAPIException,
drv._create_lun_on_eligible_vol,
lun_name, size, metadata, extra_specs)
self.assertEqual(1, mock_log.call_count)
class NetAppDriverNegativeTestCase(test.TestCase):
"""Test case for NetAppDriver"""
def setUp(self):
super(NetAppDriverNegativeTestCase, self).setUp()
def test_incorrect_family(self):
configuration = create_configuration()
configuration.netapp_storage_family = 'xyz_abc'
try:
driver = common.NetAppDriver(configuration=configuration)
raise AssertionError('Wrong storage family is getting accepted.')
except exception.InvalidInput:
pass
def test_incorrect_protocol(self):
configuration = create_configuration()
configuration.netapp_storage_family = 'ontap'
configuration.netapp_storage_protocol = 'ontap'
try:
driver = common.NetAppDriver(configuration=configuration)
raise AssertionError('Wrong storage protocol is getting accepted.')
except exception.InvalidInput:
pass
def test_non_netapp_driver(self):
configuration = create_configuration()
common.netapp_unified_plugin_registry['test_family'] =\
{'iscsi': 'cinder.volume.drivers.arbitrary.IscsiDriver'}
configuration.netapp_storage_family = 'test_family'
configuration.netapp_storage_protocol = 'iscsi'
try:
driver = common.NetAppDriver(configuration=configuration)
raise AssertionError('Non NetApp driver is getting instantiated.')
except exception.InvalidInput:
pass
finally:
common.netapp_unified_plugin_registry.pop('test_family')
class FakeDirect7MODEServerHandler(FakeHTTPRequestHandler):
"""HTTP handler that fakes enough stuff to allow the driver to run."""
def do_GET(s):
"""Respond to a GET request."""
if '/servlets/netapp.servlets.admin.XMLrequest_filer' not in s.path:
s.send_response(404)
s.end_headers()
return
s.send_response(200)
s.send_header("Content-Type", "text/xml; charset=utf-8")
s.end_headers()
out = s.wfile
out.write('<netapp version="1.15">'
'<results reason="Not supported method type"'
' status="failed" errno="Not_Allowed"/></netapp>')
def do_POST(s):
"""Respond to a POST request."""
if '/servlets/netapp.servlets.admin.XMLrequest_filer' not in s.path:
s.send_response(404)
s.end_headers()
return
request_xml = s.rfile.read(int(s.headers['Content-Length']))
root = etree.fromstring(request_xml)
body = [x for x in root.iterchildren()]
request = body[0]
tag = request.tag
api = etree.QName(tag).localname or tag
if 'lun-list-info' == api:
body = """<results status="passed">
<are-vols-onlining>false</are-vols-onlining>
<are-vols-busy>false</are-vols-busy>
<luns>
<lun-info>
<path>/vol/vol1/lun1</path>
<size>20971520</size>
<online>true</online>
<mapped>false</mapped>
<read-only>false</read-only>
<staging>false</staging>
<share-state>none</share-state>
<multiprotocol-type>linux</multiprotocol-type>
<uuid>e867d844-c2c0-11e0-9282-00a09825b3b5</uuid>
<serial-number>P3lgP4eTyaNl</serial-number>
<block-size>512</block-size>
<is-space-reservation-enabled>true</is-space-reservation-enabled>
<size-used>0</size-used>
<alignment>indeterminate</alignment>
</lun-info>
<lun-info>
<path>/vol/vol1/lun1</path>
<size>20971520</size>
<online>true</online>
<mapped>false</mapped>
<read-only>false</read-only>
<staging>false</staging>
<share-state>none</share-state>
<multiprotocol-type>linux</multiprotocol-type>
<uuid>8e1e9284-c288-11e0-9282-00a09825b3b5</uuid>
<serial-number>P3lgP4eTc3lp</serial-number>
<block-size>512</block-size>
<is-space-reservation-enabled>true</is-space-reservation-enabled>
<size-used>0</size-used>
<alignment>indeterminate</alignment>
</lun-info>
</luns>
</results>"""
elif 'volume-list-info' == api:
body = """<results status="passed">
<volumes>
<volume-info>
<name>vol0</name>
<uuid>019c8f7a-9243-11e0-9281-00a09825b3b5</uuid>
<type>flex</type>
<block-type>32_bit</block-type>
<state>online</state>
<size-total>576914493440</size-total>
<size-used>13820354560</size-used>
<size-available>563094110208</size-available>
<percentage-used>2</percentage-used>
<snapshot-percent-reserved>20</snapshot-percent-reserved>
<snapshot-blocks-reserved>140848264</snapshot-blocks-reserved>
<reserve-required>0</reserve-required>
<reserve>0</reserve>
<reserve-used>0</reserve-used>
<reserve-used-actual>0</reserve-used-actual>
<files-total>20907162</files-total>
<files-used>7010</files-used>
<files-private-used>518</files-private-used>
<inodefile-public-capacity>31142</inodefile-public-capacity>
<inodefile-private-capacity>31142</inodefile-private-capacity>
<quota-init>0</quota-init>
<is-snaplock>false</is-snaplock>
<containing-aggregate>aggr0</containing-aggregate>
<sis>
<sis-info>
<state>disabled</state>
<status>idle</status>
<progress>idle for 70:36:44</progress>
<type>regular</type>
<schedule>sun-sat@0</schedule>
<last-operation-begin>Mon Aug 8 09:34:15 EST 2011
</last-operation-begin>
<last-operation-end>Mon Aug 8 09:34:15 EST 2011
</last-operation-end>
<last-operation-size>0</last-operation-size>
<size-shared>0</size-shared>
<size-saved>0</size-saved>
<percentage-saved>0</percentage-saved>
<compress-saved>0</compress-saved>
<percent-compress-saved>0</percent-compress-saved>
<dedup-saved>0</dedup-saved>
<percent-dedup-saved>0</percent-dedup-saved>
<total-saved>0</total-saved>
<percent-total-saved>0</percent-total-saved>
</sis-info>
</sis>
<compression-info>
<is-compression-enabled>false</is-compression-enabled>
</compression-info>
<space-reserve>volume</space-reserve>
<space-reserve-enabled>true</space-reserve-enabled>
<raid-size>14</raid-size>
<raid-status>raid_dp,sis</raid-status>
<checksum-style>block</checksum-style>
<is-checksum-enabled>true</is-checksum-enabled>
<is-inconsistent>false</is-inconsistent>
<is-unrecoverable>false</is-unrecoverable>
<is-invalid>false</is-invalid>
<is-in-snapmirror-jumpahead>false</is-in-snapmirror-jumpahead>
<mirror-status>unmirrored</mirror-status>
<disk-count>3</disk-count>
<plex-count>1</plex-count>
<plexes>
<plex-info>
<name>/aggr0/plex0</name>
<is-online>true</is-online>
<is-resyncing>false</is-resyncing>
</plex-info>
</plexes>
</volume-info>
<volume-info>
<name>vol1</name>
<uuid>2d50ecf4-c288-11e0-9282-00a09825b3b5</uuid>
<type>flex</type>
<block-type>32_bit</block-type>
<state>online</state>
<size-total>42949672960</size-total>
<size-used>44089344</size-used>
<size-available>42905583616</size-available>
<percentage-used>0</percentage-used>
<snapshot-percent-reserved>20</snapshot-percent-reserved>
<snapshot-blocks-reserved>10485760</snapshot-blocks-reserved>
<reserve-required>8192</reserve-required>
<reserve>8192</reserve>
<reserve-used>0</reserve-used>
<reserve-used-actual>0</reserve-used-actual>
<files-total>1556480</files-total>
<files-used>110</files-used>
<files-private-used>504</files-private-used>
<inodefile-public-capacity>31142</inodefile-public-capacity>
<inodefile-private-capacity>31142</inodefile-private-capacity>
<quota-init>0</quota-init>
<is-snaplock>false</is-snaplock>
<containing-aggregate>aggr1</containing-aggregate>
<sis>
<sis-info>
<state>disabled</state>
<status>idle</status>
<progress>idle for 89:19:59</progress>
<type>regular</type>
<schedule>sun-sat@0</schedule>
<last-operation-begin>Sun Aug 7 14:51:00 EST 2011
</last-operation-begin>
<last-operation-end>Sun Aug 7 14:51:00 EST 2011
</last-operation-end>
<last-operation-size>0</last-operation-size>
<size-shared>0</size-shared>
<size-saved>0</size-saved>
<percentage-saved>0</percentage-saved>
<compress-saved>0</compress-saved>
<percent-compress-saved>0</percent-compress-saved>
<dedup-saved>0</dedup-saved>
<percent-dedup-saved>0</percent-dedup-saved>
<total-saved>0</total-saved>
<percent-total-saved>0</percent-total-saved>
</sis-info>
</sis>
<compression-info>
<is-compression-enabled>false</is-compression-enabled>
</compression-info>
<space-reserve>volume</space-reserve>
<space-reserve-enabled>true</space-reserve-enabled>
<raid-size>7</raid-size>
<raid-status>raid4,sis</raid-status>
<checksum-style>block</checksum-style>
<is-checksum-enabled>true</is-checksum-enabled>
<is-inconsistent>false</is-inconsistent>
<is-unrecoverable>false</is-unrecoverable>
<is-invalid>false</is-invalid>
<is-in-snapmirror-jumpahead>false</is-in-snapmirror-jumpahead>
<mirror-status>unmirrored</mirror-status>
<disk-count>2</disk-count>
<plex-count>1</plex-count>
<plexes>
<plex-info>
<name>/aggr1/plex0</name>
<is-online>true</is-online>
<is-resyncing>false</is-resyncing>
</plex-info>
</plexes>
</volume-info>
</volumes>
</results>"""
elif 'volume-options-list-info' == api:
body = """<results status="passed">
<options>
<volume-option-info>
<name>snapmirrored</name>
<value>off</value>
</volume-option-info>
<volume-option-info>
<name>root</name>
<value>false</value>
</volume-option-info>
<volume-option-info>
<name>ha_policy</name>
<value>cfo</value>
</volume-option-info>
<volume-option-info>
<name>striping</name>
<value>not_striped</value>
</volume-option-info>
<volume-option-info>
<name>compression</name>
<value>off</value>
</volume-option-info>
</options>
</results>"""
elif 'lun-create-by-size' == api:
body = """<results status="passed">
<actual-size>22020096</actual-size></results>"""
elif 'lun-destroy' == api:
body = """<results status="passed"/>"""
elif 'igroup-list-info' == api:
body = """<results status="passed">
<initiator-groups>
<initiator-group-info>
<initiator-group-name>openstack-8bc96490</initiator-group-name>
<initiator-group-type>iscsi</initiator-group-type>
<initiator-group-uuid>b8e1d274-c378-11e0</initiator-group-uuid>
<initiator-group-os-type>linux</initiator-group-os-type>
<initiator-group-throttle-reserve>0</initiator-group-throttle-reserve>
<initiator-group-throttle-borrow>false
</initiator-group-throttle-borrow>
<initiator-group-vsa-enabled>false</initiator-group-vsa-enabled>
<initiator-group-alua-enabled>false</initiator-group-alua-enabled>
<initiator-group-report-scsi-name-enabled>true
</initiator-group-report-scsi-name-enabled>
<initiators>
<initiator-info>
<initiator-name>iqn.1993-08.org.debian:01:10</initiator-name>
</initiator-info>
</initiators>
</initiator-group-info>
<initiator-group-info>
<initiator-group-name>iscsi_group</initiator-group-name>
<initiator-group-type>iscsi</initiator-group-type>
<initiator-group-uuid>ccb8cbe4-c36f</initiator-group-uuid>
<initiator-group-os-type>linux</initiator-group-os-type>
<initiator-group-throttle-reserve>0</initiator-group-throttle-reserve>
<initiator-group-throttle-borrow>false
</initiator-group-throttle-borrow>
<initiator-group-vsa-enabled>false</initiator-group-vsa-enabled>
<initiator-group-alua-enabled>false</initiator-group-alua-enabled>
<initiator-group-report-scsi-name-enabled>true
</initiator-group-report-scsi-name-enabled>
<initiators>
<initiator-info>
<initiator-name>iqn.1993-08.org.debian:01:10ca</initiator-name>
</initiator-info>
</initiators>
</initiator-group-info>
</initiator-groups>
</results>"""
elif 'lun-map-list-info' == api:
body = """<results status="passed">
<initiator-groups/>
</results>"""
elif 'lun-map' == api:
body = """<results status="passed"><lun-id-assigned>1
</lun-id-assigned>
</results>"""
elif 'iscsi-node-get-name' == api:
body = """<results status="passed">
<node-name>iqn.1992-08.com.netapp:sn.135093938</node-name>
</results>"""
elif 'iscsi-portal-list-info' == api:
body = """<results status="passed">
<iscsi-portal-list-entries>
<iscsi-portal-list-entry-info>
<ip-address>10.61.176.156</ip-address>
<ip-port>3260</ip-port>
<tpgroup-tag>1000</tpgroup-tag>
<interface-name>e0a</interface-name>
</iscsi-portal-list-entry-info>
</iscsi-portal-list-entries>
</results>"""
elif 'igroup-create' == api:
body = """<results status="passed"/>"""
elif 'igroup-add' == api:
body = """<results status="passed"/>"""
elif 'clone-start' == api:
body = """<results status="passed">
<clone-id>
<clone-id-info>
<volume-uuid>2d50ecf4-c288-11e0-9282-00a09825b3b5</volume-uuid>
<clone-op-id>11</clone-op-id>
</clone-id-info>
</clone-id>
</results>"""
elif 'clone-list-status' == api:
body = """<results status="passed">
<status>
<ops-info>
<clone-state>completed</clone-state>
</ops-info>
</status>
</results>"""
elif 'lun-unmap' == api:
body = """<results status="passed"/>"""
elif 'system-get-ontapi-version' == api:
body = """<results status="passed">
<major-version>1</major-version>
<minor-version>8</minor-version>
</results>"""
elif 'lun-set-space-reservation-info' == api:
body = """<results status="passed"/>"""
elif 'ems-autosupport-log' == api:
body = """<results status="passed"/>"""
elif 'lun-resize' == api:
body = """<results status="passed"/>"""
elif 'lun-get-geometry' == api:
body = """<results status="passed">
<size>1</size>
<bytes-per-sector>2</bytes-per-sector>
<sectors-per-track>8</sectors-per-track>
<tracks-per-cylinder>2</tracks-per-cylinder>
<cylinders>4</cylinders>
<max-resize-size>5</max-resize-size>
</results>"""
elif 'volume-options-list-info' == api:
body = """<results status="passed">
<options>
<option>
<name>compression</name>
<value>off</value>
</option>
</options>
</results>"""
elif 'lun-move' == api:
body = """<results status="passed"/>"""
else:
# Unknown API
s.send_response(500)
s.end_headers()
return
s.send_response(200)
s.send_header("Content-Type", "text/xml; charset=utf-8")
s.end_headers()
s.wfile.write(RESPONSE_PREFIX_DIRECT_7MODE)
s.wfile.write(RESPONSE_PREFIX_DIRECT)
s.wfile.write(body)
s.wfile.write(RESPONSE_SUFFIX_DIRECT)
class FakeDirect7modeHTTPConnection(object):
"""A fake httplib.HTTPConnection for netapp tests
Requests made via this connection actually get translated and routed into
the fake direct handler above; the response is then turned into
the httplib.HTTPResponse that the caller expects.
"""
def __init__(self, host, timeout=None):
self.host = host
def request(self, method, path, data=None, headers=None):
if not headers:
headers = {}
req_str = '%s %s HTTP/1.1\r\n' % (method, path)
for key, value in headers.iteritems():
req_str += "%s: %s\r\n" % (key, value)
if data:
req_str += '\r\n%s' % data
# NOTE(vish): normally the http transport normalizes from unicode
sock = FakeHttplibSocket(req_str.decode("latin-1").encode("utf-8"))
# NOTE(vish): stop the server from trying to look up address from
# the fake socket
FakeDirect7MODEServerHandler.address_string = lambda x: '127.0.0.1'
self.app = FakeDirect7MODEServerHandler(sock, '127.0.0.1:80', None)
self.sock = FakeHttplibSocket(sock.result)
self.http_response = httplib.HTTPResponse(self.sock)
def set_debuglevel(self, level):
pass
def getresponse(self):
self.http_response.begin()
return self.http_response
def getresponsebody(self):
return self.sock.result
class NetAppDirect7modeISCSIDriverTestCase_NV(
NetAppDirectCmodeISCSIDriverTestCase):
"""Test case for NetAppISCSIDriver
No vfiler
"""
def setUp(self):
super(NetAppDirect7modeISCSIDriverTestCase_NV, self).setUp()
def _custom_setup(self):
configuration = self._set_config(create_configuration())
driver = common.NetAppDriver(configuration=configuration)
self.stubs.Set(httplib, 'HTTPConnection',
FakeDirect7modeHTTPConnection)
driver.do_setup(context='')
client = driver.client
client.set_api_version(1, 9)
self.driver = driver
def _set_config(self, configuration):
configuration.netapp_storage_family = 'ontap_7mode'
configuration.netapp_storage_protocol = 'iscsi'
configuration.netapp_login = 'admin'
configuration.netapp_password = 'pass'
configuration.netapp_server_hostname = '127.0.0.1'
configuration.netapp_transport_type = 'http'
configuration.netapp_server_port = '80'
return configuration
def test_create_on_select_vol(self):
self.driver.volume_list = ['vol0', 'vol1']
self.driver.create_volume(self.volume)
self.driver.delete_volume(self.volume)
self.driver.volume_list = []
def test_create_fail_on_select_vol(self):
self.driver.volume_list = ['vol2', 'vol3']
success = False
try:
self.driver.create_volume(self.volume)
except exception.VolumeBackendAPIException:
success = True
pass
finally:
self.driver.volume_list = []
if not success:
raise AssertionError('Failed creating on selected volumes')
def test_check_for_setup_error_version(self):
drv = self.driver
delattr(drv.client, '_api_version')
# check that an exception is raised when the API version is not found
self.assertRaises(exception.VolumeBackendAPIException,
drv.check_for_setup_error)
drv.client.set_api_version(1, 8)
# check that an exception is raised for an unsupported API version
self.assertRaises(exception.VolumeBackendAPIException,
drv.check_for_setup_error)
def test_na_api_error_in_create_lun_on_eligible_vol(self):
drv = self.driver.driver
req_size = 1.0
fake_volume = {'name': 'fake_vol'}
fake_metadata = {}
with mock.patch.object(drv, '_get_avl_volume_by_size',
return_value=fake_volume):
with mock.patch.object(drv, 'create_lun', side_effect=NaApiError):
self.assertRaises(NaApiError, drv._create_lun_on_eligible_vol,
fake_volume['name'], req_size, fake_metadata)
class NetAppDirect7modeISCSIDriverTestCase_WV(
NetAppDirect7modeISCSIDriverTestCase_NV):
"""Test case for NetAppISCSIDriver
With vfiler
"""
def setUp(self):
super(NetAppDirect7modeISCSIDriverTestCase_WV, self).setUp()
def _custom_setup(self):
configuration = self._set_config(create_configuration())
driver = common.NetAppDriver(configuration=configuration)
self.stubs.Set(httplib, 'HTTPConnection',
FakeDirect7modeHTTPConnection)
driver.do_setup(context='')
client = driver.client
client.set_api_version(1, 9)
self.driver = driver
def _set_config(self, configuration):
configuration.netapp_storage_family = 'ontap_7mode'
configuration.netapp_storage_protocol = 'iscsi'
configuration.netapp_login = 'admin'
configuration.netapp_password = 'pass'
configuration.netapp_server_hostname = '127.0.0.1'
configuration.netapp_transport_type = 'http'
configuration.netapp_server_port = '80'
configuration.netapp_vfiler = 'openstack'
return configuration
class NetAppApiElementTransTests(test.TestCase):
"""Test case for NetApp api element translations."""
def setUp(self):
super(NetAppApiElementTransTests, self).setUp()
def test_translate_struct_dict_unique_key(self):
"""Tests if dict gets properly converted to NaElements."""
root = NaElement('root')
child = {'e1': 'v1', 'e2': 'v2', 'e3': 'v3'}
root.translate_struct(child)
self.assertEqual(len(root.get_children()), 3)
self.assertEqual(root.get_child_content('e1'), 'v1')
self.assertEqual(root.get_child_content('e2'), 'v2')
self.assertEqual(root.get_child_content('e3'), 'v3')
def test_translate_struct_dict_nonunique_key(self):
"""Tests if list/dict gets properly converted to NaElements."""
root = NaElement('root')
child = [{'e1': 'v1', 'e2': 'v2'}, {'e1': 'v3'}]
root.translate_struct(child)
self.assertEqual(len(root.get_children()), 3)
children = root.get_children()
for c in children:
if c.get_name() == 'e1':
self.assertIn(c.get_content(), ['v1', 'v3'])
else:
self.assertEqual(c.get_content(), 'v2')
def test_translate_struct_list(self):
"""Tests if list gets properly converted to NaElements."""
root = NaElement('root')
child = ['e1', 'e2']
root.translate_struct(child)
self.assertEqual(len(root.get_children()), 2)
self.assertIsNone(root.get_child_content('e1'))
self.assertIsNone(root.get_child_content('e2'))
def test_translate_struct_tuple(self):
"""Tests if tuple gets properly converted to NaElements."""
root = NaElement('root')
child = ('e1', 'e2')
root.translate_struct(child)
self.assertEqual(len(root.get_children()), 2)
self.assertIsNone(root.get_child_content('e1'))
self.assertIsNone(root.get_child_content('e2'))
def test_translate_invalid_struct(self):
"""Tests if invalid data structure raises exception."""
root = NaElement('root')
child = 'random child element'
self.assertRaises(ValueError, root.translate_struct, child)
def test_setter_builtin_types(self):
"""Tests str, int, float get converted to NaElement."""
root = NaElement('root')
root['e1'] = 'v1'
root['e2'] = 1
root['e3'] = 2.0
root['e4'] = 8L
self.assertEqual(len(root.get_children()), 4)
self.assertEqual(root.get_child_content('e1'), 'v1')
self.assertEqual(root.get_child_content('e2'), '1')
self.assertEqual(root.get_child_content('e3'), '2.0')
self.assertEqual(root.get_child_content('e4'), '8')
def test_setter_na_element(self):
"""Tests na_element gets appended as child."""
root = NaElement('root')
root['e1'] = NaElement('nested')
self.assertEqual(len(root.get_children()), 1)
e1 = root.get_child_by_name('e1')
self.assertIsInstance(e1, NaElement)
self.assertIsInstance(e1.get_child_by_name('nested'), NaElement)
def test_setter_child_dict(self):
"""Tests dict is appended as child to root."""
root = NaElement('root')
root['d'] = {'e1': 'v1', 'e2': 'v2'}
e1 = root.get_child_by_name('d')
self.assertIsInstance(e1, NaElement)
sub_ch = e1.get_children()
self.assertEqual(len(sub_ch), 2)
for c in sub_ch:
self.assertIn(c.get_name(), ['e1', 'e2'])
if c.get_name() == 'e1':
self.assertEqual(c.get_content(), 'v1')
else:
self.assertEqual(c.get_content(), 'v2')
def test_setter_child_list_tuple(self):
"""Tests list/tuple are appended as child to root."""
root = NaElement('root')
root['l'] = ['l1', 'l2']
root['t'] = ('t1', 't2')
l = root.get_child_by_name('l')
self.assertIsInstance(l, NaElement)
t = root.get_child_by_name('t')
self.assertIsInstance(t, NaElement)
for le in l.get_children():
self.assertIn(le.get_name(), ['l1', 'l2'])
for te in t.get_children():
self.assertIn(te.get_name(), ['t1', 't2'])
def test_setter_no_value(self):
"""Tests key with None value."""
root = NaElement('root')
root['k'] = None
self.assertIsNone(root.get_child_content('k'))
def test_setter_invalid_value(self):
"""Tests invalid value raises exception."""
root = NaElement('root')
try:
root['k'] = NaServer('localhost')
except Exception as e:
if not isinstance(e, TypeError):
self.fail(_('Error not a TypeError.'))
def test_setter_invalid_key(self):
"""Tests invalid value raises exception."""
root = NaElement('root')
try:
root[None] = 'value'
except Exception as e:
if not isinstance(e, KeyError):
self.fail(_('Error not a KeyError.'))
|
|
"""
component_cost.py
Matthew Woodruff ([email protected])
The Pennsylvania State University
Applied Research Laboratory
2013
Compile a list of the components in a design and determine
their costs from the manufacturing models.
"""
# For every part in the design
# look up its manufacturing model
# extract price and lead time from MM
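# Typical invocation (hypothetical file name; the script accepts .adm/.xml or
# .json design files and prints a JSON cost summary to stdout):
#
#     python component_cost.py design.adm --verbose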
import glob
import json
import os
import xml.etree.ElementTree as ET
import re
import sys
import argparse
import traceback
import urllib
class CostModelError(Exception):
""" error in the cost model """
pass
def get_args(argv):
""" extract arguments from argv """
parser = argparse.ArgumentParser()
parser.add_argument("design", help="design file name")
parser.add_argument("-v", "--verbose", action="store_true",
help="print a verbose report")
return parser.parse_args(argv)
def evaluate(designfile, verbosity):
""" extract components and produce a score """
design = read_design(designfile)
components = components_in(design.containers())
path = os.path.dirname(designfile)
manfmanifest = read_component_index(os.path.join(path, "component_index.json"))
manfmanifest.update(read_manufacturing_manifest(os.path.join(path, "manufacturing.manifest.json")))
score = score_components(path, components, manfmanifest)
return score.report(verbosity)
def cli(argv):
""" score the file given as an argument, print result """
args = get_args(argv)
verbosity = "summary"
if args.verbose:
verbosity = "report"
report = evaluate(args.design, verbosity)
print json.dumps(report, indent=4)
def read_design(designfile):
"""
read the design from the design file and return an appropriate
design object
"""
designtype = None
if re.search(r"\.adm$", designfile, flags=re.I) is not None:
designtype = XMLDesign
elif re.search(r"\.xml$", designfile, flags=re.I) is not None:
designtype = XMLDesign
elif re.search(r"\.json$", designfile, flags=re.I) is not None:
designtype = JSONDesign
if designtype is None:
    raise CostModelError("unrecognized design file type: %s" % designfile)
design = designtype(designfile)
return design
def read_component_index(component_index_file):
result = {}
with open(component_index_file, 'r') as fp:
component_index = json.load(fp)
for component in component_index:
filename = os.path.join(os.path.dirname(component_index_file), component["ModelPath"])
if not os.path.exists(filename):
filename = urllib.unquote(filename)
if os.path.exists(filename):
try:
manfmodel = ET.parse(filename)
except ET.ParseError:
try:
manfmodel = ET.parse(filename, parser=ET.XMLParser(encoding="us-ascii"))
except ET.ParseError:
# give up. no manf model
continue
result[component["InstanceID"]] = {"id":component["InstanceID"], "model":manfmodel}
return result
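# For reference, read_component_index expects component_index.json to hold a list
# of entries roughly like the following (hypothetical values); "ModelPath" is
# resolved relative to the directory containing the index file and may be
# URL-encoded:
#
#     [{"InstanceID": "id-0001", "ModelPath": "manufacturing/part_0001.xml"}]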
def read_manufacturing_manifest(manifestfile):
with open(manifestfile, 'r') as fp:
manifest = json.load(fp)
component_list = manifest["ComponentManufactureList"]
result = {}
for component in component_list:
if "ManufacturingModel" in component:
filename = os.path.join(os.path.dirname(manifestfile), component["ManufacturingModel"])
if os.path.exists(filename):
try:
manfmodel = ET.parse(filename)
except ET.ParseError:
try:
manfmodel = ET.parse(filename,
parser=ET.XMLParser(encoding="us-ascii"))
except ET.ParseError:
# give up. no manf model
continue
if "InstanceGUID" in component:
result[component["InstanceGUID"]] = {"id":component["InstanceGUID"].strip("{}"), "model":manfmodel}
elif "id" in component:
result[component["id"]] = {"id":component["id"].strip("{}"), "model":manfmodel}
return result
class XMLComponent(object):
""" a component derived from an XML design """
def __init__(self, component):
""" just save a ref to the component element """
self.component = component
def name(self):
""" extract the component name """
return self.component.get("Name", "")
def ident(self):
""" extract the component id """
return self.component.get("ID", "")
class JSONComponent(object):
""" a component derived from a JSON design """
def __init__(self, component):
""" just save a reference to the component """
self.component = component
def name(self):
""" extract the component name """
return self.component.get("Name", "")
def ident(self):
""" extract the component id """
text = self.component.get("id", "")
# strip surrounding curly braces from id
return re.sub("[{}]", "", text)
class XMLContainer(object):
""" a container derived from an XML design """
def __init__(self, container):
""" just save a reference to the container element """
self.container = container
def components(self):
""" return a list of components in the container """
children = self.container.findall("ComponentInstance")
return [XMLComponent(c) for c in children]
def containers(self):
"""
This is tricky because I don't have an example design
with nested containers. I don't even know what they're
supposed to be called, but I'm going to guess "Container".
"""
children = self.container.findall("Container")
return [XMLContainer(c) for c in children]
class JSONContainer(object):
""" a container derived from a JSON design """
def __init__(self, container):
""" just save a reference to the container """
self.container = container
def components(self):
""" return a list of components in the container """
return [JSONComponent(c) for c
in self.container.get("ComponentInstances", [])]
def containers(self):
""" return a list of containers in the container"""
return [JSONContainer(c) for c
in self.container.get("Containers", [])]
class XMLDesign(object):
""" an XML design file """
def __init__(self, designfile):
try:
self.design = ET.parse(designfile)
except ET.ParseError:
# just let it raise an error if we can't parse it now
parser = ET.XMLParser(encoding="us-ascii")
self.design = ET.parse(designfile, parser=parser)
def containers(self):
"""
Return a list of containers, although unlike the JSON
designs, I think the XML designs must have a single
RootContainer. Although I could be wrong about that.
Also, all of the XML designs I've seen are flat, but
I'm not sure if that's guaranteed.
"""
rootcontainers = self.design.getroot().findall("RootContainer")
return [XMLContainer(c) for c in rootcontainers]
class JSONDesign(object):
""" a JSON design file """
def __init__(self, designfile):
""" read in the JSON file """
with open(designfile, 'r') as fp:
self.design = json.load(fp)
def containers(self):
""" return a list of containers """
return [JSONContainer(c) for c
in self.design.get("Containers", [])]
def components_in(containers):
""" given a list of containers, return the components within """
components = []
for container in containers:
instances = container.components()
components.extend(instances)
subcontainers = container.containers()
components.extend(components_in(subcontainers))
return components
# $ USD per kilogram
material_library = {"GOLD_MASS": 47619.0, "TITANIUM_MASS": 6.15, "STEEL_MASS": 0.209}
def to_kg(value, unit="kg"):
if unit == "oz":
return value*0.0283495
elif unit == "lb":
return value*0.453592
elif unit == "ton":
return value*907.185
elif unit == "mg":
return value*0.000001
elif unit == "g":
return value*0.001
elif unit == "kg":
return value
elif unit == "tonne":
return value * 1000
else:
print "Unrecognized unit", unit
return value
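# Illustrative conversions, computed from the factors above:
#     to_kg(16, "oz") -> 0.453592
#     to_kg(500, "g") -> 0.5
# Unrecognized units are passed through unchanged (after printing a warning).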
def lookup_weight(id, path):
metrics = ET.parse(os.path.join(path, "CADAssembly_metrics.xml"))
root = metrics.getroot()
metricId = None
# first find the metric id
for component in root.find("Assemblies").iter("CADComponent"):
if component.get("ComponentInstanceID").strip("{}") == id:
metricId = component.get("MetricID")
break
# then find the mass
for component in root.find("MetricComponents").iter("MetricComponent"):
if component.get("MetricID") == metricId:
for scalar in component.find("Scalars").iter("Scalar"):
if scalar.get("Name") == "Mass":
return to_kg(float(scalar.get("Value")), scalar.get("Unit", "kg"))
return None
def manufacturing_cost_model(id, name, model, path):
""" compute the cost and lead time of a component """
if model is None:
return ("no manufacturing model", "no manufacturing model")
try:
# the manufacturing info may be wrapped in a <part> element
root = model.getroot()
if root.tag == "part":
root = root.find("manufacturingDetails")
if root.tag == "manufacturingDetails":
root = list(root)[0]
if root.tag == "{avm}Component":
is_production = True
production_cost = "no manufacturing model"
production_leadTime = "no manufacturing model"
prototype_cost = "no manufacturing model"
prototype_leadTime = "no manufacturing model"
for parameter in root.iter("Parameter"):
if parameter.get("Name") == "procurement__supplier_specific_part_info__part_info__cost_leadtime_production__part_cost_per_orderable_unit__value":
if parameter.find("Value").find("ValueExpression").find("Value").text != "None":
production_cost = float(parameter.find("Value").find("ValueExpression").find("Value").text)
elif parameter.get("Name") == "procurement__supplier_specific_part_info__part_info__cost_leadtime_production__part_leadtime_per_orderable_unit" or parameter.get("Name") == "procurement__supplier_specific_part_info__part_info__cost_leadtime_production__part_leadtime":
if parameter.find("Value").find("ValueExpression").find("Value").text != "None":
production_leadTime = to_days(float(parameter.find("Value").find("ValueExpression").find("Value").text), parameter.find("Value").get("Unit", "s"))
elif parameter.get("Name") == "procurement__supplier_specific_part_info__part_info__cost_leadtime_prototype__part_cost_per_orderable_unit__value":
if parameter.find("Value").find("ValueExpression").find("Value").text != "None":
prototype_cost = float(parameter.find("Value").find("ValueExpression").find("Value").text)
elif parameter.get("Name") == "procurement__supplier_specific_part_info__part_info__cost_leadtime_prototype__part_leadtime_per_orderable_unit" or parameter.get("Name") == "procurement__supplier_specific_part_info__part_info__cost_leadtime_prototype__part_leadtime":
if parameter.find("Value").find("ValueExpression").find("Value").text != "None":
prototype_leadTime = to_days(float(parameter.find("Value").find("ValueExpression").find("Value").text), parameter.find("Value").get("Unit", "s"))
elif parameter.get("Name") == "procurement__supplier_specific_part_info__part_info__cost_leadtime_production__production_or_prototype":
if parameter.find("Value").find("ValueExpression").find("Value").text != "None":
is_production = True if parameter.find("Value").find("ValueExpression").find("Value").text.lower() == "production" else False
if is_production:
if production_cost == "no manufacturing model" or production_leadTime == "no manufacturing model":
return (prototype_cost, prototype_leadTime)
else:
return (production_cost, production_leadTime)
else:
if prototype_cost == "no manufacturing model" or prototype_leadTime == "no manufacturing model":
return (production_cost, production_leadTime)
else:
return (prototype_cost, prototype_leadTime)
elif root.tag == "purchased":
root = root.find("supplier")
cost = float(root.find("price").text)
leadTime = to_days(float(root.find("leadTime").text), root.find("leadTime").attrib.get("unit", "day"))
# Hack for MSD, since the procurement information does not change with different weights
if name.startswith("Mass_"):
part_weight = lookup_weight(id, path)
if name.endswith("Gold"):
cost = material_library.get("GOLD_MASS", 1.0)*part_weight
elif name.endswith("Titanium"):
cost = material_library.get("TITANIUM_MASS", 1.0)*part_weight
elif name.endswith("Steel"):
cost = material_library.get("STEEL_MASS", 1.0)*part_weight
return (cost, leadTime)
else:
# estimate manufacturing cost based on mass
if root.find("material") is None or root.find("material").find("alloySteel") is not None:
material = "STEEL_MASS"
else:
material = root.find("material").text
material_cost = material_library.get(material, 0.209)
part_weight = lookup_weight(id, path)
time = 30 + part_weight*2
return (round(material_cost*part_weight + 50*(time/60),2), round(4 + time/(8*60),1))
except Exception:
traceback.print_exc()
return ("no manufacturing model", "no manufacturing model")
def score_components(path, components, manfmanifest):
""" compute a score given a list of components """
score = Score()
for component in components:
name = component.name()
ident = component.ident()
entry = manfmanifest.get(ident, None)
if entry is None or "model" not in entry:
cost = "no manufacturing model"
time = "no manufacturing model"
else:
(cost, time) = manufacturing_cost_model(entry["id"], name, entry["model"], path)
try:
cost = float(cost) + 600 # add labor cost for ordering, receiving, & inspection
except ValueError:
cost = "no manufacturing model"
try:
time = float(time) + 2 # add shipping time
except ValueError:
time = "no manufacturing model"
score.tally(ident, name, cost, time)
return score
class Score(object):
""" keep score on cost and lead time """
def __init__(self):
""" prepare aligned lists for tally """
self.idents = []
self.names = []
self.costs = []
self.leadtimes = []
def tally(self, ident, name, cost, leadtime):
""" add info to each list """
self.idents.append(ident)
self.names.append(name)
self.costs.append(cost)
self.leadtimes.append(leadtime)
def report(self, kind="summary"):
""" construct a table and compute the score """
header = ["identity", "name",
"cost", "leadtime"]
data = zip(self.idents, self.names,
self.costs, self.leadtimes)
cost = 0.0
leadtime = 0.0
defective = 0
for _, _, price, lead in data:
try:
cost += float(price)
leadtime = max(float(lead), leadtime)
except TypeError:
defective += 1
except ValueError:
defective += 1
if kind == "report":
verbose = [dict(zip(header, record)) for record in data]
report = {"detail": verbose,
"cost": cost, "leadtime": leadtime,
"defective": defective}
else:
report = {"cost": cost, "leadtime": leadtime,
"defective": defective}
return report
def to_days(astring, unit):
""" No weeks in QUDT unit library: days or years """
factors = {
"week": 7.0, "weeks": 7.0,
"Week": 7.0, "Weeks": 7.0,
"day": 1.0, "Day": 1.0,
"days": 1.0, "Days": 1.0,
"hour": 1.0/24.0, "hours": 1.0/24.0,
"Hour": 1.0/24.0, "Hours": 1.0/24.0,
"minute": 1.0/1440.0, "Minute": 1.0/1440.0,
"minutes": 1.0/1440.0, "Minutes": 1.0/1440.0,
"s": 1.0/86400.0
}
factor = factors.get(unit, 0.0)
try:
value = float(astring)
except ValueError:
return None
return factor * value
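# Illustrative conversions, computed from the factors above:
#     to_days("2", "week") -> 14.0
#     to_days("36", "hour") -> 1.5
# An unrecognized unit maps to a factor of 0.0, and a non-numeric value returns None.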
if __name__ == "__main__":
cli(sys.argv[1:])
# vim:ts=4:sw=4:expandtab:fdm=indent:wrap lbr:ai:colorcolumn=74:number
|
|
# encoding: utf-8
# Utility functions for api2
import os
import time
import json
import re
import logging
from collections import defaultdict
from functools import wraps
from seahub import settings
from django.core.paginator import EmptyPage, InvalidPage
from django.http import HttpResponse
from rest_framework.response import Response
from rest_framework import status, serializers
import seaserv
from seaserv import seafile_api, get_commits, server_repo_size, \
get_personal_groups_by_user, is_group_user, get_group, seafserv_threaded_rpc
from pysearpc import SearpcError
from seahub.base.accounts import User
from seahub.base.templatetags.seahub_tags import email2nickname, \
translate_seahub_time, file_icon_filter
from seahub.contacts.models import Contact
from seahub.group.models import GroupMessage, MessageReply, \
MessageAttachment, PublicGroup
from seahub.group.views import is_group_staff
from seahub.message.models import UserMessage, UserMsgAttachment
from seahub.notifications.models import UserNotification
from seahub.utils import api_convert_desc_link, get_file_type_and_ext, \
gen_file_get_url, is_org_context
from seahub.utils.paginator import Paginator
from seahub.utils.file_types import IMAGE
from seahub.api2.models import Token, TokenV2, DESKTOP_PLATFORMS
from seahub.avatar.settings import AVATAR_DEFAULT_SIZE
from seahub.avatar.templatetags.avatar_tags import api_avatar_url, \
get_default_avatar_url
from seahub.profile.models import Profile
logger = logging.getLogger(__name__)
def api_error(code, msg):
err_resp = {'error_msg': msg}
return Response(err_resp, status=code)
def get_file_size(store_id, repo_version, file_id):
size = seafile_api.get_file_size(store_id, repo_version, file_id)
return size if size else 0
def prepare_starred_files(files):
array = []
for f in files:
sfile = {'org' : f.org_id,
'repo' : f.repo.id,
'repo_id' : f.repo.id,
'repo_name' : f.repo.name,
'path' : f.path,
'icon_path' : file_icon_filter(f.path),
'file_name' : os.path.basename(f.path),
'mtime' : f.last_modified,
'mtime_relative': translate_seahub_time(f.last_modified),
'dir' : f.is_dir
}
if not f.is_dir:
try:
file_id = seafile_api.get_file_id_by_path(f.repo.id, f.path)
sfile['oid'] = file_id
sfile['size'] = get_file_size(f.repo.store_id, f.repo.version, file_id)
except SearpcError, e:
pass
array.append(sfile)
return array
def get_groups(email):
group_json = []
joined_groups = get_personal_groups_by_user(email)
grpmsgs = {}
for g in joined_groups:
grpmsgs[g.id] = 0
notes = UserNotification.objects.get_user_notifications(email, seen=False)
replynum = 0
for n in notes:
if n.is_group_msg():
try:
gid = n.group_message_detail_to_dict().get('group_id')
except UserNotification.InvalidDetailError:
continue
if gid not in grpmsgs:
continue
grpmsgs[gid] = grpmsgs[gid] + 1
for g in joined_groups:
msg = GroupMessage.objects.filter(group_id=g.id).order_by('-timestamp')[:1]
mtime = 0
if len(msg) >= 1:
mtime = get_timestamp(msg[0].timestamp)
group = {
"id":g.id,
"name":g.group_name,
"creator":g.creator_name,
"ctime":g.timestamp,
"mtime":mtime,
"msgnum":grpmsgs[g.id],
}
group_json.append(group)
return group_json, replynum
def get_msg_group_id(msg_id):
try:
msg = GroupMessage.objects.get(id=msg_id)
except GroupMessage.DoesNotExist:
return None
return msg.group_id
def get_group_and_contacts(email):
group_json = []
contacts_json = []
replies_json = []
gmsgnums = {}
umsgnums = {}
replies = {}
gmsgnum = umsgnum = replynum = 0
contacts = [c.contact_email for c in Contact.objects.filter(user_email=email)]
joined_groups = get_personal_groups_by_user(email)
notes = UserNotification.objects.get_user_notifications(email, seen=False)
for n in notes:
if n.is_group_msg():
try:
gid = n.group_message_detail_to_dict().get('group_id')
except UserNotification.InvalidDetailError:
continue
gmsgnums[gid] = gmsgnums.get(gid, 0) + 1
elif n.is_user_message():
msg_from = n.user_message_detail_to_dict()['msg_from']
if msg_from not in contacts:
contacts.append(msg_from)
umsgnums[msg_from] = umsgnums.get(msg_from, 0) + 1
for r in replies_json:
r['msgnum'] = replies[r['msg_id']]
for g in joined_groups:
msg = GroupMessage.objects.filter(group_id=g.id).order_by('-timestamp')[:1]
mtime = 0
lastmsg = None
if len(msg) >= 1:
mtime = get_timestamp(msg[0].timestamp)
lastmsg = msg[0].message
group = {
"id":g.id,
"name":g.group_name,
"creator":g.creator_name,
"ctime":g.timestamp,
"mtime":mtime,
"lastmsg":lastmsg,
"msgnum":gmsgnums.get(g.id, 0),
}
gmsgnum = gmsgnum + gmsgnums.get(g.id, 0)
group_json.append(group)
for contact in contacts:
msg = UserMessage.objects.get_messages_between_users(
contact, email).order_by('-timestamp')[:1]
mtime = 0
lastmsg = None
if len(msg) >= 1:
mtime = get_timestamp(msg[0].timestamp)
lastmsg = msg[0].message
c = {
'email' : contact,
'name' : email2nickname(contact),
"mtime" : mtime,
"lastmsg":lastmsg,
"msgnum" : umsgnums.get(contact, 0),
}
umsgnum = umsgnum + umsgnums.get(contact, 0)
contacts_json.append(c)
contacts_json.sort(key=lambda x: x["mtime"], reverse=True)
return contacts_json, umsgnum, group_json, gmsgnum, replies_json, replynum
def prepare_events(event_groups):
for g in event_groups:
for e in g["events"]:
if e.etype != "repo-delete":
e.link = "api://repos/%s" % e.repo_id
if e.etype == "repo-update":
api_convert_desc_link(e)
def get_group_msgs(groupid, page, username):
# Show 15 group messages per page.
paginator = Paginator(GroupMessage.objects.filter(
group_id=groupid).order_by('-timestamp'), 15)
# If the requested page is out of range, return None
try:
group_msgs = paginator.page(page)
except (EmptyPage, InvalidPage):
return None
# Force evaluate queryset to fix some database error for mysql.
group_msgs.object_list = list(group_msgs.object_list)
attachments = MessageAttachment.objects.filter(group_message__in=group_msgs.object_list)
msg_replies = MessageReply.objects.filter(reply_to__in=group_msgs.object_list)
reply_to_list = [ r.reply_to_id for r in msg_replies ]
for msg in group_msgs.object_list:
msg.reply_cnt = reply_to_list.count(msg.id)
msg.replies = []
for r in msg_replies:
if msg.id == r.reply_to_id:
msg.replies.append(r)
msg.replies = msg.replies[-3:]
for att in attachments:
if att.group_message_id != msg.id:
continue
# The attachment name is the file name or directory name.
# If it is the top directory, use the repo name instead.
path = att.path
if path == '/':
repo = seafile_api.get_repo(att.repo_id)
if not repo:
# TODO: what should we do here? Tell the user that the repo
# no longer exists?
continue
att.name = repo.name
else:
path = path.rstrip('/') # cut out last '/' if possible
att.name = os.path.basename(path)
# Load for the discussion page if the attachment is an image and comes from a recommendation.
if att.attach_type == 'file' and att.src == 'recommend':
att.filetype, att.fileext = get_file_type_and_ext(att.name)
if att.filetype == IMAGE:
att.obj_id = seafile_api.get_file_id_by_path(att.repo_id, path)
if not att.obj_id:
att.err = 'File does not exist'
else:
att.token = seafile_api.get_fileserver_access_token(
att.repo_id, att.obj_id, 'view', username)
att.img_url = gen_file_get_url(att.token, att.name)
msg.attachment = att
return group_msgs
def get_timestamp(msgtimestamp):
if not msgtimestamp:
return 0
timestamp = int(time.mktime(msgtimestamp.timetuple()))
return timestamp
def group_msg_to_json(msg, get_all_replies):
ret = {
'from_email': msg.from_email,
'nickname': email2nickname(msg.from_email),
'timestamp': get_timestamp(msg.timestamp),
'msg': msg.message,
'msgid': msg.id,
}
atts_json = []
atts = MessageAttachment.objects.filter(group_message_id=msg.id)
for att in atts:
att_json = {
'path': att.path,
'repo': att.repo_id,
'type': att.attach_type,
'src': att.src,
}
atts_json.append(att_json)
if len(atts_json) > 0:
ret['atts'] = atts_json
reply_list = MessageReply.objects.filter(reply_to=msg)
msg.reply_cnt = reply_list.count()
if not get_all_replies and msg.reply_cnt > 3:
msg.replies = reply_list[msg.reply_cnt - 3:]
else:
msg.replies = reply_list
replies = []
for reply in msg.replies:
r = {
'from_email' : reply.from_email,
'nickname' : email2nickname(reply.from_email),
'timestamp' : get_timestamp(reply.timestamp),
'msg' : reply.message,
'msgid' : reply.id,
}
replies.append(r)
ret['reply_cnt'] = msg.reply_cnt
ret['replies'] = replies
return ret
def get_group_msgs_json(groupid, page, username):
# Show 15 group messages per page.
paginator = Paginator(GroupMessage.objects.filter(
group_id=groupid).order_by('-timestamp'), 15)
# If the requested page is out of range, return None
try:
group_msgs = paginator.page(page)
except (EmptyPage, InvalidPage):
return None, -1
if group_msgs.has_next():
next_page = group_msgs.next_page_number()
else:
next_page = -1
group_msgs.object_list = list(group_msgs.object_list)
msgs = [ group_msg_to_json(msg, True) for msg in group_msgs.object_list ]
return msgs, next_page
def get_group_message_json(group_id, msg_id, get_all_replies):
try:
msg = GroupMessage.objects.get(id=msg_id)
except GroupMessage.DoesNotExist:
return None
if group_id and group_id != msg.group_id:
return None
return group_msg_to_json(msg, get_all_replies)
def get_person_msgs(to_email, page, username):
# Show 15 personal messages per page.
paginator = Paginator(UserMessage.objects.get_messages_between_users(username, to_email).order_by('-timestamp'), 15)
# If the requested page is out of range, return None
try:
person_msgs = paginator.page(page)
except (EmptyPage, InvalidPage):
return None
# Force evaluate queryset to fix some database error for mysql.
person_msgs.object_list = list(person_msgs.object_list)
attachments = UserMsgAttachment.objects.list_attachments_by_user_msgs(person_msgs.object_list)
for msg in person_msgs.object_list:
msg.attachments = []
for att in attachments:
if att.user_msg != msg:
continue
pfds = att.priv_file_dir_share
if pfds is None: # in case this attachment has been unshared.
continue
att.repo_id = pfds.repo_id
att.path = pfds.path
att.name = os.path.basename(pfds.path.rstrip('/'))
att.token = pfds.token
msg.attachments.append(att)
return person_msgs
def get_email(id_or_email):
try:
uid = int(id_or_email)
try:
user = User.objects.get(id=uid)
except User.DoesNotExist:
user = None
if not user:
return None
to_email = user.email
except ValueError:
to_email = id_or_email
return to_email
def api_group_check(func):
"""
Decorator for initial group permission check tasks
anonymous user & private group --> login page
anonymous user & public group --> view_perm = "pub"
logged-in non-member & private group --> public info page
logged-in non-member & public group --> view_perm = "pub"
group member --> view_perm = "joined"
sys admin --> view_perm = "sys_admin"
"""
def _decorated(view, request, group_id, *args, **kwargs):
group_id_int = int(group_id) # Checked by URL Conf
group = get_group(group_id_int)
if not group:
return api_error(status.HTTP_404_NOT_FOUND, 'Group not found.')
group.is_staff = False
if PublicGroup.objects.filter(group_id=group.id):
group.is_pub = True
else:
group.is_pub = False
joined = is_group_user(group_id_int, request.user.username)
if joined:
group.view_perm = "joined"
group.is_staff = is_group_staff(group, request.user)
return func(view, request, group, *args, **kwargs)
if request.user.is_staff:
# viewed by system admin
group.view_perm = "sys_admin"
return func(view, request, group, *args, **kwargs)
if group.is_pub:
group.view_perm = "pub"
return func(view, request, group, *args, **kwargs)
# Return group public info page.
return api_error(status.HTTP_403_FORBIDDEN, 'Forbid to access this group.')
return _decorated
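# Illustrative usage of api_group_check (hypothetical view class; assumes the
# standard rest_framework APIView method signature, with group_id captured from
# the URL conf and replaced by the resolved group object):
#
#     class GroupMembersView(APIView):
#         @api_group_check
#         def get(self, request, group, format=None):
#             # group.view_perm is "joined", "pub" or "sys_admin" here
#             ...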
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR', '')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR', '')
return ip
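# e.g. with HTTP_X_FORWARDED_FOR set to "203.0.113.7, 10.0.0.1" this returns
# "203.0.113.7", the left-most (client-facing) address in the header.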
def get_diff_details(repo_id, commit1, commit2):
result = defaultdict(list)
diff_result = seafserv_threaded_rpc.get_diff(repo_id, commit1, commit2)
if not diff_result:
return result
for d in diff_result:
if d.status == 'add':
result['added_files'].append(d.name)
elif d.status == 'del':
result['deleted_files'].append(d.name)
elif d.status == 'mov':
result['renamed_files'].extend((d.name, d.new_name))
elif d.status == 'mod':
result['modified_files'].append(d.name)
elif d.status == 'newdir':
result['added_dirs'].append(d.name)
elif d.status == 'deldir':
result['deleted_dirs'].append(d.name)
return result
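# Illustrative return value (hypothetical file names); result is a
# defaultdict(list), so categories with no entries simply stay empty, and
# renamed entries are stored as a flat [old_name, new_name] sequence:
#
#     {'added_files': ['a.txt'], 'modified_files': ['b.md'],
#      'renamed_files': ['old.txt', 'new.txt']}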
JSON_CONTENT_TYPE = 'application/json; charset=utf-8'
def json_response(func):
@wraps(func)
def wrapped(*a, **kw):
result = func(*a, **kw)
if isinstance(result, HttpResponse):
return result
else:
return HttpResponse(json.dumps(result), status=200,
content_type=JSON_CONTENT_TYPE)
return wrapped
def get_token_v1(username):
token, _ = Token.objects.get_or_create(user=username)
return token
_ANDROID_DEVICE_ID_PATTERN = re.compile('^[a-f0-9]{1,16}$')
def get_token_v2(request, username, platform, device_id, device_name,
client_version, platform_version):
if platform in DESKTOP_PLATFORMS:
# desktop device id is the peer id, so it must be 40 chars
if len(device_id) != 40:
raise serializers.ValidationError('invalid device id')
elif platform == 'android':
# See http://developer.android.com/reference/android/provider/Settings.Secure.html#ANDROID_ID
# android device id is the 64bit secure id, so it must be 16 chars in hex representation
# but some user reports their device ids are 14 or 15 chars long. So we relax the validation.
if not _ANDROID_DEVICE_ID_PATTERN.match(device_id.lower()):
raise serializers.ValidationError('invalid device id')
elif platform == 'ios':
if len(device_id) != 36:
raise serializers.ValidationError('invalid device id')
else:
raise serializers.ValidationError('invalid platform')
return TokenV2.objects.get_or_create_token(
username, platform, device_id, device_name,
client_version, platform_version, get_client_ip(request))
def to_python_boolean(string):
"""Convert a string to boolean.
"""
string = string.lower()
if string in ('t', 'true', '1'):
return True
if string in ('f', 'false', '0'):
return False
raise ValueError("Invalid boolean value: '%s'" % string)
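# Illustrative behaviour:
#     to_python_boolean("True") -> True
#     to_python_boolean("0")    -> False
#     to_python_boolean("yes")  -> raises ValueError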
def is_seafile_pro():
return any(['seahub_extra' in app for app in settings.INSTALLED_APPS])
def api_repo_setting_permission_check(func):
"""Decorator for initial repo setting permission check
"""
def _decorated(view, request, repo_id, *args, **kwargs):
repo = seafile_api.get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# check permission
if is_org_context(request):
repo_owner = seafile_api.get_org_repo_owner(repo_id)
else:
repo_owner = seafile_api.get_repo_owner(repo_id)
username = request.user.username
if repo.is_virtual or username != repo_owner:
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
return func(view, request, repo_id, *args, **kwargs)
return _decorated
def api_repo_user_folder_perm_check(func):
"""Check repo setting permission and args used by user-folder-perm
"""
def _decorated(view, request, repo_id, *args, **kwargs):
repo = seafile_api.get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# check permission
if is_org_context(request):
repo_owner = seafile_api.get_org_repo_owner(repo_id)
else:
repo_owner = seafile_api.get_repo_owner(repo_id)
username = request.user.username
if repo.is_virtual or username != repo_owner:
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
# check arguments
user = request.data.get('user_email', None)
path = request.data.get('folder_path', None)
perm = request.data.get('permission', None)
try:
User.objects.get(email=user)
except User.DoesNotExist:
error_msg = 'User %s not found.' % user
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
if path:
path = path.rstrip('/') if path != '/' else path
if seafile_api.get_dir_id_by_path(repo_id, path) is None:
error_msg = 'Folder %s not found.' % path
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
if request.method in ('POST', 'PUT') and perm not in ('r', 'rw'):
error_msg = 'permission invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
return func(view, request, repo_id, *args, **kwargs)
return _decorated
def api_repo_group_folder_perm_check(func):
"""Check repo setting permission and args used by group-folder-perm
"""
def _decorated(view, request, repo_id, *args, **kwargs):
repo = seafile_api.get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# check permission
if is_org_context(request):
repo_owner = seafile_api.get_org_repo_owner(repo_id)
else:
repo_owner = seafile_api.get_repo_owner(repo_id)
username = request.user.username
if repo.is_virtual or username != repo_owner:
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
# check arguments
group_id = request.data.get('group_id', None)
path = request.data.get('folder_path', None)
perm = request.data.get('permission', None)
try:
group_id = int(group_id)
except (TypeError, ValueError):  # missing or non-numeric group_id
error_msg = 'group_id invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
if not seaserv.get_group(group_id):
error_msg = 'Group %s not found.' % group_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
if path:
path = path.rstrip('/') if path != '/' else path
if seafile_api.get_dir_id_by_path(repo_id, path) is None:
error_msg = 'Folder %s not found.' % path
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
if request.method in ('POST', 'PUT') and perm not in ('r', 'rw'):
error_msg = 'permission invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
return func(view, request, repo_id, *args, **kwargs)
return _decorated
def get_user_common_info(email, avatar_size=AVATAR_DEFAULT_SIZE):
try:
avatar_url, is_default, date_uploaded = api_avatar_url(email, avatar_size)
except Exception as e:
logger.error(e)
avatar_url = get_default_avatar_url()
p = Profile.objects.get_profile_by_user(email)
if p:
login_id = p.login_id if p.login_id else ''
else:
login_id = ''
return {
"email": email,
"name": email2nickname(email),
"avatar_url": avatar_url,
"login_id": login_id
}
|
|
#
# Copyright 2014 Rackspace, Inc
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from oslo_config import cfg
import six
from ironic.common import pxe_utils
from ironic.conductor import task_manager
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base as db_base
from ironic.tests.objects import utils as object_utils
CONF = cfg.CONF
class TestPXEUtils(db_base.DbTestCase):
def setUp(self):
super(TestPXEUtils, self).setUp()
mgr_utils.mock_the_extension_manager(driver="fake")
common_pxe_options = {
'deployment_aki_path': u'/tftpboot/1be26c0b-03f2-4d2e-ae87-'
u'c02d7f33c123/deploy_kernel',
'aki_path': u'/tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/'
u'kernel',
'pxe_append_params': 'test_param',
'deployment_ari_path': u'/tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7'
u'f33c123/deploy_ramdisk',
'root_device': 'vendor=fake,size=123',
'ipa-api-url': 'http://192.168.122.184:6385',
}
self.pxe_options = {
'deployment_key': '0123456789ABCDEFGHIJKLMNOPQRSTUV',
'ari_path': u'/tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/'
u'ramdisk',
'iscsi_target_iqn': u'iqn-1be26c0b-03f2-4d2e-ae87-c02d7f33'
u'c123',
'deployment_id': u'1be26c0b-03f2-4d2e-ae87-c02d7f33c123',
'ironic_api_url': 'http://192.168.122.184:6385',
'disk': 'cciss/c0d0,sda,hda,vda',
'boot_option': 'netboot',
'ipa-driver-name': 'pxe_ssh',
'boot_mode': 'bios',
}
self.pxe_options.update(common_pxe_options)
self.agent_pxe_options = {
'ipa-driver-name': 'agent_ipmitool',
}
self.agent_pxe_options.update(common_pxe_options)
self.ipxe_options = self.pxe_options.copy()
self.ipxe_options.update({
'deployment_aki_path': 'http://1.2.3.4:1234/deploy_kernel',
'deployment_ari_path': 'http://1.2.3.4:1234/deploy_ramdisk',
'aki_path': 'http://1.2.3.4:1234/kernel',
'ari_path': 'http://1.2.3.4:1234/ramdisk',
})
self.node = object_utils.create_test_node(self.context)
def test__build_pxe_config(self):
rendered_template = pxe_utils._build_pxe_config(
self.pxe_options, CONF.pxe.pxe_config_template)
expected_template = open(
'ironic/tests/drivers/pxe_config.template').read().rstrip()
self.assertEqual(six.text_type(expected_template), rendered_template)
def test__build_pxe_config_with_agent(self):
rendered_template = pxe_utils._build_pxe_config(
self.agent_pxe_options, CONF.agent.agent_pxe_config_template)
expected_template = open(
'ironic/tests/drivers/agent_pxe_config.template').read().rstrip()
self.assertEqual(six.text_type(expected_template), rendered_template)
def test__build_ipxe_config(self):
# NOTE(lucasagomes): iPXE is just an extension of the PXE driver,
# it doesn't have its own configuration option for a template.
# More info:
# http://docs.openstack.org/developer/ironic/deploy/install-guide.html
self.config(
pxe_config_template='ironic/drivers/modules/ipxe_config.template',
group='pxe'
)
self.config(http_url='http://1.2.3.4:1234', group='pxe')
rendered_template = pxe_utils._build_pxe_config(
self.ipxe_options, CONF.pxe.pxe_config_template)
expected_template = open(
'ironic/tests/drivers/ipxe_config.template').read().rstrip()
self.assertEqual(six.text_type(expected_template), rendered_template)
def test__build_elilo_config(self):
pxe_opts = self.pxe_options
pxe_opts['boot_mode'] = 'uefi'
rendered_template = pxe_utils._build_pxe_config(
pxe_opts, CONF.pxe.uefi_pxe_config_template)
expected_template = open(
'ironic/tests/drivers/elilo_efi_pxe_config.template'
).read().rstrip()
self.assertEqual(six.text_type(expected_template), rendered_template)
@mock.patch('ironic.common.utils.create_link_without_raise', autospec=True)
@mock.patch('ironic.common.utils.unlink_without_raise', autospec=True)
@mock.patch('ironic.drivers.utils.get_node_mac_addresses', autospec=True)
def test__write_mac_pxe_configs(self, get_macs_mock, unlink_mock,
create_link_mock):
macs = [
'00:11:22:33:44:55:66',
'00:11:22:33:44:55:67'
]
get_macs_mock.return_value = macs
create_link_calls = [
mock.call(u'/tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
'/tftpboot/pxelinux.cfg/01-00-11-22-33-44-55-66'),
mock.call(u'/tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
'/tftpboot/pxelinux.cfg/01-00-11-22-33-44-55-67')
]
unlink_calls = [
mock.call('/tftpboot/pxelinux.cfg/01-00-11-22-33-44-55-66'),
mock.call('/tftpboot/pxelinux.cfg/01-00-11-22-33-44-55-67'),
]
with task_manager.acquire(self.context, self.node.uuid) as task:
pxe_utils._link_mac_pxe_configs(task)
unlink_mock.assert_has_calls(unlink_calls)
create_link_mock.assert_has_calls(create_link_calls)
@mock.patch('ironic.common.utils.create_link_without_raise', autospec=True)
@mock.patch('ironic.common.utils.unlink_without_raise', autospec=True)
@mock.patch('ironic.drivers.utils.get_node_mac_addresses', autospec=True)
def test__write_mac_ipxe_configs(self, get_macs_mock, unlink_mock,
create_link_mock):
self.config(ipxe_enabled=True, group='pxe')
macs = [
'00:11:22:33:44:55:66',
'00:11:22:33:44:55:67'
]
get_macs_mock.return_value = macs
create_link_calls = [
mock.call(u'/httpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
'/httpboot/pxelinux.cfg/00-11-22-33-44-55-66'),
mock.call(u'/httpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
'/httpboot/pxelinux.cfg/00112233445566'),
mock.call(u'/httpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
'/httpboot/pxelinux.cfg/00-11-22-33-44-55-67'),
mock.call(u'/httpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
'/httpboot/pxelinux.cfg/00112233445567'),
]
unlink_calls = [
mock.call('/httpboot/pxelinux.cfg/00-11-22-33-44-55-66'),
mock.call('/httpboot/pxelinux.cfg/00112233445566'),
mock.call('/httpboot/pxelinux.cfg/00-11-22-33-44-55-67'),
mock.call('/httpboot/pxelinux.cfg/00112233445567'),
]
with task_manager.acquire(self.context, self.node.uuid) as task:
pxe_utils._link_mac_pxe_configs(task)
unlink_mock.assert_has_calls(unlink_calls)
create_link_mock.assert_has_calls(create_link_calls)
@mock.patch('ironic.common.utils.create_link_without_raise', autospec=True)
@mock.patch('ironic.common.utils.unlink_without_raise', autospec=True)
@mock.patch('ironic.common.dhcp_factory.DHCPFactory.provider',
autospec=True)
def test__link_ip_address_pxe_configs(self, provider_mock, unlink_mock,
create_link_mock):
ip_address = '10.10.0.1'
address = "aa:aa:aa:aa:aa:aa"
object_utils.create_test_port(self.context, node_id=self.node.id,
address=address)
provider_mock.get_ip_addresses.return_value = [ip_address]
create_link_calls = [
mock.call(u'/tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
u'/tftpboot/0A0A0001.conf'),
]
with task_manager.acquire(self.context, self.node.uuid) as task:
pxe_utils._link_ip_address_pxe_configs(task)
unlink_mock.assert_called_once_with('/tftpboot/0A0A0001.conf')
create_link_mock.assert_has_calls(create_link_calls)
@mock.patch('ironic.common.utils.write_to_file', autospec=True)
@mock.patch.object(pxe_utils, '_build_pxe_config', autospec=True)
@mock.patch('ironic.openstack.common.fileutils.ensure_tree', autospec=True)
def test_create_pxe_config(self, ensure_tree_mock, build_mock,
write_mock):
build_mock.return_value = self.pxe_options
with task_manager.acquire(self.context, self.node.uuid) as task:
pxe_utils.create_pxe_config(task, self.pxe_options,
CONF.pxe.pxe_config_template)
build_mock.assert_called_with(self.pxe_options,
CONF.pxe.pxe_config_template)
ensure_calls = [
mock.call(os.path.join(CONF.pxe.tftp_root, self.node.uuid)),
mock.call(os.path.join(CONF.pxe.tftp_root, 'pxelinux.cfg'))
]
ensure_tree_mock.assert_has_calls(ensure_calls)
pxe_cfg_file_path = pxe_utils.get_pxe_config_file_path(self.node.uuid)
write_mock.assert_called_with(pxe_cfg_file_path, self.pxe_options)
@mock.patch('ironic.common.utils.rmtree_without_raise', autospec=True)
@mock.patch('ironic.common.utils.unlink_without_raise', autospec=True)
def test_clean_up_pxe_config(self, unlink_mock, rmtree_mock):
address = "aa:aa:aa:aa:aa:aa"
object_utils.create_test_port(self.context, node_id=self.node.id,
address=address)
with task_manager.acquire(self.context, self.node.uuid) as task:
pxe_utils.clean_up_pxe_config(task)
unlink_mock.assert_called_once_with("/tftpboot/pxelinux.cfg/01-%s"
% address.replace(':', '-'))
rmtree_mock.assert_called_once_with(
os.path.join(CONF.pxe.tftp_root, self.node.uuid))
def test__get_pxe_mac_path(self):
mac = '00:11:22:33:44:55:66'
self.assertEqual('/tftpboot/pxelinux.cfg/01-00-11-22-33-44-55-66',
pxe_utils._get_pxe_mac_path(mac))
def test__get_pxe_mac_path_ipxe(self):
self.config(ipxe_enabled=True, group='pxe')
self.config(http_root='/httpboot', group='pxe')
mac = '00:11:22:33:AA:BB:CC'
self.assertEqual('/httpboot/pxelinux.cfg/00-11-22-33-aa-bb-cc',
pxe_utils._get_pxe_mac_path(mac))
def test__get_pxe_ip_address_path(self):
ipaddress = '10.10.0.1'
self.assertEqual('/tftpboot/0A0A0001.conf',
pxe_utils._get_pxe_ip_address_path(ipaddress))
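# Worked example of the IP-to-filename mapping exercised above: each octet
# of 10.10.0.1 is written as two uppercase hex digits (10 -> 0A, 10 -> 0A,
# 0 -> 00, 1 -> 01), giving '0A0A0001'. A minimal sketch of the same
# arithmetic (not Ironic's implementation):
#
#   ''.join('%02X' % int(o) for o in '10.10.0.1'.split('.'))  # -> '0A0A0001'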
def test_get_root_dir(self):
expected_dir = '/tftproot'
self.config(ipxe_enabled=False, group='pxe')
self.config(tftp_root=expected_dir, group='pxe')
self.assertEqual(expected_dir, pxe_utils.get_root_dir())
def test_get_root_dir_ipxe(self):
expected_dir = '/httpboot'
self.config(ipxe_enabled=True, group='pxe')
self.config(http_root=expected_dir, group='pxe')
self.assertEqual(expected_dir, pxe_utils.get_root_dir())
def test_get_pxe_config_file_path(self):
self.assertEqual(os.path.join(CONF.pxe.tftp_root,
self.node.uuid,
'config'),
pxe_utils.get_pxe_config_file_path(self.node.uuid))
def test_dhcp_options_for_instance(self):
self.config(tftp_server='192.0.2.1', group='pxe')
self.config(pxe_bootfile_name='fake-bootfile', group='pxe')
expected_info = [{'opt_name': 'bootfile-name',
'opt_value': 'fake-bootfile'},
{'opt_name': 'server-ip-address',
'opt_value': '192.0.2.1'},
{'opt_name': 'tftp-server',
'opt_value': '192.0.2.1'}
]
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertEqual(expected_info,
pxe_utils.dhcp_options_for_instance(task))
def _test_get_deploy_kr_info(self, expected_dir):
node_uuid = 'fake-node'
driver_info = {
'deploy_kernel': 'glance://deploy-kernel',
'deploy_ramdisk': 'glance://deploy-ramdisk',
}
expected = {
'deploy_kernel': ('glance://deploy-kernel',
expected_dir + '/fake-node/deploy_kernel'),
'deploy_ramdisk': ('glance://deploy-ramdisk',
expected_dir + '/fake-node/deploy_ramdisk'),
}
kr_info = pxe_utils.get_deploy_kr_info(node_uuid, driver_info)
self.assertEqual(expected, kr_info)
def test_get_deploy_kr_info(self):
expected_dir = '/tftp'
self.config(tftp_root=expected_dir, group='pxe')
self._test_get_deploy_kr_info(expected_dir)
def test_get_deploy_kr_info_ipxe(self):
expected_dir = '/http'
self.config(ipxe_enabled=True, group='pxe')
self.config(http_root=expected_dir, group='pxe')
self._test_get_deploy_kr_info(expected_dir)
def test_get_deploy_kr_info_bad_driver_info(self):
self.config(tftp_root='/tftp', group='pxe')
node_uuid = 'fake-node'
driver_info = {}
self.assertRaises(KeyError,
pxe_utils.get_deploy_kr_info,
node_uuid,
driver_info)
def test_dhcp_options_for_instance_ipxe(self):
self.config(tftp_server='192.0.2.1', group='pxe')
self.config(pxe_bootfile_name='fake-bootfile', group='pxe')
self.config(ipxe_enabled=True, group='pxe')
self.config(http_url='http://192.0.3.2:1234', group='pxe')
self.config(ipxe_boot_script='/test/boot.ipxe', group='pxe')
self.config(dhcp_provider='isc', group='dhcp')
expected_boot_script_url = 'http://192.0.3.2:1234/boot.ipxe'
expected_info = [{'opt_name': '!175,bootfile-name',
'opt_value': 'fake-bootfile'},
{'opt_name': 'server-ip-address',
'opt_value': '192.0.2.1'},
{'opt_name': 'tftp-server',
'opt_value': '192.0.2.1'},
{'opt_name': 'bootfile-name',
'opt_value': expected_boot_script_url}]
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertItemsEqual(expected_info,
pxe_utils.dhcp_options_for_instance(task))
self.config(dhcp_provider='neutron', group='dhcp')
expected_boot_script_url = 'http://192.0.3.2:1234/boot.ipxe'
expected_info = [{'opt_name': 'tag:!ipxe,bootfile-name',
'opt_value': 'fake-bootfile'},
{'opt_name': 'server-ip-address',
'opt_value': '192.0.2.1'},
{'opt_name': 'tftp-server',
'opt_value': '192.0.2.1'},
{'opt_name': 'tag:ipxe,bootfile-name',
'opt_value': expected_boot_script_url}]
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertItemsEqual(expected_info,
pxe_utils.dhcp_options_for_instance(task))
@mock.patch('ironic.common.utils.rmtree_without_raise', autospec=True)
@mock.patch('ironic.common.utils.unlink_without_raise', autospec=True)
@mock.patch('ironic.common.dhcp_factory.DHCPFactory.provider')
def test_clean_up_pxe_config_uefi(self, provider_mock, unlink_mock,
rmtree_mock):
ip_address = '10.10.0.1'
address = "aa:aa:aa:aa:aa:aa"
properties = {'capabilities': 'boot_mode:uefi'}
object_utils.create_test_port(self.context, node_id=self.node.id,
address=address)
provider_mock.get_ip_addresses.return_value = [ip_address]
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.properties = properties
pxe_utils.clean_up_pxe_config(task)
unlink_mock.assert_called_once_with('/tftpboot/0A0A0001.conf')
rmtree_mock.assert_called_once_with(
os.path.join(CONF.pxe.tftp_root, self.node.uuid))
@mock.patch('ironic.common.utils.rmtree_without_raise')
@mock.patch('ironic.common.utils.unlink_without_raise')
@mock.patch('ironic.common.dhcp_factory.DHCPFactory.provider')
def test_clean_up_pxe_config_uefi_instance_info(self,
provider_mock, unlink_mock,
rmtree_mock):
ip_address = '10.10.0.1'
address = "aa:aa:aa:aa:aa:aa"
object_utils.create_test_port(self.context, node_id=self.node.id,
address=address)
provider_mock.get_ip_addresses.return_value = [ip_address]
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.instance_info['deploy_boot_mode'] = 'uefi'
pxe_utils.clean_up_pxe_config(task)
unlink_mock.assert_called_once_with('/tftpboot/0A0A0001.conf')
rmtree_mock.assert_called_once_with(
os.path.join(CONF.pxe.tftp_root, self.node.uuid))
|
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import httplib
import locale
import json
import logging
import urllib
import time
import urllib2
from resource_management import Environment
from ambari_commons.aggregate_functions import sample_standard_deviation, mean
from resource_management.libraries.functions.curl_krb_request import curl_krb_request
from resource_management.libraries.functions.curl_krb_request import DEFAULT_KERBEROS_KINIT_TIMER_MS
from resource_management.libraries.functions.curl_krb_request import KERBEROS_KINIT_TIMER_PARAMETER
RESULT_STATE_OK = 'OK'
RESULT_STATE_CRITICAL = 'CRITICAL'
RESULT_STATE_WARNING = 'WARNING'
RESULT_STATE_UNKNOWN = 'UNKNOWN'
RESULT_STATE_SKIPPED = 'SKIPPED'
HDFS_NN_STATE_ACTIVE = 'active'
HDFS_NN_STATE_STANDBY = 'standby'
HDFS_SITE_KEY = '{{hdfs-site}}'
NAMESERVICE_KEY = '{{hdfs-site/dfs.internal.nameservices}}'
NN_HTTP_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.http-address}}'
NN_HTTPS_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.https-address}}'
DFS_POLICY_KEY = '{{hdfs-site/dfs.http.policy}}'
KERBEROS_KEYTAB = '{{hdfs-site/dfs.web.authentication.kerberos.keytab}}'
KERBEROS_PRINCIPAL = '{{hdfs-site/dfs.web.authentication.kerberos.principal}}'
SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
EXECUTABLE_SEARCH_PATHS = '{{kerberos-env/executable_search_paths}}'
METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY = '{{ams-site/timeline.metrics.service.webapp.address}}'
METRICS_COLLECTOR_VIP_HOST_KEY = '{{cluster-env/metrics_collector_vip_host}}'
METRICS_COLLECTOR_VIP_PORT_KEY = '{{cluster-env/metrics_collector_vip_port}}'
CONNECTION_TIMEOUT_KEY = 'connection.timeout'
CONNECTION_TIMEOUT_DEFAULT = 5.0
MERGE_HA_METRICS_PARAM_KEY = 'mergeHaMetrics'
MERGE_HA_METRICS_PARAM_DEFAULT = False
METRIC_NAME_PARAM_KEY = 'metricName'
METRIC_NAME_PARAM_DEFAULT = ''
METRIC_UNITS_PARAM_KEY = 'metric.units'
METRIC_UNITS_DEFAULT = ''
APP_ID_PARAM_KEY = 'appId'
APP_ID_PARAM_DEFAULT = 'NAMENODE'
# the interval to check the metric (should be cast to int but could be a float)
INTERVAL_PARAM_KEY = 'interval'
INTERVAL_PARAM_DEFAULT = 60
# the default threshold to trigger a CRITICAL (should be cast to int but could be a float)
DEVIATION_CRITICAL_THRESHOLD_KEY = 'metric.deviation.critical.threshold'
DEVIATION_CRITICAL_THRESHOLD_DEFAULT = 10
# the default threshold to trigger a WARNING (should be cast to int but could be a float)
DEVIATION_WARNING_THRESHOLD_KEY = 'metric.deviation.warning.threshold'
DEVIATION_WARNING_THRESHOLD_DEFAULT = 5
NAMENODE_SERVICE_RPC_PORT_KEY = ''
MINIMUM_VALUE_THRESHOLD_KEY = 'minimumValue'
AMS_METRICS_GET_URL = "/ws/v1/timeline/metrics?%s"
# The variance for this alert is 27MB which is 27% of the 100MB average (20MB is the limit)
DEVIATION_THRESHOLD_MESSAGE = "The variance for this alert is {0}{1} which is {2:.0f}% of the {3}{4} average ({5}{6} is the limit)"
# The variance for this alert is 15MB which is within 20% of the 904ms average (20MB is the limit)
DEVIATION_OK_MESSAGE = "The variance for this alert is {0}{1} which is within {2:.0f}% of the {3}{4} average ({5}{6} is the limit)"
logger = logging.getLogger()
def get_tokens():
"""
Returns a tuple of tokens in the format {{site/property}} that will be used
to build the dictionary passed into execute
"""
return (HDFS_SITE_KEY, NAMESERVICE_KEY, NN_HTTP_ADDRESS_KEY, DFS_POLICY_KEY,
EXECUTABLE_SEARCH_PATHS, NN_HTTPS_ADDRESS_KEY, SMOKEUSER_KEY,
KERBEROS_KEYTAB, KERBEROS_PRINCIPAL, SECURITY_ENABLED_KEY,
METRICS_COLLECTOR_VIP_HOST_KEY, METRICS_COLLECTOR_VIP_PORT_KEY,
METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY)
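# Hedged sketch of how the token keys above end up as keys of the
# configurations/parameters mappings handed to execute(); host names, ports
# and metric names below are illustrative, not real cluster values.
def _example_execute_inputs():
    configurations = {
        '{{hdfs-site}}': {'dfs.namenode.servicerpc-address': 'nn1.example.com:8021'},
        '{{ams-site/timeline.metrics.service.webapp.address}}': 'ams.example.com:6188',
        '{{cluster-env/security_enabled}}': 'false',
        '{{cluster-env/smokeuser}}': 'ambari-qa',
    }
    parameters = {
        'metricName': 'rpc.rpc.datanode.RpcQueueTimeAvgTime',
        'appId': 'NAMENODE',
        'interval': 60,
        'metric.deviation.warning.threshold': 100,
        'metric.deviation.critical.threshold': 200,
    }
    return execute(configurations=configurations, parameters=parameters,
                   host_name='nn1.example.com')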
def execute(configurations={}, parameters={}, host_name=None):
"""
Returns a tuple containing the result code and a pre-formatted result label
Keyword arguments:
configurations : a mapping of configuration key to value
parameters : a mapping of script parameter key to value
host_name : the name of this host where the alert is running
:type configurations: dict
:type parameters: dict
:type host_name: str
"""
hostnames = host_name
current_time = int(time.time()) * 1000
# parse script arguments
connection_timeout = CONNECTION_TIMEOUT_DEFAULT
if CONNECTION_TIMEOUT_KEY in parameters:
connection_timeout = float(parameters[CONNECTION_TIMEOUT_KEY])
merge_ha_metrics = MERGE_HA_METRICS_PARAM_DEFAULT
if MERGE_HA_METRICS_PARAM_KEY in parameters:
merge_ha_metrics = parameters[MERGE_HA_METRICS_PARAM_KEY].lower() == 'true'
metric_name = METRIC_NAME_PARAM_DEFAULT
if METRIC_NAME_PARAM_KEY in parameters:
metric_name = parameters[METRIC_NAME_PARAM_KEY]
metric_units = METRIC_UNITS_DEFAULT
if METRIC_UNITS_PARAM_KEY in parameters:
metric_units = parameters[METRIC_UNITS_PARAM_KEY]
app_id = APP_ID_PARAM_DEFAULT
if APP_ID_PARAM_KEY in parameters:
app_id = parameters[APP_ID_PARAM_KEY]
interval = INTERVAL_PARAM_DEFAULT
if INTERVAL_PARAM_KEY in parameters:
interval = _coerce_to_integer(parameters[INTERVAL_PARAM_KEY])
warning_threshold = DEVIATION_WARNING_THRESHOLD_DEFAULT
if DEVIATION_WARNING_THRESHOLD_KEY in parameters:
warning_threshold = _coerce_to_integer(parameters[DEVIATION_WARNING_THRESHOLD_KEY])
critical_threshold = DEVIATION_CRITICAL_THRESHOLD_DEFAULT
if DEVIATION_CRITICAL_THRESHOLD_KEY in parameters:
critical_threshold = _coerce_to_integer(parameters[DEVIATION_CRITICAL_THRESHOLD_KEY])
minimum_value_threshold = None
if MINIMUM_VALUE_THRESHOLD_KEY in parameters:
minimum_value_threshold = _coerce_to_integer(parameters[MINIMUM_VALUE_THRESHOLD_KEY])
#parse configuration
if configurations is None:
return (RESULT_STATE_UNKNOWN, ['There were no configurations supplied to the script.'])
# hdfs-site is required
if not HDFS_SITE_KEY in configurations:
return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(HDFS_SITE_KEY)])
if METRICS_COLLECTOR_VIP_HOST_KEY in configurations and METRICS_COLLECTOR_VIP_PORT_KEY in configurations:
collector_host = configurations[METRICS_COLLECTOR_VIP_HOST_KEY]
collector_port = int(configurations[METRICS_COLLECTOR_VIP_PORT_KEY])
else:
# ams-site/timeline.metrics.service.webapp.address is required
if not METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY in configurations:
return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY)])
else:
collector_webapp_address = configurations[METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY].split(":")
if valid_collector_webapp_address(collector_webapp_address):
collector_host = collector_webapp_address[0]
collector_port = int(collector_webapp_address[1])
else:
return (RESULT_STATE_UNKNOWN, ['{0} value should be set as "fqdn_hostname:port", but set to {1}'.format(
METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY, configurations[METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY])])
namenode_service_rpc_address = None
# hdfs-site is required
if not HDFS_SITE_KEY in configurations:
return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(HDFS_SITE_KEY)])
hdfs_site = configurations[HDFS_SITE_KEY]
if 'dfs.namenode.servicerpc-address' in hdfs_site:
namenode_service_rpc_address = hdfs_site['dfs.namenode.servicerpc-address']
# if namenode alert and HA mode
if NAMESERVICE_KEY in configurations and app_id.lower() == 'namenode':
# hdfs-site is required
if not HDFS_SITE_KEY in configurations:
return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(HDFS_SITE_KEY)])
if SMOKEUSER_KEY in configurations:
smokeuser = configurations[SMOKEUSER_KEY]
executable_paths = None
if EXECUTABLE_SEARCH_PATHS in configurations:
executable_paths = configurations[EXECUTABLE_SEARCH_PATHS]
# parse script arguments
security_enabled = False
if SECURITY_ENABLED_KEY in configurations:
security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
kerberos_keytab = None
if KERBEROS_KEYTAB in configurations:
kerberos_keytab = configurations[KERBEROS_KEYTAB]
kerberos_principal = None
if KERBEROS_PRINCIPAL in configurations:
kerberos_principal = configurations[KERBEROS_PRINCIPAL]
kerberos_principal = kerberos_principal.replace('_HOST', host_name)
# determine whether or not SSL is enabled
is_ssl_enabled = False
if DFS_POLICY_KEY in configurations:
dfs_policy = configurations[DFS_POLICY_KEY]
if dfs_policy == "HTTPS_ONLY":
is_ssl_enabled = True
kinit_timer_ms = parameters.get(KERBEROS_KINIT_TIMER_PARAMETER, DEFAULT_KERBEROS_KINIT_TIMER_MS)
name_service = configurations[NAMESERVICE_KEY]
# look for dfs.ha.namenodes.foo
nn_unique_ids_key = 'dfs.ha.namenodes.' + name_service
if not nn_unique_ids_key in hdfs_site:
return (RESULT_STATE_UNKNOWN, ['Unable to find unique NameNode alias key {0}'.format(nn_unique_ids_key)])
namenode_http_fragment = 'dfs.namenode.http-address.{0}.{1}'
jmx_uri_fragment = "http://{0}/jmx?qry=Hadoop:service=NameNode,name=*"
if is_ssl_enabled:
namenode_http_fragment = 'dfs.namenode.https-address.{0}.{1}'
jmx_uri_fragment = "https://{0}/jmx?qry=Hadoop:service=NameNode,name=*"
# now we have something like 'nn1,nn2,nn3,nn4'
# turn it into dfs.namenode.[property].[dfs.nameservices].[nn_unique_id]
# ie dfs.namenode.http-address.hacluster.nn1
namenodes = []
active_namenodes = []
nn_unique_ids = hdfs_site[nn_unique_ids_key].split(',')
for nn_unique_id in nn_unique_ids:
key = namenode_http_fragment.format(name_service, nn_unique_id)
if key in hdfs_site:
# use str() to ensure that unicode strings do not have the u' in them
value = str(hdfs_site[key])
namenode = str(hdfs_site[key]).split(":")[0]
namenodes.append(namenode)
try:
jmx_uri = jmx_uri_fragment.format(value)
if kerberos_principal is not None and kerberos_keytab is not None and security_enabled:
env = Environment.get_instance()
# curl requires an integer timeout
curl_connection_timeout = int(connection_timeout)
state_response, error_msg, time_millis = curl_krb_request(env.tmp_dir,
kerberos_keytab, kerberos_principal, jmx_uri,"ha_nn_health", executable_paths, False,
"NameNode High Availability Health", smokeuser, connection_timeout=curl_connection_timeout,
kinit_timer_ms = kinit_timer_ms)
state = _get_ha_state_from_json(state_response)
else:
state_response = get_jmx(jmx_uri, connection_timeout)
state = _get_ha_state_from_json(state_response)
if state == HDFS_NN_STATE_ACTIVE:
active_namenodes.append(namenode)
# Only check active NN
nn_service_rpc_address_key = 'dfs.namenode.servicerpc-address.{0}.{1}'.format(name_service, nn_unique_id)
if nn_service_rpc_address_key in hdfs_site:
namenode_service_rpc_address = hdfs_site[nn_service_rpc_address_key]
pass
except:
logger.exception("Unable to determine the active NameNode")
pass
if merge_ha_metrics:
hostnames = ",".join(namenodes)
# run only on active NN, no need to run the same requests from the standby
if host_name not in active_namenodes:
return (RESULT_STATE_SKIPPED, ['This alert will be reported by another host.'])
pass
# Skip service rpc alert if port is not enabled
if not namenode_service_rpc_address and 'rpc.rpc.datanode' in metric_name:
return (RESULT_STATE_SKIPPED, ['Service RPC port is not enabled.'])
get_metrics_parameters = {
"metricNames": metric_name,
"appId": app_id,
"hostname": hostnames,
"startTime": current_time - interval * 60 * 1000,
"endTime": current_time,
"grouped": "true",
}
encoded_get_metrics_parameters = urllib.urlencode(get_metrics_parameters)
try:
conn = httplib.HTTPConnection(collector_host, int(collector_port),
timeout=connection_timeout)
conn.request("GET", AMS_METRICS_GET_URL % encoded_get_metrics_parameters)
response = conn.getresponse()
data = response.read()
conn.close()
except Exception:
return (RESULT_STATE_UNKNOWN, ["Unable to retrieve metrics from the Ambari Metrics service."])
if response.status != 200:
return (RESULT_STATE_UNKNOWN, ["Unable to retrieve metrics from the Ambari Metrics service."])
data_json = json.loads(data)
metrics = []
# merging metrics from multiple hosts can yield a large standard deviation,
# e.g. if host1 reports small local values while host2 reports large ones
for metrics_data in data_json["metrics"]:
metrics += metrics_data["metrics"].values()
pass
if not metrics or len(metrics) < 2:
number_of_data_points = len(metrics) if metrics else 0
return (RESULT_STATE_SKIPPED, ["There are not enough data points to calculate the standard deviation ({0} sampled)".format(
number_of_data_points)])
minimum_value_multiplier = 1
if 'dfs.FSNamesystem.CapacityUsed' in metric_name:
minimum_value_multiplier = 1024 * 1024 # MB to bytes
elif 'rpc.rpc.datanode' in metric_name or 'rpc.rpc.client' in metric_name:
minimum_value_multiplier = 1000 # seconds to millis
if minimum_value_threshold:
# Filter out points below min threshold
metrics = [metric for metric in metrics if metric > (minimum_value_threshold * minimum_value_multiplier)]
if len(metrics) < 2:
return (RESULT_STATE_OK, ['There were no data points above the minimum threshold of {0} seconds'.format(minimum_value_threshold)])
mean_value = mean(metrics)
stddev = sample_standard_deviation(metrics)
try:
deviation_percent = stddev / float(mean_value) * 100
except ZeroDivisionError:
# should not be a case for this alert
return (RESULT_STATE_SKIPPED, ["Unable to calculate the standard deviation because the mean value is 0"])
# log the AMS request
if logger.isEnabledFor(logging.DEBUG):
logger.debug("""
AMS request parameters - {0}
AMS response - {1}
Mean - {2}
Standard deviation - {3}
Percentage standard deviation - {4}
""".format(encoded_get_metrics_parameters, data_json, mean_value, stddev, deviation_percent))
mean_value_localized = locale.format("%.0f", mean_value, grouping=True)
variance_value = (deviation_percent / 100.0) * mean_value
variance_value_localized = locale.format("%.0f", variance_value, grouping=True)
# check for CRITICAL status
if deviation_percent > critical_threshold:
threshold_value = ((critical_threshold / 100.0) * mean_value)
threshold_value_localized = locale.format("%.0f", threshold_value, grouping=True)
message = DEVIATION_THRESHOLD_MESSAGE.format(variance_value_localized, metric_units, deviation_percent,
mean_value_localized, metric_units, threshold_value_localized, metric_units)
return (RESULT_STATE_CRITICAL,[message])
# check for WARNING status
if deviation_percent > warning_threshold:
threshold_value = ((warning_threshold / 100.0) * mean_value)
threshold_value_localized = locale.format("%.0f", threshold_value, grouping = True)
message = DEVIATION_THRESHOLD_MESSAGE.format(variance_value_localized, metric_units, deviation_percent,
mean_value_localized, metric_units, threshold_value_localized, metric_units)
return (RESULT_STATE_WARNING, [message])
# return OK status; use the warning threshold as the value to compare against
threshold_value = ((warning_threshold / 100.0) * mean_value)
threshold_value_localized = locale.format("%.0f", threshold_value, grouping = True)
message = DEVIATION_OK_MESSAGE.format(variance_value_localized, metric_units, warning_threshold,
mean_value_localized, metric_units, threshold_value_localized, metric_units)
return (RESULT_STATE_OK,[message])
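# Worked example of the deviation arithmetic used above (illustrative
# numbers; reuses the mean/sample_standard_deviation helpers imported at the
# top of this script together with the default 10% critical threshold):
def _example_deviation_check():
    metrics = [95.0, 100.0, 105.0, 130.0]
    mean_value = mean(metrics)                             # 107.5
    stddev = sample_standard_deviation(metrics)            # ~15.5
    deviation_percent = stddev / float(mean_value) * 100   # ~14.5%
    # 14.5% exceeds DEVIATION_CRITICAL_THRESHOLD_DEFAULT (10), so this
    # sample set would be reported as CRITICAL.
    return deviation_percent > DEVIATION_CRITICAL_THRESHOLD_DEFAULT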
def valid_collector_webapp_address(webapp_address):
if len(webapp_address) == 2 \
and webapp_address[0] != '127.0.0.1' \
and webapp_address[0] != '0.0.0.0' \
and webapp_address[1].isdigit():
return True
return False
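# Quick sketch of the validation above (illustrative host/port):
#
#   valid_collector_webapp_address('ams.example.com:6188'.split(':'))  # True
#   valid_collector_webapp_address('0.0.0.0:6188'.split(':'))          # False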
def get_jmx(query, connection_timeout):
response = None
try:
response = urllib2.urlopen(query, timeout=connection_timeout)
json_data = response.read()
return json_data
except Exception:
return {"beans": {}}
finally:
if response is not None:
try:
response.close()
except:
pass
def _get_ha_state_from_json(string_json):
"""
Searches through the specified JSON string looking for HA state
enumerations.
:param string_json: the string JSON
:return: the value of the HA state (active, standby, etc)
"""
json_data = json.loads(string_json)
jmx_beans = json_data["beans"]
# look for NameNodeStatus-State first
for jmx_bean in jmx_beans:
if "name" not in jmx_bean:
continue
jmx_bean_name = jmx_bean["name"]
if jmx_bean_name == "Hadoop:service=NameNode,name=NameNodeStatus" and "State" in jmx_bean:
return jmx_bean["State"]
# look for FSNamesystem-tag.HAState last
for jmx_bean in jmx_beans:
if "name" not in jmx_bean:
continue
jmx_bean_name = jmx_bean["name"]
if jmx_bean_name == "Hadoop:service=NameNode,name=FSNamesystem":
return jmx_bean["tag.HAState"]
def _coerce_to_integer(value):
"""
Attempts to coerce a value to an integer. For an integer or a float this is
essentially a no-op or a truncation. If the parameter is a string, it is
first parsed as an integer and, failing that, as a float (then truncated).
:param value: the value to coerce
:return: the coerced value as an integer
"""
try:
return int(value)
except ValueError:
return int(float(value))
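# Minimal usage sketch for _coerce_to_integer (illustrative values only):
def _example_coerce_to_integer():
    assert _coerce_to_integer(7) == 7
    assert _coerce_to_integer('7') == 7
    assert _coerce_to_integer('7.9') == 7   # parsed as a float, then truncated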
|
|
#!/usr/bin/env python
import sys
import os
import numpy
from math import log
from pyx import canvas, text, path, graph, color, trafo, unit, attr, deco, style
unit.set(defaultunit="cm")
text.set(mode="latex")
text.preamble(r"\usepackage{times}")
text.preamble(r"\usepackage{sansmath}")
text.preamble(r"\sansmath")
text.preamble(r"\renewcommand*\familydefault{\sfdefault}")
painter = graph.axis.painter.regular( labeldist=0.1, labelattrs=[text.size(-3)], titleattrs=[text.size(-2)] )
method_colors = {
'HiFive-Probability':color.cmyk.Black,
'HiFive-Express':color.cmyk.CadetBlue,
'HiFive-Binning':color.cmyk.MidnightBlue,
'HiCNorm':color.cmyk.Dandelion,
'HiCPipe':color.cmyk.Mahogany,
'Matrix-Balancing':color.cmyk.OliveGreen,
}
def main():
out_fname = sys.argv[1]
basedir = '/'.join(os.path.dirname(os.path.realpath(__file__)).split('/')[:-2])
mm9_methods = {
'HiFive-Probability':'%s/Analysis/hifive_mm9_ESC_prob_correlations.txt' % basedir,
'HiFive-Express':'%s/Analysis/hifive_mm9_ESC_exp_correlations.txt' % basedir,
'HiFive-Binning':'%s/Analysis/hifive_mm9_ESC_bin_correlations.txt' % basedir,
'HiCNorm':'%s/Analysis/hicnorm_mm9_ESC_correlations.txt' % basedir,
'HiCPipe':'%s/Analysis/hicpipe_mm9_ESC_correlations.txt' % basedir,
'Matrix-Balancing':'%s/Analysis/mb_mm9_ESC_correlations.txt' % basedir,
}
hg19_methods = {
'HiFive-Probability':'%s/Analysis/hifive_hg19_GM12878_prob_correlations.txt' % basedir,
'HiFive-Express':'%s/Analysis/hifive_hg19_GM12878_exp_correlations.txt' % basedir,
'HiFive-Binning':'%s/Analysis/hifive_hg19_GM12878_bin_correlations.txt' % basedir,
'HiCNorm':'%s/Analysis/hicnorm_hg19_GM12878_correlations.txt' % basedir,
'HiCPipe':'%s/Analysis/hicpipe_hg19_GM12878_correlations.txt' % basedir,
'Matrix-Balancing':'%s/Analysis/mb_hg19_GM12878_correlations.txt' % basedir,
}
mm9_data = load_data(mm9_methods)
hg19_data = load_data(hg19_methods)
width = 16.8
spacer = 0.4
overall_width = (width - spacer * 2) / 2.6
c = canvas.canvas()
mm9_ranges_img, mm9_ranges_height = plot_dataset_ranges(mm9_data, width, "MM9 ESC")
mm9_ranges_img.text(0, mm9_ranges_height, 'a',
[text.halign.left, text.valign.top, text.size(-1)])
c.insert(mm9_ranges_img)
hg19_ranges_img, hg19_ranges_height = plot_dataset_ranges(hg19_data, width, "HG19 GM12878")
hg19_ranges_img.text(0, hg19_ranges_height, 'b',
[text.halign.left, text.valign.top, text.size(-1)])
c.insert(hg19_ranges_img, [trafo.translate(0, -hg19_ranges_height - spacer)])
overall_height = mm9_ranges_height * 0.6
mm9_overall_img = plot_overall(mm9_data, overall_width, overall_height, "MM9 ESC")
mm9_overall_img.text(0, overall_height + 0.1, 'c',
[text.halign.left, text.valign.top, text.size(-1)])
c.insert(mm9_overall_img, [trafo.translate(0, -hg19_ranges_height - overall_height - spacer * 2)])
hg19_overall_img = plot_overall(hg19_data, overall_width, overall_height, "HG19 GM12878")
hg19_overall_img.text(0, overall_height + 0.1, 'd',
[text.halign.left, text.valign.top, text.size(-1)])
c.insert(hg19_overall_img, [trafo.translate(overall_width * 1.6 + spacer * 2,
-hg19_ranges_height - overall_height - spacer * 2)])
c.insert(plot_key(overall_width * 0.6 + 0.4, overall_height),
[trafo.translate(overall_width + spacer + 0.6, -hg19_ranges_height - overall_height - spacer * 2)])
c.writePDFfile(out_fname)
def load_data(fdict):
all_data = {}
for name in fdict.keys():
if not os.path.exists(fdict[name]):
continue
data = []
for line in open(fdict[name]):
temp = line[:-1].split('\t')
data.append((int(temp[0]), int(temp[1].replace('NA', '0')), temp[2], int(temp[3]), float(temp[4])))
all_data[name] = numpy.array(data, dtype=numpy.dtype([('binsize', numpy.int32), ('range', numpy.int32),
('interaction', 'a5'), ('count', numpy.int32), ('correlation', numpy.float32)]))
return all_data
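# Hedged sketch of the correlations-file format load_data expects: five
# tab-separated columns per line (binsize, range, interaction, count,
# correlation); the numbers below are made up for illustration.
def _example_correlation_line():
    line = "10000\t50000\tcis\t12345\t0.85\n"
    temp = line[:-1].split('\t')
    return (int(temp[0]), int(temp[1].replace('NA', '0')), temp[2],
            int(temp[3]), float(temp[4]))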
def plot_overall(data, width, height, name):
vo = 0.55
ho = 0.7
plot_width = width - ho
plot_height = height - vo - 0.3
c = canvas.canvas()
methods = data.keys()
methods.sort()
bar_colors = []
cis_binsizes = numpy.unique(data[methods[0]]['binsize'][numpy.where(data[methods[0]]['interaction'] == 'cis')])
trans_binsizes = numpy.unique(data[methods[0]]['binsize'][numpy.where(data[methods[0]]['interaction'] == 'trans')])
Y = numpy.zeros((len(methods), cis_binsizes.shape[0] + trans_binsizes.shape[0]), dtype=numpy.float32)
for i, method in enumerate(methods):
for j, binsize in enumerate(cis_binsizes):
where = numpy.where((data[method]['binsize'] == binsize) *
(data[method]['interaction'] == 'cis') *
(data[method]['range'] == 0))
if where[0].shape[0] > 0:
Y[i, j] = data[method]['correlation'][where]
for j, binsize in enumerate(trans_binsizes):
where = numpy.where((data[method]['binsize'] == binsize) *
(data[method]['interaction'] == 'trans') *
(data[method]['range'] == 0))
if where[0].shape[0] > 0:
Y[i, j + cis_binsizes.shape[0]] = data[method]['correlation'][where]
bar_colors.append(method_colors[method])
Y = numpy.array(Y)
g = graph.graphxy(width=plot_width, height=plot_height,
x=graph.axis.nestedbar(painter=graph.axis.painter.bar(nameattrs=None)),
y=graph.axis.lin(painter=painter),
x2=graph.axis.lin(parter=None, min=0, max=1),
y2=graph.axis.lin(parter=None, min=0, max=1))
for i in range(len(methods)):
g.plot(graph.data.points(zip(zip(range(Y.shape[1]), [i] * Y.shape[1]), Y[i, :]), xname=1, y=2),
[graph.style.changebar([method_colors[methods[i]]])])
c.insert(g, [trafo.translate(ho, vo)])
for i, label in enumerate(["10Kb", "50Kb", "250Kb", "1Mb", "250Kb", "1Mb"]):
c.text(ho + plot_width * (i + 0.5) / 6.0, vo - 0.05, "%s" % label,
[text.halign.center, text.valign.top, text.size(-3)])
c.text(ho + plot_width * 2.0 / 6.0, 0.05, "cis",
[text.halign.center, text.valign.bottom, text.size(-3)])
c.stroke(path.line(ho + 0.2, vo * 0.5, ho - 0.2 + plot_width * 4.0 / 6.0, vo * 0.5), [style.linewidth.THin])
c.text(ho + plot_width * 5.0 / 6.0, 0.05, "trans",
[text.halign.center, text.valign.bottom, text.size(-3)])
c.stroke(path.line(ho + 0.2 + plot_width * 4.0 / 6.0, vo * 0.5, ho - 0.2 + plot_width, vo * 0.5), [style.linewidth.THin])
c.text(0, plot_height * 0.5 + vo, "Correlation",
[text.halign.center, text.valign.top, text.size(-3), trafo.rotate(90)])
c.text(plot_width * 0.5 + ho, height, name,
[text.halign.center, text.valign.top, text.size(-3)])
return c
def plot_dataset_ranges(data, width, label):
methods = data.keys()
binsizes = numpy.unique(data[methods[0]]['binsize'])
ho = 0.4
ho2 = 0.4
vo = 0.6
spacer = 0.25
plot_width = (width - ho * 2 - (binsizes.shape[0] - 1) * spacer) / binsizes.shape[0] - ho2
plot_height = plot_width
c = canvas.canvas()
for i, binsize in enumerate(binsizes):
img = plot_single_range(data, binsize, plot_width, plot_width)
c.insert(img, [trafo.translate((plot_width + spacer) * i + ho2 * (i + 1) + ho, vo)])
c.text(0, plot_height * 0.5 + vo, "Correlation",
[text.halign.center, text.valign.top, text.size(-3), trafo.rotate(90)])
c.text(width, plot_height * 0.5 + vo, label,
[text.halign.center, text.valign.top, text.size(-3), trafo.rotate(-90)])
c.text((plot_width + ho2) * 2 + spacer * 1.5 + ho, 0, "Interaction Range (bp)",
[text.halign.center, text.valign.bottom, text.size(-3)])
return c, plot_height + vo + 0.3
def plot_single_range(data, binsize, width, height):
plot_width = width
plot_height = height
c = canvas.canvas()
xmax = 0.0
methods = data.keys()
methods.sort()
for method in methods:
where = numpy.where((data[method]['binsize'] == binsize) *
(data[method]['interaction'] == 'cis') *
(data[method]['range'] > 0))
if where[0].shape[0] > 0:
xmax = max(xmax, numpy.amax(data[method]['range'][where]))
X = data[method]['range'][where]
X = numpy.r_[0, X]
X[0] = X[1] ** 2.0 / X[2]
xmin = X[0]
g = graph.graphxy(width=plot_width, height=plot_height,
x=graph.axis.log(painter=painter, min=X[0], max=xmax),
y=graph.axis.lin(painter=painter),
x2=graph.axis.lin(parter=None, min=0, max=1),
y2=graph.axis.lin(parter=None, min=0, max=1))
for x in X[1:-1]:
pos = ((log(x) - log(xmin)) / (log(xmax) - log(xmin)) * plot_width)
g.stroke(path.line(pos, 0, pos, plot_height), [style.linestyle.dotted, style.linewidth.THin])
X = (X[1:] ** 0.5) * (X[:-1] ** 0.5)
for method in methods:
where = numpy.where((data[method]['binsize'] == binsize) *
(data[method]['interaction'] == 'cis') *
(data[method]['range'] > 0))
if where[0].shape[0] > 0:
Y = data[method]['correlation'][where]
g.plot(graph.data.points(zip(X, Y), x=1, y=2),
[graph.style.line(lineattrs=[method_colors[method], style.linewidth.Thick])])
if binsize / 1000000 > 0:
binstring = "%iMb" % (binsize / 1000000)
elif binsize / 1000 > 0:
binstring = "%iKb" % (binsize / 1000)
else:
binstring = str(binsize)
g.text(plot_width / 2, plot_height + 0.3, "%s binning" % (binstring),
[text.halign.center, text.valign.top, text.size(-2)])
c.insert(g)
return c
def plot_key(width, height):
c = canvas.canvas()
step = height / float(len(method_colors))
for i, meth in enumerate(['HiFive-Probability', 'HiFive-Express', 'HiFive-Binning',
'HiCNorm', 'HiCPipe', 'Matrix-Balancing']):
c.fill(path.rect(0.2, height - step * (i + 0.5) - 0.1, 0.2, 0.2),
[method_colors[meth]])
c.text(0.5, height - step * (i + 0.5), meth, [text.halign.left, text.valign.middle, text.size(-2)])
return c
if __name__ == "__main__":
main()
|
|
"""
Cubes
=====
Tools to deal with spectroscopic data cubes.
Some features in Cubes require additional packages:
* smoothing - requires agpy_\'s smooth and parallel_map routines
* `pyregion <git://github.com/astropy/pyregion.git>`_
The 'grunt work' is performed by the :py:mod:`cubes` module
"""
# import parent package
import pyspeckit
from pyspeckit import spectrum
from ..spectrum.units import generate_xarr,SpectroscopicAxis
# import local things
import mapplot
import readers
import time
import numpy as np
from pyspeckit.parallel_map import parallel_map
import types
import copy
import itertools
from pyspeckit.spectrum import history
from astropy.io import fits
import cubes
from astropy import log
from astropy import wcs
from astropy import units
class Cube(spectrum.Spectrum):
def __init__(self, filename=None, cube=None, xarr=None, xunit=None,
errorcube=None, header=None, x0=0, y0=0,
maskmap=None,
**kwargs):
"""
A pyspeckit Cube object. Can be created from a FITS file on disk or
from an array or a `spectral_cube.SpectralCube` object. If an array
is used to instantiate the cube, the `xarr` keyword must be given,
specifying the X-axis units
Parameters
----------
filename : str, optional
The name of a FITS file to open and read from. Must be 3D
cube : `np.ndarray`, `spectral_cube.SpectralCube`, or \
`astropy.units.Quantity`
The data from which to instantiate a Cube object. If it is
an array or an astropy Quantity (which is an array with attached
units), the X-axis must be specified. If this is given as a
SpectralCube object, the X-axis and units should be handled
automatically.
xarr : `np.ndarray` or `astropy.units.Quantity`, optional
The X-axis of the spectra from each cube. This actually
corresponds to axis 0, or what we normally refer to as the Z-axis
of the cube, but it indicates the X-axis in a plot of intensity vs
wavelength. The units for this array are specified in the `xunit`
keyword unless a `~astropy.units.Quantity` is given.
xunit : str, optional
The unit of the ``xarr`` array if ``xarr`` is given as a numpy
array
errorcube : `np.ndarray`, `spectral_cube.SpectralCube`,\
or `~astropy.units.Quantity`, optional
A cube with the same shape as the input cube providing the 1-sigma
error for each voxel. This can be specified more efficiently as an
error map for most use cases, but that approach has not yet been
implemented. However, you can pass a 2D error map to `fiteach`.
header : `fits.Header` or dict, optional
The header associated with the data. Only needed if the cube is
given as an array or a quantity.
x0, y0 : int
The initial spectrum to use. The `Cube` object can be treated as
a `pyspeckit.Spectrum` object, with all the associated tools
(plotter, fitter) using the `set_spectrum` method to select a pixel
from the cube to plot and fit. However, it is generally more sensible
to extract individual spectra and treat them separately using the
`get_spectrum` method, so these keywords MAY BE DEPRECATED in the
future.
maskmap : `np.ndarray`, optional
A boolean mask map, where ``True`` implies that the data are good.
This will be used for both plotting using `mapplot` and fitting
using `fiteach`.
"""
if filename is not None:
self.load_fits(filename)
return
else:
if hasattr(cube, 'spectral_axis'):
# Load from a SpectralCube instance
self.cube = cube.hdu.data
if (cube.unit in ('undefined', units.dimensionless_unscaled)
and 'BUNIT' in cube._meta):
self.unit = cube._meta['BUNIT']
else:
self.unit = cube.unit
log.debug("Self.unit: {0}".format(self.unit))
if xarr is None:
xarr = SpectroscopicAxis(cube.spectral_axis,
unit=cube.spectral_axis.unit,
refX=cube.wcs.wcs.restfrq, refX_unit='Hz')
if header is None:
header = cube.header
elif hasattr(cube, 'unit'):
self.cube = cube.value
self.unit = cube.unit
else:
self.cube = cube
if hasattr(errorcube, 'spectral_axis'):
# Load from a SpectralCube instance
self.errorcube = errorcube.hdu.data
elif hasattr(errorcube, 'unit'):
self.errorcube = errorcube.value
else:
self.errorcube = errorcube
self.xarr = generate_xarr(xarr, unit=xunit)
self.header = header
self.error = None
if self.cube is not None:
self.data = self.cube[:,y0,x0]
log.debug("Self.unit before header: {0}".format(self.unit))
if self.header is not None:
self.parse_header(self.header)
else:
log.debug("self.header is None: {0}".format(self.header))
self.unit = 'undefined'
self.header = fits.Header()
log.debug("Self.unit after header: {0}".format(self.unit))
if maskmap is not None:
if maskmap.ndim != 2:
raise ValueError("Mask map must be two-dimensional.")
self.maskmap = maskmap
else:
self.maskmap = np.ones(self.cube.shape[1:],dtype='bool')
if isinstance(filename,str):
self.fileprefix = filename.rsplit('.', 1)[0] # Everything prior to .fits or .txt
else:
self.fileprefix = "pyfitsHDU"
self.plotter = spectrum.plotters.Plotter(self)
self._register_fitters()
self.specfit = spectrum.fitters.Specfit(self,Registry=self.Registry)
self.baseline = spectrum.baseline.Baseline(self)
self.speclines = spectrum.speclines
# Initialize writers
self.writer = {}
for writer in spectrum.writers.writers:
self.writer[writer] = spectrum.writers.writers[writer](self)
# Special. This needs to be modified to be more flexible; for now I need it to work for nh3
self.plot_special = None
self.plot_special_kwargs = {}
self._modelcube = None
self.wcs = wcs.WCS(self.header)
self.wcs.wcs.fix()
self._spectral_axis_number = self.wcs.wcs.spec+1
self._first_cel_axis_num = np.where(self.wcs.wcs.axis_types // 1000 == 2)[0][0]+1
# TODO: improve this!!!
self.system = ('galactic'
if ('CTYPE{0}'.format(self._first_cel_axis_num)
in self.header and 'GLON' in
self.header['CTYPE{0}'.format(self._first_cel_axis_num)])
else 'celestial')
self.mapplot = mapplot.MapPlotter(self)
def load_fits(self, fitsfile):
from spectral_cube import SpectralCube
mycube = SpectralCube.read(fitsfile)
return self.load_spectral_cube(mycube)
def load_spectral_cube(self, cube):
"""
Load the cube from a spectral_cube.SpectralCube object
"""
self.__init__(cube=cube)
def __repr__(self):
return r'<Cube object over spectral range %6.5g : %6.5g %s and flux range = [%2.1f, %2.1f] %s with shape %r at %s>' % \
(self.xarr.min().value, self.xarr.max().value, self.xarr.unit,
self.data.min(), self.data.max(), self.unit,
self.cube.shape, str(hex(self.__hash__())))
def copy(self,deep=True):
"""
Create a copy of the spectrum with its own plotter, fitter, etc.
Useful for, e.g., comparing smoothed to unsmoothed data
"""
newcube = copy.copy(self)
if deep:
newcube.xarr = copy.copy(self.xarr)
newcube.data = copy.copy(self.data)
if self.error is not None:
newcube.error = copy.copy(self.error)
newcube.header = copy.copy(self.header)
newcube.plotter = self.plotter.copy(parent=newcube)
newcube._register_fitters()
newcube.specfit = self.specfit.copy(parent=newcube)
newcube.specfit.Spectrum.plotter = newcube.plotter
newcube.baseline = self.baseline.copy(parent=newcube)
newcube.baseline.Spectrum.plotter = newcube.plotter
newcube.mapplot = self.mapplot.copy(parent=newcube)
newcube.mapplot.Cube = newcube
return newcube
def slice(self, start=None, stop=None, unit='pixel', preserve_fits=False,
copy=True):
"""
Slice a cube along the spectral axis
(equivalent to "spectral_slab" from the spectral_cube package)
Parameters
----------
start : numpy.float or int
start of slice
stop : numpy.float or int
stop of slice
unit : str
allowed values are any supported physical unit, 'pixel'
"""
x_in_units = self.xarr.as_unit(unit)
start_ind = x_in_units.x_to_pix(start)
stop_ind = x_in_units.x_to_pix(stop)
if start_ind > stop_ind:
start_ind, stop_ind = stop_ind, start_ind
spectrum_slice = slice(start_ind,stop_ind)
if not copy:
raise NotImplementedError("Must copy when slicing a cube.")
newcube = self.copy()
newcube.cube = newcube.cube[spectrum_slice,:,:]
if hasattr(newcube,'errcube'):
newcube.errcube = newcube.errcube[spectrum_slice,:,:]
newcube.data = newcube.data[spectrum_slice]
if newcube.error is not None:
newcube.error = newcube.error[spectrum_slice]
newcube.xarr = newcube.xarr[spectrum_slice]
# create new specfit / baseline instances (otherwise they'll be the wrong length)
newcube._register_fitters()
newcube.baseline = spectrum.baseline.Baseline(newcube)
newcube.specfit = spectrum.fitters.Specfit(newcube,Registry=newcube.Registry)
if preserve_fits:
newcube.specfit.modelpars = self.specfit.modelpars
newcube.specfit.parinfo = self.specfit.parinfo
newcube.baseline.baselinepars = self.baseline.baselinepars
newcube.baseline.order = self.baseline.order
return newcube
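# Hedged usage sketch for slice() (not executed; the file name and the
# velocity range below are illustrative):
#
#   cube = Cube('mycube.fits')
#   subcube = cube.slice(-25, 25, unit='km/s', preserve_fits=True)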
def __getitem__(self, indx):
"""
If [] is used on a cube, slice on the cube and use
the first dimension to slice on the xarr and the data
"""
return Cube(xarr=self.xarr.__getitem__(indx[0]), cube=self.cube[indx],
errorcube=self.errorcube[indx] if self.errorcube else None,
maskmap=self.maskmap)
def set_spectrum(self, x, y):
self.data = self.cube[:,y,x]
if self.errorcube is not None:
self.error = self.errorcube[:,y,x]
def plot_spectrum(self, x, y, plot_fit=False, **kwargs):
"""
Fill the .data array with a real spectrum and plot it
"""
self.set_spectrum(x,y)
if self.plot_special is None:
self.plotter(**kwargs)
if plot_fit:
self.plot_fit(x,y)
self.plotted_spectrum = self
else:
sp = self.get_spectrum(x,y)
sp.plot_special = types.MethodType(self.plot_special, sp, sp.__class__)
self._spdict = sp.plot_special(**dict(kwargs.items()+
self.plot_special_kwargs.items()))
self.plotted_spectrum = sp
self.plotter = sp.plotter
self.plotter.refresh = lambda: [spi.plotter.refresh()
for spi in self._spdict.values()]
self.specfit.modelplot = [comp
for spi in self._spdict.values()
for comp in spi.specfit.modelplot]
self.specfit._plotted_components = [comp
for spi in self._spdict.values()
for comp in spi.specfit._plotted_components]
def plot_fit(self, x, y, silent=False, **kwargs):
"""
If fiteach has been run, plot the best fit at the specified location
Parameters
----------
x : int
y : int
The x, y coordinates of the pixel (indices 2 and 1 respectively in
numpy notation)
"""
if not hasattr(self,'parcube'):
if not silent: log.info("Must run fiteach before plotting a fit. "
"If you want to fit a single spectrum, "
"use plot_spectrum() and specfit() directly.")
return
if self.plot_special is not None:
# don't try to overplot a fit on a "special" plot
# this is already handled in plot_spectrum
return
self.specfit.modelpars = self.parcube[:,y,x]
self.specfit.npeaks = self.specfit.fitter.npeaks
self.specfit.model = self.specfit.fitter.n_modelfunc(self.specfit.modelpars,
**self.specfit.fitter.modelfunc_kwargs)(self.xarr)
# set the parinfo values correctly for annotations
for pi,p,e in zip(self.specfit.parinfo,
self.specfit.modelpars,
self.errcube[:,y,x]):
try:
pi['value'] = p
pi['error'] = e
except ValueError:
# likely to happen for failed fits
pass
self.specfit.plot_fit(**kwargs)
def plot_apspec(self, aperture, coordsys=None, reset_ylimits=True,
wunit='arcsec',
method='mean', **kwargs):
"""
Extract an aperture using cubes.extract_aperture
(defaults to Cube coordinates)
Parameters
----------
aperture : list
A list of aperture parameters, e.g.
* For a circular aperture, len(ap)=3:
+ ``ap = [xcen,ycen,radius]``
* For an elliptical aperture, len(ap)=5:
+ ``ap = [xcen,ycen,height,width,PA]``
coordsys : None or str
The coordinate system of the aperture (e.g., galactic, fk5, None
for pixel)
method : 'mean' or 'sum'
Either average over parallel spectra or sum them.
"""
if self.plot_special is None:
self.set_apspec(aperture, coordsys=coordsys, method=method)
self.plotter(reset_ylimits=reset_ylimits, **kwargs)
else:
#self.plot_special(reset_ylimits=reset_ylimits, **dict(kwargs.items()+self.plot_special_kwargs.items()))
sp = self.get_apspec(aperture, coordsys=coordsys, wunit=wunit, method=method)
sp.plot_special = types.MethodType(self.plot_special, sp, sp.__class__)
sp.plot_special(reset_ylimits=reset_ylimits, **dict(kwargs.items()+self.plot_special_kwargs.items()))
def get_spectrum(self, x, y):
"""
Very simple: get the spectrum at coordinates x,y
(inherits fitter from self)
Returns a pyspeckit.Spectrum instance
"""
ct = 'CTYPE{0}'.format(self._first_cel_axis_num)
header = cubes.speccen_header(fits.Header(cards=[(k,v) for k,v in
self.header.iteritems()
if k != 'HISTORY']),
lon=x, lat=y, system=self.system,
proj=(self.header[ct][-3:]
if ct in self.header else
'CAR'))
sp = pyspeckit.Spectrum(xarr=self.xarr.copy(), data=self.cube[:,y,x],
header=header,
error=(self.errorcube[:,y,x] if self.errorcube
is not None else None))
sp.specfit = copy.copy(self.specfit)
# explicitly re-do this (test)
sp.specfit.includemask = self.specfit.includemask.copy()
sp.specfit.Spectrum = sp
if hasattr(self,'parcube'):
sp.specfit.modelpars = self.parcube[:,y,x]
sp.specfit.fitter.mpp = sp.specfit.modelpars # also for annotations (differs depending on which function... sigh... need to unify)
if hasattr(self.specfit,'parinfo') and self.specfit.parinfo is not None:
# set the parinfo values correctly for annotations
for pi,p,e in zip(sp.specfit.parinfo, sp.specfit.modelpars, self.errcube[:,y,x]):
try:
pi['value'] = p
pi['error'] = e
except ValueError:
pass
if hasattr(self.specfit,'fitter') and self.specfit.fitter is not None:
sp.specfit.npeaks = self.specfit.fitter.npeaks
sp.specfit.fitter.npeaks = len(sp.specfit.modelpars) / sp.specfit.fitter.npars
sp.specfit.fitter.parinfo = sp.specfit.parinfo
sp.specfit.model = sp.specfit.fitter.n_modelfunc(sp.specfit.modelpars,**sp.specfit.fitter.modelfunc_kwargs)(sp.xarr)
return sp
def get_apspec(self, aperture, coordsys=None, method='mean', **kwargs):
"""
Extract an aperture using cubes.extract_aperture
(defaults to Cube pixel coordinates)
*aperture* [tuple or list] (x, y, radius)
The aperture to use when extracting the data
*coordsys* [ 'celestial' | 'galactic' | None]
the coordinate system the aperture is specified in
None indicates pixel coordinates (default)
*wunit* [str]
arcsec, arcmin, or degree
"""
import cubes
if coordsys is not None:
wcs = self.mapplot.wcs
else:
wcs = None
data = cubes.extract_aperture(self.cube, aperture,
coordsys=coordsys,
wcs=wcs,
method=method,
**kwargs)
if self.errorcube is not None:
error = cubes.extract_aperture(self.errorcube, aperture,
coordsys=coordsys,
wcs=self.mapplot.wcs,
method='error', **kwargs)
else:
error = None
ct = 'CTYPE{0}'.format(self._first_cel_axis_num)
header = cubes.speccen_header(fits.Header(cards=[(k,v) for k,v in
self.header.iteritems()
if k != 'HISTORY']),
lon=aperture[0],
lat=aperture[1],
system=self.system,
proj=self.header[ct][-3:])
if len(aperture) == 3:
header['APRADIUS'] = aperture[2]
if len(aperture) == 5:
header['APMAJ'] = aperture[2]
header['APMIN'] = aperture[3]
header['APREFF'] = (aperture[2]*aperture[3])**0.5
header['APPA'] = aperture[4]
sp = pyspeckit.Spectrum(xarr=self.xarr.copy(),
data=data,
error=error,
header=header)
sp.specfit = self.specfit.copy(parent=sp)
return sp
def set_apspec(self, aperture, coordsys=None, method='mean'):
"""
Extract an aperture using cubes.extract_aperture
(defaults to Cube coordinates)
"""
import cubes
if coordsys is not None:
self.data = cubes.extract_aperture( self.cube, aperture,
coordsys=coordsys,
wcs=self.mapplot.wcs,
method=method )
else:
self.data = cubes.extract_aperture(self.cube, aperture,
coordsys=None, method=method)
def get_modelcube(self, update=False):
if self._modelcube is None or update:
yy,xx = np.indices(self.mapplot.plane.shape)
self._modelcube = np.zeros_like(self.cube)
for x,y in zip(xx.flat,yy.flat):
self._modelcube[:,y,x] = self.specfit.get_full_model(pars=self.parcube[:,y,x])
return self._modelcube
def fiteach(self, errspec=None, errmap=None, guesses=(), verbose=True,
verbose_level=1, quiet=True, signal_cut=3, usemomentcube=False,
blank_value=0, integral=True, direct=False, absorption=False,
use_nearest_as_guess=False, use_neighbor_as_guess=False,
start_from_point=(0,0), multicore=0, position_order = None,
continuum_map=None, **fitkwargs):
"""
Fit a spectrum to each valid pixel in the cube
For guesses, priority is *use_nearest_as_guess*, *usemomentcube*,
*guesses*, None
Parameters
----------
use_nearest_as_guess: bool
Unless the fitted point is the first, it will find the nearest
other point with a successful fit and use its best-fit parameters
as the guess
use_neighbor_as_guess: bool
Set this keyword to use the average best-fit parameters from
neighboring positions with successful fits as the guess
start_from_point: tuple(int,int)
Either start from the center or from a point defined by a tuple.
Work outward from that starting point.
position_order: ndarray[naxis=2]
2D map of region with pixel values indicating the order in which
to carry out the fitting. Any type with increasing pixel values.
guesses: tuple or ndarray[naxis=3]
Either a tuple/list of guesses with len(guesses) = npars or a cube
of guesses with shape [npars, ny, nx].
NOT TRUE, but a good idea in principle:
You can also use a dictionary of the form {(y,x): [list of length
npars]}, where (y,x) specifies a pixel location. If the dictionary
method is used, npars must be specified and it sets the length of
the first parameter axis
signal_cut: float
Minimum signal-to-noise ratio to "cut" on (i.e., if peak in a given
spectrum has s/n less than this value, ignore it)
blank_value: float
Value to replace non-fitted locations with. A good alternative is
numpy.nan
verbose: bool
verbose_level: int
Controls how much is output.
0,1 - only changes frequency of updates in loop
2 - print out messages when skipping pixels
3 - print out messages when fitting pixels
4 - specfit will be verbose
multicore: int
if >0, try to use multiprocessing via parallel_map to run on multiple cores
continuum_map: np.ndarray
Same shape as error map. Subtract this from data before estimating noise.
"""
if 'multifit' in fitkwargs:
log.warn("The multifit keyword is no longer required. All fits "
"allow for multiple components.", DeprecationWarning)
if not hasattr(self.mapplot,'plane'):
self.mapplot.makeplane()
yy,xx = np.indices(self.mapplot.plane.shape)
if isinstance(self.mapplot.plane, np.ma.core.MaskedArray):
OK = ((~self.mapplot.plane.mask) &
self.maskmap.astype('bool')).astype('bool')
else:
OK = (np.isfinite(self.mapplot.plane) &
self.maskmap.astype('bool')).astype('bool')
# NAN guesses rule out the model too
if hasattr(guesses,'shape') and guesses.shape[1:] == self.cube.shape[1:]:
bad = np.isnan(guesses).sum(axis=0).astype('bool')
OK &= (~bad)
distance = ((xx)**2 + (yy)**2)**0.5
if start_from_point == 'center':
            start_from_point = (xx.max()/2., yy.max()/2.)
if hasattr(position_order,'shape') and position_order.shape == self.cube.shape[1:]:
sort_distance = np.argsort(position_order.flat)
else:
d_from_start = np.roll( np.roll( distance, start_from_point[0], 0),
start_from_point[1], 1)
sort_distance = np.argsort(d_from_start.flat)
valid_pixels = zip(xx.flat[sort_distance][OK.flat[sort_distance]],
yy.flat[sort_distance][OK.flat[sort_distance]])
if len(valid_pixels) != len(set(valid_pixels)):
raise ValueError("There are non-unique pixels in the 'valid pixel' list. "
"This should not be possible and indicates a major error.")
elif len(valid_pixels) == 0:
raise ValueError("No valid pixels selected.")
if verbose_level > 0:
log.debug("Number of valid pixels: %i" % len(valid_pixels))
if usemomentcube:
npars = self.momentcube.shape[0]
else:
npars = len(guesses)
if npars == 0:
raise ValueError("Parameter guesses are required.")
self.parcube = np.zeros((npars,)+self.mapplot.plane.shape)
self.errcube = np.zeros((npars,)+self.mapplot.plane.shape)
if integral: self.integralmap = np.zeros((2,)+self.mapplot.plane.shape)
# newly needed as of March 27, 2012. Don't know why.
if 'fittype' in fitkwargs: self.specfit.fittype = fitkwargs['fittype']
self.specfit.fitter = self.specfit.Registry.multifitters[self.specfit.fittype]
# array to store whether pixels have fits
self.has_fit = np.zeros(self.mapplot.plane.shape, dtype='bool')
self._counter = 0
t0 = time.time()
def fit_a_pixel(iixy):
ii,x,y = iixy
sp = self.get_spectrum(x,y)
# very annoying - cannot use min/max without checking type
# maybe can use np.asarray here?
            if hasattr(sp.data,'mask'):
                # grab the mask up front: assigning to masked elements unmasks them
                datamask = sp.data.mask
                sp.data[datamask] = np.nan
                sp.error[datamask] = np.nan
                sp.data = np.array(sp.data)
                sp.error = np.array(sp.error)
if errspec is not None:
sp.error = errspec
elif errmap is not None:
sp.error = np.ones(sp.data.shape) * errmap[y,x]
else:
if verbose_level > 1 and ii==0:
log.warn("WARNING: using data std() as error.")
sp.error[:] = sp.data[sp.data==sp.data].std()
if sp.error is not None and signal_cut > 0:
if continuum_map is not None:
snr = (sp.data-continuum_map[y,x]) / sp.error
else:
snr = sp.data / sp.error
if absorption:
max_sn = np.nanmax(-1*snr)
else:
max_sn = np.nanmax(snr)
if max_sn < signal_cut:
if verbose_level > 1:
log.info("Skipped %4i,%4i (s/n=%0.2g)" % (x,y,max_sn))
return
elif np.isnan(max_sn):
if verbose_level > 1:
log.info("Skipped %4i,%4i (s/n is nan; max(data)=%0.2g, min(error)=%0.2g)" %
(x,y,np.nanmax(sp.data),np.nanmin(sp.error)))
return
if verbose_level > 2:
log.info("Fitting %4i,%4i (s/n=%0.2g)" % (x,y,max_sn))
else:
max_sn = None
sp.specfit.Registry = self.Registry # copy over fitter registry
# Do some homework for local fits
xpatch = np.array([1,1,1,0,0,0,-1,-1,-1],dtype=np.int)
ypatch = np.array([1,0,-1,1,0,-1,1,0,-1],dtype=np.int)
local_fits = self.has_fit[ypatch+y,xpatch+x]
if use_nearest_as_guess and self.has_fit.sum() > 0:
if verbose_level > 1 and ii == 0 or verbose_level > 4:
log.info("Using nearest fit as guess")
d = np.roll( np.roll( distance, x, 0), y, 1)
# If there's no fit, set its distance to be unreasonably large
nearest_ind = np.argmin(d+1e10*(True-self.has_fit))
nearest_x, nearest_y = xx.flat[nearest_ind],yy.flat[nearest_ind]
gg = self.parcube[:,nearest_y,nearest_x]
elif use_neighbor_as_guess and np.any(local_fits):
# Array is N_guess X Nvalid_nbrs so averaging over
# Axis=1 is the axis of all valid neighbors
gg = np.mean(self.parcube[:,(ypatch+y)[local_fits],(xpatch+x)[local_fits]],axis=1)
elif usemomentcube:
if verbose_level > 1 and ii == 0: log.info("Using moment cube")
gg = self.momentcube[:,y,x]
elif hasattr(guesses,'shape') and guesses.shape[1:] == self.cube.shape[1:]:
if verbose_level > 1 and ii == 0: log.info("Using input guess cube")
gg = guesses[:,y,x]
elif isinstance(guesses, dict):
if verbose_level > 1 and ii == 0: log.info("Using input guess dict")
gg = guesses[(y,x)]
else:
if verbose_level > 1 and ii == 0: log.info("Using input guess")
gg = guesses
if np.all(np.isfinite(gg)):
try:
sp.specfit(guesses=gg, quiet=verbose_level<=3,
verbose=verbose_level>3, **fitkwargs)
except Exception as ex:
log.exception("Fit number %i at %i,%i failed on error %s" % (ii,x,y, str(ex)))
log.exception("Guesses were: {0}".format(str(gg)))
log.exception("Fitkwargs were: {0}".format(str(fitkwargs)))
if isinstance(ex,KeyboardInterrupt):
raise ex
self.parcube[:,y,x] = sp.specfit.modelpars
self.errcube[:,y,x] = sp.specfit.modelerrs
if integral:
self.integralmap[:,y,x] = sp.specfit.integral(direct=direct,
return_error=True)
self.has_fit[y,x] = True
else:
self.has_fit[y,x] = False
self.parcube[:,y,x] = blank_value
self.errcube[:,y,x] = blank_value
if integral: self.integralmap[:,y,x] = blank_value
                if blank_value != 0:
                    # capture the zero-mask before overwriting parcube so the
                    # same pixels are blanked in errcube as well
                    zeromask = (self.parcube == 0)
                    self.parcube[zeromask] = blank_value
                    self.errcube[zeromask] = blank_value
self._counter += 1
if verbose:
                if ii % (max(10**(3-verbose_level), 1)) == 0:
snmsg = " s/n=%5.1f" % (max_sn) if max_sn is not None else ""
npix = len(valid_pixels)
pct = 100 * self._counter/float(npix) * multicore
log.info("Finished fit %6i of %6i at (%4i,%4i)%s. Elapsed time is %0.1f seconds. %%%01.f" %
(self._counter, npix, x, y, snmsg, time.time()-t0, pct))
if sp.specfit.modelerrs is None:
raise TypeError("The fit never completed; something has gone wrong.")
if integral:
return ((x,y), sp.specfit.modelpars, sp.specfit.modelerrs,
self.integralmap[:,y,x])
else:
return ((x,y), sp.specfit.modelpars, sp.specfit.modelerrs)
#### BEGIN TEST BLOCK ####
# This test block is to make sure you don't run a 30 hour fitting
# session that's just going to crash at the end.
# try a first fit for exception-catching
try0 = fit_a_pixel((0,valid_pixels[0][0],valid_pixels[0][1]))
try:
assert len(try0[1]) == len(guesses) == len(self.parcube) == len(self.errcube)
assert len(try0[2]) == len(guesses) == len(self.parcube) == len(self.errcube)
except TypeError as ex:
if try0 is None:
raise AssertionError("The first fitted pixel did not yield a "
"fit. Please try starting from a "
"different pixel.")
else:
raise ex
except AssertionError:
raise AssertionError("The first pixel had the wrong fit "
"parameter shape. This is probably "
"a bug; please report it.")
# This is a secondary test... I'm not sure it's necessary, but it
# replicates what's inside the fit_a_pixel code and so should be a
# useful sanity check
x,y = valid_pixels[0]
sp = self.get_spectrum(x,y)
sp.specfit.Registry = self.Registry # copy over fitter registry
# this reproduced code is needed because the functional wrapping
# required for the multicore case prevents gg from being set earlier
if usemomentcube:
gg = self.momentcube[:,y,x]
elif hasattr(guesses,'shape') and guesses.shape[1:] == self.cube.shape[1:]:
gg = guesses[:,y,x]
else:
gg = guesses
# This is NOT in a try/except block because we want to raise the
# exception here if an exception is going to happen
sp.specfit(guesses=gg, **fitkwargs)
#### END TEST BLOCK ####
if multicore > 0:
sequence = [(ii,x,y) for ii,(x,y) in tuple(enumerate(valid_pixels))]
result = parallel_map(fit_a_pixel, sequence, numcores=multicore)
self._result = result # backup - don't want to lose data in the case of a failure
# a lot of ugly hacking to deal with the way parallel_map returns
# its results needs TWO levels of None-filtering, because any
# individual result can be None (I guess?) but apparently (and this
# part I don't believe) any individual *fit* result can be None as
# well (apparently the x,y pairs can also be None?)
merged_result = [core_result for core_result in result if
core_result is not None]
# for some reason, every other time I run this code, merged_result
# ends up with a different intrinsic shape. This is an attempt to
# force it to maintain a sensible shape.
try:
if integral:
((x,y), m1, m2, intgl) = merged_result[0]
else:
((x,y), m1, m2) = merged_result[0]
except ValueError:
if verbose > 1:
log.exception("ERROR: merged_result[0] is {0} which has the"
" wrong shape".format(merged_result[0]))
merged_result = itertools.chain.from_iterable(merged_result)
for TEMP in merged_result:
if TEMP is None:
# this shouldn't be possible, but it appears to happen
# anyway. parallel_map is great, up to a limit that was
# reached long before this level of complexity
log.debug("Skipped a None entry: {0}".format(str(TEMP)))
continue
try:
if integral:
((x,y), modelpars, modelerrs, intgl) = TEMP
else:
((x,y), modelpars, modelerrs) = TEMP
except TypeError:
# implies that TEMP does not have the shape ((a,b),c,d)
# as above, shouldn't be possible, but it happens...
log.debug("Skipped a misshapen entry: {0}".format(str(TEMP)))
continue
if ((len(modelpars) != len(modelerrs)) or
(len(modelpars) != len(self.parcube))):
raise ValueError("There was a serious problem; modelpar and"
" error shape don't match that of the "
"parameter cubes")
if np.any(np.isnan(modelpars)) or np.any(np.isnan(modelerrs)):
self.parcube[:,y,x] = np.nan
self.errcube[:,y,x] = np.nan
self.has_fit[y,x] = False
else:
self.parcube[:,y,x] = modelpars
self.errcube[:,y,x] = modelerrs
self.has_fit[y,x] = max(modelpars) > 0
if integral:
self.integralmap[:,y,x] = intgl
else:
for ii,(x,y) in enumerate(valid_pixels):
fit_a_pixel((ii,x,y))
# March 27, 2014: This is EXTREMELY confusing. This isn't in a loop...
# make sure the fitter / fittype are set for the cube
# this has to be done within the loop because skipped-over spectra
# don't ever get their fittypes set
self.specfit.fitter = sp.specfit.fitter
self.specfit.fittype = sp.specfit.fittype
self.specfit.parinfo = sp.specfit.parinfo
if verbose:
log.info("Finished final fit %i. "
"Elapsed time was %0.1f seconds" % (ii, time.time()-t0))
def momenteach(self, verbose=True, verbose_level=1, multicore=0, **kwargs):
"""
Return a cube of the moments of each pixel
Parameters
----------
multicore: int
if >0, try to use multiprocessing via parallel_map to run on multiple cores
"""
if not hasattr(self.mapplot,'plane'):
self.mapplot.makeplane()
yy,xx = np.indices(self.mapplot.plane.shape)
if isinstance(self.mapplot.plane, np.ma.core.MaskedArray):
OK = (True-self.mapplot.plane.mask) * self.maskmap
else:
OK = np.isfinite(self.mapplot.plane) * self.maskmap
valid_pixels = zip(xx[OK],yy[OK])
# run the moment process to find out how many elements are in a moment
_temp_moment = self.get_spectrum(yy[OK][0],xx[OK][0]).moments(**kwargs)
self.momentcube = np.zeros((len(_temp_moment),)+self.mapplot.plane.shape)
t0 = time.time()
def moment_a_pixel(iixy):
ii,x,y = iixy
sp = self.get_spectrum(x,y)
self.momentcube[:,y,x] = sp.moments(**kwargs)
if verbose:
if ii % 10**(3-verbose_level) == 0:
log.info("Finished moment %i. "
"Elapsed time is %0.1f seconds" % (ii, time.time()-t0))
return ((x,y), self.momentcube[:,y,x])
if multicore > 0:
sequence = [(ii,x,y) for ii,(x,y) in tuple(enumerate(valid_pixels))]
result = parallel_map(moment_a_pixel, sequence, numcores=multicore)
merged_result = [core_result
for core_result in result
if core_result is not None]
for mr in merged_result:
for TEMP in mr:
((x,y), moments) = TEMP
self.momentcube[:,y,x] = moments
else:
for ii,(x,y) in enumerate(valid_pixels):
moment_a_pixel((ii,x,y))
if verbose:
log.info("Finished final moment %i. "
"Elapsed time was %0.1f seconds" % (ii, time.time()-t0))
def show_moment(self, momentnumber, **kwargs):
"""
If moments have been computed, display them in the mapplot window
"""
if not hasattr(self,'momentcube'):
raise ValueError("Compute moments first")
self.mapplot.plane = self.momentcube[momentnumber,:,:].squeeze()
self.mapplot(estimator=None, **kwargs)
def show_fit_param(self, parnumber, **kwargs):
"""
If pars have been computed, display them in the mapplot window
Parameters
----------
parnumber : int
The index of the parameter in the parameter cube
"""
if not hasattr(self,'parcube'):
raise ValueError("Compute fit parameters first")
self.mapplot.plane = self.parcube[parnumber,:,:].squeeze()
self.mapplot(estimator=None, **kwargs)
def load_model_fit(self, fitsfilename, npars, npeaks=1, fittype=None,
_temp_fit_loc=(0,0)):
"""
Load a parameter + error cube into the .parcube and .errcube
attributes.
Parameters
----------
fitsfilename : str
The filename containing the parameter cube written with `write_fit`
npars : int
The number of parameters in the model fit for a single spectrum
npeaks : int
The number of independent peaks fit toward each spectrum
fittype : str, optional
The name of the fittype, e.g. 'gaussian' or 'voigt', from the
pyspeckit fitter registry. This is optional; it should have
been written to the FITS header and will be read from there if
it is not specified
_temp_fit_loc : tuple (int,int)
The initial spectrum to use to generate components of the class.
This should not need to be changed.
"""
try:
import astropy.io.fits as pyfits
except ImportError:
import pyfits
cubefile = pyfits.open(fitsfilename,ignore_missing_end=True)
cube = cubefile[0].data
if cube.shape[0] != npars * npeaks * 2:
raise ValueError("The cube shape is not correct. The cube has "
"first dimension = {0}, but it should be {1}. "
"The keyword npars = number of parameters per "
"model component, and npeaks = number of "
"independent peaks. You gave npars={2} and "
"npeaks={3}".format(cube.shape[0], npars*npeaks*2,
npars, npeaks))
# grab a spectrum and fit it however badly you want
# this is just to __init__ the relevant data structures
x,y = _temp_fit_loc
sp = self.get_spectrum(x,y)
if fittype is None:
if cubefile[0].header.get('FITTYPE'):
fittype = cubefile[0].header.get('FITTYPE')
else:
raise KeyError("Must specify FITTYPE or include it in cube header.")
self.parcube = cube[:npars*npeaks,:,:]
self.errcube = cube[npars*npeaks:npars*npeaks*2,:,:]
# make sure params are within limits
fitter = self.specfit.Registry.multifitters[fittype]
guesses,throwaway = fitter._make_parinfo(npeaks=npeaks)
try:
guesses.values = self.parcube[:,y,x]
except ValueError:
OKmask = (self.parcube != 0).sum(axis=0) > 0
whereOK = np.where(OKmask)
guesses.values = self.parcube[:,whereOK[0][0],whereOK[1][0]]
sp.specfit(fittype=fittype, guesses=guesses.values)
self.specfit.fitter = sp.specfit.fitter
self.specfit.fittype = sp.specfit.fittype
self.specfit.parinfo = sp.specfit.parinfo
try:
import cubes
def smooth(self,smooth,**kwargs):
"""
Smooth the spectrum by factor `smooth`.
Documentation from the :mod:`cubes.spectral_smooth` module:
"""
import cubes
smooth = round(smooth)
self.cube = cubes.spectral_smooth(self.cube,smooth,**kwargs)
self.xarr = self.xarr[::smooth]
if hasattr(self,'data'):
self.data = pyspeckit.smooth.smooth(self.data,smooth,**kwargs)
if len(self.xarr) != self.cube.shape[0]:
raise ValueError("Convolution resulted in different X and Y array lengths. Convmode should be 'same'.")
if self.errorcube is not None:
self.errorcube = cubes.spectral_smooth(self.errorcube,smooth,**kwargs)
self._smooth_header(smooth)
__doc__ += "cubes.spectral_smooth doc: \n" + cubes.spectral_smooth.__doc__
except ImportError:
def smooth(self):
raise ImportError("Can't import cubes: required for cube spectral smoothing")
def _smooth_header(self,smooth):
"""
Internal - correct the FITS header parameters when smoothing
"""
if self.header.get('CDELT3') is not None and self.header.get('CRPIX3') is not None:
self.header['CDELT3'] = self.header.get('CDELT3') * float(smooth)
self.header['CRPIX3'] = self.header.get('CRPIX3') / float(smooth)
history.write_history(self.header,"SMOOTH: Smoothed and downsampled spectrum by factor %i" % (smooth))
history.write_history(self.header,"SMOOTH: Changed CRPIX3 from %f to %f" % (self.header.get('CRPIX3')*float(smooth),self.header.get('CRPIX3')))
history.write_history(self.header,"SMOOTH: Changed CDELT3 from %f to %f" % (self.header.get('CRPIX3')/float(smooth),self.header.get('CRPIX3')))
def write_fit(self, fitcubefilename, clobber=False):
"""
Write out a fit cube using the information in the fit's parinfo to set the header keywords
Parameters
----------
fitcubefilename: string
Filename to write to
clobber: bool
Overwrite file if it exists?
"""
try:
import astropy.io.fits as pyfits
except ImportError:
import pyfits
try:
fitcubefile = pyfits.PrimaryHDU(data=np.concatenate([self.parcube,self.errcube]), header=self.header)
fitcubefile.header['FITTYPE'] = self.specfit.fittype
for ii,par in enumerate(self.specfit.parinfo):
kw = "PLANE%i" % ii
parname = par['parname'].strip('0123456789')
fitcubefile.header[kw] = parname
# set error parameters
for jj,par in enumerate(self.specfit.parinfo):
kw = "PLANE%i" % (ii+jj)
parname = "e"+par['parname'].strip('0123456789')
fitcubefile.header[kw] = parname
# overwrite the WCS
fitcubefile.header['CDELT3'] = 1
fitcubefile.header['CTYPE3'] = 'FITPAR'
fitcubefile.header['CRVAL3'] = 0
fitcubefile.header['CRPIX3'] = 1
except AttributeError:
log.exception("Make sure you run the cube fitter first.")
return
fitcubefile.writeto(fitcubefilename, clobber=clobber)
def write_cube(self):
raise NotImplementedError
class CubeStack(Cube):
"""
The Cube equivalent of Spectra: for stitching multiple cubes with the same
spatial grid but different frequencies together
"""
def __init__(self, cubelist, xunit='GHz', x0=0, y0=0, maskmap=None, **kwargs):
"""
Initialize the Cube. Accepts FITS files.
x0,y0 - initial spectrum to use (defaults to lower-left corner)
"""
log.info("Creating Cube Stack")
cubelist = list(cubelist)
for ii,cube in enumerate(cubelist):
if type(cube) is str:
cube = Cube(cube)
cubelist[ii] = cube
if cube.xarr.unit != xunit:
# convert all inputs to same (non-velocity) unit
cube.xarr.convert_to_unit(xunit, **kwargs)
self.cubelist = cubelist
log.info("Concatenating data")
self.xarr = spectrum.units.SpectroscopicAxes([sp.xarr for sp in cubelist])
self.cube = np.ma.concatenate([cube.cube for cube in cubelist])
if any([cube.errorcube is not None for cube in cubelist]):
if all([cube.errorcube is not None for cube in cubelist]):
self.errorcube = np.ma.concatenate([cube.errorcube for cube in cubelist])
else:
raise ValueError("Mismatched error cubes.")
else:
self.errorcube = None
if hasattr(self.cube,'mask'):
try:
if self.cube.mask in (False,np.bool_(False)):
# mask causes major problems internally for numpy...
self.cube = np.array(self.cube)
except ValueError:
# this means that self.cube.mask is an array;
                # technically that's alright
pass
self._sort()
self.data = self.cube[:,y0,x0]
self.error = self.errorcube[:,y0,x0] if self.errorcube is not None else None
self.header = cubelist[0].header
for cube in cubelist:
for key,value in cube.header.items():
self.header[key] = value
self.wcs = wcs.WCS(self.header)
self.wcs.wcs.fix()
self._spectral_axis_number = self.wcs.wcs.spec+1
self._first_cel_axis_num = np.where(self.wcs.wcs.axis_types // 1000 == 2)[0][0]+1
# TODO: Improve this!!!
self.system = ('galactic'
if ('CTYPE{0}'.format(self._first_cel_axis_num)
in self.header and 'GLON' in
self.header['CTYPE{0}'.format(self._first_cel_axis_num)])
else 'celestial')
self.unit = cubelist[0].unit
for cube in cubelist:
if cube.unit != self.unit:
raise ValueError("Mismatched units "
"{0} and {1}".format(cube.unit, self.unit))
self.fileprefix = cubelist[0].fileprefix # first is the best?
if maskmap is not None:
self.maskmap = maskmap
else:
self.maskmap = np.ones(self.cube.shape[1:],dtype='bool')
self._register_fitters()
self.plotter = spectrum.plotters.Plotter(self)
self.specfit = spectrum.fitters.Specfit(self,Registry=self.Registry)
self.baseline = spectrum.baseline.Baseline(self)
self.speclines = spectrum.speclines
# Initialize writers TO DO: DO WRITERS WORK FOR CUBES?
self.writer = {}
for writer in spectrum.writers.writers:
self.writer[writer] = spectrum.writers.writers[writer](self)
# Special. This needs to be modified to be more flexible; for now I need it to work for nh3
self.plot_special = None
self.plot_special_kwargs = {}
self.mapplot = mapplot.MapPlotter(self)
def _sort(self):
""" Sort the data in order of increasing X (could be decreasing, but
must be monotonic for plotting reasons) """
indices = self.xarr.argsort()
self.xarr = self.xarr[indices]
self.cube = self.cube[indices,:,:]
if self.errorcube is not None:
self.errorcube = self.errorcube[indices,:,:]
|
|
"""
sentry.web.views
~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import logging
import traceback
from datetime import timedelta
from django.conf import settings
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.db.models import Sum, Q
from django.http import HttpResponse, HttpResponseRedirect
from django.utils import timezone
from django.views.decorators.cache import never_cache, cache_control
from django.views.decorators.csrf import csrf_exempt
from django.views.generic.base import View as BaseView
from functools import wraps
from raven.contrib.django.models import client as Raven
from sentry import app
from sentry.app import tsdb
from sentry.coreapi import (
APIError, APIForbidden, APIRateLimited, ClientApiHelper
)
from sentry.event_manager import EventManager
from sentry.models import (
AnonymousUser, Group, GroupStatus, Project, TagValue, User
)
from sentry.signals import event_received
from sentry.quotas.base import RateLimit
from sentry.utils import json, metrics
from sentry.utils.data_scrubber import SensitiveDataFilter
from sentry.utils.javascript import to_json
from sentry.utils.http import is_valid_origin, get_origins, is_same_domain
from sentry.utils.safe import safe_execute
from sentry.web.decorators import has_access
from sentry.web.helpers import render_to_response
logger = logging.getLogger('sentry')
# Transparent 1x1 gif
# See http://probablyprogramming.com/2009/03/15/the-tiniest-gif-ever
PIXEL = 'R0lGODlhAQABAAD/ACwAAAAAAQABAAACADs='.decode('base64')
PROTOCOL_VERSIONS = frozenset(('2.0', '3', '4', '5', '6', '7'))
def api(func):
@wraps(func)
def wrapped(request, *args, **kwargs):
data = func(request, *args, **kwargs)
if request.is_ajax():
response = HttpResponse(data)
response['Content-Type'] = 'application/json'
else:
ref = request.META.get('HTTP_REFERER')
if ref is None or not is_same_domain(ref, request.build_absolute_uri()):
ref = reverse('sentry')
return HttpResponseRedirect(ref)
return response
return wrapped
class APIView(BaseView):
def _get_project_from_id(self, project_id):
if not project_id:
return
if not project_id.isdigit():
raise APIError('Invalid project_id: %r' % project_id)
try:
return Project.objects.get_from_cache(id=project_id)
except Project.DoesNotExist:
raise APIError('Invalid project_id: %r' % project_id)
def _parse_header(self, request, helper, project):
auth = helper.auth_from_request(request)
if auth.version not in PROTOCOL_VERSIONS:
raise APIError('Client using unsupported server protocol version (%r)' % str(auth.version or ''))
if not auth.client:
raise APIError("Client did not send 'client' identifier")
return auth
@csrf_exempt
@never_cache
def dispatch(self, request, project_id=None, *args, **kwargs):
helper = ClientApiHelper(
agent=request.META.get('HTTP_USER_AGENT'),
project_id=project_id,
ip_address=request.META['REMOTE_ADDR'],
)
origin = None
try:
origin = helper.origin_from_request(request)
response = self._dispatch(request, helper, project_id=project_id,
origin=origin,
*args, **kwargs)
except APIError as e:
context = {
'error': unicode(e.msg).encode('utf-8'),
}
if e.name:
context['error_name'] = e.name
response = HttpResponse(json.dumps(context),
content_type='application/json',
status=e.http_status)
# Set X-Sentry-Error as in many cases it is easier to inspect the headers
response['X-Sentry-Error'] = context['error']
if isinstance(e, APIRateLimited) and e.retry_after is not None:
response['Retry-After'] = str(e.retry_after)
except Exception:
            if settings.DEBUG:
content = traceback.format_exc()
else:
content = ''
traceback.print_exc()
response = HttpResponse(content,
content_type='text/plain',
status=500)
# TODO(dcramer): it'd be nice if we had an incr_multi method so
# tsdb could optimize this
metrics.incr('client-api.all-versions.requests')
metrics.incr('client-api.all-versions.responses.%s' % (
response.status_code,
))
metrics.incr('client-api.all-versions.responses.%sxx' % (
str(response.status_code)[0],
))
if helper.context.version:
metrics.incr('client-api.v%s.requests' % (
helper.context.version,
))
metrics.incr('client-api.v%s.responses.%s' % (
helper.context.version, response.status_code
))
metrics.incr('client-api.v%s.responses.%sxx' % (
helper.context.version, str(response.status_code)[0]
))
if response.status_code != 200 and origin:
# We allow all origins on errors
response['Access-Control-Allow-Origin'] = '*'
if origin:
response['Access-Control-Allow-Headers'] = \
'X-Sentry-Auth, X-Requested-With, Origin, Accept, ' \
'Content-Type, Authentication'
response['Access-Control-Allow-Methods'] = \
', '.join(self._allowed_methods())
return response
def _dispatch(self, request, helper, project_id=None, origin=None,
*args, **kwargs):
request.user = AnonymousUser()
project = self._get_project_from_id(project_id)
if project:
helper.context.bind_project(project)
Raven.tags_context(helper.context.get_tags_context())
if origin is not None:
# This check is specific for clients who need CORS support
if not project:
raise APIError('Client must be upgraded for CORS support')
if not is_valid_origin(origin, project):
raise APIForbidden('Invalid origin: %s' % (origin,))
# XXX: It seems that the OPTIONS call does not always include custom headers
if request.method == 'OPTIONS':
response = self.options(request, project)
else:
auth = self._parse_header(request, helper, project)
project_ = helper.project_from_auth(auth)
# Legacy API was /api/store/ and the project ID was only available elsewhere
if not project:
if not project_:
raise APIError('Unable to identify project')
project = project_
helper.context.bind_project(project)
elif project_ != project:
                raise APIError('Two different projects were specified')
helper.context.bind_auth(auth)
Raven.tags_context(helper.context.get_tags_context())
if auth.version != '2.0':
if request.method == 'GET':
# GET only requires an Origin/Referer check
# If an Origin isn't passed, it's possible that the project allows no origin,
# so we need to explicitly check for that here. If Origin is not None,
# it can be safely assumed that it was checked previously and it's ok.
if origin is None and not is_valid_origin(origin, project):
# Special case an error message for a None origin when None wasn't allowed
raise APIForbidden('Missing required Origin or Referer header')
else:
# Version 3 enforces secret key for server side requests
if not auth.secret_key:
raise APIForbidden('Missing required attribute in authentication header: sentry_secret')
response = super(APIView, self).dispatch(
request=request,
project=project,
auth=auth,
helper=helper,
**kwargs
)
if origin:
response['Access-Control-Allow-Origin'] = origin
return response
# XXX: backported from Django 1.5
def _allowed_methods(self):
return [m.upper() for m in self.http_method_names if hasattr(self, m)]
def options(self, request, *args, **kwargs):
response = HttpResponse()
response['Allow'] = ', '.join(self._allowed_methods())
response['Content-Length'] = '0'
return response
class StoreView(APIView):
"""
The primary endpoint for storing new events.
This will validate the client's authentication and data, and if
successful pass on the payload to the internal database handler.
Authentication works in three flavors:
1. Explicit signed requests
These are implemented using the documented signed request protocol, and
require an authentication header which is signed using with the project
member's secret key.
2. CORS Secured Requests
Generally used for communications with client-side platforms (such as
JavaScript in the browser), they require a standard header, excluding
the signature and timestamp requirements, and must be listed in the
origins for the given project (or the global origins).
3. Implicit trusted requests
Used by the Sentry core, they are only available from same-domain requests
and do not require any authentication information. They only require that
the user be authenticated, and a project_id be sent in the GET variables.
"""
def post(self, request, **kwargs):
data = request.body
response_or_event_id = self.process(request, data=data, **kwargs)
if isinstance(response_or_event_id, HttpResponse):
return response_or_event_id
return HttpResponse(json.dumps({
'id': response_or_event_id,
}), content_type='application/json')
def get(self, request, **kwargs):
data = request.GET.get('sentry_data', '')
response_or_event_id = self.process(request, data=data, **kwargs)
        # Return a simple 1x1 gif for browsers so they don't throw a warning
response = HttpResponse(PIXEL, 'image/gif')
if not isinstance(response_or_event_id, HttpResponse):
response['X-Sentry-ID'] = response_or_event_id
return response
def process(self, request, project, auth, helper, data, **kwargs):
metrics.incr('events.total')
event_received.send_robust(ip=request.META['REMOTE_ADDR'], sender=type(self))
# TODO: improve this API (e.g. make RateLimit act on __ne__)
rate_limit = safe_execute(app.quotas.is_rate_limited, project=project,
_with_transaction=False)
if isinstance(rate_limit, bool):
rate_limit = RateLimit(is_limited=rate_limit, retry_after=None)
if rate_limit is not None and rate_limit.is_limited:
app.tsdb.incr_multi([
(app.tsdb.models.project_total_received, project.id),
(app.tsdb.models.project_total_rejected, project.id),
(app.tsdb.models.organization_total_received, project.organization_id),
(app.tsdb.models.organization_total_rejected, project.organization_id),
])
metrics.incr('events.dropped')
raise APIRateLimited(rate_limit.retry_after)
else:
app.tsdb.incr_multi([
(app.tsdb.models.project_total_received, project.id),
(app.tsdb.models.organization_total_received, project.organization_id),
])
content_encoding = request.META.get('HTTP_CONTENT_ENCODING', '')
if content_encoding == 'gzip':
data = helper.decompress_gzip(data)
elif content_encoding == 'deflate':
data = helper.decompress_deflate(data)
elif not data.startswith('{'):
data = helper.decode_and_decompress_data(data)
data = helper.safely_load_json_string(data)
# mutates data
helper.validate_data(project, data)
# mutates data
manager = EventManager(data, version=auth.version)
data = manager.normalize()
scrub_ip_address = project.get_option('sentry:scrub_ip_address', False)
# insert IP address if not available
if auth.is_public and not scrub_ip_address:
helper.ensure_has_ip(data, request.META['REMOTE_ADDR'])
event_id = data['event_id']
# TODO(dcramer): ideally we'd only validate this if the event_id was
# supplied by the user
cache_key = 'ev:%s:%s' % (project.id, event_id,)
if cache.get(cache_key) is not None:
raise APIForbidden('An event with the same ID already exists (%s)' % (event_id,))
if project.get_option('sentry:scrub_data', True):
# We filter data immediately before it ever gets into the queue
inst = SensitiveDataFilter(project.get_option('sentry:sensitive_fields', None))
inst.apply(data)
if scrub_ip_address:
# We filter data immediately before it ever gets into the queue
helper.ensure_does_not_have_ip(data)
# mutates data (strips a lot of context if not queued)
helper.insert_data_to_database(data)
cache.set(cache_key, '', 60 * 5)
helper.log.debug('New event received (%s)', event_id)
return event_id
@never_cache
@csrf_exempt
@has_access
def get_group_trends(request, organization, team):
minutes = int(request.REQUEST.get('minutes', 15))
limit = min(100, int(request.REQUEST.get('limit', 10)))
project_list = Project.objects.get_for_user(team=team, user=request.user)
project_dict = dict((p.id, p) for p in project_list)
base_qs = Group.objects.filter(
project__in=project_list,
status=0,
)
cutoff = timedelta(minutes=minutes)
cutoff_dt = timezone.now() - cutoff
group_list = list(base_qs.filter(
status=GroupStatus.UNRESOLVED,
last_seen__gte=cutoff_dt
).extra(select={'sort_value': 'score'}).order_by('-score')[:limit])
for group in group_list:
group._project_cache = project_dict.get(group.project_id)
data = to_json(group_list, request)
response = HttpResponse(data)
response['Content-Type'] = 'application/json'
return response
@never_cache
@csrf_exempt
@has_access
def get_new_groups(request, organization, team):
minutes = int(request.REQUEST.get('minutes', 15))
limit = min(100, int(request.REQUEST.get('limit', 10)))
project_list = Project.objects.get_for_user(team=team, user=request.user)
project_dict = dict((p.id, p) for p in project_list)
cutoff = timedelta(minutes=minutes)
cutoff_dt = timezone.now() - cutoff
group_list = list(Group.objects.filter(
project__in=project_dict.keys(),
status=GroupStatus.UNRESOLVED,
active_at__gte=cutoff_dt,
).extra(select={'sort_value': 'score'}).order_by('-score', '-first_seen')[:limit])
for group in group_list:
group._project_cache = project_dict.get(group.project_id)
data = to_json(group_list, request)
response = HttpResponse(data)
response['Content-Type'] = 'application/json'
return response
@never_cache
@csrf_exempt
@has_access
def get_resolved_groups(request, organization, team):
minutes = int(request.REQUEST.get('minutes', 15))
limit = min(100, int(request.REQUEST.get('limit', 10)))
project_list = Project.objects.get_for_user(team=team, user=request.user)
project_dict = dict((p.id, p) for p in project_list)
cutoff = timedelta(minutes=minutes)
cutoff_dt = timezone.now() - cutoff
group_list = list(Group.objects.filter(
project__in=project_list,
status=GroupStatus.RESOLVED,
resolved_at__gte=cutoff_dt,
).order_by('-score')[:limit])
for group in group_list:
group._project_cache = project_dict.get(group.project_id)
data = to_json(group_list, request)
response = HttpResponse(json.dumps(data))
response['Content-Type'] = 'application/json'
return response
@never_cache
@csrf_exempt
@has_access
def get_stats(request, organization, team):
minutes = int(request.REQUEST.get('minutes', 15))
project_list = Project.objects.get_for_user(team=team, user=request.user)
cutoff = timedelta(minutes=minutes)
end = timezone.now()
start = end - cutoff
# TODO(dcramer): this is used in an unreleased feature. reimplement it using
# new API and tsdb
results = tsdb.get_range(
model=tsdb.models.project,
keys=[p.id for p in project_list],
start=start,
end=end,
)
num_events = 0
for project, points in results.iteritems():
num_events += sum(p[1] for p in points)
# XXX: This is too slow if large amounts of groups are resolved
    # TODO(dcramer): move this into tsdb
num_resolved = Group.objects.filter(
project__in=project_list,
status=GroupStatus.RESOLVED,
resolved_at__gte=start,
).aggregate(t=Sum('times_seen'))['t'] or 0
data = {
'events': num_events,
'resolved': num_resolved,
}
response = HttpResponse(json.dumps(data))
response['Content-Type'] = 'application/json'
return response
@never_cache
@csrf_exempt
@has_access
def search_tags(request, organization, project):
limit = min(100, int(request.GET.get('limit', 10)))
name = request.GET['name']
query = request.GET['query']
results = list(TagValue.objects.filter(
project=project,
key=name,
value__icontains=query,
).values_list('value', flat=True).order_by('value')[:limit])
response = HttpResponse(json.dumps({
'results': results,
'query': query,
}))
response['Content-Type'] = 'application/json'
return response
@never_cache
@csrf_exempt
@has_access
def search_users(request, organization):
limit = min(100, int(request.GET.get('limit', 10)))
query = request.GET['query']
results = list(User.objects.filter(
Q(email__istartswith=query) | Q(first_name__istartswith=query) | Q(username__istartswith=query),
).filter(
sentry_orgmember_set__organization=organization,
).distinct().order_by('first_name', 'email').values('id', 'username', 'first_name', 'email')[:limit])
response = HttpResponse(json.dumps({
'results': results,
'query': query,
}))
response['Content-Type'] = 'application/json'
return response
@never_cache
@csrf_exempt
@has_access
def search_projects(request, organization):
limit = min(100, int(request.GET.get('limit', 10)))
query = request.GET['query']
results = list(Project.objects.filter(
Q(name__istartswith=query) | Q(slug__istartswith=query),
organization=organization,
).distinct().order_by('name', 'slug').values('id', 'name', 'slug')[:limit])
response = HttpResponse(json.dumps({
'results': results,
'query': query,
}))
response['Content-Type'] = 'application/json'
return response
@cache_control(max_age=3600, public=True)
def crossdomain_xml_index(request):
response = render_to_response('sentry/crossdomain_index.xml')
response['Content-Type'] = 'application/xml'
return response
@cache_control(max_age=60)
def crossdomain_xml(request, project_id):
if not project_id.isdigit():
return HttpResponse(status=404)
try:
project = Project.objects.get_from_cache(id=project_id)
except Project.DoesNotExist:
return HttpResponse(status=404)
origin_list = get_origins(project)
if origin_list == '*':
origin_list = [origin_list]
response = render_to_response('sentry/crossdomain.xml', {
'origin_list': origin_list
})
response['Content-Type'] = 'application/xml'
return response
|
|
#!/usr/bin/env python
import glob
import logging
import os
import re
import simplejson as json
import struct
import threading
import couchstore
import couchbaseConstants
import pump
from cbcollections import defaultdict
from cbqueue import PumpQueue
SFD_SCHEME = "couchstore-files://"
SFD_VBUCKETS = 1024
SFD_REV_META = ">QIIBBB" # cas, exp, flg, flex_meta, dtype, conf_res
SFD_REV_SEQ = ">Q"
SFD_DB_SEQ = ">Q"
SFD_RE = "^([0-9]+)\\.couch\\.([0-9]+)$"
# TODO: (1) SFDSource - total_msgs.
# TODO: (1) SFDSink - ensure right user for bucket_dir.
# TODO: (1) SFDSink - ensure right user for couchstore file.
class SFDSource(pump.Source):
"""Reads couchstore files from a couchbase server data directory."""
def __init__(self, opts, spec, source_bucket, source_node,
source_map, sink_map, ctl, cur):
super(SFDSource, self).__init__(opts, spec, source_bucket, source_node,
source_map, sink_map, ctl, cur)
self.done = False
self.queue = None
@staticmethod
def can_handle(opts, spec):
return spec.startswith(SFD_SCHEME)
@staticmethod
def check_base(opts, spec):
# Skip immediate superclass Source.check_base(),
# since SFDSource can handle different vbucket states.
return pump.EndPoint.check_base(opts, spec)
@staticmethod
def check(opts, spec):
rv, d = data_dir(spec)
if rv != 0:
return rv
buckets = []
for bucket_dir in sorted(glob.glob(d + "/*/")):
if not glob.glob(bucket_dir + "/*.couch.*"):
continue
bucket_name = os.path.basename(os.path.dirname(bucket_dir))
if not bucket_name:
return "error: bucket_name too short: " + bucket_dir, None
rv, v = SFDSource.vbucket_states(opts, spec, bucket_dir)
if rv != 0:
return rv, None
buckets.append({'name': bucket_name,
'nodes': [{'hostname': 'N/A',
'vbucket_states': v}]})
if not buckets:
return "error: no bucket subdirectories at: " + d, None
return 0, {'spec': spec, 'buckets': buckets}
@staticmethod
def vbucket_states(opts, spec, bucket_dir):
"""Reads all the latest couchstore files in a directory, and returns
map of state string (e.g., 'active') to map of vbucket_id to doc."""
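        # Hedged sketch of a successful return value (ids and fields illustrative):
        #   (0, {'active': {0: {'state': 'active', 'checkpoint_id': '5', ...},
        #                   1: {...}}})
        # On any failure an (error_string, None) pair is returned instead.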
vbucket_states = defaultdict(dict)
for f in latest_couch_files(bucket_dir):
vbucket_id = int(re.match(SFD_RE, os.path.basename(f)).group(1))
try:
store = couchstore.CouchStore(f, 'r')
try:
doc_str = store.localDocs['_local/vbstate']
if doc_str:
doc = json.loads(doc_str)
state = doc.get('state', None)
if state:
vbucket_states[state][vbucket_id] = doc
else:
return "error: missing vbucket_state from: %s" \
% (f), None
except Exception, e:
return ("error: could not read _local/vbstate from: %s" +
"; exception: %s") % (f, e), None
store.close()
except Exception, e:
return ("error: could not read couchstore file: %s" +
"; exception: %s") % (f, e), None
if vbucket_states:
return 0, vbucket_states
return "error: no vbucket_states in files: %s" % (bucket_dir), None
@staticmethod
def provide_design(opts, source_spec, source_bucket, source_map):
rv, d = data_dir(source_spec)
if rv != 0:
return rv, None
bucket_dir = d + '/' + source_bucket['name']
if not os.path.isdir(bucket_dir):
return 0, None
rv, store, store_path = \
open_latest_store(bucket_dir,
"master.couch.*",
"^(master)\\.couch\\.([0-9]+)$",
"master.couch.0",
mode='r')
if rv != 0 or not store:
return rv, None
rows = []
for doc_info in store.changesSince(0):
if not doc_info.deleted:
try:
doc_contents = doc_info.getContents(options=couchstore.CouchStore.DECOMPRESS)
except Exception, e:
return ("error: could not read design doc: %s" +
"; source_spec: %s; exception: %s") % \
(doc_info.id, source_spec, e), None
try:
doc = json.loads(doc_contents)
except ValueError, e:
return ("error: could not parse design doc: %s" +
"; source_spec: %s; exception: %s") % \
(doc_info.id, source_spec, e), None
doc['id'] = doc.get('id', doc_info.id)
doc['_rev'] = doc.get('_rev', doc_info.revSequence)
rows.append({'id': doc_info.id, 'doc': doc})
store.close()
return 0, json.dumps(rows)
def provide_batch(self):
if self.done:
return 0, None
if not self.queue:
name = "c" + threading.currentThread().getName()[1:]
self.queue = PumpQueue(2)
self.thread = threading.Thread(target=self.loader, name=name)
self.thread.daemon = True
self.thread.start()
rv, batch = self.queue.get()
self.queue.task_done()
if rv != 0 or batch is None:
self.done = True
return rv, batch
def loader(self):
rv, d = data_dir(self.spec)
if rv != 0:
self.queue.put((rv, None))
return
source_vbucket_state = \
getattr(self.opts, 'source_vbucket_state', 'active')
source_nodes = self.source_bucket['nodes']
if len(source_nodes) != 1:
self.queue.put(("error: expected 1 node in source_bucket: %s"
% (self.source_bucket['name']), None))
return
vbucket_states = source_nodes[0].get('vbucket_states', None)
if not vbucket_states:
self.queue.put(("error: missing vbucket_states in source_bucket: %s"
% (self.source_bucket['name']), None))
return
vbuckets = vbucket_states.get(source_vbucket_state, None)
if vbuckets is None: # Empty dict is valid.
self.queue.put(("error: missing vbuckets in source_bucket: %s"
% (self.source_bucket['name']), None))
return
batch_max_size = self.opts.extra['batch_max_size']
batch_max_bytes = self.opts.extra['batch_max_bytes']
store = None
vbucket_id = None
# Level of indirection since we can't use python 3 nonlocal statement.
abatch = [pump.Batch(self)]
def change_callback(doc_info):
if doc_info:
key = doc_info.id
if self.skip(key, vbucket_id):
return
if doc_info.deleted:
cmd = couchbaseConstants.CMD_TAP_DELETE
val = ''
else:
cmd = couchbaseConstants.CMD_TAP_MUTATION
val = doc_info.getContents(options=couchstore.CouchStore.DECOMPRESS)
try:
cas, exp, flg, flex_meta, dtype, conf_res = struct.unpack(SFD_REV_META, doc_info.revMeta)
meta = doc_info.revSequence
seqno = doc_info.sequence
nmeta = 0
msg = (cmd, vbucket_id, key, flg, exp, cas, meta, val, seqno, dtype, nmeta, conf_res)
abatch[0].append(msg, len(val))
except Exception, e:
self.queue.put(("error: could not read couchstore file due to unsupported file format version;"
" exception: %s"% e, None))
return
if (abatch[0].size() >= batch_max_size or
abatch[0].bytes >= batch_max_bytes):
self.queue.put((0, abatch[0]))
abatch[0] = pump.Batch(self)
for f in latest_couch_files(d + '/' + self.source_bucket['name']):
vbucket_id = int(re.match(SFD_RE, os.path.basename(f)).group(1))
            if vbucket_id not in vbuckets:
continue
try:
store = couchstore.CouchStore(f, 'r')
store.forEachChange(0, change_callback)
store.close()
except Exception, e:
#MB-12270: Some files may be deleted due to compaction. We can
                #safely ignore them and move to the next file.
pass
if abatch[0].size():
self.queue.put((0, abatch[0]))
self.queue.put((0, None))
class SFDSink(pump.Sink):
"""Sink for couchstore in couchbase server/file/directory layout."""
def __init__(self, opts, spec, source_bucket, source_node,
source_map, sink_map, ctl, cur):
super(SFDSink, self).__init__(opts, spec, source_bucket, source_node,
source_map, sink_map, ctl, cur)
self.rehash = opts.extra.get("rehash", 0)
self.init_worker(SFDSink.run)
@staticmethod
def run(self):
destination_vbucket_state = \
getattr(self.opts, 'destination_vbucket_state', 'active')
vbucket_states = self.source_node.get('vbucket_states', {})
while not self.ctl['stop']:
batch, future = self.pull_next_batch()
if not batch:
return self.future_done(future, 0)
vbuckets = batch.group_by_vbucket_id(SFD_VBUCKETS, self.rehash)
for vbucket_id, msgs in vbuckets.iteritems():
checkpoint_id = 0
max_deleted_seqno = 0
rv, store, store_path = self.open_store(vbucket_id)
if rv != 0:
return self.future_done(future, rv)
bulk_keys = []
bulk_vals = []
for i, msg in enumerate(msgs):
cmd, _vbucket_id, key, flg, exp, cas, meta, val, seqno, dtype, nmeta, conf_res = msg
if self.skip(key, vbucket_id):
continue
d = couchstore.DocumentInfo(str(key))
flex_meta = 1
d.revMeta = str(struct.pack(SFD_REV_META, cas, exp, flg, flex_meta, dtype, conf_res))
if meta:
if len(meta) > 8:
meta = meta[0:8]
if len(meta) < 8:
meta = ('\x00\x00\x00\x00\x00\x00\x00\x00' + meta)[-8:]
d.revSequence, = struct.unpack(SFD_REV_SEQ, meta)
else:
d.revSequence = 1
if seqno:
d.sequence = int(seqno)
if cmd == couchbaseConstants.CMD_TAP_MUTATION:
v = str(val)
try:
if (re.match('^\\s*{', v) and
json.loads(v) is not None):
d.contentType = couchstore.DocumentInfo.IS_JSON
except ValueError:
pass # NON_JSON is already the default contentType.
elif cmd == couchbaseConstants.CMD_TAP_DELETE:
v = None
else:
self.future_done(future,
"error: SFDSink bad cmd: " + str(cmd))
store.close()
return
bulk_keys.append(d)
bulk_vals.append(v)
try:
if bulk_keys and bulk_vals:
vm = vbucket_states.get(destination_vbucket_state, None)
if vm:
vi = vm.get(vbucket_id, None)
if vi:
c = int(vi.get("checkpoint_id", checkpoint_id))
checkpoint_id = max(checkpoint_id, c)
m = int(vi.get("max_deleted_seqno", max_deleted_seqno))
max_deleted_seqno = max(max_deleted_seqno, m)
rv = self.save_vbucket_state(store, vbucket_id,
destination_vbucket_state,
checkpoint_id,
max_deleted_seqno)
if rv != 0:
self.future_done(future, rv)
store.close()
return
store.saveMultiple(bulk_keys, bulk_vals,
options=couchstore.CouchStore.COMPRESS)
store.commit()
store.close()
except Exception, e:
self.future_done(future,
"error: could not save couchstore data"
"; vbucket_id: %s; store_path: %s"
"; exception: %s"
% (vbucket_id, store_path, e))
return
self.future_done(future, 0) # No return to keep looping.
def save_vbucket_state(self, store, vbucket_id,
state, checkpoint_id, max_deleted_seqno):
doc = json.dumps({'state': state,
'checkpoint_id': str(checkpoint_id),
'max_deleted_seqno': str(max_deleted_seqno)})
try:
store.localDocs['_local/vbstate'] = doc
except Exception, e:
return "error: save_vbucket_state() failed: " + str(e)
return 0
@staticmethod
def can_handle(opts, spec):
return spec.startswith(SFD_SCHEME)
@staticmethod
def check_base(opts, spec):
if getattr(opts, "destination_operation", None) != None:
return ("error: --destination-operation" +
" is not supported by this destination: %s") % (spec)
# Skip immediate superclass Sink.check_base(),
# since SFDSink can handle different vbucket states.
return pump.EndPoint.check_base(opts, spec)
@staticmethod
def check(opts, spec, source_map):
# TODO: (2) SFDSink - check disk space.
rv, dir = data_dir(spec)
if rv != 0:
return rv
if not os.path.isdir(dir):
return "error: not a directory: " + dir, None
if not os.access(dir, os.W_OK):
return "error: directory is not writable: " + dir, None
return 0, None
@staticmethod
def consume_design(opts, sink_spec, sink_map,
source_bucket, source_map, source_design):
if not source_design:
return 0
try:
sd = json.loads(source_design)
except ValueError, e:
return "error: could not parse source_design: " + source_design
rv, d = data_dir(sink_spec)
if rv != 0:
return rv
bucket_dir = d + '/' + source_bucket['name']
if not os.path.isdir(bucket_dir):
os.mkdir(bucket_dir)
rv, store, store_path = \
open_latest_store(bucket_dir,
"master.couch.*",
"^(master)\\.couch\\.([0-9]+)$",
"master.couch.1")
if rv != 0:
return rv
bulk_keys = []
bulk_vals = []
if sd:
for row in sd['rows']:
logging.debug("design_doc row: " + str(row))
d = couchstore.DocumentInfo(str(row['id']))
if '_rev' in row['doc']:
d.revMeta = str(row['doc']['_rev'])
del row['doc']['_rev']
d.contentType = couchstore.DocumentInfo.IS_JSON
bulk_keys.append(d)
bulk_vals.append(json.dumps(row['doc']))
if bulk_keys and bulk_vals:
store.saveMultiple(bulk_keys, bulk_vals) # TODO: Compress ddocs?
store.commit()
store.close()
return 0
def consume_batch_async(self, batch):
return self.push_next_batch(batch, pump.SinkBatchFuture(self, batch))
def open_store(self, vbucket_id):
# data_dir => /opt/couchbase/var/lib/couchbase/data/
# bucket_dir => default/
# store_path => VBUCKET_ID.couch.COMPACTION_NUM
if vbucket_id >= SFD_VBUCKETS:
return "error: vbucket_id too large: %s" % (vbucket_id), None, None
rv, bucket_dir = self.find_bucket_dir()
if rv != 0:
return rv, None, None
return open_latest_store(bucket_dir, "%s.couch.*" % (vbucket_id), SFD_RE,
str(vbucket_id) + ".couch.1", mode='c')
def find_bucket_dir(self):
rv, d = data_dir(self.spec)
if rv != 0:
return rv, None
bucket_dir = d + '/' + self.source_bucket['name']
if not os.path.isdir(bucket_dir):
try:
os.mkdir(bucket_dir)
except OSError, e:
return ("error: could not create bucket_dir: %s; exception: %s"
% (bucket_dir, e)), None
return 0, bucket_dir
def open_latest_store(bucket_dir, glob_pattern, filter_re, default_name, mode='c'):
store_paths = latest_couch_files(bucket_dir,
glob_pattern=glob_pattern,
filter_re=filter_re)
if not store_paths:
if mode == 'r':
return 0, None, None
store_paths = [bucket_dir + '/' + default_name]
if len(store_paths) != 1:
return ("error: no single, latest couchstore file: %s" +
"; found: %s") % (glob_pattern, store_paths), None, None
try:
return 0, couchstore.CouchStore(str(store_paths[0]), mode), store_paths[0]
except Exception, e:
return ("error: could not open couchstore file: %s" +
"; exception: %s") % (store_paths[0], e), None, None
def latest_couch_files(bucket_dir, glob_pattern='*.couch.*', filter_re=SFD_RE):
"""Given directory of *.couch.VER files, returns files with largest VER suffixes."""
files = glob.glob(bucket_dir + '/' + glob_pattern)
files = [f for f in files if re.match(filter_re, os.path.basename(f))]
matches = [(re.match(filter_re, os.path.basename(f)), f) for f in files]
latest = {}
for match, file in matches:
top, _ = latest.get(match.group(1), (-1, None))
cur = int(match.group(2))
if cur > top:
latest[match.group(1)] = (cur, file)
return sorted([file for top, file in latest.values()])
def data_dir(spec):
if not spec.startswith(SFD_SCHEME):
return "error: wrong scheme in spec: " + spec, None
dir = spec[len(SFD_SCHEME):]
if dir:
return 0, os.path.normpath(dir)
else:
return "error: missing dir in spec: " + spec, None
|
|
import os
from hippy.builtin import (wrap_method, Optional, ThisUnwrapper,
handle_as_exception, StreamContextArg, Nullable)
from hippy.objects.instanceobject import W_InstanceObject
from hippy.objects.intobject import W_IntObject
from hippy.objects.resources.file_resource import W_FileResource
from hippy.error import PHPException
from hippy.builtin_klass import (def_class, k_RuntimeException,
k_LogicException, GetterSetterWrapper)
from hippy.module.standard.file.funcs import (_is_dir, _is_file, _is_link,
_is_executable, _is_readable, _is_writable, _filetype, _fseek, _fstat,
_fopen, _basename, FopenError)
from rpython.rlib import rpath
from hippy import consts
class W_SplFileInfo(W_InstanceObject):
file_name = None
path_name = None
def __init__(self, klass, dct_w):
W_InstanceObject.__init__(self, klass, dct_w)
def clone(self, interp, contextclass):
w_res = W_InstanceObject.clone(self, interp, contextclass)
w_res.file_name = self.file_name
w_res.path_name = self.path_name
return w_res
class W_SplFileObject(W_SplFileInfo):
delimiter = None
enclosure = None
open_mode = None
def __init__(self, klass, dct_w):
W_InstanceObject.__init__(self, klass, dct_w)
def clone(self, interp, contextclass):
w_res = W_InstanceObject.clone(self, interp, contextclass)
w_res.file_name = self.file_name
w_res.path_name = self.path_name
w_res.delimiter = self.delimiter
w_res.enclosure = self.enclosure
w_res.open_mode = self.open_mode
return w_res
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo), str],
name='SplFileInfo::__construct')
def construct(interp, this, file_name):
this.file_name = file_name
this.path_name = rpath.realpath(file_name)
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::__toString')
def spl_toString(interp, this):
return interp.space.wrap(this.file_name)
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo), Optional(str)],
name='SplFileInfo::getBasename')
def get_basename(interp, this, suffix=''):
return _basename(interp.space, this.file_name, suffix)
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::getExtension')
def get_extension(interp, this):
path = this.file_name
filename = rpath.split(path)[1]
name_split = filename.rsplit('.', 1)
if len(name_split) == 2:
filename, extension = name_split
else:
extension = ''
return interp.space.wrap(extension)
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::getFilename')
def get_filename(interp, this):
return _get_filename(interp, this)
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::getPath')
def get_path(interp, this):
parts = this.file_name.split('/')
parts.pop()
path = ''
for i in parts:
path += i + '/'
path = path.rstrip('/')
return interp.space.wrap(path)
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::getPathname')
def get_pathname(interp, this):
return interp.space.wrap(this.file_name)
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::getGroup', error_handler=handle_as_exception)
def get_group(interp, this):
filename = this.file_name
if not filename:
return interp.space.w_False
try:
res = os.stat(filename).st_gid
return interp.space.wrap(res)
except OSError:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileInfo::getGroup(): stat failed for %s" % filename
)]))
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::getInode', error_handler=handle_as_exception)
def get_inode(interp, this):
filename = this.file_name
if not filename:
return interp.space.w_False
try:
res = os.stat(filename).st_ino
return interp.space.wrap(res)
except OSError:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileInfo::getInode(): stat failed for %s" % filename)]))
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::getOwner', error_handler=handle_as_exception)
def get_owner(interp, this):
filename = this.file_name
if not filename:
return interp.space.w_False
try:
res = os.stat(filename).st_uid
return interp.space.wrap(res)
except OSError:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileInfo::getOwner(): stat failed for %s" % filename)]))
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::getPerms', error_handler=handle_as_exception)
def get_perms(interp, this):
filename = this.file_name
if not filename:
return interp.space.w_False
try:
res = os.stat(filename).st_mode
return interp.space.wrap(res)
except OSError:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileInfo::getPerms(): stat failed for %s" % filename)]))
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::getSize', error_handler=handle_as_exception)
def get_size(interp, this):
filename = this.file_name
if not filename:
return interp.space.w_False
try:
res = os.stat(filename).st_size
return interp.space.wrap(res)
except OSError:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileInfo::getSize(): stat failed for %s" % filename)]))
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::getType', error_handler=handle_as_exception)
def get_type(interp, this):
filename = this.file_name
if not filename:
return interp.space.w_False
try:
return _filetype(interp.space, filename)
except OSError:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileInfo::getType(): stat failed for %s" % filename)]))
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::isDir')
def is_dir(interp, this):
filename = this.file_name
assert filename is not None
return _is_dir(interp.space, filename)
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::isLink')
def is_link(interp, this):
filename = this.file_name
assert filename is not None
return _is_link(interp.space, filename)
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::isExecutable')
def is_executable(interp, this):
return _is_executable(interp.space, this.file_name)
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::isFile')
def is_file(interp, this):
return _is_file(interp.space, this.file_name)
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::isReadable')
def is_readable(interp, this):
return _is_readable(interp.space, this.file_name)
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::isWritable')
def is_writable(interp, this):
return _is_writable(interp.space, this.file_name)
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::getATime', error_handler=handle_as_exception)
def getatime(interp, this):
filename = this.file_name
assert filename is not None
try:
res = os.stat(filename).st_atime
return interp.space.wrap(int(res))
except OSError:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileInfo::getATime(): "
"stat failed for %s" % this.file_name)]))
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::getCTime', error_handler=handle_as_exception)
def getctime(interp, this):
filename = this.file_name
assert filename is not None
try:
res = os.stat(filename).st_ctime
return interp.space.wrap(int(res))
except OSError:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileInfo::getCTime(): "
"stat failed for %s" % this.file_name)]))
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::getMTime', error_handler=handle_as_exception)
def getmtime(interp, this):
filename = this.file_name
assert filename is not None
try:
res = os.stat(filename).st_mtime
return interp.space.wrap(int(res))
except OSError:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileInfo::getMTime(): "
"stat failed for %s" % this.file_name)]))
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::getRealPath')
def get_realpath(interp, this):
try:
path = rpath.realpath(this.file_name)
return interp.space.wrap(path)
except OSError:
return interp.space.w_False
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::getLinkTarget',
error_handler=handle_as_exception)
def get_linktarget(interp, this):
filename = this.file_name
assert filename is not None
try:
return interp.space.wrap(os.readlink(filename))
except OSError, e:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileInfo::getLinkTarget(): %s" % os.strerror(e.errno))]))
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo), Optional(str),
Optional(bool), Optional(Nullable(StreamContextArg(None)))],
name='SplFileInfo::openFile', error_handler=handle_as_exception)
def openfile(interp, this, open_mode='r', use_include_path=False, w_ctx=None):
if open_mode == '':
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileInfo::openFile(): Invalid parameters")]))
args = [interp.space.wrap(this.file_name), interp.space.wrap(open_mode),
interp.space.wrap(use_include_path)]
if w_ctx:
if not interp.space.is_resource(w_ctx):
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileInfo::openFile() expects "
"parameter 3 to be resource, %s given"
% interp.space.get_type_name(w_ctx.tp).lower())]))
args.append(w_ctx)
try:
file_object = SplFileObjectClass.call_args(interp, args)
return file_object
except OSError, e:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileInfo::openFile(): %s" % os.strerror(e.errno))]))
def _get_pathname(interp, this):
return interp.space.wrap(this.path_name)
def _set_pathname(interp, this, value):
raise NotImplementedError()
def _get_filename(interp, this):
if this.file_name:
i = this.file_name.rfind('/') + 1
assert i >= 0
return interp.space.wrap(this.file_name[i:])
def _set_filename(interp, this, value):
raise NotImplementedError()
SplFileInfoClass = def_class(
'SplFileInfo',
methods=[construct,
spl_toString,
get_basename,
get_extension,
get_filename,
get_path,
get_pathname,
get_group,
get_inode,
get_owner,
get_perms,
get_size,
get_type,
is_dir,
is_link,
is_executable,
is_file,
is_readable,
is_writable,
getatime,
getctime,
getmtime,
get_realpath,
get_linktarget,
openfile],
properties=[GetterSetterWrapper(_get_pathname, _set_pathname,
"pathName", consts.ACC_PRIVATE),
GetterSetterWrapper(_get_filename, _set_filename,
"fileName", consts.ACC_PRIVATE), ],
instance_class=W_SplFileInfo
)
SFO_DROP_NEW_LINE = 1
SFO_READ_AHEAD = 2
SFO_SKIP_EMPTY = 4
SFO_READ_CSV = 8
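# These module-level flags mirror the DROP_NEW_LINE, READ_AHEAD, SKIP_EMPTY and
# READ_CSV class constants registered on SplFileObject below and are combined
# bitwise, e.g. (illustrative only):
#   flags = SFO_DROP_NEW_LINE | SFO_READ_AHEAD   # == 3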
def _sfo_readline(interp, sfo):
if sfo.open_mode not in ('w', 'a', 'x', 'c'):
return sfo.w_res.readline(sfo.flags & SFO_DROP_NEW_LINE)
else:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap("SplFileObject: File cannot be read")]))
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject), str, Optional(str),
Optional(bool), Optional(Nullable(StreamContextArg(None)))],
name='SplFileObject::__construct',
error_handler=handle_as_exception)
def sfo_construct(interp, this, filename, open_mode='r',
use_include_path=False, w_ctx=None):
this.file_name = filename
this.path_name = rpath.realpath(filename)
this.delimiter = ","
this.enclosure = '"'
this.flags = 0
this.open_mode = open_mode
this.use_include_path = use_include_path
this.w_res = None
this.max_line_len = 0
if w_ctx:
if not interp.space.is_resource(w_ctx):
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileObject::__construct() expects "
"parameter 4 to be resource, %s given"
% interp.space.get_type_name(w_ctx.tp).lower())]))
assert filename is not None
if os.path.isdir(filename):
raise PHPException(k_LogicException.call_args(
interp, [interp.space.wrap(
"Cannot use SplFileObject with directories"
)]))
try:
this.w_res = _fopen(interp.space, filename, this.open_mode,
use_include_path, w_ctx)
if this.w_res == interp.space.w_False:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileObject::__construct(): Failed to open stream")]))
except FopenError as e:
raise PHPException(k_RuntimeException.call_args(interp,
[interp.space.wrap(e.reasons.pop())]))
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::rewind')
def sfo_rewind(interp, this):
try:
this.w_res.rewind()
except OSError, e:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileObject::rewind(): %s" % os.strerror(e.errno))]))
if this.flags & SFO_READ_AHEAD:
_sfo_readline(interp, this)
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::valid')
def sfo_valid(interp, this):
return interp.space.newbool(not this.w_res.feof())
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject), int],
name='SplFileObject::seek')
def sfo_seek(interp, this, line_pos):
if line_pos < 0:
raise PHPException(k_LogicException.call_args(
interp, [interp.space.wrap(
"SplFileObject::seek(): Can't seek file %s "
"to negative line %d" % (this.file_name, line_pos))]))
this.w_res.seek_to_line(line_pos, this.flags & SFO_DROP_NEW_LINE)
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::getChildren')
def sfo_get_children(interp, this):
return interp.space.w_Null
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::hasChildren')
def sfo_has_children(interp, this):
return interp.space.w_False
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject), str, Optional(int)],
name='SplFileObject::fwrite')
def sfo_fwrite(interp, this, data, length=-1):
try:
if length > 0:
n = this.w_res.write(data, length)
else:
n = this.w_res.writeall(data)
this.w_res.flush()
return interp.space.newint(n)
except IOError:
return interp.space.w_Null
except ValueError:
return interp.space.w_Null
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::fgetc')
def sfo_fgetc(interp, this):
w_res = this.w_res
assert isinstance(w_res, W_FileResource)
res = w_res.read(1)
if w_res.feof():
return interp.space.w_False
if res == os.linesep:
w_res.cur_line_no += 1
return interp.space.newstr(res)
def _fgets(interp, this):
line = _sfo_readline(interp, this)
w_res = this.w_res
assert isinstance(w_res, W_FileResource)
if not line:
w_res.eof = True
return interp.space.w_False
return interp.space.newstr(line)
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject), 'args_w'],
name='SplFileObject::fgets',
error_handler=handle_as_exception)
def sfo_fgets(interp, this, args_w=[]):
if len(args_w) != 0:
interp.space.ec.warn("SplFileObject::fgets() expects exactly 0 "
"parameters, %d given" % len(args_w))
return interp.space.w_Null
try:
return _fgets(interp, this)
except IOError:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileObject::fgets(): File cannot be read")]))
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::getCurrentLine',
error_handler=handle_as_exception)
def sfo_get_current_line(interp, this):
try:
return _fgets(interp, this)
except IOError:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileObject::fgets(): File cannot be read")]))
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::key')
def sfo_key(interp, this):
w_res = this.w_res
assert isinstance(w_res, W_FileResource)
return interp.space.newint(w_res.cur_line_no)
def _current(interp, this):
w_res = this.w_res
assert isinstance(w_res, W_FileResource)
res = w_res.cur_line
if not res:
res = _sfo_readline(interp, this)
return interp.space.wrap(res)
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::current')
def sfo_current(interp, this):
return _current(interp, this)
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::__toString')
def sfo_tostring(interp, this):
return _current(interp, this)
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::next')
def sfo_next(interp, this):
w_res = this.w_res
assert isinstance(w_res, W_FileResource)
w_res.cur_line = None
if this.flags & SFO_READ_AHEAD:
_sfo_readline(interp, this)
w_res.cur_line_no += 1
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::eof')
def sfo_eof(interp, this):
return interp.space.newbool(this.w_res.feof())
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::fflush')
def sfo_fflush(interp, this):
res = this.w_res.flush()
return interp.space.newbool(res)
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::fstat')
def sfo_fstat(interp, this):
return _fstat(interp.space, this.w_res)
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::ftell')
def sfo_ftell(interp, this):
pos = this.w_res.tell()
return interp.space.newint(pos)
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject), int],
name='SplFileObject::ftruncate')
def sfo_ftruncate(interp, this, size):
res = this.w_res.truncate(size)
return interp.space.newbool(res)
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject), int, Optional(int)],
name='SplFileObject::fseek')
def sfo_fseek(interp, this, offset, whence=0):
return _fseek(interp.space, this.w_res, offset, whence)
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject), int, Optional(int)],
name='SplFileObject::fpassthru')
def sfo_fpassthru(interp, this, offset, whence=0):
bytes_thru = this.w_res.passthru()
return interp.space.newint(bytes_thru)
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::getMaxLineLen')
def sfo_get_max_line_len(interp, this):
return interp.space.newint(this.max_line_len)
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject), int],
name='SplFileObject::setMaxLineLen',
error_handler=handle_as_exception)
def sfo_set_max_line_len(interp, this, max_len):
raise NotImplementedError
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::fgetss')
def sfo_fgetss(interp, this):
raise NotImplementedError
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::fgetcsv')
def sfo_fgetcsv(interp, this):
raise NotImplementedError
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::fputcsv')
def sfo_fputcsv(interp, this):
raise NotImplementedError
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::flock')
def sfo_flock(interp, this):
raise NotImplementedError
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::fscanf')
def sfo_fscanf(interp, this):
raise NotImplementedError
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::getCsvControl')
def sfo_get_csv_control(interp, this):
raise NotImplementedError
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::setCsvControl')
def sfo_set_csv_control(interp, this):
raise NotImplementedError
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::getFlags')
def sfo_get_flags(interp, this):
return interp.space.wrap(this.flags)
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject), int],
name='SplFileObject::setFlags')
def sfo_set_flags(interp, this, flags):
this.flags = flags
def _get_openmode(interp, this):
return interp.space.wrap(this.open_mode)
def _set_openmode(interp, this, w_value):
raise NotImplementedError()
def _get_delimiter(interp, this):
return interp.space.wrap(this.delimiter)
def _set_delimiter(interp, this, w_value):
raise NotImplementedError()
def _get_enclosure(interp, this):
return interp.space.wrap(this.enclosure)
def _set_enclosure(interp, this, w_value):
raise NotImplementedError()
SplFileObjectClass = def_class(
'SplFileObject',
[sfo_construct, sfo_rewind, sfo_valid, sfo_key, sfo_current, sfo_next,
sfo_seek, sfo_get_children, sfo_has_children, sfo_fwrite, sfo_eof,
sfo_fgets, sfo_fgetc, sfo_tostring, sfo_get_max_line_len, sfo_fgetss,
sfo_set_max_line_len, sfo_fflush, sfo_fgetcsv, sfo_flock, sfo_fputcsv,
sfo_fscanf, sfo_fseek, sfo_fstat, sfo_ftell, sfo_ftruncate,
sfo_get_csv_control, sfo_set_csv_control, sfo_get_flags, sfo_set_flags,
sfo_get_current_line, sfo_fpassthru],
properties=[GetterSetterWrapper(_get_openmode, _set_openmode,
"openMode", consts.ACC_PRIVATE),
GetterSetterWrapper(_get_delimiter, _set_delimiter,
"delimiter", consts.ACC_PRIVATE),
GetterSetterWrapper(_get_enclosure, _set_enclosure,
"enclosure", consts.ACC_PRIVATE), ],
constants=[('DROP_NEW_LINE', W_IntObject(1)), ('READ_AHEAD', W_IntObject(2)),
('SKIP_EMPTY', W_IntObject(4)), ('READ_CSV', W_IntObject(8))],
implements=["RecursiveIterator", "SeekableIterator"],
instance_class=W_SplFileObject,
extends='SplFileInfo',)
|
|
from django.test import TestCase
from survey.forms.question import *
from survey.models import Batch
from survey.models.question import Question
from survey.models.householdgroups import HouseholdMemberGroup
class QuestionFormTest(TestCase):
def setUp(self):
self.batch = Batch.objects.create(name='Batch A',description='description')
self.household_member_group = HouseholdMemberGroup.objects.create(name='Age 4-5', order=1)
self.question_module = QuestionModule.objects.create(name="Education")
self.form_data = {
'batch': self.batch.id,
'text': 'whaat?',
'answer_type': Question.NUMBER,
'identifier': 'ID 1',
'options':"some option text",
'group' : self.household_member_group.id,
'module' : self.question_module.id
}
def test_valid(self):
question_form = QuestionForm(self.form_data)
question_form.is_valid()
self.assertTrue(question_form.is_valid())
def test_invalid(self):
question_form = QuestionForm()
self.assertFalse(question_form.is_valid())
def test_question_form_fields(self):
question_form = QuestionForm()
fields = ['module', 'text', 'answer_type', 'group']
[self.assertIn(field, question_form.fields) for field in fields]
def test_question_form_has_tuple_of_all_question_modules_as_choices(self):
health_module = QuestionModule.objects.create(name="Health")
education_module = QuestionModule.objects.create(name="Education")
question_modules = [health_module, education_module]
question_form = QuestionForm()
[self.assertIn((module.id, module.name), question_form.fields['module'].choices) for module in question_modules]
def test_question_form_has_no_choices_if_there_are_no_question_modules(self):
QuestionModule.objects.all().delete()
question_form = QuestionForm()
self.assertEqual(0, len(question_form.fields['module'].choices))
def test_should_know_household_member_group_id_and_name_tuple_is_the_group_choice(self):
question_form = QuestionForm(self.form_data)
self.assertEqual(question_form.fields['group'].choices, [(self.household_member_group.id, self.household_member_group.name)])
def test_should_not_save_multichoice_question_if_no_options_given(self):
form_data = self.form_data.copy()
form_data['answer_type'] = Question.MULTICHOICE
form_data['options']=''
question_form = QuestionForm(form_data)
self.assertFalse(question_form.is_valid())
expected_form_error = 'Question Options missing.'
self.assertEqual(1, len(question_form.errors['answer_type']))
self.assertEqual(expected_form_error, question_form.errors['answer_type'][0])
def test_should_save_options_and_batch_attached_to_questions_if_supplied(self):
form_data = self.form_data.copy()
form_data['answer_type'] = Question.MULTICHOICE
form_data['options']=['option 1', 'option 2']
question_form = QuestionForm(form_data)
self.assertTrue(question_form.is_valid())
batch = Batch.objects.create()
question = question_form.save(batch=batch, group=[self.household_member_group.id])
self.assertEqual(1, question.batches.all().count())
self.assertEqual(batch, question.batches.all()[0])
options = question.options.all()
self.assertEqual(2, options.count())
self.assertIn(QuestionOption.objects.get(text=form_data['options'][0]), options)
self.assertIn(QuestionOption.objects.get(text=form_data['options'][1]), options)
def test_should_save_questions_and_options_even_if_batch_is_not_supplied(self):
form_data = self.form_data.copy()
form_data['answer_type'] = Question.MULTICHOICE
form_data['options']=['option 1', 'option 2']
question_form = QuestionForm(form_data)
self.assertTrue(question_form.is_valid())
question = question_form.save(group=[self.household_member_group.id])
self.assertEqual(0, len(question.batches.all()))
options = question.options.all()
self.assertEqual(2, options.count())
self.assertIn(QuestionOption.objects.get(text=form_data['options'][0]), options)
self.assertIn(QuestionOption.objects.get(text=form_data['options'][1]), options)
def test_should_edit_options_text_and_order_of_question_if_supplied(self):
form_data = self.form_data.copy()
form_data['answer_type'] = Question.MULTICHOICE
form_data['options']=['option 1', 'option 2']
question_form = QuestionForm(form_data)
question = question_form.save(group=[self.household_member_group.id])
form_data['options'] = ['option 2', 'option aaaaaaa 1']
question_form = QuestionForm(instance=question, data=form_data)
edited_question = question_form.save(group=[self.household_member_group.id])
options = question.options.all()
self.assertEqual(2, options.count())
self.assertEqual(QuestionOption.objects.get(text=form_data['options'][0], order=1), options[0])
self.assertEqual(QuestionOption.objects.get(text=form_data['options'][1], order=2), options[1])
self.failIf(QuestionOption.objects.filter(text='option 1'))
self.assertEqual(question.id, edited_question.id)
def test_should_not_save_options_if_not_multichoice_even_if_options_supplied(self):
form_data = self.form_data.copy()
form_data['answer_type'] = Question.TEXT
form_data['options']=['some option question']
question_form = QuestionForm(form_data)
self.assertTrue(question_form.is_valid())
question = question_form.save(group=[self.household_member_group.id])
self.assertEqual(0, question.batches.all().count())
self.assertEquals(0, question.options.all().count())
def test_should_filter_options_not_supplied(self):
form_data = self.form_data.copy()
form_data['answer_type'] = Question.TEXT
del form_data['options']
question_form = QuestionForm(form_data)
self.assertTrue(question_form.is_valid())
question = question_form.save(group=[self.household_member_group.id])
self.assertEqual(0, question.batches.all().count())
self.assertEquals(0, question.options.all().count())
def test_form_should_not_be_valid_for_subquestion_if_same_subquestion_already_exist(self):
question = Question.objects.create(text="Question 1?", answer_type=Question.NUMBER, order=1,
group=self.household_member_group, identifier='Q1')
sub_question = Question.objects.create(text="this is a sub question", answer_type=Question.NUMBER,
subquestion=True, parent=question, group=self.household_member_group,
identifier='Q2')
question.batches.add(self.batch)
sub_question.batches.add(self.batch)
form_data = self.form_data.copy()
form_data['text'] = sub_question.text
form_data['answer_type'] = sub_question.answer_type
del form_data['options']
question_form = QuestionForm(data=form_data, parent_question=sub_question.parent)
self.assertFalse(question_form.is_valid())
message= "Sub question for this question with this text already exists."
self.assertIn(message, question_form.errors.values()[0])
def test_form_has_parent_groups_only_if_parent_question_is_supplied(self):
question = Question.objects.create(text="Question 1?", answer_type=Question.NUMBER, order=1,
group=self.household_member_group, identifier='Q1')
another_member_group = HouseholdMemberGroup.objects.create(name='Age 6-7', order=2)
question_form = QuestionForm(parent_question=question)
self.assertIn((self.household_member_group.id, self.household_member_group.name), question_form.fields['group'].choices)
self.assertNotIn((another_member_group.id, another_member_group.name), question_form.fields['group'].choices)
def test_form_has_no_groups_only_if_parent_question_has_no_group_and_is_supplied(self):
question = Question.objects.create(text="Question 1?", answer_type=Question.NUMBER, order=1, identifier='Q1')
another_member_group = HouseholdMemberGroup.objects.create(name='Age 6-7', order=2)
question_form = QuestionForm(parent_question=question)
self.assertNotIn((self.household_member_group.id, self.household_member_group.name), question_form.fields['group'].choices)
self.assertNotIn((another_member_group.id, another_member_group.name), question_form.fields['group'].choices)
def test_form_has_all_groups_only_if_no_parent_question_is_supplied(self):
question = Question.objects.create(text="Question 1?", answer_type=Question.NUMBER, order=1,
group=self.household_member_group, identifier='Q1')
another_member_group = HouseholdMemberGroup.objects.create(name='Age 6-7', order=2)
question_form = QuestionForm()
self.assertIn((self.household_member_group.id, self.household_member_group.name), question_form.fields['group'].choices)
self.assertIn((another_member_group.id, another_member_group.name), question_form.fields['group'].choices)
def test_form_is_invalid_if_parent_question_group_is_different_from_subquestion_group(self):
another_member_group = HouseholdMemberGroup.objects.create(name='Age 6-7', order=2)
question = Question.objects.create(text="Question 1?", answer_type=Question.NUMBER, order=1,
group=another_member_group, identifier='Q1')
question_form = QuestionForm(parent_question=question, data=self.form_data)
self.assertFalse(question_form.is_valid())
error_message = "Subquestions cannot have a different group from its parent."
self.assertEqual([error_message], question_form.errors['group'])
def test_form_is_invalid_if_module_not_selected(self):
form_data = self.form_data.copy()
form_data['module'] = ''
question_form = QuestionForm(form_data)
self.assertFalse(question_form.is_valid())
def test_form_has_parent_module_only_if_parent_question_has_one(self):
question = Question.objects.create(text="Question 1?", answer_type=Question.NUMBER, order=1,
module=self.question_module, identifier='Q1')
another_module = QuestionModule.objects.create(name="haha")
question_form = QuestionForm(parent_question=question)
self.assertIn((self.question_module.id, self.question_module.name), question_form.fields['module'].choices)
self.assertNotIn((another_module.id, another_module.name), question_form.fields['module'].choices)
def test_form_has_all_module_if_parent_question_has_no_module(self):
question = Question.objects.create(text="Question 1?", answer_type=Question.NUMBER, order=1,
identifier='Q1')
another_module = QuestionModule.objects.create(name="haha")
question_form = QuestionForm(parent_question=question)
self.assertEqual(2, len(question_form.fields['module'].choices))
self.assertIn((self.question_module.id, self.question_module.name), question_form.fields['module'].choices)
self.assertIn((another_module.id, another_module.name), question_form.fields['module'].choices)
def test_form_has_all_module_if_parent_question_is_not_supplied(self):
another_module = QuestionModule.objects.create(name="haha")
question_form = QuestionForm()
self.assertEqual(2, len(question_form.fields['module'].choices))
self.assertIn((self.question_module.id, self.question_module.name), question_form.fields['module'].choices)
self.assertIn((another_module.id, another_module.name), question_form.fields['module'].choices)
def test_form_is_invalid_if_parent_question_module_is_different_from_subquestion_module(self):
another_module = QuestionModule.objects.create(name="haha")
question = Question.objects.create(text="Question 1?", answer_type=Question.NUMBER, order=1,
module=another_module, identifier='Q1')
question_form = QuestionForm(parent_question=question, data=self.form_data)
self.assertFalse(question_form.is_valid())
error_message = "Subquestions cannot have a different module from its parent."
self.assertEqual([error_message], question_form.errors['module'])
def test_form_is_invalid_if_trying_to_add_duplicate_subquestion_under_question(self):
question = Question.objects.create(text="Question 1?", answer_type=Question.NUMBER, order=1,
group=self.household_member_group, identifier='Q1')
sub_question_data = {'text': 'Subquestion 1?',
'answer_type':Question.NUMBER,
'group': self.household_member_group,
'identifier': 'ID 1',
'subquestion': True,
'parent': question}
sub_question = Question.objects.create(**sub_question_data)
error_message = 'Sub question for this question with this text already exists.'
sub_question_data['group'] = self.household_member_group.id
question_form = QuestionForm(parent_question=question, data=sub_question_data)
is_valid = question_form.is_valid()
self.assertFalse(is_valid)
self.assertIn(error_message, question_form.errors['text'])
|
|
# coding=utf-8
# Copyright 2022 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Combinators for composing layers."""
import copy
from trax import fastmath
from trax.fastmath import numpy as jnp
from trax.layers import base
from trax.layers.base import Fn
from trax.shapes import ShapeDtype
class Serial(base.Layer):
"""Combinator that applies layers serially (by function composition).
This combinator is commonly used to construct deep networks, e.g., like this::
mlp = tl.Serial(
tl.Dense(128),
tl.Relu(),
tl.Dense(10),
)
A Serial combinator uses stack semantics to manage data for its sublayers.
Each sublayer sees only the inputs it needs and returns only the outputs it
has generated. The sublayers interact via the data stack. For instance, a
sublayer k, following sublayer j, gets called with the data stack in the
state left after layer j has applied. The Serial combinator then:
- takes n_in items off the top of the stack (n_in = k.n_in) and calls
layer k, passing those items as arguments; and
- takes layer k's n_out return values (n_out = k.n_out) and pushes
them onto the data stack.
A Serial instance with no sublayers acts as a special-case (but useful)
1-input 1-output no-op.
"""
def __init__(self, *sublayers, name=None, sublayers_to_print=None):
super().__init__(
name=name, sublayers_to_print=sublayers_to_print)
sublayers = _ensure_flat(sublayers)
self._sublayers = sublayers
self._n_layers = len(sublayers)
if sublayers:
self._n_in, self._n_out = self._n_inputs_n_outputs(sublayers)
self._weights = tuple(None for l in sublayers)
self._state = tuple(None for l in sublayers)
def forward(self, xs):
"""Executes this layer as part of a forward pass through the model."""
self._validate_forward_inputs(xs)
if not self.sublayers: # No-op: outputs = inputs
return xs
state, weights = self.state, self.weights
rngs = _split_rngs(self.rng, self._n_layers)
stack = xs
new_state = []
n_layers = self._n_layers
if len(weights) != n_layers:
raise ValueError(
f'Number of weight elements ({len(weights)}) does not equal '
f'number of sublayers ({n_layers}).')
if len(state) != n_layers:
raise ValueError(
f'Number of state elements ({len(state)}) does not equal '
f'number of sublayers ({n_layers}).')
for layer, w, s, rng in zip(self.sublayers, weights, state, rngs):
inputs = inputs_from_stack(stack, layer.n_in)
outputs, s = layer.pure_fn(inputs, w, s, rng, use_cache=True)
stack = outputs_onto_stack(outputs, stack, layer.n_in)
new_state.append(s)
self.state = tuple(new_state)
return stack
# pylint: disable=protected-access
def init_weights_and_state(self, input_signature):
"""Initializes weights and state for inputs with the given signature."""
weights = []
states = []
# In the code below, stack, inputs, and outputs are abstract (shapes and
# dtypes), but weights and states are non-abstract actual values.
stack = input_signature
for sublayer in self.sublayers:
inputs = inputs_from_stack(stack, sublayer.n_in)
weights_or_cache_marker, state_or_cache_marker = (
sublayer.init(inputs, use_cache=True))
outputs, _ = sublayer._forward_abstract(inputs)
stack = outputs_onto_stack(outputs, stack, sublayer.n_in)
weights.append(weights_or_cache_marker)
states.append(state_or_cache_marker)
self.state = tuple(states)
self.weights = tuple(weights)
# pylint: enable=protected-access
def _n_inputs_n_outputs(self, layers):
del self
running_max = 0
running_total = 0
for layer in layers:
running_total += layer.n_in
running_max = max(running_max, running_total)
running_total -= layer.n_out
return running_max, (running_max - running_total)
def _validate_forward_inputs(self, xs):
if not isinstance(xs, (tuple, list)) and self._n_in != 1:
raise TypeError(f'Serial.forward input must be a tuple or list; '
f'instead got {type(xs)}.')
# TODO(jonni): Include full xs (or shape) in error message?
len_xs = 1 if isinstance(xs, jnp.ndarray) else len(xs)
if len_xs < self.n_in:
raise ValueError(
f'Number of inputs ({len_xs}) to Serial.forward less than n_in '
f'({self.n_in}).')
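# Hedged worked example of the stack bookkeeping in _n_inputs_n_outputs above
# (the layer shapes are illustrative, not taken from real trax layers):
#   sublayers: A(n_in=2, n_out=1), B(n_in=1, n_out=3), C(n_in=2, n_out=1)
#   after A: running_total = 2 - 1 = 1,      running_max = 2
#   after B: running_total = 1 + 1 - 3 = -1, running_max = 2
#   after C: running_total = -1 + 2 - 1 = 0, running_max = 2
#   => Serial(A, B, C) has n_in = 2 and n_out = 2 - 0 = 2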
class Parallel(base.Layer):
"""Combinator that applies a list of layers in parallel to its inputs.
Layers in the list apply to successive spans of inputs, where the spans are
determined by how many inputs each layer takes. The resulting output is the
(flattened) concatenation of the respective layer outputs.
For example, suppose one has three layers:
- F: 1 input, 1 output
- G: 3 inputs, 1 output
- H: 2 inputs, 2 outputs (h1, h2)
Then Parallel(F, G, H) will take 6 inputs and give 4 outputs:
- inputs: a, b, c, d, e, f
- outputs: F(a), G(b, c, d), h1, h2 where h1, h2 = H(e, f)
As an important special case, a None argument to Parallel acts as if it takes
one argument, which it leaves unchanged. (It acts as a one-arg no-op.) For
example:
Parallel(None, F)
creates a layer that passes its first input unchanged and applies F to the
following input(s).
"""
def __init__(self, *sublayers, name=None):
"""The constructor.
Args:
*sublayers: A list of sublayers.
name: Descriptive name for this layer.
Returns:
A new layer in which each of the given sublayers applies to its
corresponding span of elements in the dataflow stack.
"""
super().__init__(name=name)
sublayers = self._validate(sublayers)
self._n_layers = len(sublayers)
self._sublayers = sublayers
self._n_in = sum(l.n_in for l in sublayers)
self._n_out = sum(l.n_out for l in sublayers)
self._weights = tuple(None for l in sublayers)
self._state = tuple(None for l in sublayers)
def forward(self, inputs):
"""Executes this layer as part of a forward pass through the model."""
n_layers, layers = self._n_layers, self.sublayers
sublayer_inputs = self._allot_to_sublayers(inputs)
state, weights = self.state, self.weights
rngs = _split_rngs(self.rng, n_layers)
if len(sublayer_inputs) != n_layers:
raise ValueError(
f'Number of inputs for sublayers ({len(sublayer_inputs)}) does not equal '
f'number of sublayers ({n_layers}).')
if len(weights) != n_layers:
raise ValueError(
f'Number of weight elements ({len(weights)}) does not equal '
f'number of sublayers ({n_layers}).')
if len(state) != n_layers:
raise ValueError(
f'Number of state elements ({len(state)}) does not equal '
f'number of sublayers ({n_layers}).')
if len(rngs) != n_layers:
raise ValueError(
f'Number of rngs ({len(rngs)}) does not equal '
f'number of sublayers ({n_layers}).')
outputs = []
new_state = []
for layer, x, w, s, r in zip(layers, sublayer_inputs, weights, state, rngs):
# Note that zip silently truncates its result if lengths don't match.
sub_outputs, sub_state = layer.pure_fn(x, w, s, r, use_cache=True)
if layer.n_out == 1:
outputs.append(sub_outputs)
else:
outputs.extend(sub_outputs)
new_state.append(sub_state)
output = outputs[0] if self.n_out == 1 else tuple(outputs)
self.state = tuple(new_state)
return output
def init_weights_and_state(self, input_signature):
"""Initializes weights and state for inputs with the given signature."""
sublayer_signatures = self._allot_to_sublayers(input_signature)
inits = [layer.init(signature, use_cache=True)
for layer, signature
in zip(self.sublayers, sublayer_signatures)]
if inits:
weights, state = tuple(zip(*inits))
self.state = state
self.weights = weights
def _validate(self, layers):
if not layers or len(layers) < 2:
raise ValueError(
f'layers ({layers}) must be a list with at least two elements')
layers = list(layers) # Ensure we can modify layers.
for i, obj in enumerate(layers):
if obj is None or obj == []: # pylint: disable=g-explicit-bool-comparison
layers[i] = Serial(None)
elif isinstance(obj, (list, tuple)):
layers[i] = Serial(obj)
else:
if not isinstance(obj, base.Layer):
raise ValueError(
f'Found nonlayer object ({obj}) in layers list: [{layers}]')
if layers[i].n_in == 0:
raise ValueError(
f'Sublayer with n_in = 0 not allowed in Parallel: {layers[i]}')
return layers
def _allot_to_sublayers(self, inputs):
"""Divides Parallel's inputs for use by the sublayers.
Args:
inputs: Tuple of ndarrays or ShapeDtype instances.
Returns:
A tuple that partitions this layer's inputs among its sublayers.
Sublayers that take one argument get that argument directly. All other
sublayers get a tuple of items.
"""
start, end = 0, 0
sub_inputs = []
for layer in self.sublayers:
n_in = layer.n_in
end = start + n_in
if n_in == 1:
sub_inputs.append(inputs[start])
else:
sub_inputs.append(inputs[start:end])
start = end
return tuple(sub_inputs)
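# Hedged sketch of how _allot_to_sublayers splits inputs for the F/G/H example
# in the class docstring (F: n_in=1, G: n_in=3, H: n_in=2):
#   inputs = (a, b, c, d, e, f)
#   -> (a, (b, c, d), (e, f))   # one entry per sublayer; 1-input layers get a bare item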
class Concatenate(base.Layer):
"""Concatenates a number of tensors into a single tensor.
For example::
x = np.array([1, 2])
y = np.array([3, 4])
z = np.array([5, 6])
concat3 = tl.Concatenate(n_items=3)
z = concat3((x, y, z)) # z = [1, 2, 3, 4, 5, 6]
Use the `axis` argument to specify on which axis to concatenate the tensors.
By default it's the last axis, `axis=-1`, and `n_items=2`.
"""
def __init__(self, n_items=2, axis=-1):
name = 'Concatenate' if axis == -1 else f'Concatenate_axis{axis}'
super().__init__(n_in=n_items, name=name)
self._n_items = n_items
self._axis = axis
def forward(self, xs):
"""Executes this layer as part of a forward pass through the model."""
return jnp.concatenate(xs, self._axis)
class Split(base.Layer):
"""Splits the input into n items along an axis."""
def __init__(self, n_items=2, axis=-1):
super().__init__(n_out=n_items)
self._n_items = n_items
self._axis = axis
def forward(self, inputs):
"""Executes this layer as part of a forward pass through the model."""
return tuple(jnp.split(inputs, self._n_items, self._axis))
def _scan(f, xs, init_value, axis=0, remat=False):
"""Scans the f over the given axis of xs.
In pseudo-python, the scan function would look as follows:
def scan(f, xs, init_value, axis):
xs = [xs[..., i, ...] for i in range(xs.shape[axis])]
cur_value = init_value
ys = []
for x in xs:
y, cur_value = f(x, cur_value)
ys.append(y)
return np.stack(ys, axis), cur_value
Args:
f: function (x, carry) -> (y, new_carry)
xs: tensor, x will be xs slices on axis
init_value: tensor, initial value of the carry-over
axis: int, the axis on which to slice xs
remat: whether to re-materialize f
Returns:
A pair (ys, last_value) as described above.
"""
def swapaxes(x):
transposed_axes = list(range(len(x.shape)))
transposed_axes[axis] = 0
transposed_axes[0] = axis
return jnp.transpose(x, axes=transposed_axes)
if axis != 0:
xs = fastmath.nested_map(swapaxes, xs)
def transposed_f(c, x):
y, d = f(x, c)
return d, y
if remat:
transposed_f = fastmath.remat(transposed_f)
last_value, ys = fastmath.scan(transposed_f, init_value, xs)
if axis != 0:
ys = fastmath.nested_map(swapaxes, ys)
return ys, last_value
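# Hedged usage sketch for _scan (assumes an active fastmath backend, e.g. jax):
#
#   def cumsum_step(x, carry):
#     total = x + carry
#     return total, total        # emit the running total and carry it forward
#
#   ys, last = _scan(cumsum_step, jnp.array([1, 2, 3, 4, 5]), jnp.array(0))
#   # ys -> [1, 3, 6, 10, 15]; last -> 15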
class Scan(base.Layer):
"""Applies a layer progressively/cumulatively to an axis-derived sequence.
Conceptually, this is a function from a list to a same-length list of partial
(cumulative) results. For instance, a list of values (`[1, 2, 3, 4, 5]`) can
transform to a list of cumulative sums (`[1, 3, 6, 10, 15]`). Functions for
the same concept are called `scan` in Scala, `scanl` in Haskell, and
`accumulate*` in Factor.
In more detail, we assume the layer takes a tuple of inputs of the following
form:
(input1, ..., inputN, carry1, ..., carryM)
and returns:
(output1, ..., outputK, new_carry1, ..., new_carryM)
The scanned version applies the layer iteratively to a tensor treating values
at the given axis as if they were a list. For example, to calculate all
sums of prefixes of a tensor, we can do this::
def add():
def f(input, carry):
res = input + carry
return res, res # output and carry are the same
return tl.Fn('add', f, n_out=2)
Scan(add())([1, 2, 3], 0) = [1, 3, 6], 6
"""
def __init__(self, layer, axis=0, n_carry=1, remat=False, mode='train'):
super().__init__(n_in=layer.n_in, n_out=layer.n_out)
self._sublayers = [layer]
self._n_carry = n_carry
self._axis = axis
self._remat = remat
self._weights = (None,)
self._state = (None, ())
self._mode = mode
@property
def sublayer(self):
"""Returns the unique sublayer managed by this layer."""
return self._sublayers[0]
@property
def state(self):
"""Returns a tuple containing this layer's state."""
return (self.sublayer.state, self._state[1])
@state.setter
def state(self, state):
"""Recursively sets state on this layer the sublayer."""
if isinstance(state, dict) and state == base.GET_STATE_FROM_CACHE:
return
self._state = (None, state[1])
self.sublayer.state = state[0]
def forward(self, inputs):
"""Executes this layer as part of a forward pass through the model."""
weights = self.weights[0]
if isinstance(inputs, list):
inputs = tuple(inputs) # so that inputs structure matches outputs
n_carry = self._n_carry
def scannable_fn(x, carry_and_state): # pylint: disable=invalid-name
carry, state, i = carry_and_state
x_and_carry = x + carry if n_carry > 0 else x
rng = fastmath.random.fold_in(self.rng, i)
res, new_state = self.sublayer.pure_fn(
x_and_carry, weights, state, rng, use_cache=True)
if n_carry > 0:
return (res[:-n_carry], (res[-n_carry:], new_state, i+1))
else:
return (res, ([], new_state, i+1))
if n_carry > 0:
xs = inputs[:-n_carry] # Split input stack into inputs and carry.
xs_carry = inputs[-n_carry:]
if self._mode == 'predict' and self._state[1] is not (): # pylint: disable=literal-comparison
xs_carry = self._state[1]
init = (xs_carry, self.state[0], jnp.array(0, dtype=jnp.int32))
else:
xs_carry = ()
xs, init = inputs, ([], self.state[0], jnp.array(0, dtype=jnp.int32))
ys, (carry, new_state, _) = _scan(scannable_fn, xs, init,
axis=self._axis, remat=self._remat)
res = ys + carry if n_carry > 0 else ys
state_carry = carry if self._mode == 'predict' and n_carry > 0 else ()
self.state = (new_state, state_carry)
return res # Put outputs and carry back on stack.
def init_weights_and_state(self, input_signature):
"""Initializes weights and state for inputs with the given signature."""
n_carry = self._n_carry
if n_carry == 0:
if isinstance(input_signature, (list, tuple)):
layer_sig = [ShapeDtype(_shape_without_axis(x, self._axis), x.dtype)
for x in input_signature]
layer_sig = tuple(layer_sig)
else:
layer_sig = ShapeDtype(_shape_without_axis(input_signature, self._axis),
input_signature.dtype)
weights, state = self.sublayer.init(layer_sig)
self.state = (state, ())
self.weights = (weights,)
else:
xs = input_signature[:-n_carry]
init = input_signature[-n_carry:]
xs_slices = [ShapeDtype(_shape_without_axis(x, self._axis), x.dtype)
for x in xs]
layer_signature = tuple(xs_slices + list(init))
weights, state = self.sublayer.init(layer_signature, use_cache=True)
self.state = (state, ())
self.weights = (weights,)
class Cond(base.Layer):
"""Applies layers conditionally.
For parameters `cond`, `true`, and `false` runs the equivalent of `true(y)
if cond(x) else false(y)`, where `x` is `cond.n_in` elements from front of the
stack and `y` is the rest of the stack.
Exactly one of `true` and `false` functions is executed, so it can be used to
conditionally run long computations. The state of the non-executed function is not
updated. Note that different branches may be executed on different devices
if `cond` returns different values on them.
By default, the `false` function is an identity.
`cond` must return exactly one element: a Boolean value.
`true` and `false` must have the same n_in, and the same n_out.
"""
def __init__(self, cond, true, false=None, name=None):
super(Cond, self).__init__(name=name)
if false is None:
self._identity_false_fun = True
# We don't need this function, but it will be useful for checking if
# 'true' has proper n_in/n_out.
false = Serial()
self._false = false
else:
self._identity_false_fun = False
self._false = false
sublayers = [cond, true, false]
self._sublayers = sublayers
self._n_layers = len(sublayers)
self._cond = cond
self._true = true
if cond.n_out != 1:
raise ValueError(
'cond.n_out must be 1: cond:{}->{}'.format(cond.n_in, cond.n_out))
if true.n_in != false.n_in:
raise ValueError(
'true.n_in and false.n_in must be equal: true:{}->{} ; false:{}->{}'
.format(true.n_in, true.n_out, false.n_in, false.n_out))
if true.n_out != false.n_out:
raise ValueError(
'true.n_out and false.n_out must be equal: true:{}->{} ; false:{}->{}'
.format(true.n_in, true.n_out, false.n_in, false.n_out))
self._n_in = cond.n_in + true.n_in
self._n_out = true.n_out
self._weights = tuple(None for l in sublayers)
self._state = tuple(None for l in sublayers)
# pylint: disable=protected-access
def init_weights_and_state(self, input_signature):
"""Initializes weights and state for inputs with the given signature."""
weights = []
states = []
# In the code below, stack, inputs, and outputs are abstract (shapes and
# dtypes), but weights and states are non-abstract actual values.
stack = _make_tuple(input_signature)
# Inputs/outputs of `cond`.
inputs = inputs_from_stack(stack, self._cond.n_in)
weights_or_cache_marker, state_or_cache_marker = (
self._cond.init(inputs, use_cache=True))
weights.append(weights_or_cache_marker)
states.append(state_or_cache_marker)
self._cond._forward_abstract(inputs)
stack = _make_tuple(outputs_onto_stack([], stack, self._cond.n_in))
# Inputs/outputs of `true` and `false`.
for sublayer in [self._true, self._false]:
inputs = inputs_from_stack(stack, sublayer.n_in)
weights_or_cache_marker, state_or_cache_marker = (
sublayer.init(inputs, use_cache=True))
weights.append(weights_or_cache_marker)
states.append(state_or_cache_marker)
self.state = states
self.weights = weights
# pylint: enable=protected-access
def _validate_forward_inputs(self, xs):
xs = _make_tuple(xs)
if len(xs) < self.n_in:
raise ValueError(
f'Number of inputs ({len(xs)}) to Cond.forward less than n_in '
f'({self.n_in}).')
def forward(self, xs):
"""Executes this layer as part of a forward pass through the model.
Args:
xs: Tensors of as required by the branches of this conditional.
Returns:
Tensors resulting from running the chosen branch.
"""
# TODO(jaszczur): modify; it's a copy from SkippingSerial
self._validate_forward_inputs(xs)
layers_state = self.state
# Get 3 rngs, one for each layer.
rngs = _split_rngs(self.rng, 3)
# Prepare the stack and do some safety checks as in the parent class.
stack = _make_tuple(xs)
weights = self.weights
if len(weights) != 3:
raise ValueError('number of weights ({}) not equal to 3'
.format(len(weights)))
if len(layers_state) != 3:
raise ValueError('length of state ({}) not equal to 3'
.format(len(layers_state)))
def true_func(t):
outputs, new_true_state = self._true.pure_fn(
t[0][0], t[1][0], t[2][0], t[3][0])
# t[2][1] is old_false_state which is not changing if true is executed.
return outputs, (new_true_state, t[2][1])
def false_func(t):
if self._identity_false_fun:
# Memory optimization: we don't need pure_fn call.
return t[0][1], t[2]
outputs, new_false_state = self._false.pure_fn(
t[0][1], t[1][1], t[2][1], t[3][1])
# t[2][1] is old_true_state, which is not changing if false is executed.
return outputs, (t[2][0], new_false_state)
cond_inputs = inputs_from_stack(xs, self._cond.n_in)
cond_output, s = self._cond.pure_fn(cond_inputs, self.weights[0],
self.state[0], rngs[0], use_cache=True)
stack = outputs_onto_stack([], stack, self._cond.n_in)
self._cond.state = s
outputs, both_states = fastmath.cond(
cond_output,
true_func,
false_func,
[(stack, stack),
(self.weights[1], self.weights[2]),
(self.state[1], self.state[2]),
(rngs[1], rngs[2])]
)
stack = outputs_onto_stack([], stack, self._cond.n_in)
# We don't know which (`true` or `false`) branch was run, but both of them
# are adding (n_out) and removing (n_in) the same number of elements of the
# stack (this was checked in __init__). outputs_onto_stack just uses the
# layer's n_in, so we can pass either `true` or `false` to it.
# Note that `outputs` is the actual output of `true` or `false` branch,
# whichever was run, and we add it to the stack in any case.
stack = outputs_onto_stack(outputs, stack, self._true.n_in)
self._true.state = both_states[0]
self._false.state = both_states[1]
return _make_singleitem_or_original(stack)
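# Hedged usage sketch for Cond (the layer names below are illustrative, built
# with Fn from trax.layers.base, not existing trax layers):
#
#   pred = Fn('IsPositive', lambda x: x > 0)   # n_in=1, n_out=1
#   double = Fn('Double', lambda y: y * 2.0)
#   negate = Fn('Negate', lambda y: -y)
#   layer = Cond(pred, double, negate)         # n_in = pred.n_in + double.n_in = 2
#   # layer((x, y)) behaves like: y * 2.0 if x > 0 else -y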
# pylint: disable=invalid-name
def Chunk(layer, chunk_size, pass_unchunkable=True):
"""Executes `layer` using batch chunks of size `chunk_size` to save memory."""
if chunk_size < 1:
return layer
def reshape_to_chunks(x):
chunk_batch = x.shape[0]
size = chunk_size
n_chunks = chunk_batch // size
if chunk_batch % size != 0:
if pass_unchunkable:
n_chunks = 1
size = chunk_batch
else:
raise ValueError(f'Chunk size {size} must divide batch '
f'size {chunk_batch}')
return jnp.reshape(x, [n_chunks, size] + list(x.shape[1:]))
reshape_to_chunks_layer = base.PureLayer(
lambda xs: fastmath.nested_map(reshape_to_chunks, xs),
n_in=layer.n_in, n_out=layer.n_in, name='ReshapeToChunks')
def reshape_from_chunks(x):
batch_size = x.shape[0] * x.shape[1]
return jnp.reshape(x, [batch_size] + list(x.shape[2:]))
reshape_from_chunks_layer = base.PureLayer(
lambda xs: fastmath.nested_map(reshape_from_chunks, xs),
n_in=layer.n_out, n_out=layer.n_out, name='ReshapeFromChunks')
return Serial(
reshape_to_chunks_layer,
Scan(layer, axis=0, n_carry=0, remat=True),
reshape_from_chunks_layer,
)
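# Hedged worked example for Chunk (shapes are illustrative): with a batch of 8
# and chunk_size=2, inputs of shape [8, d] are reshaped to [4, 2, d], `layer`
# is scanned over axis 0 (4 chunks of 2 examples, rematerialized to save
# memory), and the outputs are reshaped back to a leading batch of 8. If the
# batch size were not divisible by chunk_size and pass_unchunkable=True, the
# whole batch would instead be processed as a single chunk.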
def Branch(*layers, name='Branch'):
"""Combinator that applies a list of layers in parallel to copies of inputs.
Each layer in the input list is applied to as many inputs from the stack
as it needs, and their outputs are successively combined on stack.
For example, suppose one has three layers:
- F: 1 input, 1 output
- G: 3 inputs, 1 output
- H: 2 inputs, 2 outputs (h1, h2)
Then Branch(F, G, H) will take 3 inputs and give 4 outputs:
- inputs: a, b, c
- outputs: F(a), G(a, b, c), h1, h2 where h1, h2 = H(a, b)
As an important special case, a None argument to Branch acts as if it takes
one argument, which it leaves unchanged. (It acts as a one-arg no-op.)
Args:
*layers: List of layers.
name: Descriptive name for this layer.
Returns:
A branch layer built from the given sublayers.
"""
if len(layers) == 1:
return layers[0]
parallel_layer = Parallel(*layers)
indices = [list(range(layer.n_in)) for layer in parallel_layer.sublayers]
return Serial(Select(_deep_flatten(indices)), parallel_layer,
name=name, sublayers_to_print=layers)
def Residual(*layers, shortcut=None):
"""Wraps a series of layers with a residual connection.
Args:
*layers: One or more layers, to be applied in series.
shortcut: If None (the usual case), the Residual layer computes the
element-wise sum of the stack-top input with the output of the layer
series. If specified, the `shortcut` layer applies to a copy of the
inputs and (elementwise) adds its output to the output from the main
layer series.
Returns:
A layer representing a residual connection paired with a layer series.
"""
layers = _ensure_flat(layers)
layer = layers[0] if len(layers) == 1 else Serial(layers)
# TODO(jonni): Should we require layer.n_out = 1 and shortcut.n_out = 1?
return Serial(
Branch(shortcut, layer),
Add(), # pylint: disable=no-value-for-parameter
)
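# Hedged usage sketch for Residual (tl.Dense and tl.Relu are only illustrative
# references to layers defined elsewhere in trax):
#
#   block = Residual(tl.Dense(d_model), tl.Relu(), tl.Dense(d_model))
#   # expands to Serial(Branch(None, Serial(...)), Add()), i.e. out = x + f(x)
#   # for the stack-top input x.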
def Select(indices, n_in=None, name=None):
"""Copies, reorders, or deletes stack elements according to `indices`.
Args:
indices: A list or tuple of 0-based indices to select elements relative to
the top of the stack.
n_in: Number of input elements to pop from the stack, and replace with
those specified by `indices`. If not specified, its value will be
calculated as `max(indices) + 1`.
name: Descriptive name for this layer.
Returns:
Tensors, matching the number selected (`n_out = len(indices)`).
Specifically:
- n_out = 0: an empty tuple
- n_out = 1: one tensor (NOT wrapped in a tuple)
- n_out > 1: a tuple of tensors, with n_out items
"""
if n_in is None:
n_in = max(indices) + 1
if name is None:
name = f'Select{indices}'.replace(' ', '')
def select(xs): # pylint: disable=invalid-name
if not isinstance(xs, (tuple, list)):
xs = (xs,)
selected = tuple(xs[i] for i in indices)
return selected[0] if len(selected) == 1 else selected
return base.PureLayer(select, n_in=n_in, n_out=len(indices), name=name)
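# Hedged equivalences for Select, consistent with the helpers defined below:
#   Select([1, 0])       # swaps the top two stack elements, like Swap()
#   Select([0, 0])       # duplicates the top element, like Dup()
#   Select([0], n_in=2)  # keeps the top element and discards the one below it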
def Drop():
"""Drops the top stack element."""
return Fn('Drop', lambda x: (), n_out=0)
def Dup():
"""Duplicates (copies) the top element on the data stack."""
return Fn('Dup', lambda x: (x, x), n_out=2)
def Swap():
"""Swaps the top two stack elements."""
return Fn('Swap', lambda x0, x1: (x1, x0), n_out=2)
def SerialWithSideOutputs(layers, n_side_outputs=1):
"""Serial layer with side outputs.
This layer makes it easier to manage the stack when layers have side outputs.
In the simplest case of layers with n_in=1, n_out=2 and with
n_side_outputs=1, this layer runs the following computation on x::
side_outputs = []
for i in range(len(layers)):
x, side_output = layers[i](x)
side_outputs.append(side_output)
return [x] + side_outputs
In the general case of layers with variable n_in and n_out and
n_side_outputs being a list of N integers, it does the following::
side_outputs = []
for i in range(N):
res = layers[i](cur_stack) # pop this layer's n_in inputs from the stack
cur_stack.extend(res[:-n_side_outputs[i]]) # main outputs go back on the stack
side_outputs.extend(res[-n_side_outputs[i]:]) # side outputs are set aside
return cur_stack + side_outputs
Args:
layers: a list of layers to execute
n_side_outputs: an int or a list of ints, how many outputs of each layer
to put aside
Returns:
A layer that performs the above computation.
"""
if isinstance(n_side_outputs, int):
n_side_outputs = [n_side_outputs] * len(layers)
# Calculate the n_in for this layer.
running_max = 0
running_total = 0
for layer, n_side_output in zip(layers, n_side_outputs):
running_total += layer.n_in
running_max = max(running_max, running_total)
running_total -= layer.n_out - n_side_output
n_in = running_max
# Create the list of layers to run serially.
cur_stack_size = n_in
serial_layers = []
for layer, n_side_output in zip(layers, n_side_outputs):
serial_layers.append(layer)
cur_stack_size += layer.n_out - layer.n_in
# Indices to move n_side_outputs to the back of the stack.
# Don't touch first n_out - n_side_outputs.
move_back_indices = list(range(layer.n_out - n_side_output))
# Then comes the rest of the stack that we're not moving.
move_back_indices += [i + layer.n_out
for i in range(cur_stack_size - layer.n_out)]
# Finally the indices we move.
move_back_indices += [i + layer.n_out - n_side_output
for i in range(n_side_output)]
# Swap them on stack.
serial_layers.append(Select(move_back_indices))
return Serial(serial_layers)
def FlattenList():
"""Flatten lists."""
# TODO(jonni): Consider renaming layer to DeepFlatten.
return Fn('FlattenList', lambda x: tuple(_deep_flatten(x)))
def Add():
"""Adds two tensors."""
return Fn('Add', lambda x0, x1: x0 + x1)
def SubtractTop():
"""Subtracts the first tensor from the second."""
return Fn('SubtractTop', lambda x0, x1: x1 - x0)
def Multiply():
"""Multiplies two tensors."""
return Fn('Multiply', lambda x0, x1: x0 * x1)
def Gate():
"""Returns a gating layer on a (memory, gate, candidate) tuple.
Final update is memory * gate + (1 - gate) * candidate
This gating equation may also be referred to as Highway Network.
Highway Networks: https://arxiv.org/abs/1505.00387
"""
return Fn('Gate', lambda m, g, c: g * m + (1.0 - g) * c)
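# Worked example of the gating equation: with memory m = 0.2, gate g = 0.75 and
# candidate c = 1.0, Gate() returns 0.75 * 0.2 + 0.25 * 1.0 = 0.4. A gate of 1.0
# passes the memory through unchanged, while a gate of 0.0 returns the candidate.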
class Cache(base.Layer):
"""Applies a layer on the first run and returns the outputs on next calls."""
def __init__(self, layer):
super().__init__(n_in=layer.n_in, n_out=layer.n_out)
self._sublayers = [layer]
@property
def sublayer(self):
"""Returns the unique sublayer managed by this layer."""
return self._sublayers[0]
@property
def state(self):
"""Returns a tuple containing this layer's state; may be empty."""
return self._state
@state.setter
def state(self, state):
"""Recursively sets state on this layer and all sublayers."""
if isinstance(state, dict) and state == base.GET_STATE_FROM_CACHE:
return
self._state = state
self.sublayer.state = state[1]
def init_weights_and_state(self, input_signature):
"""Initializes weights and state for inputs with the given signature."""
weights, layer_state = self.sublayer.init(input_signature, use_cache=True)
self.state = ((), layer_state)
self._weights = (weights,)
def forward(self, inputs):
"""Executes this layer as part of a forward pass through the model.
Args:
inputs: Tensors required by the sublayer.
Returns:
Tensors resulting from running the sublayer the first time.
"""
state, weights = self.state, self.weights[0]
    if isinstance(state[0], tuple) and not state[0]:  # empty tuple: nothing cached yet
res, layer_state = self.sublayer.pure_fn(
inputs, weights, state[1], self.rng)
self.state = (res, layer_state)
return res
else:
return state[0]
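# Usage sketch (EmbeddingTable is a hypothetical sublayer): wrapping a layer in
# Cache(...) runs it once and then replays the stored result:
#   cached = Cache(EmbeddingTable)
#   first = cached(inputs)    # runs EmbeddingTable and stores its outputs in state
#   second = cached(inputs)   # returns the stored outputs without recomputation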
class BatchLeadingAxes(base.Layer):
"""Applies a layer after flattening all but n_last_axes_to_keep to batch.
This can be used to make layers accept an arbitrary number of leading
axes (dimensions) as batch. For example, a Convolution layer may normally
only operate on tensors of shape [B, W, H, C]. In this case, the layer
BatchLeadingAxes(Convolution(), n_last_axes_to_keep=3)
will operate on any tensor [..., W, H, C] and treat the leading axes as batch.
"""
def __init__(self, layer, n_last_axes_to_keep=1):
if layer.n_out != 1:
raise ValueError('BatchLeadingAxes currently only works for layers with '
f'n_out = 1, got {layer.n_out}.')
super().__init__(n_in=layer.n_in, n_out=layer.n_out)
self._sublayers = [layer]
self._n_last_axes_to_keep = n_last_axes_to_keep
self._weights = (None,)
self._state = (None,)
@property
def sublayer(self):
"""Returns the unique sublayer managed by this layer."""
return self._sublayers[0]
def forward(self, inputs):
"""Executes this layer as part of a forward pass through the model."""
if self._n_in == 1:
inputs = [inputs]
new_inputs = []
for old_input in inputs:
batched_axes_shape = list(old_input.shape[:-self._n_last_axes_to_keep])
batched_shape = [-1] + list(old_input.shape[-self._n_last_axes_to_keep:])
new_inputs.append(jnp.reshape(old_input, batched_shape))
new_inputs = tuple(new_inputs)
if self._n_in == 1:
new_inputs = new_inputs[0]
res, layer_state = self.sublayer.pure_fn(
new_inputs, self.weights[0], self.state[0], self.rng)
self.state = (layer_state,)
return jnp.reshape(res, batched_axes_shape + list(res.shape[1:]))
def init_weights_and_state(self, input_signature):
"""Initializes weights and state for inputs with the given signature."""
if self._n_in == 1 and not isinstance(input_signature, (list, tuple)):
input_signature = (input_signature,)
batched_signature = []
for sub_input_signature in input_signature:
batched_size = 1
for d in sub_input_signature.shape[:-self._n_last_axes_to_keep]:
batched_size *= d
batched_shape = [batched_size] + list(
sub_input_signature.shape[-self._n_last_axes_to_keep:])
batched_signature.append(ShapeDtype(batched_shape,
sub_input_signature.dtype))
if self._n_in == 1:
batched_signature = batched_signature[0]
weights, layer_state = self.sublayer.init(batched_signature, use_cache=True)
self.state = (layer_state,)
self.weights = (weights,)
def Bidirectional(forward_layer, axis=1, merge_layer=Concatenate()):
"""Bidirectional combinator for RNNs.
Args:
forward_layer: A layer, such as `trax.layers.LSTM` or `trax.layers.GRU`.
axis: a time axis of the inputs. Default value is `1`.
merge_layer: A combinator used to combine outputs of the forward
and backward RNNs. Default value is 'trax.layers.Concatenate'.
Example:
Bidirectional(RNN(n_units=8))
Returns:
The Bidirectional combinator for RNNs.
"""
backward_layer = copy.deepcopy(forward_layer)
flip = base.Fn('_FlipAlongTimeAxis', lambda x: jnp.flip(x, axis=axis))
backward = Serial(
flip,
backward_layer,
flip,
)
return Serial(
Branch(forward_layer, backward),
merge_layer,
)
# All module-private helper functions are below.
# pylint: disable=invalid-name
def _deep_flatten(items):
"""Returns a list of objects, flattening sublists/subtuples along the way.
Example: _deep_flatten([1, (2, 3, (4, 5), [6, 7]), [[[8]]]]) would return
the list [1, 2, 3, 4, 5, 6, 7, 8].
Args:
items: An iterable. If elements of this iterable are lists or tuples, they
will be (recursively) flattened until non-list non-tuple objects are
reached.
Returns:
A list of non-list, non-tuple objects.
"""
def _flat_gen(xs):
for x in xs:
if isinstance(x, (list, tuple)):
for y in _flat_gen(x):
yield y
else:
yield x
return list(_flat_gen(items))
def _ensure_sublayers(layers):
"""Ensures that elements in a layer list are layers.
Args:
layers: A tuple or list whose elements can each be a layer, tuple, or list,
and so on recursively.
Returns:
An analogous collection of layers in which embedded layer lists are
wrapped in Serial layer instances.
"""
if not layers: # None or an empty list can signal a no-op.
return Serial(None) # no-op, but still handles shapes and initialization
elif isinstance(layers, (list, tuple)):
sublayers_not_lists = []
for layer in layers:
sublayers_not_lists.append(
Serial(layer) if isinstance(layer, (list, tuple)) else layer)
return sublayers_not_lists
else:
raise TypeError(type(layers))
def _split_rngs(rng, n_copies):
if rng is None:
return (None,) * n_copies
return fastmath.random.split(rng, n_copies)
def inputs_from_stack(stack, n):
"""Returns n inputs from stack."""
stack = _make_tuple(stack)
return _make_singleitem_or_original(stack[:n])
def outputs_onto_stack(outputs, stack, n):
""""Returns the new stack after removing n items and pushing outputs there."""
outputs = _make_tuple(outputs)
stack = _make_tuple(stack)
return _make_singleitem_or_original(outputs + stack[n:])
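# Worked example of the stack helpers: with stack = ('a', 'b', 'c'),
#   inputs_from_stack(stack, 2)           returns ('a', 'b')
#   outputs_onto_stack(('x',), stack, 2)  returns ('x', 'c')
# i.e. the top two items are consumed and replaced by the single output 'x'.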
def _make_tuple(xs):
"""Returns a tuple from a list, a tuple, or a single element."""
if isinstance(xs, (list, tuple)):
return tuple(xs)
else:
return (xs,)
def _make_singleitem_or_original(xs):
"""Returns a single element if possible, or the original list/tuple if not."""
if isinstance(xs, (list, tuple)) and len(xs) == 1:
return xs[0]
else:
return xs
def _shape_without_axis(x, axis):
return x.shape[:axis] + x.shape[axis + 1:]
def _ensure_flat(layers):
"""Ensures that layers is a single flat list of Layer instances."""
if len(layers) == 1 and layers[0] is None:
layers = ()
else:
layers = _deep_flatten(layers)
for obj in layers:
if not isinstance(obj, base.Layer):
raise ValueError(
f'Found nonlayer object ({obj}) in layers: {layers}')
return layers
|
|
# coding: utf-8
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import talon_one
from talon_one.models.loyalty_ledger import LoyaltyLedger # noqa: E501
from talon_one.rest import ApiException
class TestLoyaltyLedger(unittest.TestCase):
"""LoyaltyLedger unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test LoyaltyLedger
            include_optional is a boolean; when False only required
            params are included, when True both required and
            optional params are included. """
# model = talon_one.models.loyalty_ledger.LoyaltyLedger() # noqa: E501
if include_optional :
return LoyaltyLedger(
ledger = talon_one.models.loyalty_sub_ledger.LoyaltySubLedger(
total = 1.337,
total_active_points = 1.337,
total_pending_points = 1.337,
total_spent_points = 1.337,
total_expired_points = 1.337,
transactions = [
talon_one.models.loyalty_ledger_entry.LoyaltyLedgerEntry(
created = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
program_id = 56,
customer_profile_id = '0',
customer_session_id = '0',
event_id = 56,
type = '0',
amount = 1.337,
start_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
expiry_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
name = '0',
sub_ledger_id = '0',
user_id = 56, )
],
expiring_points = [
talon_one.models.loyalty_ledger_entry.LoyaltyLedgerEntry(
created = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
program_id = 56,
customer_profile_id = '0',
customer_session_id = '0',
event_id = 56,
type = '0',
amount = 1.337,
start_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
expiry_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
name = '0',
sub_ledger_id = '0',
user_id = 56, )
],
active_points = [
talon_one.models.loyalty_ledger_entry.LoyaltyLedgerEntry(
created = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
program_id = 56,
customer_profile_id = '0',
customer_session_id = '0',
event_id = 56,
type = '0',
amount = 1.337,
start_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
expiry_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
name = '0',
sub_ledger_id = '0',
user_id = 56, )
],
pending_points = [
talon_one.models.loyalty_ledger_entry.LoyaltyLedgerEntry(
created = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
program_id = 56,
customer_profile_id = '0',
customer_session_id = '0',
event_id = 56,
type = '0',
amount = 1.337,
start_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
expiry_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
name = '0',
sub_ledger_id = '0',
user_id = 56, )
],
expired_points = [
talon_one.models.loyalty_ledger_entry.LoyaltyLedgerEntry(
created = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
program_id = 56,
customer_profile_id = '0',
customer_session_id = '0',
event_id = 56,
type = '0',
amount = 1.337,
start_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
expiry_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
name = '0',
sub_ledger_id = '0',
user_id = 56, )
], ),
sub_ledgers = {
'key' : talon_one.models.loyalty_sub_ledger.LoyaltySubLedger(
total = 1.337,
total_active_points = 1.337,
total_pending_points = 1.337,
total_spent_points = 1.337,
total_expired_points = 1.337,
transactions = [
talon_one.models.loyalty_ledger_entry.LoyaltyLedgerEntry(
created = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
program_id = 56,
customer_profile_id = '0',
customer_session_id = '0',
event_id = 56,
type = '0',
amount = 1.337,
start_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
expiry_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
name = '0',
sub_ledger_id = '0',
user_id = 56, )
],
expiring_points = [
talon_one.models.loyalty_ledger_entry.LoyaltyLedgerEntry(
created = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
program_id = 56,
customer_profile_id = '0',
customer_session_id = '0',
event_id = 56,
type = '0',
amount = 1.337,
start_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
expiry_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
name = '0',
sub_ledger_id = '0',
user_id = 56, )
],
active_points = [
talon_one.models.loyalty_ledger_entry.LoyaltyLedgerEntry(
created = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
program_id = 56,
customer_profile_id = '0',
customer_session_id = '0',
event_id = 56,
type = '0',
amount = 1.337,
start_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
expiry_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
name = '0',
sub_ledger_id = '0',
user_id = 56, )
],
pending_points = [
talon_one.models.loyalty_ledger_entry.LoyaltyLedgerEntry(
created = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
program_id = 56,
customer_profile_id = '0',
customer_session_id = '0',
event_id = 56,
type = '0',
amount = 1.337,
start_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
expiry_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
name = '0',
sub_ledger_id = '0',
user_id = 56, )
],
expired_points = [
talon_one.models.loyalty_ledger_entry.LoyaltyLedgerEntry(
created = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
program_id = 56,
customer_profile_id = '0',
customer_session_id = '0',
event_id = 56,
type = '0',
amount = 1.337,
start_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
expiry_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
name = '0',
sub_ledger_id = '0',
user_id = 56, )
], )
}
)
else :
return LoyaltyLedger(
ledger = talon_one.models.loyalty_sub_ledger.LoyaltySubLedger(
total = 1.337,
total_active_points = 1.337,
total_pending_points = 1.337,
total_spent_points = 1.337,
total_expired_points = 1.337,
transactions = [
talon_one.models.loyalty_ledger_entry.LoyaltyLedgerEntry(
created = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
program_id = 56,
customer_profile_id = '0',
customer_session_id = '0',
event_id = 56,
type = '0',
amount = 1.337,
start_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
expiry_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
name = '0',
sub_ledger_id = '0',
user_id = 56, )
],
expiring_points = [
talon_one.models.loyalty_ledger_entry.LoyaltyLedgerEntry(
created = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
program_id = 56,
customer_profile_id = '0',
customer_session_id = '0',
event_id = 56,
type = '0',
amount = 1.337,
start_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
expiry_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
name = '0',
sub_ledger_id = '0',
user_id = 56, )
],
active_points = [
talon_one.models.loyalty_ledger_entry.LoyaltyLedgerEntry(
created = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
program_id = 56,
customer_profile_id = '0',
customer_session_id = '0',
event_id = 56,
type = '0',
amount = 1.337,
start_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
expiry_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
name = '0',
sub_ledger_id = '0',
user_id = 56, )
],
pending_points = [
talon_one.models.loyalty_ledger_entry.LoyaltyLedgerEntry(
created = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
program_id = 56,
customer_profile_id = '0',
customer_session_id = '0',
event_id = 56,
type = '0',
amount = 1.337,
start_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
expiry_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
name = '0',
sub_ledger_id = '0',
user_id = 56, )
],
expired_points = [
talon_one.models.loyalty_ledger_entry.LoyaltyLedgerEntry(
created = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
program_id = 56,
customer_profile_id = '0',
customer_session_id = '0',
event_id = 56,
type = '0',
amount = 1.337,
start_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
expiry_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
name = '0',
sub_ledger_id = '0',
user_id = 56, )
], ),
)
def testLoyaltyLedger(self):
"""Test LoyaltyLedger"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
|
# coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the improvements controllers."""
from __future__ import annotations
import datetime
from core import feconf
from core.domain import config_domain
from core.domain import exp_services
from core.domain import improvements_domain
from core.domain import improvements_services
from core.platform import models
from core.tests import test_utils
(improvements_models,) = (
models.Registry.import_models([models.NAMES.improvements]))
class ImprovementsTestBase(test_utils.GenericTestBase):
"""Base class with helper methods related to building improvement tasks."""
EXP_ID = 'eid'
MOCK_DATE = datetime.datetime(2020, 6, 22)
def _new_obsolete_task(
self, state_name=feconf.DEFAULT_INIT_STATE_NAME,
task_type=improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE,
exploration_version=1):
"""Constructs a new default obsolete task with the provided values.
Args:
state_name: str. The name of the state the task should target.
task_type: str. The type of the task.
exploration_version: int. The version of the exploration the task
should target.
Returns:
improvements_domain.TaskEntry. A new obsolete task entry.
"""
return improvements_domain.TaskEntry(
entity_type=improvements_models.TASK_ENTITY_TYPE_EXPLORATION,
entity_id=self.EXP_ID,
entity_version=exploration_version,
task_type=task_type,
target_type=improvements_models.TASK_TARGET_TYPE_STATE,
target_id=state_name,
issue_description='issue description',
status=improvements_models.TASK_STATUS_OBSOLETE,
resolver_id=None,
resolved_on=None)
def _new_open_task(
self, state_name=feconf.DEFAULT_INIT_STATE_NAME,
task_type=improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE,
exploration_version=1):
"""Constructs a new default open task with the provided values.
Args:
state_name: str. The name of the state the task should target.
task_type: str. The type of the task.
exploration_version: int. The version of the exploration the task
should target.
Returns:
improvements_domain.TaskEntry. A new open task entry.
"""
return improvements_domain.TaskEntry(
entity_type=improvements_models.TASK_ENTITY_TYPE_EXPLORATION,
entity_id=self.EXP_ID,
entity_version=exploration_version,
task_type=task_type,
target_type=improvements_models.TASK_TARGET_TYPE_STATE,
target_id=state_name,
issue_description='issue description',
status=improvements_models.TASK_STATUS_OPEN,
resolver_id=None,
resolved_on=None)
def _new_resolved_task(
self, state_name=feconf.DEFAULT_INIT_STATE_NAME,
task_type=improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE,
exploration_version=1, resolved_on=MOCK_DATE):
"""Constructs a new default resolved task with the provided values.
Args:
state_name: str. The name of the state the task should target.
task_type: str. The type of the task.
exploration_version: int. The version of the exploration the task
should target.
resolved_on: datetime.datetime. Time at which the task was resolved.
Returns:
improvements_domain.TaskEntry. A new resolved task entry.
"""
return improvements_domain.TaskEntry(
entity_type=improvements_models.TASK_ENTITY_TYPE_EXPLORATION,
entity_id=self.EXP_ID,
entity_version=exploration_version,
task_type=task_type,
target_type=improvements_models.TASK_TARGET_TYPE_STATE,
target_id=state_name,
issue_description='issue description',
status=improvements_models.TASK_STATUS_RESOLVED,
resolver_id=self.owner_id,
resolved_on=resolved_on)
class ExplorationImprovementsHandlerTests(ImprovementsTestBase):
def setUp(self):
super(ExplorationImprovementsHandlerTests, self).setUp()
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.exp = self.save_new_valid_exploration(self.EXP_ID, self.owner_id)
def get_url(self, exp_id=None):
"""Returns the URL corresponding to the handler.
Args:
exp_id: str. The exploration id to fetch. Uses self's EXP_ID
constant by default.
Returns:
str. The URL of the handler.
"""
return '%s/%s/%s' % (
feconf.IMPROVEMENTS_URL_PREFIX,
improvements_models.TASK_ENTITY_TYPE_EXPLORATION,
self.EXP_ID if exp_id is None else exp_id)
def test_get_with_invalid_exploration_returns_invalid_input_page(self):
with self.login_context(self.OWNER_EMAIL):
self.get_json(
self.get_url(exp_id='bad_exp_id'), expected_status_int=404)
def test_get_with_non_creator_returns_401_error(self):
with self.login_context(self.VIEWER_EMAIL):
self.get_json(self.get_url(), expected_status_int=401)
def test_get_when_no_tasks_exist_returns_response_with_empty_fields(self):
with self.login_context(self.OWNER_EMAIL):
self.assertEqual(self.get_json(self.get_url()), {
'open_tasks': [],
'resolved_task_types_by_state_name': {},
})
def test_get_returns_open_tasks(self):
task_entries = [
self._new_open_task(state_name=name) for name in ['A', 'B', 'C']]
improvements_services.put_tasks(task_entries)
with self.login_context(self.OWNER_EMAIL):
self.assertEqual(self.get_json(self.get_url()), {
'open_tasks': [t.to_dict() for t in task_entries],
'resolved_task_types_by_state_name': {},
})
def test_get_returns_resolved_tasks(self):
task_entries = [
self._new_resolved_task(
state_name=name,
task_type=improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE)
for name in ['A', 'B', 'C']]
improvements_services.put_tasks(task_entries)
with self.login_context(self.OWNER_EMAIL):
self.assertEqual(self.get_json(self.get_url()), {
'open_tasks': [],
'resolved_task_types_by_state_name': {
'A': ['high_bounce_rate'],
'B': ['high_bounce_rate'],
'C': ['high_bounce_rate'],
},
})
def test_post_with_non_creator_returns_401_error(self):
with self.login_context(self.VIEWER_EMAIL):
self.post_json(self.get_url(), {
'task_entries': [{
'entity_version': 1,
'task_type': 'high_bounce_rate',
'target_id': 'Introduction',
'issue_description': 'issue description',
'status': 'open',
}]
}, csrf_token=self.get_new_csrf_token(), expected_status_int=401)
def test_post_invalid_exploration_returns_invalid_input_page(self):
with self.login_context(self.OWNER_EMAIL):
self.post_json(self.get_url(exp_id='bad_exp_id'), {
'task_entries': [{
'entity_version': 1,
'task_type': 'high_bounce_rate',
'target_id': 'Introduction',
'issue_description': 'issue description',
'status': 'open',
}]
}, csrf_token=self.get_new_csrf_token(), expected_status_int=404)
def test_post_without_csrf_token_returns_401_error(self):
with self.login_context(self.OWNER_EMAIL):
self.post_json(self.get_url(), {
'task_entries': [{
'entity_version': self.exp.version,
'task_type': improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE,
'target_id': feconf.DEFAULT_INIT_STATE_NAME,
'issue_description': 'issue description',
'status': improvements_models.TASK_STATUS_OPEN,
}]
}, csrf_token=None, expected_status_int=401)
    def test_post_with_missing_task_entries_returns_400_error(self):
with self.login_context(self.OWNER_EMAIL):
self.post_json(self.get_url(), {
}, csrf_token=self.get_new_csrf_token(), expected_status_int=400)
    def test_post_with_missing_entity_version_returns_400_error(self):
with self.login_context(self.OWNER_EMAIL):
self.post_json(self.get_url(), {
'task_entries': [{
# 'entity_version': 1.
'task_type': 'high_bounce_rate',
'target_id': 'Introduction',
'issue_description': 'issue description',
'status': 'open',
}]
}, csrf_token=self.get_new_csrf_token(), expected_status_int=400)
    def test_post_with_missing_task_type_returns_400_error(self):
with self.login_context(self.OWNER_EMAIL):
self.post_json(self.get_url(), {
'task_entries': [{
'entity_version': 1,
# 'task_type': 'high_bounce_rate'.
'target_id': 'Introduction',
'issue_description': 'issue description',
'status': 'open',
}]
}, csrf_token=self.get_new_csrf_token(), expected_status_int=400)
    def test_post_with_missing_target_id_returns_400_error(self):
with self.login_context(self.OWNER_EMAIL):
self.post_json(self.get_url(), {
'task_entries': [{
'entity_version': 1,
'task_type': 'high_bounce_rate',
# 'target_id': 'Introduction'.
'issue_description': 'issue description',
'status': 'open',
}]
}, csrf_token=self.get_new_csrf_token(), expected_status_int=400)
def test_post_with_missing_issue_description_is_allowed(self):
with self.login_context(self.OWNER_EMAIL):
self.post_json(self.get_url(), {
'task_entries': [{
'entity_version': 1,
'task_type': 'high_bounce_rate',
'target_id': 'Introduction',
# 'issue_description': 'issue description'.
'status': 'open',
}]
}, csrf_token=self.get_new_csrf_token())
task_entry_model = improvements_models.TaskEntryModel.get_by_id(
improvements_models.TaskEntryModel.generate_task_id(
improvements_models.TASK_ENTITY_TYPE_EXPLORATION,
self.exp.id,
self.exp.version,
improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE,
improvements_models.TASK_TARGET_TYPE_STATE,
feconf.DEFAULT_INIT_STATE_NAME))
self.assertIsNotNone(task_entry_model)
self.assertIsNone(task_entry_model.issue_description)
    def test_post_with_missing_status_returns_400_error(self):
with self.login_context(self.OWNER_EMAIL):
self.post_json(self.get_url(), {
'task_entries': [{
'entity_version': 1,
'task_type': 'high_bounce_rate',
'target_id': 'Introduction',
'issue_description': 'issue description',
# 'status': 'open'.
}]
}, csrf_token=self.get_new_csrf_token(), expected_status_int=400)
def test_post_can_create_new_open_task_in_storage(self):
with self.login_context(self.OWNER_EMAIL):
self.post_json(self.get_url(), {
'task_entries': [{
'entity_version': self.exp.version,
'task_type': improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE,
'target_id': feconf.DEFAULT_INIT_STATE_NAME,
'issue_description': 'issue description',
'status': improvements_models.TASK_STATUS_OPEN,
}]
}, csrf_token=self.get_new_csrf_token())
task_id = improvements_models.TaskEntryModel.generate_task_id(
improvements_models.TASK_ENTITY_TYPE_EXPLORATION,
self.exp.id,
self.exp.version,
improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE,
improvements_models.TASK_TARGET_TYPE_STATE,
feconf.DEFAULT_INIT_STATE_NAME)
task_entry_model = improvements_models.TaskEntryModel.get_by_id(task_id)
self.assertIsNotNone(task_entry_model)
self.assertEqual(task_entry_model.id, task_id)
self.assertEqual(
task_entry_model.entity_type,
improvements_models.TASK_ENTITY_TYPE_EXPLORATION)
self.assertEqual(task_entry_model.entity_id, self.exp.id)
self.assertEqual(task_entry_model.entity_version, self.exp.version)
self.assertEqual(
task_entry_model.task_type,
improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE)
self.assertEqual(
task_entry_model.target_type,
improvements_models.TASK_TARGET_TYPE_STATE)
self.assertEqual(
task_entry_model.target_id, feconf.DEFAULT_INIT_STATE_NAME)
self.assertEqual(
task_entry_model.issue_description, 'issue description')
self.assertEqual(
task_entry_model.status, improvements_models.TASK_STATUS_OPEN)
self.assertIsNone(task_entry_model.resolver_id)
self.assertIsNone(task_entry_model.resolved_on)
def test_post_can_create_new_obsolete_task_in_storage(self):
with self.login_context(self.OWNER_EMAIL):
self.post_json(self.get_url(), {
'task_entries': [{
'entity_version': self.exp.version,
'task_type': improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE,
'target_id': feconf.DEFAULT_INIT_STATE_NAME,
'issue_description': 'issue description',
'status': improvements_models.TASK_STATUS_OBSOLETE,
}]
}, csrf_token=self.get_new_csrf_token())
task_id = improvements_models.TaskEntryModel.generate_task_id(
improvements_models.TASK_ENTITY_TYPE_EXPLORATION,
self.exp.id,
self.exp.version,
improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE,
improvements_models.TASK_TARGET_TYPE_STATE,
feconf.DEFAULT_INIT_STATE_NAME)
task_entry_model = improvements_models.TaskEntryModel.get_by_id(task_id)
self.assertIsNotNone(task_entry_model)
self.assertEqual(task_entry_model.id, task_id)
self.assertEqual(
task_entry_model.entity_type,
improvements_models.TASK_ENTITY_TYPE_EXPLORATION)
self.assertEqual(task_entry_model.entity_id, self.exp.id)
self.assertEqual(task_entry_model.entity_version, self.exp.version)
self.assertEqual(
task_entry_model.task_type,
improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE)
self.assertEqual(
task_entry_model.target_type,
improvements_models.TASK_TARGET_TYPE_STATE)
self.assertEqual(
task_entry_model.target_id, feconf.DEFAULT_INIT_STATE_NAME)
self.assertEqual(
task_entry_model.issue_description, 'issue description')
self.assertEqual(
task_entry_model.status, improvements_models.TASK_STATUS_OBSOLETE)
self.assertIsNone(task_entry_model.resolver_id)
self.assertIsNone(task_entry_model.resolved_on)
def test_post_can_create_new_resolved_task_in_storage_with_utcnow(self):
login_context = self.login_context(self.OWNER_EMAIL)
mock_datetime_utcnow = self.mock_datetime_utcnow(self.MOCK_DATE)
with login_context, mock_datetime_utcnow:
self.post_json(self.get_url(), {
'task_entries': [{
'entity_version': self.exp.version,
'task_type': improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE,
'target_id': feconf.DEFAULT_INIT_STATE_NAME,
'issue_description': 'issue description',
'status': improvements_models.TASK_STATUS_RESOLVED,
}]
}, csrf_token=self.get_new_csrf_token())
task_id = improvements_models.TaskEntryModel.generate_task_id(
improvements_models.TASK_ENTITY_TYPE_EXPLORATION,
self.exp.id,
self.exp.version,
improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE,
improvements_models.TASK_TARGET_TYPE_STATE,
feconf.DEFAULT_INIT_STATE_NAME)
task_entry_model = improvements_models.TaskEntryModel.get_by_id(task_id)
self.assertIsNotNone(task_entry_model)
self.assertEqual(task_entry_model.id, task_id)
self.assertEqual(
task_entry_model.entity_type,
improvements_models.TASK_ENTITY_TYPE_EXPLORATION)
self.assertEqual(task_entry_model.entity_id, self.exp.id)
self.assertEqual(task_entry_model.entity_version, self.exp.version)
self.assertEqual(
task_entry_model.task_type,
improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE)
self.assertEqual(
task_entry_model.target_type,
improvements_models.TASK_TARGET_TYPE_STATE)
self.assertEqual(
task_entry_model.target_id, feconf.DEFAULT_INIT_STATE_NAME)
self.assertEqual(
task_entry_model.issue_description, 'issue description')
self.assertEqual(
task_entry_model.status, improvements_models.TASK_STATUS_RESOLVED)
self.assertEqual(task_entry_model.resolver_id, self.owner_id)
self.assertEqual(task_entry_model.resolved_on, self.MOCK_DATE)
class ExplorationImprovementsHistoryHandlerTests(ImprovementsTestBase):
def setUp(self):
super(ExplorationImprovementsHistoryHandlerTests, self).setUp()
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.exp = self.save_new_valid_exploration(self.EXP_ID, self.owner_id)
def get_url(self, exp_id=None, cursor=None):
"""Returns the URL corresponding to the handler.
Args:
exp_id: str. The exploration id to fetch. Uses self's EXP_ID
constant by default.
cursor: str or None. Starting point for the search. When None, the
starting point is the very beginning of the history results
(i.e. starting from the most recently resolved task entry).
Returns:
str. The URL of the handler.
"""
url = '%s/%s/%s' % (
feconf.IMPROVEMENTS_HISTORY_URL_PREFIX,
improvements_models.TASK_ENTITY_TYPE_EXPLORATION,
self.EXP_ID if exp_id is None else exp_id)
if cursor is not None:
url = '%s?cursor=%s' % (url, cursor)
return url
def test_get_with_invalid_exploration_returns_invalid_input_page(self):
with self.login_context(self.OWNER_EMAIL):
self.get_json(
self.get_url(exp_id='bad_exp_id'), expected_status_int=404)
def test_get_with_non_creator_returns_401_error(self):
with self.login_context(self.VIEWER_EMAIL):
self.get_json(self.get_url(), expected_status_int=401)
def test_get_with_invalid_cursor_returns_500_error(self):
with self.login_context(self.OWNER_EMAIL):
self.get_json(self.get_url(cursor=234), expected_status_int=500)
def test_get_when_no_tasks_exist_returns_response_with_empty_fields(self):
with self.login_context(self.OWNER_EMAIL):
self.assertEqual(self.get_json(self.get_url()), {
'results': [],
'cursor': None,
'more': False,
})
def test_get_with_cursor_as_none_returns_first_page(self):
task_entries = [
self._new_resolved_task(
state_name='State %d' % i,
resolved_on=self.MOCK_DATE + datetime.timedelta(minutes=i * 5))
for i in range(1, 26)]
improvements_services.put_tasks(task_entries)
with self.login_context(self.OWNER_EMAIL):
json_response = self.get_json(self.get_url(cursor=None))
self.assertEqual(
[t['target_id'] for t in json_response['results']], [
'State 25', 'State 24', 'State 23', 'State 22', 'State 21',
'State 20', 'State 19', 'State 18', 'State 17', 'State 16',
])
self.assertIsNotNone(json_response['cursor'])
self.assertTrue(json_response['more'])
def test_get_can_build_full_task_list_after_enough_fetches(self):
task_entries = [
self._new_resolved_task(
state_name='State %d' % i,
resolved_on=self.MOCK_DATE + datetime.timedelta(minutes=i * 5))
for i in range(1, 26)]
improvements_services.put_tasks(task_entries)
with self.login_context(self.OWNER_EMAIL):
all_results, cursor, more = [], None, True
while more:
json_response = self.get_json(self.get_url(cursor=cursor))
all_results.extend(json_response['results'])
cursor = json_response['cursor']
more = json_response['more']
self.assertEqual(
[t['target_id'] for t in all_results], [
'State 25', 'State 24', 'State 23', 'State 22', 'State 21',
'State 20', 'State 19', 'State 18', 'State 17', 'State 16',
'State 15', 'State 14', 'State 13', 'State 12', 'State 11',
'State 10', 'State 9', 'State 8', 'State 7', 'State 6',
'State 5', 'State 4', 'State 3', 'State 2', 'State 1',
])
class ExplorationImprovementsConfigHandlerTests(test_utils.GenericTestBase):
EXP_ID = 'eid'
def setUp(self):
super(ExplorationImprovementsConfigHandlerTests, self).setUp()
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.exp = self.save_new_valid_exploration(self.EXP_ID, self.owner_id)
def get_url(self, exp_id=None):
"""Returns the URL corresponding to the handler.
Args:
exp_id: str. The exploration id to fetch. Uses self's EXP_ID
constant by default.
Returns:
str. The URL of the handler.
"""
return '%s/%s/%s' % (
feconf.IMPROVEMENTS_CONFIG_URL_PREFIX,
improvements_models.TASK_ENTITY_TYPE_EXPLORATION,
self.EXP_ID if exp_id is None else exp_id)
def test_get_for_public_exploration_as_non_owning_user_fails(self):
self.publish_exploration(self.owner_id, self.EXP_ID)
with self.login_context(self.VIEWER_EMAIL):
self.get_json(self.get_url(), expected_status_int=401)
def test_get_for_private_exploration_as_non_owning_user_fails(self):
        # The exploration is intentionally left unpublished (publish_exploration is not called).
with self.login_context(self.VIEWER_EMAIL):
self.get_json(self.get_url(), expected_status_int=401)
def test_get_for_non_existing_exploration_fails(self):
with self.login_context(self.OWNER_EMAIL):
self.get_json(
self.get_url(exp_id='bad_exp_id'), expected_status_int=404)
def test_get_returns_exploration_id(self):
self.set_config_property(
config_domain.IS_IMPROVEMENTS_TAB_ENABLED, False)
with self.login_context(self.OWNER_EMAIL):
json_response = self.get_json(self.get_url())
self.assertEqual(json_response['exploration_id'], self.EXP_ID)
def test_get_returns_exploration_version(self):
with self.login_context(self.OWNER_EMAIL):
json_response = self.get_json(self.get_url())
self.assertEqual(json_response['exploration_version'], 1)
# Update to version 2.
exp_services.update_exploration(self.owner_id, self.EXP_ID, None, '')
with self.login_context(self.OWNER_EMAIL):
json_response = self.get_json(self.get_url())
self.assertEqual(json_response['exploration_version'], 2)
def test_improvements_tab_disabled(self):
self.set_config_property(
config_domain.IS_IMPROVEMENTS_TAB_ENABLED, False)
with self.login_context(self.OWNER_EMAIL):
json_response = self.get_json(self.get_url())
self.assertFalse(json_response['is_improvements_tab_enabled'])
def test_improvements_tab_enabled(self):
self.set_config_property(
config_domain.IS_IMPROVEMENTS_TAB_ENABLED, True)
with self.login_context(self.OWNER_EMAIL):
json_response = self.get_json(self.get_url())
self.assertTrue(json_response['is_improvements_tab_enabled'])
def test_custom_high_bounce_rate_creation_threshold(self):
self.set_config_property((
config_domain
.HIGH_BOUNCE_RATE_TASK_STATE_BOUNCE_RATE_CREATION_THRESHOLD), 0.35)
with self.login_context(self.OWNER_EMAIL):
json_response = self.get_json(self.get_url())
self.assertAlmostEqual(
json_response[
'high_bounce_rate_task_state_bounce_rate_creation_threshold'],
0.35)
def test_custom_high_bounce_rate_obsoletion_threshold(self):
self.set_config_property(
(
config_domain
.HIGH_BOUNCE_RATE_TASK_STATE_BOUNCE_RATE_OBSOLETION_THRESHOLD),
0.05)
with self.login_context(self.OWNER_EMAIL):
json_response = self.get_json(self.get_url())
self.assertAlmostEqual(
json_response[
'high_bounce_rate_task_state_bounce_rate_obsoletion_threshold'],
0.05)
def test_custom_high_bounce_rate_task_minimum_exploration_starts(self):
self.set_config_property(
config_domain.HIGH_BOUNCE_RATE_TASK_MINIMUM_EXPLORATION_STARTS,
20)
with self.login_context(self.OWNER_EMAIL):
json_response = self.get_json(self.get_url())
self.assertAlmostEqual(
json_response['high_bounce_rate_task_minimum_exploration_starts'],
20)
|
|
# -*- coding: utf-8 -*-
import logging
import os
import shutil
import string
from future.standard_library import install_aliases
install_aliases()
from urllib.parse import urlparse, urlencode, urljoin
from urllib.request import urlopen, Request
from urllib.error import HTTPError
from subprocess import Popen
from subprocess import PIPE
from subprocess import STDOUT
from xml.dom import minidom
""" Dictionary with following structure: repo url => GAV (string) => effective pom (string). """
effective_pom_cache = {}
""" Dictionary with following structure: repo url => GAV (string) => management type => MavenArtifact[]. """
managed_gavs_cache = {}
class MGMT_TYPE:
PLUGINS = "plugins"
DEPENDENCIES = "dependencies"
BOTH = "both"
def read_managed_gavs(artifact, repo_url=None, mgmt_type=MGMT_TYPE.DEPENDENCIES, mvn_repo_local=None):
global managed_gavs_cache
gav = artifact.get_gav()
result = None
if repo_url in managed_gavs_cache.keys():
if gav in managed_gavs_cache[repo_url].keys():
if mgmt_type in managed_gavs_cache[repo_url][gav]:
result = managed_gavs_cache[repo_url][gav][mgmt_type]
if not result:
result = _read_managed_gavs(artifact, repo_url, mgmt_type, mvn_repo_local)
if result:
cache = managed_gavs_cache.setdefault(repo_url, {}).setdefault(gav, {})
if mgmt_type in [MGMT_TYPE.BOTH, MGMT_TYPE.DEPENDENCIES]:
cache[MGMT_TYPE.DEPENDENCIES] = result[MGMT_TYPE.DEPENDENCIES]
if mgmt_type in [MGMT_TYPE.BOTH, MGMT_TYPE.PLUGINS]:
                cache[MGMT_TYPE.PLUGINS] = result[MGMT_TYPE.PLUGINS]
return result
def _read_managed_gavs(artifact, repo_url=None, mgmt_type=MGMT_TYPE.DEPENDENCIES, mvn_repo_local=None):
"""
    Reads the artifacts managed in the dependencyManagement/pluginManagement sections of the effective POM of the given
    artifact. It places the repo_url in a generated settings.xml and then runs help:effective-pom with these settings.
    The POM, its parent and its dependencies must be available in the repository, along with all plugins needed to
    execute the help:effective-pom goal.
:param artifact: MavenArtifact instance representing the POM
:param repo_url: repository URL to use
:param mgmt_type: type of management to read, values available are defined in MGMT_TYPE class
:param mvn_repo_local: path to local Maven repository to be used when getting effective POM
:returns: dictionary, where key is the management type and value is the list of artifacts managed by
dependencyManagement/pluginManagement or None, if a problem occurs
"""
# download the pom
pom_path = download_pom(repo_url, artifact)
if pom_path:
pom_dir = os.path.split(pom_path)[0]
# get effective pom
eff_pom = get_effective_pom(pom_dir, repo_url, mvn_repo_local)
shutil.rmtree(pom_dir, True)
if not eff_pom:
return None
# read dependencyManagement/pluginManagement section
managed_arts = read_management(eff_pom, mgmt_type)
else:
managed_arts = None
return managed_arts
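# Usage sketch (the GAV and repository URL below are placeholders):
#   artifact = MavenArtifact(gav="org.example:example-parent:1.0")
#   managed = read_managed_gavs(artifact, repo_url="http://repo.example.com/group/",
#                               mgmt_type=MGMT_TYPE.BOTH)
#   # managed[MGMT_TYPE.DEPENDENCIES] and managed[MGMT_TYPE.PLUGINS] then hold lists of
#   # MavenArtifact objects read from the effective POM, or managed is None on failure.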
def get_effective_pom(pom_dir, repo_url, mvn_repo_local, profiles=None, additional_params=None):
"""
    Gets the effective POM from the downloaded POM. There has to be a complete source tree (at least the POM tree) in
    case the root POM contains modules.
    :param pom_dir: directory where the pom is prepared (including potential patches)
    :param repo_url: repository URL, where all dependencies needed to resolve the effective POM are available
    :param mvn_repo_local: path to local repository to use if a non-default location is required
    :param profiles: optional comma-separated list of Maven profiles to activate
    :param additional_params: optional string of additional command-line parameters passed to Maven
:returns: the effective pom as a string or None if a problem occurs
"""
global effective_pom_cache
pom_file = None
try:
pom_file = open(os.path.join(pom_dir, "pom.xml"))
pom = pom_file.read()
finally:
if pom_file:
pom_file.close()
artifact = MavenArtifact(pom=pom)
gav = artifact.get_gav()
eff_pom = None
if repo_url in effective_pom_cache.keys():
if gav in effective_pom_cache[repo_url].keys():
if profiles in effective_pom_cache[repo_url][gav].keys():
if additional_params in effective_pom_cache[repo_url][gav][profiles].keys():
eff_pom = effective_pom_cache[repo_url][gav][profiles][additional_params]
if not eff_pom:
try:
eff_pom = _read_effective_pom(pom_dir, repo_url, mvn_repo_local, profiles, additional_params)
finally:
if eff_pom:
effective_pom_cache.setdefault(repo_url, {}).setdefault(gav, {}).setdefault(profiles, {})[additional_params] = eff_pom
return eff_pom
def _read_effective_pom(pom_dir, repo_url, mvn_repo_local, profiles, additional_params):
work_dir = os.getcwd()
os.chdir(pom_dir)
try:
settings_filename = create_mirror_settings(repo_url)
eff_pom_filename = "effective-pom.xml"
args = ["mvn", "org.apache.maven.plugins:maven-help-plugin:2.2:effective-pom", "-Doutput=%s" % eff_pom_filename,
"-s", settings_filename]
if mvn_repo_local:
args.append("-Dmaven.repo.local=%s" % mvn_repo_local)
if profiles:
args.append("-P%s" % profiles)
if additional_params:
param_list = additional_params.split(" ")
args.extend(param_list)
logging.debug("Running command: %s", " ".join(args))
command = Popen(args, stdout=PIPE, stderr=STDOUT)
stdout = command.communicate()[0]
if command.returncode:
logging.error("Getting effective POM failed. Output:\n%s" % stdout)
eff_pom = None
else:
logging.debug("Getting effective POM succeeded. Output:\n%s" % stdout)
eff_pom_file = None
try:
eff_pom_file = open(eff_pom_filename)
eff_pom = eff_pom_file.read()
finally:
if eff_pom_file:
eff_pom_file.close()
finally:
os.chdir(work_dir)
return eff_pom
def alter_poms(pom_dir, additional_params, repo_url=None, mvn_repo_local=None):
"""
Runs mvn clean command with provided additional parameters to perform pom updates by pom-manipulation-ext.
"""
work_dir = os.getcwd()
os.chdir(pom_dir)
try:
if repo_url:
settings_filename = create_mirror_settings(repo_url)
else:
settings_filename = None
args = ["mvn", "clean"]
        if settings_filename:
            args.extend(["-s", settings_filename])
if mvn_repo_local:
args.append("-Dmaven.repo.local=%s" % mvn_repo_local)
param_list = additional_params.split(" ")
args.extend(param_list)
logging.debug("Running command: %s", " ".join(args))
command = Popen(args, stdout=PIPE, stderr=STDOUT)
stdout = command.communicate()[0]
if command.returncode:
logging.error("POM manipulation failed. Output:\n%s" % stdout)
else:
logging.debug("POM manipulation succeeded. Output:\n%s" % stdout)
finally:
os.chdir(work_dir)
def pom_contains_modules():
"""
    Reads pom.xml in the current working directory and checks whether it contains a non-empty modules tag.
"""
pom_file = None
try:
pom_file = open("pom.xml")
pom = pom_file.read()
finally:
if pom_file:
pom_file.close()
artifact = MavenArtifact(pom=pom)
if artifact.modules:
return True
else:
return False
def get_repo_url(mead_tag, nexus_base_url, prefix="hudson-", suffix=""):
"""
Creates repository Nexus group URL composed of:
<nexus_base_url>/content/groups/<prefix><mead_tag><suffix>
:param mead_tag: name of the MEAD tag used to create the proxy URL in settings.xml
:param nexus_base_url: the base URL of a Nexus instance
:param prefix: Nexus group name prefix, default is "hudson-"
:param suffix: Nexus group name suffix, e.g. "-jboss-central" or "-reverse"
    :returns: the composed repository group URL as a string
"""
    result = urljoin(nexus_base_url, "content/groups/")
    result = urljoin(result, "%s%s%s/" % (prefix, mead_tag, suffix))
return result
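# Example of the composed URL (hostname and tag are placeholders):
#   get_repo_url("my-tag", "http://nexus.example.com/")
#   # -> "http://nexus.example.com/content/groups/hudson-my-tag/"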
def download_pom(repo_url=None, artifact=None, pom_url=None, target_dir=None):
"""
    Downloads the pom file for the given artifact or from the given pom_url and saves it as pom.xml into target_dir.
:param repo_url: repository URL from which the pom should be downloaded, mandatory only if no pom_url provided
:param artifact: MavenArtifact instance, mandatory only if no pom_url provided
:param pom_url: URL of the pom to download, not mandatory
    :param target_dir: target directory path, where the pom should be saved, not mandatory
:returns: path to the saved pom, useful if no target_dir provided
"""
if not pom_url:
        pom_url = urljoin(repo_url, "%s/" % artifact.groupId.replace(".", "/"))
        pom_url = urljoin(pom_url, "%s/" % artifact.artifactId)
        pom_url = urljoin(pom_url, "%s/" % artifact.version)
        pom_url = urljoin(pom_url, "%s-%s.pom" % (artifact.artifactId, artifact.version))
handler = None
try:
handler = urlopen(pom_url)
except HTTPError as err:
logging.error("Failed to download POM %s. %s", pom_url, err)
return None
if not target_dir:
num = 1
while not target_dir or os.path.exists(target_dir):
target_dir = "/tmp/maven-temp-path-%s" % num
num += 1
pom_path = os.path.join(target_dir, "pom.xml")
if handler.getcode() == 200:
pom = handler.read()
handler.close()
if not os.path.exists(target_dir):
os.makedirs(target_dir)
pom_file = None
try:
pom_file = open(pom_path, "w")
pom_file.write(pom)
finally:
if pom_file:
pom_file.close()
return pom_path
def read_management(pom, mgmt_type):
xmldoc = minidom.parseString(pom)
result = {}
if mgmt_type in [MGMT_TYPE.BOTH, MGMT_TYPE.DEPENDENCIES]:
result[MGMT_TYPE.DEPENDENCIES] = _read_management(xmldoc, "dependencyManagement", "dependency")
if mgmt_type in [MGMT_TYPE.BOTH, MGMT_TYPE.PLUGINS]:
result[MGMT_TYPE.PLUGINS] = _read_management(xmldoc, "pluginManagement", "plugin")
return result
def _read_management(xmldoc, management_tag, artifact_tag):
mgmts = xmldoc.getElementsByTagName(management_tag)
result = []
if len(mgmts):
mgmt = mgmts[0]
art_elem_list = mgmt.getElementsByTagName(artifact_tag)
for art_elem in art_elem_list:
groupid = None
artifactid = None
version = None
g_elem_list = art_elem.getElementsByTagName('groupId')
for groupid_elem in g_elem_list:
if groupid_elem.parentNode.localName == artifact_tag:
groupid = groupid_elem.childNodes[0].data
break
a_elem_list = art_elem.getElementsByTagName('artifactId')
for artifactid_elem in a_elem_list:
if artifactid_elem.parentNode.localName == artifact_tag:
artifactid = artifactid_elem.childNodes[0].data
break
v_elem_list = art_elem.getElementsByTagName('version')
for version_elem in v_elem_list:
if version_elem.parentNode.localName == artifact_tag:
version = version_elem.childNodes[0].data
break
if not groupid or not artifactid or not version:
logging.warning("Incomplete GAV information in %s: %s:%s:%s", management_tag, groupid, artifactid,
version)
else:
artifact = MavenArtifact(groupId=groupid, artifactId=artifactid, version=version)
result.append(artifact)
return result
def get_properties(gav, repo_url, mvn_repo_local):
artifact = MavenArtifact(gav=gav)
pom_path = download_pom(repo_url, artifact)
if pom_path:
pom_dir = os.path.split(pom_path)[0]
eff_pom = get_effective_pom(pom_dir, repo_url, mvn_repo_local)
shutil.rmtree(pom_dir, True)
if not eff_pom:
return None
return read_properties(eff_pom)
else:
return None
def read_properties(pom):
xmldoc = minidom.parseString(pom)
propertiesElemList = xmldoc.getElementsByTagName("properties")
result = {}
for propertiesElem in propertiesElemList:
for propertyElem in propertiesElem.childNodes:
if propertyElem.nodeType == propertyElem.ELEMENT_NODE:
name = propertyElem.localName
value_list = []
for childnode in propertyElem.childNodes:
if childnode.nodeType == childnode.TEXT_NODE:
value_list.append(childnode.data)
value = ''.join(value_list)
result[name] = value
return result
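# Example: for an effective POM containing
#   <properties><version.example>1.2.3</version.example></properties>
# read_properties(pom) returns {'version.example': '1.2.3'}. The property name is a
# placeholder; only element names and their text content are read.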
def create_mirror_settings(repo_url):
"""
Creates settings.xml in current working directory, which when used makes Maven use given repo URL as a mirror of all
repositories to look at.
:param repo_url: the repository URL to use
:returns: filepath to the created file
"""
cwd = os.getcwd()
settings_path = os.path.join(cwd, "settings.xml")
settings_file = None
try:
settings_file = open(settings_path, "w")
settings_file.write('<?xml version="1.0" encoding="UTF-8"?>\n')
settings_file.write('<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"\n')
settings_file.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n')
settings_file.write(' xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">\n')
settings_file.write('<mirrors>\n')
settings_file.write(' <mirror>\n')
settings_file.write(' <id>repo-mirror</id>\n')
settings_file.write(' <url>%s</url>\n' % repo_url)
settings_file.write(' <mirrorOf>*</mirrorOf>\n')
settings_file.write(' </mirror>\n')
settings_file.write(' </mirrors>\n')
settings_file.write('</settings>\n')
finally:
if settings_file:
settings_file.close()
return settings_path
class MavenArtifact:
def __init__(self, pom=None, profiles=[], gav=None, groupId=None, artifactId=None, version=None, parentGav=None):
self.parentGav = parentGav
self.modules = None
if pom:
xmldoc = minidom.parseString(pom)
project = xmldoc.getElementsByTagName('project')[0]
groupIdElemList = project.getElementsByTagName('groupId')
groupId = None
for groupIdElem in groupIdElemList:
if groupIdElem.parentNode.localName == "parent":
groupId = groupIdElem.childNodes[0].data
elif groupIdElem.parentNode.localName == "project":
groupId = groupIdElem.childNodes[0].data
break
self.groupId = groupId
artifactIdElemList = project.getElementsByTagName('artifactId')
for artifactIdElem in artifactIdElemList:
if artifactIdElem.parentNode.localName == "project":
self.artifactId = artifactIdElem.childNodes[0].data
break
version = None
versionElemList = project.getElementsByTagName('version')
for versionElem in versionElemList:
if versionElem.parentNode.localName == "parent":
version = versionElem.childNodes[0].data
elif versionElem.parentNode.localName == "project":
version = versionElem.childNodes[0].data
break
self.version = version
parentElemList = project.getElementsByTagName('parent')
if len(parentElemList):
groupIdElemList = parentElemList[0].getElementsByTagName('groupId')
groupId = groupIdElemList[0].childNodes[0].data
artifactIdElemList = parentElemList[0].getElementsByTagName('artifactId')
artifactId = artifactIdElemList[0].childNodes[0].data
versionElemList = parentElemList[0].getElementsByTagName('version')
version = versionElemList[0].childNodes[0].data
self.parentGav = "%s:%s:%s" % (groupId, artifactId, version)
else:
self.parentGav = None
modulesElemList = project.getElementsByTagName('modules')
if len(modulesElemList):
for modulesElem in modulesElemList:
modulesActive = False
if modulesElem.parentNode.localName == "project":
modulesActive = True
elif modulesElem.parentNode.localName == "profile":
profileElem = modulesElem.parentNode
profileIdElems = profileElem.getElementsByTagName('id')
if len(profileIdElems):
profileIdElem = profileElem.getElementsByTagName('id')[0]
profileId = profileIdElem.childNodes[0].data
else:
profileId = "<no profile id specified>"
if profileId in profiles:
modulesActive = True
else:
abdElemList = profileElem.getElementsByTagName('activeByDefault')
if len(abdElemList):
abdElem = abdElemList[0]
abd = abdElem.childNodes[0].data
if abd == "true" and ("!%s" % profileId) not in profiles:
modulesActive = True
if modulesActive:
moduleElemList = modulesElem.getElementsByTagName('module')
for moduleElem in moduleElemList:
if not self.modules:
self.modules = {}
module_name = moduleElem.childNodes[0].data
if not self.modules.__contains__(module_name):
self.modules[module_name] = None
elif gav:
            parts = gav.split(":")
if len(parts) != 3:
raise ValueError("%s is not a GAV." % gav)
else:
self.groupId = parts[0]
self.artifactId = parts[1]
self.version = parts[2]
else:
self.groupId = groupId
self.artifactId = artifactId
self.version = version
def get_ga(self):
return "%s:%s" % (self.groupId, self.artifactId)
def get_gav(self):
return "%s:%s:%s" % (self.groupId, self.artifactId, self.version)
|
|
#!/usr/bin/env python
# Written by John Hoffman
# see LICENSE.txt for license information
from BitTornado import PSYCO
if PSYCO.psyco:
try:
import psyco
assert psyco.__version__ >= 0x010100f0
psyco.full()
except:
pass
from download_bt1 import BT1Download
from RawServer import RawServer
from SocketHandler import UPnP_ERROR
from RateLimiter import RateLimiter
from ServerPortHandler import MultiHandler
from parsedir import parsedir
from natpunch import UPnP_test
from random import seed
from socket import error as socketerror
from threading import Event
import sys, os
from clock import clock
from __init__ import createPeerID, mapbase64
from cStringIO import StringIO
from traceback import print_exc
try:
True
except:
True = 1
False = 0
def fmttime(n):
try:
n = int(n) # n may be None or too large
assert n < 5184000 # 60 days
except:
return 'downloading'
m, s = divmod(n, 60)
h, m = divmod(m, 60)
return '%d:%02d:%02d' % (h, m, s)
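# Examples: fmttime(3725) returns '1:02:05'; fmttime(None) and values of 60 days
# or more fall through to the 'downloading' placeholder.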
class SingleDownload:
def __init__(self, controller, hash, response, config, myid):
self.controller = controller
self.hash = hash
self.response = response
self.config = config
self.doneflag = Event()
self.waiting = True
self.checking = False
self.working = False
self.seed = False
self.closed = False
self.status_msg = ''
self.status_err = ['']
self.status_errtime = 0
self.status_done = 0.0
self.rawserver = controller.handler.newRawServer(hash, self.doneflag)
d = BT1Download(self.display,
self.finished,
self.error,
controller.exchandler,
self.doneflag,
config,
response,
hash,
myid,
self.rawserver,
controller.listen_port)
self.d = d
def start(self):
if not self.d.saveAs(self.saveAs):
self._shutdown()
return
self._hashcheckfunc = self.d.initFiles()
if not self._hashcheckfunc:
self._shutdown()
return
self.controller.hashchecksched(self.hash)
def saveAs(self, name, length, saveas, isdir):
return self.controller.saveAs(self.hash, name, saveas, isdir)
def hashcheck_start(self, donefunc):
if self.is_dead():
self._shutdown()
return
self.waiting = False
self.checking = True
self._hashcheckfunc(donefunc)
def hashcheck_callback(self):
self.checking = False
if self.is_dead():
self._shutdown()
return
if not self.d.startEngine(ratelimiter = self.controller.ratelimiter):
self._shutdown()
return
self.d.startRerequester()
self.statsfunc = self.d.startStats()
self.rawserver.start_listening(self.d.getPortHandler())
self.working = True
def is_dead(self):
return self.doneflag.isSet()
def _shutdown(self):
self.shutdown(False)
def shutdown(self, quiet=True):
if self.closed:
return
self.doneflag.set()
self.rawserver.shutdown()
if self.checking or self.working:
self.d.shutdown()
self.waiting = False
self.checking = False
self.working = False
self.closed = True
self.controller.was_stopped(self.hash)
if not quiet:
self.controller.died(self.hash)
def display(self, activity = None, fractionDone = None):
# really only used by StorageWrapper now
if activity:
self.status_msg = activity
if fractionDone is not None:
self.status_done = float(fractionDone)
def finished(self):
self.seed = True
def error(self, msg):
if self.doneflag.isSet():
self._shutdown()
self.status_err.append(msg)
self.status_errtime = clock()
class LaunchMany:
def __init__(self, config, Output):
try:
self.config = config
self.Output = Output
self.torrent_dir = config['torrent_dir']
self.torrent_cache = {}
self.file_cache = {}
self.blocked_files = {}
self.scan_period = config['parse_dir_interval']
self.stats_period = config['display_interval']
self.torrent_list = []
self.downloads = {}
self.counter = 0
self.doneflag = Event()
self.hashcheck_queue = []
self.hashcheck_current = None
self.rawserver = RawServer(self.doneflag,
config['timeout_check_interval'],
config['timeout'],
ipv6_enable = config['ipv6_enabled'],
failfunc = self.failed,
errorfunc = self.exchandler)
upnp_type = UPnP_test(config['upnp_nat_access'])
while 1:
try:
self.listen_port = self.rawserver.find_and_bind(
config['minport'], config['maxport'], config['bind'],
ipv6_socket_style = config['ipv6_binds_v4'],
upnp = upnp_type, randomizer = config['random_port'])
break
except socketerror, e:
if upnp_type and e == UPnP_ERROR:
self.Output.message('WARNING: COULD NOT FORWARD VIA UPnP')
upnp_type = 0
continue
self.failed("Couldn't listen - " + str(e))
return
self.ratelimiter = RateLimiter(self.rawserver.add_task,
config['upload_unit_size'])
self.ratelimiter.set_upload_rate(config['max_upload_rate'])
self.handler = MultiHandler(self.rawserver, self.doneflag)
seed(createPeerID())
self.rawserver.add_task(self.scan, 0)
self.rawserver.add_task(self.stats, 0)
self.start()
except:
data = StringIO()
print_exc(file = data)
Output.exception(data.getvalue())
def start(self):
try:
self.handler.listen_forever()
except:
data = StringIO()
print_exc(file=data)
self.Output.exception(data.getvalue())
self.hashcheck_queue = []
for hash in self.torrent_list:
self.Output.message('dropped "'+self.torrent_cache[hash]['path']+'"')
self.downloads[hash].shutdown()
self.rawserver.shutdown()
def scan(self):
self.rawserver.add_task(self.scan, self.scan_period)
r = parsedir(self.torrent_dir, self.torrent_cache,
self.file_cache, self.blocked_files,
return_metainfo = True, errfunc = self.Output.message)
( self.torrent_cache, self.file_cache, self.blocked_files,
added, removed ) = r
for hash, data in removed.items():
self.Output.message('dropped "'+data['path']+'"')
self.remove(hash)
for hash, data in added.items():
self.Output.message('added "'+data['path']+'"')
self.add(hash, data)
def stats(self):
self.rawserver.add_task(self.stats, self.stats_period)
data = []
for hash in self.torrent_list:
cache = self.torrent_cache[hash]
if self.config['display_path']:
name = cache['path']
else:
name = cache['name']
size = cache['length']
d = self.downloads[hash]
progress = '0.0%'
peers = 0
seeds = 0
seedsmsg = "S"
dist = 0.0
uprate = 0.0
dnrate = 0.0
upamt = 0
dnamt = 0
t = 0
if d.is_dead():
status = 'stopped'
elif d.waiting:
status = 'waiting for hash check'
elif d.checking:
status = d.status_msg
progress = '%.1f%%' % (d.status_done*100)
else:
stats = d.statsfunc()
s = stats['stats']
if d.seed:
status = 'seeding'
progress = '100.0%'
seeds = s.numOldSeeds
seedsmsg = "s"
dist = s.numCopies
else:
if s.numSeeds + s.numPeers:
t = stats['time']
if t == 0: # unlikely
t = 0.01
status = fmttime(t)
else:
t = -1
status = 'connecting to peers'
progress = '%.1f%%' % (int(stats['frac']*1000)/10.0)
seeds = s.numSeeds
dist = s.numCopies2
dnrate = stats['down']
peers = s.numPeers
uprate = stats['up']
upamt = s.upTotal
dnamt = s.downTotal
if d.is_dead() or d.status_errtime+300 > clock():
msg = d.status_err[-1]
else:
msg = ''
data.append(( name, status, progress, peers, seeds, seedsmsg, dist,
uprate, dnrate, upamt, dnamt, size, t, msg ))
stop = self.Output.display(data)
if stop:
self.doneflag.set()
def remove(self, hash):
self.torrent_list.remove(hash)
self.downloads[hash].shutdown()
del self.downloads[hash]
def add(self, hash, data):
c = self.counter
self.counter += 1
x = ''
for i in xrange(3):
x = mapbase64[c & 0x3F]+x
c >>= 6
peer_id = createPeerID(x)
d = SingleDownload(self, hash, data['metainfo'], self.config, peer_id)
self.torrent_list.append(hash)
self.downloads[hash] = d
d.start()
return d
def saveAs(self, hash, name, saveas, isdir):
x = self.torrent_cache[hash]
style = self.config['saveas_style']
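        # saveas_style 1 or 3: derive the save location from the .torrent file
        # name (minus its extension); style 3 additionally always creates that
        # directory, even for single-file torrents.  Any other style: save
        # under the name stored inside the torrent metainfo.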
if style == 1 or style == 3:
if saveas:
saveas = os.path.join(saveas, x['file'][:-1-len(x['type'])])
else:
saveas = x['path'][:-1-len(x['type'])]
if style == 3:
if not os.path.isdir(saveas):
try:
os.mkdir(saveas)
except:
raise OSError("couldn't create directory for "+x['path']
+" ("+saveas+")")
if not isdir:
saveas = os.path.join(saveas, name)
else:
if saveas:
saveas = os.path.join(saveas, name)
else:
saveas = os.path.join(os.path.split(x['path'])[0], name)
if isdir and not os.path.isdir(saveas):
try:
os.mkdir(saveas)
except:
raise OSError("couldn't create directory for "+x['path']
+" ("+saveas+")")
return saveas
def hashchecksched(self, hash = None):
if hash:
self.hashcheck_queue.append(hash)
# Check smallest torrents first
self.hashcheck_queue.sort(lambda x, y: cmp(self.downloads[x].d.datalength, self.downloads[y].d.datalength))
if not self.hashcheck_current:
self._hashcheck_start()
def _hashcheck_start(self):
self.hashcheck_current = self.hashcheck_queue.pop(0)
self.downloads[self.hashcheck_current].hashcheck_start(self.hashcheck_callback)
def hashcheck_callback(self):
self.downloads[self.hashcheck_current].hashcheck_callback()
if self.hashcheck_queue:
self._hashcheck_start()
else:
self.hashcheck_current = None
def died(self, hash):
if self.torrent_cache.has_key(hash):
self.Output.message('DIED: "'+self.torrent_cache[hash]['path']+'"')
def was_stopped(self, hash):
try:
self.hashcheck_queue.remove(hash)
except:
pass
if self.hashcheck_current == hash:
self.hashcheck_current = None
if self.hashcheck_queue:
self._hashcheck_start()
def failed(self, s):
self.Output.message('FAILURE: '+s)
def exchandler(self, s):
self.Output.exception(s)
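# A minimal sketch (not part of the original BitTornado sources) of the Output
# interface that LaunchMany relies on above: message() and exception() receive
# strings, and display(data) gets the per-torrent status tuples built in
# LaunchMany.stats(); returning a true value from display() sets the done flag.
# The field layout below mirrors the tuple appended in stats().
class HeadlessOutput:
    def message(self, s):
        print '[launchmany]', s
    def exception(self, s):
        print '[launchmany] TRACEBACK:'
        print s
    def display(self, data):
        for (name, status, progress, peers, seeds, seedsmsg, dist,
             uprate, dnrate, upamt, dnamt, size, t, msg) in data:
            print '%s  %s  %s  peers:%d  %s:%d' % (name, status, progress,
                                                   peers, seedsmsg, seeds)
        return False  # keep running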
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import copy
import warnings
from argparse import ArgumentParser, _HelpAction
from collections import namedtuple
import six
from pants.base.deprecated import check_deprecated_semver
from pants.option.arg_splitter import GLOBAL_SCOPE
from pants.option.errors import ParseError, RegistrationError
from pants.option.help_formatter import PantsAdvancedHelpFormatter, PantsBasicHelpFormatter
from pants.option.ranked_value import RankedValue
# Standard ArgumentParser prints usage and exits on error. We subclass so we can raise instead.
# Note that subclassing ArgumentParser for this purpose is allowed by the argparse API.
class CustomArgumentParser(ArgumentParser):
def __init__(self, scope, *args, **kwargs):
super(CustomArgumentParser, self).__init__(*args, **kwargs)
self._scope = scope
def error(self, message):
scope = 'global' if self._scope == GLOBAL_SCOPE else self._scope
raise ParseError('{0} in {1} scope'.format(message, scope))
def walk_actions(self):
"""Iterates over the argparse.Action objects for options registered on this parser."""
for action_group in self._action_groups:
for action in action_group._group_actions:
if not isinstance(action, _HelpAction):
yield action
class Parser(object):
"""An argument parser in a hierarchy.
Each node in the hierarchy is a 'scope': the root is the global scope, and the parent of
a node is the scope it's immediately contained in. E.g., the 'compile.java' scope is
a child of the 'compile' scope, which is a child of the global scope.
Options registered on a parser are also registered transitively on all the scopes it encloses.
Registration must be in outside-in order: we forbid registering options on an outer scope if
we've already registered an option on one of its inner scopes. This is to ensure that
re-registering the same option name on an inner scope correctly replaces the identically-named
option from the outer scope.
:param env: a dict of environment variables.
:param config: data from a config file (must support config.get[list](section, name, default=)).
:param scope: the scope this parser acts for.
:param parent_parser: the parser for the scope immediately enclosing this one, or
None if this is the global scope.
"""
class BooleanConversionError(ParseError):
"""Raised when a value other than 'True' or 'False' is encountered."""
pass
class Flag(namedtuple('Flag', ['name', 'inverse_name', 'help_arg'])):
"""A struct describing a single flag and its corresponding help representation.
No-argument boolean flags also support an `inverse_name` to set the corresponding option value
    in the opposite sense from its default. All other flags will have no `inverse_name`.
"""
@classmethod
def _create(cls, flag, **kwargs):
if kwargs.get('action') in ('store_false', 'store_true') and flag.startswith('--'):
if flag.startswith('--no-'):
raise RegistrationError(
'Invalid flag name "{}". Boolean flag names cannot start with --no-'.format(flag))
name = flag[2:]
return cls(flag, '--no-' + name, '--[no-]' + name)
else:
return cls(flag, None, flag)
@classmethod
def expand_flags(cls, *args, **kwargs):
"""Returns a list of the flags associated with an option registration.
For example:
>>> from pants.option.parser import Parser
>>> def print_flags(flags):
... print('\n'.join(map(str, flags)))
...
>>> print_flags(Parser.expand_flags('-q', '--quiet', action='store_true',
... help='Squelches all console output apart from errors.'))
Flag(name='-q', inverse_name=None, help_arg='-q')
Flag(name='--quiet', inverse_name=u'--no-quiet', help_arg=u'--[no-]quiet')
>>>
:param *args: The args (flag names), that would be passed to an option registration.
:param **kwargs: The kwargs that would be passed to an option registration.
"""
return [cls.Flag._create(flag, **kwargs) for flag in args]
def __init__(self, env, config, scope, help_request, parent_parser):
self._env = env
self._config = config
self._scope = scope
self._help_request = help_request
# If True, no more registration is allowed on this parser.
self._frozen = False
# The argparser we use for actually parsing args.
self._argparser = CustomArgumentParser(scope=self._scope, conflict_handler='resolve')
# The argparser we use for formatting help messages.
# We don't use self._argparser for this as it will have all options from enclosing scopes
# registered on it too, which would create unnecessarily repetitive help messages.
formatter_class = (PantsAdvancedHelpFormatter if help_request and help_request.advanced
else PantsBasicHelpFormatter)
self._help_argparser = CustomArgumentParser(scope=self._scope, conflict_handler='resolve',
formatter_class=formatter_class)
# Options are registered in two groups. The first group will always be displayed in the help
# output. The second group is for advanced options that are not normally displayed, because
# they're intended as sitewide config and should not typically be modified by individual users.
self._help_argparser_group = self._help_argparser.add_argument_group(title=scope)
self._help_argparser_advanced_group = \
self._help_argparser.add_argument_group(title='*{0}'.format(scope))
# If True, we have at least one option to show help for.
self._has_help_options = False
# Map of external to internal dest names. See docstring for _set_dest below.
self._dest_forwardings = {}
# Keep track of deprecated flags. Maps flag -> (deprecated_version, deprecated_hint)
self._deprecated_flags = {}
# A Parser instance, or None for the global scope parser.
self._parent_parser = parent_parser
# List of Parser instances.
self._child_parsers = []
if self._parent_parser:
self._parent_parser._register_child_parser(self)
@staticmethod
def str_to_bool(s):
if isinstance(s, six.string_types):
if s.lower() == 'true':
return True
elif s.lower() == 'false':
return False
else:
raise Parser.BooleanConversionError('Got "{0}". Expected "True" or "False".'.format(s))
if s is True:
return True
elif s is False:
return False
else:
raise Parser.BooleanConversionError('Got {0}. Expected True or False.'.format(s))
def parse_args(self, args, namespace):
"""Parse the given args and set their values onto the namespace object's attributes."""
namespace.add_forwardings(self._dest_forwardings)
new_args = self._argparser.parse_args(args)
namespace.update(vars(new_args))
self.deprecated_check(args)
return namespace
def format_help(self):
"""Return a help message for the options registered on this object."""
return self._help_argparser.format_help() if self._has_help_options else ''
def register(self, *args, **kwargs):
"""Register an option, using argparse params.
Custom extensions to argparse params:
    :param advanced: if True, the option will normally be suppressed when displaying help.
:param deprecated_version: Mark an option as deprecated. The value is a semver that indicates
the release at which the option should be removed from the code.
:param deprecated_hint: A message to display to the user when displaying help for or invoking
a deprecated option.
"""
if self._frozen:
raise RegistrationError('Cannot register option {0} in scope {1} after registering options '
'in any of its inner scopes.'.format(args[0], self._scope))
# Prevent further registration in enclosing scopes.
ancestor = self._parent_parser
while ancestor:
ancestor._freeze()
ancestor = ancestor._parent_parser
# Pull out our custom arguments, they aren't valid for argparse.
recursive = kwargs.pop('recursive', False)
advanced = kwargs.pop('advanced', False)
self._validate(args, kwargs)
dest = self._set_dest(args, kwargs)
deprecated_version = kwargs.pop('deprecated_version', None)
deprecated_hint = kwargs.pop('deprecated_hint', '')
if deprecated_version is not None:
check_deprecated_semver(deprecated_version)
flag = '--' + dest.replace('_', '-')
self._deprecated_flags[flag] = (deprecated_version, deprecated_hint)
help = kwargs.pop('help', '')
kwargs['help'] = 'DEPRECATED: {}\n{}'.format(self.deprecated_message(flag), help)
inverse_args = []
help_args = []
for flag in self.expand_flags(*args, **kwargs):
if flag.inverse_name:
inverse_args.append(flag.inverse_name)
if deprecated_version:
self._deprecated_flags[flag.inverse_name] = (deprecated_version, deprecated_hint)
help_args.append(flag.help_arg)
is_invertible = len(inverse_args) > 0
# Register the option, only on this scope, for the purpose of displaying help.
# Note that we'll only display the default value for this scope, even though the
# default may be overridden in inner scopes.
raw_default = self._compute_default(dest, is_invertible, kwargs).value
kwargs_with_default = dict(kwargs, default=raw_default)
if advanced:
arg_group = self._help_argparser_advanced_group
else:
arg_group = self._help_argparser_group
arg_group.add_argument(*help_args, **kwargs_with_default)
self._has_help_options = True
# Register the option for the purpose of parsing, on this and all enclosed scopes.
if is_invertible:
inverse_kwargs = self._create_inverse_kwargs(kwargs)
self._register_boolean(dest, args, kwargs, inverse_args, inverse_kwargs, recursive)
else:
self._register(dest, args, kwargs, recursive)
def is_deprecated(self, flag):
"""Returns True if the flag has been marked as deprecated with 'deprecated_version'.
:param flag: flag to test (if it starts with --{scope}-, or --no-{scope}, the scope will be
stripped out)
"""
flag = flag.split('=')[0]
if flag.startswith('--{}-'.format(self._scope)):
flag = '--{}'.format(flag[3 + len(self._scope):]) # strip off the --{scope}- prefix
elif flag.startswith('--no-{}-'.format(self._scope)):
flag = '--no-{}'.format(flag[6 + len(self._scope):]) # strip off the --no-{scope}- prefix
return flag in self._deprecated_flags
def deprecated_message(self, flag):
"""Returns the message to be displayed when a deprecated flag is invoked or asked for help.
    The caller must ensure that the flag has already been tagged as deprecated with the
is_deprecated() method.
:param flag: The flag being invoked, e.g. --foo
"""
flag = flag.split('=')[0]
deprecated_version, deprecated_hint = self._deprecated_flags[flag]
scope = self._scope or 'DEFAULT'
message = 'Option {flag} in scope {scope} is deprecated and will be removed in version ' \
'{removal_version}'.format(flag=flag, scope=scope,
removal_version=deprecated_version)
hint = deprecated_hint or ''
return '{}. {}'.format(message, hint)
def deprecated_check(self, flags):
"""Emit a warning message if one of these flags is marked as deprecated.
:param flags: list of string flags to check. e.g. [ '--foo', '--no-bar', ... ]
"""
for flag in flags:
if self.is_deprecated(flag):
warnings.warn('*** {}'.format(self.deprecated_message(flag)), DeprecationWarning,
stacklevel=9999) # out of range stacklevel to suppress printing source line.
def _register(self, dest, args, kwargs, recursive):
"""Recursively register the option for parsing."""
ranked_default = self._compute_default(dest, is_invertible=False, kwargs=kwargs)
kwargs_with_default = dict(kwargs, default=ranked_default)
self._argparser.add_argument(*args, **kwargs_with_default)
if recursive:
# Propagate registration down to inner scopes.
for child_parser in self._child_parsers:
child_parser._register(dest, args, kwargs, recursive)
def _register_boolean(self, dest, args, kwargs, inverse_args, inverse_kwargs, recursive):
"""Recursively register the boolean option, and its inverse, for parsing."""
group = self._argparser.add_mutually_exclusive_group()
ranked_default = self._compute_default(dest, is_invertible=True, kwargs=kwargs)
kwargs_with_default = dict(kwargs, default=ranked_default)
group.add_argument(*args, **kwargs_with_default)
group.add_argument(*inverse_args, **inverse_kwargs)
if recursive:
# Propagate registration down to inner scopes.
for child_parser in self._child_parsers:
child_parser._register_boolean(dest, args, kwargs, inverse_args, inverse_kwargs, recursive)
def _validate(self, args, kwargs):
"""Ensure that the caller isn't trying to use unsupported argparse features."""
for arg in args:
if not arg.startswith('-'):
raise RegistrationError('Option {0} in scope {1} must begin '
'with a dash.'.format(arg, self._scope))
if not arg.startswith('--') and len(arg) > 2:
raise RegistrationError('Multicharacter option {0} in scope {1} must begin '
'with a double-dash'.format(arg, self._scope))
if 'nargs' in kwargs and kwargs['nargs'] != '?':
raise RegistrationError('nargs={0} unsupported in registration of option {1} in '
'scope {2}.'.format(kwargs['nargs'], args, self._scope))
if 'required' in kwargs:
raise RegistrationError('required unsupported in registration of option {0} in '
'scope {1}.'.format(args, self._scope))
def _set_dest(self, args, kwargs):
"""Maps the externally-used dest to a scoped one only seen internally.
If an option is re-registered in an inner scope, it'll shadow the external dest but will
use a different internal one. This is important in the case that an option is registered
with two names (say -x, --xlong) and we only re-register one of them, say --xlong, in an
inner scope. In this case we no longer want them to write to the same dest, so we can
use both (now with different meanings) in the inner scope.
    Note: Modifies kwargs.
"""
dest = self._select_dest(args, kwargs)
scoped_dest = '_{0}_{1}__'.format(self._scope or 'DEFAULT', dest)
# Make argparse write to the internal dest.
kwargs['dest'] = scoped_dest
# Make reads from the external dest forward to the internal one.
self._dest_forwardings[dest] = scoped_dest
# Also forward all option aliases, so we can reference -x (as options.x) in the example above.
for arg in args:
self._dest_forwardings[arg.lstrip('-').replace('-', '_')] = scoped_dest
return dest
def _select_dest(self, args, kwargs):
"""Select the dest name for the option.
Replicated from the dest inference logic in argparse:
'--foo-bar' -> 'foo_bar' and '-x' -> 'x'.
"""
dest = kwargs.get('dest')
if dest:
return dest
arg = next((a for a in args if a.startswith('--')), args[0])
return arg.lstrip('-').replace('-', '_')
def _compute_default(self, dest, is_invertible, kwargs):
"""Compute the default value to use for an option's registration.
The source of the default value is chosen according to the ranking in RankedValue.
"""
config_section = 'DEFAULT' if self._scope == GLOBAL_SCOPE else self._scope
udest = dest.upper()
if self._scope == GLOBAL_SCOPE:
# For convenience, we allow three forms of env var for global scope options.
# The fully-specified env var is PANTS_DEFAULT_FOO, which is uniform with PANTS_<SCOPE>_FOO
# for all the other scopes. However we also allow simply PANTS_FOO. And if the option name
# itself starts with 'pants-' then we also allow simply FOO. E.g., PANTS_WORKDIR instead of
# PANTS_PANTS_WORKDIR or PANTS_DEFAULT_PANTS_WORKDIR. We take the first specified value we
# find, in this order: PANTS_DEFAULT_FOO, PANTS_FOO, FOO.
env_vars = ['PANTS_DEFAULT_{0}'.format(udest), 'PANTS_{0}'.format(udest)]
if udest.startswith('PANTS_'):
env_vars.append(udest)
else:
env_vars = ['PANTS_{0}_{1}'.format(config_section.upper().replace('.', '_'), udest)]
value_type = self.str_to_bool if is_invertible else kwargs.get('type', str)
env_val_str = None
if self._env:
for env_var in env_vars:
if env_var in self._env:
env_val_str = self._env.get(env_var)
break
env_val = None if env_val_str is None else value_type(env_val_str)
if kwargs.get('action') == 'append':
config_val_strs = self._config.getlist(config_section, dest) if self._config else None
config_val = (None if config_val_strs is None else
[value_type(config_val_str) for config_val_str in config_val_strs])
default = []
else:
config_val_str = (self._config.get(config_section, dest, default=None)
if self._config else None)
config_val = None if config_val_str is None else value_type(config_val_str)
default = None
hardcoded_val = kwargs.get('default')
return RankedValue.choose(None, env_val, config_val, hardcoded_val, default)
def _create_inverse_kwargs(self, kwargs):
"""Create the kwargs for registering the inverse of a boolean flag."""
inverse_kwargs = copy.copy(kwargs)
inverse_action = 'store_true' if kwargs.get('action') == 'store_false' else 'store_false'
inverse_kwargs['action'] = inverse_action
inverse_kwargs.pop('default', None)
return inverse_kwargs
def _register_child_parser(self, child):
self._child_parsers.append(child)
def _freeze(self):
self._frozen = True
def __str__(self):
return 'Parser({})'.format(self._scope)
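# A minimal sketch (not part of the original module) of the outside-in
# registration rule described in the Parser docstring: once an option is
# registered on an inner scope, its enclosing scopes are frozen, and further
# registration on them raises RegistrationError.  The scope name, option names
# and the empty env / absent config used here are illustrative assumptions.
if __name__ == '__main__':
  global_parser = Parser(env={}, config=None, scope=GLOBAL_SCOPE,
                         help_request=None, parent_parser=None)
  compile_parser = Parser(env={}, config=None, scope='compile',
                          help_request=None, parent_parser=global_parser)
  compile_parser.register('--jobs', type=int, default=1)
  try:
    global_parser.register('--level')  # too late: an inner scope already registered
  except RegistrationError as e:
    print(e)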
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from .energy_point_log import get_energy_points as _get_energy_points, create_review_points_log, review
from frappe.utils.testutils import add_custom_field, clear_custom_fields
from frappe.desk.form.assign_to import add as assign_to
class TestEnergyPointLog(unittest.TestCase):
def setUp(self):
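		# the energy point rule map is cached; clear it so rules created inside
		# each test take effect immediately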
frappe.cache().delete_value('energy_point_rule_map')
def tearDown(self):
frappe.set_user('Administrator')
frappe.db.sql('DELETE FROM `tabEnergy Point Log`')
frappe.db.sql('DELETE FROM `tabEnergy Point Rule`')
frappe.cache().delete_value('energy_point_rule_map')
def test_user_energy_point(self):
frappe.set_user('[email protected]')
todo_point_rule = create_energy_point_rule_for_todo()
energy_point_of_user = get_points('[email protected]')
created_todo = create_a_todo()
created_todo.status = 'Closed'
created_todo.save()
points_after_closing_todo = get_points('[email protected]')
self.assertEqual(points_after_closing_todo, energy_point_of_user + todo_point_rule.points)
created_todo.save()
points_after_double_save = get_points('[email protected]')
# point should not be awarded more than once for same doc
self.assertEqual(points_after_double_save, energy_point_of_user + todo_point_rule.points)
def test_points_based_on_multiplier_field(self):
frappe.set_user('[email protected]')
add_custom_field('ToDo', 'multiplier', 'Float')
multiplier_value = 0.51
todo_point_rule = create_energy_point_rule_for_todo('multiplier')
energy_point_of_user = get_points('[email protected]')
created_todo = create_a_todo()
created_todo.status = 'Closed'
created_todo.multiplier = multiplier_value
created_todo.save()
points_after_closing_todo = get_points('[email protected]')
self.assertEqual(points_after_closing_todo,
energy_point_of_user + round(todo_point_rule.points * multiplier_value))
clear_custom_fields('ToDo')
def test_points_based_on_max_points(self):
frappe.set_user('[email protected]')
# here multiplier is high
		# let's see if points get capped to the max_points limit
multiplier_value = 15
max_points = 50
add_custom_field('ToDo', 'multiplier', 'Float')
todo_point_rule = create_energy_point_rule_for_todo('multiplier', max_points=max_points)
energy_point_of_user = get_points('[email protected]')
created_todo = create_a_todo()
created_todo.status = 'Closed'
created_todo.multiplier = multiplier_value
created_todo.save()
points_after_closing_todo = get_points('[email protected]')
# test max_points cap
self.assertNotEqual(points_after_closing_todo,
energy_point_of_user + round(todo_point_rule.points * multiplier_value))
self.assertEqual(points_after_closing_todo,
energy_point_of_user + max_points)
clear_custom_fields('ToDo')
def test_disabled_energy_points(self):
settings = frappe.get_single('Energy Point Settings')
settings.enabled = 0
settings.save()
frappe.set_user('[email protected]')
create_energy_point_rule_for_todo()
energy_point_of_user = get_points('[email protected]')
created_todo = create_a_todo()
created_todo.status = 'Closed'
created_todo.save()
points_after_closing_todo = get_points('[email protected]')
# no change in points
self.assertEqual(points_after_closing_todo, energy_point_of_user)
settings.enabled = 1
settings.save()
def test_review(self):
created_todo = create_a_todo()
review_points = 20
create_review_points_log('[email protected]', review_points)
# reviewer
frappe.set_user('[email protected]')
review_points_before_review = get_points('[email protected]', 'review_points')
self.assertEqual(review_points_before_review, review_points)
# for appreciation
appreciation_points = 5
energy_points_before_review = get_points('[email protected]')
review(created_todo, appreciation_points, '[email protected]', 'good job')
energy_points_after_review = get_points('[email protected]')
review_points_after_review = get_points('[email protected]', 'review_points')
self.assertEqual(energy_points_after_review, energy_points_before_review + appreciation_points)
self.assertEqual(review_points_after_review, review_points_before_review - appreciation_points)
# for criticism
criticism_points = 2
energy_points_before_review = energy_points_after_review
review_points_before_review = review_points_after_review
review(created_todo, criticism_points, '[email protected]', 'You could have done better.', 'Criticism')
energy_points_after_review = get_points('[email protected]')
review_points_after_review = get_points('[email protected]', 'review_points')
self.assertEqual(energy_points_after_review, energy_points_before_review - criticism_points)
self.assertEqual(review_points_after_review, review_points_before_review - criticism_points)
def test_user_energy_point_as_admin(self):
frappe.set_user('Administrator')
create_energy_point_rule_for_todo()
created_todo = create_a_todo()
created_todo.status = 'Closed'
created_todo.save()
points_after_closing_todo = get_points('Administrator')
# no points for admin
self.assertEqual(points_after_closing_todo, 0)
def test_revert_points_on_cancelled_doc(self):
frappe.set_user('[email protected]')
create_energy_point_rule_for_todo()
created_todo = create_a_todo()
created_todo.status = 'Closed'
created_todo.save()
energy_point_logs = frappe.get_all('Energy Point Log')
self.assertEqual(len(energy_point_logs), 1)
# for submit and cancel permission
frappe.set_user('Administrator')
# submit
created_todo.docstatus = 1
created_todo.save()
# cancel
created_todo.docstatus = 2
created_todo.save()
energy_point_logs = frappe.get_all('Energy Point Log', fields=['reference_name', 'type', 'reverted'])
self.assertListEqual(energy_point_logs, [
{'reference_name': created_todo.name, 'type': 'Revert', 'reverted': 0},
{'reference_name': created_todo.name, 'type': 'Auto', 'reverted': 1}
])
def test_energy_point_for_new_document_creation(self):
frappe.set_user('[email protected]')
todo_point_rule = create_energy_point_rule_for_todo(for_doc_event='New')
points_before_todo_creation = get_points('[email protected]')
create_a_todo()
points_after_todo_creation = get_points('[email protected]')
self.assertEqual(points_after_todo_creation,
points_before_todo_creation + todo_point_rule.points)
def test_point_allocation_for_assigned_users(self):
todo = create_a_todo()
assign_users_to_todo(todo.name, ['[email protected]', '[email protected]'])
test_user_before_points = get_points('[email protected]')
test2_user_before_points = get_points('[email protected]')
rule = create_energy_point_rule_for_todo(for_assigned_users=1)
todo.status = 'Closed'
todo.save()
test_user_after_points = get_points('[email protected]')
test2_user_after_points = get_points('[email protected]')
self.assertEqual(test_user_after_points,
test_user_before_points + rule.points)
self.assertEqual(test2_user_after_points,
test2_user_before_points + rule.points)
def test_points_on_field_value_change(self):
rule = create_energy_point_rule_for_todo(for_doc_event='Value Change',
field_to_check='description')
frappe.set_user('[email protected]')
points_before_todo_creation = get_points('[email protected]')
todo = create_a_todo()
todo.status = 'Closed'
todo.save()
points_after_closing_todo = get_points('[email protected]')
self.assertEqual(points_after_closing_todo,
points_before_todo_creation)
todo.description = 'This is new todo'
todo.save()
points_after_changing_todo_description = get_points('[email protected]')
self.assertEqual(points_after_changing_todo_description,
points_before_todo_creation + rule.points)
def test_apply_only_once(self):
frappe.set_user('[email protected]')
todo_point_rule = create_energy_point_rule_for_todo(apply_once=True, user_field='modified_by')
first_user_points = get_points('[email protected]')
created_todo = create_a_todo()
created_todo.status = 'Closed'
created_todo.save()
first_user_points_after_closing_todo = get_points('[email protected]')
self.assertEqual(first_user_points_after_closing_todo, first_user_points + todo_point_rule.points)
frappe.set_user('[email protected]')
second_user_points = get_points('[email protected]')
created_todo.save(ignore_permissions=True)
second_user_points_after_closing_todo = get_points('[email protected]')
# point should not be awarded more than once for same doc (irrespective of user)
self.assertEqual(second_user_points_after_closing_todo, second_user_points)
def test_allow_creation_of_new_log_if_the_previous_log_was_reverted(self):
frappe.set_user('[email protected]')
todo_point_rule = create_energy_point_rule_for_todo()
energy_point_of_user = get_points('[email protected]')
created_todo = create_a_todo()
created_todo.status = 'Closed'
created_todo.save()
points_after_closing_todo = get_points('[email protected]')
log_name = frappe.db.exists('Energy Point Log', {'reference_name': created_todo.name})
frappe.get_doc('Energy Point Log', log_name).revert('Just for test')
points_after_reverting_todo = get_points('[email protected]')
created_todo.save()
points_after_saving_todo_again = get_points('[email protected]')
rule_points = todo_point_rule.points
self.assertEqual(points_after_closing_todo, energy_point_of_user + rule_points)
self.assertEqual(points_after_reverting_todo, points_after_closing_todo - rule_points)
self.assertEqual(points_after_saving_todo_again, points_after_reverting_todo + rule_points)
def create_energy_point_rule_for_todo(multiplier_field=None, for_doc_event='Custom', max_points=None,
for_assigned_users=0, field_to_check=None, apply_once=False, user_field='owner'):
name = 'ToDo Closed'
point_rule_exists = frappe.db.exists('Energy Point Rule', name)
if point_rule_exists: return frappe.get_doc('Energy Point Rule', name)
return frappe.get_doc({
'doctype': 'Energy Point Rule',
'rule_name': name,
'points': 5,
'reference_doctype': 'ToDo',
'condition': 'doc.status == "Closed"',
'for_doc_event': for_doc_event,
'user_field': user_field,
'for_assigned_users': for_assigned_users,
'multiplier_field': multiplier_field,
'max_points': max_points,
'field_to_check': field_to_check,
'apply_only_once': apply_once
}).insert(ignore_permissions=1)
def create_a_todo():
return frappe.get_doc({
'doctype': 'ToDo',
'description': 'Fix a bug',
}).insert()
def get_points(user, point_type='energy_points'):
return _get_energy_points(user).get(point_type) or 0
def assign_users_to_todo(todo_name, users):
for user in users:
assign_to({
'assign_to': [user],
'doctype': 'ToDo',
'name': todo_name
})
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Fri Apr 15 16:00:42 2011 by generateDS.py version 2.4c.
#
import sys
import getopt
import re as re_
etree_ = None
Verbose_import_ = False
( XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
# cElementTree from Python 2.5+
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# ElementTree from Python 2.5+
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError("Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
if (XMLParser_import_library == XMLParser_import_lxml and
'parser' not in kwargs):
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
kwargs['parser'] = etree_.ETCompatXMLParser()
doc = etree_.parse(*args, **kwargs)
return doc
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError, exp:
class GeneratedsSuper(object):
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_integer_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
                    fvalue = int(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of integers')
return input_data
def gds_format_float(self, input_data, input_name=''):
return '%f' % input_data
def gds_validate_float(self, input_data, node, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_float_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of floats')
return input_data
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_double_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of doubles')
return input_data
def gds_format_boolean(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean(self, input_data, node, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(node, 'Requires sequence of booleans ("true", "1", "false", "0")')
return input_data
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'ascii'
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
STRING_CLEANUP_PAT = re_.compile(r"[\n\r\s]+")
#
# Support/utility functions.
#
def showIndent(outfile, level):
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
if not inStr:
return ''
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    if '"' in s1:
        if "'" in s1:
            s1 = '"%s"' % s1.replace('"', '&quot;')
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
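# Examples: quote_attrib('5 < 6') -> '"5 &lt; 6"'; values containing a double
# quote are wrapped in single quotes instead, or use &quot; when both quote
# characters appear.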
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
if XMLParser_import_library == XMLParser_import_lxml:
msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
else:
msg = '%s (element %s)' % (msg, node.tag, )
raise GDSParseError(msg)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(outfile, level, namespace,name)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name))
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \
(self.category, self.content_type, self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \
(self.category, self.content_type, self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write('model_.MixedContainer(%d, %d, "%s",\n' % \
(self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type_chain(self): return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class frameIndex(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, frame=None):
if frame is None:
self.frame = []
else:
self.frame = frame
def factory(*args_, **kwargs_):
if frameIndex.subclass:
return frameIndex.subclass(*args_, **kwargs_)
else:
return frameIndex(*args_, **kwargs_)
factory = staticmethod(factory)
def get_frame(self): return self.frame
def set_frame(self, frame): self.frame = frame
def add_frame(self, value): self.frame.append(value)
def insert_frame(self, index, value): self.frame[index] = value
def export(self, outfile, level, namespace_='fn:', name_='frameIndex', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='frameIndex')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='fn:', name_='frameIndex'):
pass
def exportChildren(self, outfile, level, namespace_='fn:', name_='frameIndex', fromsubclass_=False):
for frame_ in self.frame:
frame_.export(outfile, level, namespace_, name_='frame')
def hasContent_(self):
if (
self.frame
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='frameIndex'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('frame=[\n')
level += 1
for frame_ in self.frame:
showIndent(outfile, level)
outfile.write('model_.frame(\n')
frame_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'frame':
obj_ = frame.factory()
obj_.build(child_)
self.frame.append(obj_)
# end class frameIndex
class frame(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, mDate=None, name=None, ID=None, valueOf_=None):
self.mDate = _cast(None, mDate)
self.name = _cast(None, name)
self.ID = _cast(int, ID)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if frame.subclass:
return frame.subclass(*args_, **kwargs_)
else:
return frame(*args_, **kwargs_)
factory = staticmethod(factory)
def get_mDate(self): return self.mDate
def set_mDate(self, mDate): self.mDate = mDate
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_ID(self): return self.ID
def set_ID(self, ID): self.ID = ID
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='fn:', name_='frame', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='frame')
if self.hasContent_():
outfile.write('>')
outfile.write(self.valueOf_)
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='fn:', name_='frame'):
if self.mDate is not None and 'mDate' not in already_processed:
already_processed.append('mDate')
outfile.write(' mDate=%s' % (quote_attrib(self.mDate), ))
if self.name is not None and 'name' not in already_processed:
already_processed.append('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
if self.ID is not None and 'ID' not in already_processed:
already_processed.append('ID')
outfile.write(' ID="%s"' % self.gds_format_integer(self.ID, input_name='ID'))
def exportChildren(self, outfile, level, namespace_='fn:', name_='frame', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='frame'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.mDate is not None and 'mDate' not in already_processed:
already_processed.append('mDate')
showIndent(outfile, level)
outfile.write('mDate = %s,\n' % (self.mDate,))
if self.name is not None and 'name' not in already_processed:
already_processed.append('name')
showIndent(outfile, level)
outfile.write('name = "%s",\n' % (self.name,))
if self.ID is not None and 'ID' not in already_processed:
already_processed.append('ID')
showIndent(outfile, level)
outfile.write('ID = %d,\n' % (self.ID,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = attrs.get('mDate')
if value is not None and 'mDate' not in already_processed:
already_processed.append('mDate')
self.mDate = value
value = attrs.get('name')
if value is not None and 'name' not in already_processed:
already_processed.append('name')
self.name = value
value = attrs.get('ID')
if value is not None and 'ID' not in already_processed:
already_processed.append('ID')
try:
self.ID = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class frame
class lexemeType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, order=None, headword=None, breakBefore=None, name=None, POS=None, valueOf_=None):
self.order = _cast(None, order)
self.headword = _cast(bool, headword)
self.breakBefore = _cast(bool, breakBefore)
self.name = _cast(None, name)
self.POS = _cast(None, POS)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if lexemeType.subclass:
return lexemeType.subclass(*args_, **kwargs_)
else:
return lexemeType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_order(self): return self.order
def set_order(self, order): self.order = order
def get_headword(self): return self.headword
def set_headword(self, headword): self.headword = headword
def get_breakBefore(self): return self.breakBefore
def set_breakBefore(self, breakBefore): self.breakBefore = breakBefore
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_POS(self): return self.POS
def set_POS(self, POS): self.POS = POS
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='fn:', name_='lexemeType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='lexemeType')
if self.hasContent_():
outfile.write('>')
outfile.write(self.valueOf_)
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='fn:', name_='lexemeType'):
if self.order is not None and 'order' not in already_processed:
already_processed.append('order')
outfile.write(' order=%s' % (quote_attrib(self.order), ))
if self.headword is not None and 'headword' not in already_processed:
already_processed.append('headword')
outfile.write(' headword="%s"' % self.gds_format_boolean(self.gds_str_lower(str(self.headword)), input_name='headword'))
if self.breakBefore is not None and 'breakBefore' not in already_processed:
already_processed.append('breakBefore')
outfile.write(' breakBefore="%s"' % self.gds_format_boolean(self.gds_str_lower(str(self.breakBefore)), input_name='breakBefore'))
if self.name is not None and 'name' not in already_processed:
already_processed.append('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
if self.POS is not None and 'POS' not in already_processed:
already_processed.append('POS')
outfile.write(' POS=%s' % (quote_attrib(self.POS), ))
def exportChildren(self, outfile, level, namespace_='fn:', name_='lexemeType', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='lexemeType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.order is not None and 'order' not in already_processed:
already_processed.append('order')
showIndent(outfile, level)
outfile.write('order = %s,\n' % (self.order,))
if self.headword is not None and 'headword' not in already_processed:
already_processed.append('headword')
showIndent(outfile, level)
outfile.write('headword = %s,\n' % (self.headword,))
if self.breakBefore is not None and 'breakBefore' not in already_processed:
already_processed.append('breakBefore')
showIndent(outfile, level)
outfile.write('breakBefore = %s,\n' % (self.breakBefore,))
if self.name is not None and 'name' not in already_processed:
already_processed.append('name')
showIndent(outfile, level)
outfile.write('name = "%s",\n' % (self.name,))
if self.POS is not None and 'POS' not in already_processed:
already_processed.append('POS')
showIndent(outfile, level)
outfile.write('POS = %s,\n' % (self.POS,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = attrs.get('order')
if value is not None and 'order' not in already_processed:
already_processed.append('order')
self.order = value
value = attrs.get('headword')
if value is not None and 'headword' not in already_processed:
already_processed.append('headword')
if value in ('true', '1'):
self.headword = True
elif value in ('false', '0'):
self.headword = False
else:
raise_parse_error(node, 'Bad boolean attribute')
value = attrs.get('breakBefore')
if value is not None and 'breakBefore' not in already_processed:
already_processed.append('breakBefore')
if value in ('true', '1'):
self.breakBefore = True
elif value in ('false', '0'):
self.breakBefore = False
else:
raise_parse_error(node, 'Bad boolean attribute')
value = attrs.get('name')
if value is not None and 'name' not in already_processed:
already_processed.append('name')
self.name = value
value = attrs.get('POS')
if value is not None and 'POS' not in already_processed:
already_processed.append('POS')
self.POS = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class lexemeType
class semTypeRefType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, ID=None, name=None, valueOf_=None):
self.ID = _cast(None, ID)
self.name = _cast(None, name)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if semTypeRefType.subclass:
return semTypeRefType.subclass(*args_, **kwargs_)
else:
return semTypeRefType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ID(self): return self.ID
def set_ID(self, ID): self.ID = ID
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='fn:', name_='semTypeRefType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='semTypeRefType')
if self.hasContent_():
outfile.write('>')
outfile.write(self.valueOf_)
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='fn:', name_='semTypeRefType'):
if self.ID is not None and 'ID' not in already_processed:
already_processed.append('ID')
outfile.write(' ID=%s' % (quote_attrib(self.ID), ))
if self.name is not None and 'name' not in already_processed:
already_processed.append('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
def exportChildren(self, outfile, level, namespace_='fn:', name_='semTypeRefType', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='semTypeRefType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.ID is not None and 'ID' not in already_processed:
already_processed.append('ID')
showIndent(outfile, level)
outfile.write('ID = %s,\n' % (self.ID,))
if self.name is not None and 'name' not in already_processed:
already_processed.append('name')
showIndent(outfile, level)
outfile.write('name = "%s",\n' % (self.name,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = attrs.get('ID')
if value is not None and 'ID' not in already_processed:
already_processed.append('ID')
self.ID = value
value = attrs.get('name')
if value is not None and 'name' not in already_processed:
already_processed.append('name')
self.name = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class semTypeRefType
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print USAGE_TEXT
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = globals().get(tag)
return tag, rootClass
def parse(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'frameIndex'
rootClass = frameIndex
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0, name_=rootTag,
namespacedef_='')
return rootObj
def parseString(inString):
from StringIO import StringIO
doc = parsexml_(StringIO(inString))
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'frameIndex'
rootClass = frameIndex
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0, name_="frameIndex",
namespacedef_='')
return rootObj
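# Example: parseString('<frameIndex><frame ID="7" name="Motion"/></frameIndex>')
# echoes the re-exported XML to stdout and returns a populated frameIndex
# instance whose .frame list holds a single frame object with ID=7.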
def parseLiteral(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'frameIndex'
rootClass = frameIndex
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('#from frameIndex import *\n\n')
sys.stdout.write('import frameIndex as model_\n\n')
sys.stdout.write('rootObj = model_.rootTag(\n')
rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
sys.stdout.write(')\n')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"frame",
"frameIndex",
"lexemeType",
"semTypeRefType"
]
|