"""Tests for the Config Entry Flow helper."""
from unittest.mock import patch, Mock
import pytest
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.helpers import config_entry_flow
from tests.common import (
MockConfigEntry, MockModule, mock_coro, mock_integration,
mock_entity_platform)
@pytest.fixture
def discovery_flow_conf(hass):
"""Register a handler."""
handler_conf = {
'discovered': False,
}
async def has_discovered_devices(hass):
"""Mock if we have discovered devices."""
return handler_conf['discovered']
with patch.dict(config_entries.HANDLERS):
config_entry_flow.register_discovery_flow(
'test', 'Test', has_discovered_devices,
config_entries.CONN_CLASS_LOCAL_POLL)
yield handler_conf
@pytest.fixture
def webhook_flow_conf(hass):
"""Register a handler."""
with patch.dict(config_entries.HANDLERS):
config_entry_flow.register_webhook_flow(
'test_single', 'Test Single', {}, False)
config_entry_flow.register_webhook_flow(
'test_multiple', 'Test Multiple', {}, True)
yield {}
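# Sketch of the registration call used above (argument roles inferred from
# this test module, not authoritative): register_webhook_flow(domain, title,
# description_placeholders, allow_multiple) registers a webhook-based flow,
# where the dict supplies description placeholders for the confirmation form
# and the final flag controls whether more than one entry may be created.
#
#     config_entry_flow.register_webhook_flow(
#         'test_multiple', 'Test Multiple', {}, True)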
async def test_single_entry_allowed(hass, discovery_flow_conf):
"""Test only a single entry is allowed."""
flow = config_entries.HANDLERS['test']()
flow.hass = hass
MockConfigEntry(domain='test').add_to_hass(hass)
result = await flow.async_step_user()
assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
assert result['reason'] == 'single_instance_allowed'
async def test_user_no_devices_found(hass, discovery_flow_conf):
"""Test if no devices found."""
flow = config_entries.HANDLERS['test']()
flow.hass = hass
flow.context = {
'source': config_entries.SOURCE_USER
}
result = await flow.async_step_confirm(user_input={})
assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
assert result['reason'] == 'no_devices_found'
async def test_user_has_confirmation(hass, discovery_flow_conf):
"""Test user requires no confirmation to setup."""
flow = config_entries.HANDLERS['test']()
flow.hass = hass
discovery_flow_conf['discovered'] = True
result = await flow.async_step_user()
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
@pytest.mark.parametrize('source', ['discovery', 'ssdp', 'zeroconf'])
async def test_discovery_single_instance(hass, discovery_flow_conf, source):
"""Test we not allow duplicates."""
flow = config_entries.HANDLERS['test']()
flow.hass = hass
MockConfigEntry(domain='test').add_to_hass(hass)
result = await getattr(flow, "async_step_{}".format(source))({})
assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
assert result['reason'] == 'single_instance_allowed'
@pytest.mark.parametrize('source', ['discovery', 'ssdp', 'zeroconf'])
async def test_discovery_confirmation(hass, discovery_flow_conf, source):
"""Test we ask for confirmation via discovery."""
flow = config_entries.HANDLERS['test']()
flow.hass = hass
result = await getattr(flow, "async_step_{}".format(source))({})
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
assert result['step_id'] == 'confirm'
result = await flow.async_step_confirm({})
assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
async def test_multiple_discoveries(hass, discovery_flow_conf):
"""Test we only create one instance for multiple discoveries."""
mock_entity_platform(hass, 'config_flow.test', None)
result = await hass.config_entries.flow.async_init(
'test', context={'source': config_entries.SOURCE_DISCOVERY}, data={})
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
# Second discovery
result = await hass.config_entries.flow.async_init(
'test', context={'source': config_entries.SOURCE_DISCOVERY}, data={})
assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
async def test_only_one_in_progress(hass, discovery_flow_conf):
"""Test a user initialized one will finish and cancel discovered one."""
mock_entity_platform(hass, 'config_flow.test', None)
# Discovery starts flow
result = await hass.config_entries.flow.async_init(
'test', context={'source': config_entries.SOURCE_DISCOVERY}, data={})
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
# User starts flow
result = await hass.config_entries.flow.async_init(
'test', context={'source': config_entries.SOURCE_USER}, data={})
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
# Discovery flow has not been aborted
assert len(hass.config_entries.flow.async_progress()) == 2
# Discovery should be aborted once user confirms
result = await hass.config_entries.flow.async_configure(
result['flow_id'], {})
assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert len(hass.config_entries.flow.async_progress()) == 0
async def test_import_no_confirmation(hass, discovery_flow_conf):
"""Test import requires no confirmation to set up."""
flow = config_entries.HANDLERS['test']()
flow.hass = hass
discovery_flow_conf['discovered'] = True
result = await flow.async_step_import(None)
assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
async def test_import_single_instance(hass, discovery_flow_conf):
"""Test import doesn't create second instance."""
flow = config_entries.HANDLERS['test']()
flow.hass = hass
discovery_flow_conf['discovered'] = True
MockConfigEntry(domain='test').add_to_hass(hass)
result = await flow.async_step_import(None)
assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
async def test_webhook_single_entry_allowed(hass, webhook_flow_conf):
"""Test only a single entry is allowed."""
flow = config_entries.HANDLERS['test_single']()
flow.hass = hass
MockConfigEntry(domain='test_single').add_to_hass(hass)
result = await flow.async_step_user()
assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
assert result['reason'] == 'one_instance_allowed'
async def test_webhook_multiple_entries_allowed(hass, webhook_flow_conf):
"""Test multiple entries are allowed when specified."""
flow = config_entries.HANDLERS['test_multiple']()
flow.hass = hass
MockConfigEntry(domain='test_multiple').add_to_hass(hass)
hass.config.api = Mock(base_url='http://example.com')
result = await flow.async_step_user()
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
async def test_webhook_config_flow_registers_webhook(hass, webhook_flow_conf):
"""Test setting up an entry creates a webhook."""
flow = config_entries.HANDLERS['test_single']()
flow.hass = hass
hass.config.api = Mock(base_url='http://example.com')
result = await flow.async_step_user(user_input={})
assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result['data']['webhook_id'] is not None
async def test_webhook_create_cloudhook(hass, webhook_flow_conf):
"""Test only a single entry is allowed."""
assert await setup.async_setup_component(hass, 'cloud', {})
async_setup_entry = Mock(return_value=mock_coro(True))
async_unload_entry = Mock(return_value=mock_coro(True))
mock_integration(hass, MockModule(
'test_single',
async_setup_entry=async_setup_entry,
async_unload_entry=async_unload_entry,
async_remove_entry=config_entry_flow.webhook_async_remove_entry,
))
mock_entity_platform(hass, 'config_flow.test_single', None)
result = await hass.config_entries.flow.async_init(
'test_single', context={'source': config_entries.SOURCE_USER})
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
coro = mock_coro({
'cloudhook_url': 'https://example.com'
})
with patch('hass_nabucasa.cloudhooks.Cloudhooks.async_create',
return_value=coro) as mock_create, \
patch('homeassistant.components.cloud.async_active_subscription',
return_value=True), \
patch('homeassistant.components.cloud.async_is_logged_in',
return_value=True):
result = await hass.config_entries.flow.async_configure(
result['flow_id'], {})
assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result['description_placeholders']['webhook_url'] == \
'https://example.com'
assert len(mock_create.mock_calls) == 1
assert len(async_setup_entry.mock_calls) == 1
with patch('hass_nabucasa.cloudhooks.Cloudhooks.async_delete',
return_value=coro) as mock_delete:
result = \
await hass.config_entries.async_remove(result['result'].entry_id)
assert len(mock_delete.mock_calls) == 1
assert result['require_restart'] is False
#coding:utf-8
import os
import string
import sys
import time
import re
import StringIO
import tempfile
import threading
import traceback
import select
from datetime import datetime
from email.parser import Parser
from email.message import Message
from threading import Thread
from mailpile.i18n import gettext as _
from mailpile.i18n import ngettext as _n
from mailpile.crypto.state import *
from mailpile.crypto.mime import MimeSigningWrapper, MimeEncryptingWrapper
from mailpile.safe_popen import Popen, PIPE, Safe_Pipe
DEFAULT_SERVER = "hkp://subset.pool.sks-keyservers.net"
GPG_KEYID_LENGTH = 8
GNUPG_HOMEDIR = None # None=use what gpg uses
GPG_BINARY = 'gpg'
if sys.platform.startswith('win'):
GPG_BINARY = 'GnuPG\\gpg.exe'
BLOCKSIZE = 65536
openpgp_trust = {"-": _("Trust not calculated"),
"o": _("Unknown trust"),
"q": _("Undefined trust"),
"n": _("Never trust"),
"m": _("Marginally trust"),
"f": _("Full trust"),
"u": _("Ultimate trust"),
"e": _("Expired key, not trusted"),
"d": _("Disabled key, not trusted"), # Deprecated flag.
"r": _("Revoked key, not trusted")}
openpgp_algorithms = {1: _("RSA"),
2: _("RSA (encrypt only)"),
3: _("RSA (sign only)"),
16: _("Elgamal (encrypt only)"),
17: _("DSA"),
20: _("Elgamal (encrypt/sign) [COMPROMISED]")}
# For details on the type 20 compromise, see
# http://lists.gnupg.org/pipermail/gnupg-announce/2003q4/000160.html
class GnuPGResultParser:
"""
Parse the GPG response into EncryptionInfo and SignatureInfo.
"""
def __init__(rp):
rp.signature_info = SignatureInfo()
rp.signature_info["protocol"] = "openpgp"
rp.encryption_info = EncryptionInfo()
rp.encryption_info["protocol"] = "openpgp"
rp.plaintext = ""
def parse(rp, retvals):
signature_info = rp.signature_info
encryption_info = rp.encryption_info
from mailpile.mailutils import ExtractEmailAndName
# First pass, set some initial state.
for data in retvals[1]["status"]:
keyword = data[0].strip() # The last keyword often ends in \n
if keyword == "DECRYPTION_FAILED":
missing = [x[1] for x in retvals[1]["status"]
if x[0] == "NO_SECKEY"]
if missing:
encryption_info.part_status = "missingkey"
encryption_info["missing_keys"] = missing
else:
encryption_info.part_status = "error"
elif keyword == "DECRYPTION_OKAY":
encryption_info.part_status = "decrypted"
rp.plaintext = "".join(retvals[1]["stdout"])
elif keyword == "ENC_TO":
keylist = encryption_info.get("have_keys", [])
                if data[1] not in keylist:
keylist.append(data[1])
encryption_info["have_keys"] = keylist
elif signature_info.part_status == "none":
# Only one of these will ever be emitted per key, use
# this to set initial state. We may end up revising
# the status depending on more info later.
if keyword in ("GOODSIG", "BADSIG"):
email, fn = ExtractEmailAndName(
" ".join(data[2:]).decode('utf-8'))
signature_info["name"] = fn
signature_info["email"] = email
signature_info.part_status = ((keyword == "GOODSIG")
and "unverified"
or "invalid")
elif keyword == "ERRSIG":
signature_info.part_status = "error"
signature_info["keyinfo"] = data[1]
signature_info["timestamp"] = int(data[5])
# Second pass, this may update/mutate the state set above
for data in retvals[1]["status"]:
keyword = data[0].strip() # The last keyword often ends in \n
if keyword == "NO_SECKEY":
if "missing_keys" not in encryption_info:
encryption_info["missing_keys"] = [data[1]]
else:
encryption_info["missing_keys"].append(data[1])
try:
encryption_info["have_keys"].remove(data[1])
except (KeyError, ValueError):
pass
elif keyword == "VALIDSIG":
# FIXME: Determine trust level, between new, unverified,
# verified, untrusted.
signature_info["keyinfo"] = data[1]
signature_info["timestamp"] = int(data[3])
elif keyword in ("EXPKEYSIG", "REVKEYSIG"):
email, fn = ExtractEmailAndName(
" ".join(data[2:]).decode('utf-8'))
signature_info["name"] = fn
signature_info["email"] = email
signature_info.part_status = ((keyword == "EXPKEYSIG")
and "expired"
or "revoked")
# FIXME: This appears to be spammy. Is my key borked, or
# is GnuPG being stupid?
#
# elif keyword == "KEYEXPIRED": # Ignoring: SIGEXPIRED
# signature_info.part_status = "expired"
elif keyword == "KEYREVOKED":
signature_info.part_status = "revoked"
elif keyword == "NO_PUBKEY":
signature_info.part_status = "unknown"
elif keyword in ("TRUST_ULTIMATE", "TRUST_FULLY"):
if signature_info.part_status == "unverified":
signature_info.part_status = "verified"
return rp
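# Illustrative use of GnuPGResultParser (a sketch; `retvals` is the
# (exit_code, outputbuffers) pair produced by GnuPG.run() further below,
# where outputbuffers maps "stdout"/"stderr"/"status" to lists):
#
#     rp = GnuPGResultParser().parse(retvals)
#     rp.signature_info.part_status    # e.g. "unverified", "invalid", ...
#     rp.encryption_info.part_status   # e.g. "decrypted", "missingkey", ...
#     rp.plaintext                     # decrypted text, if any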
class GnuPGRecordParser:
def __init__(self):
self.keys = {}
self.curkey = None
self.record_fields = ["record", "validity", "keysize", "keytype",
"keyid", "creation_date", "expiration_date",
"uidhash", "ownertrust", "uid", "sigclass",
"capabilities", "flag", "sn", "hashtype",
"curve"]
self.record_types = ["pub", "sub", "ssb", "fpr", "uat", "sec", "tru",
"sig", "rev", "uid", "gpg"]
self.record_parsers = [self.parse_pubkey, self.parse_subkey,
self.parse_subkey, self.parse_fingerprint,
self.parse_userattribute, self.parse_privkey,
self.parse_trust, self.parse_signature,
self.parse_revoke, self.parse_uidline,
self.parse_none]
self.dispatch = dict(zip(self.record_types, self.record_parsers))
def parse(self, lines):
for line in lines:
self.parse_line(line)
return self.keys
def parse_line(self, line):
        fields = [s.replace("\\x3a", ":") for s in line.strip().split(":")]
        line = dict(zip(self.record_fields, fields))
r = self.dispatch.get(line["record"], self.parse_unknown)
r(line)
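    # Illustrative --with-colons record, mapped positionally onto
    # record_fields above (values are made up for the example):
    #
    #     pub:u:4096:1:C65626EED13C70DA:1397083159:::u:::scESC:
    #
    # gives line["record"] == "pub", line["validity"] == "u",
    # line["keysize"] == "4096", line["keytype"] == "1" (RSA),
    # line["keyid"] == "C65626EED13C70DA", and so on.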
def parse_pubkey(self, line):
self.curkey = line["keyid"]
line["keytype_name"] = openpgp_algorithms[int(line["keytype"])]
line["capabilities_map"] = {
"encrypt": "E" in line["capabilities"],
"sign": "S" in line["capabilities"],
"certify": "C" in line["capabilities"],
"authenticate": "A" in line["capabilities"],
}
line["disabled"] = "D" in line["capabilities"]
line["revoked"] = "r" in line["validity"]
line["private_key"] = False
line["subkeys"] = []
line["uids"] = []
if line["record"] == "sec":
line["secret"] = True
self.keys[self.curkey] = line
self.parse_uidline(line)
def parse_subkey(self, line):
subkey = {"id": line["keyid"],
"keysize": line["keysize"],
"creation_date": line["creation_date"],
"keytype_name": openpgp_algorithms[int(line["keytype"])]}
self.keys[self.curkey]["subkeys"].append(subkey)
def parse_fingerprint(self, line):
self.keys[self.curkey]["fingerprint"] = line["uid"]
self.keys[line["uid"]] = self.keys[self.curkey]
del(self.keys[self.curkey])
self.curkey = line["uid"]
def parse_userattribute(self, line):
# TODO: We are currently ignoring user attributes as not useful.
# We may at some point want to use --attribute-fd and read
# in user photos and such?
pass
def parse_privkey(self, line):
self.parse_pubkey(line)
def parse_uidline(self, line):
email, name, comment = parse_uid(line["uid"])
if email or name or comment:
self.keys[self.curkey]["uids"].append({
"email": email,
"name": name,
"comment": comment,
"creation_date": line["creation_date"]
})
else:
            pass  # This is the case where a uid or sec line has no
# information aside from the creation date, which we
# parse elsewhere. As these lines are effectively blank,
# we omit them to simplify presentation to the user.
def parse_trust(self, line):
# TODO: We are currently ignoring commentary from the Trust DB.
pass
def parse_signature(self, line):
if "signatures" not in self.keys[self.curkey]:
self.keys[self.curkey]["signatures"] = []
        # `line` is a dict keyed by record_fields, so index by name rather
        # than by position (numeric indices would raise KeyError here).
        sig = {
            "signer": line["uid"],
            "signature_date": line["creation_date"],
            "keyid": line["keyid"],
            "trust": line["sigclass"],
            "keytype": line["keytype"]
        }
self.keys[self.curkey]["signatures"].append(sig)
def parse_revoke(self, line):
# FIXME: should set revocation_date (checked in existing code)
print line
def parse_unknown(self, line):
print "Unknown line with code '%s'" % line[0]
    def parse_none(self, line):
pass
UID_PARSE_RE = "^([^\(\<]+?){0,1}( \((.+?)\)){0,1}( \<(.+?)\>){0,1}\s*$"
def parse_uid(uidstr):
matches = re.match(UID_PARSE_RE, uidstr)
if matches:
email = matches.groups(0)[4] or ""
comment = matches.groups(0)[2] or ""
name = matches.groups(0)[0] or ""
else:
if '@' in uidstr and ' ' not in uidstr:
email, name = uidstr, ""
else:
email, name = "", uidstr
comment = ""
try:
name = name.decode("utf-8")
except UnicodeDecodeError:
try:
name = name.decode("iso-8859-1")
except UnicodeDecodeError:
name = name.decode("utf-8", "replace")
try:
comment = comment.decode("utf-8")
except UnicodeDecodeError:
try:
comment = comment.decode("iso-8859-1")
except UnicodeDecodeError:
comment = comment.decode("utf-8", "replace")
return email, name, comment
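# Illustrative behaviour of parse_uid() (a sketch, not a doctest):
#
#     parse_uid('Alice Example (work) <alice@example.com>')
#     # -> ('alice@example.com', u'Alice Example', u'work')
#
# Unparseable strings fall back to treating the whole value as either an
# email address or a name, with an empty comment.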
class StreamReader(Thread):
def __init__(self, name, fd, callback, lines=True):
Thread.__init__(self, target=self.readin, args=(fd, callback))
self.name = name
self.state = 'startup'
self.lines = lines
self.start()
def __str__(self):
return '%s(%s/%s, lines=%s)' % (Thread.__str__(self),
self.name, self.state, self.lines)
def readin(self, fd, callback):
try:
if self.lines:
self.state = 'read'
for line in iter(fd.readline, b''):
self.state = 'callback'
callback(line)
self.state = 'read'
else:
while True:
self.state = 'read'
buf = fd.read(BLOCKSIZE)
self.state = 'callback'
callback(buf)
if buf == "":
break
except:
traceback.print_exc()
finally:
self.state = 'done'
fd.close()
class StreamWriter(Thread):
def __init__(self, name, fd, output, partial_write_ok=False):
Thread.__init__(self, target=self.writeout, args=(fd, output))
self.name = name
self.state = 'startup'
self.partial_write_ok = partial_write_ok
self.start()
def __str__(self):
return '%s(%s/%s)' % (Thread.__str__(self), self.name, self.state)
def writeout(self, fd, output):
if isinstance(output, (str, unicode)):
total = len(output)
output = StringIO.StringIO(output)
else:
total = 0
try:
while True:
self.state = 'read'
line = output.read(BLOCKSIZE)
if line == "":
break
self.state = 'write'
fd.write(line)
total -= len(line)
output.close()
except:
if not self.partial_write_ok:
print '%s: %s bytes left' % (self, total)
traceback.print_exc()
finally:
self.state = 'done'
fd.close()
DEBUG_GNUPG = False
class GnuPG:
"""
Wrap GnuPG and make all functionality feel Pythonic.
"""
ARMOR_BEGIN_SIGNED = '-----BEGIN PGP SIGNED MESSAGE-----'
ARMOR_BEGIN_SIGNATURE = '-----BEGIN PGP SIGNATURE-----'
ARMOR_END_SIGNATURE = '-----END PGP SIGNATURE-----'
ARMOR_END_SIGNED = '-----END PGP SIGNATURE-----'
ARMOR_BEGIN_ENCRYPTED = '-----BEGIN PGP MESSAGE-----'
ARMOR_END_ENCRYPTED = '-----END PGP MESSAGE-----'
def __init__(self, config, session=None, use_agent=False, debug=False):
global DEBUG_GNUPG
self.available = None
self.gpgbinary = GPG_BINARY
self.outputfds = ["stdout", "stderr", "status"]
self.errors = []
self.session = session
self.config = config or (session and session.config) or None
self.use_agent = use_agent
if self.config:
self.homedir = self.config.sys.gpg_home or GNUPG_HOMEDIR
DEBUG_GNUPG = ('gnupg' in self.config.sys.debug)
self.passphrase = self.config.gnupg_passphrase.get_reader()
else:
self.passphrase = None
self.homedir = GNUPG_HOMEDIR
self.debug = (self._debug_all if (debug or DEBUG_GNUPG)
else self._debug_none)
def _debug_all(self, msg):
if self.session:
self.session.debug(msg.rstrip())
else:
print '%s' % str(msg).rstrip()
def _debug_none(self, msg):
pass
def set_home(self, path):
self.homedir = path
def version(self):
retvals = self.run(["--version"])
return retvals[1]["stdout"][0].split('\n')[0]
def is_available(self):
try:
retvals = self.run(["--version"])
self.available = True
except OSError:
self.available = False
return self.available
def run(self,
args=None, gpg_input=None, outputfd=None, partial_read_ok=False,
send_passphrase=False, _raise=None):
self.outputbuffers = dict([(x, []) for x in self.outputfds])
self.threads = {}
        args = args[:] if args else []
        wtf = ' '.join(args)
args.insert(0, self.gpgbinary)
args.insert(1, "--utf8-strings")
args.insert(1, "--with-colons")
args.insert(1, "--verbose")
args.insert(1, "--batch")
args.insert(1, "--enable-progress-filter")
if not self.use_agent:
args.insert(1, "--no-use-agent")
if self.homedir:
args.insert(1, "--homedir=%s" % self.homedir)
gpg_retcode = -1
proc = None
try:
args.insert(1, "--status-fd=2")
if self.passphrase and send_passphrase:
if self.use_agent:
args.insert(1, "--no-use-agent")
args.insert(2, "--passphrase-fd=0")
if not self.passphrase and send_passphrase:
self.debug('Running WITHOUT PASSPHRASE %s' % ' '.join(args))
self.debug(traceback.format_stack())
else:
self.debug('Running %s' % ' '.join(args))
# Here we go!
proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE, bufsize=0)
# GnuPG is a bit crazy, and requires that the passphrase
# be sent and the filehandle closed before anything else
# interesting happens.
if self.passphrase and send_passphrase:
c = self.passphrase.read(BLOCKSIZE)
while c != '':
proc.stdin.write(c)
c = self.passphrase.read(BLOCKSIZE)
proc.stdin.write('\n')
self.threads = {
"stderr": StreamReader('gpgi-stderr(%s)' % wtf,
proc.stderr, self.parse_stderr)
}
if outputfd:
self.threads["stdout"] = StreamReader(
'gpgi-stdout-to-fd(%s)' % wtf,
proc.stdout, outputfd.write, lines=False)
else:
self.threads["stdout"] = StreamReader(
'gpgi-stdout-parsed(%s)' % wtf,
proc.stdout, self.parse_stdout)
if gpg_input:
                # If we have input, we just stream it to GnuPG. Technically,
                # this doesn't really need to be a thread at the moment.
self.debug('<<STDOUT<< %s' % gpg_input)
StreamWriter('gpgi-output(%s)' % wtf,
proc.stdin, gpg_input,
partial_write_ok=partial_read_ok).join()
else:
proc.stdin.close()
# Reap GnuPG
gpg_retcode = proc.wait()
finally:
# Close this so GPG will terminate. This should already have
# been done, but we're handling errors here...
if proc and proc.stdin:
proc.stdin.close()
# Reap the threads
self._reap_threads()
if outputfd:
outputfd.close()
if gpg_retcode != 0 and _raise:
raise _raise('GnuPG failed, exit code: %s' % gpg_retcode)
return gpg_retcode, self.outputbuffers
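    # Rough shape of run()'s return value (a sketch, not captured output):
    # the "status" buffer holds the split "[GNUPG:] ..." lines collected by
    # parse_status() below.
    #
    #     (0, {"stdout": ["...plaintext or key listing...\n"],
    #          "stderr": ["gpg: ...\n"],
    #          "status": [["ENC_TO", "C65626EED13C70DA", "1", "0"], ...]})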
def _reap_threads(self):
for name, thr in self.threads.iteritems():
if thr.isAlive():
thr.join(timeout=15)
if thr.isAlive():
print 'SCARY WARNING: FAILED TO REAP THREAD %s' % thr
def parse_status(self, line, *args):
self.debug('<<STATUS<< %s' % line)
line = line.replace("[GNUPG:] ", "")
if line == "":
return
elems = line.split(" ")
self.outputbuffers["status"].append(elems)
def parse_stdout(self, line):
self.debug('<<STDOUT<< %s' % line)
self.outputbuffers["stdout"].append(line)
def parse_stderr(self, line):
if line.startswith("[GNUPG:] "):
return self.parse_status(line)
self.debug('<<STDERR<< %s' % line)
self.outputbuffers["stderr"].append(line)
def parse_keylist(self, keylist):
rlp = GnuPGRecordParser()
return rlp.parse(keylist)
def list_keys(self):
"""
>>> g = GnuPG(None)
>>> g.list_keys()[0]
0
"""
retvals = self.run(["--fingerprint", "--list-keys"])
return self.parse_keylist(retvals[1]["stdout"])
def list_secret_keys(self):
#
# Note: The "." parameter that is passed is to work around a bug
# in GnuPG < 2.1, where --list-secret-keys does not list
# details about key capabilities or expiry for
# --list-secret-keys unless a selector is provided. A dot
# is reasonably likely to appear in all PGP keys, as it is
# a common component of e-mail addresses (and @ does not
# work as a selector for some reason...)
#
# The downside of this workaround is that keys with no e-mail
# address or an address like alice@localhost won't be found.
        # Therefore, this parameter should be removed when GnuPG >= 2.1
# becomes commonplace.
#
# (This is a better workaround than doing an additional
# --list-keys and trying to aggregate it though...)
#
# BRE: Put --fingerprint at the front and added selectors
        # for the world's MOST POPULAR LETTERS! Yaaay!
#
retvals = self.run(["--fingerprint",
"--list-secret-keys", ".",
"--list-secret-keys", "a",
"--list-secret-keys", "e",
"--list-secret-keys", "i",
"--list-secret-keys", "p",
"--list-secret-keys", "t",
"--list-secret-keys", "k"])
secret_keys = self.parse_keylist(retvals[1]["stdout"])
        # Another unfortunate thing GPG does is hide the disabled
# state when listing secret keys; it seems internally only the
# public key is disabled. This makes it hard for us to reason about
# which keys can actually be used, so we compensate...
list_keys = ["--fingerprint"]
for fprint in secret_keys:
list_keys += ["--list-keys", fprint]
retvals = self.run(list_keys)
public_keys = self.parse_keylist(retvals[1]["stdout"])
for fprint, info in public_keys.iteritems():
if fprint in secret_keys:
for k in ("disabled", "revoked"): # FIXME: Copy more?
secret_keys[fprint][k] = info[k]
return secret_keys
def import_keys(self, key_data=None):
"""
Imports gpg keys from a file object or string.
>>> key_data = open("testing/pub.key").read()
>>> g = GnuPG(None)
>>> g.import_keys(key_data)
{'failed': [], 'updated': [{'details_text': 'unchanged', 'details': 0, 'fingerprint': '08A650B8E2CBC1B02297915DC65626EED13C70DA'}], 'imported': [], 'results': {'sec_dups': 0, 'unchanged': 1, 'num_uids': 0, 'skipped_new_keys': 0, 'no_userids': 0, 'num_signatures': 0, 'num_revoked': 0, 'sec_imported': 0, 'sec_read': 0, 'not_imported': 0, 'count': 1, 'imported_rsa': 0, 'imported': 0, 'num_subkeys': 0}}
"""
retvals = self.run(["--import"], gpg_input=key_data)
return self._parse_import(retvals[1]["status"])
def _parse_import(self, output):
res = {"imported": [], "updated": [], "failed": []}
for x in output:
if x[0] == "IMPORTED":
res["imported"].append({
"fingerprint": x[1],
"username": x[2]
})
elif x[0] == "IMPORT_OK":
reasons = {
"0": "unchanged",
"1": "new key",
"2": "new user IDs",
"4": "new signatures",
"8": "new subkeys",
"16": "contains private key",
}
res["updated"].append({
"details": int(x[1]),
"details_text": reasons[x[1]],
"fingerprint": x[2],
})
elif x[0] == "IMPORT_PROBLEM":
reasons = {
"0": "no reason given",
"1": "invalid certificate",
"2": "issuer certificate missing",
"3": "certificate chain too long",
"4": "error storing certificate",
}
res["failed"].append({
"details": int(x[1]),
"details_text": reasons[x[1]],
"fingerprint": x[2]
})
elif x[0] == "IMPORT_RES":
res["results"] = {
"count": int(x[1]),
"no_userids": int(x[2]),
"imported": int(x[3]),
"imported_rsa": int(x[4]),
"unchanged": int(x[5]),
"num_uids": int(x[6]),
"num_subkeys": int(x[7]),
"num_signatures": int(x[8]),
"num_revoked": int(x[9]),
"sec_read": int(x[10]),
"sec_imported": int(x[11]),
"sec_dups": int(x[12]),
"skipped_new_keys": int(x[13]),
"not_imported": int(x[14]),
}
return res
def decrypt(self, data, outputfd=None, passphrase=None, as_lines=False):
"""
Note that this test will fail if you don't replace the recipient with
one whose key you control.
>>> g = GnuPG(None)
        >>> ct = g.encrypt("Hello, World", tokeys=["[email protected]"])[1]
>>> g.decrypt(ct)["text"]
'Hello, World'
"""
if passphrase:
self.passphrase = passphrase
action = ["--decrypt"]
retvals = self.run(action, gpg_input=data, outputfd=outputfd,
send_passphrase=True)
self.passphrase = None
if as_lines:
as_lines = retvals[1]["stdout"]
retvals[1]["stdout"] = []
rp = GnuPGResultParser().parse(retvals)
return (rp.signature_info, rp.encryption_info,
as_lines or rp.plaintext)
def remove_armor(self, text):
lines = text.strip().splitlines(True)
if lines[0].startswith(self.ARMOR_BEGIN_SIGNED):
for idx in reversed(range(0, len(lines))):
if lines[idx].startswith(self.ARMOR_BEGIN_SIGNATURE):
lines = lines[:idx]
while lines and lines[0].strip():
lines.pop(0)
break
return ''.join(lines).strip()
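    # Illustrative effect of remove_armor() on a clearsigned message (a
    # sketch): the "Hash:" header block and everything from the signature
    # marker onwards are dropped, leaving only the signed text.
    #
    #     -----BEGIN PGP SIGNED MESSAGE-----
    #     Hash: SHA256
    #
    #     Hello, World            ->   'Hello, World'
    #     -----BEGIN PGP SIGNATURE-----
    #     ...
    #     -----END PGP SIGNATURE-----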
def verify(self, data, signature=None):
"""
>>> g = GnuPG(None)
        >>> s = g.sign("Hello, World", fromkey="[email protected]",
        ...            clearsign=True)[1]
>>> g.verify(s)
"""
params = ["--verify"]
if signature:
sig = tempfile.NamedTemporaryFile()
sig.write(signature)
sig.flush()
params.append(sig.name)
params.append("-")
ret, retvals = self.run(params, gpg_input=data, partial_read_ok=True)
return GnuPGResultParser().parse([None, retvals]).signature_info
def encrypt(self, data, tokeys=[], armor=True,
sign=False, fromkey=None):
"""
>>> g = GnuPG(None)
>>> g.encrypt("Hello, World", to=["[email protected]"])[0]
0
"""
action = ["--encrypt", "--yes", "--expert", "--trust-model", "always"]
if armor:
action.append("--armor")
for r in tokeys:
action.append("--recipient")
action.append(r)
if sign:
action.append("--sign")
if sign and fromkey:
action.append("--local-user")
action.append(fromkey)
retvals = self.run(action, gpg_input=data, send_passphrase=sign)
return retvals[0], "".join(retvals[1]["stdout"])
def sign(self, data,
fromkey=None, armor=True, detatch=True, clearsign=False,
passphrase=None):
"""
>>> g = GnuPG(None)
>>> g.sign("Hello, World", fromkey="[email protected]")[0]
0
"""
if passphrase:
self.passphrase = passphrase
if detatch and not clearsign:
action = ["--detach-sign"]
elif clearsign:
action = ["--clearsign"]
else:
action = ["--sign"]
if armor:
action.append("--armor")
if fromkey:
action.append("--local-user")
action.append(fromkey)
retvals = self.run(action, gpg_input=data, send_passphrase=True)
self.passphrase = None
return retvals[0], "".join(retvals[1]["stdout"])
def sign_encrypt(self, data, fromkey=None, tokeys=[], armor=True,
detatch=False, clearsign=True):
retval, signblock = self.sign(data, fromkey=fromkey, armor=armor,
detatch=detatch, clearsign=clearsign)
if detatch:
# TODO: Deal with detached signature.
retval, cryptblock = self.encrypt(data, tokeys=tokeys,
armor=armor)
else:
retval, cryptblock = self.encrypt(signblock, tokeys=tokeys,
armor=armor)
return cryptblock
def sign_key(self, keyid, signingkey=None):
action = ["--yes", "--sign-key", keyid]
if signingkey:
action.insert(1, "-u")
action.insert(2, signingkey)
retvals = self.run(action, send_passphrase=True)
return retvals
def recv_key(self, keyid, keyserver=DEFAULT_SERVER):
retvals = self.run(['--keyserver', keyserver, '--recv-key', keyid])
return self._parse_import(retvals[1]["status"])
def search_key(self, term, keyserver=DEFAULT_SERVER):
retvals = self.run(['--keyserver', keyserver,
'--fingerprint',
'--search-key', self._escape_hex_keyid_term(term)]
)[1]["stdout"]
results = {}
lines = [x.strip().split(":") for x in retvals]
curpub = None
for line in lines:
if line[0] == "info":
pass
elif line[0] == "pub":
curpub = line[1]
validity = line[6]
if line[5]:
if int(line[5]) < time.time():
validity += 'e'
results[curpub] = {
"created": datetime.fromtimestamp(int(line[4])),
"keytype_name": openpgp_algorithms[int(line[2])],
"keysize": line[3],
"validity": validity,
"uids": [],
"fingerprint": curpub
}
elif line[0] == "uid":
email, name, comment = parse_uid(line[1])
results[curpub]["uids"].append({"name": name,
"email": email,
"comment": comment})
return results
def get_pubkey(self, keyid):
retvals = self.run(['--armor',
'--export', keyid]
)[1]["stdout"]
return "".join(retvals)
def address_to_keys(self, address):
res = {}
keys = self.list_keys()
for key, props in keys.iteritems():
if any([x["email"] == address for x in props["uids"]]):
res[key] = props
return res
def _escape_hex_keyid_term(self, term):
"""Prepends a 0x to hexadecimal key ids, e.g. D13C70DA is converted to 0xD13C70DA.
This is necessary because version 1 and 2 of GnuPG show a different behavior here,
version 1 allows to search without 0x while version 2 requires 0x in front of the key id.
"""
is_hex_keyid = False
if len(term) == GPG_KEYID_LENGTH or len(term) == 2*GPG_KEYID_LENGTH:
hex_digits = set(string.hexdigits)
is_hex_keyid = all(c in hex_digits for c in term)
if is_hex_keyid:
return '0x%s' % term
else:
return term
def chat(self, gpg_args, callback, *args, **kwargs):
"""This lets a callback have a chat with the GPG process..."""
gpg_args = [self.gpgbinary,
"--utf8-strings",
"--no-use-agent",
"--no-tty",
"--command-fd=0",
"--status-fd=1"] + (gpg_args or [])
if self.homedir:
gpg_args.insert(1, "--homedir=%s" % self.homedir)
proc = None
try:
# Here we go!
proc = Popen(gpg_args, stdin=PIPE, stdout=PIPE, stderr=PIPE,
bufsize=0)
return callback(proc, *args, **kwargs)
finally:
# Close this so GPG will terminate. This should already have
# been done, but we're handling errors here...
if proc and proc.stdin:
proc.stdin.close()
if proc:
proc.wait()
def GetKeys(gnupg, config, people):
keys = []
missing = []
# First, we go to the contact database and get a list of keys.
for person in set(people):
if '#' in person:
keys.append(person.rsplit('#', 1)[1])
else:
vcard = config.vcards.get_vcard(person)
if vcard:
# FIXME: Rather than get_all, we should give the vcard the
# option of providing us with its favorite key.
lines = [vcl for vcl in vcard.get_all('KEY')
if vcl.value.startswith('data:application'
'/x-pgp-fingerprint,')]
if len(lines) == 1:
keys.append(lines[0].value.split(',', 1)[1])
else:
missing.append(person)
else:
missing.append(person)
# FIXME: This doesn't really feel scalable...
all_keys = gnupg.list_keys()
for key_id, key in all_keys.iteritems():
for uid in key.get("uids", []):
if uid.get("email", None) in missing:
missing.remove(uid["email"])
keys.append(key_id)
# Next, we go make sure all those keys are really in our keychain.
fprints = all_keys.keys()
for key in keys:
        # A key is present if it matches one of our fingerprints exactly
        # or as a key-id suffix of one.
        if not any(fprint.endswith(key) for fprint in fprints):
missing.append(key)
if missing:
raise KeyLookupError(_('Keys missing or ambiguous for %s'
) % ', '.join(missing), missing)
return keys
class OpenPGPMimeSigningWrapper(MimeSigningWrapper):
CONTAINER_PARAMS = (('micalg', 'pgp-sha1'),
('protocol', 'application/pgp-signature'))
SIGNATURE_TYPE = 'application/pgp-signature'
SIGNATURE_DESC = 'OpenPGP Digital Signature'
def crypto(self):
return GnuPG(self.config)
def get_keys(self, who):
return GetKeys(self.crypto(), self.config, who)
class OpenPGPMimeEncryptingWrapper(MimeEncryptingWrapper):
CONTAINER_PARAMS = (('protocol', 'application/pgp-encrypted'), )
ENCRYPTION_TYPE = 'application/pgp-encrypted'
ENCRYPTION_VERSION = 1
def crypto(self):
return GnuPG(self.config)
def get_keys(self, who):
return GetKeys(self.crypto(), self.config, who)
class OpenPGPMimeSignEncryptWrapper(OpenPGPMimeEncryptingWrapper):
CONTAINER_PARAMS = (('protocol', 'application/pgp-encrypted'), )
ENCRYPTION_TYPE = 'application/pgp-encrypted'
ENCRYPTION_VERSION = 1
def crypto(self):
return GnuPG(self.config)
def _encrypt(self, message_text, tokeys=None, armor=False):
from_key = self.get_keys([self.sender])[0]
return self.crypto().encrypt(message_text,
tokeys=tokeys, armor=True,
sign=True, fromkey=from_key)
def _update_crypto_status(self, part):
part.signature_info.part_status = 'verified'
part.encryption_info.part_status = 'decrypted'
class GnuPGExpectScript(threading.Thread):
STARTUP = 'Startup'
START_GPG = 'Start GPG'
FINISHED = 'Finished'
SCRIPT = []
VARIABLES = {}
RUNNING_STATES = [STARTUP, START_GPG]
def __init__(self, sps=None, logfile=None, variables={}, on_complete=None):
threading.Thread.__init__(self)
self.daemon = True
self._lock = threading.RLock()
self.before = ''
with self._lock:
self.state = self.STARTUP
self.logfile = logfile
self.variables = variables or self.VARIABLES
            # Store as (name, callback) to match on_complete() and run().
            self._on_complete = [('on_complete', on_complete)] if on_complete else []
self.gpg = None
self.main_script = self.SCRIPT[:]
self.sps = sps
if sps:
self.variables['passphrase'] = '!!<SPS'
def __str__(self):
return '%s: %s' % (threading.Thread.__str__(self), self.state)
running = property(lambda self: (self.state in self.RUNNING_STATES))
failed = property(lambda self: False)
def __del__(self):
if self.gpg:
self.gpg.close(force=True)
def in_state(self, state):
pass
def set_state(self, state):
self.state = state
self.in_state(state)
def sendline(self, proc, line):
if line == '!!<SPS':
reader = self.sps.get_reader()
while True:
c = reader.read()
if c != '':
proc.stdin.write(c)
else:
proc.stdin.write('\n')
break
else:
proc.stdin.write(line.encode('utf-8'))
proc.stdin.write('\n')
def _expecter(self, proc, exp, timebox):
while timebox[0] > 0:
self.before += proc.stdout.read(1)
if exp in self.before:
self.before = self.before.split(exp)[0]
return True
return False
def expect_exact(self, proc, exp, timeout=None):
from mailpile.util import RunTimed, TimedOut
timeout = timeout if (timeout and timeout > 0) else 5
timebox = [timeout]
self.before = ''
try:
if RunTimed(timeout, self._expecter, proc, exp, timebox):
return True
else:
raise TimedOut()
except TimedOut:
timebox[0] = 0
print 'Boo! %s not found in %s' % (exp, self.before)
raise
def run_script(self, proc, script):
for exp, rpl, tmo, state in script:
self.expect_exact(proc, exp, timeout=tmo)
if rpl:
self.sendline(proc, (rpl % self.variables).strip())
if state:
self.set_state(state)
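    # Each SCRIPT entry is a 4-tuple, unpacked by run_script() above as
    # (expect, reply_template, timeout, new_state): wait for `expect` on
    # GnuPG's output, send `reply_template % self.variables` if it is not
    # None, then move to `new_state` if one is given. For example (from
    # GnuPGKeyGenerator below):
    #
    #     ('GET_LINE keygen.size', '%(bits)s', -1, None)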
def gpg_args(self):
return ['--no-use-agent', '--list-keys']
def run(self):
try:
self.set_state(self.START_GPG)
GnuPG(None).chat(self.gpg_args(),
self.run_script, self.main_script)
self.set_state(self.FINISHED)
except:
import traceback
traceback.print_exc()
finally:
with self._lock:
if self.gpg is not None:
self.gpg.close(force=(self.state != self.FINISHED))
self.gpg = None
if self.state != self.FINISHED:
self.state = 'Failed: ' + self.state
for name, callback in self._on_complete:
callback()
self._on_complete = None
def on_complete(self, name, callback):
with self._lock:
if self._on_complete is not None:
if name not in [o[0] for o in self._on_complete]:
self._on_complete.append((name, callback))
else:
callback()
class GnuPGKeyGenerator(GnuPGExpectScript):
"""This is a background thread which generates a new PGP key."""
KEY_SETUP = 'Key Setup'
GATHER_ENTROPY = 'Creating key'
CREATED_KEY = 'Created key'
HAVE_KEY = 'Have Key'
SCRIPT = [
('GET_LINE keygen.algo', '%(keytype)s', -1, KEY_SETUP),
('GET_LINE keygen.size', '%(bits)s', -1, None),
('GET_LINE keygen.valid', '0', -1, None),
('GET_LINE keygen.name', '%(name)s', -1, None),
('GET_LINE keygen.email', '%(email)s', -1, None),
('GET_LINE keygen.comment', '%(comment)s', -1, None),
('GET_HIDDEN passphrase', '%(passphrase)s', -1, None),
('GOT_IT', None, -1, GATHER_ENTROPY),
('KEY_CREATED', None, 1800, CREATED_KEY),
('\n', None, -1, HAVE_KEY)
]
VARIABLES = {
'keytype': '1',
'bits': '4096',
'name': 'Mailpile Generated Key',
'email': '',
'comment': 'www.mailpile.is',
'passphrase': 'mailpile'
}
RUNNING_STATES = (GnuPGExpectScript.RUNNING_STATES +
[KEY_SETUP, GATHER_ENTROPY, HAVE_KEY])
failed = property(lambda self: (not self.running and
not self.generated_key))
def __init__(self, *args, **kwargs):
GnuPGExpectScript.__init__(self, *args, **kwargs)
self.generated_key = None
def gpg_args(self):
return ['--no-use-agent', '--gen-key']
def in_state(self, state):
if state == self.HAVE_KEY:
self.generated_key = self.before.strip().split()[-1]
class GnuPGKeyEditor(GnuPGExpectScript):
"""This is a background thread which edits the UIDs on a PGP key."""
HAVE_SKEY = 'Have Secret Key'
DELETING_UID = 'Deleting a UID'
DELETED_UIDS = 'Deleted UIDs'
ADDING_UID = 'Adding a UID'
ADDED_UID = 'Added a UID'
SAVED = 'Saved keychain'
SCRIPT = [
]
DELETE_SCRIPT = [
('GET_LINE keyedit.prompt', 'uid %(n)s', -1, DELETING_UID),
('GET_LINE keyedit.prompt', 'deluid', -1, DELETING_UID),
('GNUPG', 'Y', -1, None),
]
ADD_UID_SCRIPT = [
('GET_LINE keyedit.prompt', 'adduid', -1, ADDING_UID),
('GET_LINE keygen.name', '%(name)s', -1, None),
('GET_LINE keygen.email', '%(email)s', -1, None),
('GET_LINE keygen.comment', '%(comment)s', -1, None),
('GET_HIDDEN passphrase', '%(passphrase)s', -1, None),
('GOOD_PASSPHRASE', '', -1, ADDED_UID),
]
SAVE_SCRIPT = [
('GET_LINE keyedit.prompt', 'save', -1, SAVED),
]
VARIABLES = {
'name': '',
'email': '',
'comment': '',
'passphrase': 'mailpile'
}
RUNNING_STATES = (GnuPGExpectScript.RUNNING_STATES +
[HAVE_SKEY,
DELETING_UID, DELETED_UIDS, ADDING_UID, ADDED_UID])
def __init__(self, keyid, set_uids=None, deletes=5, **kwargs):
GnuPGExpectScript.__init__(self, **kwargs)
self.keyid = keyid
# First, we try and delete all the existing UIDs.
# We should be able to delete all but the last one..
for i in reversed(range(2, deletes+1)):
for want, snd, tmo, st in self.DELETE_SCRIPT:
self.main_script.append((want, snd % {'n': i}, tmo, st))
# Next, add scripts to add our new UIDs.
first = True
        # Copy so that popping UIDs in in_state() doesn't mutate the caller's
        # list; also tolerate set_uids=None.
        self.uids = list(set_uids or [])
        for uid in self.uids:
# Magic: the in_state() method updates the variables for each
# instance of this script.
self.main_script.extend(self.ADD_UID_SCRIPT)
if first:
# We added one, so we can delete the last of the old ones
for want, snd, tmo, st in self.DELETE_SCRIPT:
self.main_script.append((want, snd % {'n': 1}, tmo, st))
first = False
self.main_script.extend(self.SAVE_SCRIPT)
def in_state(self, state):
if state == self.ADDING_UID:
self.variables = {}
self.variables.update(self.VARIABLES)
self.variables.update(self.uids.pop(0))
if not self.variables.get('name'):
self.variables['name'] = 'An Ony Mouse'
if len(self.variables['name']) < 5:
self.variables['name'] += ' ....'
if self.sps:
self.variables['passphrase'] = '!!<SPS'
def gpg_args(self):
return ['--no-use-agent', '--edit-key', self.keyid]
from django.core.urlresolvers import reverse
from django.test import TestCase, LiveServerTestCase, Client
from django.utils import timezone
from buc.models import Article, Category, Tag
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
import markdown2 as markdown
import feedparser
import factory.django
# Factories for tests
class SiteFactory(factory.django.DjangoModelFactory):
class Meta:
model = Site
django_get_or_create = (
'name',
'domain'
)
name = 'example.com'
domain = 'example.com'
class CategoryFactory(factory.django.DjangoModelFactory):
class Meta:
model = Category
django_get_or_create = (
'name',
'description',
'slug'
)
name = 'python'
description = 'The Python programming language'
slug = 'python'
class TagFactory(factory.django.DjangoModelFactory):
class Meta:
model = Tag
django_get_or_create = (
'name',
'description',
'slug'
)
name = 'python'
description = 'The Python programming language'
slug = 'python'
class AuthorFactory(factory.django.DjangoModelFactory):
class Meta:
model = User
django_get_or_create = ('username','email', 'password',)
username = 'testuser'
email = '[email protected]'
password = 'Aaa12345'
class FlatPageFactory(factory.django.DjangoModelFactory):
class Meta:
model = FlatPage
django_get_or_create = (
'url',
'title',
'content'
)
url = '/about/'
title = 'About me'
content = 'All about me'
class ArticleFactory(factory.django.DjangoModelFactory):
class Meta:
model = Article
django_get_or_create = (
'title',
'text',
'slug',
'pub_date'
)
title = 'My first post'
text = 'This is my first blog post'
slug = 'my-first-post'
pub_date = timezone.now()
author = factory.SubFactory(AuthorFactory)
site = factory.SubFactory(SiteFactory)
category = factory.SubFactory(CategoryFactory)
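# Illustrative use of the factories above (keyword arguments override the
# defaults declared on each factory, as the tests below do):
#
#     post = ArticleFactory(title='My second post', slug='my-second-post')
#     post.tags.add(TagFactory(name='perl'))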
# Create your tests here.
class ArticleTest(TestCase):
def test_create_category(self):
# Create the category
category = CategoryFactory()
# Check we can find it
all_categories = Category.objects.all()
self.assertEqual(len(all_categories), 1)
only_category = all_categories[0]
self.assertEqual(only_category, category)
# Check attributes
self.assertEqual(only_category.name, 'python')
self.assertEqual(only_category.description, 'The Python programming language')
self.assertEqual(only_category.slug, 'python')
def test_create_tag(self):
# Create the tag
tag = TagFactory()
# Check we can find it
all_tags = Tag.objects.all()
self.assertEqual(len(all_tags), 1)
only_tag = all_tags[0]
self.assertEqual(only_tag, tag)
# Check attributes
self.assertEqual(only_tag.name, 'python')
self.assertEqual(only_tag.description, 'The Python programming language')
self.assertEqual(only_tag.slug, 'python')
def test_create_post(self):
# Create the post
post = ArticleFactory()
# Create the tag
tag = TagFactory()
# Add the tag
post.tags.add(tag)
# Check we can find it
all_posts = Article.objects.all()
self.assertEqual(len(all_posts), 1)
only_post = all_posts[0]
self.assertEqual(only_post, post)
# Check attributes
self.assertEqual(only_post.title, 'My first post')
self.assertEqual(only_post.text, 'This is my first blog post')
self.assertEqual(only_post.slug, 'my-first-post')
self.assertEqual(only_post.site.name, 'example.com')
self.assertEqual(only_post.site.domain, 'example.com')
self.assertEqual(only_post.pub_date.day, post.pub_date.day)
self.assertEqual(only_post.pub_date.month, post.pub_date.month)
self.assertEqual(only_post.pub_date.year, post.pub_date.year)
self.assertEqual(only_post.pub_date.hour, post.pub_date.hour)
self.assertEqual(only_post.pub_date.minute, post.pub_date.minute)
self.assertEqual(only_post.pub_date.second, post.pub_date.second)
self.assertEqual(only_post.author.username, 'testuser')
self.assertEqual(only_post.author.email, '[email protected]')
self.assertEqual(only_post.category.name, 'python')
self.assertEqual(only_post.category.description, 'The Python programming language')
# Check tags
post_tags = only_post.tags.all()
self.assertEqual(len(post_tags), 1)
only_post_tag = post_tags[0]
self.assertEqual(only_post_tag, tag)
self.assertEqual(only_post_tag.name, 'python')
self.assertEqual(only_post_tag.description, 'The Python programming language')
class BaseAcceptanceTest(LiveServerTestCase):
def setUp(self):
self.client = Client()
class AdminTest(BaseAcceptanceTest):
fixtures = ['users.json']
def test_login(self):
# Get login page
response = self.client.get('/admin/', follow=True)
# Check response code
self.assertEqual(response.status_code, 200)
# Check 'Log in' in response
self.assertTrue('Log in' in response.content.decode('utf-8'))
# Log the user in
self.client.login(username='bobsmith', password="password")
# Check response code
response = self.client.get('/admin/')
self.assertEqual(response.status_code, 200)
# Check 'Log out' in response
self.assertTrue('Log out' in response.content.decode('utf-8'))
def test_logout(self):
# Log in
self.client.login(username='bobsmith', password="password")
# Check response code
response = self.client.get('/admin/')
self.assertEqual(response.status_code, 200)
# Check 'Log out' in response
self.assertTrue('Log out' in response.content.decode('utf-8'))
# Log out
self.client.logout()
# Check response code
response = self.client.get('/admin/', follow=True)
self.assertEqual(response.status_code, 200)
# Check 'Log in' in response
self.assertTrue('Log in' in response.content.decode('utf-8'))
def test_create_category(self):
# Log in
self.client.login(username='bobsmith', password="password")
# Check response code
response = self.client.get('/admin/buc/category/add/')
self.assertEqual(response.status_code, 200)
# Create the new category
response = self.client.post('/admin/buc/category/add/', {
'name': 'python',
'description': 'The Python programming language'
},
follow=True
)
self.assertEqual(response.status_code, 200)
# Check added successfully
self.assertTrue('added successfully' in response.content.decode('utf-8'))
# Check new category now in database
all_categories = Category.objects.all()
self.assertEqual(len(all_categories), 1)
def test_edit_category(self):
# Create the category
category = CategoryFactory()
# Log in
self.client.login(username='bobsmith', password="password")
# Edit the category
response = self.client.post('/admin/buc/category/' + str(category.pk) + '/change/', {
'name': 'perl',
'description': 'The Perl programming language'
}, follow=True)
self.assertEqual(response.status_code, 200)
# Check changed successfully
self.assertTrue('changed successfully' in response.content.decode('utf-8'))
# Check category amended
all_categories = Category.objects.all()
self.assertEqual(len(all_categories), 1)
only_category = all_categories[0]
self.assertEqual(only_category.name, 'perl')
self.assertEqual(only_category.description, 'The Perl programming language')
def test_delete_category(self):
# Create the category
category = CategoryFactory()
# Log in
self.client.login(username='bobsmith', password="password")
# Delete the category
response = self.client.post('/admin/buc/category/' + str(category.pk) + '/delete/', {
'post': 'yes'
}, follow=True)
self.assertEqual(response.status_code, 200)
# Check deleted successfully
self.assertTrue('deleted successfully' in response.content.decode('utf-8'))
# Check category deleted
all_categories = Category.objects.all()
self.assertEqual(len(all_categories), 0)
def test_create_tag(self):
# Log in
self.client.login(username='bobsmith', password="password")
# Check response code
response = self.client.get('/admin/buc/tag/add/')
self.assertEqual(response.status_code, 200)
# Create the new tag
response = self.client.post('/admin/buc/tag/add/', {
'name': 'python',
'description': 'The Python programming language'
},
follow=True
)
self.assertEqual(response.status_code, 200)
# Check added successfully
self.assertTrue('added successfully' in response.content.decode('utf-8'))
# Check new tag now in database
all_tags = Tag.objects.all()
self.assertEqual(len(all_tags), 1)
def test_edit_tag(self):
# Create the tag
tag = TagFactory()
# Log in
self.client.login(username='bobsmith', password="password")
# Edit the tag
response = self.client.post('/admin/buc/tag/' + str(tag.pk) + '/change/', {
'name': 'perl',
'description': 'The Perl programming language'
}, follow=True)
self.assertEqual(response.status_code, 200)
# Check changed successfully
self.assertTrue('changed successfully' in response.content.decode('utf-8'))
# Check tag amended
all_tags = Tag.objects.all()
self.assertEqual(len(all_tags), 1)
only_tag = all_tags[0]
self.assertEqual(only_tag.name, 'perl')
self.assertEqual(only_tag.description, 'The Perl programming language')
def test_delete_tag(self):
# Create the tag
tag = TagFactory()
# Log in
self.client.login(username='bobsmith', password="password")
# Delete the tag
response = self.client.post('/admin/buc/tag/' + str(tag.pk) + '/delete/', {
'post': 'yes'
}, follow=True)
self.assertEqual(response.status_code, 200)
# Check deleted successfully
self.assertTrue('deleted successfully' in response.content.decode('utf-8'))
# Check tag deleted
all_tags = Tag.objects.all()
self.assertEqual(len(all_tags), 0)
def test_create_post(self):
# Create the category
category = CategoryFactory()
# Create the tag
tag = TagFactory()
# Log in
self.client.login(username='bobsmith', password="password")
# Check response code
response = self.client.get('/admin/buc/post/add/')
self.assertEqual(response.status_code, 200)
# Create the new post
response = self.client.post('/admin/buc/post/add/', {
'title': 'My first post',
'text': 'This is my first post',
'pub_date_0': '2013-12-28',
'pub_date_1': '22:00:04',
'slug': 'my-first-post',
'site': '1',
'category': str(category.pk),
'tags': str(tag.pk)
},
follow=True
)
self.assertEqual(response.status_code, 200)
# Check added successfully
self.assertTrue('added successfully' in response.content.decode('utf-8'))
# Check new post now in database
all_posts = Article.objects.all()
self.assertEqual(len(all_posts), 1)
def test_create_post_without_tag(self):
# Create the category
category = CategoryFactory()
# Log in
self.client.login(username='bobsmith', password="password")
# Check response code
response = self.client.get('/admin/buc/post/add/')
self.assertEqual(response.status_code, 200)
# Create the new post
response = self.client.post('/admin/buc/post/add/', {
'title': 'My first post',
'text': 'This is my first post',
'pub_date_0': '2013-12-28',
'pub_date_1': '22:00:04',
'slug': 'my-first-post',
'site': '1',
'category': str(category.pk)
},
follow=True
)
self.assertEqual(response.status_code, 200)
# Check added successfully
self.assertTrue('added successfully' in response.content.decode('utf-8'))
# Check new post now in database
all_posts = Article.objects.all()
self.assertEqual(len(all_posts), 1)
def test_edit_post(self):
# Create the post
post = ArticleFactory()
# Create the category
category = CategoryFactory()
# Create the tag
tag = TagFactory()
post.tags.add(tag)
# Log in
self.client.login(username='bobsmith', password="password")
# Edit the post
response = self.client.post('/admin/buc/post/' + str(post.pk) + '/change/', {
'title': 'My second post',
'text': 'This is my second blog post',
'pub_date_0': '2013-12-28',
'pub_date_1': '22:00:04',
'slug': 'my-second-post',
'site': '1',
'category': str(category.pk),
'tags': str(tag.pk)
},
follow=True
)
self.assertEqual(response.status_code, 200)
# Check changed successfully
self.assertTrue('changed successfully' in response.content.decode('utf-8'))
# Check post amended
all_posts = Article.objects.all()
self.assertEqual(len(all_posts), 1)
only_post = all_posts[0]
self.assertEqual(only_post.title, 'My second post')
self.assertEqual(only_post.text, 'This is my second blog post')
def test_delete_post(self):
# Create the post
post = ArticleFactory()
# Create the tag
tag = TagFactory()
post.tags.add(tag)
# Check new post saved
all_posts = Article.objects.all()
self.assertEqual(len(all_posts), 1)
# Log in
self.client.login(username='bobsmith', password="password")
# Delete the post
response = self.client.post('/admin/buc/post/' + str(post.pk) + '/delete/', {
'post': 'yes'
}, follow=True)
self.assertEqual(response.status_code, 200)
# Check deleted successfully
self.assertTrue('deleted successfully' in response.content.decode('utf-8'))
# Check post deleted
all_posts = Article.objects.all()
self.assertEqual(len(all_posts), 0)
class ArticleViewTest(BaseAcceptanceTest):
def test_index(self):
# Create the post
post = ArticleFactory(text='This is [my first blog post](http://127.0.0.1:8000/)')
# Create the tag
tag = TagFactory(name='perl', description='The Perl programming language')
post.tags.add(tag)
# Check new post saved
all_posts = Article.objects.all()
self.assertEqual(len(all_posts), 1)
# Fetch the index
response = self.client.get(reverse('buc:index'))
self.assertEqual(response.status_code, 200)
# Check the post title is in the response
self.assertTrue(post.title in response.content.decode('utf-8'))
# Check the post text is in the response
self.assertTrue(markdown.markdown(post.text) in response.content.decode('utf-8'))
# Check the post category is in the response
self.assertTrue(post.category.name in response.content.decode('utf-8'))
# Check the post tag is in the response
post_tag = all_posts[0].tags.all()[0]
self.assertTrue(post_tag.name in response.content.decode('utf-8'))
# Check the post date is in the response
self.assertTrue(str(post.pub_date.year) in response.content.decode('utf-8'))
self.assertTrue(post.pub_date.strftime('%b') in response.content.decode('utf-8'))
self.assertTrue(str(post.pub_date.day) in response.content.decode('utf-8'))
# Check the link is marked up properly
self.assertTrue('<a href="http://127.0.0.1:8000/">my first blog post</a>' in response.content.decode('utf-8'))
# Check the correct template was used
self.assertTemplateUsed(response, 'buc/post_list.html')
def test_post_page(self):
# Create the post
post = ArticleFactory(text='This is [my first blog post](http://127.0.0.1:8000/)')
# Create the tag
tag = TagFactory(name='perl', description='The Perl programming language')
post.tags.add(tag)
# Check new post saved
all_posts = Article.objects.all()
self.assertEqual(len(all_posts), 1)
only_post = all_posts[0]
self.assertEqual(only_post, post)
# Get the post URL
post_url = only_post.get_absolute_url()
# Fetch the post
response = self.client.get(post_url)
self.assertEqual(response.status_code, 200)
# Check the post title is in the response
self.assertTrue(post.title in response.content.decode('utf-8'))
# Check the post category is in the response
self.assertTrue(post.category.name in response.content.decode('utf-8'))
# Check the post tag is in the response
post_tag = all_posts[0].tags.all()[0]
self.assertTrue(post_tag.name in response.content.decode('utf-8'))
# Check the post text is in the response
self.assertTrue(markdown.markdown(post.text) in response.content.decode('utf-8'))
# Check the post date is in the response
self.assertTrue(str(post.pub_date.year) in response.content.decode('utf-8'))
self.assertTrue(post.pub_date.strftime('%b') in response.content.decode('utf-8'))
self.assertTrue(str(post.pub_date.day) in response.content.decode('utf-8'))
# Check the link is marked up properly
self.assertTrue('<a href="http://127.0.0.1:8000/">my first blog post</a>' in response.content.decode('utf-8'))
# Check the correct template was used
self.assertTemplateUsed(response, 'buc/post_detail.html')
def test_category_page(self):
# Create the post
post = ArticleFactory(text='This is [my first blog post](http://127.0.0.1:8000/)')
# Check new post saved
all_posts = Article.objects.all()
self.assertEqual(len(all_posts), 1)
only_post = all_posts[0]
self.assertEqual(only_post, post)
# Get the category URL
category_url = post.category.get_absolute_url()
# Fetch the category
response = self.client.get(category_url)
self.assertEqual(response.status_code, 200)
# Check the category name is in the response
self.assertTrue(post.category.name in response.content.decode('utf-8'))
# Check the post text is in the response
self.assertTrue(markdown.markdown(post.text) in response.content.decode('utf-8'))
# Check the post date is in the response
self.assertTrue(str(post.pub_date.year) in response.content.decode('utf-8'))
self.assertTrue(post.pub_date.strftime('%b') in response.content.decode('utf-8'))
self.assertTrue(str(post.pub_date.day) in response.content.decode('utf-8'))
# Check the link is marked up properly
self.assertTrue('<a href="http://127.0.0.1:8000/">my first blog post</a>' in response.content.decode('utf-8'))
# Check the correct template was used
self.assertTemplateUsed(response, 'buc/category_post_list.html')
def test_nonexistent_category_page(self):
category_url = '/category/blah/'
response = self.client.get(category_url)
self.assertEqual(response.status_code, 200)
self.assertTrue('No posts found' in response.content.decode('utf-8'))
def test_tag_page(self):
# Create the author
author = AuthorFactory()
# Create the site
site = SiteFactory()
# Create the post
post = ArticleFactory(text='This is [my first blog post](http://127.0.0.1:8000/)')
# Create the tag
tag = TagFactory()
post.tags.add(tag)
# Check new post saved
all_posts = Article.objects.all()
self.assertEqual(len(all_posts), 1)
only_post = all_posts[0]
self.assertEqual(only_post, post)
# Get the tag URL
tag_url = post.tags.all()[0].get_absolute_url()
# Fetch the tag
response = self.client.get(tag_url)
self.assertEqual(response.status_code, 200)
# Check the tag name is in the response
self.assertTrue(post.tags.all()[0].name in response.content.decode('utf-8'))
# Check the post text is in the response
self.assertTrue(markdown.markdown(post.text) in response.content.decode('utf-8'))
# Check the post date is in the response
self.assertTrue(str(post.pub_date.year) in response.content.decode('utf-8'))
self.assertTrue(post.pub_date.strftime('%b') in response.content.decode('utf-8'))
self.assertTrue(str(post.pub_date.day) in response.content.decode('utf-8'))
# Check the link is marked up properly
self.assertTrue('<a href="http://127.0.0.1:8000/">my first blog post</a>' in response.content.decode('utf-8'))
# Check the correct template was used
self.assertTemplateUsed(response, 'buc/tag_post_list.html')
def test_nonexistent_tag_page(self):
tag_url = '/tag/blah/'
response = self.client.get(tag_url)
self.assertEqual(response.status_code, 200)
self.assertTrue('No posts found' in response.content.decode('utf-8'))
def test_clear_cache(self):
        # Create the first post
        post = ArticleFactory(text='This is [my first blog post](http://127.0.0.1:8000/)')
        # Create the tag
        tag = TagFactory(name='perl', description='The Perl programming language')
post.tags.add(tag)
# Check new post saved
all_posts = Article.objects.all()
self.assertEqual(len(all_posts), 1)
# Fetch the index
response = self.client.get(reverse('buc:index'))
self.assertEqual(response.status_code, 200)
# Create the second post
        post = ArticleFactory(title='My second post', text='This is [my second blog post](http://127.0.0.1:8000/)', slug='my-second-post')
post.tags.add(tag)
# Fetch the index again
response = self.client.get(reverse('buc:index'))
# Check second post present
self.assertTrue('my second blog post' in response.content.decode('utf-8'))
class FeedTest(BaseAcceptanceTest):
def test_all_post_feed(self):
# Create a post
post = ArticleFactory(text='This is my *first* blog post')
# Create the tag
tag = TagFactory()
post.tags.add(tag)
# Check we can find it
all_posts = Article.objects.all()
self.assertEqual(len(all_posts), 1)
only_post = all_posts[0]
self.assertEqual(only_post, post)
# Fetch the feed
response = self.client.get('/feeds/posts/')
self.assertEqual(response.status_code, 200)
# Parse the feed
feed = feedparser.parse(response.content)
# Check length
self.assertEqual(len(feed.entries), 1)
# Check post retrieved is the correct one
feed_post = feed.entries[0]
self.assertEqual(feed_post.title, post.title)
self.assertTrue('This is my <em>first</em> blog post' in feed_post.description)
def test_category_feed(self):
# Create a post
post = ArticleFactory(text='This is my *first* blog post')
# Create another post in a different category
category = CategoryFactory(name='perl', description='The Perl programming language', slug='perl')
post2 = ArticleFactory(text='This is my *second* blog post', title='My second post', slug='my-second-post', category=category)
# Fetch the feed
response = self.client.get('/feeds/posts/category/python/')
self.assertEqual(response.status_code, 200)
# Parse the feed
feed = feedparser.parse(response.content)
# Check length
self.assertEqual(len(feed.entries), 1)
# Check post retrieved is the correct one
feed_post = feed.entries[0]
self.assertEqual(feed_post.title, post.title)
self.assertTrue('This is my <em>first</em> blog post' in feed_post.description)
# Check other post is not in this feed
self.assertTrue('This is my <em>second</em> blog post' not in response.content.decode('utf-8'))
def test_tag_feed(self):
# Create a post
post = ArticleFactory(text='This is my *first* blog post')
tag = TagFactory()
post.tags.add(tag)
post.save()
# Create another post with a different tag
tag2 = TagFactory(name='perl', description='The Perl programming language', slug='perl')
post2 = ArticleFactory(text='This is my *second* blog post', title='My second post', slug='my-second-post')
post2.tags.add(tag2)
post2.save()
# Fetch the feed
response = self.client.get('/feeds/posts/tag/python/')
self.assertEqual(response.status_code, 200)
# Parse the feed
feed = feedparser.parse(response.content)
# Check length
self.assertEqual(len(feed.entries), 1)
# Check post retrieved is the correct one
feed_post = feed.entries[0]
self.assertEqual(feed_post.title, post.title)
self.assertTrue('This is my <em>first</em> blog post' in feed_post.description)
# Check other post is not in this feed
self.assertTrue('This is my <em>second</em> blog post' not in response.content.decode('utf-8'))
class FlatPageViewTest(BaseAcceptanceTest):
def test_create_flat_page(self):
# Create flat page
page = FlatPageFactory()
# Add the site
page.sites.add(Site.objects.all()[0])
page.save()
# Check new page saved
all_pages = FlatPage.objects.all()
self.assertEqual(len(all_pages), 1)
only_page = all_pages[0]
self.assertEqual(only_page, page)
# Check data correct
self.assertEqual(only_page.url, '/about/')
self.assertEqual(only_page.title, 'About me')
self.assertEqual(only_page.content, 'All about me')
# Get URL
page_url = str(only_page.get_absolute_url())
# Get the page
response = self.client.get(page_url)
self.assertEqual(response.status_code, 200)
# Check title and content in response
self.assertTrue('About me' in response.content.decode('utf-8'))
self.assertTrue('All about me' in response.content.decode('utf-8'))
class SearchViewTest(BaseAcceptanceTest):
def test_search(self):
# Create a post
post = ArticleFactory()
# Create another post
post2 = ArticleFactory(text='This is my *second* blog post', title='My second post', slug='my-second-post')
# Search for first post
response = self.client.get(reverse('buc:search') + '?q=first')
self.assertEqual(response.status_code, 200)
# Check the first post is contained in the results
self.assertTrue('My first post' in response.content.decode('utf-8'))
# Check the second post is not contained in the results
self.assertTrue('My second post' not in response.content.decode('utf-8'))
# Search for second post
response = self.client.get(reverse('buc:search') + '?q=second')
self.assertEqual(response.status_code, 200)
# Check the first post is not contained in the results
self.assertTrue('My first post' not in response.content.decode('utf-8'))
# Check the second post is contained in the results
self.assertTrue('My second post' in response.content.decode('utf-8'))
def test_failing_search(self):
# Search for something that is not present
response = self.client.get(reverse('buc:search') + '?q=wibble')
self.assertEqual(response.status_code, 200)
self.assertTrue('No posts found' in response.content.decode('utf-8'))
# Try to get nonexistent second page
response = self.client.get(reverse('buc:search') + '?q=wibble&page=2')
self.assertEqual(response.status_code, 200)
self.assertTrue('No posts found' in response.content.decode('utf-8'))
class SitemapTest(BaseAcceptanceTest):
def test_sitemap(self):
# Create a post
post = ArticleFactory()
# Create a flat page
page = FlatPageFactory()
# Get sitemap
response = self.client.get('/sitemap.xml')
self.assertEqual(response.status_code, 200)
# Check post is present in sitemap
self.assertTrue('my-first-post' in response.content.decode('utf-8'))
# Check page is present in sitemap
self.assertTrue('/about/' in response.content.decode('utf-8'))
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.domains_v1beta1.types import domains
from google.longrunning import operations_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-domains",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class DomainsTransport(abc.ABC):
"""Abstract transport class for Domains."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
DEFAULT_HOST: str = "domains.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
        credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.search_domains: gapic_v1.method.wrap_method(
self.search_domains, default_timeout=None, client_info=client_info,
),
self.retrieve_register_parameters: gapic_v1.method.wrap_method(
self.retrieve_register_parameters,
default_timeout=None,
client_info=client_info,
),
self.register_domain: gapic_v1.method.wrap_method(
self.register_domain, default_timeout=None, client_info=client_info,
),
self.retrieve_transfer_parameters: gapic_v1.method.wrap_method(
self.retrieve_transfer_parameters,
default_timeout=None,
client_info=client_info,
),
self.transfer_domain: gapic_v1.method.wrap_method(
self.transfer_domain, default_timeout=None, client_info=client_info,
),
self.list_registrations: gapic_v1.method.wrap_method(
self.list_registrations, default_timeout=None, client_info=client_info,
),
self.get_registration: gapic_v1.method.wrap_method(
self.get_registration, default_timeout=None, client_info=client_info,
),
self.update_registration: gapic_v1.method.wrap_method(
self.update_registration, default_timeout=None, client_info=client_info,
),
self.configure_management_settings: gapic_v1.method.wrap_method(
self.configure_management_settings,
default_timeout=None,
client_info=client_info,
),
self.configure_dns_settings: gapic_v1.method.wrap_method(
self.configure_dns_settings,
default_timeout=None,
client_info=client_info,
),
self.configure_contact_settings: gapic_v1.method.wrap_method(
self.configure_contact_settings,
default_timeout=None,
client_info=client_info,
),
self.export_registration: gapic_v1.method.wrap_method(
self.export_registration, default_timeout=None, client_info=client_info,
),
self.delete_registration: gapic_v1.method.wrap_method(
self.delete_registration, default_timeout=None, client_info=client_info,
),
self.retrieve_authorization_code: gapic_v1.method.wrap_method(
self.retrieve_authorization_code,
default_timeout=None,
client_info=client_info,
),
self.reset_authorization_code: gapic_v1.method.wrap_method(
self.reset_authorization_code,
default_timeout=None,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def operations_client(self):
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def search_domains(
self,
) -> Callable[
[domains.SearchDomainsRequest],
Union[domains.SearchDomainsResponse, Awaitable[domains.SearchDomainsResponse]],
]:
raise NotImplementedError()
@property
def retrieve_register_parameters(
self,
) -> Callable[
[domains.RetrieveRegisterParametersRequest],
Union[
domains.RetrieveRegisterParametersResponse,
Awaitable[domains.RetrieveRegisterParametersResponse],
],
]:
raise NotImplementedError()
@property
def register_domain(
self,
) -> Callable[
[domains.RegisterDomainRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def retrieve_transfer_parameters(
self,
) -> Callable[
[domains.RetrieveTransferParametersRequest],
Union[
domains.RetrieveTransferParametersResponse,
Awaitable[domains.RetrieveTransferParametersResponse],
],
]:
raise NotImplementedError()
@property
def transfer_domain(
self,
) -> Callable[
[domains.TransferDomainRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def list_registrations(
self,
) -> Callable[
[domains.ListRegistrationsRequest],
Union[
domains.ListRegistrationsResponse,
Awaitable[domains.ListRegistrationsResponse],
],
]:
raise NotImplementedError()
@property
def get_registration(
self,
) -> Callable[
[domains.GetRegistrationRequest],
Union[domains.Registration, Awaitable[domains.Registration]],
]:
raise NotImplementedError()
@property
def update_registration(
self,
) -> Callable[
[domains.UpdateRegistrationRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def configure_management_settings(
self,
) -> Callable[
[domains.ConfigureManagementSettingsRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def configure_dns_settings(
self,
) -> Callable[
[domains.ConfigureDnsSettingsRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def configure_contact_settings(
self,
) -> Callable[
[domains.ConfigureContactSettingsRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def export_registration(
self,
) -> Callable[
[domains.ExportRegistrationRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def delete_registration(
self,
) -> Callable[
[domains.DeleteRegistrationRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def retrieve_authorization_code(
self,
) -> Callable[
[domains.RetrieveAuthorizationCodeRequest],
Union[domains.AuthorizationCode, Awaitable[domains.AuthorizationCode]],
]:
raise NotImplementedError()
@property
def reset_authorization_code(
self,
) -> Callable[
[domains.ResetAuthorizationCodeRequest],
Union[domains.AuthorizationCode, Awaitable[domains.AuthorizationCode]],
]:
raise NotImplementedError()
__all__ = ("DomainsTransport",)
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""GTFlow Model definitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.contrib import learn
from tensorflow.contrib.boosted_trees.estimator_batch import estimator_utils
from tensorflow.contrib.boosted_trees.estimator_batch import trainer_hooks
from tensorflow.contrib.boosted_trees.python.ops import model_ops
from tensorflow.contrib.boosted_trees.python.training.functions import gbdt_batch
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import training_util
class ModelBuilderOutputType(object):
MODEL_FN_OPS = 0
ESTIMATOR_SPEC = 1
def model_builder(features,
labels,
mode,
params,
config,
output_type=ModelBuilderOutputType.MODEL_FN_OPS):
"""Multi-machine batch gradient descent tree model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: Labels used to train on.
mode: Mode we are in. (TRAIN/EVAL/INFER)
params: A dict of hyperparameters.
The following hyperparameters are expected:
* head: A `Head` instance.
* learner_config: A config for the learner.
* feature_columns: An iterable containing all the feature columns used by
the model.
* examples_per_layer: Number of examples to accumulate before growing a
layer. It can also be a function that computes the number of examples
based on the depth of the layer that's being built.
* weight_column_name: The name of weight column.
* center_bias: Whether a separate tree should be created for first fitting
the bias.
* override_global_step_value: If after the training is done, global step
value must be reset to this value. This is particularly useful for hyper
parameter tuning, which can't recognize early stopping due to the number
of trees. If None, no override of global step will happen.
config: `RunConfig` of the estimator.
output_type: Whether to return ModelFnOps (old interface) or EstimatorSpec
(new interface).
Returns:
A `ModelFnOps` object.
Raises:
ValueError: if inputs are not valid.
"""
head = params["head"]
learner_config = params["learner_config"]
examples_per_layer = params["examples_per_layer"]
feature_columns = params["feature_columns"]
weight_column_name = params["weight_column_name"]
num_trees = params["num_trees"]
use_core_libs = params["use_core_libs"]
logits_modifier_function = params["logits_modifier_function"]
output_leaf_index = params["output_leaf_index"]
override_global_step_value = params.get("override_global_step_value", None)
num_quantiles = params["num_quantiles"]
if features is None:
raise ValueError("At least one feature must be specified.")
if config is None:
raise ValueError("Missing estimator RunConfig.")
if config.session_config is not None:
session_config = config.session_config
session_config.allow_soft_placement = True
else:
session_config = config_pb2.ConfigProto(allow_soft_placement=True)
config = config.replace(session_config=session_config)
center_bias = params["center_bias"]
if isinstance(features, ops.Tensor):
features = {features.name: features}
# Make a shallow copy of features to ensure downstream usage
# is unaffected by modifications in the model function.
training_features = copy.copy(features)
training_features.pop(weight_column_name, None)
global_step = training_util.get_global_step()
with ops.device(global_step.device):
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config="", # Initialize an empty ensemble.
name="ensemble_model")
# Create GBDT model.
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=config.is_chief,
num_ps_replicas=config.num_ps_replicas,
ensemble_handle=ensemble_handle,
center_bias=center_bias,
examples_per_layer=examples_per_layer,
learner_config=learner_config,
feature_columns=feature_columns,
logits_dimension=head.logits_dimension,
features=training_features,
use_core_columns=use_core_libs,
output_leaf_index=output_leaf_index,
num_quantiles=num_quantiles)
with ops.name_scope("gbdt", "gbdt_optimizer"):
predictions_dict = gbdt_model.predict(mode)
logits = predictions_dict["predictions"]
if logits_modifier_function:
logits = logits_modifier_function(logits, features, mode)
def _train_op_fn(loss):
"""Returns the op to optimize the loss."""
update_op = gbdt_model.train(loss, predictions_dict, labels)
with ops.control_dependencies(
[update_op]), (ops.colocate_with(global_step)):
update_op = state_ops.assign_add(global_step, 1).op
return update_op
create_estimator_spec_op = getattr(head, "create_estimator_spec", None)
training_hooks = []
if num_trees:
if center_bias:
num_trees += 1
finalized_trees, attempted_trees = gbdt_model.get_number_of_trees_tensor()
training_hooks.append(
trainer_hooks.StopAfterNTrees(num_trees, attempted_trees,
finalized_trees,
override_global_step_value))
if output_type == ModelBuilderOutputType.MODEL_FN_OPS:
if use_core_libs and callable(create_estimator_spec_op):
model_fn_ops = head.create_estimator_spec(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
model_fn_ops = estimator_utils.estimator_spec_to_model_fn_ops(
model_fn_ops)
else:
model_fn_ops = head.create_model_fn_ops(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
if output_leaf_index and gbdt_batch.LEAF_INDEX in predictions_dict:
model_fn_ops.predictions[gbdt_batch.LEAF_INDEX] = predictions_dict[
gbdt_batch.LEAF_INDEX]
model_fn_ops.training_hooks.extend(training_hooks)
return model_fn_ops
elif output_type == ModelBuilderOutputType.ESTIMATOR_SPEC:
assert callable(create_estimator_spec_op)
estimator_spec = head.create_estimator_spec(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
estimator_spec = estimator_spec._replace(
training_hooks=training_hooks + list(estimator_spec.training_hooks))
return estimator_spec
return model_fn_ops
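# Editor's sketch (hypothetical; not part of the original module): a `params`
# dict containing every key that `model_builder` above dereferences, with
# illustrative values only. In the library these entries are normally
# assembled by the estimator wrappers rather than written by hand.
def _example_model_builder_params(head, learner_config, feature_columns):
  """Return a params dict with the keys `model_builder` expects (sketch)."""
  return {
      "head": head,
      "learner_config": learner_config,
      "feature_columns": feature_columns,
      "examples_per_layer": 1000,
      "weight_column_name": None,
      "num_trees": 10,
      "use_core_libs": False,
      "logits_modifier_function": None,
      "output_leaf_index": False,
      "override_global_step_value": None,
      "num_quantiles": 100,
      "center_bias": True,
  }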
def ranking_model_builder(features,
labels,
mode,
params,
config,
output_type=ModelBuilderOutputType.MODEL_FN_OPS):
"""Multi-machine batch gradient descent tree model for ranking.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: Labels used to train on.
mode: Mode we are in. (TRAIN/EVAL/INFER)
params: A dict of hyperparameters.
The following hyperparameters are expected:
* head: A `Head` instance.
* learner_config: A config for the learner.
* feature_columns: An iterable containing all the feature columns used by
the model.
* examples_per_layer: Number of examples to accumulate before growing a
layer. It can also be a function that computes the number of examples
based on the depth of the layer that's being built.
* weight_column_name: The name of weight column.
* center_bias: Whether a separate tree should be created for first fitting
the bias.
* ranking_model_pair_keys (Optional): Keys to distinguish between features
for left and right part of the training pairs for ranking. For example,
for an Example with features "a.f1" and "b.f1", the keys would be
("a", "b").
* override_global_step_value: If after the training is done, global step
value must be reset to this value. This is particularly useful for hyper
parameter tuning, which can't recognize early stopping due to the number
of trees. If None, no override of global step will happen.
config: `RunConfig` of the estimator.
output_type: Whether to return ModelFnOps (old interface) or EstimatorSpec
(new interface).
Returns:
A `ModelFnOps` object.
Raises:
ValueError: if inputs are not valid.
"""
head = params["head"]
learner_config = params["learner_config"]
examples_per_layer = params["examples_per_layer"]
feature_columns = params["feature_columns"]
weight_column_name = params["weight_column_name"]
num_trees = params["num_trees"]
use_core_libs = params["use_core_libs"]
logits_modifier_function = params["logits_modifier_function"]
output_leaf_index = params["output_leaf_index"]
ranking_model_pair_keys = params["ranking_model_pair_keys"]
override_global_step_value = params.get("override_global_step_value", None)
num_quantiles = params["num_quantiles"]
if features is None:
raise ValueError("At least one feature must be specified.")
if config is None:
raise ValueError("Missing estimator RunConfig.")
center_bias = params["center_bias"]
if isinstance(features, ops.Tensor):
features = {features.name: features}
# Make a shallow copy of features to ensure downstream usage
# is unaffected by modifications in the model function.
training_features = copy.copy(features)
training_features.pop(weight_column_name, None)
global_step = training_util.get_global_step()
with ops.device(global_step.device):
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config="", # Initialize an empty ensemble.
name="ensemble_model")
# Extract the features.
if mode == learn.ModeKeys.TRAIN or mode == learn.ModeKeys.EVAL:
# For ranking pairwise training, we extract two sets of features.
if len(ranking_model_pair_keys) != 2:
raise ValueError("You must provide keys for ranking.")
left_pair_key = ranking_model_pair_keys[0]
right_pair_key = ranking_model_pair_keys[1]
if left_pair_key is None or right_pair_key is None:
raise ValueError("Both pair keys should be provided for ranking.")
features_1 = {}
features_2 = {}
for name in training_features:
feature = training_features[name]
new_name = name[2:]
if name.startswith(left_pair_key + "."):
features_1[new_name] = feature
else:
assert name.startswith(right_pair_key + ".")
features_2[new_name] = feature
main_features = features_1
supplementary_features = features_2
else:
# For non-ranking or inference ranking, we have only 1 set of features.
main_features = training_features
# Create GBDT model.
gbdt_model_main = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=config.is_chief,
num_ps_replicas=config.num_ps_replicas,
ensemble_handle=ensemble_handle,
center_bias=center_bias,
examples_per_layer=examples_per_layer,
learner_config=learner_config,
feature_columns=feature_columns,
logits_dimension=head.logits_dimension,
features=main_features,
use_core_columns=use_core_libs,
output_leaf_index=output_leaf_index,
num_quantiles=num_quantiles)
with ops.name_scope("gbdt", "gbdt_optimizer"):
# Logits for inference.
if mode == learn.ModeKeys.INFER:
predictions_dict = gbdt_model_main.predict(mode)
logits = predictions_dict[gbdt_batch.PREDICTIONS]
if logits_modifier_function:
logits = logits_modifier_function(logits, features, mode)
else:
gbdt_model_supplementary = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=config.is_chief,
num_ps_replicas=config.num_ps_replicas,
ensemble_handle=ensemble_handle,
center_bias=center_bias,
examples_per_layer=examples_per_layer,
learner_config=learner_config,
feature_columns=feature_columns,
logits_dimension=head.logits_dimension,
features=supplementary_features,
use_core_columns=use_core_libs,
output_leaf_index=output_leaf_index)
# Logits for train and eval.
if not supplementary_features:
raise ValueError("Features for ranking must be specified.")
predictions_dict_1 = gbdt_model_main.predict(mode)
predictions_1 = predictions_dict_1[gbdt_batch.PREDICTIONS]
predictions_dict_2 = gbdt_model_supplementary.predict(mode)
predictions_2 = predictions_dict_2[gbdt_batch.PREDICTIONS]
logits = predictions_1 - predictions_2
if logits_modifier_function:
logits = logits_modifier_function(logits, features, mode)
predictions_dict = predictions_dict_1
predictions_dict[gbdt_batch.PREDICTIONS] = logits
def _train_op_fn(loss):
"""Returns the op to optimize the loss."""
update_op = gbdt_model_main.train(loss, predictions_dict, labels)
with ops.control_dependencies(
[update_op]), (ops.colocate_with(global_step)):
update_op = state_ops.assign_add(global_step, 1).op
return update_op
create_estimator_spec_op = getattr(head, "create_estimator_spec", None)
training_hooks = []
if num_trees:
if center_bias:
num_trees += 1
finalized_trees, attempted_trees = (
gbdt_model_main.get_number_of_trees_tensor())
training_hooks.append(
trainer_hooks.StopAfterNTrees(num_trees, attempted_trees,
finalized_trees,
override_global_step_value))
if output_type == ModelBuilderOutputType.MODEL_FN_OPS:
if use_core_libs and callable(create_estimator_spec_op):
model_fn_ops = head.create_estimator_spec(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
model_fn_ops = estimator_utils.estimator_spec_to_model_fn_ops(
model_fn_ops)
else:
model_fn_ops = head.create_model_fn_ops(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
if output_leaf_index and gbdt_batch.LEAF_INDEX in predictions_dict:
model_fn_ops.predictions[gbdt_batch.LEAF_INDEX] = predictions_dict[
gbdt_batch.LEAF_INDEX]
model_fn_ops.training_hooks.extend(training_hooks)
return model_fn_ops
elif output_type == ModelBuilderOutputType.ESTIMATOR_SPEC:
assert callable(create_estimator_spec_op)
estimator_spec = head.create_estimator_spec(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
estimator_spec = estimator_spec._replace(
training_hooks=training_hooks + list(estimator_spec.training_hooks))
return estimator_spec
return model_fn_ops
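# Editor's note (illustrative only): with `ranking_model_pair_keys` set to
# ("a", "b"), pairwise training features are expected to be named "a.<name>"
# and "b.<name>"; `ranking_model_builder` strips the two-character prefix and
# routes the two groups to the main and supplementary GBDT models, using the
# difference of their predictions as the ranking logits.
def _example_ranking_pair_features():
  """Return a toy feature dict following the naming convention above."""
  return {
      "a.f1": [0.1, 0.2],  # left element of each training pair
      "b.f1": [0.3, 0.4],  # right element of each training pair
  }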
|
|
import argparse
import os
from os import path
import subprocess
import sys
import socket
import time
import warnings
from math import floor
import gc # garbage collector
import smtplib
import numpy as np
from scipy import signal, linalg
from matplotlib import pyplot as plt
import GPy
import classes as cls
import utilities as util
from utilities import bcolors
# import rpy2.robjects as ro
# from rpy2.robjects.packages import importr
# from rpy2.robjects.numpy2ri import numpy2ri
# # Activate automatic conversion of ndarray to R objects
# ro.conversion.py2ri = numpy2ri
from progressbar import ProgressBar, SimpleProgress, ETA, Percentage, Bar, \
AnimatedMarker, Timer, Counter
if __name__ == "__main__":
# gc.set_debug(gc.DEBUG_LEAK)
# Parsing input from command line
parser = argparse.ArgumentParser(
description = "SN lightcurve fitter and classifier.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
actionGroup = parser.add_argument_group('ACTION')
inputGroup = parser.add_argument_group('INPUT')
"""
ACTION OPTIONS
----------------------------------------------------------------------------
"""
actionGroup.add_argument(
"--fit", dest="fit",
action="store_true",
help="Fit lightcurves with Gaussian processes method."
)
actionGroup.add_argument(
'--prior', dest='prior',
action='store_true', help='Use priors in GP regression.'
)
actionGroup.add_argument(
'--length', dest='testLength',
action='store_true',
help='Set length scale hyper parameter to random value to ease \
optimization.'
)
actionGroup.add_argument(
"--cross-correlation", dest="crossCor",
action="store_true",
help="Performs cross correlation between non peaked lcs (with maximum in \
r-band at one of the MJD extremes) and all the peaked lcs. Produces \
an estimate for maximum in r-band. VERY TIME CONSUMING."
)
actionGroup.add_argument(
"--distance-matrix", dest="distMatrix",
action="store_true",
help="Calculate distance between fitted lightcurves in same band. \
            It is used to build a diffusion map (see Coifman & Lafon (2006) \
and Lafon & Lee (2006)).")
actionGroup.add_argument(
"--diffuse", dest="diffuse",
action="store_true",
help="Computes the diffusion map coefficients. Run together or after \
--distance-matrix option. Uses `diffusionMap` R package developed \
by Joseph Richards.")
actionGroup.add_argument(
"--train", dest="train",
action="store_true",
help="Train the classifier - Random Forest. Uses `randomForest` R \
package.")
actionGroup.add_argument(
"--classify", dest="classify",
action="store_true")
actionGroup.add_argument(
"--plot", dest="plot",
action="store_true",
help="Save on `pdf` file the plot of fitting curve over data.")
actionGroup.add_argument(
'--nice-plots', dest='nicePlots',
action='store_true',
help='Produces plot suitable for publication (pdf, 300dpi).'
)
"""-------------------------------------------------------------------------
INPUT OPTIONS
----------------------------------------------------------------------------
"""
inputGroup.add_argument(
"--data-directory", dest="dirData",
default="train_data" + os.sep + "SIMGEN_PUBLIC_DES",
help="Path to directory containing training data.")
inputGroup.add_argument(
"--fit-directory", dest="dirFit",
default="results" + os.sep + "FIT",
help="Path to directory containing fitted data.")
# the use of this keyword is developed in dev_magnitudes branch
inputGroup.add_argument(
"--mag", dest="mag",
action="store_true",
help="Reads in magnitudes from file."
)
inputGroup.add_argument(
"--fit-file", dest="fitFile",
help="Path to file in which to dump fitting results.")
inputGroup.add_argument(
"-f", "--file",
help="")
inputGroup.add_argument(
"-c", "--candidate", dest="cand",
default=-1, type=int,
help="ID of a candidate."
)
inputGroup.add_argument(
"--all-bands", dest="allBands",
action="store_true",
help="Plot all bands --nice-plots option."
)
inputGroup.add_argument(
"-b", "--band", dest="band", default='r',
help="Which band to plot with --nice-plots.")
inputGroup.add_argument(
"--nBands", dest="nBands",
default=-1, type=int,
help="Number of bands to plot with --nice-plots.")
inputGroup.add_argument(
'--limits', nargs=2, dest='limits',
default=[0, 5], type=int,
        help='Start and end indices for fitting and cross-correlation.'
)
inputGroup.add_argument(
'--offset', '-o', dest='offset',
default=0, type=int,
        help='Offset for columns w.r.t. limits (which refer to rows).'
)
inputGroup.add_argument(
'--plot-offset', dest='plotOffset',
default=-1, type=int,
        help='Index offset from which to start plotting light curves.'
)
"""-------------------------------------------------------------------------
"""
args = parser.parse_args()
bands = ['g', 'r', 'i', 'z']
else:
pass
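# Example invocations (editor's sketch; `miniature_adventure.py` is a
# placeholder for the actual module filename, and the directories follow the
# defaults declared above):
#
#   python miniature_adventure.py --fit --limits 0 100 --prior
#   python miniature_adventure.py --cross-correlation --limits 0 50
#   python miniature_adventure.py --distance-matrix --limits 0 200 --offset 0
#   python miniature_adventure.py --plot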
if __name__ == "__main__":
# os.system("clear")
fromAddress = '[email protected]'
toAddress = '[email protected]'
sent = False
indent = " "
resDir = "results"+os.sep
peakIdx = np.empty(0)
nopeakIdx = np.empty(0)
print bcolors.bldpur
print indent + "* * * * * * * * * * * * * * *"
print indent + "* Miniature Adventure *"
print indent + "* ------------------- *"
print indent + "* lightcurves fitting *"
print indent + "* and *"
print indent + "* SN classification *"
print indent + "* * * * * * * * * * * * * * *"
print bcolors.txtrst
if args.dirFit == 'results/FIT':
        yesno = str(raw_input(indent + 'Set fit directory other than default (' + \
parser.get_default('dirFit') + ')? (y/n)'))
if yesno == 'y':
args.dirFit = str(raw_input(indent + 'Specify new directory '\
+'for fit: '))
if args.dirData[-1] != os.sep:
args.dirData += os.sep
if args.dirFit[-1] != os.sep:
args.dirFit += os.sep
print indent + 'Fit directory will be: ' + path.abspath(args.dirFit)
if not os.path.exists(path.abspath(args.dirFit)):
os.makedirs(path.abspath(args.dirFit))
start_time = time.time()
"""
Get list of files in data directory and fit directory
----------------------------------------------------------------------------
"""
p = subprocess.Popen("ls *SN*.DAT", shell=True, stdout=subprocess.PIPE,
cwd=args.dirData)
lsDirData = p.stdout.read()
lsDirData = lsDirData.split('\n')
lsDirData.sort()
lsDirData.remove('')
p = subprocess.Popen("ls *SN*.DAT", shell=True, stdout=subprocess.PIPE,
cwd=args.dirFit)
lsDirFit = p.stdout.read()
lsDirFit = lsDirFit.split('\n')
lsDirFit.sort()
lsDirFit.remove('')
"""-------------------------------------------------------------------------
"""
"""
PERFORMS LCs FITTING
"""
if args.fit:
if args.limits[1] > len(lsDirData):
print indent + \
"WARNING: upper limit > than the number of files. Corrected.\n"
args.limits[1] = len(lsDirData)
filePath = args.dirFit + 'PEAKED_{:<}_{:<5.3f}.LIST'.format(
socket.gethostname(), time.time()
)
fPeaked = open(filePath, 'w')
filePath = args.dirFit + 'NOPEAKED_{:<}_{:<5.3f}.LIST'.format(
socket.gethostname(), time.time()
)
fNopeaked = open(filePath, 'w')
# Relevant input data
print "\n" + indent + "[1] * Fit lightcurves ..."
print "\n" + indent + "Index interval [{:<},{:<})".format(
args.limits[0], args.limits[1]
)
print "\n" + indent + \
"Data directory: " + os.curdir + args.dirData
print "\n" + indent \
+ "Number of candidates = {:<d}".format(len(lsDirData))
"""
GP kernel specification
------------------------------------------------------------------------
"""
# kern = GPy.kern.RatQuad(1)
kern = GPy.kern.RBF(1)
# kern = GPy.kern.Matern32(1)
# kern = GPy.kern.Matern52(1)
"""---------------------------------------------------------------------
"""
print "\n" + indent \
+ "Data will be smoothed using GP kernel " + kern.name.upper()
print '\n' + indent + \
"INDEX | SN ID | BAND"
for i in range(args.limits[0], args.limits[1]):
filePath = path.splitext(lsDirData[i])[0] + "_FIT.DAT"
"""
Check if file with fit results already exits. If positive skip
to next loop iteration.
"""
if filePath in lsDirFit:
continue
candidate = util.get_sn_from_file(
args.dirData + lsDirData[i],
args.mag
)
# Creating SupernovaFit object
candidateFit = cls.SupernovaFit(candidate, kern.name)
for b in candidate.lcsDict.keys():
# Correcting for time dilution
epoch = util.time_correct(
candidate.lcsDict[b].mjd,
candidate.zSpec if candidate.zSpec else candidate.zPhotHost
)
# Correcting for absorption
flux = util.correct_for_absorption(
candidate.lcsDict[b].flux,
candidate.MWEBV, b
)
errFlux = candidate.lcsDict[b].fluxErr
if (candidate.lcsDict[b].badCurve) or (len(flux) <= 3):
candidateFit.lcsDict[b].badCurve = True
print indent + bcolors.FAIL + \
"{:<} {:<} {:<} Bad Curve".format(i, candidate.SNID, b) + \
bcolors.txtrst
"""
                    >>> Using 'break' instead of 'continue' here would leave the
                    >>> candidate unprocessed and would make the code below simpler
                    >>> (no double checks on both data and fit).
"""
continue
"""
Fitting Lightcurve
----------------------------------------------------------------
"""
try:
predMjd, predFlux, predErr, GPModel = util.gp_fit(
epoch, flux, errFlux,
kern, n_restarts=10,
parallel=False,
test_length=args.testLength,
test_prior=args.prior)
except linalg.LinAlgError as e:
if sent == False:
server = smtplib.SMTP('mailauth.oapd.inaf.it',587)
server.starttls()
server.login('marco.depascale', 'M@p3d_8$')
msg = 'Subject: LinAlgError\n\n' + \
'index = {:<d}, SNID = {:<d}'.format(i, candidate.SNID)
server.sendmail(fromAddress, toAddress, msg)
server.close()
sent = True
"""
                    If a LinAlgError is raised the light curve will not be saved.
"""
print indent + \
"{:>5d} {:>5d} {:>4s} > FAIL".format(
i, candidate.SNID, b
) + bcolors.FAIL + ' LinAlgError' + bcolors.txtrst
candidateFit.r.badCurve = True
raise ValueError(
'LinAlgError from GPy. Mail sent to {:s}'.format(
toAddress
)
)
else:
candidateFit.set_lightcurve(b, predMjd, predFlux, predErr)
print indent + bcolors.OKGREEN + \
"{:>5d} {:>5d} {:>4s} > DONE".format(
i, candidate.SNID, b
) + bcolors.txtrst
"""-------------------------------------------------------------
"""
else:
"""
Saving fit results on file
----------------------------------------------------------------
"""
if (candidateFit.r.badCurve == False):
filePath = args.dirFit + \
path.splitext(lsDirData[i])[0] + "_FIT.DAT"
candidateFit.save_on_txt(filePath)
print indent + 'file saved!'
if candidateFit.peaked:
peakIdx = np.append(peakIdx, i)
fPeaked.write('{:<}\n'.format(filePath))
else:
nopeakIdx = np.append(nopeakIdx, i)
fNopeaked.write('{:<}\n'.format(filePath))
"""-------------------------------------------------------------
"""
gc.collect()
# free memory
gc.collect()
fPeaked.close()
fNopeaked.close()
filePath = 'peaked_{:<}_{:<5.3f}.dat'.format(
socket.gethostname(), time.time()
)
np.savetxt(args.dirFit + filePath, peakIdx,
header='Indexes of fitted LCs with r maximum.', fmt='%d')
filePath = args.dirFit + 'nopeaked_{:<}_{:<5.3f}.dat'.format(
socket.gethostname(), time.time()
)
np.savetxt(filePath, nopeakIdx,
header='Indexes of fitted LCs without an r maximum.', fmt='%d')
gc.collect()
"""#########################################################################
############################################################################
PERFORMING CROSS-CORRELATION
############################################################################
############################################################################
"""
if args.crossCor:
"""
        Files are sorted by SNID.
        In the following, peakIdx and nopeakIdx contain indices referring to the
        full list of files. For this reason the list of files is queried on
        dirData and then filtered using the above variables.
"""
print "\n" + indent + bcolors.undwht + \
"(*) Calculate cross-correlation of not peaked- with " + \
"peaked-lcs ..." + bcolors.txtrst
print "\n" + indent + "Interval [{:<},{:<})".format(args.limits[0], args.limits[1])
filePath = args.dirFit + 'PEAKED.LIST'
if path.exists(filePath) == False:
# create the file concatenating existing partial files
print '{:<s} created!'.format(filePath)
peakedFileList = util.list_files(args.dirFit+'PEAKED*.LIST')
util.concat_files(peakedFileList, filePath)
peakList = np.loadtxt(filePath, dtype=np.str)
filePath = args.dirFit + 'NOPEAKED.LIST'
if path.exists(filePath) == False:
# create the file from existing partial files
print '{:<s} created!'.format(filePath)
noPeakedFileList = util.list_files(args.dirFit+'NOPEAKED*.LIST')
util.concat_files(noPeakedFileList, filePath)
tmp = np.loadtxt(filePath, dtype=np.str)
if tmp.size == 1:
nopeakList = np.asarray([tmp])
else:
nopeakList = np.asarray(tmp)
if args.limits[1] > len(nopeakList):
args.limits[1] = len(nopeakList)
#
# filePath = 'repeats.txt'
# repeats = np.loadtxt(args.dirFit + filePath, dtype=np.str)
filePath = 'cross_correlated_files_{:<5.3f}.dat'.format(time.time())
reWrite = open(args.dirFit + filePath, 'w')
prog = 0
for i in nopeakList[args.limits[0]:args.limits[1]]:
z = 0 # goes on peakIdx to index the progress bar
"""
READ DATA FROM NOT-PEAKED FILE
creates a Supernova object
"""
filePath = i
try:
tmpSN = util.get_sn_from_file(filePath)
print "Progress: {:<d} -- {:<}".format(prog, filePath)
prog += 1
ccIndent = "ID:{: ^7d}".format(tmpSN.SNID)
widgets = [ccIndent, Percentage(), ' ',
Bar(marker='#',left='[',right=']'),
' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=len(peakList)).start()
except IOError:
print "IOError: {:<}".format(filePath)
continue
if tmpSN.r.badCurve:
print "IOError (BAD r curve): {:<}".format(filePath)
continue
"""
create SupernovaFit object
"""
notPeaked = cls.SupernovaFit(tmpSN)
for l in tmpSN.lcsDict.keys():
notPeaked.set_lightcurve(l,
tmpSN.lcsDict[l].mjd,
tmpSN.lcsDict[l].flux,
tmpSN.lcsDict[l].fluxErr
)
"""
Shifting mjds in not-peaked
"""
notPeaked.shift_mjds()
ccMax = list()#np.zeros(peakIdx.size)
k = 0 # goes on ccMax
# for j in peakIdx:
for j in peakList:
"""
READ DATA FROM PEAKED FILE
"""
# if j in repeats:
# print indent + bcolors.WARNING + \
# 'File appears also in unpeaked list: ignoring it.' + \
# bcolors.txtrst
# continue
filePath = j#args.dirFit + lsDirData[j][0:12] + '_FIT.DAT'
try:
tmpSN = util.get_sn_from_file(filePath)
except IOError:
print indent + bcolors.WARNING + \
                    'File appears also in peaked list but it does not exist: ignoring it.' + \
bcolors.txtrst
continue
if tmpSN.r.badCurve:
print indent + bcolors.WARNING + \
'Peaked file has bad r curve: ignoring it.' + \
bcolors.txtrst
continue
peaked = cls.SupernovaFit(tmpSN)
for l in tmpSN.lcsDict.keys():
peaked.set_lightcurve(l,
tmpSN.lcsDict[l].mjd,
tmpSN.lcsDict[l].flux,
tmpSN.lcsDict[l].fluxErr
)
"""
Shifting mjds in peaked
"""
peaked.shift_mjds()
"""
Performing cross-correlation
"""
ycorr = signal.correlate(
notPeaked.normalized_flux('r'),
peaked.normalized_flux('r')
)
xcorr = np.arange(ycorr.size)
lags = xcorr - (
len(notPeaked.normalized_flux('r'))-1
)
distancePerLag = (
notPeaked.r.shiftedMjd[-1] - \
notPeaked.r.shiftedMjd[0])/float(
len(notPeaked.r.shiftedMjd)
)
offsets = -lags*distancePerLag
# ccMax[k] = offsets[np.argmax(ycorr)]
ccMax.append(offsets[np.argmax(ycorr)])
# k += 1
pbar.update(z+1)
z += 1
# gc.collect()
notPeaked.ccMjdMaxFlux = np.mean(ccMax)#ccMax.mean()
"""
re-writing file of not peaked lc to include information on maximum
position from CC.
"""
filePath = i#args.dirFit + lsDirData[i][0:12] + '_FIT.DAT'
notPeaked.save_on_txt(filePath)
reWrite.write(filePath+'\n')
pbar.finish()
# gc.collect()
reWrite.close()
print 'CC ended!'
gc.collect()
"""
CALCULATING DISTANCE MATRIX
needs:
- args.distMatrix
- args.limits
- args.offset
- args.dirFit
"""
if args.distMatrix:
if not os.path.exists(path.abspath(args.dirFit + 'distance_matrix' + os.sep)):
os.makedirs(path.abspath(args.dirFit + 'distance_matrix' + os.sep))
"""
Calculate distance between fitted lightcurves.
        Distance values are saved in an R matrix. This will be used by the R
        package `diffusionMap` through the rpy2 Python package.
"""
j_offset = args.offset
i_start = args.limits[0]
i_end = args.limits[1]
j_start = i_start + j_offset
j_end = (i_end + j_offset) if (i_end+j_offset<=len(lsDirFit)) else len(lsDirFit)
print "\n" + indent + bcolors.undwht + \
"(*) Calculate distances between lightcurves ..." + \
bcolors.txtrst
print indent + "Rows in [{:<d}, {:<d})".format(i_start, i_end)
print indent + "Cols in [{:<d}, {:<d})".format(j_start, j_end)
"""
setting value for big distance
"""
distFlag = 5
missColCount = 0
missRowlist = list()
bandDict = {
'g':0,
'r':1,
'i':2,
'z':3
}
widgets = [indent, 'Processing:', ' ', Counter(), ' ',
AnimatedMarker(), indent, Timer()]
# creating list of 4 lists
distList = list([[], [], [], []])
nCols = 0
# distList = np.zeros((4,
# len(lsDirFit[i_start:i_end]), len(lsDirFit[i_start:i_end])),
# dtype=float
# )
pbar = ProgressBar(widgets=widgets, maxval=(i_end-i_start)).start()
for i in range(i_start, i_end):
missColCount = 0
"""
Reading in i-candidate
"""
tmpSN = util.get_sn_from_file(
args.dirFit+lsDirFit[i]
)
if tmpSN.r.badCurve:
                # nothing to add to the distance matrix: record the missing row
                # and continue to the next object
# print "{:<} Has bad curve in r band - ".format(lsDirFit[i]) + \
# "THE FILE HAS TO BE DELETED" +\
# " indices {:<d}".format(i)
missRowlist.append(i)
continue
iCandidate = cls.SupernovaFit(tmpSN)
for b in tmpSN.lcsDict.keys():
# set_lightcurve set also if the lc is peaked or not
iCandidate.set_lightcurve(b,
tmpSN.lcsDict[b].mjd,
tmpSN.lcsDict[b].flux,
tmpSN.lcsDict[b].fluxErr
)
"""
Shifting mjds in i-candidate
"""
iCandidate.shift_mjds()
if iCandidate.peaked == False:
# print i, iCandidate.SNID
"""
keeping to perform check with other non peaked LC
"""
iElMax = iCandidate.r.shiftedMjd.index(0.)
"""
correcting using CC results
"""
for b in bands:
iCandidate.lcsDict[b].shiftedMjd = [
iCandidate.lcsDict[b].shiftedMjd[l] +
iCandidate.ccMjdMaxFlux for l in range(len(
iCandidate.lcsDict[b].shiftedMjd
))
]
iElSize = iCandidate.r.size
iPeaked = iCandidate.peaked
for j in range(j_start, j_end):
"""
                if this SN has a badCurve in this band it will be far from all
                the others by default.
                This saves time by not opening all the other files
                to create new SupernovaFit objects.
"""
if j == i:
# filling elements on the distance matrix diagonal
for b in bands:
# adding one element to each sub list in distList
distList[bandDict[b]].append(0.)
# distList[bandDict[b], i-i_start, j-j_start] = 0.
continue
if j < i:
# filling matrix elements below the diagonal
if j in missRowlist:
missColCount += 1
continue
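                    # Editor's note: distList[b] stores the matrix row by row, so
                    # the symmetric value computed earlier at (row j, col i) is
                    # read back from the flattened list below; the missColCount
                    # and missRowlist corrections account for rows and columns
                    # skipped because of bad r-band curves.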
for b in bands:
# appending the symmetric element in the list: i-i_start
distList[bandDict[b]].append(
distList[bandDict[b]][
(j-j_start-missColCount)*nCols+\
i-i_start-len(missRowlist)
])
# distList[bandDict[b], i-i_start, j-j_start] = \
# distList[bandDict[b], j-j_start, i-i_start]
continue # jump to the next iteration of the loop
"""
Reading in j-candidate
"""
try:
tmpSN = util.get_sn_from_file(
args.dirFit+lsDirFit[j]
)
except IndexError:
print j, len(lsDirFit)
raise IndexError("list index out of range")
if tmpSN.r.badCurve:
                        # nothing to add to the distance matrix:
                        # continue to the next object
# print "{:<} Has bad curve in r band -".format(lsDirFit[j])+\
# " THE FILE HAS TO BE DELETED:" +\
# " indices {:<d}, {:<d}".format(i, j)
continue
jCandidate = cls.SupernovaFit(tmpSN)
for b in tmpSN.lcsDict.keys():
jCandidate.set_lightcurve(b,
tmpSN.lcsDict[b].mjd,
tmpSN.lcsDict[b].flux,
tmpSN.lcsDict[b].fluxErr
)
"""
Shifting mjds in j-candidate
"""
jCandidate.shift_mjds()
if jCandidate.peaked == False:
"""
keeping to perform check with other non peaked LC
"""
jElMax = jCandidate.r.shiftedMjd.index(0.)
"""
correcting using CC results
"""
for b in bands:
jCandidate.lcsDict[b].shiftedMjd = [
jCandidate.lcsDict[b].shiftedMjd[l] +
jCandidate.ccMjdMaxFlux for l in range(len(
jCandidate.lcsDict[b].shiftedMjd
))
]
jElSize = jCandidate.r.size
for b in bands:
if not jCandidate.lcsDict[b].badCurve \
and not iCandidate.lcsDict[b].badCurve:
distList[bandDict[b]].append(
iCandidate.get_distance(jCandidate, b)
)
# distList[bandDict[b], i-i_start, j-j_start] = \
# iCandidate.get_distance(jCandidate, b)
else:
# in case of bad curve
"""
This works like a flag. These elements will be set
equal to a neutral value (the mean of the other)
"""
distList[bandDict[b]].append(distFlag)
# distList[bandDict[b], i-i_start, j-j_start] = distFlag
"""
            # >>> !! Checking whether i equals its starting value does not take
            into account the possibility of the first SN having a bad r curve,
            in which case the loop never arrives here, since it is skipped by a
            continue. Checking whether nCols is still equal to zero is much
            better, since it is the only way to verify that the first row has
            been completed.
"""
# if (i == i_start):
if (nCols == 0):
nCols = len(distList[0])
print 'nCols updated! {:<d}'.format(nCols)
pbar.update(i-i_start+1)
pbar.finish()
# del iCandidate
# del jCandidate
# del tmpSN
gc.collect()
distMatrix = np.zeros((4,
len(distList[0])/nCols, nCols),
dtype=float
)
for b in bands:
distMatrix[bandDict[b]] = np.reshape(
distList[bandDict[b]], (len(distList[bandDict[b]])/nCols, nCols)
)
"""
        distList is no longer used from now on; delete it to save memory.
"""
del distList
gc.collect()
# fixing flagged elements
# raise SystemExit
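        # Editor's note: entries equal to distFlag mark pairs where at least one
        # light curve had a bad band; below, each flagged entry is replaced by
        # the mean of the corresponding entries in the other three bands, so it
        # stays neutral in distMatrixSum.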
if distMatrix[0, distMatrix[0] == distFlag].size > 0:
ind = np.where(distMatrix[0] == distFlag)
distMatrix[0, ind[0], ind[1]] = np.add(
np.add(
distMatrix[1, ind[0], ind[1]],
distMatrix[2, ind[0], ind[1]]
),
distMatrix[3, ind[0], ind[1]]
)/3.
if distMatrix[1, distMatrix[1] == distFlag].size > 0:
ind = np.where(distMatrix[1] == distFlag)
# distMatrix[1, ind[0], ind[1]] = distMatrix[1,:,:].max()
distMatrix[1, ind[0], ind[1]] = np.add(
np.add(
distMatrix[0, ind[0], ind[1]],
distMatrix[2, ind[0], ind[1]]
),
distMatrix[3, ind[0], ind[1]]
)/3.
if distMatrix[2, distMatrix[2] == distFlag].size > 0:
ind = np.where(distMatrix[2] == distFlag)
# distMatrix[2, ind[0], ind[1]] = distMatrix[2].max()
distMatrix[2, ind[0], ind[1]] = np.add(
np.add(
distMatrix[0, ind[0], ind[1]],
distMatrix[1, ind[0], ind[1]]
),
distMatrix[3, ind[0], ind[1]]
)/3.
if distMatrix[3, distMatrix[3] == distFlag].size > 0:
ind = np.where(distMatrix[3] == distFlag)
# distMatrix[3, ind[0], ind[1]] = distMatrix[3].max()
distMatrix[3, ind[0], ind[1]] = np.add(
np.add(
distMatrix[0, ind[0], ind[1]],
distMatrix[1, ind[0], ind[1]]
),
distMatrix[2, ind[0], ind[1]]
)/3.
distMatrixSum = np.sum(distMatrix, 0)
"""
        Saving to text files
"""
fileHeader = "distMatrix[{:<d}:{:<d},{:<d}:{:<d}] --- ".format(
i_start, i_end, j_start, j_end
) + \
"Created by {:<}".format(socket.gethostname())
filePath = args.dirFit + 'distance_matrix' + os.sep + \
'dist_matrix_Sum_{:<}_{:<5.3f}.txt'.format(
socket.gethostname(), time.time()
)
np.savetxt(filePath, distMatrixSum, fmt='%6.4f', header=fileHeader)
del distMatrixSum
gc.collect()
filePath = args.dirFit + 'distance_matrix' + os.sep + \
'dist_matrix_g_{:<}_{:<5.3f}.txt'.format(
socket.gethostname(), time.time()
)
np.savetxt(filePath, distMatrix[0], fmt='%6.4f', header=fileHeader)
filePath = args.dirFit + 'distance_matrix' + os.sep + \
'dist_matrix_r_{:<}_{:<5.3f}.txt'.format(
socket.gethostname(), time.time()
)
np.savetxt(filePath, distMatrix[1], fmt='%6.4f', header=fileHeader)
filePath = args.dirFit + 'distance_matrix' + os.sep + \
'dist_matrix_i_{:<}_{:<5.3f}.txt'.format(
socket.gethostname(), time.time()
)
np.savetxt(filePath, distMatrix[2], fmt='%6.4f', header=fileHeader)
filePath = args.dirFit + 'distance_matrix' + os.sep + \
'dist_matrix_z_{:<}_{:<5.3f}.txt'.format(
socket.gethostname(), time.time()
)
np.savetxt(filePath, distMatrix[3], fmt='%6.4f', header=fileHeader)
del distMatrix
gc.collect()
"""
CALCULATING DIFFUSION MAP
"""
if args.diffuse:
if 'diffusionMap' not in globals():
diffusionMap = importr('diffusionMap')
ndim = ro.r.attributes(Rmatrix)[0][0]
dmap = diffusionMap.diffuse(Rmatrix, neigen=5)
util.dump_pkl('diffusion_map.pkl', dmap)
"""
TRAINING RANDOM FOREST CLASSIFIER
"""
if args.train:
randomForest = importr('randomForest')
if 'dmap' not in globals():
print indent + 'Loading catalog from dump file ...'
dmap = util.open_pkl('tmp_diffusion_map.pkl')
dmap_rf = randomForest.randomForest(dmap)
"""
PLOT OBSERVATION AND FIT
--plot
"""
if args.plot:
timeMark = time.time()
"""
getting file list from directory
        Files will be sorted by SNID
"""
print indent + 'Plotting ...'
'''
Column index is always increasing, no check on its value.
'''
nrows = 5
ncols = 5
"""
        If plotOffset is not specified, get a suitable random value
"""
if (args.plotOffset == -1):
            # np.random.RandomState  (no-op in the original: the draw below uses
            # NumPy's global random state)
offset = int(np.random.uniform(low=0, high=len(lsDirFit)-nrows*ncols))
else:
offset = args.plotOffset
fig_g, ax_g = plt.subplots(nrows=nrows, ncols=ncols,
figsize=(16.5, 11.7)#,
#tight_layout=True
)
fig_r, ax_r = plt.subplots(nrows=nrows, ncols=ncols,
figsize=(16.5, 11.7)#,
#tight_layout=True
)
fig_i, ax_i = plt.subplots(nrows=nrows, ncols=ncols,
figsize=(16.5, 11.7)#,
#tight_layout=True
)
fig_z, ax_z = plt.subplots(nrows=nrows, ncols=ncols,
figsize=(16.5, 11.7)#,
# tight_layout=True
)
dictFig = {'g':fig_g,
'r':fig_r,
'i':fig_i,
'z':fig_z}
dictAx = {'g':ax_g,
'r':ax_r,
'i':ax_i,
'z':ax_z}
r = {'g':0,
'r':0,
'i':0,
'z':0}
c = {'g':0,
'r':0,
'i':0,
'z':0}
"""
Adjust subplot margins and title
"""
for b in dictFig.keys():
dictFig[b].subplots_adjust(
top=0.96, right=0.99, bottom=0.03, left=0.02,
wspace=0.08, hspace=0.13
)
dictFig[b].suptitle('band {:<1} - offset {:<d}'.format(b, offset))
GPkern = ''
for i in range(nrows*ncols):
"""
Getting the observational data from file
"""
candidate = util.get_sn_from_file(
args.dirData + lsDirData[i+offset]#candidateIdx]
)
"""
Reading fit data from file
"""
try:
tmpSN = util.get_sn_from_file(
args.dirFit+lsDirFit[i+offset],
magFlag=args.mag,
)
except IndexError:
warnStr = 'IndexError: list index out of range. '+\
'i={:<d}.'.format(i+offset)
print warnings.warn(warnStr)
print '\n'+indent+'Saving files as they are and stopping.'
else:
"""
Initializing SupernovaFit object
"""
fit = cls.SupernovaFit(tmpSN,
tmpSN.kern if hasattr(tmpSN, 'kern') else None)
if (i == 0) and hasattr(tmpSN, 'kern'):
GPkern = tmpSN.kern
for b in tmpSN.lcsDict.keys():
fit.set_lightcurve(b,
tmpSN.lcsDict[b].mjd,
tmpSN.lcsDict[b].flux,
tmpSN.lcsDict[b].fluxErr,
magFlag=args.mag
)
if fit.r.badCurve:
print 'SN ID{:>06d} has bad r band light curve!'.format(
fit.SNID)
# continue
else:
"""
Shift fit mjd to have 0 at r band maximum
"""
fit.shift_mjds()
"""
Fixing shiftedMjd for not-peaked LCs
"""
if (fit.peaked == False) and (fit.r.badCurve == False) :
"""
correcting using CC results
"""
for b in bands:
fit.lcsDict[b].shiftedMjd = [
el + fit.ccMjdMaxFlux for el in fit.lcsDict[b].shiftedMjd
]
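                    # ccMjdMaxFlux is presumably the offset of the flux maximum
                    # estimated from the cross-correlation ("CC") step, so light
                    # curves without an observed peak are shifted onto the same
                    # time reference as the peaked ones.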
for b in dictAx.keys():
"""
variable `data` initialized as light curve in band b for
cleaner code.
"""
data = candidate.lcsDict[b]
fit_b = fit.lcsDict[b]
fit_r = fit.lcsDict['r']
if c[b] > nrows-1:
c[b] = 0
r[b] += 1
xlim = dictAx[b][r[b], c[b]].get_xlim()
ylim = dictAx[b][r[b], c[b]].get_ylim()
dictAx[b][r[b], c[b]].set_xticks([0])
dictAx[b][r[b], c[b]].set_yticks([0])
dictAx[b][r[b], c[b]].set_xticklabels(['0'])
dictAx[b][r[b], c[b]].set_yticklabels(['0'])
if (data.badCurve == False) and (fit_b.badCurve == False) and (fit.r.badCurve == False):
epoch = util.time_correct(data.mjd,
candidate.zSpec if candidate.zSpec else candidate.zPhotHost)
epoch = [val-fit_r.mjd[fit_r.max_flux_index] for val in epoch]
if fit.peaked == False:
epoch = [val+fit.ccMjdMaxFlux for val in epoch]
flux = util.correct_for_absorption(data.flux,
candidate.MWEBV, b)
"""
Setting limits for plot axes
"""
if min(fit_b.flux) < min(flux):
y_min = min(fit_b.flux) - 3*max(fit_b.fluxErr)
else:
y_min = min(flux) - np.median(data.fluxErr)
if max(fit_b.flux) > max(flux):
y_max = max(fit_b.flux) + 3*max(fit_b.fluxErr)
else:
y_max = max(flux) + np.median(data.fluxErr)
dictAx[b][r[b], c[b]].set_ylim(y_min, y_max)
"""
Setting limits for fill_between
"""
fluxUpLim = [val for val in [
fit_b.flux[el] + fit_b.fluxErr[el]
for el in range(len(fit_b.flux))
]]
fluxLowLim = [val for val in [
fit_b.flux[el] - fit_b.fluxErr[el]
for el in range(len(fit_b.flux))
]]
dictAx[b][r[b], c[b]].fill_between(fit_b.shiftedMjd,
fluxUpLim, fluxLowLim,
facecolor='red', alpha=0.4, linewidth=0.5)
"""
Setting limits for fill_between
"""
fluxUpLim = [val for val in [
fit_b.flux[el] + 2*fit_b.fluxErr[el]
for el in range(len(fit_b.flux))
]]
fluxLowLim = [val for val in [
fit_b.flux[el] - 2*fit_b.fluxErr[el]
for el in range(len(fit_b.flux))
]]
dictAx[b][r[b], c[b]].fill_between(fit_b.shiftedMjd,
fluxUpLim, fluxLowLim,
facecolor='red', alpha=0.2, linewidth=0.5)
"""
Setting limits for fill_between
"""
fluxUpLim = [val for val in [
fit_b.flux[el] + 3*fit_b.fluxErr[el]
for el in range(len(fit_b.flux))
]]
fluxLowLim = [val for val in [
fit_b.flux[el] - 3*fit_b.fluxErr[el]
for el in range(len(fit_b.flux))
]]
dictAx[b][r[b], c[b]].fill_between(fit_b.shiftedMjd,
fluxUpLim, fluxLowLim,
facecolor='red', alpha=0.1, linewidth=0.5)
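                        # The three fill_between calls above shade nested bands of
                        # +/-1, +/-2 and +/-3 times fluxErr around the GP fit
                        # (alpha 0.4, 0.2 and 0.1), drawn underneath the mean fit
                        # curve plotted just below.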
dictAx[b][r[b], c[b]].plot(fit_b.shiftedMjd, fit_b.flux,
color='#7f0000',
linewidth=2)
scatterLab = 'SN ID {:<d}'.format(candidate.SNID)
dictAx[b][r[b], c[b]].scatter(epoch, flux,
s=10, label=scatterLab, c='black', marker='x')
dictAx[b][r[b], c[b]].errorbar(epoch, flux,
data.fluxErr, fmt=None, color='black', ecolor='black')
if not fit.peaked:
pass
dictAx[b][r[b], c[b]].legend(
loc='best', framealpha=0.3, fontsize='10')
else:
label = str(candidate.SNID)+" BAD CURVE"
dictAx[b][r[b], c[b]].plot([0, 1], [0, 1], color='red',
label=label)
dictAx[b][r[b], c[b]].plot([0, 1], [1, 0], color='red')
dictAx[b][r[b], c[b]].legend(
loc='best', framealpha=0.3, fontsize='10')
c[b] += 1
print indent + "Plots saved in files:"
if not os.path.exists(path.abspath(args.dirFit + "plots" + os.sep)):
os.makedirs(args.dirFit + "plots")
for b in dictFig.keys():
dictFig[b].savefig(
args.dirFit + "plots"+ os.sep + GPkern + \
"_band_{:<1}_{:<f}.png".format(b,timeMark),
dpi=300
)
print indent + " - " + args.dirFit + "plots" + os.sep + \
GPkern + "_band_{:<1}_{:<f}.png".format(b,timeMark)
plt.close('all')
"""
PLOT OBSERVATION AND FIT (publication style)
--nice-plots
"""
if args.nicePlots:
"""
1 candidate
choose how many bands
make the plot with confidence regions
"""
# if args.nBands != 1 or args.nBands != 4:
# args.nBands = 1
if args.cand == -1:
args.cand = np.random.random_integers(
low=0, high=len(lsDirData))
fname = 'DES_SN{:0>6d}.DAT'.format(args.cand)
candidate = util.get_sn_from_file(
args.dirData+fname
)
fname = 'DES_SN{:0>6d}_FIT.DAT'.format(args.cand)
tmpSN = util.get_sn_from_file(
args.dirFit+fname,
magFlag=args.mag,
)
"""
Initializing SupernovaFit object
"""
fit = cls.SupernovaFit(tmpSN, tmpSN.kern if hasattr(tmpSN, 'kern') else None)
for b in tmpSN.lcsDict.keys():
fit.set_lightcurve(b,
tmpSN.lcsDict[b].mjd,
tmpSN.lcsDict[b].flux,
tmpSN.lcsDict[b].fluxErr,
magFlag=args.mag
)
if fit.r.badCurve:
raise SystemExit('Bad r curve!')
fit.shift_mjds()
"""
Fixing shiftedMjd for not-peaked LCs
"""
if fit.peaked == False:
"""
correcting using CC results
"""
for b in candidate.lcsDict.keys():
fit.lcsDict[b].shiftedMjd = [el + fit.ccMjdMaxFlux
for el in fit.lcsDict[b].shiftedMjd]
bands = candidate.lcsDict.keys() if args.allBands else args.band
"""
        Pre-process data so it can be compared with the fit (which was made
        from pre-processed data)
"""
for b in bands:
if (not candidate.lcsDict[b].badCurve) and (not fit.lcsDict[b].badCurve):
candidate = util.pre_process(candidate, b)
candidate.lcsDict[b].mjd = [el - fit.r.mjd[fit.r.max_flux_index]
for el in candidate.lcsDict[b].mjd]
if fit.peaked == False:
candidate.lcsDict[b].mjd = [el + fit.ccMjdMaxFlux
for el in candidate.lcsDict[b].mjd]
else:
raise SystemExit('Bad {:1s} curve!'.format(b))
if args.allBands:
fig, ax = plt.subplots(nrows=2, ncols=2,
# figsize=(16.5, 11.7),
tight_layout=False
)
axDict = {
'g':ax[0,0],
'r':ax[0,1],
'i':ax[1,0],
'z':ax[1,1]
}
# fig.subplots_adjust(left=0.05, right=0.97, top=0.94, wspace=0.29)
else:
fig = plt.figure()
xlim = [-35,12]
ylim = [-10,10]
# fig, ax = plt.subplots(nrows=2, ncols=1,
# # figsize=(16.5, 11.7),
# tight_layout=False
# )
# axDict = {
# 'g':ax[0,0],
# 'r':ax[0,1],
# 'i':ax[1,0],
# 'z':ax[1,1]
# }
if not args.allBands:
fit_b = fit.lcsDict[args.band]
data = candidate.lcsDict[args.band]
if not data.badCurve and not fit_b.badCurve:
epoch = data.mjd
flux = data.flux
"""
Setting limits for fill_between
"""
fluxUpLim = [el for el in [
fit_b.flux[i] + fit_b.fluxErr[i]
for i in range(len(fit_b.flux))
]]
fluxLowLim = [el for el in [
fit_b.flux[i] - fit_b.fluxErr[i]
for i in range(len(fit_b.flux))
]]
plt.fill_between(fit_b.shiftedMjd,
fluxUpLim, fluxLowLim,
facecolor='red', alpha=0.4, linewidth=0.5)
# axDict[b].fill_between(fit_b.shiftedMjd,
# fluxUpLim, fluxLowLim,
# facecolor='red', alpha=0.4, linewidth=0.5)
"""
Setting limits for fill_between
"""
fluxUpLim = [el for el in [
fit_b.flux[i] + 2*fit_b.fluxErr[i]
for i in range(len(fit_b.flux))
]]
fluxLowLim = [el for el in [
fit_b.flux[i] - 2*fit_b.fluxErr[i]
for i in range(len(fit_b.flux))
]]
plt.fill_between(fit_b.shiftedMjd,
fluxUpLim, fluxLowLim,
facecolor='red', alpha=0.2, linewidth=0.5)
# axDict[b].fill_between(fit_b.shiftedMjd,
# fluxUpLim, fluxLowLim,
# facecolor='red', alpha=0.2, linewidth=0.5)
"""
Setting limits for fill_between
"""
fluxUpLim = [el for el in [
fit_b.flux[i] + 3*fit_b.fluxErr[i]
for i in range(len(fit_b.flux))
]]
fluxLowLim = [el for el in [
fit_b.flux[i] - 3*fit_b.fluxErr[i]
for i in range(len(fit_b.flux))
]]
plt.fill_between(fit_b.shiftedMjd,
fluxUpLim, fluxLowLim,
facecolor='red', alpha=0.1, linewidth=0.5)
# axDict[b].fill_between(fit_b.shiftedMjd,
# fluxUpLim, fluxLowLim,
# facecolor='red', alpha=0.1, linewidth=0.5)
plt.plot(fit_b.shiftedMjd, fit_b.flux,
color='#7f0000',
linewidth=2,
label='GP fit')
# axDict[b].plot(fit_b.shiftedMjd, fit_b.flux,
# color='#7f0000',
# linewidth=2)
plt.scatter(epoch, flux,
s=30, label='data', c='black', marker='x')
# axDict[b].scatter(epoch, flux,
# s=10, label=str(candidate.SNID), c='black', marker='x')
plt.errorbar(epoch, flux,
data.fluxErr, fmt=None, color='black', ecolor='black')
# plt.xlim(xlim)
plt.ylim(ylim)
title = 'SN ID {:d} - Band {:s}'.format(candidate.SNID, args.band)
plt.title(title)
plt.xlabel('Epoch [mjd]')
plt.ylabel('Flux [adu]')
plt.legend(loc='upper right', scatterpoints=1)
# axDict[b].errorbar(epoch, flux,
# data.fluxErr, fmt=None, color='black', ecolor='black')
print "\n" + indent \
+ "The process took {:5.3f} secs.".format(time.time()-start_time)
|
|
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import itertools
import sys
from mox3 import mox
from neutronclient.neutron.v2_0 import port
from neutronclient import shell
from neutronclient.tests.unit import test_cli20
class CLITestV20PortJSON(test_cli20.CLITestV20Base):
def setUp(self):
super(CLITestV20PortJSON, self).setUp(plurals={'tags': 'tag'})
def test_create_port(self):
"""Create port: netid."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = [netid]
position_names = ['network_id']
position_values = []
position_values.extend([netid])
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_extra_dhcp_opts_args(self):
"""Create port: netid --extra_dhcp_opt."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
extra_dhcp_opts = [{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.45'}]
args = [netid]
for dhcp_opt in extra_dhcp_opts:
args += ['--extra-dhcp-opt',
('opt_name=%(opt_name)s,opt_value=%(opt_value)s' %
dhcp_opt)]
position_names = ['network_id', 'extra_dhcp_opts']
position_values = [netid, extra_dhcp_opts]
position_values.extend([netid])
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_extra_dhcp_opts_args_ip_version(self):
"""Create port: netid --extra_dhcp_opt."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
extra_dhcp_opts = [{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0',
'ip_version': "4"},
{'opt_name': 'tftp-server',
'opt_value': '2001:192:168::1',
'ip_version': "6"},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.45',
'ip_version': "4"}]
args = [netid]
for dhcp_opt in extra_dhcp_opts:
args += ['--extra-dhcp-opt',
('opt_name=%(opt_name)s,opt_value=%(opt_value)s,'
'ip_version=%(ip_version)s' %
dhcp_opt)]
position_names = ['network_id', 'extra_dhcp_opts']
position_values = [netid, extra_dhcp_opts]
position_values.extend([netid])
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_full(self):
"""Create port: --mac_address mac --device_id deviceid netid."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--mac_address', 'mac', '--device_id', 'deviceid', netid]
position_names = ['network_id', 'mac_address', 'device_id']
position_values = [netid, 'mac', 'deviceid']
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
# Test dashed options
args = ['--mac-address', 'mac', '--device-id', 'deviceid', netid]
position_names = ['network_id', 'mac_address', 'device_id']
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_vnic_type_normal(self):
"""Create port: --vnic_type normal netid."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--vnic_type', 'normal', netid]
position_names = ['binding:vnic_type', 'network_id']
position_values = ['normal', netid]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
# Test dashed options
args = ['--vnic-type', 'normal', netid]
position_names = ['binding:vnic_type', 'network_id']
position_values = ['normal', netid]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_vnic_type_direct(self):
"""Create port: --vnic_type direct netid."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--vnic_type', 'direct', netid]
position_names = ['binding:vnic_type', 'network_id']
position_values = ['direct', netid]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
# Test dashed options
args = ['--vnic-type', 'direct', netid]
position_names = ['binding:vnic_type', 'network_id']
position_values = ['direct', netid]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_vnic_type_macvtap(self):
"""Create port: --vnic_type macvtap netid."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--vnic_type', 'macvtap', netid]
position_names = ['binding:vnic_type', 'network_id']
position_values = ['macvtap', netid]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
# Test dashed options
args = ['--vnic-type', 'macvtap', netid]
position_names = ['binding:vnic_type', 'network_id']
position_values = ['macvtap', netid]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_with_binding_profile(self):
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--binding_profile', '{"foo":"bar"}', netid]
position_names = ['binding:profile', 'network_id']
position_values = [{'foo': 'bar'}, netid]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
# Test dashed options
args = ['--binding-profile', '{"foo":"bar"}', netid]
position_names = ['binding:profile', 'network_id']
position_values = [{'foo': 'bar'}, netid]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_tenant(self):
"""Create port: --tenant_id tenantid netid."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--tenant_id', 'tenantid', netid, ]
position_names = ['network_id']
position_values = []
position_values.extend([netid])
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
# Test dashed options
args = ['--tenant-id', 'tenantid', netid, ]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
def test_create_port_tags(self):
"""Create port: netid mac_address device_id --tags a b."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = [netid, '--tags', 'a', 'b']
position_names = ['network_id']
position_values = []
position_values.extend([netid])
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tags=['a', 'b'])
def test_create_port_secgroup(self):
"""Create port: --security-group sg1_id netid."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--security-group', 'sg1_id', netid]
position_names = ['network_id', 'security_groups']
position_values = [netid, ['sg1_id']]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_secgroups(self):
"""Create port: <security_groups> netid
The <security_groups> are
--security-group sg1_id --security-group sg2_id
"""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--security-group', 'sg1_id',
'--security-group', 'sg2_id',
netid]
position_names = ['network_id', 'security_groups']
position_values = [netid, ['sg1_id', 'sg2_id']]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_secgroup_off(self):
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--no-security-group', netid]
position_names = ['network_id', 'security_groups']
position_values = [netid, []]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_secgroups_list(self):
"""Create port: netid <security_groups>
The <security_groups> are
--security-groups list=true sg_id1 sg_id2
"""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = [netid, '--security-groups', 'list=true', 'sg_id1', 'sg_id2']
position_names = ['network_id', 'security_groups']
position_values = [netid, ['sg_id1', 'sg_id2']]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_with_qos_policy(self):
"""Create port: --qos-policy mypolicy."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
qos_policy_name = 'mypolicy'
args = [netid, '--qos-policy', qos_policy_name]
position_names = ['network_id', 'qos_policy']
position_values = [netid, qos_policy_name]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_list_ports(self):
"""List ports: -D."""
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, True)
def test_list_ports_pagination(self):
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources_with_pagination(resources, cmd)
def test_list_ports_sort(self):
"""list ports: --sort-key name --sort-key id --sort-key asc
--sort-key desc
"""
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd,
sort_key=["name", "id"],
sort_dir=["asc", "desc"])
def test_list_ports_limit(self):
"""list ports: -P."""
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, page_size=1000)
def test_list_ports_tags(self):
"""List ports: -- --tags a b."""
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, tags=['a', 'b'])
def test_list_ports_detail_tags(self):
"""List ports: -D -- --tags a b."""
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, detail=True, tags=['a', 'b'])
def test_list_ports_fields(self):
"""List ports: --fields a --fields b -- --fields c d."""
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd,
fields_1=['a', 'b'], fields_2=['c', 'd'])
def test_list_ports_with_fixed_ips_in_csv(self):
"""List ports: -f csv."""
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
fixed_ips = [{"subnet_id": "30422057-d6df-4c90-8314-aefb5e326666",
"ip_address": "10.0.0.12"},
{"subnet_id": "30422057-d6df-4c90-8314-aefb5e326666",
"ip_address": "10.0.0.4"}]
contents = [{'name': 'name1', 'fixed_ips': fixed_ips}]
self._test_list_resources(resources, cmd, True,
response_contents=contents,
output_format='csv')
def _test_list_router_port(self, resources, cmd,
myid, detail=False, tags=(),
fields_1=(), fields_2=()):
self.mox.StubOutWithMock(cmd, "get_client")
self.mox.StubOutWithMock(self.client.httpclient, "request")
cmd.get_client().MultipleTimes().AndReturn(self.client)
reses = {resources: [{'id': 'myid1', },
{'id': 'myid2', }, ], }
resstr = self.client.serialize(reses)
# url method body
query = ""
args = detail and ['-D', ] or []
if fields_1:
for field in fields_1:
args.append('--fields')
args.append(field)
args.append(myid)
if tags:
args.append('--')
args.append("--tag")
for tag in tags:
args.append(tag)
if (not tags) and fields_2:
args.append('--')
if fields_2:
args.append("--fields")
for field in fields_2:
args.append(field)
for field in itertools.chain(fields_1, fields_2):
if query:
query += "&fields=" + field
else:
query = "fields=" + field
for tag in tags:
if query:
query += "&tag=" + tag
else:
query = "tag=" + tag
if detail:
query = query and query + '&verbose=True' or 'verbose=True'
query = query and query + '&device_id=%s' or 'device_id=%s'
path = getattr(self.client, resources + "_path")
self.client.httpclient.request(
test_cli20.MyUrlComparator(
test_cli20.end_url(path, query % myid),
self.client),
'GET',
body=None,
headers=mox.ContainsKeyValue('X-Auth-Token', test_cli20.TOKEN)
).AndReturn((test_cli20.MyResp(200), resstr))
self.mox.ReplayAll()
cmd_parser = cmd.get_parser("list_" + resources)
shell.run_command(cmd, cmd_parser, args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
_str = self.fake_stdout.make_string()
self.assertIn('myid1', _str)
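    # For reference, the query string assembled above takes the form
    # "fields=<f1>&fields=<f2>&tag=<t1>&verbose=True&device_id=<myid>",
    # with the fields/tag/verbose pieces present only when the corresponding
    # arguments are supplied; device_id is always appended last.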
def test_list_router_ports(self):
"""List router ports: -D."""
resources = "ports"
cmd = port.ListRouterPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_router_port(resources, cmd,
self.test_id, True)
def test_list_router_ports_tags(self):
"""List router ports: -- --tags a b."""
resources = "ports"
cmd = port.ListRouterPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_router_port(resources, cmd,
self.test_id, tags=['a', 'b'])
def test_list_router_ports_detail_tags(self):
"""List router ports: -D -- --tags a b."""
resources = "ports"
cmd = port.ListRouterPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_router_port(resources, cmd, self.test_id,
detail=True, tags=['a', 'b'])
def test_list_router_ports_fields(self):
"""List ports: --fields a --fields b -- --fields c d."""
resources = "ports"
cmd = port.ListRouterPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_router_port(resources, cmd, self.test_id,
fields_1=['a', 'b'],
fields_2=['c', 'd'])
def test_update_port(self):
"""Update port: myid --name myname --admin-state-up False
--tags a b.
"""
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', 'myname',
'--admin-state-up', 'False',
'--tags', 'a', 'b'],
{'name': 'myname',
'admin_state_up': 'False',
'tags': ['a', 'b'], })
def test_update_port_secgroup(self):
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
args = ['--security-group', 'sg1_id', myid]
updatefields = {'security_groups': ['sg1_id']}
self._test_update_resource(resource, cmd, myid, args, updatefields)
def test_update_port_secgroups(self):
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
args = ['--security-group', 'sg1_id',
'--security-group', 'sg2_id',
myid]
updatefields = {'security_groups': ['sg1_id', 'sg2_id']}
self._test_update_resource(resource, cmd, myid, args, updatefields)
def test_update_port_extra_dhcp_opts(self):
"""Update port: myid --extra_dhcp_opt."""
resource = 'port'
myid = 'myid'
args = [myid,
'--extra-dhcp-opt',
"opt_name=bootfile-name,opt_value=pxelinux.0",
'--extra-dhcp-opt',
"opt_name=tftp-server,opt_value=123.123.123.123",
'--extra-dhcp-opt',
"opt_name=server-ip-address,opt_value=123.123.123.45"
]
updatedfields = {'extra_dhcp_opts': [{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.45'}]}
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, myid, args, updatedfields)
def test_update_port_fixed_ip(self):
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
net_id = 'net_id'
ip_addr = '123.123.123.123'
args = [myid,
'--fixed-ip', "network_id=%(net_id)s,ip_address=%(ip_addr)s" %
{'net_id': net_id,
'ip_addr': ip_addr}]
updated_fields = {"fixed_ips": [{'network_id': net_id,
'ip_address': ip_addr}]}
self._test_update_resource(resource, cmd, myid, args, updated_fields)
def test_update_port_device_id_device_owner(self):
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
args = ['--device-id', 'dev_id', '--device-owner', 'fake', myid]
updatefields = {'device_id': 'dev_id',
'device_owner': 'fake'}
self._test_update_resource(resource, cmd, myid, args, updatefields)
def test_update_port_extra_dhcp_opts_ip_version(self):
"""Update port: myid --extra_dhcp_opt."""
resource = 'port'
myid = 'myid'
args = [myid,
'--extra-dhcp-opt',
"opt_name=bootfile-name,opt_value=pxelinux.0,ip_version=4",
'--extra-dhcp-opt',
"opt_name=tftp-server,opt_value=2001:192:168::1,ip_version=6",
'--extra-dhcp-opt',
"opt_name=server-ip-address,opt_value=null,ip_version=4"
]
updatedfields = {'extra_dhcp_opts': [{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0',
'ip_version': '4'},
{'opt_name': 'tftp-server',
'opt_value': '2001:192:168::1',
'ip_version': '6'},
{'opt_name': 'server-ip-address',
'opt_value': None,
'ip_version': '4'}]}
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, myid, args, updatedfields)
def test_update_port_with_qos_policy(self):
"""Update port: myid --qos-policy mypolicy."""
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--qos-policy', 'mypolicy'],
{'qos_policy': 'mypolicy', })
def test_update_port_with_no_qos_policy(self):
"""Update port: myid --no-qos-policy."""
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--no-qos-policy'],
{'qos_policy': None, })
def test_delete_extra_dhcp_opts_from_port(self):
resource = 'port'
myid = 'myid'
args = [myid,
'--extra-dhcp-opt',
"opt_name=bootfile-name,opt_value=null",
'--extra-dhcp-opt',
"opt_name=tftp-server,opt_value=123.123.123.123",
'--extra-dhcp-opt',
"opt_name=server-ip-address,opt_value=123.123.123.45"
]
        # the client code will change the null to None and send it to the server,
        # where it's interpreted as deleting the DHCP option from the port.
updatedfields = {'extra_dhcp_opts': [{'opt_name': 'bootfile-name',
'opt_value': None},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.45'}]}
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, myid, args, updatedfields)
def test_update_port_security_group_off(self):
"""Update port: --no-security-groups myid."""
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['--no-security-groups', 'myid'],
{'security_groups': []})
def test_show_port(self):
"""Show port: --fields id --fields name myid."""
resource = 'port'
cmd = port.ShowPort(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(resource, cmd, self.test_id,
args, ['id', 'name'])
def test_delete_port(self):
"""Delete port: myid."""
resource = 'port'
cmd = port.DeletePort(test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
args = [myid]
self._test_delete_resource(resource, cmd, myid, args)
class CLITestV20PortXML(CLITestV20PortJSON):
format = 'xml'
|
|
from multiprocessing import Process, Queue
import sys
import os
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from casexml.apps.stock.models import StockTransaction, StockReport, DocDomainMapping
from corehq.apps.domain.models import Domain
from dimagi.utils.couch.database import get_db, iter_docs
from corehq.apps.domainsync.config import DocumentTransform, save
from couchdbkit.client import Database
from optparse import make_option
from datetime import datetime
# doctypes we want to be careful not to copy, which must be explicitly
# specified with --include
DEFAULT_EXCLUDE_TYPES = [
'ReportNotification',
'WeeklyNotification',
'DailyNotification'
]
NUM_PROCESSES = 8
class Command(BaseCommand):
help = "Copies the contents of a domain to another database."
args = '<sourcedb> <domain>'
option_list = BaseCommand.option_list + (
make_option('--include',
action='store',
dest='doc_types',
default='',
help='Comma-separated list of Document Types to copy'),
make_option('--exclude',
action='store',
dest='doc_types_exclude',
default='',
help='Comma-separated list of Document Types to NOT copy.'),
make_option('--since',
action='store',
dest='since',
default='',
help='Only copy documents newer than this date. Format: yyyy-MM-dd. Only '),
make_option('--list-types',
action='store_true',
dest='list_types',
default=False,
help='Don\'t copy anything, just list all the available document types.'),
make_option('--simulate',
action='store_true',
dest='simulate',
default=False,
help='Don\'t copy anything, print what would be copied.'),
make_option('--id-file',
action='store',
dest='id_file',
default='',
help="File containing one document ID per line. Only docs with these ID's will be copied"),
make_option('--postgres-db',
action='store',
dest='postgres_db',
default='',
help="Name of postgres database to pull additional data from. This should map to a "
"key in settings.DATABASES. If not specified no additional postgres data will be "
"copied. This is currently used to pull CommTrack models."),
make_option('--postgres-password',
action='store',
dest='postgres_password',
default='',
help="Password for postgres database to pull additional data from. If not specified will "
"default to the value in settings.DATABASES")
)
def handle(self, *args, **options):
if len(args) != 2:
raise CommandError('Usage is copy_domain %s' % self.args)
sourcedb = Database(args[0])
domain = args[1].strip()
simulate = options['simulate']
since = datetime.strptime(options['since'], '%Y-%m-%d').isoformat() if options['since'] else None
if options['list_types']:
self.list_types(sourcedb, domain, since)
sys.exit(0)
if simulate:
print "\nSimulated run, no data will be copied.\n"
if options['postgres_db'] and options['postgres_password']:
settings.DATABASES[options['postgres_db']]['PASSWORD'] = options['postgres_password']
self.targetdb = get_db()
domain_doc = Domain.get_by_name(domain)
if domain_doc is None:
self.copy_domain(sourcedb, domain)
if options['doc_types']:
doc_types = options['doc_types'].split(',')
for type in doc_types:
startkey = [x for x in [domain, type, since] if x is not None]
endkey = [x for x in [domain, type, {}] if x is not None]
self.copy_docs(sourcedb, domain, simulate, startkey, endkey, type=type, since=since,
postgres_db=options['postgres_db'])
elif options['id_file']:
path = options['id_file']
if not os.path.isfile(path):
print "Path '%s' does not exist or is not a file" % path
sys.exit(1)
with open(path) as input:
doc_ids = [line.rstrip('\n') for line in input]
if not doc_ids:
print "Path '%s' does not contain any document ID's" % path
sys.exit(1)
self.copy_docs(sourcedb, domain, simulate, doc_ids=doc_ids, postgres_db=options['postgres_db'])
else:
startkey = [domain]
endkey = [domain, {}]
exclude_types = DEFAULT_EXCLUDE_TYPES + options['doc_types_exclude'].split(',')
self.copy_docs(sourcedb, domain, simulate, startkey, endkey, exclude_types=exclude_types,
postgres_db=options['postgres_db'])
def list_types(self, sourcedb, domain, since):
doc_types = sourcedb.view("domain/docs", startkey=[domain],
endkey=[domain, {}], reduce=True, group=True, group_level=2)
doc_count = dict([(row['key'][1], row['value']) for row in doc_types])
if since:
for doc_type in sorted(doc_count.iterkeys()):
num_since = sourcedb.view("domain/docs", startkey=[domain, doc_type, since],
endkey=[domain, doc_type, {}], reduce=True).all()
num = num_since[0]['value'] if num_since else 0
print "{0:<30}- {1:<6} total {2}".format(doc_type, num, doc_count[doc_type])
else:
for doc_type in sorted(doc_count.iterkeys()):
print "{0:<30}- {1}".format(doc_type, doc_count[doc_type])
def copy_docs(self, sourcedb, domain, simulate, startkey=None, endkey=None, doc_ids=None,
type=None, since=None, exclude_types=None, postgres_db=None):
if not doc_ids:
doc_ids = [result["id"] for result in sourcedb.view("domain/docs", startkey=startkey,
endkey=endkey, reduce=False)]
total = len(doc_ids)
count = 0
msg = "Found %s matching documents in domain: %s" % (total, domain)
msg += " of type: %s" % (type) if type else ""
msg += " since: %s" % (since) if since else ""
print msg
err_log = self._get_err_log()
queue = Queue(150)
for i in range(NUM_PROCESSES):
Worker(queue, sourcedb, self.targetdb, exclude_types, total, simulate, err_log).start()
for doc in iter_docs(sourcedb, doc_ids, chunksize=100):
count += 1
queue.put((doc, count))
# shutdown workers
for i in range(NUM_PROCESSES):
queue.put(None)
err_log.close()
if os.stat(err_log.name)[6] == 0:
os.remove(err_log.name)
else:
print 'Failed document IDs written to %s' % err_log.name
if postgres_db:
self.copy_postgres_data(sourcedb, domain, postgres_db, doc_ids=doc_ids, simulate=simulate)
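    # copy_docs fans the work out to NUM_PROCESSES Worker processes that consume
    # (doc, count) tuples from a bounded Queue; the None values pushed afterwards
    # act as sentinels that make each worker's iter(queue.get, None) loop exit.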
def copy_domain(self, sourcedb, domain):
print "Copying domain doc"
result = sourcedb.view(
"domain/domains",
key=domain,
reduce=False,
include_docs=True
).first()
if result and 'doc' in result:
domain_doc = Domain.wrap(result['doc'])
dt = DocumentTransform(domain_doc, sourcedb)
save(dt, self.targetdb)
else:
print "Domain doc not found for domain %s." % domain
def copy_postgres_data(self, sourcedb, domain, postgres_slug, simulate, doc_ids):
# can make this more configurable or less hard coded eventually
# also note that ordering here is important for foreign key dependencies
postgres_models = [
(StockReport, 'form_id'),
(StockTransaction, 'case_id'),
(DocDomainMapping, 'doc_id'),
# StockState objects are "derived" and get created by StockTransaction post_save signal.
# We may want to directly port these over in the future.
# (StockState, 'case_id'),
]
for model, doc_field in postgres_models:
query_set = model.objects.using(postgres_slug).filter(
**{'{}__in'.format(doc_field): doc_ids}
)
count = query_set.count()
print "Copying {} models ({})".format(model.__name__, count)
if not simulate:
for i, item in enumerate(query_set):
# this can cause primary key conflicts to overwrite local data I think. Oh well?
item.save(using='default')
print 'Synced {}/{} {}'.format(i, count, model.__name__)
def _get_err_log(self):
name = 'copy_domain.err.%s'
for i in range(1000): # arbitrarily large number
candidate = name % i
if not os.path.isfile(candidate):
return open(candidate, 'a', buffering=1)
class Worker(Process):
def __init__(self, queue, sourcedb, targetdb, exclude_types, total, simulate, err_log):
super(Worker, self).__init__()
self.queue = queue
self.sourcedb = sourcedb
self.targetdb = targetdb
self.exclude_types = exclude_types
self.total = total
self.simulate = simulate
self.err_log = err_log
def run(self):
for doc, count in iter(self.queue.get, None):
try:
if self.exclude_types and doc["doc_type"] in self.exclude_types:
print " SKIPPED (excluded type: %s). Synced %s/%s docs (%s: %s)" % \
(doc["doc_type"], count, self.total, doc["doc_type"], doc["_id"])
else:
if not self.simulate:
dt = DocumentTransform(doc, self.sourcedb)
save(dt, self.targetdb)
print " Synced %s/%s docs (%s: %s)" % (count, self.total, doc["doc_type"], doc["_id"])
except Exception, e:
self.err_log.write('%s\n' % doc["_id"])
print " Document %s failed! Error is: %s" % (doc["_id"], e)
|
|
u"""
Fixer for print function to print statement
print(spam,ham,eggs,sep=sep,end=end,file=file)
->
print >>file, sep.join((str(spam),str(ham),str(eggs))),; file.write(end)
in the most complicated case. Simpler cases:
print() -> print
print("spam") -> print "spam"
print(1,2,3) -> print 1,2,3
print(1,2,3,end=" ") -> print 1,2,3,
print(1,2,3,end="") -> print 1,2,3,; sys.stdout.write("")
print(1,2,3,file=file) -> print >>file, 1,2,3
print(1,2,3,sep=" ",end="\n") -> print 1,2,3
"""
from __future__ import with_statement # Aiming for 2.5-compatible code
from lib2to3 import fixer_base
from lib2to3.pytree import Node, Leaf
from lib2to3.pygram import python_symbols as syms, token
from lib2to3.fixer_util import (Name, FromImport, Newline, Call, Comma, Dot,
LParen, RParen, touch_import)
import warnings
import sys
def gen_printargs(lst):
u"""
Accepts a list of all nodes in the print call's trailer.
Yields nodes that will be easier to deal with
"""
for node in lst:
if node.type == syms.arglist:
# arglist<pos=any* kwargs=(argument<"file"|"sep"|"end" "=" any>*)>
kids = node.children
it = kids.__iter__()
try:
while True:
arg = it.next()
if arg.type == syms.argument:
# argument < "file"|"sep"|"end" "=" (any) >
yield arg
it.next()
else:
yield arg
it.next()
except StopIteration:
continue
else:
yield node
def isNone(arg):
u"""
Returns True if arg is a None node
"""
return arg.type == token.NAME and arg.value == u"None"
def _unicode(arg):
u"""
Calls unicode() on the arg in the node.
"""
prefix = arg.prefix
arg = arg.clone()
arg.prefix = u""
ret = Call(Name(u"unicode", prefix=prefix), [arg])
return ret
def add_file_part(file, lst):
if file is None or isNone(file):
return
lst.append(Leaf(token.RIGHTSHIFT, u">>", prefix=u" "))
lst.append(file.clone())
lst.append(Comma())
def add_sep_part(sep, pos, lst):
if sep is not None and not isNone(sep) and \
not (sep.type == token.STRING and sep.value in (u"' '", u'" "')):
temp = []
for arg in pos:
temp.append(_unicode(arg.clone()))
if sys.version_info >= (2, 6):
warnings.warn(
u"Calling unicode() on what may be a bytes object")
temp.append(Comma())
del temp[-1]
sep = sep.clone()
sep.prefix = u" "
args = Node(syms.listmaker, temp)
new_list = Node(syms.atom, [Leaf(token.LSQB, u"["), args,
Leaf(token.RSQB, u"]")])
join_arg = Node(syms.trailer, [LParen(), new_list, RParen()])
sep_join = Node(syms.power, [sep, Node(syms.trailer,
[Dot(), Name(u"join")])])
lst.append(sep_join)
lst.append(join_arg)
else:
if pos:
pos[0].prefix = u" "
for arg in pos:
lst.append(arg.clone())
lst.append(Comma())
del lst[-1]
def add_end_part(end, file, parent, loc):
if isNone(end):
return
if end.type == token.STRING and end.value in (u"' '", u'" "',
u"u' '", u'u" "',
u"b' '", u'b" "'):
return
if file is None:
touch_import(None, u"sys", parent)
file = Node(syms.power, [Name(u"sys"),
Node(syms.trailer, [Dot(), Name(u"stdout")])])
end_part = Node(syms.power, [file,
Node(syms.trailer, [Dot(), Name(u"write")]),
Node(syms.trailer, [LParen(), end, RParen()])])
end_part.prefix = u" "
parent.insert_child(loc, Leaf(token.SEMI, u";"))
parent.insert_child(loc + 1, end_part)
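# Example of the end-handling above (taken from the module docstring):
#   print(1, 2, 3, end="")  ->  print 1, 2, 3,; sys.stdout.write("")
# i.e. a trailing comma suppresses the newline and the explicit end string is
# written separately, importing sys when no file argument was given.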
def replace_print(pos, opts, old_node=None):
u"""
Replace old_node with a new statement.
Also hacks in the "end" functionality.
"""
new_node = new_print(*pos, **opts)
end = None if u"end" not in opts else opts[u"end"].clone()
file = None if u"file" not in opts else opts[u"file"].clone()
if old_node is None:
parent = Node(syms.simple_stmt, [Leaf(token.NEWLINE, u"\n")])
i = 0
else:
parent = old_node.parent
i = old_node.remove()
parent.insert_child(i, new_node)
if end is not None and not (end.type == token.STRING and
end.value in (u"'\\n'", u'"\\n"')):
add_end_part(end, file, parent, i + 1)
return new_node
def new_print(*pos, **opts):
u"""
Constructs a new print_stmt node
args is all positional arguments passed to print()
kwargs contains zero or more of the following mappings:
'sep': some string
'file': some file-like object that supports the write() method
'end': some string
"""
children = [Name(u"print")]
sep = None if u"sep" not in opts else opts[u"sep"]
file = None if u"file" not in opts else opts[u"file"]
end = None if u"end" not in opts else opts[u"end"]
add_file_part(file, children)
add_sep_part(sep, pos, children)
if end is not None and not isNone(end):
if not end.value in (u'"\\n"', u"'\\n'"):
children.append(Comma())
return Node(syms.print_stmt, children)
def map_printargs(args):
u"""
Accepts a list of all nodes in the print call's trailer.
Returns {'pos':[all,pos,args], 'sep':sep, 'end':end, 'file':file}
"""
printargs = [arg for arg in gen_printargs(args)]
mapping = {}
pos = []
for arg in printargs:
if arg.type == syms.argument:
kids = arg.children
assert kids[0].type == token.NAME, repr(arg)
assert len(kids) > 1, repr(arg)
assert unicode(kids[0].value) in (u"sep", u"end", u"file")
assert unicode(kids[0].value) not in mapping, mapping
mapping[unicode(kids[0].value)] = kids[2]
elif arg.type == token.STAR:
return (None, None)
else:
pos.append(arg)
return (pos, mapping)
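# For a call such as print(spam, ham, sep=s, file=f), map_printargs returns
# pos == [<spam node>, <ham node>] and mapping == {u'sep': <s node>,
# u'file': <f node>}; a *args argument makes it return (None, None) so that
# transform() can refuse the conversion.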
class FixPrint(fixer_base.BaseFix):
PATTERN = u"""
power< 'print' parens=trailer < '(' args=any* ')' > any* >
"""
def match(self, node):
u"""
Since the tree needs to be fixed once and only once if and only if it
matches, then we can start discarding matches after we make the first.
"""
return super(FixPrint, self).match(node)
def transform(self, node, results):
args = results.get(u"args")
if not args:
parens = results.get(u"parens")
parens.remove()
return
pos, opts = map_printargs(args)
if pos is None or opts is None:
self.cannot_convert(
node, u"-fprint does not support argument unpacking. fix using -xprint and then again with -fprintfunction.")
return
if u"file" in opts and \
u"end" in opts and \
opts[u"file"].type != token.NAME:
self.warning(opts[u"file"], u"file is not a variable name; "
u"print fixer suggests to bind the file to a variable "
u"name first before passing it to print function")
try:
with warnings.catch_warnings(record=True) as w:
new_node = replace_print(pos, opts, old_node=node)
if len(w) > 0:
self.warning(
node, u"coercing to unicode even though this may be a bytes object")
except AttributeError:
# Python 2.5 doesn't have warnings.catch_warnings, so we're in
# Python 2.5 code here...
new_node = replace_print(
pos, dict([(str(k), opts[k]) for k in opts]), old_node=node)
new_node.prefix = node.prefix
|
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import os
from StringIO import StringIO
from django.core.exceptions import ValidationError # noqa
import django.template
from django.template import defaultfilters
from horizon import forms
from horizon.test import helpers as test
from horizon.utils.babel_extract_angular import extract_angular
from horizon.utils import filters
# we have to import the filter in order to register it
from horizon.utils.filters import parse_isotime # noqa
from horizon.utils import functions
from horizon.utils import memoized
from horizon.utils import secret_key
from horizon.utils import units
from horizon.utils import validators
class ValidatorsTests(test.TestCase):
def test_validate_ipv4_cidr(self):
GOOD_CIDRS = ("192.168.1.1/16",
"192.0.0.1/17",
"0.0.0.0/16",
"10.144.11.107/4",
"255.255.255.255/0",
"0.1.2.3/16",
"0.0.0.0/32",
# short form
"128.0/16",
"128/4")
BAD_CIDRS = ("255.255.255.256\\",
"256.255.255.255$",
"1.2.3.4.5/41",
"0.0.0.0/99",
"127.0.0.1/",
"127.0.0.1/33",
"127.0.0.1/-1",
"127.0.0.1/100",
# some valid IPv6 addresses
"fe80::204:61ff:254.157.241.86/4",
"fe80::204:61ff:254.157.241.86/0",
"2001:0DB8::CD30:0:0:0:0/60",
"2001:0DB8::CD30:0/90")
ip = forms.IPField(mask=True, version=forms.IPv4)
for cidr in GOOD_CIDRS:
self.assertIsNone(ip.validate(cidr))
for cidr in BAD_CIDRS:
self.assertRaises(ValidationError, ip.validate, cidr)
def test_validate_ipv6_cidr(self):
GOOD_CIDRS = ("::ffff:0:0/56",
"2001:0db8::1428:57ab/17",
"FEC0::/10",
"fe80::204:61ff:254.157.241.86/4",
"fe80::204:61ff:254.157.241.86/0",
"2001:0DB8::CD30:0:0:0:0/60",
"2001:0DB8::CD30:0/90",
"::1/128")
BAD_CIDRS = ("1111:2222:3333:4444:::/",
"::2222:3333:4444:5555:6666:7777:8888:\\",
":1111:2222:3333:4444::6666:1.2.3.4/1000",
"1111:2222::4444:5555:6666::8888@",
"1111:2222::4444:5555:6666:8888/",
"::ffff:0:0/129",
"1.2.3.4:1111:2222::5555//22",
"fe80::204:61ff:254.157.241.86/200",
# some valid IPv4 addresses
"10.144.11.107/4",
"255.255.255.255/0",
"0.1.2.3/16")
ip = forms.IPField(mask=True, version=forms.IPv6)
for cidr in GOOD_CIDRS:
self.assertIsNone(ip.validate(cidr))
for cidr in BAD_CIDRS:
self.assertRaises(ValidationError, ip.validate, cidr)
def test_validate_mixed_cidr(self):
GOOD_CIDRS = ("::ffff:0:0/56",
"2001:0db8::1428:57ab/17",
"FEC0::/10",
"fe80::204:61ff:254.157.241.86/4",
"fe80::204:61ff:254.157.241.86/0",
"2001:0DB8::CD30:0:0:0:0/60",
"0.0.0.0/16",
"10.144.11.107/4",
"255.255.255.255/0",
"0.1.2.3/16",
# short form
"128.0/16",
"10/4")
BAD_CIDRS = ("1111:2222:3333:4444::://",
"::2222:3333:4444:5555:6666:7777:8888:",
":1111:2222:3333:4444::6666:1.2.3.4/1/1",
"1111:2222::4444:5555:6666::8888\\2",
"1111:2222::4444:5555:6666:8888/",
"1111:2222::4444:5555:6666::8888/130",
"127.0.0.1/",
"127.0.0.1/33",
"127.0.0.1/-1")
ip = forms.IPField(mask=True, version=forms.IPv4 | forms.IPv6)
for cidr in GOOD_CIDRS:
self.assertIsNone(ip.validate(cidr))
for cidr in BAD_CIDRS:
self.assertRaises(ValidationError, ip.validate, cidr)
def test_validate_IPs(self):
GOOD_IPS_V4 = ("0.0.0.0",
"10.144.11.107",
"169.144.11.107",
"172.100.11.107",
"255.255.255.255",
"0.1.2.3")
GOOD_IPS_V6 = ("",
"::ffff:0:0",
"2001:0db8::1428:57ab",
"FEC0::",
"fe80::204:61ff:254.157.241.86",
"fe80::204:61ff:254.157.241.86",
"2001:0DB8::CD30:0:0:0:0")
BAD_IPS_V4 = ("1111:2222:3333:4444:::",
"::2222:3333:4444:5555:6666:7777:8888:",
":1111:2222:3333:4444::6666:1.2.3.4",
"1111:2222::4444:5555:6666::8888",
"1111:2222::4444:5555:6666:8888/",
"1111:2222::4444:5555:6666::8888/130",
"127.0.0.1/",
"127.0.0.1/33",
"127.0.0.1/-1")
BAD_IPS_V6 = ("1111:2222:3333:4444:::",
"::2222:3333:4444:5555:6666:7777:8888:",
":1111:2222:3333:4444::6666:1.2.3.4",
"1111:2222::4444:5555:6666::8888",
"1111:2222::4444:5555:6666:8888/",
"1111:2222::4444:5555:6666::8888/130")
ipv4 = forms.IPField(required=True, version=forms.IPv4)
ipv6 = forms.IPField(required=False, version=forms.IPv6)
ipmixed = forms.IPField(required=False,
version=forms.IPv4 | forms.IPv6)
for ip_addr in GOOD_IPS_V4:
self.assertIsNone(ipv4.validate(ip_addr))
self.assertIsNone(ipmixed.validate(ip_addr))
for ip_addr in GOOD_IPS_V6:
self.assertIsNone(ipv6.validate(ip_addr))
self.assertIsNone(ipmixed.validate(ip_addr))
for ip_addr in BAD_IPS_V4:
self.assertRaises(ValidationError, ipv4.validate, ip_addr)
self.assertRaises(ValidationError, ipmixed.validate, ip_addr)
for ip_addr in BAD_IPS_V6:
self.assertRaises(ValidationError, ipv6.validate, ip_addr)
self.assertRaises(ValidationError, ipmixed.validate, ip_addr)
self.assertRaises(ValidationError, ipv4.validate, "") # required=True
iprange = forms.IPField(required=False,
mask=True,
mask_range_from=10,
version=forms.IPv4 | forms.IPv6)
self.assertRaises(ValidationError, iprange.validate,
"fe80::204:61ff:254.157.241.86/6")
self.assertRaises(ValidationError, iprange.validate,
"169.144.11.107/8")
self.assertIsNone(iprange.validate("fe80::204:61ff:254.157.241.86/36"))
self.assertIsNone(iprange.validate("169.144.11.107/18"))
def test_validate_multi_ip_field(self):
GOOD_CIDRS_INPUT = ("192.168.1.1/16, 192.0.0.1/17",)
BAD_CIDRS_INPUT = ("1.2.3.4.5/41,0.0.0.0/99",
"1.2.3.4.5/41;0.0.0.0/99",
"1.2.3.4.5/41 0.0.0.0/99",
"192.168.1.1/16 192.0.0.1/17")
ip = forms.MultiIPField(mask=True, version=forms.IPv4)
for cidr in GOOD_CIDRS_INPUT:
self.assertIsNone(ip.validate(cidr))
for cidr in BAD_CIDRS_INPUT:
self.assertRaises(ValidationError, ip.validate, cidr)
def test_port_validator(self):
VALID_PORTS = (-1, 65535)
INVALID_PORTS = (-2, 65536)
for port in VALID_PORTS:
self.assertIsNone(validators.validate_port_range(port))
for port in INVALID_PORTS:
self.assertRaises(ValidationError,
validators.validate_port_range,
port)
def test_ip_proto_validator(self):
VALID_PROTO = (-1, 255)
INVALID_PROTO = (-2, 256)
for proto in VALID_PROTO:
self.assertIsNone(validators.validate_ip_protocol(proto))
for proto in INVALID_PROTO:
self.assertRaises(ValidationError,
validators.validate_ip_protocol,
proto)
def test_port_range_validator(self):
VALID_RANGE = ('1:65535',
'-1:-1')
INVALID_RANGE = ('22:22:22:22',
'-1:65536')
test_call = validators.validate_port_or_colon_separated_port_range
for prange in VALID_RANGE:
self.assertIsNone(test_call(prange))
for prange in INVALID_RANGE:
self.assertRaises(ValidationError, test_call, prange)
def test_metadata_validator(self):
VALID_METADATA = (
"key1=val1", "key1=val1,key2=val2",
"key1=val1,key2=val2,key3=val3", "key1="
)
INVALID_METADATA = (
"key1==val1", "key1=val1,", "=val1",
"=val1", " "
)
for mdata in VALID_METADATA:
self.assertIsNone(validators.validate_metadata(mdata))
for mdata in INVALID_METADATA:
self.assertRaises(ValidationError,
validators.validate_metadata,
mdata)
class SecretKeyTests(test.TestCase):
def test_generate_secret_key(self):
key = secret_key.generate_key(32)
self.assertEqual(32, len(key))
self.assertNotEqual(key, secret_key.generate_key(32))
def test_generate_or_read_key_from_file(self):
key_file = ".test_secret_key_store"
key = secret_key.generate_or_read_from_file(key_file)
# Consecutive reads should come from the already existing file:
self.assertEqual(secret_key.generate_or_read_from_file(key_file), key)
        # Key file should be readable/writable by its owner only:
self.assertEqual("0600", oct(os.stat(key_file).st_mode & 0o777))
os.chmod(key_file, 0o644)
self.assertRaises(secret_key.FilePermissionError,
secret_key.generate_or_read_from_file, key_file)
os.remove(key_file)
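        # As asserted above, generate_or_read_from_file() persists the key with
        # owner-only (0600) permissions and raises FilePermissionError rather
        # than read a key file whose permissions have been loosened.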
class FiltersTests(test.TestCase):
def test_replace_underscore_filter(self):
res = filters.replace_underscores("__under_score__")
self.assertEqual(" under score ", res)
def test_parse_isotime_filter(self):
c = django.template.Context({'time': ''})
t = django.template.Template('{{ time|parse_isotime }}')
output = u""
self.assertEqual(output, t.render(c))
c = django.template.Context({'time': 'error'})
t = django.template.Template('{{ time|parse_isotime }}')
output = u""
self.assertEqual(output, t.render(c))
c = django.template.Context({'time': 'error'})
t = django.template.Template('{{ time|parse_isotime:"test" }}')
output = u"test"
self.assertEqual(output, t.render(c))
c = django.template.Context({'time': '2007-03-04T21:08:12'})
t = django.template.Template('{{ time|parse_isotime:"test" }}')
output = u"March 4, 2007, 3:08 p.m."
self.assertEqual(output, t.render(c))
adate = '2007-01-25T12:00:00Z'
result = filters.parse_isotime(adate)
self.assertIsInstance(result, datetime.datetime)
class TimeSinceNeverFilterTests(test.TestCase):
default = u"Never"
def test_timesince_or_never_returns_default_for_empty_string(self):
c = django.template.Context({'time': ''})
t = django.template.Template('{{ time|timesince_or_never }}')
self.assertEqual(self.default, t.render(c))
def test_timesince_or_never_returns_default_for_none(self):
c = django.template.Context({'time': None})
t = django.template.Template('{{ time|timesince_or_never }}')
self.assertEqual(self.default, t.render(c))
def test_timesince_or_never_returns_default_for_gibberish(self):
c = django.template.Context({'time': django.template.Context()})
t = django.template.Template('{{ time|timesince_or_never }}')
self.assertEqual(self.default, t.render(c))
def test_timesince_or_never_returns_with_custom_default(self):
custom = "Hello world"
c = django.template.Context({'date': ''})
t = django.template.Template('{{ date|timesince_or_never:"%s" }}'
% custom)
self.assertEqual(custom, t.render(c))
def test_timesince_or_never_returns_with_custom_empty_string_default(self):
c = django.template.Context({'date': ''})
t = django.template.Template('{{ date|timesince_or_never:"" }}')
self.assertEqual("", t.render(c))
def test_timesince_or_never_returns_same_output_as_django_date(self):
d = datetime.date(year=2014, month=3, day=7)
c = django.template.Context({'date': d})
t = django.template.Template('{{ date|timesince_or_never }}')
self.assertEqual(defaultfilters.timesince(d), t.render(c))
def test_timesince_or_never_returns_same_output_as_django_datetime(self):
now = datetime.datetime.now()
c = django.template.Context({'date': now})
t = django.template.Template('{{ date|timesince_or_never }}')
self.assertEqual(defaultfilters.timesince(now), t.render(c))
class MemoizedTests(test.TestCase):
def test_memoized_decorator_cache_on_next_call(self):
values_list = []
@memoized.memoized
def cache_calls(remove_from):
values_list.append(remove_from)
return True
def non_cached_calls(remove_from):
values_list.append(remove_from)
return True
for x in range(0, 5):
non_cached_calls(1)
self.assertEqual(5, len(values_list))
values_list = []
for x in range(0, 5):
cache_calls(1)
self.assertEqual(1, len(values_list))
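    # The contrast above is the point of @memoized.memoized: the undecorated
    # helper appends on every call (5 entries), while the memoized one only
    # executes its body on the first call for a given argument and serves the
    # cached result afterwards (1 entry).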
class GetPageSizeTests(test.TestCase):
def test_bad_session_value(self):
requested_url = '/project/instances/'
request = self.factory.get(requested_url)
request.session['horizon_pagesize'] = 'not int-able'
default = 30
self.assertEqual(functions.get_page_size(request, default), default)
def test_bad_cookie_value(self):
requested_url = '/project/instances/'
request = self.factory.get(requested_url)
if 'horizon_pagesize' in request.session:
del request.session['horizon_pagesize']
request.COOKIES['horizon_pagesize'] = 'not int-able'
default = 30
self.assertEqual(functions.get_page_size(request, default), default)
def test_float_default_value(self):
requested_url = '/project/instances/'
request = self.factory.get(requested_url)
request.session['horizon_pagesize'] = 'not int-able'
default = 30.1
expected = 30
self.assertEqual(functions.get_page_size(request, default), expected)
def test_session_gets_set(self):
requested_url = '/project/instances/'
request = self.factory.get(requested_url)
request.session['horizon_pagesize'] = 'not int-able'
default = 30
functions.get_page_size(request, default)
self.assertEqual(request.session['horizon_pagesize'], default)
def test_bad_default_value(self):
requested_url = '/project/instances/'
request = self.factory.get(requested_url)
request.session['horizon_pagesize'] = 'not int-able'
default = 'also not int-able'
self.assertRaises(ValueError,
functions.get_page_size,
request, default)
class UnitsTests(test.TestCase):
def test_is_supported(self):
self.assertTrue(units.is_supported('MB'))
self.assertTrue(units.is_supported('min'))
self.assertFalse(units.is_supported('KWh'))
self.assertFalse(units.is_supported('unknown_unit'))
def test_is_larger(self):
self.assertTrue(units.is_larger('KB', 'B'))
self.assertTrue(units.is_larger('MB', 'B'))
self.assertTrue(units.is_larger('GB', 'B'))
self.assertTrue(units.is_larger('TB', 'B'))
self.assertTrue(units.is_larger('GB', 'MB'))
self.assertFalse(units.is_larger('B', 'KB'))
self.assertFalse(units.is_larger('MB', 'GB'))
self.assertTrue(units.is_larger('min', 's'))
self.assertTrue(units.is_larger('hr', 'min'))
self.assertTrue(units.is_larger('hr', 's'))
self.assertFalse(units.is_larger('s', 'min'))
def test_convert(self):
self.assertEqual(units.convert(4096, 'MB', 'GB'), (4, 'GB'))
self.assertEqual(units.convert(4, 'GB', 'MB'), (4096, 'MB'))
self.assertEqual(units.convert(1.5, 'hr', 'min'), (90, 'min'))
self.assertEqual(units.convert(12, 'hr', 'day'), (0.5, 'day'))
def test_normalize(self):
self.assertEqual(units.normalize(1, 'B'), (1, 'B'))
self.assertEqual(units.normalize(1000, 'B'), (1000, 'B'))
self.assertEqual(units.normalize(1024, 'B'), (1, 'KB'))
self.assertEqual(units.normalize(1024 * 1024, 'B'), (1, 'MB'))
self.assertEqual(units.normalize(10 * 1024 ** 3, 'B'), (10, 'GB'))
self.assertEqual(units.normalize(1000 * 1024 ** 4, 'B'), (1000, 'TB'))
self.assertEqual(units.normalize(1024, 'KB'), (1, 'MB'))
self.assertEqual(units.normalize(1024 ** 2, 'KB'), (1, 'GB'))
self.assertEqual(units.normalize(10 * 1024, 'MB'), (10, 'GB'))
self.assertEqual(units.normalize(0.5, 'KB'), (512, 'B'))
self.assertEqual(units.normalize(0.0001, 'MB'), (104.9, 'B'))
self.assertEqual(units.normalize(1, 's'), (1, 's'))
self.assertEqual(units.normalize(120, 's'), (2, 'min'))
self.assertEqual(units.normalize(3600, 's'), (60, 'min'))
self.assertEqual(units.normalize(3600 * 24, 's'), (24, 'hr'))
self.assertEqual(units.normalize(10 * 3600 * 24, 's'), (10, 'day'))
self.assertEqual(units.normalize(90, 'min'), (90, 'min'))
self.assertEqual(units.normalize(150, 'min'), (2.5, 'hr'))
self.assertEqual(units.normalize(60 * 24, 'min'), (24, 'hr'))
self.assertEqual(units.normalize(0.5, 'day'), (12, 'hr'))
self.assertEqual(units.normalize(10800000000000, 'ns'), (3, 'hr'))
self.assertEqual(units.normalize(14, 'day'), (2, 'week'))
self.assertEqual(units.normalize(91, 'day'), (3, 'month'))
self.assertEqual(units.normalize(18, 'month'), (18, 'month'))
self.assertEqual(units.normalize(24, 'month'), (2, 'year'))
self.assertEqual(units.normalize(1, 'unknown_unit'),
(1, 'unknown_unit'))
default_keys = []
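# extract_angular yields tuples in the standard Babel extractor format,
# (lineno, funcname, message, comments), which is the shape asserted
# throughout the tests below. Both <translate> elements and elements carrying
# a bare "translate" attribute are extracted, while interpolation markers of
# the form {$expr$} are normalized to %(expr) in the extracted message.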
class ExtractAngularTestCase(test.TestCase):
def test_extract_no_tags(self):
buf = StringIO('<html></html>')
messages = list(extract_angular(buf, default_keys, [], {}))
self.assertEqual([], messages)
def test_simple_string(self):
buf = StringIO(
"""<html><translate>hello world!</translate>'
<div translate>hello world!</div></html>"""
)
messages = list(extract_angular(buf, default_keys, [], {}))
self.assertEqual(
[
(1, u'gettext', 'hello world!', []),
(2, u'gettext', 'hello world!', [])
],
messages)
def test_interpolation(self):
buf = StringIO(
"""<html>
<translate>hello {$name$}!</translate>
<div translate>hello {$name$}!</div>
</html>
"""
)
messages = list(extract_angular(buf, default_keys, [], {}))
self.assertEqual(
[
(2, u'gettext', 'hello %(name)!', []),
(3, u'gettext', 'hello %(name)!', [])
], messages)
def test_interpolation_func_call(self):
buf = StringIO(
"""<html><div translate>hello {$func(name)$}!</div>
'<translate>hello {$func(name)$}!</translate>"""
)
messages = list(extract_angular(buf, default_keys, [], {}))
self.assertEqual(
[
(1, u'gettext', 'hello %(func(name))!', []),
(2, u'gettext', 'hello %(func(name))!', [])
],
messages)
def test_interpolation_list(self):
buf = StringIO(
"""<html><div translate>hello {$name[1]$}!</div>
<translate>hello {$name[1]$}!</translate></html>"""
)
messages = list(extract_angular(buf, default_keys, [], {}))
self.assertEqual(
[
(1, 'gettext', 'hello %(name[1])!', []),
(2, 'gettext', 'hello %(name[1])!', [])
],
messages)
def test_interpolation_dict(self):
buf = StringIO(
"""<html><div translate>hello {$name['key']$}!</div>
<translate>hello {$name['key']$}!</translate></html>"""
)
messages = list(extract_angular(buf, default_keys, [], {}))
self.assertEqual(
[
(1, 'gettext', r"hello %(name['key'])!", []),
(2, 'gettext', r"hello %(name['key'])!", [])
],
messages)
def test_interpolation_dict_double_quote(self):
buf = StringIO(
"""<html><div translate>hello {$name["key"]$}!</div>
<translate>hello {$name["key"]$}!</translate></html>""")
messages = list(extract_angular(buf, default_keys, [], {}))
self.assertEqual(
[
(1, 'gettext', r'hello %(name["key"])!', []),
(2, 'gettext', r'hello %(name["key"])!', [])
],
messages)
def test_interpolation_object(self):
buf = StringIO(
"""<html><div translate>hello {$name.attr$}!</div>
<translate>hello {$name.attr$}!</translate></html>""")
messages = list(extract_angular(buf, default_keys, [], {}))
self.assertEqual(
[
(1, 'gettext', 'hello %(name.attr)!', []),
(2, 'gettext', 'hello %(name.attr)!', [])
],
messages)
def test_interpolation_spaces(self):
"""Spaces are not valid in interpolation expressions, but we don't
currently complain about them
"""
buf = StringIO("""<html><div translate>hello {$name attr$}!</div>
<translate>hello {$name attr$}!</translate></html>""")
messages = list(extract_angular(buf, default_keys, [], {}))
self.assertEqual(
[
(1, 'gettext', 'hello {$name attr$}!', []),
(2, 'gettext', 'hello {$name attr$}!', [])
],
messages)
def test_attr_value(self):
"""We should not translate tags that have translate as the value of an
attribute.
"""
buf = StringIO('<html><div id="translate">hello world!</div>')
messages = list(extract_angular(buf, [], [], {}))
self.assertEqual([], messages)
def test_attr_value_plus_directive(self):
"""Unless they also have a translate directive.
"""
buf = StringIO(
'<html><div id="translate" translate>hello world!</div>')
messages = list(extract_angular(buf, [], [], {}))
self.assertEqual([(1, 'gettext', 'hello world!', [])], messages)
def test_translate_tag(self):
buf = StringIO('<html><translate>hello world!</translate>')
messages = list(extract_angular(buf, [], [], {}))
self.assertEqual([(1, 'gettext', 'hello world!', [])], messages)
|
|
import requests
import json
import time
from collections import OrderedDict
from test_framework.test_framework import OpenBazaarTestFramework, TestFailure
class DisputeCloseSplitTest(OpenBazaarTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 3
def run_test(self):
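        """End-to-end flow for a 50/50 dispute resolution.

        Fund Bob's wallet, register Charlie as a moderator, have Alice post a
        moderated listing, let Bob purchase and fund the order, open a dispute
        as Bob, close it as Charlie with an even buyer/vendor split, release
        the funds, and verify both parties record the payout and final states.
        """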
alice = self.nodes[0]
bob = self.nodes[1]
charlie = self.nodes[2]
# generate some coins and send them to bob
generated_coins = 10
time.sleep(4)
api_url = bob["gateway_url"] + "wallet/address"
r = requests.get(api_url)
if r.status_code == 200:
resp = json.loads(r.text)
address = resp["address"]
elif r.status_code == 404:
raise TestFailure("DisputeCloseSplitTest - FAIL: Address endpoint not found")
else:
raise TestFailure("DisputeCloseSplitTest - FAIL: Unknown response")
self.send_bitcoin_cmd("sendtoaddress", address, generated_coins)
time.sleep(20)
# create a profile for charlie
pro = {"name": "Charlie"}
api_url = charlie["gateway_url"] + "ob/profile"
r = requests.post(api_url, data=json.dumps(pro, indent=4))
if r.status_code == 404:
raise TestFailure("DisputeCloseSplitTest - FAIL: Profile post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("DisputeCloseSplitTest - FAIL: Profile POST failed. Reason: %s", resp["reason"])
time.sleep(4)
# make charlie a moderator
with open('testdata/moderation.json') as listing_file:
moderation_json = json.load(listing_file, object_pairs_hook=OrderedDict)
api_url = charlie["gateway_url"] + "ob/moderator"
r = requests.put(api_url, data=json.dumps(moderation_json, indent=4))
if r.status_code == 404:
raise TestFailure("DisputeCloseSplitTest - FAIL: Moderator post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("DisputeCloseSplitTest - FAIL: Moderator POST failed. Reason: %s", resp["reason"])
moderatorId = charlie["peerId"]
time.sleep(4)
# post listing to alice
with open('testdata/listing.json') as listing_file:
listing_json = json.load(listing_file, object_pairs_hook=OrderedDict)
listing_json["moderators"] = [moderatorId]
api_url = alice["gateway_url"] + "ob/listing"
r = requests.post(api_url, data=json.dumps(listing_json, indent=4))
if r.status_code == 404:
raise TestFailure("DisputeCloseSplitTest - FAIL: Listing post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("DisputeCloseSplitTest - FAIL: Listing POST failed. Reason: %s", resp["reason"])
time.sleep(4)
# get listing hash
api_url = alice["gateway_url"] + "ipns/" + alice["peerId"] + "/listings/index.json"
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("DisputeCloseSplitTest - FAIL: Couldn't get listing index")
resp = json.loads(r.text)
listingId = resp[0]["hash"]
# bob send order
with open('testdata/order_direct.json') as order_file:
order_json = json.load(order_file, object_pairs_hook=OrderedDict)
order_json["items"][0]["listingHash"] = listingId
order_json["moderator"] = moderatorId
api_url = bob["gateway_url"] + "ob/purchase"
r = requests.post(api_url, data=json.dumps(order_json, indent=4))
if r.status_code == 404:
raise TestFailure("DisputeCloseSplitTest - FAIL: Purchase post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
self.print_logs(alice, "ob.log")
raise TestFailure("DisputeCloseSplitTest - FAIL: Purchase POST failed. Reason: %s", resp["reason"])
resp = json.loads(r.text)
orderId = resp["orderId"]
payment_address = resp["paymentAddress"]
payment_amount = resp["amount"]
# check the purchase saved correctly
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("DisputeCloseSplitTest - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text)
if resp["state"] != "CONFIRMED":
raise TestFailure("DisputeCloseSplitTest - FAIL: Bob purchase saved in incorrect state")
if resp["funded"] == True:
raise TestFailure("DisputeCloseSplitTest - FAIL: Bob incorrectly saved as funded")
# check the sale saved correctly
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("DisputeCloseSplitTest - FAIL: Couldn't load order from Alice")
resp = json.loads(r.text)
if resp["state"] != "CONFIRMED":
raise TestFailure("DisputeCloseSplitTest - FAIL: Alice purchase saved in incorrect state")
if resp["funded"] == True:
raise TestFailure("DisputeCloseSplitTest - FAIL: Alice incorrectly saved as funded")
# fund order
spend = {
"address": payment_address,
"amount": payment_amount,
"feeLevel": "NORMAL"
}
api_url = bob["gateway_url"] + "wallet/spend"
r = requests.post(api_url, data=json.dumps(spend, indent=4))
if r.status_code == 404:
raise TestFailure("DisputeCloseSplitTest - FAIL: Spend post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("DisputeCloseSplitTest - FAIL: Spend POST failed. Reason: %s", resp["reason"])
time.sleep(20)
# check bob detected payment
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("DisputeCloseSplitTest - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text)
if resp["state"] != "FUNDED":
raise TestFailure("DisputeCloseSplitTest - FAIL: Bob failed to detect his payment")
if resp["funded"] == False:
raise TestFailure("DisputeCloseSplitTest - FAIL: Bob incorrectly saved as unfunded")
# check alice detected payment
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("DisputeCloseSplitTest - FAIL: Couldn't load order from Alice")
resp = json.loads(r.text)
if resp["state"] != "FUNDED":
raise TestFailure("DisputeCloseSplitTest - FAIL: Alice failed to detect payment")
if resp["funded"] == False:
raise TestFailure("DisputeCloseSplitTest - FAIL: Alice incorrectly saved as unfunded")
# Bob open dispute
dispute = {
"orderId": orderId,
"claim": "Bastard ripped me off"
}
api_url = bob["gateway_url"] + "ob/opendispute/"
r = requests.post(api_url, data=json.dumps(dispute, indent=4))
if r.status_code == 404:
raise TestFailure("DisputeCloseSplitTest - FAIL: OpenDispute post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("DisputeCloseSplitTest - FAIL: OpenDispute POST failed. Reason: %s", resp["reason"])
time.sleep(4)
# Bob check dispute opened correctly
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("DisputeCloseSplitTest - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text)
if resp["state"] != "DISPUTED":
raise TestFailure("DisputeCloseSplitTest - FAIL: Bob failed to detect his dispute")
# Alice check dispute opened correctly
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("DisputeCloseSplitTest - FAIL: Couldn't load order from Alice")
resp = json.loads(r.text)
if resp["state"] != "DISPUTED":
raise TestFailure("DisputeCloseSplitTest - FAIL: Alice failed to detect the dispute")
# Charlie check dispute opened correctly
api_url = charlie["gateway_url"] + "ob/case/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("DisputeCloseSplitTest - FAIL: Couldn't load case from Clarlie")
resp = json.loads(r.text, object_pairs_hook=OrderedDict)
if resp["state"] != "DISPUTED":
raise TestFailure("DisputeCloseSplitTest - FAIL: Charlie failed to detect the dispute")
# Charlie close dispute
dispute_resolution = {
"OrderID": orderId,
"Resolution": "I'm siding with Bob",
"BuyerPercentage": 50,
"VendorPercentage": 50
}
api_url = charlie["gateway_url"] + "ob/closedispute/"
r = requests.post(api_url, data=json.dumps(dispute_resolution, indent=4))
if r.status_code == 404:
raise TestFailure("DisputeCloseSplitTest - FAIL: CloseDispute post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("DisputeCloseSplitTest - FAIL: CloseDispute POST failed. Reason: %s", resp["reason"])
time.sleep(4)
# Alice check dispute closed correctly
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("DisputeCloseSplitTest - FAIL: Couldn't load order from Alice")
resp = json.loads(r.text)
if resp["state"] != "DECIDED":
self.print_logs(alice, "ob.log")
raise TestFailure("DisputeCloseSplitTest - FAIL: Alice failed to detect the dispute resolution")
# Bob check dispute closed correctly
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("DisputeCloseSplitTest - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text, object_pairs_hook=OrderedDict)
if resp["state"] != "DECIDED":
raise TestFailure("DisputeCloseSplitTest - FAIL: Bob failed to detect the dispute resolution")
# Charlie check dispute closed correctly
api_url = charlie["gateway_url"] + "ob/case/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("DisputeCloseSplitTest - FAIL: Couldn't load case from Clarlie")
resp = json.loads(r.text, object_pairs_hook=OrderedDict)
if resp["state"] != "RESOLVED":
raise TestFailure("DisputeCloseSplitTest - FAIL: Charlie failed to detect the dispute resolution")
        # Bob releases funds
release = {
"OrderID": orderId,
}
api_url = bob["gateway_url"] + "ob/releasefunds/"
r = requests.post(api_url, data=json.dumps(release, indent=4))
if r.status_code == 404:
raise TestFailure("DisputeCloseSplitTest - FAIL: ReleaseFunds post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("DisputeCloseSplitTest - FAIL: ReleaseFunds POST failed. Reason: %s", resp["reason"])
time.sleep(20)
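        # The balance checks below treat wallet/balance's confirmed and
        # unconfirmed fields and payment_amount as integer satoshis, hence
        # generated_coins (in BTC) is scaled by 100000000 (1e8 satoshi/BTC).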
# Check bob received payout
api_url = bob["gateway_url"] + "wallet/balance"
r = requests.get(api_url)
if r.status_code == 200:
resp = json.loads(r.text)
confirmed = int(resp["confirmed"])
unconfirmed = int(resp["unconfirmed"])
if confirmed + unconfirmed <= (generated_coins*100000000) - payment_amount:
raise TestFailure("DisputeCloseSplitTest - FAIL: Bob failed to detect dispute payout")
elif r.status_code == 404:
raise TestFailure("DisputeCloseSplitTest - FAIL: Receive coins endpoint not found")
else:
raise TestFailure("DisputeCloseSplitTest - FAIL: Unknown response")
# Check alice received payout
api_url = alice["gateway_url"] + "wallet/balance"
r = requests.get(api_url)
if r.status_code == 200:
resp = json.loads(r.text)
confirmed = int(resp["confirmed"])
unconfirmed = int(resp["unconfirmed"])
if confirmed + unconfirmed <= 0:
raise TestFailure("DisputeCloseSplitTest - FAIL: Alice failed to detect dispute payout")
elif r.status_code == 404:
raise TestFailure("DisputeCloseSplitTest - FAIL: Receive coins endpoint not found")
else:
raise TestFailure("DisputeCloseSplitTest - FAIL: Unknown response")
# Bob check payout transaction recorded
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("DisputeCloseSplitTest - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text, object_pairs_hook=OrderedDict)
if len(resp["transactions"]) != 2:
raise TestFailure("DisputeCloseSplitTest - FAIL: Bob failed to record payout transaction")
if resp["state"] != "RESOLVED":
raise TestFailure("DisputeCloseSplitTest - FAIL: Bob failed to set state to RESOLVED")
# Alice check payout transaction recorded
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("DisputeCloseSplitTest - FAIL: Couldn't load order from Alice")
resp = json.loads(r.text, object_pairs_hook=OrderedDict)
if len(resp["transactions"]) != 2:
raise TestFailure("DisputeCloseSplitTest - FAIL: Alice failed to record payout transaction")
if resp["state"] != "RESOLVED":
raise TestFailure("DisputeCloseSplitTest - FAIL: Alice failed to set state to RESOLVED")
print("DisputeCloseSplitTest - PASS")
if __name__ == '__main__':
print("Running DisputeCloseSplitTest")
DisputeCloseSplitTest().main(["--regtest", "--disableexchangerates"])
|
|
"""
Test cdflib functions versus mpmath, if available.
The following functions still need tests:
- ncfdtr
- ncfdtri
- ncfdtridfn
- ncfdtridfd
- ncfdtrinc
- nbdtrik
- nbdtrin
- nrdtrimn
- nrdtrisd
- pdtrik
- nctdtr
- nctdtrit
- nctdtridf
- nctdtrinc
"""
import itertools
import numpy as np
from numpy.testing import assert_equal
import pytest
import scipy.special as sp
from scipy.special._testutils import (
MissingModule, check_version, FuncData)
from scipy.special._mptestutils import (
Arg, IntArg, get_args, mpf2float, assert_mpmath_equal)
try:
import mpmath
except ImportError:
mpmath = MissingModule('mpmath')
class ProbArg(object):
"""Generate a set of probabilities on [0, 1]."""
def __init__(self):
        # Include the endpoints for compatibility with Arg et al.
self.a = 0
self.b = 1
def values(self, n):
"""Return an array containing approximatively n numbers."""
m = max(1, n//3)
v1 = np.logspace(-30, np.log10(0.3), m)
v2 = np.linspace(0.3, 0.7, m + 1, endpoint=False)[1:]
v3 = 1 - np.logspace(np.log10(0.3), -15, m)
v = np.r_[v1, v2, v3]
return np.unique(v)
class EndpointFilter(object):
def __init__(self, a, b, rtol, atol):
self.a = a
self.b = b
self.rtol = rtol
self.atol = atol
def __call__(self, x):
mask1 = np.abs(x - self.a) < self.rtol*np.abs(self.a) + self.atol
mask2 = np.abs(x - self.b) < self.rtol*np.abs(self.b) + self.atol
return np.where(mask1 | mask2, False, True)
class _CDFData(object):
def __init__(self, spfunc, mpfunc, index, argspec, spfunc_first=True,
dps=20, n=5000, rtol=None, atol=None,
endpt_rtol=None, endpt_atol=None):
self.spfunc = spfunc
self.mpfunc = mpfunc
self.index = index
self.argspec = argspec
self.spfunc_first = spfunc_first
self.dps = dps
self.n = n
self.rtol = rtol
self.atol = atol
if not isinstance(argspec, list):
self.endpt_rtol = None
self.endpt_atol = None
elif endpt_rtol is not None or endpt_atol is not None:
if isinstance(endpt_rtol, list):
self.endpt_rtol = endpt_rtol
else:
self.endpt_rtol = [endpt_rtol]*len(self.argspec)
if isinstance(endpt_atol, list):
self.endpt_atol = endpt_atol
else:
self.endpt_atol = [endpt_atol]*len(self.argspec)
else:
self.endpt_rtol = None
self.endpt_atol = None
def idmap(self, *args):
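        """Round-trip one argument through the scipy/mpmath pair.

        If spfunc_first, evaluate the scipy function, substitute its result
        into position ``index`` and evaluate the mpmath function on the
        modified arguments; otherwise do it in the opposite order. When the
        two functions are inverses of each other, the return value should
        reproduce the original argument at ``index``, which is what check()
        asserts via FuncData.
        """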
if self.spfunc_first:
res = self.spfunc(*args)
if np.isnan(res):
return np.nan
args = list(args)
args[self.index] = res
with mpmath.workdps(self.dps):
res = self.mpfunc(*tuple(args))
# Imaginary parts are spurious
res = mpf2float(res.real)
else:
with mpmath.workdps(self.dps):
res = self.mpfunc(*args)
res = mpf2float(res.real)
args = list(args)
args[self.index] = res
res = self.spfunc(*tuple(args))
return res
def get_param_filter(self):
if self.endpt_rtol is None and self.endpt_atol is None:
return None
filters = []
for rtol, atol, spec in zip(self.endpt_rtol, self.endpt_atol, self.argspec):
if rtol is None and atol is None:
filters.append(None)
continue
elif rtol is None:
rtol = 0.0
elif atol is None:
atol = 0.0
filters.append(EndpointFilter(spec.a, spec.b, rtol, atol))
return filters
def check(self):
# Generate values for the arguments
args = get_args(self.argspec, self.n)
param_filter = self.get_param_filter()
param_columns = tuple(range(args.shape[1]))
result_columns = args.shape[1]
args = np.hstack((args, args[:,self.index].reshape(args.shape[0], 1)))
FuncData(self.idmap, args,
param_columns=param_columns, result_columns=result_columns,
rtol=self.rtol, atol=self.atol, vectorized=False,
param_filter=param_filter).check()
def _assert_inverts(*a, **kw):
d = _CDFData(*a, **kw)
d.check()
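# The binomial CDF is expressed through the regularized incomplete beta
# function: P(X <= k) = I_{1-p}(n - k, k + 1), which is what mpmath.betainc
# computes below with x2 = 1 - p and regularized=True.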
def _binomial_cdf(k, n, p):
k, n, p = mpmath.mpf(k), mpmath.mpf(n), mpmath.mpf(p)
if k <= 0:
return mpmath.mpf(0)
elif k >= n:
return mpmath.mpf(1)
onemp = mpmath.fsub(1, p, exact=True)
return mpmath.betainc(n - k, k + 1, x2=onemp, regularized=True)
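# F-distribution CDF via the regularized incomplete beta function:
# F(x; dfn, dfd) = I_u(dfn/2, dfd/2) with u = dfn*x / (dfn*x + dfd).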
def _f_cdf(dfn, dfd, x):
if x < 0:
return mpmath.mpf(0)
dfn, dfd, x = mpmath.mpf(dfn), mpmath.mpf(dfd), mpmath.mpf(x)
ub = dfn*x/(dfn*x + dfd)
res = mpmath.betainc(dfn/2, dfd/2, x2=ub, regularized=True)
return res
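# Student t CDF written in terms of the Gauss hypergeometric function:
# F(t; df) = 1/2 + t*Gamma((df+1)/2) / (sqrt(pi*df)*Gamma(df/2))
#            * 2F1(1/2, (df+1)/2; 3/2; -t**2/df).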
def _student_t_cdf(df, t, dps=None):
if dps is None:
dps = mpmath.mp.dps
with mpmath.workdps(dps):
df, t = mpmath.mpf(df), mpmath.mpf(t)
fac = mpmath.hyp2f1(0.5, 0.5*(df + 1), 1.5, -t**2/df)
fac *= t*mpmath.gamma(0.5*(df + 1))
fac /= mpmath.sqrt(mpmath.pi*df)*mpmath.gamma(0.5*df)
return 0.5 + fac
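# Noncentral chi-square density used by the numerical CDF below:
# f(x; df, nc) = 1/2 * exp(-(x + nc)/2) * (x/nc)**(df/4 - 1/2)
#                * I_{df/2 - 1}(sqrt(nc*x)).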
def _noncentral_chi_pdf(t, df, nc):
res = mpmath.besseli(df/2 - 1, mpmath.sqrt(nc*t))
res *= mpmath.exp(-(t + nc)/2)*(t/nc)**(df/4 - 1/2)/2
return res
def _noncentral_chi_cdf(x, df, nc, dps=None):
if dps is None:
dps = mpmath.mp.dps
x, df, nc = mpmath.mpf(x), mpmath.mpf(df), mpmath.mpf(nc)
with mpmath.workdps(dps):
res = mpmath.quad(lambda t: _noncentral_chi_pdf(t, df, nc), [0, x])
return res
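# Tukey lambda quantile, Q(p; lmbda) = (p**lmbda - (1-p)**lmbda) / lmbda for
# lmbda != 0; as lmbda -> 0 this tends to the logistic quantile log(p/(1-p)),
# which is why test_tklmbda_zero_shape compares sp.tklmbda against the
# logistic CDF instead of using this helper.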
def _tukey_lmbda_quantile(p, lmbda):
# For lmbda != 0
return (p**lmbda - (1 - p)**lmbda)/lmbda
@pytest.mark.slow
@check_version(mpmath, '0.19')
class TestCDFlib(object):
@pytest.mark.xfail(run=False)
def test_bdtrik(self):
_assert_inverts(
sp.bdtrik,
_binomial_cdf,
0, [ProbArg(), IntArg(1, 1000), ProbArg()],
rtol=1e-4)
def test_bdtrin(self):
_assert_inverts(
sp.bdtrin,
_binomial_cdf,
1, [IntArg(1, 1000), ProbArg(), ProbArg()],
rtol=1e-4, endpt_atol=[None, None, 1e-6])
def test_btdtria(self):
_assert_inverts(
sp.btdtria,
lambda a, b, x: mpmath.betainc(a, b, x2=x, regularized=True),
0, [ProbArg(), Arg(0, 1e2, inclusive_a=False),
Arg(0, 1, inclusive_a=False, inclusive_b=False)],
rtol=1e-6)
def test_btdtrib(self):
        # Use small values of a, otherwise mpmath doesn't converge
_assert_inverts(
sp.btdtrib,
lambda a, b, x: mpmath.betainc(a, b, x2=x, regularized=True),
1, [Arg(0, 1e2, inclusive_a=False), ProbArg(),
Arg(0, 1, inclusive_a=False, inclusive_b=False)],
rtol=1e-7, endpt_atol=[None, 1e-18, 1e-15])
@pytest.mark.xfail(run=False)
def test_fdtridfd(self):
_assert_inverts(
sp.fdtridfd,
_f_cdf,
1, [IntArg(1, 100), ProbArg(), Arg(0, 100, inclusive_a=False)],
rtol=1e-7)
def test_gdtria(self):
_assert_inverts(
sp.gdtria,
lambda a, b, x: mpmath.gammainc(b, b=a*x, regularized=True),
0, [ProbArg(), Arg(0, 1e3, inclusive_a=False),
Arg(0, 1e4, inclusive_a=False)], rtol=1e-7,
endpt_atol=[None, 1e-7, 1e-10])
def test_gdtrib(self):
        # Use small values of a and x, otherwise mpmath doesn't converge
_assert_inverts(
sp.gdtrib,
lambda a, b, x: mpmath.gammainc(b, b=a*x, regularized=True),
1, [Arg(0, 1e2, inclusive_a=False), ProbArg(),
Arg(0, 1e3, inclusive_a=False)], rtol=1e-5)
def test_gdtrix(self):
_assert_inverts(
sp.gdtrix,
lambda a, b, x: mpmath.gammainc(b, b=a*x, regularized=True),
2, [Arg(0, 1e3, inclusive_a=False), Arg(0, 1e3, inclusive_a=False),
ProbArg()], rtol=1e-7,
endpt_atol=[None, 1e-7, 1e-10])
def test_stdtr(self):
# Ideally the left endpoint for Arg() should be 0.
assert_mpmath_equal(
sp.stdtr,
_student_t_cdf,
[IntArg(1, 100), Arg(1e-10, np.inf)], rtol=1e-7)
@pytest.mark.xfail(run=False)
def test_stdtridf(self):
_assert_inverts(
sp.stdtridf,
_student_t_cdf,
0, [ProbArg(), Arg()], rtol=1e-7)
def test_stdtrit(self):
_assert_inverts(
sp.stdtrit,
_student_t_cdf,
1, [IntArg(1, 100), ProbArg()], rtol=1e-7,
endpt_atol=[None, 1e-10])
def test_chdtriv(self):
_assert_inverts(
sp.chdtriv,
lambda v, x: mpmath.gammainc(v/2, b=x/2, regularized=True),
0, [ProbArg(), IntArg(1, 100)], rtol=1e-4)
@pytest.mark.xfail(run=False)
def test_chndtridf(self):
# Use a larger atol since mpmath is doing numerical integration
_assert_inverts(
sp.chndtridf,
_noncentral_chi_cdf,
1, [Arg(0, 100, inclusive_a=False), ProbArg(),
Arg(0, 100, inclusive_a=False)],
n=1000, rtol=1e-4, atol=1e-15)
@pytest.mark.xfail(run=False)
def test_chndtrinc(self):
# Use a larger atol since mpmath is doing numerical integration
_assert_inverts(
sp.chndtrinc,
_noncentral_chi_cdf,
2, [Arg(0, 100, inclusive_a=False), IntArg(1, 100), ProbArg()],
n=1000, rtol=1e-4, atol=1e-15)
def test_chndtrix(self):
# Use a larger atol since mpmath is doing numerical integration
_assert_inverts(
sp.chndtrix,
_noncentral_chi_cdf,
0, [ProbArg(), IntArg(1, 100), Arg(0, 100, inclusive_a=False)],
n=1000, rtol=1e-4, atol=1e-15,
endpt_atol=[1e-6, None, None])
def test_tklmbda_zero_shape(self):
# When lmbda = 0 the CDF has a simple closed form
one = mpmath.mpf(1)
assert_mpmath_equal(
lambda x: sp.tklmbda(x, 0),
lambda x: one/(mpmath.exp(-x) + one),
[Arg()], rtol=1e-7)
def test_tklmbda_neg_shape(self):
_assert_inverts(
sp.tklmbda,
_tukey_lmbda_quantile,
0, [ProbArg(), Arg(-25, 0, inclusive_b=False)],
spfunc_first=False, rtol=1e-5,
endpt_atol=[1e-9, 1e-5])
@pytest.mark.xfail(run=False)
def test_tklmbda_pos_shape(self):
_assert_inverts(
sp.tklmbda,
_tukey_lmbda_quantile,
0, [ProbArg(), Arg(0, 100, inclusive_a=False)],
spfunc_first=False, rtol=1e-5)
def test_nonfinite():
funcs = [
("btdtria", 3),
("btdtrib", 3),
("bdtrik", 3),
("bdtrin", 3),
("chdtriv", 2),
("chndtr", 3),
("chndtrix", 3),
("chndtridf", 3),
("chndtrinc", 3),
("fdtridfd", 3),
("ncfdtr", 4),
("ncfdtri", 4),
("ncfdtridfn", 4),
("ncfdtridfd", 4),
("ncfdtrinc", 4),
("gdtrix", 3),
("gdtrib", 3),
("gdtria", 3),
("nbdtrik", 3),
("nbdtrin", 3),
("nrdtrimn", 3),
("nrdtrisd", 3),
("pdtrik", 2),
("stdtr", 2),
("stdtrit", 2),
("stdtridf", 2),
("nctdtr", 3),
("nctdtrit", 3),
("nctdtridf", 3),
("nctdtrinc", 3),
("tklmbda", 2),
]
np.random.seed(1)
for func, numargs in funcs:
func = getattr(sp, func)
args_choices = [(float(x), np.nan, np.inf, -np.inf) for x in
np.random.rand(numargs)]
for args in itertools.product(*args_choices):
res = func(*args)
if any(np.isnan(x) for x in args):
                # NaN inputs should result in NaN output
assert_equal(res, np.nan)
else:
# All other inputs should return something (but not
# raise exceptions or cause hangs)
pass
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from oslo.config import cfg
import six.moves.urllib.parse as urlparse
import webob
from webob import exc
import webtest
from neutron.api import api_common
from neutron.api.extensions import PluginAwareExtensionManager
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.v2 import attributes
from neutron.api.v2 import base as v2_base
from neutron.api.v2 import router
from neutron.common import config
from neutron.common import exceptions as n_exc
from neutron import context
from neutron.manager import NeutronManager
from neutron.openstack.common.notifier import api as notifer_api
from neutron.openstack.common import policy as common_policy
from neutron.openstack.common import uuidutils
from neutron import policy
from neutron import quota
from neutron.tests import base
from neutron.tests.unit import testlib_api
ROOTDIR = os.path.dirname(os.path.dirname(__file__))
ETCDIR = os.path.join(ROOTDIR, 'etc')
EXTDIR = os.path.join(ROOTDIR, 'unit/extensions')
_uuid = uuidutils.generate_uuid
def etcdir(*p):
return os.path.join(ETCDIR, *p)
def _get_path(resource, id=None, action=None, fmt=None):
path = '/%s' % resource
if id is not None:
path = path + '/%s' % id
if action is not None:
path = path + '/%s' % action
if fmt is not None:
path = path + '.%s' % fmt
return path
class ResourceIndexTestCase(base.BaseTestCase):
def test_index_json(self):
index = webtest.TestApp(router.Index({'foo': 'bar'}))
res = index.get('')
self.assertIn('resources', res.json)
self.assertEqual(len(res.json['resources']), 1)
resource = res.json['resources'][0]
self.assertIn('collection', resource)
self.assertEqual(resource['collection'], 'bar')
self.assertIn('name', resource)
self.assertEqual(resource['name'], 'foo')
self.assertIn('links', resource)
self.assertEqual(len(resource['links']), 1)
link = resource['links'][0]
self.assertIn('href', link)
self.assertEqual(link['href'], 'http://localhost/bar')
self.assertIn('rel', link)
self.assertEqual(link['rel'], 'self')
class APIv2TestBase(base.BaseTestCase):
def setUp(self):
super(APIv2TestBase, self).setUp()
plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2'
# Ensure existing ExtensionManager is not used
PluginAwareExtensionManager._instance = None
# Create the default configurations
args = ['--config-file', etcdir('neutron.conf.test')]
config.parse(args=args)
# Update the plugin
self.setup_coreplugin(plugin)
cfg.CONF.set_override('allow_pagination', True)
cfg.CONF.set_override('allow_sorting', True)
self._plugin_patcher = mock.patch(plugin, autospec=True)
self.plugin = self._plugin_patcher.start()
instance = self.plugin.return_value
instance._NeutronPluginBaseV2__native_pagination_support = True
instance._NeutronPluginBaseV2__native_sorting_support = True
self.addCleanup(self._plugin_patcher.stop)
api = router.APIRouter()
self.api = webtest.TestApp(api)
quota.QUOTAS._driver = None
cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
group='QUOTAS')
class _ArgMatcher(object):
"""An adapter to assist mock assertions, used to custom compare."""
def __init__(self, cmp, obj):
self.cmp = cmp
self.obj = obj
def __eq__(self, other):
return self.cmp(self.obj, other)
def _list_cmp(l1, l2):
return set(l1) == set(l2)
class APIv2TestCase(APIv2TestBase):
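    """Verify query-string handling in the v2 API layer.

    These tests issue GET requests with various combinations of fields,
    filters, pagination and sorting parameters and assert that the mocked
    plugin's get_* methods receive the expected keyword arguments.
    """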
def _do_field_list(self, resource, base_fields):
attr_info = attributes.RESOURCE_ATTRIBUTE_MAP[resource]
policy_attrs = [name for (name, info) in attr_info.items()
if info.get('required_by_policy') or
info.get('primary_key')]
fields = base_fields
fields.extend(policy_attrs)
return fields
def _get_collection_kwargs(self, skipargs=[], **kwargs):
args_list = ['filters', 'fields', 'sorts', 'limit', 'marker',
'page_reverse']
args_dict = dict((arg, mock.ANY)
for arg in set(args_list) - set(skipargs))
args_dict.update(kwargs)
return args_dict
def test_fields(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'fields': 'foo'})
fields = self._do_field_list('networks', ['foo'])
kwargs = self._get_collection_kwargs(fields=fields)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_fields_multiple(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
fields = self._do_field_list('networks', ['foo', 'bar'])
self.api.get(_get_path('networks'), {'fields': ['foo', 'bar']})
kwargs = self._get_collection_kwargs(fields=fields)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_fields_multiple_with_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
fields = self._do_field_list('networks', ['foo'])
self.api.get(_get_path('networks'), {'fields': ['foo', '']})
kwargs = self._get_collection_kwargs(fields=fields)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_fields_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'fields': ''})
kwargs = self._get_collection_kwargs(fields=[])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_fields_multiple_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'fields': ['', '']})
kwargs = self._get_collection_kwargs(fields=[])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': 'bar'})
filters = {'name': ['bar']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': ''})
filters = {}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_multiple_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': ['', '']})
filters = {}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_multiple_with_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': ['bar', '']})
filters = {'name': ['bar']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_multiple_values(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': ['bar', 'bar2']})
filters = {'name': ['bar', 'bar2']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_multiple(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': 'bar',
'tenant_id': 'bar2'})
filters = {'name': ['bar'], 'tenant_id': ['bar2']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_with_fields(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': 'bar', 'fields': 'foo'})
filters = {'name': ['bar']}
fields = self._do_field_list('networks', ['foo'])
kwargs = self._get_collection_kwargs(filters=filters, fields=fields)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_with_convert_to(self):
instance = self.plugin.return_value
instance.get_ports.return_value = []
self.api.get(_get_path('ports'), {'admin_state_up': 'true'})
filters = {'admin_state_up': [True]}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_ports.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_with_convert_list_to(self):
instance = self.plugin.return_value
instance.get_ports.return_value = []
self.api.get(_get_path('ports'),
{'fixed_ips': ['ip_address=foo', 'subnet_id=bar']})
filters = {'fixed_ips': {'ip_address': ['foo'], 'subnet_id': ['bar']}}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_ports.assert_called_once_with(mock.ANY, **kwargs)
def test_limit(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'limit': '10'})
kwargs = self._get_collection_kwargs(limit=10)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_limit_with_greater_than_max_limit(self):
cfg.CONF.set_default('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'limit': '1001'})
kwargs = self._get_collection_kwargs(limit=1000)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_zero(self):
cfg.CONF.set_default('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'limit': '0'})
kwargs = self._get_collection_kwargs(limit=1000)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_unspecific(self):
cfg.CONF.set_default('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(limit=1000)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_negative_value(self):
cfg.CONF.set_default('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'), {'limit': -1},
expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_limit_with_non_integer(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'),
{'limit': 'abc'}, expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_limit_with_infinite_pagination_max_limit(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
cfg.CONF.set_override('pagination_max_limit', 'Infinite')
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(limit=None)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_negative_pagination_max_limit(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
cfg.CONF.set_default('pagination_max_limit', '-1')
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(limit=None)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_non_integer_pagination_max_limit(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
cfg.CONF.set_default('pagination_max_limit', 'abc')
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(limit=None)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_marker(self):
cfg.CONF.set_override('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
marker = _uuid()
self.api.get(_get_path('networks'),
{'marker': marker})
kwargs = self._get_collection_kwargs(limit=1000, marker=marker)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_page_reverse(self):
calls = []
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'page_reverse': 'True'})
kwargs = self._get_collection_kwargs(page_reverse=True)
calls.append(mock.call.get_networks(mock.ANY, **kwargs))
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'page_reverse': 'False'})
kwargs = self._get_collection_kwargs(page_reverse=False)
calls.append(mock.call.get_networks(mock.ANY, **kwargs))
def test_page_reverse_with_non_bool(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'page_reverse': 'abc'})
kwargs = self._get_collection_kwargs(page_reverse=False)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_page_reverse_with_unspecific(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(page_reverse=False)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_sort(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'sort_key': ['name', 'admin_state_up'],
'sort_dir': ['desc', 'asc']})
kwargs = self._get_collection_kwargs(sorts=[('name', False),
('admin_state_up', True),
('id', True)])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_sort_with_primary_key(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'sort_key': ['name', 'admin_state_up', 'id'],
'sort_dir': ['desc', 'asc', 'desc']})
kwargs = self._get_collection_kwargs(sorts=[('name', False),
('admin_state_up', True),
('id', False)])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_sort_without_direction(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'), {'sort_key': ['name']},
expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_sort_with_invalid_attribute(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'),
{'sort_key': 'abc',
'sort_dir': 'asc'},
expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_sort_with_invalid_dirs(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'),
{'sort_key': 'name',
'sort_dir': 'abc'},
expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_emulated_sort(self):
instance = self.plugin.return_value
instance._NeutronPluginBaseV2__native_pagination_support = False
instance._NeutronPluginBaseV2__native_sorting_support = False
instance.get_networks.return_value = []
api = webtest.TestApp(router.APIRouter())
api.get(_get_path('networks'), {'sort_key': ['name', 'status'],
'sort_dir': ['desc', 'asc']})
kwargs = self._get_collection_kwargs(
skipargs=['sorts', 'limit', 'marker', 'page_reverse'])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_emulated_sort_without_sort_field(self):
instance = self.plugin.return_value
instance._NeutronPluginBaseV2__native_pagination_support = False
instance._NeutronPluginBaseV2__native_sorting_support = False
instance.get_networks.return_value = []
api = webtest.TestApp(router.APIRouter())
api.get(_get_path('networks'), {'sort_key': ['name', 'status'],
'sort_dir': ['desc', 'asc'],
'fields': ['subnets']})
kwargs = self._get_collection_kwargs(
skipargs=['sorts', 'limit', 'marker', 'page_reverse'],
fields=_ArgMatcher(_list_cmp, ['name',
'status',
'id',
'subnets',
'shared',
'tenant_id']))
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_emulated_pagination(self):
instance = self.plugin.return_value
instance._NeutronPluginBaseV2__native_pagination_support = False
instance.get_networks.return_value = []
api = webtest.TestApp(router.APIRouter())
api.get(_get_path('networks'), {'limit': 10,
'marker': 'foo',
'page_reverse': False})
kwargs = self._get_collection_kwargs(skipargs=['limit',
'marker',
'page_reverse'])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_native_pagination_without_native_sorting(self):
instance = self.plugin.return_value
instance._NeutronPluginBaseV2__native_sorting_support = False
self.assertRaises(n_exc.Invalid, router.APIRouter)
def test_native_pagination_without_allow_sorting(self):
cfg.CONF.set_override('allow_sorting', False)
instance = self.plugin.return_value
instance.get_networks.return_value = []
api = webtest.TestApp(router.APIRouter())
api.get(_get_path('networks'),
{'sort_key': ['name', 'admin_state_up'],
'sort_dir': ['desc', 'asc']})
kwargs = self._get_collection_kwargs(sorts=[('name', False),
('admin_state_up', True),
('id', True)])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
# Note: since all resources use the same controller and validation
# logic, we actually get really good coverage from testing just networks.
class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase):
def setUp(self):
super(JSONV2TestCase, self).setUp()
def _test_list(self, req_tenant_id, real_tenant_id):
env = {}
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
input_dict = {'id': uuidutils.generate_uuid(),
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': real_tenant_id,
'shared': False,
'subnets': []}
return_value = [input_dict]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
res = self.api.get(_get_path('networks',
fmt=self.fmt), extra_environ=env)
res = self.deserialize(res)
self.assertIn('networks', res)
if not req_tenant_id or req_tenant_id == real_tenant_id:
# expect full list returned
self.assertEqual(len(res['networks']), 1)
output_dict = res['networks'][0]
input_dict['shared'] = False
self.assertEqual(len(input_dict), len(output_dict))
for k, v in input_dict.iteritems():
self.assertEqual(v, output_dict[k])
else:
# expect no results
self.assertEqual(len(res['networks']), 0)
def test_list_noauth(self):
self._test_list(None, _uuid())
def test_list_keystone(self):
tenant_id = _uuid()
self._test_list(tenant_id, tenant_id)
def test_list_keystone_bad(self):
tenant_id = _uuid()
self._test_list(tenant_id + "bad", tenant_id)
def test_list_pagination(self):
id1 = str(_uuid())
id2 = str(_uuid())
input_dict1 = {'id': id1,
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': '',
'shared': False,
'subnets': []}
input_dict2 = {'id': id2,
'name': 'net2',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': '',
'shared': False,
'subnets': []}
return_value = [input_dict1, input_dict2]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': [str(_uuid())],
'sort_key': ['name'],
'sort_dir': ['asc']}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual(len(res['networks']), 2)
self.assertEqual(sorted([id1, id2]),
sorted([res['networks'][0]['id'],
res['networks'][1]['id']]))
self.assertIn('networks_links', res)
next_links = []
previous_links = []
for r in res['networks_links']:
if r['rel'] == 'next':
next_links.append(r)
if r['rel'] == 'previous':
previous_links.append(r)
self.assertEqual(len(next_links), 1)
self.assertEqual(len(previous_links), 1)
url = urlparse.urlparse(next_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
params['marker'] = [id2]
self.assertEqual(urlparse.parse_qs(url.query), params)
url = urlparse.urlparse(previous_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
params['marker'] = [id1]
params['page_reverse'] = ['True']
self.assertEqual(urlparse.parse_qs(url.query), params)
def test_list_pagination_with_last_page(self):
id = str(_uuid())
input_dict = {'id': id,
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': '',
'shared': False,
'subnets': []}
return_value = [input_dict]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': str(_uuid())}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual(len(res['networks']), 1)
self.assertEqual(id, res['networks'][0]['id'])
self.assertIn('networks_links', res)
previous_links = []
for r in res['networks_links']:
self.assertNotEqual(r['rel'], 'next')
if r['rel'] == 'previous':
previous_links.append(r)
self.assertEqual(len(previous_links), 1)
url = urlparse.urlparse(previous_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
expect_params = params.copy()
expect_params['marker'] = [id]
expect_params['page_reverse'] = ['True']
self.assertEqual(urlparse.parse_qs(url.query), expect_params)
def test_list_pagination_with_empty_page(self):
return_value = []
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': str(_uuid())}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual(res['networks'], [])
previous_links = []
if 'networks_links' in res:
for r in res['networks_links']:
self.assertNotEqual(r['rel'], 'next')
if r['rel'] == 'previous':
previous_links.append(r)
self.assertEqual(len(previous_links), 1)
url = urlparse.urlparse(previous_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
expect_params = params.copy()
del expect_params['marker']
expect_params['page_reverse'] = ['True']
self.assertEqual(urlparse.parse_qs(url.query), expect_params)
def test_list_pagination_reverse_with_last_page(self):
id = str(_uuid())
input_dict = {'id': id,
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': '',
'shared': False,
'subnets': []}
return_value = [input_dict]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': [str(_uuid())],
'page_reverse': ['True']}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual(len(res['networks']), 1)
self.assertEqual(id, res['networks'][0]['id'])
self.assertIn('networks_links', res)
next_links = []
for r in res['networks_links']:
self.assertNotEqual(r['rel'], 'previous')
if r['rel'] == 'next':
next_links.append(r)
self.assertEqual(len(next_links), 1)
url = urlparse.urlparse(next_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
expected_params = params.copy()
del expected_params['page_reverse']
expected_params['marker'] = [id]
self.assertEqual(urlparse.parse_qs(url.query),
expected_params)
def test_list_pagination_reverse_with_empty_page(self):
return_value = []
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': [str(_uuid())],
'page_reverse': ['True']}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual(res['networks'], [])
next_links = []
if 'networks_links' in res:
for r in res['networks_links']:
self.assertNotEqual(r['rel'], 'previous')
if r['rel'] == 'next':
next_links.append(r)
self.assertEqual(len(next_links), 1)
url = urlparse.urlparse(next_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
expect_params = params.copy()
del expect_params['marker']
del expect_params['page_reverse']
self.assertEqual(urlparse.parse_qs(url.query), expect_params)
def test_create(self):
net_id = _uuid()
data = {'network': {'name': 'net1', 'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = {'subnets': [], 'status': "ACTIVE",
'id': net_id}
return_value.update(data['network'].copy())
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('network', res)
net = res['network']
self.assertEqual(net['id'], net_id)
self.assertEqual(net['status'], "ACTIVE")
def test_create_use_defaults(self):
net_id = _uuid()
initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
full_input = {'network': {'admin_state_up': True,
'shared': False}}
full_input['network'].update(initial_input['network'])
return_value = {'id': net_id, 'status': "ACTIVE"}
return_value.update(full_input['network'])
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(initial_input),
content_type='application/' + self.fmt)
instance.create_network.assert_called_with(mock.ANY,
network=full_input)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('network', res)
net = res['network']
self.assertEqual(net['id'], net_id)
self.assertEqual(net['admin_state_up'], True)
self.assertEqual(net['status'], "ACTIVE")
def test_create_no_keystone_env(self):
data = {'name': 'net1'}
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_create_with_keystone_env(self):
tenant_id = _uuid()
net_id = _uuid()
env = {'neutron.context': context.Context('', tenant_id)}
# tenant_id should be fetched from env
initial_input = {'network': {'name': 'net1'}}
full_input = {'network': {'admin_state_up': True,
'shared': False, 'tenant_id': tenant_id}}
full_input['network'].update(initial_input['network'])
return_value = {'id': net_id, 'status': "ACTIVE"}
return_value.update(full_input['network'])
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(initial_input),
content_type='application/' + self.fmt,
extra_environ=env)
instance.create_network.assert_called_with(mock.ANY,
network=full_input)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
def test_create_bad_keystone_tenant(self):
tenant_id = _uuid()
data = {'network': {'name': 'net1', 'tenant_id': tenant_id}}
env = {'neutron.context': context.Context('', tenant_id + "bad")}
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True,
extra_environ=env)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_create_no_body(self):
data = {'whoa': None}
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_create_no_resource(self):
data = {}
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_create_missing_attr(self):
data = {'port': {'what': 'who', 'tenant_id': _uuid()}}
res = self.api.post(_get_path('ports', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, 400)
def test_create_readonly_attr(self):
data = {'network': {'name': 'net1', 'tenant_id': _uuid(),
'status': "ACTIVE"}}
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, 400)
def test_create_bulk(self):
data = {'networks': [{'name': 'net1',
'admin_state_up': True,
'tenant_id': _uuid()},
{'name': 'net2',
'admin_state_up': True,
'tenant_id': _uuid()}]}
def side_effect(context, network):
net = network.copy()
net['network'].update({'subnets': []})
return net['network']
instance = self.plugin.return_value
instance.create_network.side_effect = side_effect
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
def test_create_bulk_no_networks(self):
data = {'networks': []}
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_create_bulk_missing_attr(self):
data = {'ports': [{'what': 'who', 'tenant_id': _uuid()}]}
res = self.api.post(_get_path('ports', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, 400)
def test_create_bulk_partial_body(self):
data = {'ports': [{'device_id': 'device_1',
'tenant_id': _uuid()},
{'tenant_id': _uuid()}]}
res = self.api.post(_get_path('ports', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, 400)
def test_create_attr_not_specified(self):
net_id = _uuid()
tenant_id = _uuid()
device_id = _uuid()
initial_input = {'port': {'name': '', 'network_id': net_id,
'tenant_id': tenant_id,
'device_id': device_id,
'admin_state_up': True}}
full_input = {'port': {'admin_state_up': True,
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
'device_owner': ''}}
full_input['port'].update(initial_input['port'])
return_value = {'id': _uuid(), 'status': 'ACTIVE',
'admin_state_up': True,
'mac_address': 'ca:fe:de:ad:be:ef',
'device_id': device_id,
'device_owner': ''}
return_value.update(initial_input['port'])
instance = self.plugin.return_value
instance.get_network.return_value = {'tenant_id': unicode(tenant_id)}
instance.get_ports_count.return_value = 1
instance.create_port.return_value = return_value
res = self.api.post(_get_path('ports', fmt=self.fmt),
self.serialize(initial_input),
content_type='application/' + self.fmt)
instance.create_port.assert_called_with(mock.ANY, port=full_input)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('port', res)
port = res['port']
self.assertEqual(port['network_id'], net_id)
self.assertEqual(port['mac_address'], 'ca:fe:de:ad:be:ef')
def test_create_return_extra_attr(self):
net_id = _uuid()
data = {'network': {'name': 'net1', 'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = {'subnets': [], 'status': "ACTIVE",
'id': net_id, 'v2attrs:something': "123"}
return_value.update(data['network'].copy())
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('network', res)
net = res['network']
self.assertEqual(net['id'], net_id)
self.assertEqual(net['status'], "ACTIVE")
self.assertNotIn('v2attrs:something', net)
def test_fields(self):
return_value = {'name': 'net1', 'admin_state_up': True,
'subnets': []}
instance = self.plugin.return_value
instance.get_network.return_value = return_value
self.api.get(_get_path('networks',
id=uuidutils.generate_uuid(),
fmt=self.fmt))
def _test_delete(self, req_tenant_id, real_tenant_id, expected_code,
expect_errors=False):
env = {}
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
instance = self.plugin.return_value
instance.get_network.return_value = {'tenant_id': real_tenant_id,
'shared': False}
instance.delete_network.return_value = None
res = self.api.delete(_get_path('networks',
id=uuidutils.generate_uuid(),
fmt=self.fmt),
extra_environ=env,
expect_errors=expect_errors)
self.assertEqual(res.status_int, expected_code)
def test_delete_noauth(self):
self._test_delete(None, _uuid(), exc.HTTPNoContent.code)
def test_delete_keystone(self):
tenant_id = _uuid()
self._test_delete(tenant_id, tenant_id, exc.HTTPNoContent.code)
def test_delete_keystone_bad_tenant(self):
tenant_id = _uuid()
self._test_delete(tenant_id + "bad", tenant_id,
exc.HTTPNotFound.code, expect_errors=True)
def _test_get(self, req_tenant_id, real_tenant_id, expected_code,
expect_errors=False):
env = {}
shared = False
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
if req_tenant_id.endswith('another'):
shared = True
env['neutron.context'].roles = ['tenant_admin']
data = {'tenant_id': real_tenant_id, 'shared': shared}
instance = self.plugin.return_value
instance.get_network.return_value = data
res = self.api.get(_get_path('networks',
id=uuidutils.generate_uuid(),
fmt=self.fmt),
extra_environ=env,
expect_errors=expect_errors)
self.assertEqual(res.status_int, expected_code)
return res
def test_get_noauth(self):
self._test_get(None, _uuid(), 200)
def test_get_keystone(self):
tenant_id = _uuid()
self._test_get(tenant_id, tenant_id, 200)
def test_get_keystone_bad_tenant(self):
tenant_id = _uuid()
self._test_get(tenant_id + "bad", tenant_id,
exc.HTTPNotFound.code, expect_errors=True)
def test_get_keystone_shared_network(self):
tenant_id = _uuid()
self._test_get(tenant_id + "another", tenant_id, 200)
def test_get_keystone_strip_admin_only_attribute(self):
tenant_id = _uuid()
# Inject rule in policy engine
policy.init()
common_policy._rules['get_network:name'] = common_policy.parse_rule(
"rule:admin_only")
res = self._test_get(tenant_id, tenant_id, 200)
res = self.deserialize(res)
try:
self.assertNotIn('name', res['network'])
finally:
del common_policy._rules['get_network:name']
def _test_update(self, req_tenant_id, real_tenant_id, expected_code,
expect_errors=False):
env = {}
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
# leave out 'name' field intentionally
data = {'network': {'admin_state_up': True}}
return_value = {'subnets': []}
return_value.update(data['network'].copy())
instance = self.plugin.return_value
instance.get_network.return_value = {'tenant_id': real_tenant_id,
'shared': False}
instance.update_network.return_value = return_value
res = self.api.put(_get_path('networks',
id=uuidutils.generate_uuid(),
fmt=self.fmt),
self.serialize(data),
extra_environ=env,
expect_errors=expect_errors)
# Ensure id attribute is included in fields returned by GET call
# in update procedure.
self.assertEqual(1, instance.get_network.call_count)
self.assertIn('id', instance.get_network.call_args[1]['fields'])
self.assertEqual(res.status_int, expected_code)
def test_update_noauth(self):
self._test_update(None, _uuid(), 200)
def test_update_keystone(self):
tenant_id = _uuid()
self._test_update(tenant_id, tenant_id, 200)
def test_update_keystone_bad_tenant(self):
tenant_id = _uuid()
self._test_update(tenant_id + "bad", tenant_id,
exc.HTTPNotFound.code, expect_errors=True)
def test_update_readonly_field(self):
data = {'network': {'status': "NANANA"}}
res = self.api.put(_get_path('networks', id=_uuid()),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, 400)
def test_invalid_attribute_field(self):
data = {'network': {'invalid_key1': "foo1", 'invalid_key2': "foo2"}}
res = self.api.put(_get_path('networks', id=_uuid()),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, 400)
class SubresourceTest(base.BaseTestCase):
def setUp(self):
super(SubresourceTest, self).setUp()
plugin = 'neutron.tests.unit.test_api_v2.TestSubresourcePlugin'
PluginAwareExtensionManager._instance = None
# Save the global RESOURCE_ATTRIBUTE_MAP
self.saved_attr_map = {}
for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems():
self.saved_attr_map[resource] = attrs.copy()
args = ['--config-file', etcdir('neutron.conf.test')]
config.parse(args=args)
self.setup_coreplugin(plugin)
self._plugin_patcher = mock.patch(plugin, autospec=True)
self.plugin = self._plugin_patcher.start()
self.addCleanup(self._plugin_patcher.stop)
router.SUB_RESOURCES['dummy'] = {
'collection_name': 'dummies',
'parent': {'collection_name': 'networks',
'member_name': 'network'}
}
attributes.RESOURCE_ATTRIBUTE_MAP['dummies'] = {
'foo': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': '', 'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True}
}
api = router.APIRouter()
self.api = webtest.TestApp(api)
def tearDown(self):
router.SUB_RESOURCES = {}
# Restore the global RESOURCE_ATTRIBUTE_MAP
attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
super(SubresourceTest, self).tearDown()
def test_index_sub_resource(self):
instance = self.plugin.return_value
self.api.get('/networks/id1/dummies')
instance.get_network_dummies.assert_called_once_with(mock.ANY,
filters=mock.ANY,
fields=mock.ANY,
network_id='id1')
def test_show_sub_resource(self):
instance = self.plugin.return_value
dummy_id = _uuid()
self.api.get('/networks/id1' + _get_path('dummies', id=dummy_id))
instance.get_network_dummy.assert_called_once_with(mock.ANY,
dummy_id,
network_id='id1',
fields=mock.ANY)
def test_create_sub_resource(self):
instance = self.plugin.return_value
body = {'dummy': {'foo': 'bar', 'tenant_id': _uuid()}}
self.api.post_json('/networks/id1/dummies', body)
instance.create_network_dummy.assert_called_once_with(mock.ANY,
network_id='id1',
dummy=body)
def test_update_sub_resource(self):
instance = self.plugin.return_value
dummy_id = _uuid()
body = {'dummy': {'foo': 'bar'}}
self.api.put_json('/networks/id1' + _get_path('dummies', id=dummy_id),
body)
instance.update_network_dummy.assert_called_once_with(mock.ANY,
dummy_id,
network_id='id1',
dummy=body)
def test_update_subresource_to_none(self):
instance = self.plugin.return_value
dummy_id = _uuid()
body = {'dummy': {}}
self.api.put_json('/networks/id1' + _get_path('dummies', id=dummy_id),
body)
instance.update_network_dummy.assert_called_once_with(mock.ANY,
dummy_id,
network_id='id1',
dummy=body)
def test_delete_sub_resource(self):
instance = self.plugin.return_value
dummy_id = _uuid()
self.api.delete('/networks/id1' + _get_path('dummies', id=dummy_id))
instance.delete_network_dummy.assert_called_once_with(mock.ANY,
dummy_id,
network_id='id1')
# Note: since all resources use the same controller and validation
# logic, we actually get really good coverage from testing just networks.
class XMLV2TestCase(JSONV2TestCase):
fmt = 'xml'
class V2Views(base.BaseTestCase):
def _view(self, keys, collection, resource):
data = dict((key, 'value') for key in keys)
data['fake'] = 'value'
attr_info = attributes.RESOURCE_ATTRIBUTE_MAP[collection]
controller = v2_base.Controller(None, collection, resource, attr_info)
res = controller._view(context.get_admin_context(), data)
self.assertNotIn('fake', res)
for key in keys:
self.assertIn(key, res)
def test_network(self):
keys = ('id', 'name', 'subnets', 'admin_state_up', 'status',
'tenant_id')
self._view(keys, 'networks', 'network')
def test_port(self):
keys = ('id', 'network_id', 'mac_address', 'fixed_ips',
'device_id', 'admin_state_up', 'tenant_id', 'status')
self._view(keys, 'ports', 'port')
def test_subnet(self):
keys = ('id', 'network_id', 'tenant_id', 'gateway_ip',
'ip_version', 'cidr', 'enable_dhcp')
self._view(keys, 'subnets', 'subnet')
class NotificationTest(APIv2TestBase):
def _resource_op_notifier(self, opname, resource, expected_errors=False,
notification_level='INFO'):
initial_input = {resource: {'name': 'myname'}}
instance = self.plugin.return_value
instance.get_networks.return_value = initial_input
instance.get_networks_count.return_value = 0
expected_code = exc.HTTPCreated.code
with mock.patch.object(notifer_api, 'notify') as mynotifier:
if opname == 'create':
initial_input[resource]['tenant_id'] = _uuid()
res = self.api.post_json(
_get_path('networks'),
initial_input, expect_errors=expected_errors)
if opname == 'update':
res = self.api.put_json(
_get_path('networks', id=_uuid()),
initial_input, expect_errors=expected_errors)
expected_code = exc.HTTPOk.code
if opname == 'delete':
initial_input[resource]['tenant_id'] = _uuid()
res = self.api.delete(
_get_path('networks', id=_uuid()),
expect_errors=expected_errors)
expected_code = exc.HTTPNoContent.code
expected = [mock.call(mock.ANY,
'network.' + cfg.CONF.host,
resource + "." + opname + ".start",
notification_level,
mock.ANY),
mock.call(mock.ANY,
'network.' + cfg.CONF.host,
resource + "." + opname + ".end",
notification_level,
mock.ANY)]
self.assertEqual(expected, mynotifier.call_args_list)
self.assertEqual(res.status_int, expected_code)
def test_network_create_notifer(self):
self._resource_op_notifier('create', 'network')
def test_network_delete_notifer(self):
self._resource_op_notifier('delete', 'network')
def test_network_update_notifer(self):
self._resource_op_notifier('update', 'network')
def test_network_create_notifer_with_log_level(self):
cfg.CONF.set_override('default_notification_level', 'DEBUG')
self._resource_op_notifier('create', 'network',
notification_level='DEBUG')
class DHCPNotificationTest(APIv2TestBase):
def _test_dhcp_notifier(self, opname, resource, initial_input=None):
instance = self.plugin.return_value
instance.get_networks.return_value = initial_input
instance.get_networks_count.return_value = 0
expected_code = exc.HTTPCreated.code
with mock.patch.object(dhcp_rpc_agent_api.DhcpAgentNotifyAPI,
'notify') as dhcp_notifier:
if opname == 'create':
res = self.api.post_json(
_get_path('networks'),
initial_input)
if opname == 'update':
res = self.api.put_json(
_get_path('networks', id=_uuid()),
initial_input)
expected_code = exc.HTTPOk.code
if opname == 'delete':
res = self.api.delete(_get_path('networks', id=_uuid()))
expected_code = exc.HTTPNoContent.code
expected_item = mock.call(mock.ANY, mock.ANY,
resource + "." + opname + ".end")
if initial_input and resource not in initial_input:
resource += 's'
num = len(initial_input[resource]) if initial_input and isinstance(
initial_input[resource], list) else 1
expected = [expected_item for x in xrange(num)]
self.assertEqual(expected, dhcp_notifier.call_args_list)
self.assertEqual(num, dhcp_notifier.call_count)
self.assertEqual(expected_code, res.status_int)
def test_network_create_dhcp_notifer(self):
input = {'network': {'name': 'net',
'tenant_id': _uuid()}}
self._test_dhcp_notifier('create', 'network', input)
def test_network_delete_dhcp_notifer(self):
self._test_dhcp_notifier('delete', 'network')
def test_network_update_dhcp_notifer(self):
input = {'network': {'name': 'net'}}
self._test_dhcp_notifier('update', 'network', input)
def test_networks_create_bulk_dhcp_notifer(self):
input = {'networks': [{'name': 'net1',
'tenant_id': _uuid()},
{'name': 'net2',
'tenant_id': _uuid()}]}
self._test_dhcp_notifier('create', 'network', input)
class QuotaTest(APIv2TestBase):
def test_create_network_quota(self):
cfg.CONF.set_override('quota_network', 1, group='QUOTAS')
initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
full_input = {'network': {'admin_state_up': True, 'subnets': []}}
full_input['network'].update(initial_input['network'])
instance = self.plugin.return_value
instance.get_networks_count.return_value = 1
res = self.api.post_json(
_get_path('networks'), initial_input, expect_errors=True)
instance.get_networks_count.assert_called_with(mock.ANY,
filters=mock.ANY)
self.assertIn("Quota exceeded for resources",
res.json['NeutronError']['message'])
def test_create_network_quota_no_counts(self):
cfg.CONF.set_override('quota_network', 1, group='QUOTAS')
initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
full_input = {'network': {'admin_state_up': True, 'subnets': []}}
full_input['network'].update(initial_input['network'])
instance = self.plugin.return_value
instance.get_networks_count.side_effect = (
NotImplementedError())
instance.get_networks.return_value = ["foo"]
res = self.api.post_json(
_get_path('networks'), initial_input, expect_errors=True)
instance.get_networks_count.assert_called_with(mock.ANY,
filters=mock.ANY)
self.assertIn("Quota exceeded for resources",
res.json['NeutronError']['message'])
def test_create_network_quota_without_limit(self):
cfg.CONF.set_override('quota_network', -1, group='QUOTAS')
initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
instance = self.plugin.return_value
instance.get_networks_count.return_value = 3
res = self.api.post_json(
_get_path('networks'), initial_input)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
class ExtensionTestCase(base.BaseTestCase):
def setUp(self):
super(ExtensionTestCase, self).setUp()
plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2'
# Ensure existing ExtensionManager is not used
PluginAwareExtensionManager._instance = None
# Save the global RESOURCE_ATTRIBUTE_MAP
self.saved_attr_map = {}
for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems():
self.saved_attr_map[resource] = attrs.copy()
# Create the default configurations
args = ['--config-file', etcdir('neutron.conf.test')]
config.parse(args=args)
# Update the plugin and extensions path
self.setup_coreplugin(plugin)
cfg.CONF.set_override('api_extensions_path', EXTDIR)
self._plugin_patcher = mock.patch(plugin, autospec=True)
self.plugin = self._plugin_patcher.start()
# Instantiate mock plugin and enable the V2attributes extension
NeutronManager.get_plugin().supported_extension_aliases = ["v2attrs"]
api = router.APIRouter()
self.api = webtest.TestApp(api)
quota.QUOTAS._driver = None
cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
group='QUOTAS')
def tearDown(self):
super(ExtensionTestCase, self).tearDown()
self._plugin_patcher.stop()
self.api = None
self.plugin = None
# Restore the global RESOURCE_ATTRIBUTE_MAP
attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
def test_extended_create(self):
net_id = _uuid()
initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid(),
'v2attrs:something_else': "abc"}}
data = {'network': {'admin_state_up': True, 'shared': False}}
data['network'].update(initial_input['network'])
return_value = {'subnets': [], 'status': "ACTIVE",
'id': net_id,
'v2attrs:something': "123"}
return_value.update(data['network'].copy())
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post_json(_get_path('networks'), initial_input)
instance.create_network.assert_called_with(mock.ANY,
network=data)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
self.assertIn('network', res.json)
net = res.json['network']
self.assertEqual(net['id'], net_id)
self.assertEqual(net['status'], "ACTIVE")
self.assertEqual(net['v2attrs:something'], "123")
self.assertNotIn('v2attrs:something_else', net)
class TestSubresourcePlugin():
def get_network_dummies(self, context, network_id,
filters=None, fields=None):
return []
def get_network_dummy(self, context, id, network_id,
fields=None):
return {}
def create_network_dummy(self, context, network_id, dummy):
return {}
def update_network_dummy(self, context, id, network_id, dummy):
return {}
def delete_network_dummy(self, context, id, network_id):
return
class ListArgsTestCase(base.BaseTestCase):
def test_list_args(self):
path = '/?fields=4&foo=3&fields=2&bar=1'
request = webob.Request.blank(path)
expect_val = ['2', '4']
actual_val = api_common.list_args(request, 'fields')
self.assertEqual(sorted(actual_val), expect_val)
def test_list_args_with_empty(self):
path = '/?foo=4&bar=3&baz=2&qux=1'
request = webob.Request.blank(path)
self.assertEqual([], api_common.list_args(request, 'fields'))
class FiltersTestCase(base.BaseTestCase):
def test_all_skip_args(self):
path = '/?fields=4&fields=3&fields=2&fields=1'
request = webob.Request.blank(path)
self.assertEqual({}, api_common.get_filters(request, None,
["fields"]))
def test_blank_values(self):
path = '/?foo=&bar=&baz=&qux='
request = webob.Request.blank(path)
self.assertEqual({}, api_common.get_filters(request, {}))
def test_no_attr_info(self):
path = '/?foo=4&bar=3&baz=2&qux=1'
request = webob.Request.blank(path)
expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
actual_val = api_common.get_filters(request, {})
self.assertEqual(actual_val, expect_val)
def test_attr_info_without_conversion(self):
path = '/?foo=4&bar=3&baz=2&qux=1'
request = webob.Request.blank(path)
attr_info = {'foo': {'key': 'val'}}
expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
actual_val = api_common.get_filters(request, attr_info)
self.assertEqual(actual_val, expect_val)
def test_attr_info_with_convert_list_to(self):
path = '/?foo=key=4&bar=3&foo=key=2&qux=1'
request = webob.Request.blank(path)
attr_info = {
'foo': {
'convert_list_to': attributes.convert_kvp_list_to_dict,
}
}
expect_val = {'foo': {'key': ['2', '4']}, 'bar': ['3'], 'qux': ['1']}
actual_val = api_common.get_filters(request, attr_info)
self.assertEqual(actual_val, expect_val)
def test_attr_info_with_convert_to(self):
path = '/?foo=4&bar=3&baz=2&qux=1'
request = webob.Request.blank(path)
attr_info = {'foo': {'convert_to': attributes.convert_to_int}}
expect_val = {'foo': [4], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
actual_val = api_common.get_filters(request, attr_info)
self.assertEqual(actual_val, expect_val)
class CreateResourceTestCase(base.BaseTestCase):
def test_resource_creation(self):
resource = v2_base.create_resource('fakes', 'fake', None, {})
self.assertIsInstance(resource, webob.dec.wsgify)
|
|
# -*- test-case-name: twisted.test.test_application,twisted.test.test_twistd -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from __future__ import absolute_import, division, print_function
import sys
import os
import pdb
import getpass
import traceback
import signal
import warnings
from operator import attrgetter
from twisted import copyright, plugin, logger
from twisted.application import service, reactors
from twisted.internet import defer
from twisted.persisted import sob
from twisted.python import runtime, log, usage, failure, util, logfile
from twisted.python.reflect import qual, namedAny
# Expose the new implementation of installReactor at the old location.
from twisted.application.reactors import installReactor
from twisted.application.reactors import NoSuchReactor
class _BasicProfiler(object):
"""
@ivar saveStats: if C{True}, save the stats information instead of the
human readable format
@type saveStats: C{bool}
@ivar profileOutput: the name of the file used to print profile data.
@type profileOutput: C{str}
"""
def __init__(self, profileOutput, saveStats):
self.profileOutput = profileOutput
self.saveStats = saveStats
def _reportImportError(self, module, e):
"""
Helper method to report an import error with a profile module. This
has to be explicit because some of these modules are removed by
distributions due to them being non-free.
"""
s = "Failed to import module %s: %s" % (module, e)
s += """
This is most likely caused by your operating system not including
the module due to it being non-free. Either do not use the option
--profile, or install the module; your operating system vendor
may provide it in a separate package.
"""
raise SystemExit(s)
class ProfileRunner(_BasicProfiler):
"""
Runner for the standard profile module.
"""
def run(self, reactor):
"""
Run reactor under the standard profiler.
"""
try:
import profile
except ImportError as e:
self._reportImportError("profile", e)
p = profile.Profile()
p.runcall(reactor.run)
if self.saveStats:
p.dump_stats(self.profileOutput)
else:
tmp, sys.stdout = sys.stdout, open(self.profileOutput, 'a')
try:
p.print_stats()
finally:
sys.stdout, tmp = tmp, sys.stdout
tmp.close()
class CProfileRunner(_BasicProfiler):
"""
Runner for the cProfile module.
"""
def run(self, reactor):
"""
Run reactor under the cProfile profiler.
"""
try:
import cProfile
import pstats
except ImportError as e:
self._reportImportError("cProfile", e)
p = cProfile.Profile()
p.runcall(reactor.run)
if self.saveStats:
p.dump_stats(self.profileOutput)
else:
with open(self.profileOutput, 'w') as stream:
s = pstats.Stats(p, stream=stream)
s.strip_dirs()
s.sort_stats(-1)
s.print_stats()
class AppProfiler(object):
"""
Class which selects a specific profile runner based on configuration
options.
@ivar profiler: the name of the selected profiler.
@type profiler: C{str}
"""
profilers = {"profile": ProfileRunner, "cprofile": CProfileRunner}
def __init__(self, options):
saveStats = options.get("savestats", False)
profileOutput = options.get("profile", None)
self.profiler = options.get("profiler", "cprofile").lower()
if self.profiler in self.profilers:
profiler = self.profilers[self.profiler](profileOutput, saveStats)
self.run = profiler.run
else:
raise SystemExit("Unsupported profiler name: %s" %
(self.profiler,))
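# Illustrative sketch (not part of Twisted): AppProfiler only needs a
# dict-like options object carrying the "profiler", "profile" and
# "savestats" keys, e.g. the result of parsing
# "twistd --profiler cprofile --profile out.prof --savestats".
def _exampleAppProfiler():
    opts = {"profiler": "cprofile", "profile": "out.prof", "savestats": True}
    return AppProfiler(opts)  # .run(reactor) would then profile reactor.run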
class AppLogger(object):
"""
An L{AppLogger} attaches the configured log observer specified on the
commandline to a L{ServerOptions} object, a custom L{logger.ILogObserver},
or a legacy custom L{log.ILogObserver}.
@ivar _logfilename: The name of the file to which to log, if other than the
default.
@type _logfilename: C{str}
@ivar _observerFactory: Callable object that will create a log observer, or
None.
@ivar _observer: log observer added at C{start} and removed at C{stop}.
@type _observer: a callable that implements L{logger.ILogObserver} or
L{log.ILogObserver}.
"""
_observer = None
def __init__(self, options):
"""
Initialize an L{AppLogger} with a L{ServerOptions}.
"""
self._logfilename = options.get("logfile", "")
self._observerFactory = options.get("logger") or None
def start(self, application):
"""
Initialize the global logging system for the given application.
If a custom logger was specified on the command line it will be used.
If not, and an L{logger.ILogObserver} or legacy L{log.ILogObserver}
component has been set on C{application}, then it will be used as the
log observer. Otherwise a log observer will be created based on the
command line options for built-in loggers (e.g. C{--logfile}).
@param application: The application on which to check for an
L{logger.ILogObserver} or legacy L{log.ILogObserver}.
@type application: L{twisted.python.components.Componentized}
"""
if self._observerFactory is not None:
observer = self._observerFactory()
else:
observer = application.getComponent(logger.ILogObserver, None)
if observer is None:
# If there's no new ILogObserver, try the legacy one
observer = application.getComponent(log.ILogObserver, None)
if observer is None:
observer = self._getLogObserver()
self._observer = observer
if logger.ILogObserver.providedBy(self._observer):
observers = [self._observer]
elif log.ILogObserver.providedBy(self._observer):
observers = [logger.LegacyLogObserverWrapper(self._observer)]
else:
warnings.warn(
("Passing a logger factory which makes log observers which do "
"not implement twisted.logger.ILogObserver or "
"twisted.python.log.ILogObserver to "
"twisted.application.app.AppLogger was deprecated in "
"Twisted 16.2. Please use a factory that produces "
"twisted.logger.ILogObserver (or the legacy "
"twisted.python.log.ILogObserver) implementing objects "
"instead."),
DeprecationWarning,
stacklevel=2)
observers = [logger.LegacyLogObserverWrapper(self._observer)]
logger.globalLogBeginner.beginLoggingTo(observers)
self._initialLog()
def _initialLog(self):
"""
Print twistd start log message.
"""
from twisted.internet import reactor
logger._loggerFor(self).info(
"twistd {version} ({exe} {pyVersion}) starting up.",
version=copyright.version, exe=sys.executable,
pyVersion=runtime.shortPythonVersion())
logger._loggerFor(self).info('reactor class: {reactor}.',
reactor=qual(reactor.__class__))
def _getLogObserver(self):
"""
Create a log observer to be added to the logging system before running
this application.
"""
if self._logfilename == '-' or not self._logfilename:
logFile = sys.stdout
else:
logFile = logfile.LogFile.fromFullPath(self._logfilename)
return logger.textFileLogObserver(logFile)
def stop(self):
"""
Remove all log observers previously set up by L{AppLogger.start}.
"""
logger._loggerFor(self).info("Server Shut Down.")
if self._observer is not None:
logger.globalLogPublisher.removeObserver(self._observer)
self._observer = None
def fixPdb():
def do_stop(self, arg):
self.clear_all_breaks()
self.set_continue()
from twisted.internet import reactor
reactor.callLater(0, reactor.stop)
return 1
def help_stop(self):
print("stop - Continue execution, then cleanly shutdown the twisted "
"reactor.")
def set_quit(self):
os._exit(0)
pdb.Pdb.set_quit = set_quit
pdb.Pdb.do_stop = do_stop
pdb.Pdb.help_stop = help_stop
def runReactorWithLogging(config, oldstdout, oldstderr, profiler=None,
reactor=None):
"""
Start the reactor, using profiling if specified by the configuration, and
log any error happening in the process.
@param config: configuration of the twistd application.
@type config: L{ServerOptions}
@param oldstdout: initial value of C{sys.stdout}.
@type oldstdout: C{file}
@param oldstderr: initial value of C{sys.stderr}.
@type oldstderr: C{file}
@param profiler: object used to run the reactor with profiling.
@type profiler: L{AppProfiler}
@param reactor: The reactor to use. If L{None}, the global reactor will
be used.
"""
if reactor is None:
from twisted.internet import reactor
try:
if config['profile']:
if profiler is not None:
profiler.run(reactor)
elif config['debug']:
sys.stdout = oldstdout
sys.stderr = oldstderr
if runtime.platformType == 'posix':
signal.signal(signal.SIGUSR2, lambda *args: pdb.set_trace())
signal.signal(signal.SIGINT, lambda *args: pdb.set_trace())
fixPdb()
pdb.runcall(reactor.run)
else:
reactor.run()
except:
close = False
if config['nodaemon']:
file = oldstdout
else:
file = open("TWISTD-CRASH.log", "a")
close = True
try:
traceback.print_exc(file=file)
file.flush()
finally:
if close:
file.close()
def getPassphrase(needed):
if needed:
return getpass.getpass('Passphrase: ')
else:
return None
def getSavePassphrase(needed):
if needed:
return util.getPassword("Encryption passphrase: ")
else:
return None
class ApplicationRunner(object):
"""
An object which helps running an application based on a config object.
Subclass me and implement preApplication and postApplication
methods. postApplication generally will want to run the reactor
after starting the application.
@ivar config: The config object, which provides a dict-like interface.
@ivar application: Available in postApplication, but not
preApplication. This is the application object.
@ivar profilerFactory: Factory for creating a profiler object, able to
profile the application if options are set accordingly.
@ivar profiler: Instance provided by C{profilerFactory}.
@ivar loggerFactory: Factory for creating object responsible for logging.
@ivar logger: Instance provided by C{loggerFactory}.
"""
profilerFactory = AppProfiler
loggerFactory = AppLogger
def __init__(self, config):
self.config = config
self.profiler = self.profilerFactory(config)
self.logger = self.loggerFactory(config)
def run(self):
"""
Run the application.
"""
self.preApplication()
self.application = self.createOrGetApplication()
self.logger.start(self.application)
self.postApplication()
self.logger.stop()
def startReactor(self, reactor, oldstdout, oldstderr):
"""
Run the reactor with the given configuration. Subclasses should
probably call this from C{postApplication}.
@see: L{runReactorWithLogging}
"""
runReactorWithLogging(
self.config, oldstdout, oldstderr, self.profiler, reactor)
def preApplication(self):
"""
Override in subclass.
This should set up any state necessary before loading and
running the Application.
"""
raise NotImplementedError()
def postApplication(self):
"""
Override in subclass.
This will be called after the application has been loaded (so
the C{application} attribute will be set). Generally this
should start the application and run the reactor.
"""
raise NotImplementedError()
def createOrGetApplication(self):
"""
Create or load an Application based on the parameters found in the
given L{ServerOptions} instance.
If a subcommand was used, the L{service.IServiceMaker} that it
represents will be used to construct a service to be added to
a newly-created Application.
Otherwise, an application will be loaded based on parameters in
the config.
"""
if self.config.subCommand:
# If a subcommand was given, it's our responsibility to create
# the application, instead of load it from a file.
# loadedPlugins is set up by the ServerOptions.subCommands
# property, which is iterated somewhere in the bowels of
# usage.Options.
plg = self.config.loadedPlugins[self.config.subCommand]
ser = plg.makeService(self.config.subOptions)
application = service.Application(plg.tapname)
ser.setServiceParent(application)
else:
passphrase = getPassphrase(self.config['encrypted'])
application = getApplication(self.config, passphrase)
return application
def getApplication(config, passphrase):
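# Pick whichever of the mutually exclusive --python/--source/--file options
# was supplied; the legacy 'file' option maps to the 'pickle' persistence
# style understood by service.loadApplication().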
s = [(config[t], t)
for t in ['python', 'source', 'file'] if config[t]][0]
filename, style = s[0], {'file': 'pickle'}.get(s[1], s[1])
try:
log.msg("Loading %s..." % filename)
application = service.loadApplication(filename, style, passphrase)
log.msg("Loaded.")
except Exception as e:
s = "Failed to load application: %s" % e
if isinstance(e, KeyError) and e.args[0] == "application":
s += """
Could not find 'application' in the file. To use 'twistd -y', your .tac
file must create a suitable object (e.g., by calling service.Application())
and store it in a variable named 'application'. twistd loads your .tac file
and scans the global variables for one of this name.
Please read the 'Using Application' HOWTO for details.
"""
traceback.print_exc(file=log.logfile)
log.msg(s)
log.deferr()
sys.exit('\n' + s + '\n')
return application
def _reactorAction():
return usage.CompleteList([r.shortName for r in
reactors.getReactorTypes()])
class ReactorSelectionMixin:
"""
Provides options for selecting a reactor to install.
If a reactor is installed, the short name which was used to locate it is
saved as the value for the C{"reactor"} key.
"""
compData = usage.Completions(
optActions={"reactor": _reactorAction})
messageOutput = sys.stdout
_getReactorTypes = staticmethod(reactors.getReactorTypes)
def opt_help_reactors(self):
"""
Display a list of possibly available reactor names.
"""
rcts = sorted(self._getReactorTypes(), key=attrgetter('shortName'))
for r in rcts:
self.messageOutput.write(' %-4s\t%s\n' %
(r.shortName, r.description))
raise SystemExit(0)
def opt_reactor(self, shortName):
"""
Which reactor to use (see --help-reactors for a list of possibilities)
"""
# Actually actually actually install the reactor right at this very
# moment, before any other code (for example, a sub-command plugin)
# runs and accidentally imports and installs the default reactor.
#
# This could probably be improved somehow.
try:
installReactor(shortName)
except NoSuchReactor:
msg = ("The specified reactor does not exist: '%s'.\n"
"See the list of available reactors with "
"--help-reactors" % (shortName,))
raise usage.UsageError(msg)
except Exception as e:
msg = ("The specified reactor cannot be used, failed with error: "
"%s.\nSee the list of available reactors with "
"--help-reactors" % (e,))
raise usage.UsageError(msg)
else:
self["reactor"] = shortName
opt_r = opt_reactor
class ServerOptions(usage.Options, ReactorSelectionMixin):
longdesc = ("twistd reads a twisted.application.service.Application out "
"of a file and runs it.")
optFlags = [['savestats', None,
"save the Stats object rather than the text output of "
"the profiler."],
['no_save', 'o', "do not save state on shutdown"],
['encrypted', 'e',
"The specified tap/aos file is encrypted."]]
optParameters = [['logfile', 'l', None,
"log to a specified file, - for stdout"],
['logger', None, None,
"A fully-qualified name to a log observer factory to "
"use for the initial log observer. Takes precedence "
"over --logfile and --syslog (when available)."],
['profile', 'p', None,
"Run in profile mode, dumping results to specified "
"file."],
['profiler', None, "cprofile",
"Name of the profiler to use (%s)." %
", ".join(AppProfiler.profilers)],
['file', 'f', 'twistd.tap',
"read the given .tap file"],
['python', 'y', None,
"read an application from within a Python file "
"(implies -o)"],
['source', 's', None,
"Read an application from a .tas file (AOT format)."],
['rundir', 'd', '.',
'Change to a supplied directory before running']]
compData = usage.Completions(
mutuallyExclusive=[("file", "python", "source")],
optActions={"file": usage.CompleteFiles("*.tap"),
"python": usage.CompleteFiles("*.(tac|py)"),
"source": usage.CompleteFiles("*.tas"),
"rundir": usage.CompleteDirs()}
)
_getPlugins = staticmethod(plugin.getPlugins)
def __init__(self, *a, **kw):
self['debug'] = False
usage.Options.__init__(self, *a, **kw)
def opt_debug(self):
"""
Run the application in the Python Debugger (implies nodaemon);
sending SIGUSR2 will drop into the debugger.
"""
defer.setDebugging(True)
failure.startDebugMode()
self['debug'] = True
opt_b = opt_debug
def opt_spew(self):
"""
Print an insanely verbose log of everything that happens.
Useful when debugging freezes or locks in complex code.
"""
sys.settrace(util.spewer)
try:
import threading
except ImportError:
return
threading.settrace(util.spewer)
def parseOptions(self, options=None):
if options is None:
options = sys.argv[1:] or ["--help"]
usage.Options.parseOptions(self, options)
def postOptions(self):
if self.subCommand or self['python']:
self['no_save'] = True
if self['logger'] is not None:
try:
self['logger'] = namedAny(self['logger'])
except Exception as e:
raise usage.UsageError("Logger '%s' could not be imported: %s"
% (self['logger'], e))
def subCommands(self):
plugins = self._getPlugins(service.IServiceMaker)
self.loadedPlugins = {}
for plug in sorted(plugins, key=attrgetter('tapname')):
self.loadedPlugins[plug.tapname] = plug
yield (plug.tapname,
None,
# Avoid resolving the options attribute right away, in case
# it's a property with a non-trivial getter (eg, one which
# imports modules).
lambda plug=plug: plug.options(),
plug.description)
subCommands = property(subCommands)
def run(runApp, ServerOptions):
config = ServerOptions()
try:
config.parseOptions()
except usage.error as ue:
print(config)
print("%s: %s" % (sys.argv[0], ue))
else:
runApp(config)
def convertStyle(filein, typein, passphrase, fileout, typeout, encrypt):
application = service.loadApplication(filein, typein, passphrase)
sob.IPersistable(application).setStyle(typeout)
passphrase = getSavePassphrase(encrypt)
if passphrase:
fileout = None
sob.IPersistable(application).save(filename=fileout, passphrase=passphrase)
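# Hedged usage sketch (file names are illustrative): converting a pickled
# .tap file to AOT source form without encryption would look like
#   convertStyle('myapp.tap', 'pickle', None, 'myapp.tas', 'source', False)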
def startApplication(application, save):
from twisted.internet import reactor
service.IService(application).startService()
if save:
p = sob.IPersistable(application)
reactor.addSystemEventTrigger('after', 'shutdown', p.save, 'shutdown')
reactor.addSystemEventTrigger('before', 'shutdown',
service.IService(application).stopService)
|
|
# -*- coding: utf-8 -*-
"""
stellarPYL - python stellar spectra processing software
Copyright (c) 2016 Brunston Poon
@file: pxlambda test
This program comes with absolutely no warranty.
"""
import configparser
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
import stellar as st
import tools as to
import text as txt
config = configparser.ConfigParser()
config.read('settings.ini')
defThresh = config['CONTROL']['defaultthreshold']
autoIntensity = config['CONTROL']['autointensity']
top = int(config['CONTROL']['manualtop'])
bottom = int(config['CONTROL']['manualbot'])
right = int(config['CONTROL']['manualright'])
left = int(config['CONTROL']['manualleft'])
step = float(config['CONTROL']['r'])
verbose = config['CONTROL']['verbose']
showthresh = config['CONTROL']['showthresh']
margin = int(config['CONTROL']['margin'])
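# The settings.ini read above is assumed to look roughly like the following
# (section and key names come from the reads above; the values are only
# illustrative):
#
#   [CONTROL]
#   defaultthreshold = 10000
#   autointensity = saaw
#   manualtop = 0
#   manualbot = 0
#   manualright = 0
#   manualleft = 0
#   r = 1
#   verbose = yes
#   showthresh = yes
#   margin = 5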
print("""
We need a file. Place it in the same directory as this script and give the name.
""")
path = input("enter filename> ")
dataImage = input("file for second image to be adjusted (the data image) ")
threshI = int(defThresh)
if threshI >= 0:
print("converting. please wait...")
img = Image.open(path)
dataArray = to.converter(path)
if showthresh == "yes":
to.showThreshold(dataArray, threshI)
print("working on crop. please wait...")
cropped = st.cropN(img, threshI, top, bottom, left, right, margin)
to.restorer(cropped, 'cropped')
print("cropped image saved to cropped.tiff")
#TODO debugging - check to see if numpy.fliplr does what we want
dataArrayFlipped = np.fliplr(dataArray)
to.restorer(dataArrayFlipped, 'flipped_img')
croppedimg = Image.open('cropped.tiff')
print("converting cropped image. please wait...")
dataArray = to.converter('cropped.tiff')
#TODO trying to flip here.
dataArray = np.fliplr(dataArray)
regTup = st.regression(croppedimg)
to.showRegression(croppedimg,regTup)
if autoIntensity in ['saaw']:
print("working on intensity_saaw. please wait...")
intensity = st.intensitySAAW(croppedimg,dataArray,regTup,\
threshI,step,10)
#TODO remove debugging
for element in intensity:
print(element)
#using IMG_2617.tif of sirius and using hA = beta, hB = gamma b/c of
#apparent visibles.
wavelengths = to.pixelLambda(intensity, 640, 692)
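# The calibration above is assumed to be a two-point linear fit: with
# H-beta (~486.1 nm) at pixel 640 and H-gamma (~434.0 nm) at pixel 692,
# every other pixel's wavelength would follow from
#   lambda(px) = 486.1 + (434.0 - 486.1) / (692 - 640) * (px - 640)
# (a sketch of the mapping only; the actual implementation lives in
# tools.pixelLambda).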
#print(wavelengths)
to.plotIntensityWLambda(intensity,wavelengths)
#to.plotIntensityWLambda2(intensity,wavelengths)
#to.plotSamples(croppedimg,intensity,regTup) #TODO fix
#TODO debugging text file printing intensity and wavelengths from response
f = open("debug_wavelengths_intensities_pre-response_from-pxlt.txt","w")
f.write("#values from pxlambdatest.py before feeding to response\n#wavelengths intensity\n")
for i in range(len(wavelengths)):
f.write(str(wavelengths[i])+" "+str(intensity[i])+"\n")
f.close()
asdfjkl = "asdfjkl"
print("string before response: ",asdfjkl)
response = st.response(intensity, wavelengths, "pulkovo/sirius.dat", 0.5)
print("len, response: {0}".format(len(response)))
print(response)
#adjust and display new plot
adjusted = []
for i in range(len(wavelengths)):
    adjusted.append(intensity[i]*response[i])
adjustedND = np.array(adjusted)
to.plotIntensityWLambda(adjustedND,wavelengths)
#TODO just realized above convention is confusing. Order of variables
# passing to function is (y,x)
#TODO debugging text files output of plot...
f = open("debug_first_plot_x_y.txt","w")
f.write("#X Y\n#wavelengths, adjustedND\n")
for i in range(len(adjustedND)):
f.write(str(wavelengths[i])+" "+str(adjustedND[i])+"\n")
f.close()
print("debug file of plot that just popped up (adjusted intensity plot)")
# plt.figure(3)
# plt.clf()
# plt.plot(x_star, y_star,'o',label='original data',markersize=4)
# plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
# plt.title("Pulkovo Data from {0}".format(pulkovo))
# plt.show()
#TODO DEBUGGING TEXT FILES DEBUGGING TEXT FILES AND IPYTHON
#TODO remove debugging, examine pulkovoResponse in iPython
#pulkovoData = np.loadtxt('pulkovo/sirius.dat')
#for i in range(len(pulkovoData)):
# pulkovoData[i][0] = float(pulkovoData[i][0])
# pulkovoData[i][1] = float(pulkovoData[i][1])
#pulkovoResponse = st.response(pulkovoData[:,1], pulkovoData[:,0], "pulkovo/sirius.dat", 1,asdfjkl)
#first column in pulkovo is equiv to intensity, zeroth wavelength
#TODO debugging column text files...
#f = open('debug_response_wavelength.txt','w')
#f.write("#num wavelengths adjustedND")
#for i in range(max(len(wavelengths),len(adjustedND))):
# f.write(str(i)+" "+str(wavelengths[i])+" "+str(adjustedND[i])+"\n")
#f.close()
#print("debug of wavelength and adjustedND to debug_response_wavelength.txt")
#WE WANT TO PLOT ALL THE THINGS ON A SINGLE GRAPH. LET'S PRACTICE
#THAT HERE BEFORE WE DO IT IN A FUNCTION
#TODO ADD TO FUNCTION
host = host_subplot(111, axes_class = AA.Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx() #parasite vert. axis 1
par2 = host.twinx()
offset = 60
new_fixed_axis = par2.get_grid_helper().new_fixed_axis
par2.axis["right"] = new_fixed_axis(loc="right",
axes=par2,
offset=(offset, 0))
par2.axis["right"].toggle(all=True)
host.set_xlabel("Wavelength")
host.set_ylabel("Adjusted Intensity")
par1.set_ylabel("Original Intensity")
par2.set_ylabel("Literature Intensity")
#literature val
pulkovoData = np.loadtxt('pulkovo/sirius.dat')
p1, = host.plot(wavelengths, adjustedND, label="Adjusted Intensity")
p2, = par1.plot(wavelengths, intensity, label="Original Intensity")
p3, = par2.plot(pulkovoData[:,0], pulkovoData[:,1], label="Literature Intensity")
host.legend()
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
par2.axis["right"].label.set_color(p3.get_color())
plt.draw()
plt.show()
#SECOND IMAGE SECOND IMAGE SECOND IMAGE SECOND IMAGE!!!
#adjust our second image: convert to data as well as overlay our new
#adjusted array (to account for camera sensitivity)
dataImageObject = Image.open(dataImage)
dataImageArray = to.converter(dataImage)
if showthresh == "yes":
    to.showThreshold(dataImageArray, threshI)
print("working on dataImage crop")
dataImageCropped = st.cropN(dataImageObject, threshI, top, bottom, left,\
right, margin)
to.restorer(dataImageCropped, 'dataImageCropped')
print("cropped image saved to dataImageCropped.tiff")
dataImageCroppedObject = Image.open('dataImageCropped.tiff')
print("converting cropped dataImage")
dataImageCroppedArray = to.converter('dataImageCropped.tiff')
regTupDI = st.regression(dataImageCroppedObject)
to.showRegression(dataImageCroppedObject, regTupDI)
if autoIntensity in ['saaw']:
print("working on intensity_saaw. please wait...")
intensityDI = st.intensitySAAW(dataImageCroppedObject,\
dataImageCroppedArray, regTupDI,\
threshI, step, 10)
#need to identify wavelengths for target image, see line 69 of file.
#use with data collected same night to ensure that the other params
#remain the same (background, clouds, amt of atmosphere in between, etc
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Amazon EC2 driver
"""
from libcloud.providers import Provider
from libcloud.types import NodeState, InvalidCredsException
from libcloud.base import Node, Response, ConnectionUserAndKey
from libcloud.base import NodeDriver, NodeSize, NodeImage, NodeLocation
import base64
import copy
import hmac
from hashlib import sha256
import time
import urllib
from xml.etree import ElementTree as ET
EC2_US_EAST_HOST = 'ec2.us-east-1.amazonaws.com'
EC2_US_WEST_HOST = 'ec2.us-west-1.amazonaws.com'
EC2_EU_WEST_HOST = 'ec2.eu-west-1.amazonaws.com'
EC2_AP_SOUTHEAST_HOST = 'ec2.ap-southeast-1.amazonaws.com'
API_VERSION = '2009-11-30'
NAMESPACE = "http://ec2.amazonaws.com/doc/%s/" % (API_VERSION)
"""
Sizes must be hardcoded, because Amazon doesn't provide an API to fetch them.
From http://aws.amazon.com/ec2/instance-types/
"""
EC2_INSTANCE_TYPES = {
'm1.small': {
'id': 'm1.small',
'name': 'Small Instance',
'ram': 1740,
'disk': 160,
'bandwidth': None
},
'm1.large': {
'id': 'm1.large',
'name': 'Large Instance',
'ram': 7680,
'disk': 850,
'bandwidth': None
},
'm1.xlarge': {
'id': 'm1.xlarge',
'name': 'Extra Large Instance',
'ram': 15360,
'disk': 1690,
'bandwidth': None
},
'c1.medium': {
'id': 'c1.medium',
'name': 'High-CPU Medium Instance',
'ram': 1740,
'disk': 350,
'bandwidth': None
},
'c1.xlarge': {
'id': 'c1.xlarge',
'name': 'High-CPU Extra Large Instance',
'ram': 7680,
'disk': 1690,
'bandwidth': None
},
'm2.xlarge': {
'id': 'm2.xlarge',
'name': 'High-Memory Extra Large Instance',
'ram': 17510,
'disk': 420,
'bandwidth': None
},
'm2.2xlarge': {
'id': 'm2.2xlarge',
'name': 'High-Memory Double Extra Large Instance',
'ram': 35021,
'disk': 850,
'bandwidth': None
},
'm2.4xlarge': {
'id': 'm2.4xlarge',
'name': 'High-Memory Quadruple Extra Large Instance',
'ram': 70042,
'disk': 1690,
'bandwidth': None
},
}
# Deep copies keep the per-region price assignments below from leaking into
# the other regions through shared nested dicts.
EC2_US_EAST_INSTANCE_TYPES = copy.deepcopy(EC2_INSTANCE_TYPES)
EC2_US_WEST_INSTANCE_TYPES = copy.deepcopy(EC2_INSTANCE_TYPES)
EC2_EU_WEST_INSTANCE_TYPES = copy.deepcopy(EC2_INSTANCE_TYPES)
EC2_AP_SOUTHEAST_INSTANCE_TYPES = copy.deepcopy(EC2_INSTANCE_TYPES)
#
# On demand prices must also be hardcoded, because Amazon doesn't provide an
# API to fetch them. From http://aws.amazon.com/ec2/pricing/
#
EC2_US_EAST_INSTANCE_TYPES['m1.small']['price'] = '.085'
EC2_US_EAST_INSTANCE_TYPES['m1.large']['price'] = '.34'
EC2_US_EAST_INSTANCE_TYPES['m1.xlarge']['price'] = '.68'
EC2_US_EAST_INSTANCE_TYPES['c1.medium']['price'] = '.17'
EC2_US_EAST_INSTANCE_TYPES['c1.xlarge']['price'] = '.68'
EC2_US_EAST_INSTANCE_TYPES['m2.xlarge']['price'] = '.50'
EC2_US_EAST_INSTANCE_TYPES['m2.2xlarge']['price'] = '1.2'
EC2_US_EAST_INSTANCE_TYPES['m2.4xlarge']['price'] = '2.4'
EC2_US_WEST_INSTANCE_TYPES['m1.small']['price'] = '.095'
EC2_US_WEST_INSTANCE_TYPES['m1.large']['price'] = '.38'
EC2_US_WEST_INSTANCE_TYPES['m1.xlarge']['price'] = '.76'
EC2_US_WEST_INSTANCE_TYPES['c1.medium']['price'] = '.19'
EC2_US_WEST_INSTANCE_TYPES['c1.xlarge']['price'] = '.76'
EC2_US_WEST_INSTANCE_TYPES['m2.xlarge']['price'] = '.57'
EC2_US_WEST_INSTANCE_TYPES['m2.2xlarge']['price'] = '1.34'
EC2_US_WEST_INSTANCE_TYPES['m2.4xlarge']['price'] = '2.68'
EC2_EU_WEST_INSTANCE_TYPES['m1.small']['price'] = '.095'
EC2_EU_WEST_INSTANCE_TYPES['m1.large']['price'] = '.38'
EC2_EU_WEST_INSTANCE_TYPES['m1.xlarge']['price'] = '.76'
EC2_EU_WEST_INSTANCE_TYPES['c1.medium']['price'] = '.19'
EC2_EU_WEST_INSTANCE_TYPES['c1.xlarge']['price'] = '.76'
EC2_EU_WEST_INSTANCE_TYPES['m2.xlarge']['price'] = '.57'
EC2_EU_WEST_INSTANCE_TYPES['m2.2xlarge']['price'] = '1.34'
EC2_EU_WEST_INSTANCE_TYPES['m2.4xlarge']['price'] = '2.68'
# prices are the same
EC2_AP_SOUTHEAST_INSTANCE_TYPES = copy.deepcopy(EC2_EU_WEST_INSTANCE_TYPES)
class EC2Response(Response):
"""
EC2 specific response parsing and error handling.
"""
def parse_body(self):
if not self.body:
return None
return ET.XML(self.body)
def parse_error(self):
err_list = []
# Okay, so for Eucalyptus, you can get a 403, with no body,
# if you are using the wrong user/password.
msg = "Failure: 403 Forbidden"
if self.status == 403 and self.body[:len(msg)] == msg:
raise InvalidCredsException(msg)
for err in ET.XML(self.body).findall('Errors/Error'):
code, message = err.getchildren()
err_list.append("%s: %s" % (code.text, message.text))
if code.text == "InvalidClientTokenId":
raise InvalidCredsException(err_list[-1])
if code.text == "SignatureDoesNotMatch":
raise InvalidCredsException(err_list[-1])
if code.text == "AuthFailure":
raise InvalidCredsException(err_list[-1])
if code.text == "OptInRequired":
raise InvalidCredsException(err_list[-1])
return "\n".join(err_list)
class EC2Connection(ConnectionUserAndKey):
"""
Represents a single connection to the EC2 Endpoint
"""
host = EC2_US_EAST_HOST
responseCls = EC2Response
def add_default_params(self, params):
params['SignatureVersion'] = '2'
params['SignatureMethod'] = 'HmacSHA256'
params['AWSAccessKeyId'] = self.user_id
params['Version'] = API_VERSION
params['Timestamp'] = time.strftime('%Y-%m-%dT%H:%M:%SZ',
time.gmtime())
params['Signature'] = self._get_aws_auth_param(params, self.key, self.action)
return params
def _get_aws_auth_param(self, params, secret_key, path='/'):
"""
Creates the signature required for AWS, per
http://bit.ly/aR7GaQ [docs.amazonwebservices.com]:
StringToSign = HTTPVerb + "\n" +
ValueOfHostHeaderInLowercase + "\n" +
HTTPRequestURI + "\n" +
CanonicalizedQueryString <from the preceding step>
"""
keys = params.keys()
keys.sort()
pairs = []
for key in keys:
pairs.append(urllib.quote(key, safe='') + '=' +
urllib.quote(params[key], safe='-_~'))
qs = '&'.join(pairs)
string_to_sign = '\n'.join(('GET', self.host, path, qs))
b64_hmac = base64.b64encode(
hmac.new(secret_key, string_to_sign, digestmod=sha256).digest()
)
return b64_hmac
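# Illustrative sketch (not part of the original driver) of the Signature
# Version 2 canonicalization performed by _get_aws_auth_param above: the
# sorted, URL-encoded query string is joined with the HTTP verb, host and
# path before being HMAC-SHA256 signed. The parameter values below are
# assumptions for demonstration only.
def _example_string_to_sign():
    params = {'Action': 'DescribeInstances', 'Version': API_VERSION}
    pairs = [urllib.quote(k, safe='') + '=' + urllib.quote(params[k], safe='-_~')
             for k in sorted(params.keys())]
    return '\n'.join(('GET', EC2_US_EAST_HOST, '/', '&'.join(pairs)))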
class EC2NodeDriver(NodeDriver):
"""
Amazon EC2 node driver
"""
connectionCls = EC2Connection
type = Provider.EC2
name = 'Amazon EC2 (us-east-1)'
path = '/'
_instance_types = EC2_US_EAST_INSTANCE_TYPES
NODE_STATE_MAP = {
'pending': NodeState.PENDING,
'running': NodeState.RUNNING,
'shutting-down': NodeState.TERMINATED,
'terminated': NodeState.TERMINATED
}
def _findtext(self, element, xpath):
return element.findtext(self._fixxpath(xpath))
def _fixxpath(self, xpath):
# ElementTree wants namespaces in its xpaths, so here we add them.
return "/".join(["{%s}%s" % (NAMESPACE, e) for e in xpath.split("/")])
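# For example, _fixxpath('instancesSet/item') becomes
# '{http://ec2.amazonaws.com/doc/2009-11-30/}instancesSet/'
# '{http://ec2.amazonaws.com/doc/2009-11-30/}item'.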
def _findattr(self, element, xpath):
return element.findtext(self._fixxpath(xpath))
def _findall(self, element, xpath):
return element.findall(self._fixxpath(xpath))
def _pathlist(self, key, arr):
"""
Converts a key and an array of values into AWS query param format.
"""
params = {}
i = 0
for value in arr:
i += 1
params["%s.%s" % (key, i)] = value
return params
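# For example, _pathlist('InstanceId', ['i-123', 'i-456']) returns
# {'InstanceId.1': 'i-123', 'InstanceId.2': 'i-456'}, the numbered
# query-parameter form the EC2 API expects for list arguments.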
def _get_boolean(self, element):
tag = "{%s}%s" % (NAMESPACE, 'return')
return element.findtext(tag) == 'true'
def _get_terminate_boolean(self, element):
status = element.findtext(".//{%s}%s" % (NAMESPACE, 'name'))
return any([ term_status == status
for term_status
in ('shutting-down', 'terminated') ])
def _to_nodes(self, object, xpath, groups=None):
return [ self._to_node(el, groups=groups)
for el in object.findall(self._fixxpath(xpath)) ]
def _to_node(self, element, groups=None):
try:
state = self.NODE_STATE_MAP[
self._findattr(element, "instanceState/name")
]
except KeyError:
state = NodeState.UNKNOWN
n = Node(
id=self._findtext(element, 'instanceId'),
name=self._findtext(element, 'instanceId'),
state=state,
public_ip=[self._findtext(element, 'dnsName')],
private_ip=[self._findtext(element, 'privateDnsName')],
driver=self.connection.driver,
extra={
'dns_name': self._findattr(element, "dnsName"),
'instanceId': self._findattr(element, "instanceId"),
'imageId': self._findattr(element, "imageId"),
'private_dns': self._findattr(element, "privateDnsName"),
'status': self._findattr(element, "instanceState/name"),
'keyname': self._findattr(element, "keyName"),
'launchindex': self._findattr(element, "amiLaunchIndex"),
'productcode':
[p.text for p in self._findall(
element, "productCodesSet/item/productCode"
)],
'instancetype': self._findattr(element, "instanceType"),
'launchdatetime': self._findattr(element, "launchTime"),
'availability': self._findattr(element,
"placement/availabilityZone"),
'kernelid': self._findattr(element, "kernelId"),
'ramdiskid': self._findattr(element, "ramdiskId"),
'groups': groups
}
)
return n
def _to_images(self, object):
return [ self._to_image(el)
for el in object.findall(
self._fixxpath('imagesSet/item')
) ]
def _to_image(self, element):
n = NodeImage(id=self._findtext(element, 'imageId'),
name=self._findtext(element, 'imageLocation'),
driver=self.connection.driver)
return n
def list_nodes(self):
params = {'Action': 'DescribeInstances' }
elem=self.connection.request(self.path, params=params).object
nodes=[]
for rs in self._findall(elem, 'reservationSet/item'):
groups=[g.findtext('')
for g in self._findall(rs, 'groupSet/item/groupId')]
nodes += self._to_nodes(rs, 'instancesSet/item', groups)
return nodes
def list_sizes(self, location=None):
return [ NodeSize(driver=self.connection.driver, **i)
for i in self._instance_types.values() ]
def list_images(self, location=None):
params = {'Action': 'DescribeImages'}
images = self._to_images(
self.connection.request(self.path, params=params).object
)
return images
def ex_create_security_group(self, name, description):
"""Creates a new Security Group
@note: This is a non-standard extension API, and only works for EC2.
@type name: C{str}
@param name: The name of the security group to create. This must be unique.
@type description: C{str}
@param description: Human readable description of a Security Group.
"""
params = {'Action': 'CreateSecurityGroup',
'GroupName': name,
'GroupDescription': description}
return self.connection.request(self.path, params=params).object
def ex_authorize_security_group_permissive(self, name):
"""Edit a Security Group to allow all traffic.
@note: This is a non-standard extension API, and only works for EC2.
@type name: C{str}
@param name: The name of the security group to edit
"""
results = []
params = {'Action': 'AuthorizeSecurityGroupIngress',
'GroupName': name,
'IpProtocol': 'tcp',
'FromPort': '0',
'ToPort': '65535',
'CidrIp': '0.0.0.0/0'}
try:
results.append(
self.connection.request(self.path, params=params.copy()).object
)
except Exception, e:
if e.args[0].find("InvalidPermission.Duplicate") == -1:
raise e
params['IpProtocol'] = 'udp'
try:
results.append(
self.connection.request(self.path, params=params.copy()).object
)
except Exception, e:
if e.args[0].find("InvalidPermission.Duplicate") == -1:
raise e
params.update({'IpProtocol': 'icmp', 'FromPort': '-1', 'ToPort': '-1'})
try:
results.append(
self.connection.request(self.path, params=params.copy()).object
)
except Exception, e:
if e.args[0].find("InvalidPermission.Duplicate") == -1:
raise e
return results
def create_node(self, **kwargs):
"""Create a new EC2 node
See L{NodeDriver.create_node} for more keyword args.
Reference: http://bit.ly/8ZyPSy [docs.amazonwebservices.com]
@keyword ex_mincount: Minimum number of instances to launch
@type ex_mincount: C{int}
@keyword ex_maxcount: Maximum number of instances to launch
@type ex_maxcount: C{int}
@keyword ex_securitygroup: Name of security group
@type ex_securitygroup: C{str}
@keyword ex_keyname: The name of the key pair
@type ex_keyname: C{str}
@keyword ex_userdata: User data
@type ex_userdata: C{str}
"""
image = kwargs["image"]
size = kwargs["size"]
params = {
'Action': 'RunInstances',
'ImageId': image.id,
'MinCount': kwargs.get('ex_mincount','1'),
'MaxCount': kwargs.get('ex_maxcount','1'),
'InstanceType': size.id
}
if 'ex_securitygroup' in kwargs:
if not isinstance(kwargs['ex_securitygroup'], list):
kwargs['ex_securitygroup'] = [kwargs['ex_securitygroup']]
for sig in range(len(kwargs['ex_securitygroup'])):
params['SecurityGroup.%d' % (sig+1,)] = kwargs['ex_securitygroup'][sig]
if 'ex_keyname' in kwargs:
params['KeyName'] = kwargs['ex_keyname']
if 'ex_userdata' in kwargs:
params['UserData'] = base64.b64encode(kwargs['ex_userdata'])
object = self.connection.request(self.path, params=params).object
nodes = self._to_nodes(object, 'instancesSet/item')
if len(nodes) == 1:
return nodes[0]
else:
return nodes
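    # Hedged usage sketch (not part of the original driver): assuming `driver`,
    # `image` and `size` are objects obtained from this class and its
    # list_images()/list_sizes() helpers, a call using the extension keywords
    # documented above might look like the commented lines below; the key pair
    # and security group names are illustrative placeholders.
    #
    #     node = driver.create_node(image=image, size=size,
    #                               ex_mincount='1', ex_maxcount='1',
    #                               ex_keyname='gsg-keypair',
    #                               ex_securitygroup='default',
    #                               ex_userdata='#!/bin/sh\necho hello')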
def reboot_node(self, node):
"""
Reboot the node by passing in the node object
"""
params = {'Action': 'RebootInstances'}
params.update(self._pathlist('InstanceId', [node.id]))
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def destroy_node(self, node):
"""
Destroy node by passing in the node object
"""
params = {'Action': 'TerminateInstances'}
params.update(self._pathlist('InstanceId', [node.id]))
res = self.connection.request(self.path, params=params).object
return self._get_terminate_boolean(res)
def list_locations(self):
return [NodeLocation(0, 'Amazon US N. Virginia', 'US', self)]
class EC2EUConnection(EC2Connection):
"""
Connection class for EC2 in the Western Europe Region
"""
host = EC2_EU_WEST_HOST
class EC2EUNodeDriver(EC2NodeDriver):
"""
Driver class for EC2 in the Western Europe Region
"""
name = 'Amazon EC2 (eu-west-1)'
connectionCls = EC2EUConnection
_instance_types = EC2_EU_WEST_INSTANCE_TYPES
def list_locations(self):
return [NodeLocation(0, 'Amazon Europe Ireland', 'IE', self)]
class EC2USWestConnection(EC2Connection):
"""
Connection class for EC2 in the Western US Region
"""
host = EC2_US_WEST_HOST
class EC2USWestNodeDriver(EC2NodeDriver):
"""
Driver class for EC2 in the Western US Region
"""
name = 'Amazon EC2 (us-west-1)'
connectionCls = EC2USWestConnection
_instance_types = EC2_US_WEST_INSTANCE_TYPES
def list_locations(self):
return [NodeLocation(0, 'Amazon US N. California', 'US', self)]
class EC2APSEConnection(EC2Connection):
"""
Connection class for EC2 in the Southeast Asia Pacific Region
"""
host = EC2_AP_SOUTHEAST_HOST
class EC2APSENodeDriver(EC2NodeDriver):
"""
Driver class for EC2 in the Southeast Asia Pacific Region
"""
name = 'Amazon EC2 (ap-southeast-1)'
connectionCls = EC2APSEConnection
_instance_types = EC2_AP_SOUTHEAST_INSTANCE_TYPES
def list_locations(self):
return [NodeLocation(0, 'Amazon Asia-Pacific Singapore', 'SG', self)]
class EucConnection(EC2Connection):
"""
Connection class for Eucalyptus
"""
host = None
class EucNodeDriver(EC2NodeDriver):
"""
Driver class for Eucalyptus
"""
name = 'Eucalyptus'
connectionCls = EucConnection
_instance_types = EC2_US_WEST_INSTANCE_TYPES
def __init__(self, key, secret=None, secure=True, host=None, path=None, port=None):
super(EucNodeDriver, self).__init__(key, secret, secure, host, port)
if path is None:
path = "/services/Eucalyptus"
self.path = path
def list_locations(self):
raise NotImplementedError, \
'list_locations not implemented for this driver'
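# Hedged usage sketch (not part of the original source): EucNodeDriver forwards
# host, path and port to the generic EC2 machinery, so pointing it at a private
# Eucalyptus endpoint might look like the commented lines below; the host name,
# port and credentials are illustrative placeholders.
#
#     driver = EucNodeDriver('access_id', 'secret_key', secure=False,
#                            host='eucalyptus.example.org', port=8773)
#     print driver.list_nodes()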
|
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2017 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import cgi
import hashlib
import os
import re
import shutil
import tempfile
import threading
from lib.core.common import Backend
from lib.core.common import checkFile
from lib.core.common import dataToDumpFile
from lib.core.common import dataToStdout
from lib.core.common import getSafeExString
from lib.core.common import getUnicode
from lib.core.common import isListLike
from lib.core.common import normalizeUnicode
from lib.core.common import openFile
from lib.core.common import prioritySortColumns
from lib.core.common import randomInt
from lib.core.common import safeCSValue
from lib.core.common import unicodeencode
from lib.core.common import unsafeSQLIdentificatorNaming
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.dicts import DUMP_REPLACEMENTS
from lib.core.enums import CONTENT_STATUS
from lib.core.enums import CONTENT_TYPE
from lib.core.enums import DBMS
from lib.core.enums import DUMP_FORMAT
from lib.core.exception import SqlmapGenericException
from lib.core.exception import SqlmapValueException
from lib.core.exception import SqlmapSystemException
from lib.core.replication import Replication
from lib.core.settings import DUMP_FILE_BUFFER_SIZE
from lib.core.settings import HTML_DUMP_CSS_STYLE
from lib.core.settings import IS_WIN
from lib.core.settings import METADB_SUFFIX
from lib.core.settings import MIN_BINARY_DISK_DUMP_SIZE
from lib.core.settings import TRIM_STDOUT_DUMP_SIZE
from lib.core.settings import UNICODE_ENCODING
from lib.core.settings import WINDOWS_RESERVED_NAMES
from thirdparty.magic import magic
from extra.safe2bin.safe2bin import safechardecode
class Dump(object):
"""
This class defines methods used to parse and output the results
of SQL injection actions
"""
def __init__(self):
self._outputFile = None
self._outputFP = None
self._lock = threading.Lock()
def _write(self, data, newline=True, console=True, content_type=None):
if hasattr(conf, "api"):
dataToStdout(data, content_type=content_type, status=CONTENT_STATUS.COMPLETE)
return
text = "%s%s" % (data, "\n" if newline else " ")
if console:
dataToStdout(text)
if kb.get("multiThreadMode"):
self._lock.acquire()
try:
self._outputFP.write(text)
except IOError, ex:
errMsg = "error occurred while writing to log file ('%s')" % getSafeExString(ex)
raise SqlmapGenericException(errMsg)
if kb.get("multiThreadMode"):
self._lock.release()
kb.dataOutputFlag = True
def flush(self):
if self._outputFP:
try:
self._outputFP.flush()
except IOError:
pass
def setOutputFile(self):
self._outputFile = os.path.join(conf.outputPath, "log")
try:
self._outputFP = openFile(self._outputFile, "ab" if not conf.flushSession else "wb")
except IOError, ex:
errMsg = "error occurred while opening log file ('%s')" % getSafeExString(ex)
raise SqlmapGenericException(errMsg)
def getOutputFile(self):
return self._outputFile
def singleString(self, data, content_type=None):
self._write(data, content_type=content_type)
def string(self, header, data, content_type=None, sort=True):
kb.stickyLevel = None
if hasattr(conf, "api"):
self._write(data, content_type=content_type)
return
if isListLike(data):
self.lister(header, data, content_type, sort)
elif data is not None:
_ = getUnicode(data)
if _.endswith("\r\n"):
_ = _[:-2]
elif _.endswith("\n"):
_ = _[:-1]
if _.strip(' '):
_ = _.strip(' ')
if "\n" in _:
self._write("%s:\n---\n%s\n---" % (header, _))
else:
self._write("%s: %s" % (header, ("'%s'" % _) if isinstance(data, basestring) else _))
else:
self._write("%s:\tNone" % header)
def lister(self, header, elements, content_type=None, sort=True):
if elements and sort:
try:
elements = set(elements)
elements = list(elements)
elements.sort(key=lambda x: x.lower() if isinstance(x, basestring) else x)
except:
pass
if hasattr(conf, "api"):
self._write(elements, content_type=content_type)
return
if elements:
self._write("%s [%d]:" % (header, len(elements)))
for element in elements:
if isinstance(element, basestring):
self._write("[*] %s" % element)
elif isListLike(element):
self._write("[*] " + ", ".join(getUnicode(e) for e in element))
if elements:
self._write("")
def banner(self, data):
self.string("banner", data, content_type=CONTENT_TYPE.BANNER)
def currentUser(self, data):
self.string("current user", data, content_type=CONTENT_TYPE.CURRENT_USER)
def currentDb(self, data):
if Backend.isDbms(DBMS.MAXDB):
self.string("current database (no practical usage on %s)" % Backend.getIdentifiedDbms(), data, content_type=CONTENT_TYPE.CURRENT_DB)
elif Backend.getIdentifiedDbms() in (DBMS.ORACLE, DBMS.PGSQL, DBMS.HSQLDB):
self.string("current schema (equivalent to database on %s)" % Backend.getIdentifiedDbms(), data, content_type=CONTENT_TYPE.CURRENT_DB)
else:
self.string("current database", data, content_type=CONTENT_TYPE.CURRENT_DB)
def hostname(self, data):
self.string("hostname", data, content_type=CONTENT_TYPE.HOSTNAME)
def dba(self, data):
self.string("current user is DBA", data, content_type=CONTENT_TYPE.IS_DBA)
def users(self, users):
self.lister("database management system users", users, content_type=CONTENT_TYPE.USERS)
def userSettings(self, header, userSettings, subHeader, content_type=None):
self._areAdmins = set()
if isinstance(userSettings, (tuple, list, set)):
self._areAdmins = userSettings[1]
userSettings = userSettings[0]
users = userSettings.keys()
users.sort(key=lambda x: x.lower() if isinstance(x, basestring) else x)
if hasattr(conf, "api"):
self._write(userSettings, content_type=content_type)
return
if userSettings:
self._write("%s:" % header)
for user in users:
settings = userSettings[user]
if settings is None:
stringSettings = ""
else:
stringSettings = " [%d]:" % len(settings)
if user in self._areAdmins:
self._write("[*] %s (administrator)%s" % (user, stringSettings))
else:
self._write("[*] %s%s" % (user, stringSettings))
if settings:
settings.sort()
for setting in settings:
self._write(" %s: %s" % (subHeader, setting))
if userSettings:
self.singleString("")
def dbs(self, dbs):
self.lister("available databases", dbs, content_type=CONTENT_TYPE.DBS)
def dbTables(self, dbTables):
if isinstance(dbTables, dict) and len(dbTables) > 0:
if hasattr(conf, "api"):
self._write(dbTables, content_type=CONTENT_TYPE.TABLES)
return
maxlength = 0
for tables in dbTables.values():
for table in tables:
if table and isListLike(table):
table = table[0]
maxlength = max(maxlength, len(unsafeSQLIdentificatorNaming(normalizeUnicode(table) or unicode(table))))
lines = "-" * (int(maxlength) + 2)
for db, tables in dbTables.items():
tables.sort()
self._write("Database: %s" % unsafeSQLIdentificatorNaming(db) if db else "Current database")
if len(tables) == 1:
self._write("[1 table]")
else:
self._write("[%d tables]" % len(tables))
self._write("+%s+" % lines)
for table in tables:
if table and isListLike(table):
table = table[0]
table = unsafeSQLIdentificatorNaming(table)
blank = " " * (maxlength - len(normalizeUnicode(table) or unicode(table)))
self._write("| %s%s |" % (table, blank))
self._write("+%s+\n" % lines)
elif dbTables is None or len(dbTables) == 0:
self.singleString("No tables found", content_type=CONTENT_TYPE.TABLES)
else:
self.string("tables", dbTables, content_type=CONTENT_TYPE.TABLES)
def dbTableColumns(self, tableColumns, content_type=None):
if isinstance(tableColumns, dict) and len(tableColumns) > 0:
if hasattr(conf, "api"):
self._write(tableColumns, content_type=content_type)
return
for db, tables in tableColumns.items():
if not db:
db = "All"
for table, columns in tables.items():
maxlength1 = 0
maxlength2 = 0
colType = None
colList = columns.keys()
colList.sort(key=lambda x: x.lower() if isinstance(x, basestring) else x)
for column in colList:
colType = columns[column]
column = unsafeSQLIdentificatorNaming(column)
maxlength1 = max(maxlength1, len(column or ""))
maxlength2 = max(maxlength2, len(colType or ""))
maxlength1 = max(maxlength1, len("COLUMN"))
lines1 = "-" * (maxlength1 + 2)
if colType is not None:
maxlength2 = max(maxlength2, len("TYPE"))
lines2 = "-" * (maxlength2 + 2)
self._write("Database: %s\nTable: %s" % (unsafeSQLIdentificatorNaming(db) if db else "Current database", unsafeSQLIdentificatorNaming(table)))
if len(columns) == 1:
self._write("[1 column]")
else:
self._write("[%d columns]" % len(columns))
if colType is not None:
self._write("+%s+%s+" % (lines1, lines2))
else:
self._write("+%s+" % lines1)
blank1 = " " * (maxlength1 - len("COLUMN"))
if colType is not None:
blank2 = " " * (maxlength2 - len("TYPE"))
if colType is not None:
self._write("| Column%s | Type%s |" % (blank1, blank2))
self._write("+%s+%s+" % (lines1, lines2))
else:
self._write("| Column%s |" % blank1)
self._write("+%s+" % lines1)
for column in colList:
colType = columns[column]
column = unsafeSQLIdentificatorNaming(column)
blank1 = " " * (maxlength1 - len(column))
if colType is not None:
blank2 = " " * (maxlength2 - len(colType))
self._write("| %s%s | %s%s |" % (column, blank1, colType, blank2))
else:
self._write("| %s%s |" % (column, blank1))
if colType is not None:
self._write("+%s+%s+\n" % (lines1, lines2))
else:
self._write("+%s+\n" % lines1)
def dbTablesCount(self, dbTables):
if isinstance(dbTables, dict) and len(dbTables) > 0:
if hasattr(conf, "api"):
self._write(dbTables, content_type=CONTENT_TYPE.COUNT)
return
maxlength1 = len("Table")
maxlength2 = len("Entries")
for ctables in dbTables.values():
for tables in ctables.values():
for table in tables:
maxlength1 = max(maxlength1, len(normalizeUnicode(table) or unicode(table)))
for db, counts in dbTables.items():
self._write("Database: %s" % unsafeSQLIdentificatorNaming(db) if db else "Current database")
lines1 = "-" * (maxlength1 + 2)
blank1 = " " * (maxlength1 - len("Table"))
lines2 = "-" * (maxlength2 + 2)
blank2 = " " * (maxlength2 - len("Entries"))
self._write("+%s+%s+" % (lines1, lines2))
self._write("| Table%s | Entries%s |" % (blank1, blank2))
self._write("+%s+%s+" % (lines1, lines2))
sortedCounts = counts.keys()
sortedCounts.sort(reverse=True)
for count in sortedCounts:
tables = counts[count]
if count is None:
count = "Unknown"
tables.sort(key=lambda x: x.lower() if isinstance(x, basestring) else x)
for table in tables:
blank1 = " " * (maxlength1 - len(normalizeUnicode(table) or unicode(table)))
blank2 = " " * (maxlength2 - len(str(count)))
self._write("| %s%s | %d%s |" % (table, blank1, count, blank2))
self._write("+%s+%s+\n" % (lines1, lines2))
else:
logger.error("unable to retrieve the number of entries for any table")
def dbTableValues(self, tableValues):
replication = None
rtable = None
dumpFP = None
appendToFile = False
warnFile = False
if tableValues is None:
return
db = tableValues["__infos__"]["db"]
if not db:
db = "All"
table = tableValues["__infos__"]["table"]
if hasattr(conf, "api"):
self._write(tableValues, content_type=CONTENT_TYPE.DUMP_TABLE)
return
dumpDbPath = os.path.join(conf.dumpPath, unsafeSQLIdentificatorNaming(db))
if conf.dumpFormat == DUMP_FORMAT.SQLITE:
replication = Replication(os.path.join(conf.dumpPath, "%s.sqlite3" % unsafeSQLIdentificatorNaming(db)))
elif conf.dumpFormat in (DUMP_FORMAT.CSV, DUMP_FORMAT.HTML):
if not os.path.isdir(dumpDbPath):
try:
os.makedirs(dumpDbPath, 0755)
except:
warnFile = True
_ = unicodeencode(re.sub(r"[^\w]", "_", unsafeSQLIdentificatorNaming(db)))
dumpDbPath = os.path.join(conf.dumpPath, "%s-%s" % (_, hashlib.md5(unicodeencode(db)).hexdigest()[:8]))
if not os.path.isdir(dumpDbPath):
try:
os.makedirs(dumpDbPath, 0755)
except Exception, ex:
try:
tempDir = tempfile.mkdtemp(prefix="sqlmapdb")
except IOError, _:
errMsg = "unable to write to the temporary directory ('%s'). " % _
errMsg += "Please make sure that your disk is not full and "
errMsg += "that you have sufficient write permissions to "
errMsg += "create temporary files and/or directories"
raise SqlmapSystemException(errMsg)
warnMsg = "unable to create dump directory "
warnMsg += "'%s' (%s). " % (dumpDbPath, getSafeExString(ex))
warnMsg += "Using temporary directory '%s' instead" % tempDir
logger.warn(warnMsg)
dumpDbPath = tempDir
dumpFileName = os.path.join(dumpDbPath, "%s.%s" % (unsafeSQLIdentificatorNaming(table), conf.dumpFormat.lower()))
if not checkFile(dumpFileName, False):
try:
openFile(dumpFileName, "w+b").close()
except SqlmapSystemException:
raise
except:
warnFile = True
_ = re.sub(r"[^\w]", "_", normalizeUnicode(unsafeSQLIdentificatorNaming(table)))
if len(_) < len(table) or IS_WIN and table.upper() in WINDOWS_RESERVED_NAMES:
_ = unicodeencode(re.sub(r"[^\w]", "_", unsafeSQLIdentificatorNaming(table)))
dumpFileName = os.path.join(dumpDbPath, "%s-%s.%s" % (_, hashlib.md5(unicodeencode(table)).hexdigest()[:8], conf.dumpFormat.lower()))
else:
dumpFileName = os.path.join(dumpDbPath, "%s.%s" % (_, conf.dumpFormat.lower()))
else:
appendToFile = any((conf.limitStart, conf.limitStop))
if not appendToFile:
count = 1
while True:
candidate = "%s.%d" % (dumpFileName, count)
if not checkFile(candidate, False):
try:
shutil.copyfile(dumpFileName, candidate)
except IOError:
pass
finally:
break
else:
count += 1
dumpFP = openFile(dumpFileName, "wb" if not appendToFile else "ab", buffering=DUMP_FILE_BUFFER_SIZE)
count = int(tableValues["__infos__"]["count"])
separator = str()
field = 1
fields = len(tableValues) - 1
columns = prioritySortColumns(tableValues.keys())
if conf.col:
cols = conf.col.split(',')
columns = sorted(columns, key=lambda _: cols.index(_) if _ in cols else 0)
for column in columns:
if column != "__infos__":
info = tableValues[column]
lines = "-" * (int(info["length"]) + 2)
separator += "+%s" % lines
separator += "+"
self._write("Database: %s\nTable: %s" % (unsafeSQLIdentificatorNaming(db) if db else "Current database", unsafeSQLIdentificatorNaming(table)))
if conf.dumpFormat == DUMP_FORMAT.SQLITE:
cols = []
for column in columns:
if column != "__infos__":
colType = Replication.INTEGER
for value in tableValues[column]['values']:
try:
if not value or value == " ": # NULL
continue
int(value)
except ValueError:
colType = None
break
if colType is None:
colType = Replication.REAL
for value in tableValues[column]['values']:
try:
if not value or value == " ": # NULL
continue
float(value)
except ValueError:
colType = None
break
cols.append((unsafeSQLIdentificatorNaming(column), colType if colType else Replication.TEXT))
rtable = replication.createTable(table, cols)
elif conf.dumpFormat == DUMP_FORMAT.HTML:
dataToDumpFile(dumpFP, "<!DOCTYPE html>\n<html>\n<head>\n")
dataToDumpFile(dumpFP, "<meta http-equiv=\"Content-type\" content=\"text/html;charset=%s\">\n" % UNICODE_ENCODING)
dataToDumpFile(dumpFP, "<title>%s</title>\n" % ("%s%s" % ("%s." % db if METADB_SUFFIX not in db else "", table)))
dataToDumpFile(dumpFP, HTML_DUMP_CSS_STYLE)
dataToDumpFile(dumpFP, "\n</head>\n<body>\n<table>\n<thead>\n<tr>\n")
if count == 1:
self._write("[1 entry]")
else:
self._write("[%d entries]" % count)
self._write(separator)
for column in columns:
if column != "__infos__":
info = tableValues[column]
column = unsafeSQLIdentificatorNaming(column)
maxlength = int(info["length"])
blank = " " * (maxlength - len(column))
self._write("| %s%s" % (column, blank), newline=False)
if not appendToFile:
if conf.dumpFormat == DUMP_FORMAT.CSV:
if field == fields:
dataToDumpFile(dumpFP, "%s" % safeCSValue(column))
else:
dataToDumpFile(dumpFP, "%s%s" % (safeCSValue(column), conf.csvDel))
elif conf.dumpFormat == DUMP_FORMAT.HTML:
dataToDumpFile(dumpFP, "<th>%s</th>" % cgi.escape(column).encode("ascii", "xmlcharrefreplace"))
field += 1
if conf.dumpFormat == DUMP_FORMAT.HTML:
dataToDumpFile(dumpFP, "\n</tr>\n</thead>\n<tbody>\n")
self._write("|\n%s" % separator)
if conf.dumpFormat == DUMP_FORMAT.CSV:
dataToDumpFile(dumpFP, "\n" if not appendToFile else "")
elif conf.dumpFormat == DUMP_FORMAT.SQLITE:
rtable.beginTransaction()
if count > TRIM_STDOUT_DUMP_SIZE:
warnMsg = "console output will be trimmed to "
warnMsg += "last %d rows due to " % TRIM_STDOUT_DUMP_SIZE
warnMsg += "large table size"
logger.warning(warnMsg)
for i in xrange(count):
console = (i >= count - TRIM_STDOUT_DUMP_SIZE)
field = 1
values = []
if conf.dumpFormat == DUMP_FORMAT.HTML:
dataToDumpFile(dumpFP, "<tr>")
for column in columns:
if column != "__infos__":
info = tableValues[column]
if len(info["values"]) <= i:
continue
if info["values"][i] is None:
value = u''
else:
value = getUnicode(info["values"][i])
value = DUMP_REPLACEMENTS.get(value, value)
values.append(value)
maxlength = int(info["length"])
blank = " " * (maxlength - len(value))
self._write("| %s%s" % (value, blank), newline=False, console=console)
if len(value) > MIN_BINARY_DISK_DUMP_SIZE and r'\x' in value:
try:
mimetype = magic.from_buffer(value, mime=True)
if any(mimetype.startswith(_) for _ in ("application", "image")):
if not os.path.isdir(dumpDbPath):
os.makedirs(dumpDbPath, 0755)
_ = re.sub(r"[^\w]", "_", normalizeUnicode(unsafeSQLIdentificatorNaming(column)))
filepath = os.path.join(dumpDbPath, "%s-%d.bin" % (_, randomInt(8)))
warnMsg = "writing binary ('%s') content to file '%s' " % (mimetype, filepath)
logger.warn(warnMsg)
with open(filepath, "wb") as f:
_ = safechardecode(value, True)
f.write(_)
except magic.MagicException, err:
logger.debug(str(err))
if conf.dumpFormat == DUMP_FORMAT.CSV:
if field == fields:
dataToDumpFile(dumpFP, "%s" % safeCSValue(value))
else:
dataToDumpFile(dumpFP, "%s%s" % (safeCSValue(value), conf.csvDel))
elif conf.dumpFormat == DUMP_FORMAT.HTML:
dataToDumpFile(dumpFP, "<td>%s</td>" % cgi.escape(value).encode("ascii", "xmlcharrefreplace"))
field += 1
if conf.dumpFormat == DUMP_FORMAT.SQLITE:
try:
rtable.insert(values)
except SqlmapValueException:
pass
elif conf.dumpFormat == DUMP_FORMAT.CSV:
dataToDumpFile(dumpFP, "\n")
elif conf.dumpFormat == DUMP_FORMAT.HTML:
dataToDumpFile(dumpFP, "</tr>\n")
self._write("|", console=console)
self._write("%s\n" % separator)
if conf.dumpFormat == DUMP_FORMAT.SQLITE:
rtable.endTransaction()
logger.info("table '%s.%s' dumped to sqlite3 database '%s'" % (db, table, replication.dbpath))
elif conf.dumpFormat in (DUMP_FORMAT.CSV, DUMP_FORMAT.HTML):
if conf.dumpFormat == DUMP_FORMAT.HTML:
dataToDumpFile(dumpFP, "</tbody>\n</table>\n</body>\n</html>")
else:
dataToDumpFile(dumpFP, "\n")
dumpFP.close()
msg = "table '%s.%s' dumped to %s file '%s'" % (db, table, conf.dumpFormat, dumpFileName)
if not warnFile:
logger.info(msg)
else:
logger.warn(msg)
def dbColumns(self, dbColumnsDict, colConsider, dbs):
if hasattr(conf, "api"):
self._write(dbColumnsDict, content_type=CONTENT_TYPE.COLUMNS)
return
for column in dbColumnsDict.keys():
if colConsider == "1":
colConsiderStr = "s LIKE '%s' were" % unsafeSQLIdentificatorNaming(column)
else:
colConsiderStr = " '%s' was" % unsafeSQLIdentificatorNaming(column)
msg = "column%s found in the " % colConsiderStr
msg += "following databases:"
self._write(msg)
_ = {}
for db, tblData in dbs.items():
for tbl, colData in tblData.items():
for col, dataType in colData.items():
if column.lower() in col.lower():
if db in _:
if tbl in _[db]:
_[db][tbl][col] = dataType
else:
_[db][tbl] = {col: dataType}
else:
_[db] = {}
_[db][tbl] = {col: dataType}
continue
self.dbTableColumns(_)
def query(self, query, queryRes):
self.string(query, queryRes, content_type=CONTENT_TYPE.SQL_QUERY)
def rFile(self, fileData):
self.lister("files saved to", fileData, sort=False, content_type=CONTENT_TYPE.FILE_READ)
def registerValue(self, registerData):
self.string("Registry key value data", registerData, content_type=CONTENT_TYPE.REG_READ, sort=False)
# Object that manages how retrieved query output is printed to the standard
# output and to the session file
dumper = Dump()
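# Hedged usage sketch (not part of sqlmap itself): once conf and kb have been
# initialized by the framework, the module-level dumper is typically driven
# like the commented calls below; the header and values shown are illustrative
# only.
#
#     dumper.setOutputFile()
#     dumper.string("current user", "root@localhost")
#     dumper.lister("available databases", ["information_schema", "testdb"])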
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, os, copy, json, re
from frappe import _
from frappe.modules import get_doc_path
from jinja2 import TemplateNotFound
from frappe.utils import cint, strip_html
from markdown2 import markdown
no_cache = 1
no_sitemap = 1
base_template_path = "templates/www/print.html"
standard_format = "templates/print_formats/standard.html"
def get_context(context):
"""Build context for print"""
if not ((frappe.form_dict.doctype and frappe.form_dict.name) or frappe.form_dict.doc):
return {
"body": """<h1>Error</h1>
<p>Parameters doctype and name required</p>
<pre>%s</pre>""" % repr(frappe.form_dict)
}
if frappe.form_dict.doc:
doc = frappe.form_dict.doc
else:
doc = frappe.get_doc(frappe.form_dict.doctype, frappe.form_dict.name)
meta = frappe.get_meta(doc.doctype)
print_format = get_print_format_doc(None, meta = meta)
return {
"body": get_html(doc, print_format = print_format,
meta=meta, trigger_print = frappe.form_dict.trigger_print,
no_letterhead=frappe.form_dict.no_letterhead),
"css": get_print_style(frappe.form_dict.style, print_format),
"comment": frappe.session.user,
"title": doc.get(meta.title_field) if meta.title_field else doc.name
}
def get_print_format_doc(print_format_name, meta):
"""Returns print format document"""
if not print_format_name:
print_format_name = frappe.form_dict.format \
or meta.default_print_format or "Standard"
if print_format_name == "Standard":
return None
else:
try:
return frappe.get_doc("Print Format", print_format_name)
except frappe.DoesNotExistError:
# if old name, return standard!
return None
def get_html(doc, name=None, print_format=None, meta=None,
no_letterhead=None, trigger_print=False):
print_settings = frappe.db.get_singles_dict("Print Settings")
if isinstance(no_letterhead, basestring):
no_letterhead = cint(no_letterhead)
elif no_letterhead is None:
no_letterhead = not cint(print_settings.with_letterhead)
doc.flags.in_print = True
if not frappe.flags.ignore_print_permissions:
validate_print_permission(doc)
if doc.meta.is_submittable:
if doc.docstatus==0 and not print_settings.allow_print_for_draft:
frappe.throw(_("Not allowed to print draft documents"), frappe.PermissionError)
if doc.docstatus==2 and not print_settings.allow_print_for_cancelled:
frappe.throw(_("Not allowed to print cancelled documents"), frappe.PermissionError)
if hasattr(doc, "before_print"):
doc.before_print()
if not hasattr(doc, "print_heading"): doc.print_heading = None
if not hasattr(doc, "sub_heading"): doc.sub_heading = None
if not meta:
meta = frappe.get_meta(doc.doctype)
jenv = frappe.get_jenv()
format_data, format_data_map = [], {}
# determine template
if print_format:
doc._show_section_headings = print_format.show_section_headings
doc._line_breaks = print_format.line_breaks
doc._align_labels_left = print_format.align_labels_left
if print_format.format_data:
# set format data
format_data = json.loads(print_format.format_data)
for df in format_data:
format_data_map[df.get("fieldname")] = df
if "visible_columns" in df:
for _df in df.get("visible_columns"):
format_data_map[_df.get("fieldname")] = _df
doc.format_data_map = format_data_map
template = "standard"
elif print_format.standard=="Yes" or print_format.custom_format:
template = jenv.from_string(get_print_format(doc.doctype,
print_format))
else:
# fallback
template = "standard"
else:
template = "standard"
if template == "standard":
template = jenv.get_template(standard_format)
letter_head = frappe._dict(get_letter_head(doc, no_letterhead) or {})
convert_markdown(doc, meta)
args = {
"doc": doc,
"meta": frappe.get_meta(doc.doctype),
"layout": make_layout(doc, meta, format_data),
"no_letterhead": no_letterhead,
"trigger_print": cint(trigger_print),
"letter_head": letter_head.content,
"footer": letter_head.footer,
"print_settings": frappe.get_doc("Print Settings")
}
html = template.render(args, filters={"len": len})
if cint(trigger_print):
html += trigger_print_script
return html
def convert_markdown(doc, meta):
    '''Render Text Editor field values written in markdown to HTML when the markdown marker is present'''
for field in meta.fields:
if field.fieldtype=='Text Editor':
value = doc.get(field.fieldname)
if value and '<!-- markdown -->' in value:
doc.set(field.fieldname, markdown(value))
@frappe.whitelist()
def get_html_and_style(doc, name=None, print_format=None, meta=None,
no_letterhead=None, trigger_print=False):
"""Returns `html` and `style` of print format, used in PDF etc"""
if isinstance(doc, basestring) and isinstance(name, basestring):
doc = frappe.get_doc(doc, name)
if isinstance(doc, basestring):
doc = frappe.get_doc(json.loads(doc))
print_format = get_print_format_doc(print_format, meta=meta or frappe.get_meta(doc.doctype))
return {
"html": get_html(doc, name=name, print_format=print_format, meta=meta,
no_letterhead=no_letterhead, trigger_print=trigger_print),
"style": get_print_style(print_format=print_format)
}
def validate_print_permission(doc):
if frappe.form_dict.get("key"):
if frappe.form_dict.key == doc.get_signature():
return
for ptype in ("read", "print"):
if (not frappe.has_permission(doc.doctype, ptype, doc)
and not frappe.has_website_permission(doc)):
raise frappe.PermissionError(_("No {0} permission").format(ptype))
def get_letter_head(doc, no_letterhead):
if no_letterhead:
return {}
if doc.get("letter_head"):
return frappe.db.get_value("Letter Head", doc.letter_head, ["content", "footer"], as_dict=True)
else:
return frappe.db.get_value("Letter Head", {"is_default": 1}, ["content", "footer"], as_dict=True) or {}
def get_print_format(doctype, print_format):
if print_format.disabled:
frappe.throw(_("Print Format {0} is disabled").format(print_format.name),
frappe.DoesNotExistError)
# server, find template
path = os.path.join(get_doc_path(frappe.db.get_value("DocType", doctype, "module"),
"Print Format", print_format.name), frappe.scrub(print_format.name) + ".html")
if os.path.exists(path):
with open(path, "r") as pffile:
return pffile.read()
else:
if print_format.html:
return print_format.html
else:
frappe.throw(_("No template found at path: {0}").format(path),
frappe.TemplateNotFoundError)
def make_layout(doc, meta, format_data=None):
"""Builds a hierarchical layout object from the fields list to be rendered
by `standard.html`
:param doc: Document to be rendered.
:param meta: Document meta object (doctype).
:param format_data: Fields sequence and properties defined by Print Format Builder."""
layout, page = [], []
layout.append(page)
if format_data:
# extract print_heading_template from the first field
# and remove the field
if format_data[0].get("fieldname") == "print_heading_template":
doc.print_heading_template = format_data[0].get("options")
format_data = format_data[1:]
def get_new_section(): return {'columns': [], 'has_data': False}
def append_empty_field_dict_to_page_column(page):
""" append empty columns dict to page layout """
if not page[-1]['columns']:
page[-1]['columns'].append({'fields': []})
for df in format_data or meta.fields:
if format_data:
# embellish df with original properties
df = frappe._dict(df)
if df.fieldname:
original = meta.get_field(df.fieldname)
if original:
newdf = original.as_dict()
newdf.update(df)
df = newdf
df.print_hide = 0
if df.fieldtype=="Section Break" or page==[]:
if len(page) > 1:
if page[-1]['has_data']==False:
# truncate last section if empty
del page[-1]
section = get_new_section()
if df.fieldtype=='Section Break' and df.label:
section['label'] = df.label
page.append(section)
elif df.fieldtype=="Column Break":
            # on a Column Break, start a new column
page[-1]['columns'].append({'fields': []})
else:
# add a column if not yet added
append_empty_field_dict_to_page_column(page)
if df.fieldtype=="HTML" and df.options:
doc.set(df.fieldname, True) # show this field
if is_visible(df, doc) and has_value(df, doc):
append_empty_field_dict_to_page_column(page)
page[-1]['columns'][-1]['fields'].append(df)
# section has fields
page[-1]['has_data'] = True
# if table, add the row info in the field
# if a page break is found, create a new docfield
if df.fieldtype=="Table":
df.rows = []
df.start = 0
df.end = None
for i, row in enumerate(doc.get(df.fieldname)):
if row.get("page_break"):
# close the earlier row
df.end = i
# new page, with empty section and column
page = [get_new_section()]
layout.append(page)
append_empty_field_dict_to_page_column(page)
# continue the table in a new page
df = copy.copy(df)
df.start = i
df.end = None
page[-1]['columns'][-1]['fields'].append(df)
return layout
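# Hedged illustration (not from the original source): the object returned by
# make_layout is a list of "pages"; each page is a list of sections, and each
# section holds columns of docfields, roughly like the commented structure
# below (df1/df2/df3 and the section label are placeholders).
#
#     [                                   # layout
#         [                               # page
#             {"label": "Details",        # section
#              "has_data": True,
#              "columns": [{"fields": [df1, df2]},
#                          {"fields": [df3]}]},
#         ],
#     ]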
def is_visible(df, doc):
"""Returns True if docfield is visible in print layout and does not have print_hide set."""
if df.fieldtype in ("Section Break", "Column Break", "Button"):
return False
if hasattr(doc, "hide_in_print_layout"):
if df.fieldname in doc.hide_in_print_layout:
return False
if df.permlevel > 0 and not doc.has_permlevel_access_to(df.fieldname, df):
return False
return not doc.is_print_hide(df.fieldname, df)
def has_value(df, doc):
value = doc.get(df.fieldname)
if value in (None, ""):
return False
elif isinstance(value, basestring) and not strip_html(value).strip():
return False
elif isinstance(value, list) and not len(value):
return False
return True
def get_print_style(style=None, print_format=None, for_legacy=False):
print_settings = frappe.get_doc("Print Settings")
if not style:
style = print_settings.print_style or "Standard"
context = {
"print_settings": print_settings,
"print_style": style,
"font": get_font(print_settings, print_format, for_legacy)
}
css = frappe.get_template("templates/styles/standard.css").render(context)
try:
css += frappe.get_template("templates/styles/" + style.lower() + ".css").render(context)
except TemplateNotFound:
pass
# move @import to top
for at_import in list(set(re.findall("(@import url\([^\)]+\)[;]?)", css))):
css = css.replace(at_import, "")
# prepend css with at_import
css = at_import + css
if print_format and print_format.css:
css += "\n\n" + print_format.css
return css
def get_font(print_settings, print_format=None, for_legacy=False):
default = '"Helvetica Neue", Helvetica, Arial, "Open Sans", sans-serif'
if for_legacy:
return default
font = None
if print_format:
if print_format.font and print_format.font!="Default":
font = '{0}, sans-serif'.format(print_format.font)
if not font:
if print_settings.font and print_settings.font!="Default":
font = '{0}, sans-serif'.format(print_settings.font)
else:
font = default
return font
def get_visible_columns(data, table_meta, df):
"""Returns list of visible columns based on print_hide and if all columns have value."""
columns = []
doc = data[0] or frappe.new_doc(df.options)
def add_column(col_df):
return is_visible(col_df, doc) \
and column_has_value(data, col_df.get("fieldname"))
if df.get("visible_columns"):
# columns specified by column builder
for col_df in df.get("visible_columns"):
# load default docfield properties
docfield = table_meta.get_field(col_df.get("fieldname"))
if not docfield:
continue
newdf = docfield.as_dict().copy()
newdf.update(col_df)
if add_column(newdf):
columns.append(newdf)
else:
for col_df in table_meta.fields:
if add_column(col_df):
columns.append(col_df)
return columns
def column_has_value(data, fieldname):
"""Check if at least one cell in column has non-zero and non-blank value"""
has_value = False
for row in data:
value = row.get(fieldname)
if value:
if isinstance(value, basestring):
if strip_html(value).strip():
has_value = True
break
else:
has_value = True
break
return has_value
trigger_print_script = """
<script>
//allow wrapping of long tr
var elements = document.getElementsByTagName("tr");
var i = elements.length;
while (i--) {
if(elements[i].clientHeight>300){
elements[i].setAttribute("style", "page-break-inside: auto;");
}
}
window.print();
// close the window after print
// NOTE: doesn't close if print is cancelled in Chrome
setTimeout(function() {
window.close();
}, 1000);
</script>
"""
|
|
import numpy as np
import tensorflow as tf
import h5py
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# Functions for initializing neural nets parameters
def init_weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1, dtype=tf.float32)
return tf.Variable(initial)
def init_bias_variable(shape):
initial = tf.constant(0.1, shape=shape, dtype=tf.float32)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, [1, 1, 1, 1], 'VALID')
def loadData(filepath):
print('==> Experiment 2_0')
print('==> Loading data from {}'.format(filepath))
# benchmark
t_start = time.time()
# reading data
    f = h5py.File(filepath, 'r')  # open the HDF5 file read-only
X_train = np.array(f.get('trainingFeatures'))
y_train = np.array(f.get('trainingLabels'))
X_val = np.array(f.get('validationFeatures'))
y_val = np.array(f.get('validationLabels'))
t_end = time.time()
    print('--Time elapsed for loading data: {t:.2f} seconds'.format(t=t_end - t_start))
del f
print('-- Number of training samples: {}'.format(X_train.shape[0]))
print('-- Number of validation samples: {}'.format(X_val.shape[0]))
print('Shape of X_train: %s'%str(X_train.shape))
print('Shape of y_train: %s'%str(y_train.shape))
print('Shape of X_val: %s'%str(X_val.shape))
print('Shape of y_val: %s'%str(y_val.shape))
return [X_train, y_train, X_val, y_val]
def runNeuralNet(num_freq, X_train, y_train, X_val, y_val, batch_size, num_epochs, pooling_strategy):
# Neural-network model set-up
num_training_vec, total_features = X_train.shape
num_frames = int(total_features / num_freq)
print('-- Num frames: {}'.format(num_frames))
num_classes = int(max(y_train.max(), y_val.max()) + 1)
k1 = 32
k2 = 0
l = num_frames
print("Num Classes: %g"%(num_classes))
print_freq = 1
    # Transform labels into one-hot encoded form
y_train_OHEnc = tf.one_hot(y_train.copy(), num_classes)
y_val_OHEnc = tf.one_hot(y_val.copy(), num_classes)
# Set-up input and output label
x = tf.placeholder(tf.float32, [None, total_features])
y_ = tf.placeholder(tf.float32, [None, num_classes])
# go straight from input to output, densely connected to SM layer
'''
W_sm = init_weight_variable([total_features, num_classes])
b_sm = init_bias_variable([num_classes])
y_conv = tf.matmul(x, W_sm) + b_sm
'''
# single convolutional layer
W_conv1 = init_weight_variable([num_freq, 1, 1, k1])
b_conv1 = init_bias_variable([k1])
x_image = tf.reshape(x, [-1, num_freq, num_frames, 1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_conv1_flat = tf.reshape(h_conv1, [-1, num_frames * k1])
W_sm = init_weight_variable([num_frames * k1, num_classes])
b_sm = init_bias_variable([num_classes])
y_conv = tf.matmul(h_conv1_flat, W_sm) + b_sm
'''
# One hidden layer then softmax
numHiddenUnits = 100
W_1 = init_weight_variable([total_features, numHiddenUnits])
b_1 = init_bias_variable([numHiddenUnits])
W_sm = init_weight_variable([numHiddenUnits, num_classes])
b_sm = init_bias_variable([num_classes])
hiddenActivation = tf.nn.relu(tf.matmul(x, W_1) + b_1)
y_conv = tf.matmul(hiddenActivation, W_sm) + b_sm
'''
# second layer
#W_conv2 = init_weight_variable([1, l, k1, k2])
#b_conv2 = init_bias_variable([k2])
#h_conv2 = tf.nn.relu(conv2d(h_conv1, W_conv2) + b_conv2)
#h_conv2_flat = tf.reshape(h_conv2, [-1, (num_frames - l + 1) * k2])
#h_pool2 = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
# softmax layer
#W_sm = init_weight_variable([(num_frames - l + 1) * k2, num_classes])
#b_sm = init_bias_variable([num_classes])
#y_conv = tf.matmul(h_conv2_flat, W_sm) + b_sm
# evaluations
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# session
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
y_train = sess.run(y_train_OHEnc)[:, 0, :]
y_val = sess.run(y_val_OHEnc)[:, 0, :]
# print("h_conv1 %s"%str(h_conv1.eval(feed_dict={x:X_train, y_:y_train})))
# print("W_sm is: %s"%str(W_sm.eval()))
# print("h_conv1_flat is: %s"%str(h_conv1_flat.eval(feed_dict={x:X_train, y_:y_train})))
# print("y_conv: %s"%str(y_conv.eval(feed_dict={x: X_train, y_: y_train})))
# print("y_ is : %s"%str(y_.eval(feed_dict={x:X_train, y_:y_train})))
train_acc_list = []
val_acc_list = []
train_err_list = []
val_err_list = []
epoch_numbers = []
# benchmark
t_start = time.time()
for epoch in range(num_epochs):
epochStart = time.time()
for i in range(0, num_training_vec, batch_size):
batch_end_point = min(i + batch_size, num_training_vec)
train_batch_data = X_train[i : batch_end_point]
train_batch_label = y_train[i : batch_end_point]
train_step.run(feed_dict={x: train_batch_data, y_: train_batch_label})
epochEnd = time.time()
# printing and recording data
if (epoch + 1) % print_freq == 0:
train_acc = accuracy.eval(feed_dict={x:X_train, y_: y_train})
train_acc_list.append(train_acc)
val_acc = accuracy.eval(feed_dict={x: X_val, y_: y_val})
val_acc_list.append(val_acc)
train_err = cross_entropy.eval(feed_dict={x: X_train, y_: y_train})
train_err_list.append(train_err)
val_err = cross_entropy.eval(feed_dict={x: X_val, y_: y_val})
val_err_list.append(val_err)
epoch_numbers += [epoch]
#print("-- epoch: %d, training error %g"%(epoch + 1, train_err))
print("epoch: %d, time: %g, t acc, v acc, t cost, v cost: %g, %g, %g, %g"%(epoch+1, epochEnd - epochStart, train_acc, val_acc, train_err, val_err))
t_end = time.time()
    print('--Time elapsed for training: {t:.2f} seconds'.format(t=t_end - t_start))
return [train_acc_list, val_acc_list, train_err_list, val_err_list, epoch_numbers]
'''
Our Main
Command Line Arguments: (1) Length of horizontal window
'''
# load the data
[X_train, y_train, X_val, y_val] = loadData('/pylon2/ci560sp/cstrong/exp2/exp2_d15_1s_2.mat')
batchSize = 1000
numEpochs = 300
poolingStrategy = 'MAX'
[train_acc_list, val_acc_list, train_err_list, val_err_list, epoch_numbers] = runNeuralNet(121, X_train, y_train, X_val, y_val, batchSize, numEpochs, poolingStrategy)
# Reports
print('-- Training accuracy: {:.4f}'.format(train_acc_list[-1]))
print('-- Validation accuracy: {:.4f}'.format(val_acc_list[-1]))
print('-- Training error: {:.4E}'.format(train_err_list[-1]))
print('-- Validation error: {:.4E}'.format(val_err_list[-1]))
print('==> Generating error plot...')
x_list = epoch_numbers
train_err_plot, = plt.plot(x_list, train_err_list, 'b.')
val_err_plot, = plt.plot(x_list, val_err_list, '.', color='orange')
plt.xlabel('Number of epochs')
plt.ylabel('Cross-Entropy Error')
plt.title('Error vs Number of Epochs')
plt.legend((train_err_plot, val_err_plot), ('training', 'validation'), loc='best')
plt.savefig('exp2_SMOnly.png', format='png')
plt.close()
print('==> Done.')
'''
y_ = np.array([[1, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
y_ = np.array([[0], [1], [2], [3], [3]])
x = np.array([[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11], [12, 13, 14, 15, 16, 17], [18, 19, 20, 21, 22, 23], [24, 25, 26, 27, 28, 29]])
x_val = np.array([[5, 6, 7, 8, 9, 10], [9, 10, 11, 12, 13, 14], [11, 12, 13, 14, 15, 16]])
y_val = np.array([[1], [3], [2]])
runNeuralNet(2, x, y_, x_val, y_val, 1, 300, 'MAX')
def numParams(Hk, Wk, Hi, Wi, k1, Ns):
return Hk * Wk * k1 + (Hi - Hk + 1) * (Wi - Wk + 1) * k1 * Ns
def numParamsInKernel(Hk, Wk, k1):
return Hk * Wk * k1
'''
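# Hedged worked example (not from the original source): with the numParams
# helper sketched in the comment block above (bias terms not counted), a 121x1
# kernel applied to a 121x15 input with k1 = 32 feature maps feeding Ns = 10
# softmax classes gives
#     numParamsInKernel(121, 1, 32) = 121 * 1 * 32 = 3872 kernel weights, and
#     numParams(121, 1, 121, 15, 32, 10) = 3872 + 1 * 15 * 32 * 10 = 8672
# parameters overall; the 15-frame / 10-class figures are illustrative only.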
|
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import time
import pytest
from flexget.entry import Entry
from flexget.manager import Session
from flexget.plugins.internal.api_trakt import TraktUserAuth
from flexget.plugins.list.trakt_list import TraktSet
@pytest.mark.online
class TestTraktList(object):
"""
Credentials for test account are:
username: flexget_list_test
password: flexget
"""
config = """
tasks:
test_list:
trakt_list:
account: 'flexget_list_test'
list: watchlist
type: episodes
strip_dates: yes
test_add_episode_auto:
mock:
- {title: 'Stranger Things S01E05 720p HDTV'}
- {title: 'Stranger Things S01E06 720p HDTV'}
series:
- Stranger Things:
begin: S01E05
list_add:
- trakt_list:
account: 'flexget_list_test'
list: watchlist
strip_dates: yes
"""
trakt_config = {'account': 'flexget_list_test',
'list': 'watchlist',
'type': 'shows'}
@pytest.fixture(autouse=True)
def db_auth(self, manager):
kwargs = {
'account': 'flexget_list_test',
'access_token': '336e1b7bb963b63f3cda4500a31aaa06de5fc22201795c4b89e239211d418f22',
'refresh_token': 'f07c967d834673f5e61cba0942f177fbf839528241db9cb6dfb5a51dd267692f',
'created': 1481976079,
'expires': 7776000
}
# Creates the trakt token in db
with Session() as session:
auth = TraktUserAuth(**kwargs)
session.add(auth)
def test_get_list(self):
config = {'account': 'flexget_list_test', 'list': 'testlist', 'type': 'auto'}
trakt_set = TraktSet(config)
entries = sorted([dict(e) for e in trakt_set], key=lambda x: sorted(x.keys()))
assert entries == sorted([
{
'trakt_show_slug': 'castle',
'original_url': 'https://trakt.tv/shows/castle/seasons/8/episodes/15',
'url': 'https://trakt.tv/shows/castle/seasons/8/episodes/15',
'series_season': 8,
'tvdb_id': 83462,
'series_name': 'Castle (2009)',
'imdb_id': 'tt1219024',
'series_id': 'S08E15',
'series_episode': 15,
'trakt_episode_id': 2125119,
'trakt_series_name': 'Castle',
'trakt_series_year': 2009,
'title': 'Castle (2009) S08E15 Fidelis Ad Mortem',
'trakt_show_id': 1410,
'trakt_ep_name': 'Fidelis Ad Mortem',
'tvrage_id': 19267
},
{
'movie_name': 'Deadpool',
'original_url': 'https://trakt.tv/movies/deadpool-2016',
'tmdb_id': 293660,
'title': 'Deadpool (2016)',
'url': 'https://trakt.tv/movies/deadpool-2016',
'trakt_movie_id': 190430,
'trakt_movie_name': 'Deadpool',
'imdb_id': 'tt1431045',
'movie_year': 2016,
'trakt_movie_slug': 'deadpool-2016',
'trakt_movie_year': 2016,
},
{
'trakt_show_slug': 'the-walking-dead',
'tmdb_id': 1402,
'title': 'The Walking Dead (2010)',
'url': 'https://trakt.tv/shows/the-walking-dead',
'original_url': 'https://trakt.tv/shows/the-walking-dead',
'series_name': 'The Walking Dead (2010)',
'trakt_show_id': 1393,
'tvdb_id': 153021,
'imdb_id': 'tt1520211',
'trakt_series_name': 'The Walking Dead',
'trakt_series_year': 2010,
'tvrage_id': 25056
}
], key=lambda x: sorted(x.keys()))
def test_strip_dates(self):
config = {'account': 'flexget_list_test', 'list': 'testlist', 'strip_dates': True, 'type': 'auto'}
trakt_set = TraktSet(config)
titles = [e['title'] for e in trakt_set]
assert set(titles) == {'The Walking Dead', 'Deadpool', 'Castle S08E15 Fidelis Ad Mortem'}
def test_trakt_add(self):
# Initialize trakt set
trakt_set = TraktSet(self.trakt_config)
trakt_set.clear()
entry = Entry(title='White collar', series_name='White Collar (2009)')
assert entry not in trakt_set
trakt_set.add(entry)
time.sleep(5)
assert entry in trakt_set
def test_trakt_add_episode(self):
episode_config = self.trakt_config.copy()
episode_config['type'] = 'episodes'
trakt_set = TraktSet(episode_config)
# Initialize trakt set
trakt_set.clear()
entry = Entry(**{u'trakt_show_slug': u'game-of-thrones',
u'original_url': u'https://trakt.tv/shows/game-of-thrones/seasons/4/episodes/5',
u'url': u'https://trakt.tv/shows/game-of-thrones/seasons/4/episodes/5', u'series_season': 4,
u'tvdb_id': 121361, u'series_name': u'Game of Thrones (2011)', u'imdb_id': u'tt0944947',
u'series_id': u'S04E05', u'series_episode': 5, u'trakt_episode_id': 73674,
u'title': u'Game of Thrones (2011) S04E05 First of His Name', u'trakt_show_id': 1390,
u'trakt_ep_name': u'First of His Name', u'tvrage_id': 24493})
assert entry not in trakt_set
trakt_set.add(entry)
assert entry in trakt_set
def test_trakt_add_episode_simple(self):
episode_config = self.trakt_config.copy()
episode_config['type'] = 'episodes'
trakt_set = TraktSet(episode_config)
# Initialize trakt set
trakt_set.clear()
entry = Entry(**{u'series_name': u'Game of Thrones (2011)', u'series_id': u'S04E05', u'series_episode': 5,
u'series_season': 4, u'title': u'Game of Thrones (2011) S04E05 First of His Name'})
assert entry not in trakt_set
trakt_set.add(entry)
assert entry in trakt_set
def test_trakt_add_episode_task(self, execute_task):
episode_config = self.trakt_config.copy()
episode_config['type'] = 'episodes'
# Initialize trakt set
trakt_set = TraktSet(episode_config)
trakt_set.clear()
execute_task('test_add_episode_auto')
task = execute_task('test_list')
assert len(task.entries) == 2
assert task.entries[0]['series_name'] == 'Stranger Things (2016)'
assert task.entries[1]['series_name'] == 'Stranger Things (2016)'
for series_id in ['S01E05', 'S01E06']:
entry1 = task.entries[0]
entry2 = task.entries[1]
assert series_id in [entry1['series_id'], entry2['series_id']]
def test_trakt_remove(self):
trakt_set = TraktSet(self.trakt_config)
# Initialize trakt set
trakt_set.clear()
entry = Entry(title='White collar', series_name='White Collar (2009)')
assert entry not in trakt_set
trakt_set.add(entry)
time.sleep(5)
assert entry in trakt_set
trakt_set.remove(entry)
time.sleep(5)
assert entry not in trakt_set
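# Hedged note (not from the original source): everything in TestTraktList talks
# to the live Trakt API, which is why the class carries the `online` marker.
# With a typical pytest setup these cases are selected via the marker
# expression flag, e.g. `pytest -m online`; whether unmarked runs skip them
# depends on the project's pytest configuration.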
|
|
# Author: Alexander Herbrich
# Date: 03.02.2016
# Topic: Hangman
from random import *
import time
import sys
Hangman_Pics = ["""
============== """ , """
|
|
|
|
|
============== """ , """
+====+
|
|
|
|
============== """ , """
+====+
| |
|
|
|
============== """ , """
+====+
| |
0 |
|
|
|
============== """ , """
+====+
| |
0 |
| |
|
|
============== """ ,"""
+====+
| |
0 |
| |
/ |
|
============== """ , """
+====+
| |
0 |
| |
/ \ |
|
============== """ , """
+====+
| |
0 |
\| |
/ \ |
|
============== """ , """
+====+
| |
0 |
\|/ |
/ \ |
|
============== """ ]
def delay_print(s, delay=0.06):
for c in str(s):
sys.stdout.write('%s' % c )
sys.stdout.flush()
time.sleep(delay)
def delay_input(s):
delay_print(s)
return input()
def hangman_start(wait_time):
# Read the word list and clean it
with open("wordliste.txt") as f:
liste = [w.lower().strip() for w in f.readlines()]
# Choose a random word
m = randrange (len(liste))
wort = (liste[m])
a = len(wort)
maske = (a * "-")
Teilnehmer = delay_input ("Guten Tag. Heute koennen sie eine Runde Hangman spielen. Was ist ihr Name? \n")
delay_print ("\nSie heissen also " + Teilnehmer + ". Schoen sie mal zu treffen. \n")
time.sleep(wait_time*6)
Antwort = delay_input ("\nZum Fortfahren (JA) und zum Beenden (NEIN)")
if (Antwort.lower() == "ja"):
delay_print("\nJetzt geht es los :-) Viel Spass und Erfolg beim Raten. \n")
time.sleep(wait_time*8)
if (Antwort.lower() == "nein"):
delay_print ("\nDas Spiel wird jetzt beendet...")
time.sleep(wait_time*6)
sys.exit("Das Spiel wurde verlassen")
return (wort, maske , Teilnehmer)
def Untersuchung (NAMEN, name1,Zeichen):
if (len (NAMEN) != len (name1)):
return
z = Zeichen[0]
name31 = ""
ianz =0
le = len (NAMEN)
for i in range (le):
if (NAMEN[i] == z):
ianz += 1
name31 = name31 + z
else:
name31 = name31 + name1[i]
return (name31, ianz)
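# Hedged worked example (not from the original source): Untersuchung compares
# the secret word against the current mask and fills in every position where
# the guessed letter matches, returning the new mask and the number of hits:
#
#     Untersuchung("hangman", "-------", "a")   # -> ("-a---a-", 2)
#     Untersuchung("hangman", "-a---a-", "n")   # -> ("-an--an", 2)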
def PrintHangMan(Leben, wait_time):
if (Leben == 9):
time.sleep(wait_time*3)
delay_print ("\nDer Galgen wird erstellt...")
elif (Leben == 3):
time.sleep(wait_time*3)
delay_print ("\nAchtung nur noch 3 Leben...")
elif (Leben == 1):
time.sleep(wait_time*3)
delay_print ("\nLetzte Chance...")
time.sleep(wait_time*3)
delay_print ("\t\t\t\t\t" + Hangman_Pics[9-Leben], 0.001)
time.sleep(wait_time*3)
def ReadLetter(wait_time, Versuche, benutzt, Leben, mask):
time.sleep(wait_time)
delay_print ("\n")
print ("-" * 80)
print (str(Versuche) + ". Versuch \n")
print ("\t" * 7 + "Genutzte Buchstaben:")
print ("\t" * 7 + str(benutzt))
print ("\t" * 7 + "Leben:")
print ("\t" * 7 + str(Leben))
delay_print("\nIhr Wort: \n")
delay_print ("\n" + mask + "\n")
delay_print ("\n")
return delay_input ("BITTE EINEN BUCHSTABEN EINGEBEN... \n")
def hangman() :
Leben= 10
Buchstabe = " "
wait_time = 0.0
L1 = hangman_start(wait_time)
word = L1[0]
Lword = len (word)
mask = L1[1]
user = L1[2]
Versuche = 1
Fehlversuche = 0
success = 0
benutzt = []
while Leben > 0:
Buchstabe = ReadLetter(wait_time, Versuche, benutzt, Leben, mask)
Versuche += 1
if (Buchstabe in benutzt):
delay_print ("\nBitte benutzen sie einen Buchstaben der noch nicht verwendet wurde!!! \n")
time.sleep(wait_time*6)
continue
benutzt.append(Buchstabe)
U1 = Untersuchung (word, mask, Buchstabe)
new_mask = U1[0]
gefunden = U1[1]
#delay_print (gefunden)
if (gefunden == 0 ):
Leben -= 1
Fehlversuche += 1
delay_print ("\nFALSCH!!! \n")
PrintHangMan(Leben, wait_time)
else:
success += gefunden
mask = new_mask
delay_print ("\nRICHTIG!!!\n")
time.sleep(wait_time*3)
if (success == Lword):
delay_print ("\n" + mask + "\n")
if (Fehlversuche == 0):
delay_print ("Gewonnen ohne einen Fehlversuch\n")
else:
delay_print ("Gewonnen nach " + str(Fehlversuche) + " Fehlversuch(en)\n")
return
delay_print ("\nDas wars ........... Tod durch den Galgen. Das Wort waere " + str(L1[0]) + " gewesen.")
hangman()
|
|
"""
Choose SNPs from GWAS summary statistics.
Method 1:
Sort by significance.
Choose top SNP.
Remove all SNPs in LD.
Loop until p-value threshold or n variants is reached.
"""
# This file is part of grstools.
#
# The MIT License (MIT)
#
# Copyright (c) 2017 Marc-Andre Legault
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import argparse
import logging
import pickle
import time
import os
import csv
import collections
import geneparse
import geneparse.utils
from geneparse.exceptions import InvalidChromosome
from ..utils import parse_grs_file, InMemoryGenotypeExtractor
from ..version import grstools_version
logger = logging.getLogger(__name__)
class ParameterObject(object):
"""Abstract class of argument validators."""
@staticmethod
def valid(o):
raise NotImplementedError()
class Filename(ParameterObject):
@staticmethod
def valid(o):
"""Validate that the object can be opened for reading."""
try:
with open(o, "r"):
pass
except:
return False
return True
class Region(ParameterObject):
@classmethod
def valid(cls, o):
"""Validate that a str is of the form chrXX:START-END."""
try:
cls._parse_region(o)
except ValueError:
return False
return True
@staticmethod
def _parse_region(s):
message = "Expected format for region is: 'chr1:12345-22345'."
if not hasattr(s, "startswith"):
raise ValueError(message)
if not s.startswith("chr"):
raise ValueError(message)
s = s[3:]
try:
chrom, tail = s.split(":")
start, end = [int(i) for i in tail.split("-")]
except:
raise ValueError(message)
# Flip start and end position if needed.
if start > end:
start, end = end, start
return chrom, start, end
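# Hedged illustration (not from the original source): expected behaviour of the
# region syntax used throughout this module.
#
#     Region.valid("chr1:12345-22345")          # -> True
#     Region._parse_region("chr1:22345-12345")  # -> ("1", 12345, 22345), flipped
#     Region.valid("1:12345-22345")             # -> False (missing 'chr' prefix)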
class Some(ParameterObject):
def __new__(cls, t):
# Valid if:
# - Is none
# - Is instance of t
# - t.valid is true
def valid(o):
if o is None:
return True
if isinstance(o, t):
return True
if hasattr(t, "valid") and t.valid(o):
return True
return False
return type(
"Some",
(ParameterObject, ),
{"valid": valid}
)
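# Hedged illustration (not from the original source): Some(t) builds an
# "optional t" validator class, so for instance
#
#     Some(int).valid(None)               # -> True (missing value is allowed)
#     Some(int).valid(5)                  # -> True
#     Some(Region).valid("chr2:100-200")  # -> True (delegates to Region.valid)
#     Some(int).valid("5")                # -> False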
class SNPSelectionLog(object):
"""Class to keep a log of selected SNPs for a GRS."""
# Known parameters and their types.
KNOWN_PARAMETERS = {
"DEBUG": bool,
"SUMMARY_FILENAME": Filename,
"REFERENCE_FILENAME": str,
"MAF_THRESHOLD": float,
"TARGET_N": Some(int),
"LD_WINDOW_SIZE": int,
"LD_CLUMP_THRESHOLD": float,
"P_THRESHOLD": float,
"EXCLUDE_AMBIGUOUS_ALLELES": bool,
"EXCLUDE_NO_REFERENCE": bool,
"REGION_INCLUDED": Some(Region),
"REGION_EXCLUDED": Some(Region),
"OUTPUT_PREFIX": str,
}
EXCLUSION_REASONS = {
"MAF_FILTER",
"INVALID_CHROMOSOME",
"AMBIGUOUS_ALLELES_FILTER",
"REGION_INCLUSION_FILTER",
"REGION_EXCLUSION_FILTER",
"NOT_IN_REFERENCE_PANEL",
"DUP_OR_MULTIALLELIC_IN_REFERENCE_PANEL",
"LD_CLUMPED"
}
def __init__(self, output_prefix):
self.parameters = {"OUTPUT_PREFIX": output_prefix}
self.available_variants = None
self.special = []
self.included = []
self.excluded = []
def init_logger(self):
"""Sets the log level and binds to a logger instance.
This is called automatically as a sort of startup hook after argument
parsing.
"""
self.logger = logger
if self.parameters.get("DEBUG", False):
self.logger.setLevel(logging.DEBUG)
self.display_configuration()
def log_selection_trace(self):
"""Gets called at the end of the selection process.
This function is responsible for writing the exclusions to disk.
"""
self.logger.info("Writing selection logs (exclusions and special "
"warnings).")
excluded_filename = self.parameters["OUTPUT_PREFIX"] + ".excluded.log"
special_incl_filename = (
self.parameters["OUTPUT_PREFIX"] + ".special.log"
)
variant_sets = [
(excluded_filename, self.excluded),
(special_incl_filename, self.special)
]
for filename, variant_set in variant_sets:
with open(filename, "w") as f:
writer = csv.writer(f)
writer.writerow(["name", "chrom", "pos", "alleles", "reason",
"details"])
for variant, reason, details in variant_set:
alleles = "/".join(variant.alleles)
writer.writerow([
variant.name, variant.chrom, variant.pos,
alleles, reason, details
])
def close(self):
pass
def display_configuration(self):
"""Displays all recorded parameter values.
This is called by init_logger.
"""
self.logger.info("grstools v{}".format(grstools_version))
self.logger.info("Starting variant selection with the following "
"parameters:")
self.logger.info(
"\tUsing summary statistics from: '{}'"
"".format(self.parameters["SUMMARY_FILENAME"])
)
self.logger.info(
"\tReference panel for MAF and LD computation are from: '{}'"
"".format(self.parameters["REFERENCE_FILENAME"])
)
self.logger.info(
"\tP-value <= {:g}"
"".format(self.parameters["P_THRESHOLD"])
)
self.logger.info(
"\tMAF >= {:.4f}"
"".format(self.parameters["MAF_THRESHOLD"])
)
if self.parameters.get("TARGET_N"):
self.logger.info(
"\tSelecting up to {} variants"
"".format(self.parameters["TARGET_N"])
)
self.logger.info(
"\tClumping variants with LD >= {:.2f}"
"".format(self.parameters["LD_CLUMP_THRESHOLD"])
)
self.logger.info(
"\t{} variants with ambiguous alleles (A/T or G/C)"
"".format(
"EXCLUDING" if self.parameters["EXCLUDE_AMBIGUOUS_ALLELES"]
else "INCLUDING"
)
)
self.logger.info(
"\t{} variants that are absent from the reference genotypes"
"".format(
"EXCLUDING" if self.parameters["EXCLUDE_NO_REFERENCE"]
else "INCLUDING"
)
)
if self.parameters.get("REGION_INCLUDED"):
self.logger.info(
"\tINCLUDING variants in region '{}'"
"".format(self.parameters["REGION_INCLUDED"])
)
if self.parameters.get("REGION_EXCLUDED"):
self.logger.info(
"\tEXCLUDING variants in region '{}'"
"".format(self.parameters["REGION_EXCLUDED"])
)
self.logger.info("\tOutput prefix: '{}'"
"".format(self.parameters["OUTPUT_PREFIX"]))
# Changes in state that are to be recorded by the logger.
def record_parameter(self, key, value):
"""Record the value of parameter of the algorithm."""
if key not in self.KNOWN_PARAMETERS:
raise ValueError("Unknown parameter '{}'.".format(key))
t = self.KNOWN_PARAMETERS[key]
if isinstance(value, t):
# Provided with an instance of the right type.
pass
elif hasattr(t, "valid") and t.valid(value):
# Value was validated by ParameterObject.
pass
else:
raise ValueError(
"Invalid value '{}' for parameter '{}'."
"".format(value, key)
)
self.parameters[key] = value
def record_included_special(self, variant, reason, details=None):
"""Record included variants that need special care or attention.
For example, ambiguous variants that have a frequency close to 0.5
or variants that are absent from the reference panel.
"""
self.special.append((variant, reason, details))
def record_included(self, variant):
"""Record that a variant has been included in the GRS."""
self.included.append(variant)
def record_excluded(self, variant, reason, details=None):
"""Record that a variant has been excluded (and why)."""
if reason not in self.EXCLUSION_REASONS:
raise ValueError(
"Unknown reason for exclusion: '{}'"
"".format(reason)
)
self.excluded.append((variant, reason, details))
def record_ld_block(self, variant, other_loci, r2):
"""Record the LD between variants."""
if len(other_loci) == 0:
self.logger.debug("\tVARIANT {} ALONE".format(variant))
return
other_loci = [g.variant for g in other_loci]
start = variant.pos - self.parameters["LD_WINDOW_SIZE"] // 2
end = variant.pos + self.parameters["LD_WINDOW_SIZE"] // 2
self.logger.debug(
"\tLD_REGION {} to {} ({}: {}-{}) [{} candidates]"
"".format(
other_loci[0],
other_loci[-1],
variant.chrom,
start, end, len(other_loci))
)
blocks_directory = self.parameters["OUTPUT_PREFIX"] + ".ld_blocks"
if not os.path.isdir(blocks_directory):
os.mkdir(blocks_directory)
# Serialize LD blocks to disk.
filename = os.path.join(
blocks_directory,
"variant_{}_{}:{}-{}.blocks.pkl".format(
variant.name, variant.chrom, start, end
)
)
with open(filename, "wb") as f:
pickle.dump({
"cur": variant,
"other_loci": other_loci,
"r2": r2
}, f)
def get_excluded_variants(self):
return {i[0] for i in self.excluded}
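# Usage sketch (added; not part of the original file): how parameters are
# recorded and validated against KNOWN_PARAMETERS; the output prefix below is
# hypothetical.
def _selection_log_example():
    log = SNPSelectionLog("example_output")
    log.record_parameter("MAF_THRESHOLD", 0.05)   # checked by plain type (float)
    log.record_parameter("TARGET_N", None)        # accepted through Some(int)
    log.record_parameter("REGION_INCLUDED", "chr1:12345-22345")  # Region.valid
    return log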
class Row(object):
__slots__ = ("name", "chrom", "pos", "reference", "risk", "p_value",
"effect", "maf")
def __init__(self, name, chrom, pos, reference, risk, p_value, effect,
maf=None):
"""The row of a GRS file."""
self.name = name
self.chrom = chrom
self.pos = pos
self.reference = reference
self.risk = risk
self.p_value = p_value
self.effect = effect
self.maf = maf
def write_header(self, f):
f.write("name,chrom,pos,reference,risk,p-value,effect")
if self.maf is not None:
f.write(",maf")
f.write("\n")
@property
def _fields(self):
fields = [
self.name, self.chrom, self.pos, self.reference, self.risk,
self.p_value, self.effect
]
if self.maf is not None:
fields.append(self.maf)
return fields
def write(self, f, sep=","):
for i, field in enumerate(self._fields):
if i != 0:
f.write(",")
if type(field) is float:
f.write("{:.9g}".format(field))
else:
f.write(str(field))
f.write("\n")
def read_summary_statistics(filename, p_threshold, log, sep=",",
exclude_ambiguous=False, region=None,
exclude_region=None):
"""Read summary statistics file.
Args:
filename (str): Summary statistics (.grs) file name.
        p_threshold (float): Maximum p-value for a variant to be kept.
        log (SNPSelectionLog): Selection log used to record exclusions.
        sep (str): File column delimiter.
exclude_ambiguous (bool): Flag to exclude ambiguous (A/T or G/C)
variants.
region (str): Genomic region of the form chr3:12345-12355. If a region
is provided, only variants in the region will be KEPT.
exclude_region (str): Genomic region to exclude (see above for
details).
Returns:
collections.OrderedDict: The OrderedDict maps Variant instances to
their summary statistics (effect size, p-value, etc.) represented
as Row instances.
"""
if region is not None:
region = Region._parse_region(region)
if exclude_region is not None:
exclude_region = Region._parse_region(exclude_region)
    # Variant to stats OrderedDict (but constructed as a list first).
summary = []
df = parse_grs_file(filename, p_threshold=p_threshold, sep=sep)
df.sort_values("p-value", inplace=True)
# Method to see if a variant is in a region.
def _in_region(variant, chrom, start, end):
return (
variant.chrom == chrom and
start <= variant.pos <= end
)
# For now, this is not a limiting step, but it might be nice to parallelize
# this eventually.
for idx, info in df.iterrows():
if info["p-value"] > p_threshold:
break
try:
variant = geneparse.Variant(info["name"], info.chrom, info.pos,
[info.reference, info.risk])
except InvalidChromosome:
bad_v = geneparse.Variant(
info["name"], geneparse.Chromosome(info.chrom), info.pos,
[info.reference, info.risk]
)
log.record_excluded(bad_v, "INVALID_CHROMOSOME", info.chrom)
continue
# Region based inclusion/exclusion
if region is not None:
if not _in_region(variant, *region):
log.record_excluded(
variant, "REGION_INCLUSION_FILTER",
"{} not in {}".format(variant, region)
)
continue
if exclude_region is not None:
if _in_region(variant, *exclude_region):
log.record_excluded(
variant, "REGION_EXCLUSION_FILTER",
"{} in {}".format(variant, exclude_region)
)
continue
ambiguous = variant.alleles_ambiguous()
if ambiguous and exclude_ambiguous:
log.record_excluded(variant, "AMBIGUOUS_ALLELES_FILTER")
continue
if "maf" in info.index:
if info.maf <= log.parameters["MAF_THRESHOLD"]:
log.record_excluded(
variant,
"MAF_FILTER",
"MAF recorded as {:g} in summary statistics file"
"".format()
)
continue
row_args = [info["name"], info.chrom, info.pos, info.reference,
info.risk, info["p-value"], info.effect]
if "maf" in info.index:
row_args.append(info.maf)
row = Row(*row_args)
summary.append((variant, row))
# Convert the summary statistics to an ordereddict of loci to stats.
summary = collections.OrderedDict(
sorted(summary, key=lambda x: x[1].p_value)
)
return summary
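# Usage sketch (added; not part of the original module): the file name below is
# hypothetical, and the log should already carry a MAF_THRESHOLD parameter
# since the reader may consult it when the summary file has a 'maf' column.
def _read_summary_example(log):
    return read_summary_statistics(
        "my_study.grs", 5e-8, log,
        exclude_ambiguous=True,
        region="chr1:12345-22345"
    )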
def greedy_pick_clump(summary, genotypes, log):
"""Greedy algorithm to select SNPs for inclusion in the GRS.
Args:
summary (Dict[Variant, Row]): Dict representation of the summary
statistics file containing Variants as keys and their information
as values.
genotypes (geneparse.core.GenotypesReader): Genotypes from a reference
panel for LD computation.
        log (SNPSelectionLog): A class to manage state of the selection
            process. Its TARGET_N parameter, when set, stops the selection
            once that many variants have been included (i.e. only the top N
            SNPs are used to define the GRS).
Returns:
List[Row]: Variants selected by the algorithm.
"""
log.logger.info("Starting the greedy variant selection.")
out = []
while len(summary) > 0:
# One of the stop conditions is target n, which we check.
target_n = log.parameters.get("TARGET_N")
if target_n is not None and len(out) >= target_n:
log.logger.info("Target number of variants reached.")
break
# Get the next best variant.
cur, info = summary.popitem(last=False)
# Check if current variant is a suitable candidate.
g = genotypes.get_variant_genotypes(cur)
if variant_is_good_to_keep(cur, g, log):
log.record_included(cur)
out.append(info)
# If the variant is in the reference, we do LD pruning.
if len(g) == 1:
summary = ld_prune(summary, g[0], genotypes, log)
# Otherwise, just go to the next one (exclusion will be noted by the
# predicate function).
return out
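# Sketch (added for illustration): how the selection is driven end to end,
# mirroring main() further below; "stats.grs" and "reference" (a plink prefix)
# are hypothetical paths.
def _selection_run_example(log):
    summary = read_summary_statistics("stats.grs", 5e-8, log)
    with geneparse.parsers["plink"]("reference") as reference:
        genotypes = InMemoryGenotypeExtractor(reference, summary.keys())
        try:
            return greedy_pick_clump(summary, genotypes, log)
        finally:
            genotypes.close()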
def variant_is_good_to_keep(cur, g, log):
"""Check that the currently selected variant is good to keep.
When this is called, it's after parsing the summary statistics and when
looking up variants in the reference panel.
This means that the filters being applied are:
- Reference panel MAF filtering
- Filters based on the availability in reference panels
- Filters based on multiallelic / duplicates in reference panel
"""
# Variant not in reference panel
if len(g) == 0:
if log.parameters["EXCLUDE_NO_REFERENCE"]:
log.record_excluded(cur, "NOT_IN_REFERENCE_PANEL")
return False
else:
log.record_included_special(
cur,
"NOT_IN_REFERENCE",
"Variant {} was absent from the reference panel but was still "
"included in the GRS. It is important to validate that it is "
"not correlated with other included variants."
"".format(cur)
)
return True
elif len(g) == 1:
# Variant was uniquely found in the reference panel, we can check the
# MAF.
maf = g[0].maf()
if maf <= log.parameters["MAF_THRESHOLD"]:
log.record_excluded(
cur,
"MAF_FILTER",
"MAF from reference panel is {:g}".format(maf)
)
return False
# Variant is duplicate or multiallelic
elif len(g) > 1:
log.record_excluded(cur, "DUP_OR_MULTIALLELIC_IN_REFERENCE_PANEL")
return False
return True
def ld_prune(summary, g, genotypes, log):
"""Return a list of variant with all variants correlated to cur removed."""
v = g.variant
left = v.pos - log.parameters["LD_WINDOW_SIZE"] // 2
right = v.pos + log.parameters["LD_WINDOW_SIZE"] // 2
others = list(genotypes.get_variants_in_region(v.chrom, left, right))
# Remove the variants in reference but not in summary.
others = [
other_g for other_g in others if other_g.variant in summary.keys()
]
if len(others) < 1:
# No need to prune, no variants in LD.
return summary
# r2 is a series with index the variant name in the reference file.
r2 = geneparse.utils.compute_ld(g, others, r2=True)
# Remove all the variants from the summary statistics if correlated.
ld_threshold = log.parameters["LD_CLUMP_THRESHOLD"]
# Record the LD matrix.
log.record_ld_block(v, others, r2.values)
    for other_g, ld in zip(others, r2):
        if ld >= ld_threshold:
            del summary[other_g.variant]
            log.record_excluded(
                other_g.variant,
                "LD_CLUMPED",
                "LD with {} {} is {:g}".format(
                    v.name if v.name else "",
                    v, ld
                )
            )
return summary
def write_selection(grs, log):
"""Write the selected SNPs to disk."""
if len(grs) == 0:
log.logger.warning(
"No variant satisfied the provided thresholds (could not generate "
"a GRS)."
)
return
log.logger.info(
"Writing the file containing the selected {} variants."
"".format(len(grs))
)
output_filename = log.parameters["OUTPUT_PREFIX"] + ".grs"
with open(output_filename, "w") as f:
grs[0].write_header(f)
for row in grs:
row.write(f)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--p-threshold",
help="P-value threshold for inclusion in the GRS (default: 5e-8).",
default=5e-8,
type=float
)
parser.add_argument(
"--target-n",
help="Target number of variants to include in the GRS.",
default=None,
type=int
)
parser.add_argument(
"--maf-threshold",
help="Minimum MAF to allow inclusion in the GRS (default %(default)s).",
default=0.05,
type=float
)
parser.add_argument(
"--ld-threshold",
help=("LD threshold for the clumping step. All variants in LD with "
"variants in the GRS are excluded iteratively (default "
"%(default)s)."),
default=0.15,
type=float
)
parser.add_argument(
"--ld-window-size",
help=("Size of the LD window used to find correlated variants. "
"Making this window smaller will make the execution faster but "
"increases the chance of missing correlated variants "
"(default 1Mb)."),
default=int(1e6),
type=int
)
parser.add_argument(
"--region",
help=("Only consider variants located WITHIN a genomic region. "
"The expected format is 'chrCHR:START-END'. For example: "
"'chr1:12345-22345'."),
default=None,
type=str
)
parser.add_argument(
"--exclude-region",
help=("Only consider variants located OUTSIDE a genomic region. "
"The expected format is 'chrCHR:START-END'. For example: "
"'chr1:12345-22345'."),
default=None,
type=str
)
parser.add_argument(
"--exclude-ambiguous-alleles",
help="Exclude variants with ambiguous alleles (e.g. G/C or A/T)",
action="store_true"
)
parser.add_argument(
"--exclude-no-reference",
help="Exclude variants with no genotypes in the reference panel.",
action="store_true"
)
# Files
parser.add_argument(
"--summary",
help=("Path to the summary statistics files. Required columns are "
"'name', 'chrom', 'pos', 'p-value', 'effect', 'reference' and "
"'risk'."),
required=True
)
parser.add_argument(
"--reference",
help=("Path the binary plink file containing reference genotypes. "
"These genotypes will be used for LD clumping."),
required=True
)
parser.add_argument(
"--output", "-o",
help="Output prefix (default: %(default)s).",
default="grstools_selection"
)
parser.add_argument(
"--debug",
action="store_true",
)
return parser.parse_args()
def main():
SCRIPT_START_TIME = time.time()
global debug
args = parse_args()
# Setting the output prefix.
log = SNPSelectionLog(args.output)
if args.debug:
debug = True
log.record_parameter("DEBUG", True)
logger.setLevel(logging.DEBUG)
summary_filename = args.summary
log.record_parameter("SUMMARY_FILENAME", summary_filename)
reference_filename = args.reference
log.record_parameter("REFERENCE_FILENAME", reference_filename)
# Parameters
p_threshold = args.p_threshold
log.record_parameter("P_THRESHOLD", p_threshold)
target_n = args.target_n
log.record_parameter("TARGET_N", target_n)
maf_threshold = args.maf_threshold
log.record_parameter("MAF_THRESHOLD", maf_threshold)
ld_threshold = args.ld_threshold
log.record_parameter("LD_CLUMP_THRESHOLD", ld_threshold)
ld_window_size = args.ld_window_size
log.record_parameter("LD_WINDOW_SIZE", ld_window_size)
exclude_ambiguous = args.exclude_ambiguous_alleles
log.record_parameter("EXCLUDE_AMBIGUOUS_ALLELES", exclude_ambiguous)
exclude_no_reference = args.exclude_no_reference
log.record_parameter("EXCLUDE_NO_REFERENCE", exclude_no_reference)
region = args.region
log.record_parameter("REGION_INCLUDED", region)
exclude_region = args.exclude_region
log.record_parameter("REGION_EXCLUDED", exclude_region)
# Parameters have been recorded so we initialize the logger.
# This will make it in debug mode if needed and print the config values
# to the screen.
log.init_logger()
# Read the summary statistics.
log.logger.info("Reading summary statistics.")
summary = read_summary_statistics(summary_filename, p_threshold, log,
exclude_ambiguous=exclude_ambiguous,
region=region,
exclude_region=exclude_region)
# Do the greedy variant selection.
with geneparse.parsers["plink"](reference_filename) as reference:
genotypes = InMemoryGenotypeExtractor(reference, summary.keys())
try:
grs = greedy_pick_clump(summary, genotypes, log)
finally:
genotypes.close()
# Call the logger to dump the trace.
log.log_selection_trace()
# Write the selected GRS to file.
write_selection(grs, log)
# Display the execution time.
t = time.time() - SCRIPT_START_TIME
m, s = divmod(t, 60)
h, m = divmod(m, 60)
log.logger.info(
"Completed SNP selection in {:02d}:{:02d}:{:02d}."
"".format(int(h), int(m), int(s))
)
|
|
"""
Tag classes used to configure ViennaCL's iterative and direct solvers,
preconditioners, eigenvalue computations, NMF and bandwidth reduction
algorithms.
"""
from pyviennacl import _viennacl as _v, backend
class SolverTag(object):
def vcl_solve_call(self, *args):
raise NotImplementedError("Solver not implemented")
def vcl_inplace_solve_call(self, *args):
raise NotImplementedError("In-place solver not implemented")
class SolverWithoutPreconditioner(object): pass
class EigenvalueTag(object): pass
class NMFTag(object): pass
class BandwidthReductionTag(object): pass
class PreconditionerTag(object):
vcl_precond_type = None
vcl_tag_type = None
def instantiate(self, leaf):
self.vcl_precond_type = getattr(_v, self.vcl_precond_type_name + "_" + type(leaf.vcl_leaf).__name__)
vcl_precond = self.vcl_precond_type(leaf.vcl_leaf, self.vcl_tag)
self.vcl_precond = vcl_precond
class NoPreconditioner(PreconditionerTag):
vcl_tag_type = None
vcl_precond_type = _v.no_precond
def __init__(self):
self.vcl_tag = None
self.vcl_precond = self.vcl_precond_type()
def instantiate(self, leaf): pass
class ICHOL0(PreconditionerTag):
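    """
    Incomplete Cholesky factorization preconditioner with zero fill-in (ICHOL0).
    """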
vcl_tag_type = _v.ichol0_tag
vcl_precond_type_name = 'ichol0_precond'
def __init__(self):
self.vcl_tag = self.vcl_tag_type()
class ILUT(PreconditionerTag):
"""
    Incomplete LU factorization preconditioner with threshold (ILUT).
"""
vcl_tag_type = _v.ilut_tag
vcl_precond_type_name = 'ilut_precond'
def __init__(self, entries_per_row=20, drop_tolerance=1e-4, with_level_scheduling=False):
"""
        Construct an ilut_tag.
"""
self.vcl_tag = self.vcl_tag_type(entries_per_row, drop_tolerance, with_level_scheduling)
@property
def entries_per_row(self):
return self.vcl_tag.entries_per_row
@property
def drop_tolerance(self):
return self.vcl_tag.drop_tolerance
@property
def with_level_scheduling(self):
return self.vcl_tag.with_level_scheduling
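# Usage sketch (added; not part of the original module): a preconditioner tag
# is built from plain parameters here and only bound to a concrete matrix
# later, when instantiate() is called on it with the system matrix leaf.
def _ilut_tag_example():
    tag = ILUT(entries_per_row=40, drop_tolerance=1e-5)
    return tag.entries_per_row, tag.drop_tolerance, tag.with_level_scheduling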
class BlockILUT(PreconditionerTag):
"""
    Block-parallel variant of the ILUT preconditioner.
"""
vcl_tag_type = _v.ilut_tag
vcl_precond_type_name = 'block_ilut_precond'
def __init__(self, entries_per_row=20, drop_tolerance=1e-4, with_level_scheduling=False, num_blocks=8):
"""
        Construct an ilut_tag and record the number of blocks to use.
"""
self.num_blocks = num_blocks
self.vcl_tag = self.vcl_tag_type(entries_per_row, drop_tolerance, with_level_scheduling)
def instantiate(self, leaf):
self.vcl_precond_type = getattr(_v, self.vcl_precond_type_name + "_" + type(leaf.vcl_leaf).__name__)
vcl_precond = self.vcl_precond_type(leaf.vcl_leaf, self.vcl_tag, self.num_blocks)
self.vcl_precond = vcl_precond
@property
def entries_per_row(self):
return self.vcl_tag.entries_per_row
@property
def drop_tolerance(self):
return self.vcl_tag.drop_tolerance
@property
def with_level_scheduling(self):
return self.vcl_tag.with_level_scheduling
class ILU0(PreconditionerTag):
"""
    Incomplete LU factorization preconditioner with zero fill-in (ILU0).
"""
vcl_tag_type = _v.ilu0_tag
vcl_precond_type_name = 'ilu0_precond'
def __init__(self, with_level_scheduling=False):
"""
        Construct an ilu0_tag.
"""
self.vcl_tag = self.vcl_tag_type(with_level_scheduling)
@property
def with_level_scheduling(self):
return self.vcl_tag.with_level_scheduling
class BlockILU0(PreconditionerTag):
"""
    Block-parallel variant of the ILU0 preconditioner.
"""
vcl_tag_type = _v.ilu0_tag
vcl_precond_type_name = 'block_ilu0_precond'
def __init__(self, with_level_scheduling=False, num_blocks=8):
"""
        Construct an ilu0_tag and record the number of blocks to use.
"""
self.num_blocks = num_blocks
self.vcl_tag = self.vcl_tag_type(with_level_scheduling)
def instantiate(self, leaf):
self.vcl_precond_type = getattr(_v, self.vcl_precond_type_name + "_" + type(leaf.vcl_leaf).__name__)
vcl_precond = self.vcl_precond_type(leaf.vcl_leaf, self.vcl_tag, self.num_blocks)
self.vcl_precond = vcl_precond
@property
def with_level_scheduling(self):
return self.vcl_tag.with_level_scheduling
class Jacobi(PreconditionerTag):
"""
    Jacobi (diagonal) preconditioner.
"""
vcl_tag_type = _v.jacobi_tag
vcl_precond_type_name = 'jacobi_precond'
def __init__(self):
"""
        Construct a jacobi_tag.
"""
self.vcl_tag = self.vcl_tag_type()
class RowScaling(PreconditionerTag):
"""
    Row scaling preconditioner: each row is scaled by its l^p norm.
"""
vcl_tag_type = _v.row_scaling_tag
vcl_precond_type_name = 'row_scaling_precond'
def __init__(self, p=2):
"""
        Construct a row_scaling_tag; p selects the l^p norm used for scaling.
"""
self.vcl_tag = self.vcl_tag_type(p)
@property
def norm(self):
return self.vcl_tag.norm
class AMG(PreconditionerTag):
"""
    Algebraic multigrid (AMG) preconditioner (requires OpenCL support).
"""
vcl_tag_type = _v.amg_tag
vcl_precond_type_name = 'amg_precond'
def __init__(self, coarse=1,
interpol=1,
threshold=0.25,
interpol_weight=0.2,
jacobi_weight=1.0,
presmooth=1,
postsmooth=1,
coarse_levels=0):
"""
        Construct an amg_tag.
"""
if not backend.WITH_OPENCL:
raise NotImplementedError("AMG preconditioner only available with OpenCL")
self.vcl_tag = self.vcl_tag_type(coarse, interpol, threshold, interpol_weight, jacobi_weight, presmooth, postsmooth, coarse_levels)
@property
def coarse(self):
return self.vcl_tag.coarse
@property
def interpol(self):
return self.vcl_tag.interpol
@property
def threshold(self):
return self.vcl_tag.threshold
@property
def interpol_weight(self):
return self.vcl_tag.interpol_weight
@property
def jacobi_weight(self):
return self.vcl_tag.jacobi_weight
@property
def presmooth(self):
return self.vcl_tag.presmooth
@property
def postsmooth(self):
return self.vcl_tag.postsmooth
@property
def coarse_levels(self):
return self.vcl_tag.coarse_levels
class SPAI(PreconditionerTag):
"""
    Sparse approximate inverse (SPAI) preconditioner (requires OpenCL support).
"""
vcl_tag_type = _v.spai_tag
vcl_precond_type_name = 'spai_precond'
def __init__(self, residual_norm_threshold=1e-3,
iteration_limit=5,
residual_threshold=1e-2,
is_static=False,
is_right=False):
if not backend.WITH_OPENCL:
raise NotImplementedError("SPAI preconditioner only available with OpenCL")
self.vcl_tag = self.vcl_tag_type(residual_norm_threshold, iteration_limit, residual_threshold, is_static, is_right)
@property
def residual_norm_threshold(self):
return self.vcl_tag.residual_norm_threshold
@property
def iteration_limit(self):
return self.vcl_tag.iteration_limit
@property
def residual_threshold(self):
return self.vcl_tag.residual_threshold
@property
def is_static(self):
return self.vcl_tag.is_static
@property
def is_right(self):
return self.vcl_tag.is_right
class FSPAI(PreconditionerTag):
"""
    Factored sparse approximate inverse (FSPAI) preconditioner (requires OpenCL support).
"""
vcl_tag_type = _v.fspai_tag
vcl_precond_type_name = 'fspai_precond'
def __init__(self, residual_norm_threshold=1e-3,
iteration_limit=5,
is_static=False,
is_right=False):
if not backend.WITH_OPENCL:
raise NotImplementedError("FSPAI preconditioner only available with OpenCL")
self.vcl_tag = self.vcl_tag_type(residual_norm_threshold, iteration_limit, is_static, is_right)
@property
def residual_norm_threshold(self):
return self.vcl_tag.residual_norm_threshold
@property
def iteration_limit(self):
return self.vcl_tag.iteration_limit
@property
def is_static(self):
return self.vcl_tag.is_static
@property
def is_right(self):
return self.vcl_tag.is_right
class Lower(SolverTag, SolverWithoutPreconditioner):
"""
Instruct the solver to solve for a lower triangular system matrix
"""
vcl_tag = _v.lower_tag()
def vcl_solve_call(self, *args):
return _v.direct_solve(*args)
def vcl_inplace_solve_call(self, *args):
return _v.direct_inplace_solve(*args)
class UnitLower(SolverTag, SolverWithoutPreconditioner):
"""
Instruct the solver to solve for a unit lower triangular system matrix
"""
vcl_tag = _v.unit_lower_tag()
def vcl_solve_call(self, *args):
return _v.direct_solve(*args)
def vcl_inplace_solve_call(self, *args):
return _v.direct_inplace_solve(*args)
class Upper(SolverTag, SolverWithoutPreconditioner):
"""
Instruct the solver to solve for an upper triangular system matrix
"""
vcl_tag = _v.upper_tag()
def vcl_solve_call(self, *args):
return _v.direct_solve(*args)
def vcl_inplace_solve_call(self, *args):
return _v.direct_inplace_solve(*args)
class UnitUpper(SolverTag, SolverWithoutPreconditioner):
"""
Instruct the solver to solve for a unit upper triangular system matrix
"""
vcl_tag = _v.unit_upper_tag()
def vcl_solve_call(self, *args):
return _v.direct_solve(*args)
def vcl_inplace_solve_call(self, *args):
return _v.direct_inplace_solve(*args)
class CG(SolverTag):
"""
Instruct the solver to solve using the conjugate gradient solver.
Assumes that the system matrix is symmetric positive definite.
Used for supplying solver parameters.
"""
def vcl_solve_call(self, *args):
return _v.iterative_solve(*args)
def __init__(self, tolerance = 1e-8, max_iterations = 300):
"""
Construct a cg_tag.
Parameters
----------
tolerance : float, optional
Relative tolerance for the residual
(solver quits if ||r|| < tolerance * ||r_initial|| obtains)
max_iterations : int, optional
The maximum number of iterations
"""
self.vcl_tag = _v.cg_tag(tolerance, max_iterations)
@property
def tolerance(self):
"""
The relative tolerance
"""
return self.vcl_tag.tolerance
@property
def max_iterations(self):
"""
The maximum number of iterations
"""
return self.vcl_tag.max_iterations
@property
def iters(self):
"""
The number of solver iterations
"""
return self.vcl_tag.iters
@property
def error(self):
"""
The estimated relative error at the end of the solver run
"""
return self.vcl_tag.error
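# Usage sketch (added; not part of the original module): constructing a CG tag
# and reading back the wrapped parameters. The tag itself is handed to
# pyviennacl's solve routines elsewhere; only construction is shown here.
def _cg_tag_example():
    tag = CG(tolerance=1e-10, max_iterations=500)
    return tag.tolerance, tag.max_iterations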
class MixedPrecisionCG(SolverTag):
"""
    EXPERIMENTAL: mixed-precision variant of the CG solver.
Instruct the solver to solve using the conjugate gradient solver.
Assumes that the system matrix is symmetric positive definite.
Used for supplying solver parameters.
"""
def vcl_solve_call(self, *args):
return _v.iterative_solve(*args)
def __init__(self, tolerance = 1e-8, max_iterations = 300, inner_tolerance = 1e-2):
"""
        Construct a mixed_precision_cg_tag.
Parameters
----------
tolerance : float, optional
Relative tolerance for the residual
(solver quits if ||r|| < tolerance * ||r_initial|| obtains)
        max_iterations : int, optional
            The maximum number of iterations
        inner_tolerance : float, optional
            Relative tolerance for the inner, low-precision part of the solver
"""
self.vcl_tag = _v.mixed_precision_cg_tag(tolerance, max_iterations, inner_tolerance)
@property
def tolerance(self):
"""
The relative tolerance
"""
return self.vcl_tag.tolerance
@property
def inner_tolerance(self):
"""
        Relative tolerance for the inner, low-precision part of the solver
"""
return self.vcl_tag.inner_tolerance
@property
def max_iterations(self):
"""
The maximum number of iterations
"""
return self.vcl_tag.max_iterations
@property
def iters(self):
"""
The number of solver iterations
"""
return self.vcl_tag.iters
@property
def error(self):
"""
The estimated relative error at the end of the solver run
"""
return self.vcl_tag.error
class BiCGStab(SolverTag):
"""
Instruct the solver to solve using the stabilised bi-conjugate gradient
(BiCGStab) solver.
Assumes that the system matrix is non-symmetric.
Used for supplying solver parameters.
"""
def vcl_solve_call(self, *args):
return _v.iterative_solve(*args)
def __init__(self, tolerance = 1e-8,
max_iterations = 400, max_iterations_before_restart = 200):
"""
Construct a bicgstab_tag.
Parameters
----------
tolerance : float, optional
Relative tolerance for the residual
(solver quits if ||r|| < tolerance * ||r_initial|| obtains)
max_iterations : int, optional
Maximum number of iterations
        max_iterations_before_restart : int, optional
Maximum number of iterations before BiCGStab is reinitialised,
to avoid accumulation of round-off errors.
"""
self.vcl_tag = _v.bicgstab_tag(tolerance, max_iterations,
max_iterations_before_restart)
@property
def tolerance(self):
"""
The relative tolerance
"""
return self.vcl_tag.tolerance
@property
def max_iterations(self):
"""
The maximum number of iterations
"""
return self.vcl_tag.max_iterations
@property
    def max_iterations_before_restart(self):
"""
The maximum number of iterations before a restart
"""
return self.vcl_tag.max_iterations_before_restart
@property
def iters(self):
"""
The number of solver iterations
"""
return self.vcl_tag.iters
@property
def error(self):
"""
The estimated relative error at the end of the solver run
"""
return self.vcl_tag.error
class GMRES(SolverTag):
"""
Instruct the solver to solve using the GMRES solver.
Used for supplying solver parameters.
"""
def vcl_solve_call(self, *args):
return _v.iterative_solve(*args)
def __init__(self, tolerance=1e-8, max_iterations=300, krylov_dim=20):
"""
        Construct a gmres_tag.
Parameters
----------
tolerance : float, optional
Relative tolerance for the residual
(solver quits if ||r|| < tolerance * ||r_initial|| obtains)
max_iterations : int, optional
Maximum number of iterations, including restarts
krylov_dim : int, optional
The maximum dimension of the Krylov space before restart
(number of restarts can be computed as max_iterations / krylov_dim)
"""
self.vcl_tag = _v.gmres_tag(tolerance, max_iterations, krylov_dim)
@property
def tolerance(self):
"""
The relative tolerance
"""
return self.vcl_tag.tolerance
@property
def max_iterations(self):
"""
The maximum number of iterations
"""
return self.vcl_tag.max_iterations
@property
def krylov_dim(self):
"""
The maximum dimension of the Krylov space before restart
"""
return self.vcl_tag.krylov_dim
@property
def max_restarts(self):
"""
The maximum number of GMRES restarts
"""
return self.vcl_tag.max_restarts
@property
def iters(self):
"""
The number of solver iterations
"""
return self.vcl_tag.iters
@property
def error(self):
"""
The estimated relative error at the end of the solver run
"""
return self.vcl_tag.error
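# Usage sketch (added): with max_iterations=300 and krylov_dim=20, the
# docstring above implies at most 300 / 20 = 15 restarts.
def _gmres_tag_example():
    tag = GMRES(tolerance=1e-8, max_iterations=300, krylov_dim=20)
    return tag.krylov_dim, tag.max_restarts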
class PowerIteration(EigenvalueTag):
"""
Instruct the eigenvalue computation to use the power iteration algorithm.
Used for supplying eigenvalue computation parameters.
"""
def __init__(self, factor = 1e-8, max_iterations = 50000):
"""
        Construct a power_iter_tag.
Parameters
----------
factor : float, optional
Halt when the eigenvalue does not change more than this value.
max_iterations : int, optional
Maximum number of iterations to compute.
"""
self.vcl_tag = _v.power_iter_tag(factor, max_iterations)
@property
def factor(self):
"""
The termination factor.
If the eigenvalue does not change more than this value, the algorithm
stops.
"""
return self.vcl_tag.factor
@property
def max_iterations(self):
"""
The maximum number of iterations
"""
return self.vcl_tag.max_iterations
class Lanczos(EigenvalueTag):
"""
Instruct the eigenvalue computation to use the Lanczos algorithm.
Used for supplying eigenvalue computation parameters.
"""
def __init__(self, factor = 0.75, num_eig = 10, method = 0, krylov = 100):
"""
Construct a lanczos_tag.
Parameters
----------
factor : float
Exponent of epsilon (reorthogonalisation batch tolerance)
num_eig : int
Number of eigenvalues to return
method : {0, 1, 2}
0 for partial reorthogonalisation
1 for full reorthogonalisation
2 for Lanczos without reorthogonalisation
krylov : int
Maximum Krylov-space size
"""
self.vcl_tag = _v.lanczos_tag(factor, num_eig, method, krylov)
@property
def factor(self):
"""
The tolerance factor for reorthogonalisation batches, expressed as
the exponent of epsilon.
"""
return self.vcl_tag.factor
@property
def num_eigenvalues(self):
"""
The number of eigenvalues to return.
"""
return self.vcl_tag.num_eigenvalues
@property
def krylov_size(self):
"""
        The size of the Krylov space.
"""
return self.vcl_tag.krylov_size
@property
def method(self):
"""
The reorthogonalisation method choice.
"""
return self.vcl_tag.method
class NMF(NMFTag):
"""
    Configuration for ViennaCL's nonnegative matrix factorization (NMF).
"""
def __init__(self, tolerance=1e-4, stagnation_tolerance=1e-5,
max_iterations=10000, check_after_steps=100,
print_relative_error=False):
"""
        Construct an nmf_config.
"""
self.vcl_tag = _v.nmf_config(tolerance, stagnation_tolerance,
max_iterations, check_after_steps)
self.vcl_tag.print_relative_error = print_relative_error
@property
def iterations(self):
"""
        The number of NMF iterations performed in the last run
"""
return self.vcl_tag.iterations
@property
def tolerance(self):
"""
        The relative tolerance used as the stopping criterion
"""
return self.vcl_tag.tolerance
@property
def stagnation_tolerance(self):
"""
        The relative tolerance for detecting stagnation between checks
"""
return self.vcl_tag.stagnation_tolerance
@property
def max_iterations(self):
"""
        The maximum number of iterations
"""
return self.vcl_tag.max_iterations
@property
def check_after_steps(self):
"""
        The number of iterations between convergence checks
"""
return self.vcl_tag.check_after_steps
@property
def print_relative_error(self):
"""
        Whether the relative error is printed while the factorization runs
"""
return self.vcl_tag.print_relative_error
class CuthillMcKee(BandwidthReductionTag):
"""
    Instruct the bandwidth reduction to use the classical Cuthill-McKee algorithm.
"""
vcl_tag = _v.cuthill_mckee_tag()
class GibbsPooleStockmeyer(BandwidthReductionTag):
"""
    Instruct the bandwidth reduction to use the Gibbs-Poole-Stockmeyer algorithm.
"""
vcl_tag = _v.gibbs_poole_stockmeyer_tag()
class AdvancedCuthillMcKee(BandwidthReductionTag):
"""
    Instruct the bandwidth reduction to use the advanced Cuthill-McKee algorithm.
"""
def __init__(self, a=0.0, gmax=1):
"""
        Construct an advanced_cuthill_mckee_tag.
"""
self.vcl_tag = _v.advanced_cuthill_mckee_tag(a, gmax)
@property
def starting_node_param(self):
"""
        The parameter controlling the choice of starting nodes
"""
return self.vcl_tag.starting_node_param
@property
def max_root_nodes(self):
"""
        The maximum number of root nodes considered
"""
return self.vcl_tag.max_root_nodes
__all__ = ['NoPreconditioner', 'ICHOL0', 'ILUT', 'BlockILUT', 'ILU0',
'BlockILU0', 'Jacobi', 'RowScaling', 'AMG', 'SPAI', 'FSPAI',
'Lower', 'UnitLower', 'Upper', 'UnitUpper', 'CG',
'MixedPrecisionCG', 'BiCGStab', 'GMRES', 'PowerIteration',
'Lanczos', 'NMF', 'CuthillMcKee', 'GibbsPooleStockmeyer',
'AdvancedCuthillMcKee']
|
|
import re
from bot import Command, categories
from bot.utils import is_float
pat_currency = re.compile('[a-zA-Z]{3}')
baseurl = 'https://www.alphavantage.co/query?function=CURRENCY_EXCHANGE_RATE' \
'&from_currency={}&to_currency={}&apikey={}'
baseurl_sbif = 'https://api.sbif.cl/api-sbifv3/recursos_api/{}?apikey={}&formato=json'
baseurl_orionx = 'https://ymxh3ju7n5.execute-api.us-east-1.amazonaws.com/client/graphql'
localsbif = ['uf', 'utm']
cryptomemes = ['btc', 'xmr', 'eth', 'ltc', 'xlm', 'xrp', 'bch', 'dash', 'doge']
cryptoclp = ['cha', 'luk']
class Value(Command):
__author__ = 'makzk'
__version__ = '1.0.0'
def __init__(self, bot):
super().__init__(bot)
self.name = 'value'
self.aliases = localsbif + cryptomemes + cryptoclp
self.help = '$[value-help]'
self.format = '$[value-format]'
self.format_shortcut = '$[value-format-short]'
self.category = categories.INFORMATION
self.default_config = {
'sbif_apikey': '',
'currency_apikey': ''
}
self.div_handlers = {}
for m in cryptomemes:
self.div_handlers[m.upper()] = (self.convert_crypto, 'USD')
for m in localsbif:
self.div_handlers[m.upper()] = (self.sbif, 'CLP')
for m in cryptoclp:
self.div_handlers[m.upper()] = (self.orionx, 'CLP')
async def handle(self, cmd):
div_from = 'USD'
div_to = 'EUR'
mult = 1
if cmd.cmdname != self.name:
div_from = cmd.cmdname.rstrip('2').upper()
if div_from in self.div_handlers.keys():
_, div_to = self.div_handlers[div_from]
if cmd.argc >= 1:
if is_float(cmd.args[0]):
mult = float(cmd.args[0])
if cmd.argc > 1 and self.valid_currency(cmd.args[1]):
div_to = cmd.args[1]
else:
div_to = cmd.args[0]
else:
if cmd.argc == 1:
if not self.valid_currency(cmd.args[0]):
await cmd.answer('$[value-error-currency] [2] $[format]: $[value-format]')
return
div_from = cmd.args[0].upper()
if div_from not in self.div_handlers.keys():
await cmd.answer('$[value-error-currency] [3] $[format]: $[value-format]')
return
_, div_to = self.div_handlers[div_from]
elif cmd.argc == 2:
                if (is_float(cmd.args[0]) and not is_float(cmd.args[1])) \
                        or (is_float(cmd.args[1]) and not is_float(cmd.args[0])):
mult = float(cmd.args[0]) if is_float(cmd.args[0]) else float(cmd.args[1])
div_from = cmd.args[1].upper() if is_float(cmd.args[0]) else cmd.args[0].upper()
if not self.valid_currency(div_from):
await cmd.answer('$[value-error-currency] [4] $[format]: $[value-format]')
return
if div_from not in self.div_handlers.keys():
await cmd.answer('$[value-error-currency] [5] $[format]: $[value-format]')
return
_, div_to = self.div_handlers[div_from]
else:
div_from = cmd.args[0]
div_to = cmd.args[1]
elif cmd.argc >= 3:
if not is_float(cmd.args[0]):
await cmd.answer('$[value-error-currency] [6] $[format]: $[value-format]')
return
mult = float(cmd.args[0].replace(',', '.'))
div_from = cmd.args[1]
div_to = cmd.args[2]
if not self.valid_currency(div_from) or not self.valid_currency(div_to):
await cmd.answer('$[value-error-currency] $[format]: $[value-format]')
return
try:
div_from = div_from.upper()
div_to = div_to.upper()
await cmd.typing()
result = await self.handler(div_from, div_to, mult)
except DivRetrievalError as e:
await cmd.answer('$[error]', locales={'errortext': str(e)})
return
precision = 5 if result < 1 else 2
result = '{:.{prec}f}'.format(result, prec=precision)
await cmd.answer('{mult} {dfrom} = **{result}** {to}'.format(
dfrom=div_from, to=div_to, mult=mult, result=result))
"""
Handles different types of currency supported by the different APIs connected here
"""
async def handler(self, dv_from, dv_to, mult):
if dv_from in self.div_handlers:
handler, default_to = self.div_handlers[dv_from]
val = await handler(dv_from)
return await self.handler(default_to, dv_to, mult * val)
if dv_from == dv_to:
return mult
return await self.convert(dv_from, dv_to) * mult
#
# Services readers
#
async def convert(self, div1, div2):
if self.bot.config['currency_apikey'] == '':
raise DivRetrievalError('$[value-error-apikey]')
attempts = 0
url = baseurl.format(div1, div2, self.bot.config['currency_apikey'])
while attempts < 10:
self.log.debug('Loading currency data, attempt ' + str(attempts + 1))
self.log.debug('Loading URL %s', url)
async with self.http.get(url) as r:
data = await r.json()
if r.status != 200:
attempts += 1
continue
try:
k = 'Realtime Currency Exchange Rate'
if k not in data.keys():
if 'Error Message' in data.keys() and data['Error Message'].startswith('Invalid API call.'):
raise DivRetrievalError('$[value-error-currency]')
else:
raise DivRetrievalError('$[value-error-answer]')
else:
j = '5. Exchange Rate'
if j not in data[k].keys():
raise DivRetrievalError('$[value-error-answer]')
value = float(data[k][j])
except ValueError as e:
self.log.exception(e)
raise DivRetrievalError('$[value-error-unavailable]')
                return value
        # Every attempt failed: raise instead of silently returning None.
        raise DivRetrievalError('$[value-error-unavailable]')
async def convert_crypto(self, meme):
return await self.convert(meme, 'USD')
async def sbif(self, api):
# TODO: Cache
attempts = 0
if self.bot.config['sbif_apikey'] == '':
raise DivRetrievalError('$[value-error-sbif-key]')
url = baseurl_sbif.format(api.lower(), self.bot.config['sbif_apikey'])
while attempts < 10:
async with self.http.get(url) as r:
try:
data = await r.json(content_type='text/json')
except TypeError:
data = await r.json()
if r.status != 200:
attempts += 1
continue
try:
campo = api.upper() + 's'
value = float(data[campo][0]['Valor'].replace('.', '').replace(',', '.'))
except (KeyError, ValueError):
raise DivRetrievalError('$[value-error-sbif]')
                return value
        # Every attempt failed: raise instead of silently returning None.
        raise DivRetrievalError('$[value-error-sbif]')
async def orionx(self, meme):
q = [{
"query": "query getMarketStatsHome($x:ID){market(code:$x){lastTrade{price}}}",
"variables": {"x": meme + "CLP"}
}]
self.log.debug('Loading url %s for %s', baseurl_orionx, meme + "CLP")
async with self.http.post(baseurl_orionx, json=q, headers={'fingerprint': 'xd'}) as r:
try:
data = await r.json()
return data[0]['data']['market']['lastTrade']['price']
except KeyError:
raise DivRetrievalError('$[value-error-data-not-available]')
return 0
def valid_currency(self, curr):
if not isinstance(curr, str):
return False
curr = curr.upper()
return curr in self.div_handlers.keys() or pat_currency.match(curr)
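    # Walkthrough (added for illustration): a crypto alias such as '!btc 2 clp'
    # reaches handler('BTC', 'CLP', 2); BTC is registered in div_handlers, so
    # convert_crypto() first resolves its USD price and the call recurses as
    # handler('USD', 'CLP', 2 * price) to finish with a regular fiat lookup.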
class DivRetrievalError(BaseException):
pass
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base test class for running non-stubbed tests (functional tests)
The FunctionalTest class contains helper methods for starting the API
and Registry server, grabbing the logs of each, cleaning up pidfiles,
and spinning down the servers.
"""
import atexit
import datetime
import logging
import os
import shutil
import signal
import socket
import sys
import tempfile
import time
import fixtures
from oslo.serialization import jsonutils
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
import six.moves.urllib.parse as urlparse
import testtools
from glance.common import utils
from glance.db.sqlalchemy import api as db_api
from glance import tests as glance_tests
from glance.tests import utils as test_utils
execute, get_unused_port = test_utils.execute, test_utils.get_unused_port
class Server(object):
"""
Class used to easily manage starting and stopping
a server during functional test runs.
"""
def __init__(self, test_dir, port, sock=None):
"""
Creates a new Server object.
:param test_dir: The directory where all test stuff is kept. This is
passed from the FunctionalTestCase.
:param port: The port to start a server up on.
"""
self.verbose = True
self.debug = True
self.no_venv = False
self.test_dir = test_dir
self.bind_port = port
self.conf_file_name = None
self.conf_base = None
self.paste_conf_base = None
self.exec_env = None
self.deployment_flavor = ''
self.show_image_direct_url = False
self.show_multiple_locations = False
self.property_protection_file = ''
self.enable_v1_api = True
self.enable_v2_api = True
self.enable_v1_registry = True
self.enable_v2_registry = True
self.needs_database = False
self.log_file = None
self.sock = sock
self.fork_socket = True
self.process_pid = None
self.server_module = None
self.stop_kill = False
self.use_user_token = False
def write_conf(self, **kwargs):
"""
Writes the configuration file for the server to its intended
destination. Returns the name of the configuration file and
the over-ridden config content (may be useful for populating
error messages).
"""
if not self.conf_base:
raise RuntimeError("Subclass did not populate config_base!")
conf_override = self.__dict__.copy()
if kwargs:
conf_override.update(**kwargs)
# A config file and paste.ini to use just for this test...we don't want
# to trample on currently-running Glance servers, now do we?
conf_dir = os.path.join(self.test_dir, 'etc')
conf_filepath = os.path.join(conf_dir, "%s.conf" % self.server_name)
if os.path.exists(conf_filepath):
os.unlink(conf_filepath)
paste_conf_filepath = conf_filepath.replace(".conf", "-paste.ini")
if os.path.exists(paste_conf_filepath):
os.unlink(paste_conf_filepath)
utils.safe_mkdirs(conf_dir)
def override_conf(filepath, overridden):
with open(filepath, 'wb') as conf_file:
conf_file.write(overridden)
conf_file.flush()
return conf_file.name
overridden_core = self.conf_base % conf_override
self.conf_file_name = override_conf(conf_filepath, overridden_core)
overridden_paste = ''
if self.paste_conf_base:
overridden_paste = self.paste_conf_base % conf_override
override_conf(paste_conf_filepath, overridden_paste)
overridden = ('==Core config==\n%s\n==Paste config==\n%s' %
(overridden_core, overridden_paste))
return self.conf_file_name, overridden
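    # Note (added): conf_base / paste_conf_base are old-style %-templates, so
    # every attribute set in __init__ (bind_port, log_file, ...) plus any
    # kwargs passed to start() or write_conf() get substituted by name, e.g.
    # "bind_port = %(bind_port)s" becomes "bind_port = 4321" for port 4321.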
def start(self, expect_exit=True, expected_exitcode=0, **kwargs):
"""
Starts the server.
Any kwargs passed to this method will override the configuration
value in the conf file used in starting the servers.
"""
# Ensure the configuration file is written
self.write_conf(**kwargs)
self.create_database()
cmd = ("%(server_module)s --config-file %(conf_file_name)s"
% {"server_module": self.server_module,
"conf_file_name": self.conf_file_name})
cmd = "%s -m %s" % (sys.executable, cmd)
# close the sock and release the unused port closer to start time
if self.exec_env:
exec_env = self.exec_env.copy()
else:
exec_env = {}
if self.sock:
if not self.fork_socket:
self.sock.close()
self.sock = None
else:
fd = os.dup(self.sock.fileno())
exec_env[utils.GLANCE_TEST_SOCKET_FD_STR] = str(fd)
self.sock.close()
self.process_pid = test_utils.fork_exec(cmd,
logfile=os.devnull,
exec_env=exec_env)
self.stop_kill = not expect_exit
if self.pid_file:
pf = open(self.pid_file, 'w')
pf.write('%d\n' % self.process_pid)
pf.close()
if not expect_exit:
rc = 0
try:
os.kill(self.process_pid, 0)
except OSError:
raise RuntimeError("The process did not start")
else:
rc = test_utils.wait_for_fork(
self.process_pid,
expected_exitcode=expected_exitcode)
# avoid an FD leak
if self.sock:
os.close(fd)
self.sock = None
return (rc, '', '')
def reload(self, expect_exit=True, expected_exitcode=0, **kwargs):
"""
Start and stop the service to reload
Any kwargs passed to this method will override the configuration
value in the conf file used in starting the servers.
"""
self.stop()
return self.start(expect_exit=expect_exit,
expected_exitcode=expected_exitcode, **kwargs)
def create_database(self):
"""Create database if required for this server"""
if self.needs_database:
conf_dir = os.path.join(self.test_dir, 'etc')
utils.safe_mkdirs(conf_dir)
conf_filepath = os.path.join(conf_dir, 'glance-manage.conf')
with open(conf_filepath, 'wb') as conf_file:
conf_file.write('[DEFAULT]\n')
conf_file.write('sql_connection = %s' % self.sql_connection)
conf_file.flush()
glance_db_env = 'GLANCE_DB_TEST_SQLITE_FILE'
if glance_db_env in os.environ:
# use the empty db created and cached as a tempfile
# instead of spending the time creating a new one
db_location = os.environ[glance_db_env]
os.system('cp %s %s/tests.sqlite'
% (db_location, self.test_dir))
else:
cmd = ('%s -m glance.cmd.manage --config-file %s db sync' %
(sys.executable, conf_filepath))
execute(cmd, no_venv=self.no_venv, exec_env=self.exec_env,
expect_exit=True)
# copy the clean db to a temp location so that it
# can be reused for future tests
(osf, db_location) = tempfile.mkstemp()
os.close(osf)
os.system('cp %s/tests.sqlite %s'
% (self.test_dir, db_location))
os.environ[glance_db_env] = db_location
# cleanup the temp file when the test suite is
# complete
def _delete_cached_db():
try:
os.remove(os.environ[glance_db_env])
except Exception:
glance_tests.logger.exception(
"Error cleaning up the file %s" %
os.environ[glance_db_env])
atexit.register(_delete_cached_db)
def stop(self):
"""
Spin down the server.
"""
if not self.process_pid:
raise Exception('why is this being called? %s' % self.server_name)
if self.stop_kill:
os.kill(self.process_pid, signal.SIGTERM)
rc = test_utils.wait_for_fork(self.process_pid, raise_error=False)
return (rc, '', '')
def dump_log(self, name):
log = logging.getLogger(name)
if not self.log_file or not os.path.exists(self.log_file):
return
fptr = open(self.log_file, 'r')
for line in fptr:
log.info(line.strip())
class ApiServer(Server):
"""
Server object that starts/stops/manages the API server
"""
def __init__(self, test_dir, port, policy_file, delayed_delete=False,
pid_file=None, sock=None, **kwargs):
super(ApiServer, self).__init__(test_dir, port, sock=sock)
self.server_name = 'api'
self.server_module = 'glance.cmd.%s' % self.server_name
self.default_store = kwargs.get("default_store", "file")
self.key_file = ""
self.cert_file = ""
self.metadata_encryption_key = "012345678901234567890123456789ab"
self.image_dir = os.path.join(self.test_dir, "images")
self.pid_file = pid_file or os.path.join(self.test_dir, "api.pid")
self.scrubber_datadir = os.path.join(self.test_dir, "scrubber")
self.log_file = os.path.join(self.test_dir, "api.log")
self.image_size_cap = 1099511627776
self.delayed_delete = delayed_delete
self.owner_is_tenant = True
self.workers = 0
self.scrub_time = 5
self.image_cache_dir = os.path.join(self.test_dir,
'cache')
self.image_cache_driver = 'sqlite'
self.policy_file = policy_file
self.policy_default_rule = 'default'
self.property_protection_rule_format = 'roles'
self.image_member_quota = 10
self.image_property_quota = 10
self.image_tag_quota = 10
self.image_location_quota = 2
self.needs_database = True
default_sql_connection = 'sqlite:////%s/tests.sqlite' % self.test_dir
self.sql_connection = os.environ.get('GLANCE_TEST_SQL_CONNECTION',
default_sql_connection)
self.data_api = kwargs.get("data_api",
"glance.db.sqlalchemy.api")
self.user_storage_quota = '0'
self.lock_path = self.test_dir
self.location_strategy = 'location_order'
self.store_type_location_strategy_preference = ""
self.conf_base = """[DEFAULT]
verbose = %(verbose)s
debug = %(debug)s
default_log_levels = eventlet.wsgi.server=DEBUG
bind_host = 127.0.0.1
bind_port = %(bind_port)s
key_file = %(key_file)s
cert_file = %(cert_file)s
metadata_encryption_key = %(metadata_encryption_key)s
registry_host = 127.0.0.1
registry_port = %(registry_port)s
use_user_token = %(use_user_token)s
log_file = %(log_file)s
image_size_cap = %(image_size_cap)d
delayed_delete = %(delayed_delete)s
owner_is_tenant = %(owner_is_tenant)s
workers = %(workers)s
scrub_time = %(scrub_time)s
scrubber_datadir = %(scrubber_datadir)s
image_cache_dir = %(image_cache_dir)s
image_cache_driver = %(image_cache_driver)s
data_api = %(data_api)s
sql_connection = %(sql_connection)s
show_image_direct_url = %(show_image_direct_url)s
show_multiple_locations = %(show_multiple_locations)s
user_storage_quota = %(user_storage_quota)s
enable_v1_api = %(enable_v1_api)s
enable_v2_api = %(enable_v2_api)s
lock_path = %(lock_path)s
property_protection_file = %(property_protection_file)s
property_protection_rule_format = %(property_protection_rule_format)s
image_member_quota=%(image_member_quota)s
image_property_quota=%(image_property_quota)s
image_tag_quota=%(image_tag_quota)s
image_location_quota=%(image_location_quota)s
location_strategy=%(location_strategy)s
[oslo_policy]
policy_file = %(policy_file)s
policy_default_rule = %(policy_default_rule)s
[paste_deploy]
flavor = %(deployment_flavor)s
[store_type_location_strategy]
store_type_preference = %(store_type_location_strategy_preference)s
[glance_store]
filesystem_store_datadir=%(image_dir)s
default_store = %(default_store)s
"""
self.paste_conf_base = """[pipeline:glance-api]
pipeline = versionnegotiation gzip unauthenticated-context rootapp
[pipeline:glance-api-caching]
pipeline = versionnegotiation gzip unauthenticated-context cache rootapp
[pipeline:glance-api-cachemanagement]
pipeline =
versionnegotiation
gzip
unauthenticated-context
cache
cache_manage
rootapp
[pipeline:glance-api-fakeauth]
pipeline = versionnegotiation gzip fakeauth context rootapp
[pipeline:glance-api-noauth]
pipeline = versionnegotiation gzip context rootapp
[composite:rootapp]
paste.composite_factory = glance.api:root_app_factory
/: apiversions
/v1: apiv1app
/v2: apiv2app
[app:apiversions]
paste.app_factory = glance.api.versions:create_resource
[app:apiv1app]
paste.app_factory = glance.api.v1.router:API.factory
[app:apiv2app]
paste.app_factory = glance.api.v2.router:API.factory
[filter:versionnegotiation]
paste.filter_factory =
glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory
[filter:gzip]
paste.filter_factory = glance.api.middleware.gzip:GzipMiddleware.factory
[filter:cache]
paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory
[filter:cache_manage]
paste.filter_factory =
glance.api.middleware.cache_manage:CacheManageFilter.factory
[filter:context]
paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
[filter:unauthenticated-context]
paste.filter_factory =
glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
[filter:fakeauth]
paste.filter_factory = glance.tests.utils:FakeAuthMiddleware.factory
"""
class RegistryServer(Server):
"""
Server object that starts/stops/manages the Registry server
"""
def __init__(self, test_dir, port, policy_file, sock=None):
super(RegistryServer, self).__init__(test_dir, port, sock=sock)
self.server_name = 'registry'
self.server_module = 'glance.cmd.%s' % self.server_name
self.needs_database = True
default_sql_connection = 'sqlite:////%s/tests.sqlite' % self.test_dir
self.sql_connection = os.environ.get('GLANCE_TEST_SQL_CONNECTION',
default_sql_connection)
self.pid_file = os.path.join(self.test_dir, "registry.pid")
self.log_file = os.path.join(self.test_dir, "registry.log")
self.owner_is_tenant = True
self.workers = 0
self.api_version = 1
self.user_storage_quota = '0'
self.metadata_encryption_key = "012345678901234567890123456789ab"
self.policy_file = policy_file
self.policy_default_rule = 'default'
self.conf_base = """[DEFAULT]
verbose = %(verbose)s
debug = %(debug)s
bind_host = 127.0.0.1
bind_port = %(bind_port)s
log_file = %(log_file)s
sql_connection = %(sql_connection)s
sql_idle_timeout = 3600
api_limit_max = 1000
limit_param_default = 25
owner_is_tenant = %(owner_is_tenant)s
enable_v2_registry = %(enable_v2_registry)s
workers = %(workers)s
user_storage_quota = %(user_storage_quota)s
metadata_encryption_key = %(metadata_encryption_key)s
[oslo_policy]
policy_file = %(policy_file)s
policy_default_rule = %(policy_default_rule)s
[paste_deploy]
flavor = %(deployment_flavor)s
"""
self.paste_conf_base = """[pipeline:glance-registry]
pipeline = unauthenticated-context registryapp
[pipeline:glance-registry-fakeauth]
pipeline = fakeauth context registryapp
[pipeline:glance-registry-trusted-auth]
pipeline = context registryapp
[app:registryapp]
paste.app_factory = glance.registry.api:API.factory
[filter:context]
paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
[filter:unauthenticated-context]
paste.filter_factory =
glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
[filter:fakeauth]
paste.filter_factory = glance.tests.utils:FakeAuthMiddleware.factory
"""
class ScrubberDaemon(Server):
"""
Server object that starts/stops/manages the Scrubber server
"""
def __init__(self, test_dir, policy_file, daemon=False, **kwargs):
# NOTE(jkoelker): Set the port to 0 since we actually don't listen
super(ScrubberDaemon, self).__init__(test_dir, 0)
self.server_name = 'scrubber'
self.server_module = 'glance.cmd.%s' % self.server_name
self.daemon = daemon
self.image_dir = os.path.join(self.test_dir, "images")
self.scrub_time = 5
self.scrubber_datadir = os.path.join(self.test_dir,
"scrubber")
self.pid_file = os.path.join(self.test_dir, "scrubber.pid")
self.log_file = os.path.join(self.test_dir, "scrubber.log")
self.metadata_encryption_key = "012345678901234567890123456789ab"
self.lock_path = self.test_dir
default_sql_connection = 'sqlite:////%s/tests.sqlite' % self.test_dir
self.sql_connection = os.environ.get('GLANCE_TEST_SQL_CONNECTION',
default_sql_connection)
self.policy_file = policy_file
self.policy_default_rule = 'default'
self.conf_base = """[DEFAULT]
verbose = %(verbose)s
debug = %(debug)s
filesystem_store_datadir=%(image_dir)s
log_file = %(log_file)s
daemon = %(daemon)s
wakeup_time = 2
scrub_time = %(scrub_time)s
scrubber_datadir = %(scrubber_datadir)s
registry_host = 127.0.0.1
registry_port = %(registry_port)s
metadata_encryption_key = %(metadata_encryption_key)s
lock_path = %(lock_path)s
sql_connection = %(sql_connection)s
sql_idle_timeout = 3600
[oslo_policy]
policy_file = %(policy_file)s
policy_default_rule = %(policy_default_rule)s
"""
def start(self, expect_exit=True, expected_exitcode=0, **kwargs):
if 'daemon' in kwargs:
expect_exit = False
return super(ScrubberDaemon, self).start(
expect_exit=expect_exit,
expected_exitcode=expected_exitcode,
**kwargs)
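# Usage sketch (added; the test class and body below are hypothetical): tests
# derive from FunctionalTest (defined next) and drive real servers roughly so:
#
#     class TestImagesApi(FunctionalTest):
#         def test_something(self):
#             self.start_servers()                  # API + registry on free ports
#             url = 'http://127.0.0.1:%d/v2/images' % self.api_port
#             ...                                   # exercise the live API
#             self.cleanup()                        # spin everything down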
class FunctionalTest(test_utils.BaseTestCase):
"""
Base test class for any test that wants to test the actual
servers and clients and not just the stubbed out interfaces
"""
inited = False
disabled = False
launched_servers = []
def setUp(self):
super(FunctionalTest, self).setUp()
self.test_dir = self.useFixture(fixtures.TempDir()).path
self.api_protocol = 'http'
self.api_port, api_sock = test_utils.get_unused_port_and_socket()
self.registry_port, reg_sock = test_utils.get_unused_port_and_socket()
conf_dir = os.path.join(self.test_dir, 'etc')
utils.safe_mkdirs(conf_dir)
self.copy_data_file('schema-image.json', conf_dir)
self.copy_data_file('policy.json', conf_dir)
self.copy_data_file('property-protections.conf', conf_dir)
self.copy_data_file('property-protections-policies.conf', conf_dir)
self.property_file_roles = os.path.join(conf_dir,
'property-protections.conf')
property_policies = 'property-protections-policies.conf'
self.property_file_policies = os.path.join(conf_dir,
property_policies)
self.policy_file = os.path.join(conf_dir, 'policy.json')
self.api_server = ApiServer(self.test_dir,
self.api_port,
self.policy_file,
sock=api_sock)
self.registry_server = RegistryServer(self.test_dir,
self.registry_port,
self.policy_file,
sock=reg_sock)
self.scrubber_daemon = ScrubberDaemon(self.test_dir, self.policy_file)
self.pid_files = [self.api_server.pid_file,
self.registry_server.pid_file,
self.scrubber_daemon.pid_file]
self.files_to_destroy = []
self.launched_servers = []
def tearDown(self):
if not self.disabled:
self.cleanup()
# We destroy the test data store between each test case,
# and recreate it, which ensures that we have no side-effects
# from the tests
self._reset_database(self.registry_server.sql_connection)
self._reset_database(self.api_server.sql_connection)
super(FunctionalTest, self).tearDown()
self.api_server.dump_log('api_server')
self.registry_server.dump_log('registry_server')
self.scrubber_daemon.dump_log('scrubber_daemon')
def set_policy_rules(self, rules):
fap = open(self.policy_file, 'w')
fap.write(jsonutils.dumps(rules))
fap.close()
def _reset_database(self, conn_string):
conn_pieces = urlparse.urlparse(conn_string)
if conn_string.startswith('sqlite'):
# We leave behind the sqlite DB for failing tests to aid
# in diagnosis, as the file size is relatively small and
# won't interfere with subsequent tests as it's in a per-
# test directory (which is blown-away if the test is green)
pass
elif conn_string.startswith('mysql'):
# We can execute the MySQL client to destroy and re-create
# the MYSQL database, which is easier and less error-prone
# than using SQLAlchemy to do this via MetaData...trust me.
database = conn_pieces.path.strip('/')
loc_pieces = conn_pieces.netloc.split('@')
host = loc_pieces[1]
auth_pieces = loc_pieces[0].split(':')
user = auth_pieces[0]
password = ""
if len(auth_pieces) > 1:
if auth_pieces[1].strip():
password = "-p%s" % auth_pieces[1]
sql = ("drop database if exists %(database)s; "
"create database %(database)s;") % {'database': database}
cmd = ("mysql -u%(user)s %(password)s -h%(host)s "
"-e\"%(sql)s\"") % {'user': user, 'password': password,
'host': host, 'sql': sql}
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
def cleanup(self):
"""
Makes sure anything we created or started up in the
tests are destroyed or spun down
"""
# NOTE(jbresnah) call stop on each of the servers instead of
# checking the pid file. stop() will wait until the child
# server is dead. This eliminates the possibility of a race
# between a child process listening on a port actually dying
# and a new process being started
servers = [self.api_server,
self.registry_server,
self.scrubber_daemon]
for s in servers:
try:
s.stop()
except Exception:
pass
for f in self.files_to_destroy:
if os.path.exists(f):
os.unlink(f)
def start_server(self,
server,
expect_launch,
expect_exit=True,
expected_exitcode=0,
**kwargs):
"""
Starts a server on an unused port.
Any kwargs passed to this method will override the configuration
value in the conf file used in starting the server.
:param server: the server to launch
:param expect_launch: true iff the server is expected to
successfully start
:param expect_exit: true iff the launched process is expected
to exit in a timely fashion
:param expected_exitcode: expected exitcode from the launcher
"""
self.cleanup()
# Start up the requested server
exitcode, out, err = server.start(expect_exit=expect_exit,
expected_exitcode=expected_exitcode,
**kwargs)
if expect_exit:
self.assertEqual(expected_exitcode, exitcode,
"Failed to spin up the requested server. "
"Got: %s" % err)
self.launched_servers.append(server)
launch_msg = self.wait_for_servers([server], expect_launch)
self.assertTrue(launch_msg is None, launch_msg)
def start_with_retry(self, server, port_name, max_retries,
expect_launch=True,
**kwargs):
"""
Starts a server, with retries if the server launches but
fails to start listening on the expected port.
:param server: the server to launch
:param port_name: the name of the port attribute
:param max_retries: the maximum number of attempts
:param expect_launch: true iff the server is expected to
successfully start
        :param kwargs: any remaining kwargs override the corresponding
                       values in the conf file used in starting the server
"""
launch_msg = None
for i in range(max_retries):
exitcode, out, err = server.start(expect_exit=not expect_launch,
**kwargs)
name = server.server_name
self.assertEqual(0, exitcode,
"Failed to spin up the %s server. "
"Got: %s" % (name, err))
launch_msg = self.wait_for_servers([server], expect_launch)
if launch_msg:
server.stop()
server.bind_port = get_unused_port()
setattr(self, port_name, server.bind_port)
else:
self.launched_servers.append(server)
break
self.assertTrue(launch_msg is None, launch_msg)
def start_servers(self, **kwargs):
"""
Starts the API and Registry servers (glance-control api start
& glance-control registry start) on unused ports. glance-control
should be installed into the python path
Any kwargs passed to this method will override the configuration
value in the conf file used in starting the servers.
"""
self.cleanup()
# Start up the API and default registry server
# We start the registry server first, as the API server config
# depends on the registry port - this ordering allows for
# retrying the launch on a port clash
self.start_with_retry(self.registry_server, 'registry_port', 3,
**kwargs)
kwargs['registry_port'] = self.registry_server.bind_port
self.start_with_retry(self.api_server, 'api_port', 3, **kwargs)
exitcode, out, err = self.scrubber_daemon.start(**kwargs)
self.assertEqual(0, exitcode,
"Failed to spin up the Scrubber daemon. "
"Got: %s" % err)
def ping_server(self, port):
"""
Simple ping on the port. If responsive, return True, else
return False.
:note We use raw sockets, not ping here, since ping uses ICMP and
has no concept of ports...
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect(("127.0.0.1", port))
s.close()
return True
except socket.error:
return False
def wait_for_servers(self, servers, expect_launch=True, timeout=10):
"""
Tight loop, waiting for the given server port(s) to be available.
Returns when all are pingable. There is a timeout on waiting
for the servers to come up.
:param servers: Glance server ports to ping
:param expect_launch: Optional, true iff the server(s) are
expected to successfully start
        :param timeout: Optional, defaults to 10 seconds
:return: None if launch expectation is met, otherwise an
assertion message
"""
now = datetime.datetime.now()
timeout_time = now + datetime.timedelta(seconds=timeout)
replied = []
while (timeout_time > now):
pinged = 0
for server in servers:
if self.ping_server(server.bind_port):
pinged += 1
if server not in replied:
replied.append(server)
if pinged == len(servers):
msg = 'Unexpected server launch status'
return None if expect_launch else msg
now = datetime.datetime.now()
time.sleep(0.05)
failed = list(set(servers) - set(replied))
msg = 'Unexpected server launch status for: '
for f in failed:
msg += ('%s, ' % f.server_name)
if os.path.exists(f.pid_file):
pid = f.process_pid
trace = f.pid_file.replace('.pid', '.trace')
cmd = 'strace -p %d -o %s' % (pid, trace)
execute(cmd, raise_error=False, expect_exit=False)
time.sleep(0.5)
if os.path.exists(trace):
msg += ('\nstrace:\n%s\n' % open(trace).read())
self.add_log_details(failed)
return msg if expect_launch else None
def stop_server(self, server, name):
"""
Called to stop a single server in a normal fashion using the
glance-control stop method to gracefully shut the server down.
:param server: the server to stop
"""
# Spin down the requested server
server.stop()
def stop_servers(self):
"""
Called to stop the started servers in a normal fashion. Note
that cleanup() will stop the servers using a fairly draconian
method of sending a SIGTERM signal to the servers. Here, we use
the glance-control stop method to gracefully shut the server down.
This method also asserts that the shutdown was clean, and so it
is meant to be called during a normal test case sequence.
"""
# Spin down the API and default registry server
self.stop_server(self.api_server, 'API server')
self.stop_server(self.registry_server, 'Registry server')
self.stop_server(self.scrubber_daemon, 'Scrubber daemon')
self._reset_database(self.registry_server.sql_connection)
def run_sql_cmd(self, sql):
"""
Provides a crude mechanism to run manual SQL commands for backend
DB verification within the functional tests.
The raw result set is returned.
"""
engine = db_api.get_engine()
return engine.execute(sql)
def copy_data_file(self, file_name, dst_dir):
src_file_name = os.path.join('glance/tests/etc', file_name)
shutil.copy(src_file_name, dst_dir)
dst_file_name = os.path.join(dst_dir, file_name)
return dst_file_name
def add_log_details(self, servers=None):
logs = [s.log_file for s in (servers or self.launched_servers)]
for log in logs:
if os.path.exists(log):
testtools.content.attach_file(self, log)
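# A hypothetical usage sketch (not part of the original module), showing how a
# functional test typically drives the helpers above; the endpoint and the
# expected status are illustrative placeholders only.
import httplib2
class TestApiIsAlive(FunctionalTest):
    def test_api_responds(self):
        # kwargs override the corresponding values in the generated conf files
        self.start_servers(**self.__dict__.copy())
        url = 'http://127.0.0.1:%d/v2/images' % self.api_port
        response, _content = httplib2.Http().request(url, 'GET')
        self.assertEqual(200, response.status)
        self.stop_servers()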
|
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
import time
from oslo_utils import fixture
from oslo_utils import timeutils
from taskflow.engines.worker_based import executor
from taskflow.engines.worker_based import protocol as pr
from taskflow import task as task_atom
from taskflow import test
from taskflow.test import mock
from taskflow.tests import utils as test_utils
from taskflow.types import failure
class TestWorkerTaskExecutor(test.MockTestCase):
def setUp(self):
super(TestWorkerTaskExecutor, self).setUp()
self.task = test_utils.DummyTask()
self.task_uuid = 'task-uuid'
self.task_args = {'a': 'a'}
self.task_result = 'task-result'
self.task_failures = {}
self.timeout = 60
self.broker_url = 'broker-url'
self.executor_uuid = 'executor-uuid'
self.executor_exchange = 'executor-exchange'
self.executor_topic = 'test-topic1'
self.proxy_started_event = threading.Event()
# patch classes
self.proxy_mock, self.proxy_inst_mock = self.patchClass(
executor.proxy, 'Proxy')
self.request_mock, self.request_inst_mock = self.patchClass(
executor.pr, 'Request', autospec=False)
# other mocking
self.proxy_inst_mock.start.side_effect = self._fake_proxy_start
self.proxy_inst_mock.stop.side_effect = self._fake_proxy_stop
self.request_inst_mock.uuid = self.task_uuid
self.request_inst_mock.expired = False
self.request_inst_mock.task_cls = self.task.name
self.message_mock = mock.MagicMock(name='message')
self.message_mock.properties = {'correlation_id': self.task_uuid,
'type': pr.RESPONSE}
def _fake_proxy_start(self):
self.proxy_started_event.set()
while self.proxy_started_event.is_set():
time.sleep(0.01)
def _fake_proxy_stop(self):
self.proxy_started_event.clear()
def executor(self, reset_master_mock=True, **kwargs):
executor_kwargs = dict(uuid=self.executor_uuid,
exchange=self.executor_exchange,
topics=[self.executor_topic],
url=self.broker_url)
executor_kwargs.update(kwargs)
ex = executor.WorkerTaskExecutor(**executor_kwargs)
if reset_master_mock:
self.resetMasterMock()
return ex
def test_creation(self):
ex = self.executor(reset_master_mock=False)
master_mock_calls = [
mock.call.Proxy(self.executor_uuid, self.executor_exchange,
on_wait=ex._on_wait,
url=self.broker_url, transport=mock.ANY,
transport_options=mock.ANY,
retry_options=mock.ANY,
type_handlers=mock.ANY),
mock.call.proxy.dispatcher.type_handlers.update(mock.ANY),
]
self.assertEqual(master_mock_calls, self.master_mock.mock_calls)
def test_on_message_response_state_running(self):
response = pr.Response(pr.RUNNING)
ex = self.executor()
ex._requests_cache[self.task_uuid] = self.request_inst_mock
ex._process_response(response.to_dict(), self.message_mock)
expected_calls = [
mock.call.transition_and_log_error(pr.RUNNING, logger=mock.ANY),
]
self.assertEqual(expected_calls, self.request_inst_mock.mock_calls)
def test_on_message_response_state_progress(self):
response = pr.Response(pr.EVENT,
event_type=task_atom.EVENT_UPDATE_PROGRESS,
details={'progress': 1.0})
ex = self.executor()
ex._requests_cache[self.task_uuid] = self.request_inst_mock
ex._process_response(response.to_dict(), self.message_mock)
expected_calls = [
mock.call.notifier.notify(task_atom.EVENT_UPDATE_PROGRESS,
{'progress': 1.0}),
]
self.assertEqual(expected_calls, self.request_inst_mock.mock_calls)
def test_on_message_response_state_failure(self):
a_failure = failure.Failure.from_exception(Exception('test'))
failure_dict = a_failure.to_dict()
response = pr.Response(pr.FAILURE, result=failure_dict)
ex = self.executor()
ex._requests_cache[self.task_uuid] = self.request_inst_mock
ex._process_response(response.to_dict(), self.message_mock)
self.assertEqual(0, len(ex._requests_cache))
expected_calls = [
mock.call.transition_and_log_error(pr.FAILURE, logger=mock.ANY),
mock.call.set_result(result=test_utils.FailureMatcher(a_failure))
]
self.assertEqual(expected_calls, self.request_inst_mock.mock_calls)
def test_on_message_response_state_success(self):
response = pr.Response(pr.SUCCESS, result=self.task_result,
event='executed')
ex = self.executor()
ex._requests_cache[self.task_uuid] = self.request_inst_mock
ex._process_response(response.to_dict(), self.message_mock)
expected_calls = [
mock.call.transition_and_log_error(pr.SUCCESS, logger=mock.ANY),
mock.call.set_result(result=self.task_result, event='executed')
]
self.assertEqual(expected_calls, self.request_inst_mock.mock_calls)
def test_on_message_response_unknown_state(self):
response = pr.Response(state='<unknown>')
ex = self.executor()
ex._requests_cache[self.task_uuid] = self.request_inst_mock
ex._process_response(response.to_dict(), self.message_mock)
self.assertEqual([], self.request_inst_mock.mock_calls)
def test_on_message_response_unknown_task(self):
self.message_mock.properties['correlation_id'] = '<unknown>'
response = pr.Response(pr.RUNNING)
ex = self.executor()
ex._requests_cache[self.task_uuid] = self.request_inst_mock
ex._process_response(response.to_dict(), self.message_mock)
self.assertEqual([], self.request_inst_mock.mock_calls)
def test_on_message_response_no_correlation_id(self):
self.message_mock.properties = {'type': pr.RESPONSE}
response = pr.Response(pr.RUNNING)
ex = self.executor()
ex._requests_cache[self.task_uuid] = self.request_inst_mock
ex._process_response(response.to_dict(), self.message_mock)
self.assertEqual([], self.request_inst_mock.mock_calls)
def test_on_wait_task_not_expired(self):
ex = self.executor()
ex._requests_cache[self.task_uuid] = self.request_inst_mock
self.assertEqual(1, len(ex._requests_cache))
ex._on_wait()
self.assertEqual(1, len(ex._requests_cache))
def test_on_wait_task_expired(self):
now = timeutils.utcnow()
f = self.useFixture(fixture.TimeFixture(override_time=now))
self.request_inst_mock.expired = True
self.request_inst_mock.created_on = now
f.advance_time_seconds(120)
ex = self.executor()
ex._requests_cache[self.task_uuid] = self.request_inst_mock
self.assertEqual(1, len(ex._requests_cache))
ex._on_wait()
self.assertEqual(0, len(ex._requests_cache))
def test_remove_task_non_existent(self):
ex = self.executor()
ex._requests_cache[self.task_uuid] = self.request_inst_mock
self.assertEqual(1, len(ex._requests_cache))
del ex._requests_cache[self.task_uuid]
self.assertEqual(0, len(ex._requests_cache))
# delete non-existent
try:
del ex._requests_cache[self.task_uuid]
except KeyError:
pass
self.assertEqual(0, len(ex._requests_cache))
def test_execute_task(self):
ex = self.executor()
ex._finder._add(self.executor_topic, [self.task.name])
ex.execute_task(self.task, self.task_uuid, self.task_args)
expected_calls = [
mock.call.Request(self.task, self.task_uuid, 'execute',
self.task_args, self.timeout),
mock.call.request.transition_and_log_error(pr.PENDING,
logger=mock.ANY),
mock.call.proxy.publish(self.request_inst_mock,
self.executor_topic,
reply_to=self.executor_uuid,
correlation_id=self.task_uuid)
]
self.assertEqual(expected_calls, self.master_mock.mock_calls)
def test_revert_task(self):
ex = self.executor()
ex._finder._add(self.executor_topic, [self.task.name])
ex.revert_task(self.task, self.task_uuid, self.task_args,
self.task_result, self.task_failures)
expected_calls = [
mock.call.Request(self.task, self.task_uuid, 'revert',
self.task_args, self.timeout,
failures=self.task_failures,
result=self.task_result),
mock.call.request.transition_and_log_error(pr.PENDING,
logger=mock.ANY),
mock.call.proxy.publish(self.request_inst_mock,
self.executor_topic,
reply_to=self.executor_uuid,
correlation_id=self.task_uuid)
]
self.assertEqual(expected_calls, self.master_mock.mock_calls)
def test_execute_task_topic_not_found(self):
ex = self.executor()
ex.execute_task(self.task, self.task_uuid, self.task_args)
expected_calls = [
mock.call.Request(self.task, self.task_uuid, 'execute',
self.task_args, self.timeout),
]
self.assertEqual(expected_calls, self.master_mock.mock_calls)
def test_execute_task_publish_error(self):
self.proxy_inst_mock.publish.side_effect = Exception('Woot!')
ex = self.executor()
ex._finder._add(self.executor_topic, [self.task.name])
ex.execute_task(self.task, self.task_uuid, self.task_args)
expected_calls = [
mock.call.Request(self.task, self.task_uuid, 'execute',
self.task_args, self.timeout),
mock.call.request.transition_and_log_error(pr.PENDING,
logger=mock.ANY),
mock.call.proxy.publish(self.request_inst_mock,
self.executor_topic,
reply_to=self.executor_uuid,
correlation_id=self.task_uuid),
mock.call.request.transition_and_log_error(pr.FAILURE,
logger=mock.ANY),
mock.call.request.set_result(mock.ANY)
]
self.assertEqual(expected_calls, self.master_mock.mock_calls)
def test_start_stop(self):
ex = self.executor()
ex.start()
# make sure proxy thread started
self.assertTrue(self.proxy_started_event.wait(test_utils.WAIT_TIMEOUT))
# stop executor
ex.stop()
self.master_mock.assert_has_calls([
mock.call.proxy.start(),
mock.call.proxy.wait(),
mock.call.proxy.stop()
], any_order=True)
def test_start_already_running(self):
ex = self.executor()
ex.start()
# make sure proxy thread started
self.assertTrue(self.proxy_started_event.wait(test_utils.WAIT_TIMEOUT))
# start executor again
ex.start()
# stop executor
ex.stop()
self.master_mock.assert_has_calls([
mock.call.proxy.start(),
mock.call.proxy.wait(),
mock.call.proxy.stop()
], any_order=True)
def test_stop_not_running(self):
self.executor().stop()
self.assertEqual([], self.master_mock.mock_calls)
def test_stop_not_alive(self):
self.proxy_inst_mock.start.side_effect = None
# start executor
ex = self.executor()
ex.start()
# stop executor
ex.stop()
# since proxy thread is already done - stop is not called
self.master_mock.assert_has_calls([
mock.call.proxy.start(),
mock.call.proxy.wait()
], any_order=True)
def test_restart(self):
ex = self.executor()
ex.start()
# make sure thread started
self.assertTrue(self.proxy_started_event.wait(test_utils.WAIT_TIMEOUT))
# restart executor
ex.stop()
ex.start()
# make sure thread started
self.assertTrue(self.proxy_started_event.wait(test_utils.WAIT_TIMEOUT))
# stop executor
ex.stop()
self.master_mock.assert_has_calls([
mock.call.proxy.start(),
mock.call.proxy.wait(),
mock.call.proxy.stop(),
mock.call.proxy.start(),
mock.call.proxy.wait(),
mock.call.proxy.stop()
], any_order=True)
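# A minimal, hypothetical usage sketch (not part of the test module) of the
# executor exercised above; the exchange, topic and in-memory broker URL are
# placeholders, and a real worker must listen on the topic for the published
# request to be processed.
if __name__ == '__main__':
    from taskflow.engines.worker_based import executor as wb_executor
    from taskflow.tests import utils as wb_test_utils
    ex = wb_executor.WorkerTaskExecutor(uuid='executor-uuid',
                                        exchange='test-exchange',
                                        topics=['test-topic1'],
                                        url='memory://')
    ex.start()
    try:
        # publishes a pr.Request asking a worker on the topic to run the task
        ex.execute_task(wb_test_utils.DummyTask(), 'task-uuid', {'a': 'a'})
    finally:
        ex.stop()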
|
|
"""
sphinx.builders._epub_base
~~~~~~~~~~~~~~~~~~~~~~~~~~
Base class of epub2/epub3 builders.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import html
import os
import re
from os import path
from typing import Any, Dict, List, NamedTuple, Set, Tuple
from zipfile import ZIP_DEFLATED, ZIP_STORED, ZipFile
from docutils import nodes
from docutils.nodes import Element, Node
from docutils.utils import smartquotes
from sphinx import addnodes
from sphinx.builders.html import BuildInfo, StandaloneHTMLBuilder
from sphinx.locale import __
from sphinx.util import logging, status_iterator
from sphinx.util.fileutil import copy_asset_file
from sphinx.util.i18n import format_date
from sphinx.util.osutil import copyfile, ensuredir
try:
from PIL import Image
except ImportError:
Image = None
logger = logging.getLogger(__name__)
# (Fragment) templates from which the metainfo files content.opf and
# toc.ncx are created.
# This template section also defines strings that are embedded in the html
# output but that may be customized by (re-)setting module attributes,
# e.g. from conf.py.
COVERPAGE_NAME = 'epub-cover.xhtml'
TOCTREE_TEMPLATE = 'toctree-l%d'
LINK_TARGET_TEMPLATE = ' [%(uri)s]'
FOOTNOTE_LABEL_TEMPLATE = '#%d'
FOOTNOTES_RUBRIC_NAME = 'Footnotes'
CSS_LINK_TARGET_CLASS = 'link-target'
# XXX These strings should be localized according to epub_language
GUIDE_TITLES = {
'toc': 'Table of Contents',
'cover': 'Cover'
}
MEDIA_TYPES = {
'.xhtml': 'application/xhtml+xml',
'.css': 'text/css',
'.png': 'image/png',
'.gif': 'image/gif',
'.svg': 'image/svg+xml',
'.jpg': 'image/jpeg',
'.jpeg': 'image/jpeg',
'.otf': 'application/x-font-otf',
'.ttf': 'application/x-font-ttf',
'.woff': 'application/font-woff',
}
VECTOR_GRAPHICS_EXTENSIONS = ('.svg',)
# Regular expression to match colons only in local fragment identifiers.
# If the URI contains a colon before the #,
# it is an external link that should not change.
REFURI_RE = re.compile("([^#:]*#)(.*)")
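# Illustrative examples (not part of the original module):
#
#     >>> REFURI_RE.match('page.xhtml#sec:intro').groups()
#     ('page.xhtml#', 'sec:intro')    # local fragment: its colon gets rewritten
#     >>> REFURI_RE.match('https://example.com/#frag') is None
#     True                            # colon before the '#': URI left untouched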
class ManifestItem(NamedTuple):
href: str
id: str
media_type: str
class Spine(NamedTuple):
idref: str
linear: bool
class Guide(NamedTuple):
type: str
title: str
uri: str
class NavPoint(NamedTuple):
navpoint: str
playorder: int
text: str
refuri: str
children: List[Any] # mypy does not support recursive types
# https://github.com/python/mypy/issues/7069
def sphinx_smarty_pants(t: str, language: str = 'en') -> str:
    t = t.replace('&quot;', '"')
t = smartquotes.educateDashesOldSchool(t)
t = smartquotes.educateQuotes(t, language)
    t = t.replace('"', '&quot;')
return t
ssp = sphinx_smarty_pants
# The epub publisher
class EpubBuilder(StandaloneHTMLBuilder):
"""
Builder that outputs epub files.
    It creates the metainfo files content.opf, toc.ncx, mimetype, and
META-INF/container.xml. Afterwards, all necessary files are zipped to an
epub file.
"""
# don't copy the reST source
copysource = False
supported_image_types = ['image/svg+xml', 'image/png', 'image/gif',
'image/jpeg']
supported_remote_images = False
# don't add links
add_permalinks = False
    # don't use # as current path. ePub check rejects it.
allow_sharp_as_current_path = False
# don't add sidebar etc.
embedded = True
# disable download role
download_support = False
# don't create links to original images from images
html_scaled_image_link = False
# don't generate search index or include search page
search = False
coverpage_name = COVERPAGE_NAME
toctree_template = TOCTREE_TEMPLATE
link_target_template = LINK_TARGET_TEMPLATE
css_link_target_class = CSS_LINK_TARGET_CLASS
guide_titles = GUIDE_TITLES
media_types = MEDIA_TYPES
refuri_re = REFURI_RE
template_dir = ""
doctype = ""
def init(self) -> None:
super().init()
        # the output files for epub must be .xhtml only
self.out_suffix = '.xhtml'
self.link_suffix = '.xhtml'
self.playorder = 0
self.tocid = 0
self.id_cache: Dict[str, str] = {}
self.use_index = self.get_builder_config('use_index', 'epub')
self.refnodes: List[Dict[str, Any]] = []
def create_build_info(self) -> BuildInfo:
return BuildInfo(self.config, self.tags, ['html', 'epub'])
def get_theme_config(self) -> Tuple[str, Dict]:
return self.config.epub_theme, self.config.epub_theme_options
# generic support functions
def make_id(self, name: str) -> str:
        """Return a unique id for name."""
        # id_cache is intentionally mutable
id = self.id_cache.get(name)
if not id:
id = 'epub-%d' % self.env.new_serialno('epub')
self.id_cache[name] = id
return id
def get_refnodes(self, doctree: Node, result: List[Dict[str, Any]]) -> List[Dict[str, Any]]: # NOQA
"""Collect section titles, their depth in the toc and the refuri."""
# XXX: is there a better way than checking the attribute
# toctree-l[1-8] on the parent node?
if isinstance(doctree, nodes.reference) and doctree.get('refuri'):
refuri = doctree['refuri']
if refuri.startswith('http://') or refuri.startswith('https://') \
or refuri.startswith('irc:') or refuri.startswith('mailto:'):
return result
classes = doctree.parent.attributes['classes']
for level in range(8, 0, -1): # or range(1, 8)?
if (self.toctree_template % level) in classes:
result.append({
'level': level,
'refuri': html.escape(refuri),
'text': ssp(html.escape(doctree.astext()))
})
break
elif isinstance(doctree, nodes.Element):
for elem in doctree:
result = self.get_refnodes(elem, result)
return result
def check_refnodes(self, nodes: List[Dict[str, Any]]) -> None:
appeared: Set[str] = set()
for node in nodes:
if node['refuri'] in appeared:
logger.warning(
__('duplicated ToC entry found: %s'),
node['refuri'],
type="epub",
subtype="duplicated_toc_entry",
)
else:
appeared.add(node['refuri'])
def get_toc(self) -> None:
"""Get the total table of contents, containing the root_doc
and pre and post files not managed by sphinx.
"""
doctree = self.env.get_and_resolve_doctree(self.config.root_doc,
self, prune_toctrees=False,
includehidden=True)
self.refnodes = self.get_refnodes(doctree, [])
master_dir = path.dirname(self.config.root_doc)
if master_dir:
master_dir += '/' # XXX or os.sep?
for item in self.refnodes:
item['refuri'] = master_dir + item['refuri']
self.toc_add_files(self.refnodes)
def toc_add_files(self, refnodes: List[Dict[str, Any]]) -> None:
"""Add the root_doc, pre and post files to a list of refnodes.
"""
refnodes.insert(0, {
'level': 1,
'refuri': html.escape(self.config.root_doc + self.out_suffix),
'text': ssp(html.escape(
self.env.titles[self.config.root_doc].astext()))
})
for file, text in reversed(self.config.epub_pre_files):
refnodes.insert(0, {
'level': 1,
'refuri': html.escape(file),
'text': ssp(html.escape(text))
})
for file, text in self.config.epub_post_files:
refnodes.append({
'level': 1,
'refuri': html.escape(file),
'text': ssp(html.escape(text))
})
def fix_fragment(self, prefix: str, fragment: str) -> str:
"""Return a href/id attribute with colons replaced by hyphens."""
return prefix + fragment.replace(':', '-')
def fix_ids(self, tree: nodes.document) -> None:
"""Replace colons with hyphens in href and id attributes.
        Some readers crash because they interpret the part before the
        colon as a transport protocol specification.
"""
def update_node_id(node: Element) -> None:
"""Update IDs of given *node*."""
new_ids: List[str] = []
for node_id in node['ids']:
new_id = self.fix_fragment('', node_id)
if new_id not in new_ids:
new_ids.append(new_id)
node['ids'] = new_ids
for reference in tree.traverse(nodes.reference):
if 'refuri' in reference:
m = self.refuri_re.match(reference['refuri'])
if m:
reference['refuri'] = self.fix_fragment(m.group(1), m.group(2))
if 'refid' in reference:
reference['refid'] = self.fix_fragment('', reference['refid'])
for target in tree.traverse(nodes.target):
update_node_id(target)
next_node: Node = target.next_node(ascend=True)
if isinstance(next_node, nodes.Element):
update_node_id(next_node)
for desc_signature in tree.traverse(addnodes.desc_signature):
update_node_id(desc_signature)
def add_visible_links(self, tree: nodes.document, show_urls: str = 'inline') -> None:
"""Add visible link targets for external links"""
def make_footnote_ref(doc: nodes.document, label: str) -> nodes.footnote_reference:
"""Create a footnote_reference node with children"""
footnote_ref = nodes.footnote_reference('[#]_')
footnote_ref.append(nodes.Text(label))
doc.note_autofootnote_ref(footnote_ref)
return footnote_ref
def make_footnote(doc: nodes.document, label: str, uri: str) -> nodes.footnote:
"""Create a footnote node with children"""
footnote = nodes.footnote(uri)
para = nodes.paragraph()
para.append(nodes.Text(uri))
footnote.append(para)
footnote.insert(0, nodes.label('', label))
doc.note_autofootnote(footnote)
return footnote
def footnote_spot(tree: nodes.document) -> Tuple[Element, int]:
"""Find or create a spot to place footnotes.
The function returns the tuple (parent, index)."""
# The code uses the following heuristic:
# a) place them after the last existing footnote
# b) place them after an (empty) Footnotes rubric
# c) create an empty Footnotes rubric at the end of the document
fns = tree.traverse(nodes.footnote)
if fns:
fn = fns[-1]
return fn.parent, fn.parent.index(fn) + 1
for node in tree.traverse(nodes.rubric):
if len(node) == 1 and node.astext() == FOOTNOTES_RUBRIC_NAME:
return node.parent, node.parent.index(node) + 1
doc = tree.traverse(nodes.document)[0]
rub = nodes.rubric()
rub.append(nodes.Text(FOOTNOTES_RUBRIC_NAME))
doc.append(rub)
return doc, doc.index(rub) + 1
if show_urls == 'no':
return
if show_urls == 'footnote':
doc = tree.traverse(nodes.document)[0]
fn_spot, fn_idx = footnote_spot(tree)
nr = 1
for node in tree.traverse(nodes.reference):
uri = node.get('refuri', '')
if (uri.startswith('http:') or uri.startswith('https:') or
uri.startswith('ftp:')) and uri not in node.astext():
idx = node.parent.index(node) + 1
if show_urls == 'inline':
uri = self.link_target_template % {'uri': uri}
link = nodes.inline(uri, uri)
link['classes'].append(self.css_link_target_class)
node.parent.insert(idx, link)
elif show_urls == 'footnote':
label = FOOTNOTE_LABEL_TEMPLATE % nr
nr += 1
footnote_ref = make_footnote_ref(doc, label)
node.parent.insert(idx, footnote_ref)
footnote = make_footnote(doc, label, uri)
fn_spot.insert(fn_idx, footnote)
footnote_ref['refid'] = footnote['ids'][0]
footnote.add_backref(footnote_ref['ids'][0])
fn_idx += 1
def write_doc(self, docname: str, doctree: nodes.document) -> None:
"""Write one document file.
        This method is overridden in order to fix fragment identifiers
and to add visible external links.
"""
self.fix_ids(doctree)
self.add_visible_links(doctree, self.config.epub_show_urls)
super().write_doc(docname, doctree)
def fix_genindex(self, tree: List[Tuple[str, List[Tuple[str, Any]]]]) -> None:
"""Fix href attributes for genindex pages."""
# XXX: modifies tree inline
# Logic modeled from themes/basic/genindex.html
for key, columns in tree:
for entryname, (links, subitems, key_) in columns:
for (i, (ismain, link)) in enumerate(links):
m = self.refuri_re.match(link)
if m:
links[i] = (ismain,
self.fix_fragment(m.group(1), m.group(2)))
for subentryname, subentrylinks in subitems:
for (i, (ismain, link)) in enumerate(subentrylinks):
m = self.refuri_re.match(link)
if m:
subentrylinks[i] = (ismain,
self.fix_fragment(m.group(1), m.group(2)))
def is_vector_graphics(self, filename: str) -> bool:
"""Does the filename extension indicate a vector graphic format?"""
ext = path.splitext(filename)[-1]
return ext in VECTOR_GRAPHICS_EXTENSIONS
def copy_image_files_pil(self) -> None:
"""Copy images using Pillow, the Python Imaging Library.
The method tries to read and write the files with Pillow, converting
the format and resizing the image if necessary/possible.
"""
ensuredir(path.join(self.outdir, self.imagedir))
for src in status_iterator(self.images, __('copying images... '), "brown",
len(self.images), self.app.verbosity):
dest = self.images[src]
try:
img = Image.open(path.join(self.srcdir, src))
except OSError:
if not self.is_vector_graphics(src):
logger.warning(__('cannot read image file %r: copying it instead'),
path.join(self.srcdir, src))
try:
copyfile(path.join(self.srcdir, src),
path.join(self.outdir, self.imagedir, dest))
except OSError as err:
logger.warning(__('cannot copy image file %r: %s'),
path.join(self.srcdir, src), err)
continue
if self.config.epub_fix_images:
if img.mode in ('P',):
# See the Pillow documentation for Image.convert()
# https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.convert
img = img.convert()
if self.config.epub_max_image_width > 0:
(width, height) = img.size
nw = self.config.epub_max_image_width
if width > nw:
                    # Image.resize() requires integer dimensions
                    nh = round((height * nw) / width)
img = img.resize((nw, nh), Image.BICUBIC)
try:
img.save(path.join(self.outdir, self.imagedir, dest))
except OSError as err:
logger.warning(__('cannot write image file %r: %s'),
path.join(self.srcdir, src), err)
def copy_image_files(self) -> None:
"""Copy image files to destination directory.
        This overridden method can use Pillow to convert image files.
"""
if self.images:
if self.config.epub_fix_images or self.config.epub_max_image_width:
if not Image:
logger.warning(__('Pillow not found - copying image files'))
super().copy_image_files()
else:
self.copy_image_files_pil()
else:
super().copy_image_files()
def copy_download_files(self) -> None:
pass
def handle_page(self, pagename: str, addctx: Dict, templatename: str = 'page.html',
outfilename: str = None, event_arg: Any = None) -> None:
"""Create a rendered page.
        This method is overridden for genindex pages in order to fix href link
attributes.
"""
if pagename.startswith('genindex') and 'genindexentries' in addctx:
if not self.use_index:
return
self.fix_genindex(addctx['genindexentries'])
addctx['doctype'] = self.doctype
super().handle_page(pagename, addctx, templatename, outfilename, event_arg)
def build_mimetype(self) -> None:
"""Write the metainfo file mimetype."""
logger.info(__('writing mimetype file...'))
copy_asset_file(path.join(self.template_dir, 'mimetype'), self.outdir)
def build_container(self, outname: str = 'META-INF/container.xml') -> None: # NOQA
"""Write the metainfo file META-INF/container.xml."""
logger.info(__('writing META-INF/container.xml file...'))
outdir = path.join(self.outdir, 'META-INF')
ensuredir(outdir)
copy_asset_file(path.join(self.template_dir, 'container.xml'), outdir)
def content_metadata(self) -> Dict[str, Any]:
"""Create a dictionary with all metadata for the content.opf
file properly escaped.
"""
metadata: Dict[str, Any] = {}
metadata['title'] = html.escape(self.config.epub_title)
metadata['author'] = html.escape(self.config.epub_author)
metadata['uid'] = html.escape(self.config.epub_uid)
metadata['lang'] = html.escape(self.config.epub_language)
metadata['publisher'] = html.escape(self.config.epub_publisher)
metadata['copyright'] = html.escape(self.config.epub_copyright)
metadata['scheme'] = html.escape(self.config.epub_scheme)
metadata['id'] = html.escape(self.config.epub_identifier)
metadata['date'] = html.escape(format_date("%Y-%m-%d"))
metadata['manifest_items'] = []
metadata['spines'] = []
metadata['guides'] = []
return metadata
def build_content(self) -> None:
"""Write the metainfo file content.opf It contains bibliographic data,
a file list and the spine (the reading order).
"""
logger.info(__('writing content.opf file...'))
metadata = self.content_metadata()
# files
if not self.outdir.endswith(os.sep):
self.outdir += os.sep
olen = len(self.outdir)
self.files: List[str] = []
self.ignored_files = ['.buildinfo', 'mimetype', 'content.opf',
'toc.ncx', 'META-INF/container.xml',
'Thumbs.db', 'ehthumbs.db', '.DS_Store',
'nav.xhtml', self.config.epub_basename + '.epub'] + \
self.config.epub_exclude_files
if not self.use_index:
self.ignored_files.append('genindex' + self.out_suffix)
for root, dirs, files in os.walk(self.outdir):
dirs.sort()
for fn in sorted(files):
filename = path.join(root, fn)[olen:]
if filename in self.ignored_files:
continue
ext = path.splitext(filename)[-1]
if ext not in self.media_types:
# we always have JS and potentially OpenSearch files, don't
# always warn about them
if ext not in ('.js', '.xml'):
logger.warning(__('unknown mimetype for %s, ignoring'), filename,
type='epub', subtype='unknown_project_files')
continue
filename = filename.replace(os.sep, '/')
item = ManifestItem(html.escape(filename),
html.escape(self.make_id(filename)),
html.escape(self.media_types[ext]))
metadata['manifest_items'].append(item)
self.files.append(filename)
# spine
spinefiles = set()
for refnode in self.refnodes:
if '#' in refnode['refuri']:
continue
if refnode['refuri'] in self.ignored_files:
continue
spine = Spine(html.escape(self.make_id(refnode['refuri'])), True)
metadata['spines'].append(spine)
spinefiles.add(refnode['refuri'])
for info in self.domain_indices:
spine = Spine(html.escape(self.make_id(info[0] + self.out_suffix)), True)
metadata['spines'].append(spine)
spinefiles.add(info[0] + self.out_suffix)
if self.use_index:
spine = Spine(html.escape(self.make_id('genindex' + self.out_suffix)), True)
metadata['spines'].append(spine)
spinefiles.add('genindex' + self.out_suffix)
# add auto generated files
for name in self.files:
if name not in spinefiles and name.endswith(self.out_suffix):
spine = Spine(html.escape(self.make_id(name)), False)
metadata['spines'].append(spine)
# add the optional cover
html_tmpl = None
if self.config.epub_cover:
image, html_tmpl = self.config.epub_cover
image = image.replace(os.sep, '/')
metadata['cover'] = html.escape(self.make_id(image))
if html_tmpl:
spine = Spine(html.escape(self.make_id(self.coverpage_name)), True)
metadata['spines'].insert(0, spine)
if self.coverpage_name not in self.files:
ext = path.splitext(self.coverpage_name)[-1]
self.files.append(self.coverpage_name)
item = ManifestItem(html.escape(self.coverpage_name),
html.escape(self.make_id(self.coverpage_name)),
html.escape(self.media_types[ext]))
metadata['manifest_items'].append(item)
ctx = {'image': html.escape(image), 'title': self.config.project}
self.handle_page(
path.splitext(self.coverpage_name)[0], ctx, html_tmpl)
spinefiles.add(self.coverpage_name)
auto_add_cover = True
auto_add_toc = True
if self.config.epub_guide:
for type, uri, title in self.config.epub_guide:
file = uri.split('#')[0]
if file not in self.files:
self.files.append(file)
if type == 'cover':
auto_add_cover = False
if type == 'toc':
auto_add_toc = False
metadata['guides'].append(Guide(html.escape(type),
html.escape(title),
html.escape(uri)))
if auto_add_cover and html_tmpl:
metadata['guides'].append(Guide('cover',
self.guide_titles['cover'],
html.escape(self.coverpage_name)))
if auto_add_toc and self.refnodes:
metadata['guides'].append(Guide('toc',
self.guide_titles['toc'],
html.escape(self.refnodes[0]['refuri'])))
# write the project file
copy_asset_file(path.join(self.template_dir, 'content.opf_t'), self.outdir, metadata)
def new_navpoint(self, node: Dict[str, Any], level: int, incr: bool = True) -> NavPoint:
"""Create a new entry in the toc from the node at given level."""
# XXX Modifies the node
if incr:
self.playorder += 1
self.tocid += 1
return NavPoint('navPoint%d' % self.tocid, self.playorder,
node['text'], node['refuri'], [])
def build_navpoints(self, nodes: List[Dict[str, Any]]) -> List[NavPoint]:
"""Create the toc navigation structure.
Subelements of a node are nested inside the navpoint. For nested nodes
the parent node is reinserted in the subnav.
"""
navstack: List[NavPoint] = []
navstack.append(NavPoint('dummy', 0, '', '', []))
level = 0
lastnode = None
for node in nodes:
if not node['text']:
continue
file = node['refuri'].split('#')[0]
if file in self.ignored_files:
continue
if node['level'] > self.config.epub_tocdepth:
continue
if node['level'] == level:
navpoint = self.new_navpoint(node, level)
navstack.pop()
navstack[-1].children.append(navpoint)
navstack.append(navpoint)
elif node['level'] == level + 1:
level += 1
if lastnode and self.config.epub_tocdup:
# Insert starting point in subtoc with same playOrder
navstack[-1].children.append(self.new_navpoint(lastnode, level, False))
navpoint = self.new_navpoint(node, level)
navstack[-1].children.append(navpoint)
navstack.append(navpoint)
elif node['level'] < level:
while node['level'] < len(navstack):
navstack.pop()
level = node['level']
navpoint = self.new_navpoint(node, level)
navstack[-1].children.append(navpoint)
navstack.append(navpoint)
else:
raise
lastnode = node
return navstack[0].children
def toc_metadata(self, level: int, navpoints: List[NavPoint]) -> Dict[str, Any]:
"""Create a dictionary with all metadata for the toc.ncx file
properly escaped.
"""
metadata: Dict[str, Any] = {}
metadata['uid'] = self.config.epub_uid
metadata['title'] = html.escape(self.config.epub_title)
metadata['level'] = level
metadata['navpoints'] = navpoints
return metadata
def build_toc(self) -> None:
"""Write the metainfo file toc.ncx."""
logger.info(__('writing toc.ncx file...'))
if self.config.epub_tocscope == 'default':
doctree = self.env.get_and_resolve_doctree(self.config.root_doc,
self, prune_toctrees=False,
includehidden=False)
refnodes = self.get_refnodes(doctree, [])
self.toc_add_files(refnodes)
else:
# 'includehidden'
refnodes = self.refnodes
self.check_refnodes(refnodes)
navpoints = self.build_navpoints(refnodes)
level = max(item['level'] for item in self.refnodes)
level = min(level, self.config.epub_tocdepth)
copy_asset_file(path.join(self.template_dir, 'toc.ncx_t'), self.outdir,
self.toc_metadata(level, navpoints))
def build_epub(self) -> None:
"""Write the epub file.
It is a zip file with the mimetype file stored uncompressed as the first
entry.
"""
outname = self.config.epub_basename + '.epub'
logger.info(__('writing %s file...'), outname)
epub_filename = path.join(self.outdir, outname)
with ZipFile(epub_filename, 'w', ZIP_DEFLATED) as epub:
epub.write(path.join(self.outdir, 'mimetype'), 'mimetype', ZIP_STORED)
for filename in ['META-INF/container.xml', 'content.opf', 'toc.ncx']:
epub.write(path.join(self.outdir, filename), filename, ZIP_DEFLATED)
for filename in self.files:
epub.write(path.join(self.outdir, filename), filename, ZIP_DEFLATED)
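# Hypothetical verification sketch (not part of the builder), reusing the
# zipfile names imported above: the EPUB OCF container requires 'mimetype' to
# be the first, uncompressed entry, which is why build_epub() writes it with
# ZIP_STORED before everything else.
def _check_epub_container(epub_path: str) -> None:
    with ZipFile(epub_path) as archive:
        first = archive.infolist()[0]
        assert first.filename == 'mimetype'
        assert first.compress_type == ZIP_STORED
        assert archive.read('mimetype') == b'application/epub+zip'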
|
|
#------------------------------------------------------------------------------
# Tests for DataFrame various cases. Uses external csv control files
import os
import numpy as np
import pandas as pd
import nose.tools as nt
import lamana as la
import lamana.utils.tools as ut
from lamana import constructs as con
from lamana.models import Wilson_LT as wlt
dft = wlt.Defaults()
# SETUP -----------------------------------------------------------------------
def fix_discontinuities(laminate, inner_i):
    '''Replace NaN t(um) values at discontinuities with the adjacent inner_i value.
Accounts for the following found in controls directory:
- missing t(um) and d(mm) columns
- discontinuities in multi and special plies
- and more ...
'''
df = laminate.copy()
# Tensile side
discTensidx = df.loc[(df['label'] == 'discont.')
& (df['type'] == 'inner')
& (df['side'] == 'Tens.'), 't(um)'].index.tolist()
# Compressive side
discCompidx = df.loc[(df['label'] == 'discont.')
& (df['type'] == 'inner')
& (df['side'] == 'Comp.'), 't(um)'].index.tolist()
#print(discTensidx)
#print(df)
#print(inner_i)
for i, inner in enumerate(inner_i):
#print(i, inner, inner_i)
df.loc[discTensidx[i], 't(um)'] = inner
for i, inner_r in enumerate(reversed(inner_i)):
#print(inner_r)
df.loc[discCompidx[i], 't(um)'] = inner_r
return df
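# Illustrative example (not part of the original module), assuming toy values:
#
#     >>> toy = pd.DataFrame({'label': ['discont.', 'discont.'],
#     ...                     'type': ['inner', 'inner'],
#     ...                     'side': ['Tens.', 'Comp.'],
#     ...                     't(um)': [np.nan, np.nan]})
#     >>> fix_discontinuities(toy, [400.0])['t(um)'].tolist()
#     [400.0, 400.0]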
def extract_dataframe(df):
'''Parse corrected DataFrame from a csv file; legacy, automated or custom.'''
df_expected = df.copy()
# Mild cleanup
if 'd(mm)' in df_expected.columns:
df_expected['d(m)'] = df_expected['d(mm)']/1000.
del df_expected['d(mm)']
if 't(um)' not in df_expected.columns: # for custom controls from legacy scripts
df_expected['t(um)'] = df_expected['h(m)']/1e-6
# Assign Nan to layer thickness of the discontinuity row
df_expected.loc[df_expected['label'] == 'discont.', 't(um)'
] = np.nan
# Twice the h in the middle
df_expected.loc[df_expected['type'] == 'middle', 't(um)'
] = df_expected.loc[df_expected['type'] == 'middle', 'h(m)'].multiply(2)/1e-6
# Parse data mainly for the Case
nplies = len(df_expected['layer'].unique())
p = df_expected.groupby('layer').size().iloc[0]
t_total = df_expected.iloc[-1]['d(m)'] # (in m)
# Get a geometry string to feed the API
    if nplies < 1:
        raise Exception('Number of plies < 1. No plies detected.')
    elif nplies < 5:
        geometry = ut.get_special_geometry(df_expected)
    else:
        geometry = ut.get_multi_geometry(df_expected)
#print(geometry)
    # Plug in holes; overwrite placeholder NaNs at discontinuities
# Primarily for custom controls
geo = geometry.split('-')
#print(geo)
outer = float(geo[0])
if '[' in geo[1]:
inners = geo[1][1:-1].split(',')
'''Why is float needed here and not int?'''
'''Getting 200L error for float.'''
#print(inners)
inner_i = [float(t) for t in inners]
else:
inner_i = float(geo[1])
#print(inner_i)
df_expected.loc[(df_expected['label'] == 'discont.')
& (df_expected['type'] == 'outer'),'t(um)'] = outer
if ('discont.' in df_expected['label'].values) and ('inner' in df_expected['type'].values):
df_expected = fix_discontinuities(df_expected, inner_i)
return df_expected, geometry, nplies, p, t_total
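# Illustrative note (not from the original module): the geometry string parsed
# above follows LamAna's 'outer-[inner,...]-middle' convention (in microns), so
# the slicing works roughly like this for a toy value:
#
#     >>> geo = '400-[200,100]-800'.split('-')
#     >>> float(geo[0]), [float(t) for t in geo[1][1:-1].split(',')]
#     (400.0, [200.0, 100.0])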
# TESTS -----------------------------------------------------------------------
# Test Columns
def test_apply_LaminateModels_cols_dimensions1():
    '''Test actual against expected DataFrames in .csv files found in
tests/controls_LT; IDs and dimensional columns.
'''
# Prepare file path.
    # Depends on which directory nosetests is run from
#path = os.getcwd() # use for the test in the correct path
path = os.path.join(os.getcwd(), 'lamana', 'tests', 'controls_LT') # for builds
#path = path + r'\lamana\tests\controls_LT' # for Main Script. Comment out in tests
#path = path + r'\tests\controls_LT' # for test
#path = os.path.join(os.getcwd(), 'tests', 'controls_LT') # for test
#path = path + r'\controls_LT' # for test
# Read all files in the path (REF 013)
for file in ut.read_csv_dir(path):
#df_expected = file
df = file
#print(df_expected)
df_expected, geometry, nplies, p, t_total = extract_dataframe(df)
# Build actual Case using distributions API
dft.load_params['p'] = p
case = la.distributions.Case(dft.load_params, dft.mat_props)
case.apply([geometry])
df = case.frames[0]
# Compare the dimensional columns only
'''Bypassing z(m), z(m)*, intf and k for now'''
'''UPDATE: k add back in 0.4.4b'''
###
cols = ['layer', 'side','type', 'matl',
# 'label', 't(um)', 'h(m)', 'd(m)', 'intf', 'k', 'Z(m)', 'z(m)']
# 'label', 't(um)', 'h(m)', 'd(m)', 'intf', 'k', 'Z(m)', ]
# 'label', 't(um)', 'h(m)', 'd(m)', 'intf', 'Z(m)', ] # removed; k redefined in 0.4.3c4d
# 'label', 't(um)', 'h(m)', 'd(m)', 'intf']
'label', 't(um)', 'h(m)', 'd(m)', 'intf', 'k']
print('A .csv file is being processed with the following dimensional properties:')
print(' Number of plies: {} \n p: {} \n total \
t (m): {} \n geometry: {} \n'.format(nplies, p, t_total, geometry))
actual = df[cols]
expected = df_expected[cols]
#print ('expected (file) \n', expected)
#print('actual (API) \n', actual)
#print(expected.dtypes)
#print(actual.dtypes)
print('\n')
ut.assertFrameEqual(actual, expected)
def test_apply_LaminateModels_cols_models1():
    '''Test .csv files found in tests/controls_LT against API DataFrames.
    Comparing model columns only.
    Because internal values are calculated with different algorithms,
    rows yielding maximum and minimum stress are the most reliable comparisons.
    Tests for internal points can vary depending on the choice of z(m),
    so they are excluded from this test.
    '''
'''Wait for skipcols kwarg in read_csv in pandas 0.17'''
def remove_units(cols):
        '''Return a dict of stress column labels with units removed.'''
#cols = cols.tolist()
dict_ = {}
for idx, colname in enumerate(cols):
if 'stress' in colname:
tokens = colname.split(' ') # works even w/o units
unitless_name = tokens[0]
#print(name)
dict_[colname] = unitless_name
return dict_
# Prepare file path.
    # Depends on which directory nosetests is run from
#path = os.getcwd() # use for the test in the correct path
path = os.path.join(os.getcwd(), 'lamana', 'tests', 'controls_LT') # for builds
#path = path + r'\lamana\tests\controls_LT' # for Main Script. Comment out in tests
#path = path + r'\tests\controls_LT' # for test
#path = os.path.join(os.getcwd(), 'tests', 'controls_LT') # for test
#path = path + r'\controls_LT' # for test
# Read all files in the path (REF 013)
for file in ut.read_csv_dir(path):
#df_expected = file
df = file
#print(df_expected)
df_expected, geometry, nplies, p, t_total = extract_dataframe(df)
# Build actual Case using distributions API
dft.load_params['p'] = p
case = la.distributions.Case(dft.load_params, dft.mat_props)
case.apply([geometry])
df = case.frames[0]
# Compare only model-related columns; skip API columns
IDs = ['layer','side', 'type', 'matl', 't(um)'] # except label_
Dimensionals = ['h(m)', 'd(m)', 'intf', 'k', 'Z(m)', 'z(m)', 'z(m)*']
#bypassed = ['z(m)', 'z(m)*', 'intf', 'k']
skippedcols = IDs + Dimensionals
actual_remainingcols = df.columns.difference(skippedcols)
expected_remainingcols = df_expected.columns.difference(skippedcols)
# Get model columns only and strip units from stress columns
df_models = df[actual_remainingcols].copy()
df_expected = df_expected[expected_remainingcols].copy()
df_models.rename(columns=remove_units(actual_remainingcols), inplace=True)
df_expected.rename(columns=remove_units(expected_remainingcols), inplace=True)
#print(df_expected)
print('A .csv file is being processed with the following dimensional properties:')
print(' Number of plies: {} \n p: {} \n total \
t (m): {} \n geometry: {} \n'.format(nplies, p, t_total, geometry))
# Use all rows (including internals, optional)
#actual = df_models
#expected = df_expected[expected_remainingcols]
# Only use max stress rows (interfacial)
actual1 = df_models.loc[df_models['label'] == 'interface']
expected1 = df_expected.loc[df_expected['label'] == 'interface']
#expected1 = df_expected[expected_remainingcols].loc[df_expected['label'] == 'interface']
        # Only use min stress rows (discontinuities)
if p > 1:
actual2 = df_models.loc[df_models['label'] == 'discont.']
expected2 = df_expected.loc[df_expected['label'] == 'discont.']
#expected2 = df_expected[expected_remainingcols].loc[df_expected['label'] == 'discont.']
#print ('expected (file) \n', expected1)
#print('actual (API) \n', actual1)
#print ('expected (file) \n', expected2)
#print('actual (API) \n', actual2)
#print(type(expected1))
#print(type(actual1))
#print(expected1.dtypes)
#print(actual1.dtypes)
print('\n')
ut.assertFrameEqual(actual1, expected1, check_dtype=False) # max stress rows
if p > 1: # min stress rows
ut.assertFrameEqual(actual2, expected2, check_dtype=False)
# Internal rows depend on the `z_` algorithm. They are not compared to prevent breakage.
|
|
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
import inspect
import itertools
import logging
import time
import types
from types import NoneType
from google.appengine.ext import db, ndb
from mcfw.cache import set_cache_key
from mcfw.consts import MISSING
from mcfw.properties import get_members, simple_types, object_factory, long_property, unicode_property, typed_property
class ErrorResponse(object):
status_code = long_property('1')
error = unicode_property('2')
data = typed_property('3', dict)
def __init__(self, rest_exception):
"""
Args:
rest_exception (mcfw.exceptions.HttpException):
"""
self.status_code = rest_exception.http_code
self.error = u'%s' % rest_exception.error
self.data = rest_exception.data
class MissingArgumentException(Exception):
def __init__(self, name, func=None):
Exception.__init__(self, "%s is a required argument%s!" % (
name, (' in function %s' % func.func_name) if func else ''))
self.name = name
def log_access(call=True, response=True):
def wrap(f):
def logged(*args, **kwargs):
if call:
arg_str = ""
for i, arg in enumerate(args):
arg_str += " %s: %s\n" % (i, arg)
kwarg_str = ""
for kw, arg in kwargs.iteritems():
kwarg_str += " %s: %s\n" % (kw, arg)
logging.debug(u"%s.%s\nargs:\n%skwargs:\n%s" % (f.__module__, f.__name__, arg_str, kwarg_str))
start = time.time()
try:
result = f(*args, **kwargs)
if response:
end = time.time()
logging.debug(
u"%s.%s finished in %s seconds returning %s" % (f.__module__, f.__name__, end - start, result))
return result
except:
if response:
end = time.time()
logging.exception(u"%s.%s failed in %s seconds" % (f.__module__, f.__name__, end - start))
raise
set_cache_key(logged, f)
logged.__name__ = f.__name__
logged.__module__ = f.__module__
if hasattr(f, u"meta"):
logged.meta.update(f.meta)
return logged
return wrap
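# Hypothetical usage sketch (not part of the original module): log_access logs
# the call arguments and the timed result of the wrapped function, e.g.
#
#     @log_access(call=True, response=True)
#     def add(a, b):
#         return a + b
#
#     add(1, 2)   # debug-logs the args, then "... finished in ... seconds returning 3"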
def arguments(**kwarg_types):
""" The arguments decorator function describes & validates the parameters of the function."""
map(_validate_type_spec, kwarg_types.itervalues())
def wrap(f):
# validate argspec
if not inspect.isfunction(f):
raise ValueError("f is not of type function!")
f_args = inspect.getargspec(f)
f_args = inspect.ArgSpec([a for a in f_args[0] if a not in ('self', 'cls')], f_args[1], f_args[2], f_args[3])
f_arg_count = len(f_args[0])
f_defaults = f_args[3]
if not f_defaults:
f_defaults = []
f_arg_defaults_count = len(f_defaults)
f_arg_no_defaults_count = f_arg_count - f_arg_defaults_count
f_arg_defaults = dict(
(f_args[0][i], f_defaults[i - f_arg_no_defaults_count] if i >= f_arg_no_defaults_count else MISSING) for i
in xrange(f_arg_count))
f_pure_default_args_dict = dict((f_args[0][i], f_defaults[i - f_arg_no_defaults_count]) for i in
xrange(f_arg_no_defaults_count, f_arg_count))
if not f_arg_count == len(kwarg_types):
raise ValueError(f.func_name + " does not contain the expected arguments!")
unknown_args = filter(lambda arg: arg not in kwarg_types, f_args[0])
if unknown_args:
raise ValueError("No type information is supplied for %s!" % ", ".join(unknown_args))
def typechecked_f(*args, **kwargs):
if len(args) > len(f_args[0]):
raise ValueError("%s() takes %s arguments (%s given)" % (f.__name__, len(f_args[0]), len(args)))
kwargs.update(dict(((f_args[0][i], args[i]) for i in xrange(len(args)))))
# accept MISSING as magical value or not
accept_missing = u'accept_missing' in kwargs
if accept_missing:
kwargs.pop(u'accept_missing')
# apply default value if available
for arg, _ in kwarg_types.iteritems():
value = kwargs.get(arg, f_arg_defaults[arg])
if value == MISSING:
value = f_arg_defaults.get(arg, MISSING)
kwargs[arg] = value
# validate number of arguments
if not len(kwargs) == len(kwarg_types):
raise ValueError("kwarg mismatch\nExpected:%s\nGot:%s" % (kwarg_types, kwargs))
# validate supplied arguments
unknown_args = filter(lambda arg: arg not in kwarg_types, kwargs)
if unknown_args:
raise ValueError("Unknown argument(s) %s supplied!" % ", ".join(unknown_args))
# validate argument values
map(lambda arg: _check_type(arg, kwarg_types[arg], kwargs[arg], accept_missing=accept_missing, func=f),
kwargs)
return f(**kwargs)
set_cache_key(typechecked_f, f)
typechecked_f.__name__ = f.__name__
typechecked_f.__module__ = f.__module__
typechecked_f.meta[u"fargs"] = f_args
typechecked_f.meta[u"kwarg_types"] = kwarg_types
typechecked_f.meta[u"pure_default_args_dict"] = f_pure_default_args_dict
if hasattr(f, u"meta"):
typechecked_f.meta.update(f.meta)
return typechecked_f
return wrap
def returns(type_=NoneType):
""" The retunrs decorator function describes & validates the result of the function."""
_validate_type_spec(type_)
def wrap(f):
def typechecked_return(*args, **kwargs):
result = f(*args, **kwargs)
return _check_type(u"Result", type_, result, func=f)
set_cache_key(typechecked_return, f)
typechecked_return.__name__ = f.__name__
typechecked_return.__module__ = f.__module__
typechecked_return.meta[u"return_type"] = type_
if hasattr(f, u"meta"):
typechecked_return.meta.update(f.meta)
return typechecked_return
return wrap
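# Hypothetical usage sketch (not part of the original module): @arguments needs
# a type for every parameter and @returns one for the result; calls are then
# validated via _check_type() below.
#
#     @returns(unicode)
#     @arguments(name=unicode, count=(int, long))
#     def greet(name, count=1):
#         return u'%s x%d' % (name, count)
#
#     greet(u'world')                  # -> u'world x1'
#     greet(name=u'world', count=3)    # -> u'world x3'
#     greet(u'world', count='3')       # raises ValueError (wrong type for count)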
def run(function, args, kwargs):
kwargs['accept_missing'] = None
result = function(*args, **kwargs)
type_, islist = _get_return_type_details(function)
return serialize_value(result, type_, islist, skip_missing=True)
def parse_parameters(function, parameters):
kwarg_types = get_parameter_types(function)
return get_parameters(parameters, kwarg_types)
def parse_complex_value(type_, value, islist):
if value is None:
return None
parser = _get_complex_parser(type_)
if islist:
return map(parser, value)
else:
return parser(value)
def check_function_metadata(function):
if "kwarg_types" not in function.meta or "return_type" not in function.meta:
raise ValueError("Can not execute function. Too little meta information is available!")
def get_parameter_types(function):
return function.meta["kwarg_types"]
def get_parameters(parameters, kwarg_types):
return {name: parse_parameter(name, type_, parameters[name]) if name in parameters else MISSING
for name, type_ in kwarg_types.iteritems()}
def get_type_details(type_, value=MISSING):
if isinstance(type_, tuple):
# The value can have multiple types.
if value is not MISSING:
# We must find the type by comparing the possible types with the real type of <value>
value_is_list = isinstance(value, list)
if value_is_list:
if not value:
return unicode, True # The type doesn't matter, the list is empty
value = value[0]
for t in type_:
is_list = isinstance(t, list)
if is_list != value_is_list:
continue
if is_list:
t = t[0]
if t in (str, unicode):
type_to_check = (str, unicode)
elif t in (int, long):
type_to_check = (int, long)
else:
type_to_check = t
if isinstance(value, type_to_check):
return type(value), is_list
# Weird... type not found and @arguments didn't raise... The serialization will probably fail.
is_list = isinstance(type_, list)
if is_list:
type_ = type_[0]
return type_, is_list
def serialize_complex_value(value, type_, islist, skip_missing=False):
if type_ == dict:
return value
def optimal_serializer(val):
if not isinstance(type_, object_factory) and isinstance(val, type_):
serializer = _get_complex_serializer(val.__class__)
else:
serializer = _get_complex_serializer(type_)
return serializer(val, skip_missing)
if value is None:
return None
if islist:
try:
return map(optimal_serializer, value)
except:
logging.warn("value for type %s was %s", type_, value)
raise
else:
return optimal_serializer(value)
def serialize_value(value, type_, islist, skip_missing=False):
if value is None \
or type_ in simple_types \
or (isinstance(type_, tuple) and all(t in simple_types for t in type_)):
return value
else:
return serialize_complex_value(value, type_, islist, skip_missing)
def parse_parameter(name, type_, value):
raw_type, is_list = get_type_details(type_, value)
if isinstance(value, list) != is_list:
raise ValueError("list expected for parameter %s and got %s or vice versa!" % (name, value))
if isinstance(value, list):
return map(lambda x: _parse_value(name, raw_type, x), value)
else:
return _parse_value(name, raw_type, value)
def _validate_type_spec(type_):
if isinstance(type_, list) and len(type_) != 1:
raise ValueError("Illegal type specification!")
DICT_KEY_ITERATOR_TYPE = type(dict().iterkeys())
def _check_type(name, type_, value, accept_missing=False, func=None):
if value == MISSING:
if accept_missing:
return value
else:
raise MissingArgumentException(name, func)
checktype = (str, unicode) if type_ in (str, unicode) else type_
checktype = (int, long) if checktype in (int, long) else checktype
if value == None and (isinstance(checktype, list) or type_ not in (int, long, float, bool)):
return value
if isinstance(type_, tuple):
# multiple types are allowed. checking if value is one of the them.
errors = list()
for t in type_:
try:
return _check_type(name, t, value, accept_missing, func)
except (ValueError, TypeError) as e:
errors.append(e)
continue
logging.debug('\n\n'.join(map(str, errors)))
raise ValueError("%s is not of expected type %s! Its type is %s:\n%s" % (name, str(type_), type(value), value))
if isinstance(checktype, list) and isinstance(value, list):
checktype = (str, unicode) if checktype[0] in (str, unicode) else checktype[0]
for i, x in enumerate(value):
t = checktype.get_subtype(x) if isinstance(checktype, object_factory) else checktype
if not isinstance(x, t):
raise ValueError(
"%s: Not all items were of expected type %s. Encountered an item at index %s with type %s: %s."
% (name, str(checktype), i, type(x), x))
elif isinstance(checktype, list) and isinstance(value, (
types.GeneratorType, db.Query, ndb.Query, itertools.chain, DICT_KEY_ITERATOR_TYPE)):
checktype = (str, unicode) if checktype[0] in (str, unicode) else checktype[0]
def checkStreaming():
for o in value:
if not isinstance(o, checktype):
raise ValueError(
"%s: Not all items were of expected type %s. Encountered an item with type %s: %s."
% (name, str(checktype), type(o), o))
yield o
return checkStreaming()
elif checktype == type and isinstance(value, list):
if len(value) != 1:
raise ValueError("%s: unexpected type count (%s)" % (name, len(value)))
def check(t, i):
if not isinstance(t, type):
raise ValueError(
"%s: Not all items were of expected type %s. Encountered an item at index %s with type %s: %s."
% (name, str(checktype), i, type(t), t))
if isinstance(value[0], tuple):
for i, t in enumerate(value[0]):
check(t, i)
else:
check(value[0], 0)
else:
if isinstance(checktype, object_factory):
checktype = checktype.get_subtype(value)
try:
if not isinstance(value, checktype):
raise ValueError(
"%s is not of expected type %s! Its type is %s:\n%s" % (name, str(checktype), type(value), value))
except TypeError as e:
raise TypeError("%s\nvalue: %s\nchecktype: %s" % (e.message, value, checktype))
return value
_complexParserCache = dict()
def _get_complex_parser(type_):
if type_ == dict:
return lambda x: x
if type_ not in _complexParserCache:
def parse(value):
t = type_.get_subtype(value) if isinstance(type_, object_factory) else type_
inst = t()
complex_members, simple_members = get_members(t)
map(lambda (name, prop): setattr(inst, name, value[name] if name in value else MISSING), simple_members)
map(lambda (name, prop):
setattr(inst, name, parse_complex_value(
prop.get_subtype(inst) if (prop.subtype_attr_name and prop.subtype_mapping) else prop.type,
value[name],
prop.list) if name in value else MISSING),
complex_members)
return inst
_complexParserCache[type_] = parse
return parse
else:
return _complexParserCache[type_]
_value_types = set((int, long, float, bool, NoneType))
def _parse_value(name, type_, value):
def raize():
raise ValueError("Incorrect type received for parameter '%s'. Expected %s and got %s (%s)."
% (name, type_, type(value), value))
istuple = isinstance(type_, tuple)
if (istuple and set(type_).issubset(_value_types)) or type_ in _value_types:
if not isinstance(value, type_):
raize()
return value
elif istuple:
for tt in type_:
try:
return _parse_value(name, tt, value)
except ValueError:
pass
raize()
elif value == None:
return None
elif type_ == unicode:
if not isinstance(value, (str, unicode)):
raize()
return value if isinstance(value, unicode) else unicode(value)
elif type_ == str:
if not isinstance(value, (str, unicode)):
raize()
return value
elif not isinstance(value, dict):
raize()
return parse_complex_value(type_, value, False)
_complex_serializer_cache = dict()
def _get_complex_serializer(type_):
if not type_ in _complex_serializer_cache:
def serializer(value, skip_missing):
t = type_.get_subtype(value) if isinstance(type_, object_factory) else type_
complex_members, simple_members = get_members(t)
result = dict([(name, getattr(value, name)) for (name, _) in simple_members if
not skip_missing or getattr(value, name) != MISSING])
def _serialize(name, prop):
attr = getattr(value, name)
real_type = prop.get_subtype(value) if (prop.subtype_attr_name and prop.subtype_mapping) else prop.type
serialized_value = serialize_complex_value(attr, real_type, prop.list, skip_missing)
return (name, serialized_value)
result.update(dict([_serialize(name, prop) for (name, prop) in complex_members if
not skip_missing or getattr(value, name) != MISSING]))
return result
_complex_serializer_cache[type_] = serializer
return serializer
else:
return _complex_serializer_cache[type_]
def _get_return_type_details(function):
return get_type_details(function.meta["return_type"])
|
|
#!/usr/bin/python
# Zed Attack Proxy (ZAP) and its related class files.
#
# ZAP is an HTTP/HTTPS proxy for assessing web application security.
#
# Copyright 2016 ZAP Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script runs a baseline scan against a target URL using ZAP
#
# It can either be run 'standalone', in which case it depends on
# https://pypi.python.org/pypi/python-owasp-zap-v2 and Docker, or it can be run
# inside one of the ZAP docker containers. It automatically detects if it is
# running in docker so the parameters are the same.
#
# By default it will spider the target URL for one minute, but you can change
# that via the -m parameter.
# It will then wait for the passive scanning to finish - how long that takes
# depends on the number of pages found.
# It will exit with codes of:
# 0: Success
# 1: At least 1 FAIL
# 2: At least one WARN and no FAILs
# 3: Any other failure
# By default all alerts found by ZAP will be treated as WARNings.
# You can use the -c or -u parameters to specify a configuration file to override
# this.
# You can generate a template configuration file using the -g parameter. You will
# then need to change 'WARN' to 'FAIL', 'INFO' or 'IGNORE' for the rules you want
# to be handled differently.
# You can also add your own messages for the rules by appending them after a tab
# at the end of each line.
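# A minimal sketch of such a config file (rule ids and messages are illustrative
# only). Fields are tab-separated: rule id, action, rule name in brackets, and an
# optional message after a further tab. OUTOFSCOPE lines map comma-separated rule
# ids to a URL regex that is ignored for those rules:
#
#   10016	WARN	(Web Browser XSS Protection Not Enabled)
#   10020	FAIL	(X-Frame-Options Header Scan)	Add an X-Frame-Options header
#   OUTOFSCOPE	10020,10021	^https://www\.example\.com/logout.*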
import getopt
import json
import logging
import os
import re
import socket
import subprocess
import sys
import time
import traceback
import urllib2
from datetime import datetime
from random import randint
from zapv2 import ZAPv2
timeout = 120
config_dict = {}
config_msg = {}
out_of_scope_dict = {}
running_in_docker = os.path.exists('/.dockerenv')
# Pscan rules that aren't really relevant, eg example alpha rules
blacklist = ['-1', '50003', '60000', '60001']
logging.basicConfig(level=logging.INFO)
def usage():
print ('Usage: zap-baseline.py -t <target> [options]')
print (' -t target target URL including the protocol, eg https://www.example.com')
print ('Options:')
print (' -c config_file config file to use to INFO, IGNORE or FAIL warnings')
print (' -u config_url URL of config file to use to INFO, IGNORE or FAIL warnings')
print (' -g gen_file generate default config file (all rules set to WARN)')
print (' -m mins the number of minutes to spider for (default 1)')
print (' -r report_html file to write the full ZAP HTML report')
print (' -x report_xml file to write the full ZAP XML report')
print (' -a include the alpha passive scan rules as well')
print (' -d show debug messages')
print (' -i default rules not in the config file to INFO')
  print (' -s short output format - don\'t show PASSes or example URLs')
def load_config(config):
for line in config:
if not line.startswith('#') and len(line) > 1:
(key, val, optional) = line.rstrip().split('\t', 2)
if key == 'OUTOFSCOPE':
for plugin_id in val.split(','):
if not plugin_id in out_of_scope_dict:
out_of_scope_dict[plugin_id] = []
out_of_scope_dict[plugin_id].append(re.compile(optional))
else:
config_dict[key] = val
if '\t' in optional:
(ignore, usermsg) = optional.rstrip().split('\t')
config_msg[key] = usermsg
else:
config_msg[key] = ''
def is_in_scope(plugin_id, url):
if '*' in out_of_scope_dict:
for oos_prog in out_of_scope_dict['*']:
      #print('OOS Compare ' + oos_prog.pattern + ' vs ' + url)
if oos_prog.match(url):
#print('OOS Ignoring ' + str(plugin_id) + ' ' + url)
return False
#print 'Not in * dict'
if plugin_id in out_of_scope_dict:
for oos_prog in out_of_scope_dict[plugin_id]:
      #print('OOS Compare ' + oos_prog.pattern + ' vs ' + url)
if oos_prog.match(url):
#print('OOS Ignoring ' + str(plugin_id) + ' ' + url)
return False
#print 'Not in ' + plugin_id + ' dict'
return True
def print_rule(action, alert_list, detailed_output, user_msg):
print (action + ': ' + alert_list[0].get('alert') + ' [' + alert_list[0].get('pluginId') + '] x ' + str(len(alert_list)) + ' ' + user_msg)
if detailed_output:
# Show (up to) first 5 urls
for alert in alert_list[0:5]:
print ('\t' + alert.get('url'))
def main(argv):
config_file = ''
config_url = ''
generate = ''
mins = 1
port = 0
detailed_output = True
report_html = ''
report_xml = ''
target = ''
zap_alpha = False
info_unspecified = False
base_dir = ''
zap_ip = 'localhost'
pass_count = 0
warn_count = 0
fail_count = 0
info_count = 0
ignore_count = 0
try:
opts, args = getopt.getopt(argv,"t:c:u:g:m:r:x:dais")
except getopt.GetoptError:
usage()
sys.exit(3)
for opt, arg in opts:
if opt == '-t':
target = arg
logging.debug ('Target: ' + target)
elif opt == '-c':
config_file = arg
elif opt == '-u':
config_url = arg
elif opt == '-g':
generate = arg
elif opt == '-d':
logging.getLogger().setLevel(logging.DEBUG)
elif opt == '-m':
mins = int(arg)
elif opt == '-r':
report_html = arg
elif opt == '-x':
report_xml = arg
elif opt == '-a':
zap_alpha = True
elif opt == '-i':
info_unspecified = True
elif opt == '-s':
detailed_output = False
# Check target supplied and ok
if len(target) == 0:
usage()
sys.exit(3)
if not (target.startswith('http://') or target.startswith('https://')):
logging.warning ('Target must start with \'http://\' or \'https://\'')
usage()
sys.exit(3)
if running_in_docker:
base_dir = '/zap/wrk/'
if len(config_file) > 0 or len(generate) > 0 or len(report_html) > 0 or len(report_xml) > 0:
# Check directory has been mounted
if not os.path.exists(base_dir):
logging.warning ('A file based option has been specified but the directory \'/zap/wrk\' is not mounted ')
usage()
sys.exit(3)
  # Choose a random 'ephemeral' port and check it's available
while True:
port = randint(32768, 61000)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if not (sock.connect_ex(('127.0.0.1', port)) == 0):
      # It's free :)
break
logging.debug ('Using port: ' + str(port))
if len(config_file) > 0:
# load config file from filestore
with open(base_dir + config_file) as f:
load_config(f)
elif len(config_url) > 0:
# load config file from url
try:
load_config(urllib2.urlopen(config_url))
except:
logging.warning ('Failed to read configs from ' + config_url)
sys.exit(3)
if running_in_docker:
try:
logging.debug ('Starting ZAP')
params = ['zap.sh', '-daemon',
'-port', str(port),
'-host', '0.0.0.0',
'-config', 'api.disablekey=true',
'-config', 'spider.maxDuration=' + str(mins),
'-addoninstall', 'pscanrulesBeta'] # In case we're running in the stable container
if (zap_alpha):
params.append('-addoninstall')
params.append('pscanrulesAlpha')
with open('zap.out', "w") as outfile:
subprocess.Popen(params, stdout=outfile)
except OSError:
logging.warning ('Failed to start ZAP :(')
sys.exit(3)
else:
# Not running in docker, so start one
try:
logging.debug ('Pulling ZAP Weekly Docker image')
ls_output = subprocess.check_output(['docker', 'pull', 'owasp/zap2docker-weekly'])
except OSError:
logging.warning ('Failed to run docker - is it on your path?')
sys.exit(3)
try:
logging.debug ('Starting ZAP')
params = ['docker', 'run', '-u', 'zap',
'-p', str(port) + ':' + str(port),
'-d', 'owasp/zap2docker-weekly',
'zap.sh', '-daemon',
'-port', str(port),
'-host', '0.0.0.0',
'-config', 'api.disablekey=true',
'-config', 'spider.maxDuration=' + str(mins)]
if (zap_alpha):
params.append('-addoninstall')
params.append('pscanrulesAlpha')
cid = subprocess.check_output(params).rstrip()
logging.debug ('Docker CID: ' + cid)
insp_output = subprocess.check_output(['docker', 'inspect', cid])
#logging.debug ('Docker Inspect: ' + insp_output)
insp_json = json.loads(insp_output)
zap_ip = str(insp_json[0]['NetworkSettings']['IPAddress'])
logging.debug ('Docker ZAP IP Addr: ' + zap_ip)
except OSError:
logging.warning ('Failed to start ZAP in docker :(')
sys.exit(3)
try:
# Wait for ZAP to start
zap = ZAPv2(proxies={'http': 'http://' + zap_ip + ':' + str(port), 'https': 'http://' + zap_ip + ':' + str(port)})
for x in range(0, timeout):
try:
logging.debug ('ZAP Version ' + zap.core.version)
break
except IOError:
time.sleep(1)
# Access the target
zap.urlopen(target)
time.sleep(2)
# Spider target
logging.debug ('Spider ' + target)
spider_scan_id = zap.spider.scan(target)
time.sleep(5)
start = datetime.now()
while (int(zap.spider.status(spider_scan_id)) < 100):
if (datetime.now() - start).seconds > ((mins * 60) + 10):
# TODO HACK to cope with API not recognising when spider has finished due to exceeding maxDuration
# Can be removed once the underlying fix is included in the ZAP Weekly release
break
logging.debug ('Spider progress %: ' + zap.spider.status(spider_scan_id))
time.sleep(5)
logging.debug ('Spider complete')
# Wait for passive scanning to complete
rtc = zap.pscan.records_to_scan
logging.debug ('Records to scan...')
while (int(zap.pscan.records_to_scan) > 0):
logging.debug ('Records to passive scan : ' + zap.pscan.records_to_scan)
time.sleep(2)
logging.debug ('Passive scanning complete')
# Print out a count of the number of urls
num_urls = len(zap.core.urls)
if (num_urls == 0):
logging.warning('No URLs found - is the target URL accessible? Local services may not be accessible from the Docker container')
else:
if detailed_output:
print ('Total of ' + str(len(zap.core.urls)) + ' URLs')
# Retrieve the alerts
alert_dict = {}
alerts = zap.core.alerts()
for alert in alerts:
plugin_id = alert.get('pluginId')
if plugin_id in blacklist:
continue
if not is_in_scope(plugin_id, alert.get('url')):
continue
if (not alert_dict.has_key(plugin_id)):
alert_dict[plugin_id] = []
alert_dict[plugin_id].append(alert)
all_rules = zap.pscan.scanners
all_dict = {}
for rule in all_rules:
plugin_id = rule.get('id')
if plugin_id in blacklist:
continue
all_dict[plugin_id] = rule.get('name')
if len(generate) > 0:
# Create the config file
with open(base_dir + generate, 'w') as f:
        f.write ('# zap-baseline rule configuration file\n')
f.write ('# Change WARN to IGNORE to ignore rule or FAIL to fail if rule matches\n')
f.write ('# Only the rule identifiers are used - the names are just for info\n')
f.write ('# You can add your own messages to each rule by appending them after a tab on each line.\n')
for key, rule in sorted(all_dict.iteritems()):
f.write (key + '\tWARN\t(' + rule + ')\n')
# print out the passing rules
pass_dict = {}
for rule in all_rules:
plugin_id = rule.get('id')
if plugin_id in blacklist:
continue
if (not alert_dict.has_key(plugin_id)):
pass_dict[plugin_id] = rule.get('name')
if detailed_output:
for key, rule in sorted(pass_dict.iteritems()):
print ('PASS: ' + rule + ' [' + key + ']')
pass_count = len(pass_dict)
# print out the ignored rules
for key, alert_list in sorted(alert_dict.iteritems()):
if config_dict.has_key(key) and config_dict[key] == 'IGNORE':
user_msg = ''
if key in config_msg:
user_msg = config_msg[key]
print_rule(config_dict[key], alert_list, detailed_output, user_msg)
ignore_count += 1
# print out the info rules
for key, alert_list in sorted(alert_dict.iteritems()):
if config_dict.has_key(key) and config_dict[key] == 'INFO':
user_msg = ''
if key in config_msg:
user_msg = config_msg[key]
print_rule(config_dict[key], alert_list, detailed_output, user_msg)
info_count += 1
# print out the warning rules
for key, alert_list in sorted(alert_dict.iteritems()):
if (not config_dict.has_key(key)) or (config_dict[key] == 'WARN'):
user_msg = ''
if key in config_msg:
user_msg = config_msg[key]
print_rule('WARN', alert_list, detailed_output, user_msg)
warn_count += 1
# print out the failing rules
for key, alert_list in sorted(alert_dict.iteritems()):
if config_dict.has_key(key) and config_dict[key] == 'FAIL':
user_msg = ''
if key in config_msg:
user_msg = config_msg[key]
print_rule(config_dict[key], alert_list, detailed_output, user_msg)
fail_count += 1
if len(report_html) > 0:
# Save the report
with open(base_dir + report_html, 'w') as f:
f.write (zap.core.htmlreport())
if len(report_xml) > 0:
# Save the report
with open(base_dir + report_xml, 'w') as f:
f.write (zap.core.xmlreport())
print ('FAIL: ' + str(fail_count) + '\tWARN: ' + str(warn_count) + '\tINFO: ' + str(info_count) +
'\tIGNORE: ' + str(ignore_count) + '\tPASS: ' + str(pass_count))
# Stop ZAP
zap.core.shutdown()
except IOError as (errno, strerror):
logging.warning ('I/O error(' + str(errno) + '): ' + strerror)
traceback.print_exc()
except:
logging.warning ('Unexpected error: ' + str(sys.exc_info()[0]))
traceback.print_exc()
if not running_in_docker:
# Close container - ignore failures
try:
logging.debug ('Stopping Docker container')
subprocess.check_output(['docker', 'stop', cid])
logging.debug ('Docker container stopped')
except OSError:
logging.warning ('Docker stop failed')
# Remove container - ignore failures
try:
logging.debug ('Removing Docker container')
subprocess.check_output(['docker', 'rm', cid])
logging.debug ('Docker container removed')
except OSError:
logging.warning ('Docker rm failed')
if fail_count > 0:
sys.exit(1)
elif warn_count > 0:
sys.exit(2)
elif pass_count > 0:
sys.exit(0)
else:
sys.exit(3)
if __name__ == "__main__":
main(sys.argv[1:])
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deep Neural Network estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator import model_fn
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.canned import optimizers
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.summary import summary
from tensorflow.python.training import training_util
# The default learning rate of 0.05 is a historical artifact of the initial
# implementation, but seems a reasonable choice.
_LEARNING_RATE = 0.05
def _add_hidden_layer_summary(value, tag):
summary.scalar('%s_fraction_of_zero_values' % tag, nn.zero_fraction(value))
summary.histogram('%s_activation' % tag, value)
def _dnn_model_fn(
features, labels, mode, head, hidden_units, feature_columns,
optimizer='Adagrad', activation_fn=nn.relu, dropout=None,
input_layer_partitioner=None, config=None):
"""Deep Neural Net model_fn.
Args:
features: Dict of `Tensor` (depends on data passed to `train`).
labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
dtype `int32` or `int64` in the range `[0, n_classes)`.
mode: Defines whether this is training, evaluation or prediction.
See `ModeKeys`.
head: A `head_lib._Head` instance.
hidden_units: Iterable of integer number of hidden units per layer.
feature_columns: Iterable of `feature_column._FeatureColumn` model inputs.
optimizer: String, `tf.Optimizer` object, or callable that creates the
optimizer to use for training. If not specified, will use the Adagrad
optimizer with a default learning rate of 0.05.
activation_fn: Activation function applied to each layer.
dropout: When not `None`, the probability we will drop out a given
coordinate.
input_layer_partitioner: Partitioner for input layer. Defaults
to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
config: `RunConfig` object to configure the runtime settings.
Returns:
predictions: A dict of `Tensor` objects.
loss: A scalar containing the loss of the step.
train_op: The op for training.
"""
optimizer = optimizers.get_optimizer_instance(
optimizer, learning_rate=_LEARNING_RATE)
num_ps_replicas = config.num_ps_replicas if config else 0
partitioner = partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas)
with variable_scope.variable_scope(
'dnn',
values=tuple(six.itervalues(features)),
partitioner=partitioner):
input_layer_partitioner = input_layer_partitioner or (
partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas,
min_slice_size=64 << 20))
with variable_scope.variable_scope(
'input_from_feature_columns',
values=tuple(six.itervalues(features)),
partitioner=input_layer_partitioner):
net = feature_column_lib.input_layer(
features=features,
feature_columns=feature_columns)
for layer_id, num_hidden_units in enumerate(hidden_units):
with variable_scope.variable_scope(
'hiddenlayer_%d' % layer_id,
values=(net,)) as hidden_layer_scope:
net = core_layers.dense(
net,
units=num_hidden_units,
activation=activation_fn,
kernel_initializer=init_ops.glorot_uniform_initializer(),
name=hidden_layer_scope)
if dropout is not None and mode == model_fn.ModeKeys.TRAIN:
net = core_layers.dropout(net, rate=dropout, training=True)
_add_hidden_layer_summary(net, hidden_layer_scope.name)
with variable_scope.variable_scope(
'logits',
values=(net,)) as logits_scope:
logits = core_layers.dense(
net,
units=head.logits_dimension,
activation=None,
kernel_initializer=init_ops.glorot_uniform_initializer(),
name=logits_scope)
_add_hidden_layer_summary(logits, logits_scope.name)
def _train_op_fn(loss):
"""Returns the op to optimize the loss."""
return optimizer.minimize(
loss,
global_step=training_util.get_global_step())
return head.create_estimator_spec(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
class DNNRegressor(estimator.Estimator):
"""A regressor for TensorFlow DNN models.
Example:
```python
sparse_feature_a = sparse_column_with_hash_bucket(...)
sparse_feature_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,
...)
sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,
...)
estimator = DNNRegressor(
feature_columns=[sparse_feature_a, sparse_feature_b],
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNRegressor(
feature_columns=[sparse_feature_a, sparse_feature_b],
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
# Input builders
  def input_fn_train():  # returns x, y
    pass
  estimator.train(input_fn=input_fn_train)
  def input_fn_eval():  # returns x, y
    pass
  estimator.evaluate(input_fn=input_fn_eval)
  def input_fn_predict():  # returns x, None
    pass
estimator.predict_scores(input_fn=input_fn_predict)
```
Input of `train` and `evaluate` should have following features,
otherwise there will be a `KeyError`:
* if `weight_feature_key` is not `None`, a feature with
`key=weight_feature_key` whose value is a `Tensor`.
* for each `column` in `feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `WeightedSparseColumn`, two features: the first with
`key` the id column name, the second with `key` the weight column name.
Both features' `value` must be a `SparseTensor`.
- if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
"""
def __init__(self,
hidden_units,
feature_columns,
model_dir=None,
label_dimension=1,
weight_feature_key=None,
optimizer='Adagrad',
activation_fn=nn.relu,
dropout=None,
input_layer_partitioner=None,
config=None):
"""Initializes a `DNNRegressor` instance.
Args:
      hidden_units: Iterable of the number of hidden units per layer. All
        layers are fully connected. Ex. `[64, 32]` means the first layer has
        64 nodes and the second one has 32.
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
      model_dir: Directory in which to save model parameters, graph, etc. This
        can also be used to load checkpoints from the directory into an
        estimator to continue training a previously saved model.
label_dimension: Number of regression targets per example. This is the
size of the last dimension of the labels and logits `Tensor` objects
(typically, these have shape `[batch_size, label_dimension]`).
      weight_feature_key: A string defining the feature column name representing
        weights. It is used to down-weight or boost examples during training,
        and is multiplied by the loss of the example.
optimizer: An instance of `tf.Optimizer` used to train the model. If
`None`, will use an Adagrad optimizer.
activation_fn: Activation function applied to each layer. If `None`, will
use `tf.nn.relu`.
dropout: When not `None`, the probability we will drop out a given
coordinate.
input_layer_partitioner: Optional. Partitioner for input layer. Defaults
to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
config: `RunConfig` object to configure the runtime settings.
Returns:
A `DNNRegressor` estimator.
"""
def _model_fn(features, labels, mode, config):
return _dnn_model_fn(
features=features,
labels=labels,
mode=mode,
# pylint: disable=protected-access
head=head_lib._regression_head_with_mean_squared_error_loss(
label_dimension=label_dimension,
weight_feature_key=weight_feature_key),
# pylint: enable=protected-access
hidden_units=hidden_units,
feature_columns=tuple(feature_columns or []),
optimizer=optimizer,
activation_fn=activation_fn,
dropout=dropout,
input_layer_partitioner=input_layer_partitioner,
config=config)
super(DNNRegressor, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config)
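# A minimal end-to-end sketch, assuming the `tf.feature_column` API and the
# hypothetical column/tensor values below, of how this estimator is driven by
# an input_fn; kept commented out so the module has no import-time side effects.
#
# import tensorflow as tf
#
# age = tf.feature_column.numeric_column('age')
# estimator = DNNRegressor(hidden_units=[16, 8], feature_columns=[age])
#
# def input_fn_train():
#   features = {'age': tf.constant([[23.], [31.], [45.]])}
#   labels = tf.constant([[1.0], [2.0], [3.0]])
#   return features, labels
#
# estimator.train(input_fn=input_fn_train, steps=10)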
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Dirichlet Multinomial distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=line-too-long
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import special_math_ops
# pylint: enable=line-too-long
class DirichletMultinomial(distribution.Distribution):
"""DirichletMultinomial mixture distribution.
This distribution is parameterized by a vector `alpha` of concentration
  parameters for `k` classes and by `n`, the total number of draws.
#### Mathematical details
The Dirichlet Multinomial is a distribution over k-class count data, meaning
for each k-tuple of non-negative integer `counts = [c_1,...,c_k]`, we have a
probability of these draws being made from the distribution. The distribution
has hyperparameters `alpha = (alpha_1,...,alpha_k)`, and probability mass
function (pmf):
  ```pmf(counts) = N! / (c_1!...c_k!) * Beta(alpha + c) / Beta(alpha)```
  where above `N = sum_j c_j`, `N!` is `N` factorial, and
`Beta(x) = prod_j Gamma(x_j) / Gamma(sum_j x_j)` is the multivariate beta
function.
  This is a mixture distribution in that samples can be produced by:
  1. Choose class probabilities `p = (p_1,...,p_k) ~ Dir(alpha)`
  2. Draw integer counts `c = (c_1,...,c_k) ~ Multinomial(N, p)`
This class provides methods to create indexed batches of Dirichlet
Multinomial distributions. If the provided `alpha` is rank 2 or higher, for
every fixed set of leading dimensions, the last dimension represents one
single Dirichlet Multinomial distribution. When calling distribution
functions (e.g. `dist.pmf(counts)`), `alpha` and `counts` are broadcast to the
same shape (if possible). In all cases, the last dimension of alpha/counts
represents single Dirichlet Multinomial distributions.
#### Examples
```python
alpha = [1, 2, 3]
n = 2
dist = DirichletMultinomial(n, alpha)
```
  Creates a 3-class distribution, with the 3rd class most likely to be drawn.
The distribution functions can be evaluated on counts.
```python
# counts same shape as alpha.
counts = [0, 0, 2]
dist.pmf(counts) # Shape []
# alpha will be broadcast to [[1, 2, 3], [1, 2, 3]] to match counts.
counts = [[1, 1, 0], [1, 0, 1]]
dist.pmf(counts) # Shape [2]
# alpha will be broadcast to shape [5, 7, 3] to match counts.
counts = [[...]] # Shape [5, 7, 3]
dist.pmf(counts) # Shape [5, 7]
```
Creates a 2-batch of 3-class distributions.
```python
alpha = [[1, 2, 3], [4, 5, 6]] # Shape [2, 3]
n = [3, 3]
dist = DirichletMultinomial(n, alpha)
# counts will be broadcast to [[2, 1, 0], [2, 1, 0]] to match alpha.
counts = [2, 1, 0]
dist.pmf(counts) # Shape [2]
```
"""
# TODO(b/27419586) Change docstring for dtype of alpha once int allowed.
def __init__(self,
n,
alpha,
validate_args=True,
allow_nan_stats=False,
name="DirichletMultinomial"):
"""Initialize a batch of DirichletMultinomial distributions.
Args:
n: Non-negative floating point tensor, whose dtype is the same as
`alpha`. The shape is broadcastable to `[N1,..., Nm]` with `m >= 0`.
Defines this as a batch of `N1 x ... x Nm` different Dirichlet
multinomial distributions. Its components should be equal to integer
values.
alpha: Positive floating point tensor, whose dtype is the same as
        `n` with shape broadcastable to `[N1,..., Nm, k]` with `m >= 0`. Defines
this as a batch of `N1 x ... x Nm` different `k` class Dirichlet
multinomial distributions.
validate_args: Whether to assert valid values for parameters `alpha` and
`n`, and `x` in `prob` and `log_prob`. If `False`, correct behavior is
not guaranteed.
allow_nan_stats: Boolean, default `False`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to prefix Ops created by this distribution class.
Examples:
```python
# Define 1-batch of 2-class Dirichlet multinomial distribution,
# also known as a beta-binomial.
dist = DirichletMultinomial(2.0, [1.1, 2.0])
# Define a 2-batch of 3-class distributions.
dist = DirichletMultinomial([3., 4], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
```
"""
self._allow_nan_stats = allow_nan_stats
self._validate_args = validate_args
self._name = name
with ops.op_scope([n, alpha], name):
# Broadcasting works because:
# * The broadcasting convention is to prepend dimensions of size [1], and
      #   we use the last dimension for the distribution, whereas
      #   the batch dimensions are the leading dimensions, which forces the
      #   distribution dimension to be defined explicitly (i.e. it cannot be
      #   created automatically by prepending). This forces enough
      #   explicitness.
# * All calls involving `counts` eventually require a broadcast between
# `counts` and alpha.
self._alpha = self._check_alpha(alpha)
self._n = self._check_n(n)
self._alpha_sum = math_ops.reduce_sum(
self._alpha, reduction_indices=[-1], keep_dims=False)
self._get_batch_shape = self._alpha_sum.get_shape()
# event shape depends only on alpha, not "n".
self._get_event_shape = self._alpha.get_shape().with_rank_at_least(1)[-1:]
@property
def n(self):
"""Parameter defining this distribution."""
return self._n
@property
def alpha(self):
"""Parameter defining this distribution."""
return self._alpha
@property
def allow_nan_stats(self):
"""Boolean describing behavior when a stat is undefined for batch member."""
return self._allow_nan_stats
@property
def validate_args(self):
"""Boolean describing behavior on invalid input."""
return self._validate_args
@property
def name(self):
"""Name to prepend to all ops."""
return self._name
@property
def dtype(self):
"""dtype of samples from this distribution."""
return self._alpha.dtype
def mean(self, name="mean"):
"""Class means for every batch member."""
alpha = self._alpha
alpha_sum = self._alpha_sum
n = self._n
with ops.name_scope(self.name):
with ops.op_scope([alpha, alpha_sum, n], name):
mean_no_n = alpha / array_ops.expand_dims(alpha_sum, -1)
return array_ops.expand_dims(n, -1) * mean_no_n
def variance(self, name="mean"):
"""Class variances for every batch member.
The variance for each batch member is defined as the following:
```
Var(X_j) = n * alpha_j / alpha_0 * (1 - alpha_j / alpha_0) *
(n + alpha_0) / (1 + alpha_0)
```
where `alpha_0 = sum_j alpha_j`.
The covariance between elements in a batch is defined as:
```
Cov(X_i, X_j) = -n * alpha_i * alpha_j / alpha_0 ** 2 *
(n + alpha_0) / (1 + alpha_0)
```
Args:
name: The name for this op.
Returns:
A `Tensor` representing the variances for each batch member.
"""
alpha = self._alpha
alpha_sum = self._alpha_sum
n = self._n
with ops.name_scope(self.name):
with ops.op_scope([alpha, alpha_sum, n], name):
expanded_alpha_sum = array_ops.expand_dims(alpha_sum, -1)
shared_factor = n * (expanded_alpha_sum + n) / (
expanded_alpha_sum + 1) * array_ops.ones_like(alpha)
mean_no_n = alpha / expanded_alpha_sum
expanded_mean_no_n = array_ops.expand_dims(mean_no_n, -1)
variance = -math_ops.batch_matmul(
expanded_mean_no_n, expanded_mean_no_n, adj_y=True)
variance += array_ops.batch_matrix_diag(mean_no_n)
variance *= array_ops.expand_dims(shared_factor, -1)
return variance
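  # A worked numeric instance of the variance formula above (values assumed for
  # illustration): with alpha = [1., 2., 3.] (so alpha_0 = 6) and n = 2,
  #   Var(X_1) = 2 * (1/6) * (1 - 1/6) * (2 + 6) / (1 + 6) = 80/252 ~= 0.317,
  # which is the [0, 0] entry of the matrix that variance() should return here.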
def batch_shape(self, name="batch_shape"):
"""Batch dimensions of this instance as a 1-D int32 `Tensor`.
The product of the dimensions of the `batch_shape` is the number of
independent distributions of this kind the instance represents.
Args:
name: name to give to the op
Returns:
`Tensor` `batch_shape`
"""
with ops.name_scope(self.name):
with ops.op_scope([self._alpha_sum], name):
return array_ops.shape(self._alpha_sum)
def get_batch_shape(self):
"""`TensorShape` available at graph construction time.
Same meaning as `batch_shape`. May be only partially defined.
Returns:
batch shape
"""
return self._get_batch_shape
def event_shape(self, name="event_shape"):
"""Shape of a sample from a single distribution as a 1-D int32 `Tensor`.
Args:
name: name to give to the op
Returns:
`Tensor` `event_shape`
"""
with ops.name_scope(self.name):
with ops.op_scope([self._alpha], name):
return array_ops.reverse(array_ops.shape(self._alpha), [True])[0]
def get_event_shape(self):
"""`TensorShape` available at graph construction time.
Same meaning as `event_shape`. May be only partially defined.
Returns:
event shape
"""
return self._get_event_shape
def cdf(self, x, name="cdf"):
raise NotImplementedError(
"DirichletMultinomial does not have a well-defined cdf.")
def log_cdf(self, x, name="log_cdf"):
raise NotImplementedError(
"DirichletMultinomial does not have a well-defined cdf.")
def log_prob(self, counts, name="log_prob"):
"""`Log(P[counts])`, computed for every batch member.
For each batch of counts `[n_1,...,n_k]`, `P[counts]` is the probability
that after sampling `n` draws from this Dirichlet Multinomial
distribution, the number of draws falling in class `j` is `n_j`. Note that
different sequences of draws can result in the same counts, thus the
probability includes a combinatorial coefficient.
Args:
counts: Non-negative tensor with dtype `dtype` and whose shape can be
broadcast with `self.alpha`. For fixed leading dimensions, the last
dimension represents counts for the corresponding Dirichlet Multinomial
distribution in `self.alpha`. `counts` is only legal if it sums up to
`n` and its components are equal to integer values.
name: Name to give this Op, defaults to "log_prob".
Returns:
Log probabilities for each record, shape `[N1,...,Nn]`.
"""
n = self._n
alpha = self._alpha
with ops.name_scope(self.name):
with ops.op_scope([n, alpha, counts], name):
counts = self._check_counts(counts)
ordered_prob = (special_math_ops.lbeta(alpha + counts) -
special_math_ops.lbeta(alpha))
log_prob = ordered_prob + distribution_util.log_combinations(
n, counts)
return log_prob
def prob(self, counts, name="prob"):
"""`P[counts]`, computed for every batch member.
For each batch of counts `[c_1,...,c_k]`, `P[counts]` is the probability
that after sampling `sum_j c_j` draws from this Dirichlet Multinomial
distribution, the number of draws falling in class `j` is `c_j`. Note that
different sequences of draws can result in the same counts, thus the
probability includes a combinatorial coefficient.
Args:
counts: Non-negative tensor with dtype `dtype` and whose shape can be
broadcast with `self.alpha`. For fixed leading dimensions, the last
dimension represents counts for the corresponding Dirichlet Multinomial
distribution in `self.alpha`. `counts` is only legal if it sums up to
`n` and its components are equal to integer values.
name: Name to give this Op, defaults to "prob".
Returns:
Probabilities for each record, shape `[N1,...,Nn]`.
"""
return super(DirichletMultinomial, self).prob(counts, name=name)
def _check_counts(self, counts):
"""Check counts for proper shape, values, then return tensor version."""
counts = ops.convert_to_tensor(counts, name="counts")
if not self.validate_args:
return counts
candidate_n = math_ops.reduce_sum(counts, reduction_indices=[-1])
return control_flow_ops.with_dependencies([
check_ops.assert_non_negative(counts),
check_ops.assert_equal(
self._n, candidate_n,
message="counts do not sum to n"
),
distribution_util.assert_integer_form(counts)], counts)
def _check_alpha(self, alpha):
alpha = ops.convert_to_tensor(alpha, name="alpha")
if not self.validate_args:
return alpha
return control_flow_ops.with_dependencies(
[check_ops.assert_rank_at_least(alpha, 1),
check_ops.assert_positive(alpha)], alpha)
def _check_n(self, n):
n = ops.convert_to_tensor(n, name="n")
if not self.validate_args:
return n
return control_flow_ops.with_dependencies(
[check_ops.assert_non_negative(n),
distribution_util.assert_integer_form(n)], n)
@property
def is_continuous(self):
return False
@property
def is_reparameterized(self):
return False
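# A minimal NumPy/SciPy cross-check sketch of the log-pmf that log_prob()
# computes above (the helper below is an assumption for illustration, not part
# of this class), using log Beta(x) = sum_j gammaln(x_j) - gammaln(sum_j x_j):
#
# import numpy as np
# from scipy.special import gammaln
#
# def dm_log_pmf(counts, alpha):
#   counts, alpha = np.asarray(counts, float), np.asarray(alpha, float)
#   lbeta = lambda x: gammaln(x).sum() - gammaln(x.sum())
#   log_combinations = gammaln(counts.sum() + 1) - gammaln(counts + 1).sum()
#   return log_combinations + lbeta(alpha + counts) - lbeta(alpha)
#
# dm_log_pmf([0., 0., 2.], [1., 2., 3.])  # log(2/7), which should match
#                                         # DirichletMultinomial(2., [1., 2., 3.]).log_prob([0., 0., 2.])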
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
from contextlib import nullcontext
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal, assert_array_almost_equal_nulp
from astropy.convolution.convolve import convolve_fft, convolve
from astropy.utils.exceptions import AstropyUserWarning
from astropy import units as u
VALID_DTYPES = ('>f4', '<f4', '>f8', '<f8')
VALID_DTYPE_MATRIX = list(itertools.product(VALID_DTYPES, VALID_DTYPES))
BOUNDARY_OPTIONS = [None, 'fill', 'wrap']
NANTREATMENT_OPTIONS = ('interpolate', 'fill')
NORMALIZE_OPTIONS = [True, False]
PRESERVE_NAN_OPTIONS = [True, False]
"""
What does convolution mean? We use the 'same size' assumption here (i.e.,
you expect an array of the exact same size as the one you put in)
Convolving any array with a kernel that is [1] should result in the same array returned
Working example array: [1, 2, 3, 4, 5]
Convolved with [1] = [1, 2, 3, 4, 5]
Convolved with [1, 1] = [1, 3, 5, 7, 9] THIS IS NOT CONSISTENT!
Convolved with [1, 0] = [1, 2, 3, 4, 5]
Convolved with [0, 1] = [0, 1, 2, 3, 4]
"""
# NOTE: use_numpy_fft is redundant if you don't have FFTW installed
option_names = ('boundary', 'nan_treatment', 'normalize_kernel')
options = list(itertools.product(BOUNDARY_OPTIONS,
NANTREATMENT_OPTIONS,
(True, False),
))
option_names_preserve_nan = ('boundary', 'nan_treatment',
'normalize_kernel', 'preserve_nan')
options_preserve_nan = list(itertools.product(BOUNDARY_OPTIONS,
NANTREATMENT_OPTIONS,
(True, False),
(True, False)))
def expected_boundary_warning(boundary=None):
# Helper that returns the appropriate context manager for the boundary=None
# warning depending on the value of boundary.
if boundary is None:
ctx = pytest.warns(AstropyUserWarning,
match='The convolve_fft version of boundary=None '
'is equivalent to the convolve boundary=\'fill\'')
else:
ctx = nullcontext()
return ctx
def assert_floatclose(x, y):
"""Assert arrays are close to within expected floating point rounding.
Check that the result is correct at the precision expected for 64 bit
numbers, taking account that the tolerance has to reflect that all powers
in the FFTs enter our values.
"""
# The number used is set by the fact that the Windows FFT sometimes
# returns an answer that is EXACTLY 10*np.spacing.
assert_allclose(x, y, atol=10*np.spacing(x.max()), rtol=0.)
class TestConvolve1D:
@pytest.mark.parametrize(option_names, options)
def test_quantity(self, boundary, nan_treatment, normalize_kernel):
"""
Test that convolve_fft works correctly when input array is a Quantity
"""
x = np.array([1., 4., 5., 6., 5., 7., 8.], dtype='float64') * u.ph
y = np.array([0.2, 0.6, 0.2], dtype='float64')
with expected_boundary_warning(boundary=boundary):
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel)
assert x.unit == z.unit
@pytest.mark.parametrize(option_names, options)
def test_unity_1_none(self, boundary, nan_treatment, normalize_kernel):
'''
Test that a unit kernel with a single element returns the same array
'''
x = np.array([1., 2., 3.], dtype='float64')
y = np.array([1.], dtype='float64')
with expected_boundary_warning(boundary=boundary):
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel)
assert_floatclose(z, x)
@pytest.mark.parametrize(option_names, options)
def test_unity_3(self, boundary, nan_treatment, normalize_kernel):
'''
Test that a unit kernel with three elements returns the same array
(except when boundary is None).
'''
x = np.array([1., 2., 3.], dtype='float64')
y = np.array([0., 1., 0.], dtype='float64')
with expected_boundary_warning(boundary=boundary):
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel)
assert_floatclose(z, x)
@pytest.mark.parametrize(option_names, options)
def test_uniform_3(self, boundary, nan_treatment, normalize_kernel):
'''
Test that the different modes are producing the correct results using
a uniform kernel with three elements
'''
x = np.array([1., 0., 3.], dtype='float64')
y = np.array([1., 1., 1.], dtype='float64')
with expected_boundary_warning(boundary=boundary):
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel)
answer_key = (boundary, nan_treatment, normalize_kernel)
answer_dict = {
'sum_fill_zeros': np.array([1., 4., 3.], dtype='float64'),
'average_fill_zeros': np.array([1 / 3., 4 / 3., 1.], dtype='float64'),
'sum_wrap': np.array([4., 4., 4.], dtype='float64'),
'average_wrap': np.array([4 / 3., 4 / 3., 4 / 3.], dtype='float64'),
}
result_dict = {
# boundary, nan_treatment, normalize_kernel
('fill', 'interpolate', True): answer_dict['average_fill_zeros'],
('wrap', 'interpolate', True): answer_dict['average_wrap'],
('fill', 'interpolate', False): answer_dict['sum_fill_zeros'],
('wrap', 'interpolate', False): answer_dict['sum_wrap'],
}
for k in list(result_dict.keys()):
result_dict[(k[0], 'fill', k[2])] = result_dict[k]
for k in list(result_dict.keys()):
if k[0] == 'fill':
result_dict[(None, k[1], k[2])] = result_dict[k]
assert_floatclose(z, result_dict[answer_key])
@pytest.mark.parametrize(option_names, options)
def test_halfity_3(self, boundary, nan_treatment, normalize_kernel):
'''
Test that the different modes are producing the correct results using
a uniform, non-unity kernel with three elements
'''
x = np.array([1., 0., 3.], dtype='float64')
y = np.array([0.5, 0.5, 0.5], dtype='float64')
with expected_boundary_warning(boundary=boundary):
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel)
answer_dict = {
'sum': np.array([0.5, 2.0, 1.5], dtype='float64'),
'sum_zeros': np.array([0.5, 2., 1.5], dtype='float64'),
'sum_nozeros': np.array([0.5, 2., 1.5], dtype='float64'),
'average': np.array([1 / 3., 4 / 3., 1.], dtype='float64'),
'sum_wrap': np.array([2., 2., 2.], dtype='float64'),
'average_wrap': np.array([4 / 3., 4 / 3., 4 / 3.], dtype='float64'),
'average_zeros': np.array([1 / 3., 4 / 3., 1.], dtype='float64'),
'average_nozeros': np.array([0.5, 4 / 3., 1.5], dtype='float64'),
}
if normalize_kernel:
answer_key = 'average'
else:
answer_key = 'sum'
if boundary == 'wrap':
answer_key += '_wrap'
else:
# average = average_zeros; sum = sum_zeros
answer_key += '_zeros'
assert_floatclose(z, answer_dict[answer_key])
@pytest.mark.parametrize(option_names_preserve_nan, options_preserve_nan)
def test_unity_3_withnan(self, boundary, nan_treatment, normalize_kernel,
preserve_nan):
'''
Test that a unit kernel with three elements returns the same array
(except when boundary is None). This version includes a NaN value in
the original array.
'''
x = np.array([1., np.nan, 3.], dtype='float64')
y = np.array([0., 1., 0.], dtype='float64')
with expected_boundary_warning(boundary=boundary):
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
if preserve_nan:
assert np.isnan(z[1])
z = np.nan_to_num(z)
assert_floatclose(z, [1., 0., 3.])
inputs = (np.array([1., np.nan, 3.], dtype='float64'),
np.array([1., np.inf, 3.], dtype='float64'))
outputs = (np.array([1., 0., 3.], dtype='float64'),
np.array([1., 0., 3.], dtype='float64'))
options_unity1withnan = list(itertools.product(BOUNDARY_OPTIONS,
NANTREATMENT_OPTIONS,
(True, False),
(True, False),
inputs, outputs))
@pytest.mark.parametrize(option_names_preserve_nan + ('inval', 'outval'),
options_unity1withnan)
def test_unity_1_withnan(self, boundary, nan_treatment, normalize_kernel,
preserve_nan, inval, outval):
'''
Test that a unit kernel with three elements returns the same array
(except when boundary is None). This version includes a NaN value in
the original array.
'''
x = inval
y = np.array([1.], dtype='float64')
with expected_boundary_warning(boundary=boundary):
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
if preserve_nan:
assert np.isnan(z[1])
z = np.nan_to_num(z)
assert_floatclose(z, outval)
@pytest.mark.parametrize(option_names_preserve_nan, options_preserve_nan)
def test_uniform_3_withnan(self, boundary, nan_treatment,
normalize_kernel, preserve_nan):
'''
Test that the different modes are producing the correct results using
a uniform kernel with three elements. This version includes a NaN
value in the original array.
'''
x = np.array([1., np.nan, 3.], dtype='float64')
y = np.array([1., 1., 1.], dtype='float64')
with expected_boundary_warning(boundary=boundary):
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
if preserve_nan:
assert np.isnan(z[1])
answer_dict = {
'sum': np.array([1., 4., 3.], dtype='float64'),
'sum_nozeros': np.array([1., 4., 3.], dtype='float64'),
'sum_zeros': np.array([1., 4., 3.], dtype='float64'),
'sum_nozeros_interpnan': np.array([1., 4., 3.], dtype='float64'),
'average': np.array([1., 2., 3.], dtype='float64'),
'sum_wrap': np.array([4., 4., 4.], dtype='float64'),
'average_wrap': np.array([4/3., 4/3., 4/3.], dtype='float64'),
'average_wrap_interpnan': np.array([2, 2, 2], dtype='float64'),
'average_nozeros': np.array([1/2., 4/3., 3/2.], dtype='float64'),
'average_nozeros_interpnan': np.array([1., 2., 3.], dtype='float64'),
'average_zeros': np.array([1 / 3., 4 / 3., 3 / 3.], dtype='float64'),
'average_zeros_interpnan': np.array([1 / 2., 4 / 2., 3 / 2.], dtype='float64'),
}
for key in list(answer_dict.keys()):
if 'sum' in key:
answer_dict[key+"_interpnan"] = answer_dict[key] * 3./2.
if normalize_kernel:
answer_key = 'average'
else:
answer_key = 'sum'
if boundary == 'wrap':
answer_key += '_wrap'
else:
# average = average_zeros; sum = sum_zeros
answer_key += '_zeros'
if nan_treatment == 'interpolate':
answer_key += '_interpnan'
posns = np.isfinite(z)
answer = answer_dict[answer_key][posns]
# check that fill is set and that the 1'th position that was originally
# NaN is included in the check
if (nan_treatment == 'fill') and posns[1]:
# we fill the center with the sum of the input array divided by
# three, since we've now pre-filled the center value with zero
answer[1] = 4 / (3. if normalize_kernel else 1.)
assert_floatclose(z[posns], answer)
def test_nan_interpolate(self):
# Test masked array
array = np.array([1., np.nan, 3.], dtype='float64')
kernel = np.array([1, 1, 1])
masked_array = np.ma.masked_array(array, mask=[0, 1, 0])
result = convolve_fft(masked_array, kernel, boundary='fill',
nan_treatment='interpolate',
fill_value=np.nan)
assert_floatclose(result, [1, 2, 3])
def test_nan_fill(self):
# regression for #8121
# Test masked array
array = np.array([1., np.nan, 3.], dtype='float64')
kernel = np.array([1, 1, 1])
result = convolve_fft(array, kernel, boundary='fill',
nan_treatment='fill',
fill_value=0)
# note that, because fill_value also affects boundary='fill', the edge
# pixels are treated as zero rather than being ignored.
assert_floatclose(result, [1/3., 4/3., 1.])
def test_nan_fill_two(self):
# regression for #8121
# Test masked array
array = np.array([1., np.nan, 3.], dtype='float64')
kernel = np.array([1, 1, 1])
result = convolve_fft(array, kernel, boundary='fill',
nan_treatment='fill',
fill_value=1)
# note that, because fill_value also affects boundary='fill', the edge
# pixels are treated as fill_value=1 rather than being ignored.
assert_floatclose(result, [1., 5/3., 5/3.])
def test_masked_array(self):
"""
Check whether convolve_fft works with masked arrays.
"""
# Test masked array
array = np.array([1., 2., 3.], dtype='float64')
kernel = np.array([1, 1, 1])
masked_array = np.ma.masked_array(array, mask=[0, 1, 0])
result = convolve_fft(masked_array, kernel, boundary='fill',
fill_value=0.)
assert_floatclose(result, [1./2, 2, 3./2])
# Now test against convolve()
convolve_result = convolve(masked_array, kernel, boundary='fill',
fill_value=0.)
assert_floatclose(convolve_result, result)
# Test masked kernel
array = np.array([1., 2., 3.], dtype='float64')
kernel = np.array([1, 1, 1])
masked_kernel = np.ma.masked_array(kernel, mask=[0, 1, 0])
result = convolve_fft(array, masked_kernel, boundary='fill',
fill_value=0.)
assert_floatclose(result, [1, 2, 1])
# Now test against convolve()
convolve_result = convolve(array, masked_kernel, boundary='fill',
fill_value=0.)
assert_floatclose(convolve_result, result)
def test_normalize_function(self):
"""
Check if convolve_fft works when passing a normalize function.
"""
array = [1, 2, 3]
kernel = [3, 3, 3]
result = convolve_fft(array, kernel, normalize_kernel=np.max)
assert_floatclose(result, [3, 6, 5])
@pytest.mark.parametrize(option_names, options)
def test_normalization_is_respected(self, boundary,
nan_treatment,
normalize_kernel):
"""
Check that if normalize_kernel is False then the normalization
tolerance is respected.
"""
array = np.array([1, 2, 3])
# A simple identity kernel to which a non-zero normalization is added.
base_kernel = np.array([1.0])
# Use the same normalization error tolerance in all cases.
normalization_rtol = 1e-4
# Add the error below to the kernel.
norm_error = [normalization_rtol / 10, normalization_rtol * 10]
for err in norm_error:
kernel = base_kernel + err
result = convolve_fft(array, kernel,
normalize_kernel=normalize_kernel,
nan_treatment=nan_treatment,
normalization_zero_tol=normalization_rtol)
if normalize_kernel:
# Kernel has been normalized to 1.
assert_floatclose(result, array)
else:
# Kernel should not have been normalized...
assert_floatclose(result, array * kernel)
class TestConvolve2D:
@pytest.mark.parametrize(option_names, options)
def test_unity_1x1_none(self, boundary, nan_treatment, normalize_kernel):
'''
Test that a 1x1 unit kernel returns the same array
'''
x = np.array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]], dtype='float64')
y = np.array([[1.]], dtype='float64')
with expected_boundary_warning(boundary=boundary):
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel)
assert_floatclose(z, x)
@pytest.mark.parametrize(option_names, options)
def test_unity_3x3(self, boundary, nan_treatment, normalize_kernel):
'''
Test that a 3x3 unit kernel returns the same array (except when
boundary is None).
'''
x = np.array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]], dtype='float64')
y = np.array([[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]], dtype='float64')
with expected_boundary_warning(boundary=boundary):
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel)
assert_floatclose(z, x)
@pytest.mark.parametrize(option_names, options)
def test_uniform_3x3(self, boundary, nan_treatment, normalize_kernel):
'''
Test that the different modes are producing the correct results using
a 3x3 uniform kernel.
'''
x = np.array([[0., 0., 3.],
[1., 0., 0.],
[0., 2., 0.]], dtype='float64')
y = np.array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype='float64')
with expected_boundary_warning(boundary=boundary):
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
fill_value=np.nan if normalize_kernel else 0,
normalize_kernel=normalize_kernel)
w = np.array([[4., 6., 4.],
[6., 9., 6.],
[4., 6., 4.]], dtype='float64')
answer_dict = {
'sum': np.array([[1., 4., 3.],
[3., 6., 5.],
[3., 3., 2.]], dtype='float64'),
'sum_wrap': np.array([[6., 6., 6.],
[6., 6., 6.],
[6., 6., 6.]], dtype='float64'),
}
answer_dict['average'] = answer_dict['sum'] / w
answer_dict['average_wrap'] = answer_dict['sum_wrap'] / 9.
answer_dict['average_withzeros'] = answer_dict['sum'] / 9.
answer_dict['sum_withzeros'] = answer_dict['sum']
if normalize_kernel:
answer_key = 'average'
else:
answer_key = 'sum'
if boundary == 'wrap':
answer_key += '_wrap'
elif nan_treatment == 'fill':
answer_key += '_withzeros'
a = answer_dict[answer_key]
assert_floatclose(z, a)
@pytest.mark.parametrize(option_names_preserve_nan, options_preserve_nan)
def test_unity_3x3_withnan(self, boundary, nan_treatment,
normalize_kernel, preserve_nan):
'''
Test that a 3x3 unit kernel returns the same array (except when
boundary is None). This version includes a NaN value in the original
array.
'''
x = np.array([[1., 2., 3.],
[4., np.nan, 6.],
[7., 8., 9.]], dtype='float64')
y = np.array([[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]], dtype='float64')
with expected_boundary_warning(boundary=boundary):
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
if preserve_nan:
assert np.isnan(z[1, 1])
z = np.nan_to_num(z)
x = np.nan_to_num(x)
assert_floatclose(z, x)
@pytest.mark.parametrize(option_names_preserve_nan, options_preserve_nan)
def test_uniform_3x3_withnan(self, boundary, nan_treatment,
normalize_kernel, preserve_nan):
'''
Test that the different modes are producing the correct results using
a 3x3 uniform kernel. This version includes a NaN value in the
original array.
'''
x = np.array([[0., 0., 3.],
[1., np.nan, 0.],
[0., 2., 0.]], dtype='float64')
y = np.array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype='float64')
# commented out: allow unnormalized nan-ignoring convolution
# # kernel is not normalized, so this situation -> exception
# if nan_treatment and not normalize_kernel:
# with pytest.raises(ValueError):
# z = convolve_fft(x, y, boundary=boundary,
# nan_treatment=nan_treatment,
# normalize_kernel=normalize_kernel,
# ignore_edge_zeros=ignore_edge_zeros,
# )
# return
with expected_boundary_warning(boundary=boundary):
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
# you cannot fill w/nan, you can only interpolate over it
fill_value=np.nan if normalize_kernel and nan_treatment=='interpolate' else 0,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
if preserve_nan:
assert np.isnan(z[1, 1])
# weights
w_n = np.array([[3., 5., 3.],
[5., 8., 5.],
[3., 5., 3.]], dtype='float64')
w_z = np.array([[4., 6., 4.],
[6., 9., 6.],
[4., 6., 4.]], dtype='float64')
answer_dict = {
'sum': np.array([[1., 4., 3.],
[3., 6., 5.],
[3., 3., 2.]], dtype='float64'),
'sum_wrap': np.array([[6., 6., 6.],
[6., 6., 6.],
[6., 6., 6.]], dtype='float64'),
}
answer_dict['average'] = answer_dict['sum'] / w_z
answer_dict['average_interpnan'] = answer_dict['sum'] / w_n
answer_dict['average_wrap_interpnan'] = answer_dict['sum_wrap'] / 8.
answer_dict['average_wrap'] = answer_dict['sum_wrap'] / 9.
answer_dict['average_withzeros'] = answer_dict['sum'] / 9.
answer_dict['average_withzeros_interpnan'] = answer_dict['sum'] / 8.
answer_dict['sum_withzeros'] = answer_dict['sum']
answer_dict['sum_interpnan'] = answer_dict['sum'] * 9/8.
answer_dict['sum_withzeros_interpnan'] = answer_dict['sum']
answer_dict['sum_wrap_interpnan'] = answer_dict['sum_wrap'] * 9/8.
if normalize_kernel:
answer_key = 'average'
else:
answer_key = 'sum'
if boundary == 'wrap':
answer_key += '_wrap'
elif nan_treatment == 'fill':
answer_key += '_withzeros'
if nan_treatment == 'interpolate':
answer_key += '_interpnan'
        answer = answer_dict[answer_key]
        # Skip the NaN at [1, 1] when preserve_nan=True
        posns = np.where(np.isfinite(z))
        # for reasons unknown, the Windows FFT returns an answer for the [0, 0]
        # component that is EXACTLY 10*np.spacing
        assert_floatclose(z[posns], answer[posns])
def test_big_fail(self):
""" Test that convolve_fft raises an exception if a too-large array is passed in."""
with pytest.raises((ValueError, MemoryError)):
# while a good idea, this approach did not work; it actually writes to disk
# arr = np.memmap('file.np', mode='w+', shape=(512, 512, 512), dtype=complex)
# this just allocates the memory but never touches it; it's better:
arr = np.empty([512, 512, 512], dtype=complex)
# note 512**3 * 16 bytes = 2.0 GB
convolve_fft(arr, arr)
def test_padding(self):
"""
Test that convolve_fft pads to _next_fast_lengths and does not expand all dimensions
to length of longest side (#11242/#10047).
"""
# old implementation expanded this to up to 2048**3
shape = (1, 1226, 518)
img = np.zeros(shape, dtype='float64')
img[0, 600:610, 300:304] = 1.0
kernel = np.zeros((1, 7, 7), dtype='float64')
kernel[0, 3, 3] = 1.0
with pytest.warns(AstropyUserWarning,
match="psf_pad was set to False, which overrides the boundary='fill'"):
img_fft = convolve_fft(img, kernel, return_fft=True, psf_pad=False, fft_pad=False)
assert_array_equal(img_fft.shape, shape)
img_fft = convolve_fft(img, kernel, return_fft=True, psf_pad=False, fft_pad=True)
# should be from either hardcoded _good_sizes[] or scipy.fft.next_fast_len()
assert img_fft.shape in ((1, 1250, 540), (1, 1232, 525))
img_fft = convolve_fft(img, kernel, return_fft=True, psf_pad=True, fft_pad=False)
assert_array_equal(img_fft.shape, np.array(shape) + np.array(kernel.shape))
img_fft = convolve_fft(img, kernel, return_fft=True, psf_pad=True, fft_pad=True)
assert img_fft.shape in ((2, 1250, 540), (2, 1250, 525))
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_non_normalized_kernel(self, boundary):
x = np.array([[0., 0., 4.],
[1., 2., 0.],
[0., 3., 0.]], dtype='float')
y = np.array([[1., -1., 1.],
[-1., 0., -1.],
[1., -1., 1.]], dtype='float')
with expected_boundary_warning(boundary=boundary):
z = convolve_fft(x, y, boundary=boundary, nan_treatment='fill',
normalize_kernel=False)
if boundary in (None, 'fill'):
assert_floatclose(z, np.array([[1., -5., 2.],
[1., 0., -3.],
[-2., -1., -1.]], dtype='float'))
elif boundary == 'wrap':
assert_floatclose(z, np.array([[0., -8., 6.],
[5., 0., -4.],
[2., 3., -4.]], dtype='float'))
else:
raise ValueError("Invalid boundary specification")
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_asymmetric_kernel(boundary):
'''
Make sure that asymmetric convolution
functions go the right direction
'''
x = np.array([3., 0., 1.], dtype='>f8')
y = np.array([1, 2, 3], dtype='>f8')
with expected_boundary_warning(boundary=boundary):
z = convolve_fft(x, y, boundary=boundary, normalize_kernel=False)
if boundary in (None, 'fill'):
assert_array_almost_equal_nulp(z, np.array([6., 10., 2.], dtype='float'), 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.array([9., 10., 5.], dtype='float'), 10)
@pytest.mark.parametrize(('boundary', 'nan_treatment',
'normalize_kernel', 'preserve_nan', 'dtype'),
itertools.product(BOUNDARY_OPTIONS,
NANTREATMENT_OPTIONS,
NORMALIZE_OPTIONS,
PRESERVE_NAN_OPTIONS,
VALID_DTYPES))
def test_input_unmodified(boundary, nan_treatment,
normalize_kernel, preserve_nan, dtype):
"""
    Test that convolve_fft does not modify its inputs (given here as lists)
"""
array = [1., 4., 5., 6., 5., 7., 8.]
kernel = [0.2, 0.6, 0.2]
x = np.array(array, dtype=dtype)
y = np.array(kernel, dtype=dtype)
# Make pseudoimmutable
x.flags.writeable = False
y.flags.writeable = False
with expected_boundary_warning(boundary=boundary):
z = convolve_fft(x, y, boundary=boundary, nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel, preserve_nan=preserve_nan)
assert np.all(np.array(array, dtype=dtype) == x)
assert np.all(np.array(kernel, dtype=dtype) == y)
@pytest.mark.parametrize(('boundary', 'nan_treatment',
'normalize_kernel', 'preserve_nan', 'dtype'),
itertools.product(BOUNDARY_OPTIONS,
NANTREATMENT_OPTIONS,
NORMALIZE_OPTIONS,
PRESERVE_NAN_OPTIONS,
VALID_DTYPES))
def test_input_unmodified_with_nan(boundary, nan_treatment,
normalize_kernel, preserve_nan, dtype):
"""
Test that convolve_fft doesn't modify the input data
"""
array = [1., 4., 5., np.nan, 5., 7., 8.]
kernel = [0.2, 0.6, 0.2]
x = np.array(array, dtype=dtype)
y = np.array(kernel, dtype=dtype)
# Make pseudoimmutable
x.flags.writeable = False
y.flags.writeable = False
# make copies for post call comparison
x_copy = x.copy()
y_copy = y.copy()
with expected_boundary_warning(boundary=boundary):
z = convolve_fft(x, y, boundary=boundary, nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel, preserve_nan=preserve_nan)
# ( NaN == NaN ) = False
# Only compare non NaN values for canonical equivalence
# and then check NaN explicitly with np.isnan()
array_is_nan = np.isnan(array)
kernel_is_nan = np.isnan(kernel)
array_not_nan = ~array_is_nan
kernel_not_nan = ~kernel_is_nan
assert np.all(x_copy[array_not_nan] == x[array_not_nan])
assert np.all(y_copy[kernel_not_nan] == y[kernel_not_nan])
assert np.all(np.isnan(x[array_is_nan]))
assert np.all(np.isnan(y[kernel_is_nan]))
|
|
"""Memory mapped interface builder."""
import copy
import re
from scoff.ast.visits.syntax import SyntaxChecker
# from scoff.ast.visits.control import SetFlag, ClearFlagAfter
import hdltools.util
from hdltools.abshdl.const import HDLIntegerConstant
from hdltools.abshdl.expr import HDLExpression
from hdltools.abshdl.mmap import MemoryMappedInterface
from hdltools.abshdl.module import HDLModuleParameter
from hdltools.abshdl.registers import HDLRegister, HDLRegisterField
from hdltools.logging import DEFAULT_LOGGER
from hdltools.mmap import FlagPort
EXPRESSION_REGEX = re.compile(r"[\+\-\*\/\(\)]+")
class MMBuilder(SyntaxChecker):
"""interface builder."""
def __init__(self, *args, **kwargs):
"""Initialize."""
super().__init__(*args, **kwargs)
self._reg_size = None
self._reg_addr_offset = None
self._parameters = {}
self._registers = {}
self._ports = {}
self._cur_reg_addr = 0
self._replacement_values = {}
def _get_parameter_value(self, param_name):
"""Get parameter value."""
if param_name in self._replacement_values:
return self._replacement_values[param_name]
return self._parameters[param_name]
@staticmethod
def slice_size(slic):
"""Get slice size in bits."""
if len(slic) > 1:
return slic[0] - slic[1] + 1
else:
return 1
@staticmethod
def bitfield_pos_to_slice(pos):
"""Convert to slice from parser object."""
ret = [int(pos.left)]
if pos.right is not None:
ret.append(int(pos.right))
return ret
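    # Illustrative sketch (not part of the original source): for a parsed
    # bitfield position with left=7 and right=0, bitfield_pos_to_slice()
    # yields [7, 0]; a single-bit position such as left=3, right=None yields
    # [3]. slice_size() then gives the field width in bits:
    #
    #     >>> MMBuilder.slice_size([7, 0])
    #     8
    #     >>> MMBuilder.slice_size([3])
    #     1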
def _next_available_address(self):
"""Find next available address."""
if not self._registers:
if self._reg_addr_offset is None:
raise RuntimeError("unknown addressing mode")
self._cur_reg_addr += self._reg_addr_offset
return 0
addr_set = {register.addr for register in self._registers.values()}
possible_offsets = range(
0, max(addr_set) + self._reg_addr_offset, self._reg_addr_offset
)
for offset in possible_offsets:
if offset in addr_set:
continue
else:
self._cur_reg_addr = offset
return offset
# must increment
self._cur_reg_addr = max(addr_set) + self._reg_addr_offset
return self._cur_reg_addr
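    # Hedged sketch of the allocation behaviour (assumed values, for
    # illustration): with register_size=32 and addr_mode='byte' the offset is
    # 4, so registers already placed at addresses {0, 8} make the next call
    # return 4 (gaps are filled first); once {0, 4, 8} are all occupied, the
    # next call returns 12 (the maximum address plus the offset).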
def visit_StaticStatement(self, node):
"""Visit static statement."""
if node.var == "register_size":
if not isinstance(node.value, int):
# placeholder for syntax error
raise ValueError("identifier or expressions not supported")
if self._reg_size is not None:
# warning, re-defining
pass
self._reg_size = node.value
elif node.var == "addr_mode":
if not isinstance(node.value, str) or node.value not in (
"byte",
"word",
):
raise ValueError("addr_mode can only be 'byte' or 'word'")
if self._reg_addr_offset is not None:
# warning, re-defining
pass
if node.value == "byte":
self._reg_addr_offset = self._reg_size // 8
else:
self._reg_addr_offset = 1
else:
raise RuntimeError("unknown setting: '{}'".format(node.var))
def visit_FnCall(self, node):
"""Visit function call."""
# function calls only allowed for members of the hdltools.util module
if not hasattr(hdltools.util, node.fn):
raise NameError(f"function '{node.fn}' is unknown")
fn = getattr(hdltools.util, node.fn)
fn_args = []
for arg in node.args:
if isinstance(arg, str):
# lookup parameter name
if arg not in self._parameters:
raise NameError(f"unknown name '{arg}'")
fn_args.append(self._get_parameter_value(arg).value.value)
else:
fn_args.append(arg)
return fn(*fn_args)
def visit_ParameterStatement(self, node):
"""Visit parameter statement."""
if node.name in self._parameters:
# warning, re-defining!
pass
value = HDLIntegerConstant(node.value)
self._parameters[node.name] = HDLModuleParameter(
node.name, "integer", value
)
def visit_Range(self, node):
"""Visit range."""
if isinstance(node.left, str):
if node.left not in self._parameters:
raise NameError(f"unknown name '{node.left}'")
node.left = self._get_parameter_value(node.left).value.value
if isinstance(node.right, str):
if node.right not in self._parameters:
raise NameError(f"unknown name '{node.right}'")
node.right = self._get_parameter_value(node.right).value.value
return node
def visitPre_GenerateStatement(self, node):
"""Enter generate statement."""
generated_scope = []
# visit ahead
super().visit(node.range)
for range_val in range(node.range.left, node.range.right):
# HACK insert temporary parameter value
self._parameters[node.var] = HDLModuleParameter(
node.var, "integer", range_val
)
for stmt in node.gen_scope:
cpy_stmt = copy.deepcopy(stmt)
super().visit(cpy_stmt)
generated_scope.append(cpy_stmt)
del self._parameters[node.var]
node.gen_scope = generated_scope
def visit_TemplatedNameSubstFmt(self, node):
"""Visit template substitution."""
def _find_name(name):
"""Find name."""
if name not in self._parameters:
raise NameError(f"in template: unknown name '{name}'")
return self._get_parameter_value(name).value
m = EXPRESSION_REGEX.findall(node.arg)
if m:
# is expression
expr = ""
names = re.findall(r"[_a-zA-Z]\w*", node.arg)
for name in names:
value = _find_name(name)
expr = node.arg.replace(name, str(value))
expr = expr.replace("/", "//")
try:
expr = eval(expr)
except SyntaxError:
raise RuntimeError("invalid expression in template")
return expr
# is name
return _find_name(node.arg)
def visit_SlaveRegister(self, node):
"""Visit register declaration."""
if node.address is not None:
reg_addr = node.address
else:
reg_addr = self._next_available_address()
if isinstance(node.name, str):
register = HDLRegister(
node.name, size=self._reg_size, addr=reg_addr
)
# add properties
for prop in node.properties:
register.add_properties(**{prop.name: prop.value})
if register.name in self._registers:
# warning, re-defining!
pass
# add register
DEFAULT_LOGGER.debug(f"adding register '{register.name}'")
self._registers[register.name] = register
else:
(fragment,) = node.name.fragments
(template,) = fragment.templates
try:
start, end = template.arg.split("-")
_addr = reg_addr
for reg in range(int(start), int(end) + 1):
reg_name = fragment.fragment + str(reg)
register = HDLRegister(
reg_name, size=self._reg_size, addr=_addr
)
for prop in node.properties:
register.add_properties(
**{prop.name: prop.value.format(str(reg))}
)
if register.name in self._registers:
# warning: re-defining!
pass
# add register
self._registers[register.name] = register
_addr = self._next_available_address()
            except Exception as exc:
                raise RuntimeError("error in template rule") from exc
def visit_SlaveRegisterField(self, node):
"""Visit register field."""
src_reg, src_field = node.source
if src_reg not in self._registers:
raise ValueError("unknown register: {}".format(src_reg))
ssize = self.slice_size(self.bitfield_pos_to_slice(node.position))
if node.default is not None:
if isinstance(node.default, int):
param_size = HDLIntegerConstant.minimum_value_size(
node.default
)
defval = node.default
else:
if node.default.strip() in self._parameters:
param_size = 0
defval = HDLExpression(node.default.strip(), size=ssize)
else:
raise RuntimeError(
"unknown identifier: {}".format(node.default.strip())
)
if ssize < param_size:
raise RuntimeError("value does not fit in field")
else:
defval = 0
reg_field = HDLRegisterField(
src_field,
self.bitfield_pos_to_slice(node.position),
node.access,
default_value=defval,
)
for prop in node.properties:
reg_field.add_properties(**{prop.name: prop.value})
self._registers[src_reg].add_fields(reg_field)
def visit_SourceBitAccessor(self, node):
"""Visit source bit accessor."""
return (node.register, node.bit)
def visit_TemplatedNameSubstFragment(self, node):
"""Visit fragments."""
return node.fragment + "".join(
[str(template) for template in node.templates]
)
def visit_TemplatedNameSubst(self, node):
"""Templated name."""
return "".join(node.fragments)
def visit_TemplatedNameDecl(self, node):
"""Visit templated name declaration."""
if len(node.fragments) > 1:
raise NotImplementedError
(fragment,) = node.fragments
if len(fragment.templates) > 1:
raise NotImplementedError
if not fragment.templates:
return fragment.fragment
return node
def visit_SignalSource(self, node):
"""Visit signal source."""
return node.dest
def visit_SignalDestination(self, node):
"""Visit signal destination."""
return node.dest
def visit_OutputDescriptor(self, node):
"""Visit output descriptor."""
self._visit_descriptor(node, "out")
def visit_InputDescriptor(self, node):
"""Visit input descriptor."""
self._visit_descriptor(node, "in")
def _visit_descriptor(self, node, direction):
"""Visit output/input descriptor."""
if direction not in ("in", "out"):
raise RuntimeError("invalid direction")
if isinstance(node.sig, str):
src_reg = node.sig
src_bit = None
elif isinstance(node.sig, tuple):
# SourceBitAccessor
src_reg, src_bit = node.sig
else:
# templated name
src_reg = node.sig
src_bit = None
if isinstance(node.name, str):
# simple declaration
if src_reg not in self._registers:
raise KeyError("invalid register: {}".format(src_reg))
src_reg = self._registers[src_reg]
if src_bit is not None and src_reg.has_field(src_bit) is False:
raise KeyError("invalid field: {}".format(src_bit))
port = FlagPort(src_reg, src_bit, direction, node.name)
if port.name in self._ports:
# warning, re-define
pass
self._ports[port.name] = port
else:
(fragment,) = node.name.fragments
try:
start, end = fragment.templates[0].rule.split("-")
            except Exception as exc:
                raise RuntimeError("error in fragment rule") from exc
for port in range(int(start), int(end) + 1):
fmt_str = "{{{}}}".format(
src_reg.fragments[0].templates[0].arg
)
_reg = src_reg.fragments[0].fragment + fmt_str.format(port)
if _reg not in self._registers:
raise KeyError('invalid register: "{}"'.format(_reg))
_reg = self._registers[_reg]
if src_bit is not None and _reg.has_field(src_bit) is False:
raise KeyError('invalid field: "{}"'.format(src_bit))
port = FlagPort(
_reg, src_bit, direction, fragment.fragment + str(port),
)
self._ports[port.name] = port
def visit_PositiveIntegerValue(self, node):
"""Visit a static value."""
if node.posint is not None:
return int(node.posint)
if node.hex is not None:
try:
if node.hex.startswith("0x"):
return int(node.hex[2:], 16)
else:
return int(node.hex, 16)
except ValueError:
# placeholder for syntax error
raise
def visit(self, node, param_replace=None):
"""Visit."""
self._replacement_values = (
{
name: HDLModuleParameter(
name, "integer", HDLIntegerConstant(value)
)
for name, value in param_replace.items()
}
if param_replace is not None
else {}
)
super().visit(node)
mmap = MemoryMappedInterface(self._reg_size, self._reg_addr_offset)
for register in self._registers.values():
mmap.add_register(register)
for port in self._ports.values():
mmap.add_port(port)
for param_name, param in self._parameters.items():
if param_name in self._replacement_values:
mmap.add_parameter(
param_name, self._replacement_values[param_name]
)
else:
mmap.add_parameter(param_name, param)
return mmap
|
|
"""A setuptools based setup module.
"""
from __future__ import print_function
import os
import fnmatch
import re
import sys
import subprocess
import yaml
# Always prefer setuptools over distutils
from setuptools import setup, Command
from setuptools_lint.setuptools_command import PylintCommand
from six import string_types
from six.moves import reload_module
from yamllint.config import YamlLintConfig
from yamllint.cli import Format
from yamllint import linter
def find_files(base_dir, exclude_dirs, include_dirs, file_regex):
''' find files matching file_regex '''
found = []
exclude_regex = ''
include_regex = ''
if exclude_dirs is not None:
exclude_regex = r'|'.join([fnmatch.translate(x) for x in exclude_dirs]) or r'$.'
# Don't use include_dirs, it is broken
if include_dirs is not None:
include_regex = r'|'.join([fnmatch.translate(x) for x in include_dirs]) or r'$.'
for root, dirs, files in os.walk(base_dir):
if exclude_dirs is not None:
# filter out excludes for dirs
dirs[:] = [d for d in dirs if not re.match(exclude_regex, d)]
if include_dirs is not None:
# filter for includes for dirs
dirs[:] = [d for d in dirs if re.match(include_regex, d)]
matches = [os.path.join(root, f) for f in files if re.search(file_regex, f) is not None]
found.extend(matches)
return found
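# Hedged usage sketch (illustrative call, not part of the original setup
# commands): collect every YAML file under the current tree while skipping a
# couple of directories, using the exclude list and the file regex.
#
#     yaml_files = find_files(os.getcwd(), ['.tox', '.git'], None, r'\.ya?ml$')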
def recursive_search(search_list, field):
"""
Takes a list with nested dicts, and searches all dicts for a key of the
field provided. If the items in the list are not dicts, the items are not
processed.
"""
fields_found = []
for item in search_list:
if isinstance(item, dict):
for key, value in item.items():
if key == field:
fields_found.append(value)
elif isinstance(value, list):
results = recursive_search(value, field)
for result in results:
fields_found.append(result)
return fields_found
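# Worked example (assumed data, for illustration only): recursive_search
# descends through nested lists of dicts and collects every value stored under
# the requested key, skipping items that are not dicts.
#
#     recursive_search([{'when': 'a', 'block': [{'when': 'b'}]}, 'skip'], 'when')
#     # -> ['a', 'b']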
def find_playbooks():
''' find Ansible playbooks'''
all_playbooks = set()
included_playbooks = set()
exclude_dirs = ('adhoc', 'tasks')
for yaml_file in find_files(
os.path.join(os.getcwd(), 'playbooks'),
exclude_dirs, None, r'\.ya?ml$'):
with open(yaml_file, 'r') as contents:
for task in yaml.safe_load(contents) or {}:
if not isinstance(task, dict):
# Skip yaml files which are not a dictionary of tasks
continue
if 'include' in task or 'import_playbook' in task:
# Add the playbook and capture included playbooks
all_playbooks.add(yaml_file)
if 'include' in task:
directive = task['include']
else:
directive = task['import_playbook']
included_file_name = directive.split()[0]
included_file = os.path.normpath(
os.path.join(os.path.dirname(yaml_file),
included_file_name))
included_playbooks.add(included_file)
elif 'hosts' in task:
all_playbooks.add(yaml_file)
return all_playbooks, included_playbooks
class OpenShiftAnsibleYamlLint(Command):
''' Command to run yamllint '''
description = "Run yamllint tests"
user_options = [
('excludes=', 'e', 'directories to exclude'),
('config-file=', 'c', 'config file to use'),
('format=', 'f', 'format to use (standard, parsable)'),
]
def initialize_options(self):
''' initialize_options '''
# Reason: Defining these attributes as a part of initialize_options is
# consistent with upstream usage
# Status: permanently disabled
# pylint: disable=attribute-defined-outside-init
self.excludes = None
self.config_file = None
self.format = None
def finalize_options(self):
''' finalize_options '''
# Reason: These attributes are defined in initialize_options and this
        # usage is consistent with upstream usage
# Status: permanently disabled
# pylint: disable=attribute-defined-outside-init
if isinstance(self.excludes, string_types):
self.excludes = self.excludes.split(',')
if self.format is None:
self.format = 'standard'
assert (self.format in ['standard', 'parsable']), (
'unknown format {0}.'.format(self.format))
if self.config_file is None:
self.config_file = '.yamllint'
assert os.path.isfile(self.config_file), (
'yamllint config file {0} does not exist.'.format(self.config_file))
def run(self):
''' run command '''
if self.excludes is not None:
print("Excludes:\n{0}".format(yaml.dump(self.excludes, default_flow_style=False)))
config = YamlLintConfig(file=self.config_file)
has_errors = False
has_warnings = False
if self.format == 'parsable':
format_method = Format.parsable
else:
format_method = Format.standard_color
for yaml_file in find_files(os.getcwd(), self.excludes, None, r'\.ya?ml$'):
first = True
with open(yaml_file, 'r') as contents:
for problem in linter.run(contents, config):
if first and self.format != 'parsable':
print('\n{0}:'.format(os.path.relpath(yaml_file)))
first = False
print(format_method(problem, yaml_file))
if problem.level == linter.PROBLEM_LEVELS[2]:
has_errors = True
elif problem.level == linter.PROBLEM_LEVELS[1]:
has_warnings = True
if has_errors or has_warnings:
print('yamllint issues found')
raise SystemExit(1)
class OpenShiftAnsiblePylint(PylintCommand):
''' Class to override the default behavior of PylintCommand '''
# Reason: This method needs to be an instance method to conform to the
# overridden method's signature
# Status: permanently disabled
# pylint: disable=no-self-use
def find_all_modules(self):
''' find all python files to test '''
exclude_dirs = ('.tox', 'utils', 'test', 'tests', 'git')
modules = []
for match in find_files(os.getcwd(), exclude_dirs, None, r'\.py$'):
package = os.path.basename(match).replace('.py', '')
modules.append(('openshift_ansible', package, match))
return modules
def get_finalized_command(self, cmd):
''' override get_finalized_command to ensure we use our
find_all_modules method '''
if cmd == 'build_py':
return self
# Reason: This method needs to be an instance method to conform to the
# overridden method's signature
# Status: permanently disabled
# pylint: disable=no-self-use
def with_project_on_sys_path(self, func, func_args, func_kwargs):
''' override behavior, since we don't need to build '''
return func(*func_args, **func_kwargs)
class OpenShiftAnsibleGenerateValidation(Command):
''' Command to run generated module validation'''
description = "Run generated module validation"
user_options = []
def initialize_options(self):
''' initialize_options '''
pass
def finalize_options(self):
''' finalize_options '''
pass
# self isn't used but I believe is required when it is called.
# pylint: disable=no-self-use
def run(self):
''' run command '''
# find the files that call generate
generate_files = find_files('roles',
['inventory',
'test',
'playbooks',
'utils'],
None,
'generate.py$')
if len(generate_files) < 1:
print('Did not find any code generation. Please verify module code generation.') # noqa: E501
raise SystemExit(1)
errors = False
for gen in generate_files:
print('Checking generated module code: {0}'.format(gen))
try:
sys.path.insert(0, os.path.dirname(gen))
# we are importing dynamically. This isn't in
# the python path.
# pylint: disable=import-error
import generate
reload_module(generate)
generate.verify()
except generate.GenerateAnsibleException as gae:
print(gae.args)
errors = True
if errors:
print('Found errors while generating module code.')
raise SystemExit(1)
print('\nAll generate scripts passed.\n')
class OpenShiftAnsibleSyntaxCheck(Command):
''' Command to run Ansible syntax check'''
description = "Run Ansible syntax check"
user_options = []
# Colors
FAIL = '\033[31m' # Red
ENDC = '\033[0m' # Reset
def initialize_options(self):
''' initialize_options '''
pass
def finalize_options(self):
''' finalize_options '''
pass
def deprecate_jinja2_in_when(self, yaml_contents, yaml_file):
''' Check for Jinja2 templating delimiters in when conditions '''
test_result = False
failed_items = []
search_results = recursive_search(yaml_contents, 'when')
for item in search_results:
if isinstance(item, str):
if '{{' in item or '{%' in item:
failed_items.append(item)
else:
for sub_item in item:
if '{{' in sub_item or '{%' in sub_item:
failed_items.append(sub_item)
if len(failed_items) > 0:
print('{}Error: Usage of Jinja2 templating delimiters in when '
'conditions is deprecated in Ansible 2.3.\n'
' File: {}'.format(self.FAIL, yaml_file))
for item in failed_items:
print(' Found: "{}"'.format(item))
print(self.ENDC)
test_result = True
return test_result
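    # Example of what this check flags (illustrative YAML, not from the repo):
    #
    #     when: "{{ openshift_version is defined }}"
    #
    # is reported because it contains Jinja2 delimiters, whereas the bare
    # expression form
    #
    #     when: openshift_version is defined
    #
    # passes.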
def deprecate_include(self, yaml_contents, yaml_file):
''' Check for usage of include directive '''
test_result = False
search_results = recursive_search(yaml_contents, 'include')
if len(search_results) > 0:
print('{}Error: The `include` directive is deprecated in Ansible 2.4.\n'
'https://github.com/ansible/ansible/blob/devel/CHANGELOG.md\n'
' File: {}'.format(self.FAIL, yaml_file))
for item in search_results:
print(' Found: "include: {}"'.format(item))
print(self.ENDC)
test_result = True
return test_result
def run(self):
''' run command '''
has_errors = False
print('#' * 60)
print('Ansible Deprecation Checks')
exclude_dirs = ('adhoc', 'files', 'meta', 'vars', 'defaults', '.tox')
for yaml_file in find_files(
os.getcwd(), exclude_dirs, None, r'\.ya?ml$'):
with open(yaml_file, 'r') as contents:
yaml_contents = yaml.safe_load(contents)
if not isinstance(yaml_contents, list):
continue
# Check for Jinja2 templating delimiters in when conditions
result = self.deprecate_jinja2_in_when(yaml_contents, yaml_file)
has_errors = result or has_errors
# Check for usage of include: directive
result = self.deprecate_include(yaml_contents, yaml_file)
has_errors = result or has_errors
if not has_errors:
print('...PASSED')
all_playbooks, included_playbooks = find_playbooks()
print('#' * 60)
print('Invalid Playbook Include Checks')
invalid_include = []
for playbook in included_playbooks:
# Ignore imported playbooks in 'common', 'private' and 'init'. It is
# expected that these locations would be imported by entry point
# playbooks.
# Ignore playbooks in 'aws', 'gcp' and 'openstack' because these
# playbooks do not follow the same component entry point structure.
# Ignore deploy_cluster.yml and prerequisites.yml because these are
# entry point playbooks but are imported by playbooks in the cloud
# provisioning playbooks.
ignored = ('common', 'private', 'init',
'aws', 'gcp', 'openstack',
'deploy_cluster.yml', 'prerequisites.yml')
if any(x in playbook for x in ignored):
continue
invalid_include.append(playbook)
if invalid_include:
print('{}Invalid included playbook(s) found. Please ensure'
' component entry point playbooks are not included{}'.format(self.FAIL, self.ENDC))
invalid_include.sort()
for playbook in invalid_include:
print('{}{}{}'.format(self.FAIL, playbook, self.ENDC))
has_errors = True
if not has_errors:
print('...PASSED')
print('#' * 60)
print('Ansible Playbook Entry Point Syntax Checks')
# Evaluate the difference between all playbooks and included playbooks
entrypoint_playbooks = sorted(all_playbooks.difference(included_playbooks))
print('Entry point playbook count: {}'.format(len(entrypoint_playbooks)))
for playbook in entrypoint_playbooks:
print('-' * 60)
print('Syntax checking playbook: {}'.format(playbook))
# Error on any entry points in 'common' or 'private'
invalid_entry_point = ('common', 'private')
if any(x in playbook for x in invalid_entry_point):
print('{}Invalid entry point playbook or orphaned file. Entry'
' point playbooks are not allowed in \'common\' or'
' \'private\' directories{}'.format(self.FAIL, self.ENDC))
has_errors = True
# --syntax-check each entry point playbook
try:
# Create a host group list to avoid WARNING on unmatched host patterns
tox_ansible_inv = os.environ['TOX_ANSIBLE_INV_PATH']
subprocess.check_output(
['ansible-playbook', '-i', tox_ansible_inv,
'--syntax-check', playbook, '-e', '@{}_extras'.format(tox_ansible_inv)]
)
except subprocess.CalledProcessError as cpe:
print('{}Execution failed: {}{}'.format(
self.FAIL, cpe, self.ENDC))
has_errors = True
if has_errors:
raise SystemExit(1)
class UnsupportedCommand(Command):
''' Basic Command to override unsupported commands '''
user_options = []
# Reason: This method needs to be an instance method to conform to the
# overridden method's signature
# Status: permanently disabled
# pylint: disable=no-self-use
def initialize_options(self):
''' initialize_options '''
pass
# Reason: This method needs to be an instance method to conform to the
# overridden method's signature
# Status: permanently disabled
# pylint: disable=no-self-use
def finalize_options(self):
''' initialize_options '''
pass
# Reason: This method needs to be an instance method to conform to the
# overridden method's signature
# Status: permanently disabled
# pylint: disable=no-self-use
def run(self):
''' run command '''
print("Unsupported command for openshift-ansible")
setup(
name='openshift-ansible',
license="Apache 2.0",
cmdclass={
'install': UnsupportedCommand,
'develop': UnsupportedCommand,
'build': UnsupportedCommand,
'build_py': UnsupportedCommand,
'build_ext': UnsupportedCommand,
'egg_info': UnsupportedCommand,
'sdist': UnsupportedCommand,
'lint': OpenShiftAnsiblePylint,
'yamllint': OpenShiftAnsibleYamlLint,
'generate_validation': OpenShiftAnsibleGenerateValidation,
'ansible_syntax': OpenShiftAnsibleSyntaxCheck,
},
packages=[],
)
|
|
# encoding: utf8
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
from __future__ import absolute_import
import logging
logger = logging.getLogger(__name__)
import msgpack
from time import time
from hashlib import md5
from twisted.internet.defer import Deferred
from twisted.internet.protocol import Protocol, Factory, connectionDone, \
ClientFactory
from twisted.python.failure import Failure
from twisted.python import log
from spyne import EventManager, Address, ServerBase, Application
from spyne.auxproc import process_contexts
from spyne.error import InternalError
class TwistedMessagePackProtocolFactory(Factory):
def __init__(self, tpt):
assert isinstance(tpt, ServerBase)
self.tpt = tpt
self.event_manager = EventManager(self)
def buildProtocol(self, address):
return TwistedMessagePackProtocol(self.tpt, factory=self)
TwistedMessagePackProtocolServerFactory = TwistedMessagePackProtocolFactory
class TwistedMessagePackProtocolClientFactory(ClientFactory):
def __init__(self, tpt, max_buffer_size=2 * 1024 * 1024):
assert isinstance(tpt, ServerBase), \
"%r is not a ServerBase instance" % tpt
self.tpt = tpt
self.max_buffer_size = max_buffer_size
self.event_manager = EventManager(self)
def buildProtocol(self, address):
return TwistedMessagePackProtocol(self.tpt,
max_buffer_size=self.max_buffer_size, factory=self)
def _cha(*args): return args
class TwistedMessagePackProtocol(Protocol):
def __init__(self, tpt, max_buffer_size=2 * 1024 * 1024, factory=None):
assert isinstance(tpt, ServerBase)
self.factory = factory
self._buffer = msgpack.Unpacker(max_buffer_size=max_buffer_size)
self.spyne_tpt = tpt
self.sessid = ''
self.sent_bytes = 0
self.recv_bytes = 0
def gen_sessid(self, *args):
"""It's up to you to use this in a subclass."""
retval = _cha(
Address.from_twisted_address(self.transport.getPeer()),
time(),
*args
)
return md5(repr(retval)).hexdigest()
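    # Hedged sketch (hypothetical subclass, not part of spyne): a subclass
    # would typically assign the session id once the connection is up, e.g.
    #
    #     class SessionedProtocol(TwistedMessagePackProtocol):
    #         def connectionMade(self):
    #             super(SessionedProtocol, self).connectionMade()
    #             self.sessid = self.gen_sessid()
    #
    # Note that on Python 3, md5() requires bytes, so repr(retval) would need
    # to be encoded before hashing.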
def connectionMade(self):
self.sent_bytes = 0
self.recv_bytes = 0
if self.factory is not None:
self.factory.event_manager.fire_event("connection_made", self)
def connectionLost(self, reason=connectionDone):
if self.factory is not None:
self.factory.event_manager.fire_event("connection_lost", self)
def dataReceived(self, data):
self._buffer.feed(data)
self.recv_bytes += len(data)
for msg in self._buffer:
self.process_incoming_message(msg)
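    # The streaming Unpacker is fed raw TCP chunks and only yields complete
    # msgpack messages, so partial frames are handled transparently.
    # Illustrative behaviour (assumed, using plain msgpack):
    #
    #     buf = msgpack.Unpacker()
    #     buf.feed(msgpack.packb([1, 2])[:1])   # partial frame
    #     list(buf)                             # -> [] (nothing complete yet)
    #     buf.feed(msgpack.packb([1, 2])[1:])   # remainder arrives
    #     list(buf)                             # -> [[1, 2]]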
def process_incoming_message(self, msg):
p_ctx, others = self.spyne_tpt.produce_contexts(msg)
p_ctx.transport.remote_addr = Address.from_twisted_address(
self.transport.getPeer())
p_ctx.transport.protocol = self
p_ctx.transport.sessid = self.sessid
self.process_contexts(p_ctx, others)
def transport_write(self, data):
self.sent_bytes += len(data)
self.transport.write(data)
def handle_error(self, p_ctx, others, exc):
self.spyne_tpt.get_out_string(p_ctx)
if isinstance(exc, InternalError):
error = self.spyne_tpt.OUT_RESPONSE_SERVER_ERROR
else:
error = self.spyne_tpt.OUT_RESPONSE_CLIENT_ERROR
data = p_ctx.out_document[0]
if isinstance(data, dict):
data = data.values()
out_string = msgpack.packb([
error, msgpack.packb(data),
])
self.transport_write(out_string)
p_ctx.transport.resp_length = len(out_string)
p_ctx.close()
try:
process_contexts(self, others, p_ctx, error=error)
except Exception as e:
# Report but ignore any exceptions from auxiliary methods.
logger.exception(e)
def process_contexts(self, p_ctx, others):
if p_ctx.in_error:
self.handle_error(p_ctx, others, p_ctx.in_error)
return
self.spyne_tpt.get_in_object(p_ctx)
if p_ctx.in_error:
logger.error(p_ctx.in_error)
self.handle_error(p_ctx, others, p_ctx.in_error)
return
self.spyne_tpt.get_out_object(p_ctx)
if p_ctx.out_error:
self.handle_error(p_ctx, others, p_ctx.out_error)
return
if len(p_ctx.descriptor.out_message._type_info) > 1:
ret = p_ctx.out_object
else:
ret = p_ctx.out_object[0]
if isinstance(ret, Deferred):
ret.addCallback(_cb_deferred, self, p_ctx, others)
ret.addErrback(_eb_deferred, self, p_ctx, others)
ret.addErrback(log.err)
else:
_cb_deferred(p_ctx.out_object, self, p_ctx, others, nowrap=True)
def _eb_deferred(retval, prot, p_ctx, others):
p_ctx.out_error = retval.value
tb = None
if isinstance(retval, Failure):
tb = retval.getTracebackObject()
retval.printTraceback()
p_ctx.out_error = InternalError(retval.value)
prot.handle_error(p_ctx, others, p_ctx.out_error)
prot.transport_write(''.join(p_ctx.out_string))
p_ctx.transport.resp_length = len(p_ctx.out_string)
prot.transport.loseConnection()
return Failure(p_ctx.out_error, p_ctx.out_error.__class__, tb)
def _cb_deferred(ret, prot, p_ctx, others, nowrap=False):
if len(p_ctx.descriptor.out_message._type_info) > 1 or nowrap:
p_ctx.out_object = ret
else:
p_ctx.out_object = [ret]
try:
prot.spyne_tpt.get_out_string(p_ctx)
prot.spyne_tpt.pack(p_ctx)
out_string = ''.join(p_ctx.out_string)
prot.transport_write(out_string)
p_ctx.transport.resp_length = len(out_string)
except Exception as e:
logger.exception(e)
prot.handle_error(p_ctx, others, InternalError(e))
finally:
p_ctx.close()
process_contexts(prot.spyne_tpt, others, p_ctx)
|
|
#!/usr/bin/env python
#
# GrovePi Library for using the Grove - Gesture Sensor v1.0(http://www.seeedstudio.com/depot/Grove-Gesture-p-2463.html)
#
# The GrovePi connects the Raspberry Pi and Grove sensors. You can learn more about GrovePi here: http://www.dexterindustries.com/GrovePi
#
# Have a question about this library? Ask on the forums here: http://www.dexterindustries.com/forum/?forum=grovepi
#
# History
# ------------------------------------------------
# Author Date Comments
# Karan 31 Dec 15 Initial Authoring
#
# Code derived from the basic Arduino library for the Gesture Sensor by Seeed: https://github.com/Seeed-Studio/Gesture_PAJ7620
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import time,sys
import RPi.GPIO as GPIO
import smbus
# use the bus that matches your raspi version
rev = GPIO.RPI_REVISION
if rev == 2 or rev == 3:
bus = smbus.SMBus(1)
else:
bus = smbus.SMBus(0)
class gesture:
#Registers and variables for the gesture sensor
GES_REACTION_TIME =.500 # You can adjust the reaction time according to the actual circumstance.
	GES_ENTRY_TIME =.800 # When you want to recognize the Forward/Backward gestures, your gestures' reaction time must be less than GES_ENTRY_TIME(0.8s).
GES_QUIT_TIME =1.000
BANK0 = 0
BANK1 = 1
PAJ7620_ADDR_BASE =0x00
#REGISTER BANK SELECT
PAJ7620_REGITER_BANK_SEL =(PAJ7620_ADDR_BASE + 0xEF) #W
#DEVICE ID
PAJ7620_ID =0x73
#REGISTER BANK 0
PAJ7620_ADDR_SUSPEND_CMD =(PAJ7620_ADDR_BASE + 0x3) #W
PAJ7620_ADDR_GES_PS_DET_MASK_0 =(PAJ7620_ADDR_BASE + 0x41) #RW
PAJ7620_ADDR_GES_PS_DET_MASK_1 =(PAJ7620_ADDR_BASE + 0x42) #RW
PAJ7620_ADDR_GES_PS_DET_FLAG_0 =(PAJ7620_ADDR_BASE + 0x43) #R
PAJ7620_ADDR_GES_PS_DET_FLAG_1 =(PAJ7620_ADDR_BASE + 0x44) #R
PAJ7620_ADDR_STATE_INDICATOR =(PAJ7620_ADDR_BASE + 0x45) #R
PAJ7620_ADDR_PS_HIGH_THRESHOLD =(PAJ7620_ADDR_BASE + 0x69) #RW
PAJ7620_ADDR_PS_LOW_THRESHOLD =(PAJ7620_ADDR_BASE + 0x6A) #RW
PAJ7620_ADDR_PS_APPROACH_STATE =(PAJ7620_ADDR_BASE + 0x6B) #R
PAJ7620_ADDR_PS_RAW_DATA =(PAJ7620_ADDR_BASE + 0x6C) #R
#REGISTER BANK 1
PAJ7620_ADDR_PS_GAIN =(PAJ7620_ADDR_BASE + 0x44) #RW
PAJ7620_ADDR_IDLE_S1_STEP_0 =(PAJ7620_ADDR_BASE + 0x67) #RW
PAJ7620_ADDR_IDLE_S1_STEP_1 =(PAJ7620_ADDR_BASE + 0x68) #RW
PAJ7620_ADDR_IDLE_S2_STEP_0 =(PAJ7620_ADDR_BASE + 0x69) #RW
PAJ7620_ADDR_IDLE_S2_STEP_1 =(PAJ7620_ADDR_BASE + 0x6A) #RW
PAJ7620_ADDR_OP_TO_S1_STEP_0 =(PAJ7620_ADDR_BASE + 0x6B) #RW
PAJ7620_ADDR_OP_TO_S1_STEP_1 =(PAJ7620_ADDR_BASE + 0x6C) #RW
PAJ7620_ADDR_OP_TO_S2_STEP_0 =(PAJ7620_ADDR_BASE + 0x6D) #RW
PAJ7620_ADDR_OP_TO_S2_STEP_1 =(PAJ7620_ADDR_BASE + 0x6E) #RW
PAJ7620_ADDR_OPERATION_ENABLE =(PAJ7620_ADDR_BASE + 0x72) #RW
#PAJ7620_REGITER_BANK_SEL
PAJ7620_BANK0=0
PAJ7620_BANK1=1
#PAJ7620_ADDR_SUSPEND_CMD
PAJ7620_I2C_WAKEUP =1
PAJ7620_I2C_SUSPEND =0
#PAJ7620_ADDR_OPERATION_ENABLE
PAJ7620_ENABLE=1
PAJ7620_DISABLE=0
#ADC, delete
REG_ADDR_RESULT = 0x00
REG_ADDR_ALERT = 0x01
REG_ADDR_CONFIG = 0x02
REG_ADDR_LIMITL = 0x03
REG_ADDR_LIMITH = 0x04
REG_ADDR_HYST = 0x05
REG_ADDR_CONVL = 0x06
REG_ADDR_CONVH = 0x07
GES_RIGHT_FLAG =1<<0
GES_LEFT_FLAG =1<<1
GES_UP_FLAG =1<<2
GES_DOWN_FLAG =1<<3
GES_FORWARD_FLAG =1<<4
GES_BACKWARD_FLAG =1<<5
GES_CLOCKWISE_FLAG =1<<6
GES_COUNT_CLOCKWISE_FLAG =1<<7
GES_WAVE_FLAG =1<<0
#Gesture output
FORWARD = 1
BACKWARD = 2
RIGHT = 3
LEFT = 4
UP = 5
DOWN = 6
CLOCKWISE = 7
ANTI_CLOCKWISE = 8
WAVE = 9
#Initial register state
initRegisterArray=( [0xEF,0x00],
[0x32,0x29],
[0x33,0x01],
[0x34,0x00],
[0x35,0x01],
[0x36,0x00],
[0x37,0x07],
[0x38,0x17],
[0x39,0x06],
[0x3A,0x12],
[0x3F,0x00],
[0x40,0x02],
[0x41,0xFF],
[0x42,0x01],
[0x46,0x2D],
[0x47,0x0F],
[0x48,0x3C],
[0x49,0x00],
[0x4A,0x1E],
[0x4B,0x00],
[0x4C,0x20],
[0x4D,0x00],
[0x4E,0x1A],
[0x4F,0x14],
[0x50,0x00],
[0x51,0x10],
[0x52,0x00],
[0x5C,0x02],
[0x5D,0x00],
[0x5E,0x10],
[0x5F,0x3F],
[0x60,0x27],
[0x61,0x28],
[0x62,0x00],
[0x63,0x03],
[0x64,0xF7],
[0x65,0x03],
[0x66,0xD9],
[0x67,0x03],
[0x68,0x01],
[0x69,0xC8],
[0x6A,0x40],
[0x6D,0x04],
[0x6E,0x00],
[0x6F,0x00],
[0x70,0x80],
[0x71,0x00],
[0x72,0x00],
[0x73,0x00],
[0x74,0xF0],
[0x75,0x00],
[0x80,0x42],
[0x81,0x44],
[0x82,0x04],
[0x83,0x20],
[0x84,0x20],
[0x85,0x00],
[0x86,0x10],
[0x87,0x00],
[0x88,0x05],
[0x89,0x18],
[0x8A,0x10],
[0x8B,0x01],
[0x8C,0x37],
[0x8D,0x00],
[0x8E,0xF0],
[0x8F,0x81],
[0x90,0x06],
[0x91,0x06],
[0x92,0x1E],
[0x93,0x0D],
[0x94,0x0A],
[0x95,0x0A],
[0x96,0x0C],
[0x97,0x05],
[0x98,0x0A],
[0x99,0x41],
[0x9A,0x14],
[0x9B,0x0A],
[0x9C,0x3F],
[0x9D,0x33],
[0x9E,0xAE],
[0x9F,0xF9],
[0xA0,0x48],
[0xA1,0x13],
[0xA2,0x10],
[0xA3,0x08],
[0xA4,0x30],
[0xA5,0x19],
[0xA6,0x10],
[0xA7,0x08],
[0xA8,0x24],
[0xA9,0x04],
[0xAA,0x1E],
[0xAB,0x1E],
[0xCC,0x19],
[0xCD,0x0B],
[0xCE,0x13],
[0xCF,0x64],
[0xD0,0x21],
[0xD1,0x0F],
[0xD2,0x88],
[0xE0,0x01],
[0xE1,0x04],
[0xE2,0x41],
[0xE3,0xD6],
[0xE4,0x00],
[0xE5,0x0C],
[0xE6,0x0A],
[0xE7,0x00],
[0xE8,0x00],
[0xE9,0x00],
[0xEE,0x07],
[0xEF,0x01],
[0x00,0x1E],
[0x01,0x1E],
[0x02,0x0F],
[0x03,0x10],
[0x04,0x02],
[0x05,0x00],
[0x06,0xB0],
[0x07,0x04],
[0x08,0x0D],
[0x09,0x0E],
[0x0A,0x9C],
[0x0B,0x04],
[0x0C,0x05],
[0x0D,0x0F],
[0x0E,0x02],
[0x0F,0x12],
[0x10,0x02],
[0x11,0x02],
[0x12,0x00],
[0x13,0x01],
[0x14,0x05],
[0x15,0x07],
[0x16,0x05],
[0x17,0x07],
[0x18,0x01],
[0x19,0x04],
[0x1A,0x05],
[0x1B,0x0C],
[0x1C,0x2A],
[0x1D,0x01],
[0x1E,0x00],
[0x21,0x00],
[0x22,0x00],
[0x23,0x00],
[0x25,0x01],
[0x26,0x00],
[0x27,0x39],
[0x28,0x7F],
[0x29,0x08],
[0x30,0x03],
[0x31,0x00],
[0x32,0x1A],
[0x33,0x1A],
[0x34,0x07],
[0x35,0x07],
[0x36,0x01],
[0x37,0xFF],
[0x38,0x36],
[0x39,0x07],
[0x3A,0x00],
[0x3E,0xFF],
[0x3F,0x00],
[0x40,0x77],
[0x41,0x40],
[0x42,0x00],
[0x43,0x30],
[0x44,0xA0],
[0x45,0x5C],
[0x46,0x00],
[0x47,0x00],
[0x48,0x58],
[0x4A,0x1E],
[0x4B,0x1E],
[0x4C,0x00],
[0x4D,0x00],
[0x4E,0xA0],
[0x4F,0x80],
[0x50,0x00],
[0x51,0x00],
[0x52,0x00],
[0x53,0x00],
[0x54,0x00],
[0x57,0x80],
[0x59,0x10],
[0x5A,0x08],
[0x5B,0x94],
[0x5C,0xE8],
[0x5D,0x08],
[0x5E,0x3D],
[0x5F,0x99],
[0x60,0x45],
[0x61,0x40],
[0x63,0x2D],
[0x64,0x02],
[0x65,0x96],
[0x66,0x00],
[0x67,0x97],
[0x68,0x01],
[0x69,0xCD],
[0x6A,0x01],
[0x6B,0xB0],
[0x6C,0x04],
[0x6D,0x2C],
[0x6E,0x01],
[0x6F,0x32],
[0x71,0x00],
[0x72,0x01],
[0x73,0x35],
[0x74,0x00],
[0x75,0x33],
[0x76,0x31],
[0x77,0x01],
[0x7C,0x84],
[0x7D,0x03],
[0x7E,0x01])
#Enable debug message
debug=0
#Initialize the sensors
def init(self):
time.sleep(.001)
self.paj7620SelectBank(self.BANK0)
self.paj7620SelectBank(self.BANK0)
data0 = self.paj7620ReadReg(0, 1)[0]
data1 = self.paj7620ReadReg(1, 1)[0]
if self.debug:
print "data0:",data0,"data1:",data1
		if data0 != 0x20:  # or data1 != 0x76:
print "Error with sensor"
#return 0xff
if data0 == 0x20:
print "wake-up finish."
for i in range(len(self.initRegisterArray)):
self.paj7620WriteReg(self.initRegisterArray[i][0],self.initRegisterArray[i][1])
self.paj7620SelectBank(self.BANK0)
print "Paj7620 initialize register finished."
#Write a byte to a register on the Gesture sensor
def paj7620WriteReg(self,addr,cmd):
bus.write_word_data(self.PAJ7620_ID, addr, cmd)
#Select a register bank on the Gesture Sensor
def paj7620SelectBank(self,bank):
if bank==self.BANK0:
self.paj7620WriteReg(self.PAJ7620_REGITER_BANK_SEL, self.PAJ7620_BANK0)
#Read a block of bytes of length "qty" starting at address "addr" from the Gesture sensor
def paj7620ReadReg(self,addr,qty):
return bus.read_i2c_block_data(self.PAJ7620_ID, addr,qty)
#Print the values from the gesture sensor
def print_gesture(self):
data=self.paj7620ReadReg(0x43,1)[0]
if data==self.GES_RIGHT_FLAG:
time.sleep(self.GES_ENTRY_TIME)
data=self.paj7620ReadReg(0x43, 1)[0]
if data == self.GES_FORWARD_FLAG:
print "Forward"
time.sleep(self.GES_QUIT_TIME)
elif data == self.GES_BACKWARD_FLAG:
print "Backward"
time.sleep(self.GES_QUIT_TIME)
else:
print "Right"
elif data==self.GES_LEFT_FLAG:
time.sleep(self.GES_ENTRY_TIME)
data=self.paj7620ReadReg(0x43, 1)[0]
if data == self.GES_FORWARD_FLAG:
print "Forward"
time.sleep(self.GES_QUIT_TIME)
elif data == self.GES_BACKWARD_FLAG:
print "Backward"
time.sleep(self.GES_QUIT_TIME)
else:
print "Left"
elif data==self.GES_UP_FLAG:
time.sleep(self.GES_ENTRY_TIME)
data=self.paj7620ReadReg(0x43, 1)[0]
if data == self.GES_FORWARD_FLAG:
print "Forward"
time.sleep(self.GES_QUIT_TIME)
elif data == self.GES_BACKWARD_FLAG:
print "Backward"
time.sleep(self.GES_QUIT_TIME)
else:
print "Up"
elif data==self.GES_DOWN_FLAG:
time.sleep(self.GES_ENTRY_TIME)
data=self.paj7620ReadReg(0x43, 1)[0]
if data == self.GES_FORWARD_FLAG:
print "Forward"
time.sleep(self.GES_QUIT_TIME)
elif data == self.GES_BACKWARD_FLAG:
print "Backward"
time.sleep(self.GES_QUIT_TIME)
else:
print "Down"
elif data==self.GES_FORWARD_FLAG:
print "Forward"
time.sleep(self.GES_QUIT_TIME)
elif data==self.GES_BACKWARD_FLAG:
print "Backward"
time.sleep(self.GES_QUIT_TIME)
elif data==self.GES_CLOCKWISE_FLAG:
print "Clockwise"
elif data==self.GES_COUNT_CLOCKWISE_FLAG:
print "anti-clockwise"
else:
data1=self.paj7620ReadReg(0x44, 1)[0]
if (data1 == self.GES_WAVE_FLAG):
print "wave"
	#Return a value from the gesture sensor which can be used in a program
# 0:nothing
# 1:Forward
# 2:Backward
# 3:Right
# 4:Left
# 5:Up
# 6:Down
# 7:Clockwise
# 8:anti-clockwise
# 9:wave
def return_gesture(self):
data=self.paj7620ReadReg(0x43,1)[0]
if data==self.GES_RIGHT_FLAG:
time.sleep(self.GES_ENTRY_TIME)
data=self.paj7620ReadReg(0x43, 1)[0]
if data == self.GES_FORWARD_FLAG:
return 1
time.sleep(self.GES_QUIT_TIME)
elif data == self.GES_BACKWARD_FLAG:
return 2
time.sleep(self.GES_QUIT_TIME)
else:
return 3
elif data==self.GES_LEFT_FLAG:
time.sleep(self.GES_ENTRY_TIME)
data=self.paj7620ReadReg(0x43, 1)[0]
if data == self.GES_FORWARD_FLAG:
return 1
time.sleep(self.GES_QUIT_TIME)
elif data == self.GES_BACKWARD_FLAG:
return 2
time.sleep(self.GES_QUIT_TIME)
else:
return 4
elif data==self.GES_UP_FLAG:
time.sleep(self.GES_ENTRY_TIME)
data=self.paj7620ReadReg(0x43, 1)[0]
if data == self.GES_FORWARD_FLAG:
return 1
time.sleep(self.GES_QUIT_TIME)
elif data == self.GES_BACKWARD_FLAG:
return 2
time.sleep(self.GES_QUIT_TIME)
else:
return 5
elif data==self.GES_DOWN_FLAG:
time.sleep(self.GES_ENTRY_TIME)
data=self.paj7620ReadReg(0x43, 1)[0]
if data == self.GES_FORWARD_FLAG:
return 1
time.sleep(self.GES_QUIT_TIME)
elif data == self.GES_BACKWARD_FLAG:
return 2
time.sleep(self.GES_QUIT_TIME)
else:
return 6
elif data==self.GES_FORWARD_FLAG:
return 1
time.sleep(self.GES_QUIT_TIME)
elif data==self.GES_BACKWARD_FLAG:
return 2
time.sleep(self.GES_QUIT_TIME)
elif data==self.GES_CLOCKWISE_FLAG:
return 7
elif data==self.GES_COUNT_CLOCKWISE_FLAG:
return 8
else:
data1=self.paj7620ReadReg(0x44, 1)[0]
if (data1 == self.GES_WAVE_FLAG):
return 9
return 0
if __name__ == "__main__":
g=gesture()
g.init()
while True:
g.print_gesture()
time.sleep(.1)
# print g.return_gesture()
# time.sleep(.1)
|
|
import datetime
import inspect
import os
import subprocess
import sys
import textwrap
import time
import uuid
import varlink
if sys.version_info[0] == 2:
raise ImportError("The mock module isn't compatible with python 2")
def cast_type(typeof):
cast = {'str': 'string'}
typeof = str(typeof).replace("<class '", "").replace("'>", "")
return cast.get(typeof, typeof)
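# Small sketch of the annotation-to-varlink mapping (illustrative): the
# built-in ``str`` annotation is rewritten to the varlink ``string`` type,
# while annotations without an entry in the cast table pass through unchanged.
#
#     cast_type(str)    # -> 'string'
#     cast_type(int)    # -> 'int'
#     cast_type(float)  # -> 'float'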
def get_ignored():
ignore = dir(MockedService)
return ignore
def get_interface_attributs(interface, ignored):
attributs = {"callables": [], "others": []}
for attr in dir(interface):
if attr in ignored:
continue
attribut = getattr(interface, attr)
if callable(attribut):
attributs["callables"].append(attr)
else:
attributs["others"].append(attr)
return attributs
def generate_callable_interface(interface, attr):
attribut = getattr(interface, attr)
signature = inspect.signature(attribut)
params = signature.parameters.values()
sign = []
for param in params:
if param.name == "self":
continue
typeof = param.annotation
sign.append("{}: {}".format(param.name, cast_type(typeof)))
returned = signature.return_annotation
if returned:
returned = cast_type(returned)
doc = attribut.__doc__
if not doc:
raise ValueError(
"docstring format must be:"
"return name: type")
doc = doc.replace("return ", "")
if ":" in doc:
returned = doc
else:
returned = "{}: {}".format(doc, returned)
else:
returned = ""
return "method {name}({signature}) -> ({returned})".format(
name=attr,
signature=",".join(sign),
returned=returned
)
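# Sketch of the generated varlink method description (hypothetical service
# class, for illustration): annotations become parameter types and the
# docstring supplies the return clause.
#
#     class Demo:
#         def Ping(self, msg: str) -> dict:
#             """return pong: string"""
#             return {"pong": msg}
#
#     generate_callable_interface(Demo, "Ping")
#     # -> 'method Ping(msg: string) -> (pong: string)'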
class MockedServiceProcess():
address = None
vendor = None
product = None
version = None
url = None
interface = None
interface_file = None
interface_name = None
interface_content = None
service_to_mock = None
def run(self):
mocked_service = varlink.Service(
vendor=self.vendor,
product=self.product,
version=self.version,
url=self.url)
instanciated_service = self.service_to_mock()
mocked_service._set_interface(
self.interface_file,
instanciated_service)
class ServiceRequestHandler(varlink.RequestHandler):
service = mocked_service
self.varlink_server = varlink.ThreadingServer(
self.address, ServiceRequestHandler)
self.varlink_server.serve_forever()
def service_generator(service, info, filename="mockedservice.py"):
with open(filename, "w+") as pyfp:
pyfp.write(textwrap.dedent("""\
'''
Generated by varlink mocking system
{datetime}
Only for testing purpose and unit testing
'''
""".format(datetime=datetime.datetime.now())))
pyfp.write("import varlink\n\n")
pyfp.write(inspect.getsource(service))
pyfp.write("\n\n")
pyfp.write(inspect.getsource(MockedServiceProcess))
pyfp.write("\n\n")
pyfp.write("if __name__ == '__main__':\n")
pyfp.write(" msp = MockedServiceProcess()\n")
for key, value in info.items():
surround = "'"
if value["type"] == "raw":
surround = ""
pyfp.write(" msp.{key} = {surround}{value}{surround}\n".format(
key=key, value=value["value"], surround=surround))
pyfp.write(" msp.run()\n")
def mockedservice(fake_service=None, fake_types=None,
address='unix:@test', name=None,
vendor='varlink', product='mock', version=1,
url='http://localhost'):
"""
Varlink mocking service
    To mock a fake service and merely test your varlink client against it.
    The mocking feature is for testing purposes: it allows you to test your
    varlink client against a fake service which returns the hand-crafted
    results defined in the object being mocked.
Example:
>>> import unittest
>>> from varlink import mock
>>> import varlink
>>>
>>>
>>> types = '''
>>> type MyPersonalType (
>>> foo: string,
>>> bar: string,
>>> )
>>> '''
>>>
>>>
>>> class Service():
>>>
>>> def Test1(self, param1: int) -> dict:
>>> '''
>>> return test: MyPersonalType
>>> '''
>>> return {
>>> "test": {
>>> "foo": "bim",
>>> "bar": "boom"
>>> }
>>> }
>>>
>>> def Test2(self, param1: str) -> dict:
>>> '''
>>> return (test: string)
>>> '''
>>> return {"test": param1}
>>>
>>> def Test3(self, param1: int) -> dict:
>>> '''
>>> return (test: int, boom: string, foo: string, bar: 42)
>>> '''
>>> return {
>>> "test": param1 * 2,
>>> "boom": "foo",
>>> "foo": "bar",
>>> "bar": 42,
>>> }
>>>
>>>
>>> class TestMyClientWithMockedService(unittest.TestCase):
>>>
>>> @mock.mockedservice(
>>> fake_service=Service,
>>> fake_types=types,
>>> name='org.service.com',
>>> address='unix:@foo'
>>> )
>>> def test_my_client_against_a_mock(self):
>>> with varlink.Client("unix:@foo") as client:
>>> connection = client.open('org.service.com')
>>> self.assertEqual(
>>> connection.Test1(param1=1)["test"]["bar"], "boom")
>>> self.assertEqual(
>>> connection.Test2(param1="foo")["test"], "foo")
>>> self.assertEqual(
>>> connection.Test3(param1=6)["test"], 12)
>>> self.assertEqual(
>>> connection.Test3(param1=6)["bar"], 42)
    First define a sample class and pass it to the `mock.mockedservice`
    decorator; a service is then initialized and launched automatically.
    After that, connect your client to it, open the connection, and call
    your methods to get the expected results.
    You can also mock types, which helps when mocking more complex services
    and interfaces (podman, for example).
    The return type can be defined in the method docstring, as shown by
    the method Test1 in the example above.
    The mocking module is only compatible with Python 3 or higher because
    it requires annotations to generate the interface description.
    If you try to use it with Python 2.x it will raise an ``ImportError``.
"""
def decorator(func):
def wrapper(*args, **kwargs):
with MockedService(fake_service, fake_types, name=name,
address=address):
try:
func(*args, **kwargs)
except BrokenPipeError:
                    # manage fake service stopping
pass
return
return wrapper
return decorator
class MockedService():
def __init__(self, service, types, address='unix:@test', name=None,
vendor='varlink', product='mock', version=1,
url='http://localhost'):
if not name:
module = service.__module__
try:
self.name = os.path.splitext(module)[1].replace('.', '')
except IndexError:
self.name = module
else:
self.name = name
self.identifier = str(uuid.uuid4())
self.interface_description = None
self.service = service
self.types = types
self.address = address
self.vendor = vendor
self.product = product
self.version = version
self.url = url
self.service_info = {
"address": {'type': 'inherited', 'value': address},
"vendor": {'type': 'inherited', 'value': vendor},
"product": {'type': 'inherited', 'value': product},
"version": {'type': 'raw', 'value': version},
"url": {'type': 'inherited', 'value': url},
"interface_name": {'type': 'inherited', 'value': self.name},
"interface_file": {
'type': 'inherited',
'value': self.get_interface_file_path()},
"service_to_mock": {'type': 'raw', 'value': service.__name__},
}
self.generate_interface()
def generate_interface(self):
ignore = get_ignored()
self.interface_description = ["interface {}".format(self.name)]
if self.types:
for line in self.types.split("\n"):
self.interface_description.append(line)
attributs = get_interface_attributs(self.service, ignore)
for attr in attributs["callables"]:
self.interface_description.append(generate_callable_interface(
self.service, attr))
def get_interface_file_path(self):
return "/tmp/{}".format(self.name)
def generate_interface_file(self):
tfp = open(self.get_interface_file_path(), "w+")
tfp.write("\n".join(self.interface_description))
tfp.close()
def delete_interface_files(self):
os.remove(self.get_interface_file_path())
os.remove(self.mocked_service_file)
def service_start(self):
self.service_pid = subprocess.Popen(
[sys.executable, self.mocked_service_file]
)
time.sleep(2)
def service_stop(self):
self.service_pid.kill()
self.service_pid.communicate()
def __enter__(self):
self.mocked_service_file = "/tmp/{}".format(self.identifier)
service_generator(
self.service, self.service_info,
filename=self.mocked_service_file)
self.generate_interface_file()
self.service_start()
return self
def __exit__(self, type, value, traceback):
self.service_stop()
self.delete_interface_files()
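# Illustrative usage sketch: ``MockedService`` can also be used directly as a
# context manager, without the decorator, assuming the same ``Service`` class
# and ``types`` definition as in the docstring example above:
#
#     with MockedService(Service, types, name='org.service.com',
#                        address='unix:@foo'):
#         with varlink.Client("unix:@foo") as client:
#             connection = client.open('org.service.com')
#             print(connection.Test2(param1="foo"))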
|
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from rally.common.i18n import _
from rally.common.plugin import discover
_exception_map = None
class RallyException(Exception):
"""Base Rally Exception
To correctly use this class, inherit from it and define
a "msg_fmt" property. That msg_fmt will get printf'd
with the keyword arguments provided to the constructor.
"""
msg_fmt = _("%(message)s")
error_code = 500
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if "%(message)s" in self.msg_fmt:
kwargs.update({"message": message})
super(RallyException, self).__init__(self.msg_fmt % kwargs)
def format_message(self):
return six.text_type(self)
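# Usage sketch (illustrative values): subclasses only override ``error_code``
# and ``msg_fmt``; keyword arguments passed to the constructor are interpolated
# into the template:
#
#     >>> str(RallyException("boom"))
#     'boom'
#     >>> str(InvalidArgumentsException(message="bad flag"))
#     "Invalid arguments: 'bad flag'"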
def find_exception(response):
"""Discover a proper exception class based on response object."""
global _exception_map
if _exception_map is None:
_exception_map = dict(
(e.error_code, e) for e in discover.itersubclasses(RallyException))
exc_class = _exception_map.get(response.status_code, RallyException)
error_data = response.json()["error"]
if error_data["args"]:
return exc_class(error_data["args"])
return exc_class(error_data["msg"])
def make_exception(exc):
"""Check a class of exception and convert it to rally-like if needed."""
if isinstance(exc, RallyException):
return exc
return RallyException(str(exc))
class InvalidArgumentsException(RallyException):
error_code = 455
msg_fmt = _("Invalid arguments: '%(message)s'")
class InvalidConfigException(RallyException):
error_code = 456
msg_fmt = _("This config has invalid schema: `%(message)s`")
class InvalidTaskException(InvalidConfigException):
error_code = 457
msg_fmt = _("Task config is invalid: `%(message)s`")
class InvalidTaskConfig(InvalidTaskException):
error_code = 458
msg_fmt = _("Input task is invalid!\n\n"
"Subtask %(name)s[%(pos)s] has wrong configuration"
"\nSubtask configuration:\n%(config)s\n"
"\nReason(s):\n %(reason)s")
class NotFoundException(RallyException):
error_code = 404
msg_fmt = _("The resource can not be found: %(message)s")
class ThreadTimeoutException(RallyException):
error_code = 515
msg_fmt = _("Iteration interrupted due to timeout.")
class PluginNotFound(NotFoundException):
error_code = 459
msg_fmt = _("There is no plugin with name: `%(name)s` in "
"%(platform)s platform.")
class PluginWithSuchNameExists(RallyException):
error_code = 516
msg_fmt = _("Plugin with such name: %(name)s already exists in "
"%(platform)s platform. It's module allocates at "
"%(existing_path)s. You are trying to add plugin whose module "
"allocates at %(new_path)s.")
class TaskNotFound(NotFoundException):
error_code = 460
msg_fmt = _("Task with uuid=%(uuid)s not found.")
class DeploymentNotFound(NotFoundException):
error_code = 461
msg_fmt = _("Deployment %(deployment)s not found.")
class DeploymentNameExists(RallyException):
error_code = 462
msg_fmt = _("Deployment name '%(deployment)s' already registered.")
class DeploymentNotFinishedStatus(RallyException):
error_code = 463
msg_fmt = _("Deployment '%(name)s' (UUID=%(uuid)s) is in"
" '%(status)s' status.")
class DeploymentIsBusy(RallyException):
error_code = 464
msg_fmt = _("There are allocated resources for the deployment with "
"uuid=%(uuid)s.")
class RallyAssertionError(RallyException):
msg_fmt = _("Assertion error: %(message)s")
class ResourceNotFound(NotFoundException):
error_code = 465
msg_fmt = _("Resource with id=%(id)s not found.")
class TimeoutException(RallyException):
error_code = 517
msg_fmt = _("Rally tired waiting for %(resource_type)s %(resource_name)s:"
"%(resource_id)s to become %(desired_status)s current "
"status %(resource_status)s")
class GetResourceFailure(RallyException):
error_code = 518
msg_fmt = _("Failed to get the resource %(resource)s: %(err)s")
class GetResourceNotFound(GetResourceFailure):
error_code = 519
msg_fmt = _("Resource %(resource)s is not found.")
class GetResourceErrorStatus(GetResourceFailure):
error_code = 520
msg_fmt = _("Resource %(resource)s has %(status)s status.\n"
"Fault: %(fault)s")
class ScriptError(RallyException):
msg_fmt = _("Script execution failed: %(message)s")
class TaskInvalidStatus(RallyException):
error_code = 466
msg_fmt = _("Task `%(uuid)s` in `%(actual)s` status but `%(require)s` is "
"required.")
class InvalidAdminException(InvalidArgumentsException):
error_code = 521
msg_fmt = _("user '%(username)s' doesn't have 'admin' role")
class AuthenticationFailed(InvalidArgumentsException):
error_code = 401
msg_fmt = _("Failed to authenticate to %(url)s for user '%(username)s'"
" in project '%(project)s': %(etype)s: %(error)s")
class InvalidScenarioArgument(RallyException):
error_code = 467
msg_fmt = _("Invalid scenario argument: '%(message)s'")
class ContextSetupFailure(RallyException):
error_code = 524
msg_fmt = _("Unable to setup context '%(ctx_name)s': '%(msg)s'")
class ValidationError(RallyException):
error_code = 468
msg_fmt = _("Validation error: %(message)s")
class WorkerNotFound(NotFoundException):
error_code = 469
msg_fmt = _("Worker %(worker)s could not be found")
class WorkerAlreadyRegistered(RallyException):
error_code = 525
msg_fmt = _("Worker %(worker)s already registered")
class MultipleMatchesFound(RallyException):
error_code = 470
msg_fmt = _("Found multiple %(needle)s: %(haystack)s")
def __init__(self, **kwargs):
if "hint" in kwargs:
self.msg_fmt += ". Hint: %(hint)s"
super(MultipleMatchesFound, self).__init__(**kwargs)
class SSHTimeout(RallyException):
error_code = 526
pass
class SSHError(RallyException):
error_code = 527
pass
class InvalidConnectionString(RallyException):
error_code = 471
msg_fmt = _("The connection string is not valid: %(message)s. Please "
"check your connection string.")
class DowngradeNotSupported(RallyException):
error_code = 528
msg_fmt = _("Database schema downgrade is not supported.")
|
|
config = {
"name": "Planetoids", # plugin name
"type": "generator", #plugin type
"description": ["Planetoids & Terra"] #description
}
import sys
if __name__ == "__main__":
sys.path.extend(["."])
import os
os.chdir("..")
del (os)
from math import cos, sin, pi
from collections import defaultdict
from random import *
import pygame
from omnitool.loadbar import Bar
from omnitool.database import itemlist, names, tiles, ntiles
from omnitool.tinterface import *
from .planetoids_lib import terragui
from .planetoids_lib.tree import make_tree
from .planetoids_lib.terradata import *
class Generator():
def __init__(self):
itemlist["Life Crystal"] = 1
itemlist["Fallen Star"] = 10
itemlist["Wood"] = 100
itemlist['Swiftness Potion'] = 5
itemlist['Battle Potion'] = 5
itemlist['Shine Potion'] = 5
itemlist['Gravitation Potion'] = 5
itemlist['Water Walking Potion'] = 5
itemlist['Invisibility Potion'] = 5
itemlist['Night Owl Potion'] = 5
itemlist['Magic Power Potion'] = 5
itemlist['Thorns Potion'] = 5
itemlist['Mana Regeneration Potion'] = 5
itemlist['Archery Potion'] = 5
itemlist['Hunter Potion'] = 5
itemlist['Restoration Potion'] = 5
itemlist['Lesser Healing Potion'] = 5
itemlist['Featherfall Potion'] = 5
itemlist['Obsidian Skin Potion'] = 5
itemlist['Spelunker Potion'] = 5
itemlist['Ironskin Potion'] = 5
itemlist['Gold Bar'] = 10
itemlist['Meteorite Bar'] = 10
itemlist['Silver Bar'] = 10
itemlist['Iron Bar'] = 10
itemlist['Copper Bar'] = 10
itemlist["Meteorite"] = 30
def run(self):
# print ("Welcome to the Planetoids & Terra World Generator V12")
is_exe = hasattr(sys, "frozen")
terramode = False
if is_exe:
import os
path = os.path.dirname((sys.executable))
sys.path = [path] + sys.path
def draw_chasm(sur, pos, rmin, rmax, amin, amax):
points = [
(int(pos[0] + rmin * cos(amin)), int(pos[1] + rmin * sin(amin))),
(int(pos[0] + rmin * cos((amin + amax) / 2)), int(pos[1] + rmin * sin((amin + amax) / 2))),
(int(pos[0] + rmin * cos(amax)), int(pos[1] + rmin * sin(amax))),
(int(pos[0] + rmax * cos(amax)), int(pos[1] + rmax * sin(amax))),
(int(pos[0] + rmax * cos((amin + amax) / 2)), int(pos[1] + rmax * sin((amin + amax) / 2))),
(int(pos[0] + rmax * cos(amin)), int(pos[1] + rmax * sin(amin)))]
pygame.draw.polygon(sur, (233, 233, 233), points)
steps = 70
pygame.draw.circle(sur, (23, 23, 23), points[-1], 8)
pygame.draw.circle(sur, (23, 23, 23), points[3], 8)
orb = randint(steps // 2, steps)
for x in range(steps + 1):
x = float(x)
cpos = (int(points[0][0] * x / steps + points[-1][0] * (steps - x) / steps),
int(points[0][1] * x / steps + points[-1][1] * (steps - x) / steps))
ra = randint(4, 8) #
#pygame.draw.circle(sur, (32,32,32), cpos, ra, 4) #vines
pygame.draw.circle(sur, (23, 23, 23), cpos, ra, 2) #grass
pygame.draw.circle(sur, (25, 25, 25), cpos, ra) #ebonstone
cpos2 = (int(points[2][0] * x / steps + points[3][0] * (steps - x) / steps),
int(points[2][1] * x / steps + points[3][1] * (steps - x) / steps))
ra = randint(4, 8)
#pygame.draw.circle(sur, (32,32,32), cpos2, ra, 4) #vines
pygame.draw.circle(sur, (23, 23, 23), cpos2, ra, 2) #grass
pygame.draw.circle(sur, (25, 25, 25), cpos2, ra) #ebonstone
if x == orb:
cpos = (cpos[0] + cpos2[0]) // 2, (cpos[1] + cpos2[1]) // 2
pygame.draw.circle(sur, (25, 25, 25), cpos, 3) #ebonstone
sur.blit(multis["shadoworb"], (cpos[0] - 1, cpos[1] - 1))
return sur
d = terragui.run(None)
        if d is not False:
name, mode, starttype, sun, atlantis, merch, loot, hard, mirrored, pre = d
sizetype = mode[0]
terramode = mode[1]
else:
hard = 0
name = "Planetoids"
## ask the user what kind of world he/she wants.
print("Select world type")
print("Terra mode only available on large and square")
print("1: planetoids")
print("2: terra & planetoids; implies large world size")
print("3: terra; implies square world size")
valid = [1, 2, 3]
terramode = 0
while terramode not in valid:
try:
terramode = int(input("World type:"))
except:
pass
if terramode not in valid:
print("Please put in 1,2, or 3 and then hit enter, cant be that hard, right?")
print("")
terramode -= 1
if not terramode:
print("Select world size")
print("1: small (4200 x 1200)")
print("2: medium (6300 x 1800)")
print("3: large (8400 x 2400)")
print("4: square (2400 x 2400)")
valid = [1, 2, 3, 4]
sizetype = 0
while sizetype not in valid:
try:
                        sizetype = int(input("World size:"))
except:
pass
if sizetype not in valid:
print("Please put in 1,2,3 or 4 and then hit enter, cant be that hard, right?")
print("")
valid = [1, 2, 3, 4]
print("Select start condition")
print("1: Day (Standard Terraria)")
print("2: Morning")
print("3: Night")
print("4: Bloodmoon")
starttype = 0
while starttype not in valid:
try:
                    starttype = int(input("Start condition:"))
except:
pass
if starttype not in valid:
print("Please input 1,2,3 or 4, then hit enter.")
print("")
valid = [1, 2, 3, 4]
print("Select extra difficulty, you may select multiple by entering multiple numbers.")
print("By entering nothing you play normal")
print("1: Darkness! I dont need a puny sun!")
print("2: Less loot! I want to keep exploring!")
print("3: Atlantis, I want to conquer the world from my sunken planet!")
print("4: No merchant at the start, I want to earn him!")
dif = input("Difficulty:")
if "1" in dif:
sun = False
else:
sun = True
if "2" in dif:
loot = True
else:
loot = False
if "3" in dif:
atlantis = True
else:
atlantis = False
if "4" in dif:
merch = False
else:
                merch = True
loadingbar = Bar(caption = "Planetoids: startup")
sizetype -= 1
starttype -= 1
        # people don't like to start counting at 0, so it's decremented afterwards
        # for people used to Python this part of the code should be obvious
        # otherwise: [1, 2, 3][0] returns 1, as that is element number 1
        # this is a cpu-intensive way of doing it, but it's the least typing
        # execution time at this point is also pretty much unimportant
is_day = [1, 1, 0, 0][starttype]
is_blood = [0, 0, 0, 1][starttype]
time = [13000.0, 0.0, 0.0, 0.0][starttype]
size = [(4200, 1200), (6300, 1800), (8400, 2400), (2400, 2400)][sizetype]
if terramode:
border = 200
spawn = (size[0] // 2, border)
superradius = 1200 - border
else:
spawn = [(2100, 200), (3150, 300), (4200, 400), (1200, 200)][sizetype]
if not sun: #if no sun
ground = [-1200.0, -1800.0, -2400.0, -2400.0][sizetype]
rock = [385.0, 385.0, 385.0, 385.0][sizetype]
else:
ground = [385.0, 385.0, 385.0, 385.0][sizetype]
rock = [487.0, 703.0, 907.0, 907.0][sizetype]
if sizetype == 3:
#square world has almost the same amount of tiles as small
# so in the following code it will be regarded as a small world.
sizetype = 0
elif sizetype == 2 and terramode: #large world - terra = contents of medium planetoids
sizetype = 1
chestcount = [200, 400, 800][sizetype]
#I would prefer [500,1000,1500] chests
#but terraria only allows 1000 chests as well as 1000 signs, never forget that limit
large_planets = [25, 50, 100][sizetype]
dungeon_planets = [5, 10, 20][sizetype]
small_planets = [250, 500, 1000][sizetype]
stone_planets = [25, 50, 100][sizetype]
#header data
header = {'spawn': spawn, 'groundlevel': ground, 'is_bloodmoon': is_blood,
'dungeon_xy': spawn, 'worldrect': (0, size[0] * 16, 0, size[1] * 16),
'is_meteor_spawned': 0, 'gob_inv_time': 0, 'rocklevel': rock,
'gob_inv_x': 0.0, 'is_day': is_day, 'shadow_orbs_broken': 0,
'width': size[0], 'version': 39, 'gob_inv_type': 0,
'bosses_slain': (0, 0, 0), "npcs_saved": (0, 0, 0), "special_slain": (0, 0, 0),
'gob_inv_size': 0, 'height': size[1],
'ID': randint(10, 10000000), 'moonphase': 0, 'name': name, "hardmode": int(hard),
"altars_broken": 0,
'is_a_shadow_orb_broken': 0, 'time': time}
chestfactor = 1
if sizetype == 0:
for item, amount in itemdata.items():
itemdata[item] = sum(divmod(amount, 2))
for item, amount in goldlockitemdata.items():
goldlockitemdata[item] = sum(divmod(amount, 2))
for item, amount in shadowlockitemdata.items():
shadowlockitemdata[item] = sum(divmod(amount, 2))
chestfactor /= 2
elif sizetype == 2:
for item in itemdata:
itemdata[item] = itemdata[item] * 2
for item, amount in goldlockitemdata.items():
goldlockitemdata[item] = amount*2
for item, amount in shadowlockitemdata.items():
shadowlockitemdata[item] = amount*2
chestfactor *= 2
if mirrored:
for item in itemdata:
itemdata[item] = itemdata[item] // 2 + itemdata[item] % 2
for item, amount in goldlockitemdata.items():
goldlockitemdata[item] = sum(divmod(amount, 2))
for item, amount in shadowlockitemdata.items():
shadowlockitemdata[item] = sum(divmod(amount, 2))
chestcount //= 2
chestfactor /= 2
if loot:
for item in itemdata:
itemdata[item] = itemdata[item] // 2 + itemdata[item] % 2
for item, amount in goldlockitemdata.items():
goldlockitemdata[item] = sum(divmod(amount, 2))
for item, amount in shadowlockitemdata.items():
shadowlockitemdata[item] = sum(divmod(amount, 2))
chestfactor /= 2
itemtotal = 0
for item in itemdata:
itemtotal += itemdata[item]
target = itemtotal // chestcount
loadingbar.set_progress(5, "Planetoids: generating base content")
        # initialize a texture to hold all tile data
        # could have used an array (e.g. numpy) as well, but I am more familiar with pygame than numpy
surface = pygame.surface.Surface(size)
if atlantis: #if waterworld
surface.fill((254, 0, 255))
pygame.draw.rect(surface, (54, 54, 54), ((0, size[0]), (-1 + size[1] - size[1] // 6, size[1] // 6)))
pygame.draw.rect(surface, (255, 255, 255), ((0, size[0]), (size[1] - size[1] // 6, size[1] // 6)))
else:
surface.fill((255, 255, 255))
def on_radius(rad):
pos = size[0] // 2, size[1] // 2
angle = random() * 2 * pi
return (int(pos[0] + rad * cos(angle)),
int(pos[1] + rad * sin(angle)))
def terrapick(radius): #picks randomly items for a chest
fradius = float(radius)
current = 0
content = []
types = [choice((accessoires, weapons)), choice((other, potions))]
for typ in types:
while 1:
item = choice(list(typ.keys()))
#print item, fradius/superradius
if typ[item] > fradius / superradius:
break
content.append((randint(1, itemlist[item]), item))
for x in range(randint(*healthperchest)):
content.append((1, "Life Crystal"))
stars = randint(*starsperchest)
if stars:
content.append((stars, "Fallen Star"))
content.append((1, "Acorn"))
for x in range(20 - len(content)): #chests always have 20 slots
content.append((0, None))
return (on_radius(radius), content)
def pad_chest(content):
for x in range(20 - len(content)): #chests always have 20 slots
content.append((0, None))
return content
def pick(items, targetnumber): #picks randomly items for a chest planetoids
current = 0
content = []
while targetnumber > current:
item = choice(tuple(items.keys()))
if item in itemlist:
amount = randint(1, min(itemlist[item], items[item], targetnumber - current))
else:
amount = randint(1, min(3, items[item], targetnumber - current))
items[item] -= amount
if items[item] < 1:
del (items[item])
content.append((amount, item))
current += amount
if len(content) > 19:
break
return pad_chest(content), current, items
multis = get_multis()
goldlockedsurf = multis["goldlockchest"]
shadowlockedsurf = multis["shadowlockchest"]
chestnames = ("woodchest",
"goldchest",
"shadowchest",
"barrelchest",
"canchest",
"ebonwoodchest",
"mahoganywoodchest",
"bonechest",
"ivychest",
"icechest",
"livingwoodchest",
"skychest",
"shadewoodchest",
"webbedchest",)
chestsurflist = {}
for entry in chestnames:
chestsurflist[entry] = multis[entry]
loadingbar.set_progress(10, "Planetoids: filling chests")
chests = []
if terramode:
rad = superradius // 50
step = (float(superradius) - superradius // 16 - 30) / terrachestcount
while len(chests) < terrachestcount:
rad += step
pos, content = terrapick(rad)
chests.append((pos, content, choice(chestnames)))
chestcontents = []
while itemtotal > 0: # fill those chests with something useful.. or not, angel statue ftw.
i, c, itemdatabase = pick(itemdata, min(target, itemtotal))
chestcontents.append(i)
itemtotal -= c
def fill_special_chests(itemsperchest, chestcontents, extra_items = ()):
items = []
for item,amount in chestcontents.items():
items.extend([item]*amount)
shuffle(items)
while items:
ch = items[:itemsperchest]
items = items[itemsperchest:]
content = [(1, item) for item in ch]
content.extend([(amount, item) for item, amount in extra_items])
yield pad_chest(content)
goldchests = []
shadowchests = []
special_chest_contents = {"goldlockchest" : goldchests,
"shadowlockchest" : shadowchests,
"blockedjunglechest" : [pad_chest([(1, "Piranha Gun")])],
"blockedcorruptionchest" : [pad_chest([(1, "Scourge of the Corruptor")])],
"blockedcrimsonchest" : [pad_chest([(1, "Vampire Knives")])],
"blockedhallowedchest" : [pad_chest([(1, "Rainbow Gun")])],
"blockedicechest" : [pad_chest([(1, "Staff of the Frost Hydra")])],
}
[goldchests.append(content) for content in fill_special_chests(itemspergoldchest, goldlockitemdata, goldlockextra)]
[shadowchests.append(content) for content in fill_special_chests(itemspershadowchest, shadowlockitemdata, shadowlockextra)]
special_chests = []
for chestmulti, chs in special_chest_contents.items():
for ch in chs:
special_chests.append((chestmulti, ch))
center_pos = complex(header["spawn"][0], header["spawn"][1] + 50) #mid of spawn planet
shadoworbpos = []
def make_planet(c, rmin=20, rmax=50, surround=None, value = False): # function to literally draw the planets onto the world
r = randint(rmin, rmax)
if terramode:
if randint(0, 1) or mirrored:
pos = (randint(50, size[0] // 2 - border // 2 - superradius), randint(50, size[1] - 50))
else:
pos = (randint(size[0] // 2 + border // 2 + superradius, size[0] - 50), randint(50, size[1] - 50))
else:
if mirrored:
pos = (randint(50, size[0] // 2 - 50), randint(50, size[1] - 50))
while abs(complex(pos[0], pos[1]) - center_pos) < r + 200:
pos = (randint(50, size[0] // 2 - 50), randint(50, size[1] - 50))
else:
pos = (randint(50, size[0] - 50), randint(50, size[1] - 50))
while abs(complex(pos[0], pos[1]) - center_pos) < r + 200:
pos = (randint(50, size[0] - 50), randint(50, size[1] - 50))
if c == 25:#ebonstone
dire = random() * 2 * pi
radius = randint(10, r)
shadoworbpos.append((int(pos[0] + radius * cos(dire)), int(pos[1]+ radius * sin(dire))))
# a few special planets.. like glass, jungle donuts etc.
if c == 59:
pygame.draw.circle(surface, (c, c, c), pos, r)
pygame.draw.circle(surface, (60, 60, 60), pos, r, 1) #jungle grass
pygame.draw.circle(surface, (255, 255, 255), pos, r - 30)
pygame.draw.circle(surface, (60, 60, 60), pos, r - 30, 1) #jungle grass
for _ in range(10):
draw_valuable(r-25, r-5,pos,(211,211,211),randint(3,7))
elif c == 54:
pygame.draw.circle(surface, (c, c, c), pos, r)
pygame.draw.circle(surface, (254, randint(0, 1), 255), pos, r - 2)
elif c == 53:
pygame.draw.circle(surface, (40, 40, 40), (pos[0], pos[1] + 1), r)
pygame.draw.circle(surface, (c, c, c), pos, r)
elif c == 0:
pygame.draw.circle(surface, (c, c, c), pos, r)
pygame.draw.circle(surface, (2, 2, 2), pos, r, 1)
pygame.draw.circle(surface, (30, 30, 30), pos, r - 3, 1)
if value:
draw_valuable(r-2, r,pos,choice(valuable),randint(3,7))
elif c == -1:
c = dungeon_map[surround]
pygame.draw.circle(surface, (surround, surround, surround), pos, r + 7)
pygame.draw.circle(surface, (252, c, 0), pos, r)
if value:
draw_valuable(min(10, r), r,pos,choice(valuable),randint(3,7))
else:
if surround != None:
pygame.draw.circle(surface, (surround, surround, surround), pos, r + 7)
pygame.draw.circle(surface, (c, c, c), pos, r)
if value:
draw_valuable(min(10, r), r,pos,choice(valuable),randint(3,7))
return (pos[0] - 1, pos[1] - 1)
def make_hub_planet():
r = randint(75, 125)
if terramode:
if randint(0, 1):
pos = (randint(50, size[0] // 2 - border // 2 - superradius), randint(50, size[1] - 50))
else:
pos = (randint(size[0] // 2 + border // 2 + superradius, size[0] - 50), randint(50, size[1] - 50))
else:
if mirrored:
pos = (randint(50, size[0] // 2 - 50), randint(50, size[1] - 50))
while abs(complex(pos[0], pos[1]) - center_pos) < r + 200:
pos = (randint(50, size[0] // 2 - 50), randint(50, size[1] - 50))
else:
pos = (randint(50, size[0] - 50), randint(50, size[1] - 50))
while abs(complex(pos[0], pos[1]) - center_pos) < r + 200:
pos = (randint(50, size[0] - 50), randint(50, size[1] - 50))
valuables = (r // 25) ** 2
pygame.draw.circle(surface, (0, 0, 0), pos, r) #dirt
pygame.draw.circle(surface, (1, 1, 1), pos, r // 3) #stone
pygame.draw.circle(surface, (2, 2, 2), pos, r, 2) #grassring
pygame.draw.circle(surface, (30, 30, 30), pos, r - 3, 2) #woodring
for x in range(valuables * 5):
rad = randint(1, 10)
npos = get_randrad(pos, r - 10 - rad)
pygame.draw.circle(surface, (252, 2, 252), npos, rad) #air
for x in range(valuables):
rad = randint(4, 7)
npos = get_randrad(pos, r - 5 - rad)
pygame.draw.circle(surface, choice(valuable), npos, rad)
return pos
def get_randrad(pos, radius):
radius = random() * radius
angle = random() * 2 * pi
return (int(pos[0] + radius * cos(angle)),
int(pos[1] + radius * sin(angle)))
def get_randradrange(pos, minradius, maxradius):
radius = randint(minradius, maxradius)
angle = random() * 2 * pi
return (int(pos[0] + radius * cos(angle)),
int(pos[1] + radius * sin(angle)))
def draw_valuable(min_radius, max_radius, planetpos, material, size):
r = random() * 2 * pi
radius = randint(min_radius, max_radius)
pos = (
int(planetpos[0] + radius * cos(r)), int(planetpos[1]+ radius * sin(r)))
pygame.draw.circle(surface, material, pos, size)
def make_terra(surface, size):
pos = (size[0] // 2, size[1] // 2)
r = superradius
valuables = (r // 25) ** 2
pygame.draw.circle(surface, (0, 0, 0), pos, r) #dirt
pygame.draw.circle(surface, (30, 30, 30), pos, 3 * r // 4, r // 100) #wood
pygame.draw.circle(surface, (1, 1, 1), pos, r // 2) #stone
pygame.draw.circle(surface, (59, 59, 59), pos, r // 5) #jungle
pygame.draw.circle(surface, (2, 2, 2), pos, r, 2) #grassring
for name, minradius, maxradius, amount, size in planetdata:
minradius = int(r * minradius)
maxradius = int(r * maxradius)
for x in range(int(amount * valuables)):
npos = get_randradrange(pos, minradius, maxradius)
c = ntiles[name]
usize = randint(size[0], size[1])
if usize > 1:
pygame.draw.circle(surface, (c, c, c), npos, usize) #air
else:
surface.set_at(npos, (c, c, c))
#caverns
for x in range(int(caverns * valuables)):
npos = get_randradrange(pos, r * 0.25, r * 0.75)
pygame.draw.circle(surface, (255, 255, 255), npos, randint(*cavernsize))
for x in range(int(dirtcaverns * valuables)):
npos = get_randradrange(pos, r * 0.75, r * 0.9)
pygame.draw.circle(surface, (252, 2, 255), npos, randint(*dirtcavernsize))
#liquids
for x in range(int(water * valuables)):
npos = get_randradrange(pos, r // 3, r * 0.9)
pygame.draw.circle(surface, (254, 0, 255), npos, randint(watersize[0], watersize[1]))
for x in range(int(lava * valuables)):
npos = get_randradrange(pos, r // 4, r // 3)
pygame.draw.circle(surface, (254, 1, 255), npos, randint(lavasize[0], lavasize[1]))
for x in range(chasms):
if x == 0:
a = random() * pi + pi
while abs(a - 1.5 * pi) < 0.2 * pi:
a = random() * pi + pi
else:
a = random() * pi * 2
while abs(a - 1.5 * pi) < 0.2 * pi:
a = random() * pi + pi
#corruption
deep = randint(chasmdeepness[0], chasmdeepness[1]) * 0.01 * r
surface = draw_chasm(surface, pos, deep, r + 5, a, a + chasmthickness)
#pygame.image.save(surface, "mask2.png")
##jungle
for x in range(int(valuables * jcircle)):
npos = get_randradrange(pos, 5, r // 5)
pygame.draw.circle(surface, (255, 255, 255), npos, randint(*jcirclesize))
#pygame.draw.circle(surface, (60,60,60), npos, randint(*jcirclesize), 1)
for x in range(int(valuables * jrect)):
rect = pygame.rect.Rect((0, 0), (randint(*jrectsize), randint(*jrectsize)))
rect.center = get_randradrange(pos, 5, r // 4)
pygame.draw.rect(surface, (254, 1, 255), rect)
for x in range(int(valuables * jdot)):
npos = get_randradrange(pos, 5, r // 5)
surface.set_at(npos, (48, 48, 48))
for x in range(int(valuables * jarc)):
npos = get_randradrange(pos, r // 10, r // 5)
pygame.draw.circle(surface, (60, 60, 60), npos, randint(*jarcsize), 1)
##trees
for x in range(trees):
a = random() * pi + pi
npos = (int(pos[0] + r * cos(a)),
int(pos[1] + r * sin(a)))
while npos[1] > border + 100:
a = random() * pi + pi
npos = (int(pos[0] + r * cos(a)),
int(pos[1] + r * sin(a)))
h = randint(5, 25)
surface.blit(make_tree(h), (npos[0] - 1, npos[1] - h - 1))
s = pygame.surface.Surface((5, 2))
s.fill((2, 2, 2))
surface.blit(s, (npos[0] - 2, npos[1] - 1))
##altars
for x in range(altar_count):
npos = get_randradrange(pos, r // 5, r // 2)
surface.blit(multis["altar"], npos)
loadingbar.set_progress(20, "Planetoids: drawing some circles")
chestpos = []
if mirrored:
stone_planets //= 2
large_planets //= 2
small_planets //= 2
mul = 3
else:
mul = 5
if not terramode or sizetype == 1:
for x in range(stone_planets):
chestpos.append(make_hub_planet())
for x in range(large_planets):
chestpos.append(make_planet(*choice(data1), value=True))
for x in range(0, (sizetype + 1) * mul):
for d in data1:
chestpos.append(make_planet(*d))
for x in range(dungeon_planets):
chestpos.append(make_planet(*choice(data3)))
for x in range(0, (sizetype + 1) * mul):
for d in data2:
chestpos.append(make_planet(*d))
for x in range(small_planets):
chestpos.append(make_planet(*choice(data2)))
if mirrored:
mirror = surface.subsurface(0, 0, size[0] // 2, size[1])
mirror = pygame.transform.flip(mirror, 1, 0)
surface.blit(mirror, (size[0] // 2, 0))
if terramode:
if atlantis:
pygame.draw.circle(surface, (255, 255, 255), (header["spawn"][0], header["spawn"][1]), 50)
pygame.draw.circle(surface, (54, 54, 54), (header["spawn"][0], header["spawn"][1]), 50, 2)
make_terra(surface, size)
else: #spawnplanet
items = [(50, "Torch"), (25, "Acorn"), (5, "Daybloom Seeds"), (5, "Moonglow Seeds"),
(5, "Blinkroot Seeds"), (5, "Waterleaf Seeds"), (5, "Fireblossom Seeds"),
(5, "Deathweed Seeds"), (5, "Shiverthorn Seeds"), (1, "Life Crystal"), (2, "Mana Crystal"), (50, "Book"),
(200, "Cobweb"), (5, "Mushroom Grass Seeds"), (1, "Snow Globe"), (10, "Mud Block"),
(250, "Dirt Block"), (250, "Dirt Block"),
(1, "Shiny Red Balloon"),
]
for x in range(20 - len(items)):
items.append((0, None))
# draw the spawn planet
radius = 100
center = header["spawn"][0], header["spawn"][1] + 50
if atlantis:
pygame.draw.circle(surface, (255, 255, 255), center, radius + 50)
pygame.draw.circle(surface, (54, 54, 54), center, radius + 50, 2)
pygame.draw.circle(surface, (52, 52, 52), center, radius)
pygame.draw.circle(surface, (2, 2, 2), center, radius)
pygame.draw.circle(surface, (0, 0, 0), center, radius - 2)
pygame.draw.circle(surface, (1, 1, 1), center, radius // 2)
pygame.draw.circle(surface, (30, 30, 30), center, radius // 4)
for _ in range(3):#sand
draw_valuable(20, 40, center, (53,53,53), 7)
for _ in range(2):#clay
draw_valuable(20,50, center, (40,40,40), 7)
for _ in range(2):#iron
draw_valuable(20,30, center, (6,6,6), 4)
for _ in range(3):#copper
draw_valuable(20, 30, center, (7,7,7), 5)
chests.append(((header["spawn"][0] - 1, header["spawn"][1] + 49), items, choice(chestnames)))
header["spawn"] = header["spawn"][0], header["spawn"][1] - radius + 50
surface.blit(make_tree(25), (header["spawn"][0] + 1, header["spawn"][1] - 25))
surface.blit(multis["altar"], (header["spawn"][0] - 2, header["spawn"][1] - 2))
        # draw the lower border of the world. Falling into the void was fun in minecraft,
        # not so much here, as it will just make "splat"
pygame.draw.rect(surface, (57, 57, 57), ((0, size[1] - 100), (size[0], 100)))
pygame.draw.rect(surface, (254, 1, 255), ((0, size[1] - 150), (size[0], 50)))
#not pure terra mode
if terramode != 2:
#ocean planetoids
pygame.draw.circle(surface, (53, 53, 53), (0, 500), 500)
pygame.draw.circle(surface, (54, 54, 54), (0, 500), 500, 1)
pygame.draw.circle(surface, (253, 0, 255), (0, 301), 300)
pygame.draw.circle(surface, (53, 53, 53), (size[0], 500), 500)
pygame.draw.circle(surface, (54, 54, 54), (size[0], 500), 500, 1)
pygame.draw.circle(surface, (253, 0, 255), (size[0], 301), 300)
a = len(chestcontents)
        if sizetype == 0:
            b = max_altar_planet // 2
        elif sizetype == 2:
            b = max_altar_planet * 2
        elif sizetype == 1:
            b = max_altar_planet
        else:
            raise IndexError("Invalid world size")
if mirrored:
double = chestcontents[:]
for pos in chestpos:
pos = size[0] - pos[0], pos[1]
tile = surface.get_at(pos)[0]
if tile == 25 or tile == 23:
surface.blit(multis["shadoworb"], pos) #place shadoworb into corruption
elif tile == 58: #hellfurnace into hellstone
surface.blit(multis["hellfurnace"], pos)
elif b:
b -= 1
surface.blit(multis["altar"], pos)
elif a:
chests.append((pos, chestcontents.pop(), choice(chestnames))) #place chests
a -= 1
else:
print("Warning, could not place all content!")
break # we usually have more planets than chests, so lets get out of here
chestcontents = double
a = len(chestcontents)
b = max_altar_planet
for pos in chestpos:
tile = surface.get_at(pos)[0]
if tile == 25 or tile == 23:
surface.blit(multis["shadoworb"], pos) #place shadoworb into corruption
elif tile == 58: #hellfurnace into hellstone
surface.blit(multis["hellfurnace"], pos)
elif special_chests:
multi, content = special_chests.pop()
chests.append((pos, content, multi))
elif b:
b -= 1
surface.blit(multis["altar"], pos)
elif a:
chests.append((pos, chestcontents.pop(), choice(chestnames))) #place chests
a -= 1
else:
break # we usually have more planets than chests, so lets get out of here
if a:
print("------------------Warning: {} unallocated chests------------------".format(a))
import time
time.sleep(1)
loadingbar.set_progress(30, "Planetoids: hiding goodies")
for shadoworb in shadoworbpos:
surface.blit(multis["shadoworb"], shadoworb)
for chest in chests:
#draw the chests into the world texture
surface.blit(multis[chest[2]], chest[0])
            # below is to make sure every chest stands on something, so they don't glitch
d = surface.get_at((chest[0][0], chest[0][1] + 2))[0]
if d > 250 or d == 51:
surface.set_at((chest[0][0], chest[0][1] + 2), (0, 0, 0))
d = surface.get_at((chest[0][0] + 1, chest[0][1] + 2))[0]
if d > 250 or d == 51:
surface.set_at((chest[0][0] + 1, chest[0][1] + 2), (0, 0, 0))
# save the "source" of the world. Helped plenty with debugging.
#pygame.image.save(surface, "mask.png")
for x in range(1000 - len(chests)): #fill in nonechests, as terraria always has 1000 chests
chests.append(None)
self.header = header
z = header["width"] * header["height"] #tileamount
total = z
#list of tiles : walls
walls = defaultdict(lambda: None, {0: 2,
25: 3,
9: 11,
8: 10,
7: 12,
30: 4,
58: 13,
21: 2,
31: 3,
51: 62,#cobweb gets spider nest wall
40: 6,
})
def count(checks):
c = {}
for t_id in checks:
c[t_id] = 0
for x in range(size[0]):
for y in range(size[1]):
tid = surface.get_at((x, y))[0]
if tid in c:
c[tid] += 1
for tid in c:
amount = c[tid]
print("%-10s : %d" % (tiles[tid], amount))
loadingbar.set_progress(50, "Planetoids: writing tile data")
self.tiles = write_tiles(surface, header, walls, True, callback = loadingbar)
self.chests = chests
self.signs = [None] * 1000
self.names = names
self.npcs = [('Guide', (header["spawn"][0] * 16, (header["spawn"][1] - 3) * 16), 1,
(header["spawn"][0], header["spawn"][1] - 3)),
('Old Man', (header["spawn"][0] * 16 - 16, (header["spawn"][1] - 3) * 16), 1,
(header["spawn"][0], header["spawn"][1] - 3))]
if merch: self.npcs.append(
('Merchant', (header["spawn"][0] * 16 - 16, (header["spawn"][1] - 3) * 16), 1,
(header["spawn"][0], header["spawn"][1] - 3)))
self.loadingbar = loadingbar
if __name__ == "__main__":
gen = Generator()
gen.run()
|
|
import numpy as np
import pandas as pd
def unique_row(a):
"""
Returns an array of the ordered, unique set of rows for input array a.
Parameters
----------
a: array
an array with replicated rows.
    Returns
    -------
    unique_a: array
        an ordered array without replicated rows.
    Example
    -------
    >>> a = np.array([[9, 9],
    ...               [8, 8],
    ...               [1, 1],
    ...               [9, 9]])
    >>> unique_row(a)
    array([[1, 1],
           [8, 8],
           [9, 9]])
"""
b = np.ascontiguousarray(a).view(np.dtype((np.void, a.dtype.itemsize * a.shape[1])))
_, idx = np.unique(b, return_index=True)
unique_a = a[idx]
return unique_a
def get_path_real_length(path):
"""
    Get the dendritic length of a path, which is the sum of the distances between consecutive points.
Parameters
----------
path: array
a coordinate array with dim=(n, 3)
Returns
-------
the dendritic length of this path: float
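    Example
    -------
    Illustrative check: a path with segment lengths 5 and 12 has real length 17.
    >>> path = np.array([[0., 0., 0.], [3., 4., 0.], [3., 4., 12.]])
    >>> get_path_real_length(path)
    17.0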
"""
return np.sum(np.sqrt(np.sum((path[1:] - path[:-1])**2, 1)))
def get_path_euclidean_length(path):
"""
    Get the euclidean length of a path, which is the distance between the first and last points.
Parameters
----------
path: array
a coordinate array with dim=(n, 3)
Returns
-------
the euclidean length of this path: float
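    Example
    -------
    Illustrative check: the straight-line distance of the bent path below is 13.
    >>> path = np.array([[0., 0., 0.], [3., 4., 0.], [3., 4., 12.]])
    >>> get_path_euclidean_length(path)
    13.0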
"""
return np.sqrt(np.sum((path[0] - path[-1]) ** 2))
def get_outer_terminals(all_terminals):
"""
Get terminal points which form the convex hull of the cell.
Parameters
----------
all_terminals: array
The array contains all terminal points from terminal paths (no other paths connected to them)
Returns
-------
outer_terminals_3d: array
        The array containing all terminal points that form the convex hull of the cell.
"""
from scipy.spatial import ConvexHull
hull = ConvexHull(all_terminals[:,:2])
outer_terminals_3d = all_terminals[hull.vertices]
outer_terminals_3d = np.vstack([outer_terminals_3d, outer_terminals_3d[0]])
return outer_terminals_3d
def get_angle(v0, v1):
"""
Get angle (in both radian and degree) between two vectors.
Parameters
----------
v0: array
vector zero.
v1: array
vector one.
Returns
-------
Return a tuple, (angle in radian, angle in degree).
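    Example
    -------
    Illustrative check with orthogonal unit vectors:
    >>> get_angle([1, 0, 0], [0, 1, 0])
    (1.5707963267948966, 90.0)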
"""
v0 = np.array(v0)
v1 = np.array(v1)
if not v0.any() or not v1.any():
return 0, 0
c = np.dot(v0, v1) / np.linalg.norm(v0) / np.linalg.norm(v1)
return np.arccos(np.clip(c, -1, 1)), np.degrees(np.arccos(np.clip(c, -1, 1)))
def get_remote_vector(path):
"""
    Get the vector of a path between its first and last points.
    Parameters
    ----------
    path: array
        a coordinate array with dim=(n, 3)
Returns
-------
normalized v: array
returned a normalized vector.
"""
s = path[0]
e = path[-1]
v= e-s
if (v == 0).all():
return np.zeros(3)
else:
return v/np.linalg.norm(v)
def get_local_vector(path):
"""
    Get the vector of a path between its first and second points.
    Parameters
    ----------
    path: array
        a coordinate array with dim=(n, 3)
Returns
-------
normalized v: array
returned a normalized vector.
"""
s = path[0]
e = path[1]
v= e-s
if (v == 0).all():
return np.zeros(3)
else:
return v/np.linalg.norm(v)
def get_average_angles(df_paths):
"""
    A helper function to get the average of all kinds of angles.
Parameters
----------
df_paths: pandas.DataFrame
Returns
-------
average_nodal_angle_deg
average_nodal_angle_rad
average_local_angle_deg
average_local_angle_rad
"""
nodal_angles_deg = {}
nodal_angles_rad = {}
local_angles_deg = {}
local_angles_rad = {}
n = 0
for i in np.unique(df_paths.connect_to):
path_ids = df_paths[df_paths.connect_to == i].index.tolist()
if len(path_ids) >= 2:
from itertools import combinations
path_id_combs = combinations(path_ids, 2)
for path_id_pair in path_id_combs:
p0 = df_paths.loc[path_id_pair[0]].path
p1 = df_paths.loc[path_id_pair[1]].path
v00 = get_remote_vector(p0)
v01 = get_remote_vector(p1)
nodal_angles_rad[n], nodal_angles_deg[n] = get_angle(v00, v01)
v10 = get_local_vector(p0)
v11 = get_local_vector(p1)
local_angles_rad[n], local_angles_deg[n] = get_angle(v10, v11)
n+=1
else:
continue
average_nodal_angle_deg = np.nanmean(list(nodal_angles_deg.values()))
average_nodal_angle_rad = np.nanmean(list(nodal_angles_rad.values()))
average_local_angle_deg = np.nanmean(list(local_angles_deg.values()))
average_local_angle_rad = np.nanmean(list(local_angles_rad.values()))
return average_nodal_angle_deg, average_nodal_angle_rad, average_local_angle_deg, average_local_angle_rad
def get_summary_of_type(df_paths):
"""
    A helper function to gather all summarized information.
Parameters
----------
df_paths: pandas.DataFrame
Return
------
a list of all summarized information.
"""
if len(df_paths) < 1:
return None
branchpoints = np.vstack(df_paths.connect_to_at)
branchpoints = unique_row(branchpoints)
num_branchpoints = len(branchpoints)
max_branch_order = max(df_paths.branch_order)
    terminalpaths = df_paths.path[df_paths.connected_by.apply(len) == 0].values
terminalpoints = np.vstack([p[-1] for p in terminalpaths])
num_terminalpoints = len(terminalpoints)
# outerterminals = get_outer_terminals(terminalpoints)
num_irreducible_nodes = num_branchpoints + num_terminalpoints
num_dendritic_segments = len(df_paths)
# path length
reallength = df_paths['real_length']
reallength_sum = reallength.sum()
reallength_mean = reallength.mean()
reallength_median = reallength.median()
reallength_min = reallength.min()
reallength_max = reallength.max()
euclidean = df_paths['euclidean_length']
euclidean_sum = euclidean.sum()
euclidean_mean = euclidean.mean()
euclidean_median = euclidean.median()
euclidean_min = euclidean.min()
euclidean_max = euclidean.max()
tortuosity = reallength / euclidean
average_tortuosity = np.mean(tortuosity)
# node angles
average_nodal_angle_deg, average_nodal_angle_rad, average_local_angle_deg, average_local_angle_rad = get_average_angles(df_paths)
if df_paths.iloc[0].type == 2:
t = 'axon'
elif df_paths.iloc[0].type == 3:
t = 'basal_dendrites'
elif df_paths.iloc[0].type == 4:
t = 'apical_dendrites'
else:
t = 'undefined'
return (t,int(num_dendritic_segments),
int(num_branchpoints),
int(num_irreducible_nodes),
int(max_branch_order),
average_nodal_angle_deg,
average_nodal_angle_rad,
average_local_angle_deg,
average_local_angle_rad,
average_tortuosity,
reallength_sum,
reallength_mean,
reallength_median,
reallength_min,
reallength_max,
euclidean_sum,
euclidean_mean,
euclidean_median,
euclidean_min,
euclidean_max,)
def get_summary_data(df_paths):
"""
The summary of the cell morphology.
"""
df_paths = df_paths.copy()
soma = df_paths[df_paths.type == 1]
axon = df_paths[df_paths.type == 2]
dend_basal = df_paths[df_paths.type == 3]
dend_apical = df_paths[df_paths.type == 4]
axon_summary = get_summary_of_type(axon)
dend_basal_summary = get_summary_of_type(dend_basal)
dend_apical_summary = get_summary_of_type(dend_apical)
labels = [
'type',
'num_path_segments',
'num_branchpoints',
'num_irreducible_nodes',
'max_branch_order',
'average_nodal_angle_deg',
'average_nodal_angle_rad',
'average_local_angle_deg',
'average_local_angle_rad',
'average_tortuosity',
'real_length_sum',
'real_length_mean',
'real_length_median',
'real_length_min',
'real_length_max',
'euclidean_length_sum',
'euclidean_length_mean',
'euclidean_length_median',
'euclidean_length_min',
'euclidean_length_max',
]
neurites = [axon_summary,dend_basal_summary,dend_apical_summary]
df_summary = pd.DataFrame.from_records([n for n in neurites if n is not None], columns=labels)
return df_summary
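# Usage sketch (illustrative; assumes ``df_paths`` is a DataFrame with the
# columns used above: type, path, connect_to, connect_to_at, connected_by,
# branch_order, real_length and euclidean_length):
#
#     df_summary = get_summary_data(df_paths)
#     print(df_summary[['type', 'num_branchpoints', 'real_length_sum']])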
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test multi-worker Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import functools
import os
import sys
import threading
from absl.testing import parameterized
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python import keras
from tensorflow.python.distribute import collective_all_reduce_strategy as collective_strategy
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribute_coordinator as dc
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import multi_worker_test_base as test_base
from tensorflow.python.distribute import parameter_server_strategy
from tensorflow.python.distribute.cluster_resolver import TFConfigClusterResolver
from tensorflow.python.keras import backend
from tensorflow.python.keras import callbacks
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import models
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.distribute import multi_worker_testing_utils
from tensorflow.python.platform import test
from tensorflow.python.util import nest
# TODO(b/130375202): remove this class which is a temporary solution before we
# get rid of configure method.
class ParameterServerStrategy(distribute_lib.Strategy):
"""Temporarily mock the original strategy to bypass cluster_spec check."""
def __init__(self, cluster_resolver=None):
"""Initializes this strategy."""
# The `cluster_resolver` must be set so that
# `ParameterServerStrategyExtended` will keep num_gpus for `configure`
# method.
if cluster_resolver is None:
cluster_resolver = TFConfigClusterResolver()
extended = parameter_server_strategy.ParameterServerStrategyExtended(
self, cluster_resolver=cluster_resolver)
super(ParameterServerStrategy, self).__init__(extended)
def _clone_and_build_model(model, strategy):
# The new "original" model in worker 0.
with strategy.scope():
cloned_model = models.clone_model(model)
# Compile and build model.
if isinstance(model.optimizer, optimizers.TFOptimizer):
optimizer = model.optimizer
# TODO(yuefengz): figure out why the optimizer here is still a
# TFOptimizer.
while isinstance(optimizer, optimizers.TFOptimizer):
optimizer = optimizer.optimizer
optimizer = copy.deepcopy(optimizer)
else:
optimizer_config = model.optimizer.get_config()
optimizer = type(model.optimizer).from_config(optimizer_config)
cloned_model.compile(
optimizer,
model.loss,
metrics=metrics_module.clone_metrics(model._compile_metrics),
loss_weights=model.loss_weights,
sample_weight_mode=model.sample_weight_mode,
weighted_metrics=metrics_module.clone_metrics(
model._compile_weighted_metrics))
return cloned_model
# TODO(b/123918215): Possibly merge this Callback with keras_test.Counter.
class MultiWorkerVerificationCallback(callbacks.Callback):
"""MultiWorkerVerificationCallback verifies the callbacks in multi-worker scheme.
This Callback is intended to be used for verifying the callback is indeed
called the correct number of times in various task types.
Attributes:
_task_dict: A nested dictionary storing the number of times a callback has
been called in specific task type, task index, and method name.
Look up structure is
task_name -> task_id -> tracking_method_name -> invoke_count
For example, a _task_dict of
{
'ps': {
0: {
'on_epoch_begin': 2
},
1: {
'on_epoch_begin': 2
}
},
'worker': {
0: {
'on_epoch_begin': 2
},
1: {
'on_epoch_begin': 2
}
}
}
indicates the ps task has 'on_epoch_begin' called twice on each
of the two indices, and likewise for worker task.
"""
# TODO(rchao): Add other method calls to verify.
METHODS_TO_VERIFY = ['on_epoch_begin']
def __init__(self, num_epoch, num_worker):
"""Initialize a MultiWorkerVerificationCallback.
Args:
num_epoch: Number of epochs this Callback is expected to be called for.
num_worker: Number of workers this Callback is expected to be called from.
"""
super(MultiWorkerVerificationCallback, self).__init__()
self._num_epoch = num_epoch
self._num_worker = num_worker
self._task_dict = {
key: collections.defaultdict(lambda: collections.defaultdict(int))
for key in ['ps', 'worker']
}
self._lock = threading.Lock()
self._is_between_graph = None
self.wrap_methods(self.METHODS_TO_VERIFY)
@property
def is_between_graph(self):
return self._is_between_graph
@is_between_graph.setter
def is_between_graph(self, is_between_graph):
self._is_between_graph = is_between_graph
def wrap_methods(self, method_names):
"""Wrap methods so that the counts of calls are tracked.
Args:
method_names: A list of names of methods to track calls.
"""
for method_name in method_names:
method = getattr(self, method_name)
def wrapped_method(method_to_wrap, name, *arg, **kwargs):
# Use lock to ensure += operation is thread-safe.
with self._lock:
self._task_dict[test_base.get_task_type()][
test_base.get_task_index()][name] += 1
method_to_wrap(*arg, **kwargs)
setattr(self, method_name,
functools.partial(wrapped_method, method, method_name))
def verify(self, test_case):
method_count_dict = {
method_name: self._num_epoch for method_name in self.METHODS_TO_VERIFY
}
assert self._is_between_graph is not None
if self._is_between_graph:
# TODO(b/124171024): In between-graph replication, by default only the
# chief calls callback. Fix this test to cover that, as well as the rare
# cases where all workers call.
worker_call_count = {
i: method_count_dict for i in range(0, self._num_worker)
}
else:
# If in-graph, only the first worker calls callback methods.
worker_call_count = {0: method_count_dict}
test_case.assertDictEqual(
self._task_dict,
{
# PS' callback is not supposed to be called.
'ps': {},
# Each of the Worker should be called num_epoch of times.
'worker': worker_call_count
})
class KerasMultiWorkerTestIndependentWorker(test_base.IndependentWorkerTestBase,
parameterized.TestCase):
@combinations.generate(
combinations.combine(
mode=['graph'],
strategy_cls=[
collective_strategy.CollectiveAllReduceStrategy,
],
required_gpus=[0, 1]))
def testSimpleModelIndependentWorkerSync(self, strategy_cls):
num_workers = 2
num_epoch = 2
cluster_spec = test_base.create_cluster_spec(num_workers=num_workers)
self._barrier = dc._Barrier(2)
# The verification callback will be shared by multiple threads.
verification_callback = MultiWorkerVerificationCallback(
num_epoch=num_epoch, num_worker=num_workers)
def _independent_worker_fn(*args, **kwargs): # pylint: disable=unused-argument
"""Simulates an Independent Worker inside of a thread."""
with test.mock.patch.object(dc, '_run_std_server',
self._make_mock_run_std_server()):
strategy = strategy_cls()
verification_callback.is_between_graph = \
strategy.extended.experimental_between_graph
batch_size = 64
steps = 2
train_ds, _ = multi_worker_testing_utils.mnist_synthetic_dataset(
batch_size, steps)
with strategy.scope():
model = multi_worker_testing_utils.get_mnist_model((28, 28, 1))
orig_loss, _ = model.evaluate(train_ds, steps=steps)
callbacks_for_fit = nest.flatten(
kwargs.get('verification_callback', []))
history = model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=callbacks_for_fit)
self.assertIsInstance(history, keras.callbacks.History)
trained_loss, _ = model.evaluate(train_ds, steps=steps)
self.assertLess(trained_loss, orig_loss)
threads = self.run_multiple_tasks_in_threads(
_independent_worker_fn,
cluster_spec,
verification_callback=verification_callback)
threads_to_join = []
strategy = strategy_cls()
if strategy.extended.experimental_between_graph:
for ts in threads.values():
threads_to_join.extend(ts)
else:
threads_to_join = [threads['worker'][0]]
self.join_independent_workers(threads_to_join)
verification_callback.verify(self)
@combinations.generate(
combinations.combine(
mode=['graph'],
strategy_cls=[ParameterServerStrategy],
required_gpus=[0, 1]))
def testSimpleModelIndependentWorkerAsync(self, strategy_cls):
num_workers = 2
num_epoch = 2
cluster_spec = test_base.create_cluster_spec(
num_workers=num_workers, num_ps=2)
self._barrier = dc._Barrier(4)
# The verification callback will be shared by multiple threads.
verification_callback = MultiWorkerVerificationCallback(
num_epoch=num_epoch, num_worker=num_workers)
def _independent_worker_fn(*args, **kwargs): # pylint: disable=unused-argument
"""Simulates an Independent Worker inside of a thread."""
# TODO(rchao/yuefengz): The following is run by both worker and ps
# threads. The distribute coordinator should run std server immediately
# without configuring the session (or building the graph) on PS.
with test.mock.patch.object(dc, '_run_std_server',
self._make_mock_run_std_server()):
batch_size = 64
steps = 2
strategy = strategy_cls()
verification_callback.is_between_graph = \
strategy.extended.experimental_between_graph
train_ds, _ = multi_worker_testing_utils.mnist_synthetic_dataset(
batch_size, steps)
val_ds, _ = multi_worker_testing_utils.mnist_synthetic_dataset(
batch_size, steps)
with strategy.scope():
model = multi_worker_testing_utils.get_mnist_model((28, 28, 1))
# TODO(b/123868066): Verify callback for model.evaluate().
callbacks_for_fit = nest.flatten(
kwargs.get('verification_callback', []))
history = model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
validation_data=val_ds,
validation_steps=steps,
callbacks=callbacks_for_fit)
self.assertIsInstance(history, keras.callbacks.History)
threads = self.run_multiple_tasks_in_threads(
_independent_worker_fn,
cluster_spec,
verification_callback=verification_callback)
threads_to_join = []
for task_type, ts in threads.items():
# This test can finish once the worker threads complete, and thus
# the ps threads don't need to be joined.
if task_type == 'ps':
continue
threads_to_join.extend(ts)
self.join_independent_workers(threads_to_join)
verification_callback.verify(self)
if __name__ == '__main__':
# Enable manual variable initialization to make sure variables are initialized
# by `init_restore_or_wait_for_variables`.
backend.manual_variable_initialization(True)
with test.mock.patch.object(sys, 'exit', os._exit):
test.main()
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import access
from keystoneclient import auth
from keystoneclient.auth.identity import access as access_plugin
from keystoneclient.auth.identity import v3
from keystoneclient.auth import token_endpoint
from oslo_config import cfg
from oslo_context import context
from oslo_log import log as logging
import oslo_messaging
from oslo_middleware import request_id as oslo_request_id
from oslo_utils import importutils
import six
from heat.common import endpoint_utils
from heat.common import exception
from heat.common.i18n import _LE, _LW
from heat.common import policy
from heat.common import wsgi
from heat.db import api as db_api
from heat.engine import clients
LOG = logging.getLogger(__name__)
TRUSTEE_CONF_GROUP = 'trustee'
auth.register_conf_options(cfg.CONF, TRUSTEE_CONF_GROUP)
class RequestContext(context.RequestContext):
"""Stores information about the security context.
Under the security context the user accesses the system, as well as
additional request information.
"""
def __init__(self, auth_token=None, username=None, password=None,
aws_creds=None, tenant=None, user_id=None,
tenant_id=None, auth_url=None, roles=None, is_admin=None,
read_only=False, show_deleted=False,
overwrite=True, trust_id=None, trustor_user_id=None,
request_id=None, auth_token_info=None, region_name=None,
auth_plugin=None, trusts_auth_plugin=None, **kwargs):
"""Initialisation of the request context.
:param overwrite: Set to False to ensure that the greenthread local
copy of the index is not overwritten.
:param kwargs: Extra arguments that might be present, but we ignore
because they possibly came in from older rpc messages.
"""
super(RequestContext, self).__init__(auth_token=auth_token,
user=username, tenant=tenant,
is_admin=is_admin,
read_only=read_only,
show_deleted=show_deleted,
request_id=request_id)
self.username = username
self.user_id = user_id
self.password = password
self.region_name = region_name
self.aws_creds = aws_creds
self.tenant_id = tenant_id
self.auth_token_info = auth_token_info
self.auth_url = auth_url
self.roles = roles or []
self._session = None
self._clients = None
self.trust_id = trust_id
self.trustor_user_id = trustor_user_id
self.policy = policy.Enforcer()
self._auth_plugin = auth_plugin
self._trusts_auth_plugin = trusts_auth_plugin
if is_admin is None:
self.is_admin = self.policy.check_is_admin(self)
else:
self.is_admin = is_admin
@property
def session(self):
if self._session is None:
self._session = db_api.get_session()
return self._session
@property
def clients(self):
if self._clients is None:
self._clients = clients.Clients(self)
return self._clients
def to_dict(self):
user_idt = '{user} {tenant}'.format(user=self.user_id or '-',
tenant=self.tenant_id or '-')
return {'auth_token': self.auth_token,
'username': self.username,
'user_id': self.user_id,
'password': self.password,
'aws_creds': self.aws_creds,
'tenant': self.tenant,
'tenant_id': self.tenant_id,
'trust_id': self.trust_id,
'trustor_user_id': self.trustor_user_id,
'auth_token_info': self.auth_token_info,
'auth_url': self.auth_url,
'roles': self.roles,
'is_admin': self.is_admin,
'user': self.user,
'request_id': self.request_id,
'show_deleted': self.show_deleted,
'region_name': self.region_name,
'user_identity': user_idt}
@classmethod
def from_dict(cls, values):
return cls(**values)
@property
def keystone_v3_endpoint(self):
if self.auth_url:
return self.auth_url.replace('v2.0', 'v3')
else:
auth_uri = endpoint_utils.get_auth_uri()
if auth_uri:
return auth_uri
else:
LOG.error('Keystone API endpoint not provided. Set '
'auth_uri in section [clients_keystone] '
'of the configuration file.')
raise exception.AuthorizationFailure()
@property
def trusts_auth_plugin(self):
if self._trusts_auth_plugin:
return self._trusts_auth_plugin
self._trusts_auth_plugin = auth.load_from_conf_options(
cfg.CONF, TRUSTEE_CONF_GROUP, trust_id=self.trust_id)
if self._trusts_auth_plugin:
return self._trusts_auth_plugin
LOG.warn(_LW('Using the keystone_authtoken user as the heat '
'trustee user directly is deprecated. Please add the '
'trustee credentials you need to the %s section of '
'your heat.conf file.') % TRUSTEE_CONF_GROUP)
cfg.CONF.import_group('keystone_authtoken',
'keystonemiddleware.auth_token')
self._trusts_auth_plugin = v3.Password(
username=cfg.CONF.keystone_authtoken.admin_user,
password=cfg.CONF.keystone_authtoken.admin_password,
user_domain_id='default',
auth_url=self.keystone_v3_endpoint,
trust_id=self.trust_id)
return self._trusts_auth_plugin
def _create_auth_plugin(self):
if self.auth_token_info:
auth_ref = access.AccessInfo.factory(body=self.auth_token_info,
auth_token=self.auth_token)
return access_plugin.AccessInfoPlugin(
auth_url=self.keystone_v3_endpoint,
auth_ref=auth_ref)
if self.auth_token:
# FIXME(jamielennox): This is broken but consistent. If you
# only have a token but don't load a service catalog then
            # url_for won't work. Stub with the keystone endpoint so at
# least it might be right.
return token_endpoint.Token(endpoint=self.keystone_v3_endpoint,
token=self.auth_token)
if self.password:
return v3.Password(username=self.username,
password=self.password,
project_id=self.tenant_id,
user_domain_id='default',
auth_url=self.keystone_v3_endpoint)
LOG.error(_LE("Keystone v3 API connection failed, no password "
"trust or auth_token!"))
raise exception.AuthorizationFailure()
def reload_auth_plugin(self):
self._auth_plugin = None
@property
def auth_plugin(self):
if not self._auth_plugin:
if self.trust_id:
self._auth_plugin = self.trusts_auth_plugin
else:
self._auth_plugin = self._create_auth_plugin()
return self._auth_plugin
def get_admin_context(show_deleted=False):
return RequestContext(is_admin=True, show_deleted=show_deleted)
class ContextMiddleware(wsgi.Middleware):
def __init__(self, app, conf, **local_conf):
# Determine the context class to use
self.ctxcls = RequestContext
if 'context_class' in local_conf:
self.ctxcls = importutils.import_class(local_conf['context_class'])
super(ContextMiddleware, self).__init__(app)
def make_context(self, *args, **kwargs):
"""Create a context with the given arguments."""
return self.ctxcls(*args, **kwargs)
def process_request(self, req):
"""Constructs an appropriate context from extracted auth information.
Extract any authentication information in the request and construct an
appropriate context from it.
"""
headers = req.headers
environ = req.environ
try:
username = None
password = None
aws_creds = None
if headers.get('X-Auth-User') is not None:
username = headers.get('X-Auth-User')
password = headers.get('X-Auth-Key')
elif headers.get('X-Auth-EC2-Creds') is not None:
aws_creds = headers.get('X-Auth-EC2-Creds')
user_id = headers.get('X-User-Id')
token = headers.get('X-Auth-Token')
tenant = headers.get('X-Project-Name')
tenant_id = headers.get('X-Project-Id')
region_name = headers.get('X-Region-Name')
auth_url = headers.get('X-Auth-Url')
roles = headers.get('X-Roles')
if roles is not None:
roles = roles.split(',')
token_info = environ.get('keystone.token_info')
auth_plugin = environ.get('keystone.token_auth')
req_id = environ.get(oslo_request_id.ENV_REQUEST_ID)
except Exception:
raise exception.NotAuthenticated()
req.context = self.make_context(auth_token=token,
tenant=tenant, tenant_id=tenant_id,
aws_creds=aws_creds,
username=username,
user_id=user_id,
password=password,
auth_url=auth_url,
roles=roles,
request_id=req_id,
auth_token_info=token_info,
region_name=region_name,
auth_plugin=auth_plugin)
def ContextMiddleware_filter_factory(global_conf, **local_conf):
"""Factory method for paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
def filter(app):
return ContextMiddleware(app, conf)
return filter
def request_context(func):
@six.wraps(func)
def wrapped(self, ctx, *args, **kwargs):
try:
return func(self, ctx, *args, **kwargs)
except exception.HeatException:
raise oslo_messaging.rpc.dispatcher.ExpectedException()
return wrapped
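# A minimal, self-contained sketch (not part of Heat) of the pattern used by the
# `request_context` decorator above: service methods raise domain exceptions and the
# wrapper re-raises them as an "expected" RPC exception type so the dispatcher does not
# log them as unexpected failures. The exception classes below are stand-ins, not the
# real heat/oslo.messaging ones.
def _sketch_expected_exception_pattern():
    import functools
    class DomainError(Exception):
        """Stand-in for exception.HeatException."""
    class ExpectedException(Exception):
        """Stand-in for oslo_messaging.rpc.dispatcher.ExpectedException."""
    def wrap_expected(func):
        @functools.wraps(func)
        def wrapped(self, ctx, *args, **kwargs):
            try:
                return func(self, ctx, *args, **kwargs)
            except DomainError:
                # Re-raise as the type the RPC layer treats as expected.
                raise ExpectedException()
        return wrapped
    class Service(object):
        @wrap_expected
        def fail(self, ctx):
            raise DomainError('boom')
    try:
        Service().fail(ctx=None)
    except ExpectedException:
        return True
    return False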
|
|
# ******************************************************************************
# Copyright 2014-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import numpy as np
from neon import NervanaObject
from neon.backends.backend import Tensor
class Initializer(NervanaObject):
"""
Abstract base class from which parameter tensor initializers inherit.
Subclasses should implement the ``fill`` method which takes as input a Tensor
and fills the values based on the initialization scheme.
"""
def fill(self, param):
"""
Initialize the provided tensor with values.
Args:
param (Tensor): Input Tensor.
"""
raise NotImplementedError()
class Constant(Initializer):
"""
Initializes parameters as a constant.
"""
def __init__(self, val=0.0, name="constantInit"):
"""
Class constructor.
Args:
val (float, optional): The value to assign to all tensor elements
"""
super(Constant, self).__init__(name=name)
self.val = val
def fill(self, param):
"""
Fills the provided tensor.
Args:
param (tensor): target tensor to fill
"""
if isinstance(self.val, Tensor):
assert self.val.shape == param.shape, "Constant(Array) initializer can"\
" only fill a matching shape tensor"
param[:] = self.val
class Array(Constant):
"""
Initializes parameters with values specified by a provided numpy array.
Same functionality as Constant except serialization needs to dump
tensor values into np array
Args:
        val (ndarray or tensor, optional): Values to assign to the tensor elements
"""
def get_description(self):
"""
Returns description of the object as a dict. Transfers the
tensors back to a numpy array.
"""
desc = super(Array, self).get_description()
if isinstance(desc['config']['val'], Tensor):
desc['config']['val'] = desc['config']['val'].get()
return desc
class Uniform(Initializer):
"""
Initializes parameters with random values drawn from a uniform distribution.
"""
def __init__(self, low=0.0, high=1.0, name="uniformInit"):
"""
Class constructor.
Args:
low (float, optional): Lower bound of range.
high (float, optional): Upper bound of range.
"""
super(Uniform, self).__init__(name=name)
self.low, self.high = (low, high)
def fill(self, param):
"""
Fill the provided tensor with random values drawn from a uniform
distribution.
Args:
params (tensor): Tensor to fill
"""
param[:] = self.be.rng.uniform(self.low, self.high, param.shape)
class Gaussian(Initializer):
"""
Initializes parameters with a gaussian distribution with the provided mean
and standard deviation. Defaults to (loc=0, scale=1)
"""
def __init__(self, loc=0.0, scale=1.0, name="gaussianInit"):
"""
Class constructor.
Args:
loc (float, optional): Mean parameter (mu). Defaults to 0.
scale (float, optional): Standard deviation parameter (sigma). Defaults to 1.
name (string, optional): Name to assign an instance of this class.
"""
super(Gaussian, self).__init__(name=name)
self.loc, self.scale = (loc, scale)
def fill(self, param):
"""
Fill the provided tensor with random values drawn from a gaussian
distribution.
Args:
params (tensor): Tensor to fill
"""
param[:] = self.be.rng.normal(self.loc, self.scale, param.shape)
class GlorotUniform(Initializer):
"""
Initializes parameter tensors with values drawn from a uniform distribution
ranging from :math:`-K` to :math:`K`. We define :math:`K=\sqrt{6 / (n_{in} + n_{out})}`,
where :math:`n_{in}` and :math:`n_{out}` are the input and output dimensions, respectively,
of the parameter tensor. This approach normalizes the range of the initialized values
by the tensor dimensions.
From: "Understanding the difficulty of training deep feedforward neural networks"
(http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf).
"""
def __init__(self, name="autouniformInit"):
"""
Class constructor.
Args:
name (string, optional): Name to assign an instance of this class
"""
super(GlorotUniform, self).__init__(name=name)
def fill(self, param):
"""
Fill the provided tensor with random values drawn from the Uniform
distribution, using normalized bounds.
Args:
params (tensor): Tensor to fill
"""
k = np.sqrt(6.0 / (param.shape[0] + param.shape[1]))
param[:] = self.be.rng.uniform(-k, k, param.shape)
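# Minimal NumPy-only sketch of the Glorot (Xavier-uniform) bound used above, assuming a
# plain 2-D weight array of shape (n_in, n_out) instead of a neon Tensor and backend RNG.
def _sketch_glorot_uniform(n_in, n_out, rng=None):
    rng = np.random.RandomState(0) if rng is None else rng
    k = np.sqrt(6.0 / (n_in + n_out))  # normalized bound K
    return rng.uniform(-k, k, size=(n_in, n_out))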
class Xavier(Initializer):
"""
Initializes parameter tensors with values drawn from a uniform distribution
ranging from :math:`-K` to :math:`K` We define :math:`K=\sqrt{3 / (n_{in})}`,
where :math:`n_{in}` is the number of input nodes.
Similar to Glorot except the range is normalized by the input size only.
"""
def __init__(self, local=True, name="xavier"):
"""
Class constructor.
Args:
local (bool, optional): Whether the layer type is local (Convolutional) or not.
Default is True.
name (string, optional): Name to assign an instance of this class.
"""
super(Xavier, self).__init__(name=name)
self.local = local
def fill(self, param):
"""
Fill the provided tensor with random values drawn from the Uniform
distribution, using normalized bounds.
Args:
params (tensor): Tensor to fill
"""
fan_in = param.shape[0 if self.local else 1]
scale = np.sqrt(3. / fan_in)
param[:] = self.be.rng.uniform(-scale, scale, param.shape)
class Kaiming(Initializer):
"""
Initializes parameters with a zero-mean Gaussian distribution. The standard deviation
is automatically set as :math:`\sigma=\sqrt{2 / n_{in}}`, where :math:`n_{in}` is
the input dimension of the tensor.
Based on the initializer described in: http://arxiv.org/pdf/1502.01852.pdf.
"""
def __init__(self, local=True, name="Kaiming"):
"""
Class constructor.
Args:
local (bool, optional): Whether the layer type is local (Convolutional) or not.
Default is True.
name (string, optional): Name to assign an instance of this class.
"""
super(Kaiming, self).__init__(name=name)
self.local = local
def fill(self, param):
"""
Fill the provided tensor with random values drawn from a gaussian
distribution.
Args:
params (tensor): Tensor to fill
"""
fan_in = param.shape[0 if self.local else 1]
scale = np.sqrt(2. / fan_in)
param[:] = self.be.rng.normal(0, scale, param.shape)
class IdentityInit(Initializer):
"""
Initializes parameters with the identity matrix.
"""
def __init__(self, local=True, name="Identity"):
"""
Class constructor.
Args:
local (bool, optional): Whether the layer type is local (Convolutional) or not.
Default is True.
name (string, optional): Name to assign an instance of this class.
"""
super(IdentityInit, self).__init__(name=name)
self.local = local
def fill(self, param):
"""
Fill the provided tensor with the identity matrix.
Args:
params (tensor): Tensor to fill
"""
(nin, nout) = param.shape
w_ary = np.zeros((nin, nout), dtype=np.float32)
w_ary[:, :nin] = np.eye(nin)
param[:] = w_ary
class Orthonormal(Initializer):
"""
    Initializes parameters with the singular value decomposition of a
    random gaussian matrix.
Implementation taken from Lasagne. Reference: Saxe et al., http://arxiv.org/abs/1312.6120
"""
def __init__(self, scale=1.1, name="orthonormal"):
"""
Class constructor.
Args:
scale (float, optional): Scaling factor of values. Defaults to 1.1.
name (string, optional): Name to assign an instance of this class.
"""
super(Orthonormal, self).__init__(name=name)
self.scale = scale
def fill(self, param):
"""
Fill the provided tensor using the Orthonormal method.
Args:
params (tensor): Tensor to fill
"""
a = np.random.normal(0.0, 1.0, param.shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
# pick the one with the correct shape
q = u if u.shape == param.shape else v
param[:] = self.scale * q
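# NumPy-only sketch of the orthonormal initialization above: take the SVD of a random
# Gaussian matrix and keep the factor whose shape matches the target, scaled like
# Orthonormal.scale. Independent of the neon backend; for illustration only.
def _sketch_orthonormal(shape, scale=1.1, rng=None):
    rng = np.random.RandomState(0) if rng is None else rng
    a = rng.normal(0.0, 1.0, shape)
    u, _, v = np.linalg.svd(a, full_matrices=False)
    q = u if u.shape == shape else v
    # Rows (or columns) of q are orthonormal, so q.dot(q.T) (or q.T.dot(q)) is ~identity.
    return scale * q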
|
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 11 22:01:59 2016
@author: ajaver
"""
import numpy as np
import tables
import pandas as pd
import warnings
from tierpsy.helper.params import read_fps, read_microns_per_pixel
from tierpsy.helper.misc import print_flush, get_base_name
from tierpsy.analysis.stage_aligment.findStageMovement import getFrameDiffVar, findStageMovement, shift2video_ref
def isGoodStageAligment(skeletons_file):
with tables.File(skeletons_file, 'r') as fid:
try:
good_aligment = fid.get_node('/stage_movement')._v_attrs['has_finished']
except (KeyError, IndexError, tables.exceptions.NoSuchNodeError):
good_aligment = 0
return good_aligment in [1, 2]
def _h_get_stage_inv(skeletons_file, timestamp):
if timestamp.size == 0:
return np.zeros((0, 2)), np.zeros(0)
first_frame = timestamp[0]
last_frame = timestamp[-1]
with tables.File(skeletons_file, 'r') as fid:
stage_vec_ori = fid.get_node('/stage_movement/stage_vec')[:]
timestamp_ind = fid.get_node('/timestamp/raw')[:].astype(np.int)
rotation_matrix = fid.get_node('/stage_movement')._v_attrs['rotation_matrix']
microns_per_pixel_scale = fid.get_node('/stage_movement')._v_attrs['microns_per_pixel_scale']
#2D to control for the scale vector directions
# let's rotate the stage movement
dd = np.sign(microns_per_pixel_scale)
rotation_matrix_inv = np.dot(
rotation_matrix * [(1, -1), (-1, 1)], [(dd[0], 0), (0, dd[1])])
# adjust the stage_vec to match the timestamps in the skeletons
good = (timestamp_ind >= first_frame) & (timestamp_ind <= last_frame)
ind_ff = (timestamp_ind[good] - first_frame).astype(np.int) #make sure it is int to be used as index
if timestamp_ind.shape[0] > stage_vec_ori.shape[0]:
#there are extra elements in the timestamp_ind, let's pad it with the same value in the stage vector
extra_n = timestamp_ind.shape[0] - stage_vec_ori.shape[0]
stage_vec_ori = np.pad(stage_vec_ori, ((0, extra_n),(0,0)), 'edge')
stage_vec_ori = stage_vec_ori[good]
stage_vec = np.full((timestamp.size, 2), np.nan)
stage_vec[ind_ff, :] = stage_vec_ori
    # the negative sign is so the stage vector can be added directly, instead of
    # subtracting it.
stage_vec_inv = -np.dot(rotation_matrix_inv, stage_vec.T).T
return stage_vec_inv, ind_ff
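# Small NumPy sketch (not used by the pipeline itself) of the sign-adjusted inverse
# rotation applied above: the off-diagonal terms of the rotation matrix are negated
# element-wise, the columns are scaled by the sign of the microns-per-pixel conversion,
# and the result is negated so it can be added to the worm coordinates directly.
def _sketch_rotate_stage_vec(rotation_matrix, microns_per_pixel_scale, stage_vec):
    dd = np.sign(microns_per_pixel_scale)
    rotation_matrix_inv = np.dot(
        rotation_matrix * [(1, -1), (-1, 1)], [(dd[0], 0), (0, dd[1])])
    return -np.dot(rotation_matrix_inv, stage_vec.T).T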
def _h_add_stage_position_pix(mask_file, skeletons_file):
# if the stage was aligned correctly add the information into the mask file
microns_per_pixel = read_microns_per_pixel(mask_file)
with tables.File(mask_file, 'r+') as fid:
timestamp_c = fid.get_node('/timestamp/raw')[:]
timestamp = np.arange(np.min(timestamp_c), np.max(timestamp_c)+1)
stage_vec_inv, ind_ff = _h_get_stage_inv(skeletons_file, timestamp)
stage_vec_pix = stage_vec_inv[ind_ff]/microns_per_pixel
if '/stage_position_pix' in fid:
fid.remove_node('/', 'stage_position_pix')
fid.create_array('/', 'stage_position_pix', obj=stage_vec_pix)
def alignStageMotion(masked_file, skeletons_file):
base_name = get_base_name(masked_file)
print_flush(base_name + ' Aligning Stage Motion...')
#%%
fps = read_fps(skeletons_file)
#%%
# Open the information file and read the tracking delay time.
# (help from segworm findStageMovement)
# 2. The info file contains the tracking delay. This delay represents the
# minimum time between stage movements and, conversely, the maximum time it
# takes for a stage movement to complete. If the delay is too small, the
# stage movements become chaotic. We load the value for the delay.
with tables.File(masked_file, 'r') as fid:
xml_info = fid.get_node('/xml_info').read().decode()
g_mask = fid.get_node('/mask')
tot_frames = g_mask.shape[0]
        # Read the scale conversions; we need them later to convert pixels into microns
pixelPerMicronX = 1/g_mask._v_attrs['pixels2microns_x']
pixelPerMicronY = 1/g_mask._v_attrs['pixels2microns_y']
with pd.HDFStore(masked_file, 'r') as fid:
stage_log = fid['/stage_log']
    # this is not the cleanest way, but matlab does not have an xml parser for
    # text strings
    delay_str = xml_info.partition('<delay>')[-1].partition('</delay>')[0]
    delay_time = float(delay_str) / 1000
    delay_frames = np.ceil(delay_time * fps)
    normScale = np.sqrt((pixelPerMicronX ** 2 + pixelPerMicronY ** 2) / 2)
    pixelPerMicronScale = normScale * np.array((np.sign(pixelPerMicronX), np.sign(pixelPerMicronY)))
    # Compute the rotation matrix.
    # rotation = 1
    angle = np.arctan(pixelPerMicronY / pixelPerMicronX)
    if angle > 0:
        angle = np.pi / 4 - angle
    else:
        angle = np.pi / 4 + angle
    cosAngle = np.cos(angle)
    sinAngle = np.sin(angle)
    rotation_matrix = np.array(((cosAngle, -sinAngle), (sinAngle, cosAngle)))
#%%
    # Ev's code uses the full vectors without dropping frames
    # 1. video2Diff differentiates a video frame by frame and outputs the
    # differential variance. We load these frame differences.
    frame_diffs_d = getFrameDiffVar(masked_file)
print_flush(base_name + ' Aligning Stage Motion...')
    #%% Read the media times and locations from the log file.
    # (help from segworm findStageMovement)
    # 3. The log file contains the initial stage location at media time 0 as
    # well as the subsequent media times and locations per stage movement. Our
    # algorithm attempts to match the frame differences in the video (see step
    # 1) to the media times in this log file. Therefore, we load these media
    # times and stage locations.
    # from the .log.csv file
    mediaTimes = stage_log['stage_time'].values
    locations = stage_log[['stage_x', 'stage_y']].values
#ini stage movement fields
with tables.File(skeletons_file, 'r+') as fid:
# delete data from previous analysis if any
if '/stage_movement' in fid:
fid.remove_node('/stage_movement', recursive = True)
g_stage_movement = fid.create_group('/', 'stage_movement')
g_stage_movement._v_attrs['has_finished'] = 0
#read and prepare timestamp
try:
video_timestamp_ind = fid.get_node('/timestamp/raw')[:]
if np.any(np.isnan(video_timestamp_ind)):
raise ValueError()
else:
video_timestamp_ind = video_timestamp_ind.astype(np.int)
except(tables.exceptions.NoSuchNodeError, ValueError):
            warnings.warn('The timestamp is corrupt or does not exist. I will assume no dropped frames and deduce it from the number of frames.')
video_timestamp_ind = np.arange(tot_frames, dtype=np.int)
        #%% The shift makes everything a bit more complicated. I have to remove the first frame before resizing the array, taking the dropped frames into account.
        if video_timestamp_ind.size > frame_diffs_d.size + 1:
            # I can tolerate one frame (two with respect to the frame_diff)
            # extra at the end of the timestamp
            video_timestamp_ind = video_timestamp_ind[:frame_diffs_d.size + 1]
        dd = video_timestamp_ind - np.min(video_timestamp_ind) - 1  # shift data
        dd = dd[dd >= 0]
        #%%
        if frame_diffs_d.size != dd.size:
            raise ValueError('The number of timestamps does not match the number of frames in the movie.')
        frame_diffs = np.full(int(np.max(video_timestamp_ind)), np.nan)
        frame_diffs[dd] = frame_diffs_d
#%% save stage data into the skeletons.hdf5
with tables.File(skeletons_file, 'r+') as fid:
        # I am saving this data beforehand for debugging purposes
g_stage_movement = fid.get_node('/stage_movement')
fid.create_carray(g_stage_movement, 'frame_diffs', obj=frame_diffs_d)
g_stage_movement._v_attrs['fps'] = fps
g_stage_movement._v_attrs['delay_frames'] = delay_frames
g_stage_movement._v_attrs['microns_per_pixel_scale'] = pixelPerMicronScale
g_stage_movement._v_attrs['rotation_matrix'] = rotation_matrix
    #%% try to run the alignment and return empty data if it fails
    is_stage_move, movesI, stage_locations = \
        findStageMovement(frame_diffs, mediaTimes, locations, delay_frames, fps)
stage_vec_d, is_stage_move_d = shift2video_ref(is_stage_move, movesI, stage_locations, video_timestamp_ind)
#%% save stage data into the skeletons.hdf5
with tables.File(skeletons_file, 'r+') as fid:
g_stage_movement = fid.get_node('/stage_movement')
fid.create_carray(g_stage_movement, 'stage_vec', obj=stage_vec_d)
fid.create_carray(g_stage_movement, 'is_stage_move', obj=is_stage_move_d)
g_stage_movement._v_attrs['has_finished'] = 1
_h_add_stage_position_pix(masked_file, skeletons_file)
print_flush(base_name + ' Aligning Stage Motion. Finished.')
if __name__ == '__main__':
#masked_file = '/Users/ajaver/OneDrive - Imperial College London/Local_Videos/miss_aligments/trp-2 (ok298) off food_2010_04_30__13_03_40___1___8.hdf5'
#masked_file = '/Users/ajaver/Tmp/MaskedVideos/worm 1/L4_19C_1_R_2015_06_24__16_40_14__.hdf5'
#masked_file = '/Users/ajaver/Tmp/MaskedVideos/worm 2/L4_H_18_2016_10_30__15_56_12__.hdf5'
masked_file = '/Volumes/behavgenom_archive$/single_worm/unfinished/WT/PS312/food_mec-10,mec-4-L3/XX/30m_wait/clockwise/197 PS312 3 on mec-10,mec-4-L3 L_2011_07_06__15_33___3___1.hdf5'
skeletons_file = masked_file.replace(
'MaskedVideos',
'Results').replace(
'.hdf5',
'_skeletons.hdf5')
#alignStageMotion(masked_file, skeletons_file)
|
|
#!/usr/bin/env python
import os
import argparse
import subprocess
import time
import shutil
import socket
import numpy as np
from pychemia.code.abinit import AbinitInput, AbinitOutput
from pychemia.population.orbitaldftu import dmatpawu2params
def which(program):
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
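# Self-contained sketch of the run-index bookkeeping used in the main block below: the
# next run index is the first N for which 'abinit_NN.in' does not exist yet, and None is
# returned once 'max_nruns' is reached. Same filename pattern as the rest of this script.
def _sketch_next_run_index(max_nruns=10):
    index = 0
    while os.path.isfile('abinit_%02d.in' % index):
        index += 1
    return index if index < max_nruns else None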
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Orbital DFTU Executor, execute several ABINIT runs changing the "
"value from dmatpawu from the previous output as input until a "
"tolerance is reached. The executor assumes the existance of a "
"command 'abinit' and 'mpirun' and files 'abinit.in' and "
"'abinit.files' the canonical names for those files in ABINIT")
parser.add_argument('--usedmatpu', type=int, help='ABINIT Variable usedmatpu for each run (default: 25)',
required=False, default=25, metavar='<N>')
parser.add_argument('--nstep', type=int, help='ABINIT Variable nstep for each run (default: 50)',
required=False, default=50, metavar='<N>')
parser.add_argument('--tolvrs', type=float, help='ABINIT Variable tolvrs for each run (default: 1E-14)',
required=False, default=1E-14, metavar='<X>')
parser.add_argument('--target_nres2', type=float, help='Stopping criteria for this executor (default: 1E-12)',
required=False, default=1E-12, metavar='<X>')
parser.add_argument('--max_nruns', type=int, help='Maximum number of runs allowed (default: 10)',
required=False, default=10, metavar='<N>')
    parser.add_argument('--nhours', type=int, help='Maximum number of hours, ignored if running through a queue '
'system (PBS), mandatory otherwise',
required=False, default=0, metavar='<N>')
parser.add_argument('--nparal', type=int, help='Number of cores for use with MPI, ignored if running through a '
'queue system (PBS), mandatory otherwise',
required=False, default=0, metavar='<N>')
args = parser.parse_args()
print(" ABINIT Orbital DFT+U Executor")
print(" =============================\n")
print("Running on: %s" % socket.gethostname())
print("Current load: %f %f %f" % os.getloadavg())
# Checking the consistency of all arguments:
if args.target_nres2 <= args.tolvrs:
print("Target value must be bigger than ABINIT internal criteria for tolvrs")
parser.print_help()
exit(1)
if args.usedmatpu >= args.nstep:
print("Total number of SCF steps 'nstep' must be bigger than 'usedmatpu' the number of steps with 'dmatpawu' "
"fixed")
parser.print_help()
exit(1)
if not os.path.exists('abinit.in'):
with open('ERROR', 'w') as wf:
wf.write('No abinit.in')
raise RuntimeError("File 'abinit.in' could not be found or its symbolic link is broken")
if not os.path.exists('abinit.files'):
with open('ERROR', 'w') as wf:
wf.write('No abinit.files')
raise RuntimeError("File 'abinit.files' could not be found or its symbolic link is broken")
    # Checking the existence of "mpirun" and "abinit"
ret = which('mpirun')
if ret is None:
with open('ERROR', 'w') as wf:
wf.write('No mpirun executable')
raise RuntimeError("Command 'mpirun' could not be found, maybe you need to load the module first")
print("mpirun: %s" % ret)
ret = which('abinit')
if ret is None:
with open('ERROR', 'w') as wf:
wf.write('No abinit executable')
raise RuntimeError("Command 'abinit' could not be found, maybe you need to load the module first")
print("abinit: %s" % ret)
usedmatpu = args.usedmatpu
nstep = args.nstep
tolvrs = args.tolvrs
target_nres2 = args.target_nres2
max_nruns = args.max_nruns
nodefile = os.getenv('PBS_NODEFILE')
nparal = 1
if nodefile is not None:
print("Nodefile: %s" % nodefile)
rf = open(nodefile)
nparal = len(rf.readlines())
elif args.nparal > 0:
nparal = args.nparal
else:
print("ERROR: No queue system detected and no positive value for 'nparal'")
exit(1)
walltime = os.getenv('PBS_WALLTIME')
if walltime is not None:
walltime = int(walltime)
elif args.nhours > 0:
walltime = int(args.nhours*3600)
else:
print("ERROR: No queue system detected and no positive value for 'nhours'")
exit(1)
print("Walltime: %d seconds = %d minutes = %d hours)" % (walltime, int(walltime/60), int(walltime/3600)))
print("Number of cores for MPI: %d" % nparal)
# Getting the current time, use to compute the remaining time in execution
start_time = time.time()
abi = AbinitInput('abinit.in')
print("Checking that abinit.in contains value for dmatpawu...", end='')
if 'dmatpawu' not in abi.variables:
print('No')
raise ValueError("ERROR: Could not open abinit.in")
else:
print('Yes')
print("Checking that abinit.in contains value for lpawu...", end='')
if 'lpawu' not in abi.variables:
print('No')
raise ValueError("ERROR: Could not open abinit.in")
else:
print('Yes, max lpawu=%d' % max(abi['lpawu']))
print('Setting ABINIT variables usedmatpu=%d, nstep=%d and tolvrs=%e' % (usedmatpu, nstep, tolvrs))
abi['usedmatpu'] = usedmatpu
abi['nstep'] = nstep
abi['tolvrs'] = tolvrs
    print('Writing modified abinit.in')
abi.write('abinit.in')
# Getting the index from the last execution and adding one for the next run
index = 0
while True:
if os.path.isfile('abinit_%02d.in' % index):
index += 1
else:
break
if index >= max_nruns:
print("Total number of runs has been achieve already, increse 'max_nruns' if you want to continue")
parser.print_help()
exit(1)
print("Executing run with index: %d" % index)
while index < max_nruns:
print('\nABINIT execution %d of %d' % (index+1, max_nruns))
abi = AbinitInput('abinit.in')
# If possible set the WFK from the output back to input
if os.path.isfile('abinit-i_WFK'):
abi['irdwfk'] = 1
abi.write('abinit.in')
# Calling ABINIT
command_line = "mpirun -np %d abinit < abinit.files > abinit.log 2> abinit.err" % nparal
        print('Running: %s' % command_line)
start_run = time.time()
subprocess.call(command_line, shell=True)
end_run = time.time()
# Delete the error file if empty
if os.path.isfile('abinit.err') and os.path.getsize('abinit.err') == 0:
os.remove('abinit.err')
runtime = end_run-start_run
print('Execution finished, execution took %d minutes' % int(runtime/60))
# If everything works fine with ABINIT we have abinit.out
# Otherwise is better to stop the entire run
if not os.path.isfile('abinit.out'):
with open('ERROR', 'w') as wf:
wf.write('No abinit.out')
raise ValueError('File not found: abinit.out')
if not os.path.isfile('abinit.log'):
with open('ERROR', 'w') as wf:
wf.write('No abinit.log')
raise ValueError('File not found: abinit.log')
if os.path.isfile('abinit.in'):
shutil.copy2('abinit.in', 'abinit_%02d.in' % index)
# Renaming logs and setting WFK back to input
if os.path.isfile('abinit.log'):
print("Renaming abinit.log")
os.rename('abinit.log', 'abinit_%02d.log' % index)
if os.path.isfile('abinit.err'):
print("Renaming abinit.err")
os.rename('abinit.err', 'abinit_%02d.err' % index)
if os.path.isfile('abinit-o_WFK'):
print("Renaming abinit-o_WFK")
os.rename('abinit-o_WFK', 'abinit-i_WFK')
if os.path.isfile('abinit.out'):
print("Renaming abinit.out")
shutil.copy2('abinit.out', 'abinit_%02d.txt' % index)
os.rename('abinit.out', 'abinit_%02d.out' % index)
abiout = 'abinit_%02d.out' % index
else:
print("Could not find abinit.out")
# Opening the output file
print("Reading the output from 'abinit.out'...")
abo = AbinitOutput(abiout)
if not abo.is_finished:
print("abinit.out is truncated, discarting that output redoing the calculation")
continue
        # The final density matrix is built from the output
ndim = 2*max(abi['lpawu'])+1
params = dmatpawu2params(abi['dmatpawu'], ndim)
print("Euler angles from 'abinit.in':")
for i in params['euler_angles']:
for j in i:
print(" %7.3f" % j, end=' ')
print("\n", end='')
dmatpawu = abo.get_dmatpawu()
if dmatpawu is not None:
params = dmatpawu2params(dmatpawu, ndim)
print("Euler angles from 'abinit.out':")
for i in params['euler_angles']:
for j in i:
print(" %7.3f" % j, end=' ')
print("\n", end='')
else:
print("Could not get final dmatpawu from abinit.out")
dmatpawu = np.array(abi['dmatpawu']).reshape((-1, ndim, ndim))
print('Shape of dmatpawu: %s' % str(dmatpawu.shape))
# Updating dmatpawu from the output back to input
abi['dmatpawu'] = list(dmatpawu.flatten())
if os.path.isfile('abinit-i_WFK'):
abi['irdwfk'] = 1
abi.write('abinit.in')
# Checking if you should accept the current residual
energetics = abo.get_energetics()
if energetics is None:
raise RuntimeError("Could not get energetics from the output")
        nres2 = energetics['nres2'][-1]
if nres2 < target_nres2 or index == max_nruns-1:
wf = open('COMPLETE', 'w')
wf.write("%d\n" % index)
wf.close()
break
# Current time
curtime = time.time()
if curtime+runtime > start_time + walltime:
print("Based on previous run, it is unlikely that next run will have time to complete, exiting")
print("The walltime for this job is %d minutes and we have been running for %d minutes" %
(int(walltime/60), int((curtime-start_time)/60)))
break
else:
print("Remaining time %d minutes, time for one more run" % int((start_time + walltime - curtime)/60))
        # Increasing index for the next run
index += 1
|
|
# Copyright 2019 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""ScaleMatvecLinearOperator Tests."""
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python.internal import test_util
@test_util.test_all_tf_execution_regimes
class ScaleMatvecLinearOperatorTest(test_util.TestCase):
def testDiag(self):
diag = np.array([[1, 2, 3],
[2, 5, 6]], dtype=np.float32)
scale = tf.linalg.LinearOperatorDiag(diag, is_non_singular=True)
bijector = tfb.ScaleMatvecLinearOperator(
scale=scale, validate_args=True)
x = np.array([[1, 0, -1], [2, 3, 4]], dtype=np.float32)
y = diag * x
ildj = -np.sum(np.log(np.abs(diag)), axis=-1)
self.assertStartsWith(bijector.name, 'scale_matvec_linear_operator')
self.assertAllClose(y, self.evaluate(bijector.forward(x)))
self.assertAllClose(x, self.evaluate(bijector.inverse(y)))
self.assertAllClose(
ildj,
self.evaluate(bijector.inverse_log_det_jacobian(y, event_ndims=1)))
self.assertAllClose(
self.evaluate(-bijector.inverse_log_det_jacobian(y, event_ndims=1)),
self.evaluate(bijector.forward_log_det_jacobian(x, event_ndims=1)))
def testTriL(self):
tril = np.array([[[3, 0, 0],
[2, -1, 0],
[3, 2, 1]],
[[2, 0, 0],
[3, -2, 0],
[4, 3, 2]]],
dtype=np.float32)
scale = tf.linalg.LinearOperatorLowerTriangular(
tril, is_non_singular=True)
bijector = tfb.ScaleMatvecLinearOperator(
scale=scale, validate_args=True)
x = np.array([[[1, 0, -1],
[2, 3, 4]],
[[4, 1, -7],
[6, 9, 8]]],
dtype=np.float32)
# If we made the bijector do x*A+b then this would be simplified to:
# y = np.matmul(x, tril).
y = np.squeeze(np.matmul(tril, np.expand_dims(x, -1)), -1)
ildj = -np.sum(np.log(np.abs(np.diagonal(
tril, axis1=-2, axis2=-1))))
self.assertStartsWith(bijector.name, 'scale_matvec_linear_operator')
self.assertAllClose(y, self.evaluate(bijector.forward(x)))
self.assertAllClose(x, self.evaluate(bijector.inverse(y)))
self.assertAllClose(
ildj,
self.evaluate(
bijector.inverse_log_det_jacobian(
y, event_ndims=2)))
self.assertAllClose(
self.evaluate(-bijector.inverse_log_det_jacobian(y, event_ndims=2)),
self.evaluate(bijector.forward_log_det_jacobian(x, event_ndims=2)))
def testTriLAdjoint(self):
tril = np.array([[[3, 0, 0],
[2, -1, 0],
[3, 2, 1]],
[[2, 0, 0],
[3, -2, 0],
[4, 3, 2]]],
dtype=np.float32)
scale = tf.linalg.LinearOperatorLowerTriangular(
tril, is_non_singular=True)
bijector = tfb.ScaleMatvecLinearOperator(
scale=scale, adjoint=True, validate_args=True)
x = np.array([[[1, 0, -1],
[2, 3, 4]],
[[4, 1, -7],
[6, 9, 8]]],
dtype=np.float32)
# If we made the bijector do x*A+b then this would be simplified to:
# y = np.matmul(x, tril).
triu = tril.transpose([0, 2, 1])
y = np.matmul(triu, x[..., np.newaxis])[..., 0]
ildj = -np.sum(np.log(np.abs(np.diagonal(
tril, axis1=-2, axis2=-1))))
self.assertStartsWith(bijector.name, 'scale_matvec_linear_operator')
self.assertAllClose(y, self.evaluate(bijector.forward(x)))
self.assertAllClose(x, self.evaluate(bijector.inverse(y)))
self.assertAllClose(
ildj,
self.evaluate(
bijector.inverse_log_det_jacobian(
y, event_ndims=2)))
self.assertAllClose(
self.evaluate(-bijector.inverse_log_det_jacobian(y, event_ndims=2)),
self.evaluate(bijector.forward_log_det_jacobian(x, event_ndims=2)))
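# NumPy-only sketch (independent of TFP) of the identity exercised by the tests above for
# a diagonal scale: if y = diag * x, the inverse log-det-Jacobian over the event axis is
# -sum(log|diag|) and the forward log-det-Jacobian is its negative.
def _sketch_diag_scale_ildj(diag):
  diag = np.asarray(diag, dtype=np.float32)
  return -np.sum(np.log(np.abs(diag)), axis=-1)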
class _ScaleMatvecLinearOperatorBlockTest(object):
def testBijector(self):
x = [np.array([4., 3., 3.]).astype(np.float32),
np.array([0., -5.]).astype(np.float32)]
op = self.build_operator()
y = self.evaluate(op.matvec(x))
ldj = self.evaluate(op.log_abs_determinant())
bijector = tfb.ScaleMatvecLinearOperatorBlock(scale=op, validate_args=True)
self.assertStartsWith(bijector.name, 'scale_matvec_linear_operator_block')
f_x = bijector.forward(x)
self.assertAllClose(y, self.evaluate(f_x))
inv_y = self.evaluate(bijector.inverse(y))
self.assertAllClose(x, inv_y)
# Calling `inverse` on an output of `bijector.forward` (that is equal to
# `y`) is a cache hit and returns the original, non-broadcasted input `x`.
for x_, z_ in zip(x, bijector.inverse(f_x)):
self.assertIs(x_, z_)
ldj_ = self.evaluate(
bijector.forward_log_det_jacobian(x, event_ndims=[1, 1]))
self.assertAllClose(ldj, ldj_)
self.assertEmpty(ldj_.shape)
self.assertAllClose(
ldj_,
self.evaluate(
-bijector.inverse_log_det_jacobian(y, event_ndims=[1, 1])))
def testOperatorBroadcast(self):
x = [tf.ones((1, 1, 1, 4), dtype=tf.float32),
tf.ones((1, 1, 1, 3), dtype=tf.float32)]
op = self.build_batched_operator()
bijector = tfb.ScaleMatvecLinearOperatorBlock(op, validate_args=True)
self.assertAllEqual(
self.evaluate(tf.shape(bijector.forward_log_det_jacobian(x, [1, 1]))),
self.evaluate(op.batch_shape_tensor()))
# Broadcasting of event shape components with batched LinearOperators
# raises.
with self.assertRaisesRegexp(ValueError, 'bijector parameters changes'):
self.evaluate(bijector.forward_log_det_jacobian(x, event_ndims=[2, 2]))
# Broadcasting of event shape components with batched LinearOperators
# raises for `ldj_reduce_ndims > batch_ndims`.
with self.assertRaisesRegexp(ValueError, 'bijector parameters changes'):
self.evaluate(bijector.forward_log_det_jacobian(x, event_ndims=[3, 3]))
def testEventShapeBroadcast(self):
op = self.build_operator()
bijector = tfb.ScaleMatvecLinearOperatorBlock(
op, validate_args=True)
x = [tf.broadcast_to(tf.constant(1., dtype=tf.float32), [2, 3, 3]),
tf.broadcast_to(tf.constant(2., dtype=tf.float32), [2, 1, 2])]
# Forward/inverse event shape methods return the correct value.
self.assertAllEqual(
self.evaluate(bijector.forward_event_shape_tensor(
[tf.shape(x_) for x_ in x])),
[self.evaluate(tf.shape(y_)) for y_ in bijector.forward(x)])
self.assertAllEqual(
bijector.inverse_event_shape([x_.shape for x_ in x]),
[y_.shape for y_ in bijector.inverse(x)])
# Broadcasting of inputs within `ldj_reduce_shape` raises.
with self.assertRaisesRegexp(ValueError, 'left of `min_event_ndims`'):
self.evaluate(bijector.forward_log_det_jacobian(x, event_ndims=[2, 2]))
def testAlignedEventDims(self):
x = [tf.ones((3,), dtype=tf.float32), tf.ones((2, 2), tf.float32)]
op = self.build_operator()
bijector = tfb.ScaleMatvecLinearOperatorBlock(op, validate_args=True)
with self.assertRaisesRegexp(ValueError, 'equal for all elements'):
self.evaluate(bijector.forward_log_det_jacobian(x, event_ndims=[1, 2]))
@test_util.test_all_tf_execution_regimes
class ScaleMatvecLinearOperatorBlockDiagTest(
test_util.TestCase, _ScaleMatvecLinearOperatorBlockTest):
def build_operator(self):
return tf.linalg.LinearOperatorBlockDiag(
[tf.linalg.LinearOperatorDiag(diag=[2., 3., 6.]),
tf.linalg.LinearOperatorFullMatrix(matrix=[[12., 5.], [-1., 3.]])],
is_non_singular=True)
def build_batched_operator(self):
seed = test_util.test_seed()
return tf.linalg.LinearOperatorBlockDiag(
[tf.linalg.LinearOperatorDiag(
tf.random.normal((2, 3, 4), dtype=tf.float32, seed=seed)),
tf.linalg.LinearOperatorIdentity(3)], is_non_singular=True)
@test_util.test_all_tf_execution_regimes
class ScaleMatvecLinearOperatorBlockTrilTest(
test_util.TestCase, _ScaleMatvecLinearOperatorBlockTest):
def build_operator(self):
return tf.linalg.LinearOperatorBlockLowerTriangular([
[tf.linalg.LinearOperatorDiag(diag=[2., 3., 6.], is_non_singular=True)],
[tf.linalg.LinearOperatorFullMatrix(
matrix=[[12., 5., -1.], [3., 0., 1.]]),
tf.linalg.LinearOperatorIdentity(2)]], is_non_singular=True)
def build_batched_operator(self):
seed = test_util.test_seed()
return tf.linalg.LinearOperatorBlockLowerTriangular([
[tf.linalg.LinearOperatorFullMatrix(
tf.random.normal((3, 4, 4), dtype=tf.float32, seed=seed),
is_non_singular=True)],
[tf.linalg.LinearOperatorZeros(
3, 4, is_square=False, is_self_adjoint=False),
tf.linalg.LinearOperatorFullMatrix(
tf.random.normal((3, 3), dtype=tf.float32, seed=seed),
is_non_singular=True)]
], is_non_singular=True)
if __name__ == '__main__':
test_util.main()
|
|
import gdbremote_testcase
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestGdbRemoteAuxvSupport(gdbremote_testcase.GdbRemoteTestCaseBase):
mydir = TestBase.compute_mydir(__file__)
AUXV_SUPPORT_FEATURE_NAME = "qXfer:auxv:read"
@skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def has_auxv_support(self):
inferior_args = ["message:main entered", "sleep:5"]
procs = self.prep_debug_monitor_and_inferior(
inferior_args=inferior_args)
# Don't do anything until we match the launched inferior main entry output.
# Then immediately interrupt the process.
# This prevents auxv data being asked for before it's ready and leaves
# us in a stopped state.
self.test_sequence.add_log_lines([
# Start the inferior...
"read packet: $c#63",
# ... match output....
{"type": "output_match", "regex": self.maybe_strict_output_regex(
r"message:main entered\r\n")},
], True)
# ... then interrupt.
self.add_interrupt_packets()
self.add_qSupported_packets()
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
features = self.parse_qSupported_response(context)
return self.AUXV_SUPPORT_FEATURE_NAME in features and features[
self.AUXV_SUPPORT_FEATURE_NAME] == "+"
def get_raw_auxv_data(self):
# Start up llgs and inferior, and check for auxv support.
if not self.has_auxv_support():
self.skipTest("auxv data not supported")
# Grab pointer size for target. We'll assume that is equivalent to an unsigned long on the target.
# Auxv is specified in terms of pairs of unsigned longs.
self.reset_test_sequence()
self.add_process_info_collection_packets()
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
proc_info = self.parse_process_info_response(context)
self.assertIsNotNone(proc_info)
self.assertTrue("ptrsize" in proc_info)
word_size = int(proc_info["ptrsize"])
OFFSET = 0
LENGTH = 0x400
# Grab the auxv data.
self.reset_test_sequence()
self.test_sequence.add_log_lines(
[
"read packet: $qXfer:auxv:read::{:x},{:x}:#00".format(
OFFSET,
LENGTH),
{
"direction": "send",
"regex": re.compile(
r"^\$([^E])(.*)#[0-9a-fA-F]{2}$",
re.MULTILINE | re.DOTALL),
"capture": {
1: "response_type",
2: "content_raw"}}],
True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Ensure we end up with all auxv data in one packet.
# FIXME don't assume it all comes back in one packet.
self.assertEqual(context.get("response_type"), "l")
# Decode binary data.
content_raw = context.get("content_raw")
self.assertIsNotNone(content_raw)
return (word_size, self.decode_gdbremote_binary(content_raw))
def supports_auxv(self):
# When non-auxv platforms support llgs, skip the test on platforms
# that don't support auxv.
self.assertTrue(self.has_auxv_support())
#
# We skip the "supports_auxv" test on debugserver. The rest of the tests
# appropriately skip the auxv tests if the support flag is not present
# in the qSupported response, so the debugserver test bits are still there
# in case debugserver code one day does have auxv support and thus those
# tests don't get skipped.
#
@skipIfWindows # no auxv support.
@llgs_test
def test_supports_auxv_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.supports_auxv()
def auxv_data_is_correct_size(self):
(word_size, auxv_data) = self.get_raw_auxv_data()
self.assertIsNotNone(auxv_data)
# Ensure auxv data is a multiple of 2*word_size (there should be two
# unsigned long fields per auxv entry).
self.assertEqual(len(auxv_data) % (2 * word_size), 0)
self.trace("auxv contains {} entries".format(len(auxv_data) / (2*word_size)))
@debugserver_test
def test_auxv_data_is_correct_size_debugserver(self):
self.init_debugserver_test()
self.build()
self.set_inferior_startup_launch()
self.auxv_data_is_correct_size()
@skipIfWindows
@expectedFailureNetBSD
@llgs_test
def test_auxv_data_is_correct_size_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.auxv_data_is_correct_size()
def auxv_keys_look_valid(self):
(word_size, auxv_data) = self.get_raw_auxv_data()
self.assertIsNotNone(auxv_data)
# Grab endian.
self.reset_test_sequence()
self.add_process_info_collection_packets()
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
process_info = self.parse_process_info_response(context)
self.assertIsNotNone(process_info)
endian = process_info.get("endian")
self.assertIsNotNone(endian)
auxv_dict = self.build_auxv_dict(endian, word_size, auxv_data)
self.assertIsNotNone(auxv_dict)
# Verify keys look reasonable.
for auxv_key in auxv_dict:
self.assertTrue(auxv_key >= 1)
self.assertTrue(auxv_key <= 1000)
self.trace("auxv dict: {}".format(auxv_dict))
@debugserver_test
def test_auxv_keys_look_valid_debugserver(self):
self.init_debugserver_test()
self.build()
self.set_inferior_startup_launch()
self.auxv_keys_look_valid()
@skipIfWindows
@expectedFailureNetBSD
@llgs_test
def test_auxv_keys_look_valid_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.auxv_keys_look_valid()
def auxv_chunked_reads_work(self):
# Verify that multiple smaller offset,length reads of auxv data
# return the same data as a single larger read.
# Grab the auxv data with a single large read here.
(word_size, auxv_data) = self.get_raw_auxv_data()
self.assertIsNotNone(auxv_data)
# Grab endian.
self.reset_test_sequence()
self.add_process_info_collection_packets()
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
process_info = self.parse_process_info_response(context)
self.assertIsNotNone(process_info)
endian = process_info.get("endian")
self.assertIsNotNone(endian)
auxv_dict = self.build_auxv_dict(endian, word_size, auxv_data)
self.assertIsNotNone(auxv_dict)
iterated_auxv_data = self.read_binary_data_in_chunks(
"qXfer:auxv:read::", 2 * word_size)
self.assertIsNotNone(iterated_auxv_data)
auxv_dict_iterated = self.build_auxv_dict(
endian, word_size, iterated_auxv_data)
self.assertIsNotNone(auxv_dict_iterated)
# Verify both types of data collection returned same content.
self.assertEqual(auxv_dict_iterated, auxv_dict)
@debugserver_test
def test_auxv_chunked_reads_work_debugserver(self):
self.init_debugserver_test()
self.build()
self.set_inferior_startup_launch()
self.auxv_chunked_reads_work()
@skipIfWindows
@expectedFailureNetBSD
@llgs_test
def test_auxv_chunked_reads_work_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.auxv_chunked_reads_work()
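# Minimal standalone sketch (not part of the test suite) of how raw auxv data can be
# interpreted: a flat sequence of (key, value) pairs of unsigned words. The real tests use
# self.build_auxv_dict(), which honours the endianness reported by the remote; the word
# size and little-endian layout below are illustrative assumptions only.
def _sketch_parse_auxv(auxv_bytes, word_size=8):
    import struct
    fmt = '<Q' if word_size == 8 else '<I'
    words = [struct.unpack_from(fmt, auxv_bytes, offset)[0]
             for offset in range(0, len(auxv_bytes), word_size)]
    # Pair up consecutive words as {key: value}, dropping the terminating AT_NULL (key 0).
    return {key: value for key, value in zip(words[0::2], words[1::2]) if key != 0}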
|
|
"""Base class for mixture models."""
# Author: Wei Xue <[email protected]>
# Modified by Thierry Guillemot <[email protected]>
# License: BSD 3 clause
import warnings
from abc import ABCMeta, abstractmethod
from time import time
import numpy as np
from .. import cluster
from ..base import BaseEstimator
from ..base import DensityMixin
from ..exceptions import ConvergenceWarning
from ..utils import check_array, check_random_state
from ..utils.fixes import logsumexp
def _check_shape(param, param_shape, name):
"""Validate the shape of the input parameter 'param'.
Parameters
----------
param : array
param_shape : tuple
name : string
"""
param = np.array(param)
if param.shape != param_shape:
raise ValueError("The parameter '%s' should have the shape of %s, "
"but got %s" % (name, param_shape, param.shape))
def _check_X(X, n_components=None, n_features=None, ensure_min_samples=1):
"""Check the input data X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
n_components : int
Returns
-------
X : array, shape (n_samples, n_features)
"""
X = check_array(X, dtype=[np.float64, np.float32],
ensure_min_samples=ensure_min_samples)
if n_components is not None and X.shape[0] < n_components:
raise ValueError('Expected n_samples >= n_components '
'but got n_components = %d, n_samples = %d'
% (n_components, X.shape[0]))
if n_features is not None and X.shape[1] != n_features:
raise ValueError("Expected the input data X have %d features, "
"but got %d features"
% (n_features, X.shape[1]))
return X
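# Hedged usage sketch of the validators above: _check_shape enforces an exact shape and
# _check_X enforces dtype, a minimum number of samples and (optionally) the number of
# features. The sizes below are arbitrary illustrative values.
def _sketch_input_validation():
    X = np.zeros((10, 3))
    _check_shape(np.ones(3), (3,), 'means')            # passes: shapes match
    return _check_X(X, n_components=2, n_features=3)   # passes: 10 samples >= 2 components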
class BaseMixture(DensityMixin, BaseEstimator, metaclass=ABCMeta):
"""Base class for mixture models.
This abstract class specifies an interface for all mixture classes and
provides basic common methods for mixture models.
"""
def __init__(self, n_components, tol, reg_covar,
max_iter, n_init, init_params, random_state, warm_start,
verbose, verbose_interval):
self.n_components = n_components
self.tol = tol
self.reg_covar = reg_covar
self.max_iter = max_iter
self.n_init = n_init
self.init_params = init_params
self.random_state = random_state
self.warm_start = warm_start
self.verbose = verbose
self.verbose_interval = verbose_interval
def _check_initial_parameters(self, X):
"""Check values of the basic parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
if self.n_components < 1:
raise ValueError("Invalid value for 'n_components': %d "
"Estimation requires at least one component"
% self.n_components)
if self.tol < 0.:
raise ValueError("Invalid value for 'tol': %.5f "
"Tolerance used by the EM must be non-negative"
% self.tol)
if self.n_init < 1:
raise ValueError("Invalid value for 'n_init': %d "
"Estimation requires at least one run"
% self.n_init)
if self.max_iter < 1:
raise ValueError("Invalid value for 'max_iter': %d "
"Estimation requires at least one iteration"
% self.max_iter)
if self.reg_covar < 0.:
raise ValueError("Invalid value for 'reg_covar': %.5f "
"regularization on covariance must be "
"non-negative"
% self.reg_covar)
# Check all the parameters values of the derived class
self._check_parameters(X)
@abstractmethod
def _check_parameters(self, X):
"""Check initial parameters of the derived class.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
pass
def _initialize_parameters(self, X, random_state):
"""Initialize the model parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
random_state : RandomState
A random number generator instance.
"""
n_samples, _ = X.shape
if self.init_params == 'kmeans':
resp = np.zeros((n_samples, self.n_components))
label = cluster.KMeans(n_clusters=self.n_components, n_init=1,
random_state=random_state).fit(X).labels_
resp[np.arange(n_samples), label] = 1
elif self.init_params == 'random':
resp = random_state.rand(n_samples, self.n_components)
resp /= resp.sum(axis=1)[:, np.newaxis]
else:
raise ValueError("Unimplemented initialization method '%s'"
% self.init_params)
self._initialize(X, resp)
@abstractmethod
def _initialize(self, X, resp):
"""Initialize the model parameters of the derived class.
Parameters
----------
X : array-like, shape (n_samples, n_features)
resp : array-like, shape (n_samples, n_components)
"""
pass
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
The method fits the model ``n_init`` times and sets the parameters with
which the model has the largest likelihood or lower bound. Within each
trial, the method iterates between E-step and M-step for ``max_iter``
times until the change of likelihood or lower bound is less than
``tol``, otherwise, a ``ConvergenceWarning`` is raised.
If ``warm_start`` is ``True``, then ``n_init`` is ignored and a single
initialization is performed upon the first call. Upon consecutive
calls, training starts where it left off.
Parameters
----------
X : array-like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self.fit_predict(X, y)
return self
def fit_predict(self, X, y=None):
"""Estimate model parameters using X and predict the labels for X.
The method fits the model n_init times and sets the parameters with
which the model has the largest likelihood or lower bound. Within each
trial, the method iterates between E-step and M-step for `max_iter`
times until the change of likelihood or lower bound is less than
`tol`, otherwise, a `ConvergenceWarning` is raised. After fitting, it
predicts the most probable label for the input data points.
.. versionadded:: 0.20
Parameters
----------
X : array-like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
labels : array, shape (n_samples,)
Component labels.
"""
X = _check_X(X, self.n_components, ensure_min_samples=2)
self._check_initial_parameters(X)
# if we enable warm_start, we will have a unique initialisation
do_init = not(self.warm_start and hasattr(self, 'converged_'))
n_init = self.n_init if do_init else 1
max_lower_bound = -np.infty
self.converged_ = False
random_state = check_random_state(self.random_state)
n_samples, _ = X.shape
for init in range(n_init):
self._print_verbose_msg_init_beg(init)
if do_init:
self._initialize_parameters(X, random_state)
lower_bound = (-np.infty if do_init else self.lower_bound_)
for n_iter in range(1, self.max_iter + 1):
prev_lower_bound = lower_bound
log_prob_norm, log_resp = self._e_step(X)
self._m_step(X, log_resp)
lower_bound = self._compute_lower_bound(
log_resp, log_prob_norm)
change = lower_bound - prev_lower_bound
self._print_verbose_msg_iter_end(n_iter, change)
if abs(change) < self.tol:
self.converged_ = True
break
self._print_verbose_msg_init_end(lower_bound)
if lower_bound > max_lower_bound:
max_lower_bound = lower_bound
best_params = self._get_parameters()
best_n_iter = n_iter
if not self.converged_:
warnings.warn('Initialization %d did not converge. '
'Try different init parameters, '
'or increase max_iter, tol '
'or check for degenerate data.'
% (init + 1), ConvergenceWarning)
self._set_parameters(best_params)
self.n_iter_ = best_n_iter
self.lower_bound_ = max_lower_bound
# Always do a final e-step to guarantee that the labels returned by
# fit_predict(X) are always consistent with fit(X).predict(X)
# for any value of max_iter and tol (and any random_state).
_, log_resp = self._e_step(X)
return log_resp.argmax(axis=1)
def _e_step(self, X):
"""E step.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
log_prob_norm : float
Mean of the logarithms of the probabilities of each sample in X
log_responsibility : array, shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
the point of each sample in X.
"""
log_prob_norm, log_resp = self._estimate_log_prob_resp(X)
return np.mean(log_prob_norm), log_resp
@abstractmethod
def _m_step(self, X, log_resp):
"""M step.
Parameters
----------
X : array-like, shape (n_samples, n_features)
log_resp : array-like, shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
the point of each sample in X.
"""
pass
@abstractmethod
def _check_is_fitted(self):
pass
@abstractmethod
def _get_parameters(self):
pass
@abstractmethod
def _set_parameters(self, params):
pass
def score_samples(self, X):
"""Compute the weighted log probabilities for each sample.
Parameters
----------
X : array-like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
log_prob : array, shape (n_samples,)
Log probabilities of each data point in X.
"""
self._check_is_fitted()
X = _check_X(X, None, self.means_.shape[1])
return logsumexp(self._estimate_weighted_log_prob(X), axis=1)
def score(self, X, y=None):
"""Compute the per-sample average log-likelihood of the given data X.
Parameters
----------
X : array-like, shape (n_samples, n_dimensions)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
log_likelihood : float
Log likelihood of the Gaussian mixture given X.
"""
return self.score_samples(X).mean()
def predict(self, X):
"""Predict the labels for the data samples in X using trained model.
Parameters
----------
X : array-like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
labels : array, shape (n_samples,)
Component labels.
"""
self._check_is_fitted()
X = _check_X(X, None, self.means_.shape[1])
return self._estimate_weighted_log_prob(X).argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of each component given the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
resp : array, shape (n_samples, n_components)
            Returns the probability of each Gaussian (state) in
            the model given each sample.
"""
self._check_is_fitted()
X = _check_X(X, None, self.means_.shape[1])
_, log_resp = self._estimate_log_prob_resp(X)
return np.exp(log_resp)
def sample(self, n_samples=1):
"""Generate random samples from the fitted Gaussian distribution.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_samples, n_features)
Randomly generated sample
        y : array, shape (n_samples,)
Component labels
"""
self._check_is_fitted()
if n_samples < 1:
raise ValueError(
"Invalid value for 'n_samples': %d . The sampling requires at "
"least one sample." % (self.n_components))
_, n_features = self.means_.shape
rng = check_random_state(self.random_state)
n_samples_comp = rng.multinomial(n_samples, self.weights_)
if self.covariance_type == 'full':
X = np.vstack([
rng.multivariate_normal(mean, covariance, int(sample))
for (mean, covariance, sample) in zip(
self.means_, self.covariances_, n_samples_comp)])
elif self.covariance_type == "tied":
X = np.vstack([
rng.multivariate_normal(mean, self.covariances_, int(sample))
for (mean, sample) in zip(
self.means_, n_samples_comp)])
else:
X = np.vstack([
mean + rng.randn(sample, n_features) * np.sqrt(covariance)
for (mean, covariance, sample) in zip(
self.means_, self.covariances_, n_samples_comp)])
y = np.concatenate([np.full(sample, j, dtype=int)
for j, sample in enumerate(n_samples_comp)])
return (X, y)
def _estimate_weighted_log_prob(self, X):
"""Estimate the weighted log-probabilities, log P(X | Z) + log weights.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
weighted_log_prob : array, shape (n_samples, n_component)
"""
return self._estimate_log_prob(X) + self._estimate_log_weights()
@abstractmethod
def _estimate_log_weights(self):
"""Estimate log-weights in EM algorithm, E[ log pi ] in VB algorithm.
Returns
-------
log_weight : array, shape (n_components, )
"""
pass
@abstractmethod
def _estimate_log_prob(self, X):
"""Estimate the log-probabilities log P(X | Z).
Compute the log-probabilities per each component for each sample.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
log_prob : array, shape (n_samples, n_component)
"""
pass
def _estimate_log_prob_resp(self, X):
"""Estimate log probabilities and responsibilities for each sample.
Compute the log probabilities, weighted log probabilities per
component and responsibilities for each sample in X with respect to
the current state of the model.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
log_prob_norm : array, shape (n_samples,)
log p(X)
log_responsibilities : array, shape (n_samples, n_components)
logarithm of the responsibilities
"""
weighted_log_prob = self._estimate_weighted_log_prob(X)
log_prob_norm = logsumexp(weighted_log_prob, axis=1)
with np.errstate(under='ignore'):
# ignore underflow
log_resp = weighted_log_prob - log_prob_norm[:, np.newaxis]
return log_prob_norm, log_resp
def _print_verbose_msg_init_beg(self, n_init):
"""Print verbose message on initialization."""
if self.verbose == 1:
print("Initialization %d" % n_init)
elif self.verbose >= 2:
print("Initialization %d" % n_init)
self._init_prev_time = time()
self._iter_prev_time = self._init_prev_time
def _print_verbose_msg_iter_end(self, n_iter, diff_ll):
"""Print verbose message on initialization."""
if n_iter % self.verbose_interval == 0:
if self.verbose == 1:
print(" Iteration %d" % n_iter)
elif self.verbose >= 2:
cur_time = time()
print(" Iteration %d\t time lapse %.5fs\t ll change %.5f" % (
n_iter, cur_time - self._iter_prev_time, diff_ll))
self._iter_prev_time = cur_time
def _print_verbose_msg_init_end(self, ll):
"""Print verbose message on the end of iteration."""
if self.verbose == 1:
print("Initialization converged: %s" % self.converged_)
elif self.verbose >= 2:
print("Initialization converged: %s\t time lapse %.5fs\t ll %.5f" %
(self.converged_, time() - self._init_prev_time, ll))
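# Usage sketch (standalone, not part of this module): a minimal example of the
# public API defined above, assuming scikit-learn is installed and using its
# concrete GaussianMixture subclass; the data and parameter values below are
# illustrative only.
import numpy as np
from sklearn.mixture import GaussianMixture
def _mixture_api_demo():
    rng = np.random.RandomState(0)
    # Two well-separated 2-D blobs.
    X = np.vstack([rng.randn(100, 2), rng.randn(100, 2) + 5.0])
    gmm = GaussianMixture(n_components=2, covariance_type='full',
                          random_state=0).fit(X)
    labels = gmm.predict(X)                  # hard component assignments
    resp = gmm.predict_proba(X)              # per-sample responsibilities
    avg_ll = gmm.score(X)                    # mean of score_samples(X)
    X_new, y_new = gmm.sample(n_samples=10)  # draw from the fitted mixture
    return labels, resp, avg_ll, X_new, y_new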
|
|
# Super simple Illustrator SVG processor for animations. Uses the BeautifulSoup Python parsing library.
import os
import errno
from bs4 import BeautifulSoup
def create_file(path, mode):
directory = os.path.dirname(path)
if directory != '' and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
file = open(path, mode)
return file
def parse_svg(path, namespace, options):
#print(path)
file = open(path,'r')
file_string = file.read().decode('utf8')
    file.close()
if namespace == None:
namespace = ''
else:
namespace = namespace + '-'
# BeautifulSoup can't parse attributes with dashes so we replace them with underscores instead
file_string = file_string.replace('data-name', 'data_name')
    # Expand origin to data-svg-origin as it's a pain in the ass to type
if 'expand_origin' in options and options['expand_origin'] == True:
file_string = file_string.replace('origin=', 'data-svg-origin=')
# Add namespaces to ids
if namespace:
file_string = file_string.replace('id="', 'id="' + namespace)
file_string = file_string.replace('url(#', 'url(#' + namespace)
svg = BeautifulSoup(file_string, 'html.parser')
    # symbols: drop the Illustrator data_name attribute (ids were already namespaced above)
symbol_elements = svg.select('symbol')
for element in symbol_elements:
del element['data_name']
use_elements = svg.select('use')
for element in use_elements:
if namespace:
href = element['xlink:href']
element['xlink:href'] = href.replace('#', '#' + namespace)
del element['id']
# remove titles
if 'title' in options and options['title'] == False:
titles = svg.select('title')
for t in titles: t.extract()
foreign_tags_to_add = []
if 'convert_svg_text_to_html' in options and options['convert_svg_text_to_html'] == True:
text_elements = svg.select('[data_name="#TEXT"]')
for element in text_elements:
area = element.rect
if not area:
print('WARNING: Text areas require a rectangle to be in the same group as the text element')
continue
            text_nodes = element.select('text')
            if not text_nodes:
                print('WARNING: No text element found in text area')
                continue
            text_element = text_nodes[0]
x = area['x']
y = area['y']
width = area['width']
height = area['height']
text_content = text_element.getText()
text_tag = BeautifulSoup(text_content, 'html.parser')
data_name = None
if area.has_attr('data_name'): data_name = area['data_name']
#print(data_name)
area.extract()
text_element.extract()
foreign_object_tag = svg.new_tag('foreignObject')
foreign_object_tag['requiredFeatures'] = "http://www.w3.org/TR/SVG11/feature#Extensibility"
foreign_object_tag['transform'] = 'translate(' + x + ' ' + y + ')'
foreign_object_tag['width'] = width + 'px'
foreign_object_tag['height'] = height + 'px'
if 'dont_overflow_text_areas' in options and options['dont_overflow_text_areas'] == True:
foreign_object_tag['style'] = 'overflow:hidden'
if data_name:
val = data_name
if not val.startswith('#'): continue
val = val.replace('#', '')
attributes = str.split(str(val), ',')
for a in attributes:
split = str.split(a.strip(), '=')
if (len(split) < 2): continue
key = split[0]
value = split[1]
if key == 'id': key = namespace + key
foreign_object_tag[key] = value
foreign_object_tag.append(text_tag)
            # modifying the tree affects searches so we need to defer it until the end
foreign_tags_to_add.append({'element':element, 'tag':foreign_object_tag})
    if ('process_layer_names' not in options) or options['process_layer_names'] == True:
elements_with_data_names = svg.select('[data_name]')
for element in elements_with_data_names:
# remove any existing id tag as we'll be making our own
if element.has_attr('id'): del element.attrs['id']
val = element['data_name']
#print(val)
del element['data_name']
if not val.startswith('#'): continue
val = val.replace('#', '')
attributes = str.split(str(val), ',')
for a in attributes:
split = str.split(a.strip(), '=')
if (len(split) < 2): continue
key = split[0]
value = split[1]
if key == 'id' or key == 'class': value = namespace + value
element[key] = value
if 'remove_text_attributes' in options and options['remove_text_attributes'] == True:
#Remove attributes from text tags
text_elements = svg.select('text')
for element in text_elements:
if element.has_attr('font-size'): del element.attrs['font-size']
if element.has_attr('font-family'): del element.attrs['font-family']
if element.has_attr('font-weight'): del element.attrs['font-weight']
if element.has_attr('fill'): del element.attrs['fill']
# Do tree modifications here
if 'convert_svg_text_to_html' in options and options['convert_svg_text_to_html'] == True:
for t in foreign_tags_to_add:
t['element'].append(t['tag'])
return svg
def write_svg(svg, dst_path, options):
result = str(svg)
result = unicode(result, "utf8")
#Remove self closing tags
result = result.replace('></circle>','/>')
result = result.replace('></rect>','/>')
result = result.replace('></path>','/>')
result = result.replace('></polygon>','/>')
if 'nowhitespace' in options and options['nowhitespace'] == True:
result = result.replace('\n','')
#else:
# result = svg.prettify()
# bs4 incorrectly outputs clippath instead of clipPath
result = result.replace('clippath', 'clipPath')
result = result.encode('UTF8')
result_file = create_file(dst_path, 'wb')
result_file.write(result)
result_file.close()
def compile_svg(src_path, dst_path, options):
namespace = None
if 'namespace' in options:
namespace = options['namespace']
svg = parse_svg(src_path, namespace, options)
if 'attributes' in options:
attrs = options['attributes']
for k in attrs:
svg.svg[k] = attrs[k]
if 'description' in options:
current_desc = svg.select('description')
if current_desc:
current_desc[0].string = options['description']
else:
            desc_tag = svg.new_tag('description')
desc_tag.string = options['description']
svg.svg.append(desc_tag)
write_svg(svg, dst_path, options)
def compile_master_svg(src_path, dst_path, options):
print('\n')
print(src_path)
file = open(src_path)
svg = BeautifulSoup(file, 'html.parser')
file.close()
master_viewbox = svg.svg.attrs['viewbox']
import_tags = svg.select('[path]')
for tag in import_tags:
component_path = str(tag['path'])
namespace = None
if tag.has_attr('namespace'): namespace = tag['namespace']
component = parse_svg(component_path, namespace, options)
component_viewbox = component.svg.attrs['viewbox']
if master_viewbox != component_viewbox:
print('WARNING: Master viewbox: [' + master_viewbox + '] does not match component viewbox [' + component_viewbox + ']')
# Moves the contents of the component svg file into the master svg
for child in component.svg: tag.contents.append(child)
# Remove redundant path and namespace attributes from the import element
del tag.attrs['path']
if namespace: del tag.attrs['namespace']
if 'attributes' in options:
attrs = options['attributes']
for k in attrs:
print(k + ' = ' + attrs[k])
svg.svg[k] = attrs[k]
if 'title' in options and options['title'] is not False:
current_title = svg.select('title')
if current_title:
current_title[0].string = options['title']
else:
            title_tag = svg.new_tag('title')
title_tag.string = options['title']
svg.svg.append(title_tag)
if 'description' in options:
current_desc = svg.select('description')
if current_desc:
current_desc[0].string = options['description']
else:
            desc_tag = svg.new_tag('description')
desc_tag.string = options['description']
svg.svg.append(desc_tag)
write_svg(svg, dst_path, options)
# Super dumb / simple function that inlines svgs into html source files
def parse_markup(src_path, output):
print(src_path)
read_state = 0
file = open(src_path, 'r')
for line in file:
if line.startswith('//import'):
path = line.split('//import ')[1].rstrip('\n').rstrip('\r')
parse_markup(path, output)
else:
output.append(line)
file.close()
def inline_svg(src_path, dst_path):
output = [];
file = create_file(dst_path, 'w')
parse_markup(src_path, output)
for line in output: file.write(line)
file.close()
print('')
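# Usage sketch: a minimal build script for the helpers above. The file names
# and option values are hypothetical; the option keys are the ones handled by
# parse_svg/write_svg/compile_svg/compile_master_svg (namespace, attributes,
# title, description, nowhitespace, expand_origin, ...).
def build_example():
    options = {
        'namespace': 'logo',        # prefixes ids, classes and url(#...) references
        'expand_origin': True,      # rewrites origin= to data-svg-origin=
        'nowhitespace': True,       # strips newlines from the written output
        'title': 'Example animation',
        'description': 'Compiled by the SVG processor',
        'attributes': {'class': 'svg-animation', 'preserveAspectRatio': 'xMidYMid meet'},
    }
    # Compile a single Illustrator export.
    compile_svg('src/logo.svg', 'build/logo.svg', options)
    # Stitch component files referenced via path="..." attributes into a master SVG.
    compile_master_svg('src/master.svg', 'build/master.svg', options)
    # Inline the compiled SVG into an HTML template that uses //import lines.
    inline_svg('src/index.src.html', 'build/index.html')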
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Exports detection models to use with tf-lite.
See export_tflite_lstd_graph.py for usage.
"""
import os
import tempfile
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.tools.graph_transforms import TransformGraph
from lstm_object_detection import model_builder
from object_detection import exporter
from object_detection.builders import graph_rewriter_builder
from object_detection.builders import post_processing_builder
from object_detection.core import box_list
_DEFAULT_NUM_CHANNELS = 3
_DEFAULT_NUM_COORD_BOX = 4
def get_const_center_size_encoded_anchors(anchors):
"""Exports center-size encoded anchors as a constant tensor.
Args:
anchors: a float32 tensor of shape [num_anchors, 4] containing the anchor
boxes
Returns:
encoded_anchors: a float32 constant tensor of shape [num_anchors, 4]
containing the anchor boxes.
"""
anchor_boxlist = box_list.BoxList(anchors)
y, x, h, w = anchor_boxlist.get_center_coordinates_and_sizes()
num_anchors = y.get_shape().as_list()
with tf.Session() as sess:
y_out, x_out, h_out, w_out = sess.run([y, x, h, w])
encoded_anchors = tf.constant(
np.transpose(np.stack((y_out, x_out, h_out, w_out))),
dtype=tf.float32,
shape=[num_anchors[0], _DEFAULT_NUM_COORD_BOX],
name='anchors')
return encoded_anchors
def append_postprocessing_op(frozen_graph_def,
max_detections,
max_classes_per_detection,
nms_score_threshold,
nms_iou_threshold,
num_classes,
scale_values,
detections_per_class=100,
use_regular_nms=False):
"""Appends postprocessing custom op.
Args:
frozen_graph_def: Frozen GraphDef for SSD model after freezing the
checkpoint
max_detections: Maximum number of detections (boxes) to show
max_classes_per_detection: Number of classes to display per detection
nms_score_threshold: Score threshold used in Non-maximal suppression in
post-processing
nms_iou_threshold: Intersection-over-union threshold used in Non-maximal
suppression in post-processing
num_classes: number of classes in SSD detector
    scale_values: a dict with the following key-value pairs
      {y_scale: 10, x_scale: 10, h_scale: 5, w_scale: 5} that is used to decode
      center-size encoded boxes
detections_per_class: In regular NonMaxSuppression, number of anchors used
for NonMaxSuppression per class
use_regular_nms: Flag to set postprocessing op to use Regular NMS instead of
Fast NMS.
Returns:
transformed_graph_def: Frozen GraphDef with postprocessing custom op
appended
TFLite_Detection_PostProcess custom op node has four outputs:
detection_boxes: a float32 tensor of shape [1, num_boxes, 4] with box
locations
detection_classes: a float32 tensor of shape [1, num_boxes]
with class indices
detection_scores: a float32 tensor of shape [1, num_boxes]
with class scores
num_boxes: a float32 tensor of size 1 containing the number of detected
boxes
"""
new_output = frozen_graph_def.node.add()
new_output.op = 'TFLite_Detection_PostProcess'
new_output.name = 'TFLite_Detection_PostProcess'
new_output.attr['_output_quantized'].CopyFrom(
attr_value_pb2.AttrValue(b=True))
new_output.attr['_output_types'].list.type.extend([
types_pb2.DT_FLOAT, types_pb2.DT_FLOAT, types_pb2.DT_FLOAT,
types_pb2.DT_FLOAT
])
new_output.attr['_support_output_type_float_in_quantized_op'].CopyFrom(
attr_value_pb2.AttrValue(b=True))
new_output.attr['max_detections'].CopyFrom(
attr_value_pb2.AttrValue(i=max_detections))
new_output.attr['max_classes_per_detection'].CopyFrom(
attr_value_pb2.AttrValue(i=max_classes_per_detection))
new_output.attr['nms_score_threshold'].CopyFrom(
attr_value_pb2.AttrValue(f=nms_score_threshold.pop()))
new_output.attr['nms_iou_threshold'].CopyFrom(
attr_value_pb2.AttrValue(f=nms_iou_threshold.pop()))
new_output.attr['num_classes'].CopyFrom(
attr_value_pb2.AttrValue(i=num_classes))
new_output.attr['y_scale'].CopyFrom(
attr_value_pb2.AttrValue(f=scale_values['y_scale'].pop()))
new_output.attr['x_scale'].CopyFrom(
attr_value_pb2.AttrValue(f=scale_values['x_scale'].pop()))
new_output.attr['h_scale'].CopyFrom(
attr_value_pb2.AttrValue(f=scale_values['h_scale'].pop()))
new_output.attr['w_scale'].CopyFrom(
attr_value_pb2.AttrValue(f=scale_values['w_scale'].pop()))
new_output.attr['detections_per_class'].CopyFrom(
attr_value_pb2.AttrValue(i=detections_per_class))
new_output.attr['use_regular_nms'].CopyFrom(
attr_value_pb2.AttrValue(b=use_regular_nms))
new_output.input.extend(
['raw_outputs/box_encodings', 'raw_outputs/class_predictions', 'anchors'])
# Transform the graph to append new postprocessing op
input_names = []
output_names = ['TFLite_Detection_PostProcess']
transforms = ['strip_unused_nodes']
transformed_graph_def = TransformGraph(frozen_graph_def, input_names,
output_names, transforms)
return transformed_graph_def
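# Usage sketch: calling append_postprocessing_op directly on an already-frozen
# graph. The .pb path and the numeric values below are illustrative; note that
# nms_score_threshold, nms_iou_threshold and each scale_values entry are
# consumed via .pop() above, so they are passed as single-element sets, which
# is also how export_tflite_graph below builds them.
def _append_postprocess_demo(frozen_graph_path='/tmp/tflite_graph_raw.pb'):
  frozen_graph_def = tf.GraphDef()
  with tf.gfile.GFile(frozen_graph_path, 'rb') as f:
    frozen_graph_def.ParseFromString(f.read())
  scale_values = {
      'y_scale': {10.0},
      'x_scale': {10.0},
      'h_scale': {5.0},
      'w_scale': {5.0},
  }
  return append_postprocessing_op(
      frozen_graph_def,
      max_detections=10,
      max_classes_per_detection=1,
      nms_score_threshold={1e-8},
      nms_iou_threshold={0.6},
      num_classes=90,
      scale_values=scale_values,
      detections_per_class=100,
      use_regular_nms=False)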
def export_tflite_graph(pipeline_config,
trained_checkpoint_prefix,
output_dir,
add_postprocessing_op,
max_detections,
max_classes_per_detection,
detections_per_class=100,
use_regular_nms=False,
binary_graph_name='tflite_graph.pb',
txt_graph_name='tflite_graph.pbtxt'):
"""Exports a tflite compatible graph and anchors for ssd detection model.
Anchors are written to a tensor and tflite compatible graph
is written to output_dir/tflite_graph.pb.
Args:
pipeline_config: Dictionary of configuration objects. Keys are `model`,
`train_config`, `train_input_config`, `eval_config`, `eval_input_config`,
      `lstm_model`. Values are the corresponding config objects.
trained_checkpoint_prefix: a file prefix for the checkpoint containing the
trained parameters of the SSD model.
output_dir: A directory to write the tflite graph and anchor file to.
    add_postprocessing_op: If True, a TFLite_Detection_PostProcess custom op is
      appended to the frozen graph
max_detections: Maximum number of detections (boxes) to show
max_classes_per_detection: Number of classes to display per detection
detections_per_class: In regular NonMaxSuppression, number of anchors used
for NonMaxSuppression per class
use_regular_nms: Flag to set postprocessing op to use Regular NMS instead of
Fast NMS.
binary_graph_name: Name of the exported graph file in binary format.
txt_graph_name: Name of the exported graph file in text format.
Raises:
    ValueError: if the pipeline config contains a model other than ssd or uses
      an image resizer other than fixed_shape_resizer.
"""
model_config = pipeline_config['model']
lstm_config = pipeline_config['lstm_model']
eval_config = pipeline_config['eval_config']
tf.gfile.MakeDirs(output_dir)
if model_config.WhichOneof('model') != 'ssd':
raise ValueError('Only ssd models are supported in tflite. '
'Found {} in config'.format(
model_config.WhichOneof('model')))
num_classes = model_config.ssd.num_classes
nms_score_threshold = {
model_config.ssd.post_processing.batch_non_max_suppression.score_threshold
}
nms_iou_threshold = {
model_config.ssd.post_processing.batch_non_max_suppression.iou_threshold
}
scale_values = {}
scale_values['y_scale'] = {
model_config.ssd.box_coder.faster_rcnn_box_coder.y_scale
}
scale_values['x_scale'] = {
model_config.ssd.box_coder.faster_rcnn_box_coder.x_scale
}
scale_values['h_scale'] = {
model_config.ssd.box_coder.faster_rcnn_box_coder.height_scale
}
scale_values['w_scale'] = {
model_config.ssd.box_coder.faster_rcnn_box_coder.width_scale
}
image_resizer_config = model_config.ssd.image_resizer
image_resizer = image_resizer_config.WhichOneof('image_resizer_oneof')
num_channels = _DEFAULT_NUM_CHANNELS
if image_resizer == 'fixed_shape_resizer':
height = image_resizer_config.fixed_shape_resizer.height
width = image_resizer_config.fixed_shape_resizer.width
if image_resizer_config.fixed_shape_resizer.convert_to_grayscale:
num_channels = 1
shape = [lstm_config.eval_unroll_length, height, width, num_channels]
else:
raise ValueError(
        'Only fixed_shape_resizer '
'is supported with tflite. Found {}'.format(
image_resizer_config.WhichOneof('image_resizer_oneof')))
video_tensor = tf.placeholder(
tf.float32, shape=shape, name='input_video_tensor')
detection_model = model_builder.build(
model_config, lstm_config, is_training=False)
preprocessed_video, true_image_shapes = detection_model.preprocess(
tf.to_float(video_tensor))
predicted_tensors = detection_model.predict(preprocessed_video,
true_image_shapes)
# predicted_tensors = detection_model.postprocess(predicted_tensors,
# true_image_shapes)
# The score conversion occurs before the post-processing custom op
_, score_conversion_fn = post_processing_builder.build(
model_config.ssd.post_processing)
class_predictions = score_conversion_fn(
predicted_tensors['class_predictions_with_background'])
with tf.name_scope('raw_outputs'):
# 'raw_outputs/box_encodings': a float32 tensor of shape [1, num_anchors, 4]
# containing the encoded box predictions. Note that these are raw
# predictions and no Non-Max suppression is applied on them and
# no decode center size boxes is applied to them.
tf.identity(predicted_tensors['box_encodings'], name='box_encodings')
# 'raw_outputs/class_predictions': a float32 tensor of shape
# [1, num_anchors, num_classes] containing the class scores for each anchor
# after applying score conversion.
tf.identity(class_predictions, name='class_predictions')
# 'anchors': a float32 tensor of shape
# [4, num_anchors] containing the anchors as a constant node.
tf.identity(
get_const_center_size_encoded_anchors(predicted_tensors['anchors']),
name='anchors')
# Add global step to the graph, so we know the training step number when we
# evaluate the model.
tf.train.get_or_create_global_step()
# graph rewriter
is_quantized = ('graph_rewriter' in pipeline_config)
if is_quantized:
graph_rewriter_config = pipeline_config['graph_rewriter']
graph_rewriter_fn = graph_rewriter_builder.build(
graph_rewriter_config, is_training=False, is_export=True)
graph_rewriter_fn()
if model_config.ssd.feature_extractor.HasField('fpn'):
exporter.rewrite_nn_resize_op(is_quantized)
# freeze the graph
saver_kwargs = {}
if eval_config.use_moving_averages:
saver_kwargs['write_version'] = saver_pb2.SaverDef.V1
moving_average_checkpoint = tempfile.NamedTemporaryFile()
exporter.replace_variable_values_with_moving_averages(
tf.get_default_graph(), trained_checkpoint_prefix,
moving_average_checkpoint.name)
checkpoint_to_use = moving_average_checkpoint.name
else:
checkpoint_to_use = trained_checkpoint_prefix
saver = tf.train.Saver(**saver_kwargs)
input_saver_def = saver.as_saver_def()
frozen_graph_def = exporter.freeze_graph_with_def_protos(
input_graph_def=tf.get_default_graph().as_graph_def(),
input_saver_def=input_saver_def,
input_checkpoint=checkpoint_to_use,
output_node_names=','.join([
'raw_outputs/box_encodings', 'raw_outputs/class_predictions',
'anchors'
]),
restore_op_name='save/restore_all',
filename_tensor_name='save/Const:0',
clear_devices=True,
output_graph='',
initializer_nodes='')
# Add new operation to do post processing in a custom op (TF Lite only)
if add_postprocessing_op:
transformed_graph_def = append_postprocessing_op(
frozen_graph_def, max_detections, max_classes_per_detection,
nms_score_threshold, nms_iou_threshold, num_classes, scale_values,
detections_per_class, use_regular_nms)
else:
# Return frozen without adding post-processing custom op
transformed_graph_def = frozen_graph_def
binary_graph = os.path.join(output_dir, binary_graph_name)
with tf.gfile.GFile(binary_graph, 'wb') as f:
f.write(transformed_graph_def.SerializeToString())
txt_graph = os.path.join(output_dir, txt_graph_name)
with tf.gfile.GFile(txt_graph, 'w') as f:
f.write(str(transformed_graph_def))
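# Usage sketch: driving export_tflite_graph. It assumes `configs` is the
# dictionary described in the docstring above (keys 'model', 'lstm_model',
# 'eval_config', and optionally 'graph_rewriter'), already parsed from a
# pipeline config by the project's config utilities (see
# export_tflite_lstd_graph.py for the real driver); the checkpoint prefix,
# output directory and detection limits are placeholders.
def _export_demo(configs,
                 trained_checkpoint_prefix='/tmp/model.ckpt-12345',
                 output_dir='/tmp/tflite_export'):
  export_tflite_graph(
      pipeline_config=configs,
      trained_checkpoint_prefix=trained_checkpoint_prefix,
      output_dir=output_dir,
      add_postprocessing_op=True,
      max_detections=10,
      max_classes_per_detection=1)
  # Writes output_dir/tflite_graph.pb and tflite_graph.pbtxt, ready for the
  # TFLite converter.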
|
|
from typing import Sequence
import mock
from kubernetes.client import V1Deployment
from kubernetes.client import V1StatefulSet
from pytest import raises
from paasta_tools.kubernetes.application.controller_wrappers import Application
from paasta_tools.kubernetes_tools import InvalidKubernetesConfig
from paasta_tools.kubernetes_tools import KubeDeployment
from paasta_tools.setup_kubernetes_job import create_application_object
from paasta_tools.setup_kubernetes_job import main
from paasta_tools.setup_kubernetes_job import parse_args
from paasta_tools.setup_kubernetes_job import setup_kube_deployments
from paasta_tools.utils import NoConfigurationForServiceError
from paasta_tools.utils import NoDeploymentsAvailable
def test_parse_args():
with mock.patch(
"paasta_tools.setup_kubernetes_job.argparse", autospec=True
) as mock_argparse:
assert parse_args() == mock_argparse.ArgumentParser.return_value.parse_args()
def test_main():
with mock.patch(
"paasta_tools.setup_kubernetes_job.parse_args", autospec=True
) as mock_parse_args, mock.patch(
"paasta_tools.setup_kubernetes_job.KubeClient", autospec=True
) as mock_kube_client, mock.patch(
"paasta_tools.setup_kubernetes_job.ensure_namespace", autospec=True
) as mock_ensure_namespace, mock.patch(
"paasta_tools.setup_kubernetes_job.setup_kube_deployments", autospec=True
) as mock_setup_kube_deployments:
mock_setup_kube_deployments.return_value = True
with raises(SystemExit) as e:
main()
assert e.value.code == 0
assert mock_ensure_namespace.called
mock_setup_kube_deployments.assert_called_with(
kube_client=mock_kube_client.return_value,
service_instances=mock_parse_args.return_value.service_instance_list,
cluster=mock_parse_args.return_value.cluster,
soa_dir=mock_parse_args.return_value.soa_dir,
rate_limit=mock_parse_args.return_value.rate_limit,
)
mock_setup_kube_deployments.return_value = False
with raises(SystemExit) as e:
main()
assert e.value.code == 1
def test_setup_kube_deployment_invalid_job_name():
with mock.patch(
"paasta_tools.setup_kubernetes_job.create_application_object", autospec=True
) as mock_create_application_object, mock.patch(
"paasta_tools.setup_kubernetes_job.list_all_deployments", autospec=True
) as mock_list_all_deployments, mock.patch(
"paasta_tools.setup_kubernetes_job.log", autospec=True
):
mock_client = mock.Mock()
mock_list_all_deployments.return_value = [
KubeDeployment(
service="kurupt", instance="f_m", git_sha="", config_sha="", replicas=0
)
]
mock_service_instances = ["kuruptf_m"]
setup_kube_deployments(
kube_client=mock_client,
service_instances=mock_service_instances,
cluster="fake_cluster",
soa_dir="/nail/blah",
)
assert mock_create_application_object.call_count == 0
def test_create_application_object():
with mock.patch(
"paasta_tools.setup_kubernetes_job.load_kubernetes_service_config_no_cache",
autospec=True,
) as mock_load_kubernetes_service_config_no_cache, mock.patch(
"paasta_tools.setup_kubernetes_job.load_system_paasta_config", autospec=True
), mock.patch(
"paasta_tools.kubernetes.application.controller_wrappers.Application.load_local_config",
autospec=True,
), mock.patch(
"paasta_tools.kubernetes.application.controller_wrappers.DeploymentWrapper",
autospec=True,
) as mock_deployment_wrapper, mock.patch(
"paasta_tools.kubernetes.application.controller_wrappers.StatefulSetWrapper",
autospec=True,
) as mock_stateful_set_wrapper:
mock_kube_client = mock.Mock()
mock_deploy = mock.MagicMock(spec=V1Deployment)
service_config = mock.MagicMock()
mock_load_kubernetes_service_config_no_cache.return_value = service_config
service_config.format_kubernetes_app.return_value = mock_deploy
# Create DeploymentWrapper
create_application_object(
kube_client=mock_kube_client,
service="kurupt",
instance="fm",
cluster="fake_cluster",
soa_dir="/nail/blah",
)
mock_deployment_wrapper.assert_called_with(mock_deploy)
mock_deploy = mock.MagicMock(spec=V1StatefulSet)
service_config.format_kubernetes_app.return_value = mock_deploy
# Create StatefulSetWrapper
create_application_object(
kube_client=mock_kube_client,
service="kurupt",
instance="fm",
cluster="fake_cluster",
soa_dir="/nail/blah",
)
mock_stateful_set_wrapper.assert_called_with(mock_deploy)
# Create object that is not statefulset/deployment
with raises(Exception):
service_config.format_kubernetes_app.return_value = mock.MagicMock()
create_application_object(
kube_client=mock_kube_client,
service="kurupt",
instance="fm",
cluster="fake_cluster",
soa_dir="/nail/blah",
)
mock_deployment_wrapper.reset_mock()
mock_stateful_set_wrapper.reset_mock()
mock_load_kubernetes_service_config_no_cache.side_effect = (
NoDeploymentsAvailable
)
ret = create_application_object(
kube_client=mock_kube_client,
service="kurupt",
instance="fm",
cluster="fake_cluster",
soa_dir="/nail/blah",
)
assert ret == (True, None)
assert not mock_deployment_wrapper.called
assert not mock_stateful_set_wrapper.called
mock_load_kubernetes_service_config_no_cache.side_effect = (
NoConfigurationForServiceError
)
ret = create_application_object(
kube_client=mock_kube_client,
service="kurupt",
instance="fm",
cluster="fake_cluster",
soa_dir="/nail/blah",
)
assert ret == (False, None)
assert not mock_deployment_wrapper.called
assert not mock_stateful_set_wrapper.called
mock_load_kubernetes_service_config_no_cache.side_effect = None
mock_load_kubernetes_service_config_no_cache.return_value = mock.Mock(
format_kubernetes_app=mock.Mock(
side_effect=InvalidKubernetesConfig(Exception("Oh no!"), "kurupt", "fm")
)
)
ret = create_application_object(
kube_client=mock_kube_client,
service="kurupt",
instance="fm",
cluster="fake_cluster",
soa_dir="/nail/blah",
)
assert ret == (False, None)
assert not mock_deployment_wrapper.called
assert not mock_stateful_set_wrapper.called
def test_setup_kube_deployment_create_update():
fake_create = mock.MagicMock()
fake_update = mock.MagicMock()
fake_update_related_api_objects = mock.MagicMock()
def simple_create_application_object(
kube_client, service, instance, cluster, soa_dir
):
fake_app = mock.MagicMock(spec=Application)
fake_app.kube_deployment = KubeDeployment(
service=service, instance=instance, git_sha="1", config_sha="1", replicas=1
)
fake_app.create = fake_create
fake_app.update = fake_update
fake_app.update_related_api_objects = fake_update_related_api_objects
fake_app.item = None
fake_app.soa_config = None
fake_app.__str__ = lambda app: "fake_app"
return True, fake_app
with mock.patch(
"paasta_tools.setup_kubernetes_job.create_application_object",
autospec=True,
side_effect=simple_create_application_object,
) as mock_create_application_object, mock.patch(
"paasta_tools.setup_kubernetes_job.list_all_deployments", autospec=True
) as mock_list_all_deployments, mock.patch(
"paasta_tools.setup_kubernetes_job.log", autospec=True
) as mock_log_obj:
mock_client = mock.Mock()
# No instances created
mock_service_instances: Sequence[str] = []
setup_kube_deployments(
kube_client=mock_client,
service_instances=mock_service_instances,
cluster="fake_cluster",
soa_dir="/nail/blah",
)
assert mock_create_application_object.call_count == 0
assert fake_update.call_count == 0
assert fake_update_related_api_objects.call_count == 0
assert mock_log_obj.info.call_count == 0
mock_log_obj.info.reset_mock()
# Create a new instance
mock_service_instances = ["kurupt.fm"]
setup_kube_deployments(
kube_client=mock_client,
service_instances=mock_service_instances,
cluster="fake_cluster",
soa_dir="/nail/blah",
)
assert fake_create.call_count == 1
assert fake_update.call_count == 0
assert fake_update_related_api_objects.call_count == 1
mock_log_obj.info.reset_mock()
# Update when gitsha changed
fake_create.reset_mock()
fake_update.reset_mock()
fake_update_related_api_objects.reset_mock()
mock_service_instances = ["kurupt.fm"]
mock_list_all_deployments.return_value = [
KubeDeployment(
service="kurupt", instance="fm", git_sha="2", config_sha="1", replicas=1
)
]
setup_kube_deployments(
kube_client=mock_client,
service_instances=mock_service_instances,
cluster="fake_cluster",
soa_dir="/nail/blah",
)
assert fake_update.call_count == 1
assert fake_create.call_count == 0
assert fake_update_related_api_objects.call_count == 1
mock_log_obj.info.reset_mock()
# Update when configsha changed
fake_create.reset_mock()
fake_update.reset_mock()
fake_update_related_api_objects.reset_mock()
mock_service_instances = ["kurupt.fm"]
mock_list_all_deployments.return_value = [
KubeDeployment(
service="kurupt", instance="fm", git_sha="1", config_sha="2", replicas=1
)
]
setup_kube_deployments(
kube_client=mock_client,
service_instances=mock_service_instances,
cluster="fake_cluster",
soa_dir="/nail/blah",
)
assert fake_update.call_count == 1
assert fake_create.call_count == 0
assert fake_update_related_api_objects.call_count == 1
mock_log_obj.info.reset_mock()
# Update when replica changed
fake_create.reset_mock()
fake_update.reset_mock()
fake_update_related_api_objects.reset_mock()
mock_service_instances = ["kurupt.fm"]
mock_list_all_deployments.return_value = [
KubeDeployment(
service="kurupt", instance="fm", git_sha="1", config_sha="1", replicas=2
)
]
setup_kube_deployments(
kube_client=mock_client,
service_instances=mock_service_instances,
cluster="fake_cluster",
soa_dir="/nail/blah",
)
assert fake_update.call_count == 1
assert fake_create.call_count == 0
assert fake_update_related_api_objects.call_count == 1
mock_log_obj.info.reset_mock()
# Update one and Create One
fake_create.reset_mock()
fake_update.reset_mock()
fake_update_related_api_objects.reset_mock()
mock_service_instances = ["kurupt.fm", "kurupt.garage"]
mock_list_all_deployments.return_value = [
KubeDeployment(
service="kurupt",
instance="garage",
git_sha="2",
config_sha="2",
replicas=1,
)
]
setup_kube_deployments(
kube_client=mock_client,
service_instances=mock_service_instances,
cluster="fake_cluster",
soa_dir="/nail/blah",
)
assert fake_update.call_count == 1
assert fake_create.call_count == 1
assert fake_update_related_api_objects.call_count == 2
mock_log_obj.info.reset_mock()
# Always attempt to update related API objects
fake_create.reset_mock()
fake_update.reset_mock()
fake_update_related_api_objects.reset_mock()
mock_service_instances = ["kurupt.garage"]
mock_list_all_deployments.return_value = [
KubeDeployment(
service="kurupt",
instance="garage",
git_sha="1",
config_sha="1",
replicas=1,
)
]
setup_kube_deployments(
kube_client=mock_client,
service_instances=mock_service_instances,
cluster="fake_cluster",
soa_dir="/nail/blah",
)
assert fake_update.call_count == 0
assert fake_create.call_count == 0
assert fake_update_related_api_objects.call_count == 1
assert mock_log_obj.info.call_args_list[0] == mock.call(
"fake_app is up-to-date!"
)
def test_setup_kube_deployments_rate_limit():
with mock.patch(
"paasta_tools.setup_kubernetes_job.create_application_object", autospec=True,
) as mock_create_application_object, mock.patch(
"paasta_tools.setup_kubernetes_job.list_all_deployments", autospec=True
), mock.patch(
"paasta_tools.setup_kubernetes_job.log", autospec=True
) as mock_log_obj:
mock_client = mock.Mock()
mock_service_instances = ["kurupt.fm", "kurupt.garage", "kurupt.radio"]
fake_app = mock.Mock(create=mock.Mock())
mock_create_application_object.return_value = (True, fake_app)
# Rate limit: 2 calls allowed
setup_kube_deployments(
kube_client=mock_client,
service_instances=mock_service_instances,
cluster="fake_cluster",
soa_dir="/nail/blah",
rate_limit=2,
)
assert fake_app.create.call_count == 2
mock_log_obj.info.assert_any_call(
"Not doing any further updates as we reached the limit (2)"
)
# No rate limit
fake_app.reset_mock()
setup_kube_deployments(
kube_client=mock_client,
service_instances=mock_service_instances,
cluster="fake_cluster",
soa_dir="/nail/blah",
rate_limit=0,
)
assert fake_app.create.call_count == 3
def test_setup_kube_deployments_skip_malformed_apps():
with mock.patch(
"paasta_tools.setup_kubernetes_job.create_application_object", autospec=True,
) as mock_create_application_object, mock.patch(
"paasta_tools.setup_kubernetes_job.list_all_deployments", autospec=True
), mock.patch(
"paasta_tools.setup_kubernetes_job.log", autospec=True
) as mock_log_obj:
mock_client = mock.Mock()
mock_service_instances = ["fake.instance", "mock.instance"]
fake_app = mock.Mock(create=mock.Mock())
fake_app.create = mock.Mock(
side_effect=[Exception("Kaboom!"), mock.Mock(create=mock.Mock())]
)
fake_app.__str__ = mock.Mock(return_value="fake_app")
mock_create_application_object.return_value = (True, fake_app)
setup_kube_deployments(
kube_client=mock_client,
service_instances=mock_service_instances,
cluster="fake_cluster",
soa_dir="/nail/blah",
rate_limit=0,
)
assert fake_app.create.call_count == 2
assert len(mock_log_obj.exception.call_args_list) == 1
assert mock_log_obj.exception.call_args_list[0] == mock.call(
"Error while processing: fake_app"
)
|
|
# gridworld.py
# ------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero ([email protected]) and Dan Klein ([email protected]).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
import random
import sys
import mdp
import environment
import util
import optparse
class Gridworld(mdp.MarkovDecisionProcess):
"""
Gridworld
"""
def __init__(self, grid):
# layout
if type(grid) == type([]): grid = makeGrid(grid)
self.grid = grid
# parameters
self.livingReward = 0.0
self.noise = 0.2
def setLivingReward(self, reward):
"""
The (negative) reward for exiting "normal" states.
Note that in the R+N text, this reward is on entering
a state and therefore is not clearly part of the state's
future rewards.
"""
self.livingReward = reward
def setNoise(self, noise):
"""
The probability of moving in an unintended direction.
"""
self.noise = noise
def getPossibleActions(self, state):
"""
Returns list of valid actions for 'state'.
Note that you can request moves into walls and
that "exit" states transition to the terminal
state under the special action "done".
"""
if state == self.grid.terminalState:
return ()
x,y = state
if type(self.grid[x][y]) == int:
return ('exit',)
return ('north','west','south','east')
def getStates(self):
"""
Return list of all states.
"""
# The true terminal state.
states = [self.grid.terminalState]
for x in range(self.grid.width):
for y in range(self.grid.height):
if self.grid[x][y] != '#':
state = (x,y)
states.append(state)
return states
def getReward(self, state, action, nextState):
"""
Get reward for state, action, nextState transition.
Note that the reward depends only on the state being
departed (as in the R+N book examples, which more or
less use this convention).
"""
if state == self.grid.terminalState:
return 0.0
x, y = state
cell = self.grid[x][y]
if type(cell) == int or type(cell) == float:
return cell
return self.livingReward
def getStartState(self):
for x in range(self.grid.width):
for y in range(self.grid.height):
if self.grid[x][y] == 'S':
return (x, y)
        raise Exception('Grid has no start state')
def isTerminal(self, state):
"""
Only the TERMINAL_STATE state is *actually* a terminal state.
The other "exit" states are technically non-terminals with
a single action "exit" which leads to the true terminal state.
This convention is to make the grids line up with the examples
in the R+N textbook.
"""
return state == self.grid.terminalState
def getTransitionStatesAndProbs(self, state, action):
"""
Returns list of (nextState, prob) pairs
representing the states reachable
from 'state' by taking 'action' along
with their transition probabilities.
"""
if action not in self.getPossibleActions(state):
raise "Illegal action!"
if self.isTerminal(state):
return []
x, y = state
if type(self.grid[x][y]) == int or type(self.grid[x][y]) == float:
termState = self.grid.terminalState
return [(termState, 1.0)]
successors = []
northState = (self.__isAllowed(y+1,x) and (x,y+1)) or state
westState = (self.__isAllowed(y,x-1) and (x-1,y)) or state
southState = (self.__isAllowed(y-1,x) and (x,y-1)) or state
eastState = (self.__isAllowed(y,x+1) and (x+1,y)) or state
if action == 'north' or action == 'south':
if action == 'north':
successors.append((northState,1-self.noise))
else:
successors.append((southState,1-self.noise))
massLeft = self.noise
successors.append((westState,massLeft/2.0))
successors.append((eastState,massLeft/2.0))
if action == 'west' or action == 'east':
if action == 'west':
successors.append((westState,1-self.noise))
else:
successors.append((eastState,1-self.noise))
massLeft = self.noise
successors.append((northState,massLeft/2.0))
successors.append((southState,massLeft/2.0))
successors = self.__aggregate(successors)
return successors
def __aggregate(self, statesAndProbs):
counter = util.Counter()
for state, prob in statesAndProbs:
counter[state] += prob
newStatesAndProbs = []
for state, prob in counter.items():
newStatesAndProbs.append((state, prob))
return newStatesAndProbs
def __isAllowed(self, y, x):
if y < 0 or y >= self.grid.height: return False
if x < 0 or x >= self.grid.width: return False
return self.grid[x][y] != '#'
class GridworldEnvironment(environment.Environment):
def __init__(self, gridWorld):
self.gridWorld = gridWorld
self.reset()
def getCurrentState(self):
return self.state
def getPossibleActions(self, state):
return self.gridWorld.getPossibleActions(state)
def doAction(self, action):
successors = self.gridWorld.getTransitionStatesAndProbs(self.state, action)
sum = 0.0
rand = random.random()
state = self.getCurrentState()
for nextState, prob in successors:
sum += prob
if sum > 1.0:
                raise Exception('Total transition probability more than one; sample failure.')
if rand < sum:
reward = self.gridWorld.getReward(state, action, nextState)
self.state = nextState
return (nextState, reward)
        raise Exception('Total transition probability less than one; sample failure.')
def reset(self):
self.state = self.gridWorld.getStartState()
class Grid:
"""
A 2-dimensional array of immutables backed by a list of lists. Data is accessed
via grid[x][y] where (x,y) are cartesian coordinates with x horizontal,
y vertical and the origin (0,0) in the bottom left corner.
The __str__ method constructs an output that is oriented appropriately.
"""
def __init__(self, width, height, initialValue=' '):
self.width = width
self.height = height
self.data = [[initialValue for y in range(height)] for x in range(width)]
self.terminalState = 'TERMINAL_STATE'
def __getitem__(self, i):
return self.data[i]
def __setitem__(self, key, item):
self.data[key] = item
def __eq__(self, other):
if other == None: return False
return self.data == other.data
    def __hash__(self):
        # self.data is a list of lists, which is unhashable; hash an immutable view
        return hash(tuple(tuple(col) for col in self.data))
def copy(self):
g = Grid(self.width, self.height)
g.data = [x[:] for x in self.data]
return g
def deepCopy(self):
return self.copy()
def shallowCopy(self):
g = Grid(self.width, self.height)
g.data = self.data
return g
def _getLegacyText(self):
t = [[self.data[x][y] for x in range(self.width)] for y in range(self.height)]
t.reverse()
return t
def __str__(self):
return str(self._getLegacyText())
def makeGrid(gridString):
width, height = len(gridString[0]), len(gridString)
grid = Grid(width, height)
for ybar, line in enumerate(gridString):
y = height - ybar - 1
for x, el in enumerate(line):
grid[x][y] = el
return grid
def getCliffGrid():
grid = [[' ',' ',' ',' ',' '],
['S',' ',' ',' ',10],
[-100,-100, -100, -100, -100]]
return Gridworld(makeGrid(grid))
def getCliffGrid2():
grid = [[' ',' ',' ',' ',' '],
[8,'S',' ',' ',10],
[-100,-100, -100, -100, -100]]
return Gridworld(grid)
def getDiscountGrid():
grid = [[' ',' ',' ',' ',' '],
[' ','#',' ',' ',' '],
[' ','#', 1,'#', 10],
['S',' ',' ',' ',' '],
[-10,-10, -10, -10, -10]]
return Gridworld(grid)
def getBridgeGrid():
grid = [[ '#',-100, -100, -100, -100, -100, '#'],
[ 1, 'S', ' ', ' ', ' ', ' ', 10],
[ '#',-100, -100, -100, -100, -100, '#']]
return Gridworld(grid)
def getBookGrid():
grid = [[' ',' ',' ',+1],
[' ','#',' ',-1],
['S',' ',' ',' ']]
return Gridworld(grid)
def getMazeGrid():
grid = [[' ',' ',' ',+1],
['#','#',' ','#'],
[' ','#',' ',' '],
[' ','#','#',' '],
['S',' ',' ',' ']]
return Gridworld(grid)
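# Usage sketch: a headless look at the transition model defined above. It
# builds the BookGrid world, explicitly sets the default noise/living reward,
# and lists the stochastic outcomes of moving 'north' from the start state:
# the intended move gets probability 0.8 and each perpendicular move gets 0.1
# (bouncing back into the current state when blocked by a wall or the edge).
def demoTransitionModel():
    world = getBookGrid()
    world.setNoise(0.2)
    world.setLivingReward(0.0)
    start = world.getStartState()
    print('Start state: ' + str(start))
    print('Actions: ' + str(world.getPossibleActions(start)))
    for nextState, prob in world.getTransitionStatesAndProbs(start, 'north'):
        reward = world.getReward(start, 'north', nextState)
        print('  -> ' + str(nextState) + '  p=' + str(prob) + '  reward=' + str(reward))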
def getUserAction(state, actionFunction):
"""
Get an action from the user (rather than the agent).
Used for debugging and lecture demos.
"""
import graphicsUtils
action = None
while True:
keys = graphicsUtils.wait_for_keys()
if 'Up' in keys: action = 'north'
if 'Down' in keys: action = 'south'
if 'Left' in keys: action = 'west'
if 'Right' in keys: action = 'east'
if 'q' in keys: sys.exit(0)
if action == None: continue
break
actions = actionFunction(state)
if action not in actions:
action = actions[0]
return action
def printString(x): print x
def runEpisode(agent, environment, discount, decision, display, message, pause, episode):
returns = 0
totalDiscount = 1.0
environment.reset()
if 'startEpisode' in dir(agent): agent.startEpisode()
message("BEGINNING EPISODE: "+str(episode)+"\n")
while True:
# DISPLAY CURRENT STATE
state = environment.getCurrentState()
display(state)
pause()
# END IF IN A TERMINAL STATE
actions = environment.getPossibleActions(state)
if len(actions) == 0:
message("EPISODE "+str(episode)+" COMPLETE: RETURN WAS "+str(returns)+"\n")
return returns
# GET ACTION (USUALLY FROM AGENT)
action = decision(state)
if action == None:
            raise Exception('Error: Agent returned None action')
# EXECUTE ACTION
nextState, reward = environment.doAction(action)
message("Started in state: "+str(state)+
"\nTook action: "+str(action)+
"\nEnded in state: "+str(nextState)+
"\nGot reward: "+str(reward)+"\n")
# UPDATE LEARNER
if 'observeTransition' in dir(agent):
agent.observeTransition(state, action, nextState, reward)
returns += reward * totalDiscount
totalDiscount *= discount
if 'stopEpisode' in dir(agent):
agent.stopEpisode()
def parseOptions():
optParser = optparse.OptionParser()
optParser.add_option('-d', '--discount',action='store',
type='float',dest='discount',default=0.9,
help='Discount on future (default %default)')
optParser.add_option('-r', '--livingReward',action='store',
type='float',dest='livingReward',default=0.0,
metavar="R", help='Reward for living for a time step (default %default)')
optParser.add_option('-n', '--noise',action='store',
type='float',dest='noise',default=0.2,
metavar="P", help='How often action results in ' +
'unintended direction (default %default)' )
optParser.add_option('-e', '--epsilon',action='store',
type='float',dest='epsilon',default=0.3,
metavar="E", help='Chance of taking a random action in q-learning (default %default)')
optParser.add_option('-l', '--learningRate',action='store',
type='float',dest='learningRate',default=0.5,
metavar="P", help='TD learning rate (default %default)' )
optParser.add_option('-i', '--iterations',action='store',
type='int',dest='iters',default=10,
metavar="K", help='Number of rounds of value iteration (default %default)')
optParser.add_option('-k', '--episodes',action='store',
type='int',dest='episodes',default=1,
metavar="K", help='Number of epsiodes of the MDP to run (default %default)')
optParser.add_option('-g', '--grid',action='store',
metavar="G", type='string',dest='grid',default="BookGrid",
help='Grid to use (case sensitive; options are BookGrid, BridgeGrid, CliffGrid, MazeGrid, default %default)' )
optParser.add_option('-w', '--windowSize', metavar="X", type='int',dest='gridSize',default=150,
help='Request a window width of X pixels *per grid cell* (default %default)')
optParser.add_option('-a', '--agent',action='store', metavar="A",
type='string',dest='agent',default="random",
help='Agent type (options are \'random\', \'value\' and \'q\', default %default)')
optParser.add_option('-t', '--text',action='store_true',
dest='textDisplay',default=False,
help='Use text-only ASCII display')
optParser.add_option('-p', '--pause',action='store_true',
dest='pause',default=False,
help='Pause GUI after each time step when running the MDP')
optParser.add_option('-q', '--quiet',action='store_true',
dest='quiet',default=False,
help='Skip display of any learning episodes')
optParser.add_option('-s', '--speed',action='store', metavar="S", type=float,
dest='speed',default=1.0,
help='Speed of animation, S > 1.0 is faster, 0.0 < S < 1.0 is slower (default %default)')
optParser.add_option('-m', '--manual',action='store_true',
dest='manual',default=False,
help='Manually control agent')
optParser.add_option('-v', '--valueSteps',action='store_true' ,default=False,
help='Display each step of value iteration')
opts, args = optParser.parse_args()
if opts.manual and opts.agent != 'q':
print '## Disabling Agents in Manual Mode (-m) ##'
opts.agent = None
# MANAGE CONFLICTS
if opts.textDisplay or opts.quiet:
# if opts.quiet:
opts.pause = False
# opts.manual = False
if opts.manual:
opts.pause = True
return opts
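# Usage sketch: running one episode headlessly with runEpisode, using a
# trivial random agent in place of the learning agents wired up in __main__
# below. All callbacks are no-ops, so no graphics display is required;
# everything referenced here is defined earlier in this file.
def runHeadlessRandomEpisode(discount=0.9):
    world = getBookGrid()
    env = GridworldEnvironment(world)
    class _RandomAgent:
        def getAction(self, state):
            return random.choice(world.getPossibleActions(state))
    agent = _RandomAgent()
    noop = lambda *args: None
    # Returns the discounted return accumulated over the episode.
    return runEpisode(agent, env, discount, agent.getAction, noop, noop, noop, 1)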
if __name__ == '__main__':
opts = parseOptions()
###########################
# GET THE GRIDWORLD
###########################
import gridworld
mdpFunction = getattr(gridworld, "get"+opts.grid)
mdp = mdpFunction()
mdp.setLivingReward(opts.livingReward)
mdp.setNoise(opts.noise)
env = gridworld.GridworldEnvironment(mdp)
###########################
# GET THE DISPLAY ADAPTER
###########################
import textGridworldDisplay
display = textGridworldDisplay.TextGridworldDisplay(mdp)
if not opts.textDisplay:
import graphicsGridworldDisplay
display = graphicsGridworldDisplay.GraphicsGridworldDisplay(mdp, opts.gridSize, opts.speed)
display.start()
###########################
# GET THE AGENT
###########################
import valueIterationAgents, qlearningAgents
a = None
if opts.agent == 'value':
a = valueIterationAgents.ValueIterationAgent(mdp, opts.discount, opts.iters)
elif opts.agent == 'q':
#env.getPossibleActions, opts.discount, opts.learningRate, opts.epsilon
#simulationFn = lambda agent, state: simulation.GridworldSimulation(agent,state,mdp)
gridWorldEnv = GridworldEnvironment(mdp)
actionFn = lambda state: mdp.getPossibleActions(state)
qLearnOpts = {'gamma': opts.discount,
'alpha': opts.learningRate,
'epsilon': opts.epsilon,
'actionFn': actionFn}
a = qlearningAgents.QLearningAgent(**qLearnOpts)
elif opts.agent == 'random':
# # No reason to use the random agent without episodes
if opts.episodes == 0:
opts.episodes = 10
class RandomAgent:
def getAction(self, state):
return random.choice(mdp.getPossibleActions(state))
def getValue(self, state):
return 0.0
def getQValue(self, state, action):
return 0.0
def getPolicy(self, state):
"NOTE: 'random' is a special policy value; don't use it in your code."
return 'random'
def update(self, state, action, nextState, reward):
pass
a = RandomAgent()
else:
    if not opts.manual: raise Exception('Unknown agent type: '+opts.agent)
###########################
# RUN EPISODES
###########################
# DISPLAY Q/V VALUES BEFORE SIMULATION OF EPISODES
if not opts.manual and opts.agent == 'value':
if opts.valueSteps:
for i in range(opts.iters):
tempAgent = valueIterationAgents.ValueIterationAgent(mdp, opts.discount, i)
display.displayValues(tempAgent, message = "VALUES AFTER "+str(i)+" ITERATIONS")
display.pause()
display.displayValues(a, message = "VALUES AFTER "+str(opts.iters)+" ITERATIONS")
display.pause()
display.displayQValues(a, message = "Q-VALUES AFTER "+str(opts.iters)+" ITERATIONS")
display.pause()
# FIGURE OUT WHAT TO DISPLAY EACH TIME STEP (IF ANYTHING)
displayCallback = lambda x: None
if not opts.quiet:
if opts.manual and opts.agent == None:
displayCallback = lambda state: display.displayNullValues(state)
else:
if opts.agent == 'random': displayCallback = lambda state: display.displayValues(a, state, "CURRENT VALUES")
if opts.agent == 'value': displayCallback = lambda state: display.displayValues(a, state, "CURRENT VALUES")
if opts.agent == 'q': displayCallback = lambda state: display.displayQValues(a, state, "CURRENT Q-VALUES")
messageCallback = lambda x: printString(x)
if opts.quiet:
messageCallback = lambda x: None
# FIGURE OUT WHETHER TO WAIT FOR A KEY PRESS AFTER EACH TIME STEP
pauseCallback = lambda : None
if opts.pause:
pauseCallback = lambda : display.pause()
# FIGURE OUT WHETHER THE USER WANTS MANUAL CONTROL (FOR DEBUGGING AND DEMOS)
if opts.manual:
decisionCallback = lambda state : getUserAction(state, mdp.getPossibleActions)
else:
decisionCallback = a.getAction
# RUN EPISODES
if opts.episodes > 0:
print
print "RUNNING", opts.episodes, "EPISODES"
print
returns = 0
for episode in range(1, opts.episodes+1):
returns += runEpisode(a, env, opts.discount, decisionCallback, displayCallback, messageCallback, pauseCallback, episode)
if opts.episodes > 0:
print
print "AVERAGE RETURNS FROM START STATE: "+str((returns+0.0) / opts.episodes)
print
print
# DISPLAY POST-LEARNING VALUES / Q-VALUES
if opts.agent == 'q' and not opts.manual:
display.displayQValues(a, message = "Q-VALUES AFTER "+str(opts.episodes)+" EPISODES")
display.pause()
display.displayValues(a, message = "VALUES AFTER "+str(opts.episodes)+" EPISODES")
display.pause()
|
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for firebase_admin.auth module."""
import base64
import datetime
import random
import string
import time
from typing import List
from urllib import parse
import uuid
import google.oauth2.credentials
from google.auth import transport
import pytest
import requests
import firebase_admin
from firebase_admin import auth
from firebase_admin import credentials
_verify_token_url = 'https://www.googleapis.com/identitytoolkit/v3/relyingparty/verifyCustomToken'
_verify_password_url = 'https://www.googleapis.com/identitytoolkit/v3/relyingparty/verifyPassword'
_password_reset_url = 'https://www.googleapis.com/identitytoolkit/v3/relyingparty/resetPassword'
_verify_email_url = 'https://www.googleapis.com/identitytoolkit/v3/relyingparty/setAccountInfo'
_email_sign_in_url = 'https://www.googleapis.com/identitytoolkit/v3/relyingparty/emailLinkSignin'
ACTION_LINK_CONTINUE_URL = 'http://localhost?a=1&b=5#f=1'
X509_CERTIFICATES = [
('-----BEGIN CERTIFICATE-----\nMIICZjCCAc+gAwIBAgIBADANBgkqhkiG9w0BAQ0FADBQMQswCQYDVQQGEwJ1czE'
'L\nMAkGA1UECAwCQ0ExDTALBgNVBAoMBEFjbWUxETAPBgNVBAMMCGFjbWUuY29tMRIw\nEAYDVQQHDAlTdW5ueXZhbGU'
'wHhcNMTgxMjA2MDc1MTUxWhcNMjgxMjAzMDc1MTUx\nWjBQMQswCQYDVQQGEwJ1czELMAkGA1UECAwCQ0ExDTALBgNVB'
'AoMBEFjbWUxETAP\nBgNVBAMMCGFjbWUuY29tMRIwEAYDVQQHDAlTdW5ueXZhbGUwgZ8wDQYJKoZIhvcN\nAQEBBQADg'
'Y0AMIGJAoGBAKphmggjiVgqMLXyzvI7cKphscIIQ+wcv7Dld6MD4aKv\n7Jqr8ltujMxBUeY4LFEKw8Terb01snYpDot'
'filaG6NxpF/GfVVmMalzwWp0mT8+H\nyzyPj89mRcozu17RwuooR6n1ofXjGcBE86lqC21UhA3WVgjPOLqB42rlE9gPn'
'ZLB\nAgMBAAGjUDBOMB0GA1UdDgQWBBS0iM7WnbCNOnieOP1HIA+Oz/ML+zAfBgNVHSME\nGDAWgBS0iM7WnbCNOnieO'
'P1HIA+Oz/ML+zAMBgNVHRMEBTADAQH/MA0GCSqGSIb3\nDQEBDQUAA4GBAF3jBgS+wP+K/jTupEQur6iaqS4UvXd//d4'
'vo1MV06oTLQMTz+rP\nOSMDNwxzfaOn6vgYLKP/Dcy9dSTnSzgxLAxfKvDQZA0vE3udsw0Bd245MmX4+GOp\nlbrN99X'
'P1u+lFxCSdMUzvQ/jW4ysw/Nq4JdJ0gPAyPvL6Qi/3mQdIQwx\n-----END CERTIFICATE-----\n'),
('-----BEGIN CERTIFICATE-----\nMIICZjCCAc+gAwIBAgIBADANBgkqhkiG9w0BAQ0FADBQMQswCQYDVQQGEwJ1czE'
'L\nMAkGA1UECAwCQ0ExDTALBgNVBAoMBEFjbWUxETAPBgNVBAMMCGFjbWUuY29tMRIw\nEAYDVQQHDAlTdW5ueXZhbGU'
'wHhcNMTgxMjA2MDc1ODE4WhcNMjgxMjAzMDc1ODE4\nWjBQMQswCQYDVQQGEwJ1czELMAkGA1UECAwCQ0ExDTALBgNVB'
'AoMBEFjbWUxETAP\nBgNVBAMMCGFjbWUuY29tMRIwEAYDVQQHDAlTdW5ueXZhbGUwgZ8wDQYJKoZIhvcN\nAQEBBQADg'
'Y0AMIGJAoGBAKuzYKfDZGA6DJgQru3wNUqv+S0hMZfP/jbp8ou/8UKu\nrNeX7cfCgt3yxoGCJYKmF6t5mvo76JY0MWw'
'A53BxeP/oyXmJ93uHG5mFRAsVAUKs\ncVVb0Xi6ujxZGVdDWFV696L0BNOoHTfXmac6IBoZQzNNK4n1AATqwo+z7a0pf'
'RrJ\nAgMBAAGjUDBOMB0GA1UdDgQWBBSKmi/ZKMuLN0ES7/jPa7q7jAjPiDAfBgNVHSME\nGDAWgBSKmi/ZKMuLN0ES7'
'/jPa7q7jAjPiDAMBgNVHRMEBTADAQH/MA0GCSqGSIb3\nDQEBDQUAA4GBAAg2a2kSn05NiUOuWOHwPUjW3wQRsGxPXtb'
'hWMhmNdCfKKteM2+/\nLd/jz5F3qkOgGQ3UDgr3SHEoWhnLaJMF4a2tm6vL2rEIfPEK81KhTTRxSsAgMVbU\nJXBz1md'
'6Ur0HlgQC7d1CHC8/xi2DDwHopLyxhogaZUxy9IaRxUEa2vJW\n-----END CERTIFICATE-----\n'),
]
def _sign_in(custom_token, api_key):
body = {'token' : custom_token.decode(), 'returnSecureToken' : True}
params = {'key' : api_key}
resp = requests.request('post', _verify_token_url, params=params, json=body)
resp.raise_for_status()
return resp.json().get('idToken')
def _sign_in_with_password(email, password, api_key):
body = {'email': email, 'password': password, 'returnSecureToken': True}
params = {'key' : api_key}
resp = requests.request('post', _verify_password_url, params=params, json=body)
resp.raise_for_status()
return resp.json().get('idToken')
def _random_string(length=10):
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(length))
def _random_id():
random_id = str(uuid.uuid4()).lower().replace('-', '')
email = 'test{0}@example.{1}.com'.format(random_id[:12], random_id[12:])
return random_id, email
def _random_phone():
return '+1' + ''.join([str(random.randint(0, 9)) for _ in range(0, 10)])
def _reset_password(oob_code, new_password, api_key):
body = {'oobCode': oob_code, 'newPassword': new_password}
params = {'key' : api_key}
resp = requests.request('post', _password_reset_url, params=params, json=body)
resp.raise_for_status()
return resp.json().get('email')
def _verify_email(oob_code, api_key):
body = {'oobCode': oob_code}
params = {'key' : api_key}
resp = requests.request('post', _verify_email_url, params=params, json=body)
resp.raise_for_status()
return resp.json().get('email')
def _sign_in_with_email_link(email, oob_code, api_key):
body = {'oobCode': oob_code, 'email': email}
params = {'key' : api_key}
resp = requests.request('post', _email_sign_in_url, params=params, json=body)
resp.raise_for_status()
return resp.json().get('idToken')
def _extract_link_params(link):
query = parse.urlparse(link).query
query_dict = dict(parse.parse_qsl(query))
return query_dict
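# Usage sketch: the email-link sign-in round trip exercised via the REST
# helpers above. It assumes an initialized default firebase_admin app, a valid
# Web API key and an existing user email; the continue URL reuses the
# ACTION_LINK_CONTINUE_URL constant defined earlier.
def _email_link_sign_in_demo(email, api_key):
    action_code_settings = auth.ActionCodeSettings(url=ACTION_LINK_CONTINUE_URL)
    link = auth.generate_sign_in_with_email_link(email, action_code_settings)
    oob_code = _extract_link_params(link)['oobCode']
    return _sign_in_with_email_link(email, oob_code, api_key)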
def test_custom_token(api_key):
custom_token = auth.create_custom_token('user1')
id_token = _sign_in(custom_token, api_key)
claims = auth.verify_id_token(id_token)
assert claims['uid'] == 'user1'
def test_custom_token_without_service_account(api_key):
google_cred = firebase_admin.get_app().credential.get_credential()
cred = CredentialWrapper.from_existing_credential(google_cred)
custom_app = firebase_admin.initialize_app(cred, {
'serviceAccountId': google_cred.service_account_email,
'projectId': firebase_admin.get_app().project_id
}, 'temp-app')
try:
custom_token = auth.create_custom_token('user1', app=custom_app)
id_token = _sign_in(custom_token, api_key)
claims = auth.verify_id_token(id_token)
assert claims['uid'] == 'user1'
finally:
firebase_admin.delete_app(custom_app)
def test_custom_token_with_claims(api_key):
dev_claims = {'premium' : True, 'subscription' : 'silver'}
custom_token = auth.create_custom_token('user2', dev_claims)
id_token = _sign_in(custom_token, api_key)
claims = auth.verify_id_token(id_token)
assert claims['uid'] == 'user2'
assert claims['premium'] is True
assert claims['subscription'] == 'silver'
def test_session_cookies(api_key):
dev_claims = {'premium' : True, 'subscription' : 'silver'}
custom_token = auth.create_custom_token('user3', dev_claims)
id_token = _sign_in(custom_token, api_key)
expires_in = datetime.timedelta(days=1)
session_cookie = auth.create_session_cookie(id_token, expires_in=expires_in)
claims = auth.verify_session_cookie(session_cookie)
assert claims['uid'] == 'user3'
assert claims['premium'] is True
assert claims['subscription'] == 'silver'
assert claims['iss'].startswith('https://session.firebase.google.com')
estimated_exp = int(time.time() + expires_in.total_seconds())
assert abs(claims['exp'] - estimated_exp) < 5
def test_session_cookie_error():
expires_in = datetime.timedelta(days=1)
with pytest.raises(auth.InvalidIdTokenError):
auth.create_session_cookie('not.a.token', expires_in=expires_in)
def test_get_non_existing_user():
with pytest.raises(auth.UserNotFoundError) as excinfo:
auth.get_user('non.existing')
assert str(excinfo.value) == 'No user record found for the provided user ID: non.existing.'
def test_get_non_existing_user_by_email():
with pytest.raises(auth.UserNotFoundError) as excinfo:
auth.get_user_by_email('[email protected]')
error_msg = ('No user record found for the provided email: '
'[email protected].')
assert str(excinfo.value) == error_msg
def test_update_non_existing_user():
with pytest.raises(auth.UserNotFoundError):
auth.update_user('non.existing')
def test_delete_non_existing_user():
with pytest.raises(auth.UserNotFoundError):
auth.delete_user('non.existing')
@pytest.fixture
def new_user():
user = auth.create_user()
yield user
auth.delete_user(user.uid)
@pytest.fixture
def new_user_with_params() -> auth.UserRecord:
random_id, email = _random_id()
phone = _random_phone()
user = auth.create_user(
uid=random_id,
email=email,
phone_number=phone,
display_name='Random User',
photo_url='https://example.com/photo.png',
email_verified=True,
password='secret',
)
yield user
auth.delete_user(user.uid)
@pytest.fixture
def new_user_list():
users = [
auth.create_user(password='password').uid,
auth.create_user(password='password').uid,
auth.create_user(password='password').uid,
]
yield users
# TODO(rsgowman): Using auth.delete_users() would make more sense here, but
# that's currently rate limited to 1qps, so using it in this context would
# almost certainly trigger errors. When/if that limit is relaxed, switch to
# batch delete.
for uid in users:
auth.delete_user(uid)
@pytest.fixture
def new_user_record_list() -> List[auth.UserRecord]:
uid1, email1 = _random_id()
uid2, email2 = _random_id()
uid3, email3 = _random_id()
users = [
auth.create_user(
uid=uid1, email=email1, password='password', phone_number=_random_phone()),
auth.create_user(
uid=uid2, email=email2, password='password', phone_number=_random_phone()),
auth.create_user(
uid=uid3, email=email3, password='password', phone_number=_random_phone()),
]
yield users
for user in users:
auth.delete_user(user.uid)
@pytest.fixture
def new_user_with_provider() -> auth.UserRecord:
uid4, email4 = _random_id()
google_uid, google_email = _random_id()
import_user1 = auth.ImportUserRecord(
uid=uid4,
email=email4,
provider_data=[
auth.UserProvider(
uid=google_uid,
provider_id='google.com',
email=google_email,
)
])
user_import_result = auth.import_users([import_user1])
assert user_import_result.success_count == 1
assert user_import_result.failure_count == 0
user = auth.get_user(uid4)
yield user
auth.delete_user(user.uid)
@pytest.fixture
def new_user_email_unverified():
random_id, email = _random_id()
user = auth.create_user(
uid=random_id,
email=email,
email_verified=False,
password='password'
)
yield user
auth.delete_user(user.uid)
def test_get_user(new_user_with_params):
user = auth.get_user(new_user_with_params.uid)
assert user.uid == new_user_with_params.uid
assert user.display_name == 'Random User'
assert user.email == new_user_with_params.email
assert user.phone_number == new_user_with_params.phone_number
assert user.photo_url == 'https://example.com/photo.png'
assert user.email_verified is True
assert user.disabled is False
user = auth.get_user_by_email(new_user_with_params.email)
assert user.uid == new_user_with_params.uid
user = auth.get_user_by_phone_number(new_user_with_params.phone_number)
assert user.uid == new_user_with_params.uid
assert len(user.provider_data) == 2
provider_ids = sorted([provider.provider_id for provider in user.provider_data])
assert provider_ids == ['password', 'phone']
class TestGetUsers:
@staticmethod
def _map_user_record_to_uid_email_phones(user_record):
return {
'uid': user_record.uid,
'email': user_record.email,
'phone_number': user_record.phone_number
}
def test_multiple_uid_types(self, new_user_record_list, new_user_with_provider):
get_users_results = auth.get_users([
auth.UidIdentifier(new_user_record_list[0].uid),
auth.EmailIdentifier(new_user_record_list[1].email),
auth.PhoneIdentifier(new_user_record_list[2].phone_number),
auth.ProviderIdentifier(
new_user_with_provider.provider_data[0].provider_id,
new_user_with_provider.provider_data[0].uid,
)])
actual = sorted([
self._map_user_record_to_uid_email_phones(user)
for user in get_users_results.users
], key=lambda user: user['uid'])
expected = sorted([
self._map_user_record_to_uid_email_phones(user)
for user in new_user_record_list + [new_user_with_provider]
], key=lambda user: user['uid'])
assert actual == expected
def test_existing_and_non_existing_users(self, new_user_record_list):
get_users_results = auth.get_users([
auth.UidIdentifier(new_user_record_list[0].uid),
auth.UidIdentifier('uid_that_doesnt_exist'),
auth.UidIdentifier(new_user_record_list[2].uid)])
actual = sorted([
self._map_user_record_to_uid_email_phones(user)
for user in get_users_results.users
], key=lambda user: user['uid'])
expected = sorted([
self._map_user_record_to_uid_email_phones(user)
for user in [new_user_record_list[0], new_user_record_list[2]]
], key=lambda user: user['uid'])
assert actual == expected
def test_non_existing_users(self):
not_found_ids = [auth.UidIdentifier('non-existing user')]
get_users_results = auth.get_users(not_found_ids)
assert get_users_results.users == []
assert get_users_results.not_found == not_found_ids
def test_de_dups_duplicate_users(self, new_user):
get_users_results = auth.get_users([
auth.UidIdentifier(new_user.uid),
auth.UidIdentifier(new_user.uid)])
actual = [
self._map_user_record_to_uid_email_phones(user)
for user in get_users_results.users]
expected = [self._map_user_record_to_uid_email_phones(new_user)]
assert actual == expected
def test_last_refresh_timestamp(new_user_with_params: auth.UserRecord, api_key):
# new users should not have a last_refresh_timestamp set
assert new_user_with_params.user_metadata.last_refresh_timestamp is None
# login to cause the last_refresh_timestamp to be set
_sign_in_with_password(new_user_with_params.email, 'secret', api_key)
# Attempt to retrieve the user 3 times (with a small delay between each
# attempt). Occasionally, this call retrieves the user data without the
# lastLoginTime/lastRefreshTime set; possibly because it's hitting a
# different server than the login request uses.
user_record = None
for iteration in range(0, 3):
user_record = auth.get_user(new_user_with_params.uid)
if user_record.user_metadata.last_refresh_timestamp is not None:
break
time.sleep(2 ** iteration)
# Ensure the last refresh time occurred at approximately 'now'. Allow a
# tolerance of up to one minute so that clock skew between this client and
# the backend does not make the assertion flaky.
millis_per_second = 1000
millis_per_minute = millis_per_second * 60
last_refresh_timestamp = user_record.user_metadata.last_refresh_timestamp
assert last_refresh_timestamp == pytest.approx(
time.time() * millis_per_second, abs=1 * millis_per_minute)
def test_list_users(new_user_list):
err_msg_template = (
'Missing {field} field. A common cause would be forgetting to add the "Firebase ' +
'Authentication Admin" permission. See instructions in CONTRIBUTING.md')
fetched = []
# Test exporting all user accounts.
page = auth.list_users()
while page:
for user in page.users:
assert isinstance(user, auth.ExportedUserRecord)
if user.uid in new_user_list:
fetched.append(user.uid)
assert user.password_hash is not None, (
err_msg_template.format(field='password_hash'))
assert user.password_salt is not None, (
err_msg_template.format(field='password_salt'))
page = page.get_next_page()
assert len(fetched) == len(new_user_list)
fetched = []
page = auth.list_users()
for user in page.iterate_all():
assert isinstance(user, auth.ExportedUserRecord)
if user.uid in new_user_list:
fetched.append(user.uid)
assert user.password_hash is not None, (
err_msg_template.format(field='password_hash'))
assert user.password_salt is not None, (
err_msg_template.format(field='password_salt'))
assert len(fetched) == len(new_user_list)
def test_create_user(new_user):
user = auth.get_user(new_user.uid)
assert user.uid == new_user.uid
assert user.display_name is None
assert user.email is None
assert user.phone_number is None
assert user.photo_url is None
assert user.email_verified is False
assert user.disabled is False
assert user.custom_claims is None
assert user.user_metadata.creation_timestamp > 0
assert user.user_metadata.last_sign_in_timestamp is None
assert len(user.provider_data) == 0
with pytest.raises(auth.UidAlreadyExistsError):
auth.create_user(uid=new_user.uid)
def test_update_user(new_user):
_, email = _random_id()
phone = _random_phone()
user = auth.update_user(
new_user.uid,
email=email,
phone_number=phone,
display_name='Updated Name',
photo_url='https://example.com/photo.png',
email_verified=True,
password='secret')
assert user.uid == new_user.uid
assert user.display_name == 'Updated Name'
assert user.email == email
assert user.phone_number == phone
assert user.photo_url == 'https://example.com/photo.png'
assert user.email_verified is True
assert user.disabled is False
assert user.custom_claims is None
assert len(user.provider_data) == 2
def test_set_custom_user_claims(new_user, api_key):
claims = {'admin' : True, 'package' : 'gold'}
auth.set_custom_user_claims(new_user.uid, claims)
user = auth.get_user(new_user.uid)
assert user.custom_claims == claims
custom_token = auth.create_custom_token(new_user.uid)
id_token = _sign_in(custom_token, api_key)
dev_claims = auth.verify_id_token(id_token)
for key, value in claims.items():
assert dev_claims[key] == value
def test_update_custom_user_claims(new_user):
assert new_user.custom_claims is None
claims = {'admin' : True, 'package' : 'gold'}
auth.set_custom_user_claims(new_user.uid, claims)
user = auth.get_user(new_user.uid)
assert user.custom_claims == claims
claims = {'admin' : False, 'subscription' : 'guest'}
auth.set_custom_user_claims(new_user.uid, claims)
user = auth.get_user(new_user.uid)
assert user.custom_claims == claims
auth.set_custom_user_claims(new_user.uid, None)
user = auth.get_user(new_user.uid)
assert user.custom_claims is None
def test_disable_user(new_user_with_params):
user = auth.update_user(
new_user_with_params.uid,
display_name=auth.DELETE_ATTRIBUTE,
photo_url=auth.DELETE_ATTRIBUTE,
phone_number=auth.DELETE_ATTRIBUTE,
disabled=True)
assert user.uid == new_user_with_params.uid
assert user.email == new_user_with_params.email
assert user.display_name is None
assert user.phone_number is None
assert user.photo_url is None
assert user.email_verified is True
assert user.disabled is True
assert len(user.provider_data) == 1
def test_remove_provider(new_user_with_provider):
provider_ids = [provider.provider_id for provider in new_user_with_provider.provider_data]
assert 'google.com' in provider_ids
user = auth.update_user(new_user_with_provider.uid, providers_to_delete=['google.com'])
assert user.uid == new_user_with_provider.uid
new_provider_ids = [provider.provider_id for provider in user.provider_data]
assert 'google.com' not in new_provider_ids
def test_delete_user():
user = auth.create_user()
auth.delete_user(user.uid)
with pytest.raises(auth.UserNotFoundError):
auth.get_user(user.uid)
class TestDeleteUsers:
def test_delete_multiple_users(self):
uid1 = auth.create_user(disabled=True).uid
uid2 = auth.create_user(disabled=False).uid
uid3 = auth.create_user(disabled=True).uid
delete_users_result = self._slow_delete_users(auth, [uid1, uid2, uid3])
assert delete_users_result.success_count == 3
assert delete_users_result.failure_count == 0
assert len(delete_users_result.errors) == 0
get_users_results = auth.get_users(
[auth.UidIdentifier(uid1), auth.UidIdentifier(uid2), auth.UidIdentifier(uid3)])
assert len(get_users_results.users) == 0
def test_is_idempotent(self):
uid = auth.create_user().uid
delete_users_result = self._slow_delete_users(auth, [uid])
assert delete_users_result.success_count == 1
assert delete_users_result.failure_count == 0
# Delete the user again, ensuring that everything still counts as a
# success.
delete_users_result = self._slow_delete_users(auth, [uid])
assert delete_users_result.success_count == 1
assert delete_users_result.failure_count == 0
def _slow_delete_users(self, auth, uids):
"""The batchDelete endpoint has a rate limit of 1 QPS. Use this test
helper to ensure you don't exceed the quota."""
time.sleep(1)
return auth.delete_users(uids)
def test_revoke_refresh_tokens(new_user):
user = auth.get_user(new_user.uid)
old_valid_after = user.tokens_valid_after_timestamp
time.sleep(1)
auth.revoke_refresh_tokens(new_user.uid)
user = auth.get_user(new_user.uid)
new_valid_after = user.tokens_valid_after_timestamp
assert new_valid_after > old_valid_after
def test_verify_id_token_revoked(new_user, api_key):
custom_token = auth.create_custom_token(new_user.uid)
id_token = _sign_in(custom_token, api_key)
claims = auth.verify_id_token(id_token)
assert claims['iat'] * 1000 >= new_user.tokens_valid_after_timestamp
time.sleep(1)
auth.revoke_refresh_tokens(new_user.uid)
claims = auth.verify_id_token(id_token, check_revoked=False)
user = auth.get_user(new_user.uid)
# verify_id_token succeeded because it didn't check revoked.
assert claims['iat'] * 1000 < user.tokens_valid_after_timestamp
with pytest.raises(auth.RevokedIdTokenError) as excinfo:
claims = auth.verify_id_token(id_token, check_revoked=True)
assert str(excinfo.value) == 'The Firebase ID token has been revoked.'
# Sign in again, verify works.
id_token = _sign_in(custom_token, api_key)
claims = auth.verify_id_token(id_token, check_revoked=True)
assert claims['iat'] * 1000 >= user.tokens_valid_after_timestamp
def test_verify_id_token_disabled(new_user, api_key):
custom_token = auth.create_custom_token(new_user.uid)
id_token = _sign_in(custom_token, api_key)
claims = auth.verify_id_token(id_token, check_revoked=True)
# Disable the user record.
auth.update_user(new_user.uid, disabled=True)
# Verify the ID token without checking revocation. This should
# not raise.
claims = auth.verify_id_token(id_token, check_revoked=False)
assert claims['sub'] == new_user.uid
# Verify the ID token while checking revocation. This should
# raise an exception.
with pytest.raises(auth.UserDisabledError) as excinfo:
auth.verify_id_token(id_token, check_revoked=True)
assert str(excinfo.value) == 'The user record is disabled.'
def test_verify_session_cookie_revoked(new_user, api_key):
custom_token = auth.create_custom_token(new_user.uid)
id_token = _sign_in(custom_token, api_key)
session_cookie = auth.create_session_cookie(id_token, expires_in=datetime.timedelta(days=1))
time.sleep(1)
auth.revoke_refresh_tokens(new_user.uid)
claims = auth.verify_session_cookie(session_cookie, check_revoked=False)
user = auth.get_user(new_user.uid)
# verify_session_cookie succeeded because it didn't check revoked.
assert claims['iat'] * 1000 < user.tokens_valid_after_timestamp
with pytest.raises(auth.RevokedSessionCookieError) as excinfo:
claims = auth.verify_session_cookie(session_cookie, check_revoked=True)
assert str(excinfo.value) == 'The Firebase session cookie has been revoked.'
# Sign in again, verify works.
id_token = _sign_in(custom_token, api_key)
session_cookie = auth.create_session_cookie(id_token, expires_in=datetime.timedelta(days=1))
claims = auth.verify_session_cookie(session_cookie, check_revoked=True)
assert claims['iat'] * 1000 >= user.tokens_valid_after_timestamp
def test_verify_session_cookie_disabled(new_user, api_key):
custom_token = auth.create_custom_token(new_user.uid)
id_token = _sign_in(custom_token, api_key)
session_cookie = auth.create_session_cookie(id_token, expires_in=datetime.timedelta(days=1))
# Disable the user record.
auth.update_user(new_user.uid, disabled=True)
# Verify the session cookie without checking revocation. This should
# not raise.
claims = auth.verify_session_cookie(session_cookie, check_revoked=False)
assert claims['sub'] == new_user.uid
# Verify the session cookie while checking revocation. This should
# raise an exception.
with pytest.raises(auth.UserDisabledError) as excinfo:
auth.verify_session_cookie(session_cookie, check_revoked=True)
assert str(excinfo.value) == 'The user record is disabled.'
def test_import_users():
uid, email = _random_id()
user = auth.ImportUserRecord(uid=uid, email=email)
result = auth.import_users([user])
try:
assert result.success_count == 1
assert result.failure_count == 0
saved_user = auth.get_user(uid)
assert saved_user.email == email
finally:
auth.delete_user(uid)
def test_import_users_with_password(api_key):
uid, email = _random_id()
password_hash = base64.b64decode(
'V358E8LdWJXAO7muq0CufVpEOXaj8aFiC7T/rcaGieN04q/ZPJ08WhJEHGjj9lz/2TT+/86N5VjVoc5DdBhBiw==')
user = auth.ImportUserRecord(
uid=uid, email=email, password_hash=password_hash, password_salt=b'NaCl')
scrypt_key = base64.b64decode(
'jxspr8Ki0RYycVU8zykbdLGjFQ3McFUH0uiiTvC8pVMXAn210wjLNmdZJzxUECKbm0QsEmYUSDzZvpjeJ9WmXA==')
salt_separator = base64.b64decode('Bw==')
scrypt = auth.UserImportHash.scrypt(
key=scrypt_key, salt_separator=salt_separator, rounds=8, memory_cost=14)
result = auth.import_users([user], hash_alg=scrypt)
try:
assert result.success_count == 1
assert result.failure_count == 0
saved_user = auth.get_user(uid)
assert saved_user.email == email
id_token = _sign_in_with_password(email, 'password', api_key)
assert len(id_token) > 0
finally:
auth.delete_user(uid)
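# The scrypt parameters used above (signer key, salt separator, rounds, memory
# cost) mirror the "password hash parameters" that the Firebase console exposes
# for a project; the base64 blobs here are sample values for the test project,
# not real secrets.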
def test_password_reset(new_user_email_unverified, api_key):
link = auth.generate_password_reset_link(new_user_email_unverified.email)
assert isinstance(link, str)
query_dict = _extract_link_params(link)
user_email = _reset_password(query_dict['oobCode'], 'newPassword', api_key)
assert new_user_email_unverified.email == user_email
# password reset also sets email_verified to True
assert auth.get_user(new_user_email_unverified.uid).email_verified
def test_email_verification(new_user_email_unverified, api_key):
link = auth.generate_email_verification_link(new_user_email_unverified.email)
assert isinstance(link, str)
query_dict = _extract_link_params(link)
user_email = _verify_email(query_dict['oobCode'], api_key)
assert new_user_email_unverified.email == user_email
assert auth.get_user(new_user_email_unverified.uid).email_verified
def test_password_reset_with_settings(new_user_email_unverified, api_key):
action_code_settings = auth.ActionCodeSettings(ACTION_LINK_CONTINUE_URL)
link = auth.generate_password_reset_link(new_user_email_unverified.email,
action_code_settings=action_code_settings)
assert isinstance(link, str)
query_dict = _extract_link_params(link)
assert query_dict['continueUrl'] == ACTION_LINK_CONTINUE_URL
user_email = _reset_password(query_dict['oobCode'], 'newPassword', api_key)
assert new_user_email_unverified.email == user_email
# password reset also sets email_verified to True
assert auth.get_user(new_user_email_unverified.uid).email_verified
def test_email_verification_with_settings(new_user_email_unverified, api_key):
action_code_settings = auth.ActionCodeSettings(ACTION_LINK_CONTINUE_URL)
link = auth.generate_email_verification_link(new_user_email_unverified.email,
action_code_settings=action_code_settings)
assert isinstance(link, str)
query_dict = _extract_link_params(link)
assert query_dict['continueUrl'] == ACTION_LINK_CONTINUE_URL
user_email = _verify_email(query_dict['oobCode'], api_key)
assert new_user_email_unverified.email == user_email
assert auth.get_user(new_user_email_unverified.uid).email_verified
def test_email_sign_in_with_settings(new_user_email_unverified, api_key):
action_code_settings = auth.ActionCodeSettings(ACTION_LINK_CONTINUE_URL)
link = auth.generate_sign_in_with_email_link(new_user_email_unverified.email,
action_code_settings=action_code_settings)
assert isinstance(link, str)
query_dict = _extract_link_params(link)
assert query_dict['continueUrl'] == ACTION_LINK_CONTINUE_URL
oob_code = query_dict['oobCode']
id_token = _sign_in_with_email_link(new_user_email_unverified.email, oob_code, api_key)
assert id_token is not None and len(id_token) > 0
assert auth.get_user(new_user_email_unverified.uid).email_verified
@pytest.fixture(scope='module')
def oidc_provider():
provider_config = _create_oidc_provider_config()
yield provider_config
auth.delete_oidc_provider_config(provider_config.provider_id)
def test_create_oidc_provider_config(oidc_provider):
assert isinstance(oidc_provider, auth.OIDCProviderConfig)
assert oidc_provider.client_id == 'OIDC_CLIENT_ID'
assert oidc_provider.issuer == 'https://oidc.com/issuer'
assert oidc_provider.display_name == 'OIDC_DISPLAY_NAME'
assert oidc_provider.enabled is True
assert oidc_provider.id_token_response_type is True
assert oidc_provider.code_response_type is False
assert oidc_provider.client_secret is None
def test_get_oidc_provider_config(oidc_provider):
provider_config = auth.get_oidc_provider_config(oidc_provider.provider_id)
assert isinstance(provider_config, auth.OIDCProviderConfig)
assert provider_config.provider_id == oidc_provider.provider_id
assert provider_config.client_id == 'OIDC_CLIENT_ID'
assert provider_config.issuer == 'https://oidc.com/issuer'
assert provider_config.display_name == 'OIDC_DISPLAY_NAME'
assert provider_config.enabled is True
assert provider_config.id_token_response_type is True
assert provider_config.code_response_type is False
assert provider_config.client_secret is None
def test_list_oidc_provider_configs(oidc_provider):
page = auth.list_oidc_provider_configs()
result = None
for provider_config in page.iterate_all():
if provider_config.provider_id == oidc_provider.provider_id:
result = provider_config
break
assert result is not None
def test_update_oidc_provider_config():
provider_config = _create_oidc_provider_config()
try:
provider_config = auth.update_oidc_provider_config(
provider_config.provider_id,
client_id='UPDATED_OIDC_CLIENT_ID',
issuer='https://oidc.com/updated_issuer',
display_name='UPDATED_OIDC_DISPLAY_NAME',
enabled=False,
client_secret='CLIENT_SECRET',
id_token_response_type=False,
code_response_type=True)
assert provider_config.client_id == 'UPDATED_OIDC_CLIENT_ID'
assert provider_config.issuer == 'https://oidc.com/updated_issuer'
assert provider_config.display_name == 'UPDATED_OIDC_DISPLAY_NAME'
assert provider_config.enabled is False
assert provider_config.id_token_response_type is False
assert provider_config.code_response_type is True
assert provider_config.client_secret == 'CLIENT_SECRET'
finally:
auth.delete_oidc_provider_config(provider_config.provider_id)
def test_delete_oidc_provider_config():
provider_config = _create_oidc_provider_config()
auth.delete_oidc_provider_config(provider_config.provider_id)
with pytest.raises(auth.ConfigurationNotFoundError):
auth.get_oidc_provider_config(provider_config.provider_id)
@pytest.fixture(scope='module')
def saml_provider():
provider_config = _create_saml_provider_config()
yield provider_config
auth.delete_saml_provider_config(provider_config.provider_id)
def test_create_saml_provider_config(saml_provider):
assert isinstance(saml_provider, auth.SAMLProviderConfig)
assert saml_provider.idp_entity_id == 'IDP_ENTITY_ID'
assert saml_provider.sso_url == 'https://example.com/login'
assert saml_provider.x509_certificates == [X509_CERTIFICATES[0]]
assert saml_provider.rp_entity_id == 'RP_ENTITY_ID'
assert saml_provider.callback_url == 'https://projectId.firebaseapp.com/__/auth/handler'
assert saml_provider.display_name == 'SAML_DISPLAY_NAME'
assert saml_provider.enabled is True
def test_get_saml_provider_config(saml_provider):
provider_config = auth.get_saml_provider_config(saml_provider.provider_id)
assert isinstance(provider_config, auth.SAMLProviderConfig)
assert provider_config.provider_id == saml_provider.provider_id
assert provider_config.idp_entity_id == 'IDP_ENTITY_ID'
assert provider_config.sso_url == 'https://example.com/login'
assert provider_config.x509_certificates == [X509_CERTIFICATES[0]]
assert provider_config.rp_entity_id == 'RP_ENTITY_ID'
assert provider_config.callback_url == 'https://projectId.firebaseapp.com/__/auth/handler'
assert provider_config.display_name == 'SAML_DISPLAY_NAME'
assert provider_config.enabled is True
def test_list_saml_provider_configs(saml_provider):
page = auth.list_saml_provider_configs()
result = None
for provider_config in page.iterate_all():
if provider_config.provider_id == saml_provider.provider_id:
result = provider_config
break
assert result is not None
def test_update_saml_provider_config():
provider_config = _create_saml_provider_config()
try:
provider_config = auth.update_saml_provider_config(
provider_config.provider_id,
idp_entity_id='UPDATED_IDP_ENTITY_ID',
sso_url='https://example.com/updated_login',
x509_certificates=[X509_CERTIFICATES[1]],
rp_entity_id='UPDATED_RP_ENTITY_ID',
callback_url='https://updatedProjectId.firebaseapp.com/__/auth/handler',
display_name='UPDATED_SAML_DISPLAY_NAME',
enabled=False)
assert provider_config.idp_entity_id == 'UPDATED_IDP_ENTITY_ID'
assert provider_config.sso_url == 'https://example.com/updated_login'
assert provider_config.x509_certificates == [X509_CERTIFICATES[1]]
assert provider_config.rp_entity_id == 'UPDATED_RP_ENTITY_ID'
assert provider_config.callback_url == ('https://updatedProjectId.firebaseapp.com/'
'__/auth/handler')
assert provider_config.display_name == 'UPDATED_SAML_DISPLAY_NAME'
assert provider_config.enabled is False
finally:
auth.delete_saml_provider_config(provider_config.provider_id)
def test_delete_saml_provider_config():
provider_config = _create_saml_provider_config()
auth.delete_saml_provider_config(provider_config.provider_id)
with pytest.raises(auth.ConfigurationNotFoundError):
auth.get_saml_provider_config(provider_config.provider_id)
def _create_oidc_provider_config():
provider_id = 'oidc.{0}'.format(_random_string())
return auth.create_oidc_provider_config(
provider_id=provider_id,
client_id='OIDC_CLIENT_ID',
issuer='https://oidc.com/issuer',
display_name='OIDC_DISPLAY_NAME',
enabled=True,
id_token_response_type=True,
code_response_type=False)
def _create_saml_provider_config():
provider_id = 'saml.{0}'.format(_random_string())
return auth.create_saml_provider_config(
provider_id=provider_id,
idp_entity_id='IDP_ENTITY_ID',
sso_url='https://example.com/login',
x509_certificates=[X509_CERTIFICATES[0]],
rp_entity_id='RP_ENTITY_ID',
callback_url='https://projectId.firebaseapp.com/__/auth/handler',
display_name='SAML_DISPLAY_NAME',
enabled=True)
class CredentialWrapper(credentials.Base):
"""A custom Firebase credential that wraps an OAuth2 token."""
def __init__(self, token):
self._delegate = google.oauth2.credentials.Credentials(token)
def get_credential(self):
return self._delegate
@classmethod
def from_existing_credential(cls, google_cred):
if not google_cred.token:
request = transport.requests.Request()
google_cred.refresh(request)
return CredentialWrapper(google_cred.token)
|
|
from __future__ import absolute_import, print_function
import copy
import sys
from numba import ctypes_support as ctypes
from numba.typing.templates import AbstractTemplate
from numba import config, compiler, types
from numba.typing.templates import ConcreteTemplate
from numba import funcdesc, typing, utils
from .cudadrv.devices import get_context
from .cudadrv import nvvm, devicearray, driver
from .errors import KernelRuntimeError
from .api import get_current_device
def compile_cuda(pyfunc, return_type, args, debug, inline):
# First compilation will trigger the initialization of the CUDA backend.
from .descriptor import CUDATargetDesc
typingctx = CUDATargetDesc.typingctx
targetctx = CUDATargetDesc.targetctx
# TODO handle debug flag
flags = compiler.Flags()
# Do not compile (generate native code), just lower (to LLVM)
flags.set('no_compile')
flags.set('no_cpython_wrapper')
if debug:
flags.set('boundcheck')
if inline:
flags.set('forceinline')
# Run compilation pipeline
cres = compiler.compile_extra(typingctx=typingctx,
targetctx=targetctx,
func=pyfunc,
args=args,
return_type=return_type,
flags=flags,
locals={})
library = cres.library
library.finalize()
return cres
def compile_kernel(pyfunc, args, link, debug=False, inline=False,
fastmath=False):
cres = compile_cuda(pyfunc, types.void, args, debug=debug, inline=inline)
func = cres.library.get_function(cres.fndesc.llvm_func_name)
kernel = cres.target_context.prepare_cuda_kernel(func,
cres.signature.args)
cukern = CUDAKernel(llvm_module=cres.library._final_module,
name=kernel.name,
pretty_name=cres.fndesc.qualname,
argtypes=cres.signature.args,
type_annotation=cres.type_annotation,
link=link,
debug=debug,
call_helper=cres.call_helper,
fastmath=fastmath)
return cukern
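# Rough usage sketch (hypothetical function and types, for illustration only):
#
#   from numba import types
#
#   def inc_kernel(arr):
#       ...                                   # body lowered for the GPU
#
#   kern = compile_kernel(inc_kernel, (types.float32[:],), link=())
#   kern[griddim, blockdim](device_array)
#
# In normal use this path is reached through the numba.cuda.jit decorator
# rather than by calling compile_kernel directly.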
class DeviceFunctionTemplate(object):
"""Unmaterialized device function
"""
def __init__(self, pyfunc, debug, inline):
self.py_func = pyfunc
self.debug = debug
self.inline = inline
self._compileinfos = {}
def compile(self, args):
"""Compile the function for the given argument types.
Each signature is compiled once by caching the compiled function inside
this object.
"""
if args not in self._compileinfos:
cres = compile_cuda(self.py_func, None, args, debug=self.debug,
inline=self.inline)
first_definition = not self._compileinfos
self._compileinfos[args] = cres
libs = [cres.library]
if first_definition:
# First definition
cres.target_context.insert_user_function(self, cres.fndesc,
libs)
else:
cres.target_context.add_user_function(self, cres.fndesc, libs)
else:
cres = self._compileinfos[args]
return cres.signature
def compile_device_template(pyfunc, debug=False, inline=False):
"""Create a DeviceFunctionTemplate object and register the object to
the CUDA typing context.
"""
from .descriptor import CUDATargetDesc
dft = DeviceFunctionTemplate(pyfunc, debug=debug, inline=inline)
class device_function_template(AbstractTemplate):
key = dft
def generic(self, args, kws):
assert not kws
return dft.compile(args)
typingctx = CUDATargetDesc.typingctx
typingctx.insert_user_function(dft, device_function_template)
return dft
def compile_device(pyfunc, return_type, args, inline=True, debug=False):
cres = compile_cuda(pyfunc, return_type, args, debug=debug, inline=inline)
devfn = DeviceFunction(cres)
class device_function_template(ConcreteTemplate):
key = devfn
cases = [cres.signature]
cres.typing_context.insert_user_function(devfn, device_function_template)
cres.target_context.insert_user_function(devfn, cres.fndesc, [cres.library])
return devfn
def declare_device_function(name, restype, argtypes):
from .descriptor import CUDATargetDesc
typingctx = CUDATargetDesc.typingctx
targetctx = CUDATargetDesc.targetctx
sig = typing.signature(restype, *argtypes)
extfn = ExternFunction(name, sig)
class device_function_template(ConcreteTemplate):
key = extfn
cases = [sig]
fndesc = funcdesc.ExternalFunctionDescriptor(
name=name, restype=restype, argtypes=argtypes)
typingctx.insert_user_function(extfn, device_function_template)
targetctx.insert_user_function(extfn, fndesc)
return extfn
class DeviceFunction(object):
def __init__(self, cres):
self.cres = cres
class ExternFunction(object):
def __init__(self, name, sig):
self.name = name
self.sig = sig
class CUDAKernelBase(object):
"""Define interface for configurable kernels
"""
def __init__(self):
self.griddim = (1, 1)
self.blockdim = (1, 1, 1)
self.sharedmem = 0
self.stream = 0
def copy(self):
return copy.copy(self)
def configure(self, griddim, blockdim, stream=0, sharedmem=0):
if not isinstance(griddim, (tuple, list)):
griddim = [griddim]
else:
griddim = list(griddim)
if len(griddim) > 3:
raise ValueError('griddim must be a tuple/list of at most three ints')
while len(griddim) < 3:
griddim.append(1)
if not isinstance(blockdim, (tuple, list)):
blockdim = [blockdim]
else:
blockdim = list(blockdim)
if len(blockdim) > 3:
raise ValueError('blockdim must be a tuple/list of at most three ints')
while len(blockdim) < 3:
blockdim.append(1)
clone = self.copy()
clone.griddim = tuple(griddim)
clone.blockdim = tuple(blockdim)
clone.stream = stream
clone.sharedmem = sharedmem
return clone
def __getitem__(self, args):
if len(args) not in [2, 3, 4]:
raise ValueError('must specify at least the griddim and blockdim')
return self.configure(*args)
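# The subscript syntax plays the role of CUDA's <<<...>>> launch configuration;
# the positional order here is (griddim, blockdim[, stream[, sharedmem]]).
# Illustrative only (names are placeholders):
#
#   configured = kernel[(16, 16), (32, 8)]                 # griddim, blockdim
#   configured = kernel[griddim, blockdim, stream, 1024]   # + stream, sharedmem
#
# Each subscript returns a configured copy; the original kernel object is left
# untouched.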
class CachedPTX(object):
"""A PTX cache that uses compute capability as a cache key
"""
def __init__(self, name, llvmir, options):
self.name = name
self.llvmir = llvmir
self.cache = {}
self._extra_options = options.copy()
def get(self):
"""
Get PTX for the current active context.
"""
cuctx = get_context()
device = cuctx.device
cc = device.compute_capability
ptx = self.cache.get(cc)
if ptx is None:
arch = nvvm.get_arch_option(*cc)
ptx = nvvm.llvm_to_ptx(self.llvmir, opt=3, arch=arch,
**self._extra_options)
self.cache[cc] = ptx
if config.DUMP_ASSEMBLY:
print(("ASSEMBLY %s" % self.name).center(80, '-'))
print(ptx.decode('utf-8'))
print('=' * 80)
return ptx
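# Example cache behaviour (assuming the active device reports compute
# capability (7, 5)): the first call compiles the stored LLVM IR with the
# matching arch option (e.g. 'compute_75') and caches the resulting PTX under
# the key (7, 5); later calls on any device with the same compute capability
# reuse that PTX without re-invoking NVVM.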
class CachedCUFunction(object):
"""
Get or compile CUDA function for the current active context
Uses device ID as key for cache.
"""
def __init__(self, entry_name, ptx, linking):
self.entry_name = entry_name
self.ptx = ptx
self.linking = linking
self.cache = {}
self.ccinfos = {}
def get(self):
cuctx = get_context()
device = cuctx.device
cufunc = self.cache.get(device.id)
if cufunc is None:
ptx = self.ptx.get()
# Link
linker = driver.Linker()
linker.add_ptx(ptx)
for path in self.linking:
linker.add_file_guess_ext(path)
cubin, _size = linker.complete()
compile_info = linker.info_log
module = cuctx.create_module_image(cubin)
# Load
cufunc = module.get_function(self.entry_name)
self.cache[device.id] = cufunc
self.ccinfos[device.id] = compile_info
return cufunc
def get_info(self):
self.get() # trigger compilation
cuctx = get_context()
device = cuctx.device
ci = self.ccinfos[device.id]
return ci
class CUDAKernel(CUDAKernelBase):
'''
CUDA Kernel specialized for a given set of argument types. When called, this
object will validate that the argument types match those for which it is
specialized, and then launch the kernel on the device.
'''
def __init__(self, llvm_module, name, pretty_name,
argtypes, call_helper,
link=(), debug=False, fastmath=False,
type_annotation=None):
super(CUDAKernel, self).__init__()
self.entry_name = name
self.argument_types = tuple(argtypes)
self.linking = tuple(link)
self._type_annotation = type_annotation
options = {}
if fastmath:
options.update(dict(ftz=True,
prec_sqrt=False,
prec_div=False,
fma=True))
ptx = CachedPTX(pretty_name, str(llvm_module), options=options)
self._func = CachedCUFunction(self.entry_name, ptx, link)
self.debug = debug
self.call_helper = call_helper
def __call__(self, *args, **kwargs):
assert not kwargs
self._kernel_call(args=args,
griddim=self.griddim,
blockdim=self.blockdim,
stream=self.stream,
sharedmem=self.sharedmem)
def bind(self):
"""
Force binding to current CUDA context
"""
self._func.get()
@property
def ptx(self):
'''
PTX code for this kernel.
'''
return self._func.ptx.get().decode('utf8')
@property
def device(self):
"""
Get the device associated with the current active context.
"""
return get_current_device()
def inspect_llvm(self):
'''
Returns the LLVM IR for this kernel.
'''
return str(self._func.ptx.llvmir)
def inspect_asm(self):
'''
Returns the PTX code for this kernel.
'''
return self._func.ptx.get().decode('ascii')
def inspect_types(self, file=None):
'''
Produce a dump of the Python source of this function annotated with the
corresponding Numba IR and type information. The dump is written to
*file*, or *sys.stdout* if *file* is *None*.
'''
if self._type_annotation is None:
raise ValueError("Type annotation is not available")
if file is None:
file = sys.stdout
print("%s %s" % (self.entry_name, self.argument_types), file=file)
print('-' * 80, file=file)
print(self._type_annotation, file=file)
print('=' * 80, file=file)
def _kernel_call(self, args, griddim, blockdim, stream=0, sharedmem=0):
# Prepare kernel
cufunc = self._func.get()
if self.debug:
excname = cufunc.name + "__errcode__"
excmem, excsz = cufunc.module.get_global_symbol(excname)
assert excsz == ctypes.sizeof(ctypes.c_int)
excval = ctypes.c_int()
excmem.memset(0, stream=stream)
# Prepare arguments
retr = [] # hold functors for writeback
kernelargs = []
for t, v in zip(self.argument_types, args):
self._prepare_args(t, v, stream, retr, kernelargs)
# Configure kernel
cu_func = cufunc.configure(griddim, blockdim,
stream=stream,
sharedmem=sharedmem)
# Invoke kernel
cu_func(*kernelargs)
if self.debug:
driver.device_to_host(ctypes.addressof(excval), excmem, excsz)
if excval.value != 0:
# An error occurred
def load_symbol(name):
mem, sz = cufunc.module.get_global_symbol("%s__%s__" %
(cufunc.name,
name))
val = ctypes.c_int()
driver.device_to_host(ctypes.addressof(val), mem, sz)
return val.value
tid = [load_symbol("tid" + i) for i in 'zyx']
ctaid = [load_symbol("ctaid" + i) for i in 'zyx']
code = excval.value
exccls, exc_args = self.call_helper.get_exception(code)
# Prefix the exception message with the thread position
prefix = "tid=%s ctaid=%s" % (tid, ctaid)
if exc_args:
exc_args = ("%s: %s" % (prefix, exc_args[0]),) + exc_args[1:]
else:
exc_args = prefix,
raise exccls(*exc_args)
# retrieve auto converted arrays
for wb in retr:
wb()
def _prepare_args(self, ty, val, stream, retr, kernelargs):
"""
Convert arguments to ctypes and append to kernelargs
"""
if isinstance(ty, types.Array):
devary, conv = devicearray.auto_device(val, stream=stream)
if conv:
retr.append(lambda: devary.copy_to_host(val, stream=stream))
c_intp = ctypes.c_ssize_t
meminfo = ctypes.c_void_p(0)
parent = ctypes.c_void_p(0)
nitems = c_intp(devary.size)
itemsize = c_intp(devary.dtype.itemsize)
data = ctypes.c_void_p(driver.device_pointer(devary))
kernelargs.append(meminfo)
kernelargs.append(parent)
kernelargs.append(nitems)
kernelargs.append(itemsize)
kernelargs.append(data)
for ax in range(devary.ndim):
kernelargs.append(c_intp(devary.shape[ax]))
for ax in range(devary.ndim):
kernelargs.append(c_intp(devary.strides[ax]))
elif isinstance(ty, types.Integer):
cval = getattr(ctypes, "c_%s" % ty)(val)
kernelargs.append(cval)
elif ty == types.float64:
cval = ctypes.c_double(val)
kernelargs.append(cval)
elif ty == types.float32:
cval = ctypes.c_float(val)
kernelargs.append(cval)
elif ty == types.boolean:
cval = ctypes.c_uint8(int(val))
kernelargs.append(cval)
elif ty == types.complex64:
kernelargs.append(ctypes.c_float(val.real))
kernelargs.append(ctypes.c_float(val.imag))
elif ty == types.complex128:
kernelargs.append(ctypes.c_double(val.real))
kernelargs.append(ctypes.c_double(val.imag))
elif isinstance(ty, types.Record):
devrec, conv = devicearray.auto_device(val, stream=stream)
if conv:
retr.append(lambda: devrec.copy_to_host(val, stream=stream))
kernelargs.append(devrec)
else:
raise NotImplementedError(ty, val)
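# Worked example of the array flattening above: a C-contiguous 2-D float32
# device array of shape (3, 4) contributes nine kernel arguments -- meminfo,
# parent, nitems, itemsize, data pointer, then the shape (3, 4) and strides
# (16, 4) as c_ssize_t values -- matching the unpacked array layout expected
# by the generated kernel wrapper.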
class AutoJitCUDAKernel(CUDAKernelBase):
'''
CUDA Kernel object. When called, the kernel object will specialize itself
for the given arguments (if no suitable specialized version already exists)
and launch on the device associated with the current context.
Kernel objects are not to be constructed by the user, but instead are
created using the :func:`numba.cuda.jit` decorator.
'''
def __init__(self, func, bind, targetoptions):
super(AutoJitCUDAKernel, self).__init__()
self.py_func = func
self.bind = bind
self.definitions = {}
self.targetoptions = targetoptions
from .descriptor import CUDATargetDesc
self.typingctx = CUDATargetDesc.typingctx
def __call__(self, *args):
'''
Specialize and invoke this kernel with *args*.
'''
kernel = self.specialize(*args)
cfg = kernel[self.griddim, self.blockdim, self.stream, self.sharedmem]
cfg(*args)
def specialize(self, *args):
'''
Compile and bind to the current context a version of this kernel
specialized for the given *args*.
'''
argtypes = tuple(
[self.typingctx.resolve_argument_type(a) for a in args])
kernel = self.definitions.get(argtypes)
if kernel is None:
if 'link' not in self.targetoptions:
self.targetoptions['link'] = ()
kernel = compile_kernel(self.py_func, argtypes,
**self.targetoptions)
self.definitions[argtypes] = kernel
if self.bind:
kernel.bind()
return kernel
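# Illustrative call pattern (names are hypothetical): invoking
# auto_kernel[griddim, blockdim](np.float32(1.0)) and later
# auto_kernel[griddim, blockdim](np.int64(1)) leaves two entries in
# self.definitions, one per resolved argument-type tuple; each specialization
# is compiled (and, when bind=True, bound) only once.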
def inspect_llvm(self, signature=None):
'''
Return the LLVM IR for all signatures encountered thus far, or the LLVM
IR for a specific signature if given.
'''
if signature is not None:
return self.definitions[signature].inspect_llvm()
else:
return dict((sig, defn.inspect_llvm())
for sig, defn in self.definitions.items())
def inspect_asm(self, signature=None):
'''
Return the generated assembly code for all signatures encountered thus
far, or the assembly code for a specific signature if given.
'''
if signature is not None:
return self.definitions[signature].inspect_asm()
else:
return dict((sig, defn.inspect_asm())
for sig, defn in self.definitions.items())
def inspect_types(self, file=None):
'''
Produce a dump of the Python source of this function annotated with the
corresponding Numba IR and type information. The dump is written to
*file*, or *sys.stdout* if *file* is *None*.
'''
if file is None:
file = sys.stdout
for ver, defn in utils.iteritems(self.definitions):
defn.inspect_types(file=file)
|
|
import ptypes, math, logging
from ptypes import *
from .primitives import *
ptypes.setbyteorder(ptypes.config.byteorder.bigendian)
### primitives
## float types
class FLOAT16(pfloat.half): pass
class FLOAT(pfloat.single): pass
class DOUBLE(pfloat.double): pass
## int types
class SI8(pint.int8_t): pass
class SI16(pint.int16_t): pass
class SI24(pint.int_t): length = 3
class SI32(pint.int32_t): pass
class SI64(pint.int64_t): pass
class UI8(pint.int8_t): pass
class UI16(pint.int16_t): pass
class UI24(pint.int_t): length = 3
class UI32(pint.int32_t): pass
class UI64(pint.int64_t): pass
(SI8, UI8, SI16, UI16, SI32, UI32, UI64) = ( pint.bigendian(x) for x in (SI8,UI8,SI16,UI16,SI32,UI32,UI64) )
## fixed-point types
class SI8_8(pfloat.sfixed_t): length,fractional = 2,8
class SI16_16(pfloat.sfixed_t): length,fractional = 4,16
class UI8_8(pfloat.ufixed_t): length,fractional = 2,8
class UI16_16(pfloat.ufixed_t): length,fractional = 4,16
#### Tags
class TagHeader(ptype.definition): cache = {}
class TagBody(ptype.definition): cache = {}
### AUDIODATA
@TagHeader.define
class AudioTagHeader(pbinary.struct):
type = 8
_fields_ = [
(4,'SoundFormat'),
(2,'SoundRate'),
(1,'SoundSize'),
(1,'SoundType'),
(lambda s: 8 if s['SoundFormat'] == 10 else 0,'AACPacketType'),
]
# FIXME
@TagBody.define
class AudioTagBody(pstruct.type):
type = 8
def __Data(self):
h = self.getparent(StreamTag)['Header'].li
return AudioPacketData.lookup(h['SoundFormat'])
_fields_ = [(__Data, 'Data')]
## audio packet data
class AudioPacketData(ptype.definition): cache = {}
@AudioPacketData.define
class AACAUDIODATA(pstruct.type):
type = 10
_fields_ = [(lambda s: AudioSpecificConfig if s.getparent(StreamTag)['Header'].li['AACPacketType'] == 0 else ptype.block, 'Data')]
### VIDEODATA
@TagHeader.define
class VideoTagHeader(pstruct.type):
type = 9
class Type(pbinary.struct):
_fields_ = [(4, 'FrameType'), (4, 'CodecID')]
def summary(self):
return 'FrameType:{:d} CodecId:{:d}'.format(self['FrameType'], self['CodecID'])
def __Header(self):
t = self['Type'].li
return VideoPacketHeader.withdefault(t['CodecID'], type=t['CodecID'])
_fields_ = [
(Type, 'Type'),
(__Header, 'Header'),
]
def summary(self):
h = self['Type']
return 'Type{{{:s}}} {:s} {:s}'.format(h.summary(), self['Header'].classname(), self['Header'].summary() or repr(''))
# FIXME
@TagBody.define
class VideoTagBody(pstruct.type):
type = 9
def __Data(self):
h = self.getparent(StreamTag)['Header'].li
t = h['Type']
if t['FrameType'] == 5:
return UI8
return VideoPacketData.lookup(t['CodecID'])
_fields_ = [(__Data,'Data')]
## video packet header
class VideoPacketHeader(ptype.definition):
cache = {}
class unknown(pstruct.type): _fields_ = []
default = unknown
@VideoPacketHeader.define
class AVCVIDEOPACKETHEADER(pstruct.type):
type = 7
class AVCPacketType(pint.enum, UI8):
_values_ = [
(0, 'AVC sequence header'),
(1, 'AVC NALU'),
(2, 'AVC end-of-sequence header'),
]
_fields_ = [
(AVCPacketType, 'AVCPacketType'),
(SI24, 'CompositionTime'),
]
## video packet data
class VideoPacketData(ptype.definition): cache = {}
@VideoPacketData.define
class H263VIDEOPACKET(pbinary.struct):
"""Sorenson H.263"""
type = 2
def __Custom(self):
t = self['PictureSize']
if t == 0:
return 8
elif t == 1:
return 16
return 0
class ExtraInformation(pbinary.terminatedarray):
class _object_(pbinary.struct):
_fields_ = [
(1, 'Flag'),
(lambda s: s['Flag'] and 8 or 0, 'Data'),
]
def isTerminator(self, value):
return self['Flag'] == 0
class MACROBLOCK(pbinary.struct):
class BLOCKDATA(ptype.block):
# FIXME: Look up the ITU-T H.263 spec
pass
_fields_ = [
(1, 'CodecMacroBlockFlag'),
# ...
(ptype.block, 'MacroBlockType'), # H.263 5.3.2
(ptype.block, 'BlockPattern'), # H.263 5.3.5
(2, 'QuantizerInformation'), # H.263 5.3.6
(2, 'MotionVectorData'), # H.263 5.3.7
(6, 'ExtraMotionVectorData'), # H.263 5.3.8
(dyn.array(BLOCKDATA, 6), 'BlockData'),
]
_fields_ = [
(17, 'PictureStartCode'),
(5, 'Version'),
(8, 'TemporalReference'),
(3, 'PictureSize'),
(__Custom, 'CustomWidth'),
(__Custom, 'CustomHeight'),
(2, 'PictureType'),
(1, 'DeblockingFlag'),
(5, 'Quantizer'),
(ExtraInformation, 'ExtraInformation'),
(MACROBLOCK, 'Macroblock'),
]
@VideoPacketData.define
class SCREENVIDEOPACKET(pstruct.type):
"""Screen video"""
type = 3
class IMAGEBLOCK(pstruct.type):
_fields_ = [
(pint.bigendian(UI16), 'DataSize'), # UB[16], but whatever
(lambda s: dyn.block(s['DataSize'].li.int()), 'Data'),
]
def __ImageBlocks(self):
w,h = self['Width'],self['Height']
blocks_w = math.ceil(w['Image'] / float(w['Block']))
blocks_h = math.ceil(h['Image'] / float(h['Block']))
count = blocks_w * blocks_h
return dyn.array(self.IMAGEBLOCK, math.trunc(count))
class Dim(pbinary.struct):
_fields_ = [(4,'Block'),(12,'Image')]
_fields_ = [
(Dim, 'Width'),
(Dim, 'Height'),
(__ImageBlocks, 'ImageBlocks'),
]
@VideoPacketData.define
class VP6FLVVIDEOPACKET(pstruct.type):  # pstruct so the _fields_ below are honoured
"""On2 VP6"""
type = 4
class Adjustment(pbinary.struct):
_fields_ = [(4, 'Horizontal'),(4,'Vertical')]
_fields_ = [
(Adjustment, 'Adjustment'),
(lambda s: dyn.block(s.getparent(StreamTag).DataSize() - s['Adjustment'].li.size()), 'Data'),
]
@VideoPacketData.define
class VP6FLVALPHAVIDEOPACKET(pstruct.type):
"""On2 VP6 with alpha channel"""
type = 5
def __AlphaData(self):
return ptype.undefined
def __Data(self):
streamtag = self.getparent(StreamTag)
sz = streamtag.DataSize()
ofs = self['OffsetToAlpha'].li.int()
if ofs + self['Adjustment'].li.size() >= sz:
logging.warning('OffsetToAlpha incorrect : %x', self.getoffset())
return dyn.block(sz - self['Adjustment'].size() - self['OffsetToAlpha'].size())
return dyn.block(ofs)
_fields_ = [
(VP6FLVVIDEOPACKET.Adjustment, 'Adjustment'),
(UI24, 'OffsetToAlpha'),
# (lambda s: dyn.block(s['OffsetToAlpha'].li.int()), 'Data'),
(__Data, 'Data'),
(lambda s: dyn.block(s.getparent(StreamTag).DataSize() - (s['Adjustment'].li.size()+s['OffsetToAlpha'].li.size()+s['Data'].li.size())), 'AlphaData'),
]
@VideoPacketData.define
class SCREENV2VIDEOPACKET(pstruct.type):
"""Screen video version 2"""
type = 6
class Flags(pbinary.struct):
_fields_ = [
(6, 'Reserved'),
(1, 'HasIFrameImage'),
(1, 'HasPaletteInfo'),
]
class IMAGEBLOCKV2(pstruct.type):
class IMAGEFORMAT(pbinary.struct):
_fields_ = [
(3, 'Reserved'),
(2, 'ColorDepth'),
(1, 'HasDiffBlocks'),
(1, 'ZlibPrimeCompressCurrent'),
(1, 'ZlibPrimeCompressPrevious'),
]
class IMAGEDIFFPOSITION(pstruct.type):
_fields_ = [(UI8,n) for n in ('RowStart','Height')]
class IMAGEPRIMEPOSITION(pstruct.type):
_fields_ = [(UI8,n) for n in ('Block column','Block row')]
def __ImageBlockHeader(self):
# FIXME: since this field depends on 2 separate flags...which one should get prio?
fmt = self['Format'].li
if fmt['HasDiffBlocks']:
return self.IMAGEDIFFPOSITION
elif fmt['ZlibPrimeCompressCurrent']:
return self.IMAGEPRIMEPOSITION
return ptype.undefined
_fields_ = [
(pint.bigendian(UI16), 'DataSize'), # UB[16], but whatever
(IMAGEFORMAT, 'Format'),
(__ImageBlockHeader, 'ImageBlockHeader'),
(lambda s: dyn.block(s['DataSize'].li.int()), 'Data'),
]
def __ImageBlocks(self):
w,h = self['Width'],self['Height']
blocks_w = math.ceil(w['Image'] / float(w['Block']))
blocks_h = math.ceil(h['Image'] / float(h['Block']))
count = blocks_w * blocks_h
return dyn.array(self.IMAGEBLOCKV2, math.trunc(count))
def __IFrameImage(self):
w,h = self['Width'],self['Height']
blocks_w = math.ceil(w['Image'] / float(w['Block']))
blocks_h = math.ceil(h['Image'] / float(h['Block']))
count = blocks_w * blocks_h
return dyn.array(self.IMAGEBLOCKV2, math.trunc(count))
_fields_ = [
(SCREENVIDEOPACKET.Dim, 'Width'),
(SCREENVIDEOPACKET.Dim, 'Height'),
(Flags, 'Flags'),
(lambda s: s['Flags'].li['HasPaletteInfo'] and SCREENVIDEOPACKET.IMAGEBLOCK or ptype.block, 'PaletteInfo'),
(__ImageBlocks, 'ImageBlocks'),
(__IFrameImage, 'IFrameImage'),
]
@VideoPacketData.define
class AVCVIDEOPACKET(pstruct.type):
"""AVC"""
type = 7
def __Data(self):
h = self.getparent(StreamTag)['Header']
t = h['Header']['AVCPacketType'].int()
if t == 0:
# FIXME: ISO 14496-15, 5.2.4.1
return AVCDecoderConfigurationRecord
elif t == 1:
# FIXME: avcC
return NALU
return ptype.block
_fields_ = [
(__Data, 'Data')
]
### SCRIPTDATA
class SCRIPTDATAVALUE(pstruct.type):
def __ScriptDataValue(self):
t = self['Type'].li.int()
return SCRIPTDATATYPE.withdefault(t, type=t)
_fields_ = [
(UI8,'Type'),
(__ScriptDataValue, 'Value'),
]
def summary(self):
return '{:s}({:d})/{:s}'.format(self['Value'].classname(), self['Type'].int(), self['Value'].summary())
repr = summary
class SCRIPTDATATYPE(ptype.definition): cache = {}
class SCRIPTDATASTRING(pstruct.type):
_fields_ = [(UI16,'StringLength'),(lambda s:dyn.clone(STRING,length=s['StringLength'].li.int()),'StringData')]
def summary(self):
return self['StringData'].summary()
repr = summary
class SCRIPTDATAOBJECTPROPERTY(pstruct.type):
_fields_ = [(SCRIPTDATASTRING,'Name'),(SCRIPTDATAVALUE,'Value')]
def summary(self):
return '{!r}={!r}'.format(self['Name'].str(), self['Value'].str())
repr = summary
# FIXME
@TagBody.define
class ScriptTagBody(pstruct.type):
type = 18
_fields_ = [(SCRIPTDATAVALUE,'Name'),(SCRIPTDATAVALUE,'Value')]
def summary(self):
return 'Name:{:s} Value:{:s}'.format(self['Name'].summary(), self['Value'].summary())
repr = summary
@SCRIPTDATATYPE.define
class DOUBLE(DOUBLE):
type = 0
@SCRIPTDATATYPE.define
class UI8(UI8):
type = 1
@SCRIPTDATATYPE.define
class SCRIPTDATASTRING(SCRIPTDATASTRING):
type = 2
@SCRIPTDATATYPE.define
class SCRIPTDATAOBJECT(parray.terminated):
type = 3
_object_ = SCRIPTDATAOBJECTPROPERTY
def isTerminator(self, value):
return type(value['Value'].li['Value']) == SCRIPTDATAOBJECTEND
#return value['PropertyName'].li['StringLength'] == 0 and value['PropertyValue'].li['Type'].int() == SCRIPTDATAOBJECTEND.type
def summary(self):
return repr([ x.summary() for x in self ])
repr = summary
@SCRIPTDATATYPE.define
class UI16(UI16):
type = 7
@SCRIPTDATATYPE.define
class SCRIPTDATAECMAARRAY(pstruct.type):
type = 8
_fields_ = [
(UI32,'EcmaArrayLength'),
(SCRIPTDATAOBJECT, 'Variables'),
]
@SCRIPTDATATYPE.define
class SCRIPTDATAOBJECTEND(ptype.type):
type = 9
@SCRIPTDATATYPE.define
class SCRIPTDATASTRICTARRAY(pstruct.type):
type = 10
_fields_ = [(UI32,'StrictArrayLength'),(lambda s: dyn.array(SCRIPTDATAVALUE, s['StrictArrayLength'].li.int()),'StrictArrayValue')]
def summary(self):
return '{!r}'.format([x.summary() for x in self['StrictArrayValue']])
repr = summary
@SCRIPTDATATYPE.define
class SCRIPTDATADATE(pstruct.type):
type = 11
_fields_ = [(DOUBLE,'DateTime'),(SI16,'LocalDateTimeOffset')]
def summary(self):
return 'DateTime:{:s} LocalDateTimeOffset:{:d}'.format(self['DateTime'].summary(), self['LocalDateTimeOffset'].int())
repr = summary
@SCRIPTDATATYPE.define
class SCRIPTDATALONGSTRING(pstruct.type):
type = 12
_fields_ = [
(UI32, 'StringLength'),
(lambda s: dyn.clone(STRING,length=s['StringLength'].li.int()), 'StringData'),
]
def summary(self):
return self['StringData'].str()
repr = summary
### Structures
class StreamTag(pstruct.type):
def __Header(self):
base = self.getparent(FLVTAG)
t = base['Type'].li['TagType']
return TagHeader.withdefault(t, type=t)
def __FilterParams(self):
base = self.getparent(FLVTAG)
return FilterParams if base['Type'].li['Filter'] == 1 else ptype.undefined
def __Body(self):
base = self.getparent(FLVTAG)
t = base['Type'].li['TagType']
return TagBody.withdefault(t, type=t, length=self.DataSize())
def DataSize(self):
base = self.getparent(FLVTAG)
sz = base['DataSize'].li.int()
ex = self['Header'].li.size() + self['FilterParams'].li.size()
return sz - ex
_fields_ = [
(__Header, 'Header'),
(__FilterParams, 'FilterParams'),
(__Body, 'Body'),
]
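# DataSize() reports how many bytes of the enclosing FLVTAG's DataSize remain
# for the body once the tag-specific header and any FilterParams have been
# consumed; the packet-data types above use it to size their trailing data
# blocks.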
class EncryptionTagHeader(pstruct.type):
_fields_ = [
(UI8, 'NumFilters'),
(STRING, 'FilterName'),
(UI24, 'Length'),
]
class EncryptionFilterParams(pstruct.type):
_fields_ = [(dyn.array(UI8,16), 'IV')]
class SelectiveEncryptionFilterParams(pbinary.struct):
_fields_ = [(1,'EncryptedAU'),(7,'Reserved'),(lambda s: dyn.clone(pbinary.array,length=16,_object_=8),'IV')]
class FilterParams(pstruct.type):
def __FilterParams(self):
header = self.getparent(EncryptionTagHeader)
filtername = header['FilterName'].li.str()
if filtername == 'Encryption':
return EncryptionFilterParams
if filtername == 'SE':
return SelectiveEncryptionFilterParams
return ptype.undefined
_fields_ = [
(__FilterParams, 'FilterParams'),
]
class FLVTAG(pstruct.type):
class Type(pbinary.struct):
_fields_ = [(2,'Reserved'),(1,'Filter'),(5,'TagType')]
def summary(self):
return 'TagType:{:d} {:s}Reserved:{:d}'.format(self['TagType'], 'Filtered ' if self['Filter'] else '', self['Reserved'])
def __Extra(self):
sz = self['DataSize'].li.int()
ts = self['Stream'].li.size()
return dyn.block(sz-ts)
_fields_ = [
(Type, 'Type'),
(UI24, 'DataSize'),
(UI24, 'Timestamp'),
(UI8, 'TimestampExtended'),
(UI24, 'StreamID'),
(StreamTag, 'Stream'),
(__Extra, 'Extra'),
]
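# Layout note: the five fields before 'Stream' form the 11-byte FLV tag header
# (1 byte type/flags, 3-byte DataSize, 3-byte Timestamp, 1-byte
# TimestampExtended, 3-byte StreamID); 'Stream' then consumes DataSize bytes of
# tag payload and 'Extra' soaks up any remainder the body parsers did not claim.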
### file types
class File(pstruct.type):
class Header(pstruct.type):
class TypeFlags(pbinary.struct):
_fields_ = [(5,'Reserved(0)'),(1,'Audio'),(1,'Reserved(1)'),(1,'Video')]
def summary(self):
res = []
if self['Audio']: res.append('Audio')
if self['Video']: res.append('Video')
if self['Reserved(1)'] or self['Reserved(0)']: res.append('Reserved?')
return '/'.join(res)
def __Padding(self):
sz = self['DataOffset'].li.int()
return dyn.block(sz - 9)
_fields_ = [
(dyn.array(UI8,3), 'Signature'),
(UI8, 'Version'),
(TypeFlags, 'TypeFlags'),
(UI32, 'DataOffset'),
(__Padding, 'Padding'),
]
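# For a standard FLV file this header is nine bytes: the 'F','L','V' signature,
# a version byte (1), the audio/video TypeFlags byte, and a big-endian UI32
# DataOffset that normally equals 9, in which case 'Padding' is empty.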
def __Padding(self):
h = self['Header'].li
sz = h['DataOffset'].int()
return dyn.block(sz - h.size())
class Body(parray.block):
class _object_(pstruct.type):
_fields_ = [
(UI32, 'PreviousTagSize'),
(FLVTAG, 'Tag'),
]
def __Body(self):
ex = self['Header'].li['DataOffset'].int()
return dyn.clone(self.Body, blocksize=lambda s:self.source.size() - ex)
_fields_ = [
(Header, 'Header'),
(__Body, 'Body'),
]
if __name__ == '__main__':
import ptypes,swf.flv as flv
ptypes.setsource(ptypes.prov.file('c:/users/user/Documents/blah.flv',mode='rb'))
a = flv.File()
a = a.l
print(a['Header']['TypeFlags'])
print(a['Header'])
print(a['Header']['Padding'].hexdump())
print(a['Body'][0]['Tag'])
print(a['Body'][0]['Tag']['Stream'])
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# The MIT License (MIT)
#
# Copyright (c) 2017 Jorge A. Gomes (jorgegomes83 at hotmail dot com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
import pygame as pg
from OpenGL.GL import GL_LINE_STRIP, GL_TRIANGLES
from typing import Optional, Callable
from easygl.arrays import VertexArrayData, DType, attribute, vertex, vertex_copy, VertexArray
from easygl.shaders import ShaderProgramData, ShaderProgram
from easygl.textures import TexDescriptor, TextureData, MipMap, Wrap, Filter
from easygl.structures import FrozenMat4, Vec2, Vec4
from easygl.display import BlendMode, GLWindow
__all__ = [
'init',
'rect_line',
'rect_fill',
'oriented_rect_line',
'oriented_rect_fill',
]
_initialized = False
# region - - -- ----==<[ STUBS ]>==---- -- - -
def rect_line(window, view, projection, position, size, origin, color, tex=None, vcoord=0., blend=BlendMode.alpha):
# type: (GLWindow, FrozenMat4, FrozenMat4, Vec2, Vec2, Vec2, Vec4, Optional[TexDescriptor], Optional[float], BlendMode) -> None
pass
def oriented_rect_line(window, view, projection, position, size, origin, angle, color, tex=None, vcoord=0., blend=BlendMode.alpha):
# type: (GLWindow, FrozenMat4, FrozenMat4, Vec2, Vec2, Vec2, float, Vec4, Optional[TexDescriptor], Optional[float], BlendMode) -> None
pass
def rect_fill(window, view, projection, position, size, origin, color, blend=BlendMode.alpha):
# type: (GLWindow, FrozenMat4, FrozenMat4, Vec2, Vec2, Vec2, Vec4, BlendMode) -> None
pass
def oriented_rect_fill(window, view, projection, position, size, origin, angle, color, blend=BlendMode.alpha):
# type: (GLWindow, FrozenMat4, FrozenMat4, Vec2, Vec2, Vec2, float, Vec4, BlendMode) -> None
pass
# endregion
def init():
global _initialized, rect_line, oriented_rect_line, rect_fill, oriented_rect_fill
if _initialized:
return
# region - - -- ----==<[ ARRAY DATA ]>==---- -- - -
rectangle_vertex_data = VertexArrayData()
with rectangle_vertex_data.definition():
attribute('position', DType.float_v2)
attribute('ucoord', DType.float)
with rectangle_vertex_data.new_primitive('quad_line', 4):
vertex(position=(1., 1.), ucoord=0.) # top right
vertex(position=(1., 0.), ucoord=1.) # bottom right
vertex(position=(0., 0.), ucoord=0.) # bottom left
vertex(position=(0., 1.), ucoord=1.) # top left
vertex_copy(0)
with rectangle_vertex_data.new_primitive('quad_fill', 6, ucoord=0.):
vertex(position=(1., 1.)) # top right
vertex(position=(1., 0.)) # bottom right
vertex(position=(0., 1.)) # top left
vertex_copy(1)
vertex(position=(0., 0.)) # bottom left
vertex_copy(2)
# endregion
# region - - -- ----==<[ TEXTURES ]>==---- -- - -
s = pg.Surface((4, 1))
s.fill((255, 255, 255))
texdata = TextureData()
texdata.create_from_surface('rect_tex', s, False, False, MipMap.linear_linear, Wrap.repeat,
Filter.linear)
# endregion
# region - - -- ----==<[ SHADERS ]>==---- -- - -
rect_vshader_code = """
#version 330 core
in vec2 position;
in float ucoord;
uniform vec2 origin;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
uniform float vcoord;
out vec2 coord;
void main() {
gl_Position = projection * view * model * vec4(position - origin, 0.f, 1.f);
coord = vec2(ucoord, vcoord);
}
"""
rect_fshader_code = """
#version 330 core
in vec2 coord;
uniform vec4 color;
uniform sampler2D tex;
uniform bool solidcolor;
void main() {
vec4 texcolor = texture(tex, coord);
if (solidcolor)
texcolor = vec4(1.f, 1.f, 1.f, 1.f);
        gl_FragColor = texcolor * color;
}
"""
rect_shader_data = ShaderProgramData("")
rect_shader_data.compile_vertex_shader('rect', shader_code=rect_vshader_code)
rect_shader_data.compile_fragment_shader('rect', shader_code=rect_fshader_code)
rect_shader_data.link('rect_shader', vertex='rect', fragment='rect')
rect_shader = rect_shader_data.build('rect_shader')
# endregion
# region - - -- ----==<[ VAOS ]>==---- -- - -
rectline_vertex_array = VertexArray(rectangle_vertex_data, 'quad_line', rect_shader)
rectfill_vertex_array = VertexArray(rectangle_vertex_data, 'quad_fill', rect_shader)
# endregion
# region - - -- ----==<[ RENDER FUNCTIONS ]>==---- -- - -
def rect_line(window, view, projection, position, size, origin, color, tex=None, vcoord=0., blend=BlendMode.alpha):
# type: (GLWindow, FrozenMat4, FrozenMat4, Vec2, Vec2, Vec2, Vec4, Optional[TexDescriptor], Optional[float], BlendMode) -> None
model = FrozenMat4.transform(Vec4(position, 0., 1.), 0., Vec4(size, 0., 1.))
current = window.blend_mode
window.blend_mode = blend
with rectline_vertex_array.render(GL_LINE_STRIP) as shader: # type: ShaderProgram
shader.load2f('origin', *origin)
shader.load_matrix4f('model', 1, False, model)
shader.load_matrix4f('view', 1, False, tuple(view))
shader.load_matrix4f('projection', 1, False, tuple(projection))
shader.load1f('vcoord', vcoord)
shader.load4f('color', *color)
if isinstance(tex, TexDescriptor):
shader.load_sampler2d('tex', tex.id, 0)
shader.load1i('solidcolor', 0)
else:
shader.load_sampler2d('tex', texdata['rect_tex'].id, 0)
shader.load1i('solidcolor', 1)
window.blend_mode = current
def oriented_rect_line(window, view, projection, position, size, origin, angle, color, tex=None, vcoord=0., blend=BlendMode.alpha):
# type: (GLWindow, FrozenMat4, FrozenMat4, Vec2, Vec2, Vec2, float, Vec4, Optional[TexDescriptor], Optional[float], BlendMode) -> None
model = FrozenMat4.transform(Vec4(position, 0., 1.), angle, Vec4(size, 0., 1.))
current = window.blend_mode
window.blend_mode = blend
with rectline_vertex_array.render(GL_LINE_STRIP) as shader: # type: ShaderProgram
shader.load2f('origin', *origin)
shader.load_matrix4f('model', 1, False, model)
shader.load_matrix4f('view', 1, False, tuple(view))
shader.load_matrix4f('projection', 1, False, tuple(projection))
shader.load1f('vcoord', vcoord)
shader.load4f('color', *color)
if isinstance(tex, TexDescriptor):
shader.load_sampler2d('tex', tex.id, 0)
shader.load1i('solidcolor', 0)
else:
shader.load_sampler2d('tex', texdata['rect_tex'].id, 0)
shader.load1i('solidcolor', 1)
window.blend_mode = current
def rect_fill(window, view, projection, position, size, origin, color, blend=BlendMode.alpha):
# type: (GLWindow, FrozenMat4, FrozenMat4, Vec2, Vec2, Vec2, Vec4, BlendMode) -> None
model = FrozenMat4.transform(Vec4(position, 0., 1.), 0., Vec4(size, 0., 1.))
current = window.blend_mode
window.blend_mode = blend
with rectfill_vertex_array.render(GL_TRIANGLES) as shader: # type: ShaderProgram
shader.load2f('origin', *origin)
shader.load_matrix4f('model', 1, False, model)
shader.load_matrix4f('view', 1, False, tuple(view))
shader.load_matrix4f('projection', 1, False, tuple(projection))
shader.load1f('vcoord', 0.)
shader.load4f('color', *color)
shader.load_sampler2d('tex', texdata['rect_tex'].id, 0)
shader.load1i('solidcolor', 1)
window.blend_mode = current
def oriented_rect_fill(window, view, projection, position, size, origin, angle, color, blend=BlendMode.alpha):
# type: (GLWindow, FrozenMat4, FrozenMat4, Vec2, Vec2, Vec2, float, Vec4, BlendMode) -> None
model = FrozenMat4.transform(Vec4(position, 0., 1.), angle, Vec4(size, 0., 1.))
current = window.blend_mode
window.blend_mode = blend
with rectfill_vertex_array.render(GL_TRIANGLES) as shader: # type: ShaderProgram
shader.load2f('origin', *origin)
shader.load_matrix4f('model', 1, False, model)
shader.load_matrix4f('view', 1, False, tuple(view))
shader.load_matrix4f('projection', 1, False, tuple(projection))
shader.load1f('vcoord', 0.)
shader.load4f('color', *color)
shader.load_sampler2d('tex', texdata['rect_tex'].id, 0)
shader.load1i('solidcolor', 1)
window.blend_mode = current
# endregion
_initialized = True
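# region - - -- ----==<[ USAGE SKETCH ]>==---- -- - -
# A minimal usage sketch rather than part of the public API. It assumes an already
# created GLWindow plus view/projection FrozenMat4 matrices (their construction is not
# shown in this module) and assumes Vec2/Vec4 accept plain component arguments. init()
# must run once after the GL context exists so the VAOs, shaders and fallback texture
# are built; only then do the module-level draw functions become usable.
def _example_draw_hud_box(window, view, projection):
    # type: (GLWindow, FrozenMat4, FrozenMat4) -> None
    init()
    # filled 100x50 rectangle anchored at its top-left corner, drawn in opaque red
    rect_fill(window, view, projection,
              position=Vec2(20., 20.), size=Vec2(100., 50.),
              origin=Vec2(0., 0.), color=Vec4(1., 0., 0., 1.))
    # matching outline in white, using the default alpha blend mode
    rect_line(window, view, projection,
              position=Vec2(20., 20.), size=Vec2(100., 50.),
              origin=Vec2(0., 0.), color=Vec4(1., 1., 1., 1.))
# endregion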
|
|
#--------------------------
# lexer.py
#
# Verilog-AMS Lexical Analyzer
#
# Copyright (C) 2015, Andrew Plumb
# License: Apache 2.0
#--------------------------
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
import re
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from pyvams.vamsparser import ply
from pyvams.vamsparser.ply.lex import *
class VerilogAMSLexer(object):
""" Verilog-AMS Lexical Analyzer """
def __init__(self, error_func):
self.filename = ''
self.error_func = error_func
self.directives = []
self.default_nettype = 'wire'
def build(self, **kwargs):
self.lexer = ply.lex.lex(object=self, **kwargs)
def input(self, data):
self.lexer.input(data)
def reset_lineno(self):
self.lexer.lineno = 1
def get_directives(self):
return tuple(self.directives)
def get_default_nettype(self):
return self.default_nettype
def token(self):
return self.lexer.token()
    # Annex B - List of keywords - Table B.1--Reserved keywords
keywords = (
'ABOVE','ABS','ABSDELAY','ABSDELTA','ABSTOL','ACCESS','ACOS','ACOSH','AC_STIM','ALIASPARAM',
'ALWAYS','ANALOG','ANALYSIS','AND','ASIN','ASINH','AUTOMATIC','BEGIN','BRANCH','BUF',
'BUFIF0','BUFIF1','CASE','CASEX','CASEZ','CEIL','CELL','CMOS','CONFIG','CONNECT',
'CONNECTMODULE','CONNECTRULES','CONTINUOUS','COS','COSH','CROSS','DDT','DDT_NATURE','DDX','DEASSIGN',
'DEFAULT','DEFPARAM','DESIGN','DISABLE','DISCIPLINE','DISCRETE','DOMAIN','DRIVER_UPDATE','EDGE','ELSE',
'END','ENDCASE','ENDCONFIG','ENDCONNECTRULES','ENDDISCIPLINE','ENDFUNCTION','ENDGENERATE','ENDMODULE','ENDNATURE','ENDPARAMSET',
'ENDPRIMITIVE','ENDSPECIFY','ENDTABLE','ENDTASK','EVENT','EXCLUDE','EXP','FINAL_STEP','FLICKER_NOISE','FLOOR',
'FLOW','FOR','FORCE','FOREVER','FORK','FROM','FUNCTION','GENERATE','GENVAR','GROUND',
'HIGHZ0','HIGHZ1','HYPOT','IDT','IDTMOD','IDT_NATURE','IF','IFNONE','INCDIR','INCLUDE',
'INF','INITIAL','INITIAL_STEP','INOUT','INPUT','INSTANCE','INTEGER','JOIN','LAPLACE_ND','LAPLACE_NP',
'LAPLACE_ZD','LAPLACE_ZP','LARGE','LAST_CROSSING','LIBLIST','LIBRARY','LIMEXP','LN','LOCALPARAM','LOG',
'MACROMODULE','MAX','MEDIUM','MERGED','MIN','MODULE','NAND','NATURE','NEGEDGE','NET_RESOLUTION',
'NMOS','NOISE_TABLE','NOISE_TABLE_LOG','NOR','NOSHOWCANCELLED','NOT','NOTIF0','NOTIF1','OR','OUTPUT',
'PARAMETER','PARAMSET','PMOS','POSEDGE','POTENTIAL','POW','PRIMITIVE','PULL0','PULL1','PULLDOWN',
'PULLUP','PULSESTYLE_ONEVENT','PULSESTYLE_ONDETECT','RCMOS','REAL','REALTIME','REG','RELEASE','REPEAT','RESOLVETO',
'RNMOS','RPMOS','RTRAN','RTRANIF0','RTRANIF1','SCALARED','SIN','SINH','SHOWCANCELLED','SIGNED',
'SLEW','SMALL','SPECIFY','SPECPARAM','SPLIT','SQRT','STRING','STRONG0','STRONG1','SUPPLY0',
'SUPPLY1','TABLE','TAN','TANH','TASK','TIME','TIMER','TRAN','TRANIF0','TRANIF1',
'TRANSITION','TRI','TRI0','TRI1','TRIAND','TRIOR','TRIREG','UNITS','UNSIGNED','USE',
'UWIRE','VECTORED','WAIT','WAND','WEAK0','WEAK1','WHILE','WHITE_NOISE','WIRE','WOR',
'WREAL','XNOR','XOR','ZI_ND','ZI_NP','ZI_ZD','ZI_ZP',
)
reserved = {}
for keyword in keywords:
reserved[keyword.lower()] = keyword
operators = (
'PLUS','MINUS','POWER','TIMES','DIVIDE','MOD',
'SYM_NOT','SYM_OR','SYM_NOR','SYM_AND','SYM_NAND','SYM_XOR','SYM_XNOR',
'LOR','LAND','LNOT',
'LSHIFTA','RSHIFTA','LSHIFT','RSHIFT',
'LT','GT','LE','GE','EQ','NE','EQL','NEL',
'COND',
'EQUALS',
)
tokens = keywords + operators + (
'ID',
'AT','COMMA','COLON','SEMICOLON','DOT',
'PLUSCOLON','MINUSCOLON',
'FLOATNUMBER','STRING_LITERAL',
'INTNUMBER_DEC','SIGNED_INTNUMBER_DEC',
'INTNUMBER_HEX','SIGNED_INTNUMBER_HEX',
'INTNUMBER_OCT','SIGNED_INTNUMBER_OCT',
'INTNUMBER_BIN','SIGNED_INTNUMBER_BIN',
'LPAREN','RPAREN','LBRACKET','RBRACKET','LBRACE','RBRACE',
'DELAY','DOLLAR',
)
skipped = (
'COMMENTOUT','LINECOMMENT','DIRECTIVE',
)
# Ignore
t_ignore = ' \t'
# Directive
directive = r"""\`.*?\n"""
@TOKEN(directive)
def t_DIRECTIVE(self,t):
self.directives.append( (self.lexer.lineno,t.value) )
t.lexer.lineno += t.value.count("\n")
        m = re.match(r"^`default_nettype\s+(.+)\n", t.value)
if m: self.default_nettype = m.group(1)
pass
# Comment
linecomment = r"""//.*?\n"""
commentout = r"""/\*(.|\n)*?\*/"""
@TOKEN(linecomment)
def t_LINECOMMENT(self,t):
t.lexer.lineno += t.value.count("\n")
pass
@TOKEN(commentout)
def t_COMMENTOUT(self,t):
t.lexer.lineno += t.value.count("\n")
pass
# Operator
t_LOR = r'\|\|'
t_LAND = r'\&\&'
t_SYM_NOR = r'~\|'
t_SYM_NAND = r'~\&'
t_SYM_XNOR = r'~\^'
t_SYM_OR = r'\|'
t_SYM_AND = r'\&'
t_SYM_XOR = r'\^'
t_SYM_NOT = r'~'
t_LNOT = r'!'
t_LSHIFTA = r'<<<'
t_RSHIFTA = r'>>>'
t_LSHIFT = r'<<'
t_RSHIFT = r'>>'
t_EQL = r'==='
t_NEL = r'!=='
t_EQ = r'=='
t_NE = r'!='
t_LE = r'<='
t_GE = r'>='
t_LT = r'<'
t_GT = r'>'
t_POWER = r'\*\*'
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_MOD = r'%'
    t_COND = r'\?'
t_EQUALS = r'='
t_PLUSCOLON = r'\+:'
t_MINUSCOLON = r'-:'
t_AT = r'@'
t_COMMA = r','
t_SEMICOLON = r';'
t_COLON = r':'
t_DOT = r'\.'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_DELAY = r'\#'
t_DOLLAR = r'\$'
bin_number = '[0-9]*\'[bB][0-1xXzZ?][0-1xXzZ?_]*'
    signed_bin_number = '[0-9]*\'[sS][bB][0-1xXzZ?][0-1xXzZ?_]*'
octal_number = '[0-9]*\'[oO][0-7xXzZ?][0-7xXzZ?_]*'
signed_octal_number = '[0-9]*\'[sS][oO][0-7xXzZ?][0-7xXzZ?_]*'
hex_number = '[0-9]*\'[hH][0-9a-fA-FxXzZ?][0-9a-fA-FxXzZ?_]*'
signed_hex_number = '[0-9]*\'[sS][hH][0-9a-fA-FxXzZ?][0-9a-fA-FxXzZ?_]*'
decimal_number = '([0-9]*\'[dD][0-9xXzZ?][0-9xXzZ?_]*)|([0-9][0-9_]*)'
signed_decimal_number = '[0-9]*\'[sS][dD][0-9xXzZ?][0-9xXzZ?_]*'
exponent_part = r"""([eE][-+]?[0-9]+)"""
fractional_constant = r"""([0-9]*\.[0-9]+)|([0-9]+\.)"""
float_number = '(((('+fractional_constant+')'+exponent_part+'?)|([0-9]+'+exponent_part+')))'
simple_escape = r"""([a-zA-Z\\?'"])"""
octal_escape = r"""([0-7]{1,3})"""
hex_escape = r"""(x[0-9a-fA-F]+)"""
escape_sequence = r"""(\\("""+simple_escape+'|'+octal_escape+'|'+hex_escape+'))'
string_char = r"""([^"\\\n]|"""+escape_sequence+')'
string_literal = '"'+string_char+'*"'
identifier = r"""(([a-zA-Z_])([a-zA-Z_0-9$])*)|((\\\S)(\S)*)"""
@TOKEN(string_literal)
def t_STRING_LITERAL(self, t):
return t
@TOKEN(float_number)
def t_FLOATNUMBER(self, t):
return t
@TOKEN(signed_bin_number)
def t_SIGNED_INTNUMBER_BIN(self, t):
return t
@TOKEN(bin_number)
def t_INTNUMBER_BIN(self, t):
return t
@TOKEN(signed_octal_number)
def t_SIGNED_INTNUMBER_OCT(self, t):
return t
@TOKEN(octal_number)
def t_INTNUMBER_OCT(self, t):
return t
@TOKEN(signed_hex_number)
def t_SIGNED_INTNUMBER_HEX(self, t):
return t
@TOKEN(hex_number)
def t_INTNUMBER_HEX(self, t):
return t
@TOKEN(signed_decimal_number)
def t_SIGNED_INTNUMBER_DEC(self, t):
return t
@TOKEN(decimal_number)
def t_INTNUMBER_DEC(self, t):
return t
@TOKEN(identifier)
def t_ID(self, t):
t.type = self.reserved.get(t.value, 'ID')
return t
def t_NEWLINE(self, t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
pass
def t_error(self, t):
msg = 'Illegal character %s' % repr(t.value[0])
self._error(msg, t)
def _error(self, msg, token):
location = self._make_tok_location(token)
self.error_func(msg, location[0], location[1])
self.lexer.skip(1)
def _find_tok_column(self, token):
i = token.lexpos
while i > 0:
if self.lexer.lexdata[i] == '\n': break
i -= 1
return (token.lexpos - i) + 1
def _make_tok_location(self, token):
return (token.lineno, self._find_tok_column(token))
#-------------------------------------------------------------------------------
def dump_tokens(text):
def my_error_func(msg, a, b):
        sys.stderr.write(msg + "\n")
sys.exit()
lexer = VerilogAMSLexer(error_func=my_error_func)
lexer.build()
lexer.input(text)
ret = []
# Tokenize
while True:
tok = lexer.token()
if not tok: break # No more input
ret.append("%s %s %d %s %d\n" %
(tok.value, tok.type, tok.lineno, lexer.filename, tok.lexpos))
return ''.join(ret)
#-------------------------------------------------------------------------------
if __name__ == '__main__':
import pyvams.utils.version
    from pyvams.vamsparser.preprocessor import preprocess
from optparse import OptionParser
    INFO = "Verilog-AMS Lexical Analyzer"
    VERSION = pyvams.utils.version.VERSION
    USAGE = "Usage: python lexer.py file ..."
def showVersion():
print(INFO)
print(VERSION)
print(USAGE)
sys.exit()
optparser = OptionParser()
optparser.add_option("-v","--version",action="store_true",dest="showversion",
default=False,help="Show the version")
optparser.add_option("-I","--include",dest="include",action="append",
default=[],help="Include path")
optparser.add_option("-D",dest="define",action="append",
default=[],help="Macro Definition")
(options, args) = optparser.parse_args()
filelist = args
if options.showversion:
showVersion()
for f in filelist:
if not os.path.exists(f): raise IOError("file not found: " + f)
if len(filelist) == 0:
showVersion()
text = preprocess(filelist,
preprocess_include=options.include,
preprocess_define=options.define)
dump = dump_tokens(text)
print(dump)
|
|
"""
Created on Thu Mar 24 08:18:04 2016
@author: npop
Univariate Statistics
Controls saving and access to various statistics
The number of statistics per window has to be provided by the user
This is better for long-term usage
"""
import numpy as np
import scipy.stats as stats
import scipy.interpolate as interp
from copy import deepcopy
# import utils
from utilsIO import *
from utilsRobust import *
from utilsProcess import smooth1d
# frequency statistics
# there is a single statistic for each evaluation frequency
# and for each window
# meaning, in total, there are nwindow*nfreq statistics
class StatisticCalculator(object):
###################
### CONSTRUCTOR
##################
def __init__(self):
# default evaluation frequencies
self.evalFreq = []
# power smoothing vals
self.winLen = 13
self.winType = "hanning"
# set some defaults
self.inChans = ["Hx", "Hy"]
self.inSize = len(self.inChans)
self.outChans = ["Ex", "Ey"]
self.outSize = len(self.outChans)
self.specChans = self.inChans + self.outChans
self.remoteChans = self.inChans
self.psdChans = ["Ex", "Ey", "Hx", "Hy"]
self.cohPairs = [["Ex", "Hx"], ["Ex", "Hy"], ["Ey", "Hx"], ["Ey", "Hy"]]
self.polDirs = [["Ex", "Ey"],["Hx","Hy"]]
# set data presets
self.spec = {}
# self.specChans = []
# output data and marker for transfer function calculated
self.tfCalculated = False
self.remoteCalculated = False
self.intercept = False
self.outData = {}
###################
### GET FUNCTIONS
###################
def getEvalFreq(self):
return deepcopy(self.evalFreq)
def getInChans(self):
return deepcopy(self.inChans)
def getOutChans(self):
return deepcopy(self.outChans)
def getSpecChans(self):
return deepcopy(self.specChans)
def getRemoteChans(self):
return deepcopy(self.remoteChans)
def getPSDChans(self):
return deepcopy(self.psdChans)
def getCohPairs(self):
return deepcopy(self.cohPairs)
def getPolDirs(self):
return deepcopy(self.polDirs)
def getWinLen(self):
return self.winLen
def getWinType(self):
return self.winType
def getSpectra(self):
return self.spec
def getIntercept(self):
return self.intercept
# note: autopowers are real
def getAutoPower(self, chan):
idx = self.specChans.index(chan)
# then return the autopower
return self.spectralMatrix[idx, idx].real
def getAutoPowerEval(self, chan, eIdx):
idx = self.specChans.index(chan)
# then return the autopower
return self.evalMatrix[idx, idx, eIdx].real
def getCrossPower(self, chan1, chan2):
idx1 = self.specChans.index(chan1)
idx2 = self.specChans.index(chan2)
        # then return the cross power
return self.spectralMatrix[idx1, idx2]
def getCrossPowerEval(self, chan1, chan2, eIdx):
idx1 = self.specChans.index(chan1)
idx2 = self.specChans.index(chan2)
        # then return the cross power
return self.evalMatrix[idx1, idx2, eIdx]
def getOutData(self):
return deepcopy(self.outData)
###################
### SET FUNCTIONS
###################
def setInChans(self, inChans):
self.inChans = inChans
self.inSize = len(self.inChans)
def setOutChans(self, outChans):
self.outChans = outChans
self.outSize = len(self.outChans)
def setRemoteChans(self, remoteChans):
self.remoteChans = remoteChans
def setPSDChans(self, psdChans):
self.psdChans = psdChans
def setCohPairs(self, cohPairs):
self.cohPairs = cohPairs
def setPolDirs(self, polDirs):
self.polDirs = polDirs
def setSpectra(self, freq, spectra, evalFreq):
self.freq = freq
self.spec = spectra
self.evalFreq = evalFreq
# self.specChans = sorted(self.spec.keys())
self.numChans = len(self.specChans)
self.dataSize = self.spec[self.specChans[0]].size
# calculate the power matrix
self.calculateSpectralMatrix()
self.calculateEvalMatrix()
# clear the out dictionary and set that transfer function not calculated
self.prepareOutDict()
def setIntercept(self, intercept):
self.intercept = intercept
###################
### INITIAL HELPER FUNCTIONS
### SPEED UP OTHER CALCULATIONS
###################
def calculateSpectralMatrix(self):
# create the 3d array
self.spectralMatrix = np.empty(shape=(self.numChans, self.numChans, self.dataSize), dtype="complex")
# now need to go through the chans
for i in xrange(0, self.numChans):
for j in xrange(i, self.numChans):
chan1 = self.specChans[i]
chan2 = self.specChans[j]
self.spectralMatrix[i, j] = smooth1d(self.spec[chan1]*np.conjugate(self.spec[chan2]), self.winLen, self.winType)
                if i != j:
                    self.spectralMatrix[j, i] = np.conjugate(self.spectralMatrix[i, j]) # conjugate symmetry
def calculateEvalMatrix(self):
# create the array
self.evalMatrix = np.empty(shape=(self.numChans, self.numChans, len(self.evalFreq)), dtype="complex")
for i in xrange(0, self.numChans):
for j in xrange(i, self.numChans):
self.evalMatrix[i,j] = self.interpolateToEvalFreq(self.spectralMatrix[i,j])
if i != j:
                    self.evalMatrix[j, i] = np.conjugate(self.evalMatrix[i, j]) # conjugate symmetry
###################
### ADD REMOTE SPEC
### AND REMOTE GET FUNCTIONS
###################
def addRemoteSpec(self, remoteSpec, **kwargs):
self.remoteSpec = remoteSpec
if "remotechans" in kwargs:
self.remoteChans = kwargs["remotechans"]
# now calculate some remote reference related values
self.calculateRemoteSpectralMatrix()
self.calculateRemoteEvalMatrix()
self.calculateReferenceSpectralMatrix()
self.calculateReferenceEvalMatrix()
def calculateRemoteSpectralMatrix(self):
# create the 3d array
numRemoteChans = len(self.remoteChans)
self.remoteSpectralMatrix = np.empty(shape=(numRemoteChans, numRemoteChans, self.dataSize), dtype="complex")
# now need to go through the chans
for i in xrange(0, numRemoteChans):
for j in xrange(i, numRemoteChans):
chan1 = self.remoteChans[i]
chan2 = self.remoteChans[j]
self.remoteSpectralMatrix[i, j] = smooth1d(self.remoteSpec[chan1]*np.conjugate(self.remoteSpec[chan2]), self.winLen, self.winType)
                if i != j:
                    self.remoteSpectralMatrix[j, i] = np.conjugate(self.remoteSpectralMatrix[i, j]) # conjugate symmetry
def calculateRemoteEvalMatrix(self):
# create the array
numRemoteChans = len(self.remoteChans)
self.remoteEvalMatrix = np.empty(shape=(numRemoteChans, numRemoteChans, len(self.evalFreq)), dtype="complex")
for i in xrange(0, numRemoteChans):
for j in xrange(i, numRemoteChans):
self.remoteEvalMatrix[i,j] = self.interpolateToEvalFreq(self.remoteSpectralMatrix[i,j])
if i != j:
                    self.remoteEvalMatrix[j, i] = np.conjugate(self.remoteEvalMatrix[i, j]) # conjugate symmetry
def calculateReferenceSpectralMatrix(self):
# cannot use conjugate symmetry in this case
self.referenceSpectralMatrix = np.empty(shape=(self.numChans, len(self.remoteChans), self.dataSize), dtype="complex")
for i, chan1 in enumerate(self.specChans):
for j, chan2 in enumerate(self.remoteChans):
self.referenceSpectralMatrix[i,j] = smooth1d(self.spec[chan1]*np.conjugate(self.remoteSpec[chan2]), self.winLen, self.winType)
def calculateReferenceEvalMatrix(self):
self.referenceEvalMatrix = np.empty(shape=(self.numChans, len(self.remoteChans), len(self.evalFreq)), dtype="complex")
for i, chan1 in enumerate(self.specChans):
for j, chan2 in enumerate(self.remoteChans):
self.referenceEvalMatrix[i,j] = self.interpolateToEvalFreq(self.referenceSpectralMatrix[i,j])
def getRemoteAutoPower(self, chan):
idx = self.remoteChans.index(chan)
return self.remoteSpectralMatrix[idx, idx].real
def getRemoteAutoPowerEval(self, chan, eIdx):
idx = self.remoteChans.index(chan)
return self.remoteEvalMatrix[idx, idx, eIdx].real
def getRemoteCrossPower(self, chan1, chan2):
idx1 = self.remoteChans.index(chan1)
idx2 = self.remoteChans.index(chan2)
return self.remoteSpectralMatrix[idx1, idx2]
def getRemoteCrossPowerEval(self, chan1, chan2, eIdx):
idx1 = self.remoteChans.index(chan1)
idx2 = self.remoteChans.index(chan2)
return self.remoteSpectralMatrix[idx1, idx2, eIdx]
def getReferenceCrossPower(self, dataChan, remoteChan):
idx1 = self.specChans.index(dataChan)
idx2 = self.remoteChans.index(remoteChan)
return self.referenceSpectralMatrix[idx1,idx2]
def getReferenceCrossPowerEval(self, dataChan, remoteChan, eIdx):
idx1 = self.specChans.index(dataChan)
idx2 = self.remoteChans.index(remoteChan)
return self.referenceEvalMatrix[idx1, idx2, eIdx]
###################
### HELPER FUNCTION - dictionaries and interpolate to eval freq
###################
def interpolateToEvalFreq(self, data):
interpFunc = interp.interp1d(self.freq, data)
interpData = interpFunc(self.evalFreq)
return interpData
def prepareOutDict(self):
self.outData = {}
for e in self.evalFreq:
self.outData[e] = {}
# set various calculated flags to false
self.tfCalculated = False
self.remoteCalculated = False
###################
### HELPER FUNCTION - return based on name of stat
###################
def getDataForStatName(self, statName):
if statName == "absvalEqn":
return self.winAbsVal()
if statName == "psd":
return self.winPSD()
elif statName == "coherence":
return self.winCoherence()
elif statName == "poldir":
return self.winPolarisations()
elif statName == "partialcoh":
return self.winPartials()
elif statName == "transFunc" or statName == "resPhase":
if self.tfCalculated:
return self.getOutData()
return self.winTransferFunction()
elif statName == "coherenceRR":
return self.winRemoteCoherence()
elif statName == "coherenceRREqn":
return self.winRemoteEqnCoherence()
elif statName == "absvalRREqn":
return self.winRemoteAbsVal()
elif statName == "transFuncRR" or statName == "resPhaseRR":
if self.remoteCalculated:
return self.getOutData()
return self.winRemoteTransferFunction()
else:
self.printWarning("Statistic in getDataForStatName not recognised")
return self.winCoherence()
###################
### CALCULATE STATISTICS
### POWER / COHERENCIES / POLARISATION DIRECTIONS
###################
def winPSD(self):
# calculate PSD - want to divide by length of time too
freqLen = self.freq.size
timeLen = (freqLen-1)*2 # minus 1 because time sections are usually even
fs = self.freq[-1]*2 # sampling frequency
# and then calculate amount of time
duration = timeLen/fs
# interpolate onto evaluation frequency and output to outData
for eIdx, eF in enumerate(self.evalFreq):
for chan in self.getPSDChans():
key = "psd{}".format(chan)
self.outData[eF][key] = self.getAutoPowerEval(chan, eIdx)/duration
return self.getOutData()
def winCoherence(self):
# now calculate out the relevant coherencies
for idx, p in enumerate(self.getCohPairs()):
c1 = p[0] # chan1
c2 = p[1] # chan2
for eIdx, eF in enumerate(self.evalFreq):
                # now calculate the numerator and denominator
cohNom = np.power(np.absolute(self.getCrossPowerEval(c1, c2, eIdx)), 2).real
cohDenom = self.getAutoPowerEval(c1, eIdx) * self.getAutoPowerEval(c2, eIdx)
# save in outData
key = "coh{}".format(c1+c2)
self.outData[eF][key] = cohNom/cohDenom
return self.getOutData()
def winPolarisations(self):
# calculate polarisation directions
for idx, p in enumerate(self.getPolDirs()):
c1 = p[0] # chan1
c2 = p[1] # chan2
for eIdx, eF in enumerate(self.evalFreq):
                # now calculate the numerator and denominator
cohNom = 2*self.getCrossPowerEval(c1, c2, eIdx).real # take the real part of this
cohDenom = self.getAutoPowerEval(c1, eIdx) - self.getAutoPowerEval(c2, eIdx)
# save to out dictionary
key = "pol{}".format(c1+c2)
self.outData[eF][key] = np.arctan(cohNom/cohDenom)*(180.0/np.pi)
return self.getOutData()
    # this is based on the paper by Weckmann, Magunia and Ritter (2005)
def winPartials(self):
# calculate partial coherencies
# e.g. Ex, Hx w.r.t Hy
# this currently only works for impedance tensor calculations
# do not want to get into higher power partial coherencies
# get the coherences - these will be required later
winCoherence = self.winCoherence()
for i, outChan in enumerate(self.outChans):
for eIdx, eFreq in enumerate(self.evalFreq):
inChan1 = self.inChans[0]
inChan2 = self.inChans[1]
xOutIn1 = self.getCrossPowerEval(outChan, inChan1, eIdx)
xOutIn2 = self.getCrossPowerEval(outChan, inChan2, eIdx)
xIn1In2 = self.getCrossPowerEval(inChan1, inChan2, eIdx)
xIn2In1 = self.getCrossPowerEval(inChan2, inChan1, eIdx)
# calculate out transFunc components
denom = self.getAutoPowerEval(inChan1, eIdx)*self.getAutoPowerEval(inChan2, eIdx) - xIn1In2*xIn2In1
# Z1
Z1nom = xOutIn1*self.getAutoPowerEval(inChan2, eIdx) - xIn2In1*xOutIn2
Z1 = Z1nom/denom
# Z2
Z2nom = self.getAutoPowerEval(inChan1, eIdx)*xOutIn2 - xIn1In2*xOutIn1
Z2 = Z2nom/denom
# calculate bivariate coherency
rb = Z1*self.getCrossPowerEval(inChan1, outChan, eIdx) + Z2*self.getCrossPowerEval(inChan2, outChan, eIdx)
rb = rb / self.getAutoPowerEval(outChan, eIdx)
# now calculate out partials
                # partial coherence of outChan and inChan1, accounting for inChan2
cohkey = "coh{}".format(outChan+inChan2)
rp1 = (rb - winCoherence[eFreq][cohkey]) / (1.0 - winCoherence[eFreq][cohkey])
                # partial coherence of outChan and inChan2, accounting for inChan1
cohkey = "coh{}".format(outChan+inChan1)
rp2 = (rb - winCoherence[eFreq][cohkey]) / (1.0 - winCoherence[eFreq][cohkey])
# now save in outDict
self.outData[eFreq]["bivar{}".format(outChan)] = rb
self.outData[eFreq]["par{}".format(outChan+inChan1)] = rp1
self.outData[eFreq]["par{}".format(outChan+inChan2)] = rp2
return self.getOutData()
# simply save the absolute values of the cross power matrix
# this is useful for cross plotting
def winAbsVal(self):
for eIdx, eFreq in enumerate(self.evalFreq):
for iChan, chan in enumerate(self.specChans):
# first do the outchans multiplied by every other channel
for iOut, outChan in enumerate(self.outChans):
absval = np.absolute(self.getCrossPowerEval(outChan, chan, eIdx))
key = "abs{}{}".format(outChan, chan)
self.outData[eFreq][key] = absval
# then do the inchans multiplied by every other channel
for iIn, inChan in enumerate(self.inChans):
absval = np.absolute(self.getCrossPowerEval(inChan, chan, eIdx))
key = "abs{}{}".format(inChan, chan)
self.outData[eFreq][key] = absval
# return the dictionary
return self.getOutData()
###################
### CALCULATE STATISTICS
### TRANSFER FUNCTIONS
###################
# calculate components of impedance tensor
# separately for each window
def winTransferFunction(self):
# now do the work
totalSize = self.inSize + self.outSize
# now want to calculate the transfer function for each evaluation frequency
output = np.empty(shape=(self.evalFreq.size, self.outSize, self.inSize), dtype="complex")
for eIdx, eFreq in enumerate(self.evalFreq):
# solve transfer function
obs = np.empty(shape=(self.outSize, totalSize), dtype="complex")
reg = np.empty(shape=(self.outSize, totalSize, self.inSize), dtype="complex")
for i in xrange(0, self.outSize):
for j in xrange(0, totalSize):
                    # this is the observation row, where i indexes the observed output
                    # channel and eIdx the evaluation frequency
obs[i, j] = self.getCrossPowerEval(self.outChans[i], self.specChans[j], eIdx)
for k in xrange(0, self.inSize):
reg[i, j, k] = self.getCrossPowerEval(self.inChans[k], self.specChans[j], eIdx)
for i in xrange(0, self.outSize):
observation = obs[i,:]
predictors = reg[i,:,:]
# now do the solution
out, resids, squareResid, rank, s = olsModel(predictors, observation, intercept=self.getIntercept())
# out, resids, scale, weights = mmestimateModel(predictors, observation, intercept=False)
# not interested in the intercept (const) term
if self.getIntercept():
output[eIdx, i] = out[1:]
else:
output[eIdx, i] = out
# calculate components of transfer function and res and phase
for i in xrange(0, self.outSize):
for j in xrange(0, self.inSize):
period = 1.0/eFreq
res = 0.2 * period * np.power(np.absolute(output[eIdx, i, j]), 2)
phase = np.angle(output[eIdx, i, j], deg=True)
keyRes = self.outChans[i] + self.inChans[j] + "Res"
keyPhase = self.outChans[i] + self.inChans[j] + "Phase"
self.outData[eFreq][keyRes] = res
self.outData[eFreq][keyPhase] = phase
# add the components
keyReal = self.outChans[i] + self.inChans[j] + "Real"
keyImag = self.outChans[i] + self.inChans[j] + "Imag"
self.outData[eFreq][keyReal] = output[eIdx, i, j].real
self.outData[eFreq][keyImag] = output[eIdx, i, j].imag
# set transfer function calculated as true
# saves having to do it again
self.tfCalculated = True
return self.getOutData()
###################
### CALCULATE STATISTICS
### REMOTE REFERENCE
###################
def winRemoteCoherence(self):
# this is the coherence of ExHxR, ExHyR, EyHxR, EyHyR, HxHxR, HxHyR, HyHxR, HyHyR
# now let's calculate coherency
        # abs(crosspower(A,B))^2 / (autopower(A)*autopower(B))
for dataChan in self.specChans:
for remoteChan in self.remoteChans:
key = "{}{}RR".format(dataChan, remoteChan)
for eIdx, eFreq in enumerate(self.evalFreq):
cohNom = np.power(np.absolute(self.getReferenceCrossPowerEval(dataChan, remoteChan, eIdx)),2)
cohDenom = self.getAutoPowerEval(dataChan, eIdx)*self.getRemoteAutoPowerEval(remoteChan, eIdx)
coh = cohNom/cohDenom
self.outData[eFreq][key] = coh
return self.getOutData()
def winRemoteEqnCoherence(self):
# now calculate out the relevant coherencies
# here we calculate the coherency between <Ex,HyR> and <Hy,HyR> for example
for iOut, outChan in enumerate(self.outChans):
for iIn, inChan in enumerate(self.inChans):
for iRemote, remoteChan in enumerate(self.remoteChans):
# calculate powers
c1c1 = smooth1d(self.getReferenceCrossPower(outChan, remoteChan)*np.conjugate(self.getReferenceCrossPower(outChan, remoteChan)), self.winLen, self.winType)
c2c2 = smooth1d(self.getReferenceCrossPower(inChan, remoteChan)*np.conjugate(self.getReferenceCrossPower(inChan, remoteChan)), self.winLen, self.winType)
c1c2 = smooth1d(self.getReferenceCrossPower(outChan, remoteChan)*np.conjugate(self.getReferenceCrossPower(inChan, remoteChan)), self.winLen, self.winType)
# now interpolate
c1c1 = self.interpolateToEvalFreq(c1c1)
c2c2 = self.interpolateToEvalFreq(c2c2)
c1c2 = self.interpolateToEvalFreq(c1c2)
                    # now calculate the numerator and denominator
cohNom = np.power(np.absolute(c1c2), 2)
cohDenom = c1c1*c2c2
coh = cohNom/cohDenom # cast as float - discard complex part (complex part should be zero anyway)
# now need the coherencies for the evaluation frequencies
# this involves interpolation
key = "{}{}R-{}{}R".format(outChan, remoteChan, inChan, remoteChan)
for iFreq, eFreq in enumerate(self.evalFreq):
self.outData[eFreq][key] = coh[iFreq]
return self.getOutData()
def winRemoteAbsVal(self):
for eIdx, eFreq in enumerate(self.evalFreq):
for iOut, outChan in enumerate(self.outChans):
for iRemote, remoteChan in enumerate(self.remoteChans):
absOut = np.absolute(self.getReferenceCrossPowerEval(outChan, remoteChan, eIdx))
keyOut = "abs{}{}R".format(outChan, remoteChan)
self.outData[eFreq][keyOut] = absOut
for iIn, inChan in enumerate(self.inChans):
absIn = np.absolute(self.getReferenceCrossPowerEval(inChan, remoteChan, eIdx))
keyIn = "abs{}{}R".format(inChan, remoteChan)
self.outData[eFreq][keyIn] = absIn
return self.getOutData()
def winRemoteTransferFunction(self):
output = np.empty(shape=(self.evalFreq.size, self.outSize, self.inSize), dtype="complex")
for eIdx, eFreq in enumerate(self.evalFreq):
# solve transfer function
obs = np.empty(shape=(self.outSize, self.inSize), dtype="complex")
reg = np.empty(shape=(self.outSize, self.inSize, self.inSize), dtype="complex")
for i, outChan in enumerate(self.outChans):
for j, remoteChan in enumerate(self.remoteChans):
                    # this is the observation row, where i indexes the observed output
                    # channel and eIdx the evaluation frequency
obs[i, j] = self.getReferenceCrossPowerEval(outChan, remoteChan, eIdx)
for k, inChan in enumerate(self.inChans):
reg[i, j, k] = self.getReferenceCrossPowerEval(inChan, remoteChan, eIdx)
for i in xrange(0, self.outSize):
observation = obs[i,:]
predictors = reg[i,:,:]
# now do the solution
out, resids, squareResid, rank, s = olsModel(predictors, observation, intercept=self.getIntercept())
# out, resids, scale, weights = mmestimateModel(predictors, observation, intercept=False)
# not interested in the intercept (const) term
if self.getIntercept():
output[eIdx, i] = out[1:]
else:
output[eIdx, i] = out
# calculate components of transfer function and res and phase
for i in xrange(0, self.outSize):
for j in xrange(0, self.inSize):
period = 1.0/eFreq
res = 0.2 * period * np.power(np.absolute(output[eIdx, i, j]), 2)
phase = np.angle(output[eIdx, i, j], deg=True)
keyRes = self.outChans[i] + self.inChans[j] + "ResRR"
keyPhase = self.outChans[i] + self.inChans[j] + "PhaseRR"
self.outData[eFreq][keyRes] = res
self.outData[eFreq][keyPhase] = phase
# add the components
keyReal = self.outChans[i] + self.inChans[j] + "RealRR"
keyImag = self.outChans[i] + self.inChans[j] + "ImagRR"
self.outData[eFreq][keyReal] = output[eIdx, i, j].real
self.outData[eFreq][keyImag] = output[eIdx, i, j].imag
# set transfer function calculated as true
# saves having to do it again
self.remoteCalculated = True
return self.getOutData()
###################
### DEBUG
##################
def printInfo(self):
self.printText("####################")
self.printText("STATISTIC CALCULATOR INFO BEGIN")
self.printText("####################")
self.printText("Default options")
self.printText("\tInput Chans = {}".format(listToString(self.getInChans())))
self.printText("\tOutput Chans = {}".format(listToString(self.getOutChans())))
self.printText("\tRemote Chans = {}".format(listToString(self.getRemoteChans())))
self.printText("\tPowers = {}".format(listToString(self.getPSDChans())))
self.printText("\tCoherence pairs = {}".format(listToString(self.getCohPairs())))
self.printText("\tPartial coherence = {}".format(listToString(self.getPolDirs())))
if len(self.getEvalFreq()) == 0:
self.printText("Evaluation frequencies = {}")
else:
self.printText("Evaluation frequencies = {}".format(arrayToString(self.getEvalFreq())))
self.printText("####################")
self.printText("STATISTIC CALCULATOR INFO END")
self.printText("####################")
def printText(self, infoStr):
generalPrint("Statistic Calculator Info", infoStr)
def printWarning(self, warnStr):
warningPrint("Statistic Calculator Warning", warnStr)
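###################
### USAGE SKETCH
##################
# A minimal sketch of how the calculator is driven, assuming the utils* modules imported
# above are available and using random complex spectra as stand-ins for real windowed
# data. The evaluation frequencies must lie inside the frequency range so that the
# interpolation onto them succeeds.
if __name__ == "__main__":
    nFreq = 257
    freq = np.linspace(1.0, 128.0, nFreq)
    evalFreq = np.array([4.0, 8.0, 16.0, 32.0])
    spectra = {}
    for chan in ["Ex", "Ey", "Hx", "Hy"]:
        spectra[chan] = np.random.randn(nFreq) + 1j*np.random.randn(nFreq)
    statCalc = StatisticCalculator()
    statCalc.setSpectra(freq, spectra, evalFreq)
    statCalc.printInfo()
    cohData = statCalc.getDataForStatName("coherence")
    for eFreq in evalFreq:
        print("coherences at {} Hz: {}".format(eFreq, cohData[eFreq]))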
|
|
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer-based machine translation model."""
# pylint: disable=attribute-defined-outside-init,g-bare-generic
# See issue #620.
# pytype: disable=wrong-arg-count
# pytype: disable=wrong-keyword-args
# pytype: disable=attribute-error
from functools import partial
from typing import Any, Callable, Tuple, Optional
import jax
from flax import linen as nn
from flax import struct
from jax import lax
from jax import random
import jax.numpy as jnp
import numpy as np
import flax.linen.linear as fll
import flax.linen.module as flm
import flax.linen.initializers as fli
PRNGKey = Any
Shape = Tuple[int]
Dtype = Any
Array = Any
@struct.dataclass
class TransformerConfig:
"""Global hyperparameters used to minimize obnoxious kwarg plumbing."""
vocab_size: int
output_vocab_size: int
share_embeddings: bool = False
logits_via_embedding: bool = False
dtype: Any = jnp.float32
emb_dim: int = 512
num_heads: int = 8
num_layers: int = 6
qkv_dim: int = 512
mlp_dim: int = 2048
max_len: int = 2048
dropout_rate: float = 0.1
attention_dropout_rate: float = 0.1
deterministic: bool = False
decode: bool = False
kernel_init: Callable = nn.initializers.xavier_uniform()
bias_init: Callable = nn.initializers.normal(stddev=1e-6)
posemb_init: Optional[Callable] = None
sinusoidal: bool = False
relative_radius: Optional[int] = None
relative_bias: Optional[bool] = False
enc2dec: Optional[bool] = False
copy_decoder: bool = False
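# Example (a sketch, not used elsewhere in this file): a small configuration for quick
# experiments; any field not listed keeps its default from the dataclass above, e.g.
#   small_cfg = TransformerConfig(vocab_size=1000, output_vocab_size=1000, emb_dim=128,
#                                 num_heads=4, num_layers=2, qkv_dim=128, mlp_dim=512)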
def shift_right(x, axis=1):
"""Shift the input to the right by padding on axis 1."""
pad_widths = [(0, 0)] * len(x.shape)
pad_widths[axis] = (1, 0)
padded = jnp.pad(
x, pad_widths, mode='constant', constant_values=x.dtype.type(0))
return padded[:, :-1]
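# A quick worked example of the shift (illustrative only): along axis 1 the sequence is
# prepended with a zero and its last element dropped, the usual teacher-forcing shift
# for decoder inputs, e.g.
#   shift_right(jnp.array([[1, 2, 3]]))  ->  [[0, 1, 2]]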
def sinusoidal_init(max_len=2048,
min_scale=1.0,
max_scale=10000.0):
"""1D Sinusoidal Position Embedding Initializer.
Args:
max_len: maximum possible length for the input.
min_scale: float: minimum frequency-scale in sine grating.
max_scale: float: maximum frequency-scale in sine grating.
Returns:
output: init function returning `(1, max_len, d_feature)`
"""
def init(key, shape, dtype=np.float32):
"""Sinusoidal init."""
del key, dtype
d_feature = shape[-1]
pe = np.zeros((max_len, d_feature), dtype=np.float32)
position = np.arange(0, max_len)[:, np.newaxis]
scale_factor = -np.log(max_scale / min_scale) / (d_feature // 2 - 1)
div_term = min_scale * np.exp(np.arange(0, d_feature // 2) * scale_factor)
pe[:, :d_feature // 2] = np.sin(position * div_term)
pe[:, d_feature // 2: 2 * (d_feature // 2)] = np.cos(position * div_term)
pe = pe[np.newaxis, :, :] # [1, max_len, d_feature]
return jnp.array(pe)
return init
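# For instance (illustrative only), sinusoidal_init(max_len=8)(None, (1, 8, 4), None)
# returns a (1, 8, 4) table whose first d_feature/2 columns hold sin(position * freq)
# and whose remaining columns hold cos(position * freq), i.e. the fixed sinusoidal
# encoding of the original Transformer paper.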
def create_relative_ids(in_length, relative_radius, tar_length1=None,
tar_length2=None, dec2enc_ids=None, cache_idx=jnp.zeros(1)):
"""Creates 2D Relative IDs for Relative Position Encodings. Relative ID matrices
  are Toeplitz matrices with shape (d1, d2).
Args:
in_length: int: maximum possible length for the input.
relative_radius: int: relative radius for relative attention.
    tar_length1: int: maximum possible length for the output currently being
      decoded (when decode=True, tar_length1 is different from tar_length2).
    tar_length2: int: maximum possible length for the output.
dec2enc_ids: bool: whether to return a Toeplitz matrix (True) or a constant
matrix equal to relative_radius (False) for the decoder-encoder IDs.
cache_idx: index of the current output position (used when decode=True).
Returns:
output: encoder relative IDs with shape (in_length, in_length) when
tar_length1 is None; encoder relative IDs with shape (in_length, in_length),
decoder relative IDs with shape (tar_length1, tar_length2) and
decoder-to-encoder relative IDs with shape (tar_length1, in_length) otherwise.
"""
indices = np.arange(in_length)
diff = np.subtract(np.expand_dims(indices, 1), np.expand_dims(indices, 0))
diff = jnp.array(diff)
enc_relative_ids = relative_radius
enc_relative_ids += jnp.minimum(jnp.maximum(diff, -relative_radius), relative_radius)
enc_relative_ids = jnp.array(enc_relative_ids, dtype=int)
if tar_length1 is not None:
if tar_length2 is None:
tar_length2 = tar_length1
indices1 = np.arange(tar_length1)
indices2 = np.arange(tar_length2)
diff = np.subtract(np.expand_dims(indices1, 1), np.expand_dims(indices2, 0))
diff = jnp.array(diff) + cache_idx
dec_relative_ids = relative_radius + jnp.minimum(jnp.maximum(diff,
-relative_radius), relative_radius)
dec_relative_ids = jnp.array(dec_relative_ids, dtype=int)
if dec2enc_ids:
indices1 = np.arange(tar_length1)
indices2 = np.arange(in_length)
diff = np.subtract(np.expand_dims(indices1, 1), np.expand_dims(indices2, 0))
diff = jnp.array(diff) + cache_idx
dec2enc_relative_ids = relative_radius
dec2enc_relative_ids += jnp.minimum(jnp.maximum(diff, -relative_radius),
relative_radius)
else:
dec2enc_relative_ids = jnp.ones([tar_length1, in_length], dtype=int)
dec2enc_relative_ids *= relative_radius
dec2enc_relative_ids = jnp.array(dec2enc_relative_ids, dtype=int)
return enc_relative_ids, dec_relative_ids, dec2enc_relative_ids
else:
return enc_relative_ids
# TODO: Create unit tests for create_relative_ids
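# A worked example of the Toeplitz structure (illustrative only): with in_length=4 and
# relative_radius=1, diff[i, j] = i - j is clipped to [-1, 1] and shifted by the radius,
# so create_relative_ids(4, 1) returns
#   [[1, 0, 0, 0],
#    [2, 1, 0, 0],
#    [2, 2, 1, 0],
#    [2, 2, 2, 1]]
# i.e. every relative offset is mapped into the vocabulary range [0, 2*relative_radius].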
class AddPositionEmbs(nn.Module):
"""Adds (optionally learned) positional embeddings to the inputs.
Attributes:
config: TransformerConfig dataclass containing hyperparameters.
decode: whether to run in single-position autoregressive mode.
"""
config: TransformerConfig
decode: bool = False
@nn.compact
def __call__(self,
inputs,
inputs_positions=None):
"""Applies AddPositionEmbs module.
By default this layer uses a fixed sinusoidal embedding table. If a
learned position embedding is desired, pass an initializer to
posemb_init in the configuration.
Args:
inputs: input data.
inputs_positions: input position indices for packed sequences.
Returns:
output: `(bs, timesteps, in_dim)`
"""
cfg = self.config
# inputs.shape is (batch_size, seq_len, emb_dim)
assert inputs.ndim == 3, ('Number of dimensions should be 3,'
' but it is: %d' % inputs.ndim)
length = inputs.shape[1]
pos_emb_shape = (1, cfg.max_len, inputs.shape[-1])
if cfg.posemb_init is None:
# Use a fixed (non-learned) sinusoidal position embedding.
pos_embedding = sinusoidal_init(max_len=cfg.max_len)(
None, pos_emb_shape, None)
else:
pos_embedding = self.param('pos_embedding',
cfg.posemb_init,
pos_emb_shape)
pe = pos_embedding[:, :length, :]
# We use a cache position index for tracking decoding position.
if self.decode:
is_initialized = self.has_variable('cache', 'cache_index')
cache_index = self.variable('cache', 'cache_index',
lambda: jnp.array(0, dtype=jnp.uint32))
if is_initialized:
i = cache_index.value
cache_index.value = i + 1
_, _, df = pos_embedding.shape
pe = lax.dynamic_slice(pos_embedding,
jnp.array((0, i, 0)),
(1, 1, df))
if inputs_positions is None:
# normal unpacked case:
return inputs + pe
else:
# for packed data we need to use known position indices:
return inputs + jnp.take(pe[0], inputs_positions, axis=0)
class MlpBlock(nn.Module):
"""Transformer MLP / feed-forward block.
Attributes:
config: TransformerConfig dataclass containing hyperparameters.
out_dim: optionally specify out dimension.
"""
config: TransformerConfig
out_dim: Optional[int] = None
@nn.compact
def __call__(self, inputs):
"""Applies Transformer MlpBlock module."""
cfg = self.config
actual_out_dim = (inputs.shape[-1] if self.out_dim is None
else self.out_dim)
x = nn.Dense(cfg.mlp_dim,
dtype=cfg.dtype,
kernel_init=cfg.kernel_init,
bias_init=cfg.bias_init)(inputs)
x = nn.relu(x)
x = nn.Dropout(rate=cfg.dropout_rate)(
x, deterministic=cfg.deterministic)
output = nn.Dense(actual_out_dim,
dtype=cfg.dtype,
kernel_init=cfg.kernel_init,
bias_init=cfg.bias_init)(x)
output = nn.Dropout(rate=cfg.dropout_rate)(
output, deterministic=cfg.deterministic)
return output
def dot_product_relative_attention_weights(query: Array,
key: Array,
bias: Optional[Array] = None,
relative_ids: Optional[Array] = None,
relative_embeddings: Optional[Callable] = None,
relative_biases: Optional[Callable] = None,
broadcast_dropout: bool = True,
dropout_rng: Optional[PRNGKey] = None,
dropout_rate: float = 0.,
deterministic: bool = False,
dtype: Dtype = jnp.float32,
precision: Optional[lax.Precision] = None):
"""Computes dot-product attention weights given query and key.
  Used by :func:`dot_product_relative_attention`, which is what you'll most likely use.
But if you want access to the attention weights for introspection, then
you can directly call this function and call einsum yourself.
Args:
query: queries for calculating attention with shape of
`[batch..., q_length, num_heads, qk_depth_per_head]`.
key: keys for calculating attention with shape of
`[batch..., kv_length, num_heads, qk_depth_per_head]`.
bias: bias for the attention weights. This should be broadcastable to the
shape `[batch..., num_heads, q_length, kv_length]`.
This can be used for incorporating causal masks, padding masks,
proximity bias, etc.
relative_ids: relative ids used to calculate relative position encodings with
shape of `[q_length, kv_length]`.
relative_embeddings: Callable: function used to calculate relative position
encodings from relative_ids.
relative_biases: Callable: function used to calculate relative bias from
relative_ids.
broadcast_dropout: bool: use a broadcasted dropout along batch dims.
dropout_rng: JAX PRNGKey: to be used for dropout
dropout_rate: dropout rate
deterministic: bool, deterministic or not (to apply dropout)
dtype: the dtype of the computation (default: float32)
precision: numerical precision of the computation see `jax.lax.Precision`
for details.
Returns:
Output of shape `[batch..., num_heads, q_length, kv_length]`.
"""
assert query.ndim == key.ndim, 'q, k must have same rank.'
assert query.shape[:-3] == key.shape[:-3], (
'q, k batch dims must match.')
assert query.shape[-2] == key.shape[-2], (
'q, k num_heads must match.')
assert query.shape[-1] == key.shape[-1], 'q, k depths must match.'
# calculate attention matrix
depth = query.shape[-1]
# attn weight shape is (batch..., num_heads, q_length, kv_length)
attn_weights = jnp.einsum('...qhd,...khd->...hqk', query, key,
precision=precision)
if relative_ids is not None:
if relative_embeddings is not None:
r = relative_embeddings(relative_ids)
matmul_qrel = jnp.einsum('...qhd,...qkd->...hqk', query, r,
precision=precision)
attn_weights += matmul_qrel
# apply attention bias: masking, dropout, proximity bias, etc.
if bias is not None:
attn_weights = attn_weights + bias
if relative_biases is not None:
attn_weights += jnp.squeeze(relative_biases(relative_ids), axis = -1)
attn_weights = attn_weights / jnp.sqrt(depth).astype(dtype)
# normalize the attention weights
attn_weights = jax.nn.softmax(attn_weights).astype(dtype)
# apply attention dropout
if not deterministic and dropout_rate > 0.:
keep_prob = 1.0 - dropout_rate
if broadcast_dropout:
# dropout is broadcast across the batch + head dimensions
dropout_shape = tuple([1] * (key.ndim - 2)) + attn_weights.shape[-2:]
keep = random.bernoulli(dropout_rng, keep_prob, dropout_shape)
else:
keep = random.bernoulli(dropout_rng, keep_prob, attn_weights.shape)
multiplier = (keep.astype(attn_weights.dtype) /
jnp.asarray(keep_prob, dtype=dtype))
attn_weights = attn_weights * multiplier
return attn_weights
def dot_product_relative_attention(query: Array,
key: Array,
value: Array,
bias: Optional[Array] = None,
relative_ids: Optional[Array] = None,
relative_embeddings: Optional[Callable] = None,
relative_biases: Optional[Callable] = None,
broadcast_dropout: bool = True,
dropout_rng: Optional[PRNGKey] = None,
dropout_rate: float = 0.,
deterministic: bool = False,
dtype: Dtype = jnp.float32,
precision: Optional[lax.Precision] = None):
"""Computes dot-product attention given query, key, and value.
This is the core function for applying attention based on
https://arxiv.org/abs/1706.03762. It calculates the attention weights given
query and key and combines the values using the attention weights.
Note: query, key, value needn't have any batch dimensions.
Args:
query: queries for calculating attention with shape of
`[batch..., q_length, num_heads, qk_depth_per_head]`.
key: keys for calculating attention with shape of
`[batch..., kv_length, num_heads, qk_depth_per_head]`.
value: values to be used in attention with shape of
`[batch..., kv_length, num_heads, v_depth_per_head]`.
bias: bias for the attention weights. This should be broadcastable to the
shape `[batch..., num_heads, q_length, kv_length]`.
This can be used for incorporating causal masks, padding masks,
proximity bias, etc.
relative_ids: relative ids used to calculate relative position encodings with
shape of `[q_length, kv_length]`.
relative_embeddings: Callable: function used to calculate relative position
encodings from relative_ids.
relative_biases: Callable: function used to calculate relative bias from
relative_ids.
broadcast_dropout: bool: use a broadcasted dropout along batch dims.
dropout_rng: JAX PRNGKey: to be used for dropout
dropout_rate: dropout rate
deterministic: bool, deterministic or not (to apply dropout)
dtype: the dtype of the computation (default: float32)
precision: numerical precision of the computation see `jax.lax.Precision`
for details.
Returns:
Output of shape `[batch..., q_length, num_heads, v_depth_per_head]`.
"""
assert key.ndim == query.ndim == value.ndim, 'q, k, v must have same rank.'
assert query.shape[:-3] == key.shape[:-3] == value.shape[:-3], (
'q, k, v batch dims must match.')
assert query.shape[-2] == key.shape[-2] == value.shape[-2], (
'q, k, v num_heads must match.')
assert key.shape[-3] == value.shape[-3], 'k, v lengths must match.'
# compute attention weights
attn_weights = dot_product_relative_attention_weights(
query, key, bias, relative_ids, relative_embeddings,
relative_biases, broadcast_dropout, dropout_rng, dropout_rate,
deterministic, dtype, precision)
# return weighted sum over values for each query position
return jnp.einsum('...hqk,...khd->...qhd', attn_weights, value,
precision=precision)
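# A shape-level smoke test for the primitive above (a sketch, not exercised by the
# model code): with no bias and no relative ids it reduces to ordinary dot-product
# attention, e.g.
#   q = random.normal(random.PRNGKey(0), (2, 5, 4, 8))  # [batch, length, heads, depth]
#   out = dot_product_relative_attention(q, q, q, deterministic=True)
#   out.shape == (2, 5, 4, 8)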
class MultiHeadDotProductRelativeAttention(flm.Module):
"""Multi-head dot-product attention.
Attributes:
num_heads: number of attention heads. Features (i.e. inputs_q.shape[-1])
should be divisible by the number of heads.
dtype: the dtype of the computation (default: float32)
qkv_features: dimension of the key, query, and value.
out_features: dimension of the last projection
broadcast_dropout: bool: use a broadcasted dropout along batch dims.
dropout_rate: dropout rate
deterministic: if false, the attention weight is masked randomly
using dropout, whereas if true, the attention weights
are deterministic.
precision: numerical precision of the computation see `jax.lax.Precision`
for details.
kernel_init: initializer for the kernel of the Dense layers.
bias_init: initializer for the bias of the Dense layers.
use_bias: bool: whether pointwise QKVO dense transforms use bias.
attention_fn: dot_product_attention or compatible function. Accepts
query, key, value, and returns output of shape
      `[bs, dim1, dim2, ..., dimN, num_heads, value_channels]`
decode: whether to prepare and use an autoregressive cache.
relative_radius: relative attention radius.
relative_bias: bool: whether to add relative bias to attention matrix.
"""
num_heads: int
dtype: Dtype = jnp.float32
qkv_features: Optional[int] = None
out_features: Optional[int] = None
broadcast_dropout: bool = True
dropout_rate: float = 0.
deterministic: Optional[bool] = None
precision: Any = None
kernel_init: Callable[[PRNGKey, Shape, Dtype], Array] = fll.default_kernel_init
bias_init: Callable[[PRNGKey, Shape, Dtype], Array] = fli.zeros
use_bias: bool = True
attention_fn: Callable[[Array, Array, Array], Array] = dot_product_relative_attention
decode: bool = False
relative_radius: Optional[int] = None
relative_bias: bool = False
@flm.compact
def __call__(self,
inputs_q: Array,
inputs_kv: Array,
relative_ids: Optional[Array] = None,
mask: Optional[Array] = None,
deterministic: Optional[bool] = None):
"""Applies multi-head dot product attention on the input data.
Projects the inputs into multi-headed query, key, and value vectors,
applies dot-product attention and project the results to an output vector.
Args:
inputs_q: input queries of shape
`[batch_sizes..., length, features]`.
inputs_kv: key/values of shape
`[batch_sizes..., length, features]`.
relative_ids: relative ids used to calculate relative position encodings with
shape of `[q_length, kv_length]`.
mask: attention mask of shape
`[batch_sizes..., num_heads, query_length, key/value_length]`.
deterministic: if false, the attention weight is masked randomly
using dropout, whereas if true, the attention weights
are deterministic.
Returns:
output of shape `[batch_sizes..., length, features]`.
"""
if self.dropout_rate > 0.: # Require `deterministic` only if using dropout.
deterministic = flm.merge_param('deterministic', self.deterministic, deterministic)
features = self.out_features or inputs_q.shape[-1]
qkv_features = self.qkv_features or inputs_q.shape[-1]
assert qkv_features % self.num_heads == 0, (
'Memory dimension must be divisible by number of heads.')
head_dim = qkv_features // self.num_heads
dense = partial(fll.DenseGeneral,
axis=-1,
features=(self.num_heads, head_dim),
kernel_init=self.kernel_init,
bias_init=self.bias_init,
use_bias=self.use_bias,
precision=self.precision)
# project inputs_q to multi-headed q/k/v
# dimensions are then [batch..., length, n_heads, n_features_per_head]
query, key, value = (dense(dtype=self.dtype, name='query')(inputs_q),
dense(dtype=self.dtype, name='key')(inputs_kv),
dense(dtype=self.dtype, name='value')(inputs_kv))
relative_embeddings = None
relative_biases = None
if self.relative_radius is not None:
relative_vocab_size = 2 * self.relative_radius + 1
      head_dim = qkv_features // self.num_heads
relative_embeddings = nn.Embed(relative_vocab_size, head_dim,
embedding_init=nn.initializers.normal(stddev=1.0))
if self.relative_bias:
relative_biases = nn.Embed(relative_vocab_size, 1,
embedding_init=nn.initializers.normal(stddev=1.0))
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
if self.decode:
# detect if we're initializing by absence of existing cache data.
is_initialized = self.has_variable('cache', 'cached_key')
cached_key = self.variable('cache', 'cached_key',
jnp.zeros, key.shape, key.dtype)
cached_value = self.variable('cache', 'cached_value',
jnp.zeros, value.shape, value.dtype)
cache_index = self.variable('cache', 'cache_index',
lambda: jnp.array(0, dtype=jnp.int32))
if is_initialized:
*batch_dims, max_length, num_heads, depth_per_head = (
cached_key.value.shape)
# shape check of cached keys against query input
expected_shape = tuple(batch_dims) + (1, num_heads, depth_per_head)
if expected_shape != query.shape:
raise ValueError('Autoregressive cache shape error, '
'expected query shape %s instead got %s.' %
(expected_shape, query.shape))
# update key, value caches with our new 1d spatial slices
cur_index = cache_index.value
indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
key = lax.dynamic_update_slice(cached_key.value, key, indices)
value = lax.dynamic_update_slice(cached_value.value, value, indices)
cached_key.value = key
cached_value.value = value
cache_index.value = cache_index.value + 1
# causal mask for cached decoder self-attention:
# our single query position should only attend to those key
# positions that have already been generated and cached,
# not the remaining zero elements.
mask = nn.combine_masks(
mask,
jnp.broadcast_to(jnp.arange(max_length) <= cur_index,
tuple(batch_dims) + (1, 1, max_length)))
# Convert the boolean attention mask to an attention bias.
if mask is not None:
# attention mask in the form of attention bias
attention_bias = lax.select(
mask > 0,
jnp.full(mask.shape, 0.).astype(self.dtype),
jnp.full(mask.shape, -1e10).astype(self.dtype))
else:
attention_bias = None
dropout_rng = None
if not deterministic and self.dropout_rate > 0.:
dropout_rng = self.make_rng('dropout')
# apply attention
x = self.attention_fn(
query,
key,
value,
bias=attention_bias,
relative_ids=relative_ids,
relative_embeddings=relative_embeddings,
relative_biases=relative_biases,
dropout_rng=dropout_rng,
dropout_rate=self.dropout_rate,
broadcast_dropout=self.broadcast_dropout,
deterministic=deterministic,
dtype=self.dtype,
precision=self.precision) # pytype: disable=wrong-keyword-args
# back to the original inputs dimensions
out = fll.DenseGeneral(features=features,
axis=(-2, -1),
kernel_init=self.kernel_init,
bias_init=self.bias_init,
use_bias=self.use_bias,
dtype=self.dtype,
precision=self.precision,
name='out')(x)
return out
class SelfRelativeAttention(MultiHeadDotProductRelativeAttention):
"""Self-attention special case of multi-head dot-product attention."""
@flm.compact
def __call__(self, inputs_q: Array, relative_ids: Optional[Array] = None,
mask: Optional[Array] = None,
deterministic: Optional[bool] = None):
return super().__call__(inputs_q, inputs_q, relative_ids, mask,
deterministic=deterministic)
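# --- Illustrative usage sketch (not part of the original model code). It relies
# --- only on names already defined or imported in this file (jax, jnp,
# --- create_relative_ids, SelfRelativeAttention); the shapes and
# --- hyperparameters below are arbitrary toy values.
def _self_relative_attention_example():
    rng = jax.random.PRNGKey(0)
    batch, seq_len, features, radius = 2, 8, 32, 4
    x = jnp.ones((batch, seq_len, features), dtype=jnp.float32)
    # relative ids for self-attention, built the same way the Encoder does below
    relative_ids = create_relative_ids(seq_len, radius)
    attn = SelfRelativeAttention(num_heads=4,
                                 qkv_features=features,
                                 relative_radius=radius,
                                 relative_bias=True,
                                 deterministic=True)
    variables = attn.init(rng, x, relative_ids=relative_ids)
    out = attn.apply(variables, x, relative_ids=relative_ids)
    return out.shape  # (batch, seq_len, features)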
class Encoder1DBlock(nn.Module):
"""Transformer encoder layer.
Attributes:
config: TransformerConfig dataclass containing hyperparameters.
relative_radius: relative attention radius.
"""
config: TransformerConfig
relative_radius: Optional[int] = None
@nn.compact
def __call__(self,
inputs,
relative_ids=None,
encoder_mask=None):
"""Applies Encoder1DBlock module.
Args:
inputs: input data.
relative_ids: relative ids used to calculate relative position encodings.
encoder_mask: encoder self-attention mask.
Returns:
output after transformer encoder block.
"""
cfg = self.config
# Attention block.
assert inputs.ndim == 3
x = nn.LayerNorm(dtype=cfg.dtype)(inputs)
x = SelfRelativeAttention(
num_heads=cfg.num_heads,
dtype=cfg.dtype,
qkv_features=cfg.qkv_dim,
kernel_init=cfg.kernel_init,
bias_init=cfg.bias_init,
use_bias=False,
broadcast_dropout=False,
dropout_rate=cfg.attention_dropout_rate,
deterministic=cfg.deterministic,
relative_radius=self.relative_radius,
relative_bias=cfg.relative_bias)(x, relative_ids=relative_ids,
mask=encoder_mask)
x = nn.Dropout(rate=cfg.dropout_rate)(
x, deterministic=cfg.deterministic)
x = x + inputs
# MLP block.
y = nn.LayerNorm(dtype=cfg.dtype)(x)
y = MlpBlock(config=cfg)(y)
return x + y
class EncoderDecoder1DBlock(nn.Module):
"""Transformer encoder-decoder layer.
Attributes:
config: TransformerConfig dataclass containing hyperparameters.
relative_radius: relative attention radius.
"""
config: TransformerConfig
relative_radius: Optional[int] = None
@nn.compact
def __call__(self,
targets,
encoded,
relative_ids_dec=None,
relative_ids_enc_dec=None,
decoder_mask=None,
encoder_decoder_mask=None):
"""Applies EncoderDecoder1DBlock module.
Args:
targets: input data for decoder
encoded: input data from encoder
relative_ids_dec: relative ids used to calculate the decoder relative
position encodings.
relative_ids_enc_dec: relative ids used to calculate the encoder-decoder
relative position encodings.
decoder_mask: decoder self-attention mask.
encoder_decoder_mask: encoder-decoder attention mask.
Returns:
output after transformer encoder-decoder block.
"""
cfg = self.config
# Decoder block.
assert targets.ndim == 3
x = nn.LayerNorm(dtype=cfg.dtype)(targets)
x = SelfRelativeAttention(
num_heads=cfg.num_heads,
dtype=cfg.dtype,
qkv_features=cfg.qkv_dim,
kernel_init=cfg.kernel_init,
bias_init=cfg.bias_init,
use_bias=False,
broadcast_dropout=False,
dropout_rate=cfg.attention_dropout_rate,
deterministic=cfg.deterministic,
decode=cfg.decode,
relative_radius=self.relative_radius,
relative_bias=cfg.relative_bias)(x, relative_ids=relative_ids_dec,
mask=decoder_mask)
x = nn.Dropout(rate=cfg.dropout_rate)(
x, deterministic=cfg.deterministic)
x = x + targets
# Encoder-Decoder block.
y = nn.LayerNorm(dtype=cfg.dtype)(x)
y = MultiHeadDotProductRelativeAttention(
num_heads=cfg.num_heads,
dtype=cfg.dtype,
qkv_features=cfg.qkv_dim,
kernel_init=cfg.kernel_init,
bias_init=cfg.bias_init,
use_bias=False,
broadcast_dropout=False,
dropout_rate=cfg.attention_dropout_rate,
deterministic=cfg.deterministic,
relative_radius=self.relative_radius,
relative_bias=cfg.relative_bias)(
y, encoded, relative_ids_enc_dec, encoder_decoder_mask)
y = nn.Dropout(rate=cfg.dropout_rate)(
y, deterministic=cfg.deterministic)
y = y + x
# MLP block.
z = nn.LayerNorm(dtype=cfg.dtype)(y)
z = MlpBlock(config=cfg)(z)
return y + z
class Encoder(nn.Module):
"""Transformer Model Encoder for sequence to sequence translation.
Attributes:
config: TransformerConfig dataclass containing hyperparameters.
shared_embedding: a shared embedding layer to use.
"""
config: TransformerConfig
shared_embedding: Any = None
@nn.compact
def __call__(self,
inputs,
inputs_positions=None,
encoder_mask=None):
"""Applies Transformer model on the inputs.
Args:
inputs: input data
inputs_positions: input subsequence positions for packed examples.
encoder_mask: encoder self-attention mask.
Returns:
output of a transformer encoder.
"""
cfg = self.config
assert inputs.ndim == 2 # (batch, len)
# Input Embedding
if self.shared_embedding is None:
input_embed = nn.Embed(
num_embeddings=cfg.vocab_size,
features=cfg.emb_dim,
embedding_init=nn.initializers.normal(stddev=1.0))
else:
input_embed = self.shared_embedding
x = inputs.astype('int32')
x = input_embed(x)
if cfg.sinusoidal:
x = AddPositionEmbs(config=cfg, decode=False, name='posembed_input')(
x, inputs_positions=inputs_positions)
x = nn.Dropout(rate=cfg.dropout_rate)(
x, deterministic=cfg.deterministic)
x = x.astype(cfg.dtype)
relative_ids = None
if cfg.relative_radius is not None:
relative_ids = create_relative_ids(inputs.shape[1], cfg.relative_radius)
# Input Encoder
for lyr in range(cfg.num_layers):
x = Encoder1DBlock(config=cfg, relative_radius=cfg.relative_radius,
name=f'encoderblock_{lyr}')(x, relative_ids, encoder_mask)
encoded = nn.LayerNorm(dtype=cfg.dtype, name='encoder_norm')(x)
return encoded
class Decoder(nn.Module):
"""Transformer Model Decoder for sequence to sequence translation.
Attributes:
config: TransformerConfig dataclass containing hyperparameters.
shared_embedding: a shared embedding layer to use.
"""
config: TransformerConfig
shared_embedding: Any = None
@nn.compact
def __call__(self,
encoded,
targets,
targets_positions=None,
decoder_mask=None,
encoder_decoder_mask=None):
"""Applies Transformer model on the inputs.
Args:
encoded: encoded input data from encoder.
targets: target inputs.
targets_positions: input subsequence positions for packed examples.
decoder_mask: decoder self-attention mask.
encoder_decoder_mask: encoder-decoder attention mask.
Returns:
output of a transformer decoder.
"""
cfg = self.config
assert encoded.ndim == 3 # (batch, len, depth)
assert targets.ndim == 2 # (batch, len)
# Target Embedding
if self.shared_embedding is None:
output_embed = nn.Embed(
num_embeddings=cfg.output_vocab_size,
features=cfg.emb_dim,
embedding_init=nn.initializers.normal(stddev=1.0))
else:
output_embed = self.shared_embedding
y = targets.astype('int32')
if not cfg.decode:
y = shift_right(y)
y = output_embed(y)
if cfg.sinusoidal:
y = AddPositionEmbs(config=cfg, decode=cfg.decode, name='posembed_output')(
y, inputs_positions=targets_positions)
y = nn.Dropout(rate=cfg.dropout_rate)(
y, deterministic=cfg.deterministic)
y = y.astype(cfg.dtype)
relative_ids_dec, relative_ids_enc_dec = None, None
if cfg.relative_radius is not None:
_, relative_ids_dec, relative_ids_enc_dec = create_relative_ids(encoded.shape[1],
cfg.relative_radius,
targets.shape[1],
None,
cfg.enc2dec)
if cfg.decode:
is_initialized = self.has_variable('cache', 'cache_index')
cache_index = self.variable('cache', 'cache_index',
lambda: jnp.array(0, dtype=jnp.uint32))
if is_initialized:
idx = cache_index.value
cache_index.value = idx + 1
_, relative_ids_dec, relative_ids_enc_dec = create_relative_ids(encoded.shape[1],
cfg.relative_radius,
targets.shape[1],
cfg.max_len,
cfg.enc2dec, idx)
# Target-Input Decoder
for lyr in range(cfg.num_layers):
y = EncoderDecoder1DBlock(
config=cfg, relative_radius=cfg.relative_radius, name=f'encoderdecoderblock_{lyr}')(
y,
encoded,
relative_ids_dec=relative_ids_dec,
relative_ids_enc_dec=relative_ids_enc_dec,
decoder_mask=decoder_mask,
encoder_decoder_mask=encoder_decoder_mask)
y = nn.LayerNorm(dtype=cfg.dtype, name='encoderdecoder_norm')(y)
# Decoded Logits
if cfg.logits_via_embedding:
# Use the transpose of embedding matrix for logit transform.
logits = output_embed.attend(y.astype(jnp.float32))
# Correctly normalize pre-softmax logits for this shared case.
logits = logits / jnp.sqrt(y.shape[-1])
else:
logits = nn.Dense(
cfg.output_vocab_size,
dtype=cfg.dtype,
kernel_init=cfg.kernel_init,
bias_init=cfg.bias_init,
name='logitdense')(y)
return logits
class Transformer(nn.Module):
"""Transformer Model for sequence to sequence translation.
Attributes:
config: TransformerConfig dataclass containing hyperparameters.
"""
config: TransformerConfig
def setup(self):
cfg = self.config
if cfg.share_embeddings:
if cfg.output_vocab_size is not None:
assert cfg.output_vocab_size == cfg.vocab_size, (
"can't share embedding with different vocab sizes.")
self.shared_embedding = nn.Embed(
num_embeddings=cfg.vocab_size,
features=cfg.emb_dim,
embedding_init=nn.initializers.normal(stddev=1.0))
else:
self.shared_embedding = None
self.encoder = Encoder(config=cfg,
shared_embedding=self.shared_embedding)
self.decoder = Decoder(config=cfg,
shared_embedding=self.shared_embedding)
if cfg.copy_decoder:
self.final_layer_copy = nn.Dense(cfg.qkv_dim, kernel_init=cfg.kernel_init,
bias_init=cfg.bias_init) # pe_input is the maximum input length
self.final_layer_copy_weight = nn.Dense(1, kernel_init=cfg.kernel_init,
bias_init=cfg.bias_init)
# We want vocab_size -> vocab_size, but that might be too big.
# So, we do a low-rank approximation, bringing it down to d_model first,
# in case d_model < vocab_size:
if cfg.qkv_dim < cfg.output_vocab_size:
self.final_layer_copy2a = nn.Dense(cfg.qkv_dim, kernel_init=cfg.kernel_init,
bias_init=cfg.bias_init)
self.final_layer_copy2b = nn.Dense(cfg.output_vocab_size, kernel_init=cfg.kernel_init,
bias_init=cfg.bias_init)
else:
self.final_layer_copy2 = nn.Dense(cfg.output_vocab_size, kernel_init=cfg.kernel_init,
bias_init=cfg.bias_init)
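    # Rough parameter-count illustration of the low-rank factorisation above,
    # with hypothetical sizes vocab_size = 32000 and qkv_dim = 512: a direct
    # vocab -> vocab projection would need about 32000 * 32000 = 1.02e9 weights,
    # whereas the two-step vocab -> 512 -> vocab route needs only about
    # 2 * 32000 * 512 = 3.3e7.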
def encode(self,
inputs,
inputs_positions=None,
inputs_segmentation=None):
"""Applies Transformer encoder-branch on the inputs.
Args:
inputs: input data.
inputs_positions: input subsequence positions for packed examples.
inputs_segmentation: input segmentation info for packed examples.
Returns:
encoded feature array from the transformer encoder.
"""
cfg = self.config
# Make padding attention mask.
encoder_mask = nn.make_attention_mask(
inputs > 0, inputs > 0, dtype=cfg.dtype)
# Add segmentation block-diagonal attention mask if using segmented data.
if inputs_segmentation is not None:
encoder_mask = nn.combine_masks(
encoder_mask,
nn.make_attention_mask(inputs_segmentation,
inputs_segmentation,
jnp.equal,
dtype=cfg.dtype)
)
return self.encoder(
inputs,
inputs_positions=inputs_positions,
encoder_mask=encoder_mask)
def decode(self,
encoded,
inputs, # only needed for masks
targets,
targets_positions=None,
inputs_segmentation=None,
targets_segmentation=None):
"""Applies Transformer decoder-branch on encoded-input and target.
Args:
encoded: encoded input data from encoder.
inputs: input data (only needed for masking).
targets: target data.
targets_positions: target subsequence positions for packed examples.
inputs_segmentation: input segmentation info for packed examples.
targets_segmentation: target segmentation info for packed examples.
Returns:
logits array from transformer decoder.
"""
cfg = self.config
# Make padding attention masks.
if cfg.decode:
# for fast autoregressive decoding only a special encoder-decoder mask is used
decoder_mask = None
encoder_decoder_mask = nn.make_attention_mask(
jnp.ones_like(targets) > 0, inputs > 0, dtype=cfg.dtype)
else:
decoder_mask = nn.combine_masks(
nn.make_attention_mask(targets > 0, targets > 0, dtype=cfg.dtype),
nn.make_causal_mask(targets, dtype=cfg.dtype))
encoder_decoder_mask = nn.make_attention_mask(
targets > 0, inputs > 0, dtype=cfg.dtype)
# Add segmentation block-diagonal attention masks if using segmented data.
if inputs_segmentation is not None:
decoder_mask = nn.combine_masks(
decoder_mask,
nn.make_attention_mask(targets_segmentation,
targets_segmentation,
jnp.equal,
dtype=cfg.dtype))
encoder_decoder_mask = nn.combine_masks(
encoder_decoder_mask,
nn.make_attention_mask(targets_segmentation,
inputs_segmentation,
jnp.equal,
dtype=cfg.dtype))
logits = self.decoder(
encoded,
targets,
targets_positions=targets_positions,
decoder_mask=decoder_mask,
encoder_decoder_mask=encoder_decoder_mask)
return logits.astype(self.config.dtype)
def __call__(self,
inputs,
targets,
inputs_positions=None,
targets_positions=None,
inputs_segmentation=None,
targets_segmentation=None):
"""Applies Transformer model on the inputs.
Args:
inputs: input data.
targets: target data.
inputs_positions: input subsequence positions for packed examples.
targets_positions: target subsequence positions for packed examples.
inputs_segmentation: input segmentation info for packed examples.
targets_segmentation: target segmentation info for packed examples.
Returns:
logits array from full transformer.
"""
encoded = self.encode(inputs,
inputs_positions=inputs_positions,
inputs_segmentation=inputs_segmentation)
dec_output = self.decode(encoded,
inputs, # only used for masks
targets,
targets_positions=targets_positions,
inputs_segmentation=inputs_segmentation,
targets_segmentation=targets_segmentation)
cfg = self.config
if not cfg.copy_decoder:
return dec_output
else:
final_output = nn.softmax(dec_output) # (batch_size, tar_seq_len, vocab_size)
copy_output_query = self.final_layer_copy(dec_output) # (batch_size, tar_seq_len, d_model)
copy_output_weight = nn.sigmoid(self.final_layer_copy_weight(dec_output))
copy_output = dot_product_relative_attention(
copy_output_query, # (batch_size, tar_seq_len, d_model)
encoded, # (batch_size, inp_seq_len, d_model)
jax.nn.one_hot(inputs, cfg.output_vocab_size)) # (batch_size, inp_seq_len, vocab_size)
if cfg.qkv_dim < cfg.output_vocab_size:
copy_output = nn.softmax(self.final_layer_copy2b(
self.final_layer_copy2a(copy_output)))
else:
copy_output = nn.softmax(self.final_layer_copy2(copy_output))
final_output = jnp.log(
(1 - copy_output_weight) * final_output + copy_output_weight * copy_output)
return final_output
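# --- Illustrative numeric sketch (not part of the model) of the copy-decoder
# --- mixture computed above: final log-probs = log((1 - w) * p_gen + w * p_copy),
# --- where w is the per-position copy gate in [0, 1]. Toy values only.
def _copy_mixture_example():
    p_gen = nn.softmax(jnp.array([2.0, 0.5, -1.0]))   # generator distribution over a toy vocab
    p_copy = jnp.array([0.1, 0.8, 0.1])               # distribution induced by copying from the input
    w = 0.3                                           # copy gate, i.e. the sigmoid output above
    return jnp.log((1 - w) * p_gen + w * p_copy)      # same convex mixture as in __call__ above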
|
|
'''
Utilities to parse and process Hawk-Eye tennis tracking data
'''
import numpy as np
from collections import deque
import os
import math
import utilities.paths as paths
import glob
import collections
DRIVE = paths.get_drive(2)
# Get the projection file list in order and put it into a dict, keyed by the timestamp rebased to the frame numbering of the input video, with the file name as the value.
def load_prj(video_id):
_, _, _, time_offset = get_offset(video_id)
prjfs = glob.glob(DRIVE + 'DATASETS/VIDEO/TENNIS/HE_DATA/'+video_id+'/Players/*.prj')
prj_files = {}
for prjf in sorted(prjfs):
timestamp = prjf[prjf.rfind('_')+1:prjf.rfind('.')]
assert len(timestamp) == 6
# convert the HH:MM:SS timestamp to a frame number at 25 fps
timestamp = int(timestamp[0:2])*60*60*25 + int(timestamp[2:4])*60*25 + int(timestamp[4:6])*25
prj_files[timestamp - time_offset] = prjf
return prj_files
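# Worked example of the timestamp-to-frame conversion above (25 fps footage):
# a file suffix of '194312' (19:43:12) gives
# 19*60*60*25 + 43*60*25 + 12*25 = 1774800 frames,
# which is then rebased by subtracting the per-video offset from get_offset().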
# Read trj files and place them into a data structure:
# {point start frame: {entry name: entry value, ..., 'arcs': [arc dicts]}}
def load_trj(video_id):
_, _, _, time_offset = get_offset(video_id)
trjfs = glob.glob(DRIVE + 'DATASETS/VIDEO/TENNIS/HE_DATA/'+video_id+'/*.trj')
trj_files = {}
for trjf in sorted(trjfs):
timestamp = trjf[trjf.rfind('_')+1:trjf.rfind('.')]
assert len(timestamp) == 6
# convert the HH:MM:SS timestamp to a frame number at 25 fps
timestamp = int(timestamp[0:2])*60*60*25 + int(timestamp[2:4])*60*25 + int(timestamp[4:6])*25
with open(trjf) as f:
lines = f.read()
lines = lines.split('\n')
for i in range(len(lines)):
lines[i] = lines[i].replace('\r','').replace('\n','')
# create dicts for arc and non arc data
arc_entries = {}
entries = {}
_key = None
for line in lines:
if len(line) > 0: # ignore lines with no text
if line[0] == '<': # this is a new key
if _key is not None: # not the first key, i.e. there is a pending key to write before starting a new one
if any(char.isdigit() for char in _key): # a number in the key means it refers to a particular arc
if len(_value) == 1:
arc_entries[_key] = _value[0]
else:
arc_entries[_key] = _value
else:
if len(_value) == 1:
entries[_key] = _value[0]
else:
entries[_key] = _value
_key = line[1:-1]
_value = []
else:
_value.append(line)
# go through arc entries saving as list of arc dicts [{dict for arc 0}, {dict for arc 1} ... ]
arc_number_upto = 0
arcs = []
while True: # need a break statement
added = False
arc_dict = {}
for k, v in arc_entries.items():
if k.find(str(arc_number_upto)) > -1: # found arc with arc_number_upto
# guard against single digits matching inside double-digit numbers, e.g. arc 0 or 1 matching inside arc 10
if k[k.find(str(arc_number_upto))-1].isdigit() or (len(k)-1) != (k.find(str(arc_number_upto))+len(str(arc_number_upto))-1):
continue
else:
arc_dict[k.replace(str(arc_number_upto),'').rstrip(' ')] = v
added = True # yes we have added at least one
arc_number_upto += 1
if added:
arcs.append(arc_dict)
else: # no data added for this arc number, so the previous arc was the last one
break
# add to entries dict
entries['arcs'] = arcs
trj_files[timestamp - time_offset] = entries
return trj_files
# Build player positions given dict of prj files
def build_player_positions(video_id):
prj_files = load_prj(video_id)
player_positions = {}
for frame_number in prj_files.keys():
with open(prj_files[frame_number]) as f:
lines = f.readlines()
for line in lines:
p1 = line.rstrip().split(',')[1].split()
p1 = [float(p) for p in p1]
p2 = line.rstrip().split(',')[2].split()
p2 = [float(p) for p in p2]
player_positions[frame_number+int(round(25*float(line.rstrip().split(',')[0])))] = [p1, p2]
return player_positions
# Extract arcs from the trj files, given a trj file dict
def get_arcs(video_id):
trj_files = load_trj(video_id)
all_arcs = {}
for frame_number in trj_files.keys():
file_dict = trj_files[frame_number]
if 'arcs' in file_dict.keys():
for arc in file_dict['arcs']:
arcc = {}
if 'ImpactPointIn' in arc.keys():
imp_pos = arc['ImpactPointIn'].split()
arcc['imp_pos'] = [float(imp_pos[0]),float(imp_pos[1])]
if 'ArcSvaMatrix' in arc.keys():
arcc['st_s'] = float(arc['StartTime'])
arcc['et_s'] = float(arc['EndTime'])
arcc['st_f'] = frame_number+int(arcc['st_s']*25)
arcc['et_f'] = frame_number+int(arcc['et_s']*25)
x_co = [float(arc['ArcSvaMatrix'][0].split()[0]),
float(arc['ArcSvaMatrix'][0].split()[1]),
float(arc['ArcSvaMatrix'][0].split()[2]),
float(arc['ArcSvaMatrix'][0].split()[3])]
y_co = [float(arc['ArcSvaMatrix'][1].split()[0]),
float(arc['ArcSvaMatrix'][1].split()[1]),
float(arc['ArcSvaMatrix'][1].split()[2]),
float(arc['ArcSvaMatrix'][1].split()[3])]
z_co = [float(arc['ArcSvaMatrix'][2].split()[0]),
float(arc['ArcSvaMatrix'][2].split()[1]),
float(arc['ArcSvaMatrix'][2].split()[2]),
float(arc['ArcSvaMatrix'][2].split()[3])]
arcc['coeffs'] = {'x': x_co, 'y': y_co, 'z': z_co}
all_arcs[arcc['st_f']] = arcc
return all_arcs
# Use arcs dict to return dict of ball positions per frame
def build_ball_positions(video_id):
arcs = get_arcs(video_id)
ball_positions = {}
for arc in arcs.values():
st_s = arc['st_s']
et_s = arc['et_s']
st_f = arc['st_f']
et_f = arc['et_f']
x_co = arc['coeffs']['x']
y_co = arc['coeffs']['y']
z_co = arc['coeffs']['z']
for current_frame in range(st_f, et_f):
ct = st_s + (et_s-st_s)*((current_frame-st_f)/float(et_f-st_f))
ball_positions[current_frame] = [(x_co[3] * ct**3) + (x_co[2] * ct**2) + (x_co[1] * ct) + (x_co[0]),
(y_co[3] * ct**3) + (y_co[2] * ct**2) + (y_co[1] * ct) + (y_co[0]),
(z_co[3] * ct**3) + (z_co[2] * ct**2) + (z_co[1] * ct) + (z_co[0])]
return ball_positions
# Takes the arcs and ball positions dicts and builds a list of shots (a shot is made up of multiple arcs between individual hits)
def build_shots(video_id):
arcs = get_arcs(video_id)
ball_positions = build_ball_positions(video_id)
shots = []
shot = {}
prev_v = -1
od = collections.OrderedDict(sorted(arcs.items()))
for frame_num, arc in od.items():
if abs(ball_positions[arc['st_f']][0]-ball_positions[arc['et_f']-1][0]) > .5: # the ball must move at least half a metre in the arc to count towards a shot (helps with shots that hit the net)
if ball_positions[arc['st_f']][0] > ball_positions[arc['et_f']-1][0]: # direction of ball travel (left or right)
v = 1
else:
v = 0
if (prev_v == v) and ((arc['st_f'] - shot['et_f']) < 50): # same direction and recent (within 50 frames of the last arc), so extend the current shot
shot['arcs'].append(arc)
shot['st_f'] = min(shot['st_f'], arc['st_f'])
shot['et_f'] = max(shot['et_f'], arc['et_f'])
shot['st_s'] = min(shot['st_s'], arc['st_s'])
shot['et_s'] = max(shot['et_s'], arc['et_s'])
else: # otherwise it's a new shot
if prev_v > -1:
shots.append(shot)
shot = {}
prev_v = v
shot['arcs'] = [arc]
shot['st_f'] = arc['st_f']
shot['et_f'] = arc['et_f']
shot['st_s'] = arc['st_s']
shot['et_s'] = arc['et_s']
if prev_v > -1:  # flush the final shot, which the loop above never appends
    shots.append(shot)
return shots
# Get alignment frame numbers and offsets
def get_offset(video_id):
if video_id == 'V011':
video_start_frame = 18*60*25 + 15*25
video_end_frame = 4*60*60*25 + 22*60*25 + 20*25
prj_start_frame = 19*60*60*25 + 43*60*25 + 12*25 - 10
offset = prj_start_frame - video_start_frame
return video_start_frame, video_end_frame, prj_start_frame, offset
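# --- Illustrative usage sketch (not part of the original module), tying the
# --- helpers above together for match 'V011', the only video configured in
# --- get_offset(). Paths are resolved via utilities.paths as above.
if __name__ == '__main__':
    player_positions = build_player_positions('V011')  # {frame: [p1_coords, p2_coords]}
    ball_positions = build_ball_positions('V011')      # {frame: [x, y, z]}
    shots = build_shots('V011')                        # [{'arcs': [...], 'st_f': ..., 'et_f': ...}, ...]
    print('frames with player data:', len(player_positions))
    print('frames with ball data:', len(ball_positions))
    print('shots detected:', len(shots))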
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This code implements a basic, Twitter-aware tokenizer.
A tokenizer is a function that splits a string of text into words. In
Python terms, we map string and unicode objects into lists of unicode
objects.
There is not a single right way to do tokenizing. The best method
depends on the application. This tokenizer is designed to be flexible
and thus easy to adapt to new domains and tasks. The basic logic is
this:
1. The tuple regex_strings defines a list of regular expression
strings.
2. The regex_strings strings are put, in order, into a compiled
regular expression object called word_re.
3. The tokenization is done by word_re.findall(s), where s is the
user-supplied string, inside the tokenize() method of the class
Tokenizer.
4. When instantiating Tokenizer objects, there is a single option:
preserve_case. In this version it defaults to False, in which case the
tokenizer downcases everything except for emoticons; set it to True to
leave case untouched.
The __main__ method illustrates by tokenizing a few examples.
I've also included a Tokenizer method tokenize_random_tweet(). If the
twitter library is installed (http://code.google.com/p/python-twitter/)
and Twitter is cooperating, then it should tokenize a random
English-language tweet.
"""
__author__ = "Christopher Potts"
__copyright__ = "Copyright 2011, Christopher Potts"
__credits__ = []
__license__ = "Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License: http://creativecommons.org/licenses/by-nc-sa/3.0/"
__version__ = "1.0"
__maintainer__ = "Christopher Potts"
__email__ = "See the author's website"
######################################################################
import re
import html.entities
######################################################################
# The following strings are components in the regular expression
# that is used for tokenizing. It's important that phone_number
# appears first in the final regex (since it can contain whitespace).
# It also could matter that tags comes after emoticons, due to the
# possibility of having text like
#
# <:| and some text >:)
#
# Most importantly, the final element should always be last, since it
# does a last ditch whitespace-based tokenization of whatever is left.
# This particular element is used in a couple ways, so we define it
# with a name:
emoticon_string = r"""
(?:
[<>]?
[:;=8] # eyes
[\-o\*\']? # optional nose
[\)\]\(\[dDpP/\:\}\{@\|\\] # mouth
|
[\)\]\(\[dDpP/\:\}\{@\|\\] # mouth
[\-o\*\']? # optional nose
[:;=8] # eyes
[<>]?
)"""
# The components of the tokenizer:
regex_strings = (
# Phone numbers:
r"""
(?:
(?: # (international)
\+?[01]
[\-\s.]*
)?
(?: # (area code)
[\(]?
\d{3}
[\-\s.\)]*
)?
\d{3} # exchange
[\-\s.]*
\d{4} # base
)"""
,
# Emoticons:
emoticon_string
,
# HTML tags:
r"""<[^>]+>"""
,
# Twitter username:
r"""(?:@[\w_]+)"""
,
# Twitter hashtags:
r"""(?:\#+[\w_]+[\w\'_\-]*[\w_]+)"""
,
# Remaining word types:
r"""
(?:[a-z][a-z'\-_]+[a-z]) # Words with apostrophes or dashes.
|
(?:[+\-]?\d+[,/.:-]\d+[+\-]?) # Numbers, including fractions, decimals.
|
(?:[\w_]+) # Words without apostrophes or dashes.
|
(?:\.(?:\s*\.){1,}) # Ellipsis dots.
|
(?:\S) # Everything else that isn't whitespace.
"""
)
######################################################################
# This is the core tokenizing regex:
word_re = re.compile(r"""(%s)""" % "|".join(regex_strings), re.VERBOSE | re.I | re.UNICODE)
# The emoticon string gets its own regex so that we can preserve case for them as needed:
emoticon_re = re.compile(regex_strings[1], re.VERBOSE | re.I | re.UNICODE)
# These are for regularizing HTML entities to Unicode:
html_entity_digit_re = re.compile(r"&#\d+;")
html_entity_alpha_re = re.compile(r"&\w+;")
amp = "&"
######################################################################
class Tokenizer:
def __init__(self, preserve_case=False):
self.preserve_case = preserve_case
def tokenize(self, s):
"""
Argument: s -- any string or unicode object
Value: a tokenized list of strings; concatenating this list returns the original string if preserve_case=False
"""
# Try to ensure we are working with a unicode (str) object:
if isinstance(s, bytes):
    s = s.decode('utf-8', errors='replace')
else:
    s = str(s)
# Fix HTML character entities:
s = self.__html2unicode(s)
# Tokenize:
words = word_re.findall(s)
# Possibly alter the case, but avoid changing emoticons like :D into :d:
if not self.preserve_case:
words = list(map((lambda x: x if emoticon_re.search(x) else x.lower()), words))
return words
def tokenize_random_tweet(self):
"""
If the twitter library is installed and a twitter connection
can be established, then tokenize a random tweet.
"""
try:
import twitter
except ImportError:
print("Apologies. The random tweet functionality requires the Python twitter library: http://code.google.com/p/python-twitter/")
from random import shuffle
api = twitter.Api()
tweets = api.GetPublicTimeline()
if tweets:
for tweet in tweets:
if tweet.user.lang == 'en':
return self.tokenize(tweet.text)
else:
raise Exception(
"Apologies. I couldn't get Twitter to give me a public English-language tweet. Perhaps try again")
def __html2unicode(self, s):
"""
Internal method that replaces all the HTML entities in
s with their corresponding unicode characters.
"""
# First the digits:
ents = set(html_entity_digit_re.findall(s))
if len(ents) > 0:
for ent in ents:
entnum = ent[2:-1]
try:
entnum = int(entnum)
s = s.replace(ent, chr(entnum))
except:
pass
# Now the alpha versions:
ents = set(html_entity_alpha_re.findall(s))
ents = filter((lambda x: x != amp), ents)
for ent in ents:
entname = ent[1:-1]
try:
s = s.replace(ent, chr(html.entities.name2codepoint[entname]))
except:
pass
s = s.replace(amp, " and ")
return s
###############################################################################
if __name__ == '__main__':
tok = Tokenizer(preserve_case=False)
samples = (
u"RT @ #happyfuncoding: this is a typical Twitter tweet :-)",
u"HTML entities & other Web oddities can be an ácute <em class='grumpy'>pain</em> >:(",
u"It's perhaps noteworthy that phone numbers like +1 (800) 123-4567, (800) 123-4567, and 123-4567 are treated as words despite their whitespace."
)
for s in samples:
print("======================================================================")
print(s)
tokenized = tok.tokenize(s)
print("\n".join(tokenized))
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
import ldap
from ldap import filter as ldap_filter
from keystone import exception
from keystone.openstack.common import log as logging
LOG = logging.getLogger(__name__)
LDAP_VALUES = {'TRUE': True, 'FALSE': False}
CONTROL_TREEDELETE = '1.2.840.113556.1.4.805'
LDAP_SCOPES = {'one': ldap.SCOPE_ONELEVEL,
'sub': ldap.SCOPE_SUBTREE}
LDAP_DEREF = {'always': ldap.DEREF_ALWAYS,
'default': None,
'finding': ldap.DEREF_FINDING,
'never': ldap.DEREF_NEVER,
'searching': ldap.DEREF_SEARCHING}
LDAP_TLS_CERTS = {'never': ldap.OPT_X_TLS_NEVER,
'demand': ldap.OPT_X_TLS_DEMAND,
'allow': ldap.OPT_X_TLS_ALLOW}
def py2ldap(val):
if isinstance(val, str):
return val
elif isinstance(val, bool):
return 'TRUE' if val else 'FALSE'
else:
return str(val)
def ldap2py(val):
try:
return LDAP_VALUES[val]
except KeyError:
pass
try:
return int(val)
except ValueError:
pass
return val
def safe_iter(attrs):
if attrs is None:
return
elif isinstance(attrs, list):
for e in attrs:
yield e
else:
yield attrs
def parse_deref(opt):
try:
return LDAP_DEREF[opt]
except KeyError:
raise ValueError((_('Invalid LDAP deref option: %s. Choose one of: ') %
opt) + ', '.join(LDAP_DEREF.keys()))
def parse_tls_cert(opt):
try:
return LDAP_TLS_CERTS[opt]
except KeyError:
raise ValueError(_(
'Invalid LDAP TLS certs option: %(option)s. '
'Choose one of: %(options)s') % {
'option': opt,
'options': ', '.join(LDAP_TLS_CERTS.keys())})
def ldap_scope(scope):
try:
return LDAP_SCOPES[scope]
except KeyError:
raise ValueError(
_('Invalid LDAP scope: %(scope)s. Choose one of: %(options)s') % {
'scope': scope,
'options': ', '.join(LDAP_SCOPES.keys())})
_HANDLERS = {}
def register_handler(prefix, handler):
_HANDLERS[prefix] = handler
def get_handler(conn_url):
for prefix, handler in _HANDLERS.iteritems():
if conn_url.startswith(prefix):
return handler
return LdapWrapper
class BaseLdap(object):
DEFAULT_SUFFIX = "dc=example,dc=com"
DEFAULT_OU = None
DEFAULT_STRUCTURAL_CLASSES = None
DEFAULT_ID_ATTR = 'cn'
DEFAULT_OBJECTCLASS = None
DEFAULT_FILTER = None
DEFAULT_EXTRA_ATTR_MAPPING = []
DUMB_MEMBER_DN = 'cn=dumb,dc=nonexistent'
NotFound = None
notfound_arg = None
options_name = None
model = None
attribute_options_names = {}
immutable_attrs = []
attribute_ignore = []
tree_dn = None
def __init__(self, conf):
self.LDAP_URL = conf.ldap.url
self.LDAP_USER = conf.ldap.user
self.LDAP_PASSWORD = conf.ldap.password
self.LDAP_SCOPE = ldap_scope(conf.ldap.query_scope)
self.alias_dereferencing = parse_deref(conf.ldap.alias_dereferencing)
self.page_size = conf.ldap.page_size
self.use_tls = conf.ldap.use_tls
self.tls_cacertfile = conf.ldap.tls_cacertfile
self.tls_cacertdir = conf.ldap.tls_cacertdir
self.tls_req_cert = parse_tls_cert(conf.ldap.tls_req_cert)
self.attribute_mapping = {}
if self.options_name is not None:
self.suffix = conf.ldap.suffix
if self.suffix is None:
self.suffix = self.DEFAULT_SUFFIX
dn = '%s_tree_dn' % self.options_name
self.tree_dn = (getattr(conf.ldap, dn)
or '%s,%s' % (self.DEFAULT_OU, self.suffix))
idatt = '%s_id_attribute' % self.options_name
self.id_attr = getattr(conf.ldap, idatt) or self.DEFAULT_ID_ATTR
objclass = '%s_objectclass' % self.options_name
self.object_class = (getattr(conf.ldap, objclass)
or self.DEFAULT_OBJECTCLASS)
for k, v in self.attribute_options_names.iteritems():
v = '%s_%s_attribute' % (self.options_name, v)
self.attribute_mapping[k] = getattr(conf.ldap, v)
attr_mapping_opt = ('%s_additional_attribute_mapping' %
self.options_name)
attr_mapping = (getattr(conf.ldap, attr_mapping_opt)
or self.DEFAULT_EXTRA_ATTR_MAPPING)
self.extra_attr_mapping = self._parse_extra_attrs(attr_mapping)
filter = '%s_filter' % self.options_name
self.filter = getattr(conf.ldap, filter) or self.DEFAULT_FILTER
allow_create = '%s_allow_create' % self.options_name
self.allow_create = getattr(conf.ldap, allow_create)
allow_update = '%s_allow_update' % self.options_name
self.allow_update = getattr(conf.ldap, allow_update)
allow_delete = '%s_allow_delete' % self.options_name
self.allow_delete = getattr(conf.ldap, allow_delete)
self.structural_classes = self.DEFAULT_STRUCTURAL_CLASSES
if self.notfound_arg is None:
self.notfound_arg = self.options_name + '_id'
attribute_ignore = '%s_attribute_ignore' % self.options_name
self.attribute_ignore = getattr(conf.ldap, attribute_ignore)
self.use_dumb_member = getattr(conf.ldap, 'use_dumb_member')
self.dumb_member = (getattr(conf.ldap, 'dumb_member') or
self.DUMB_MEMBER_DN)
self.subtree_delete_enabled = getattr(conf.ldap,
'allow_subtree_delete')
def _not_found(self, object_id):
if self.NotFound is None:
return exception.NotFound(target=object_id)
else:
return self.NotFound(**{self.notfound_arg: object_id})
def _parse_extra_attrs(self, option_list):
mapping = {}
for item in option_list:
try:
ldap_attr, attr_map = item.split(':')
except Exception:
LOG.warn(_(
'Invalid additional attribute mapping: "%s". '
'Format must be <ldap_attribute>:<keystone_attribute>')
% item)
continue
if attr_map not in self.attribute_mapping:
LOG.warn(_('Invalid additional attribute mapping: "%(item)s". '
'Value "%(attr_map)s" must use one of %(keys)s.') %
{'item': item, 'attr_map': attr_map,
'keys': ', '.join(self.attribute_mapping.keys())})
continue
mapping[ldap_attr] = attr_map
return mapping
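    # Example (hypothetical option value): with
    #   user_additional_attribute_mapping = mail:email
    # _parse_extra_attrs(['mail:email']) returns {'mail': 'email'}, provided
    # 'email' is already a key in self.attribute_mapping for this backend.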
def get_connection(self, user=None, password=None):
handler = get_handler(self.LDAP_URL)
conn = handler(self.LDAP_URL,
self.page_size,
alias_dereferencing=self.alias_dereferencing,
use_tls=self.use_tls,
tls_cacertfile=self.tls_cacertfile,
tls_cacertdir=self.tls_cacertdir,
tls_req_cert=self.tls_req_cert)
if user is None:
user = self.LDAP_USER
if password is None:
password = self.LDAP_PASSWORD
# not all LDAP servers require authentication, so we don't bind
# if we don't have any user/pass
if user and password:
conn.simple_bind_s(user, password)
return conn
def _id_to_dn_string(self, id):
return '%s=%s,%s' % (self.id_attr,
ldap.dn.escape_dn_chars(str(id)),
self.tree_dn)
def _id_to_dn(self, id):
if self.LDAP_SCOPE == ldap.SCOPE_ONELEVEL:
return self._id_to_dn_string(id)
conn = self.get_connection()
try:
search_result = conn.search_s(
self.tree_dn, self.LDAP_SCOPE,
'(&(%(id_attr)s=%(id)s)(objectclass=%(objclass)s))' %
{'id_attr': self.id_attr,
'id': ldap.filter.escape_filter_chars(str(id)),
'objclass': self.object_class})
finally:
conn.unbind_s()
if search_result:
dn, attrs = search_result[0]
return dn
else:
return self._id_to_dn_string(id)
@staticmethod
def _dn_to_id(dn):
return ldap.dn.str2dn(dn)[0][0][1]
def _ldap_res_to_model(self, res):
obj = self.model(id=self._dn_to_id(res[0]))
for k in obj.known_keys:
if k in self.attribute_ignore:
continue
try:
v = res[1][self.attribute_mapping.get(k, k)]
except KeyError:
pass
else:
try:
obj[k] = v[0]
except IndexError:
obj[k] = None
return obj
def affirm_unique(self, values):
if values.get('name') is not None:
try:
self.get_by_name(values['name'])
except exception.NotFound:
pass
else:
raise exception.Conflict(type=self.options_name,
details=_('Duplicate name, %s.') %
values['name'])
if values.get('id') is not None:
try:
self.get(values['id'])
except exception.NotFound:
pass
else:
raise exception.Conflict(type=self.options_name,
details=_('Duplicate ID, %s.') %
values['id'])
def create(self, values):
self.affirm_unique(values)
if not self.allow_create:
action = _('LDAP %s create') % self.options_name
raise exception.ForbiddenAction(action=action)
conn = self.get_connection()
object_classes = self.structural_classes + [self.object_class]
attrs = [('objectClass', object_classes)]
for k, v in values.iteritems():
if k == 'id' or k in self.attribute_ignore:
continue
if v is not None:
attr_type = self.attribute_mapping.get(k, k)
if attr_type is not None:
attrs.append((attr_type, [v]))
extra_attrs = [attr for attr, name
in self.extra_attr_mapping.iteritems()
if name == k]
for attr in extra_attrs:
attrs.append((attr, [v]))
if 'groupOfNames' in object_classes and self.use_dumb_member:
attrs.append(('member', [self.dumb_member]))
try:
conn.add_s(self._id_to_dn(values['id']), attrs)
finally:
conn.unbind_s()
return values
def _ldap_get(self, id, filter=None):
conn = self.get_connection()
query = ('(&(%(id_attr)s=%(id)s)'
'%(filter)s'
'(objectClass=%(object_class)s))'
% {'id_attr': self.id_attr,
'id': ldap.filter.escape_filter_chars(str(id)),
'filter': (filter or self.filter or ''),
'object_class': self.object_class})
try:
attrs = list(set((self.attribute_mapping.values() +
self.extra_attr_mapping.keys())))
res = conn.search_s(self.tree_dn, self.LDAP_SCOPE, query, attrs)
except ldap.NO_SUCH_OBJECT:
return None
finally:
conn.unbind_s()
try:
return res[0]
except IndexError:
return None
def _ldap_get_all(self, filter=None):
conn = self.get_connection()
query = '(&%s(objectClass=%s))' % (filter or self.filter or '',
self.object_class)
try:
return conn.search_s(self.tree_dn,
self.LDAP_SCOPE,
query,
self.attribute_mapping.values())
except ldap.NO_SUCH_OBJECT:
return []
finally:
conn.unbind_s()
def get(self, id, filter=None):
res = self._ldap_get(id, filter)
if res is None:
raise self._not_found(id)
else:
return self._ldap_res_to_model(res)
def get_by_name(self, name, filter=None):
query = ('(%s=%s)' % (self.attribute_mapping['name'],
ldap_filter.escape_filter_chars(name)))
res = self.get_all(query)
try:
return res[0]
except IndexError:
raise self._not_found(name)
def get_all(self, filter=None):
return [self._ldap_res_to_model(x)
for x in self._ldap_get_all(filter)]
def update(self, id, values, old_obj=None):
if not self.allow_update:
action = _('LDAP %s update') % self.options_name
raise exception.ForbiddenAction(action=action)
if old_obj is None:
old_obj = self.get(id)
modlist = []
for k, v in values.iteritems():
if k == 'id' or k in self.attribute_ignore:
continue
# attribute value has not changed
if k in old_obj and old_obj[k] == v:
continue
if k in self.immutable_attrs:
msg = (_("Cannot change %(option_name)s %(attr)s") %
{'option_name': self.options_name, 'attr': k})
raise exception.ValidationError(msg)
if v is None:
if old_obj.get(k) is not None:
modlist.append((ldap.MOD_DELETE,
self.attribute_mapping.get(k, k),
None))
continue
current_value = old_obj.get(k)
if current_value is None:
op = ldap.MOD_ADD
modlist.append((op, self.attribute_mapping.get(k, k), [v]))
elif current_value != v:
op = ldap.MOD_REPLACE
modlist.append((op, self.attribute_mapping.get(k, k), [v]))
if modlist:
conn = self.get_connection()
try:
conn.modify_s(self._id_to_dn(id), modlist)
except ldap.NO_SUCH_OBJECT:
raise self._not_found(id)
finally:
conn.unbind_s()
return self.get(id)
def delete(self, id):
if not self.allow_delete:
action = _('LDAP %s delete') % self.options_name
raise exception.ForbiddenAction(action=action)
conn = self.get_connection()
try:
conn.delete_s(self._id_to_dn(id))
except ldap.NO_SUCH_OBJECT:
raise self._not_found(id)
finally:
conn.unbind_s()
def deleteTree(self, id):
conn = self.get_connection()
tree_delete_control = ldap.controls.LDAPControl(CONTROL_TREEDELETE,
0,
None)
try:
conn.delete_ext_s(self._id_to_dn(id),
serverctrls=[tree_delete_control])
except ldap.NO_SUCH_OBJECT:
raise self._not_found(id)
finally:
conn.unbind_s()
class LdapWrapper(object):
def __init__(self, url, page_size, alias_dereferencing=None,
use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
tls_req_cert='demand'):
LOG.debug(_("LDAP init: url=%s"), url)
LOG.debug(_('LDAP init: use_tls=%(use_tls)s\n'
'tls_cacertfile=%(tls_cacertfile)s\n'
'tls_cacertdir=%(tls_cacertdir)s\n'
'tls_req_cert=%(tls_req_cert)s\n'
'tls_avail=%(tls_avail)s\n') %
{'use_tls': use_tls,
'tls_cacertfile': tls_cacertfile,
'tls_cacertdir': tls_cacertdir,
'tls_req_cert': tls_req_cert,
'tls_avail': ldap.TLS_AVAIL
})
#NOTE(topol)
#for extra debugging uncomment the following line
#ldap.set_option(ldap.OPT_DEBUG_LEVEL, 4095)
using_ldaps = url.lower().startswith("ldaps")
if use_tls and using_ldaps:
raise AssertionError(_('Invalid TLS / LDAPS combination'))
if use_tls:
if not ldap.TLS_AVAIL:
raise ValueError(_('Invalid LDAP TLS_AVAIL option: %s. TLS '
'not available') % ldap.TLS_AVAIL)
if tls_cacertfile:
#NOTE(topol)
#python ldap TLS does not verify CACERTFILE or CACERTDIR
#so we add some extra simple sanity check verification
#Also, setting these values globally (i.e. on the ldap object)
#works but these values are ignored when setting them on the
#connection
if not os.path.isfile(tls_cacertfile):
raise IOError(_("tls_cacertfile %s not found "
"or is not a file") %
tls_cacertfile)
ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, tls_cacertfile)
elif tls_cacertdir:
#NOTE(topol)
#python ldap TLS does not verify CACERTFILE or CACERTDIR
#so we add some extra simple sanity check verification
#Also, setting these values globally (i.e. on the ldap object)
#works but these values are ignored when setting them on the
#connection
if not os.path.isdir(tls_cacertdir):
raise IOError(_("tls_cacertdir %s not found "
"or is not a directory") %
tls_cacertdir)
ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, tls_cacertdir)
if tls_req_cert in LDAP_TLS_CERTS.values():
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, tls_req_cert)
else:
LOG.debug(_("LDAP TLS: invalid TLS_REQUIRE_CERT Option=%s"),
tls_req_cert)
self.conn = ldap.initialize(url)
self.conn.protocol_version = ldap.VERSION3
if alias_dereferencing is not None:
self.conn.set_option(ldap.OPT_DEREF, alias_dereferencing)
self.page_size = page_size
if use_tls:
self.conn.start_tls_s()
def simple_bind_s(self, user, password):
LOG.debug(_("LDAP bind: dn=%s"), user)
return self.conn.simple_bind_s(user, password)
def unbind_s(self):
LOG.debug("LDAP unbind")
return self.conn.unbind_s()
def add_s(self, dn, attrs):
ldap_attrs = [(kind, [py2ldap(x) for x in safe_iter(values)])
for kind, values in attrs]
sane_attrs = [(kind, values
if kind != 'userPassword'
else ['****'])
for kind, values in ldap_attrs]
LOG.debug(_('LDAP add: dn=%(dn)s, attrs=%(attrs)s') % {
'dn': dn, 'attrs': sane_attrs})
return self.conn.add_s(dn, ldap_attrs)
def search_s(self, dn, scope, query, attrlist=None):
# NOTE(morganfainberg): Remove "None" singletons from this list, which
# allows us to set mapped attributes to "None" as defaults in config.
# Without this filtering, the ldap query would raise a TypeError since
# attrlist is expected to be an iterable of strings.
if attrlist is not None:
attrlist = [attr for attr in attrlist if attr is not None]
LOG.debug(_(
'LDAP search: dn=%(dn)s, scope=%(scope)s, query=%(query)s, '
'attrs=%(attrlist)s') % {
'dn': dn,
'scope': scope,
'query': query,
'attrlist': attrlist})
if self.page_size:
res = self.paged_search_s(dn, scope, query, attrlist)
else:
res = self.conn.search_s(dn, scope, query, attrlist)
o = []
for dn, attrs in res:
o.append((dn, dict((kind, [ldap2py(x) for x in values])
for kind, values in attrs.iteritems())))
return o
def paged_search_s(self, dn, scope, query, attrlist=None):
res = []
lc = ldap.controls.SimplePagedResultsControl(
controlType=ldap.LDAP_CONTROL_PAGE_OID,
criticality=True,
controlValue=(self.page_size, ''))
msgid = self.conn.search_ext(dn,
scope,
query,
attrlist,
serverctrls=[lc])
# Keep requesting pages from the LDAP server until it has no more data
while True:
# Request a page of up to 'page_size' entries from the LDAP server
rtype, rdata, rmsgid, serverctrls = self.conn.result3(msgid)
# Receive the data
res.extend(rdata)
pctrls = [c for c in serverctrls
if c.controlType == ldap.LDAP_CONTROL_PAGE_OID]
if pctrls:
# LDAP server supports pagination
est, cookie = pctrls[0].controlValue
if cookie:
# There is more data still on the server
# so we request another page
lc.controlValue = (self.page_size, cookie)
msgid = self.conn.search_ext(dn,
scope,
query,
attrlist,
serverctrls=[lc])
else:
# Exit condition: no more data on the server
break
else:
LOG.warning(_('LDAP Server does not support paging. '
'Disable paging in keystone.conf to '
'avoid this message.'))
self._disable_paging()
break
return res
def modify_s(self, dn, modlist):
ldap_modlist = [
(op, kind, (None if values is None
else [py2ldap(x) for x in safe_iter(values)]))
for op, kind, values in modlist]
sane_modlist = [(op, kind, (values if kind != 'userPassword'
else ['****']))
for op, kind, values in ldap_modlist]
LOG.debug(_('LDAP modify: dn=%(dn)s, modlist=%(modlist)s') % {
'dn': dn, 'modlist': sane_modlist})
return self.conn.modify_s(dn, ldap_modlist)
def delete_s(self, dn):
LOG.debug(_("LDAP delete: dn=%s"), dn)
return self.conn.delete_s(dn)
def delete_ext_s(self, dn, serverctrls):
LOG.debug(
_('LDAP delete_ext: dn=%(dn)s, serverctrls=%(serverctrls)s') % {
'dn': dn, 'serverctrls': serverctrls})
return self.conn.delete_ext_s(dn, serverctrls)
def _disable_paging(self):
# Disable the pagination from now on
self.page_size = 0
class EnabledEmuMixIn(BaseLdap):
"""Emulates boolean 'enabled' attribute if turned on.
Creates a groupOfNames entry holding all enabled objects of this class;
objects missing from that group are considered disabled.
Options:
* $name_enabled_emulation - boolean, on/off
* $name_enabled_emulation_dn - DN of that groupOfNames, default is
cn=enabled_$name,$tree_dn
Where $name is self.options_name ('user' or 'tenant'), $tree_dn is
self.tree_dn.
"""
def __init__(self, conf):
super(EnabledEmuMixIn, self).__init__(conf)
enabled_emulation = '%s_enabled_emulation' % self.options_name
self.enabled_emulation = getattr(conf.ldap, enabled_emulation)
enabled_emulation_dn = '%s_enabled_emulation_dn' % self.options_name
self.enabled_emulation_dn = getattr(conf.ldap, enabled_emulation_dn)
if not self.enabled_emulation_dn:
self.enabled_emulation_dn = ('cn=enabled_%ss,%s' %
(self.options_name, self.tree_dn))
def _get_enabled(self, object_id):
conn = self.get_connection()
dn = self._id_to_dn(object_id)
query = '(member=%s)' % dn
try:
enabled_value = conn.search_s(self.enabled_emulation_dn,
ldap.SCOPE_BASE,
query, ['cn'])
except ldap.NO_SUCH_OBJECT:
return False
else:
return bool(enabled_value)
finally:
conn.unbind_s()
def _add_enabled(self, object_id):
if not self._get_enabled(object_id):
conn = self.get_connection()
modlist = [(ldap.MOD_ADD,
'member',
[self._id_to_dn(object_id)])]
try:
conn.modify_s(self.enabled_emulation_dn, modlist)
except ldap.NO_SUCH_OBJECT:
attr_list = [('objectClass', ['groupOfNames']),
('member',
[self._id_to_dn(object_id)])]
if self.use_dumb_member:
attr_list[1][1].append(self.dumb_member)
conn.add_s(self.enabled_emulation_dn, attr_list)
finally:
conn.unbind_s()
def _remove_enabled(self, object_id):
conn = self.get_connection()
modlist = [(ldap.MOD_DELETE,
'member',
[self._id_to_dn(object_id)])]
try:
conn.modify_s(self.enabled_emulation_dn, modlist)
except (ldap.NO_SUCH_OBJECT, ldap.NO_SUCH_ATTRIBUTE):
pass
finally:
conn.unbind_s()
def create(self, values):
if self.enabled_emulation:
enabled_value = values.pop('enabled', True)
ref = super(EnabledEmuMixIn, self).create(values)
if 'enabled' not in self.attribute_ignore:
if enabled_value:
self._add_enabled(ref['id'])
ref['enabled'] = enabled_value
return ref
else:
return super(EnabledEmuMixIn, self).create(values)
def get(self, object_id, filter=None):
ref = super(EnabledEmuMixIn, self).get(object_id, filter)
if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
ref['enabled'] = self._get_enabled(object_id)
return ref
def get_all(self, filter=None):
if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
# had to copy BaseLdap.get_all here to filter by DN
tenant_list = [self._ldap_res_to_model(x)
for x in self._ldap_get_all(filter)
if x[0] != self.enabled_emulation_dn]
for tenant_ref in tenant_list:
tenant_ref['enabled'] = self._get_enabled(tenant_ref['id'])
return tenant_list
else:
return super(EnabledEmuMixIn, self).get_all(filter)
def update(self, object_id, values, old_obj=None):
if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
data = values.copy()
enabled_value = data.pop('enabled', None)
ref = super(EnabledEmuMixIn, self).update(object_id, data, old_obj)
if enabled_value is not None:
if enabled_value:
self._add_enabled(object_id)
else:
self._remove_enabled(object_id)
return ref
else:
return super(EnabledEmuMixIn, self).update(
object_id, values, old_obj)
def delete(self, object_id):
if self.enabled_emulation:
self._remove_enabled(object_id)
super(EnabledEmuMixIn, self).delete(object_id)
|
|
# -*- coding: utf-8 -*-
"""
chemdataextractor.cli.pos
~~~~~~~~~~~~~~~~~~~~~~~~~
Part of speech tagging commands.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import click
from ..doc import Document, Text
from ..nlp.corpus import genia_training, wsj_training, wsj_evaluation, genia_evaluation
from ..nlp.pos import TAGS, ChemApPosTagger, ChemCrfPosTagger
log = logging.getLogger(__name__)
@click.group(name='pos')
@click.pass_context
def pos_cli(ctx):
"""POS tagger commands."""
pass
@pos_cli.command()
@click.option('--output', '-o', help='Output model file.', required=True)
@click.pass_context
def train_all(ctx, output):
"""Train POS tagger on WSJ, GENIA, and both. With and without cluster features."""
click.echo('chemdataextractor.pos.train_all')
click.echo('Output: %s' % output)
ctx.invoke(train, output='%s_wsj_nocluster.pickle' % output, corpus='wsj', clusters=False)
ctx.invoke(train, output='%s_wsj.pickle' % output, corpus='wsj', clusters=True)
ctx.invoke(train, output='%s_genia_nocluster.pickle' % output, corpus='genia', clusters=False)
ctx.invoke(train, output='%s_genia.pickle' % output, corpus='genia', clusters=True)
ctx.invoke(train, output='%s_wsj_genia_nocluster.pickle' % output, corpus='wsj+genia', clusters=False)
ctx.invoke(train, output='%s_wsj_genia.pickle' % output, corpus='wsj+genia', clusters=True)
@pos_cli.command()
@click.argument('model', required=True)
@click.pass_context
def evaluate_all(ctx, model):
"""Evaluate POS taggers on WSJ and GENIA."""
click.echo('chemdataextractor.pos.evaluate_all')
click.echo('Model: %s' % model)
ctx.invoke(evaluate, model='%s_wsj_nocluster.pickle' % model, corpus='wsj', clusters=False)
ctx.invoke(evaluate, model='%s_wsj_nocluster.pickle' % model, corpus='genia', clusters=False)
ctx.invoke(evaluate, model='%s_wsj.pickle' % model, corpus='wsj', clusters=True)
ctx.invoke(evaluate, model='%s_wsj.pickle' % model, corpus='genia', clusters=True)
ctx.invoke(evaluate, model='%s_genia_nocluster.pickle' % model, corpus='wsj', clusters=False)
ctx.invoke(evaluate, model='%s_genia_nocluster.pickle' % model, corpus='genia', clusters=False)
ctx.invoke(evaluate, model='%s_genia.pickle' % model, corpus='wsj', clusters=True)
ctx.invoke(evaluate, model='%s_genia.pickle' % model, corpus='genia', clusters=True)
ctx.invoke(evaluate, model='%s_wsj_genia_nocluster.pickle' % model, corpus='wsj', clusters=False)
ctx.invoke(evaluate, model='%s_wsj_genia_nocluster.pickle' % model, corpus='genia', clusters=False)
ctx.invoke(evaluate, model='%s_wsj_genia.pickle' % model, corpus='wsj', clusters=True)
ctx.invoke(evaluate, model='%s_wsj_genia.pickle' % model, corpus='genia', clusters=True)
@pos_cli.command()
@click.option('--output', '-o', help='Output model file.', required=True)
@click.option('--corpus', type=click.Choice(['wsj', 'genia', 'wsj+genia']), help='Training corpus')
@click.option('--clusters/--no-clusters', help='Whether to use cluster features', default=True)
@click.pass_context
def train(ctx, output, corpus, clusters):
"""Train POS Tagger."""
click.echo('chemdataextractor.pos.train')
click.echo('Output: %s' % output)
click.echo('Corpus: %s' % corpus)
click.echo('Clusters: %s' % clusters)
wsj_sents = []
genia_sents = []
if corpus == 'wsj' or corpus == 'wsj+genia':
wsj_sents = list(wsj_training.tagged_sents())
# For WSJ, remove all tokens with -NONE- tag
for i, wsj_sent in enumerate(wsj_sents):
wsj_sents[i] = [t for t in wsj_sent if not t[1] == '-NONE-']
if corpus == 'genia' or corpus == 'wsj+genia':
genia_sents = list(genia_training.tagged_sents())
# Translate GENIA
for i, genia_sent in enumerate(genia_sents):
for j, (token, tag) in enumerate(genia_sent):
if tag == '(':
genia_sents[i][j] = (token, '-LRB-') # ( to -LRB- (also do for evaluation)
elif tag == ')':
genia_sents[i][j] = (token, '-RRB-') # ) to -RRB- (also do for evaluation)
elif tag == 'CT':
genia_sents[i][j] = (token, 'DT') # Typo?
elif tag == 'XT':
genia_sents[i][j] = (token, 'DT') # Typo?
elif tag == '-':
genia_sents[i][j] = (token, ':') # Single hyphen character for dash
elif tag == 'N':
genia_sents[i][j] = (token, 'NN') # Typo?
elif tag == 'PP':
genia_sents[i][j] = (token, 'PRP') # Typo?
elif tag == '' and token == ')':
genia_sents[i][j] = (token, '-RRB-') # Typo?
elif tag == '' and token == 'IFN-gamma':
genia_sents[i][j] = (token, 'NN') # Typo?
elif '|' in tag:
genia_sents[i][j] = (token, tag.split('|')[0]) # If contains |, choose first part
# Filter any tags not in the allowed tagset (Shouldn't be any left anyway)
genia_sents[i] = [t for t in genia_sent if t[1] in TAGS]
if corpus == 'wsj':
training_corpus = wsj_sents
elif corpus == 'genia':
training_corpus = genia_sents
elif corpus == 'wsj+genia':
training_corpus = wsj_sents + genia_sents
else:
raise click.ClickException('Invalid corpus')
tagger = ChemCrfPosTagger(clusters=clusters)
tagger.train(training_corpus, output)
@pos_cli.command()
@click.argument('model', required=True)
@click.option('--corpus', type=click.Choice(['wsj', 'genia']), help='Evaluation corpus')
@click.option('--clusters/--no-clusters', help='Whether to use cluster features', default=True)
@click.pass_context
def evaluate(ctx, model, corpus, clusters):
"""Evaluate performance of POS Tagger."""
click.echo('chemdataextractor.pos.evaluate')
if corpus == 'wsj':
evaluation = wsj_evaluation
sents = list(evaluation.tagged_sents())
for i, wsj_sent in enumerate(sents):
sents[i] = [t for t in wsj_sent if not t[1] == '-NONE-']
elif corpus == 'genia':
evaluation = genia_evaluation
sents = list(evaluation.tagged_sents())
# Translate GENIA bracket tags
for i, genia_sent in enumerate(sents):
for j, (token, tag) in enumerate(genia_sent):
if tag == '(':
sents[i][j] = (token, '-LRB-')
elif tag == ')':
sents[i][j] = (token, '-RRB-')
else:
raise click.ClickException('Invalid corpus')
tagger = ChemCrfPosTagger(model=model, clusters=clusters)
accuracy = tagger.evaluate(sents)
click.echo('%s on %s: %s' % (model, evaluation, accuracy))
@pos_cli.command()
@click.option('--output', '-o', type=click.File('wb'), help='Output model file.', required=True)
@click.option('--corpus', type=click.Choice(['wsj', 'genia', 'wsj+genia']), help='Training corpus')
@click.option('--clusters/--no-clusters', help='Whether to use cluster features', default=True)
@click.pass_obj
def train_perceptron(ctx, output, corpus, clusters):
"""Train Averaged Perceptron POS Tagger."""
click.echo('chemdataextractor.pos.train')
click.echo('Output: %s' % output)
click.echo('Corpus: %s' % corpus)
click.echo('Clusters: %s' % clusters)
wsj_sents = []
genia_sents = []
if corpus == 'wsj' or corpus == 'wsj+genia':
wsj_sents = list(wsj_training.tagged_sents())
# For WSJ, remove all tokens with -NONE- tag
for i, wsj_sent in enumerate(wsj_sents):
wsj_sents[i] = [t for t in wsj_sent if not t[1] == '-NONE-']
if corpus == 'genia' or corpus == 'wsj+genia':
genia_sents = list(genia_training.tagged_sents())
# Translate GENIA
for i, genia_sent in enumerate(genia_sents):
for j, (token, tag) in enumerate(genia_sent):
if tag == '(':
                    genia_sents[i][j] = (token, '-LRB-')  # ( to -LRB- (also do for evaluation)
elif tag == ')':
genia_sents[i][j] = (token, '-RRB-') # ) to -RRB- (also do for evaluation)
elif tag == 'CT':
genia_sents[i][j] = (token, 'DT') # Typo?
elif tag == 'XT':
genia_sents[i][j] = (token, 'DT') # Typo?
elif tag == '-':
genia_sents[i][j] = (token, ':') # Single hyphen character for dash
elif tag == 'N':
genia_sents[i][j] = (token, 'NN') # Typo?
elif tag == 'PP':
genia_sents[i][j] = (token, 'PRP') # Typo?
elif tag == '' and token == ')':
genia_sents[i][j] = (token, '-RRB-') # Typo?
elif tag == '' and token == 'IFN-gamma':
genia_sents[i][j] = (token, 'NN') # Typo?
elif '|' in tag:
genia_sents[i][j] = (token, tag.split('|')[0]) # If contains |, choose first part
# Filter any tags not in the allowed tagset (Shouldn't be any left anyway)
genia_sents[i] = [t for t in genia_sent if t[1] in TAGS]
if corpus == 'wsj':
training_corpus = wsj_sents
elif corpus == 'genia':
training_corpus = genia_sents
elif corpus == 'wsj+genia':
training_corpus = wsj_sents + genia_sents
else:
raise click.ClickException('Invalid corpus')
tagger = ChemApPosTagger(clusters=clusters)
tagger.train(training_corpus)
tagger.save(output)
@pos_cli.command()
@click.argument('model', required=True)
@click.option('--corpus', type=click.Choice(['wsj', 'genia']), help='Evaluation corpus')
@click.pass_obj
def evaluate_perceptron(ctx, model, corpus):
"""Evaluate performance of Averaged Perceptron POS Tagger."""
click.echo('chemdataextractor.pos.evaluate')
if corpus == 'wsj':
evaluation = wsj_evaluation
sents = list(evaluation.tagged_sents())
for i, wsj_sent in enumerate(sents):
sents[i] = [t for t in wsj_sent if not t[1] == u'-NONE-']
elif corpus == 'genia':
evaluation = genia_evaluation
sents = list(evaluation.tagged_sents())
# Translate GENIA bracket tags
for i, genia_sent in enumerate(sents):
for j, (token, tag) in enumerate(genia_sent):
if tag == u'(':
sents[i][j] = (token, u'-LRB-')
elif tag == u')':
sents[i][j] = (token, u'-RRB-')
else:
raise click.ClickException('Invalid corpus')
tagger = ChemApPosTagger(model=model)
accuracy = tagger.evaluate(sents)
click.echo('%s on %s: %s' % (model, evaluation, accuracy))
@pos_cli.command()
@click.option('--output', '-o', type=click.File('w', encoding='utf8'), help='Output file.', default=click.get_text_stream('stdout'))
@click.argument('input', type=click.File('rb'), default=click.get_binary_stream('stdin'))
@click.pass_obj
def tag(ctx, input, output):
"""Output POS-tagged tokens."""
log.info('chemdataextractor.pos.tag')
log.info('Reading %s' % input.name)
doc = Document.from_file(input)
for element in doc.elements:
if isinstance(element, Text):
for sentence in element.sentences:
output.write(u' '.join(u'/'.join([token, tag]) for token, tag in sentence.pos_tagged_tokens))
output.write(u'\n')
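# Illustrative command-line sketches (assuming this pos_cli group is mounted under the
# usual ``cde pos`` entry point; model and file names below are placeholders):
#
#   cde pos train -o chempos_crf.model --corpus wsj+genia
#   cde pos evaluate chempos_crf.model --corpus genia
#   cde pos train_perceptron -o chempos_ap.pickle --corpus wsj+genia --no-clusters
#   cde pos tag paper.html -o tagged.txt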
|
|
import numpy as np
from simupy.block_diagram import SimulationMixin
import warnings
need_state_equation_function_msg = ("if dim_state > 0, DynamicalSystem must"
+ " have a state_equation_function")
need_output_equation_function_msg = ("if dim_state == 0, DynamicalSystem must"
+ " have an output_equation_function")
zero_dim_output_msg = "A DynamicalSystem must provide an output"
def full_state_output(t, x, *args):
"""
A drop-in ``output_equation_function`` for stateful systems that provide
    the full state as output directly.
"""
return x
class DynamicalSystem(SimulationMixin):
"""
A dynamical system which models systems of the form::
xdot(t) = state_equation_function(t,x,u)
y(t) = output_equation_function(t,x)
or::
y(t) = output_equation_function(t,u)
These could also represent discrete-time systems, in which case xdot(t)
represents x[k+1].
This can also model discontinuous systems. Discontinuities must occur on
zero-crossings of the ``event_equation_function``, which take the same
arguments as ``output_equation_function``, depending on ``dim_state``.
At the zero-crossing, ``update_equation_function`` is called with the same
arguments. If ``dim_state`` > 0, the return value of
``update_equation_function`` is used as the state of the system immediately
after the discontinuity.
"""
def __init__(self, state_equation_function=None,
output_equation_function=None, event_equation_function=None,
update_equation_function=None, dim_state=0, dim_input=0,
dim_output=0, num_events=0, dt=0, initial_condition=None):
"""
Parameters
----------
state_equation_function : callable, optional
The derivative (or update equation) of the system state. Not needed
if ``dim_state`` is zero.
output_equation_function : callable, optional
The output equation of the system. A system must have an
``output_equation_function``. If not set, uses full state output.
event_equation_function : callable, optional
The function whose output determines when discontinuities occur.
update_equation_function : callable, optional
The function called when a discontinuity occurs.
dim_state : int, optional
Dimension of the system state. Optional, defaults to 0.
dim_input : int, optional
Dimension of the system input. Optional, defaults to 0.
dim_output : int, optional
Dimension of the system output. Optional, defaults to dim_state.
num_events : int, optional
Dimension of the system event functions. Optional, defaults to 0.
dt : float, optional
Sample rate of the system. Optional, defaults to 0 representing a
continuous time system.
initial_condition : array_like of numerical values, optional
Array or Matrix used as the initial condition of the system.
Defaults to zeros of the same dimension as the state.
"""
self.dim_state = dim_state
self.dim_input = dim_input
self.dim_output = dim_output or dim_state
self.num_events = num_events
self.state_equation_function = state_equation_function
self.output_equation_function = (
full_state_output
if output_equation_function is None and self.dim_state > 0
else output_equation_function
)
self.initial_condition = initial_condition
if ((num_events != 0) and ((event_equation_function is None) or
(update_equation_function is None))):
raise ValueError("Cannot provide event_equation_function or " +
"update_Equation_function with num_events == 0")
self.event_equation_function = event_equation_function
# TODO: do some defensive checks and/or wrapping of update function to consume
# a channel number
self.update_equation_function = update_equation_function
self.dt = dt
self.validate()
@property
def dt(self):
return self._dt
@dt.setter
def dt(self, dt):
if dt <= 0:
self._dt = 0
return
if self.num_events != 0:
raise ValueError("Cannot set dt > 0 and use event API " +
"with non-zero num_events")
self.num_events = 1
self._dt = dt
self.event_equation_function = lambda t, *args: np.atleast_1d(np.sin(np.pi*t/self.dt))
# if t else np.sin(np.finfo(np.float_).eps))
self._state_equation_function = self.state_equation_function
self._output_equation_function = self.output_equation_function
self.state_equation_function = \
lambda *args: np.zeros(self.dim_state)
if self.dim_state:
self.update_equation_function = (
lambda *args, event_channels=0: self._state_equation_function(*args)
)
else:
self._prev_output = 0
def _update_equation_function(*args, event_channels=0):
self._prev_output = self._output_equation_function(*args)
self.update_equation_function = _update_equation_function
self.output_equation_function = lambda *args: self._prev_output
@property
def initial_condition(self):
if self._initial_condition is None:
self._initial_condition = np.zeros(self.dim_state)
return self._initial_condition
@initial_condition.setter
def initial_condition(self, initial_condition):
if initial_condition is not None:
if isinstance(initial_condition, np.ndarray):
size = initial_condition.size
else:
size = len(initial_condition)
assert size == self.dim_state
self._initial_condition = np.array(initial_condition,
dtype=np.float_).reshape(-1)
else:
self._initial_condition = None
def prepare_to_integrate(self, t0, state_or_input=None):
if not self.dim_state and self.num_events:
self.update_equation_function(t0, state_or_input)
if self.dim_state or self.dim_input:
return self.output_equation_function(t0, state_or_input)
else:
return self.output_equation_function(t0)
def validate(self):
if self.dim_output == 0:
raise ValueError(zero_dim_output_msg)
if (self.dim_state > 0
and getattr(self, 'state_equation_function', None) is None):
raise ValueError(need_state_equation_function_msg)
if (self.dim_state == 0
and getattr(self, 'output_equation_function', None) is None):
raise ValueError(need_output_equation_function_msg)
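# Hedged usage sketch (not part of the original module): a scalar exponential decay
# system, xdot = -a*x, relying on the full_state_output default.
def _example_dynamical_system():
    """Build a one-state system and evaluate its output at the initial condition."""
    a = 0.5  # illustrative decay rate
    system = DynamicalSystem(
        state_equation_function=lambda t, x, *args: -a * x,
        dim_state=1,
        initial_condition=[1.0],
    )
    # full_state_output was substituted automatically, so the output is the state
    return system.output_equation_function(0.0, system.initial_condition)  # -> array([1.])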
def SystemFromCallable(incallable, dim_input, dim_output, dt=0):
"""
Construct a memoryless system from a callable.
Parameters
----------
incallable : callable
Function to use as the output_equation_function. Should have signature
(t, u) if dim_input > 0 or (t) if dim_input = 0.
dim_input : int
Dimension of input.
dim_output : int
        Dimension of output.
    dt : float, optional
        Sample rate of the system. Optional, defaults to 0, representing a
        continuous-time system.
    """
system = DynamicalSystem(output_equation_function=incallable,
dim_input=dim_input, dim_output=dim_output, dt=dt)
return system
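# Hedged usage sketch (not part of the original module): wrapping a plain callable
# as a memoryless source system with no inputs.
def _example_system_from_callable():
    """Return the source output at t = 1.0 (approximately [0.841])."""
    source = SystemFromCallable(lambda t: np.atleast_1d(np.sin(t)), 0, 1)
    return source.output_equation_function(1.0)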
class SwitchedSystem(DynamicalSystem):
"""
Provides a useful pattern for discontinuous systems where the state and
output equations change depending on the value of a function of the state
and/or input (``event_variable_equation_function``). Most of the usefulness
comes from constructing the ``event_equation_function`` with a Bernstein
basis polynomial with roots at the boundaries. This class also provides
logic for outputting the correct state and output equation based on the
``event_variable_equation_function`` value.
"""
def __init__(self, state_equations_functions=None,
output_equations_functions=None,
event_variable_equation_function=None, event_bounds=None,
state_update_equation_function=None, dim_state=0, dim_input=0,
dim_output=0, initial_condition=None):
"""
Parameters
----------
state_equations_functions : array_like of callables, optional
The derivative (or update equation) of the system state. Not needed
if ``dim_state`` is zero. The array indexes the
event-state and should be one more than the number of event bounds.
This should also be indexed to match the boundaries (i.e., the
first function is used when the event variable is below the first
event_bounds value). If only one callable is provided, the callable
is used in each condition.
output_equations_functions : array_like of callables, optional
The output equation of the system. A system must have an
``output_equation_function``. If not set, uses full state output.
The array indexes the event-state and should be one more than the
number of event bounds. This should also be indexed to match the
boundaries (i.e., the first function is used when the event
variable is below the first event_bounds value). If only one
callable is provided, the callable is used in each condition.
event_variable_equation_function : callable
When the output of this function crosses the values in
            ``event_bounds``, a discontinuity event occurs.
event_bounds : array_like of floats
            Defines the boundary points that trigger discontinuity events based
on the output of ``event_variable_equation_function``.
state_update_equation_function : callable, optional
When an event occurs, the state update equation function is called
to determine the state update. If not set, uses full state output,
so the state is not changed upon a zero-crossing of the event
variable function.
dim_state : int, optional
Dimension of the system state. Optional, defaults to 0.
dim_input : int, optional
Dimension of the system input. Optional, defaults to 0.
dim_output : int, optional
Dimension of the system output. Optional, defaults to dim_state.
"""
self.dim_state = dim_state
self.dim_input = dim_input
self.dim_output = dim_output or dim_state
self.event_bounds = event_bounds
self.state_equations_functions = np.empty(self.n_conditions,
dtype=object)
self.state_equations_functions[:] = state_equations_functions
self.output_equations_functions = np.empty(self.n_conditions,
dtype=object)
self.output_equations_functions[:] = (
full_state_output
if output_equations_functions is None and self.dim_state > 0
else output_equations_functions
)
self.event_variable_equation_function = \
event_variable_equation_function
self.state_update_equation_function = (
state_update_equation_function or
full_state_output
)
self.initial_condition = initial_condition
self.dt = 0
self.validate()
def validate(self):
super().validate()
if (self.dim_state > 0
and np.any(np.equal(self.state_equations_functions, None))):
raise ValueError(need_state_equation_function_msg)
if (self.dim_state == 0
and np.any(np.equal(self.output_equations_functions, None))):
raise ValueError(need_output_equation_function_msg)
if self.event_variable_equation_function is None:
raise ValueError("A SwitchedSystem requires " +
"event_variable_equation_function")
@property
def event_bounds(self):
return self._event_bounds
@event_bounds.setter
def event_bounds(self, event_bounds):
if event_bounds is None:
raise ValueError("A SwitchedSystem requires event_bounds")
self._event_bounds = np.array(event_bounds).reshape(1, -1)
self.n_conditions = self._event_bounds.size + 1
if self.n_conditions == 2:
self.event_bounds_range = 1
else:
self.event_bounds_range = np.diff(self.event_bounds[0, [0, -1]])
def output_equation_function(self, *args):
return self.output_equations_functions[self.condition_idx](*args)
def state_equation_function(self, *args):
return self.state_equations_functions[self.condition_idx](*args)
def event_equation_function(self, *args):
event_var = self.event_variable_equation_function(*args)
return np.prod(
(self.event_bounds_range-self.event_bounds)*event_var -
self.event_bounds*(self.event_bounds_range - event_var),
axis=1
)
def update_equation_function(self, *args):
event_var = self.event_variable_equation_function(*args)
if self.condition_idx is None:
self.condition_idx = np.where(np.all(np.r_[
np.c_[[[True]], event_var >= self.event_bounds],
np.c_[event_var <= self.event_bounds, [[True]]]
], axis=0))[0][0]
return
sq_dist = (event_var - self.event_bounds)**2
crossed_root_idx = np.where(sq_dist == np.min(sq_dist))[1][0]
if crossed_root_idx == self.condition_idx:
self.condition_idx += 1
elif crossed_root_idx == self.condition_idx-1:
self.condition_idx -= 1
else:
warnings.warn("SwitchedSystem did not cross a neighboring " +
"boundary. This may indicate an integration " +
"error. Continuing without updating " +
"condition_idx", UserWarning)
return self.state_update_equation_function(*args)
def prepare_to_integrate(self):
if self.dim_state:
event_var = self.event_variable_equation_function(0,
self.initial_condition)
self.condition_idx = np.where(np.all(np.r_[
np.c_[[[True]], event_var >= self.event_bounds],
np.c_[event_var <= self.event_bounds, [[True]]]
], axis=0))[0][0]
else:
self.condition_idx = None
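# Hedged usage sketch (not part of the original module): one-state dynamics that
# switch sign at an event boundary of x = 0. The values are illustrative only.
def _example_switched_system():
    """Construct a two-condition SwitchedSystem and report the initial condition index."""
    system = SwitchedSystem(
        state_equations_functions=[lambda t, x, *args: -1.0 + 0.0 * x,
                                   lambda t, x, *args: 1.0 + 0.0 * x],
        event_variable_equation_function=lambda t, x, *args: x,
        event_bounds=[0.0],
        dim_state=1,
        initial_condition=[0.5],
    )
    system.prepare_to_integrate()
    return system.condition_idx  # -> 1, i.e. above the single event bound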
class LTISystem(DynamicalSystem):
"""
A linear, time-invariant system.
"""
def __init__(self, *args, initial_condition=None, dt=0):
"""
Construct an LTI system with the following input formats:
1. state matrix A, input matrix B, output matrix C for systems with
state::
dx_dt = Ax + Bu
            y = Cx
2. state matrix A, input matrix B for systems with state, assume full
state output::
dx_dt = Ax + Bu
y = Ix
3. gain matrix K for systems without state::
y = Kx
The matrices should be numeric arrays of consistent shape. The class
provides ``A``, ``B``, ``C`` and ``F``, ``G``, ``H`` aliases for the
matrices of systems with state, as well as a ``K`` alias for the gain
matrix. The ``data`` alias provides the matrices as a tuple.
"""
if len(args) not in (1, 2, 3):
raise ValueError("LTI system expects 1, 2, or 3 args")
self.num_events = 0
self.event_equation_function = None
self.update_equation_function = None
# TODO: setup jacobian functions
if len(args) == 1:
self.gain_matrix = gain_matrix = np.array(args[0])
self.dim_input = (self.gain_matrix.shape[1]
if len(gain_matrix.shape) > 1
else 1)
self.dim_output = self.gain_matrix.shape[0]
self.dim_state = 0
self.initial_condition = None
self.state_equation_function = None
self.output_equation_function = \
lambda t, x: (gain_matrix@x).reshape(-1)
self.dt = dt
return
if len(args) == 2:
state_matrix, input_matrix = args
output_matrix = np.eye(
                getattr(state_matrix, 'shape', (len(state_matrix),))[0]
)
elif len(args) == 3:
state_matrix, input_matrix, output_matrix = args
if len(input_matrix.shape) == 1:
input_matrix = input_matrix.reshape(-1, 1)
state_matrix = np.array(state_matrix)
input_matrix = np.array(input_matrix)
output_matrix = np.array(output_matrix)
self.dim_state = state_matrix.shape[0]
self.dim_input = input_matrix.shape[1]
self.dim_output = output_matrix.shape[0]
self.state_matrix = state_matrix
self.input_matrix = input_matrix
self.output_matrix = output_matrix
self.initial_condition = initial_condition
if self.dim_input:
self.state_equation_function = \
(lambda t, x, u=np.zeros(self.dim_input): \
(state_matrix@x + input_matrix@u))
else:
self.state_equation_function = lambda t, x, u=np.zeros(0): state_matrix@x
self.output_equation_function = \
lambda t, x: (output_matrix@x)
self.dt = dt
self.validate()
def validate(self):
super().validate()
if self.dim_state:
assert self.state_matrix.shape[1] == self.dim_state
assert self.input_matrix.shape[0] == self.dim_state
assert self.output_matrix.shape[1] == self.dim_state
@property
def data(self):
if self.dim_state:
return self.state_matrix, self.input_matrix, self.output_matrix
else:
return self.gain_matrix
@property
def A(self):
return self.state_matrix
@property
def F(self):
return self.state_matrix
@property
def B(self):
return self.input_matrix
@property
def G(self):
return self.input_matrix
@property
def C(self):
return self.output_matrix
@property
def H(self):
return self.output_matrix
@property
def K(self):
return self.gain_matrix
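# Hedged usage sketch (not part of the original module): a double integrator using
# the two-argument form, which supplies an identity output matrix.
def _example_lti_double_integrator():
    """Evaluate xdot = A x + B u at x = [0, 1], u = [2] (expected [1, 2])."""
    A = np.array([[0., 1.], [0., 0.]])
    B = np.array([[0.], [1.]])
    system = LTISystem(A, B)
    return system.state_equation_function(0.0, np.array([0., 1.]), np.array([2.]))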
|
|
"""
mobile
=======
Devices which (are supposed to) move in a predictable way
Configurable parameters::
{
"area_centre" : e.g. "London, UK" } optional, but both must be specified if either are. Points-to-visit will be within this set.
"area_radius" : e.g. "Manchester, UK" }
"num_locations" : 10 The total number of defined locations
"points_to_visit" : 4 Number of these locations that any individual device can visit (MUST be <= num_locations!). Optional, but if specified then must be at least 2
"update_period" : "PT1H" (optional) How often to update position
"generate_fleet_management_metrics" : False If true then output several properties to do with fleet management (fuel, miles etc.)
"route_plan" : null (optional) If set then use realistic route-planning, with given mode (e.g. "walking", "driving")
"google_maps_key" : "xyz" Google Maps now requires this. Often defined in ../synth_accounts/default.json
}
Device properties created::
{
"latitude" : latitude in degrees as a floating-point number
"longitude" : longitude in degrees as a floating-point number
}
"""
from .device import Device
from common.geo import google_maps, geo
import random, math
import isodate
import logging
MINUTES = 60
HOURS = MINUTES * 60
DAYS = HOURS * 24
WEEKS = DAYS * 7
# Because we need to cache the geo point-picker, we have two levels of hierarchy:
# 1) Mobile behaviour may be instantiated by entirely different, unconnected groups of devices - for example mobile pallets in England and mobile trucks in LA.
# So we call each of these a "loc_group" and cache the (expensive) point picker and location lookups per loc_group.
# 2) All devices in that loc_group then share a set of potential locations they can visit
# But each device sets its own unique fixed itinerary between some of those locations (points[])
# One reason for the above design is to minimise the combinations of routes, which would otherwise drive up our Google Maps bill by the factorial of the number of locations!
# We gradually move from point to point, and dwell for a while at each point
DEFAULT_UPDATE_PERIOD = "PT1H"
MPH_MIN = 5
MPH_MAX = 70
SEND_AT_LEAST_EVERY = 99999999999 # Even when not moving, send an update at least this often (large number for never)
DEFAULT_NUMBER_OF_LOCATIONS = 10
DEFAULT_POINTS_TO_VISIT = 4
DEFAULT_MIN_DWELL_H = 3
DEFAULT_MAX_DWELL_H = 24*14
DEFAULT_STUCK_IN_TRANSIT_MTBF = 1 * WEEKS # amount of travel time, not elapsed time
DEFAULT_STUCK_IN_TRANSIT_RECOVERY_DURATION = 1 * WEEKS
MPG = 8 # USA levels of fuel-efficiency!
LATLON_TO_MILES = 88 # Very approximate conversion factor from "latlong distance in degrees" to miles!
class Location_group():
""" A group of locations that devices might visit """
def __init__(self, context, num_locs, area_centre, area_radius, first_location_at_centre=False):
self.pp = geo.point_picker() # Very expensive, so do only once
area = None
if area_centre != None:
area = [area_centre, area_radius]
self.google_maps_key = context.get("google_maps_key", None)
self.locations = [] # Array of (lon,lat,address)
for L in range(num_locs): # Choose the locations that any devices in this loc group can visit
first_loc = first_location_at_centre and (L==0)
while True:
if first_loc:
(lon,lat) = google_maps.address_to_lon_lat(area_centre)
else:
(lon,lat) = self.pp.pick_point(area, self.google_maps_key)
address_info = google_maps.lon_lat_to_address(lon, lat, self.google_maps_key)
if ("address_postal_code" in address_info) and (("address_postal_town" in address_info) or ("address_route" in address_info)): # Only use locations which have addresses (e.g. don't accidentally pick the sea!)
break
if first_loc: # Avoid infinite loop if first location doesn't have required address info
break
if "address_postal_town" in address_info:
addr = address_info["address_postal_town"] + " " + address_info["address_postal_code"]
else:
addr = address_info["address_route"] + " " + address_info["address_postal_code"]
logging.info("Location "+str(L)+" for mobile devices to visit is "+repr(addr)+" at "+str((lon,lat)))
self.locations.append( (lon,lat, addr) )
self.base_location = random.randrange(0, num_locs)
class Route_Follower():
""" Understands how to follow a route made of individual segments """
def __init__(self, route):
self.route = route
# logging.info("Route_Follower with route: ")
# for r in self.route:
# logging.info(str(r))
self.route_segment = 0
self.seconds_into_segment = 0
def current_latlon(self):
seg = self.route[self.route_segment]
frac = float(self.seconds_into_segment) / seg["duration"]
# logging.info("frac="+str(frac))
lat = seg["start_lat"] * (1.0-frac) + seg["end_lat"] * frac
lon = seg["start_lng"] * (1.0-frac) + seg["end_lng"] * frac
return { "latitude" : lat, "longitude" : lon }
def time_has_passed(self, secs):
# logging.info("time_has_passed("+str(secs)+")")
remaining_secs = secs
while True:
seg = self.route[self.route_segment]
# logging.info("route_segment="+str(self.route_segment)+" duration="+str(seg["duration"])+" seconds_into_segment="+str(self.seconds_into_segment)+" remaining_secs="+str(remaining_secs))
if self.seconds_into_segment + remaining_secs < seg["duration"]:
self.seconds_into_segment += remaining_secs
break
else: # Move to next segment
remaining_secs -= seg["duration"] - self.seconds_into_segment
if self.route_segment >= len(self.route)-1: # If this was the last segment
self.seconds_into_segment = seg["duration"] # go to the end of it
break
else:
self.seconds_into_segment = 0
self.route_segment += 1
# logging.info("Leaving thp() with route_segment = "+str(self.route_segment)+" seconds_into_segment="+str(self.seconds_into_segment)+" remaining_secs="+str(remaining_secs))
def journey_complete(self):
if self.route_segment == len(self.route)-1:
if self.seconds_into_segment >= self.route[self.route_segment]["duration"]:
return True
return False
def total_journey_time(self):
t = 0
for seg in self.route:
t += seg["duration"]
return t
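# Hedged sketch (not part of the original module): the segment fields Route_Follower
# reads are start_lat/start_lng/end_lat/end_lng/duration (seconds); the coordinates
# below are invented.
def _example_route_follower():
    """Advance half-way through the first segment and return the interpolated point."""
    route = [
        {"start_lat": 51.50, "start_lng": -0.12, "end_lat": 51.45, "end_lng": -0.97, "duration": 1800},
        {"start_lat": 51.45, "start_lng": -0.97, "end_lat": 51.40, "end_lng": -1.30, "duration": 1200},
    ]
    follower = Route_Follower(route)
    follower.time_has_passed(900)
    return follower.current_latlon()  # approximately {"latitude": 51.475, "longitude": -0.545}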
class Mobile(Device):
# Class variables
loc_groups = {}
def __init__(self, instance_name, time, engine, update_callback, context, params):
super(Mobile,self).__init__(instance_name, time, engine, update_callback, context, params)
self.generate_addresses = params["mobile"].get("generate_addresses", False)
self.area_centre = params["mobile"].get("area_centre", None)
self.area_radius = params["mobile"].get("area_radius", None)
num_locs = params["mobile"].get("num_locations", DEFAULT_NUMBER_OF_LOCATIONS)
self.points_to_visit = params["mobile"].get("points_to_visit", DEFAULT_POINTS_TO_VISIT)
assert self.points_to_visit <= num_locs, "for mobile devices, points_to_visit must be <= num_locations"
self.fleet_mgmt = params["mobile"].get("generate_fleet_management_metrics", False)
self.update_period = isodate.parse_duration(params["mobile"].get("update_period", DEFAULT_UPDATE_PERIOD)).total_seconds()
self.route_plan = params["mobile"].get("route_plan", None)
self.dwell_h_min = params["mobile"].get("dwell_h_min", DEFAULT_MIN_DWELL_H) # "dwell" is how long an asset dwells at each target location
self.dwell_h_max = params["mobile"].get("dwell_h_max", DEFAULT_MAX_DWELL_H)
self.stuck_in_transit_mtbf = params["mobile"].get("stuck_in_transit_mtbf", DEFAULT_STUCK_IN_TRANSIT_MTBF)
self.stuck_in_transit_recovery_duration = params["mobile"].get("stuck_in_transit_recovery_duration", DEFAULT_STUCK_IN_TRANSIT_RECOVERY_DURATION)
self.stuck_in_transit = False
self.tire_deflation_rate = min(1.0, 1.0 - random.gauss(0.001, 0.0001))
first_location_at_centre = params["mobile"].get("first_location_at_centre", False)
the_key = str(self.area_centre) + "." + str(self.area_radius) # Needs to be unique-enough between location groups
if the_key not in Mobile.loc_groups:
Mobile.loc_groups[the_key] = Location_group(context, num_locs, self.area_centre, self.area_radius, first_location_at_centre) # Creates a new group
self.loc_group = Mobile.loc_groups[the_key]
# Choose which points this device will move between
self.points = [] # Array of indices into self.loc_group.locations[]
self.points.append(self.loc_group.base_location) # All devices start at the base location
for P in range(self.points_to_visit-1):
while True:
loc = random.randrange(0, len(self.loc_group.locations))
if loc not in self.points:
break # Ensure no repeats (which means we'll hang if we try to choose more points than locations!)
self.points.append(loc)
if self.fleet_mgmt:
self.pump_up_tires()
self.prepare_new_journey(0,1)
self.engine.register_event_in(self.update_period, self.tick_update_position, self, self)
def comms_ok(self):
return super(Mobile,self).comms_ok()
def external_event(self, event_name, arg):
super(Mobile,self).external_event(event_name, arg)
def close(self):
super(Mobile,self).close()
# Private methods
def miles_between(self, lon1,lat1, lon2,lat2):
(delta_lon, delta_lat) = (lon2-lon1, lat2-lat1)
return math.sqrt(delta_lon * delta_lon + delta_lat * delta_lat) * LATLON_TO_MILES
def update_lon_lat(self):
if self.route_plan:
self.set_properties(self.route_follower.current_latlon())
else: # Just driven linearly between the two points
(prev_lon, prev_lat) = (self.get_property_or_None("longitude"), self.get_property_or_None("latitude"))
(lon_from, lat_from) = self.loc_group.locations[self.points[self.from_point]][0:2]
(lon_to, lat_to) = self.loc_group.locations[self.points[self.to_point]][0:2]
lon = lon_from * (1.0 - self.travel_fraction) + lon_to * self.travel_fraction
lat = lat_from * (1.0 - self.travel_fraction) + lat_to * self.travel_fraction
self.set_properties({ "longitude" : lon, "latitude" : lat }) # Important to update these together (some client apps don't cope well with lat/lon being split between messages, even if contemporaneous)
if self.fleet_mgmt:
if prev_lon is not None:
delta_miles = self.miles_between(prev_lon, prev_lat, lon, lat)
self.set_property("miles", int(10*delta_miles)/10.0)
self.set_property("av_speed_mph", int(delta_miles/(self.update_period/3600)))
self.set_property("fuel_gallons", int(100*(delta_miles/MPG))/100.0)
def update_moving_and_location(self):
self.set_property("moving", self.dwell_count == 0)
if self.dwell_count == 0:
self.set_property("location_mobile", None)
else:
self.set_property("location_mobile", self.loc_group.locations[self.points[self.from_point]][2])
def update_everything(self):
self.update_lon_lat()
self.update_moving_and_location()
def tick_update_position(self, _):
if self.dwell_count > 0: # Stationary at an official Location
if (self.dwell_count % SEND_AT_LEAST_EVERY)==0:
self.update_lon_lat()
self.dwell_count -= 1
if self.dwell_count == 0: # About to move
self.update_everything()
else: # In transit (should be moving)
if not self.stuck_in_transit:
if self.stuck_in_transit_mtbf is not None:
if random.random() < float(self.update_period) / self.stuck_in_transit_mtbf:
logging.info(self.get_property("$id")+" is now stuck in transit")
self.stuck_in_transit = True
else: # IS stuck in transit
if random.random() < float(self.update_period) / self.stuck_in_transit_recovery_duration:
logging.info(self.get_property("$id")+" is now unstuck and resuming transit")
self.stuck_in_transit = False
if not self.stuck_in_transit:
if self.route_plan:
self.route_follower.time_has_passed(self.update_period)
self.update_lon_lat()
if self.route_follower.journey_complete():
self.prepare_new_journey((self.from_point + 1) % self.points_to_visit, (self.to_point + 1) % self.points_to_visit)
else:
self.travel_fraction += self.travel_rate
if self.travel_fraction <= 1.0:
self.update_lon_lat()
else: # Reached destination
self.prepare_new_journey((self.from_point + 1) % self.points_to_visit, (self.to_point + 1) % self.points_to_visit)
if self.fleet_mgmt:
tp = self.get_property("tire_pressure_psi")
if tp < 25:
self.pump_up_tires() # Pump tire up again
else:
self.set_property("tire_pressure_psi", tp * self.tire_deflation_rate)
self.engine.register_event_in(self.update_period, self.tick_update_position, self, self)
def prepare_new_journey(self, from_point, to_point):
self.from_point = from_point
self.to_point = to_point
self.travel_fraction = 0.0
# How far to travel, and speed?
(lon_from, lat_from) = self.loc_group.locations[self.points[self.from_point]][0:2]
(lon_to, lat_to) = self.loc_group.locations[self.points[self.to_point]][0:2]
if self.route_plan:
self.route_follower = Route_Follower(google_maps.get_route_from_lat_lons(lat_from, lon_from, lat_to, lon_to, mode=self.route_plan, google_maps_api_key = self.loc_group.google_maps_key))
logging.info("Journey prepared for " + str(self.get_property("$id")) +
" from " + self.loc_group.locations[self.points[self.from_point]][2] +
" to " + self.loc_group.locations[self.points[self.to_point]][2] +
" with total journey time " + str(self.route_follower.total_journey_time()))
else:
miles = self.miles_between(lon_from, lat_from, lon_to, lat_to)
mph = random.randrange(MPH_MIN, MPH_MAX)
ticks_of_travel = (miles / mph) / (self.update_period / 3600.0) # If we try to move from a point to itself, this will be zero
# therefore what fraction of entire distance to travel in each tick
if ticks_of_travel == 0:
self.travel_rate = 0
else:
self.travel_rate = 1.0 / ticks_of_travel
self.dwell_count = random.randrange(self.dwell_h_min / (self.update_period / 3600.0), self.dwell_h_max / (self.update_period / 3600.0)) # Wait here for a while before commencing
self.update_everything()
def pump_up_tires(self):
self.set_property("tire_pressure_psi", random.gauss(35,5))
|
|
from cspace import CSpace
from .. import robotsim
from ..model import collide
from cspaceutils import AdaptiveCSpace,EmbeddedCSpace
import math
import random
class RobotCSpace(CSpace):
"""A basic robot cspace that allows collision free motion.
Warning: if your robot has non-standard joints, like a free-
floating base or continuously rotating (spin) joints, you will need to
overload the sample() method."""
def __init__(self,robot,world=None,collider=None):
"""Arguments:
- robot: the robot which should move.
- collider (optional): a collide.WorldCollider instance containing
the world in which the robot lives. Any ignored collisions will be
respected in the collision checker.
"""
CSpace.__init__(self)
self.robot = robot
self.setBounds(zip(*robot.getJointLimits()))
self.collider = collider
self.addFeasibilityTest((lambda x: self.inJointLimits(x)),"joint limits")
def setconfig(x):
self.robot.setConfig(x)
return True
if collider:
bb0 = ([float('inf')]*3,[float('-inf')]*3)
bb = [bb0[0],bb0[1]]
def calcbb(x):
bb[0] = bb0[0]
bb[1] = bb0[1]
for i in xrange(self.robot.numLinks()):
g = self.robot.link(i).geometry()
if not g.empty():
bbi = g.getBB()
bb[0] = [min(a,b) for (a,b) in zip(bb[0],bbi[0])]
bb[1] = [max(a,b) for (a,b) in zip(bb[1],bbi[1])]
return True
def objCollide(o):
obb = self.collider.world.rigidObject(o).geometry().getBB()
if not collide.bb_intersect(obb,bb): return False
return any(True for _ in self.collider.robotObjectCollisions(self.robot.index,o))
def terrCollide(o):
obb = self.collider.world.terrain(o).geometry().getBB()
if not collide.bb_intersect(obb,bb): return False
return any(True for _ in self.collider.robotTerrainCollisions(self.robot.index,o))
self.addFeasibilityTest(setconfig,"setconfig")
self.addFeasibilityTest(calcbb,"calcbb",dependencies="setconfig")
self.addFeasibilityTest((lambda x: not self.selfCollision()),"self collision",dependencies="setconfig")
#self.addFeasibilityTest((lambda x: not self.envCollision()),"env collision")
for o in range(self.collider.world.numRigidObjects()):
self.addFeasibilityTest((lambda x,o=o: not objCollide(o)),"obj collision "+str(o)+" "+self.collider.world.rigidObject(o).getName(),dependencies="calcbb")
for o in range(self.collider.world.numTerrains()):
self.addFeasibilityTest((lambda x,o=o: not terrCollide(o)),"terrain collision "+str(o)+" "+self.collider.world.terrain(o).getName(),dependencies="calcbb")
else:
self.addFeasibilityTest(setconfig,"setconfig")
self.addFeasibilityTest((lambda x: not self.selfCollision()),"self collision",dependencies="setconfig")
self.properties['geodesic'] = 1
def addConstraint(self,checker,name=None):
        self.addFeasibilityTest(checker,name)
def sample(self):
"""Overload this to implement custom sampling strategies or to handle
non-standard joints. This one will handle spin joints and
rotational axes of floating bases."""
res = CSpace.sample(self)
for i,x in enumerate(res):
if math.isnan(x):
res[i] = random.uniform(0,math.pi*2.0)
return res
def inJointLimits(self,x):
"""Checks joint limits of the configuration x"""
for (xi,bi) in zip(x,self.bound):
if xi < bi[0] or xi > bi[1]:
return False
return True
def selfCollision(self,x=None):
"""Checks whether the robot at its current configuration is in
self collision"""
#This should be faster than going through the collider...
if x is not None: self.robot.setConfig(x)
return self.robot.selfCollides()
#if not self.collider: return False
#return any(self.collider.robotSelfCollisions(self.robot.index))
def envCollision(self,x=None):
"""Checks whether the robot at its current configuration is in
collision with the environment."""
if not self.collider: return False
if x is not None: self.robot.setConfig(x)
for o in xrange(self.collider.world.numRigidObjects()):
if any(self.collider.robotObjectCollisions(self.robot.index,o)):
                return True
for o in xrange(self.collider.world.numTerrains()):
if any(self.collider.robotTerrainCollisions(self.robot.index,o)):
                return True
return False
def interpolate(self,a,b,u):
return self.robot.interpolate(a,b,u)
def distance(self,a,b):
return self.robot.distance(a,b)
def sendPathToController(self,path,controller):
"""Given a planned CSpace path 'path' and a SimRobotController 'controller',
sends the path so that it is executed correctly by the controller (this assumes
a fully actuated robot)."""
controller.setMilestone(path[0])
for q in path[1:]:
controller.appendMilestoneLinear(q)
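# Hedged usage sketch (commented out; WorldModel/readFile/feasible are assumptions
# about the surrounding Klampt API rather than something defined in this module):
#
#   world = robotsim.WorldModel()
#   world.readFile("my_world.xml")              # hypothetical world file
#   robot = world.robot(0)
#   space = RobotCSpace(robot, collider=collide.WorldCollider(world))
#   q = space.sample()
#   print space.feasible(q)                     # runs the registered feasibility tests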
class RobotSubsetCSpace(EmbeddedCSpace):
"""A basic robot cspace that allows collision free motion of a *subset*
of joints. The subset is given by the indices in the list "subset"
provided to the constructor. The configuration space is R^k where k
is the number of DOFs in the subset.
This class will automatically disable all collisions for inactive robot links
in the collider.
Note: to convert from start/goal robot configurations to the CSpace, call
the project(qrobot) method for the start and goal. (see EmbeddedCSpace.project())
Note: to convert from a planned path back to the robot's full configuration space,
you will need to call the lift(q) method for all configurations q in the planned
path. (see EmbeddedCSpace.lift())
Warning: if your robot has non-standard joints, like a free-
floating base or continuously rotating (spin) joints, you will need to
overload the sample() method."""
def __init__(self,robot,subset,collider=None):
EmbeddedCSpace.__init__(self,RobotCSpace(robot,collider),subset,xinit=robot.getConfig())
self.collider = collider
if self.collider:
inactive = []
for i in range(robot.numLinks()):
if i not in subset: inactive.append(i)
#disable self-collisions for inactive objects
for i in inactive:
rindex = self.collider.robots[robot.index][i]
self.collider.mask[rindex] = set()
def liftPath(self,path):
"""Given a CSpace path path, lifts this to the full robot configuration"""
return [self.lift(q) for q in path]
def sendPathToController(self,path,controller):
"""Given a planned CSpace path 'path' and a SimRobotController 'controller',
sends the path so that it is executed correctly by the controller (this assumes
a fully actuated robot)."""
lpath = self.liftPath(path)
controller.setMilestone(lpath[0])
for q in lpath[1:]:
controller.appendMilestoneLinear(q)
class ClosedLoopRobotCSpace(RobotCSpace):
"""A closed loop cspace. Allows one or more IK constraints to be
maintained during the robot's motion.
Attributes:
- solver: the IKSolver that is used.
- maxIters: the maximum number of iterations for numerical IK solver
- tol: how closely the IK constraint must be met, in meters/radians
To satisfy the IK constraint, the motion planner ensures that configuration
samples are projected to the manifold of closed-loop IK solutions. To create
edges between samples a and b, the straight line path a and b is projected to
the manifold via an IK solve.
"""
def __init__(self,robot,iks,collider=None):
RobotCSpace.__init__(self,robot,collider)
self.solver = robotsim.IKSolver(robot)
if hasattr(iks,'__iter__'):
for ik in iks:
self.solver.add(ik)
else:
self.solver.add(iks)
#root finding iterations
self.maxIters = 100
self.tol = 1e-3
self.addFeasibilityTest((lambda x: self.closedLoop(x)),'closed loop constraint')
def setIKActiveDofs(self,activeSet):
"""Marks that only a subset of the DOFs of the robot are to be used for solving
the IK constraint."""
self.solver.setActiveDofs(activeSet)
def sample(self):
"""Samples directly on the contact manifold. The basic method samples arbitrarily in
the configuration space and then solves IK constraints. This may be an ineffective
method especially for floating-base robots, since the floating joints may be sampled
arbitrarily."""
x = RobotCSpace.sample(self)
return self.solveConstraints(x)
def sampleneighborhood(self,c,r):
"""Samples a neighborhood in ambient space and then projects onto the contact manifold"""
x = RobotCSpace.sampleneighborhood(self,c,r)
return self.solveConstraints(x)
def solveConstraints(self,x):
"""Given an initial configuration of the robot x, attempts to solve the IK constraints
given in this space. Return value is the best configuration found via local optimization."""
self.robot.setConfig(x)
self.solver.setMaxIters(self.maxIters)
self.solver.setTolerance(self.tol)
res = self.solver.solve()
return self.robot.getConfig()
def closedLoop(self,config=None,tol=None):
"""Returns true if the closed loop constraint has been met at config,
or if config==None, the robot's current configuration."""
if config is not None: self.robot.setConfig(config)
e = self.solver.getResidual()
if tol==None: tol = self.tol
return max(abs(ei) for ei in e) <= tol
def interpolate(self,a,b,u):
"""Interpolates on the manifold. Used by edge collision checking"""
x = RobotCSpace.interpolate(self,a,b,u)
return self.solveConstraints(x)
def interpolationPath(self,a,b,epsilon=1e-2):
"""Creates a discretized path on the contact manifold between the points a and b, with
resolution epsilon"""
d = self.distance(a,b)
nsegs = int(math.ceil(d/epsilon))
if nsegs <= 1: return [a,b]
res = [a]
for i in xrange(nsegs-1):
u = float(i+1)/float(nsegs)
res.append(self.interpolate(a,b,u))
res.append(b)
return res
def discretizePath(self,path,epsilon=1e-2):
"""Given a CSpace path path, generates a path that satisfies closed-loop constraints
up to the given distance between milestones"""
if path is None: return None
if len(path)==0: return []
respath = [path[0]]
for a,b in zip(path[:-1],path[1:]):
respath += self.interpolationPath(a,b,epsilon)[1:]
return respath
def sendPathToController(self,path,controller,epsilon=1e-2):
"""Given a CSpace path path, sends the path to be executed to the SimRobotController.
This discretizes the path and sends it as a piecewise linear curve, limited in speed
by the robot's maximum velocity.
NOTE: this isn't the best thing to do for robots with slow acceleration limits
and/or high inertias because it ignores acceleration. A better solution can be found
in the MInTOS package or the C++ code in Klampt/Planning/RobotTimeScaling.h."""
dpath = self.discretizePath(path,epsilon)
vmax = controller.model().getVelocityLimits()
assert len(dpath[0]) == len(vmax)
controller.setMilestone(dpath[0])
for a,b in zip(dpath[:-1],dpath[1:]):
dt = 0.0
for i in xrange(len(a)):
if vmax[i] == 0:
if a[i] != b[i]: print "ClosedLoopRobotCSpace.sendPathToController(): Warning, path moves on DOF %d with maximum velocity 0"%(i,)
else:
dt = max(dt,abs(a[i]-b[i])/vmax[i])
            # this does a piecewise linear interpolation
controller.appendLinear(dt,b)
class ImplicitManifoldRobotCSpace(RobotCSpace):
"""A closed loop cspace with an arbitrary numerical manifold f(q)=0
to constrain the robot's motion. The argument implicitConstraint
should be a function f(q) returning a list of values that should be
equal to 0 up to the given tolerance. Essentially this is a
ClosedLoopRobotCSpace except with a user-provided function.
See ClosedLoopRobotCSpace.
"""
def __init__(self,robot,implicitConstraint,collider=None):
        RobotCSpace.__init__(self,robot,collider)
self.implicitConstraint = implicitConstraint
#root finding iterations
self.maxIters = 100
self.tol = 1e-3
self.addFeasibilityTest((lambda x: self.onManifold(x)),'implicit manifold constraint')
def sample(self):
"""Samples directly on the contact manifold"""
        x = RobotCSpace.sample(self)
return self.solveManifold(x)
def onManifold(self,x,tol=None):
"""Returns true if the manifold constraint has been met at x."""
e = self.implicitConstraint.eval(x)
if tol==None: tol = self.tol
return max(abs(ei) for ei in e) <= tol
def solveManifold(self,x,tol=None,maxIters=None):
"""Solves the manifold constraint starting from x, to the given
tolerance and with the given maximum iteration count. Default
uses the values set as attributes of this class.
"""
if tol==None: tol = self.tol
if maxIters==None: maxIters = self.maxIters
import rootfind
rootfind.setXTolerance(1e-8)
rootfind.setFTolerance(tol)
rootfind.setVectorField(self.implicitConstraint)
(res,x,val) = rootfind.findRootsBounded(x,self.bound)
return x
def interpolate(self,a,b,u):
"""Interpolates on the manifold. Used by edge collision checking"""
x = RobotCSpace.interpolate(self,a,b,u)
return self.solveManifold(x)
|
|
import cherrypy, re, os.path, cPickle as pickle, time, calendar
from base_proxy import BaseProxy
import tools.hashlib_shortcuts, tools.file
def expires_to_timestamp(expires):
try:
# Expires: 0
int(expires)
return time.time()-86400
except:
return calendar.timegm(time.strptime(expires, '%a, %d %b %Y %H:%M:%S %Z'))
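# Hedged sketch (not part of the original proxy): the two header forms accepted above.
def _example_expires_to_timestamp():
    """Return (already-expired timestamp, parsed fixed date)."""
    already_expired = expires_to_timestamp('0')  # interpreted as "expired a day ago"
    fixed_date = expires_to_timestamp('Thu, 01 Dec 1994 16:00:00 GMT')  # -> 786297600
    return already_expired, fixed_date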
class Proxy(BaseProxy):
default_remote_host = cherrypy.config['remote.host']
reverse_host_map = {
cherrypy.config['remote.host']: '',
}
adjust_host_in_content_types = ('text/html', 'application/xml', 'application/json')
def __init__(self):
BaseProxy.__init__(self)
self.html_comment_re = re.compile(r'<!--.*?-->', re.S)
self.html_script_re = re.compile(r'<script.*?</script>', re.S)
# note: we use re.match which requires match from beginning
self.public_paths_re = re.compile(r'/(s/|images/|favicon\.ico|rest/api/1\.0/(header-separator|dropdowns)($|\?))')
def perform(self, **kwargs):
BaseProxy.perform(self, **kwargs)
self._adjust_cache_directives()
self._adjust_host_in_links()
def _adjust_cache_directives(self):
r = self._remote_response
# kill no-cache and no-store directives.
# note whether response was marked public; only public responses are saved to disk
r.public = False
if r.headers.has_key('pragma'):
value = r.headers['pragma']
# xxx hack: multiple pragmas are theoretically possible, but unlikely
if value == 'no-cache':
del r.headers['pragma']
if r.headers.has_key('cache-control'):
value = r.headers['cache-control']
parts = [part.strip() for part in value.split(',')]
new_parts = [part for part in parts if part not in ['no-cache', 'no-store']]
if len(parts) != len(new_parts):
new_value = ', '.join(new_parts)
r.headers['cache-control'] = new_value
if 'public' in new_parts:
r.public = True
# kill past expiration dates
if r.headers.has_key('expires'):
expires_at = expires_to_timestamp(r.headers['expires'])
if expires_at < time.time():
del r.headers['expires']
def _adjust_host_in_links(self):
r = self._remote_response
# post responses have no content type, thus nothing to adjust
if not r.headers.has_key('content-type'):
return
content_type = r.headers['content-type'].lower()
for check in self.__class__.adjust_host_in_content_types:
if content_type.startswith(check):
content = r.content
content = self.html_comment_re.sub('', content)
local_host = cherrypy.config.get('local.host')
incoming_host = cherrypy.request.headers.get('host')
search = cherrypy.config['remote.host']
replace = local_host or incoming_host or self.__class__.default_remote_host
content = content.replace(search, replace)
if local_host:
content = content.replace(incoming_host, local_host)
#content = self.html_script_re.sub(lambda match: match.group(0).replace(search, replace), content)
r.content = content
break
def _collect_request_parameters(self, **kwargs):
BaseProxy._collect_request_parameters(self, **kwargs)
# jira puts hostname into self-referential links, and hostname comes from jira configuration.
# in case of jira proxy-served pages, that hostname is wrong.
# this means an http accelerator like varnish which does not edit response bodies
# cannot serve usable pages when running on any host other than configured jira host.
# in turn this means jira proxy must always be involved in proxying process.
# running an accelerator on top of jira-proxy makes latency even worse, so to maintain
# some semblance of sanity we have to do all transformations that varnish does.
self._adjust_request()
# header adjustments from varnish
def _adjust_request(self):
if self.public_paths_re.match(self._params.path):
self._params.clear_cookies()
def _issue_remote_request(self):
BaseProxy._issue_remote_request(self)
# see note in _collect_request_parameters.
# do what should be done in varnish
self._adjust_response()
# header adjustments from varnish
def _adjust_response(self):
if self.public_paths_re.match(self._params.path):
self._remote_response.clear_cookies()
self._make_response_public()
# be aggressive here since we don't get much traffic
self._force_min_expiration(86400)
def _make_response_public(self):
h = self._remote_response.headers
if h.has_key('cache-control'):
cache_control = [part.strip() for part in h['cache-control'].split(',')]
if 'private' in cache_control:
# asked to make public a private response...
# we strip cookies on public paths so we should be ok to ignore this
                cache_control.remove('private')
if 'public' not in cache_control:
cache_control.append('public')
cache_control = ', '.join(cache_control)
else:
cache_control = 'public'
h['cache-control'] = cache_control
def _force_min_expiration(self, time_in_seconds):
h = self._remote_response.headers
expires_at = self._determine_response_expiration_time(self._remote_response)
min_expires_at = int(time.time()) + time_in_seconds
if expires_at is None or expires_at < min_expires_at:
if h.has_key('cache-control'):
cache_control = [part.strip() for part in h['cache-control'].split(',')]
for part in cache_control:
if part.startswith('max-age='):
cache_control.remove(part)
break
else:
cache_control = []
cache_control.append('max-age=%d' % time_in_seconds)
h['cache-control'] = ', '.join(cache_control)
if h.has_key('expires'):
del h['expires']
def _determine_response_expiration_time(self, response):
h = response.headers
expires_at = None
# max-age takes precedence over expires
if h.has_key('cache-control'):
parts = [part.strip() for part in h['cache-control'].split(',')]
for part in parts:
if part.startswith('max-age='):
age = int(part[8:])
expires_at = int(time.time()) + age
break
if expires_at is None and h.has_key('expires'):
expires_at = expires_to_timestamp(h['expires'])
return expires_at
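# Illustrative note (not from the original code): for a response carrying
#   Cache-Control: public, max-age=600
#   Expires: Thu, 01 Dec 1994 16:00:00 GMT
# _determine_response_expiration_time() returns now + 600, because max-age takes
# precedence over Expires in the logic above.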
class ContentWrapper:
def __init__(self, content):
self.content = content
class CachingProxy(Proxy):
def perform_and_propagate(self, **kwargs):
self._setup_cache_variables()
content = self._find_in_cache()
if content is None:
response = Proxy.perform_and_propagate(self, **kwargs)
if response.public:
self._save_to_cache(response)
else:
# small hack for x-accel-redirect support
if content is True:
content = None
response = ContentWrapper(content)
return response
def _setup_cache_variables(self):
self.cache_absolute_path = self.cache_absolute_path_meta = None
r = cherrypy.request
if r.query_string:
hashed_qs = tools.hashlib_shortcuts.md5_hexdigest(r.query_string)
relative_path = r.path_info + '::' + hashed_qs
else:
relative_path = r.path_info
if relative_path.find('..') >= 0:
raise ValueError('Suspicious request relative path: %s' % relative_path)
assert relative_path[0] == '/'
self.cache_relative_path = relative_path
relative_path = relative_path[1:]
if relative_path:
assert relative_path[0] != '/'
self.cache_absolute_path = os.path.join(cherrypy.config['local.cache.dir'], relative_path)
self.cache_absolute_path_meta = self.cache_absolute_path + '.meta'
def _find_in_cache(self):
if self.cache_absolute_path_meta is not None and os.path.exists(self.cache_absolute_path_meta):
headers = pickle.loads(tools.file.read(self.cache_absolute_path_meta))
expires = headers['x-expires-timestamp']
now = time.time()
if expires >= now:
for key, value in headers.items():
cherrypy.response.headers[key] = value
if cherrypy.config.get('local.cache.x_accel_redirect.enabled'):
cherrypy.response.headers['x-accel-redirect'] = cherrypy.config['local.cache.x_accel_redirect.prefix'] + self.cache_relative_path
content = True
else:
content = tools.file.read(self.cache_absolute_path)
return content
def _save_to_cache(self, response):
if self.cache_absolute_path is None or response.code is not None:
return
headers = response.headers
expires_at = self._determine_response_expiration_time(response)
if expires_at is not None:
headers['x-expires-timestamp'] = expires_at
dir = os.path.dirname(self.cache_absolute_path)
if not os.path.exists(dir):
tools.file.safe_mkdirs(dir)
tools.file.safe_write(self.cache_absolute_path, response.content)
tools.file.safe_write(self.cache_absolute_path_meta, pickle.dumps(headers))
|
|
import html
import importlib
import re
import token
import tokenize
from io import StringIO
from pathlib import Path
from collections import defaultdict, deque, namedtuple, ChainMap
from typing import Dict, Iterable
TOK_COMMENT = "comment"
TOK_TEXT = "text"
TOK_VAR = "var"
TOK_BLOCK = "block"
tag_re = re.compile(r"{%\s*(?P<block>.+?)\s*%}|{{\s*(?P<var>.+?)\s*}}|{#\s*(?P<comment>.+?)\s*#}", re.DOTALL)
Token = namedtuple("Token", "type content")
class SafeStr(str):
__safe__ = True
def __str__(self):
return self
def tokenise(template):
upto = 0
for m in tag_re.finditer(template):
start, end = m.span()
if upto < start:
yield Token(TOK_TEXT, template[upto:start])
upto = end
mode = m.lastgroup
yield Token(mode, m[mode].strip())
if upto < len(template):
yield Token(TOK_TEXT, template[upto:])
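# Hedged sketch (not part of the original module): the token stream produced for a
# small template string.
def _example_tokenise():
    """Return the tokens for a template mixing text, a variable and a comment."""
    return list(tokenise("Hello {{ name }}!{# ignored #}"))
    # -> [Token(type='text', content='Hello '), Token(type='var', content='name'),
    #     Token(type='text', content='!'), Token(type='comment', content='ignored')]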
class TemplateLoader(dict):
def __init__(self, paths):
self.paths = [Path(path).resolve() for path in paths]
def load(self, name, encoding="utf8"):
for path in self.paths:
full_path = path / name
if full_path.is_file():
return Template(full_path.read_text(encoding), loader=self, name=name)
raise LookupError(name)
def __missing__(self, key):
self[key] = tmpl = self.load(key)
return tmpl
class Context(ChainMap):
def __init__(self, *args, escape=html.escape):
super().__init__(*args)
self.maps.append({"True": True, "False": False, "None": None})
self.escape = escape
def push(self, data=None):
self.maps.insert(0, data or {})
return self
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.maps.pop(0)
class Nodelist(list):
def render(self, context, output):
for node in self:
node.render(context, output)
def nodes_by_type(self, node_type):
for node in self:
if isinstance(node, node_type):
yield node
if isinstance(node, BlockNode):
yield from node.nodes_by_type(node_type)
class Template:
def __init__(self, src, loader=None, name=None):
self.tokens, self.loader = tokenise(src), loader
self.name = name # So we can report where the fault was
self.nodelist = self.parse_nodelist([])
def parse(self):
for tok in self.tokens:
if tok.type == TOK_TEXT:
yield TextTag(tok.content)
elif tok.type == TOK_VAR:
yield VarTag(tok.content)
elif tok.type == TOK_BLOCK:
m = re.match(r"\w+", tok.content)
if not m:
raise SyntaxError(tok)
yield BlockNode.__tags__[m.group(0)].parse(tok.content[m.end(0):].strip(), self)
def parse_nodelist(self, ends):
nodelist = Nodelist()
try:
node = next(self.parse())
while node.name not in ends:
nodelist.append(node)
node = next(self.parse())
except StopIteration:
node = None
nodelist.endnode = node
return nodelist
def render(self, context, output=None):
if not isinstance(context, Context):
context = Context(context)
if output is None:
dest = StringIO()
else:
dest = output
self.nodelist.render(context, dest)
if output is None:
return dest.getvalue()
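# Hedged usage sketch (not part of the original module): rendering from a string,
# bypassing TemplateLoader. Values are escaped by the Context's html.escape default.
def _example_render():
    """Render a for-loop template over two items, one of which needs escaping."""
    tmpl = Template("{% for item in items %}<li>{{ item }}</li>{% endfor %}")
    return tmpl.render({"items": ["a", "<b>"]})
    # -> '<li>a</li><li>&lt;b&gt;</li>'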
class AstLiteral:
def __init__(self, arg):
self.arg = arg
def resolve(self, context):
return self.arg
class AstContext:
def __init__(self, arg):
self.arg = arg
def resolve(self, context):
return context.get(self.arg, "")
class AstLookup:
def __init__(self, left, right):
self.left = left
self.right = right
def resolve(self, context):
left = self.left.resolve(context)
right = self.right.resolve(context)
return left[right]
class AstAttr:
def __init__(self, left, right):
self.left = left
self.right = right
def resolve(self, context):
left = self.left.resolve(context)
return getattr(left, self.right, "")
class AstCall:
def __init__(self, func):
self.func = func
self.args = []
def add_arg(self, arg):
self.args.append(arg)
def resolve(self, context):
func = self.func.resolve(context)
args = [arg.resolve(context) for arg in self.args]
return func(*args)
class Expression:
def __init__(self, source):
self.tokens = tokenize.generate_tokens(StringIO(source).readline)
self.next() # prime the first token
def next(self):
self.current = next(self.tokens)
return self.current
@staticmethod
def parse(s):
p = Expression(s)
result = p._parse()
if p.current.exact_type not in (token.NEWLINE, token.ENDMARKER):
raise SyntaxError(f"Parse ended unexpectedly: {p.current}")
return result
def parse_kwargs(self):
kwargs = {}
tok = self.current
while tok.exact_type != token.ENDMARKER:
if tok.exact_type == token.NEWLINE:
tok = self.next()
continue
if tok.exact_type != token.NAME:
raise SyntaxError(f"Expected name, found {tok}")
name = tok.string
tok = self.next()
if tok.exact_type != token.EQUAL:
raise SyntaxError(f"Expected =, found {tok}")
tok = self.next()
kwargs[name] = self._parse()
tok = self.next()
return kwargs
def _parse(self):
tok = self.current
if tok.exact_type in (token.ENDMARKER, token.COMMA):
return # TODO
if tok.exact_type == token.STRING:
self.next()
return AstLiteral(tok.string[1:-1])
if tok.exact_type == token.NUMBER:
self.next()
try:
value = int(tok.string)
except ValueError:
value = float(tok.string)
return AstLiteral(value)
if tok.exact_type == token.NAME:
state = AstContext(tok.string)
while True:
tok = self.next()
if tok.exact_type == token.DOT:
tok = self.next()
if tok.exact_type != token.NAME:
raise SyntaxError(f"Invalid attr lookup: {tok}")
state = AstAttr(state, tok.string)
elif tok.exact_type == token.LSQB:
self.next()
right = self._parse()
state = AstLookup(state, right)
if self.current.exact_type != token.RSQB:
raise SyntaxError(f"Expected ] but found {self.current}")
elif tok.exact_type == token.LPAR:
state = AstCall(state)
self.next()
while self.current.exact_type != token.RPAR:
arg = self._parse()
state.add_arg(arg)
if self.current.exact_type != token.COMMA:
break
self.next()
if self.current.exact_type != token.RPAR:
raise SyntaxError(f"Expected ( but found {self.current}")
self.next()
else:
break
return state
raise SyntaxError(
f"Error parsing expression {tok.line !r}: Unexpected token {tok.string!r} at position {tok.start[0]}."
)
class Node:
name = None
def __init__(self, content):
self.content = content
def render(self, context, output):
pass # pragma: no cover
class TextTag(Node):
def render(self, context, output):
output.write(self.content)
class VarTag(Node):
def __init__(self, content):
self.expr = Expression.parse(content)
    def render(self, context, output):
        value = self.expr.resolve(context)
        # Check the safe marker on the resolved value; converting with str() first
        # would drop it and force escaping even for pre-marked safe content.
        safe = getattr(value, '__safe__', False)
        value = str(value)
        if not safe:
            value = context.escape(value)
        output.write(value)
class BlockNode(Node):
    __tags__: Dict[str, 'BlockNode'] = {}
child_nodelists: Iterable[str] = ("nodelist",)
def __init_subclass__(cls, *, name):
super().__init_subclass__()
cls.name = name
BlockNode.__tags__[name] = cls
return cls
@classmethod
def parse(cls, content, parser):
return cls(content)
def nodes_by_type(self, node_type):
for attr in self.child_nodelists:
nodelist = getattr(self, attr, None)
if nodelist:
yield from nodelist.nodes_by_type(node_type)
class ForTag(BlockNode, name="for"):
child_nodelists = ("nodelist", "elselist")
def __init__(self, argname, iterable, nodelist, elselist):
self.argname, self.iterable, self.nodelist, self.elselist = argname, iterable, nodelist, elselist
@classmethod
def parse(cls, content, parser):
argname, iterable = content.split(" in ", 1)
nodelist = parser.parse_nodelist({"endfor", "else"})
elselist = parser.parse_nodelist({"endfor"}) if nodelist.endnode.name == "else" else None
return cls(argname.strip(), Expression.parse(iterable.strip()), nodelist, elselist)
def render(self, context, output):
iterable = self.iterable.resolve(context)
if iterable:
with context.push():
for idx, item in enumerate(iterable):
context.update({"loopcounter": idx, self.argname: item})
self.nodelist.render(context, output)
elif self.elselist:
self.elselist.render(context, output)
class ElseTag(BlockNode, name="else"):
pass
class EndforTag(BlockNode, name="endfor"):
pass
class IfTag(BlockNode, name="if"):
child_nodelists = ("nodelist", "elselist")
def __init__(self, condition, nodelist, elselist):
condition, inv = re.subn(r"^not\s+", "", condition, count=1)
self.inv, self.condition = bool(inv), Expression.parse(condition)
self.nodelist, self.elselist = nodelist, elselist
@classmethod
def parse(cls, content, parser):
nodelist = parser.parse_nodelist({"endif", "else"})
elselist = parser.parse_nodelist({"endif"}) if nodelist.endnode.name == "else" else None
return cls(content, nodelist, elselist)
def render(self, context, output):
if self.test_condition(context):
self.nodelist.render(context, output)
elif self.elselist:
self.elselist.render(context, output)
def test_condition(self, context):
return self.inv ^ bool(self.condition.resolve(context))
class EndifTag(BlockNode, name="endif"):
pass
class IncludeTag(BlockNode, name="include"):
def __init__(self, template_name, kwargs, loader):
self.template_name, self.kwargs, self.loader = template_name, kwargs, loader
@classmethod
def parse(cls, content, parser):
if parser.loader is None:
raise RuntimeError("Can't use {% include %} without a bound Loader")
tokens = Expression(content)
template_name = tokens._parse()
kwargs = tokens.parse_kwargs()
return cls(template_name, kwargs, parser.loader)
def render(self, context, output):
name = self.template_name.resolve(context)
tmpl = self.loader[name]
kwargs = {key: expr.resolve(context) for key, expr in self.kwargs.items()}
ctx = context.new_child(kwargs)
tmpl.render(ctx, output)
class LoadTag(BlockNode, name="load"):
@classmethod
def parse(cls, content, parser):
importlib.import_module(content)
return cls(None)
class ExtendsTag(BlockNode, name="extends"):
def __init__(self, parent, loader, nodelist):
self.parent, self.loader, self.nodelist = parent, loader, nodelist
@classmethod
def parse(cls, content, parser):
parent = Expression.parse(content)
nodelist = parser.parse_nodelist([])
return cls(parent, parser.loader, nodelist)
def render(self, context, output):
parent = self.loader[self.parent.resolve(context)]
block_context = getattr(context, "block_context", None)
if block_context is None:
block_context = context.block_context = defaultdict(deque)
for block in self.nodelist.nodes_by_type(BlockTag):
block_context[block.block_name].append(block)
if parent.nodelist[0].name != "extends":
for block in parent.nodelist.nodes_by_type(BlockTag):
block_context[block.block_name].append(block)
parent.render(context, output)
class BlockTag(BlockNode, name="block"):
def __init__(self, name, nodelist):
self.block_name, self.nodelist = name, nodelist
@classmethod
def parse(cls, content, parser):
m = re.match(r"\w+", content)
if not m:
raise ValueError(f'Invalid block label: {content !r}')
name = m.group(0)
nodelist = parser.parse_nodelist({"endblock"})
return cls(name, nodelist)
def render(self, context, output):
self.context = context
self.output = output
self._render()
def _render(self):
block_context = getattr(self.context, "block_context", None)
if not block_context:
block = self
else:
block = block_context[self.block_name].popleft()
with self.context.push({"block": self}):
block.nodelist.render(self.context, self.output)
if block_context:
block_context[self.block_name].appendleft(block)
@property
def super(self):
self._render()
return ""
class EndBlockTag(BlockNode, name="endblock"):
pass
class WithTag(BlockNode, name="with"):
def __init__(self, kwargs, nodelist):
self.kwargs, self.nodelist = kwargs, nodelist
@classmethod
def parse(cls, content, parser):
kwargs = Expression(content).parse_kwargs()
nodelist = parser.parse_nodelist({"endwith"})
return cls(kwargs, nodelist)
def render(self, context, output):
kwargs = {key: value.resolve(context) for key, value in self.kwargs.items()}
with context.push(kwargs):
self.nodelist.render(context, output)
class EndWithTag(BlockNode, name="endwith"):
pass
class CaseTag(BlockNode, name="case"):
def __init__(self, term, nodelist):
self.term, self.nodelist = term, nodelist
@classmethod
def parse(cls, content, parser):
term = Expression.parse(content)
nodelist = parser.parse_nodelist(["endcase"])
else_found = False
for node in nodelist:
if node.name not in {"when", "else"}:
raise SyntaxError(f"Only 'when' and 'else' allowed as children of case. Found: {node}")
if node.name == "else":
if else_found:
raise SyntaxError("Case tag can only have one else child")
else_found = True
nodelist.sort(key=lambda x: x.name, reverse=True)
return cls(term, nodelist)
def render(self, context, output):
value = self.term.resolve(context)
for node in self.nodelist:
if node.name == "when":
other = node.term.resolve(context)
else:
other = value
if value == other:
node.render(context, output)
return
class WhenTag(BlockNode, name="when"):
def __init__(self, term, nodelist):
self.term, self.nodelist = term, nodelist
@classmethod
def parse(cls, content, parser):
term = Expression.parse(content)
        # parse_nodelist() requires a set of terminating tag names; assume a when
        # block runs until the next when/else/endcase tag.
        nodelist = parser.parse_nodelist({"when", "else", "endcase"})
return cls(term, nodelist)
def render(self, context, output):
self.nodelist.render(context, output)
class EndCaseTag(BlockNode, name="endcase"):
pass
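# --- Usage sketch (illustrative only) ---
# Assuming tokenise() (defined earlier in this module) recognises the usual
# {{ var }} / {% tag %} markers and Context provides the escape()/push() API
# referenced above, rendering looks like:
#
#   tmpl = Template("Hello {{ name }}!{% if shout %} HELLO!{% endif %}")
#   print(tmpl.render({"name": "world", "shout": True}))
#
# Template.render() accepts a plain dict (wrapped in a Context) and returns the
# rendered string when no output stream is passed.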
|
|
from savman import gamefind
import os
import gzip
import string
import json
import yaml
import sys
import fnmatch
import logging
import hashlib
from savman.vbackup import Backup
class InvalidIdError(Exception): pass
class Game:
def __init__(self, gid, name):
self.id = gid
self.name = name
self.locations = []
class GameLocation:
def __init__(self, path = None, include = None, exclude = None):
self.path = path
self.include = include
self.exclude = exclude
class GameMan:
def __init__(self, database):
self.games = {}
self.backups = {} #Keys: game id, Values: backup path
self.db = database
self.finder = gamefind.Finder()
self.cachefile = ''
self.customfile = ''
self.customdirs = set()
if not database:
logging.error('No database loaded, exiting')
sys.exit(1)
def save_cache(self, file=None):
if not file: file = self.cachefile
games_json = {}
for game, data in self.games.items():
games_json[game] = [ loc.__dict__ for loc in data.locations ]
#print(games_json)
with gzip.open(file, 'wt') as cfile:
self.finder.trim_cache()
json.dump({'games': games_json, 'dirs': self.finder.export_cache(),
'backups': self.backups}, cfile)
def load_cache(self, file=None, dircache=True, cleargames=False):
if not file:
if not self.cachefile: raise TypeError('No cache file specified')
file = self.cachefile
try:
with gzip.open(file, 'rt') as cfile:
cache = json.load(cfile)
cgames = cache['games']
if dircache: self.finder.import_cache(cache['dirs'])
# Check that previously found game locations still exist
for game, data in cgames.copy().items():
for location in reversed(data):
path = location['path']
if not os.path.isdir(path) or path in self.customdirs:
data.remove(location)
if not data: del cgames[game]
# Check that backups still exist
for game, backups in cache['backups'].copy().items():
for backup in reversed(backups):
if not os.path.isfile(backup): backups.remove(backup)
if not backups: del cache['backups'][game]
self.backups = cache['backups']
if not cleargames:
for item, data in cgames.items():
if not item in self.games and item in self.db['games']:
game = Game(item, self.db['games'][item]['name'])
for loc in data:
game.locations.append(GameLocation(loc['path'],
loc['include'], loc['exclude']))
self.games[item] = game
if self.backups:
logging.info( 'Loaded {} games and {} backups from cache'.format(len(self.games),
len(self.backups)) )
else: logging.info( 'Loaded {} games from cache'.format(len(self.games)) )
except FileNotFoundError:
logging.info('Cache file not loaded (file not found)')
return False
def load_custom(self, file=None):
if not file: file = self.customfile
else: self.customfile = file
if not os.path.isfile(file): return
self.customdirs = set()
with open(file, 'r') as cfile:
for item in yaml.safe_load_all(cfile):
if not {'name', 'directory'} <= set(item): continue
name = item['name']
game_id = autoid(name)
include = list(item['include']) if 'include' in item else None
exclude = list(item['exclude']) if 'exclude' in item else None
directory = os.path.normpath(str(item['directory']))
if os.path.isdir(directory):
self.customdirs.add(directory)
location = GameLocation(directory, include, exclude)
if not game_id in self.games: self.games[game_id] = Game(game_id, name)
self.games[game_id].locations.append(location)
def find_games(self):
finder = self.finder
db = self.db
games = db['games']
locations = db['locations']
self.games = {}
self.load_custom()
# Game locations are stored in a dict, where each key is a tuple
# with the first value set to the associated game ID and the second
# value set to the location ID (or number).
for loc, data in locations.items():
variables = {'userdoc':gamefind.USERDOC,
'userprofile': gamefind.USERPROFILE,
'appdata': gamefind.APPDATA}
if data['type'] == 'variable':
finder.add_variable(loc, variables[data['variable']],
data['subdir'])
if data['type'] == 'profile':
finder.add_profile(loc, data['profile_items'],
profile_dir=data['profile_dir'], subdir=data['subdir'])
finder.find()
found = set()
for find, dirs in finder.found.items():
loc = locations[find] # Retrieve location data from database
dirs = [ d for d in dirs if not d in self.customdirs ]
gameid, locid = find # Split tuple
if not gameid in self.games:
game = Game(gameid, games[gameid]['name'])
self.games[gameid] = game
found.add(gameid)
else: game = self.games[gameid]
for directory in dirs:
location = GameLocation(directory, loc['include'], loc['exclude'])
game.locations.append(location)
logging.info("{} games found".format(len(found)))
def backup_games(self, dst, games=[], trim_min=None, trim_max=None):
if not os.path.isdir(dst):
raise FileNotFoundError("Destination does not exist: '{}'".format(location))
if not games: games = [ g for g in self.games ]
logging.info('Starting game backup...')
if not games:
logging.info('No games to backup')
return
#pool = multiprocessing.Pool(threads)
for game in sorted(games):
if not game in self.games:
logging.error("Could not backup '{}' - game ID not in database".format(game))
continue
for loc in self.games[game].locations:
                # Hash the source path so each location of a game gets a distinct backup name
dirhash = hashlib.sha1(loc.path.encode()).hexdigest()
name = '{}_{}.savman.vbak'.format(game, dirhash.upper()[:6])
path = os.path.join(dst, name)
backup = Backup(file=path, id=game)
backup.build(src=loc.path, include=loc.include,
exclude=loc.exclude)
backup.save()
if trim_min and trim_max: backup.autotrim(trim_min, trim_max)
#pool.close()
#pool.join()
def load_backups(self, location):
self.backups = {}
for item in os.listdir(location):
path = os.path.realpath(os.path.join(location, item))
if os.path.isfile(path):
if fnmatch.fnmatch(item, '*.savman.vbak'):
backup = Backup(path)
if backup.id in self.db['games']:
                        if backup.id not in self.backups: self.backups[backup.id] = [path]
else: self.backups[backup.id].append(path)
logging.info("Loaded {} backups from '{}'".format(len(self.backups), location))
def restore_backup(self, game_id, dst, source=None):
try: backups = self.backups[game_id]
except KeyError:
raise InvalidIdError("No backup found for game")
if len(backups) > 1:
if not source:
raise TypeError('Source location required as backup has multiple locations')
else:
backup = Backup(backups[0])
backup.restore(dst)
def restore_game(self, game_id, dst=None, source=None, target=None):
gid = next((g for g in self.games if g.lower() == game_id.lower()), game_id)
try: game = self.games[gid]
except KeyError:
raise InvalidIdError("Not found in current games")
if len(game.locations) > 1:
if not target:
raise TypeError('Target location required as game has multiple locations')
else:
if dst: self.restore_backup(gid, dst, source)
else: self.restore_backup(gid, game.locations[0].path, source)
def autoid(name):
wlist = []
name = name.replace('-',' ')
subabbr = ''
replace = {'II':'2', 'III':'3', 'IV':'4', 'V':'5', 'VI':'6', 'VII':'7', 'VIII':'8',
'IX':'9', 'X':'10', 'XI':'11', 'XII':'12', 'XIII':'13', '&':'And','HD':'HD'}
valid = list(string.ascii_letters+string.digits)+list(replace)
split = name.split(':', maxsplit=1)
if len(split) > 1 and len(name) > 32:
subs = split[1].strip().split(' ')
if len(subs) > 1:
for sub in subs:
sub = ''.join([ x for x in list(sub) if x in valid ])
if sub: subabbr += replace[sub] if sub in replace else sub[0].upper()
name = split[0]
for word in name.split(' '):
if word.lower() == 'the': continue
chars = [ x.lower() for x in list(word) if x in valid ]
if chars: chars[0] = chars[0].upper()
new = ''.join(chars)
if new.upper() in replace: wlist.append(replace[new.upper()])
else: wlist.append(new)
wlist.append(subabbr)
newname = ''.join(wlist)
return newname
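# Usage sketch (illustrative only). A typical GameMan workflow, assuming a
# loaded game database and the vbackup.Backup class behave as referenced above:
#
#   manager = GameMan(database)
#   manager.cachefile = 'cache.json.gz'
#   manager.load_cache()
#   manager.find_games()
#   manager.backup_games('/path/to/backups')
#   manager.save_cache()
#
# autoid() derives a compact identifier from a game title by dropping "the",
# capitalising the remaining words and applying the substitutions above,
# e.g. autoid("Half-Life 2") -> "HalfLife2".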
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import imp
import os
from config import *
import instances as inst
import util
def _prompt_config(lab, path):
"""Prompt for the lab configuration script"""
# TODO: Check for valid input
files = [d for d in os.listdir(path) if os.path.isfile(os.path.join(path, d))]
print "Available configurations for the '{0}' lab:\n".format(lab)
files.sort()
for f in files:
if f.endswith('.py'):
            # strip only the '.py' extension (str.strip would also eat leading/trailing 'p'/'y' characters)
            print ' - ', os.path.splitext(f)[0]
config = raw_input('\nWhich configuration would you like to execute?: ')
return config
def list_available_labs():
"""List available labs in LAB_DIR"""
available_labs = [d for d in os.listdir(LAB_DIR)
if os.path.isdir(os.path.join(LAB_DIR, d))]
print "\nAvailable Labs:"
available_labs.sort()
for lab in available_labs:
print ' {0}'.format(lab)
print ''
def lab_description(lab):
"""Display information for a single lab"""
file = open(LAB_DIR + lab + '/description.md', 'r')
print '\n', file.read()
file.close()
def calculate_lab_tag(conn, user_vpc, lab):
"""Auto-increment lab ID numbers"""
labs = []
instances = inst.get_vpc_instances(conn, user_vpc)
# get all lab tags
for instance in instances:
if 'Lab' in instance.tags:
if instance.tags['Lab'].startswith(lab):
labs.append(instance.tags['Lab'])
# remove duplicates
labs = list(set(labs))
# find first unused number
count = 1
while lab + '-' + str(count) in labs:
count += 1
return lab + '-' + str(count)
def get_running_labs(conn, user_vpc):
"""List/Return all running labs"""
labs = []
instances = inst.get_vpc_instances(conn, user_vpc)
# get all lab tags
for instance in instances:
if 'Lab' in instance.tags:
labs.append(instance.tags['Lab'])
# remove duplicates
labs = list(set(labs))
labs.sort()
if labs:
print "\nRunning labs:"
for lab in labs:
print " ", lab
print ""
return labs
else:
print "\nNo labs running ...\n"
def lab_info(conn, user_vpc):
"""List all running labs in AWS"""
labs = get_running_labs(conn, user_vpc)
if labs:
for lab in labs:
print "Instances running in lab '{0}':".format(lab)
instances = get_lab_instance_info(conn, user_vpc, lab)
for instance in instances:
print instance
print ""
def get_user_instance_info(conn, user_vpc, lab_tag, user):
"""List IP/DNS for each instance for user"""
reservations = conn.get_all_instances(filters = {'vpc-id': user_vpc.id,
'tag:Lab': lab_tag,
'tag:User': user})
final = []
for r in reservations:
for instance in r.instances:
final.append("""
Name: {0}
IP: {1}
Public DNS: {2}\n""".format(instance.tags['Name'],
instance.ip_address,
instance.public_dns_name))
final.sort()
return final
def get_lab_instance_info(conn, user_vpc, lab_tag):
"""List instance info for lab"""
reservations = conn.get_all_instances(filters = {'vpc-id': user_vpc.id,
'tag:Lab': lab_tag})
final = []
for r in reservations:
for instance in r.instances:
final.append("""
Name: {0}
Lab: {1}
Region: {2}
IP: {3}
Public DNS: {4}""".format(instance.tags['Name'],
instance.tags['Lab'],
str(instance.region).replace('RegionInfo:',''),
instance.ip_address,
instance.public_dns_name))
final.sort()
return final
def launch_lab(conn, user_vpc, lab):
"""Execute a lab configuration"""
path = LAB_DIR + lab + '/scripts/'
response = _prompt_config(lab, path)
# import lab configs
labmod = imp.load_source('labmod', path + response + '.py')
labmod.pre_process()
cfg = util.read_config(LAB_DIR + lab + '/instances.cfg')
# prompt for any dynamic configuration options
for instance in cfg['instance']:
for k, v in instance.iteritems():
if str(v).startswith('PROMPT:'):
instance[k] = raw_input('{0}: '.format(v.split(':')[1]))
if str(v).startswith('PROMPT#:'):
instance[k] = int(raw_input('{0}: '.format(v.split(':')[1])))
for device in instance['device']:
for k, v in device.iteritems():
if str(v).startswith('PROMPT:'):
device[k] = raw_input('{0}: '.format(v.split(':')[1]))
if str(v).startswith('PROMPT#:'):
device[k] = int(raw_input('{0}: '.format(v.split(':')[1])))
# connection and required info
security_groups = conn.get_all_security_groups(filters = {'vpc-id': user_vpc.id})
subnets = conn.get_all_subnets(filters = {'vpc-id': user_vpc.id})
# launch
inst.launch_instances(conn, user_vpc, lab, labmod, cfg, security_groups, subnets)
labmod.post_process()
def terminate_lab(conn, user_vpc, lab_tag):
"""Terminate a single lab and all instances"""
instance_ids = []
instances = inst.get_vpc_instances(conn, user_vpc)
# get all lab instances
for instance in instances:
if 'Lab' in instance.tags:
if instance.tags['Lab'] == lab_tag:
instance_ids.append(instance.id)
conn.terminate_instances(instance_ids=instance_ids)
with open(USER_FILE) as users:
for user in users:
os.remove('/host/share/{0}/{1}.txt'.format(user.strip(), lab_tag))
print "\nTerminate request sent for all lab instances ..."
print "Lab '{0}' has been deleted ...\n".format(lab_tag)
|
|
# Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for lattice rules."""
import tensorflow.compat.v2 as tf
import tf_quant_finance as tff
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
qmc = tff.math.qmc
@test_util.run_all_in_graph_and_eager_modes
class LatticeRuleTest(tf.test.TestCase):
# Generating vectors for a lattice rule with n=2^20 points in 20 dimensions.
generating_vectors_values = [
1, 387275, 314993, 50301, 174023, 354905, 303021, 486111, 286797, 463237,
211171, 216757, 29831, 155061, 315509, 193933, 129563, 276501, 395079,
139111
]
def generating_vectors(self, dtype=tf.int32):
return tf.constant(self.generating_vectors_values, dtype=dtype)
def test_random_scrambling_vectors(self):
dim = 20
seed = (2, 3)
actual = qmc.random_scrambling_vectors(dim, seed, validate_args=True)
with self.subTest('Shape'):
self.assertEqual(actual.shape, (dim,))
with self.subTest('DType'):
self.assertEqual(actual.dtype, tf.float32)
with self.subTest('Min Value'):
self.assertAllLess(actual, tf.ones(shape=(), dtype=tf.float32))
with self.subTest('Max Value'):
self.assertAllGreaterEqual(actual, tf.zeros(shape=(), dtype=tf.float32))
def test_random_scrambling_vectors_with_dtype(self):
dim = 20
seed = (2, 3)
for dtype in [tf.float32, tf.float64]:
actual = qmc.random_scrambling_vectors(
dim, seed, dtype=dtype, validate_args=True)
with self.subTest('Shape'):
self.assertEqual(actual.shape, (dim,))
with self.subTest('DType'):
self.assertEqual(actual.dtype, dtype)
with self.subTest('Min Value'):
self.assertAllLess(actual, tf.ones(shape=(), dtype=dtype))
with self.subTest('Max Value'):
self.assertAllGreaterEqual(actual, tf.zeros(shape=(), dtype=dtype))
def test_lattice_rule_sample(self):
expected = tf.constant([[0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.0625, 0.6875, 0.0625, 0.8125, 0.4375, 0.5625],
[0.1250, 0.3750, 0.1250, 0.6250, 0.8750, 0.1250],
[0.1875, 0.0625, 0.1875, 0.4375, 0.3125, 0.6875],
[0.2500, 0.7500, 0.2500, 0.2500, 0.7500, 0.2500],
[0.3125, 0.4375, 0.3125, 0.0625, 0.1875, 0.8125],
[0.3750, 0.1250, 0.3750, 0.8750, 0.6250, 0.3750],
[0.4375, 0.8125, 0.4375, 0.6875, 0.0625, 0.9375],
[0.5000, 0.5000, 0.5000, 0.5000, 0.5000, 0.5000],
[0.5625, 0.1875, 0.5625, 0.3125, 0.9375, 0.0625],
[0.6250, 0.8750, 0.6250, 0.1250, 0.3750, 0.6250],
[0.6875, 0.5625, 0.6875, 0.9375, 0.8125, 0.1875],
[0.7500, 0.2500, 0.7500, 0.7500, 0.2500, 0.7500],
[0.8125, 0.9375, 0.8125, 0.5625, 0.6875, 0.3125],
[0.8750, 0.6250, 0.8750, 0.3750, 0.1250, 0.8750],
[0.9375, 0.3125, 0.9375, 0.1875, 0.5625, 0.4375]],
dtype=tf.float32)
for dtype in [tf.int32, tf.int64]:
actual = qmc.lattice_rule_sample(
self.generating_vectors(dtype=dtype), 6, 16, validate_args=True)
with self.subTest('Values'):
self.assertAllClose(
self.evaluate(actual), self.evaluate(expected), rtol=1e-6)
with self.subTest('DType'):
self.assertEqual(actual.dtype, expected.dtype)
def test_lattice_rule_sample_with_sequence_indices(self):
indices = [2, 3, 6, 9, 11, 14]
expected = tf.constant([[0.1250, 0.3750, 0.1250, 0.6250, 0.8750, 0.1250],
[0.1875, 0.0625, 0.1875, 0.4375, 0.3125, 0.6875],
[0.3750, 0.1250, 0.3750, 0.8750, 0.6250, 0.3750],
[0.5625, 0.1875, 0.5625, 0.3125, 0.9375, 0.0625],
[0.6875, 0.5625, 0.6875, 0.9375, 0.8125, 0.1875],
[0.8750, 0.6250, 0.8750, 0.3750, 0.1250, 0.8750]],
dtype=tf.float32)
actual = qmc.lattice_rule_sample(
self.generating_vectors(),
6,
16,
sequence_indices=tf.constant(indices, dtype=tf.int32),
validate_args=True)
with self.subTest('Values'):
self.assertAllClose(
self.evaluate(actual), self.evaluate(expected), rtol=1e-6)
with self.subTest('DType'):
self.assertEqual(actual.dtype, expected.dtype)
def test_lattice_rule_sample_with_zero_additive_shift(self):
generating_vectors = self.generating_vectors()
expected = tf.constant([[0.000, 0.000, 0.000, 0.000, 0.000],
[0.125, 0.375, 0.125, 0.625, 0.875],
[0.250, 0.750, 0.250, 0.250, 0.750],
[0.375, 0.125, 0.375, 0.875, 0.625],
[0.500, 0.500, 0.500, 0.500, 0.500],
[0.625, 0.875, 0.625, 0.125, 0.375],
[0.750, 0.250, 0.750, 0.750, 0.250],
[0.875, 0.625, 0.875, 0.375, 0.125]],
dtype=tf.float32)
for dtype in [tf.float32, tf.float64]:
actual = qmc.lattice_rule_sample(
generating_vectors,
5,
8,
additive_shift=tf.zeros_like(generating_vectors, dtype=dtype),
validate_args=True)
with self.subTest('Values'):
self.assertAllClose(
self.evaluate(actual), self.evaluate(expected), rtol=1e-6)
with self.subTest('DType'):
self.assertEqual(actual.dtype, expected.dtype)
def test_lattice_rule_sample_with_non_zero_additive_shift(self):
generating_vectors = self.generating_vectors()
additive_shift = [
.00, .05, .10, .15, .20, .25, .30, .35, .40, .45, .50, .55, .60, .65,
.70, .75, .80, .85, .90, .95
]
expected = tf.constant([[0.000, 0.050, 0.100, 0.150, 0.200],
[0.125, 0.425, 0.225, 0.775, 0.075],
[0.250, 0.800, 0.350, 0.400, 0.950],
[0.375, 0.175, 0.475, 0.025, 0.825],
[0.500, 0.550, 0.600, 0.650, 0.700],
[0.625, 0.925, 0.725, 0.275, 0.575],
[0.750, 0.300, 0.850, 0.900, 0.450],
[0.875, 0.675, 0.975, 0.525, 0.325]],
dtype=tf.float32)
for dtype in [tf.float32, tf.float64]:
actual = qmc.lattice_rule_sample(
generating_vectors,
5,
8,
additive_shift=tf.constant(additive_shift, dtype=dtype),
validate_args=True)
with self.subTest('Values'):
self.assertAllClose(
self.evaluate(actual), self.evaluate(expected), rtol=1e-6)
with self.subTest('DType'):
self.assertEqual(actual.dtype, expected.dtype)
def test_lattice_rule_sample_with_tent_transform(self):
expected = tf.constant([[0.000, 0.000, 0.000, 0.000, 0.000],
[0.250, 0.750, 0.250, 0.750, 0.250],
[0.500, 0.500, 0.500, 0.500, 0.500],
[0.750, 0.250, 0.750, 0.250, 0.750],
[1.000, 1.000, 1.000, 1.000, 1.000],
[0.750, 0.250, 0.750, 0.250, 0.750],
[0.500, 0.500, 0.500, 0.500, 0.500],
[0.250, 0.750, 0.250, 0.750, 0.250]],
dtype=tf.float32)
actual = qmc.lattice_rule_sample(
self.generating_vectors(),
5,
8,
apply_tent_transform=True,
validate_args=True)
with self.subTest('Values'):
self.assertAllClose(
self.evaluate(actual), self.evaluate(expected), rtol=1e-6)
with self.subTest('DType'):
self.assertEqual(actual.dtype, expected.dtype)
def test_lattice_rule_sample_with_dtype(self):
generating_vectors = self.generating_vectors()
for dtype in [tf.float32, tf.float64]:
expected = tf.constant([[0.000, 0.000, 0.000, 0.000, 0.000],
[0.125, 0.375, 0.125, 0.625, 0.875],
[0.250, 0.750, 0.250, 0.250, 0.750],
[0.375, 0.125, 0.375, 0.875, 0.625],
[0.500, 0.500, 0.500, 0.500, 0.500],
[0.625, 0.875, 0.625, 0.125, 0.375],
[0.750, 0.250, 0.750, 0.750, 0.250],
[0.875, 0.625, 0.875, 0.375, 0.125]],
dtype=dtype)
actual = qmc.lattice_rule_sample(
generating_vectors, 5, 8, validate_args=True, dtype=dtype)
with self.subTest('Values'):
self.assertAllClose(
self.evaluate(actual), self.evaluate(expected), rtol=1e-6)
with self.subTest('DType'):
self.assertEqual(actual.dtype, dtype)
if __name__ == '__main__':
tf.test.main()
|
|
from enum import IntEnum
import datetime
from project.bl.utils import Resource
from project.extensions import db
from project.lib.orm.types import TypeEnum, GUID
from sqlalchemy.ext.orderinglist import ordering_list
from sqlalchemy.ext.associationproxy import association_proxy
from project.lib.orm.conditions import ConditionDeleted, ConditionHidden
class Vacancy(db.Model):
__tablename__ = 'vacancies'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(100), nullable=False)
short_description = db.Column(db.String(300), nullable=False)
text = db.Column(db.Text(), nullable=False)
category_id = db.Column(db.Integer, db.ForeignKey('categories.id'))
category = db.relationship('Category', backref=db.backref('vacancies'))
name_in_url = db.Column(db.String(50), nullable=False, unique=True)
visits = db.Column(db.Integer, nullable=False, default=0)
salary = db.Column(db.String(50))
description = db.Column(db.String(200)) # for search spider
keywords = db.Column(db.String(1000))
city_id = db.Column(db.Integer, db.ForeignKey('cities.id'))
city = db.relationship('City', backref=db.backref('vacancies'))
is_hidden = db.Column(db.Boolean, nullable=False, default=False)
is_deleted = db.Column(db.Boolean, nullable=False, default=False)
updated_at = db.Column(db.DateTime, default=datetime.datetime.now,
onupdate=datetime.datetime.now)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
who_updated = db.relationship('User')
condition_is_hidden = ConditionHidden()
condition_is_deleted = ConditionDeleted()
bl = Resource("bl.vacancy")
def __repr__(self):
return "[{}] {}".format(self.__class__.__name__, self.title)
class Category(db.Model):
__tablename__ = 'categories'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50), nullable=False, unique=True)
bl = Resource('bl.category')
def __str__(self):
return self.name
def __repr__(self):
return "[{}] {}".format(self.__class__.__name__, self.name)
class User(db.Model):
__tablename__ = 'users'
# noinspection PyTypeChecker
ROLE = IntEnum('Role', {
'staff': 0,
'superuser': 1,
})
id = db.Column(db.Integer, primary_key=True)
login = db.Column(db.String(30), unique=True, nullable=False)
password = db.Column(db.String(100), nullable=False)
name = db.Column(db.String(30))
surname = db.Column(db.String(30))
email = db.Column(db.String(320), nullable=False, unique=True)
role = db.Column(TypeEnum(ROLE), nullable=False, default=ROLE.staff)
bl = Resource('bl.user')
def __repr__(self):
return '{} ({})'.format(self.login, self.get_full_name())
def get_full_name(self):
return '{} {}'.format(self.name or '', self.surname or '').strip()
def is_superuser(self):
return self.role == self.ROLE.superuser
class City(db.Model):
__tablename__ = 'cities'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(20), nullable=False, unique=True)
bl = Resource('bl.city')
def __str__(self):
return self.name
def __repr__(self):
return "[{}] {}".format(self.__class__.__name__, self.name)
class BlockPageAssociation(db.Model):
__tablename__ = 'block_page_associations'
page_id = db.Column(
db.Integer,
db.ForeignKey('pages.id'),
primary_key=True
)
block_id = db.Column(
db.Integer,
db.ForeignKey('pageblocks.id'),
primary_key=True
)
position = db.Column(db.Integer)
block = db.relationship(
'PageBlock',
)
class PageChunk(db.Model):
__tablename__ = 'pagechunks'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text, unique=True, nullable=False) # use in template
title = db.Column(db.Text, unique=True, nullable=False)
text = db.Column(db.Text)
bl = Resource('bl.pagechunk')
class PageBlock(db.Model):
__tablename__ = 'pageblocks'
# noinspection PyTypeChecker
TYPE = IntEnum(
'Block_type',
{
'img_left': 0,
'img_right': 1,
'no_img': 2,
},
)
id = db.Column(db.Integer, primary_key=True)
block_type = db.Column(
TypeEnum(TYPE),
default=TYPE.img_left,
nullable=False
)
# header
title = db.Column(db.VARCHAR(128), nullable=True)
text = db.Column(db.Text)
# used for mainpage
short_description = db.Column(db.VARCHAR(256), nullable=True)
image = db.Column(db.Text, nullable=True)
bl = Resource('bl.pageblock')
def __str__(self):
return '%s: %s' % (self.title, self.text or self.short_description)
class Page(db.Model):
__tablename__ = 'pages'
# noinspection PyTypeChecker
TYPE = IntEnum('Page_type', {
'PROJECTS': 1,
'ABOUT': 2,
'CONTACTS': 3,
'MAINPAGE': 4,
})
id = db.Column(db.Integer, primary_key=True)
type = db.Column(TypeEnum(TYPE), unique=True, nullable=False)
title = db.Column(db.VARCHAR(128))
_blocks = db.relationship(
"BlockPageAssociation",
order_by='BlockPageAssociation.position',
collection_class=ordering_list('position'),
cascade='save-update, merge, delete, delete-orphan',
)
blocks = association_proxy(
'_blocks',
'block',
creator=lambda _pb: BlockPageAssociation(block=_pb)
)
bl = Resource('bl.page')
def __str__(self):
return '%s (%s)' % (self.title, self.url)
class Token(db.Model):
__tablename__ = 'tokens'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
user = db.relationship(
'User',
)
token = db.Column(db.String, nullable=False)
bl = Resource('bl.token')
class MailTemplate(db.Model):
__tablename__ = 'mailtemplates'
# noinspection PyTypeChecker
MAIL = IntEnum('Mail', {
'CV': 0,
'REPLY': 1,
})
id = db.Column(db.Integer, primary_key=True)
mail = db.Column(TypeEnum(MAIL), nullable=False)
title = db.Column(db.String, nullable=False)
subject = db.Column(db.String(79), nullable=False)
html = db.Column(db.Text, nullable=False)
help_msg = db.Column(db.Text)
updated_at = db.Column(db.Date, onupdate=datetime.datetime.now,
default=datetime.datetime.now)
bl = Resource('bl.mailtemplate')
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
who_updated = db.relationship('User')
def __repr__(self):
return str(self.title)
class UploadedImage(db.Model):
__tablename__ = 'uploaded_images'
IMG_CATEGORY = IntEnum('ImageCategory', {
'other': 0,
'gallery': 1,
})
id = db.Column(db.Integer, primary_key=True)
name = db.Column(GUID, nullable=False)
ext = db.Column(db.VARCHAR, nullable=False)
img_category = db.Column(
TypeEnum(IMG_CATEGORY),
default=IMG_CATEGORY.other,
nullable=False,
)
title = db.Column(db.VARCHAR(32))
description = db.Column(db.VARCHAR(128))
__table_args__ = (
db.UniqueConstraint(
'name',
'ext',
'img_category',
),
)
bl = Resource('bl.uploadedimage')
def init_db():
db.drop_all()
db.create_all()
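# Usage sketch (illustrative only): creating and persisting a vacancy, assuming
# the Flask application and the `db` object from project.extensions are already
# configured and this runs inside an application context.
#
#   category = Category(name='Development')
#   city = City(name='Moscow')
#   vacancy = Vacancy(title='Python developer',
#                     short_description='Backend position',
#                     text='Full description ...',
#                     name_in_url='python-developer',
#                     category=category, city=city)
#   db.session.add(vacancy)
#   db.session.commit()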
|
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Forker configuration broker.
A simple servlet handling GET and DELETE commands to provide a raw JSON
configuration for the requested isolate, if available.
The stored configurations should be the ones given by a monitor requesting to
start an isolate.
A configuration should be deleted on a request by the isolate itself when it
read it correctly.
:author: Thomas Calmant
:license: Apache Software License 2.0
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Python standard library
import json
import logging
import threading
# Pelix framework
import pelix.http
from pelix.ipopo.decorators import ComponentFactory, Invalidate, Property, \
    Provides
# COHORTE constants
import cohorte
import cohorte.version
# ------------------------------------------------------------------------------
# Bundle version
__version__ = cohorte.version.__version__
# ------------------------------------------------------------------------------
MIME_TYPE_JSON = 'application/json'
""" JSON data MIME type """
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
@ComponentFactory("cohorte-forker-broker-factory")
@Provides(cohorte.SERVICE_CONFIGURATION_BROKER, controller='_svc_flag')
@Provides(pelix.http.HTTP_SERVLET)
@Property("_servlet_path", pelix.http.HTTP_SERVLET_PATH, "/cohorte/broker")
class ConfigBroker(object):
"""
The configuration broker servlet
"""
def __init__(self):
"""
Sets up members
"""
# The broker flag
self._svc_flag = False
# The path to this servlet
self._servlet_path = None
# Servlet access
self._host = None
self._port = None
# Configurations : Isolate UID -> JSON string
self._configurations = {}
# Configurations lock
self.__config_lock = threading.Lock()
def bound_to(self, path, parameters):
"""
Servlet bound to a HTTP service
:param path: The path to access the servlet
:param parameters: The server & servlet parameters
"""
if path == self._servlet_path:
# Update our access information
self._host = parameters['http.address']
self._port = int(parameters['http.port'])
# Register our service
self._svc_flag = True
else:
_logger.warning("Bound to a HTTP service with a different path."
"Ignore.")
def unbound_from(self, path, parameters):
"""
Servlet unbound from a HTTP service
:param path: The path to access the servlet
:param parameters: The server & servlet parameters
"""
if path == self._servlet_path:
# Unregister our service
self._svc_flag = False
# Clear our access information
self._host = None
self._port = None
def do_GET(self, request, response):
"""
Handles GET requests
:param request: The HTTP request bean
        :param response: The HTTP response handler
"""
# Get the isolate UID (last part of the request path)
uid = request.get_path().split('/')[-1]
with self.__config_lock:
# Get the associated configuration
json_config = self._configurations.get(uid)
_logger.debug("get isolate configuration {}".format(json_config))
if json_config:
# Send the found configuration
response.send_content(200, json_config, MIME_TYPE_JSON)
else:
# Unknown isolate
error = {'uid': uid,
'result': False,
'message': "Unknown isolate UID"}
response.send_content(404, json.dumps(error), MIME_TYPE_JSON)
def do_DELETE(self, request, response):
"""
Handles DELETE requests
:param request: The HTTP request bean
        :param response: The HTTP response handler
"""
# Get the isolate UID (last part of the request path)
uid = request.get_path().split('/')[-1]
result = {'uid': uid}
if self.delete_configuration(uid):
# Success
code = 200
result['result'] = True
result['message'] = "Configuration deleted"
else:
# Error
code = 404
result['result'] = False
result['message'] = "Unknown isolate UID"
response.send_content(code, json.dumps(result), MIME_TYPE_JSON)
def delete_configuration(self, uid):
"""
Deletes the configuration of the given isolate
:param uid: An isolate UID
:return: True if the isolate was known, else False
"""
with self.__config_lock:
if uid in self._configurations:
# Found !
del self._configurations[uid]
return True
return False
def store_configuration(self, uid, dict_config):
"""
Stores the configuration of the given isolate
:param uid: An isolate UID
:param dict_config: The configuration dictionary of the given isolate
:return: The URL to access this configuration
:raise ValueError: Invalid parameter
"""
if not uid or not dict_config:
# Invalid parameters
raise ValueError("Can't store an invalid configuration")
_logger.debug("store isolate {} configuration {}".format(uid, dict_config))
with self.__config_lock:
# Store the configuration as a JSON string
self._configurations[uid] = json.dumps(dict_config)
# Send a "localhost" address to avoid an "address not available" error
# under Windows
if ':' in self._host:
# IPv6 host
host = '[::1]'
else:
host = '127.0.0.1'
return 'http://{host}:{port}{path}/{uid}'\
.format(uid=uid, host=host, port=self._port,
path=self._servlet_path)
@Invalidate
def invalidate(self, context):
"""
Component invalidated
:param context: The bundle context
"""
# Reset the service flag
self._svc_flag = False
with self.__config_lock:
self._configurations.clear()
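# --- Client-side sketch (illustrative only, Python 3 stdlib shown) ---
# An isolate can consume the URL returned by store_configuration() with any
# HTTP client: GET fetches the JSON configuration, DELETE acknowledges it.
# The isolate UID below is a placeholder.
#
#   import json
#   import urllib.request
#
#   url = "http://127.0.0.1:8080/cohorte/broker/<isolate-uid>"
#   with urllib.request.urlopen(url) as response:
#       configuration = json.loads(response.read().decode("utf-8"))
#   urllib.request.urlopen(urllib.request.Request(url, method="DELETE"))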
|
|
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2009, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Some unit tests for the EC2Connection
"""
import unittest
import time
import telnetlib
import socket
from nose.plugins.attrib import attr
from boto.ec2.connection import EC2Connection
from boto.exception import EC2ResponseError
class EC2ConnectionTest(unittest.TestCase):
ec2 = True
@attr('notdefault')
def test_launch_permissions(self):
# this is my user_id, if you want to run these tests you should
# replace this with yours or they won't work
user_id = '963068290131'
print('--- running EC2Connection tests ---')
c = EC2Connection()
# get list of private AMI's
rs = c.get_all_images(owners=[user_id])
assert len(rs) > 0
# now pick the first one
image = rs[0]
# temporarily make this image runnable by everyone
status = image.set_launch_permissions(group_names=['all'])
assert status
d = image.get_launch_permissions()
assert 'groups' in d
assert len(d['groups']) > 0
# now remove that permission
status = image.remove_launch_permissions(group_names=['all'])
assert status
time.sleep(10)
d = image.get_launch_permissions()
assert 'groups' not in d
def test_1_basic(self):
# create 2 new security groups
c = EC2Connection()
group1_name = 'test-%d' % int(time.time())
group_desc = 'This is a security group created during unit testing'
group1 = c.create_security_group(group1_name, group_desc)
time.sleep(2)
group2_name = 'test-%d' % int(time.time())
group_desc = 'This is a security group created during unit testing'
group2 = c.create_security_group(group2_name, group_desc)
# now get a listing of all security groups and look for our new one
rs = c.get_all_security_groups()
found = False
for g in rs:
if g.name == group1_name:
found = True
assert found
# now pass arg to filter results to only our new group
rs = c.get_all_security_groups([group1_name])
assert len(rs) == 1
# try some group to group authorizations/revocations
# first try the old style
status = c.authorize_security_group(group1.name,
group2.name,
group2.owner_id)
assert status
status = c.revoke_security_group(group1.name,
group2.name,
group2.owner_id)
assert status
# now try specifying a specific port
status = c.authorize_security_group(group1.name,
group2.name,
group2.owner_id,
'tcp', 22, 22)
assert status
status = c.revoke_security_group(group1.name,
group2.name,
group2.owner_id,
'tcp', 22, 22)
assert status
# now delete the second security group
status = c.delete_security_group(group2_name)
# now make sure it's really gone
rs = c.get_all_security_groups()
found = False
for g in rs:
if g.name == group2_name:
found = True
assert not found
group = group1
# now try to launch apache image with our new security group
rs = c.get_all_images()
img_loc = 'ec2-public-images/fedora-core4-apache.manifest.xml'
for image in rs:
if image.location == img_loc:
break
reservation = image.run(security_groups=[group.name])
instance = reservation.instances[0]
while instance.state != 'running':
print('\tinstance is %s' % instance.state)
time.sleep(30)
instance.update()
        # instance is now running, try to telnet to port 80
t = telnetlib.Telnet()
try:
t.open(instance.dns_name, 80)
except socket.error:
pass
# now open up port 80 and try again, it should work
group.authorize('tcp', 80, 80, '0.0.0.0/0')
t.open(instance.dns_name, 80)
t.close()
# now revoke authorization and try again
group.revoke('tcp', 80, 80, '0.0.0.0/0')
try:
t.open(instance.dns_name, 80)
except socket.error:
pass
# now kill the instance and delete the security group
instance.terminate()
# check that state and previous_state have updated
assert instance.state == 'shutting-down'
assert instance.state_code == 32
assert instance.previous_state == 'running'
assert instance.previous_state_code == 16
# unfortunately, I can't delete the sg within this script
#sg.delete()
# create a new key pair
key_name = 'test-%d' % int(time.time())
status = c.create_key_pair(key_name)
assert status
# now get a listing of all key pairs and look for our new one
rs = c.get_all_key_pairs()
found = False
for k in rs:
if k.name == key_name:
found = True
assert found
# now pass arg to filter results to only our new key pair
rs = c.get_all_key_pairs([key_name])
assert len(rs) == 1
key_pair = rs[0]
# now delete the key pair
status = c.delete_key_pair(key_name)
# now make sure it's really gone
rs = c.get_all_key_pairs()
found = False
for k in rs:
if k.name == key_name:
found = True
assert not found
# short test around Paid AMI capability
demo_paid_ami_id = 'ami-bd9d78d4'
demo_paid_ami_product_code = 'A79EC0DB'
l = c.get_all_images([demo_paid_ami_id])
assert len(l) == 1
assert len(l[0].product_codes) == 1
assert l[0].product_codes[0] == demo_paid_ami_product_code
print('--- tests completed ---')
def test_dry_run(self):
c = EC2Connection()
dry_run_msg = 'Request would have succeeded, but DryRun flag is set.'
try:
rs = c.get_all_images(dry_run=True)
self.fail("Should have gotten an exception")
except EC2ResponseError as e:
self.assertTrue(dry_run_msg in str(e))
try:
rs = c.run_instances(
image_id='ami-a0cd60c9',
instance_type='m1.small',
dry_run=True
)
self.fail("Should have gotten an exception")
except EC2ResponseError as e:
self.assertTrue(dry_run_msg in str(e))
# Need an actual instance for the rest of this...
rs = c.run_instances(
image_id='ami-a0cd60c9',
instance_type='m1.small'
)
time.sleep(120)
try:
rs = c.stop_instances(
instance_ids=[rs.instances[0].id],
dry_run=True
)
self.fail("Should have gotten an exception")
except EC2ResponseError as e:
self.assertTrue(dry_run_msg in str(e))
try:
rs = c.terminate_instances(
instance_ids=[rs.instances[0].id],
dry_run=True
)
self.fail("Should have gotten an exception")
except EC2ResponseError as e:
self.assertTrue(dry_run_msg in str(e))
# And kill it.
rs.instances[0].terminate()
|
|
#!/usr/bin/env python
# pylint: skip-file
from __future__ import absolute_import, division, print_function
import salt.ext.tornado.escape
from salt.ext.tornado.escape import utf8, xhtml_escape, xhtml_unescape, url_escape, url_unescape, to_unicode, json_decode, json_encode, squeeze, recursive_unicode
from salt.ext.tornado.util import unicode_type
from salt.ext.tornado.test.util import unittest
linkify_tests = [
# (input, linkify_kwargs, expected_output)
("hello http://world.com/!", {},
u'hello <a href="http://world.com/">http://world.com/</a>!'),
("hello http://world.com/with?param=true&stuff=yes", {},
u'hello <a href="http://world.com/with?param=true&stuff=yes">http://world.com/with?param=true&stuff=yes</a>'),
# an opened paren followed by many chars killed Gruber's regex
("http://url.com/w(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", {},
u'<a href="http://url.com/w">http://url.com/w</a>(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'),
# as did too many dots at the end
("http://url.com/withmany.......................................", {},
u'<a href="http://url.com/withmany">http://url.com/withmany</a>.......................................'),
("http://url.com/withmany((((((((((((((((((((((((((((((((((a)", {},
u'<a href="http://url.com/withmany">http://url.com/withmany</a>((((((((((((((((((((((((((((((((((a)'),
# some examples from http://daringfireball.net/2009/11/liberal_regex_for_matching_urls
# plus a fex extras (such as multiple parentheses).
("http://foo.com/blah_blah", {},
u'<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>'),
("http://foo.com/blah_blah/", {},
u'<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>'),
("(Something like http://foo.com/blah_blah)", {},
u'(Something like <a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>)'),
("http://foo.com/blah_blah_(wikipedia)", {},
u'<a href="http://foo.com/blah_blah_(wikipedia)">http://foo.com/blah_blah_(wikipedia)</a>'),
("http://foo.com/blah_(blah)_(wikipedia)_blah", {},
u'<a href="http://foo.com/blah_(blah)_(wikipedia)_blah">http://foo.com/blah_(blah)_(wikipedia)_blah</a>'),
("(Something like http://foo.com/blah_blah_(wikipedia))", {},
u'(Something like <a href="http://foo.com/blah_blah_(wikipedia)">http://foo.com/blah_blah_(wikipedia)</a>)'),
("http://foo.com/blah_blah.", {},
u'<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>.'),
("http://foo.com/blah_blah/.", {},
u'<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>.'),
("<http://foo.com/blah_blah>", {},
u'<<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>>'),
("<http://foo.com/blah_blah/>", {},
u'<<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>>'),
("http://foo.com/blah_blah,", {},
u'<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>,'),
("http://www.example.com/wpstyle/?p=364.", {},
u'<a href="http://www.example.com/wpstyle/?p=364">http://www.example.com/wpstyle/?p=364</a>.'),
("rdar://1234",
{"permitted_protocols": ["http", "rdar"]},
u'<a href="rdar://1234">rdar://1234</a>'),
("rdar:/1234",
{"permitted_protocols": ["rdar"]},
u'<a href="rdar:/1234">rdar:/1234</a>'),
("http://userid:[email protected]:8080", {},
u'<a href="http://userid:[email protected]:8080">http://userid:[email protected]:8080</a>'),
("http://[email protected]", {},
u'<a href="http://[email protected]">http://[email protected]</a>'),
("http://[email protected]:8080", {},
u'<a href="http://[email protected]:8080">http://[email protected]:8080</a>'),
("http://userid:[email protected]", {},
u'<a href="http://userid:[email protected]">http://userid:[email protected]</a>'),
("message://%[email protected]%3e",
{"permitted_protocols": ["http", "message"]},
u'<a href="message://%[email protected]%3e">message://%[email protected]%3e</a>'),
(u"http://\u27a1.ws/\u4a39", {},
u'<a href="http://\u27a1.ws/\u4a39">http://\u27a1.ws/\u4a39</a>'),
("<tag>http://example.com</tag>", {},
u'<tag><a href="http://example.com">http://example.com</a></tag>'),
("Just a www.example.com link.", {},
u'Just a <a href="http://www.example.com">www.example.com</a> link.'),
("Just a www.example.com link.",
{"require_protocol": True},
u'Just a www.example.com link.'),
("A http://reallylong.com/link/that/exceedsthelenglimit.html",
{"require_protocol": True, "shorten": True},
u'A <a href="http://reallylong.com/link/that/exceedsthelenglimit.html" title="http://reallylong.com/link/that/exceedsthelenglimit.html">http://reallylong.com/link...</a>'),
("A http://reallylongdomainnamethatwillbetoolong.com/hi!",
{"shorten": True},
u'A <a href="http://reallylongdomainnamethatwillbetoolong.com/hi" title="http://reallylongdomainnamethatwillbetoolong.com/hi">http://reallylongdomainnametha...</a>!'),
("A file:///passwords.txt and http://web.com link", {},
u'A file:///passwords.txt and <a href="http://web.com">http://web.com</a> link'),
("A file:///passwords.txt and http://web.com link",
{"permitted_protocols": ["file"]},
u'A <a href="file:///passwords.txt">file:///passwords.txt</a> and http://web.com link'),
("www.external-link.com",
{"extra_params": 'rel="nofollow" class="external"'},
u'<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a>'),
("www.external-link.com and www.internal-link.com/blogs extra",
{"extra_params": lambda href: 'class="internal"' if href.startswith("http://www.internal-link.com") else 'rel="nofollow" class="external"'},
u'<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a> and <a href="http://www.internal-link.com/blogs" class="internal">www.internal-link.com/blogs</a> extra'),
("www.external-link.com",
{"extra_params": lambda href: ' rel="nofollow" class="external" '},
u'<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a>'),
]
class EscapeTestCase(unittest.TestCase):
def test_linkify(self):
for text, kwargs, html in linkify_tests:
linked = salt.ext.tornado.escape.linkify(text, **kwargs)
self.assertEqual(linked, html)
def test_xhtml_escape(self):
tests = [
("<foo>", "<foo>"),
(u"<foo>", u"<foo>"),
(b"<foo>", b"<foo>"),
("<>&\"'", "<>&"'"),
("&", "&amp;"),
(u"<\u00e9>", u"<\u00e9>"),
(b"<\xc3\xa9>", b"<\xc3\xa9>"),
]
for unescaped, escaped in tests:
self.assertEqual(utf8(xhtml_escape(unescaped)), utf8(escaped))
self.assertEqual(utf8(unescaped), utf8(xhtml_unescape(escaped)))
def test_xhtml_unescape_numeric(self):
tests = [
            ('foo&#32;bar', 'foo bar'),
            ('foo&#x20;bar', 'foo bar'),
            ('foo&#X20;bar', 'foo bar'),
            ('foo&#xabc;bar', u'foo\u0abcbar'),
('foo&#xyz;bar', 'foo&#xyz;bar'), # invalid encoding
('foo&#;bar', 'foo&#;bar'), # invalid encoding
('foo&#x;bar', 'foo&#x;bar'), # invalid encoding
]
for escaped, unescaped in tests:
self.assertEqual(unescaped, xhtml_unescape(escaped))
def test_url_escape_unicode(self):
tests = [
# byte strings are passed through as-is
(u'\u00e9'.encode('utf8'), '%C3%A9'),
(u'\u00e9'.encode('latin1'), '%E9'),
# unicode strings become utf8
(u'\u00e9', '%C3%A9'),
]
for unescaped, escaped in tests:
self.assertEqual(url_escape(unescaped), escaped)
def test_url_unescape_unicode(self):
tests = [
('%C3%A9', u'\u00e9', 'utf8'),
('%C3%A9', u'\u00c3\u00a9', 'latin1'),
('%C3%A9', utf8(u'\u00e9'), None),
]
for escaped, unescaped, encoding in tests:
# input strings to url_unescape should only contain ascii
# characters, but make sure the function accepts both byte
# and unicode strings.
self.assertEqual(url_unescape(to_unicode(escaped), encoding), unescaped)
self.assertEqual(url_unescape(utf8(escaped), encoding), unescaped)
def test_url_escape_quote_plus(self):
unescaped = '+ #%'
plus_escaped = '%2B+%23%25'
escaped = '%2B%20%23%25'
self.assertEqual(url_escape(unescaped), plus_escaped)
self.assertEqual(url_escape(unescaped, plus=False), escaped)
self.assertEqual(url_unescape(plus_escaped), unescaped)
self.assertEqual(url_unescape(escaped, plus=False), unescaped)
self.assertEqual(url_unescape(plus_escaped, encoding=None),
utf8(unescaped))
self.assertEqual(url_unescape(escaped, encoding=None, plus=False),
utf8(unescaped))
def test_escape_return_types(self):
# On python2 the escape methods should generally return the same
# type as their argument
self.assertEqual(type(xhtml_escape("foo")), str)
self.assertEqual(type(xhtml_escape(u"foo")), unicode_type)
def test_json_decode(self):
# json_decode accepts both bytes and unicode, but strings it returns
# are always unicode.
self.assertEqual(json_decode(b'"foo"'), u"foo")
self.assertEqual(json_decode(u'"foo"'), u"foo")
# Non-ascii bytes are interpreted as utf8
self.assertEqual(json_decode(utf8(u'"\u00e9"')), u"\u00e9")
def test_json_encode(self):
# json deals with strings, not bytes. On python 2 byte strings will
# convert automatically if they are utf8; on python 3 byte strings
# are not allowed.
self.assertEqual(json_decode(json_encode(u"\u00e9")), u"\u00e9")
if bytes is str:
self.assertEqual(json_decode(json_encode(utf8(u"\u00e9"))), u"\u00e9")
self.assertRaises(UnicodeDecodeError, json_encode, b"\xe9")
def test_squeeze(self):
self.assertEqual(squeeze(u'sequences of whitespace chars'), u'sequences of whitespace chars')
def test_recursive_unicode(self):
tests = {
'dict': {b"foo": b"bar"},
'list': [b"foo", b"bar"],
'tuple': (b"foo", b"bar"),
'bytes': b"foo"
}
self.assertEqual(recursive_unicode(tests['dict']), {u"foo": u"bar"})
self.assertEqual(recursive_unicode(tests['list']), [u"foo", u"bar"])
self.assertEqual(recursive_unicode(tests['tuple']), (u"foo", u"bar"))
self.assertEqual(recursive_unicode(tests['bytes']), u"foo")
|
|
from rest_framework import serializers as ser
from rest_framework import exceptions
from modularodm import Q
from modularodm.exceptions import ValidationValueError
from framework.auth.core import Auth
from framework.exceptions import PermissionsError
from django.conf import settings
from website.project.metadata.schemas import ACTIVE_META_SCHEMAS, LATEST_SCHEMA_VERSION
from website.project.metadata.utils import is_prereg_admin_not_project_admin
from website.models import Node, User, Comment, Institution, MetaSchema, DraftRegistration
from website.exceptions import NodeStateError
from website.util import permissions as osf_permissions
from website.project.model import NodeUpdateError
from api.base.utils import get_user_auth, get_object_or_error, absolute_reverse
from api.base.serializers import (JSONAPISerializer, WaterbutlerLink, NodeFileHyperLinkField, IDField, TypeField,
TargetTypeField, JSONAPIListField, LinksField, RelationshipField,
HideIfRegistration, RestrictedDictSerializer,
JSONAPIRelationshipSerializer, relationship_diff)
from api.base.exceptions import InvalidModelValueError, RelationshipPostMakesNoChanges
class NodeTagField(ser.Field):
def to_representation(self, obj):
if obj is not None:
return obj._id
return None
def to_internal_value(self, data):
return data
class NodeLicenseSerializer(RestrictedDictSerializer):
copyright_holders = ser.ListField(allow_empty=True, read_only=True)
year = ser.CharField(allow_blank=True, read_only=True)
class NodeSerializer(JSONAPISerializer):
# TODO: If we have to redo this implementation in any of the other serializers, subclass ChoiceField and make it
# handle blank choices properly. Currently DRF ChoiceFields ignore blank options, which is incorrect in this
# instance
filterable_fields = frozenset([
'id',
'title',
'description',
'public',
'tags',
'category',
'date_created',
'date_modified',
'root',
'parent',
'contributors'
])
non_anonymized_fields = [
'id',
'title',
'description',
'category',
'date_created',
'date_modified',
'registration',
'tags',
'public',
'license',
'links',
'children',
'comments',
'contributors',
'files',
'node_links',
'parent',
'root',
'logs',
'wikis'
]
id = IDField(source='_id', read_only=True)
type = TypeField()
category_choices = settings.NODE_CATEGORY_MAP.items()
category_choices_string = ', '.join(["'{}'".format(choice[0]) for choice in category_choices])
title = ser.CharField(required=True)
description = ser.CharField(required=False, allow_blank=True, allow_null=True)
category = ser.ChoiceField(choices=category_choices, help_text='Choices: ' + category_choices_string)
date_created = ser.DateTimeField(read_only=True)
date_modified = ser.DateTimeField(read_only=True)
registration = ser.BooleanField(read_only=True, source='is_registration')
fork = ser.BooleanField(read_only=True, source='is_fork')
collection = ser.BooleanField(read_only=True, source='is_collection')
tags = JSONAPIListField(child=NodeTagField(), required=False)
node_license = NodeLicenseSerializer(read_only=True, required=False)
template_from = ser.CharField(required=False, allow_blank=False, allow_null=False,
help_text='Specify a node id for a node you would like to use as a template for the '
'new node. Templating is like forking, except that you do not copy the '
'files, only the project structure. Some information is changed on the top '
'level project by submitting the appropriate fields in the request body, '
'and some information will not change. By default, the description will '
'be cleared and the project will be made private.')
current_user_permissions = ser.SerializerMethodField(help_text='List of strings representing the permissions '
'for the current user on this node.')
# Public is only write-able by admins--see update method
public = ser.BooleanField(source='is_public', required=False,
help_text='Nodes that are made public will give read-only access '
'to everyone. Private nodes require explicit read '
'permission. Write and admin access are the same for '
'public and private nodes. Administrators on a parent '
'node have implicit read permissions for all child nodes')
links = LinksField({'html': 'get_absolute_html_url'})
# TODO: When we have osf_permissions.ADMIN permissions, make this writable for admins
license = RelationshipField(
related_view='licenses:license-detail',
related_view_kwargs={'license_id': '<node_license.node_license._id>'},
)
children = RelationshipField(
related_view='nodes:node-children',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'count': 'get_node_count'},
)
comments = RelationshipField(
related_view='nodes:node-comments',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'unread': 'get_unread_comments_count'})
contributors = RelationshipField(
related_view='nodes:node-contributors',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'count': 'get_contrib_count'},
)
files = RelationshipField(
related_view='nodes:node-providers',
related_view_kwargs={'node_id': '<pk>'}
)
wikis = RelationshipField(
related_view='nodes:node-wikis',
related_view_kwargs={'node_id': '<pk>'}
)
forked_from = RelationshipField(
related_view=lambda n: 'registrations:registration-detail' if getattr(n, 'is_registration', False) else 'nodes:node-detail',
related_view_kwargs={'node_id': '<forked_from_id>'}
)
forks = RelationshipField(
related_view='nodes:node-forks',
related_view_kwargs={'node_id': '<pk>'}
)
node_links = RelationshipField(
related_view='nodes:node-pointers',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'count': 'get_pointers_count'},
)
parent = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<parent_node._id>'},
filter_key='parent_node'
)
draft_registrations = HideIfRegistration(RelationshipField(
related_view='nodes:node-draft-registrations',
related_view_kwargs={'node_id': '<pk>'}
))
registrations = HideIfRegistration(RelationshipField(
related_view='nodes:node-registrations',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'count': 'get_registration_count'}
))
affiliated_institutions = RelationshipField(
related_view='nodes:node-institutions',
related_view_kwargs={'node_id': '<pk>'},
self_view='nodes:node-relationships-institutions',
self_view_kwargs={'node_id': '<pk>'}
)
root = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<root._id>'}
)
logs = RelationshipField(
related_view='nodes:node-logs',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'count': 'get_logs_count'}
)
def get_current_user_permissions(self, obj):
user = self.context['request'].user
if user.is_anonymous():
return ['read']
permissions = obj.get_permissions(user=user)
if not permissions:
permissions = ['read']
return permissions
class Meta:
type_ = 'nodes'
def get_absolute_url(self, obj):
return obj.get_absolute_url()
# TODO: See if we can get the count filters into the filter rather than the serializer.
def get_logs_count(self, obj):
return len(obj.logs)
def get_node_count(self, obj):
auth = get_user_auth(self.context['request'])
nodes = [node for node in obj.nodes if node.can_view(auth) and node.primary and not node.is_deleted]
return len(nodes)
def get_contrib_count(self, obj):
return len(obj.contributors)
def get_registration_count(self, obj):
auth = get_user_auth(self.context['request'])
registrations = [node for node in obj.registrations_all if node.can_view(auth)]
return len(registrations)
def get_pointers_count(self, obj):
return len(obj.nodes_pointer)
def get_unread_comments_count(self, obj):
user = get_user_auth(self.context['request']).user
node_comments = Comment.find_n_unread(user=user, node=obj, page='node')
return {
'node': node_comments
}
def create(self, validated_data):
if 'template_from' in validated_data:
request = self.context['request']
user = request.user
template_from = validated_data.pop('template_from')
template_node = Node.load(key=template_from)
if template_node is None:
raise exceptions.NotFound
if not template_node.has_permission(user, 'read', check_parent=False):
raise exceptions.PermissionDenied
validated_data.pop('creator')
changed_data = {template_from: validated_data}
node = template_node.use_as_template(auth=get_user_auth(request), changes=changed_data)
else:
node = Node(**validated_data)
try:
node.save()
except ValidationValueError as e:
raise InvalidModelValueError(detail=e.message)
return node
def update(self, node, validated_data):
"""Update instance with the validated data. Requires
the request to be in the serializer context.
"""
assert isinstance(node, Node), 'node must be a Node'
auth = get_user_auth(self.context['request'])
old_tags = set([tag._id for tag in node.tags])
if 'tags' in validated_data:
current_tags = set(validated_data.pop('tags', []))
elif self.partial:
current_tags = set(old_tags)
else:
current_tags = set()
for new_tag in (current_tags - old_tags):
node.add_tag(new_tag, auth=auth)
for deleted_tag in (old_tags - current_tags):
node.remove_tag(deleted_tag, auth=auth)
if validated_data:
try:
node.update(validated_data, auth=auth)
except ValidationValueError as e:
raise InvalidModelValueError(detail=e.message)
except PermissionsError:
raise exceptions.PermissionDenied
except NodeUpdateError as e:
raise exceptions.ValidationError(detail=e.reason)
except NodeStateError as e:
raise InvalidModelValueError(detail=e.message)
return node
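# Minimal sketch of the tag-synchronization pattern used in update() above,
# written against plain callables so it stays independent of the Node model;
# add_tag/remove_tag are assumed stand-ins for the model methods.
def sync_tags_sketch(old_tags, new_tags, add_tag, remove_tag):
    """Apply only the additions and removals implied by the set difference."""
    old_tags, new_tags = set(old_tags), set(new_tags)
    for tag in new_tags - old_tags:
        add_tag(tag)
    for tag in old_tags - new_tags:
        remove_tag(tag)
# For example, syncing {'a', 'b'} to {'b', 'c'} calls add_tag('c') and
# remove_tag('a'); tags present in both sets are left alone.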
class NodeDetailSerializer(NodeSerializer):
"""
Overrides NodeSerializer to make id required.
"""
id = IDField(source='_id', required=True)
class NodeForksSerializer(NodeSerializer):
category_choices = settings.NODE_CATEGORY_MAP.items()
category_choices_string = ', '.join(["'{}'".format(choice[0]) for choice in category_choices])
title = ser.CharField(required=False)
category = ser.ChoiceField(read_only=True, choices=category_choices, help_text='Choices: ' + category_choices_string)
forked_date = ser.DateTimeField(read_only=True)
def create(self, validated_data):
node = validated_data.pop('node')
fork_title = validated_data.pop('title', None)
request = self.context['request']
auth = get_user_auth(request)
fork = node.fork_node(auth, title=fork_title)
try:
fork.save()
except ValidationValueError as e:
raise InvalidModelValueError(detail=e.message)
return fork
class NodeContributorsSerializer(JSONAPISerializer):
""" Separate from UserSerializer due to necessity to override almost every field as read only
"""
non_anonymized_fields = ['bibliographic', 'permission']
filterable_fields = frozenset([
'id',
'bibliographic',
'permission'
])
id = IDField(source='_id', required=True)
type = TypeField()
bibliographic = ser.BooleanField(help_text='Whether the user will be included in citations for this node or not.',
default=True)
permission = ser.ChoiceField(choices=osf_permissions.PERMISSIONS, required=False, allow_null=True,
default=osf_permissions.reduce_permissions(osf_permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS),
help_text='User permission level. Must be "read", "write", or "admin". Defaults to "write".')
unregistered_contributor = ser.SerializerMethodField()
links = LinksField({
'self': 'get_absolute_url'
})
users = RelationshipField(
related_view='users:user-detail',
related_view_kwargs={'user_id': '<pk>'},
always_embed=True
)
class Meta:
type_ = 'contributors'
def get_absolute_url(self, obj):
node_id = self.context['request'].parser_context['kwargs']['node_id']
return absolute_reverse(
'nodes:node-contributor-detail',
kwargs={
'node_id': node_id,
'user_id': obj._id
}
)
def get_unregistered_contributor(self, obj):
unclaimed_records = obj.unclaimed_records.get(obj.node_id, None)
if unclaimed_records:
return unclaimed_records.get('name', None)
class NodeContributorsCreateSerializer(NodeContributorsSerializer):
"""
Overrides NodeContributorsSerializer to add target_type field
"""
target_type = TargetTypeField(target_type='users')
def create(self, validated_data):
auth = Auth(self.context['request'].user)
node = self.context['view'].get_node()
contributor = get_object_or_error(User, validated_data['_id'], display_name='user')
# The Node model checks whether the contributor already exists, but would still change their permissions, so reject duplicates explicitly here.
if contributor in node.contributors:
raise exceptions.ValidationError('{} is already a contributor'.format(contributor.fullname))
bibliographic = validated_data['bibliographic']
permissions = osf_permissions.expand_permissions(validated_data.get('permission')) or osf_permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS
node.add_contributor(contributor=contributor, auth=auth, visible=bibliographic, permissions=permissions, save=True)
contributor.permission = osf_permissions.reduce_permissions(node.get_permissions(contributor))
contributor.bibliographic = node.get_visible(contributor)
contributor.node_id = node._id
return contributor
class NodeContributorDetailSerializer(NodeContributorsSerializer):
"""
Overrides node contributor serializer to add additional methods
"""
def update(self, instance, validated_data):
contributor = instance
auth = Auth(self.context['request'].user)
node = self.context['view'].get_node()
visible = validated_data.get('bibliographic')
permission = validated_data.get('permission')
try:
node.update_contributor(contributor, permission, visible, auth, save=True)
except NodeStateError as e:
raise exceptions.ValidationError(detail=e.message)
except ValueError as e:
raise exceptions.ValidationError(detail=e.message)
contributor.permission = osf_permissions.reduce_permissions(node.get_permissions(contributor))
contributor.bibliographic = node.get_visible(contributor)
contributor.node_id = node._id
return contributor
class NodeLinksSerializer(JSONAPISerializer):
id = IDField(source='_id')
type = TypeField()
target_type = TargetTypeField(target_type='nodes')
# TODO: We don't show the title because the current user may not have access to this node. We may want to conditionally
# include this field in the future.
# title = ser.CharField(read_only=True, source='node.title', help_text='The title of the node that this Node Link '
# 'points to')
target_node = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<pk>'},
always_embed=True
)
class Meta:
type_ = 'node_links'
links = LinksField({
'self': 'get_absolute_url'
})
def get_absolute_url(self, obj):
node_id = self.context['request'].parser_context['kwargs']['node_id']
return absolute_reverse(
'nodes:node-pointer-detail',
kwargs={
'node_id': node_id,
'node_link_id': obj._id
}
)
def create(self, validated_data):
request = self.context['request']
user = request.user
auth = Auth(user)
node = self.context['view'].get_node()
target_node_id = validated_data['_id']
pointer_node = Node.load(target_node_id)
if not pointer_node or pointer_node.is_collection:
raise InvalidModelValueError(
source={'pointer': '/data/relationships/node_links/data/id'},
detail='Target Node \'{}\' not found.'.format(target_node_id)
)
try:
pointer = node.add_pointer(pointer_node, auth, save=True)
return pointer
except ValueError:
raise InvalidModelValueError(
source={'pointer': '/data/relationships/node_links/data/id'},
detail='Target Node \'{}\' already pointed to by \'{}\'.'.format(target_node_id, node._id)
)
def update(self, instance, validated_data):
pass
class NodeProviderSerializer(JSONAPISerializer):
id = ser.SerializerMethodField(read_only=True)
kind = ser.CharField(read_only=True)
name = ser.CharField(read_only=True)
path = ser.CharField(read_only=True)
node = ser.CharField(source='node_id', read_only=True)
provider = ser.CharField(read_only=True)
files = NodeFileHyperLinkField(
related_view='nodes:node-files',
related_view_kwargs={'node_id': '<node_id>', 'path': '<path>', 'provider': '<provider>'},
kind='folder',
never_embed=True
)
links = LinksField({
'upload': WaterbutlerLink(),
'new_folder': WaterbutlerLink(kind='folder')
})
class Meta:
type_ = 'files'
@staticmethod
def get_id(obj):
return '{}:{}'.format(obj.node._id, obj.provider)
def get_absolute_url(self, obj):
return absolute_reverse(
'nodes:node-provider-detail',
kwargs={
'node_id': obj.node._id,
'provider': obj.provider
}
)
class InstitutionRelated(JSONAPIRelationshipSerializer):
id = ser.CharField(source='_id', required=False, allow_null=True)
class Meta:
type_ = 'institutions'
class NodeInstitutionsRelationshipSerializer(ser.Serializer):
data = ser.ListField(child=InstitutionRelated())
links = LinksField({'self': 'get_self_url',
'html': 'get_related_url'})
def get_self_url(self, obj):
return obj['self'].institutions_relationship_url()
def get_related_url(self, obj):
return obj['self'].institutions_url()
class Meta:
type_ = 'institutions'
def get_institutions_to_add_remove(self, institutions, new_institutions):
diff = relationship_diff(
current_items={inst._id: inst for inst in institutions},
new_items={inst['_id']: inst for inst in new_institutions}
)
insts_to_add = []
for inst_id in diff['add']:
inst = Institution.load(inst_id)
if not inst:
raise exceptions.NotFound(detail='Institution with id "{}" was not found'.format(inst_id))
insts_to_add.append(inst)
return insts_to_add, diff['remove'].values()
def make_instance_obj(self, obj):
return {
'data': obj.affiliated_institutions,
'self': obj
}
def update(self, instance, validated_data):
node = instance['self']
user = self.context['request'].user
add, remove = self.get_institutions_to_add_remove(
institutions=instance['data'],
new_institutions=validated_data['data']
)
for inst in add:
if inst not in user.affiliated_institutions:
raise exceptions.PermissionDenied(detail='User needs to be affiliated with {}'.format(inst.name))
for inst in remove:
node.remove_affiliated_institution(inst, user)
for inst in add:
node.add_affiliated_institution(inst, user)
node.save()
return self.make_instance_obj(node)
def create(self, validated_data):
instance = self.context['view'].get_object()
user = self.context['request'].user
node = instance['self']
add, remove = self.get_institutions_to_add_remove(
institutions=instance['data'],
new_institutions=validated_data['data']
)
if not len(add):
raise RelationshipPostMakesNoChanges
for inst in add:
if inst not in user.affiliated_institutions:
raise exceptions.PermissionDenied(detail='User needs to be affiliated with {}'.format(inst.name))
for inst in add:
node.add_affiliated_institution(inst, user)
node.save()
return self.make_instance_obj(node)
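# Hedged sketch of the diff contract assumed from relationship_diff() in
# get_institutions_to_add_remove() above: given current and requested items
# keyed by id, 'add' holds entries only present in the request and 'remove'
# holds entries only present on the node. This illustrates the assumed shape,
# not the helper's actual source.
def relationship_diff_sketch(current_items, new_items):
    return {
        'add': dict((k, v) for k, v in new_items.items()
                    if k not in current_items),
        'remove': dict((k, v) for k, v in current_items.items()
                       if k not in new_items),
    }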
class NodeAlternativeCitationSerializer(JSONAPISerializer):
id = IDField(source='_id', read_only=True)
type = TypeField()
name = ser.CharField(required=True)
text = ser.CharField(required=True)
class Meta:
type_ = 'citations'
def create(self, validated_data):
errors = self.error_checker(validated_data)
if len(errors) > 0:
raise exceptions.ValidationError(detail=errors)
node = self.context['view'].get_node()
auth = Auth(self.context['request']._user)
citation = node.add_citation(auth, save=True, **validated_data)
return citation
def update(self, instance, validated_data):
errors = self.error_checker(validated_data)
if len(errors) > 0:
raise exceptions.ValidationError(detail=errors)
node = self.context['view'].get_node()
auth = Auth(self.context['request']._user)
instance = node.edit_citation(auth, instance, save=True, **validated_data)
return instance
def error_checker(self, data):
errors = []
name = data.get('name', None)
text = data.get('text', None)
citations = self.context['view'].get_node().alternative_citations
if not (self.instance and self.instance.name == name) and citations.find(Q('name', 'eq', name)).count() > 0:
errors.append("There is already a citation named '{}'".format(name))
if not (self.instance and self.instance.text == text):
matching_citations = citations.find(Q('text', 'eq', text))
if matching_citations.count() > 0:
names = "', '".join([str(citation.name) for citation in matching_citations])
errors.append("Citation matches '{}'".format(names))
return errors
def get_absolute_url(self, obj):
# Citations don't have urls
raise NotImplementedError
class DraftRegistrationSerializer(JSONAPISerializer):
id = IDField(source='_id', read_only=True)
type = TypeField()
registration_supplement = ser.CharField(source='registration_schema._id', required=True)
registration_metadata = ser.DictField(required=False)
datetime_initiated = ser.DateTimeField(read_only=True)
datetime_updated = ser.DateTimeField(read_only=True)
branched_from = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<branched_from._id>'}
)
initiator = RelationshipField(
related_view='users:user-detail',
related_view_kwargs={'user_id': '<initiator._id>'},
)
registration_schema = RelationshipField(
related_view='metaschemas:metaschema-detail',
related_view_kwargs={'metaschema_id': '<registration_schema._id>'}
)
links = LinksField({
'html': 'get_absolute_url'
})
def get_absolute_url(self, obj):
return obj.absolute_url
def create(self, validated_data):
node = validated_data.pop('node')
initiator = validated_data.pop('initiator')
metadata = validated_data.pop('registration_metadata', None)
schema_id = validated_data.pop('registration_schema').get('_id')
schema = get_object_or_error(MetaSchema, schema_id)
if schema.schema_version != LATEST_SCHEMA_VERSION or schema.name not in ACTIVE_META_SCHEMAS:
raise exceptions.ValidationError('Registration supplement must be an active schema.')
draft = DraftRegistration.create_from_node(node=node, user=initiator, schema=schema)
reviewer = is_prereg_admin_not_project_admin(self.context['request'], draft)
if metadata:
try:
# Required fields are only required when creating the actual registration, not updating the draft.
draft.validate_metadata(metadata=metadata, reviewer=reviewer, required_fields=False)
except ValidationValueError as e:
raise exceptions.ValidationError(e.message)
draft.update_metadata(metadata)
draft.save()
return draft
class Meta:
type_ = 'draft_registrations'
class DraftRegistrationDetailSerializer(DraftRegistrationSerializer):
"""
Overrides DraftRegistrationSerializer to make id and registration_metadata required.
registration_supplement cannot be changed after draft has been created.
Also makes registration_supplement read-only.
"""
id = IDField(source='_id', required=True)
registration_metadata = ser.DictField(required=True)
registration_supplement = ser.CharField(read_only=True, source='registration_schema._id')
def update(self, draft, validated_data):
"""
Update draft instance with the validated metadata.
"""
metadata = validated_data.pop('registration_metadata', None)
reviewer = is_prereg_admin_not_project_admin(self.context['request'], draft)
if metadata:
try:
# Required fields are only required when creating the actual registration, not updating the draft.
draft.validate_metadata(metadata=metadata, reviewer=reviewer, required_fields=False)
except ValidationValueError as e:
raise exceptions.ValidationError(e.message)
draft.update_metadata(metadata)
draft.save()
return draft
|
|
from unittest import TestCase
from unittest import SkipTest
from chatterbot.adapters.storage import MongoDatabaseAdapter
from chatterbot.conversation import Statement, Response
class MongoAdapterTestCase(TestCase):
def setUp(self):
"""
Instantiate the adapter.
"""
from pymongo.errors import ServerSelectionTimeoutError
from pymongo import MongoClient
database_name = "test_db"
# Skip these tests if a Mongo server is not available.
try:
client = MongoClient(
serverSelectionTimeoutMS=0.5
)
client.server_info()
self.adapter = MongoDatabaseAdapter(database=database_name)
except ServerSelectionTimeoutError:
raise SkipTest("Unable to connect to mongo database.")
def tearDown(self):
"""
Remove the test database.
"""
self.adapter.drop()
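# The availability probe in setUp() relies on documented pymongo behaviour:
# server_info() forces a round trip, and with a small serverSelectionTimeoutMS
# it raises ServerSelectionTimeoutError quickly when no mongod is reachable.
# A standalone sketch of the same probe (host, port and timeout values are
# illustrative, not taken from this module):
def mongo_available_sketch(host='localhost', port=27017, timeout_ms=500):
    from pymongo import MongoClient
    from pymongo.errors import ServerSelectionTimeoutError
    try:
        MongoClient(host, port,
                    serverSelectionTimeoutMS=timeout_ms).server_info()
        return True
    except ServerSelectionTimeoutError:
        return False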
class MongoDatabaseAdapterTestCase(MongoAdapterTestCase):
def test_count_returns_zero(self):
"""
The count method should return a value of 0
when nothing has been saved to the database.
"""
self.assertEqual(self.adapter.count(), 0)
def test_count_returns_value(self):
"""
The count method should return a value of 1
when one item has been saved to the database.
"""
statement = Statement("Test statement")
self.adapter.update(statement)
self.assertEqual(self.adapter.count(), 1)
def test_statement_not_found(self):
"""
Test that None is returned by the find method
when a matching statement is not found.
"""
self.assertEqual(self.adapter.find("Non-existant"), None)
def test_statement_found(self):
"""
Test that a matching statement is returned
when it exists in the database.
"""
statement = Statement("New statement")
self.adapter.update(statement)
found_statement = self.adapter.find("New statement")
self.assertNotEqual(found_statement, None)
self.assertEqual(found_statement.text, statement.text)
def test_update_adds_new_statement(self):
statement = Statement("New statement")
self.adapter.update(statement)
statement_found = self.adapter.find("New statement")
self.assertNotEqual(statement_found, None)
self.assertEqual(statement_found.text, statement.text)
def test_update_modifies_existing_statement(self):
statement = Statement("New statement")
self.adapter.update(statement)
# Check the initial values
found_statement = self.adapter.find(statement.text)
self.assertEqual(
len(found_statement.in_response_to), 0
)
# Update the statement value
statement.add_response(
Response("New response")
)
self.adapter.update(statement)
# Check that the values have changed
found_statement = self.adapter.find(statement.text)
self.assertEqual(
len(found_statement.in_response_to), 1
)
def test_get_random_returns_statement(self):
statement = Statement("New statement")
self.adapter.update(statement)
random_statement = self.adapter.get_random()
self.assertEqual(random_statement.text, statement.text)
def test_find_returns_nested_responses(self):
response_list = [
Response("Yes"),
Response("No")
]
statement = Statement(
"Do you like this?",
in_response_to=response_list
)
self.adapter.update(statement)
result = self.adapter.find(statement.text)
self.assertIn("Yes", result.in_response_to)
self.assertIn("No", result.in_response_to)
def test_multiple_responses_added_on_update(self):
statement = Statement(
"You are welcome.",
in_response_to=[
Response("Thank you."),
Response("Thanks.")
]
)
self.adapter.update(statement)
result = self.adapter.find(statement.text)
self.assertEqual(len(result.in_response_to), 2)
self.assertIn(statement.in_response_to[0], result.in_response_to)
self.assertIn(statement.in_response_to[1], result.in_response_to)
def test_update_saves_statement_with_multiple_responses(self):
statement = Statement(
"You are welcome.",
in_response_to=[
Response("Thanks."),
Response("Thank you.")
]
)
self.adapter.update(statement)
response = self.adapter.find(statement.text)
self.assertEqual(len(response.in_response_to), 2)
def test_getting_and_updating_statement(self):
statement = Statement("Hi")
self.adapter.update(statement)
statement.add_response(Response("Hello"))
statement.add_response(Response("Hello"))
self.adapter.update(statement)
response = self.adapter.find(statement.text)
self.assertEqual(len(response.in_response_to), 1)
self.assertEqual(response.in_response_to[0].occurrence, 2)
def test_deserialize_responses(self):
response_list = [
{"text": "Test", "occurrence": 3},
{"text": "Testing", "occurrence": 1},
]
results = self.adapter.deserialize_responses(response_list)
self.assertEqual(len(results), 2)
class MongoAdapterFilterTestCase(MongoAdapterTestCase):
def setUp(self):
super(MongoAdapterFilterTestCase, self).setUp()
self.statement1 = Statement(
"Testing...",
in_response_to=[
Response("Why are you counting?")
]
)
self.statement2 = Statement(
"Testing one, two, three.",
in_response_to=[
Response("Testing...")
]
)
def test_filter_text_no_matches(self):
self.adapter.update(self.statement1)
results = self.adapter.filter(text="Howdy")
self.assertEqual(len(results), 0)
def test_filter_in_response_to_no_matches(self):
self.adapter.update(self.statement1)
results = self.adapter.filter(
in_response_to=[Response("Maybe")]
)
self.assertEqual(len(results), 0)
def test_filter_equal_results(self):
statement1 = Statement(
"Testing...",
in_response_to=[]
)
statement2 = Statement(
"Testing one, two, three.",
in_response_to=[]
)
self.adapter.update(statement1)
self.adapter.update(statement2)
results = self.adapter.filter(in_response_to=[])
self.assertEqual(len(results), 2)
self.assertIn(statement1, results)
self.assertIn(statement2, results)
def test_filter_contains_result(self):
self.adapter.update(self.statement1)
self.adapter.update(self.statement2)
results = self.adapter.filter(
in_response_to__contains="Why are you counting?"
)
self.assertEqual(len(results), 1)
self.assertIn(self.statement1, results)
def test_filter_contains_no_result(self):
self.adapter.update(self.statement1)
results = self.adapter.filter(
in_response_to__contains="How do you do?"
)
self.assertEqual(results, [])
def test_filter_multiple_parameters(self):
self.adapter.update(self.statement1)
self.adapter.update(self.statement2)
results = self.adapter.filter(
text="Testing...",
in_response_to__contains="Why are you counting?"
)
self.assertEqual(len(results), 1)
self.assertIn(self.statement1, results)
def test_filter_multiple_parameters_no_results(self):
self.adapter.update(self.statement1)
self.adapter.update(self.statement2)
results = self.adapter.filter(
text="Test",
in_response_to__contains="Not an existing response."
)
self.assertEqual(len(results), 0)
def test_filter_no_parameters(self):
"""
If no parameters are passed to the filter,
then all statements should be returned.
"""
statement1 = Statement("Testing...")
statement2 = Statement("Testing one, two, three.")
self.adapter.update(statement1)
self.adapter.update(statement2)
results = self.adapter.filter()
self.assertEqual(len(results), 2)
def test_filter_returns_statement_with_multiple_responses(self):
statement = Statement(
"You are welcome.",
in_response_to=[
Response("Thanks."),
Response("Thank you.")
]
)
self.adapter.update(statement)
response = self.adapter.filter(
in_response_to__contains="Thanks."
)
# Get the first response
response = response[0]
self.assertEqual(len(response.in_response_to), 2)
def test_response_list_in_results(self):
"""
If a statement with response values is found using
the filter method, they should be returned as
response objects.
"""
statement = Statement(
"The first is to help yourself, the second is to help others.",
in_response_to=[
Response("Why do people have two hands?")
]
)
self.adapter.update(statement)
found = self.adapter.filter(text=statement.text)
self.assertEqual(len(found[0].in_response_to), 1)
self.assertEqual(type(found[0].in_response_to[0]), Response)
class ReadOnlyMongoDatabaseAdapterTestCase(MongoAdapterTestCase):
def test_update_does_not_add_new_statement(self):
self.adapter.read_only = True
statement = Statement("New statement")
self.adapter.update(statement)
statement_found = self.adapter.find("New statement")
self.assertEqual(statement_found, None)
def test_update_does_not_modify_existing_statement(self):
statement = Statement("New statement")
self.adapter.update(statement)
self.adapter.read_only = True
statement.add_response(
Response("New response")
)
self.adapter.update(statement)
statement_found = self.adapter.find("New statement")
self.assertEqual(
statement_found.text, statement.text
)
self.assertEqual(
len(statement_found.in_response_to), 0
)
|
|
# Copyright (c) 2017 DataCore Software Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for classes that are used to invoke DataCore SANsymphony API."""
import mock
from oslo_utils import units
import six
import suds
from suds.sax import parser
from suds import wsdl
from cinder import test
from cinder.volume.drivers.datacore import api
from cinder.volume.drivers.datacore import exception
class FakeWebSocketException(Exception):
pass
class DataCoreClientTestCase(test.TestCase):
"""Tests for the DataCore SANsymphony client."""
def setUp(self):
super(DataCoreClientTestCase, self).setUp()
self.mock_storage_services = mock.MagicMock()
self.mock_executive_service = mock.MagicMock()
self.mock_suds_client = mock.MagicMock()
self.mock_object(
api.suds_client, 'Client', return_value=self.mock_suds_client)
self.mock_channel = mock.MagicMock()
mock_websocket = self.mock_object(api, 'websocket')
mock_websocket.WebSocketException = FakeWebSocketException
mock_websocket.create_connection.return_value = self.mock_channel
setattr(self.mock_suds_client.service.__getitem__,
'side_effect',
self._get_service_side_effect)
self.client = api.DataCoreClient('hostname', 'username', 'password', 1)
self.client.API_RETRY_INTERVAL = 0
def _get_service_side_effect(self, service_name):
self.assertIn(service_name,
[
api.DataCoreClient.STORAGE_SERVICES_BINDING,
api.DataCoreClient.EXECUTIVE_SERVICE_BINDING
])
if service_name is api.DataCoreClient.STORAGE_SERVICES_BINDING:
return self.mock_storage_services
else:
return self.mock_executive_service
def _assert_storage_services_method_called(self, method_name):
return self.mock_storage_services.__getitem__.assert_called_with(
method_name)
@property
def mock_storage_service_context(self):
return self.mock_storage_services.__getitem__()()
@property
def mock_executive_service_context(self):
return self.mock_executive_service.__getitem__()()
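# Note on the mocking set up in setUp() above, with a self-contained sketch of
# the same trick (all names below are local to the example): giving a
# MagicMock's __getitem__ a side_effect lets client.service['Binding'] return
# a different mock per binding name, which is what routes calls to either the
# storage or the executive service mock.
#
#     import mock
#     service = mock.MagicMock()
#     storage, executive = mock.MagicMock(), mock.MagicMock()
#     service.__getitem__.side_effect = (
#         lambda name: storage if name == 'StorageServices' else executive)
#     assert service['StorageServices'] is storage
#     assert service['ExecutiveServiceEx'] is executive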
def test_process_request_failed(self):
def fail_with_socket_error():
raise FakeWebSocketException()
def fail_with_web_fault(message):
fault = mock.Mock()
fault.faultstring = "General error."
document = mock.Mock()
raise suds.WebFault(fault, document)
self.mock_channel.recv.side_effect = fail_with_socket_error
self.assertRaises(exception.DataCoreConnectionException,
self.client.get_server_groups)
self.mock_channel.recv.side_effect = None
(self.mock_storage_service_context.process_reply
.side_effect) = fail_with_web_fault
self.assertRaises(exception.DataCoreFaultException,
self.client.get_server_groups)
def test_channel_closing_failed(self):
def fail_with_socket_error():
raise FakeWebSocketException()
def fail_with_web_fault(message):
fault = mock.Mock()
fault.faultstring = "General error."
document = mock.Mock()
raise suds.WebFault(fault, document)
self.mock_channel.close.side_effect = fail_with_socket_error
(self.mock_storage_service_context.process_reply
.side_effect) = fail_with_web_fault
self.assertRaises(exception.DataCoreFaultException,
self.client.get_server_groups)
def test_update_api_endpoints(self):
def fail_with_socket_error():
try:
raise FakeWebSocketException()
finally:
self.mock_channel.recv.side_effect = None
self.mock_channel.recv.side_effect = fail_with_socket_error
mock_executive_endpoints = [{
'network_address': '127.0.0.1:3794',
'http_endpoint': 'http://127.0.0.1:3794/',
'ws_endpoint': 'ws://127.0.0.1:3794/',
}]
self.mock_object(self.client,
'_executive_service_endpoints',
mock_executive_endpoints)
mock_storage_endpoint = {
'network_address': '127.0.0.1:3794',
'http_endpoint': 'http://127.0.0.1:3794/',
'ws_endpoint': 'ws://127.0.0.1:3794/',
}
self.mock_object(self.client,
'_storage_services_endpoint',
mock_storage_endpoint)
node = mock.Mock()
node.HostAddress = '127.0.0.1:3794'
reply = mock.MagicMock()
reply.RegionNodeData = [node]
self.mock_storage_service_context.process_reply.return_value = reply
result = self.client.get_server_groups()
self.assertIsNotNone(result)
def test_update_api_endpoints_failed(self):
def fail_with_socket_error():
try:
raise FakeWebSocketException()
finally:
self.mock_channel.recv.side_effect = None
self.mock_channel.recv.side_effect = fail_with_socket_error
mock_executive_endpoints = [{
'network_address': '127.0.0.1:3794',
'http_endpoint': 'http://127.0.0.1:3794/',
'ws_endpoint': 'ws://127.0.0.1:3794/',
}]
self.mock_object(self.client,
'_executive_service_endpoints',
mock_executive_endpoints)
reply = mock.MagicMock()
reply.RegionNodeData = []
self.mock_storage_service_context.process_reply.return_value = reply
self.mock_executive_service_context.process_reply.return_value = None
result = self.client.get_server_groups()
self.assertIsNotNone(result)
def test_get_server_groups(self):
self.client.get_server_groups()
self._assert_storage_services_method_called('GetServerGroups')
def test_get_servers(self):
self.client.get_servers()
self._assert_storage_services_method_called('GetServers')
def test_get_disk_pools(self):
self.client.get_disk_pools()
self._assert_storage_services_method_called('GetDiskPools')
def test_get_logical_disks(self):
self.client.get_logical_disks()
self._assert_storage_services_method_called('GetLogicalDisks')
def test_create_pool_logical_disk(self):
pool_id = 'pool_id'
pool_volume_type = 'Striped'
size = 1 * units.Gi
min_quota = 1
max_quota = 1 * units.Gi
self.client.create_pool_logical_disk(
pool_id, pool_volume_type, size, min_quota, max_quota)
self._assert_storage_services_method_called('CreatePoolLogicalDisk')
def test_delete_logical_disk(self):
logical_disk_id = 'disk_id'
self.client.delete_logical_disk(logical_disk_id)
self._assert_storage_services_method_called('DeleteLogicalDisk')
def test_get_logical_disk_chunk_allocation_map(self):
logical_disk_id = 'disk_id'
self.client.get_logical_disk_chunk_allocation_map(logical_disk_id)
self._assert_storage_services_method_called(
'GetLogicalDiskChunkAllocationMap')
def test_get_next_virtual_disk_alias(self):
base_alias = 'volume'
self.client.get_next_virtual_disk_alias(base_alias)
self._assert_storage_services_method_called('GetNextVirtualDiskAlias')
def test_get_virtual_disks(self):
self.client.get_virtual_disks()
self._assert_storage_services_method_called('GetVirtualDisks')
def test_build_virtual_disk_data(self):
disk_alias = 'alias'
disk_type = 'Mirrored'
size = 1 * units.Gi
description = 'description'
storage_profile_id = 'storage_profile_id'
vd_data = self.client.build_virtual_disk_data(
disk_alias, disk_type, size, description, storage_profile_id)
self.assertEqual(disk_alias, vd_data.Alias)
self.assertEqual(size, vd_data.Size.Value)
self.assertEqual(description, vd_data.Description)
self.assertEqual(storage_profile_id, vd_data.StorageProfileId)
self.assertTrue(hasattr(vd_data, 'Type'))
self.assertTrue(hasattr(vd_data, 'SubType'))
self.assertTrue(hasattr(vd_data, 'DiskStatus'))
self.assertTrue(hasattr(vd_data, 'RecoveryPriority'))
def test_create_virtual_disk_ex2(self):
disk_alias = 'alias'
disk_type = 'Mirrored'
size = 1 * units.Gi
description = 'description'
storage_profile_id = 'storage_profile_id'
first_disk_id = 'disk_id'
second_disk_id = 'disk_id'
add_redundancy = True
vd_data = self.client.build_virtual_disk_data(
disk_alias, disk_type, size, description, storage_profile_id)
self.client.create_virtual_disk_ex2(
vd_data, first_disk_id, second_disk_id, add_redundancy)
self._assert_storage_services_method_called('CreateVirtualDiskEx2')
def test_set_virtual_disk_size(self):
disk_id = 'disk_id'
size = 1 * units.Gi
self.client.set_virtual_disk_size(disk_id, size)
self._assert_storage_services_method_called('SetVirtualDiskSize')
def test_delete_virtual_disk(self):
virtual_disk_id = 'disk_id'
delete_logical_disks = True
self.client.delete_virtual_disk(virtual_disk_id, delete_logical_disks)
self._assert_storage_services_method_called('DeleteVirtualDisk')
def test_serve_virtual_disks_to_host(self):
host_id = 'host_id'
disks = ['disk_id']
self.client.serve_virtual_disks_to_host(host_id, disks)
self._assert_storage_services_method_called('ServeVirtualDisksToHost')
def test_unserve_virtual_disks_from_host(self):
host_id = 'host_id'
disks = ['disk_id']
self.client.unserve_virtual_disks_from_host(host_id, disks)
self._assert_storage_services_method_called(
'UnserveVirtualDisksFromHost')
def test_unserve_virtual_disks_from_port(self):
port_id = 'port_id'
disks = ['disk_id']
self.client.unserve_virtual_disks_from_port(port_id, disks)
self._assert_storage_services_method_called(
'UnserveVirtualDisksFromPort')
def test_bind_logical_disk(self):
disk_id = 'disk_id'
logical_disk_id = 'disk_id'
role = 'Second'
create_mirror_mappings = True
create_client_mappings = False
add_redundancy = True
self.client.bind_logical_disk(
disk_id, logical_disk_id, role, create_mirror_mappings,
create_client_mappings, add_redundancy)
self._assert_storage_services_method_called(
'BindLogicalDisk')
def test_get_snapshots(self):
self.client.get_snapshots()
self._assert_storage_services_method_called('GetSnapshots')
def test_create_snapshot(self):
disk_id = 'disk_id'
name = 'name'
description = 'description'
pool_id = 'pool_id'
snapshot_type = 'Full'
duplicate_disk_id = False
storage_profile_id = 'profile_id'
self.client.create_snapshot(
disk_id, name, description, pool_id, snapshot_type,
duplicate_disk_id, storage_profile_id)
self._assert_storage_services_method_called('CreateSnapshot')
def test_delete_snapshot(self):
snapshot_id = "snapshot_id"
self.client.delete_snapshot(snapshot_id)
self._assert_storage_services_method_called('DeleteSnapshot')
def test_get_storage_profiles(self):
self.client.get_storage_profiles()
self._assert_storage_services_method_called('GetStorageProfiles')
def test_designate_map_store(self):
pool_id = 'pool_id'
self.client.designate_map_store(pool_id)
self._assert_storage_services_method_called('DesignateMapStore')
def test_get_performance_by_type(self):
types = ['DiskPoolPerformance']
self.client.get_performance_by_type(types)
self._assert_storage_services_method_called('GetPerformanceByType')
def test_get_ports(self):
self.client.get_ports()
self._assert_storage_services_method_called('GetPorts')
def test_build_scsi_port_data(self):
host_id = 'host_id'
port_name = 'port_name'
port_mode = 'Initiator'
port_type = 'iSCSI'
port_data = self.client.build_scsi_port_data(
host_id, port_name, port_mode, port_type)
self.assertEqual(host_id, port_data.HostId)
self.assertEqual(port_name, port_data.PortName)
self.assertTrue(hasattr(port_data, 'PortMode'))
self.assertTrue(hasattr(port_data, 'PortType'))
def test_register_port(self):
port_data = self.client.build_scsi_port_data(
'host_id', 'port_name', 'initiator', 'iSCSI')
self.client.register_port(port_data)
self._assert_storage_services_method_called('RegisterPort')
def test_assign_port(self):
client_id = 'client_id'
port_id = 'port_id'
self.client.assign_port(client_id, port_id)
self._assert_storage_services_method_called('AssignPort')
def test_set_server_port_properties(self):
port_id = 'port_id'
port_properties = mock.MagicMock()
self.client.set_server_port_properties(port_id, port_properties)
self._assert_storage_services_method_called('SetServerPortProperties')
def test_build_access_token(self):
initiator_node_name = 'initiator'
initiator_username = 'initiator_username'
initiator_password = 'initiator_password'
mutual_authentication = True
target_username = 'target_username'
target_password = 'target_password'
access_token = self.client.build_access_token(
initiator_node_name, initiator_username, initiator_password,
mutual_authentication, target_username, target_password)
self.assertEqual(initiator_node_name, access_token.InitiatorNodeName)
self.assertEqual(initiator_username, access_token.InitiatorUsername)
self.assertEqual(initiator_password, access_token.InitiatorPassword)
self.assertEqual(mutual_authentication,
access_token.MutualAuthentication)
self.assertEqual(target_username, access_token.TargetUsername)
self.assertEqual(target_password, access_token.TargetPassword)
def test_set_access_token(self):
port_id = 'port_id'
access_token = self.client.build_access_token(
'initiator_name', None, None, False, 'initiator_name', 'password')
self.client.set_access_token(port_id, access_token)
self._assert_storage_services_method_called('SetAccessToken')
def test_get_clients(self):
self.client.get_clients()
self._assert_storage_services_method_called('GetClients')
def test_register_client(self):
host_name = 'name'
description = 'description'
machine_type = 'Other'
mode = 'PreferredServer'
preferred_server_ids = None
self.client.register_client(
host_name, description, machine_type, mode, preferred_server_ids)
self._assert_storage_services_method_called('RegisterClient')
def test_set_client_capabilities(self):
client_id = 'client_id'
mpio = True
alua = True
self.client.set_client_capabilities(client_id, mpio, alua)
self._assert_storage_services_method_called('SetClientCapabilities')
def test_get_target_domains(self):
self.client.get_target_domains()
self._assert_storage_services_method_called('GetTargetDomains')
def test_create_target_domain(self):
initiator_host_id = 'host_id'
target_host_id = 'host_id'
self.client.create_target_domain(initiator_host_id, target_host_id)
self._assert_storage_services_method_called('CreateTargetDomain')
def test_delete_target_domain(self):
domain_id = 'domain_id'
self.client.delete_target_domain(domain_id)
self._assert_storage_services_method_called('DeleteTargetDomain')
def test_get_target_devices(self):
self.client.get_target_devices()
self._assert_storage_services_method_called('GetTargetDevices')
def test_build_scsi_port_nexus_data(self):
initiator_id = 'initiator_id'
target_id = 'target_id'
nexus = self.client.build_scsi_port_nexus_data(initiator_id, target_id)
self.assertEqual(initiator_id, nexus.InitiatorPortId)
self.assertEqual(target_id, nexus.TargetPortId)
def test_create_target_device(self):
domain_id = 'domain_id'
nexus = self.client.build_scsi_port_nexus_data('initiator_id',
'target_id')
self.client.create_target_device(domain_id, nexus)
self._assert_storage_services_method_called('CreateTargetDevice')
def test_delete_target_device(self):
device_id = 'device_id'
self.client.delete_target_device(device_id)
self._assert_storage_services_method_called('DeleteTargetDevice')
def test_get_next_free_lun(self):
device_id = 'device_id'
self.client.get_next_free_lun(device_id)
self._assert_storage_services_method_called('GetNextFreeLun')
def test_get_logical_units(self):
self.client.get_logical_units()
self._assert_storage_services_method_called('GetLogicalUnits')
def test_map_logical_disk(self):
disk_id = 'disk_id'
lun = 0
host_id = 'host_id'
mapping_type = 'Client'
initiator_id = 'initiator_id'
target_id = 'target_id'
nexus = self.client.build_scsi_port_nexus_data(initiator_id, target_id)
self.client.map_logical_disk(
disk_id, nexus, lun, host_id, mapping_type)
self._assert_storage_services_method_called('MapLogicalDisk')
def test_unmap_logical_disk(self):
logical_disk_id = 'disk_id'
nexus = self.client.build_scsi_port_nexus_data('initiator_id',
'target_id')
self.client.unmap_logical_disk(logical_disk_id, nexus)
self._assert_storage_services_method_called('UnmapLogicalDisk')
FAKE_WSDL_DOCUMENT = """<?xml version="1.0" encoding="utf-8"?>
<wsdl:definitions name="ExecutiveServices"
targetNamespace="http://tempuri.org/"
xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/"
xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/"
xmlns:tns="http://tempuri.org/"
xmlns:wsa10="http://www.w3.org/2005/08/addressing"
xmlns:wsaw="http://www.w3.org/2006/05/addressing/wsdl">
<wsdl:types>
<xs:schema elementFormDefault="qualified"
targetNamespace="http://tempuri.org/"
xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:import
namespace="http://schemas.microsoft.com/2003/10/Serialization/Arrays"/>
<xs:import
namespace="http://schemas.datacontract.org/2004/07/DataCore.Executive"/>
<xs:element name="StartExecutive">
<xs:complexType>
<xs:sequence/>
</xs:complexType>
</xs:element>
<xs:element name="StartExecutiveResponse">
<xs:complexType>
<xs:sequence/>
</xs:complexType>
</xs:element>
<xs:element name="StopExecutive">
<xs:complexType>
<xs:sequence/>
</xs:complexType>
</xs:element>
<xs:element name="StopExecutiveResponse">
<xs:complexType>
<xs:sequence/>
</xs:complexType>
</xs:element>
<xs:element name="ExecutiveStarted">
<xs:complexType>
<xs:sequence/>
</xs:complexType>
</xs:element>
<xs:element name="ExecutiveStopped">
<xs:complexType>
<xs:sequence/>
</xs:complexType>
</xs:element>
</xs:schema>
</wsdl:types>
<wsdl:message name="IExecutiveServiceEx_StartExecutive_InputMessage">
<wsdl:part name="parameters" element="tns:StartExecutive"/>
</wsdl:message>
<wsdl:message name="IExecutiveServiceEx_StartExecutive_OutputMessage">
<wsdl:part name="parameters" element="tns:StartExecutiveResponse"/>
</wsdl:message>
<wsdl:message
name="IExecutiveServiceEx_StartExecutive_ExecutiveError_FaultMessage">
<wsdl:part name="detail" element="ExecutiveError"/>
</wsdl:message>
<wsdl:message name="IExecutiveServiceEx_StopExecutive_InputMessage">
<wsdl:part name="parameters" element="tns:StopExecutive"/>
</wsdl:message>
<wsdl:message name="IExecutiveServiceEx_StopExecutive_OutputMessage">
<wsdl:part name="parameters" element="tns:StopExecutiveResponse"/>
</wsdl:message>
<wsdl:message
name="IExecutiveServiceEx_StopExecutive_ExecutiveError_FaultMessage">
<wsdl:part name="detail" element="ExecutiveError"/>
</wsdl:message>
<wsdl:message
name="IExecutiveServiceEx_ExecutiveStarted_OutputCallbackMessage">
<wsdl:part name="parameters" element="tns:ExecutiveStarted"/>
</wsdl:message>
<wsdl:message
name="IExecutiveServiceEx_ExecutiveStopped_OutputCallbackMessage">
<wsdl:part name="parameters" element="tns:ExecutiveStopped"/>
</wsdl:message>
<wsdl:portType name="IExecutiveServiceEx">
<wsdl:operation name="StartExecutive">
<wsdl:input
wsaw:Action="http://tempuri.org/IExecutiveService/StartExecutive"
message="tns:IExecutiveServiceEx_StartExecutive_InputMessage"/>
<wsdl:output
wsaw:Action="http://tempuri.org/IExecutiveService/StartExecutiveResponse"
message="tns:IExecutiveServiceEx_StartExecutive_OutputMessage"/>
<wsdl:fault wsaw:Action="ExecutiveError" name="ExecutiveError"
message="tns:IExecutiveServiceEx_StartExecutive_ExecutiveError_FaultMessage"/>
</wsdl:operation>
<wsdl:operation name="StopExecutive">
<wsdl:input
wsaw:Action="http://tempuri.org/IExecutiveService/StopExecutive"
message="tns:IExecutiveServiceEx_StopExecutive_InputMessage"/>
<wsdl:output
wsaw:Action="http://tempuri.org/IExecutiveService/StopExecutiveResponse"
message="tns:IExecutiveServiceEx_StopExecutive_OutputMessage"/>
<wsdl:fault wsaw:Action="ExecutiveError" name="ExecutiveError"
message="tns:IExecutiveServiceEx_StopExecutive_ExecutiveError_FaultMessage"/>
</wsdl:operation>
<wsdl:operation name="ExecutiveStarted">
<wsdl:output
wsaw:Action="http://tempuri.org/IExecutiveService/ExecutiveStarted"
message="tns:IExecutiveServiceEx_ExecutiveStarted_OutputCallbackMessage"/>
<wsdl:fault wsaw:Action="ExecutiveError" name="ExecutiveError"
message="tns:"/>
</wsdl:operation>
<wsdl:operation name="ExecutiveStopped">
<wsdl:output
wsaw:Action="http://tempuri.org/IExecutiveService/ExecutiveStopped"
message="tns:IExecutiveServiceEx_ExecutiveStopped_OutputCallbackMessage"/>
<wsdl:fault wsaw:Action="ExecutiveError" name="ExecutiveError"
message="tns:"/>
</wsdl:operation>
</wsdl:portType>
<wsdl:binding name="CustomBinding_IExecutiveServiceEx"
type="tns:IExecutiveServiceEx">
<soap:binding transport="http://schemas.microsoft.com/soap/websocket"/>
<wsdl:operation name="StartExecutive">
<soap:operation
soapAction="http://tempuri.org/IExecutiveService/StartExecutive"
style="document"/>
<wsdl:input>
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output>
<soap:body use="literal"/>
</wsdl:output>
<wsdl:fault name="ExecutiveError">
<soap:fault use="literal" name="ExecutiveError" namespace=""/>
</wsdl:fault>
</wsdl:operation>
<wsdl:operation name="StopExecutive">
<soap:operation
soapAction="http://tempuri.org/IExecutiveService/StopExecutive"
style="document"/>
<wsdl:input>
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output>
<soap:body use="literal"/>
</wsdl:output>
<wsdl:fault name="ExecutiveError">
<soap:fault use="literal" name="ExecutiveError" namespace=""/>
</wsdl:fault>
</wsdl:operation>
<wsdl:operation name="ExecutiveStarted">
<soap:operation
soapAction="http://tempuri.org/IExecutiveService/ExecutiveStarted"
style="document"/>
<wsdl:output>
<soap:body use="literal"/>
</wsdl:output>
<wsdl:fault name="ExecutiveError">
<soap:fault use="literal" name="ExecutiveError" namespace=""/>
</wsdl:fault>
</wsdl:operation>
<wsdl:operation name="ExecutiveStopped">
<soap:operation
soapAction="http://tempuri.org/IExecutiveService/ExecutiveStopped"
style="document"/>
<wsdl:output>
<soap:body use="literal"/>
</wsdl:output>
<wsdl:fault name="ExecutiveError">
<soap:fault use="literal" name="ExecutiveError" namespace=""/>
</wsdl:fault>
</wsdl:operation>
</wsdl:binding>
<wsdl:service name="ExecutiveServices">
<wsdl:port name="CustomBinding_IExecutiveServiceEx"
binding="tns:CustomBinding_IExecutiveServiceEx">
<soap:address
location="ws://mns-vsp-001:3794/IExecutiveServiceEx"/>
<wsa10:EndpointReference>
<wsa10:Address>ws://mns-vsp-001:3794/IExecutiveServiceEx
</wsa10:Address>
</wsa10:EndpointReference>
</wsdl:port>
</wsdl:service>
</wsdl:definitions>"""
class FaultDefinitionsFilterTestCase(test.TestCase):
"""Tests for the plugin to process the DataCore API WSDL document."""
@staticmethod
def _binding_operation_has_fault(document, operation_name):
for binding in document.getChildren('binding', wsdl.wsdlns):
for operation in binding.getChildren('operation', wsdl.wsdlns):
if operation.get('name') == operation_name:
fault = operation.getChildren('fault', wsdl.wsdlns)
if fault:
return True
return False
@staticmethod
def _port_type_operation_has_fault(document, operation_name):
for port_type in document.getChildren('portType', wsdl.wsdlns):
for operation in port_type.getChildren('operation', wsdl.wsdlns):
if operation.get('name') == operation_name:
fault = operation.getChildren('fault', wsdl.wsdlns)
if fault:
return True
return False
def _operation_has_fault(self, document, operation_name):
_binding_has_fault = self._binding_operation_has_fault(
document, operation_name)
_port_type_has_fault = self._port_type_operation_has_fault(
document, operation_name)
self.assertEqual(_binding_has_fault, _port_type_has_fault)
return _binding_has_fault
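# Context for the assertions below: in FAKE_WSDL_DOCUMENT the callback
# operations (ExecutiveStarted and ExecutiveStopped) declare faults whose
# message attribute is the empty reference "tns:", while StartExecutive and
# StopExecutive reference concrete fault messages. The plugin is therefore
# expected to keep the fault definitions on the request/response operations
# and strip them from the callback-only ones, which is exactly what
# test_parsed() asserts before and after calling plugin.parsed(context).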
def test_parsed(self):
context = mock.Mock()
sax = parser.Parser()
wsdl_document = FAKE_WSDL_DOCUMENT
if isinstance(wsdl_document, six.text_type):
wsdl_document = wsdl_document.encode('utf-8')
context.document = sax.parse(string=wsdl_document).root()
self.assertTrue(self._operation_has_fault(context.document,
'StartExecutive'))
self.assertTrue(self._operation_has_fault(context.document,
'StopExecutive'))
self.assertTrue(self._operation_has_fault(context.document,
'ExecutiveStarted'))
self.assertTrue(self._operation_has_fault(context.document,
'ExecutiveStopped'))
plugin = api.FaultDefinitionsFilter()
plugin.parsed(context)
self.assertTrue(self._operation_has_fault(context.document,
'StartExecutive'))
self.assertTrue(self._operation_has_fault(context.document,
'StopExecutive'))
self.assertFalse(self._operation_has_fault(context.document,
'ExecutiveStarted'))
self.assertFalse(self._operation_has_fault(context.document,
'ExecutiveStopped'))
|
|
# Copyright (c) 2010 Chris Moyer http://coredumped.org/
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
RECORD_TYPES = ['A', 'AAAA', 'TXT', 'CNAME', 'MX', 'PTR', 'SRV', 'SPF']
from boto.resultset import ResultSet
class ResourceRecordSets(ResultSet):
"""
A list of resource records.
:ivar hosted_zone_id: The ID of the hosted zone.
:ivar comment: A comment that will be stored with the change.
:ivar changes: A list of changes.
"""
ChangeResourceRecordSetsBody = """<?xml version="1.0" encoding="UTF-8"?>
<ChangeResourceRecordSetsRequest xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
<ChangeBatch>
<Comment>%(comment)s</Comment>
<Changes>%(changes)s</Changes>
</ChangeBatch>
</ChangeResourceRecordSetsRequest>"""
ChangeXML = """<Change>
<Action>%(action)s</Action>
%(record)s
</Change>"""
def __init__(self, connection=None, hosted_zone_id=None, comment=None):
self.connection = connection
self.hosted_zone_id = hosted_zone_id
self.comment = comment
self.changes = []
self.next_record_name = None
self.next_record_type = None
self.next_record_identifier = None
super(ResourceRecordSets, self).__init__([('ResourceRecordSet', Record)])
def __repr__(self):
if self.changes:
record_list = ','.join([c.__repr__() for c in self.changes])
else:
record_list = ','.join([record.__repr__() for record in self])
return '<ResourceRecordSets:%s [%s]>' % (self.hosted_zone_id,
                                         record_list)
def add_change(self, action, name, type, ttl=600,
alias_hosted_zone_id=None, alias_dns_name=None, identifier=None,
weight=None, region=None, alias_evaluate_target_health=None,
health_check=None, failover=None):
"""
Add a change request to the set.
:type action: str
:param action: The action to perform ('CREATE'|'DELETE'|'UPSERT')
:type name: str
:param name: The name of the domain you want to perform the action on.
:type type: str
:param type: The DNS record type. Valid values are:
* A
* AAAA
* CNAME
* MX
* NS
* PTR
* SOA
* SPF
* SRV
* TXT
:type ttl: int
:param ttl: The resource record cache time to live (TTL), in seconds.
        :type alias_hosted_zone_id: str
        :param alias_hosted_zone_id: *Alias resource record sets only* The value
            of the hosted zone ID, CanonicalHostedZoneNameId, for
            the LoadBalancer.
        :type alias_dns_name: str
        :param alias_dns_name: *Alias resource record sets only*
            Information about the domain to which you are redirecting traffic.
:type identifier: str
:param identifier: *Weighted and latency-based resource record sets
only* An identifier that differentiates among multiple resource
record sets that have the same combination of DNS name and type.
:type weight: int
:param weight: *Weighted resource record sets only* Among resource
record sets that have the same combination of DNS name and type,
a value that determines what portion of traffic for the current
resource record set is routed to the associated location
:type region: str
:param region: *Latency-based resource record sets only* Among resource
record sets that have the same combination of DNS name and type,
a value that determines which region this should be associated with
for the latency-based routing
:type alias_evaluate_target_health: Boolean
:param alias_evaluate_target_health: *Required for alias resource record
sets* Indicates whether this Resource Record Set should respect the
health status of any health checks associated with the ALIAS target
record which it is linked to.
:type health_check: str
:param health_check: Health check to associate with this record
:type failover: str
:param failover: *Failover resource record sets only* Whether this is the
primary or secondary resource record set.
"""
change = Record(name, type, ttl,
alias_hosted_zone_id=alias_hosted_zone_id,
alias_dns_name=alias_dns_name, identifier=identifier,
weight=weight, region=region,
alias_evaluate_target_health=alias_evaluate_target_health,
health_check=health_check, failover=failover)
self.changes.append([action, change])
return change
def add_change_record(self, action, change):
"""Add an existing record to a change set with the specified action"""
self.changes.append([action, change])
return
def to_xml(self):
"""Convert this ResourceRecordSet into XML
to be saved via the ChangeResourceRecordSetsRequest"""
changesXML = ""
for change in self.changes:
changeParams = {"action": change[0], "record": change[1].to_xml()}
changesXML += self.ChangeXML % changeParams
params = {"comment": self.comment, "changes": changesXML}
return self.ChangeResourceRecordSetsBody % params
def commit(self):
"""Commit this change"""
if not self.connection:
import boto
self.connection = boto.connect_route53()
return self.connection.change_rrsets(self.hosted_zone_id, self.to_xml())
def endElement(self, name, value, connection):
"""Overwritten to also add the NextRecordName,
NextRecordType and NextRecordIdentifier to the base object"""
if name == 'NextRecordName':
self.next_record_name = value
elif name == 'NextRecordType':
self.next_record_type = value
elif name == 'NextRecordIdentifier':
self.next_record_identifier = value
else:
return super(ResourceRecordSets, self).endElement(name, value, connection)
def __iter__(self):
"""Override the next function to support paging"""
results = super(ResourceRecordSets, self).__iter__()
truncated = self.is_truncated
while results:
for obj in results:
yield obj
if self.is_truncated:
self.is_truncated = False
results = self.connection.get_all_rrsets(self.hosted_zone_id, name=self.next_record_name,
type=self.next_record_type,
identifier=self.next_record_identifier)
else:
results = None
self.is_truncated = truncated
class Record(object):
"""An individual ResourceRecordSet"""
HealthCheckBody = """<HealthCheckId>%s</HealthCheckId>"""
XMLBody = """<ResourceRecordSet>
<Name>%(name)s</Name>
<Type>%(type)s</Type>
%(weight)s
%(body)s
%(health_check)s
</ResourceRecordSet>"""
WRRBody = """
<SetIdentifier>%(identifier)s</SetIdentifier>
<Weight>%(weight)s</Weight>
"""
RRRBody = """
<SetIdentifier>%(identifier)s</SetIdentifier>
<Region>%(region)s</Region>
"""
FailoverBody = """
<SetIdentifier>%(identifier)s</SetIdentifier>
<Failover>%(failover)s</Failover>
"""
ResourceRecordsBody = """
<TTL>%(ttl)s</TTL>
<ResourceRecords>
%(records)s
</ResourceRecords>"""
ResourceRecordBody = """<ResourceRecord>
<Value>%s</Value>
</ResourceRecord>"""
AliasBody = """<AliasTarget>
<HostedZoneId>%(hosted_zone_id)s</HostedZoneId>
<DNSName>%(dns_name)s</DNSName>
%(eval_target_health)s
</AliasTarget>"""
EvaluateTargetHealth = """<EvaluateTargetHealth>%s</EvaluateTargetHealth>"""
def __init__(self, name=None, type=None, ttl=600, resource_records=None,
alias_hosted_zone_id=None, alias_dns_name=None, identifier=None,
weight=None, region=None, alias_evaluate_target_health=None,
health_check=None, failover=None):
self.name = name
self.type = type
self.ttl = ttl
if resource_records is None:
resource_records = []
self.resource_records = resource_records
self.alias_hosted_zone_id = alias_hosted_zone_id
self.alias_dns_name = alias_dns_name
self.identifier = identifier
self.weight = weight
self.region = region
self.alias_evaluate_target_health = alias_evaluate_target_health
self.health_check = health_check
self.failover = failover
def __repr__(self):
return '<Record:%s:%s:%s>' % (self.name, self.type, self.to_print())
def add_value(self, value):
"""Add a resource record value"""
self.resource_records.append(value)
def set_alias(self, alias_hosted_zone_id, alias_dns_name,
alias_evaluate_target_health=False):
"""Make this an alias resource record set"""
self.alias_hosted_zone_id = alias_hosted_zone_id
self.alias_dns_name = alias_dns_name
self.alias_evaluate_target_health = alias_evaluate_target_health
def to_xml(self):
"""Spit this resource record set out as XML"""
if self.alias_hosted_zone_id is not None and self.alias_dns_name is not None:
# Use alias
if self.alias_evaluate_target_health is not None:
eval_target_health = self.EvaluateTargetHealth % ('true' if self.alias_evaluate_target_health else 'false')
else:
eval_target_health = ""
body = self.AliasBody % {"hosted_zone_id": self.alias_hosted_zone_id,
"dns_name": self.alias_dns_name,
"eval_target_health": eval_target_health}
else:
# Use resource record(s)
records = ""
for r in self.resource_records:
records += self.ResourceRecordBody % r
body = self.ResourceRecordsBody % {
"ttl": self.ttl,
"records": records,
}
weight = ""
if self.identifier is not None and self.weight is not None:
weight = self.WRRBody % {"identifier": self.identifier,
"weight": self.weight}
elif self.identifier is not None and self.region is not None:
weight = self.RRRBody % {"identifier": self.identifier,
"region": self.region}
elif self.identifier is not None and self.failover is not None:
weight = self.FailoverBody % {"identifier": self.identifier,
"failover": self.failover}
health_check = ""
if self.health_check is not None:
health_check = self.HealthCheckBody % (self.health_check)
params = {
"name": self.name,
"type": self.type,
"weight": weight,
"body": body,
"health_check": health_check
}
return self.XMLBody % params
def to_print(self):
rr = ""
if self.alias_hosted_zone_id is not None and self.alias_dns_name is not None:
# Show alias
rr = 'ALIAS ' + self.alias_hosted_zone_id + ' ' + self.alias_dns_name
if self.alias_evaluate_target_health is not None:
rr += ' (EvalTarget %s)' % self.alias_evaluate_target_health
else:
# Show resource record(s)
rr = ",".join(self.resource_records)
if self.identifier is not None and self.weight is not None:
rr += ' (WRR id=%s, w=%s)' % (self.identifier, self.weight)
elif self.identifier is not None and self.region is not None:
rr += ' (LBR id=%s, region=%s)' % (self.identifier, self.region)
elif self.identifier is not None and self.failover is not None:
rr += ' (FAILOVER id=%s, failover=%s)' % (self.identifier, self.failover)
return rr
def endElement(self, name, value, connection):
if name == 'Name':
self.name = value
elif name == 'Type':
self.type = value
elif name == 'TTL':
self.ttl = value
elif name == 'Value':
self.resource_records.append(value)
elif name == 'HostedZoneId':
self.alias_hosted_zone_id = value
elif name == 'DNSName':
self.alias_dns_name = value
elif name == 'SetIdentifier':
self.identifier = value
elif name == 'EvaluateTargetHealth':
self.alias_evaluate_target_health = value.lower() == 'true'
elif name == 'Weight':
self.weight = value
elif name == 'Region':
self.region = value
elif name == 'Failover':
self.failover = value
elif name == 'HealthCheckId':
self.health_check = value
def startElement(self, name, attrs, connection):
return None
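# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of boto): how the classes above are
# typically combined to build a change batch. The hosted zone id, domain name
# and address below are placeholder assumptions; commit() would additionally
# need AWS credentials, so this sketch stops at to_xml().
def _example_route53_change_batch():
    rrsets = ResourceRecordSets(connection=None,
                                hosted_zone_id='EXAMPLEZONEID',
                                comment='add www record')
    # add_change() creates and returns the Record so values can be appended.
    change = rrsets.add_change('CREATE', 'www.example.com.', 'A', ttl=300)
    change.add_value('192.0.2.1')
    # to_xml() renders the ChangeResourceRecordSetsRequest body that commit()
    # would submit via connection.change_rrsets().
    return rrsets.to_xml()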
|
|
# -*- coding: utf-8 -*-
import logging
import matplotlib.pyplot as plt
import numpy
import data_capture
import mks
class PostProcessor:
def __init__(self, run_experiment, run_data_capture, cfg, notify):
self._run_experiment = run_experiment
self._run_data_capture = run_data_capture
self._cfg = cfg
self._notify = notify
self._logger = logging.getLogger(__name__)
@staticmethod
def get_supported_data_capture():
raise NotImplementedError()
def process(self, data):
raise NotImplementedError()
def log(self, level, msg):
self._logger.log(level, msg)
if self._notify is not None:
self._notify.send_message(msg, title='Post-Processor Message')
class ScopeSignalProcessor(PostProcessor):
def __init__(self, run_experiment, run_data_capture, cfg, notify):
PostProcessor.__init__(self, run_experiment, run_data_capture, cfg, notify)
# Setup axes
# plt.ion()
self._scope_fig, self._scope_axes = plt.subplots(2, sharex=True)
self._scope_axes[0].set_title('Signals')
self._scope_axes[1].set_xlabel('Time (us)')
self._scope_axes[0].set_ylabel('Input Signal (mV)')
self._scope_axes[1].set_ylabel('Output Signal (mV)')
self._scope_axes_line = [None, None]
plt.show(block=False)
@staticmethod
def get_supported_data_capture():
return data_capture.PulseData,
def process(self, data):
time = [x * 1000000 for x in data['result_scope_time']]
scope = ([x * 1000 for x in data['result_scope_in'][0]], [x * 1000 for x in data['result_scope_out'][0]])
self._scope_axes[0].set_title("Signals {}".format(data['capture_id']))
for line in [0, 1]:
if self._scope_axes_line[line] is None:
self._scope_axes_line[line], = self._scope_axes[line].plot(time, scope[line])
else:
self._scope_axes_line[line].set_ydata(scope[line])
self._scope_fig.canvas.draw()
plt.pause(0.001)
return data
class DeltaScopeSignalProcessor(PostProcessor):
def __init__(self, run_experiment, run_data_capture, cfg, notify):
PostProcessor.__init__(self, run_experiment, run_data_capture, cfg, notify)
# Setup axes
# plt.ion()
self._scope_fig, self._scope_axes = plt.subplots(2, sharex=True)
self._scope_axes[0].set_title('Signals')
self._scope_axes[1].set_xlabel('Time (us)')
self._scope_axes[0].set_ylabel('Input Signal (mV)')
self._scope_axes[1].set_ylabel('Output Signal (mV)')
self._scope_axes_line = [None, None]
plt.show(block=False)
        self._prev = None
@staticmethod
def get_supported_data_capture():
return data_capture.PulseData,
def process(self, data):
time = [x * 1000000 for x in data['result_scope_time']]
scope = ([x * 1000 for x in data['result_scope_in'][0]], [x * 1000 for x in data['result_scope_out'][0]])
if self._prev is not None:
delta_scope = [[scope[x][y] - self._prev[x][y] for y in range(0, len(scope[x]))] for x in range(0, 2)]
self._scope_axes[0].set_title("Signals {}".format(data['capture_id']))
for line in [0, 1]:
line_data = delta_scope[line]
if self._scope_axes_line[line] is None:
self._scope_axes_line[line], = self._scope_axes[line].plot(time, line_data)
else:
self._scope_axes_line[line].set_ydata(line_data)
self._scope_axes[line].set_xlim(min(time), max(time))
ymax = max([abs(x) for x in line_data])
self._scope_axes[line].set_ylim(-1.05 * ymax, 1.05 * ymax)
self._scope_fig.canvas.draw()
plt.pause(0.001)
self._prev = scope
return data
class FrequencyCountProcessor(PostProcessor):
def __init__(self, run_experiment, run_data_capture, cfg, notify):
PostProcessor.__init__(self, run_experiment, run_data_capture, cfg, notify)
self._prev_f = None
self._rolling_f = []
self._threshold = 1e6
@staticmethod
def get_supported_data_capture():
return (data_capture.FrequencyData, data_capture.FrequencyDataLegacy,)
def process(self, data):
f = data['result_counter_frequency'][0]
if self._prev_f is not None:
df = f - self._prev_f
else:
df = 0
self._rolling_f.append(f)
while len(self._rolling_f) > 32:
self._rolling_f.pop(0)
a = numpy.array(self._rolling_f)
meana = numpy.mean(a)
da = numpy.diff(a)
meandelta = numpy.mean(da)
stda = numpy.std(a)
stdd = numpy.std(da)
if abs(df) > self._threshold:
msg = "Possible mode hop: {} Hz -> {} Hz (delta: {:.1f} Hz)".format(self._prev_f, f, df)
self.log(logging.WARNING, msg)
self._prev_f = f
self._logger.info("Frequency: {:.1f} Hz".format(f))
self._logger.info("Frequency delta: {:.1f} Hz".format(df))
self._logger.info("Frequency mean: {:.1f} Hz".format(meana))
self._logger.info("Frequency dmean: {:.1f} Hz".format(meandelta))
self._logger.info("Frequency std: {:.1f} Hz".format(stda))
self._logger.info("Frequency dstd: {:.1f} Hz".format(stdd))
return data
class FrequencyDisplayProcessor(PostProcessor):
_THRESHOLD = [60, 60 * 60, 24 * 60 * 60]
def __init__(self, run_experiment, run_data_capture, cfg, notify):
PostProcessor.__init__(self, run_experiment, run_data_capture, cfg, notify)
self._freq_fig, self._freq_axes = plt.subplots(len(self._THRESHOLD) + 1)
self._freq_axes[0].set_title('Counter Frequency')
for a in self._freq_axes:
a.set_xlabel('Time (s)')
a.set_ylabel('Frequency (Hz)')
self._freq_axes_line = [None] * (len(self._THRESHOLD) + 1)
self._plot_values = []
plt.show(block=False)
@staticmethod
def get_supported_data_capture():
return data_capture.FrequencyData, data_capture.FrequencyDataLegacy,
def process(self, data):
        t = data['capture_timestamp']
f = data['result_counter_frequency'][0]
self._plot_values.append((t, f,))
# Only store the last _THRESHOLD seconds for plotting
while self._plot_values[0][0] < (t - max(self._THRESHOLD)):
self._plot_values.pop(0)
# Generate plot sets
for n in range(1, len(self._THRESHOLD) + 1):
plot_values = [v for v in self._plot_values if (v[0] >= (t - self._THRESHOLD[n - 1]))]
x = [v[0] - plot_values[-1][0] for v in plot_values]
y = [v[1] for v in plot_values]
if self._freq_axes_line[n] is None:
self._freq_axes_line[n], = self._freq_axes[n].plot(x, y)
else:
self._freq_axes_line[n].set_xdata(x)
self._freq_axes_line[n].set_ydata(y)
ymin = min(y)
ymax = max(y)
yspan = ymax - ymin
self._freq_axes[n].set_xlim(min(x), max(x))
self._freq_axes[n].set_ylim(ymin - 0.05 * yspan, ymax + 0.05 * yspan)
# First plot is for delta frequency over shortest
plot_values = [v for v in self._plot_values if (v[0] >= (t - self._THRESHOLD[0]))]
if len(plot_values) > 2:
x = [v[0] - plot_values[-1][0] for v in plot_values]
y = [v[1] for v in plot_values]
x = x[:-1]
y = [y[n] - y[n - 1] for n in range(1, len(y))]
if self._freq_axes_line[0] is None:
self._freq_axes_line[0], = self._freq_axes[0].plot(x, y)
else:
self._freq_axes_line[0].set_xdata(x)
self._freq_axes_line[0].set_ydata(y)
ymin = min(y)
ymax = max(y)
yspan = ymax - ymin
self._freq_axes[0].set_xlim(min(x), max(x))
self._freq_axes[0].set_ylim(ymin - 0.05 * yspan, ymax + 0.05 * yspan)
self._freq_fig.canvas.draw()
plt.pause(0.001)
return data
class BlackMagicDetector(PostProcessor):
def __init__(self, run_experiment, run_data_capture, cfg, notify):
PostProcessor.__init__(self, run_experiment, run_data_capture, cfg, notify)
self._prev_timestamp = None
@staticmethod
def get_supported_data_capture():
return data_capture.FrequencyData, data_capture.FrequencyDataLegacy, data_capture.PulseData, \
data_capture.VNAData
def process(self, data):
timestamp = data['capture_timestamp']
if self._prev_timestamp is not None and timestamp <= self._prev_timestamp:
dt = timestamp - self._prev_timestamp
self.log(logging.WARNING, "Negative time shift since last capture dt: {:.3f} sec".format(dt))
self._prev_timestamp = timestamp
return data
class MKSMonitorPostProcessor(PostProcessor):
_CFG_SECTION = 'mks'
def __init__(self, run_experiment, run_data_capture, cfg, notify):
PostProcessor.__init__(self, run_experiment, run_data_capture, cfg, notify)
mks_port = self._cfg.get(self._CFG_SECTION, 'port')
self._expiry = self._cfg.getfloat(self._CFG_SECTION, 'expiry')
self._timeout = self._cfg.getfloat(self._CFG_SECTION, 'timeout')
# Connect to MKS
self._mks = mks.MKSSerialMonitor(mks_port)
@staticmethod
def get_supported_data_capture():
return data_capture.PulseData,
def process(self, data):
# If data is too old then wait for an update
if self._mks.get_lag() > self._expiry:
            self._logger.warning('Waiting for MKS update')
if not self._mks.update_wait(self._timeout):
raise mks.MKSException('MKS timed out')
mks_state = self._mks.get_state()
# Print to logging
self._logger.info("Recorded MKS Flow: {}sccm".format('sccm, '.join(str(x) for x in mks_state['mks_flow'])))
self._logger.info("Recorded VGen RTD: {}".format(', '.join(str(x) for x in mks_state['mks_vgen_rtd'])))
self._logger.info("Recorded VGen RH: {}%".format(str(mks_state['mks_vgen_relative_value'][0])))
data.update(mks_state)
return data
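# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module above): a minimal post-processor
# showing the contract defined by PostProcessor -- get_supported_data_capture()
# names the accepted capture classes and process() must return the (possibly
# augmented) data dict. The 'result_scope_out' key comes from the processors
# above; the class itself is a hypothetical example.
class ExamplePeakAmplitudeProcessor(PostProcessor):
    def __init__(self, run_experiment, run_data_capture, cfg, notify):
        PostProcessor.__init__(self, run_experiment, run_data_capture, cfg,
                               notify)

    @staticmethod
    def get_supported_data_capture():
        return data_capture.PulseData,

    def process(self, data):
        # Log the peak output amplitude of the first captured channel in mV.
        peak = max(abs(x) for x in data['result_scope_out'][0]) * 1000
        self.log(logging.INFO, "Peak output amplitude: {:.1f} mV".format(peak))
        return data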
|
|
"""Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly
# License: BSD Style.
from __future__ import print_function
from __future__ import division
from abc import ABCMeta, abstractmethod
import sys
import warnings
import numpy as np
from scipy import stats
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..utils import check_random_state, array2d, check_arrays
from ..utils.extmath import logsumexp
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import _random_sample_mask
from ..tree._tree import DTYPE, TREE_LEAF
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
class QuantileEstimator(BaseEstimator):
"""An estimator predicting the alpha-quantile of the training targets."""
def __init__(self, alpha=0.9):
if not 0 < alpha < 1.0:
raise ValueError("`alpha` must be in (0, 1.0)")
self.alpha = alpha
def fit(self, X, y):
self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
def predict(self, X):
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.quantile)
return y
class MeanEstimator(BaseEstimator):
"""An estimator predicting the mean of the training targets."""
def fit(self, X, y):
self.mean = np.mean(y)
def predict(self, X):
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.mean)
return y
class LogOddsEstimator(BaseEstimator):
"""An estimator predicting the log odds ratio."""
def fit(self, X, y):
n_pos = np.sum(y)
self.prior = np.log(n_pos / (y.shape[0] - n_pos))
def predict(self, X):
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.prior)
return y
class PriorProbabilityEstimator(BaseEstimator):
"""An estimator predicting the probability of each
class in the training data.
"""
def fit(self, X, y):
class_counts = np.bincount(y)
self.priors = class_counts / float(y.shape[0])
def predict(self, X):
y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
y[:] = self.priors
return y
class LossFunction(object):
"""Abstract base class for various loss functions.
Attributes
----------
K : int
The number of regression trees to be induced;
1 for regression and binary classification;
``n_classes`` for multi-class classification.
"""
__metaclass__ = ABCMeta
is_multi_class = False
def __init__(self, n_classes):
self.K = n_classes
def init_estimator(self, X, y):
"""Default ``init`` estimator for loss function. """
raise NotImplementedError()
@abstractmethod
def __call__(self, y, pred):
"""Compute the loss of prediction ``pred`` and ``y``. """
@abstractmethod
def negative_gradient(self, y, y_pred, **kargs):
"""Compute the negative gradient.
Parameters
        ----------
y : np.ndarray, shape=(n,)
The target labels.
y_pred : np.ndarray, shape=(n,):
The predictions.
"""
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_mask, learning_rate=1.0, k=0):
"""Update the terminal regions (=leaves) of the given tree and
updates the current predictions of the model. Traverses tree
and invokes template method `_update_terminal_region`.
Parameters
----------
tree : tree.Tree
The tree object.
X : np.ndarray, shape=(n, m)
The data array.
y : np.ndarray, shape=(n,)
The target labels.
residual : np.ndarray, shape=(n,)
The residuals (usually the negative gradient).
y_pred : np.ndarray, shape=(n,):
The predictions.
"""
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
# update each leaf (= perform line search)
for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
self._update_terminal_region(tree, masked_terminal_regions,
leaf, X, y, residual,
y_pred[:, k])
# update predictions (both in-bag and out-of-bag)
y_pred[:, k] += (learning_rate
* tree.value[:, 0, 0].take(terminal_regions, axis=0))
@abstractmethod
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred):
"""Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(LossFunction):
"""Base class for regression loss functions. """
__metaclass__ = ABCMeta
def __init__(self, n_classes):
if n_classes != 1:
raise ValueError("``n_classes`` must be 1 for regression")
super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
"""Loss function for least squares (LS) estimation.
Terminal regions need not to be updated for least squares. """
def init_estimator(self):
return MeanEstimator()
def __call__(self, y, pred):
return np.mean((y - pred.ravel()) ** 2.0)
def negative_gradient(self, y, pred, **kargs):
return y - pred.ravel()
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_mask, learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel()
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred):
pass
class LeastAbsoluteError(RegressionLossFunction):
"""Loss function for least absolute deviation (LAD) regression. """
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred):
return np.abs(y - pred.ravel()).mean()
def negative_gradient(self, y, pred, **kargs):
"""1.0 if y - pred > 0.0 else -1.0"""
pred = pred.ravel()
return 2.0 * (y - pred > 0.0) - 1.0
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
tree.value[leaf, 0, 0] = np.median(y.take(terminal_region, axis=0) -
pred.take(terminal_region, axis=0))
class HuberLossFunction(RegressionLossFunction):
"""Loss function for least absolute deviation (LAD) regression. """
def __init__(self, n_classes, alpha=0.9):
super(HuberLossFunction, self).__init__(n_classes)
self.alpha = alpha
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred):
pred = pred.ravel()
diff = y - pred
gamma = self.gamma
gamma_mask = np.abs(diff) <= gamma
sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
return (sq_loss + lin_loss) / y.shape[0]
def negative_gradient(self, y, pred, **kargs):
pred = pred.ravel()
diff = y - pred
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
residual = np.zeros((y.shape[0],), dtype=np.float64)
residual[gamma_mask] = diff[gamma_mask]
residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
self.gamma = gamma
return residual
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
gamma = self.gamma
diff = y.take(terminal_region, axis=0) - \
pred.take(terminal_region, axis=0)
median = np.median(diff)
diff_minus_median = diff - median
tree.value[leaf, 0] = median + np.mean(
np.sign(diff_minus_median) *
np.minimum(np.abs(diff_minus_median), gamma))
class QuantileLossFunction(RegressionLossFunction):
"""Loss function for quantile regression.
Quantile regression allows to estimate the percentiles
of the conditional distribution of the target.
"""
def __init__(self, n_classes, alpha=0.9):
super(QuantileLossFunction, self).__init__(n_classes)
assert 0 < alpha < 1.0
self.alpha = alpha
self.percentile = alpha * 100.0
def init_estimator(self):
return QuantileEstimator(self.alpha)
def __call__(self, y, pred):
pred = pred.ravel()
diff = y - pred
alpha = self.alpha
mask = y > pred
return (alpha * diff[mask].sum() +
(1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
def negative_gradient(self, y, pred, **kargs):
alpha = self.alpha
pred = pred.ravel()
mask = y > pred
return (alpha * mask) - ((1.0 - alpha) * ~mask)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
diff = y.take(terminal_region, axis=0) - \
pred.take(terminal_region, axis=0)
val = stats.scoreatpercentile(diff, self.percentile)
tree.value[leaf, 0] = val
class BinomialDeviance(LossFunction):
"""Binomial deviance loss function for binary classification.
Binary classification is a special case; here, we only need to
fit one tree instead of ``n_classes`` trees.
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("%s requires 2 classes." %
self.__class__.__name__)
# we only need to fit one tree for binary clf.
super(BinomialDeviance, self).__init__(1)
def init_estimator(self):
return LogOddsEstimator()
def __call__(self, y, pred):
"""Compute the deviance (= negative log-likelihood). """
# logaddexp(0, v) == log(1.0 + exp(v))
pred = pred.ravel()
return np.sum(np.logaddexp(0.0, -2 * y * pred)) / y.shape[0]
def negative_gradient(self, y, pred, **kargs):
return y - 1.0 / (1.0 + np.exp(-pred.ravel()))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred):
"""Make a single Newton-Raphson step. """
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
numerator = residual.sum()
denominator = np.sum((y - residual) * (1 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
class MultinomialDeviance(LossFunction):
"""Multinomial deviance loss function for multi-class classification.
For multi-class classification we need to fit ``n_classes`` trees at
each stage.
"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError("%s requires more than 2 classes."
% self.__class__.__name__)
super(MultinomialDeviance, self).__init__(n_classes)
def init_estimator(self):
return PriorProbabilityEstimator()
def __call__(self, y, pred):
# create one-hot label encoding
Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
for k in range(self.K):
Y[:, k] = y == k
return np.sum(-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
def negative_gradient(self, y, pred, k=0):
"""Compute negative gradient for the ``k``-th class. """
return y - np.nan_to_num(np.exp(pred[:, k] -
logsumexp(pred, axis=1)))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred):
"""Make a single Newton-Raphson step. """
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
numerator = residual.sum()
numerator *= (self.K - 1) / self.K
denominator = np.sum((y - residual) * (1.0 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
'lad': LeastAbsoluteError,
'huber': HuberLossFunction,
'quantile': QuantileLossFunction,
'bdeviance': BinomialDeviance,
'mdeviance': MultinomialDeviance,
                  'deviance': None}  # for both multinomial and binomial
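# Illustrative sketch (not part of scikit-learn): how a concrete LossFunction
# is used during boosting -- instantiate it with the number of classes,
# evaluate the current loss, and compute the negative gradient (the
# pseudo-residuals the next stage of trees is fit on). The toy values below
# are arbitrary.
def _example_least_squares_loss():
    y = np.array([3.0, -1.0, 2.0])
    pred = np.zeros((3, 1))
    loss = LeastSquaresError(1)
    return loss(y, pred), loss.negative_gradient(y, pred)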
class BaseGradientBoosting(BaseEnsemble):
"""Abstract base class for Gradient Boosting. """
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, max_depth, init, subsample, max_features,
random_state, alpha=0.9, verbose=0, learn_rate=None):
        if learn_rate is not None:
            learning_rate = learn_rate
            warnings.warn(
                "Parameter learn_rate has been renamed to "
                "'learning_rate' and will be removed in release 0.14.",
                DeprecationWarning, stacklevel=2)
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.estimators_ = np.empty((0, 0), dtype=np.object)
def _fit_stage(self, i, X, X_argsorted, y, y_pred, sample_mask):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion="mse",
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_density=self.min_density,
max_features=self.max_features,
compute_importances=False,
random_state=self.random_state)
tree.fit(X, residual, sample_mask, X_argsorted, check_input=False)
# update tree leaves
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_mask, self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return y_pred
def fit(self, X, y):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features. Use fortran-style
to avoid memory copies.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes
``0, 1, ..., n_classes_-1``
Returns
-------
self : object
Returns self.
"""
# Check input
X, y = check_arrays(X, y, sparse_format='dense')
X = np.asfortranarray(X, dtype=DTYPE)
y = np.ravel(y, order='C')
# Check parameters
n_samples, n_features = X.shape
self.n_features = n_features
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0")
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0")
if self.loss not in LOSS_FUNCTIONS:
raise ValueError("Loss '%s' not supported. " % self.loss)
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be larger than 0")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be larger than 0")
if self.subsample <= 0.0 or self.subsample > 1:
raise ValueError("subsample must be in (0,1]")
if self.max_features is None:
self.max_features = n_features
if not (0 < self.max_features <= n_features):
raise ValueError("max_features must be in (0, n_features]")
if self.max_depth <= 0:
raise ValueError("max_depth must be larger than 0")
if self.init is not None:
if (not hasattr(self.init, 'fit')
or not hasattr(self.init, 'predict')):
raise ValueError("init must be valid estimator")
else:
self.init = self.loss_.init_estimator()
        if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0)")
self.random_state = check_random_state(self.random_state)
# use default min_density (0.1) only for deep trees
self.min_density = 0.0 if self.max_depth < 6 else 0.1
# create argsorted X for fast tree induction
X_argsorted = np.asfortranarray(
np.argsort(X.T, axis=1).astype(np.int32).T)
# fit initial model
self.init.fit(X, y)
# init predictions
y_pred = self.init.predict(X)
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
        self.oob_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
sample_mask = np.ones((n_samples,), dtype=np.bool)
n_inbag = max(1, int(self.subsample * n_samples))
self.random_state = check_random_state(self.random_state)
# perform boosting iterations
for i in range(self.n_estimators):
# subsampling
if self.subsample < 1.0:
# TODO replace with ``np.choice`` if possible.
sample_mask = _random_sample_mask(n_samples, n_inbag,
self.random_state)
# fit next stage of trees
y_pred = self._fit_stage(i, X, X_argsorted, y, y_pred, sample_mask)
# track deviance (= loss)
if self.subsample < 1.0:
self.train_score_[i] = self.loss_(y[sample_mask],
y_pred[sample_mask])
self.oob_score_[i] = self.loss_(y[~sample_mask],
y_pred[~sample_mask])
if self.verbose > 1:
print("built tree %d of %d, train score = %.6e, "
"oob score = %.6e" % (i + 1, self.n_estimators,
self.train_score_[i],
self.oob_score_[i]))
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = self.loss_(y, y_pred)
if self.verbose > 1:
print("built tree %d of %d, train score = %.6e" %
(i + 1, self.n_estimators, self.train_score_[i]))
if self.verbose == 1:
print(end='.')
sys.stdout.flush()
return self
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, call `fit` "
"before making predictions`.")
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be %d, not %d." %
(self.n_features, X.shape[1]))
score = self.init.predict(X).astype(np.float64)
return score
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, k]
The decision function of the input samples. Classes are
ordered by arithmetical order. Regression and binary
classification are special cases with ``k == 1``,
otherwise ``k==n_classes``.
"""
X = array2d(X, dtype=DTYPE, order='C')
score = self._init_decision_function(X)
predict_stages(self.estimators_, X, self.learning_rate, score)
return score
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. Classes are
ordered by arithmetical order. Regression and binary
classification are special cases with ``k == 1``,
otherwise ``k==n_classes``.
"""
X = array2d(X, dtype=DTYPE, order='C')
score = self._init_decision_function(X)
for i in range(self.n_estimators):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score
@property
def feature_importances_(self):
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
total_sum = np.zeros((self.n_features, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(
tree.tree_.compute_feature_importances(method='gini')
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
"""Gradient Boosting for classification.
GB builds an additive model in a
forward stage-wise fashion; it allows for the optimization of
arbitrary differentiable loss functions. In each stage ``n_classes_``
regression trees are fit on the negative gradient of the
binomial or multinomial deviance loss function. Binary classification
is a special case where only a single regression tree is induced.
Parameters
----------
loss : {'deviance'}, optional (default='deviance')
loss function to be optimized. 'deviance' refers to
deviance (= logistic regression) for classification
with probabilistic outputs.
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
min_samples_split : integer, optional (default=1)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, None, optional (default=None)
The number of features to consider when looking for the best split.
        Features are chosen randomly at each split point.
If None, then `max_features=n_features`. Choosing
`max_features < n_features` leads to a reduction of variance
and an increase in bias.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints '.' for every tree built.
If greater than 1 then it prints the score for every tree.
Attributes
----------
`feature_importances_` : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
`oob_score_` : array, shape = [n_estimators]
Score of the training dataset obtained using an out-of-bag estimate.
The i-th score ``oob_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the out-of-bag sample.
`train_score_` : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
`loss_` : LossFunction
The concrete ``LossFunction`` object.
`init` : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
`estimators_`: list of DecisionTreeRegressor
The collection of fitted sub-estimators.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier().fit(samples, labels)
>>> print(gb.predict([[0.5, 0, 0]]))
[0]
See also
--------
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=1, min_samples_leaf=1,
max_depth=3, init=None, random_state=None,
max_features=None, verbose=0, learn_rate=None):
super(GradientBoostingClassifier, self).__init__(
loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, max_depth, init, subsample, max_features,
random_state, verbose=verbose, learn_rate=learn_rate)
def fit(self, X, y):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features. Use fortran-style
to avoid memory copies.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes
``0, 1, ..., n_classes_-1``
Returns
-------
self : object
Returns self.
"""
self.classes_ = np.unique(y)
self.n_classes_ = len(self.classes_)
y = np.searchsorted(self.classes_, y)
if self.loss == 'deviance':
self.loss = 'mdeviance' if len(self.classes_) > 2 else 'bdeviance'
return super(GradientBoostingClassifier, self).fit(X, y)
def _score_to_proba(self, score):
"""Compute class probability estimates from decision scores. """
proba = np.ones((score.shape[0], self.n_classes_), dtype=np.float64)
if not self.loss_.is_multi_class:
proba[:, 1] = 1.0 / (1.0 + np.exp(-score.ravel()))
proba[:, 0] -= proba[:, 1]
else:
proba = np.nan_to_num(
np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
return proba
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples]
The class probabilities of the input samples. Classes are
ordered by arithmetical order.
"""
score = self.decision_function(X)
return self._score_to_proba(score)
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted value of the input samples.
"""
for score in self.staged_decision_function(X):
yield self._score_to_proba(score)
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
proba = self.predict_proba(X)
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
def staged_predict(self, X):
"""Predict class probabilities at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted value of the input samples.
"""
for proba in self.staged_predict_proba(X):
yield self.classes_.take(np.argmax(proba, axis=1), axis=0)
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
        loss function solely based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
min_samples_split : integer, optional (default=1)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, None, optional (default=None)
The number of features to consider when looking for the best split.
        Features are chosen randomly at each split point.
If None, then `max_features=n_features`. Choosing
`max_features < n_features` leads to a reduction of variance
and an increase in bias.
alpha : float (default=0.9)
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints '.' for every tree built.
If greater than 1 then it prints the score for every tree.
Attributes
----------
`feature_importances_` : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
`oob_score_` : array, shape = [n_estimators]
Score of the training dataset obtained using an out-of-bag estimate.
The i-th score ``oob_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the out-of-bag sample.
`train_score_` : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
`loss_` : LossFunction
The concrete ``LossFunction`` object.
`init` : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
`estimators_`: list of DecisionTreeRegressor
The collection of fitted sub-estimators.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> gb = GradientBoostingRegressor().fit(samples, labels)
>>> print(gb.predict([[0, 0, 0]]))
... # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
[ 1.32806...
See also
--------
DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=1, min_samples_leaf=1,
max_depth=3, init=None, random_state=None,
max_features=None, alpha=0.9, verbose=0, learn_rate=None):
super(GradientBoostingRegressor, self).__init__(
loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, max_depth, init, subsample, max_features,
random_state, alpha, verbose, learn_rate=learn_rate)
def fit(self, X, y):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features. Use fortran-style
to avoid memory copies.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes
``0, 1, ..., n_classes_-1``
Returns
-------
self : object
Returns self.
"""
self.n_classes_ = 1
return super(GradientBoostingRegressor, self).fit(X, y)
def predict(self, X):
"""Predict regression target for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y: array of shape = [n_samples]
The predicted values.
"""
return self.decision_function(X).ravel()
def staged_predict(self, X):
"""Predict regression target at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted value of the input samples.
"""
for y in self.staged_decision_function(X):
yield y.ravel()
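# ---------------------------------------------------------------------------
# Illustrative sketch (not part of scikit-learn): the staged_* methods above
# exist so that held-out error can be monitored after every boosting stage.
# The synthetic data and parameter choices below are arbitrary assumptions.
def _example_staged_monitoring():
    rng = np.random.RandomState(0)
    X = rng.rand(200, 4)
    y = (X[:, 0] + X[:, 1] > 1.0).astype(np.int64)
    X_train, X_test = X[:150], X[150:]
    y_train, y_test = y[:150], y[150:]
    clf = GradientBoostingClassifier(n_estimators=25, max_depth=2,
                                     random_state=0)
    clf.fit(X_train, y_train)
    # Misclassification rate on the held-out set after each stage.
    return [np.mean(y_pred != y_test) for y_pred in clf.staged_predict(X_test)]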
|
|
""" Classes for interpolating values.
"""
__all__ = ['interp1d', 'interp2d', 'spline', 'spleval', 'splmake', 'spltopp',
'ppform', 'lagrange']
from numpy import shape, sometrue, rank, array, transpose, searchsorted, \
ones, logical_or, atleast_1d, atleast_2d, meshgrid, ravel, \
dot, poly1d, asarray, intp
import numpy as np
import scipy.special as spec
import math
import fitpack
import _fitpack
def reduce_sometrue(a):
all = a
while len(shape(all)) > 1:
all = sometrue(all,axis=0)
return all
def lagrange(x, w):
"""Return the Lagrange interpolating polynomial of the data-points (x,w)
Warning: This implementation is numerically unstable; do not expect to
be able to use more than about 20 points even if they are chosen optimally.
"""
M = len(x)
p = poly1d(0.0)
for j in xrange(M):
pt = poly1d(w[j])
for k in xrange(M):
if k == j: continue
fac = x[j]-x[k]
pt *= poly1d([1.0,-x[k]])/fac
p += pt
return p
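# Illustrative sketch: lagrange() returns a numpy poly1d, so interpolating the
# three points of y = x**2 recovers (up to round-off) the quadratic itself.
# The sample points are chosen purely for illustration.
def _example_lagrange():
    p = lagrange([0.0, 1.0, 2.0], [0.0, 1.0, 4.0])
    # p behaves like a polynomial; p(3.0) should be close to 9.0.
    return p(3.0)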
# !! Need to find argument for keeping initialize. If it isn't
# !! found, get rid of it!
class interp2d(object):
"""
interp2d(x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=nan)
Interpolate over a 2D grid.
Parameters
----------
x, y : 1D arrays
Arrays defining the coordinates of a 2D grid. If the
points lie on a regular grid, `x` can specify the column coordinates
and `y` the row coordinates, e.g.::
x = [0,1,2]; y = [0,3,7]
otherwise x and y must specify the full coordinates, i.e.::
x = [0,1,2,0,1,2,0,1,2]; y = [0,0,0,3,3,3,7,7,7]
If `x` and `y` are multi-dimensional, they are flattened before use.
z : 1D array
The values of the interpolated function on the grid points. If
z is a multi-dimensional array, it is flattened before use.
kind : {'linear', 'cubic', 'quintic'}
The kind of interpolation to use.
copy : bool
If True, then data is copied, otherwise only a reference is held.
bounds_error : bool
If True, when interpolated values are requested outside of the
domain of the input data, an error is raised.
If False, then `fill_value` is used.
fill_value : number
If provided, the value to use for points outside of the
interpolation domain. Defaults to NaN.
Raises
------
ValueError when inputs are invalid.
See Also
--------
bisplrep, bisplev : spline interpolation based on FITPACK
BivariateSpline : a more recent wrapper of the FITPACK routines
"""
def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=np.nan):
self.x, self.y, self.z = map(ravel, map(asarray, [x, y, z]))
if len(self.z) == len(self.x) * len(self.y):
self.x, self.y = meshgrid(x,y)
self.x, self.y = map(ravel, [self.x, self.y])
if len(self.x) != len(self.y):
raise ValueError("x and y must have equal lengths")
if len(self.z) != len(self.x):
raise ValueError("Invalid length for input z")
try:
kx = ky = {'linear' : 1,
'cubic' : 3,
'quintic' : 5}[kind]
except KeyError:
raise ValueError("Unsupported interpolation type.")
self.tck = fitpack.bisplrep(self.x, self.y, self.z, kx=kx, ky=ky, s=0.)
def __call__(self,x,y,dx=0,dy=0):
"""Interpolate the function.
Parameters
----------
x : 1D array
x-coordinates of the mesh on which to interpolate.
y : 1D array
y-coordinates of the mesh on which to interpolate.
dx : int >= 0, < kx
Order of partial derivatives in x.
dy : int >= 0, < ky
Order of partial derivatives in y.
Returns
-------
z : 2D array with shape (len(y), len(x))
The interpolated values.
"""
x = atleast_1d(x)
y = atleast_1d(y)
z = fitpack.bisplev(x, y, self.tck, dx, dy)
z = atleast_2d(z)
z = transpose(z)
if len(z)==1:
z = z[0]
return array(z)
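# Illustrative sketch (assuming the FITPACK wrappers behave as in SciPy):
# bilinear interpolation on a small regular grid. The grid stores z = x + y so
# interpolated values can be checked by eye; all values are made up.
def _example_interp2d():
    x = [0.0, 1.0, 2.0]
    y = [0.0, 3.0, 7.0]
    # z holds x + y for every grid point, flattened row by row (x fastest).
    z = [xi + yi for yi in y for xi in x]
    f = interp2d(x, y, z, kind='linear')
    # Should be close to 0.5 + 1.5 = 2.0.
    return f([0.5], [1.5])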
class interp1d(object):
""" Interpolate a 1D function.
See Also
--------
splrep, splev - spline interpolation based on FITPACK
UnivariateSpline - a more recent wrapper of the FITPACK routines
"""
def __init__(self, x, y, kind='linear', axis=-1,
copy=True, bounds_error=True, fill_value=np.nan):
""" Initialize a 1D linear interpolation class.
Description
-----------
x and y are arrays of values used to approximate some function f:
y = f(x)
This class returns a function whose call method uses linear
interpolation to find the value of new points.
Parameters
----------
x : array
A 1D array of monotonically increasing real values. x cannot
include duplicate values (otherwise f is overspecified)
y : array
An N-D array of real values. y's length along the interpolation
axis must be equal to the length of x.
kind : str or int
Specifies the kind of interpolation as a string ('linear',
            'nearest', 'zero', 'slinear', 'quadratic', 'cubic') or as an integer
specifying the order of the spline interpolator to use.
axis : int
Specifies the axis of y along which to interpolate. Interpolation
defaults to the last axis of y.
copy : bool
If True, the class makes internal copies of x and y.
If False, references to x and y are used.
The default is to copy.
bounds_error : bool
If True, an error is thrown any time interpolation is attempted on
a value outside of the range of x (where extrapolation is
necessary).
If False, out of bounds values are assigned fill_value.
By default, an error is raised.
fill_value : float
If provided, then this value will be used to fill in for requested
points outside of the data range.
If not provided, then the default is NaN.
"""
self.copy = copy
self.bounds_error = bounds_error
self.fill_value = fill_value
if kind in ['zero', 'slinear', 'quadratic', 'cubic']:
order = {'nearest':0, 'zero':0,'slinear':1,
'quadratic':2, 'cubic':3}[kind]
kind = 'spline'
elif isinstance(kind, int):
order = kind
kind = 'spline'
elif kind not in ('linear', 'nearest'):
raise NotImplementedError("%s is unsupported: Use fitpack "\
"routines for other types." % kind)
x = array(x, copy=self.copy)
y = array(y, copy=self.copy)
if x.ndim != 1:
raise ValueError("the x array must have exactly one dimension.")
if y.ndim == 0:
raise ValueError("the y array must have at least one dimension.")
# Force-cast y to a floating-point type, if it's not yet one
if not issubclass(y.dtype.type, np.inexact):
y = y.astype(np.float_)
# Normalize the axis to ensure that it is positive.
self.axis = axis % len(y.shape)
self._kind = kind
if kind in ('linear', 'nearest'):
# Make a "view" of the y array that is rotated to the interpolation
# axis.
axes = range(y.ndim)
del axes[self.axis]
axes.append(self.axis)
oriented_y = y.transpose(axes)
minval = 2
len_y = oriented_y.shape[-1]
if kind == 'linear':
self._call = self._call_linear
elif kind == 'nearest':
self.x_bds = (x[1:] + x[:-1]) / 2.0
self._call = self._call_nearest
else:
axes = range(y.ndim)
del axes[self.axis]
axes.insert(0, self.axis)
oriented_y = y.transpose(axes)
minval = order + 1
len_y = oriented_y.shape[0]
self._call = self._call_spline
self._spline = splmake(x,oriented_y,order=order)
len_x = len(x)
if len_x != len_y:
raise ValueError("x and y arrays must be equal in length along "
"interpolation axis.")
if len_x < minval:
raise ValueError("x and y arrays must have at "
"least %d entries" % minval)
self.x = x
self.y = oriented_y
def _call_linear(self, x_new):
        # 2. Find where in the original data the values to interpolate
# would be inserted.
# Note: If x_new[n] == x[m], then m is returned by searchsorted.
x_new_indices = searchsorted(self.x, x_new)
# 3. Clip x_new_indices so that they are within the range of
# self.x indices and at least 1. Removes mis-interpolation
# of x_new[n] = x[0]
x_new_indices = x_new_indices.clip(1, len(self.x)-1).astype(int)
# 4. Calculate the slope of regions that each x_new value falls in.
lo = x_new_indices - 1
hi = x_new_indices
x_lo = self.x[lo]
x_hi = self.x[hi]
y_lo = self.y[..., lo]
y_hi = self.y[..., hi]
# Note that the following two expressions rely on the specifics of the
# broadcasting semantics.
slope = (y_hi-y_lo) / (x_hi-x_lo)
# 5. Calculate the actual value for each entry in x_new.
y_new = slope*(x_new-x_lo) + y_lo
return y_new
def _call_nearest(self, x_new):
""" Find nearest neighbour interpolated y_new = f(x_new)."""
# 2. Find where in the averaged data the values to interpolate
# would be inserted.
# Note: use side='left' (right) to searchsorted() to define the
# halfway point to be nearest to the left (right) neighbour
x_new_indices = searchsorted(self.x_bds, x_new, side='left')
# 3. Clip x_new_indices so that they are within the range of x indices.
x_new_indices = x_new_indices.clip(0, len(self.x)-1).astype(intp)
# 4. Calculate the actual value for each entry in x_new.
y_new = self.y[..., x_new_indices]
return y_new
def _call_spline(self, x_new):
        x_new = np.asarray(x_new)
        result = spleval(self._spline, x_new.ravel())
return result.reshape(x_new.shape+result.shape[1:])
def __call__(self, x_new):
"""Find interpolated y_new = f(x_new).
Parameters
----------
x_new : number or array
New independent variable(s).
Returns
-------
y_new : ndarray
Interpolated value(s) corresponding to x_new.
"""
        # 1. Handle values in x_new that are outside of x. Throw an error,
        # or compute a boolean mask indicating the out-of-bounds values.
# The behavior is set by the bounds_error variable.
x_new = asarray(x_new)
out_of_bounds = self._check_bounds(x_new)
y_new = self._call(x_new)
# Rotate the values of y_new back so that they correspond to the
# correct x_new values. For N-D x_new, take the last (for linear)
# or first (for other splines) N axes
# from y_new and insert them where self.axis was in the list of axes.
nx = x_new.ndim
ny = y_new.ndim
# 6. Fill any values that were out of bounds with fill_value.
# and
# 7. Rotate the values back to their proper place.
if nx == 0:
# special case: x is a scalar
if out_of_bounds:
if ny == 0:
return asarray(self.fill_value)
else:
y_new[...] = self.fill_value
return asarray(y_new)
elif self._kind in ('linear', 'nearest'):
y_new[..., out_of_bounds] = self.fill_value
axes = range(ny - nx)
axes[self.axis:self.axis] = range(ny - nx, ny)
return y_new.transpose(axes)
else:
y_new[out_of_bounds] = self.fill_value
axes = range(nx, ny)
axes[self.axis:self.axis] = range(nx)
return y_new.transpose(axes)
def _check_bounds(self, x_new):
"""Check the inputs for being in the bounds of the interpolated data.
Parameters
----------
x_new : array
Returns
-------
out_of_bounds : bool array
The mask on x_new of values that are out of the bounds.
"""
# If self.bounds_error is True, we raise an error if any x_new values
# fall outside the range of x. Otherwise, we return an array indicating
# which values are outside the boundary region.
below_bounds = x_new < self.x[0]
above_bounds = x_new > self.x[-1]
# !! Could provide more information about which values are out of bounds
if self.bounds_error and below_bounds.any():
raise ValueError("A value in x_new is below the interpolation "
"range.")
if self.bounds_error and above_bounds.any():
raise ValueError("A value in x_new is above the interpolation "
"range.")
# !! Should we emit a warning if some values are out of bounds?
# !! matlab does not.
out_of_bounds = logical_or(below_bounds, above_bounds)
return out_of_bounds
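# A minimal usage sketch for interp1d (illustrative only; it assumes the numpy
# names used above, e.g. array, asarray and searchsorted, are available at
# module level, as the code in this class relies on):
#
#     x = np.arange(10.0)
#     y = x**2
#     f = interp1d(x, y, kind='linear')
#     f(np.array([2.5, 7.25]))    # piecewise-linear values at the new points
#     g = interp1d(x, y, kind='nearest', bounds_error=False, fill_value=0.0)
#     g(12.0)                     # out of range, so fill_value (0.0) is used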
class ppform(object):
"""The ppform of the piecewise polynomials is given in terms of coefficients
and breaks. The polynomial in the ith interval is
x_{i} <= x < x_{i+1}
S_i = sum(coefs[m,i]*(x-breaks[i])^(k-m), m=0..k)
where k is the degree of the polynomial.
"""
def __init__(self, coeffs, breaks, fill=0.0, sort=False):
self.coeffs = np.asarray(coeffs)
if sort:
self.breaks = np.sort(breaks)
else:
self.breaks = np.asarray(breaks)
self.K = self.coeffs.shape[0]
self.fill = fill
self.a = self.breaks[0]
self.b = self.breaks[-1]
def __call__(self, xnew):
saveshape = np.shape(xnew)
xnew = np.ravel(xnew)
res = np.empty_like(xnew)
mask = (xnew >= self.a) & (xnew <= self.b)
res[~mask] = self.fill
xx = xnew.compress(mask)
indxs = np.searchsorted(self.breaks, xx)-1
indxs = indxs.clip(0,len(self.breaks))
pp = self.coeffs
diff = xx - self.breaks.take(indxs)
V = np.vander(diff,N=self.K)
# values = np.diag(dot(V,pp[:,indxs]))
values = array([dot(V[k,:],pp[:,indxs[k]]) for k in xrange(len(xx))])
res[mask] = values
res.shape = saveshape
return res
def fromspline(cls, xk, cvals, order, fill=0.0):
N = len(xk)-1
sivals = np.empty((order+1,N), dtype=float)
for m in xrange(order,-1,-1):
fact = spec.gamma(m+1)
res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)
res /= fact
sivals[order-m,:] = res
return cls(sivals, xk, fill=fill)
fromspline = classmethod(fromspline)
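# A small worked example of the ppform evaluation above (illustrative only):
# with breaks [0, 1, 2] and coeffs [[1., 1.], [0., 0.]] (so k = 1), the pieces
# are S_0 = (x - 0) on [0, 1) and S_1 = (x - 1) on [1, 2], hence
#
#     pp = ppform([[1., 1.], [0., 0.]], [0., 1., 2.])
#     pp(0.5)    # -> 0.5
#     pp(1.5)    # -> 0.5
#     pp(3.0)    # -> 0.0 (outside [a, b], so the fill value is returned)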
def _dot0(a, b):
"""Similar to numpy.dot, but sum over last axis of a and 1st axis of b"""
if b.ndim <= 2:
return dot(a, b)
else:
axes = range(b.ndim)
axes.insert(-1, 0)
axes.pop(0)
return dot(a, b.transpose(axes))
def _find_smoothest(xk, yk, order, conds=None, B=None):
# construct Bmatrix, and Jmatrix
# e = J*c
# minimize norm(e,2) given B*c=yk
# if desired B can be given
# conds is ignored
N = len(xk)-1
K = order
if B is None:
B = _fitpack._bsplmat(order, xk)
J = _fitpack._bspldismat(order, xk)
u,s,vh = np.dual.svd(B)
ind = K-1
V2 = vh[-ind:,:].T
V1 = vh[:-ind,:].T
A = dot(J.T,J)
tmp = dot(V2.T,A)
Q = dot(tmp,V2)
p = np.dual.solve(Q,tmp)
tmp = dot(V2,p)
tmp = np.eye(N+K) - tmp
tmp = dot(tmp,V1)
tmp = dot(tmp,np.diag(1.0/s))
tmp = dot(tmp,u.T)
return _dot0(tmp, yk)
def _setdiag(a, k, v):
assert (a.ndim==2)
M,N = a.shape
if k > 0:
start = k
num = N-k
else:
num = M+k
start = abs(k)*N
end = start + num*(N+1)-1
a.flat[start:end:(N+1)] = v
# Return the spline that minimizes the discontinuity of the
# "order-th" derivative; for order >= 2.
def _find_smoothest2(xk, yk):
N = len(xk)-1
Np1 = N+1
# find pseudo-inverse of B directly.
Bd = np.empty((Np1,N))
for k in range(-N,N):
if (k<0):
l = np.arange(-k,Np1)
v = (l+k+1)
if ((k+1) % 2):
v = -v
else:
l = np.arange(k,N)
v = N-l
if ((k % 2)):
v = -v
_setdiag(Bd,k,v)
Bd /= (Np1)
V2 = np.ones((Np1,))
V2[1::2] = -1
V2 /= math.sqrt(Np1)
dk = np.diff(xk)
b = 2*np.diff(yk, axis=0)/dk
J = np.zeros((N-1,N+1))
idk = 1.0/dk
_setdiag(J,0,idk[:-1])
_setdiag(J,1,-idk[1:]-idk[:-1])
_setdiag(J,2,idk[1:])
A = dot(J.T,J)
val = dot(V2,dot(A,V2))
res1 = dot(np.outer(V2,V2)/val,A)
mk = dot(np.eye(Np1)-res1, _dot0(Bd,b))
return mk
def _get_spline2_Bb(xk, yk, kind, conds):
Np1 = len(xk)
dk = xk[1:]-xk[:-1]
if kind == 'not-a-knot':
# use banded-solver
nlu = (1,1)
B = ones((3,Np1))
alpha = 2*(yk[1:]-yk[:-1])/dk
zrs = np.zeros((1,)+yk.shape[1:])
row = (Np1-1)//2
b = np.concatenate((alpha[:row],zrs,alpha[row:]),axis=0)
B[0,row+2:] = 0
B[2,:(row-1)] = 0
B[0,row+1] = dk[row-1]
B[1,row] = -dk[row]-dk[row-1]
B[2,row-1] = dk[row]
return B, b, None, nlu
else:
raise NotImplementedError("quadratic %s is not available" % kind)
def _get_spline3_Bb(xk, yk, kind, conds):
# internal function to compute different tri-diagonal system
# depending on the kind of spline requested.
# conds is only used for 'second' and 'first'
Np1 = len(xk)
if kind in ['natural', 'second']:
if kind == 'natural':
m0, mN = 0.0, 0.0
else:
m0, mN = conds
# the matrix to invert is (N-1,N-1)
# use banded solver
beta = 2*(xk[2:]-xk[:-2])
alpha = xk[1:]-xk[:-1]
nlu = (1,1)
B = np.empty((3,Np1-2))
B[0,1:] = alpha[2:]
B[1,:] = beta
B[2,:-1] = alpha[1:-1]
dyk = yk[1:]-yk[:-1]
b = (dyk[1:]/alpha[1:] - dyk[:-1]/alpha[:-1])
b *= 6
b[0] -= m0
b[-1] -= mN
def append_func(mk):
# put m0 and mN into the correct shape for
# concatenation
ma = array(m0,copy=0,ndmin=yk.ndim)
mb = array(mN,copy=0,ndmin=yk.ndim)
if ma.shape[1:] != yk.shape[1:]:
ma = ma*(ones(yk.shape[1:])[np.newaxis,...])
if mb.shape[1:] != yk.shape[1:]:
mb = mb*(ones(yk.shape[1:])[np.newaxis,...])
mk = np.concatenate((ma,mk),axis=0)
mk = np.concatenate((mk,mb),axis=0)
return mk
return B, b, append_func, nlu
elif kind in ['clamped', 'endslope', 'first', 'not-a-knot', 'runout',
'parabolic']:
if kind == 'endslope':
# match slope of lagrange interpolating polynomial of
# order 3 at end-points.
x0,x1,x2,x3 = xk[:4]
sl_0 = (1./(x0-x1)+1./(x0-x2)+1./(x0-x3))*yk[0]
sl_0 += (x0-x2)*(x0-x3)/((x1-x0)*(x1-x2)*(x1-x3))*yk[1]
sl_0 += (x0-x1)*(x0-x3)/((x2-x0)*(x2-x1)*(x3-x2))*yk[2]
sl_0 += (x0-x1)*(x0-x2)/((x3-x0)*(x3-x1)*(x3-x2))*yk[3]
xN3,xN2,xN1,xN0 = xk[-4:]
sl_N = (1./(xN0-xN1)+1./(xN0-xN2)+1./(xN0-xN3))*yk[-1]
sl_N += (xN0-xN2)*(xN0-xN3)/((xN1-xN0)*(xN1-xN2)*(xN1-xN3))*yk[-2]
sl_N += (xN0-xN1)*(xN0-xN3)/((xN2-xN0)*(xN2-xN1)*(xN3-xN2))*yk[-3]
sl_N += (xN0-xN1)*(xN0-xN2)/((xN3-xN0)*(xN3-xN1)*(xN3-xN2))*yk[-4]
elif kind == 'clamped':
sl_0, sl_N = 0.0, 0.0
elif kind == 'first':
sl_0, sl_N = conds
# Now set up the (N+1)x(N+1) system of equations
beta = np.r_[0,2*(xk[2:]-xk[:-2]),0]
alpha = xk[1:]-xk[:-1]
gamma = np.r_[0,alpha[1:]]
B = np.diag(alpha,k=-1) + np.diag(beta) + np.diag(gamma,k=1)
d1 = alpha[0]
dN = alpha[-1]
if kind == 'not-a-knot':
d2 = alpha[1]
dN1 = alpha[-2]
B[0,:3] = [d2,-d1-d2,d1]
B[-1,-3:] = [dN,-dN1-dN,dN1]
elif kind == 'runout':
B[0,:3] = [1,-2,1]
B[-1,-3:] = [1,-2,1]
elif kind == 'parabolic':
B[0,:2] = [1,-1]
B[-1,-2:] = [-1,1]
elif kind == 'periodic':
raise NotImplementedError
elif kind == 'symmetric':
raise NotImplementedError
else:
B[0,:2] = [2*d1,d1]
B[-1,-2:] = [dN,2*dN]
# Set up RHS (b)
b = np.empty((Np1,)+yk.shape[1:])
dyk = (yk[1:]-yk[:-1])*1.0
if kind in ['not-a-knot', 'runout', 'parabolic']:
b[0] = b[-1] = 0.0
elif kind == 'periodic':
raise NotImplementedError
elif kind == 'symmetric':
raise NotImplementedError
else:
b[0] = (dyk[0]/d1 - sl_0)
b[-1] = -(dyk[-1]/dN - sl_N)
b[1:-1,...] = (dyk[1:]/alpha[1:]-dyk[:-1]/alpha[:-1])
b *= 6.0
return B, b, None, None
else:
        raise ValueError("%s not supported" % kind)
# conds is a tuple of an array and a vector
# giving the left-hand and the right-hand side
# of the additional equations to add to B
def _find_user(xk, yk, order, conds, B):
lh = conds[0]
rh = conds[1]
B = concatenate((B,lh),axis=0)
w = concatenate((yk,rh),axis=0)
M,N = B.shape
if (M>N):
raise ValueError("over-specification of conditions")
elif (M<N):
return _find_smoothest(xk, yk, order, None, B)
else:
return np.dual.solve(B, w)
# If conds is None, then use the not_a_knot condition
# at K-1 farthest separated points in the interval
def _find_not_a_knot(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# If conds is None, then ensure zero-valued second
# derivative at K-1 farthest separated points
def _find_natural(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# If conds is None, then ensure zero-valued first
# derivative at K-1 farthest separated points
def _find_clamped(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
def _find_fixed(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# If conds is None, then use coefficient periodicity
# If conds is 'function' then use function periodicity
def _find_periodic(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# Doesn't use conds
def _find_symmetric(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# conds is a dictionary with multiple values
def _find_mixed(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
def splmake(xk,yk,order=3,kind='smoothest',conds=None):
"""Return a (xk, cvals, k) representation of a spline given
data-points where the (internal) knots are at the data-points.
yk can be an N-d array to represent more than one curve, through
the same xk points. The first dimension is assumed to be the
interpolating dimension.
kind can be 'smoothest', 'not_a_knot', 'fixed',
'clamped', 'natural', 'periodic', 'symmetric',
           'user' or 'mixed'.
    It is ignored if order < 2.
"""
yk = np.asanyarray(yk)
N = yk.shape[0]-1
order = int(order)
if order < 0:
raise ValueError("order must not be negative")
if order == 0:
return xk, yk[:-1], order
elif order == 1:
return xk, yk, order
try:
func = eval('_find_%s' % kind)
except:
raise NotImplementedError
# the constraint matrix
B = _fitpack._bsplmat(order, xk)
coefs = func(xk, yk, order, conds, B)
return xk, coefs, order
def spleval((xj,cvals,k),xnew,deriv=0):
"""Evaluate a fixed spline represented by the given tuple at the new
x-values. The xj values are the interior knot points. The approximation
region is xj[0] to xj[-1]. If N+1 is the length of xj, then cvals should
have length N+k where k is the order of the spline.
Internally, an additional k-1 knot points are added on either side of
the spline.
If cvals represents more than one curve (cvals.ndim > 1) and/or xnew is
N-d, then the result is xnew.shape + cvals.shape[1:] providing the
interpolation of multiple curves.
"""
oldshape = np.shape(xnew)
xx = np.ravel(xnew)
sh = cvals.shape[1:]
res = np.empty(xx.shape + sh, dtype=cvals.dtype)
for index in np.ndindex(*sh):
sl = (slice(None),)+index
if issubclass(cvals.dtype.type, np.complexfloating):
res[sl].real = _fitpack._bspleval(xx,xj,cvals.real[sl],k,deriv)
res[sl].imag = _fitpack._bspleval(xx,xj,cvals.imag[sl],k,deriv)
else:
res[sl] = _fitpack._bspleval(xx,xj,cvals[sl],k,deriv)
res.shape = oldshape + sh
return res
def spltopp(xk,cvals,k):
"""Return a piece-wise polynomial object from a fixed-spline tuple.
"""
return ppform.fromspline(xk, cvals, k)
def spline(xk,yk,xnew,order=3,kind='smoothest',conds=None):
"""Interpolate a curve (xk,yk) at points xnew using a spline fit.
"""
return spleval(splmake(xk,yk,order=order,kind=kind,conds=conds),xnew)
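# A minimal sketch of how the helpers above compose (illustrative only; it
# assumes the FITPACK-backed _fitpack module used above is importable here):
#
#     xk = np.linspace(0.0, 2 * np.pi, 10)
#     yk = np.sin(xk)
#     rep = splmake(xk, yk, order=3)          # -> (knots, coefficients, order)
#     ynew = spleval(rep, np.linspace(0.0, 2 * np.pi, 50))
#     # spline() is the one-call convenience wrapper for the two steps above:
#     ynew = spline(xk, yk, np.linspace(0.0, 2 * np.pi, 50), order=3)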
|
|
import echidna.core.spectra as spectra
import numpy
class Shift(object):
""" A class for shifting the parameter space of a spectra.
Attributes:
_shift (float): The factor you want to shift a parameter by.
"""
def __init__(self):
""" Initialise the Shift class.
"""
self._shift = 0.
def get_shift(self):
""" Returns the shift factor.
Returns:
float: The shift factor.
"""
return self._shift
def set_shift(self, shift):
""" Sets the shift factor.
Args:
shift (float): Value you wish to set the shift factor to.
"""
self._shift = float(shift)
def shift(self, spectrum, dimension, **kwargs):
""" Shifts a given spectrum's dimension using interpolation.
Args:
          spectrum (:class:`echidna.core.spectra.Spectra`): The spectrum you
            want to shift.
          dimension (string): The dimension of the spectrum you want to shift.
          kwargs (dict): Keyword arguments to be passed to the interpolation
            function in :class:`echidna.core.spectra.Spectra`
Returns:
:class:`echidna.core.spectra.Spectra`: The shifted spectrum.
"""
shift = self.get_shift()
step = spectrum.get_config().get_par(dimension).get_width()
if numpy.isclose(shift % step, 0.):
# shift size multiple of step size. Interpolation not required.
return self.shift_by_bin(spectrum, dimension)
preshift_sum = spectrum.sum()
interpolation = spectrum.interpolate1d(dimension, **kwargs)
shifted_spec = spectra.Spectra(spectrum._name+"_shift" +
str(shift),
spectrum._num_decays,
spectrum.get_config())
n_dim = len(spectrum._data.shape)
axis = spectrum.get_config().get_index(dimension)
par = spectrum.get_config().get_par(dimension)
low = par._low
high = par._high
n_bins = par._bins
for bin in range(n_bins):
x = low + (bin + 0.5) * step
if (x - shift) < low or (x - shift) > high:
continue # Trying to shift values outside range (Unknown)
y = interpolation(x - shift)
            if y <= 0.:  # Can't have negative num_events
continue
old_bin1 = par.get_bin(x - shift)
old_bin_centre1 = par.get_bin_centre(old_bin1)
if old_bin_centre1 > x - shift:
old_bin2 = old_bin1 - 1
if old_bin2 >= 0:
x_low1 = old_bin_centre1 - 0.5*step # Equals x_high2
x_high1 = x - shift + 0.5*step
area1 = numpy.fabs(0.5 * (x_high1 - x_low1) *
(interpolation(x_high1) +
interpolation(x_low1)))
x_low2 = x - shift - 0.5*step
area2 = numpy.fabs(0.5 * (x_low1 - x_low2) *
(interpolation(x_low1) +
interpolation(x_low2)))
else:
old_bin2 = 0
area2 = 0. # This will set scale2 == 0
area1 = 1.
else:
old_bin2 = old_bin1 + 1
if old_bin2 < n_bins:
x_low1 = x - shift - 0.5*step
x_high1 = old_bin_centre1 + 0.5*step # Equals x_low2
area1 = numpy.fabs(0.5 * (x_high1 - x_low1) *
(interpolation(x_high1) +
interpolation(x_low1)))
x_high2 = x - shift + 0.5*step
area2 = numpy.fabs(0.5 * (x_high2 - x_high1) *
(interpolation(x_high2) +
interpolation(x_high1)))
else:
old_bin2 = n_bins - 1
area2 = 0. # This will set scale2 == 0
area1 = 1.
scale1 = area1 / (area1 + area2)
scale2 = area2 / (area1 + area2)
# Prepare array split. Is there a better way to do this not using
# eval and exec?
cur_slice = "["
old_slice1 = "["
old_slice2 = "["
for dim in range(n_dim):
if dim == axis:
if bin < n_bins - 1:
cur_slice += str(bin) + ":" + str(bin + 1) + ","
else:
cur_slice += str(bin) + ":,"
if old_bin1 < n_bins - 1:
old_slice1 += (str(old_bin1) + ":" +
str(old_bin1 + 1) + ",")
else:
old_slice1 += str(old_bin1) + ":,"
if old_bin2 < n_bins - 1:
old_slice2 += (str(old_bin2) + ":" +
str(old_bin2 + 1) + ",")
else:
old_slice2 += str(old_bin2) + ":,"
else:
cur_slice += ":,"
old_slice1 += ":,"
old_slice2 += ":,"
cur_slice = cur_slice[:-1] + "]"
old_slice1 = old_slice1[:-1] + "]"
old_slice2 = old_slice2[:-1] + "]"
old_data1 = eval("spectrum._data"+old_slice1)
unshifted_sum1 = float(old_data1.sum())
old_data2 = eval("spectrum._data"+old_slice2)
unshifted_sum2 = float(old_data2.sum())
# Check to see if there is data to shift
if unshifted_sum1 <= 0. and unshifted_sum2 <= 0.:
continue
elif unshifted_sum1 <= 0.:
fill_cmd = ("shifted_spec._data" + cur_slice + " += "
"old_data2 * (y / unshifted_sum2)")
exec(fill_cmd)
elif unshifted_sum2 <= 0.:
fill_cmd = ("shifted_spec._data" + cur_slice + " += "
"old_data1 * (y / unshifted_sum1)")
exec(fill_cmd)
else:
fill_cmd = ("shifted_spec._data" + cur_slice + "+="
"old_data1 * scale1 * (y / unshifted_sum1) +"
"old_data2 * scale2 * (y / unshifted_sum2)")
exec(fill_cmd)
# renormalise to prescale number of counts
shifted_spec._num_decays = shifted_spec.sum()
shifted_spec.scale(preshift_sum)
shifted_spec._num_decays = spectrum._num_decays
return shifted_spec
def shift_by_bin(self, spectrum, dimension):
""" Shifts a given spectrum's dimension by shifting bins.
Args:
          spectrum (:class:`echidna.core.spectra.Spectra`): The spectrum you
            want to shift.
          dimension (string): The dimension of the spectrum you want to shift.
Returns:
:class:`echidna.core.spectra.Spectra`: The shifted spectrum.
"""
shift = self.get_shift()
step = spectrum.get_config().get_par(dimension).get_width()
if not numpy.isclose(shift % step, 0.):
raise ValueError("Shift (%s) must be a multiple of bin width (%s)"
% (shift, step))
shifted_spec = spectra.Spectra(spectrum._name+"_shift" +
str(shift),
spectrum._num_decays,
spectrum.get_config())
n_dim = len(spectrum._data.shape)
axis = spectrum.get_config().get_index(dimension)
low = spectrum.get_config().get_par(dimension)._low
high = spectrum.get_config().get_par(dimension)._high
n_bins = spectrum.get_config().get_par(dimension)._bins
for bin in range(n_bins):
x = low + (bin + 0.5) * step
if (x - shift) < low or (x - shift) > high:
continue # Trying to shift values outside range (Unknown)
old_bin = spectrum.get_config().get_par(dimension).get_bin(x -
shift)
# Prepare array split. Is there a better way to do this not using
# eval and exec?
cur_slice = "["
old_slice = "["
for dim in range(n_dim):
if dim == axis:
if bin < n_bins - 1:
cur_slice += str(bin) + ":" + str(bin + 1) + ","
else:
cur_slice += str(bin) + ":,"
if old_bin < n_bins - 1:
old_slice += str(old_bin) + ":" + str(old_bin + 1)+","
else:
old_slice += str(old_bin) + ":,"
else:
cur_slice += ":,"
old_slice += ":,"
cur_slice = cur_slice[:-1] + "]"
old_slice = old_slice[:-1] + "]"
old_data = eval("spectrum._data"+old_slice)
unshifted_sum = float(old_data.sum())
# Check to see if there is data
if unshifted_sum > 0.:
fill_cmd = "shifted_spec._data" + cur_slice + "+= old_data"
exec(fill_cmd)
return shifted_spec
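# A minimal usage sketch (illustrative only; ``spectrum`` stands for an
# existing :class:`echidna.core.spectra.Spectra` instance and "energy_mc" for
# one of the dimensions defined in its config):
#
#     shifter = Shift()
#     shifter.set_shift(0.05)
#     shifted = shifter.shift(spectrum, "energy_mc")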
|
|
from __future__ import absolute_import, unicode_literals
import os
from django.utils.translation import ugettext_lazy as _
######################
# CARTRIDGE SETTINGS #
######################
# The following settings are already defined in cartridge.shop.defaults
# with default values, but are common enough to be put here, commented
# out, for conveniently overriding. Please consult the settings
# documentation for a full list of settings Cartridge implements:
# http://cartridge.jupo.org/configuration.html#default-settings
# Sequence of available credit card types for payment.
# SHOP_CARD_TYPES = ("Mastercard", "Visa", "Diners", "Amex")
# Setting to turn on featured images for shop categories. Defaults to False.
# SHOP_CATEGORY_USE_FEATURED_IMAGE = True
# Set an alternative OrderForm class for the checkout process.
# SHOP_CHECKOUT_FORM_CLASS = 'cartridge.shop.forms.OrderForm'
# If True, the checkout process is split into separate
# billing/shipping and payment steps.
# SHOP_CHECKOUT_STEPS_SPLIT = True
# If True, the checkout process has a final confirmation step before
# completion.
# SHOP_CHECKOUT_STEPS_CONFIRMATION = True
# Controls the formatting of monetary values according to the locale
# module in the python standard library. If an empty string is
# used, will fall back to the system's locale.
# SHOP_CURRENCY_LOCALE = ""
# Dotted package path and name of the function that
# is called on submit of the billing/shipping checkout step. This
# is where shipping calculation can be performed and set using the
# function ``cartridge.shop.utils.set_shipping``.
# SHOP_HANDLER_BILLING_SHIPPING = \
# "cartridge.shop.checkout.default_billship_handler"
# Dotted package path and name of the function that
# is called once an order is successful and all of the order
# object's data has been created. This is where any custom order
# processing should be implemented.
# SHOP_HANDLER_ORDER = "cartridge.shop.checkout.default_order_handler"
# Dotted package path and name of the function that
# is called on submit of the payment checkout step. This is where
# integration with a payment gateway should be implemented.
# SHOP_HANDLER_PAYMENT = "cartridge.shop.checkout.default_payment_handler"
# Sequence of value/name pairs for order statuses.
# SHOP_ORDER_STATUS_CHOICES = (
# (1, "Unprocessed"),
# (2, "Processed"),
# )
# Sequence of value/name pairs for types of product options,
# eg Size, Colour. NOTE: Increasing the number of these will
# require database migrations!
# SHOP_OPTION_TYPE_CHOICES = (
# (1, "Size"),
# (2, "Colour"),
# )
# Sequence of indexes from the SHOP_OPTION_TYPE_CHOICES setting that
# control how the options should be ordered in the admin,
# eg for "Colour" then "Size" given the above:
# SHOP_OPTION_ADMIN_ORDER = (2, 1)
######################
# MEZZANINE SETTINGS #
######################
# The following settings are already defined with default values in
# the ``defaults.py`` module within each of Mezzanine's apps, but are
# common enough to be put here, commented out, for conveniently
# overriding. Please consult the settings documentation for a full list
# of settings Mezzanine implements:
# http://mezzanine.jupo.org/docs/configuration.html#default-settings
# Controls the ordering and grouping of the admin menu.
#
# ADMIN_MENU_ORDER = (
# ("Content", ("pages.Page", "blog.BlogPost",
# "generic.ThreadedComment", (_("Media Library"), "fb_browse"),)),
# (_("Shop"), ("shop.Product", "shop.ProductOption", "shop.DiscountCode",
# "shop.Sale", "shop.Order")),
# ("Site", ("sites.Site", "redirects.Redirect", "conf.Setting")),
# ("Users", ("auth.User", "auth.Group",)),
# )
# A three item sequence, each containing a sequence of template tags
# used to render the admin dashboard.
#
# DASHBOARD_TAGS = (
# ("blog_tags.quick_blog", "mezzanine_tags.app_list"),
# ("comment_tags.recent_comments",),
# ("mezzanine_tags.recent_actions",),
# )
# A sequence of templates used by the ``page_menu`` template tag. Each
# item in the sequence is a three item sequence, containing a unique ID
# for the template, a label for the template, and the template path.
# These templates are then available for selection when editing which
# menus a page should appear in. Note that if a menu template is used
# that doesn't appear in this setting, all pages will appear in it.
# PAGE_MENU_TEMPLATES = (
# (1, _("Top navigation bar"), "pages/menus/dropdown.html"),
# (2, _("Left-hand tree"), "pages/menus/tree.html"),
# (3, _("Footer"), "pages/menus/footer.html"),
# )
# A sequence of fields that will be injected into Mezzanine's (or any
# library's) models. Each item in the sequence is a four item sequence.
# The first two items are the dotted path to the model and its field
# name to be added, and the dotted path to the field class to use for
# the field. The third and fourth items are a sequence of positional
# args and a dictionary of keyword args, to use when creating the
# field instance. When specifying the field class, the path
# ``django.db.models.`` can be omitted for regular Django model fields.
#
# EXTRA_MODEL_FIELDS = (
# (
# # Dotted path to field.
# "mezzanine.blog.models.BlogPost.image",
# # Dotted path to field class.
# "somelib.fields.ImageField",
# # Positional args for field class.
# (_("Image"),),
# # Keyword args for field class.
# {"blank": True, "upload_to": "blog"},
# ),
# # Example of adding a field to *all* of Mezzanine's content types:
# (
# "mezzanine.pages.models.Page.another_field",
# "IntegerField", # 'django.db.models.' is implied if path is omitted.
# (_("Another name"),),
# {"blank": True, "default": 1},
# ),
# )
# Setting to turn on featured images for blog posts. Defaults to False.
#
# BLOG_USE_FEATURED_IMAGE = True
# If True, django-modeltranslation will be added to the
# INSTALLED_APPS setting.
USE_MODELTRANSLATION = False
########################
# MAIN DJANGO SETTINGS #
########################
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en"
# Supported languages
LANGUAGES = (
('en', _('English')),
)
# A boolean that turns on/off debug mode. When set to ``True``, stack traces
# are displayed for error pages. Should always be set to ``False`` in
# production. Best set to ``True`` in local_settings.py
DEBUG = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
AUTHENTICATION_BACKENDS = ("mezzanine.core.auth_backends.MezzanineBackend",)
# The numeric mode to set newly-uploaded files to. The value should be
# a mode you'd pass directly to os.chmod.
FILE_UPLOAD_PERMISSIONS = 0o644
#############
# DATABASES #
#############
DATABASES = {
"default": {
# Add "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.",
# DB name or path to database file if using sqlite3.
"NAME": "",
# Not used with sqlite3.
"USER": "",
# Not used with sqlite3.
"PASSWORD": "",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "",
# Set to empty string for default. Not used with sqlite3.
"PORT": "",
}
}
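# For local development an SQLite configuration is often sufficient; a sketch
# (values here are illustrative, not project defaults):
#
# DATABASES = {
#     "default": {
#         "ENGINE": "django.db.backends.sqlite3",
#         "NAME": "dev.db",
#     }
# }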
#########
# PATHS #
#########
# Full filesystem path to the project.
PROJECT_APP_PATH = os.path.dirname(os.path.abspath(__file__))
PROJECT_APP = os.path.basename(PROJECT_APP_PATH)
PROJECT_ROOT = BASE_DIR = os.path.dirname(PROJECT_APP_PATH)
# Every cache key will get prefixed with this value - here we set it to
# the name of the directory the project is in to try and use something
# project specific.
CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_APP
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip("/"))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = STATIC_URL + "media/"
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, *MEDIA_URL.strip("/").split("/"))
# Package/module name to import the root urlpatterns from for the project.
ROOT_URLCONF = "%s.urls" % PROJECT_APP
# Put strings here, like "/home/html/django_templates"
# or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
TEMPLATE_DIRS = (os.path.join(PROJECT_ROOT, "templates"),)
################
# APPLICATIONS #
################
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.redirects",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.sitemaps",
"django.contrib.staticfiles",
"mezzanine.boot",
"mezzanine.conf",
"mezzanine.core",
"mezzanine.generic",
"mezzanine.pages",
"cartridge.shop",
"mezzanine.blog",
"mezzanine.forms",
"mezzanine.galleries",
"mezzanine.twitter",
# "mezzanine.accounts",
# "mezzanine.mobile",
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.static",
"django.core.context_processors.media",
"django.core.context_processors.request",
"django.core.context_processors.tz",
"mezzanine.conf.context_processors.settings",
"mezzanine.pages.context_processors.page",
)
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
"mezzanine.core.middleware.UpdateCacheMiddleware",
'django.contrib.sessions.middleware.SessionMiddleware',
# Uncomment if using internationalisation or localisation
# 'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
"cartridge.shop.middleware.ShopMiddleware",
"mezzanine.core.request.CurrentRequestMiddleware",
"mezzanine.core.middleware.RedirectFallbackMiddleware",
"mezzanine.core.middleware.TemplateForDeviceMiddleware",
"mezzanine.core.middleware.TemplateForHostMiddleware",
"mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware",
"mezzanine.core.middleware.SitePermissionMiddleware",
# Uncomment the following if using any of the SSL settings:
# "mezzanine.core.middleware.SSLRedirectMiddleware",
"mezzanine.pages.middleware.PageMiddleware",
"mezzanine.core.middleware.FetchFromCacheMiddleware",
)
# Store these package names here as they may change in the future since
# at the moment we are using custom forks of them.
PACKAGE_NAME_FILEBROWSER = "filebrowser_safe"
PACKAGE_NAME_GRAPPELLI = "grappelli_safe"
#########################
# OPTIONAL APPLICATIONS #
#########################
# These will be added to ``INSTALLED_APPS``, only if available.
OPTIONAL_APPS = (
"debug_toolbar",
"django_extensions",
"compressor",
PACKAGE_NAME_FILEBROWSER,
PACKAGE_NAME_GRAPPELLI,
)
##################
# LOCAL SETTINGS #
##################
# Allow any settings to be defined in local_settings.py which should be
# ignored in your version control system allowing for settings to be
# defined per machine.
try:
from .local_settings import *
except ImportError as e:
if "local_settings" not in str(e):
raise e
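# For example, a local_settings.py for development might contain (illustrative
# values only, not defaults):
#
# DEBUG = True
# SECRET_KEY = "local-dev-secret-key"
# ALLOWED_HOSTS = ["localhost", "127.0.0.1"]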
####################
# DYNAMIC SETTINGS #
####################
# set_dynamic_settings() will rewrite globals based on what has been
# defined so far, in order to provide some better defaults where
# applicable. We also allow this settings module to be imported
# without Mezzanine installed, as the case may be when using the
# fabfile, where setting the dynamic settings below isn't strictly
# required.
try:
from mezzanine.utils.conf import set_dynamic_settings
except ImportError:
pass
else:
set_dynamic_settings(globals())
|
|
import argparse
import pickle
import os, sys
"""
NetworkAnalysis
2017 Joaquim Aguirre-Plans
Structural Bioinformatics Laboratory
Universitat Pompeu Fabra
"""
#################
#### CLASSES ####
#################
class HouseKeepingGenes(object):
""" Class defining a HouseKeepingGenes object """
def __init__(self, type_id):
"""
@param: type_id
@pdef: Type of IDs in the network
@ptype: {String}
"""
self.main_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
        self.pickles_path = os.path.join(self.main_path, 'data')
self.type_id = type_id
self.hpa_hk_genes = self.get_hpa_hk_genes()
self.eisenberg_hk_genes = self.get_eisenberg_hk_genes()
self.all_hk_genes = self.get_all_hk_genes()
def get_hpa_hk_genes(self):
"""
Obtains a set containing the Human Protein Atlas house keeping genes
"""
if self.type_id == 'biana':
hpa_uE_dump = os.path.join(self.pickles_path,'hpa_hk_uEs.pcl')
hpa_hk_genes = pickle.load(open(hpa_uE_dump, 'rb'))
elif self.type_id == 'geneid':
hpa_geneid_dump = os.path.join(self.pickles_path,'hpa_hk_geneIDs.pcl')
hpa_hk_genes = pickle.load(open(hpa_geneid_dump, 'rb'))
else:
print('The input_type must be \'geneid\' or \'biana\'\n')
sys.exit(10)
return hpa_hk_genes
def get_eisenberg_hk_genes(self):
"""
Obtains a set containing the Eisenberg data of house keeping genes
"""
if self.type_id == 'biana':
elieis_uE_dump = os.path.join(self.pickles_path,'eisenberg_hk_uEs.pcl')
eisenberg_hk_genes = pickle.load(open(elieis_uE_dump, 'rb'))
elif self.type_id == 'geneid':
elieis_geneid_dump = os.path.join(self.pickles_path,'eisenberg_hk_geneIDs.pcl')
eisenberg_hk_genes = pickle.load(open(elieis_geneid_dump, 'rb'))
else:
print('The input_type must be \'geneid\' or \'biana\'\n')
sys.exit(10)
return eisenberg_hk_genes
def get_all_hk_genes(self):
"""
Obtains a set containing all the house keeping genes in Human Protein
Atlas and Eisenberg datasets
"""
return self.hpa_hk_genes | self.eisenberg_hk_genes
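# A minimal usage sketch (illustrative only; it assumes the pickled gene-set
# files referenced above are present under the data/ directory):
#
#     hk = HouseKeepingGenes('geneid')
#     print(len(hk.all_hk_genes))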
###################
#### FUNCTIONS ####
###################
def filter_network_tissue_specific(input_network, tissue, permission, output_network, output_nodes):
"""Gets a complete network and filters by tissue interactions in the same tissue"""
permission = int(permission)
# Check that the permission parameter is well introduced
if permission != 0 and permission != 1 and permission != 2:
print('In the parameter --permission, introduce 0, 1 or 2\n')
sys.exit(10)
# Open files
network_fd = open(input_network, 'r')
output_fd = open(output_network, 'w')
output_nodes_fd = open(output_nodes, 'w')
ue2hpainfo = {}
ue2jenseninfo = {}
tissue_nodes = set()
# Process the network
for line in network_fd:
fields = line.strip().split('\t')
uE1 = int(fields[0])
uE2 = int(fields[1])
other = fields[2:len(fields)]
other = '\t'.join(other)
#print('\nChecking interaction {} and {}...\n'.format(uE1, uE2))
for uE in [uE1, uE2]:
if uE not in ue2jenseninfo:
ue2jenseninfo.setdefault(uE, {'info':False, 'specific':False})
# Report if there is info about the protein in Jensen or not
if uE not in tissue.UEprot2UETissues:
ue2jenseninfo[uE]['info'] = False
else:
ue2jenseninfo[uE]['info'] = True
# Report if there is specificity in Jensen or not
if uE in tissue.proteins_jensen:
ue2jenseninfo[uE]['specific'] = True
else:
ue2jenseninfo[uE]['specific'] = False
if uE not in ue2hpainfo:
ue2hpainfo.setdefault(uE, {'info':False, 'specific':False})
# Report if there is info about the protein in HPA or not
if uE not in tissue.UEprot2UEHPA:
ue2hpainfo[uE]['info'] = False
else:
ue2hpainfo[uE]['info'] = True
# Report if there is specificity in HPA or not
if uE in tissue.proteins_hpa:
ue2hpainfo[uE]['specific'] = True
else:
ue2hpainfo[uE]['specific'] = False
############ Order the information obtained from Tissues and HPA ############
result_hpa = { 'info' : False, 'specific' : False, 'db' : 'hpa' }
result_jensen = { 'info' : False, 'specific' : False, 'db' : 'jensen' }
for database, result in [ [ue2jenseninfo, result_jensen], [ue2hpainfo, result_hpa] ]:
# If we have info in both proteins, it is True
            if database[uE1]['info'] == True and database[uE2]['info'] == True:
result['info'] = True
# If we only have info about one of the proteins...
if (database[uE1]['info'] == True and database[uE2]['info'] == False) or (database[uE1]['info'] == False and database[uE2]['info'] == True):
for uE in [uE1, uE2]:
if database[uE]['info'] == True: # Identify the protein in which we have info
if database[uE]['specific'] == True: # If the protein is tissue-specific, we say that the interaction is partially tissue-specific
result['info'] = 'partial'
# If one of the proteins is specific and the other not, or both are not, we consider the interaction as not specific!
if (database[uE1]['specific'] == True and database[uE2]['specific'] == False) or (database[uE1]['specific'] == False and database[uE2]['specific'] == True) or (database[uE1]['specific'] == False and database[uE2]['specific'] == False):
result['specific'] = False
# If both are specific, then it is specific!
elif database[uE1]['specific'] == True and database[uE2]['specific'] == True:
result['specific'] = True
#print('\nHPA info: {}\tHPA specificity: {}'.format(result_hpa['info'], result_hpa['specific']))
#print('JENSEN info: {}\tJENSEN specificity: {}'.format(result_jensen['info'], result_jensen['specific']))
############ Decide if they are tissue specific or not... ############
# If both specificity results are True, we consider tissue-specificity
if result_hpa['specific'] == True and result_jensen['specific'] == True:
#print('\n... tissue specific!\n')
database = ';'.join([result_hpa['db'], result_jensen['db']])
additional = '-'
output_fd.write('{}\t{}\t{}\t{}\t{}\n'.format(uE1, uE2, other, database, additional))
tissue_nodes.add(uE1)
tissue_nodes.add(uE2)
# If there is True info in only one of the databases and it is tissue-specific, we consider tissue-specificity
if (result_hpa['info'] == True and result_jensen['info'] == False) or (result_hpa['info'] == False and result_jensen['info'] == True):
for result in [result_jensen, result_hpa]:
if result['info'] == True and result['specific'] == True:
#print('\n... tissue specific!\n')
database = result['db']
additional = '-'
output_fd.write('{}\t{}\t{}\t{}\t{}\n'.format(uE1, uE2, other, database, additional))
tissue_nodes.add(uE1)
tissue_nodes.add(uE2)
# If there is contradiction (one of them is True and the other is False), we add the one which is True
if (result_hpa['specific'] == True and result_jensen['info'] == True and result_jensen['specific'] == False) or (result_hpa['info'] == True and result_hpa['specific'] == False and result_jensen['specific'] == True):
#print('\n... contradiction!\n')
for result in [result_jensen, result_hpa]:
if result['info'] == True and result['specific'] == True:
database = result['db']
additional = 'contradiction'
output_fd.write('{}\t{}\t{}\t{}\t{}\n'.format(uE1, uE2, other, database, additional))
tissue_nodes.add(uE1)
tissue_nodes.add(uE2)
# If there is no info, we check the permissivity
if result_hpa['info'] == False and result_jensen['info'] == False:
#print('\n... no info!\n')
# If the level of permissivity is 2, we include it
if permission == 2:
#print('\n... as permission is level {}, we include it!\n'.format(permission))
database = '-'
additional = 'no info'
output_fd.write('{}\t{}\t{}\t{}\t{}\n'.format(uE1, uE2, other, database, additional))
tissue_nodes.add(uE1)
tissue_nodes.add(uE2)
else:
continue
# If there is only partial info in one of the databases or both, we check the level of permission
if (result_hpa['info'] == 'partial' and result_jensen['info'] == False) or (result_hpa['info'] == False and result_jensen['info'] == 'partial') or (result_hpa['info'] == 'partial' and result_jensen['info'] == 'partial'):
# If the permission is medium or high, we include it!
if permission == 1 or permission == 2:
database = []
for result in [result_jensen, result_hpa]:
if result['info'] == 'partial':
database.append(result['db'])
#print('\n... only partial info in one of the databases... tissue specific!\n')
database = ';'.join(database)
additional = 'partial info'
output_fd.write('{}\t{}\t{}\t{}\t{}\n'.format(uE1, uE2, other, database, additional))
tissue_nodes.add(uE1)
tissue_nodes.add(uE2)
#print('\n')
# Print the nodes in the nodes file
for node in tissue_nodes:
output_nodes_fd.write('{}\n'.format(node))
# if nodes[node]['tissue_specific'] == True:
# if 'level' not in nodes[node]:
# nodes[node]['level'] = '-'
# nodes[node]['reliability'] = '-'
# if 'confidence' not in nodes[node]:
# nodes[node]['confidence'] = '-'
# output_nodes_fd.write('{}\t{}\t{}\t{}\n'.format(node, nodes[node]['level'], nodes[node]['reliability'], nodes[node]['confidence']))
network_fd.close()
output_fd.close()
output_nodes_fd.close()
return
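# A minimal usage sketch (illustrative only; the file names are placeholders
# and ``tissue`` stands for an object exposing the attributes used above:
# UEprot2UETissues, proteins_jensen, UEprot2UEHPA and proteins_hpa):
#
#     filter_network_tissue_specific('network.txt', tissue, 1,
#                                    'network.tissue.txt', 'nodes.tissue.txt')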
|
|
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Routines to create discourse tree objects out of token and parent lists."""
from __future__ import absolute_import
from __future__ import print_function
import itertools
import math
import Queue as Q
import numpy as np
def build_discourse_tree(edu_ids, parent_ids):
"""Build DiscourseTreeNode object from paired (node,parent) IDs for spans."""
ids_and_parents = zip(edu_ids, parent_ids)
ids_and_parents.sort(key=lambda t: t[0])
by_id = itertools.groupby(ids_and_parents, key=lambda t: t[0])
nodes = []
ids_to_nodes = {}
for k, g in by_id:
g = list(g)
new_node = DiscourseTreeNode(k, g[0][1], len(g))
nodes.append(new_node)
ids_to_nodes[k] = new_node
for node in nodes:
if node.parent_id != -1:
parent_node = ids_to_nodes[node.parent_id]
node.parent = parent_node
parent_node.children.append(node)
root_node = (node for node in nodes if node.parent_id == -1).next()
for node in discourse_tree_depth_first_walk(root_node):
if node.parent_id != -1:
node.parent.total_num_leaves += node.total_num_leaves
for node in nodes:
if node.children:
node.child_sum_tree = build_sum_tree(
[(n.total_num_leaves + 1, n.node_id) for n in node.children])
add_tree_levels(root_node)
return root_node
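# A small worked example (illustrative only): span 1 is the root and spans 2
# and 3 attach to it; a repeated id marks a span that covers several tokens.
#
#   root = build_discourse_tree([1, 1, 2, 3], [-1, -1, 1, 1])
#   root.node_id                           # -> 1
#   [c.node_id for c in root.children]     # -> [2, 3]
#   root.total_num_leaves                  # -> 4 (2 + 1 + 1)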
class DiscourseTreeNode(object):
"""Class representing a discourse-parsed sentence/document."""
def __init__(self, node_id, parent_id, node_size, span_tokens=None):
self.node_id = node_id
self.parent_id = parent_id
self.node_size = node_size
self.span_tokens = span_tokens
self.parent = None
self.level = None
self.children = []
self.child_sum_tree = None
self.total_num_leaves = node_size
def tree_num_nodes(self):
num_nodes = 0
for c in self.children:
num_nodes += c.tree_num_nodes()
return num_nodes + 1
def build_sum_tree(num_leaves_node_id_pairs):
"""Builds tree for fft-tree inference across multiple sibling sets.
Lay out cousin sibling sets aligned to powers of 2 so that binary-tree
auxiliary variable model can be masked to keep cousin sums separate.
Args:
num_leaves_node_id_pairs: list of (size, id) pairs for nodes.
Returns:
SumTreeBranch object describing layout.
"""
q = Q.PriorityQueue()
for num_leaves, node_id in num_leaves_node_id_pairs:
q.put((num_leaves, SumTreeLeaf(node_id, num_leaves)))
while not q.empty():
node_a = q.get()
if q.empty():
ret = node_a[1]
else:
node_b = q.get()
new_branch = SumTreeBranch(node_a[1], node_b[1])
q.put((new_branch.width, new_branch))
return ret
def sum_tree_depth_first_leaf_walk(node):
if isinstance(node, SumTreeLeaf):
yield node
else:
for n in sum_tree_depth_first_leaf_walk(node.left):
yield n
for n in sum_tree_depth_first_leaf_walk(node.right):
yield n
def round_up_to_power_2(x):
return 2.0**math.ceil(math.log(x, 2.0))
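# For example (illustrative): round_up_to_power_2(3) -> 4.0,
# round_up_to_power_2(5) -> 8.0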
class SumTreeLeaf(object):
def __init__(self, span_id, width):
self.span_id = span_id
self.update_width(width)
def update_width(self, new_width):
self.width = round_up_to_power_2(new_width)
self.depth = math.log(self.width, 2.0)
class SumTreeBranch(object):
def __init__(self, left, right):
self.left = left
self.right = right
self.update_width(
round_up_to_power_2(self.left.width + self.right.width))
def update_width(self, new_width):
self.width = new_width
self.depth = math.log(self.width, 2.0)
self.left.update_width(new_width / 2)
self.right.update_width(new_width / 2)
def discourse_tree_depth_first_walk(node):
for n in node.children:
for desc in discourse_tree_depth_first_walk(n):
yield desc
yield node
def add_tree_levels(tree):
if tree.parent is None:
tree.level = 0
else:
tree.level = tree.parent.level + 1
for c in tree.children:
add_tree_levels(c)
def get_junction_tree_dimensions(examples, tree_cutoff_depth=20):
"""Find dimensions of minimum-sized containing PGM for a set of examples.
Args:
examples: list of SummaryExamples.
tree_cutoff_depth: max depth for BP tree.
Returns:
Dimensions of junction tree.
"""
# take a generator of examples and stream over it to find the
# size proportions of the junction tree
max_num_spans = -1
max_num_sentences = -1
max_tree_nodes_any_level = -1
max_tree_widths_at_level = np.zeros([tree_cutoff_depth])
max_fft_tree_widths_at_level = np.zeros([tree_cutoff_depth])
global_fft_tree_width = -1
scratch_tree_widths_at_level = np.zeros([tree_cutoff_depth])
scratch_fft_tree_widths_at_level = np.zeros([tree_cutoff_depth])
scratch_nodes_at_level = np.zeros([tree_cutoff_depth])
for ex in examples:
trees = [build_discourse_tree(edu_ids, parent_ids)
for edu_ids, parent_ids in zip(ex.article_edu_ids,
ex.article_parent_ids)]
max_num_sentences = max(max_num_sentences, len(trees))
max_num_spans = max(max_num_spans, sum([t.tree_num_nodes() for t in trees]))
# get per-sentence dimensions
for tree in trees:
scratch_tree_widths_at_level[:] = 0
scratch_fft_tree_widths_at_level[:] = 0
scratch_nodes_at_level[:] = 0
all_nodes = list(discourse_tree_depth_first_walk(tree))
      # to lay out cousins we have to sort them biggest first,
      # but to find the total width we don't need to sort
for n in all_nodes:
scratch_tree_widths_at_level[n.level] += n.total_num_leaves + 1
scratch_nodes_at_level[n.level] += 1
if n.child_sum_tree is not None:
scratch_fft_tree_widths_at_level[n.level] += n.child_sum_tree.width
max_tree_nodes_any_level = max(
np.max(scratch_nodes_at_level), max_tree_nodes_any_level)
max_tree_widths_at_level = np.maximum(max_tree_widths_at_level,
scratch_tree_widths_at_level)
max_fft_tree_widths_at_level = np.maximum(
max_fft_tree_widths_at_level, scratch_fft_tree_widths_at_level)
# get global sum tree dimension
global_sum_tree = build_sum_tree(
[(dt.total_num_leaves + 1, dt.node_id) for dt in trees])
global_fft_tree_width = max(global_fft_tree_width, global_sum_tree.width)
return (max_num_spans, max_num_sentences, max_tree_nodes_any_level,
max_tree_widths_at_level, max_fft_tree_widths_at_level,
global_fft_tree_width)
|
|
"""
Enhanced subprocess.Popen subclass, supporting:
* .communicate() with timeout
Sample usage:
out, err = Popen(...).communicate(input, timeout=300)
"""
# --------------------------------------------------------------------
import os, subprocess
if subprocess.mswindows:
import threading
else:
import select, errno
# --------------------------------------------------------------------
__all__ = subprocess.__all__[:]
# --------------------------------------------------------------------
def __import():
for i in subprocess.__all__:
globals()[i] = getattr(subprocess, i)
__import()
# --------------------------------------------------------------------
class Popen(subprocess.Popen):
def _fo_read_no_intr(self, obj):
"""Like obj.read(), but retries on EINTR"""
while True:
try:
return obj.read()
except IOError, e:
if e.errno == errno.EINTR:
continue
else:
raise
def _fo_write_no_intr(self, obj, data):
"""Like obj.write(), but retries on EINTR"""
while True:
try:
return obj.write(data)
except IOError, e:
if e.errno == errno.EINTR:
continue
else:
raise
def communicate(self, input=None, timeout=None):
self.timeout = timeout
# If we are only using one pipe, or no pipe at all, using
# select() or threads is unnecessary.
if [self.stdin, self.stdout, self.stderr].count(None) >= 2:
stdout = None
stderr = None
if self.stdin:
if input:
self._fo_write_no_intr(self.stdin, input)
self.stdin.close()
elif self.stdout:
stdout = self._fo_read_no_intr(self.stdout)
self.stdout.close()
elif self.stderr:
stderr = self._fo_read_no_intr(self.stderr)
self.stderr.close()
self.wait()
return (stdout, stderr)
return self._communicate(input)
if subprocess.mswindows:
def _communicate(self, input):
stdout = None # Return
stderr = None # Return
if self.stdout:
stdout = []
stdout_thread = threading.Thread(target=self._readerthread,
args=(self.stdout, stdout))
stdout_thread.setDaemon(True)
stdout_thread.start()
if self.stderr:
stderr = []
stderr_thread = threading.Thread(target=self._readerthread,
args=(self.stderr, stderr))
stderr_thread.setDaemon(True)
stderr_thread.start()
if self.stdin:
if input is not None:
self.stdin.write(input)
self.stdin.close()
if self.stdout:
stdout_thread.join(self.timeout)
if self.stderr:
stderr_thread.join(self.timeout)
# if the threads are still alive, that means the thread join timed out
timed_out = (self.stdout and stdout_thread.isAlive() or
self.stderr and stderr_thread.isAlive())
if timed_out:
self.kill()
else:
self.wait()
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = ''.join(stdout)
if stderr is not None:
stderr = ''.join(stderr)
# Translate newlines, if requested. We cannot let the file
# object do the translation: It is based on stdio, which is
# impossible to combine with select (unless forcing no
# buffering).
if self.universal_newlines and hasattr(file, 'newlines'):
if stdout:
stdout = self._translate_newlines(stdout)
if stderr:
stderr = self._translate_newlines(stderr)
return (stdout, stderr)
else: # POSIX
def _communicate(self, input):
timed_out = False
read_set = []
write_set = []
stdout = None # Return
stderr = None # Return
if self.stdin:
# Flush stdio buffer. This might block, if the user has
# been writing to .stdin in an uncontrolled fashion.
self.stdin.flush()
if input:
write_set.append(self.stdin)
else:
self.stdin.close()
if self.stdout:
read_set.append(self.stdout)
stdout = []
if self.stderr:
read_set.append(self.stderr)
stderr = []
input_offset = 0
while read_set or write_set:
try:
rlist, wlist, xlist = select.select(read_set, write_set, [], self.timeout)
except select.error, e:
if e.args[0] == errno.EINTR:
continue
raise
timed_out = not (rlist or wlist or xlist)
if timed_out:
break
if self.stdin in wlist:
# When select has indicated that the file is writable,
                    # we can write up to PIPE_BUF bytes without risk of
                    # blocking. POSIX defines PIPE_BUF >= 512.
chunk = input[input_offset:input_offset + 512]
bytes_written = os.write(self.stdin.fileno(), chunk)
input_offset += bytes_written
if input_offset >= len(input):
self.stdin.close()
write_set.remove(self.stdin)
if self.stdout in rlist:
data = os.read(self.stdout.fileno(), 1024)
if data == "":
self.stdout.close()
read_set.remove(self.stdout)
stdout.append(data)
if self.stderr in rlist:
data = os.read(self.stderr.fileno(), 1024)
if data == "":
self.stderr.close()
read_set.remove(self.stderr)
stderr.append(data)
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = ''.join(stdout)
if stderr is not None:
stderr = ''.join(stderr)
# Translate newlines, if requested. We cannot let the file
# object do the translation: It is based on stdio, which is
# impossible to combine with select (unless forcing no
# buffering).
if self.universal_newlines and hasattr(file, 'newlines'):
if stdout:
stdout = self._translate_newlines(stdout)
if stderr:
stderr = self._translate_newlines(stderr)
if timed_out:
self.kill()
else:
self.wait()
return (stdout, stderr)
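# --------------------------------------------------------------------
# A minimal usage sketch (illustrative, POSIX-style command shown); with two
# pipes open, communicate() falls through to the select()/thread path above,
# so the timeout is honoured and the child is killed once it expires:
#
#     p = Popen(["sleep", "10"], stdout=PIPE, stderr=PIPE)
#     out, err = p.communicate(timeout=1)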
|
|
"""Unit test for cgutils module.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import os
import shutil
import tempfile
import unittest
# Disable W0611: Unused import
import tests.treadmill_test_skip_windows # pylint: disable=W0611
import mock
import treadmill
from treadmill import cgutils
from treadmill import cgroups
class CGutilsTest(unittest.TestCase):
"""Tests for teadmill.cgutils.
"""
_BLKIO_THROTTLE_IOPS = os.path.join(
os.path.dirname(__file__),
'blkio.throttle.io_serviced.data'
)
_BLKIO_THROTTLE_BPS = os.path.join(
os.path.dirname(__file__),
'blkio.throttle.io_service_bytes.data'
)
_BLKIO_BPS_EMPTY = os.path.join(
os.path.dirname(__file__),
'blkio.io_service_bytes.empty.data'
)
_BLKIO_SECTORS_EMPTY = os.path.join(
os.path.dirname(__file__),
'blkio.sectors.empty.data'
)
def setUp(self):
self.root = tempfile.mkdtemp()
def tearDown(self):
if self.root and os.path.isdir(self.root):
shutil.rmtree(self.root)
@mock.patch('io.open', mock.mock_open())
@mock.patch('treadmill.cgroups.makepath', mock.Mock())
@mock.patch('treadmill.cgroups.set_value', mock.Mock())
@mock.patch('treadmill.syscall.eventfd.eventfd',
mock.Mock(return_value=42))
def test_get_memory_oom_eventfd(self):
"""Test registration of oom events.
"""
treadmill.cgroups.makepath.return_value = 'mock_oom_control'
mock_handle = io.open.return_value
mock_handle.fileno.return_value = 43
res = cgutils.get_memory_oom_eventfd('some_cgrp')
treadmill.syscall.eventfd.eventfd.assert_called_with(
0, treadmill.syscall.eventfd.EFD_CLOEXEC
)
treadmill.cgroups.makepath.assert_called_with(
'memory', 'some_cgrp', 'memory.oom_control'
)
io.open.assert_called_with('mock_oom_control')
treadmill.cgroups.set_value.assert_called_with(
'memory', 'some_cgrp', 'cgroup.event_control',
# '<eventfd_fd> <oom_control_fd>'
'42 43'
)
# Should be returning the eventfd socket
self.assertEqual(res, 42)
@mock.patch('treadmill.cgroups._get_mountpoint', mock.Mock(set_spec=True))
@mock.patch('os.rmdir', mock.Mock(set_spec=True))
def test_delete_rec(self):
"""Tests recursive cgroup deletion.
"""
# pylint: disable=W0212
cgroups_dir = os.path.join(self.root, 'cgroups')
treadmill.cgroups._get_mountpoint.return_value = cgroups_dir
group = os.path.join('treadmill', 'apps', 'test1')
# Create a directory and subdirs for the cgroup
os.makedirs(os.path.join(cgroups_dir, group, 'foo', 'bar', 'baz'))
cgutils.delete('cpu', group)
os.rmdir.assert_has_calls([
mock.call(os.path.join(cgroups_dir, group, 'foo/bar/baz')),
mock.call(os.path.join(cgroups_dir, group, 'foo/bar')),
mock.call(os.path.join(cgroups_dir, group, 'foo')),
mock.call(os.path.join(cgroups_dir, group)),
])
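    # The assertion order above reflects the required bottom-up traversal:
    # leaf cgroup directories must be removed before their parents, since
    # os.rmdir only succeeds on empty directories.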
@mock.patch('treadmill.cgroups.get_data', mock.Mock())
def test_get_blkio_bps_info(self):
"""Test reading of blkio throttle bps information.
"""
with io.open(self._BLKIO_THROTTLE_BPS) as f:
data = f.read()
treadmill.cgroups.get_data.side_effect = [data]
data = cgutils.get_blkio_info('mycgrp',
'blkio.throttle.io_service_bytes')
treadmill.cgroups.get_data.assert_called_with(
'blkio', 'mycgrp', 'blkio.throttle.io_service_bytes'
)
self.assertEqual(
data['253:6'],
{
'Read': 331776,
'Write': 74817536,
'Sync': 0,
'Async': 75149312,
'Total': 75149312,
}
)
@mock.patch('treadmill.cgroups.get_data', mock.Mock())
def test_get_blkio_info_empty(self):
"""Test reading of blkio information with empty file.
"""
with io.open(self._BLKIO_BPS_EMPTY) as f:
data = f.read()
treadmill.cgroups.get_data.side_effect = [data]
data = cgutils.get_blkio_info('mycgrp',
'blkio.io_service_bytes')
treadmill.cgroups.get_data.assert_called_with(
'blkio', 'mycgrp', 'blkio.io_service_bytes'
)
self.assertEqual(
data,
{}
)
@mock.patch('treadmill.cgroups.get_data', mock.Mock())
def test_get_blkio_value_empty(self):
"""Test reading of blkio information with empty file.
"""
with io.open(self._BLKIO_SECTORS_EMPTY) as f:
data = f.read()
treadmill.cgroups.get_data.side_effect = [data]
data = cgutils.get_blkio_value('mycgrp',
'blkio.sectors')
treadmill.cgroups.get_data.assert_called_with(
'blkio', 'mycgrp', 'blkio.sectors'
)
self.assertEqual(
data,
{}
)
@mock.patch('treadmill.cgroups.get_data', mock.Mock())
def test_get_blkio_iops_info(self):
"""Test reading of blkio throttle iops information.
"""
with io.open(self._BLKIO_THROTTLE_IOPS) as f:
data = f.read()
treadmill.cgroups.get_data.side_effect = [data]
data = cgutils.get_blkio_info('mycgrp',
'blkio.throttle.io_serviced')
treadmill.cgroups.get_data.assert_called_with(
'blkio', 'mycgrp', 'blkio.throttle.io_serviced'
)
self.assertEqual(
data['253:6'],
{
'Read': 81,
'Write': 18266,
'Sync': 0,
'Async': 18347,
'Total': 18347,
}
)
@mock.patch('treadmill.cgroups.create', mock.Mock())
@mock.patch('treadmill.cgroups.set_value', mock.Mock())
@mock.patch('treadmill.cgroups.get_data',
mock.Mock(side_effect=['0', '0', '', '1024', '512']))
@mock.patch('treadmill.sysinfo.cpu_count',
mock.Mock(return_value=4))
def test_create_treadmill_cgroups(self):
"""Test the creation of core treadmill cgroups.
"""
treadmill_core_cpu_shares = 10
treadmill_apps_cpu_shares = 90
treadmill_core_cpuset_cpus = '0-15'
treadmill_app_cpuset_cpus = '1-15'
treadmill_core_mem = 512
treadmill_apps_mem = 256
cgutils.create_treadmill_cgroups(treadmill_core_cpu_shares,
treadmill_apps_cpu_shares,
treadmill_core_cpuset_cpus,
treadmill_app_cpuset_cpus,
treadmill_core_mem,
treadmill_apps_mem)
calls = [mock.call('cpu', 'treadmill/core'),
mock.call('cpu', 'treadmill/apps'),
mock.call('cpuacct', 'treadmill/core'),
mock.call('cpuacct', 'treadmill/apps'),
mock.call('cpuset', 'treadmill/core'),
mock.call('cpuset', 'treadmill/apps'),
mock.call('memory', 'treadmill/core'),
mock.call('memory', 'treadmill/apps')]
treadmill.cgroups.create.assert_has_calls(calls)
calls = [mock.call('cpu', 'treadmill/core',
'cpu.shares', treadmill_core_cpu_shares),
mock.call('cpu', 'treadmill/apps',
'cpu.shares', treadmill_apps_cpu_shares),
mock.call('cpuset', 'treadmill/core',
'cpuset.mems', '0'),
mock.call('cpuset', 'treadmill/apps',
'cpuset.mems', '0'),
mock.call('cpuset', 'treadmill/core',
'cpuset.cpus', '0-15'),
mock.call('cpuset', 'treadmill/apps',
'cpuset.cpus', '1-15'),
mock.call('memory', 'treadmill/core',
'memory.move_charge_at_immigrate', 1),
mock.call('memory', 'treadmill/apps',
'memory.move_charge_at_immigrate', 1),
mock.call('memory', 'treadmill/core',
'memory.limit_in_bytes', treadmill_core_mem),
mock.call('memory', 'treadmill/core',
'memory.memsw.limit_in_bytes', treadmill_core_mem),
mock.call('memory', 'treadmill/core',
'memory.soft_limit_in_bytes', treadmill_core_mem),
mock.call('memory', 'treadmill/apps',
'memory.limit_in_bytes', treadmill_apps_mem),
mock.call('memory', 'treadmill/apps',
'memory.memsw.limit_in_bytes', treadmill_apps_mem)]
treadmill.cgroups.set_value.assert_has_calls(calls)
@mock.patch('treadmill.cgroups.set_value',
mock.Mock())
@mock.patch('treadmill.cgroups.get_value',
mock.Mock(return_value=512))
@mock.patch('treadmill.cgroups.makepath',
mock.Mock(return_value='/cgroup/memory/treadmill/apps'))
@mock.patch('treadmill.cgutils.total_soft_memory_limits',
mock.Mock(return_value=1024))
@mock.patch('os.listdir',
mock.Mock(return_value=['a', 'b']))
@mock.patch('os.path.isdir',
mock.Mock(return_value=True))
def test_reset_mem_limit_in_bytes(self):
"""Make sure we are setting hardlimits right.
"""
cgutils.reset_memory_limit_in_bytes()
mock_calls = [mock.call('memory',
'treadmill/apps',
'memory.limit_in_bytes'),
mock.call('memory',
'treadmill/apps/a',
'memory.soft_limit_in_bytes'),
mock.call('memory',
'treadmill/apps/b',
'memory.soft_limit_in_bytes')]
cgroups.get_value.assert_has_calls(mock_calls)
mock_calls = [mock.call('memory',
'treadmill/apps/a',
'memory.limit_in_bytes',
512),
mock.call('memory',
'treadmill/apps/a',
'memory.memsw.limit_in_bytes',
512),
mock.call('memory',
'treadmill/apps/b',
'memory.limit_in_bytes',
512),
mock.call('memory',
'treadmill/apps/b',
'memory.memsw.limit_in_bytes',
512)]
cgroups.set_value.assert_has_calls(mock_calls)
@mock.patch('treadmill.cgutils.set_memory_hardlimit', mock.Mock())
@mock.patch('treadmill.cgroups.get_value',
mock.Mock(return_value=512))
@mock.patch('treadmill.cgroups.makepath',
mock.Mock(return_value='/cgroup/memory/treadmill/apps'))
@mock.patch('treadmill.cgutils.total_soft_memory_limits',
mock.Mock(return_value=1024))
@mock.patch('os.listdir',
mock.Mock(return_value=['a']))
@mock.patch('os.path.isdir',
mock.Mock(return_value=True))
def test_reset_mem_limit_kill(self):
"""Make sure we kill groups when we cannot lower their hardlimits.
"""
treadmill.cgutils.set_memory_hardlimit.side_effect = \
cgutils.TreadmillCgroupError('test')
res = cgutils.reset_memory_limit_in_bytes()
self.assertEqual(res, ['a'])
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
# this script sets up the testing packages needed to run the tests
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import argparse
import os
import subprocess
import platform
import sys
pip_packages = [
"autest==1.7.2",
"hyper",
"requests",
"dnslib",
"httpbin",
"traffic-replay" # this should install TRLib, MicroServer, MicroDNS, Traffic-Replay
]
distro_packages = {
"RHEL": [
"install epel-release",
"install python35",
"install rh-python35-python-virtualenv"
],
"Fedora": [
"install python3",
"install python3-virtualenv",
"install python-virtualenv",
],
"Ubuntu": [
"install python3",
"install python3-virtualenv",
"install virtualenv",
"install python3-dev"
],
"CentOS": [
"install epel-release",
"install rh-python35-python-virtualenv"
]
}
def command_output(cmd_str):
print(cmd_str)
proc = subprocess.Popen(
cmd_str,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
# while command runs get output
    while proc.poll() is None:
tmp = proc.stdout.readline()
sys.stdout.write(tmp)
for last_output in proc.stdout.readlines():
sys.stdout.write(last_output)
return proc.returncode
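# Example usage (hypothetical command, shown for illustration only):
#
#     rc = command_output("ls -l /tmp")
#     if rc:
#         print("command failed with exit code {0}".format(rc))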
def get_distro():
    # Note: platform.linux_distribution() was deprecated in Python 3.5 and
    # removed in Python 3.8, so this helper assumes an older interpreter.
    return platform.linux_distribution()
def distro_version():
return int(get_distro()[1].split(".")[0])
def isFedora():
return get_distro()[0].startswith("Fedora")
def isCentOS():
return get_distro()[0].startswith("CentOS")
def distro():
if isFedora():
return "Fedora"
if isCentOS():
return "CentOS"
if get_distro()[0].startswith("Red Hat"):
return "RHEL"
if get_distro()[0].startswith("Ubuntu"):
return "Ubuntu"
def isRedHatBased():
return get_distro()[0].startswith("Red Hat") or get_distro()[0].startswith(
"Fedora") or get_distro()[0].startswith("CentOS")
def isInstalled(prog):
    out = subprocess.Popen(
        ["which", prog], stdout=subprocess.PIPE).communicate()
    # communicate() returns bytes on Python 3, so test for any output
    # rather than comparing against the empty string.
    if out[0].strip():
        return True
    return False
def installManagerName():
if isRedHatBased() and distro_version() >= 22:
ret = "sudo dnf -y" # Fedora 22 or newer
elif isRedHatBased():
ret = "sudo yum -y" # Red Hat distro
else:
ret = "sudo apt-get -y" # Ubuntu/Debian
return ret
def installToolName():
if isRedHatBased():
ret = "rpm -ihv" # Red Hat Based
else:
ret = "dpkg -iv" # Ubuntu/Debian
return ret
def run_cmds(cmds):
for cmd in cmds:
# print (cmd.split[" "])
# subprocess.call(cmd.split[" "])
if command_output(cmd):
print("'{0}'' - Failed".format(cmd))
def gen_package_cmds(packages):
# main install tool/manager (yum, dnf, apt-get, etc)
mtool = installManagerName()
# core install tool (rpm, dpkg, etc)
itool = installToolName()
ret = []
for p in packages:
if p.startswith("wget"):
pth = p[5:]
pack = os.path.split(pth)[1]
cmd = ["wget {0}".format(pth), "{0} ./{1}".format(itool, pack)]
else:
cmd = ["{0} {1}".format(mtool, p)]
ret.extend(cmd)
return ret
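# Illustrative expansion (hypothetical URL; assumes a Fedora >= 22 host):
#
#     gen_package_cmds(["install python3", "wget http://example.com/pkg.rpm"])
#     # -> ["sudo dnf -y install python3",
#     #     "wget http://example.com/pkg.rpm",
#     #     "rpm -ihv ./pkg.rpm"]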
extra = ''
if distro() == 'RHEL' or distro() == 'CentOS':
extra = ". /opt/rh/rh-python35/enable ;"
def venv_cmds(path):
'''
Create virtual environment and add it
to the path being used for the script
'''
return [
# first command only needed for rhel and centos systems at this time
extra + " virtualenv --python=python3 {0}".format(path),
extra + " {0}/bin/pip install pip --upgrade".format(path)
]
def main():
" main script logic"
parser = argparse.ArgumentParser()
parser.add_argument(
"--use-pip", nargs='?', default="pip", help="Which pip to use")
parser.add_argument(
"venv_path",
nargs='?',
default="env-test",
help="The directory to us to for the virtualenv")
parser.add_argument(
"--disable-virtualenv",
default=False,
action='store_true',
help="Do not create virtual environment to install packages under")
parser.add_argument(
'-V', '--version', action='version', version='%(prog)s 1.0.0')
args = parser.parse_args()
# print(args)
# print(get_distro())
# do we know of packages to install for the given platform
dist = distro()
cmds = []
if dist:
cmds = gen_package_cmds(distro_packages[dist])
# test to see if we should use a certain version of pip
path_to_pip = None
if args.use_pip != "pip":
path_to_pip = args.use_pip
# install on the system, or use virtualenv for pip based stuff
if not args.disable_virtualenv:
# Create virtual env
cmds += venv_cmds(args.venv_path)
if path_to_pip is None:
path_to_pip = os.path.join(args.venv_path, "bin", args.use_pip)
cmds += [extra + "{0} install {1}".format(path_to_pip, " ".join(pip_packages))]
run_cmds(cmds)
if __name__ == '__main__':
main()
|
|
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.cloudfunctions import (
cloud_function_pb2,
)
from google3.cloud.graphite.mmv2.services.google.cloudfunctions import (
cloud_function_pb2_grpc,
)
from typing import List
class CloudFunction(object):
def __init__(
self,
name: str = None,
description: str = None,
source_archive_url: str = None,
source_repository: dict = None,
https_trigger: dict = None,
event_trigger: dict = None,
status: str = None,
entry_point: str = None,
runtime: str = None,
timeout: int = None,
available_memory_mb: int = None,
service_account_email: str = None,
update_time: str = None,
version_id: int = None,
labels: dict = None,
environment_variables: dict = None,
network: str = None,
max_instances: int = None,
vpc_connector: str = None,
vpc_connector_egress_settings: str = None,
ingress_settings: str = None,
region: str = None,
project: str = None,
service_account_file: str = "",
):
channel.initialize()
self.name = name
self.description = description
self.source_archive_url = source_archive_url
self.source_repository = source_repository
self.https_trigger = https_trigger
self.event_trigger = event_trigger
self.entry_point = entry_point
self.runtime = runtime
self.timeout = timeout
self.available_memory_mb = available_memory_mb
self.service_account_email = service_account_email
self.labels = labels
self.environment_variables = environment_variables
self.network = network
self.max_instances = max_instances
self.vpc_connector = vpc_connector
self.vpc_connector_egress_settings = vpc_connector_egress_settings
self.ingress_settings = ingress_settings
self.region = region
self.project = project
self.service_account_file = service_account_file
def apply(self):
stub = cloud_function_pb2_grpc.CloudfunctionsCloudFunctionServiceStub(
channel.Channel()
)
request = cloud_function_pb2.ApplyCloudfunctionsCloudFunctionRequest()
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.description):
request.resource.description = Primitive.to_proto(self.description)
if Primitive.to_proto(self.source_archive_url):
request.resource.source_archive_url = Primitive.to_proto(
self.source_archive_url
)
if CloudFunctionSourceRepository.to_proto(self.source_repository):
request.resource.source_repository.CopyFrom(
CloudFunctionSourceRepository.to_proto(self.source_repository)
)
else:
request.resource.ClearField("source_repository")
if CloudFunctionHttpsTrigger.to_proto(self.https_trigger):
request.resource.https_trigger.CopyFrom(
CloudFunctionHttpsTrigger.to_proto(self.https_trigger)
)
else:
request.resource.ClearField("https_trigger")
if CloudFunctionEventTrigger.to_proto(self.event_trigger):
request.resource.event_trigger.CopyFrom(
CloudFunctionEventTrigger.to_proto(self.event_trigger)
)
else:
request.resource.ClearField("event_trigger")
if Primitive.to_proto(self.entry_point):
request.resource.entry_point = Primitive.to_proto(self.entry_point)
if Primitive.to_proto(self.runtime):
request.resource.runtime = Primitive.to_proto(self.runtime)
if Primitive.to_proto(self.timeout):
request.resource.timeout = Primitive.to_proto(self.timeout)
if Primitive.to_proto(self.available_memory_mb):
request.resource.available_memory_mb = Primitive.to_proto(
self.available_memory_mb
)
if Primitive.to_proto(self.service_account_email):
request.resource.service_account_email = Primitive.to_proto(
self.service_account_email
)
if Primitive.to_proto(self.labels):
request.resource.labels = Primitive.to_proto(self.labels)
if Primitive.to_proto(self.environment_variables):
request.resource.environment_variables = Primitive.to_proto(
self.environment_variables
)
if Primitive.to_proto(self.network):
request.resource.network = Primitive.to_proto(self.network)
if Primitive.to_proto(self.max_instances):
request.resource.max_instances = Primitive.to_proto(self.max_instances)
if Primitive.to_proto(self.vpc_connector):
request.resource.vpc_connector = Primitive.to_proto(self.vpc_connector)
if CloudFunctionVPCConnectorEgressSettingsEnum.to_proto(
self.vpc_connector_egress_settings
):
request.resource.vpc_connector_egress_settings = CloudFunctionVPCConnectorEgressSettingsEnum.to_proto(
self.vpc_connector_egress_settings
)
if CloudFunctionIngressSettingsEnum.to_proto(self.ingress_settings):
request.resource.ingress_settings = CloudFunctionIngressSettingsEnum.to_proto(
self.ingress_settings
)
if Primitive.to_proto(self.region):
request.resource.region = Primitive.to_proto(self.region)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
request.service_account_file = self.service_account_file
response = stub.ApplyCloudfunctionsCloudFunction(request)
self.name = Primitive.from_proto(response.name)
self.description = Primitive.from_proto(response.description)
self.source_archive_url = Primitive.from_proto(response.source_archive_url)
self.source_repository = CloudFunctionSourceRepository.from_proto(
response.source_repository
)
self.https_trigger = CloudFunctionHttpsTrigger.from_proto(
response.https_trigger
)
self.event_trigger = CloudFunctionEventTrigger.from_proto(
response.event_trigger
)
self.status = CloudFunctionStatusEnum.from_proto(response.status)
self.entry_point = Primitive.from_proto(response.entry_point)
self.runtime = Primitive.from_proto(response.runtime)
self.timeout = Primitive.from_proto(response.timeout)
self.available_memory_mb = Primitive.from_proto(response.available_memory_mb)
self.service_account_email = Primitive.from_proto(
response.service_account_email
)
self.update_time = Primitive.from_proto(response.update_time)
self.version_id = Primitive.from_proto(response.version_id)
self.labels = Primitive.from_proto(response.labels)
self.environment_variables = Primitive.from_proto(
response.environment_variables
)
self.network = Primitive.from_proto(response.network)
self.max_instances = Primitive.from_proto(response.max_instances)
self.vpc_connector = Primitive.from_proto(response.vpc_connector)
self.vpc_connector_egress_settings = CloudFunctionVPCConnectorEgressSettingsEnum.from_proto(
response.vpc_connector_egress_settings
)
self.ingress_settings = CloudFunctionIngressSettingsEnum.from_proto(
response.ingress_settings
)
self.region = Primitive.from_proto(response.region)
self.project = Primitive.from_proto(response.project)
@classmethod
def delete(self, project, region, name, service_account_file=""):
stub = cloud_function_pb2_grpc.CloudfunctionsCloudFunctionServiceStub(
channel.Channel()
)
request = cloud_function_pb2.DeleteCloudfunctionsCloudFunctionRequest()
request.service_account_file = service_account_file
request.Project = project
request.Region = region
request.Name = name
response = stub.DeleteCloudfunctionsCloudFunction(request)
@classmethod
def list(self, project, region, service_account_file=""):
stub = cloud_function_pb2_grpc.CloudfunctionsCloudFunctionServiceStub(
channel.Channel()
)
request = cloud_function_pb2.ListCloudfunctionsCloudFunctionRequest()
request.service_account_file = service_account_file
request.Project = project
request.Region = region
return stub.ListCloudfunctionsCloudFunction(request).items
@classmethod
def from_any(self, any_proto):
# Marshal any proto to regular proto.
res_proto = cloud_function_pb2.CloudfunctionsCloudFunction()
any_proto.Unpack(res_proto)
res = CloudFunction()
res.name = Primitive.from_proto(res_proto.name)
res.description = Primitive.from_proto(res_proto.description)
res.source_archive_url = Primitive.from_proto(res_proto.source_archive_url)
res.source_repository = CloudFunctionSourceRepository.from_proto(
res_proto.source_repository
)
res.https_trigger = CloudFunctionHttpsTrigger.from_proto(
res_proto.https_trigger
)
res.event_trigger = CloudFunctionEventTrigger.from_proto(
res_proto.event_trigger
)
res.status = CloudFunctionStatusEnum.from_proto(res_proto.status)
res.entry_point = Primitive.from_proto(res_proto.entry_point)
res.runtime = Primitive.from_proto(res_proto.runtime)
res.timeout = Primitive.from_proto(res_proto.timeout)
res.available_memory_mb = Primitive.from_proto(res_proto.available_memory_mb)
res.service_account_email = Primitive.from_proto(
res_proto.service_account_email
)
res.update_time = Primitive.from_proto(res_proto.update_time)
res.version_id = Primitive.from_proto(res_proto.version_id)
res.labels = Primitive.from_proto(res_proto.labels)
res.environment_variables = Primitive.from_proto(
res_proto.environment_variables
)
res.network = Primitive.from_proto(res_proto.network)
res.max_instances = Primitive.from_proto(res_proto.max_instances)
res.vpc_connector = Primitive.from_proto(res_proto.vpc_connector)
res.vpc_connector_egress_settings = CloudFunctionVPCConnectorEgressSettingsEnum.from_proto(
res_proto.vpc_connector_egress_settings
)
res.ingress_settings = CloudFunctionIngressSettingsEnum.from_proto(
res_proto.ingress_settings
)
res.region = Primitive.from_proto(res_proto.region)
res.project = Primitive.from_proto(res_proto.project)
return res
class CloudFunctionSourceRepository(object):
def __init__(self, url: str = None, deployed_url: str = None):
self.url = url
self.deployed_url = deployed_url
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = cloud_function_pb2.CloudfunctionsCloudFunctionSourceRepository()
if Primitive.to_proto(resource.url):
res.url = Primitive.to_proto(resource.url)
if Primitive.to_proto(resource.deployed_url):
res.deployed_url = Primitive.to_proto(resource.deployed_url)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return CloudFunctionSourceRepository(
url=resource.url, deployed_url=resource.deployed_url,
)
class CloudFunctionSourceRepositoryArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [CloudFunctionSourceRepository.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [CloudFunctionSourceRepository.from_proto(i) for i in resources]
class CloudFunctionHttpsTrigger(object):
def __init__(self, url: str = None):
self.url = url
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = cloud_function_pb2.CloudfunctionsCloudFunctionHttpsTrigger()
if Primitive.to_proto(resource.url):
res.url = Primitive.to_proto(resource.url)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return CloudFunctionHttpsTrigger(url=resource.url,)
class CloudFunctionHttpsTriggerArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [CloudFunctionHttpsTrigger.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [CloudFunctionHttpsTrigger.from_proto(i) for i in resources]
class CloudFunctionEventTrigger(object):
def __init__(
self,
event_type: str = None,
resource: str = None,
service: str = None,
failure_policy: bool = None,
):
self.event_type = event_type
self.resource = resource
self.service = service
self.failure_policy = failure_policy
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = cloud_function_pb2.CloudfunctionsCloudFunctionEventTrigger()
if Primitive.to_proto(resource.event_type):
res.event_type = Primitive.to_proto(resource.event_type)
if Primitive.to_proto(resource.resource):
res.resource = Primitive.to_proto(resource.resource)
if Primitive.to_proto(resource.service):
res.service = Primitive.to_proto(resource.service)
if Primitive.to_proto(resource.failure_policy):
res.failure_policy = Primitive.to_proto(resource.failure_policy)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return CloudFunctionEventTrigger(
event_type=resource.event_type,
resource=resource.resource,
service=resource.service,
failure_policy=resource.failure_policy,
)
class CloudFunctionEventTriggerArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [CloudFunctionEventTrigger.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [CloudFunctionEventTrigger.from_proto(i) for i in resources]
class CloudFunctionStatusEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return cloud_function_pb2.CloudfunctionsCloudFunctionStatusEnum.Value(
"CloudfunctionsCloudFunctionStatusEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return cloud_function_pb2.CloudfunctionsCloudFunctionStatusEnum.Name(resource)[
len("CloudfunctionsCloudFunctionStatusEnum") :
]
class CloudFunctionVPCConnectorEgressSettingsEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return cloud_function_pb2.CloudfunctionsCloudFunctionVPCConnectorEgressSettingsEnum.Value(
"CloudfunctionsCloudFunctionVPCConnectorEgressSettingsEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return cloud_function_pb2.CloudfunctionsCloudFunctionVPCConnectorEgressSettingsEnum.Name(
resource
)[
len("CloudfunctionsCloudFunctionVPCConnectorEgressSettingsEnum") :
]
class CloudFunctionIngressSettingsEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return cloud_function_pb2.CloudfunctionsCloudFunctionIngressSettingsEnum.Value(
"CloudfunctionsCloudFunctionIngressSettingsEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return cloud_function_pb2.CloudfunctionsCloudFunctionIngressSettingsEnum.Name(
resource
)[len("CloudfunctionsCloudFunctionIngressSettingsEnum") :]
class Primitive(object):
@classmethod
def to_proto(self, s):
if not s:
return ""
return s
@classmethod
def from_proto(self, s):
return s
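# Minimal usage sketch (not part of the generated client; the project, region
# and bucket names below are placeholders):
#
#     fn = CloudFunction(
#         name="projects/my-project/locations/us-central1/functions/my-fn",
#         runtime="python39",
#         entry_point="handler",
#         source_archive_url="gs://my-bucket/source.zip",
#         https_trigger=CloudFunctionHttpsTrigger(),
#         region="us-central1",
#         project="my-project",
#     )
#     fn.apply()                                        # create or update
#     CloudFunction.list("my-project", "us-central1")   # enumerate functions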
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains convenience functions implementing some of the
algorithms contained within Jean Meeus, 'Astronomical Algorithms',
second edition, 1998, Willmann-Bell.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from numpy.polynomial.polynomial import polyval
from .. import units as u
from .. import _erfa as erfa
from . import ICRS, SkyCoord, GeocentricTrueEcliptic
from .builtin_frames.utils import get_jd12
__all__ = ["calc_moon"]
# Meeus 1998: table 47.A
# D M M' F l r
_MOON_L_R = (
(0, 0, 1, 0, 6288774, -20905355),
(2, 0, -1, 0, 1274027, -3699111),
(2, 0, 0, 0, 658314, -2955968),
(0, 0, 2, 0, 213618, -569925),
(0, 1, 0, 0, -185116, 48888),
(0, 0, 0, 2, -114332, -3149),
(2, 0, -2, 0, 58793, 246158),
(2, -1, -1, 0, 57066, -152138),
(2, 0, 1, 0, 53322, -170733),
(2, -1, 0, 0, 45758, -204586),
(0, 1, -1, 0, -40923, -129620),
(1, 0, 0, 0, -34720, 108743),
(0, 1, 1, 0, -30383, 104755),
(2, 0, 0, -2, 15327, 10321),
(0, 0, 1, 2, -12528, 0),
(0, 0, 1, -2, 10980, 79661),
(4, 0, -1, 0, 10675, -34782),
(0, 0, 3, 0, 10034, -23210),
(4, 0, -2, 0, 8548, -21636),
(2, 1, -1, 0, -7888, 24208),
(2, 1, 0, 0, -6766, 30824),
(1, 0, -1, 0, -5163, -8379),
(1, 1, 0, 0, 4987, -16675),
(2, -1, 1, 0, 4036, -12831),
(2, 0, 2, 0, 3994, -10445),
(4, 0, 0, 0, 3861, -11650),
(2, 0, -3, 0, 3665, 14403),
(0, 1, -2, 0, -2689, -7003),
(2, 0, -1, 2, -2602, 0),
(2, -1, -2, 0, 2390, 10056),
(1, 0, 1, 0, -2348, 6322),
(2, -2, 0, 0, 2236, -9884),
(0, 1, 2, 0, -2120, 5751),
(0, 2, 0, 0, -2069, 0),
(2, -2, -1, 0, 2048, -4950),
(2, 0, 1, -2, -1773, 4130),
(2, 0, 0, 2, -1595, 0),
(4, -1, -1, 0, 1215, -3958),
(0, 0, 2, 2, -1110, 0),
(3, 0, -1, 0, -892, 3258),
(2, 1, 1, 0, -810, 2616),
(4, -1, -2, 0, 759, -1897),
(0, 2, -1, 0, -713, -2117),
(2, 2, -1, 0, -700, 2354),
(2, 1, -2, 0, 691, 0),
(2, -1, 0, -2, 596, 0),
(4, 0, 1, 0, 549, -1423),
(0, 0, 4, 0, 537, -1117),
(4, -1, 0, 0, 520, -1571),
(1, 0, -2, 0, -487, -1739),
(2, 1, 0, -2, -399, 0),
(0, 0, 2, -2, -381, -4421),
(1, 1, 1, 0, 351, 0),
(3, 0, -2, 0, -340, 0),
(4, 0, -3, 0, 330, 0),
(2, -1, 2, 0, 327, 0),
(0, 2, 1, 0, -323, 1165),
(1, 1, -1, 0, 299, 0),
(2, 0, 3, 0, 294, 0),
(2, 0, -1, -2, 0, 8752)
)
# Meeus 1998: table 47.B
# D M M' F b
_MOON_B = (
(0, 0, 0, 1, 5128122),
(0, 0, 1, 1, 280602),
(0, 0, 1, -1, 277693),
(2, 0, 0, -1, 173237),
(2, 0, -1, 1, 55413),
(2, 0, -1, -1, 46271),
(2, 0, 0, 1, 32573),
(0, 0, 2, 1, 17198),
(2, 0, 1, -1, 9266),
(0, 0, 2, -1, 8822),
(2, -1, 0, -1, 8216),
(2, 0, -2, -1, 4324),
(2, 0, 1, 1, 4200),
(2, 1, 0, -1, -3359),
(2, -1, -1, 1, 2463),
(2, -1, 0, 1, 2211),
(2, -1, -1, -1, 2065),
(0, 1, -1, -1, -1870),
(4, 0, -1, -1, 1828),
(0, 1, 0, 1, -1794),
(0, 0, 0, 3, -1749),
(0, 1, -1, 1, -1565),
(1, 0, 0, 1, -1491),
(0, 1, 1, 1, -1475),
(0, 1, 1, -1, -1410),
(0, 1, 0, -1, -1344),
(1, 0, 0, -1, -1335),
(0, 0, 3, 1, 1107),
(4, 0, 0, -1, 1021),
(4, 0, -1, 1, 833),
# second column
(0, 0, 1, -3, 777),
(4, 0, -2, 1, 671),
(2, 0, 0, -3, 607),
(2, 0, 2, -1, 596),
(2, -1, 1, -1, 491),
(2, 0, -2, 1, -451),
(0, 0, 3, -1, 439),
(2, 0, 2, 1, 422),
(2, 0, -3, -1, 421),
(2, 1, -1, 1, -366),
(2, 1, 0, 1, -351),
(4, 0, 0, 1, 331),
(2, -1, 1, 1, 315),
(2, -2, 0, -1, 302),
(0, 0, 1, 3, -283),
(2, 1, 1, -1, -229),
(1, 1, 0, -1, 223),
(1, 1, 0, 1, 223),
(0, 1, -2, -1, -220),
(2, 1, -1, -1, -220),
(1, 0, 1, 1, -185),
(2, -1, -2, -1, 181),
(0, 1, 2, 1, -177),
(4, 0, -2, -1, 176),
(4, -1, -1, -1, 166),
(1, 0, 1, -1, -164),
(4, 0, 1, -1, 132),
(1, 0, -1, -1, -119),
(4, -1, 0, -1, 115),
(2, -2, 0, 1, 107)
)
"""
Coefficients of polynomials for various terms:
Lc : Mean longitude of Moon, w.r.t. mean Equinox of date
D : Mean elongation of the Moon
M : Sun's mean anomaly
Mc : Moon's mean anomaly
F : Moon's argument of latitude (mean distance of Moon from its ascending node).
"""
_coLc = (2.18316448e+02, 4.81267881e+05, -1.57860000e-03,
1.85583502e-06, -1.53388349e-08)
_coD = (2.97850192e+02, 4.45267111e+05, -1.88190000e-03,
1.83194472e-06, -8.84447000e-09)
_coM = (3.57529109e+02, 3.59990503e+04, -1.53600000e-04,
4.08329931e-08)
_coMc = (1.34963396e+02, 4.77198868e+05, 8.74140000e-03,
1.43474081e-05, -6.79717238e-08)
_coF = (9.32720950e+01, 4.83202018e+05, -3.65390000e-03,
-2.83607487e-07, 1.15833246e-09)
_coA1 = (119.75, 131.849)
_coA2 = (53.09, 479264.290)
_coA3 = (313.45, 481266.484)
_coE = (1.0, -0.002516, -0.0000074)
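# Each mean element above is evaluated with polyval as
#     value = c0 + c1*T + c2*T**2 + ...
# where T is the time in Julian centuries since J2000.0 (see calc_moon below);
# the results are in degrees, except _coE which yields a dimensionless factor.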
def calc_moon(t):
"""
    Lunar position model ELP2000-82 of Chapront-Touze' and Chapront (1983, 124, 50).
This is the simplified version of Jean Meeus, Astronomical Algorithms,
second edition, 1998, Willmann-Bell. Meeus claims approximate accuracy of 10"
in longitude and 4" in latitude, with no specified time range.
Tests against JPL ephemerides show accuracy of 10 arcseconds and 50 km over the
date range CE 1950-2050.
    Parameters
    ----------
    t : `~astropy.time.Time`
        Time of observation.
    Returns
    -------
    skycoord : `~astropy.coordinates.SkyCoord`
        ICRS coordinate for the Moon at the given time.
"""
# number of centuries since J2000.0.
# This should strictly speaking be in Ephemeris Time, but TDB or TT
# will introduce error smaller than intrinsic accuracy of algorithm.
T = (t.tdb.jyear-2000.0)/100.
# constants that are needed for all calculations
Lc = u.Quantity(polyval(T, _coLc), u.deg)
D = u.Quantity(polyval(T, _coD), u.deg)
M = u.Quantity(polyval(T, _coM), u.deg)
Mc = u.Quantity(polyval(T, _coMc), u.deg)
F = u.Quantity(polyval(T, _coF), u.deg)
A1 = u.Quantity(polyval(T, _coA1), u.deg)
A2 = u.Quantity(polyval(T, _coA2), u.deg)
A3 = u.Quantity(polyval(T, _coA3), u.deg)
E = polyval(T, _coE)
suml = sumr = 0.0
for DNum, MNum, McNum, FNum, LFac, RFac in _MOON_L_R:
corr = E ** abs(MNum)
suml += LFac*corr*np.sin(D*DNum+M*MNum+Mc*McNum+F*FNum)
sumr += RFac*corr*np.cos(D*DNum+M*MNum+Mc*McNum+F*FNum)
sumb = 0.0
for DNum, MNum, McNum, FNum, BFac in _MOON_B:
corr = E ** abs(MNum)
sumb += BFac*corr*np.sin(D*DNum+M*MNum+Mc*McNum+F*FNum)
suml += (3958*np.sin(A1) + 1962*np.sin(Lc-F) + 318*np.sin(A2))
sumb += (-2235*np.sin(Lc) + 382*np.sin(A3) + 175*np.sin(A1-F) +
175*np.sin(A1+F) + 127*np.sin(Lc-Mc) - 115*np.sin(Lc+Mc))
# ensure units
suml = suml*u.microdegree
sumb = sumb*u.microdegree
# nutation of longitude
jd1, jd2 = get_jd12(t, 'tt')
nut, _ = erfa.nut06a(jd1, jd2)
nut = nut*u.rad
# calculate ecliptic coordinates
lon = Lc + suml + nut
lat = sumb
dist = (385000.56+sumr/1000)*u.km
# Meeus algorithm gives GeocentricTrueEcliptic coordinates
ecliptic_coo = GeocentricTrueEcliptic(lon, lat, distance=dist,
equinox=t)
return SkyCoord(ecliptic_coo.transform_to(ICRS))
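# Minimal usage sketch (illustrative date; accuracy as quoted in the docstring):
#
#     from astropy.time import Time
#     moon = calc_moon(Time('2015-06-16 12:00:00'))
#     print(moon.ra, moon.dec, moon.distance)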
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import gzip
import json
import os
import random
import unittest
from collections import OrderedDict
import numpy as np
import pandas as pd
from pymatgen import yaml
from pymatgen.core.periodic_table import Element
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Molecule, Structure
from pymatgen.io.lammps.data import (
CombinedData,
ForceField,
LammpsBox,
LammpsData,
Topology,
lattice_2_lmpbox,
structure_2_lmpdata,
)
from pymatgen.util.testing import PymatgenTest
test_dir = os.path.join(PymatgenTest.TEST_FILES_DIR, "lammps")
class LammpsBoxTest(PymatgenTest):
@classmethod
def setUpClass(cls):
cls.peptide = LammpsBox(
bounds=[
[36.840194, 64.211560],
[41.013691, 68.385058],
[29.768095, 57.139462],
]
)
cls.quartz = LammpsBox(
bounds=[[0, 4.913400], [0, 4.255129], [0, 5.405200]],
tilt=[-2.456700, 0.0, 0.0],
)
def test_volume(self):
obounds = np.array(self.peptide.bounds)
ov = np.prod(obounds[:, 1] - obounds[:, 0])
self.assertEqual(self.peptide.volume, ov)
self.assertAlmostEqual(self.quartz.volume, 113.00733165874873)
def test_get_string(self):
peptide = self.peptide.get_string(5)
peptide_5 = """36.84019 64.21156 xlo xhi
41.01369 68.38506 ylo yhi
29.76809 57.13946 zlo zhi"""
self.assertEqual(peptide, peptide_5)
quartz = self.quartz.get_string(4)
quartz_4 = """0.0000 4.9134 xlo xhi
0.0000 4.2551 ylo yhi
0.0000 5.4052 zlo zhi
-2.4567 0.0000 0.0000 xy xz yz"""
self.assertEqual(quartz, quartz_4)
def test_get_box_shift(self):
peptide = self.peptide
self.assertEqual(peptide.get_box_shift([1, 0, 0])[0], 64.211560 - 36.840194)
self.assertEqual(peptide.get_box_shift([0, 0, -1])[-1], 29.768095 - 57.139462)
quartz = self.quartz
np.testing.assert_array_almost_equal(quartz.get_box_shift([0, 0, 1]), [0, 0, 5.4052], 4)
np.testing.assert_array_almost_equal(quartz.get_box_shift([0, 1, -1]), [-2.4567, 4.2551, -5.4052], 4)
np.testing.assert_array_almost_equal(quartz.get_box_shift([1, -1, 0]), [4.9134 + 2.4567, -4.2551, 0], 4)
def test_to_lattice(self):
peptide = self.peptide.to_lattice()
np.testing.assert_array_almost_equal(peptide.abc, [27.371367] * 3)
self.assertTrue(peptide.is_orthogonal)
quartz = self.quartz.to_lattice()
np.testing.assert_array_almost_equal(
quartz.matrix,
[[4.913400, 0, 0], [-2.456700, 4.255129, 0], [0, 0, 5.405200]],
)
class LammpsDataTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.peptide = LammpsData.from_file(filename=os.path.join(test_dir, "data.peptide"))
cls.ethane = LammpsData.from_file(filename=os.path.join(test_dir, "ethane.data"))
cls.quartz = LammpsData.from_file(filename=os.path.join(test_dir, "data.quartz"), atom_style="atomic")
cls.virus = LammpsData.from_file(filename=os.path.join(test_dir, "virus.data"), atom_style="angle")
cls.tatb = LammpsData.from_file(
filename=os.path.join(test_dir, "tatb.data"),
atom_style="charge",
sort_id=True,
)
def test_structure(self):
quartz = self.quartz.structure
np.testing.assert_array_almost_equal(
quartz.lattice.matrix,
[[4.913400, 0, 0], [-2.456700, 4.255129, 0], [0, 0, 5.405200]],
)
self.assertEqual(quartz.formula, "Si3 O6")
self.assertNotIn("molecule-ID", self.quartz.atoms.columns)
ethane = self.ethane.structure
np.testing.assert_array_almost_equal(ethane.lattice.matrix, np.diag([10.0] * 3))
lbounds = np.array(self.ethane.box.bounds)[:, 0]
coords = self.ethane.atoms[["x", "y", "z"]].values - lbounds
np.testing.assert_array_almost_equal(ethane.cart_coords, coords)
np.testing.assert_array_almost_equal(ethane.site_properties["charge"], self.ethane.atoms["q"])
tatb = self.tatb.structure
frac_coords = tatb.frac_coords[381]
real_frac_coords = frac_coords - np.floor(frac_coords)
np.testing.assert_array_almost_equal(real_frac_coords, [0.01553397, 0.71487872, 0.14134139])
co = Structure.from_spacegroup(194, Lattice.hexagonal(2.50078, 4.03333), ["Co"], [[1 / 3, 2 / 3, 1 / 4]])
ld_co = LammpsData.from_structure(co)
self.assertEqual(ld_co.structure.composition.reduced_formula, "Co")
ni = Structure.from_spacegroup(225, Lattice.cubic(3.50804), ["Ni"], [[0, 0, 0]])
ld_ni = LammpsData.from_structure(ni)
self.assertEqual(ld_ni.structure.composition.reduced_formula, "Ni")
def test_sort_structure(self):
s = Structure(Lattice.cubic(4), ["S", "Fe"], [[0, 0, 0], [0.5, 0.5, 0.5]])
lmp = LammpsData.from_structure(s, is_sort=False)
lmp.write_file("test1.data")
lmp2 = LammpsData.from_file("test1.data", atom_style="charge")
# internally element:type will be {Fe: 1, S: 2},
# therefore without sorting the atom types in structure
# will be [2, 1], i.e., (S, Fe)
self.assertListEqual(lmp2.atoms["type"].values.tolist(), [2, 1])
# with sorting the atom types in structures will be [1, 2]
lmp = LammpsData.from_structure(s, is_sort=True)
lmp.write_file("test1.data")
lmp2 = LammpsData.from_file("test1.data", atom_style="charge")
self.assertListEqual(lmp2.atoms["type"].values.tolist(), [1, 2])
def test_get_string(self):
pep = self.peptide.get_string(distance=7, velocity=5, charge=4)
pep_lines = pep.split("\n")
pep_kws = [
"Masses",
"Pair Coeffs",
"Bond Coeffs",
"Angle Coeffs",
"Dihedral Coeffs",
"Improper Coeffs",
"Atoms",
"Velocities",
"Bonds",
"Angles",
"Dihedrals",
"Impropers",
]
kw_inds = {l: i for i, l in enumerate(pep_lines) if l in pep_kws}
# section sequence
self.assertListEqual([k for k in sorted(kw_inds, key=kw_inds.get)], pep_kws)
# header
pep_header = "\n".join(pep_lines[: kw_inds["Masses"]])
pep_header_7 = """Generated by pymatgen.io.lammps.data.LammpsData
2004 atoms
1365 bonds
786 angles
207 dihedrals
12 impropers
14 atom types
18 bond types
31 angle types
21 dihedral types
2 improper types
36.8401940 64.2115600 xlo xhi
41.0136910 68.3850580 ylo yhi
29.7680950 57.1394620 zlo zhi
"""
self.assertEqual(pep_header, pep_header_7)
# int vs float for coeffs
pep_dihedral_coeff = pep_lines[kw_inds["Dihedral Coeffs"] + 2]
self.assertEqual(pep_dihedral_coeff, "1 0.200 1 180 1.0")
# distance and charge
pep_atom = pep_lines[kw_inds["Atoms"] + 2]
self.assertEqual(
pep_atom,
"1 1 1 0.5100 43.9999300 " "58.5267800 36.7855000 0 0 0",
)
# velocity
pep_velo = pep_lines[kw_inds["Velocities"] + 2]
self.assertEqual(pep_velo, "1 -0.00067 -0.00282 0.00383")
# no floats in topology sections
pep_topos = "\n".join(pep_lines[kw_inds["Bonds"] :])
self.assertNotIn(".", pep_topos)
c2h6 = self.ethane.get_string(distance=5, charge=3)
c2h6_lines = c2h6.split("\n")
c2h6_kws = [
"Masses",
"Pair Coeffs",
"Bond Coeffs",
"Angle Coeffs",
"Dihedral Coeffs",
"Improper Coeffs",
"BondBond Coeffs",
"BondAngle Coeffs",
"MiddleBondTorsion Coeffs",
"EndBondTorsion Coeffs",
"AngleTorsion Coeffs",
"AngleAngleTorsion Coeffs",
"BondBond13 Coeffs",
"AngleAngle Coeffs",
"Atoms",
"Bonds",
"Angles",
"Dihedrals",
"Impropers",
]
kw_inds = {l: i for i, l in enumerate(c2h6_lines) if l in c2h6_kws}
# section sequence
self.assertListEqual([k for k in sorted(kw_inds, key=kw_inds.get)], c2h6_kws)
# header
c2h6_header = "\n".join(c2h6_lines[: kw_inds["Masses"]])
c2h6_header_5 = """Generated by pymatgen.io.lammps.data.LammpsData
8 atoms
7 bonds
12 angles
9 dihedrals
8 impropers
2 atom types
2 bond types
2 angle types
1 dihedral types
2 improper types
0.21455 10.21454 xlo xhi
0.11418 10.11418 ylo yhi
-10.00014 -0.00015 zlo zhi
"""
self.assertEqual(c2h6_header, c2h6_header_5)
# distance and charge
c2h6_atom = c2h6_lines[kw_inds["Atoms"] + 2]
self.assertEqual(c2h6_atom, "1 1 1 -0.080 4.46291 5.14833 -5.00041" " 0 0 0")
# no floats in topology sections
c2h6_topos = "\n".join(c2h6_lines[kw_inds["Bonds"] :])
self.assertNotIn(".", c2h6_topos)
quartz = self.quartz.get_string(distance=4)
quartz_lines = quartz.split("\n")
quartz_kws = ["Masses", "Atoms"]
kw_inds = {l: i for i, l in enumerate(quartz_lines) if l in quartz_kws}
# header
quartz_header = "\n".join(quartz_lines[: kw_inds["Masses"]])
quartz_header_4 = """Generated by pymatgen.io.lammps.data.LammpsData
9 atoms
2 atom types
0.0000 4.9134 xlo xhi
0.0000 4.2551 ylo yhi
0.0000 5.4052 zlo zhi
-2.4567 0.0000 0.0000 xy xz yz
"""
self.assertEqual(quartz_header, quartz_header_4)
# distance
quartz_atom = quartz_lines[kw_inds["Atoms"] + 2]
self.assertEqual(quartz_atom, "1 1 2.3088 0.0000 3.6035")
virus = self.virus.get_string()
virus_lines = virus.split("\n")
pairij_coeff = virus_lines[virus_lines.index("PairIJ Coeffs") + 5]
self.assertEqual(pairij_coeff.strip().split(), ["1", "4", "1", "1.000", "1.12250"])
def test_write_file(self):
filename1 = "test1.data"
self.ethane.write_file(filename=filename1)
c2h6 = LammpsData.from_file(filename1)
pd.testing.assert_frame_equal(c2h6.masses, self.ethane.masses)
pd.testing.assert_frame_equal(c2h6.atoms, self.ethane.atoms)
        ff_kw = random.sample(list(self.ethane.force_field.keys()), 1)[0]
pd.testing.assert_frame_equal(c2h6.force_field[ff_kw], self.ethane.force_field[ff_kw], ff_kw)
        topo_kw = random.sample(list(self.ethane.topology.keys()), 1)[0]
pd.testing.assert_frame_equal(c2h6.topology[topo_kw], self.ethane.topology[topo_kw], topo_kw)
filename2 = "test2.data"
self.virus.write_file(filename=filename2)
v = LammpsData.from_file(filename2, atom_style="angle")
pd.testing.assert_frame_equal(v.force_field["PairIJ Coeffs"], self.virus.force_field["PairIJ Coeffs"])
def test_disassemble(self):
# general tests
c = LammpsData.from_file(os.path.join(test_dir, "crambin.data"))
_, c_ff, topos = c.disassemble()
mass_info = [
("N1", 14.0067),
("H1", 1.00797),
("C1", 12.01115),
("H2", 1.00797),
("C2", 12.01115),
("O1", 15.9994),
("C3", 12.01115),
("O2", 15.9994),
("H3", 1.00797),
("C4", 12.01115),
("N2", 14.0067),
("C5", 12.01115),
("S1", 32.064),
("C6", 12.01115),
("N3", 14.0067),
("C7", 12.01115),
("C8", 12.01115),
("C9", 12.01115),
("O3", 15.9994),
]
self.assertListEqual(c_ff.mass_info, mass_info)
np.testing.assert_array_equal(c_ff.nonbond_coeffs, c.force_field["Pair Coeffs"].values)
base_kws = ["Bond", "Angle", "Dihedral", "Improper"]
for kw in base_kws:
ff_kw = kw + " Coeffs"
i = random.randint(0, len(c_ff.topo_coeffs[ff_kw]) - 1)
sample_coeff = c_ff.topo_coeffs[ff_kw][i]
np.testing.assert_array_equal(sample_coeff["coeffs"], c.force_field[ff_kw].iloc[i].values, ff_kw)
topo = topos[-1]
atoms = c.atoms[c.atoms["molecule-ID"] == 46]
np.testing.assert_array_almost_equal(topo.sites.cart_coords, atoms[["x", "y", "z"]])
np.testing.assert_array_equal(topo.charges, atoms["q"])
atom_labels = [m[0] for m in mass_info]
self.assertListEqual(
topo.sites.site_properties["ff_map"],
[atom_labels[i - 1] for i in atoms["type"]],
)
shift = min(atoms.index)
for kw in base_kws:
ff_kw = kw + " Coeffs"
ff_coeffs = c_ff.topo_coeffs[ff_kw]
topo_kw = kw + "s"
topos_df = c.topology[topo_kw]
topo_df = topos_df[topos_df["atom1"] >= shift]
topo_arr = topo_df.drop("type", axis=1).values
np.testing.assert_array_equal(topo.topologies[topo_kw], topo_arr - shift, topo_kw)
sample_topo = random.sample(list(topo_df.itertuples(False, None)), 1)[0]
topo_type_idx = sample_topo[0] - 1
topo_type = tuple([atom_labels[i - 1] for i in atoms.loc[list(sample_topo[1:])]["type"]])
self.assertIn(topo_type, ff_coeffs[topo_type_idx]["types"], ff_kw)
# test no guessing element and pairij as nonbond coeffs
v = self.virus
_, v_ff, _ = v.disassemble(guess_element=False)
self.assertDictEqual(v_ff.maps["Atoms"], dict(Qa1=1, Qb1=2, Qc1=3, Qa2=4))
pairij_coeffs = v.force_field["PairIJ Coeffs"].drop(["id1", "id2"], axis=1)
np.testing.assert_array_equal(v_ff.nonbond_coeffs, pairij_coeffs.values)
# test class2 ff
_, e_ff, _ = self.ethane.disassemble()
e_topo_coeffs = e_ff.topo_coeffs
for k in ["BondBond Coeffs", "BondAngle Coeffs"]:
self.assertIn(k, e_topo_coeffs["Angle Coeffs"][0], k)
for k in [
"MiddleBondTorsion Coeffs",
"EndBondTorsion Coeffs",
"AngleTorsion Coeffs",
"AngleAngleTorsion Coeffs",
"BondBond13 Coeffs",
]:
self.assertIn(k, e_topo_coeffs["Dihedral Coeffs"][0], k)
self.assertIn("AngleAngle Coeffs", e_topo_coeffs["Improper Coeffs"][0])
def test_from_file(self):
# general tests
pep = self.peptide
# header stats and Nos. of columns
self.assertEqual(pep.masses.shape, (14, 1))
self.assertEqual(pep.atoms.shape, (2004, 9))
self.assertListEqual(
list(pep.atoms.columns),
["molecule-ID", "type", "q", "x", "y", "z", "nx", "ny", "nz"],
)
topo = pep.topology
self.assertEqual(topo["Bonds"].shape, (1365, 3))
self.assertEqual(topo["Angles"].shape, (786, 4))
self.assertEqual(topo["Dihedrals"].shape, (207, 5))
self.assertEqual(topo["Impropers"].shape, (12, 5))
ff = pep.force_field
self.assertEqual(ff["Pair Coeffs"].shape, (14, 4))
self.assertEqual(ff["Bond Coeffs"].shape, (18, 2))
self.assertEqual(ff["Angle Coeffs"].shape, (31, 4))
self.assertEqual(ff["Dihedral Coeffs"].shape, (21, 4))
self.assertEqual(ff["Improper Coeffs"].shape, (2, 2))
# header box
np.testing.assert_array_equal(
pep.box.bounds,
[[36.840194, 64.211560], [41.013691, 68.385058], [29.768095, 57.139462]],
)
# body
self.assertEqual(pep.masses.at[7, "mass"], 12.0110)
self.assertEqual(ff["Pair Coeffs"].at[9, "coeff3"], 0.152100)
self.assertEqual(ff["Bond Coeffs"].at[5, "coeff2"], 1.430000)
self.assertEqual(ff["Angle Coeffs"].at[21, "coeff2"], 120.000000)
self.assertEqual(ff["Dihedral Coeffs"].at[10, "coeff1"], 0.040000)
self.assertEqual(ff["Improper Coeffs"].at[2, "coeff1"], 20.000000)
self.assertEqual(pep.atoms.at[29, "molecule-ID"], 1)
self.assertEqual(pep.atoms.at[29, "type"], 7)
self.assertEqual(pep.atoms.at[29, "q"], -0.020)
self.assertAlmostEqual(pep.atoms.at[29, "x"], 42.96709)
self.assertEqual(pep.atoms.at[1808, "molecule-ID"], 576)
self.assertEqual(pep.atoms.at[1808, "type"], 14)
self.assertAlmostEqual(pep.atoms.at[1808, "y"], 58.64352)
self.assertEqual(pep.atoms.at[1808, "nx"], -1)
self.assertAlmostEqual(pep.velocities.at[527, "vz"], -0.010889)
self.assertEqual(topo["Bonds"].at[47, "type"], 8)
self.assertEqual(topo["Bonds"].at[47, "atom2"], 54)
self.assertEqual(topo["Bonds"].at[953, "atom1"], 1384)
self.assertEqual(topo["Angles"].at[105, "type"], 19)
self.assertEqual(topo["Angles"].at[105, "atom3"], 51)
self.assertEqual(topo["Angles"].at[376, "atom2"], 772)
self.assertEqual(topo["Dihedrals"].at[151, "type"], 14)
self.assertEqual(topo["Dihedrals"].at[151, "atom4"], 51)
self.assertEqual(topo["Impropers"].at[4, "atom4"], 32)
# class 2 and comments
ethane = self.ethane
self.assertEqual(ethane.masses.shape, (2, 1))
self.assertEqual(ethane.atoms.shape, (8, 9))
class2 = ethane.force_field
self.assertEqual(class2["Pair Coeffs"].shape, (2, 2))
self.assertEqual(class2["Bond Coeffs"].shape, (2, 4))
self.assertEqual(class2["Angle Coeffs"].shape, (2, 4))
self.assertEqual(class2["Dihedral Coeffs"].shape, (1, 6))
self.assertEqual(class2["Improper Coeffs"].shape, (2, 2))
self.assertEqual(class2["BondBond Coeffs"].at[2, "coeff3"], 1.1010)
self.assertEqual(class2["BondAngle Coeffs"].at[2, "coeff4"], 1.1010)
self.assertEqual(class2["AngleAngle Coeffs"].at[2, "coeff6"], 107.6600)
self.assertEqual(class2["AngleAngle Coeffs"].at[2, "coeff6"], 107.6600)
self.assertEqual(class2["AngleAngleTorsion Coeffs"].at[1, "coeff3"], 110.7700)
self.assertEqual(class2["EndBondTorsion Coeffs"].at[1, "coeff8"], 1.1010)
self.assertEqual(class2["MiddleBondTorsion Coeffs"].at[1, "coeff4"], 1.5300)
self.assertEqual(class2["BondBond13 Coeffs"].at[1, "coeff3"], 1.1010)
self.assertEqual(class2["AngleTorsion Coeffs"].at[1, "coeff8"], 110.7700)
# tilt box and another atom_style
quartz = self.quartz
np.testing.assert_array_equal(quartz.box.tilt, [-2.456700, 0.0, 0.0])
self.assertListEqual(list(quartz.atoms.columns), ["type", "x", "y", "z"])
self.assertAlmostEqual(quartz.atoms.at[7, "x"], 0.299963)
# PairIJ Coeffs section
virus = self.virus
pairij = virus.force_field["PairIJ Coeffs"]
self.assertEqual(pairij.at[7, "id1"], 3)
self.assertEqual(pairij.at[7, "id2"], 3)
self.assertEqual(pairij.at[7, "coeff2"], 2.1)
# sort_id
atom_id = random.randint(1, 384)
self.assertEqual(self.tatb.atoms.loc[atom_id].name, atom_id)
def test_from_ff_and_topologies(self):
mass = OrderedDict()
mass["H"] = 1.0079401
mass["O"] = 15.999400
nonbond_coeffs = [[0.00774378, 0.98], [0.1502629, 3.1169]]
topo_coeffs = {
"Bond Coeffs": [{"coeffs": [176.864, 0.9611], "types": [("H", "O")]}],
"Angle Coeffs": [{"coeffs": [42.1845, 109.4712], "types": [("H", "O", "H")]}],
}
ff = ForceField(mass.items(), nonbond_coeffs, topo_coeffs)
with gzip.open(os.path.join(test_dir, "topologies_ice.json.gz")) as f:
topo_dicts = json.load(f)
topologies = [Topology.from_dict(d) for d in topo_dicts]
box = LammpsBox([[-0.75694412, 44.165558], [0.38127473, 47.066074], [0.17900842, 44.193867]])
ice = LammpsData.from_ff_and_topologies(box=box, ff=ff, topologies=topologies)
atoms = ice.atoms
bonds = ice.topology["Bonds"]
angles = ice.topology["Angles"]
np.testing.assert_array_equal(atoms.index.values, np.arange(1, len(atoms) + 1))
np.testing.assert_array_equal(bonds.index.values, np.arange(1, len(bonds) + 1))
np.testing.assert_array_equal(angles.index.values, np.arange(1, len(angles) + 1))
i = random.randint(0, len(topologies) - 1)
sample = topologies[i]
in_atoms = ice.atoms[ice.atoms["molecule-ID"] == i + 1]
np.testing.assert_array_equal(in_atoms.index.values, np.arange(3 * i + 1, 3 * i + 4))
np.testing.assert_array_equal(in_atoms["type"].values, [2, 1, 1])
np.testing.assert_array_equal(in_atoms["q"].values, sample.charges)
np.testing.assert_array_equal(in_atoms[["x", "y", "z"]].values, sample.sites.cart_coords)
broken_topo_coeffs = {
"Bond Coeffs": [{"coeffs": [176.864, 0.9611], "types": [("H", "O")]}],
"Angle Coeffs": [{"coeffs": [42.1845, 109.4712], "types": [("H", "H", "H")]}],
}
broken_ff = ForceField(mass.items(), nonbond_coeffs, broken_topo_coeffs)
ld_woangles = LammpsData.from_ff_and_topologies(box=box, ff=broken_ff, topologies=[sample])
self.assertNotIn("Angles", ld_woangles.topology)
def test_from_structure(self):
latt = Lattice.monoclinic(9.78746, 4.75058, 8.95892, 115.9693)
structure = Structure.from_spacegroup(
15,
latt,
["Os", "O", "O"],
[
[0, 0.25583, 0.75],
[0.11146, 0.46611, 0.91631],
[0.11445, 0.04564, 0.69518],
],
)
velocities = np.random.randn(20, 3) * 0.1
structure.add_site_property("velocities", velocities)
ld = LammpsData.from_structure(structure=structure, ff_elements=["O", "Os", "Na"])
i = random.randint(0, 19)
a = latt.matrix[0]
va = velocities[i].dot(a) / np.linalg.norm(a)
self.assertAlmostEqual(va, ld.velocities.loc[i + 1, "vx"])
self.assertAlmostEqual(velocities[i, 1], ld.velocities.loc[i + 1, "vy"])
np.testing.assert_array_almost_equal(ld.masses["mass"], [22.989769, 190.23, 15.9994])
np.testing.assert_array_equal(ld.atoms["type"], [2] * 4 + [3] * 16)
def test_json_dict(self):
encoded = json.dumps(self.ethane.as_dict())
decoded = json.loads(encoded)
c2h6 = LammpsData.from_dict(decoded)
pd.testing.assert_frame_equal(c2h6.masses, self.ethane.masses)
pd.testing.assert_frame_equal(c2h6.atoms, self.ethane.atoms)
ff = self.ethane.force_field
        key, target_df = random.sample(list(ff.items()), 1)[0]
self.assertIsNone(
pd.testing.assert_frame_equal(c2h6.force_field[key], target_df, check_dtype=False),
key,
)
topo = self.ethane.topology
        key, target_df = random.sample(list(topo.items()), 1)[0]
self.assertIsNone(pd.testing.assert_frame_equal(c2h6.topology[key], target_df), key)
@classmethod
def tearDownClass(cls):
tmpfiles = ["test1.data", "test2.data"]
for t in tmpfiles:
if os.path.exists(t):
os.remove(t)
class TopologyTest(unittest.TestCase):
def test_init(self):
inner_charge = np.random.rand(10) - 0.5
outer_charge = np.random.rand(10) - 0.5
inner_velo = np.random.rand(10, 3) - 0.5
outer_velo = np.random.rand(10, 3) - 0.5
m = Molecule(
["H"] * 10,
np.random.rand(10, 3) * 100,
site_properties={
"ff_map": ["D"] * 10,
"charge": inner_charge,
"velocities": inner_velo,
},
)
# q and v from site properties, while type from species_string
topo = Topology(sites=m)
self.assertListEqual(topo.type_by_sites, ["H"] * 10)
np.testing.assert_array_equal(topo.charges, inner_charge)
np.testing.assert_array_equal(topo.velocities, inner_velo)
# q and v from overriding, while type from site property
topo_override = Topology(sites=m, ff_label="ff_map", charges=outer_charge, velocities=outer_velo)
self.assertListEqual(topo_override.type_by_sites, ["D"] * 10)
np.testing.assert_array_equal(topo_override.charges, outer_charge)
np.testing.assert_array_equal(topo_override.velocities, outer_velo)
# test using a list of sites instead of SiteCollection
topo_from_list = Topology(sites=m.sites)
self.assertListEqual(topo_from_list.type_by_sites, topo.type_by_sites)
np.testing.assert_array_equal(topo_from_list.charges, topo.charges)
np.testing.assert_array_equal(topo_from_list.velocities, topo.velocities)
def test_from_bonding(self):
# He: no bonding topologies
helium = Molecule(["He"], [[0, 0, 0]])
topo_he = Topology.from_bonding(molecule=helium)
self.assertIsNone(topo_he.topologies)
# H2: 1 bond only
hydrogen = Molecule(["H"] * 2, [[0, 0, 0], [0, 0, 0.7414]])
topo_h = Topology.from_bonding(molecule=hydrogen)
tp_h = topo_h.topologies
self.assertListEqual(tp_h["Bonds"], [[0, 1]])
self.assertNotIn("Angles", tp_h)
self.assertNotIn("Dihedrals", tp_h)
# water: 2 bonds and 1 angle only
water = Molecule(
["O", "H", "H"],
[
[0.0000, 0.0000, 0.1173],
[0.0000, 0.7572, -0.4692],
[0.0000, -0.7572, -0.4692],
],
)
topo_water = Topology.from_bonding(molecule=water)
tp_water = topo_water.topologies
self.assertListEqual(tp_water["Bonds"], [[0, 1], [0, 2]])
self.assertListEqual(tp_water["Angles"], [[1, 0, 2]])
self.assertNotIn("Dihedrals", tp_water)
# EtOH
etoh = Molecule(
["C", "C", "O", "H", "H", "H", "H", "H", "H"],
[
[1.1879, -0.3829, 0.0000],
[0.0000, 0.5526, 0.0000],
[-1.1867, -0.2472, 0.0000],
[-1.9237, 0.3850, 0.0000],
[2.0985, 0.2306, 0.0000],
[1.1184, -1.0093, 0.8869],
[1.1184, -1.0093, -0.8869],
[-0.0227, 1.1812, 0.8852],
[-0.0227, 1.1812, -0.8852],
],
)
topo_etoh = Topology.from_bonding(molecule=etoh)
tp_etoh = topo_etoh.topologies
self.assertEqual(len(tp_etoh["Bonds"]), 8)
etoh_bonds = [[0, 1], [0, 4], [0, 5], [0, 6], [1, 2], [1, 7], [1, 8], [2, 3]]
np.testing.assert_array_equal(tp_etoh["Bonds"], etoh_bonds)
self.assertEqual(len(tp_etoh["Angles"]), 13)
etoh_angles = [
[1, 0, 4],
[1, 0, 5],
[1, 0, 6],
[4, 0, 5],
[4, 0, 6],
[5, 0, 6],
[0, 1, 2],
[0, 1, 7],
[0, 1, 8],
[2, 1, 7],
[2, 1, 8],
[7, 1, 8],
[1, 2, 3],
]
np.testing.assert_array_equal(tp_etoh["Angles"], etoh_angles)
self.assertEqual(len(tp_etoh["Dihedrals"]), 12)
etoh_dihedrals = [
[4, 0, 1, 2],
[4, 0, 1, 7],
[4, 0, 1, 8],
[5, 0, 1, 2],
[5, 0, 1, 7],
[5, 0, 1, 8],
[6, 0, 1, 2],
[6, 0, 1, 7],
[6, 0, 1, 8],
[0, 1, 2, 3],
[7, 1, 2, 3],
[8, 1, 2, 3],
]
np.testing.assert_array_equal(tp_etoh["Dihedrals"], etoh_dihedrals)
self.assertIsNotNone(json.dumps(topo_etoh.as_dict()))
# bond flag to off
topo_etoh0 = Topology.from_bonding(molecule=etoh, bond=False, angle=True, dihedral=True)
self.assertIsNone(topo_etoh0.topologies)
# angle or dihedral flag to off
topo_etoh1 = Topology.from_bonding(molecule=etoh, angle=False)
self.assertNotIn("Angles", topo_etoh1.topologies)
topo_etoh2 = Topology.from_bonding(molecule=etoh, dihedral=False)
self.assertNotIn("Dihedrals", topo_etoh2.topologies)
class ForceFieldTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
mass_info = [
("A", "H"),
("B", Element("C")),
("C", Element("O")),
("D", 1.00794),
]
nonbond_coeffs = [
[1, 1, 1.1225],
[1, 1.175, 1.31894],
[1, 1.55, 1.73988],
[1, 1, 1.1225],
[1, 1.35, 4],
[1, 1.725, 1.93631],
[1, 1.175, 1.31894],
[1, 2.1, 4],
[1, 1.55, 1.73988],
[1, 1, 1.1225],
]
topo_coeffs = {
"Bond Coeffs": [
{"coeffs": [50, 0.659469], "types": [("A", "B"), ("C", "D")]},
{"coeffs": [50, 0.855906], "types": [("B", "C")]},
]
}
cls.virus = ForceField(mass_info=mass_info, nonbond_coeffs=nonbond_coeffs, topo_coeffs=topo_coeffs)
cls.ethane = ForceField.from_file(os.path.join(test_dir, "ff_ethane.yaml"))
def test_init(self):
v = self.virus
self.assertListEqual(
v.mass_info,
[("A", 1.00794), ("B", 12.0107), ("C", 15.9994), ("D", 1.00794)],
)
self.assertEqual(v.masses.at[3, "mass"], 15.9994)
v_ff = v.force_field
self.assertNotIn("Pair Coeffs", v_ff)
self.assertEqual(v_ff["PairIJ Coeffs"].iat[5, 4], 1.93631)
self.assertEqual(v_ff["Bond Coeffs"].at[2, "coeff2"], 0.855906)
v_maps = v.maps
self.assertDictEqual(v_maps["Atoms"], {"A": 1, "B": 2, "C": 3, "D": 4})
self.assertDictEqual(
v_maps["Bonds"],
{
("A", "B"): 1,
("C", "D"): 1,
("B", "A"): 1,
("D", "C"): 1,
("B", "C"): 2,
("C", "B"): 2,
},
)
e = self.ethane
self.assertEqual(e.masses.at[1, "mass"], 12.01115)
e_ff = e.force_field
self.assertNotIn("PairIJ Coeffs", e_ff)
self.assertEqual(e_ff["Pair Coeffs"].at[1, "coeff2"], 3.854)
self.assertEqual(e_ff["Bond Coeffs"].at[2, "coeff4"], 844.6)
self.assertEqual(e_ff["Angle Coeffs"].at[2, "coeff4"], -2.4318)
self.assertEqual(e_ff["Dihedral Coeffs"].at[1, "coeff1"], -0.1432)
self.assertEqual(e_ff["Improper Coeffs"].at[2, "coeff2"], 0.0)
self.assertEqual(e_ff["BondBond Coeffs"].at[2, "coeff1"], 5.3316)
self.assertEqual(e_ff["BondAngle Coeffs"].at[1, "coeff3"], 1.53)
self.assertEqual(e_ff["MiddleBondTorsion Coeffs"].at[1, "coeff1"], -14.261)
self.assertEqual(e_ff["EndBondTorsion Coeffs"].at[1, "coeff1"], 0.213)
self.assertEqual(e_ff["AngleTorsion Coeffs"].at[1, "coeff3"], -0.2466)
self.assertEqual(e_ff["AngleAngleTorsion Coeffs"].at[1, "coeff1"], -12.564)
self.assertEqual(e_ff["BondBond13 Coeffs"].at[1, "coeff1"], 0.0)
self.assertEqual(e_ff["AngleAngle Coeffs"].at[1, "coeff2"], -0.4825)
e_maps = e.maps
self.assertDictEqual(e_maps["Atoms"], {"c4": 1, "h1": 2})
self.assertDictEqual(e_maps["Bonds"], {("c4", "c4"): 1, ("c4", "h1"): 2, ("h1", "c4"): 2})
self.assertDictEqual(
e_maps["Angles"],
{("c4", "c4", "h1"): 1, ("h1", "c4", "c4"): 1, ("h1", "c4", "h1"): 2},
)
self.assertEqual(
e_maps["Impropers"],
{
("c4", "c4", "h1", "h1"): 1,
("c4", "h1", "c4", "h1"): 1,
("h1", "h1", "c4", "c4"): 1,
("h1", "c4", "h1", "c4"): 1,
("h1", "c4", "h1", "h1"): 2,
("h1", "h1", "c4", "h1"): 2,
},
)
def test_to_file(self):
filename = "ff_test.yaml"
v = self.virus
v.to_file(filename=filename)
yml = yaml.YAML(typ="safe")
with open(filename, "r") as f:
d = yml.load(f)
self.assertListEqual(d["mass_info"], [list(m) for m in v.mass_info])
self.assertListEqual(d["nonbond_coeffs"], v.nonbond_coeffs)
def test_from_file(self):
e = self.ethane
self.assertListEqual(e.mass_info, [("c4", 12.01115), ("h1", 1.00797)])
np.testing.assert_array_equal(e.nonbond_coeffs, [[0.062, 3.854], [0.023, 2.878]])
e_tc = e.topo_coeffs
self.assertIn("Bond Coeffs", e_tc)
self.assertIn("BondAngle Coeffs", e_tc["Angle Coeffs"][0])
self.assertIn("BondBond Coeffs", e_tc["Angle Coeffs"][0])
self.assertIn("AngleAngleTorsion Coeffs", e_tc["Dihedral Coeffs"][0])
self.assertIn("AngleTorsion Coeffs", e_tc["Dihedral Coeffs"][0])
self.assertIn("BondBond13 Coeffs", e_tc["Dihedral Coeffs"][0])
self.assertIn("EndBondTorsion Coeffs", e_tc["Dihedral Coeffs"][0])
self.assertIn("MiddleBondTorsion Coeffs", e_tc["Dihedral Coeffs"][0])
self.assertIn("AngleAngle Coeffs", e_tc["Improper Coeffs"][0])
def test_from_dict(self):
d = self.ethane.as_dict()
json_str = json.dumps(d)
decoded = ForceField.from_dict(json.loads(json_str))
self.assertListEqual(decoded.mass_info, self.ethane.mass_info)
self.assertListEqual(decoded.nonbond_coeffs, self.ethane.nonbond_coeffs)
self.assertDictEqual(decoded.topo_coeffs, self.ethane.topo_coeffs)
@classmethod
def tearDownClass(cls):
if os.path.exists("ff_test.yaml"):
os.remove("ff_test.yaml")
class FuncTest(unittest.TestCase):
def test_lattice_2_lmpbox(self):
matrix = np.diag(np.random.randint(5, 14, size=(3,))) + np.random.rand(3, 3) * 0.2 - 0.1
init_latt = Lattice(matrix)
frac_coords = np.random.rand(10, 3)
init_structure = Structure(init_latt, ["H"] * 10, frac_coords)
origin = np.random.rand(3) * 10 - 5
box, symmop = lattice_2_lmpbox(lattice=init_latt, origin=origin)
boxed_latt = box.to_lattice()
np.testing.assert_array_almost_equal(init_latt.abc, boxed_latt.abc)
np.testing.assert_array_almost_equal(init_latt.angles, boxed_latt.angles)
cart_coords = symmop.operate_multi(init_structure.cart_coords) - origin
boxed_structure = Structure(boxed_latt, ["H"] * 10, cart_coords, coords_are_cartesian=True)
np.testing.assert_array_almost_equal(boxed_structure.frac_coords, frac_coords)
tetra_latt = Lattice.tetragonal(5, 5)
tetra_box, _ = lattice_2_lmpbox(tetra_latt)
self.assertIsNone(tetra_box.tilt)
ortho_latt = Lattice.orthorhombic(5, 5, 5)
ortho_box, _ = lattice_2_lmpbox(ortho_latt)
self.assertIsNone(ortho_box.tilt)
rot_tetra_latt = Lattice([[5, 0, 0], [0, 2, 2], [0, -2, 2]])
_, rotop = lattice_2_lmpbox(rot_tetra_latt)
np.testing.assert_array_almost_equal(
rotop.rotation_matrix,
[
[1, 0, 0],
[0, 2 ** 0.5 / 2, 2 ** 0.5 / 2],
[0, -(2 ** 0.5) / 2, 2 ** 0.5 / 2],
],
)
@unittest.skip("The function is deprecated")
def test_structure_2_lmpdata(self):
matrix = np.diag(np.random.randint(5, 14, size=(3,))) + np.random.rand(3, 3) * 0.2 - 0.1
latt = Lattice(matrix)
frac_coords = np.random.rand(10, 3)
structure = Structure(latt, ["H"] * 10, frac_coords)
ld = structure_2_lmpdata(structure=structure)
box_tilt = [0.0, 0.0, 0.0] if not ld.box_tilt else ld.box_tilt
box_bounds = np.array(ld.box_bounds)
np.testing.assert_array_equal(box_bounds[:, 0], np.zeros(3))
new_matrix = np.diag(box_bounds[:, 1])
new_matrix[1, 0] = box_tilt[0]
new_matrix[2, 0] = box_tilt[1]
new_matrix[2, 1] = box_tilt[2]
new_latt = Lattice(new_matrix)
np.testing.assert_array_almost_equal(new_latt.abc, latt.abc)
np.testing.assert_array_almost_equal(new_latt.angles, latt.angles)
coords = ld.atoms[["x", "y", "z"]].values
new_structure = Structure(new_latt, ["H"] * 10, coords, coords_are_cartesian=True)
np.testing.assert_array_almost_equal(new_structure.frac_coords, frac_coords)
self.assertEqual(len(ld.masses), 1)
# test additional elements
ld_elements = structure_2_lmpdata(structure=structure, ff_elements=["C", "H"])
self.assertEqual(len(ld_elements.masses), 2)
np.testing.assert_array_almost_equal(ld_elements.masses["mass"], [1.00794, 12.01070])
class CombinedDataTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.ec = LammpsData.from_file(filename=os.path.join(test_dir, "ec.data"))
cls.fec = LammpsData.from_file(filename=os.path.join(test_dir, "fec.data"))
cls.coord = CombinedData.parse_xyz(filename=os.path.join(test_dir, "ec_fec.xyz"))
cls.ec_fec1 = CombinedData.from_files(
os.path.join(test_dir, "ec_fec.xyz"),
[1200, 300],
os.path.join(test_dir, "ec.data"),
os.path.join(test_dir, "fec.data"),
)
cls.ec_fec2 = CombinedData.from_lammpsdata([cls.ec, cls.fec], ["EC", "FEC"], [1200, 300], cls.coord)
def test_from_files(self):
# general tests
ec_fec = self.ec_fec1
# header stats and Nos. of columns
self.assertEqual(ec_fec.names, ["cluster1", "cluster2"])
self.assertEqual(ec_fec.nums, [1200, 300])
self.assertEqual(ec_fec.masses.shape, (12, 1))
self.assertEqual(ec_fec.atoms.shape, (15000, 6))
self.assertListEqual(list(ec_fec.atoms.columns), ["molecule-ID", "type", "q", "x", "y", "z"])
topo = ec_fec.topology
self.assertEqual(topo["Bonds"].shape, (15000, 3))
self.assertEqual(topo["Angles"].shape, (25500, 4))
self.assertEqual(topo["Dihedrals"].shape, (42000, 5))
self.assertEqual(topo["Impropers"].shape, (1500, 5))
ff = ec_fec.force_field
self.assertEqual(ff["Pair Coeffs"].shape, (12, 2))
self.assertEqual(ff["Bond Coeffs"].shape, (15, 2))
self.assertEqual(ff["Angle Coeffs"].shape, (24, 2))
self.assertEqual(ff["Dihedral Coeffs"].shape, (39, 6))
self.assertEqual(ff["Improper Coeffs"].shape, (2, 3))
# header box
np.testing.assert_array_equal(
ec_fec.box.bounds,
[[-0.597365, 54.56835], [-0.597365, 54.56835], [-0.597365, 54.56835]],
)
# body
self.assertEqual(ec_fec.masses.at[7, "mass"], 1.008)
self.assertEqual(ff["Pair Coeffs"].at[9, "coeff2"], 3.750)
self.assertEqual(ff["Bond Coeffs"].at[5, "coeff2"], 1.0900)
self.assertEqual(ff["Angle Coeffs"].at[24, "coeff2"], 108.46005)
self.assertTrue(np.isnan(ff["Dihedral Coeffs"].at[30, "coeff6"]))
self.assertEqual(ff["Improper Coeffs"].at[2, "coeff1"], 10.5)
self.assertEqual(ec_fec.atoms.at[29, "molecule-ID"], 3)
self.assertEqual(ec_fec.atoms.at[29, "type"], 5)
self.assertEqual(ec_fec.atoms.at[29, "q"], 0.0755)
self.assertAlmostEqual(ec_fec.atoms.at[29, "x"], 14.442260)
self.assertEqual(ec_fec.atoms.at[14958, "molecule-ID"], 1496)
self.assertEqual(ec_fec.atoms.at[14958, "type"], 11)
self.assertAlmostEqual(ec_fec.atoms.at[14958, "y"], 41.010962)
self.assertEqual(topo["Bonds"].at[47, "type"], 5)
self.assertEqual(topo["Bonds"].at[47, "atom2"], 47)
self.assertEqual(topo["Bonds"].at[953, "atom1"], 951)
self.assertEqual(topo["Angles"].at[105, "type"], 2)
self.assertEqual(topo["Angles"].at[105, "atom3"], 63)
self.assertEqual(topo["Angles"].at[14993, "atom2"], 8815)
self.assertEqual(topo["Dihedrals"].at[151, "type"], 4)
self.assertEqual(topo["Dihedrals"].at[151, "atom4"], 55)
self.assertEqual(topo["Dihedrals"].at[41991, "type"], 30)
self.assertEqual(topo["Dihedrals"].at[41991, "atom2"], 14994)
self.assertEqual(topo["Impropers"].at[4, "atom4"], 34)
def test_from_lammpsdata(self):
# general tests
ec_fec = self.ec_fec2
# header stats and Nos. of columns
self.assertEqual(ec_fec.names, ["EC", "FEC"])
self.assertEqual(ec_fec.nums, [1200, 300])
self.assertEqual(ec_fec.masses.shape, (12, 1))
self.assertEqual(ec_fec.atoms.shape, (15000, 6))
self.assertListEqual(list(ec_fec.atoms.columns), ["molecule-ID", "type", "q", "x", "y", "z"])
topo = ec_fec.topology
self.assertEqual(topo["Bonds"].shape, (15000, 3))
self.assertEqual(topo["Angles"].shape, (25500, 4))
self.assertEqual(topo["Dihedrals"].shape, (42000, 5))
self.assertEqual(topo["Impropers"].shape, (1500, 5))
ff = ec_fec.force_field
self.assertEqual(ff["Pair Coeffs"].shape, (12, 2))
self.assertEqual(ff["Bond Coeffs"].shape, (15, 2))
self.assertEqual(ff["Angle Coeffs"].shape, (24, 2))
self.assertEqual(ff["Dihedral Coeffs"].shape, (39, 6))
self.assertEqual(ff["Improper Coeffs"].shape, (2, 3))
# header box
np.testing.assert_array_equal(
ec_fec.box.bounds,
[[-0.597365, 54.56835], [-0.597365, 54.56835], [-0.597365, 54.56835]],
)
# body
self.assertEqual(ec_fec.masses.at[7, "mass"], 1.008)
self.assertEqual(ff["Pair Coeffs"].at[9, "coeff2"], 3.750)
self.assertEqual(ff["Bond Coeffs"].at[5, "coeff2"], 1.0900)
self.assertEqual(ff["Angle Coeffs"].at[24, "coeff2"], 108.46005)
self.assertTrue(np.isnan(ff["Dihedral Coeffs"].at[30, "coeff6"]))
self.assertEqual(ff["Improper Coeffs"].at[2, "coeff1"], 10.5)
self.assertEqual(ec_fec.atoms.at[29, "molecule-ID"], 3)
self.assertEqual(ec_fec.atoms.at[29, "type"], 5)
self.assertEqual(ec_fec.atoms.at[29, "q"], 0.0755)
self.assertAlmostEqual(ec_fec.atoms.at[29, "x"], 14.442260)
self.assertEqual(ec_fec.atoms.at[14958, "molecule-ID"], 1496)
self.assertEqual(ec_fec.atoms.at[14958, "type"], 11)
self.assertAlmostEqual(ec_fec.atoms.at[14958, "y"], 41.010962)
self.assertEqual(topo["Bonds"].at[47, "type"], 5)
self.assertEqual(topo["Bonds"].at[47, "atom2"], 47)
self.assertEqual(topo["Bonds"].at[953, "atom1"], 951)
self.assertEqual(topo["Angles"].at[105, "type"], 2)
self.assertEqual(topo["Angles"].at[105, "atom3"], 63)
self.assertEqual(topo["Angles"].at[14993, "atom2"], 8815)
self.assertEqual(topo["Dihedrals"].at[151, "type"], 4)
self.assertEqual(topo["Dihedrals"].at[151, "atom4"], 55)
self.assertEqual(topo["Dihedrals"].at[41991, "type"], 30)
self.assertEqual(topo["Dihedrals"].at[41991, "atom2"], 14994)
self.assertEqual(topo["Impropers"].at[4, "atom4"], 34)
        # non-destructive use of the input (ID numbers preserved)
fec = self.fec
topo = fec.topology
ff = fec.force_field
self.assertEqual(ff["Pair Coeffs"].index[0], 1)
self.assertEqual(ff["Bond Coeffs"].index[0], 1)
self.assertEqual(ff["Angle Coeffs"].index[0], 1)
        self.assertEqual(ff["Dihedral Coeffs"].index[0], 1)
self.assertEqual(ff["Improper Coeffs"].index[0], 1)
self.assertEqual(fec.atoms.index[0], 1)
self.assertEqual(fec.atoms.at[1, "molecule-ID"], 1)
self.assertEqual(fec.atoms.at[1, "type"], 1)
self.assertEqual(topo["Bonds"].index[0], 1)
self.assertEqual(topo["Bonds"].at[1, "type"], 1)
self.assertEqual(topo["Bonds"].at[1, "atom1"], 1)
self.assertEqual(topo["Bonds"].at[1, "atom2"], 2)
self.assertEqual(topo["Angles"].index[0], 1)
self.assertEqual(topo["Angles"].at[1, "atom1"], 1)
self.assertEqual(topo["Angles"].at[1, "atom2"], 3)
self.assertEqual(topo["Angles"].at[1, "atom3"], 4)
self.assertEqual(topo["Dihedrals"].index[0], 1)
self.assertEqual(topo["Dihedrals"].at[1, "atom1"], 1)
self.assertEqual(topo["Dihedrals"].at[1, "atom2"], 3)
self.assertEqual(topo["Dihedrals"].at[1, "atom3"], 4)
self.assertEqual(topo["Dihedrals"].at[1, "atom4"], 5)
self.assertEqual(topo["Impropers"].index[0], 1)
self.assertEqual(topo["Impropers"].at[1, "atom1"], 5)
self.assertEqual(topo["Impropers"].at[1, "atom2"], 4)
self.assertEqual(topo["Impropers"].at[1, "atom3"], 3)
self.assertEqual(topo["Impropers"].at[1, "atom4"], 6)
def test_get_string(self):
# general tests
ec_fec_lines = self.ec_fec1.get_string().splitlines()
# header information
self.assertEqual(ec_fec_lines[1], "# 1200 cluster1 + 300 cluster2")
# data type consistency tests
self.assertEqual(ec_fec_lines[98], "1 harmonic 3.200000000 -1 2")
self.assertEqual(ec_fec_lines[109], "12 charmm 2.700000000 2 180 0.0")
self.assertEqual(
ec_fec_lines[113],
"16 multi/harmonic 0.382999522 -1.148998570 0.000000000 1.531998090 0.000000000",
)
self.assertEqual(ec_fec_lines[141], "1 10.5 -1 2")
if __name__ == "__main__":
unittest.main()
|
|
# -*- coding: utf-8 -*-
# custom
from mxm.midifile.src.data_type_converters import readBew, readVar, varLen, from_twos_complement, to_twos_complement
from mxm.midifile.src import constants as c
class EventDispatcher:
def __init__(self, event_handler):
"""
The event dispatcher generates events on the event_handler.
"""
# internal values, don't mess with 'em directly
self.event_handler = event_handler
# public flags
        # A note_on with a velocity of 0x00 is actually the same as a
        # note_off with a velocity of 0x40. When
        # "convert_zero_velocity" is set, zero-velocity note_on's are
        # automatically converted into note_off's. This is less
        # surprising behaviour for those who are not into the intimate
        # details of the midi spec.
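        # Illustrative example (editor's sketch): with convert_zero_velocity
        # enabled, an incoming (note_on, note=60, velocity=0) is dispatched
        # below as events.note_off(channel, 60, 0x40, use_running_status).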
self.convert_zero_velocity = False
        # If dispatch_continuos_controllers is true, continuous
        # controllers get dispatched to their defined handlers. Else
        # they just trigger the "continuous_controller" event handler.
self.dispatch_continuos_controllers = 1 # NOT IMPLEMENTED YET
        # If dispatch_meta_events is true, meta events get dispatched
        # to their defined handlers. Else they all trigger the
        # "meta_event" handler.
self.dispatch_meta_events = 1
def header(self, format, nTracks, division):
"Triggers the header event"
self.event_handler.header(format, nTracks, division)
def start_of_track(self, current_track):
"Triggers the start of track event"
self.event_handler.set_current_track(current_track)
self.event_handler.start_of_track(current_track)
def sysex_event(self, data): # files use sequencer specific, sysex is for live midi
"Dispatcher for sysex events"
self.event_handler.sysex_event(data)
def eof(self):
"End of file!"
self.event_handler.eof()
def update_time(self, new_time=0, relative=1):
"Updates relative/absolute time."
self.event_handler.update_time(new_time=new_time, relative=relative)
def reset_time(self):
"Updates relative/absolute time."
self.event_handler.reset_time()
    # wrapping the event handler's running status methods
def set_running_status(self, *args):
"set the running status"
self.event_handler.set_running_status(*args)
def get_running_status(self):
"Get the running status"
        return self.event_handler.get_running_status()
def reset_running_status(self):
"reset the running status"
self.event_handler.reset_running_status()
# Event dispatchers for similar types of events
def channel_message(self, hi_nible, channel, data, use_running_status=False):
"""
Dispatches channel messages
"""
events = self.event_handler
if (c.NOTE_ON) == hi_nible:
note, velocity = data
# note_on with velocity 0x00 are same as note
# off with velocity 0x40 according to spec!
if velocity==0 and self.convert_zero_velocity:
events.note_off(channel, note, 0x40, use_running_status)
else:
events.note_on(channel, note, velocity, use_running_status)
elif (c.NOTE_OFF) == hi_nible:
note, velocity = data
events.note_off(channel, note, velocity, use_running_status)
elif (c.AFTERTOUCH) == hi_nible:
note, velocity = data
events.aftertouch(channel, note, velocity, use_running_status)
elif (c.CONTINUOUS_CONTROLLER) == hi_nible:
controller, value = data
# A lot of the cc's are defined, so we trigger those directly
if self.dispatch_continuos_controllers:
self.continuous_controllers(channel, controller, value, use_running_status)
else:
events.continuous_controller(channel, controller, value, use_running_status)
elif (c.PATCH_CHANGE) == hi_nible:
program = data[0]
events.patch_change(channel, program, use_running_status)
elif (c.CHANNEL_PRESSURE) == hi_nible:
pressure = data[0]
events.channel_pressure(channel, pressure, use_running_status)
elif (c.PITCH_BEND) == hi_nible:
hibyte, lobyte = data
value = (hibyte<<7) + lobyte
events.pitch_bend(channel, value, use_running_status)
else:
raise ValueError('Illegal channel message! %.x' % hi_nible)
def continuous_controllers(self, channel, controller, value, use_running_status):
"""
Dispatches continuous_controllers messages
"""
events = self.event_handler
        # I am not really sure if I ought to dispatch continuous controllers.
        # There are so many of them that it can clutter up the MidiEvents
# classes.
# So I just trigger the default event handler
events.continuous_controller(channel, controller, value, use_running_status)
def system_commons(self, common_type, common_data):
"Dispatches system common messages"
events = self.event_handler
# MTC Midi time code Quarter value
if common_type == c.MTC:
data = readBew(common_data)
            msg_type = (data & 0x70) >> 4
values = (data & 0x0F)
events.midi_time_code(msg_type, values)
elif common_type == c.SONG_POSITION_POINTER:
hibyte, lobyte = common_data
value = (hibyte<<7) + lobyte
events.song_position_pointer(value)
elif common_type == c.SONG_SELECT:
data = readBew(common_data)
events.song_select(data)
elif common_type == c.TUNING_REQUEST:
# no data then
events.tuning_request(time=None)
def meta_event(self, meta_type, data):
"Dispatches meta events"
events = self.event_handler
# SEQUENCE_NUMBER = 0x00 (00 02 ss ss (seq-number))
if meta_type == c.SEQUENCE_NUMBER:
number = readBew(data)
events.sequence_number(number)
# TEXT = 0x01 (01 len text...)
elif meta_type == c.TEXT:
events.text(data)
# COPYRIGHT = 0x02 (02 len text...)
elif meta_type == c.COPYRIGHT:
events.copyright(data)
# SEQUENCE_NAME = 0x03 (03 len text...)
elif meta_type == c.SEQUENCE_NAME:
events.sequence_name(data)
# INSTRUMENT_NAME = 0x04 (04 len text...)
elif meta_type == c.INSTRUMENT_NAME:
events.instrument_name(data)
# LYRIC = 0x05 (05 len text...)
elif meta_type == c.LYRIC:
events.lyric(data)
# MARKER = 0x06 (06 len text...)
elif meta_type == c.MARKER:
events.marker(data)
# CUEPOINT = 0x07 (07 len text...)
elif meta_type == c.CUEPOINT:
events.cuepoint(data)
        # PROGRAM_NAME = 0x08 (08 len text...)
elif meta_type == c.PROGRAM_NAME:
events.program_name(data)
# DEVICE_NAME = 0x09 (09 len text...)
elif meta_type == c.DEVICE_NAME:
events.device_name(data)
# MIDI_CH_PREFIX = 0x20 (20 01 channel)
elif meta_type == c.MIDI_CH_PREFIX:
channel = readBew(data)
events.midi_ch_prefix(channel)
# MIDI_PORT = 0x21 (21 01 port (legacy stuff))
elif meta_type == c.MIDI_PORT:
port = readBew(data)
events.midi_port(port)
        # END_OF_TRACK = 0x2F (2F 00)
elif meta_type == c.END_OF_TRACK:
events.end_of_track()
# TEMPO = 0x51 (51 03 tt tt tt (tempo in us/quarternote))
elif meta_type == c.TEMPO:
b1, b2, b3 = data
# uses 3 bytes to represent time between quarter
# notes in microseconds
events.tempo((b1<<16) + (b2<<8) + b3)
# SMTP_OFFSET = 0x54 (0x54 05 hh mm ss ff xx)
elif meta_type == c.SMTP_OFFSET:
hour, minute, second, frame, framePart = data
events.smtp_offset(
hour, minute, second, frame, framePart)
# TIME_SIGNATURE = 0x58 (58 04 nn dd cc bb)
elif meta_type == c.TIME_SIGNATURE:
nn, dd, cc, bb = data
events.time_signature(nn, dd, cc, bb)
# KEY_SIGNATURE = 0x59 (59 02 sf mi)
elif meta_type == c.KEY_SIGNATURE:
sf, mi = data
sf = from_twos_complement(sf)
events.key_signature(sf, mi)
# SPECIFIC = 0x7F (Sequencer specific event)
elif meta_type == c.SEQUENCER_SPECIFIC:
meta_data = data
if meta_data[0] == 0:
id = meta_data[:3]
meta_data = meta_data[3:]
else:
id = meta_data[0:1]
meta_data = meta_data[1:]
events.sequencer_specific(id, meta_data)
# Handles any undefined meta events
else: # undefined meta type
meta_data = data
events.meta_event(meta_type, meta_data)
if __name__ == '__main__':
import doctest
doctest.testmod() # run test on inline examples first
|
|
import logging
import struct
import zlib
from kafka.codec import (
gzip_encode, gzip_decode, snappy_encode, snappy_decode
)
from kafka.common import (
BrokerMetadata, PartitionMetadata, Message, OffsetAndMessage,
ProduceResponse, FetchResponse, OffsetResponse,
OffsetCommitResponse, OffsetFetchResponse,
)
from kafka.exception import *
from kafka.util import (
read_short_string, read_int_string, relative_unpack,
write_short_string, write_int_string, group_by_topic_and_partition
)
log = logging.getLogger("kafka")
class KafkaProtocol(object):
"""
Class to encapsulate all of the protocol encoding/decoding.
This class does not have any state associated with it, it is purely
for organization.
"""
PRODUCE_KEY = 0
FETCH_KEY = 1
OFFSET_KEY = 2
METADATA_KEY = 3
OFFSET_COMMIT_KEY = 8
OFFSET_FETCH_KEY = 9
ATTRIBUTE_CODEC_MASK = 0x03
CODEC_NONE = 0x00
CODEC_GZIP = 0x01
CODEC_SNAPPY = 0x02
ERROR_CODE_MAPPING = {
1: OffsetOutOfRange,
2: InvalidMessage,
3: UnknownTopicOrPartition,
4: InvalidMessageSize,
5: LeaderNotAvailable,
6: NotLeaderForPartition,
7: RequestTimedOut,
8: BrokerNotAvailable,
9: ReplicaNotAvailable,
10: MessageSizeTooLarge,
11: StaleControllerEpochCode,
12: OffsetMetadataTooLargeCode,
}
###################
# Private API #
###################
@classmethod
def _encode_message_header(cls, client_id, correlation_id, request_key):
"""
Encode the common request envelope
"""
return struct.pack('>hhih%ds' % len(client_id),
request_key, # ApiKey
0, # ApiVersion
correlation_id, # CorrelationId
len(client_id),
client_id) # ClientId
@classmethod
def _encode_message_set(cls, messages):
"""
Encode a MessageSet. Unlike other arrays in the protocol,
MessageSets are not length-prefixed
Format
======
MessageSet => [Offset MessageSize Message]
Offset => int64
MessageSize => int32
"""
message_set = ""
for message in messages:
encoded_message = KafkaProtocol._encode_message(message)
message_set += struct.pack('>qi%ds' % len(encoded_message), 0,
len(encoded_message), encoded_message)
return message_set
@classmethod
def _encode_message(cls, message):
"""
Encode a single message.
The magic number of a message is a format version number.
The only supported magic number right now is zero
Format
======
Message => Crc MagicByte Attributes Key Value
Crc => int32
MagicByte => int8
Attributes => int8
Key => bytes
Value => bytes
"""
if message.magic == 0:
msg = struct.pack('>BB', message.magic, message.attributes)
msg += write_int_string(message.key)
msg += write_int_string(message.value)
crc = zlib.crc32(msg)
msg = struct.pack('>i%ds' % len(msg), crc, msg)
else:
raise Exception("Unexpected magic number: %d" % message.magic)
return msg
@classmethod
def _decode_message_set_iter(cls, data):
"""
Iteratively decode a MessageSet
Reads repeated elements of (offset, message), calling decode_message
        to decode a single message. Since compressed messages contain further
MessageSets, these two methods have been decoupled so that they may
recurse easily.
"""
cur = 0
read_message = False
while cur < len(data):
try:
((offset, ), cur) = relative_unpack('>q', data, cur)
(msg, cur) = read_int_string(data, cur)
for (offset, message) in KafkaProtocol._decode_message(msg, offset):
read_message = True
yield OffsetAndMessage(offset, message)
except BufferUnderflowError:
if read_message is False:
# If we get a partial read of a message, but haven't
                    # yielded anything, there's a problem
raise ConsumerFetchSizeTooSmall()
else:
raise StopIteration()
@classmethod
def _decode_message(cls, data, offset):
"""
Decode a single Message
The only caller of this method is decode_message_set_iter.
They are decoupled to support nested messages (compressed MessageSets).
The offset is actually read from decode_message_set_iter (it is part
of the MessageSet payload).
"""
((crc, magic, att), cur) = relative_unpack('>iBB', data, 0)
if crc != zlib.crc32(data[4:]):
raise ChecksumError("Message checksum failed")
(key, cur) = read_int_string(data, cur)
(value, cur) = read_int_string(data, cur)
codec = att & KafkaProtocol.ATTRIBUTE_CODEC_MASK
if codec == KafkaProtocol.CODEC_NONE:
yield (offset, Message(magic, att, key, value))
elif codec == KafkaProtocol.CODEC_GZIP:
gz = gzip_decode(value)
for (offset, msg) in KafkaProtocol._decode_message_set_iter(gz):
yield (offset, msg)
elif codec == KafkaProtocol.CODEC_SNAPPY:
snp = snappy_decode(value)
for (offset, msg) in KafkaProtocol._decode_message_set_iter(snp):
yield (offset, msg)
##################
# Public API #
##################
@classmethod
def encode_produce_request(cls, client_id, correlation_id,
payloads=None, acks=1, timeout=1000):
"""
Encode some ProduceRequest structs
Params
======
client_id: string
correlation_id: string
payloads: list of ProduceRequest
acks: How "acky" you want the request to be
0: immediate response
1: written to disk by the leader
            2+: waits for this many replicas to sync
-1: waits for all replicas to be in sync
timeout: Maximum time the server will wait for acks from replicas.
This is _not_ a socket timeout
"""
payloads = [] if payloads is None else payloads
grouped_payloads = group_by_topic_and_partition(payloads)
message = cls._encode_message_header(client_id, correlation_id,
KafkaProtocol.PRODUCE_KEY)
message += struct.pack('>hii', acks, timeout, len(grouped_payloads))
for topic, topic_payloads in grouped_payloads.items():
message += struct.pack('>h%dsi' % len(topic),
len(topic), topic, len(topic_payloads))
for partition, payload in topic_payloads.items():
msg_set = KafkaProtocol._encode_message_set(payload.messages)
message += struct.pack('>ii%ds' % len(msg_set), partition,
len(msg_set), msg_set)
return struct.pack('>i%ds' % len(message), len(message), message)
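    # Hedged usage sketch (editor's note): assuming ProduceRequest is the
    # (topic, partition, messages) namedtuple from kafka.common, a request
    # could be built roughly like:
    #   req = ProduceRequest("my-topic", 0, [create_message("payload")])
    #   wire = KafkaProtocol.encode_produce_request("client", 1234, [req])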
@classmethod
def decode_produce_response(cls, data):
"""
Decode bytes to a ProduceResponse
Params
======
data: bytes to decode
"""
((correlation_id, num_topics), cur) = relative_unpack('>ii', data, 0)
for i in range(num_topics):
((strlen,), cur) = relative_unpack('>h', data, cur)
topic = data[cur:cur + strlen]
cur += strlen
((num_partitions,), cur) = relative_unpack('>i', data, cur)
for i in range(num_partitions):
((partition, error, offset), cur) = relative_unpack('>ihq',
data, cur)
yield ProduceResponse(topic, partition, error, offset)
@classmethod
def encode_fetch_request(cls, client_id, correlation_id, payloads=None,
max_wait_time=100, min_bytes=4096):
"""
Encodes some FetchRequest structs
Params
======
client_id: string
correlation_id: string
payloads: list of FetchRequest
max_wait_time: int, how long to block waiting on min_bytes of data
min_bytes: int, the minimum number of bytes to accumulate before
returning the response
"""
payloads = [] if payloads is None else payloads
grouped_payloads = group_by_topic_and_partition(payloads)
message = cls._encode_message_header(client_id, correlation_id,
KafkaProtocol.FETCH_KEY)
# -1 is the replica id
message += struct.pack('>iiii', -1, max_wait_time, min_bytes,
len(grouped_payloads))
for topic, topic_payloads in grouped_payloads.items():
message += write_short_string(topic)
message += struct.pack('>i', len(topic_payloads))
for partition, payload in topic_payloads.items():
message += struct.pack('>iqi', partition, payload.offset,
payload.max_bytes)
return struct.pack('>i%ds' % len(message), len(message), message)
@classmethod
def decode_fetch_response(cls, data):
"""
Decode bytes to a FetchResponse
Params
======
data: bytes to decode
"""
((correlation_id, num_topics), cur) = relative_unpack('>ii', data, 0)
for i in range(num_topics):
(topic, cur) = read_short_string(data, cur)
((num_partitions,), cur) = relative_unpack('>i', data, cur)
for i in range(num_partitions):
((partition, error, highwater_mark_offset), cur) = \
relative_unpack('>ihq', data, cur)
(message_set, cur) = read_int_string(data, cur)
yield FetchResponse(
topic, partition, error,
highwater_mark_offset,
KafkaProtocol._decode_message_set_iter(message_set))
@classmethod
def encode_offset_request(cls, client_id, correlation_id, payloads=None):
payloads = [] if payloads is None else payloads
grouped_payloads = group_by_topic_and_partition(payloads)
message = cls._encode_message_header(client_id, correlation_id,
KafkaProtocol.OFFSET_KEY)
# -1 is the replica id
message += struct.pack('>ii', -1, len(grouped_payloads))
for topic, topic_payloads in grouped_payloads.items():
message += write_short_string(topic)
message += struct.pack('>i', len(topic_payloads))
for partition, payload in topic_payloads.items():
message += struct.pack('>iqi', partition, payload.time,
payload.max_offsets)
return struct.pack('>i%ds' % len(message), len(message), message)
@classmethod
def decode_offset_response(cls, data):
"""
Decode bytes to an OffsetResponse
Params
======
data: bytes to decode
"""
((correlation_id, num_topics), cur) = relative_unpack('>ii', data, 0)
for i in range(num_topics):
(topic, cur) = read_short_string(data, cur)
((num_partitions,), cur) = relative_unpack('>i', data, cur)
for i in range(num_partitions):
((partition, error, num_offsets,), cur) = \
relative_unpack('>ihi', data, cur)
offsets = []
for j in range(num_offsets):
((offset,), cur) = relative_unpack('>q', data, cur)
offsets.append(offset)
yield OffsetResponse(topic, partition, error, tuple(offsets))
@classmethod
def encode_metadata_request(cls, client_id, correlation_id, topics=None):
"""
Encode a MetadataRequest
Params
======
client_id: string
correlation_id: string
topics: list of strings
"""
topics = [] if topics is None else topics
message = cls._encode_message_header(client_id, correlation_id,
KafkaProtocol.METADATA_KEY)
message += struct.pack('>i', len(topics))
for topic in topics:
message += struct.pack('>h%ds' % len(topic), len(topic), topic)
return write_int_string(message)
@classmethod
def decode_metadata_response(cls, data):
"""
Decode bytes to a MetadataResponse
Params
======
data: bytes to decode
"""
((correlation_id, numbrokers), cur) = relative_unpack('>ii', data, 0)
# Broker info
brokers = {}
for i in range(numbrokers):
((nodeId, ), cur) = relative_unpack('>i', data, cur)
(host, cur) = read_short_string(data, cur)
((port,), cur) = relative_unpack('>i', data, cur)
brokers[nodeId] = BrokerMetadata(nodeId, host, port)
# Topic info
((num_topics,), cur) = relative_unpack('>i', data, cur)
topic_metadata = {}
for i in range(num_topics):
((topic_error,), cur) = relative_unpack('>h', data, cur)
(topic_name, cur) = read_short_string(data, cur)
((num_partitions,), cur) = relative_unpack('>i', data, cur)
partition_metadata = {}
for j in range(num_partitions):
((partition_error_code, partition, leader, numReplicas), cur) = \
relative_unpack('>hiii', data, cur)
(replicas, cur) = relative_unpack(
'>%di' % numReplicas, data, cur)
((num_isr,), cur) = relative_unpack('>i', data, cur)
(isr, cur) = relative_unpack('>%di' % num_isr, data, cur)
partition_metadata[partition] = \
PartitionMetadata(
topic_name, partition, leader, replicas, isr)
topic_metadata[topic_name] = partition_metadata
return brokers, topic_metadata
@classmethod
def encode_offset_commit_request(cls, client_id, correlation_id,
group, payloads):
"""
Encode some OffsetCommitRequest structs
Params
======
client_id: string
correlation_id: string
group: string, the consumer group you are committing offsets for
payloads: list of OffsetCommitRequest
"""
grouped_payloads = group_by_topic_and_partition(payloads)
message = cls._encode_message_header(client_id, correlation_id,
KafkaProtocol.OFFSET_COMMIT_KEY)
message += write_short_string(group)
message += struct.pack('>i', len(grouped_payloads))
for topic, topic_payloads in grouped_payloads.items():
message += write_short_string(topic)
message += struct.pack('>i', len(topic_payloads))
for partition, payload in topic_payloads.items():
message += struct.pack('>iq', partition, payload.offset)
message += write_short_string(payload.metadata)
return struct.pack('>i%ds' % len(message), len(message), message)
@classmethod
def decode_offset_commit_response(cls, data):
"""
Decode bytes to an OffsetCommitResponse
Params
======
data: bytes to decode
"""
((correlation_id,), cur) = relative_unpack('>i', data, 0)
((num_topics,), cur) = relative_unpack('>i', data, cur)
for i in xrange(num_topics):
(topic, cur) = read_short_string(data, cur)
((num_partitions,), cur) = relative_unpack('>i', data, cur)
for i in xrange(num_partitions):
((partition, error), cur) = relative_unpack('>ih', data, cur)
yield OffsetCommitResponse(topic, partition, error)
@classmethod
def encode_offset_fetch_request(cls, client_id, correlation_id,
group, payloads):
"""
Encode some OffsetFetchRequest structs
Params
======
client_id: string
correlation_id: string
group: string, the consumer group you are fetching offsets for
payloads: list of OffsetFetchRequest
"""
grouped_payloads = group_by_topic_and_partition(payloads)
message = cls._encode_message_header(client_id, correlation_id,
KafkaProtocol.OFFSET_FETCH_KEY)
message += write_short_string(group)
message += struct.pack('>i', len(grouped_payloads))
for topic, topic_payloads in grouped_payloads.items():
message += write_short_string(topic)
message += struct.pack('>i', len(topic_payloads))
for partition, payload in topic_payloads.items():
message += struct.pack('>i', partition)
return struct.pack('>i%ds' % len(message), len(message), message)
@classmethod
def decode_offset_fetch_response(cls, data):
"""
Decode bytes to an OffsetFetchResponse
Params
======
data: bytes to decode
"""
((correlation_id,), cur) = relative_unpack('>i', data, 0)
((num_topics,), cur) = relative_unpack('>i', data, cur)
for i in range(num_topics):
(topic, cur) = read_short_string(data, cur)
((num_partitions,), cur) = relative_unpack('>i', data, cur)
for i in range(num_partitions):
((partition, offset), cur) = relative_unpack('>iq', data, cur)
(metadata, cur) = read_short_string(data, cur)
((error,), cur) = relative_unpack('>h', data, cur)
yield OffsetFetchResponse(topic, partition, offset,
metadata, error)
def create_message(payload, key=None):
"""
Construct a Message
Params
======
payload: bytes, the payload to send to Kafka
key: bytes, a key used for partition routing (optional)
"""
return Message(0, 0, key, payload)
def create_gzip_message(payloads, key=None):
"""
Construct a Gzipped Message containing multiple Messages
The given payloads will be encoded, compressed, and sent as a single atomic
message to Kafka.
Params
======
    payloads: list(bytes), a list of payloads to be sent to Kafka
key: bytes, a key used for partition routing (optional)
"""
message_set = KafkaProtocol._encode_message_set(
[create_message(payload) for payload in payloads])
gzipped = gzip_encode(message_set)
codec = KafkaProtocol.ATTRIBUTE_CODEC_MASK & KafkaProtocol.CODEC_GZIP
return Message(0, 0x00 | codec, key, gzipped)
def create_snappy_message(payloads, key=None):
"""
Construct a Snappy Message containing multiple Messages
The given payloads will be encoded, compressed, and sent as a single atomic
message to Kafka.
Params
======
    payloads: list(bytes), a list of payloads to be sent to Kafka
key: bytes, a key used for partition routing (optional)
"""
message_set = KafkaProtocol._encode_message_set(
[create_message(payload) for payload in payloads])
snapped = snappy_encode(message_set)
codec = KafkaProtocol.ATTRIBUTE_CODEC_MASK & KafkaProtocol.CODEC_SNAPPY
return Message(0, 0x00 | codec, key, snapped)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in array_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from math import ceil
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
@ops.RegisterGradient("Pack")
def _PackGrad(op, grad):
"""Gradient for pack op."""
return array_ops.unpack(grad, num=op.get_attr("N"), axis=op.get_attr("axis"))
@ops.RegisterGradient("Unpack")
def _UnpackGrad(op, *grads):
"""Gradient for unpack op."""
return array_ops.pack(grads, axis=op.get_attr("axis"))
def _ConcatGradHelper(op, grad, start_value_index, end_value_index, dim_index):
"""Gradient for concat op.
Args:
op: An operation.
grad: `Tensor` or `IndexedSlices` representing the gradients with respect
to each output of the op.
start_value_index: An integer index of the first value in the op.inputs.
end_value_index: An integer index of the last value in the op.inputs.
    dim_index: An integer index of concat_dim or axis parameter in op.inputs.
Returns:
    Tensors representing the partial gradients with respect to each input
of the op.
Raises:
ValueError: if concat_dim/axis is not statically known.
"""
def _CreateDenseMaskAndBegin(sizes, concat_dim):
"""Create variables for iteratively slicing a dense gradients tensor."""
# Since shape is 1-D, shape_of_shape = [rank-of-inputs]
shape_of_shape = array_ops.shape(sizes[dim_index])
# Make a vector of length equal to the input's dimensions,
# with 0's everywhere and 1 in the concat dim position.
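    # e.g. for rank-3 inputs and concat_dim == 1 this yields
    # mask = [0, 1, 0] and begin = [0, 0, 0] (editor's illustration).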
# Note: Can't use sparse_to_dense since it isn't GPU-capable (for now)
mask = array_ops.concat_v2(
[array_ops.fill(
array_ops.expand_dims(concat_dim, 0), 0),
[1],
array_ops.fill(
shape_of_shape - concat_dim - 1, 0)],
0)
begin = array_ops.fill(shape_of_shape, 0)
return mask, begin
def _ExtractInputShapes(inputs):
"""Extract the shapes of a set of input tensors."""
sizes = []
fully_known = True
for x in inputs:
input_shape = array_ops.shape(x)
if not isinstance(input_shape,
ops.Tensor) or input_shape.op.type != "Const":
fully_known = False
break
else:
sizes.append(input_shape)
if fully_known:
return sizes
else:
return array_ops.shape_n(inputs)
# Degenerate concatenation, just return grad.
if len(op.inputs) == 2:
return grad + [None] if end_value_index <= dim_index else [None] + grad
concat_dim = op.inputs[dim_index]
input_values = op.inputs[start_value_index:end_value_index]
out_grads = []
if isinstance(grad, ops.Tensor):
# Get the inputs' tensor shapes
sizes = _ExtractInputShapes(input_values)
# The following line to be enabled once ready
# if len(sizes) > 16:
# sizes = array_ops.squeeze(array_ops.slice(
# array_ops.pack(sizes, axis=1), [concat_dim, 0], [1, -1]))
# out_grads = array_ops.split_v(grad, sizes, concat_dim)
# else:
# pylint: disable=protected-access
offset = gen_array_ops._concat_offset(concat_dim, sizes)
# pylint: enable=protected-access
for (begin, size) in zip(offset, sizes):
out_grads.append(array_ops.slice(grad, begin, size))
elif isinstance(grad, ops.IndexedSlices):
concat_dim_static = tensor_util.constant_value(concat_dim)
if concat_dim_static is None:
raise ValueError("Can only compute IndexedSlices gradient with "
"statically-known concat_dim")
# Get the inputs' tensor shapes
sizes = [array_ops.shape(x) for x in input_values]
if concat_dim_static > 0:
# IndexedSlices, concat_dim > 0. Each input gets IndexedSlices gradients
# with all the indices, but with grad.values sliced accordingly. This
# is like the Tensor case, except shape(grad.values)[0] is not equal to
# shape(sizes[i])[0], since only a subset of the dim-0 values are stored.
mask, begin = _CreateDenseMaskAndBegin(sizes, concat_dim)
for size in sizes:
new_values = array_ops.slice(
grad.values,
begin,
array_ops.concat_v2(
[[-1], array_ops.slice(size, [1], [-1])], 0))
out_grads.append(
ops.IndexedSlices(new_values, grad.indices, size))
# Lint complains begin = begin + ...
begin = math_ops.add(begin, size * mask)
else:
# IndexedSlices, concat_dim == 0. Each input gets IndexedSlices gradients
# only for the relevant indices.
start = constant_op.constant(0, dtype=grad.indices.dtype)
for size in sizes:
size_concat_dim = array_ops.gather(size, concat_dim)
if size_concat_dim.dtype != grad.indices.dtype:
size_concat_dim = math_ops.cast(size_concat_dim,
dtype=grad.indices.dtype)
end = start + size_concat_dim
# Compute the 1-D Tensor of indices relevant for this input.
indices_to_select = array_ops.squeeze(
array_ops.where(math_ops.logical_and(grad.indices >= start,
grad.indices < end)),
squeeze_dims=[1])
new_indices = array_ops.gather(grad.indices, indices_to_select) - start
new_values = array_ops.gather(grad.values, indices_to_select)
out_grads.append(
ops.IndexedSlices(new_values, new_indices, size))
start = end
else:
raise TypeError("Expected Tensor or IndexedSlices, got %s" % type(grad))
return (out_grads + [None] if end_value_index <= dim_index
else [None] + out_grads)
@ops.RegisterGradient("Concat")
def _ConcatGrad(op, grad):
return _ConcatGradHelper(
op, grad, start_value_index=1, end_value_index=len(op.inputs),
dim_index=0)
@ops.RegisterGradient("ConcatV2")
def _ConcatGradV2(op, grad):
return _ConcatGradHelper(
op, grad, start_value_index=0, end_value_index=-1, dim_index=-1)
ops.NotDifferentiable("ConcatOffset")
@ops.RegisterGradient("Slice")
def _SliceGrad(op, grad):
"""Gradient for Slice op."""
# Create an Nx2 padding where the first column represents how many
# zeros are to be prepended for each dimension, and the second
# column indicates how many zeros are appended.
#
# The number of zeros to append is the shape of the input
# elementwise-subtracted by both the begin vector and sizes vector.
#
# Some more reshaping is needed to assemble this tensor with the
# right dimensions.
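  # Worked example (editor's sketch): input shape [5, 7], begin = [1, 2],
  # slice size = [2, 3] gives before_pad = [[1], [2]], after_pad = [[2], [2]],
  # so paddings = [[1, 2], [2, 2]] and pad(grad) has shape [5, 7] again.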
input_vec = op.inputs[0]
begin_vec = op.inputs[1]
input_rank = array_ops.rank(input_vec)
slice_size = array_ops.shape(op.outputs[0])
shape = array_ops.pack([input_rank, 1])
before_pad = array_ops.reshape(begin_vec, shape)
after_pad = array_ops.reshape(
array_ops.shape(input_vec) - slice_size - begin_vec, shape)
paddings = array_ops.concat(1, [before_pad, after_pad])
return array_ops.pad(grad, paddings), None, None
@ops.RegisterGradient("StridedSlice")
def _StridedSliceGrad(op, grad):
"""Gradient for StridedSlice op."""
x = array_ops.shape(op.inputs[0])
begin = op.inputs[1]
end = op.inputs[2]
strides = op.inputs[3]
return array_ops.strided_slice_grad(
x,
begin,
end,
strides,
grad,
begin_mask=op.get_attr("begin_mask"),
end_mask=op.get_attr("end_mask"),
ellipsis_mask=op.get_attr("ellipsis_mask"),
new_axis_mask=op.get_attr("new_axis_mask"),
shrink_axis_mask=op.get_attr("shrink_axis_mask")), None, None, None
@ops.RegisterGradient("StridedSliceGrad")
def _StridedSliceGradGrad(op, grad):
"""Gradient for StridedSliceGrad op."""
begin = op.inputs[1]
end = op.inputs[2]
strides = op.inputs[3]
return None, None, None, None, array_ops.strided_slice(
grad,
begin,
end,
strides,
begin_mask=op.get_attr("begin_mask"),
end_mask=op.get_attr("end_mask"),
ellipsis_mask=op.get_attr("ellipsis_mask"),
new_axis_mask=op.get_attr("new_axis_mask"),
shrink_axis_mask=op.get_attr("shrink_axis_mask"))
@ops.RegisterGradient("Split")
def _SplitGrad(op, *grads):
return None, array_ops.concat(op.inputs[0], list(grads))
@ops.RegisterGradient("SplitV")
def _SplitVGrad(op, *grads):
returnval = array_ops.concat(op.inputs[2], list(grads))
  returnval = [returnval] + [None,] * (len(op.inputs) - 1)
  return returnval
ops.NotDifferentiable("Const")
@ops.RegisterGradient("Diag")
def _DiagGrad(_, grad):
return array_ops.diag_part(grad)
@ops.RegisterGradient("DiagPart")
def _DiagPartGrad(_, grad):
return array_ops.diag(grad)
@ops.RegisterGradient("MatrixDiag")
def _MatrixDiagGrad(_, grad):
return array_ops.matrix_diag_part(grad)
@ops.RegisterGradient("MatrixDiagPart")
def _MatrixDiagPartGrad(op, grad):
matrix_shape = op.inputs[0].get_shape()[-2:]
if matrix_shape.is_fully_defined() and matrix_shape[0] == matrix_shape[1]:
return array_ops.matrix_diag(grad)
else:
return array_ops.matrix_set_diag(array_ops.zeros_like(op.inputs[0]), grad)
@ops.RegisterGradient("MatrixSetDiag")
def _MatrixSetDiagGrad(op, grad):
input_shape = op.inputs[0].get_shape().merge_with(grad.get_shape())
diag_shape = op.inputs[1].get_shape()
batch_shape = input_shape[:-2].merge_with(diag_shape[:-1])
matrix_shape = input_shape[-2:]
if batch_shape.is_fully_defined() and matrix_shape.is_fully_defined():
diag_shape = batch_shape.as_list() + [min(matrix_shape.as_list())]
else:
with ops.colocate_with(grad):
grad_shape = array_ops.shape(grad)
grad_rank = array_ops.rank(grad)
batch_shape = array_ops.slice(grad_shape, [0], [grad_rank - 2])
matrix_shape = array_ops.slice(grad_shape, [grad_rank - 2], [2])
min_dim = math_ops.reduce_min(matrix_shape)
diag_shape = array_ops.concat(0, [batch_shape, [min_dim]])
grad_input = array_ops.matrix_set_diag(
grad, array_ops.zeros(
diag_shape, dtype=grad.dtype))
grad_diag = array_ops.matrix_diag_part(grad)
return (grad_input, grad_diag)
@ops.RegisterGradient("MatrixBandPart")
def _MatrixBandPartGrad(op, grad):
num_lower = op.inputs[1]
num_upper = op.inputs[2]
return (array_ops.matrix_band_part(grad, num_lower, num_upper), None, None)
# Edit Distance has no gradient (but can be used to eval seq2seq or CTC).
ops.NotDifferentiable("EditDistance")
@ops.RegisterGradient("Fill")
def _FillGrad(_, grad):
return None, math_ops.reduce_sum(grad)
ops.NotDifferentiable("ZerosLike")
@ops.RegisterGradient("Gather")
def _GatherGrad(op, grad):
"""Gradient for Gather op."""
# params can be large, so colocate the shape calculation with it.
params = op.inputs[0]
with ops.colocate_with(params):
params_shape = array_ops.shape(params)
# Build appropriately shaped IndexedSlices
indices = op.inputs[1]
size = array_ops.expand_dims(array_ops.size(indices), 0)
values_shape = array_ops.concat(0, [size, params_shape[1:]])
values = array_ops.reshape(grad, values_shape)
indices = array_ops.reshape(indices, size)
return [ops.IndexedSlices(values, indices, params_shape), None]
@ops.RegisterGradient("GatherNd")
def _GatherNdGrad(op, grad):
ref = op.inputs[0]
ref_shape = array_ops.shape(ref)
indices = op.inputs[1]
ref_grad = array_ops.scatter_nd(indices, grad, ref_shape)
return [ref_grad, None]
@ops.RegisterGradient("CheckNumerics")
def _CheckNumericsGrad(_, grad):
"""Gradient for check_numerics op."""
return array_ops.check_numerics(
grad, "Not a number (NaN) or infinity (Inf) values detected in gradient.")
@ops.RegisterGradient("Identity")
def _IdGrad(_, grad):
return grad
@ops.RegisterGradient("RefIdentity")
def _RefIdGrad(_, grad):
return grad
ops.NotDifferentiable("StopGradient")
@ops.RegisterGradient("Reshape")
def _ReshapeGrad(op, grad):
return [array_ops.reshape(grad, array_ops.shape(op.inputs[0])), None]
ops.NotDifferentiable("InvertPermutation")
def _ReshapeToInput(op, grad):
"""Reshapes the gradient to the shape of the original input."""
return array_ops.reshape(grad, array_ops.shape(op.inputs[0]))
@ops.RegisterGradient("ExpandDims")
def _ExpandDimsGrad(op, grad):
return [_ReshapeToInput(op, grad), None]
@ops.RegisterGradient("Squeeze")
def _SqueezeGrad(op, grad):
return _ReshapeToInput(op, grad)
@ops.RegisterGradient("Transpose")
def _TransposeGrad(op, grad):
"""Returns unshuffle(grad)."""
p = op.inputs[1]
return [array_ops.transpose(grad, array_ops.invert_permutation(p)), None]
ops.NotDifferentiable("Shape")
ops.NotDifferentiable("ShapeN")
ops.NotDifferentiable("Rank")
ops.NotDifferentiable("Size")
@ops.RegisterGradient("Tile")
def _TileGrad(op, grad):
"""Sum reduces grad along the tiled dimensions."""
assert isinstance(grad, ops.Tensor)
input_shape = array_ops.shape(op.inputs[0])
# We interleave multiples and input_shape to get split_shape,
# reshape grad to split_shape, and reduce along all even
# dimensions (the tiled dimensions) to get the result
# with shape input_shape. For example
# input_shape = [20, 30, 40]
# multiples = [2, 3, 4]
# split_shape = [2, 20, 3, 30, 4, 40]
# axes = [0, 2, 4]
split_shape = array_ops.reshape(array_ops.transpose(
array_ops.pack([op.inputs[1], input_shape])), [-1])
axes = math_ops.range(0, array_ops.size(split_shape), 2)
input_grad = math_ops.reduce_sum(array_ops.reshape(grad, split_shape), axes)
# Fix shape inference
input_grad.set_shape(op.inputs[0].get_shape())
return [input_grad, None]
ops.NotDifferentiable("BroadcastGradientArgs")
@ops.RegisterGradient("Pad")
def _PadGrad(op, grad):
"""Gradient for Pad."""
# Pad introduces values around the original tensor, so the gradient function
# slices the original shape out of the gradient."""
x = op.inputs[0]
a = op.inputs[1] # [Rank(x), 2]
# Takes a slice of a. The 1st column. [Rank(x), 1].
pad_before = array_ops.slice(a, [0, 0],
array_ops.pack([array_ops.rank(x), 1]))
# Make it a 1-D tensor.
begin = array_ops.reshape(pad_before, [-1])
sizes = array_ops.shape(x)
return array_ops.slice(grad, begin, sizes), None
# ReverseSequence is just a permutation. The gradient permutes back.
@ops.RegisterGradient("ReverseSequence")
def _ReverseSequenceGrad(op, grad):
seq_lengths = op.inputs[1]
return [array_ops.reverse_sequence(grad,
batch_dim=op.get_attr("batch_dim"),
seq_dim=op.get_attr("seq_dim"),
seq_lengths=seq_lengths),
None]
@ops.RegisterGradient("Reverse")
def _ReverseGrad(op, grad):
reverse_dims = op.inputs[1]
return array_ops.reverse(grad, reverse_dims), None
@ops.RegisterGradient("ReverseV2")
def _ReverseV2Grad(op, grad):
axis = op.inputs[1]
return array_ops.reverse_v2(grad, axis), None
@ops.RegisterGradient("SpaceToBatch")
def _SpaceToBatchGrad(op, grad):
# Its gradient is the opposite op: BatchToSpace.
block_size = op.get_attr("block_size")
return [array_ops.batch_to_space(grad, op.inputs[1], block_size=block_size),
None]
@ops.RegisterGradient("SpaceToBatchND")
def _SpaceToBatchNDGrad(op, grad):
# Its gradient is the opposite op: BatchToSpaceND.
return [array_ops.batch_to_space_nd(grad, op.inputs[1], op.inputs[2]),
None, None]
@ops.RegisterGradient("BatchToSpace")
def _BatchToSpaceGrad(op, grad):
# Its gradient is the opposite op: SpaceToBatch.
block_size = op.get_attr("block_size")
return [array_ops.space_to_batch(grad, op.inputs[1], block_size=block_size),
None]
@ops.RegisterGradient("BatchToSpaceND")
def _BatchToSpaceNDGrad(op, grad):
# Its gradient is the opposite op: SpaceToBatchND.
return [array_ops.space_to_batch_nd(grad, op.inputs[1], op.inputs[2]),
None, None]
@ops.RegisterGradient("SpaceToDepth")
def _SpaceToDepthGrad(op, grad):
# Its gradient is the opposite op: DepthToSpace.
block_size = op.get_attr("block_size")
return array_ops.depth_to_space(grad, block_size)
@ops.RegisterGradient("DepthToSpace")
def _DepthToSpaceGrad(op, grad):
# Its gradient is the opposite op: SpaceToDepth.
block_size = op.get_attr("block_size")
return array_ops.space_to_depth(grad, block_size)
ops.NotDifferentiable("OneHot")
@ops.RegisterGradient("MirrorPad")
def _MirrorPadGrad(op, grad):
mode = op.get_attr("mode")
# pylint: disable=protected-access
return [gen_array_ops._mirror_pad_grad(grad, op.inputs[1], mode=mode), None]
# pylint: enable=protected-access
@ops.RegisterGradient("MirrorPadGrad")
def _MirrorPadGradGrad(op, grad):
mode = op.get_attr("mode")
# pylint: disable=protected-access
return [gen_array_ops._mirror_pad(grad, op.inputs[1], mode=mode), None]
# pylint: enable=protected-access
@ops.RegisterGradient("QuantizeAndDequantize")
def _QuantizeAndDequantizeGrad(_, grad):
return grad
@ops.RegisterGradient("ExtractImagePatches")
def _ExtractImagePatchesGrad(op, grad):
batch_size, rows_in, cols_in, channels = [
dim.value for dim in op.inputs[0].get_shape()
]
input_bhwc = array_ops.shape(op.inputs[0])
batch_size = input_bhwc[0]
channels = input_bhwc[3]
_, rows_out, cols_out, _ = [
dim.value for dim in op.outputs[0].get_shape()
]
_, ksize_r, ksize_c, _ = op.get_attr('ksizes')
_, stride_r, stride_h, _ = op.get_attr('strides')
_, rate_r, rate_c, _ = op.get_attr('rates')
padding = op.get_attr('padding')
ksize_r_eff = ksize_r + (ksize_r - 1) * (rate_r - 1)
ksize_c_eff = ksize_c + (ksize_c - 1) * (rate_c - 1)
if padding == b'SAME':
rows_out = int(ceil(rows_in / stride_r))
cols_out = int(ceil(cols_in / stride_h))
pad_rows = ((rows_out - 1) * stride_r + ksize_r_eff - rows_in) // 2
pad_cols = ((cols_out - 1) * stride_h + ksize_c_eff - cols_in) // 2
elif padding == b'VALID':
rows_out = int(ceil((rows_in - ksize_r_eff + 1) / stride_r))
cols_out = int(ceil((cols_in - ksize_c_eff + 1) / stride_h))
pad_rows = (rows_out - 1) * stride_r + ksize_r_eff - rows_in
pad_cols = (cols_out - 1) * stride_h + ksize_c_eff - cols_in
pad_rows, pad_cols = max(0, pad_rows), max(0, pad_cols)
grad_expanded = array_ops.transpose(
array_ops.reshape(grad, (batch_size, rows_out,
cols_out, ksize_r, ksize_c, channels)),
(1, 2, 3, 4, 0, 5)
)
grad_flat = array_ops.reshape(grad_expanded, (-1, batch_size * channels))
row_steps = range(0, rows_out * stride_r, stride_r)
col_steps = range(0, cols_out * stride_h, stride_h)
idx = []
for i in range(rows_out):
for j in range(cols_out):
r_low, c_low = row_steps[i] - pad_rows, col_steps[j] - pad_cols
r_high, c_high = r_low + ksize_r_eff, c_low + ksize_c_eff
idx.extend([(r * (cols_in) + c,
i * (cols_out * ksize_r * ksize_c) +
j * (ksize_r * ksize_c) +
ri * (ksize_c) + ci)
for (ri, r) in enumerate(range(r_low, r_high, rate_r))
for (ci, c) in enumerate(range(c_low, c_high, rate_c))
if 0 <= r and r < rows_in and 0 <= c and c < cols_in
])
sp_shape = (rows_in * cols_in,
rows_out * cols_out * ksize_r * ksize_c)
sp_mat = sparse_tensor.SparseTensor(
array_ops.constant(idx, dtype=ops.dtypes.int64),
array_ops.ones((len(idx),), dtype=ops.dtypes.float32),
sp_shape
)
jac = sparse_ops.sparse_tensor_dense_matmul(sp_mat, grad_flat)
grad_out = array_ops.reshape(
jac, (rows_in, cols_in, batch_size, channels)
)
grad_out = array_ops.transpose(grad_out, (2, 0, 1, 3))
return [grad_out]
@ops.RegisterGradient("ScatterNd")
def _ScatterNdGrad(op, grad):
indices = op.inputs[0]
updates_grad = array_ops.gather_nd(grad, indices)
return [None, updates_grad, None]
|
|
import os
import warnings
import re
import html
import logging
import json
import datetime
import unicodedata
import traceback
from bs4 import BeautifulSoup
from lxml import etree
try:
import urllib.parse as urlparse
except:
import urlparse
import collections
import itertools
class Parser(object):
def __init__(self, folder=None, files=None):
self.folder = folder
self.file_list = files
def filter_func(self, filename):
return True
def parse_file_name(self, f):
        raise NotImplementedError("")
def parse_file(self, f):
messages = []
contacts = set()
for line in f:
match = re.match(self.regex, line)
if not match:
continue
try:
message = {
'timestamp':match.groups()[0],
'contact': match.groups()[1],
'message': match.groups()[2],
'source': self.__class__.__name__
}
file_name = f.name.lower()
if 'gmail' in file_name or 'google' in file_name:
                        # Because some of the Hangouts discussions in Trillian
                        # have my email address (but only mine) with a weird HEX
                        # suffix
if '[email protected]' in message['contact']:
message['contact'] = '[email protected]'
message['protocol'] = 'Hangouts'
elif 'facebook' in file_name:
message['protocol'] = 'Facebook'
elif 'yahoo' in file_name:
message['protocol'] = 'Yahoo'
else:
print(file_name)
for msg_filter in self.filters:
message = msg_filter(message)
contact = message['contact']
message['nick'] = message['contact']
contacts.add(message['contact'])
if type(message['timestamp']) == float:
message['timestamp'] = datetime.datetime.fromtimestamp(message['timestamp'])
if type(message['timestamp']) == datetime.datetime:
message['timestamp'] = message['timestamp'].isoformat()
messages.append(message)
except Exception as e:
logging.warning("Error in file %s at line %s: %s because %s", f.name,
line, str(e), traceback.format_exc())
if len(messages) == 0:
return
return contacts, messages
def files(self):
"""Generator that returns recursively all the files in a folder.
If filter_func is given, only the ones for which it returns true will be
returned. It should take one parameter, the name of the file.
"""
if self.folder is not None:
for root, dirs, files in os.walk(self.folder):
for file in files:
f = os.path.join(root, file)
if self.filter_func(f):
yield f
if '.git' in dirs: # still needed?
logging.warning(dirs)
dirs.remove('.git')
elif self.file_list is not None:
for f in self.file_list:
yield f
else:
raise Exception("You didn't specify source files")
def __iter__(self):
for f in self.files():
logging.info("Processing file %s", f)
try:
result = self.parse_file(open(f))
if result:
yield result
except UnicodeDecodeError as e:
logging.warning("Unicode !@#$ in file %s: %s", f, str(e))
except IOError as e:
logging.warning("Can't open file %s: %s", f, str(e))
continue
warnings.filterwarnings('ignore', ".+ looks like a URL. Beautiful Soup is not an HTTP client. .*")
warnings.filterwarnings('ignore', ".+ looks like a filename, not markup. You should probably open this file and pass the filehandle into Beautiful .*")
def DigsbyParser(msg):
orig = msg['message']
# Stupid hack for Yahoo emoticon that got XML-ified
text = msg['message'].replace("<:-p></:-p>", "PRTY_EMOJI")
soup = etree.HTML(text)
if soup is not None:
msg['message'] = soup.xpath("string()")
msg['message'] = msg['message'].replace("PRTY_EMOJI", "<:-p")
return msg
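# Strip HTML markup from the message body with BeautifulSoup, keeping only the text.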
def HTMLParse(msg):
soup = BeautifulSoup(msg['message'], 'html.parser')
msg['message'] = soup.get_text()
return msg
def Unquote(msg):
msg['message'] = urlparse.unquote(msg['message'])
msg['contact'] = urlparse.unquote(msg['contact'])
return msg
def HTMLEscaper(msg):
msg['message'] = html.unescape(msg['message'])
return msg
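# Parse "%d %b %Y %I:%M:%S %p" timestamps (e.g. "05 Mar 2011 07:42:13 PM") into datetime objects.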
def DateTimer(msg):
dt = datetime.datetime.strptime(msg['timestamp'], "%d %b %Y %I:%M:%S %p")
msg['timestamp'] = dt
return msg
def FloatTimestamp(msg):
msg['timestamp'] = float(msg['timestamp'])
return msg
def ISOTimer(msg):
msg['timestamp'] = msg['timestamp'].replace(" ", "T")
return msg
am_conv = {'AM': 0,'PM': 12, 'am': 0, 'pm': 12}
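# Convert Whatsapp timestamps to datetime, handling both the US "%m/%d/%Y %I:%M %p" form and the "%d/%m/%y %H:%M:%S" form.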
def USATimer(ts):
ts = ts.replace(',', '')
ts = ts.replace('.', '/')
if ts.count(" ") == 2:
# %m/%d/%Y %I:%M %p
date, time, am = ts.split(" ")
month, day, year = date.split("/")
hour, minute = time.split(":")
year, month, day, minute = int(year), int(month), int(day), int(minute)
if year < 2000:
year += 2000
hour = int(hour) % 12 + am_conv[am]
second = 0
else:
# %d/%m/%y %H:%M:%S
date, time = ts.split(" ")
day, month, year = date.split("/")
hour, minute, second = time.split(":")
year, month, day = int(year) + 2000, int(month), int(day)
hour, minute, second = int(hour), int(minute), int(second)
return datetime.datetime(year, month, day, hour, minute, second)
class Digsby(Parser):
regex = '<div class=".+? message" .+? timestamp="(.+?)"><span class="buddy">(.+?)</span> <span class="msgcontent">(.+?)</span>'
filters = [DigsbyParser, ISOTimer]
class Trillian(Parser):
def filter_func(self, filename):
root, ext = os.path.splitext(filename)
return ext == '.xml' and root[-7:] != "-assets"
regex = '<message type=".+?_privateMessage(?:Offline)?" time="(.+?)" ms=".+?" medium=".+?" to=".+?" from="(.+?)" from_display=".+?" text="(.+?)"/>'
# filters = [Unquote, FloatTimestamp, HTMLParse]
filters = [Unquote, FloatTimestamp, HTMLEscaper]
class Pidgin(Parser):
regex = r'<font color=".+?"><font size="2">\((\d\d:\d\d:\d\d [AP]M)\)</font> <b>(.+?):</b></font>(.+?)<br/>'
filters = [DateTimer, HTMLParse]
def parse_file(self, f):
head = f.readline()
try:
date = re.search(r'at [^ ]+ (\d{2} [a-zA-Z]* \d{4}) \d{2}:\d{2}:\d{2} [AP]M EEST', head).group(1)
except AttributeError:
logging.error("Couldn't find date in line %s", head)
date = ''  # fall back to an empty date so a bad header doesn't abort the whole file
old_filters = self.filters[:]
def correct_date(msg):
msg['timestamp'] = "%s %s" % (date, msg['timestamp'])
return msg
self.filters.insert(0, correct_date)
messages = super(Pidgin, self).parse_file(f)
self.filters = old_filters
return messages
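# Rewrite lines that start with a month-name date ("Dec 5, ...") into the numeric "MM/DD/YYYY, ..." prefix the Whatsapp regex expects; the year is hard-coded to 2015.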
def NameToDate(line):
if len(line) < 8:
return line
try:
if line[0:2].isalpha() and line[4].isdigit() and (line[7].isdigit() or line[8].isdigit()):
sp = line.split(",", 1)
date = datetime.datetime.strptime(sp[0], "%b %d")
date = date.replace(year=2015)
new_fmt = date.strftime("%m/%d/%Y")
return "%s,%s" % (new_fmt, sp[1])
return line
except Exception as e:
print(line)
print(len(line))
raise e
class Whatsapp(Parser):
regex = r'^(\d{1,2}[/.]\d{1,2}[/.]\d{2,4},? \d{1,2}:\d{2}(?::\d{2})?(?: [AP]M)?)(?: -|:) (.+?): (.+?)$'
def parse_file(self, f):
messages = []
contacts = set()
message = {'message': []}
for line in f:
line = NameToDate(line)
match = re.match(self.regex, line)
if not match:
print(line)
try:
# message['message'] += "\n"+line
message['message'].append(line)
# If message has not been defined yet, we're at the beginning
# of the file
except UnboundLocalError:
pass
continue
message['message'] = "\n".join(message['message'])
try:
message = {
'timestamp': USATimer(match.groups()[0]).isoformat(),
'contact': match.groups()[1],
'message': [match.groups()[2]],
'protocol': 'Whatsapp',
'source': 'Whatsapp',
'nick': match.groups()[1],
}
contacts.add(message['contact'])
messages.append(message)
except Exception as e:
print(traceback.format_exc())
logging.warning("Error in file %s at line %s: %s", f.name,
line, str(e))
message['message'] = "\n".join(message['message'])
if len(messages) == 0:
return "", []
return contacts, messages
def filter_func(self, filename):
root, ext = os.path.splitext(filename)
return ext == '.txt'
months = {'January': 1, 'February': 2, 'March': 3, 'April': 4, 'May': 5, 'June':6,
'July':7, 'August': 8, 'September': 9, 'October': 10, 'November': 11,
'December':12}
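# Parse Facebook's "Weekday, Month D, YYYY at H:MM[AP]M" header dates into datetime objects.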
def parseDate(date):
# "%A, %B %d, %Y at %I:%M%p"
_, day, rest = date.split(",")
month, day = day.strip().split()
month, day = months[month], int(day)
year, _, time = rest.strip().split()
year = int(year)
hour, minute = time[:-2].split(":")
hour, minute = int(hour) % 12 + am_conv[time[-2:]], int(minute)
return datetime.datetime(year, month, day, hour, minute, 0)
class Facebook(Parser):
def __iter__(self):
for filename in self.files():
with open(filename) as f:
file_content = f.read()
for result in self.parse_file(file_content):
yield result
logging.info("Finished loading HTML file %s", filename)
def parse_file(self, content):
soup = etree.HTML(content)
threads = soup.cssselect('div.thread')
for thread in threads:
result = self.parse_thread(thread)
if result:
yield result
def parse_thread(self, thread):
it = iter(thread.getchildren())
contacts = set()
# After that the children are: message_header,message in a p
messages = []
errors = 0
for header, message in zip(it, it):
try:
user = header.cssselect('span.user')[0].text.strip()
contacts.add(user)
except Exception as e:
logging.warning("Couldn't parse user %s because %s", etree.tostring(header), e)
errors +=1
continue
try:
date = header.cssselect('span.meta')[0].text.strip()[:-7]
date = parseDate(date)
except Exception as e:
logging.warning("Couldn't parse date %s because %s", header, e)
errors +=1
continue
try:
message = message.text.strip()
except Exception as e:
logging.warning("Couldn't parse message %s because %s", message, e)
errors +=1
continue
message = {
'timestamp': date,
'contact': user,
'message': message,
'protocol': 'Facebook',
'source': 'Facebook',
'nick': user,
}
message['timestamp'] = date.isoformat()
messages.append(message)
if errors > 15:
logging.error("Too many errors for %s", contacts)
break
return contacts, reversed(messages)
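# Hangouts parser: reads the Google Takeout JSON dump and flattens each conversation's REGULAR_CHAT_MESSAGE events into messages.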
class Hangouts(Parser):
def __iter__(self):
for filename in self.files():
data = json.load(open(filename))['conversation_state']
logging.info("Finished loading JSON file %s", filename)
for contact in data:
result = self.parse_thread(contact)
if result:
yield result
def parse_thread(self, thread):
conv = thread["conversation_state"]["conversation"]
participants = {}
for part in conv["participant_data"]:
if "fallback_name" in part:
participants[part["id"]["gaia_id"]] = part["fallback_name"]
else:
participants[part["id"]["gaia_id"]] = ("Unknown_%s"
% part["id"]["gaia_id"])
events = thread["conversation_state"]["event"]
messages = []
for event in events:
gaia_id = event["sender_id"]["gaia_id"]
if gaia_id in participants:
sender = participants[gaia_id]
else:
sender = "Unknown_%s" % gaia_id
date = datetime.datetime.fromtimestamp(float(event["timestamp"])/1000000)
if event["event_type"] == "REGULAR_CHAT_MESSAGE":
if "segment" in event["chat_message"]["message_content"]:
message = " ".join(p["text"]
for p in event["chat_message"]["message_content"]
["segment"] if "text" in p)
messages.append({
'timestamp': date.isoformat(),
'contact': sender,
'message': message,
'protocol': 'Hangouts',
'source': 'Hangouts',
'nick': sender,
})
return set(participants.values()), messages
class Viber(Parser):
# Todo: deal with Me name
def parse_file(self, lines):
messages = []
contacts = set()
message = {'message': []}
for line in lines:
print(line)
try:
date, time, sender, phone_nr, msg = line.split(",", 4)
except ValueError:
# Continuation line: no comma-separated header, so it belongs to the previous message.
message['message'].append(line)
continue
message['message'] = "\n".join(message['message'])
contacts.add(sender)
message = {
'message': [msg],
'timestamp': self.getTime(date,time),
'contact': sender,
'protocol': 'Viber',
'source': 'Viber',
'nick': sender,
}
messages.append(message)
message['message'] = "\n".join(message['message'])
return contacts, messages
def getTime(self, date, time):
day, month, year = date.split("/")
hour, minute, second = time[:9].split(":")
day, month, year, second = int(day), int(month), int(year), int(second)
hour, minute = int(hour) % 12 + am_conv[time[-2:]], int(minute)
return datetime.datetime(year, month, day, hour, minute, second).isoformat()
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
messages = collections.defaultdict(list)
# for contact, text in Digsby("./data/raw/Digsby Logs"):
# messages[frozenset(contact)].append(text)
# print("Digsby")
# for contact, text in Trillian("./data/raw/Trillian"):
# messages[frozenset(contact)].append(text)
# print("Trillian")
# for contact, text in Pidgin("./data/raw/Pidgin"):
# messages[frozenset(contact)].append(text)
# print("Pidgin")
# for contact, text in Facebook(files=["./data/interim/Facebook/cleaned.html"]):
# messages[frozenset(contact)].append(text)
# print("Facebook")
# for contact, text in Hangouts(files=["./data/raw/Hangouts/Hangouts.json"]):
# messages[frozenset(contact)].append(text)
# print("Hangouts")
# for contact, text in Viber("./data/raw/Viber"):
# messages[frozenset(contact)].append(text)
# print("Viber")
for contact in messages:
messages[contact] = list(itertools.chain.from_iterable(messages[contact]))
messages[contact].sort(key=lambda x: x['timestamp'])
total = 0
for k in messages:
if len(messages[k])> 10:
print(k, len(messages[k]))
total += len(messages[k])
print(total)
# f = open("./logs/messages.json", "w")
# json.dump(messages, f, indent=2, ensure_ascii=False)
# f.close()
|
|
# -*- coding: utf-8 -*-
import unittest
#import logging
import pyb
import gc
import math
import random
from sys import platform
from ws2812 import WS2812, Pixel, PREALLOCATE, CACHE, RECREATE
#log = logging.getLogger("test_ws2812")
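# Test-pattern generator: yields led_count triples of consecutive byte values (mod 256) starting at 'start'.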
def tg(led_count, start):
def triple(n):
for i in range(3):
yield (n + i) & 0xff
for i in range(led_count):
yield triple(start + 3*i)
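# Increment the pixel array like a ripple-carry counter: each colour channel keeps a single bit and the carry propagates through the channels.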
def count(r):
c = 1
for i in range(len(r)):
rgb = r[i]
#print("r[%d] = %r" % (i, rgb))
for color in range(len(rgb)):
t = rgb[color] + c
rgb[color] = t & 1
c = t >> 1
r[i] = rgb
#print("r[%d] now %r" % (i, rgb))
def v():
return ', '.join('0x%x' % v for v in ring.a)
def vg():
return ', '.join('0x%08x' % ring.get(i) for i in range(ring.led_count * 3))
class WS2812TestCase(unittest.TestCase):
names = """SinglePixel PixelBufferBits GrindSinglePixel PixelAssignPixel
MultiPixel MultiPixelFedIterator SlicedRval SlicedLval""".split()
#names = ['SlicedRval'] # DEBUG
def setUp(self):
#logging.basicConfig(level=logging.INFO)
gc.collect()
random.seed("WS2812")
def tearDown(self):
pass
#@unittest.skip("x")
def testAllMemoryStrategies(self):
print()
for mem in (PREALLOCATE, CACHE, RECREATE):
for name in self.names:
fun = getattr(self, 'doTest'+name)
print("\tdoTest%s(mem=%d) ... " % (name, mem), end='')
try:
gc.collect()
fun(mem=mem)
except:
print("Fail:", name, "with mem =", mem)
raise
else:
print("ok")
def doTestSinglePixel(self, mem):
# buf is the correct length
leds = WS2812(spi_bus=1, led_count=1, mem=mem)
self.assertEqual(len(leds.buf), 13)
# As-created the pixels are all off
# leds can be accessed via iterator
self.assertEqual(list(list(v) for v in leds), [[0]*3])
# Individual leds can be accessed by indexing
pix = leds[0]
# pixels have r,g,b
pix.r = 1
pix.g = 2
pix.b = 3
self.assertEqual([pix.r, pix.g, pix.b], [1,2,3])
self.assertEqual(list(pix), [1,2,3])
# Can get a named tuple of values
p = leds.get_led_values(0)
self.assertEqual([p.r, p.g, p.b], [1,2,3])
# pixels can also be indexed into for colors
self.assertEqual(list(pix), [1,2,3])
for i in range(len(pix)):
pix[i] = 12 * (i + 1)
self.assertEqual(list(pix), [12,24,36])
# A pixel position in a chain can be mutated by setting it with a bytearray
leds[0] = bytearray((7,11,92))
self.assertEqual(list(leds[0]), [7, 11, 92])
self.assertEqual([pix.r, pix.g, pix.b], [7, 11, 92])
# A pixel position in a chain can be mutated by setting it with bytes
leds[0] = b'foo'
self.assertEqual([pix.r, pix.g, pix.b], [102, 111, 111])
# A pixel position in a chain can be mutated by setting it with a list
leds[0] = [11, 22, 33]
self.assertEqual([pix.r, pix.g, pix.b], [11, 22, 33])
# A pixel position in a chain can be mutated by setting it with an iterator
leds[0] = (7*i + 3 for i in range(3))
self.assertEqual([pix.r, pix.g, pix.b], [3, 10, 17])
# The pixel.off() method works
leds[0] = bytearray((7,11,92))
leds[0].off()
self.assertEqual(list(leds[0]), [0]*3)
def doTestPixelBufferBits(self, mem):
leds = WS2812(spi_bus=1, led_count=1, mem=mem)
if platform == 'pyboard':
plati = 0
else:
plati = 1
# As-created the pixels are all off
# Off is represented correctly in the buffer
self.assertEqual('|'.join('%x' % v for v in leds.buf),
('11|11|11|11|11|11|11|11|11|11|11|11|0',
'0|0|0|0|0|0|0|0|0|0|0|0|0')[plati])
# All-ones is represented correctly in the buffer
leds[0] = b'\xff\xff\xff'
self.assertEqual(list(leds[0]), [255, 255, 255])
self.assertEqual('|'.join('%x' % v for v in leds.buf),
('33|33|33|33|33|33|33|33|33|33|33|33|0',
'ff|ff|ff|0|0|0|0|0|0|0|0|0|0')[plati])
pix = leds[0]
# The colors are in the right place, affecting the correct bits in the buffer
pix[0] = 2
pix[1] = 1
pix[2] = 4
self.assertEqual('|'.join('%x' % v for v in leds.buf),
('11|11|11|13|11|11|11|31|11|11|13|11|0',
'1|2|4|0|0|0|0|0|0|0|0|0|0')[plati])
# variation
pix[0] = 12
pix[1] = 34
pix[2] = 56
self.assertEqual(list(leds[0]), [12, 34, 56])
self.assertEqual('|'.join('%x' % v for v in leds.buf),
('11|31|11|31|11|11|33|11|11|33|31|11|0',
'22|c|38|0|0|0|0|0|0|0|0|0|0')[plati])
# variation
pix[0] = -1
pix[1] = 345
pix[2] = 777777777
self.assertEqual(list(leds[0]), [255, 89, 113])
self.assertEqual('|'.join('%x' % v for v in leds.buf),
('13|13|31|13|33|33|33|33|13|33|11|13|0',
'59|ff|71|0|0|0|0|0|0|0|0|0|0')[plati])
def testMemoryUsed0(self):
leds = WS2812(spi_bus=1, led_count=64, mem=PREALLOCATE)
prev_mem_free = gc.mem_free()
for i in range(8):
pass
delta_mem = gc.mem_free() - prev_mem_free
if platform == 'pyboard':
self.assertEqual(delta_mem, 0)
#@unittest.skip("FIXME")
def testMemoryUsed1(self):
leds = WS2812(spi_bus=1, led_count=64, mem=PREALLOCATE)
prev_mem_free = gc.mem_free()
for i in range(8):
leds[0].g = i # no leak
delta_mem = gc.mem_free() - prev_mem_free
if platform == 'pyboard':
self.assertEqual(delta_mem, 0)
#@unittest.skip("FIXME")
def testMemoryUsed2(self):
leds = WS2812(spi_bus=1, led_count=64, mem=PREALLOCATE)
prev_mem_free = gc.mem_free()
for i in range(8):
leds[0] = b'\x08\x00\x00' # no leak
delta_mem = gc.mem_free() - prev_mem_free
if platform == 'pyboard':
self.assertEqual(delta_mem, 0)
#@unittest.skip("FIXME")
def testMemoryUsed3(self):
leds = WS2812(spi_bus=1, led_count=64, mem=PREALLOCATE)
foo = b'foo'
prev_mem_free = gc.mem_free()
for i in range(8):
leds[0] = foo # no leak
delta_mem = gc.mem_free() - prev_mem_free
if platform == 'pyboard':
self.assertEqual(delta_mem, 0)
#@unittest.skip("FIXME")
def testMemoryUsed4(self):
leds = WS2812(spi_bus=1, led_count=64, mem=PREALLOCATE)
bar = bytearray(range(3))
prev_mem_free = gc.mem_free()
for i in range(8):
leds[0] = bar # no leak
delta_mem = gc.mem_free() - prev_mem_free
if platform == 'pyboard':
self.assertEqual(delta_mem, 0)
#@unittest.skip("FIXME")
def testMemoryUsed5(self):
leds = WS2812(spi_bus=1, led_count=64, mem=PREALLOCATE)
prev_mem_free = gc.mem_free()
for i in range(8):
p = leds[i]
delta_mem = gc.mem_free() - prev_mem_free
if platform == 'pyboard':
self.assertEqual(delta_mem, 0)
#@unittest.skip("FIXME")
def testMemoryUsed6(self):
leds = WS2812(spi_bus=1, led_count=64, mem=PREALLOCATE)
prev_mem_free = gc.mem_free()
for i in range(8):
r = leds[i].r # no leak
delta_mem = gc.mem_free() - prev_mem_free
if platform == 'pyboard':
self.assertEqual(delta_mem, 0)
@unittest.skip("Fails")
def testMemoryUsed7(self):
leds = WS2812(spi_bus=1, led_count=64, mem=PREALLOCATE)
prev_mem_free = gc.mem_free()
for i in range(8):
r,g,b = leds[i] # -64 each
delta_mem = gc.mem_free() - prev_mem_free
if platform == 'pyboard':
self.assertEqual(delta_mem, 0)
#@unittest.skip("FIXME")
def testMemoryUsed8(self):
leds = WS2812(spi_bus=1, led_count=64, mem=PREALLOCATE)
prev_mem_free = gc.mem_free()
for i in range(8):
r,g,b = leds[i].r, leds[i].g, leds[i].b # no leak
delta_mem = gc.mem_free() - prev_mem_free
if platform == 'pyboard':
self.assertEqual(delta_mem, 0)
#@unittest.skip("FIXME")
def testMemoryUsed9(self):
leds = WS2812(spi_bus=1, led_count=64, mem=PREALLOCATE)
foo = b'foo'
bar = bytearray(range(3))
foolist = list(range(3))
prev_mem_free = gc.mem_free()
for i in range(8):
t = leds[i][0] # no leak
delta_mem = gc.mem_free() - prev_mem_free
if platform == 'pyboard':
self.assertEqual(delta_mem, 0)
#@unittest.skip("FIXME")
def testMemoryUsed10(self):
leds = WS2812(spi_bus=1, led_count=64, mem=PREALLOCATE)
prev_mem_free = gc.mem_free()
for i in range(8):
for k in range(len(leds[i])): # no leak
leds[i][k] = leds[i-1][k]
delta_mem = gc.mem_free() - prev_mem_free
if platform == 'pyboard':
self.assertEqual(delta_mem, 0)
#@unittest.skip("FIXME")
def testMemoryUsed11(self):
leds = WS2812(spi_bus=1, led_count=64, mem=PREALLOCATE)
foolist = list(range(3))
prev_mem_free = gc.mem_free()
for i in range(8):
leds[i] = foolist # no leak
delta_mem = gc.mem_free() - prev_mem_free
if platform == 'pyboard':
self.assertEqual(delta_mem, 0)
def testSizes(self):
gc.collect()
m0 = gc.mem_free()
leds = WS2812(spi_bus=1, led_count=256, mem=PREALLOCATE)
gc.collect()
m1 = gc.mem_free()
print((m1-m0)/256)
#@unittest.skip("x")
def doTestGrindSinglePixel(self, mem):
# get / set work as expected
leds = WS2812(spi_bus=1, led_count=1, intensity=1, mem=mem)
for i in range(1000):
r = leds[0].r = random.getrandbits(8)
g = leds[0].g = random.getrandbits(8)
b = leds[0].b = random.getrandbits(8)
self.assertEqual(list(leds[0]), [r, g, b])
def doTestPixelAssignPixel(self, mem):
# A pixel can be assigned to another pixel
leds = WS2812(spi_bus=1, led_count=3, mem=mem)
for i in range(len(leds)):
leds[i] = (i, 2*i, 3*i)
self.assertEqual(list(leds[0]), [0, 0, 0])
self.assertEqual(list(leds[1]), [1, 2, 3])
self.assertEqual(list(leds[2]), [2, 4, 6])
leds[0] = leds[2]
leds[2] = leds[1]
leds[1] = [19, 23, 29]
self.assertEqual(list(leds[0]), [2, 4, 6])
self.assertEqual(list(leds[1]), [19, 23, 29])
self.assertEqual(list(leds[2]), [1, 2, 3])
self.assertIsNot(leds[0], leds[1])
self.assertIsNot(leds[0], leds[2])
self.assertIsNot(leds[1], leds[2])
#@unittest.skip("x")
def doTestMultiPixel(self, mem):
# buf is the correct length
# WS2812 can be iterated over to yield pixels
# pixel values can be set and read back
for n in range(1, 400, 19):
leds = None
gc.collect()
leds = WS2812(spi_bus=1, led_count=n, mem=mem)
self.assertEqual(len(leds), n)
self.assertEqual(len(leds.buf), 12*n + 1)
random.seed(n)
for pix in leds:
self.assertEqual(list(pix), [0]*3)
for j in range(len(pix)):
pix[j] = random.getrandbits(8)
pb = [0] * 3
random.seed(n)
for pix in leds:
for j in range(len(pix)):
pb[j] = random.getrandbits(8)
self.assertEqual(list(pix), pb)
#@unittest.skip("x")
def doTestMultiPixelFedIterator(self, mem):
# A chain can be fed from an iterator
for n in range(1, 200, 19):
leds = None
gc.collect()
leds = WS2812(spi_bus=1, led_count=n, mem=mem)
leds.fill_buf(tg(n, 1))
for pix, pg in zip(leds, tg(n, 1)):
self.assertEqual(list(pix), list(pg))
def doTestSlicedRval(self, mem):
# A chain slice can be read
leds = WS2812(spi_bus=1, led_count=9, mem=mem)
self.assertTrue(all(isinstance(v, Pixel) for v in leds[:3]))
self.assertTrue(all(isinstance(v, Pixel) for v in leds[2:5]))
self.assertTrue(all(isinstance(v, Pixel) for v in leds[7:11]))
for i in range(len(leds)):
leds[i] = (i, 2*i, 3*i)
for k, led in enumerate(leds[3:6]):
i = k + 3
self.assertEqual(tuple(led), (i, 2*i, 3*i))
self.assertEqual(list(tuple(led) for led in leds[-2:]), \
[(i, 2*i, 3*i) for i in (7,8)])
self.assertEqual(list(tuple(led) for led in leds[:]), \
[(i, 2*i, 3*i) for i in range(len(leds))])
# Negative index can be used
i = len(leds) - 1
self.assertEqual(tuple(leds[-1]), (i, 2*i, 3*i))
i = len(leds) - 5
self.assertEqual(tuple(leds[-5]), (i, 2*i, 3*i))
i = 0
self.assertEqual(tuple(leds[-len(leds)]), (i, 2*i, 3*i))
# Negative index doesn't blow up unallocated
leds = WS2812(spi_bus=1, led_count=66, mem=mem)
sum_neg = sum(sum([leds[i].r, leds[i].g, leds[i].b]) for i in range(-1, -len(leds), -1))
sum_pos = sum(sum([leds[i].r, leds[i].g, leds[i].b]) for i in range(len(leds)))
self.assertEqual(sum_neg, 0)
self.assertEqual(sum_pos, 0)
#@unittest.skip("FIXME")
def doTestSlicedLval(self, mem):
# A chain slice can be written
leds = WS2812(spi_bus=1, led_count=9, mem=mem)
for i in range(len(leds)):
leds[i] = (i, 2*i, 3*i)
leds[0:3] = leds[3:6]
for k in range(3):
i = k + 3
self.assertEqual(tuple(leds[k]), (i, 2*i, 3*i))
for i in range(len(leds)):
leds[i] = (i, 2*i, 3*i)
leds[-3:] = leds[:3]
for i in range(3):
k = i + 6
self.assertEqual(tuple(leds[k]), (i, 2*i, 3*i))
def main():
unittest.main()
return
# Burn-in test:
while True:
try:
unittest.main()
except MemoryError as e:
#print("MemoryError:", e)
pyb.info()
raise
if __name__ == '__main__':
main()
|
|
# Copyright (c) 2014 ProphetStor, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Implementation of the ProphetStor DPL storage adapter class for Federator.
# v2.0.1 Consistency group support
# v2.0.2 Pool aware scheduler
"""
import base64
import errno
import httplib
import json
import random
import time
from oslo_log import log as logging
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _, _LI, _LW, _LE
from cinder.openstack.common import loopingcall
from cinder.volume import driver
from cinder.volume.drivers.prophetstor import options
from cinder.volume.drivers.san import san
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
CONNECTION_RETRY = 10
MAXSNAPSHOTS = 1024
DISCOVER_SERVER_TYPE = 'dpl'
DPL_BLOCKSTOR = '/dpl_blockstor'
DPL_SYSTEM = '/dpl_system'
DPL_VER_V1 = 'v1'
DPL_OBJ_POOL = 'dpl_pool'
DPL_OBJ_DISK = 'dpl_disk'
DPL_OBJ_VOLUME = 'dpl_volume'
DPL_OBJ_VOLUMEGROUP = 'dpl_volgroup'
DPL_OBJ_SNAPSHOT = 'cdmi_snapshots'
DPL_OBJ_EXPORT = 'dpl_export'
DPL_OBJ_REPLICATION = 'cdmi_replication'
DPL_OBJ_TARGET = 'dpl_target'
DPL_OBJ_SYSTEM = 'dpl_system'
DPL_OBJ_SNS = 'sns_table'
class DPLCommand(object):
"""DPL command interface."""
def __init__(self, ip, port, username, password):
self.ip = ip
self.port = port
self.username = username
self.password = password
def send_cmd(self, method, url, params, expected_status):
"""Send command to DPL."""
connection = None
retcode = 0
response = {}
data = {}
header = {'Content-Type': 'application/cdmi-container',
'Accept': 'application/cdmi-container',
'x-cdmi-specification-version': '1.0.2'}
# base64 encode the username and password
auth = base64.encodestring('%s:%s'
% (self.username,
self.password)).replace('\n', '')
header['Authorization'] = 'Basic %s' % auth
if not params:
payload = None
else:
try:
payload = json.dumps(params, ensure_ascii=False)
payload.encode('utf-8')
except Exception as e:
LOG.error(_LE('JSON encode params %(param)s error:'
' %(status)s.'), {'param': params, 'status': e})
retcode = errno.EINVAL
for i in range(CONNECTION_RETRY):
try:
connection = httplib.HTTPSConnection(self.ip,
self.port,
timeout=60)
if connection:
retcode = 0
break
except IOError as ioerr:
LOG.error(_LE('Connect to Flexvisor error: %s.'),
ioerr)
retcode = errno.ENOTCONN
except Exception as e:
LOG.error(_LE('Connect to Flexvisor failed: %s.'),
e)
retcode = errno.EFAULT
retry = CONNECTION_RETRY
while (connection and retry):
try:
connection.request(method, url, payload, header)
except httplib.CannotSendRequest as e:
connection.close()
time.sleep(1)
connection = httplib.HTTPSConnection(self.ip,
self.port,
timeout=60)
retry -= 1
if connection:
if retry == 0:
retcode = errno.ENOTCONN
else:
retcode = 0
else:
retcode = errno.ENOTCONN
continue
except Exception as e:
LOG.error(_LE('Failed to send request: %s.'),
e)
retcode = errno.EFAULT
break
if retcode == 0:
try:
response = connection.getresponse()
if response.status == httplib.SERVICE_UNAVAILABLE:
LOG.error(_LE('The Flexvisor service is unavailable.'))
time.sleep(1)
retry -= 1
retcode = errno.ENOPROTOOPT
continue
else:
retcode = 0
break
except httplib.ResponseNotReady as e:
time.sleep(1)
retry -= 1
retcode = errno.EFAULT
continue
except Exception as e:
LOG.error(_LE('Failed to get response: %s.'),
e)
retcode = errno.EFAULT
break
if retcode == 0 and response.status in expected_status and\
response.status == httplib.NOT_FOUND:
retcode = errno.ENODATA
elif retcode == 0 and response.status not in expected_status:
LOG.error(_LE('%(method)s %(url)s unexpected response status: '
'%(response)s (expects: %(expects)s).'),
{'method': method,
'url': url,
'response': httplib.responses[response.status],
'expects': expected_status})
if response.status == httplib.UNAUTHORIZED:
raise exception.NotAuthorized
retcode = errno.EACCES
else:
retcode = errno.EIO
elif retcode == 0 and response.status == httplib.NOT_FOUND:
retcode = errno.ENODATA
elif retcode == 0 and response.status == httplib.ACCEPTED:
retcode = errno.EAGAIN
try:
data = response.read()
data = json.loads(data)
except (TypeError, ValueError) as e:
LOG.error(_LE('Call to json.loads() raised an exception: %s.'),
e)
retcode = errno.ENOEXEC
except Exception as e:
LOG.error(_LE('Read response raised an exception: %s.'),
e)
retcode = errno.ENOEXEC
elif retcode == 0 and \
response.status in [httplib.OK, httplib.CREATED] and \
httplib.NO_CONTENT not in expected_status:
try:
data = response.read()
data = json.loads(data)
except (TypeError, ValueError) as e:
LOG.error(_LE('Call to json.loads() raised an exception: %s.'),
e)
retcode = errno.ENOEXEC
except Exception as e:
LOG.error(_LE('Read response raised an exception: %s.'),
e)
retcode = errno.ENOEXEC
if connection:
connection.close()
return retcode, data
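# Thin REST client for the Flexvisor API: builds the CDMI-style URLs for volume, snapshot, pool, export and volume-group operations and sends them through DPLCommand.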
class DPLVolume(object):
def __init__(self, dplServer, dplPort, dplUser, dplPassword):
self.objCmd = DPLCommand(dplServer, dplPort, dplUser, dplPassword)
def _execute(self, method, url, params, expected_status):
if self.objCmd:
return self.objCmd.send_cmd(method, url, params, expected_status)
else:
return -1, None
def _gen_snapshot_url(self, vdevid, snapshotid):
snapshot_url = '/%s/%s/%s' % (vdevid, DPL_OBJ_SNAPSHOT, snapshotid)
return snapshot_url
def get_server_info(self):
method = 'GET'
url = ('/%s/%s/' % (DPL_VER_V1, DPL_OBJ_SYSTEM))
return self._execute(method, url, None, [httplib.OK, httplib.ACCEPTED])
def create_vdev(self, volumeID, volumeName, volumeDesc, poolID, volumeSize,
fthinprovision=True, maximum_snapshot=MAXSNAPSHOTS,
snapshot_quota=None):
method = 'PUT'
metadata = {}
params = {}
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, volumeID)
if volumeName is None or volumeName == '':
metadata['display_name'] = volumeID
else:
metadata['display_name'] = volumeName
metadata['display_description'] = volumeDesc
metadata['pool_uuid'] = poolID
metadata['total_capacity'] = volumeSize
metadata['maximum_snapshot'] = maximum_snapshot
if snapshot_quota is not None:
metadata['snapshot_quota'] = int(snapshot_quota)
metadata['properties'] = dict(thin_provision=fthinprovision)
params['metadata'] = metadata
return self._execute(method,
url, params,
[httplib.OK, httplib.ACCEPTED, httplib.CREATED])
def extend_vdev(self, volumeID, volumeName, volumeDesc, volumeSize,
maximum_snapshot=MAXSNAPSHOTS, snapshot_quota=None):
method = 'PUT'
metadata = {}
params = {}
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, volumeID)
if volumeName is None or volumeName == '':
metadata['display_name'] = volumeID
else:
metadata['display_name'] = volumeName
metadata['display_description'] = volumeDesc
metadata['total_capacity'] = int(volumeSize)
metadata['maximum_snapshot'] = maximum_snapshot
if snapshot_quota is not None:
metadata['snapshot_quota'] = snapshot_quota
params['metadata'] = metadata
return self._execute(method,
url, params,
[httplib.OK, httplib.ACCEPTED, httplib.CREATED])
def delete_vdev(self, volumeID, force=True):
method = 'DELETE'
metadata = {}
params = {}
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, volumeID)
metadata['force'] = force
params['metadata'] = metadata
return self._execute(method,
url, params,
[httplib.OK, httplib.ACCEPTED, httplib.NOT_FOUND,
httplib.NO_CONTENT])
def create_vdev_from_snapshot(self, vdevID, vdevDisplayName, vdevDesc,
snapshotID, poolID, fthinprovision=True,
maximum_snapshot=MAXSNAPSHOTS,
snapshot_quota=None):
method = 'PUT'
metadata = {}
params = {}
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevID)
metadata['snapshot_operation'] = 'copy'
if vdevDisplayName is None or vdevDisplayName == "":
metadata['display_name'] = vdevID
else:
metadata['display_name'] = vdevDisplayName
metadata['display_description'] = vdevDesc
metadata['pool_uuid'] = poolID
metadata['properties'] = {}
metadata['maximum_snapshot'] = maximum_snapshot
if snapshot_quota:
metadata['snapshot_quota'] = snapshot_quota
metadata['properties'] = dict(thin_provision=fthinprovision)
params['metadata'] = metadata
params['copy'] = self._gen_snapshot_url(vdevID, snapshotID)
return self._execute(method,
url, params,
[httplib.OK, httplib.ACCEPTED, httplib.CREATED])
def spawn_vdev_from_snapshot(self, new_vol_id, src_vol_id,
vol_display_name, description, snap_id):
method = 'PUT'
params = {}
metadata = {}
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, new_vol_id)
metadata['snapshot_operation'] = 'spawn'
if vol_display_name is None or vol_display_name == '':
metadata['display_name'] = new_vol_id
else:
metadata['display_name'] = vol_display_name
metadata['display_description'] = description
params['metadata'] = metadata
params['copy'] = self._gen_snapshot_url(src_vol_id, snap_id)
return self._execute(method, url, params,
[httplib.OK, httplib.ACCEPTED, httplib.CREATED])
def get_pools(self):
method = 'GET'
url = '/%s/%s/' % (DPL_VER_V1, DPL_OBJ_POOL)
return self._execute(method, url, None, [httplib.OK])
def get_pool(self, poolid):
method = 'GET'
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_POOL, poolid)
return self._execute(method, url, None, [httplib.OK, httplib.ACCEPTED])
def clone_vdev(self, SourceVolumeID, NewVolumeID, poolID, volumeName,
volumeDesc, volumeSize, fthinprovision=True,
maximum_snapshot=MAXSNAPSHOTS, snapshot_quota=None):
method = 'PUT'
params = {}
metadata = {}
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, NewVolumeID)
metadata["snapshot_operation"] = "clone"
if volumeName is None or volumeName == '':
metadata["display_name"] = NewVolumeID
else:
metadata["display_name"] = volumeName
metadata["display_description"] = volumeDesc
metadata["pool_uuid"] = poolID
metadata["total_capacity"] = volumeSize
metadata["maximum_snapshot"] = maximum_snapshot
if snapshot_quota:
metadata["snapshot_quota"] = snapshot_quota
metadata["properties"] = dict(thin_provision=fthinprovision)
params["metadata"] = metadata
params["copy"] = SourceVolumeID
return self._execute(method,
url, params,
[httplib.OK, httplib.CREATED, httplib.ACCEPTED])
def create_vdev_snapshot(self, vdevid, snapshotid, snapshotname='',
snapshotdes='', isgroup=False):
method = 'PUT'
metadata = {}
params = {}
if isgroup:
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUMEGROUP, vdevid)
else:
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid)
if not snapshotname:
metadata['display_name'] = snapshotid
else:
metadata['display_name'] = snapshotname
metadata['display_description'] = snapshotdes
params['metadata'] = metadata
params['snapshot'] = snapshotid
return self._execute(method,
url, params,
[httplib.OK, httplib.CREATED, httplib.ACCEPTED])
def get_vdev(self, vdevid):
method = 'GET'
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid)
return self._execute(method,
url, None,
[httplib.OK, httplib.ACCEPTED, httplib.NOT_FOUND])
def get_vdev_status(self, vdevid, eventid):
method = 'GET'
url = ('/%s/%s/%s/?event_uuid=%s' % (DPL_VER_V1, DPL_OBJ_VOLUME,
vdevid, eventid))
return self._execute(method,
url, None,
[httplib.OK, httplib.NOT_FOUND])
def get_pool_status(self, poolid, eventid):
method = 'GET'
url = ('/%s/%s/%s/?event_uuid=%s' % (DPL_VER_V1, DPL_OBJ_POOL,
poolid, eventid))
return self._execute(method,
url, None,
[httplib.OK, httplib.NOT_FOUND])
def assign_vdev(self, vdevid, iqn, lunname, portal, lunid=0):
method = 'PUT'
metadata = {}
exports = {}
params = {}
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid)
metadata['export_operation'] = 'assign'
exports['Network/iSCSI'] = {}
target_info = {}
target_info['logical_unit_number'] = 0
target_info['logical_unit_name'] = lunname
permissions = []
portals = []
portals.append(portal)
permissions.append(iqn)
target_info['permissions'] = permissions
target_info['portals'] = portals
exports['Network/iSCSI'] = target_info
params['metadata'] = metadata
params['exports'] = exports
return self._execute(method,
url, params,
[httplib.OK, httplib.ACCEPTED, httplib.CREATED])
def assign_vdev_fc(self, vdevid, targetwwpn, initiatorwwpn, lunname,
lunid=-1):
method = 'PUT'
metadata = {}
exports = {}
params = {}
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid)
metadata['export_operation'] = 'assign'
exports['Network/FC'] = {}
target_info = {}
target_info['target_identifier'] = targetwwpn
target_info['logical_unit_number'] = lunid
target_info['logical_unit_name'] = lunname
target_info['permissions'] = initiatorwwpn
exports['Network/FC'] = target_info
params['metadata'] = metadata
params['exports'] = exports
return self._execute(method,
url, params,
[httplib.OK, httplib.ACCEPTED, httplib.CREATED])
def unassign_vdev(self, vdevid, initiatorIqn, targetIqn=''):
method = 'PUT'
metadata = {}
exports = {}
params = {}
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid)
metadata['export_operation'] = 'unassign'
params['metadata'] = metadata
exports['Network/iSCSI'] = {}
exports['Network/iSCSI']['target_identifier'] = targetIqn
permissions = []
permissions.append(initiatorIqn)
exports['Network/iSCSI']['permissions'] = permissions
params['exports'] = exports
return self._execute(method,
url, params,
[httplib.OK, httplib.ACCEPTED,
httplib.NO_CONTENT, httplib.NOT_FOUND])
def unassign_vdev_fc(self, vdevid, targetwwpn, initiatorwwpns):
method = 'PUT'
metadata = {}
exports = {}
params = {}
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid)
metadata['export_operation'] = 'unassign'
params['metadata'] = metadata
exports['Network/FC'] = {}
exports['Network/FC']['target_identifier'] = targetwwpn
permissions = initiatorwwpns
exports['Network/FC']['permissions'] = permissions
params['exports'] = exports
return self._execute(method,
url, params,
[httplib.OK, httplib.ACCEPTED,
httplib.NO_CONTENT, httplib.NOT_FOUND])
def delete_vdev_snapshot(self, objID, snapshotID, isGroup=False):
method = 'DELETE'
if isGroup:
url = ('/%s/%s/%s/%s/%s/' % (DPL_VER_V1,
DPL_OBJ_VOLUMEGROUP,
objID,
DPL_OBJ_SNAPSHOT, snapshotID))
else:
url = ('/%s/%s/%s/%s/%s/' % (DPL_VER_V1,
DPL_OBJ_VOLUME, objID,
DPL_OBJ_SNAPSHOT, snapshotID))
return self._execute(method,
url, None,
[httplib.OK, httplib.ACCEPTED, httplib.NO_CONTENT,
httplib.NOT_FOUND])
def rollback_vdev(self, vdevid, snapshotid):
method = 'PUT'
params = {}
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid)
params['copy'] = self._gen_snapshot_url(vdevid, snapshotid)
return self._execute(method,
url, params,
[httplib.OK, httplib.ACCEPTED])
def list_vdev_snapshots(self, vdevid, isGroup=False):
method = 'GET'
if isGroup:
url = ('/%s/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUMEGROUP, vdevid,
DPL_OBJ_SNAPSHOT))
else:
url = ('/%s/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME,
vdevid, DPL_OBJ_SNAPSHOT))
return self._execute(method,
url, None,
[httplib.OK])
def query_vdev_snapshot(self, vdevid, snapshotID, isGroup=False):
method = 'GET'
if isGroup:
url = ('/%s/%s/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUMEGROUP,
vdevid, DPL_OBJ_SNAPSHOT, snapshotID))
else:
url = ('/%s/%s/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid,
DPL_OBJ_SNAPSHOT, snapshotID))
return self._execute(method,
url, None,
[httplib.OK])
def create_target(self, targetID, protocol, displayName, targetAddress,
description=''):
method = 'PUT'
params = {}
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT, targetID)
params['metadata'] = {}
metadata = params['metadata']
metadata['type'] = 'target'
metadata['protocol'] = protocol
if displayName is None or displayName == '':
metadata['display_name'] = targetID
else:
metadata['display_name'] = displayName
metadata['display_description'] = description
metadata['address'] = targetAddress
return self._execute(method, url, params, [httplib.OK])
def get_target(self, targetID):
method = 'GET'
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT, targetID)
return self._execute(method, url, None, [httplib.OK])
def delete_target(self, targetID):
method = 'DELETE'
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT, targetID)
return self._execute(method,
url, None,
[httplib.OK, httplib.ACCEPTED, httplib.NOT_FOUND])
def get_target_list(self, type='target'):
# type = target/initiator
method = 'GET'
if type is None:
url = '/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT)
else:
url = '/%s/%s/?type=%s' % (DPL_VER_V1, DPL_OBJ_EXPORT, type)
return self._execute(method, url, None, [httplib.OK])
def get_sns_table(self, wwpn):
method = 'PUT'
params = {}
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT, DPL_OBJ_SNS)
params['metadata'] = {}
params['metadata']['protocol'] = 'fc'
params['metadata']['address'] = str(wwpn)
return self._execute(method, url, params, [httplib.OK])
def create_vg(self, groupID, groupName, groupDesc='', listVolume=None,
maxSnapshots=MAXSNAPSHOTS, rotationSnapshot=True):
method = 'PUT'
metadata = {}
params = {}
properties = {}
url = '/%s/%s/' % (DPL_OBJ_VOLUMEGROUP, groupID)
if listVolume:
metadata['volume'] = listVolume
else:
metadata['volume'] = []
metadata['display_name'] = groupName
metadata['display_description'] = groupDesc
metadata['maximum_snapshot'] = maxSnapshots
properties['snapshot_rotation'] = rotationSnapshot
metadata['properties'] = properties
params['metadata'] = metadata
return self._execute(method, url, params,
[httplib.OK, httplib.ACCEPTED, httplib.CREATED])
def get_vg_list(self, vgtype=None):
method = 'GET'
if vgtype:
url = '/%s/?volume_group_type=%s' % (DPL_OBJ_VOLUMEGROUP, vgtype)
else:
url = '/%s/' % (DPL_OBJ_VOLUMEGROUP)
return self._execute(method, url, None, [httplib.OK])
def get_vg(self, groupID):
method = 'GET'
url = '/%s/%s/' % (DPL_OBJ_VOLUMEGROUP, groupID)
return self._execute(method, url, None, [httplib.OK])
def delete_vg(self, groupID, force=True):
method = 'DELETE'
metadata = {}
params = {}
url = '/%s/%s/' % (DPL_OBJ_VOLUMEGROUP, groupID)
metadata['force'] = force
params['metadata'] = metadata
return self._execute(method, url, params,
[httplib.NO_CONTENT, httplib.NOT_FOUND])
def join_vg(self, volumeID, groupID):
method = 'PUT'
metadata = {}
params = {}
url = '/%s/%s/' % (DPL_OBJ_VOLUMEGROUP, groupID)
metadata['volume_group_operation'] = 'join'
metadata['volume'] = []
metadata['volume'].append(volumeID)
params['metadata'] = metadata
return self._execute(method, url, params,
[httplib.OK, httplib.ACCEPTED])
def leave_vg(self, volumeID, groupID):
method = 'PUT'
metadata = {}
params = {}
url = '/%s/%s/' % (DPL_OBJ_VOLUMEGROUP, groupID)
metadata['volume_group_operation'] = 'leave'
metadata['volume'] = []
metadata['volume'].append(volumeID)
params['metadata'] = metadata
return self._execute(method, url, params,
[httplib.OK, httplib.ACCEPTED])
class DPLCOMMONDriver(driver.VolumeDriver):
"""class of dpl storage adapter."""
VERSION = '2.0.2'
def __init__(self, *args, **kwargs):
super(DPLCOMMONDriver, self).__init__(*args, **kwargs)
if self.configuration:
self.configuration.append_config_values(options.DPL_OPTS)
self.configuration.append_config_values(san.san_opts)
self.dpl = DPLVolume(self.configuration.san_ip,
self.configuration.dpl_port,
self.configuration.san_login,
self.configuration.san_password)
self._stats = {}
def _convert_size_GB(self, size):
s = round(float(size) / units.Gi, 2)
if s > 0:
return s
else:
return 0
def _conver_uuid2hex(self, strID):
if strID:
return strID.replace('-', '')
else:
return None
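# Extract the asynchronous event UUID from a Flexvisor response body; returns errno.EINVAL when the metadata or event_uuid field is missing.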
def _get_event_uuid(self, output):
ret = 0
event_uuid = ""
if type(output) is dict and \
output.get("metadata") and output["metadata"]:
if output["metadata"].get("event_uuid") and \
output["metadata"]["event_uuid"]:
event_uuid = output["metadata"]["event_uuid"]
else:
ret = errno.EINVAL
else:
ret = errno.EINVAL
return ret, event_uuid
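# Poll the supplied status callback until the asynchronous operation completes, fails, or the object disappears; non-zero return codes are retried up to 30 times.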
def _wait_event(self, callFun, objuuid, eventid=None):
nRetry = 30
fExit = False
status = {}
status['state'] = 'error'
status['output'] = {}
while nRetry:
try:
if eventid:
ret, output = callFun(
self._conver_uuid2hex(objuuid),
self._conver_uuid2hex(eventid))
else:
ret, output = callFun(self._conver_uuid2hex(objuuid))
if ret == 0:
if output['completionStatus'] == 'Complete':
fExit = True
status['state'] = 'available'
status['output'] = output
elif output['completionStatus'] == 'Error':
fExit = True
status['state'] = 'error'
raise loopingcall.LoopingCallDone(retvalue=False)
else:
nsleep = random.randint(0, 10)
value = round(float(nsleep) / 10, 2)
time.sleep(value)
elif ret == errno.ENODATA:
status['state'] = 'deleted'
fExit = True
else:
nRetry -= 1
time.sleep(3)
continue
except Exception as e:
LOG.error(_LE('Flexvisor failed to get event %(volume)s '
'(%(status)s).'),
{'volume': eventid, 'status': e})
raise loopingcall.LoopingCallDone(retvalue=False)
status['state'] = 'error'
fExit = True
if fExit is True:
break
return status
def _join_volume_group(self, volume):
# Join volume group if consistency group id not empty
cgId = volume['consistencygroup_id']
msg = ''
try:
ret, output = self.dpl.join_vg(
self._conver_uuid2hex(volume['id']),
self._conver_uuid2hex(cgId))
except Exception as e:
ret = errno.EFAULT
msg = _('Flexvisor failed to add volume %(id)s '
'due to %(reason)s.') % {"id": volume['id'],
"reason": six.text_type(e)}
if ret:
if not msg:
msg = _('Flexvisor failed to add volume %(id)s '
'to group %(cgid)s.') % {'id': volume['id'],
'cgid': cgId}
raise exception.VolumeBackendAPIException(data=msg)
else:
LOG.info(_LI('Flexvisor succeeded to add volume %(id)s to '
'group %(cgid)s.'),
{'id': volume['id'], 'cgid': cgId})
def _get_snapshotid_of_vgsnapshot(self, vgID, vgsnapshotID, volumeID):
snapshotID = None
ret, out = self.dpl.query_vdev_snapshot(vgID, vgsnapshotID, True)
if ret == 0:
volumes = out.get('metadata', {}).get('member', {})
if volumes:
snapshotID = volumes.get(volumeID, None)
else:
msg = _('Flexvisor failed to get snapshot id of volume '
'%(id)s from group %(vgid)s.') % {'id': volumeID,
'vgid': vgID}
raise exception.VolumeBackendAPIException(data=msg)
if not snapshotID:
msg = _('Flexvisor could not find volume %(id)s snapshot in'
' the group %(vgid)s snapshot '
'%(vgsid)s.') % {'id': volumeID, 'vgid': vgID,
'vgsid': vgsnapshotID}
raise exception.VolumeBackendAPIException(data=msg)
return snapshotID
def create_export(self, context, volume):
pass
def ensure_export(self, context, volume):
pass
def remove_export(self, context, volume):
pass
def create_consistencygroup(self, context, group):
"""Creates a consistencygroup."""
LOG.info(_LI('Start to create consistency group: %(group_name)s '
'id: %(id)s'),
{'group_name': group['name'], 'id': group['id']})
model_update = {'status': 'available'}
try:
ret, output = self.dpl.create_vg(
self._conver_uuid2hex(group['id']),
group['name'],
group['description'])
if ret:
msg = _('Failed to create consistency group '
'%(id)s:%(ret)s.') % {'id': group['id'],
'ret': ret}
raise exception.VolumeBackendAPIException(data=msg)
else:
return model_update
except Exception as e:
msg = _('Failed to create consistency group '
'%(id)s due to %(reason)s.') % {'id': group['id'],
'reason': six.text_type(e)}
raise exception.VolumeBackendAPIException(data=msg)
def delete_consistencygroup(self, context, group):
"""Delete a consistency group."""
ret = 0
volumes = self.db.volume_get_all_by_group(
context, group['id'])
model_update = {}
model_update['status'] = group['status']
LOG.info(_LI('Start to delete consistency group: %(cg_name)s'),
{'cg_name': group['id']})
try:
self.dpl.delete_vg(self._conver_uuid2hex(group['id']))
except Exception as e:
msg = _('Failed to delete consistency group %(id)s '
'due to %(reason)s.') % {'id': group['id'],
'reason': six.text_type(e)}
raise exception.VolumeBackendAPIException(data=msg)
for volume_ref in volumes:
try:
self.dpl.delete_vdev(self._conver_uuid2hex(volume_ref['id']))
volume_ref['status'] = 'deleted'
except Exception:
ret = errno.EFAULT
volume_ref['status'] = 'error_deleting'
model_update['status'] = 'error_deleting'
if ret == 0:
model_update['status'] = 'deleted'
return model_update, volumes
def create_cgsnapshot(self, context, cgsnapshot):
"""Creates a cgsnapshot."""
cgId = cgsnapshot['consistencygroup_id']
cgsnapshot_id = cgsnapshot['id']
snapshots = self.db.snapshot_get_all_for_cgsnapshot(
context, cgsnapshot_id)
model_update = {}
LOG.info(_LI('Start to create cgsnapshot for consistency group'
': %(group_name)s'), {'group_name': cgId})
try:
self.dpl.create_vdev_snapshot(self._conver_uuid2hex(cgId),
self._conver_uuid2hex(cgsnapshot_id),
cgsnapshot['name'],
cgsnapshot['description'],
True)
for snapshot in snapshots:
snapshot['status'] = 'available'
except Exception as e:
msg = _('Failed to create cg snapshot %(id)s '
'due to %(reason)s.') % {'id': cgsnapshot_id,
'reason': six.text_type(e)}
raise exception.VolumeBackendAPIException(data=msg)
model_update['status'] = 'available'
return model_update, snapshots
def delete_cgsnapshot(self, context, cgsnapshot):
"""Deletes a cgsnapshot."""
cgId = cgsnapshot['consistencygroup_id']
cgsnapshot_id = cgsnapshot['id']
snapshots = self.db.snapshot_get_all_for_cgsnapshot(
context, cgsnapshot_id)
model_update = {}
model_update['status'] = cgsnapshot['status']
LOG.info(_LI('Delete cgsnapshot %(snap_name)s for consistency group: '
'%(group_name)s'),
{'snap_name': cgsnapshot['id'],
'group_name': cgsnapshot['consistencygroup_id']})
try:
self.dpl.delete_vdev_snapshot(self._conver_uuid2hex(cgId),
self._conver_uuid2hex(cgsnapshot_id),
True)
for snapshot in snapshots:
snapshot['status'] = 'deleted'
except Exception as e:
msg = _('Failed to delete cgsnapshot %(id)s due to '
'%(reason)s.') % {'id': cgsnapshot_id,
'reason': six.text_type(e)}
raise exception.VolumeBackendAPIException(data=msg)
model_update['status'] = 'deleted'
return model_update, snapshots
def create_volume(self, volume):
"""Create a volume."""
pool = volume_utils.extract_host(volume['host'],
level='pool')
if not pool:
if not self.configuration.dpl_pool:
msg = _("Pool is not available in the volume host fields.")
raise exception.InvalidHost(reason=msg)
else:
pool = self.configuration.dpl_pool
ret, output = self.dpl.create_vdev(
self._conver_uuid2hex(volume['id']),
volume.get('display_name', ''),
volume.get('display_description', ''),
pool,
int(volume['size']) * units.Gi,
self.configuration.san_thin_provision)
if ret == errno.EAGAIN:
ret, event_uuid = self._get_event_uuid(output)
if ret == 0:
status = self._wait_event(self.dpl.get_vdev_status,
volume['id'],
event_uuid)
if status['state'] != 'available':
msg = _('Flexvisor failed to create volume %(volume)s: '
'%(status)s.') % {'volume': volume['id'],
'status': ret}
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = _('Flexvisor failed to create volume (get event) '
'%s.') % (volume['id'])
raise exception.VolumeBackendAPIException(
data=msg)
elif ret != 0:
msg = _('Flexvisor create volume failed.:%(volumeid)s:'
'%(status)s.') % {'volumeid': volume['id'],
'status': ret}
raise exception.VolumeBackendAPIException(
data=msg)
else:
LOG.info(_LI('Flexvisor succeeded to create volume %(id)s.'),
{'id': volume['id']})
if volume.get('consistencygroup_id', None):
try:
self._join_volume_group(volume)
except Exception:
# Delete volume if volume failed to join group.
self.dpl.delete_vdev(self._conver_uuid2hex(volume['id']))
msg = _('Flexvisor failed to create volume %(id)s in the '
'group %(vgid)s.') % {
'id': volume['id'],
'vgid': volume['consistencygroup_id']}
raise exception.VolumeBackendAPIException(data=msg)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
src_volume = None
vgID = None
# Detect whether a member of the group.
snapshotID = snapshot['id']
# Try to get cgid if volume belong in the group.
src_volumeID = snapshot['volume_id']
cgsnapshotID = snapshot.get('cgsnapshot_id', None)
if cgsnapshotID:
try:
src_volume = self.db.volume_get(src_volumeID)
except Exception:
msg = _("Flexvisor unable to find the source volume "
"%(id)s info.") % {'id': src_volumeID}
raise exception.VolumeBackendAPIException(data=msg)
if src_volume:
vgID = src_volume.get('consistencygroup_id', None)
# Get the volume origin snapshot id if the source snapshot is group
# snapshot.
if vgID:
snapshotID = self._get_snapshotid_of_vgsnapshot(
self._conver_uuid2hex(vgID),
self._conver_uuid2hex(cgsnapshotID),
self._conver_uuid2hex(src_volumeID))
pool = volume_utils.extract_host(volume['host'],
level='pool')
if not pool:
if not self.configuration.dpl_pool:
msg = _("Pool is not available in the volume host fields.")
raise exception.InvalidHost(reason=msg)
else:
pool = self.configuration.dpl_pool
ret, output = self.dpl.create_vdev_from_snapshot(
self._conver_uuid2hex(volume['id']),
volume.get('display_name', ''),
volume.get('display_description', ''),
self._conver_uuid2hex(snapshotID),
pool,
self.configuration.san_thin_provision)
if ret == errno.EAGAIN:
ret, event_uuid = self._get_event_uuid(output)
if ret == 0:
status = self._wait_event(self.dpl.get_vdev_status,
volume['id'],
event_uuid)
if status['state'] != 'available':
msg = _('Flexvisor failed to create volume from '
'snapshot %(id)s:'
'%(status)s.') % {'id': snapshot['id'],
'status': ret}
raise exception.VolumeBackendAPIException(
data=msg)
else:
msg = _('Flexvisor failed to create volume from snapshot '
'(failed to get event) '
'%(id)s.') % {'id': snapshot['id']}
raise exception.VolumeBackendAPIException(data=msg)
elif ret != 0:
msg = _('Flexvisor failed to create volume from snapshot '
'%(id)s: %(status)s.') % {'id': snapshot['id'],
'status': ret}
raise exception.VolumeBackendAPIException(
data=msg)
else:
LOG.info(_LI('Flexvisor succeeded to create volume %(id)s '
'from snapshot.'), {'id': volume['id']})
if volume.get('consistencygroup_id', None):
try:
self._join_volume_group(volume)
except Exception:
# Delete volume if volume failed to join group.
self.dpl.delete_vdev(self._conver_uuid2hex(volume['id']))
raise
def spawn_volume_from_snapshot(self, volume, snapshot):
"""Spawn a REFERENCED volume from a snapshot."""
ret, output = self.dpl.spawn_vdev_from_snapshot(
self._conver_uuid2hex(volume['id']),
self._conver_uuid2hex(snapshot['volume_id']),
volume.get('display_name', ''),
volume.get('display_description', ''),
self._conver_uuid2hex(snapshot['id']))
if ret == errno.EAGAIN:
# its an async process
ret, event_uuid = self._get_event_uuid(output)
if ret == 0:
status = self._wait_event(self.dpl.get_vdev_status,
volume['id'], event_uuid)
if status['state'] != 'available':
msg = _('Flexvisor failed to spawn volume from snapshot '
'%(id)s:%(status)s.') % {'id': snapshot['id'],
'status': ret}
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = _('Flexvisor failed to spawn volume from snapshot '
'(failed to get event) '
'%(id)s.') % {'id': snapshot['id']}
raise exception.VolumeBackendAPIException(data=msg)
elif ret != 0:
msg = _('Flexvisor failed to create volume from snapshot '
'%(id)s: %(status)s.') % {'id': snapshot['id'],
'status': ret}
raise exception.VolumeBackendAPIException(
data=msg)
else:
LOG.info(_LI('Flexvisor succeeded to create volume %(id)s '
'from snapshot.'), {'id': volume['id']})
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
pool = volume_utils.extract_host(volume['host'],
level='pool')
if not pool:
if not self.configuration.dpl_pool:
msg = _("Pool is not available in the volume host fields.")
raise exception.InvalidHost(reason=msg)
else:
pool = self.configuration.dpl_pool
ret, output = self.dpl.clone_vdev(
self._conver_uuid2hex(src_vref['id']),
self._conver_uuid2hex(volume['id']),
pool,
volume.get('display_name', ''),
volume.get('display_description', ''),
int(volume['size']) * units.Gi,
self.configuration.san_thin_provision)
if ret == errno.EAGAIN:
ret, event_uuid = self._get_event_uuid(output)
if ret == 0:
status = self._wait_event(self.dpl.get_vdev_status,
volume['id'],
event_uuid)
if status['state'] != 'available':
msg = _('Flexvisor failed to clone volume %(id)s: '
'%(status)s.') % {'id': src_vref['id'],
'status': ret}
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = _('Flexvisor failed to clone volume (failed to'
' get event) %(id)s.') % {'id': src_vref['id']}
raise exception.VolumeBackendAPIException(
data=msg)
elif ret != 0:
msg = _('Flexvisor failed to clone volume %(id)s: '
'%(status)s.') % {'id': src_vref['id'], 'status': ret}
raise exception.VolumeBackendAPIException(
data=msg)
else:
LOG.info(_LI('Flexvisor succeeded to clone volume %(id)s.'),
{'id': volume['id']})
if volume.get('consistencygroup_id', None):
try:
self._join_volume_group(volume)
except Exception:
# Delete volume if volume failed to join group.
self.dpl.delete_vdev(self._conver_uuid2hex(volume['id']))
msg = _('Flexvisor volume %(id)s failed to join group '
'%(vgid)s.') % {'id': volume['id'],
'vgid': volume['consistencygroup_id']}
raise exception.VolumeBackendAPIException(data=msg)
def delete_volume(self, volume):
"""Deletes a volume."""
ret = 0
if volume.get('consistencygroup_id', None):
msg = ''
try:
ret, out = self.dpl.leave_vg(
self._conver_uuid2hex(volume['id']),
self._conver_uuid2hex(volume['consistencygroup_id']))
if ret:
LOG.warning(_LW('Flexvisor failed to delete volume '
'%(id)s from the group %(vgid)s.'),
{'id': volume['id'],
'vgid': volume['consistencygroup_id']})
except Exception as e:
LOG.warning(_LW('Flexvisor failed to delete volume %(id)s '
'from group %(vgid)s due to %(status)s.'),
{'id': volume['id'],
'vgid': volume['consistencygroup_id'],
'status': e})
if ret:
ret = 0
ret, output = self.dpl.delete_vdev(self._conver_uuid2hex(volume['id']))
if ret == errno.EAGAIN:
status = self._wait_event(self.dpl.get_vdev, volume['id'])
if status['state'] == 'error':
msg = _('Flexvisor failed deleting volume %(id)s: '
'%(status)s.') % {'id': volume['id'], 'status': ret}
raise exception.VolumeBackendAPIException(data=msg)
elif ret == errno.ENODATA:
ret = 0
LOG.info(_LI('Flexvisor volume %(id)s does not '
'exist.'), {'id': volume['id']})
elif ret != 0:
msg = _('Flexvisor failed to delete volume %(id)s: '
'%(status)s.') % {'id': volume['id'], 'status': ret}
raise exception.VolumeBackendAPIException(
data=msg)
def extend_volume(self, volume, new_size):
ret, output = self.dpl.extend_vdev(self._conver_uuid2hex(volume['id']),
volume.get('display_name', ''),
volume.get('display_description',
''),
new_size * units.Gi)
if ret == errno.EAGAIN:
ret, event_uuid = self._get_event_uuid(output)
if ret == 0:
status = self._wait_event(self.dpl.get_vdev_status,
volume['id'],
event_uuid)
if status['state'] != 'available':
msg = _('Flexvisor failed to extend volume '
'%(id)s:%(status)s.') % {'id': volume,
'status': ret}
raise exception.VolumeBackendAPIException(
data=msg)
else:
msg = _('Flexvisor failed to extend volume '
'(failed to get event) '
'%(id)s.') % {'id': volume['id']}
raise exception.VolumeBackendAPIException(data=msg)
elif ret != 0:
msg = _('Flexvisor failed to extend volume '
'%(id)s: %(status)s.') % {'id': volume['id'],
'status': ret}
raise exception.VolumeBackendAPIException(
data=msg)
else:
LOG.info(_LI('Flexvisor succeeded to extend volume'
' %(id)s.'), {'id': volume['id']})
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
ret, output = self.dpl.create_vdev_snapshot(
self._conver_uuid2hex(snapshot['volume_id']),
self._conver_uuid2hex(snapshot['id']),
snapshot.get('display_name', ''),
snapshot.get('display_description', ''))
if ret == errno.EAGAIN:
ret, event_uuid = self._get_event_uuid(output)
if ret == 0:
status = self._wait_event(self.dpl.get_vdev_status,
snapshot['volume_id'],
event_uuid)
if status['state'] != 'available':
                    msg = _('Flexvisor failed to create snapshot for volume '
                            '%(id)s: %(status)s.') % \
                        {'id': snapshot['volume_id'],
                         'status': status['state']}
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = _('Flexvisor failed to create snapshot for volume '
'(failed to get event) %(id)s.') % \
{'id': snapshot['volume_id']}
raise exception.VolumeBackendAPIException(data=msg)
elif ret != 0:
msg = _('Flexvisor failed to create snapshot for volume %(id)s: '
'%(status)s.') % {'id': snapshot['volume_id'],
'status': ret}
raise exception.VolumeBackendAPIException(data=msg)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
ret, output = self.dpl.delete_vdev_snapshot(
self._conver_uuid2hex(snapshot['volume_id']),
self._conver_uuid2hex(snapshot['id']))
if ret == errno.EAGAIN:
ret, event_uuid = self._get_event_uuid(output)
if ret == 0:
status = self._wait_event(self.dpl.get_vdev_status,
snapshot['volume_id'],
event_uuid)
if status['state'] != 'available':
                    msg = _('Flexvisor failed to delete snapshot %(id)s: '
                            '%(status)s.') % {'id': snapshot['id'],
                                              'status': status['state']}
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = _('Flexvisor failed to delete snapshot (failed to '
'get event) %(id)s.') % {'id': snapshot['id']}
raise exception.VolumeBackendAPIException(data=msg)
elif ret == errno.ENODATA:
            LOG.info(_LI('Flexvisor snapshot %(id)s does not exist.'),
                     {'id': snapshot['id']})
elif ret != 0:
msg = _('Flexvisor failed to delete snapshot %(id)s: '
'%(status)s.') % {'id': snapshot['id'], 'status': ret}
raise exception.VolumeBackendAPIException(data=msg)
else:
            LOG.info(_LI('Flexvisor successfully deleted snapshot %(id)s.'),
                     {'id': snapshot['id']})
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, run update the stats first.
"""
if refresh:
self._update_volume_stats()
return self._stats
def _get_pools(self):
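        """Collect capacity information for the pools to report.

        If dpl_pool is set in the configuration, only that pool is queried;
        otherwise every pool reported by the backend is included.
        """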
pools = []
qpools = []
# Defined access pool by cinder configuration.
defined_pool = self.configuration.dpl_pool
if defined_pool:
qpools.append(defined_pool)
else:
try:
ret, output = self.dpl.get_pools()
if ret == 0:
for poolUuid, poolName in output.get('children', []):
qpools.append(poolUuid)
else:
LOG.error(_LE("Flexvisor failed to get pool list."
"(Error: %d)"), ret)
except Exception as e:
LOG.error(_LE("Flexvisor failed to get pool list due to "
"%s."), e)
# Query pool detail information
for poolid in qpools:
ret, output = self._get_pool_info(poolid)
if ret == 0:
pool = {}
pool['pool_name'] = output['metadata']['pool_uuid']
pool['total_capacity_gb'] = \
self._convert_size_GB(
int(output['metadata']['total_capacity']))
pool['free_capacity_gb'] = \
self._convert_size_GB(
int(output['metadata']['available_capacity']))
pool['allocated_capacity_gb'] = \
self._convert_size_GB(
int(output['metadata']['used_capacity']))
pool['QoS_support'] = False
pool['reserved_percentage'] = 0
pools.append(pool)
else:
LOG.warning(_LW("Failed to query pool %(id)s status "
"%(ret)d."), {'id': poolid, 'ret': ret})
continue
return pools
def _update_volume_stats(self, refresh=False):
"""Return the current state of the volume service. If 'refresh' is
True, run the update first.
"""
data = {}
pools = self._get_pools()
data['volume_backend_name'] = \
self.configuration.safe_get('volume_backend_name')
location_info = '%(driver)s:%(host)s:%(volume)s' % {
'driver': self.__class__.__name__,
'host': self.configuration.san_ip,
'volume': self.configuration.dpl_pool
}
try:
ret, output = self.dpl.get_server_info()
if ret == 0:
data['vendor_name'] = output['metadata']['vendor']
data['driver_version'] = output['metadata']['version']
data['storage_protocol'] = 'iSCSI'
data['location_info'] = location_info
data['consistencygroup_support'] = True
data['pools'] = pools
self._stats = data
except Exception as e:
LOG.error(_LE('Failed to get server info due to '
'%(state)s.'), {'state': e})
return self._stats
def do_setup(self, context):
"""Any initialization the volume driver does while starting."""
self.context = context
LOG.info(_LI('Activate Flexvisor cinder volume driver.'))
def check_for_setup_error(self):
"""Check DPL can connect properly."""
pass
def _get_pool_info(self, poolid):
"""Query pool information."""
ret, output = self.dpl.get_pool(poolid)
if ret == errno.EAGAIN:
ret, event_uuid = self._get_event_uuid(output)
if ret == 0:
status = self._wait_event(self.dpl.get_pool_status, poolid,
event_uuid)
if status['state'] != 'available':
                    msg = _('Flexvisor failed to get pool info %(id)s: '
                            '%(status)s.') % {'id': poolid,
                                              'status': status['state']}
raise exception.VolumeBackendAPIException(data=msg)
else:
ret = 0
output = status.get('output', {})
else:
LOG.error(_LE('Flexvisor failed to get pool %(id)s info.'),
{'id': poolid})
raise exception.VolumeBackendAPIException(
data="failed to get event")
elif ret != 0:
msg = _('Flexvisor failed to get pool info %(id)s: '
'%(status)s.') % {'id': poolid, 'status': ret}
raise exception.VolumeBackendAPIException(data=msg)
else:
            LOG.debug('Flexvisor successfully retrieved pool info.')
return ret, output
# Copyright 2001 by Katharine Lindner. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Martel based parser to read MetaTool output files.
This is a huge regular regular expression for MetaTool 3.5 output, built using
the 'regular expressiona on steroids' capabilities of Martel.
http://www2.bioinf.mdc-berlin.de/metabolic/metatool/
This helps us have endlines be consistent across platforms.
"""
# standard library
import string
# Martel
from Martel import Opt, Alt, Digits, Integer, Group, Str, MaxRepeat
from Martel import Any, AnyBut, RepN, Rep, Rep1, ToEol, AnyEol
from Martel import Expression
from Martel import RecordReader
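# The Martel combinators below build up the grammar piece by piece; each
# Group is given a name so that the matched text can be reported under that
# tag when the expression is compiled into a parser.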
blank = ' '
tab = '\t'
blank_space = MaxRepeat( Any( blank + tab), 1, 80 )
optional_blank_space = Rep( Any( blank + tab ) )
white_space = " \t" + chr( 10 ) + chr( 13 )
blank_line = optional_blank_space + AnyEol()
lower_case_letter = Group( "lower_case_letter", Any( "abcdefghijklmnopqrstuvwxyz" ) )
digits = "0123456789"
enzyme = Group( "enzyme", optional_blank_space + Digits() +
optional_blank_space + Str( ':' ) + ToEol() )
reaction = Group( "reaction", optional_blank_space + Digits() +
optional_blank_space + Str( ":" ) + ToEol() )
not_found_line = Group( "not_found_line", optional_blank_space + Str( "- not found -" ) +
ToEol() )
enzymes_header = Group( "enzymes_header", optional_blank_space + Str( "enzymes" ) +
ToEol() )
enzymes_list = Group( "enzymes_list", Alt( Rep1( enzyme ), \
not_found_line ) )
enzymes_block = Group( "enzymes_block", enzymes_header + Rep( blank_line ) +
enzymes_list )
reactions_header = Group( "reactions_header", optional_blank_space +
Str( "overall reaction" ) + ToEol() )
reactions_list = Group( "reactions_list", Alt( Rep1( reaction ), \
not_found_line ) )
reactions_block = Group( "reactions_block", reactions_header + Rep( blank_line ) +
reactions_list )
rev = Group( "rev", Opt( lower_case_letter ) )
version = Group( "version", Digits( "version_major") + Any( "." ) +
Digits( "version_minor") + rev )
metatool_tag = Str( "METATOOL OUTPUT" )
metatool_line = Group( "metatool_line", metatool_tag + blank_space +
Str( "Version" ) + blank_space + version + ToEol() )
input_file_tag = Str( "INPUT FILE:" )
input_file_line = Group( "input_file_line", input_file_tag + blank_space +
ToEol( "input_file_name" ) )
metabolite_count_tag = Str( "INTERNAL METABOLITES:" )
metabolite_count_line = Group( "metabolite_count_line", metabolite_count_tag +
blank_space + Digits( "num_int_metabolites" ) + ToEol() )
reaction_count_tag = Str( "REACTIONS:" )
reaction_count_line = Group( "reaction_count_line", reaction_count_tag + blank_space +
Digits( "num_reactions" ) + ToEol() )
type_metabolite = Group( "type_metabolite", Alt( Str( "int" ), \
Str( "external" ) ) )
metabolite_info = Group( "metabolite_info", optional_blank_space +
Digits() + blank_space + type_metabolite + blank_space +
# Integer() + blank_space + Rep1( lower_case_letter ) +
Rep1( AnyBut( white_space ) ) )
metabolite_line = Group( "metabolite_line", metabolite_info + ToEol() )
metabolites_summary = Group( "metabolites_summary", optional_blank_space + Digits() +
blank_space + Str( "metabolites" ) + ToEol() )
metabolites_block = Group( "metabolites_block", Rep1( metabolite_line ) +
metabolites_summary + Rep( blank_line ) )
graph_structure_heading = Group( "graph_structure_heading", optional_blank_space +
Str( "edges" ) + blank_space + Str( "frequency of nodes" ) + ToEol() )
graph_structure_line = Group( "graph_structure_line", optional_blank_space +
Digits( "edge_count" ) + blank_space + Digits( "num_nodes" ) + ToEol() )
graph_structure_block = Group( "graph_structure_block", \
graph_structure_heading + Rep( blank_line ) +
Rep1( graph_structure_line ) + Rep( blank_line ) )
sum_is_constant_line = Group( "sum_is_constant_line", optional_blank_space +
Digits() + optional_blank_space + Any( ":" ) + optional_blank_space +
Rep1( AnyBut( white_space ) ) +
Rep( blank_space + Any( "+" ) + blank_space + Rep1( AnyBut( white_space ) ) ) +
optional_blank_space + Str( "=" ) + ToEol() )
sum_is_constant_block = Group( "sum_is_constant_block", Rep( sum_is_constant_line ) )
stoichiometric_tag = Group( "stoichiometric_tag", Str( "STOICHIOMETRIC MATRIX" ) )
stoichiometric_line = Group( "stoichiometric_line", stoichiometric_tag +
ToEol() )
not_balanced_tag = Group( "not_balanced_tag", Str( "NOT BALANCED INTERNAL METABOLITES" ) )
not_balanced_line = Group( "not_balanced_line", not_balanced_tag +
ToEol() )
subsets_tag = Group( "subsets_tag", Str( "SUBSETS OF REACTIONS" ) )
subsets_line = Group( "subsets_line", \
subsets_tag + ToEol() )
reduced_system_tag = Group( "reduced_system_tag", Str( "REDUCED SYSTEM" ) )
reduced_system_line = Group( "reduced_system_line", reduced_system_tag +
Rep1( AnyBut( digits ) ) + Digits( "branch_points" ) +
Rep1( AnyBut( digits ) ) + Digits() + ToEol() )
kernel_tag = Group( "kernel_tag", Str( "KERNEL" ) )
kernel_line = Group( "kernel_line", kernel_tag + ToEol() )
convex_basis_tag = Group( "convex_basis_tag", Str( "CONVEX BASIS" ) )
convex_basis_line = Group( "convex_basis_line", convex_basis_tag +
ToEol() )
conservation_relations_tag = Group( "conservation_relations_tag", \
Str( "CONSERVATION RELATIONS" ) )
conservation_relations_line = Group( "conservation_relations_line", \
conservation_relations_tag + ToEol() )
elementary_modes_tag = Group( "elementary_modes_tag", \
Str( "ELEMENTARY MODES" ) )
elementary_modes_line = Group( "elementary_modes_line", \
elementary_modes_tag + ToEol() )
num_rows = Group( "num_rows", Digits() )
num_cols = Group( "num_cols", Digits() )
matrix_header = Group( "matrix_header", optional_blank_space +
Str( "matrix dimension" ) + blank_space + Any( "r" ) +
num_rows + blank_space + Any( "x" ) + blank_space +
Any( "c" ) + num_cols + optional_blank_space + AnyEol() )
matrix_element = Group( "matrix_element", Integer() )
matrix_row = Group( "matrix_row", MaxRepeat( optional_blank_space + matrix_element, \
"num_cols", "num_cols" ) + ToEol() )
matrix = Group( "matrix", MaxRepeat( matrix_row, "num_rows", "num_rows" ) )
matrix_block = Group( "matrix_block", matrix_header + matrix )
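# Note that MaxRepeat above is given the group names "num_rows" and
# "num_cols" rather than literal counts, so the matrix is expected to contain
# exactly as many rows and columns as the dimensions announced in
# matrix_header.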
irreversible_vector = Group( "irreversible_vector", \
MaxRepeat( blank_space + matrix_element, "num_cols", "num_cols" ) +
ToEol() )
little_gap = Str( " " )
big_gap = Alt( Str( "\t" ), MaxRepeat( Str( " " ), 2, 80 ) )
unbalanced_metabolite = Group( "unbalanced_metabolite", \
Rep1( AnyBut( white_space ) ) + Opt( little_gap +
Rep1( AnyBut( white_space ) ) ) )
not_balanced_data = Group( "not_balanced_data", optional_blank_space +
unbalanced_metabolite + Rep( big_gap + unbalanced_metabolite ) + ToEol() )
metabolite_roles_heading = Group( "metabolite_roles_heading", \
Str( "->" ) + ToEol() )
metabolite_role_cols = Group( "metabolite_role_cols", \
optional_blank_space + Str( "met" ) + blank_space + Str( "cons" ) +
blank_space + Str( "built" ) +
blank_space + Str( "reactions" ) + ToEol() )
branch_metabolite = Group( "branch_metabolite", optional_blank_space +
Rep1( AnyBut( white_space ) ) + blank_space +
RepN( Digits() + blank_space, 3 ) + Rep1( Any( "ir" ) ) + ToEol() )
non_branch_metabolite = Group( "non_branch_metabolite", optional_blank_space +
Rep1( AnyBut( white_space ) ) + blank_space +
RepN( Digits() + blank_space, 3 ) + Rep1( Any( "ir" ) ) + ToEol() )
branch_metabolite_block = Group( "branch_metabolite_block", \
metabolite_roles_heading +
metabolite_role_cols + Rep( branch_metabolite ) )
non_branch_metabolite_block = Group( "non_branch_metabolite_block", \
metabolite_roles_heading +
metabolite_role_cols + Rep( non_branch_metabolite ) )
end_stoichiometric = Group( "end_stoichiometric", \
Rep( Expression.Assert( not_balanced_tag, 1 ) +
Expression.Assert( kernel_tag, 1 ) + ToEol() ) )
end_not_balanced = Group( "end_not_balanced", \
Rep( Expression.Assert( kernel_tag, 1 ) + ToEol() ) )
end_kernel = Group( "end_kernel", \
Rep( Expression.Assert( subsets_tag, 1 ) + ToEol() ) )
end_subsets = Group( "end_subsets", \
Rep( Expression.Assert( reduced_system_tag, 1 ) + ToEol() ) )
end_reduced_system = Group( "end_reduced_system", \
Rep( Expression.Assert( convex_basis_tag, 1 ) + ToEol() ) )
end_convex_basis = Group( "end_convex_basis", \
Rep( Expression.Assert( conservation_relations_tag, 1 ) + ToEol() ) )
end_conservation_relations = Group( "end_conservation_relations", \
Rep( Expression.Assert( elementary_modes_tag, 1 ) + ToEol() ) )
end_elementary_modes = Group( "end_elementary_modes", Rep( ToEol() ) )
# Rep1( AnyBut( '.') ) + Str( "." ) )
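# The end_* expressions above use Expression.Assert(tag, 1) as a negative
# lookahead: they keep consuming whole lines until the tag that opens the
# next section appears, which is how free-form text between sections is
# skipped.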
input_file_block = Group( "input_file_block", input_file_line +
Rep( blank_line ) )
metatool_block = Group( "metatool_block", metatool_line + Rep1( blank_line ) )
metabolite_count_block = Group( "metabolite_count_block", \
metabolite_count_line + Rep( blank_line ) )
reaction_count_block = Group( "reaction_count_block", reaction_count_line +
Rep( blank_line ) + metabolites_block + Rep( blank_line ) +
graph_structure_block + Rep( blank_line ) )
stoichiometric_block = Group( "stoichiometric_block", stoichiometric_line +
Rep( blank_line ) + matrix_block + ToEol() + irreversible_vector +
end_stoichiometric )
not_balanced_block = Group( "not_balanced_block", not_balanced_line +
Rep( blank_line ) + not_balanced_data + Rep( blank_line ) )
kernel_block = Group( "kernel_block", kernel_line + Rep( blank_line ) +
matrix_block + ToEol() + Rep( blank_line ) + enzymes_block +
Rep( blank_line ) + reactions_block + end_kernel )
subsets_block = Group( "subsets_block", subsets_line + Rep( blank_line ) +
matrix_block + ToEol() + Rep( blank_line ) + enzymes_block +
Rep( blank_line ) + reactions_block + end_subsets )
reduced_system_block = Group( "reduced_system_block", reduced_system_line +
Rep( blank_line ) + matrix_block + ToEol() + irreversible_vector +
Rep( blank_line ) + branch_metabolite_block + Rep( blank_line ) +
non_branch_metabolite_block + end_reduced_system )
convex_basis_block = Group( "convex_basis_block", convex_basis_line +
Rep( blank_line ) + matrix_block + Opt( ToEol() ) + Rep( blank_line ) +
enzymes_block + Rep( blank_line ) + reactions_block + end_convex_basis )
conservation_relations_block = Group( "conservation_relations_block", \
conservation_relations_line + Rep( blank_line ) + matrix_block +
Rep( blank_line ) + sum_is_constant_block +
end_conservation_relations )
elementary_modes_block = Group( "elementary_modes_block", elementary_modes_line +
Rep( blank_line ) + matrix_block + Opt( ToEol() ) + Rep( blank_line ) +
enzymes_block + Rep( blank_line ) + reactions_block + end_elementary_modes )
metatool_record = Group( "metatool_record", metatool_block + input_file_block +
metabolite_count_block + reaction_count_block + stoichiometric_block +
Opt( not_balanced_block ) + kernel_block + subsets_block +
reduced_system_block + convex_basis_block + conservation_relations_block +
elementary_modes_block )
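# metatool_record ties the individual blocks together in the order they
# appear in a MetaTool 3.5 output file: the METATOOL/version header, the
# input file name, metabolite and reaction counts, the stoichiometric matrix,
# an optional NOT BALANCED section, and then the KERNEL, SUBSETS, REDUCED
# SYSTEM, CONVEX BASIS, CONSERVATION RELATIONS and ELEMENTARY MODES blocks.
#
# A rough usage sketch (untested; the exact calls depend on the Martel
# version, so treat the names below as assumptions rather than a reference):
#
#     from xml.sax import handler
#     parser = metatool_record.make_parser()
#     parser.setContentHandler(handler.ContentHandler())  # or a custom one
#     parser.parseFile(open("metatool_output.txt"))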