the-stack_106_26048 | """
Copyright (c) 2019, Brian Stafford
Copyright (c) 2019-2020, The Decred developers
See LICENSE for details
Based on dcrd MsgTx.
"""
from decred import DecredError
from decred.crypto.crypto import hashH
from decred.dcr import txscript
from decred.util.encode import ByteArray
from . import wire
# TxVersion is the current latest supported transaction version.
TxVersion = 1 # go type uint16
# TxTreeRegular is the value for a normal transaction tree for a
# transaction's location in a block.
TxTreeRegular = 0 # go type int8
# TxTreeStake is the value for a stake transaction tree for a
# transaction's location in a block.
TxTreeStake = 1 # go type int8
# chainhash.HashSize in go
HASH_SIZE = 32
# minTxInPayload is the minimum payload size for a transaction input.
# PreviousOutPoint.Hash + PreviousOutPoint.Index 4 bytes +
# PreviousOutPoint.Tree 1 byte + Varint for SignatureScript length 1
# byte + Sequence 4 bytes.
minTxInPayload = 11 + HASH_SIZE
# maxTxInPerMessage is the maximum number of transactions inputs that
# a transaction which fits into a message could possibly have.
maxTxInPerMessage = (wire.MaxMessagePayload // minTxInPayload) + 1
# minTxOutPayload is the minimum payload size for a transaction output.
# Value 8 bytes + Varint for PkScript length 1 byte.
minTxOutPayload = 9
# maxTxOutPerMessage is the maximum number of transactions outputs that
# a transaction which fits into a message could possibly have.
maxTxOutPerMessage = (wire.MaxMessagePayload // minTxOutPayload) + 1
# MaxTxInSequenceNum is the maximum sequence number the sequence field
# of a transaction input can be.
MaxTxInSequenceNum = 0xFFFFFFFF
def writeOutPoint(pver, ver, op):
"""
writeOutPoint encodes op to the Decred protocol encoding for an OutPoint
and returns it as a ByteArray.
"""
b = op.hash.copy()
b += ByteArray(op.index, length=4).littleEndian()
b += ByteArray(op.tree, length=1)
return b
def readOutPoint(b, pver, ver):
"""
readOutPoint reads the next sequence of bytes from b as an OutPoint.
"""
op = OutPoint(None, None, None)
op.hash = b.pop(HASH_SIZE)
op.index = b.pop(4).unLittle().int()
op.tree = b.pop(1).int()
return b, op
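# Illustrative sketch (not part of the original module): the 37-byte OutPoint
# encoding is hash (32) + index (4, little-endian) + tree (1), so a write
# followed by a read round-trips:
#   op = OutPoint(ByteArray(0, length=HASH_SIZE), 1, TxTreeRegular)
#   encoded = writeOutPoint(0, TxVersion, op)            # 37-byte ByteArray
#   _, decoded = readOutPoint(encoded.copy(), 0, TxVersion)
#   assert decoded == op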
def readTxInPrefix(b, pver, serType, ver, ti):
if serType == wire.TxSerializeOnlyWitness:
raise DecredError(
"readTxInPrefix: tried to read a prefix input for a witness only tx"
)
# Outpoint.
b, ti.previousOutPoint = readOutPoint(b, pver, ver)
# Sequence.
ti.sequence = b.pop(4).unLittle().int()
def writeTxInPrefix(pver, ver, ti):
"""
writeTxInPrefix encodes ti to the Decred protocol encoding for a transaction
input (TxIn) prefix and returns it as a ByteArray.
"""
b = writeOutPoint(pver, ver, ti.previousOutPoint)
b += ByteArray(ti.sequence, length=4).littleEndian()
return b
def writeTxInWitness(pver, ver, ti):
"""
writeTxInWitness encodes ti to the Decred protocol encoding for a transaction
input (TxIn) witness and returns it as a ByteArray.
"""
# ValueIn
b = ByteArray(ti.valueIn, length=8).littleEndian()
# BlockHeight.
b += ByteArray(ti.blockHeight, length=4).littleEndian()
# BlockIndex.
b += ByteArray(ti.blockIndex, length=4).littleEndian()
# Write the signature script.
b += wire.writeVarBytes(pver, ti.signatureScript)
return b
def readScript(b, pver, maxAllowed, fieldName):
"""
readScript reads a variable length byte array that represents a transaction
script. It is encoded as a varInt containing the length of the array
followed by the bytes themselves. An error is returned if the length is
greater than the passed maxAllowed parameter which helps protect against
memory exhaustion attacks and forced panics through malformed messages. The
fieldName parameter is only used for the error message so it provides more
context in the error.
"""
count = wire.readVarInt(b, pver)
# Prevent byte array larger than the max message size. It would
# be possible to cause memory exhaustion and panics without a sane
# upper bound on this count.
if count > maxAllowed:
msg = "readScript: {} is larger than the max allowed size [count {}, max {}]"
raise DecredError(msg.format(fieldName, count, maxAllowed))
a = b.pop(count)
return b, a
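# Illustrative sketch (assumes the decred wire.readVarInt semantics): a script
# is encoded as a varint length followed by the raw bytes, e.g. for 3 bytes:
#   buf = ByteArray(bytes([0x03, 0xAA, 0xBB, 0xCC]))
#   buf, script = readScript(buf, 0, wire.MaxMessagePayload, "example script")
#   # script == ByteArray(bytes([0xAA, 0xBB, 0xCC]))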
def readTxInWitness(b, pver, ver, ti):
"""
readTxInWitness reads the next sequence of bytes from b as a transaction input
(TxIn) in the transaction witness.
"""
# ValueIn.
ti.valueIn = b.pop(8).unLittle().int()
# BlockHeight.
ti.blockHeight = b.pop(4).unLittle().int()
# BlockIndex.
ti.blockIndex = b.pop(4).unLittle().int()
# Signature script.
b, ti.signatureScript = readScript(
b, pver, wire.MaxMessagePayload, "transaction input signature script"
)
return b
def readTxOut(b, pver, ver, to):
"""
readTxOut reads the next sequence of bytes from b as a transaction output (TxOut).
"""
to.value = b.pop(8).unLittle().int()
to.version = b.pop(2).unLittle().int()
b, to.pkScript = readScript(
b, pver, wire.MaxMessagePayload, "transaction output public key script"
)
return b
def writeTxOut(pver, ver, to):
"""
writeTxOut encodes the TxOut to the Decred protocol encoding for a
transaction output and returns it as a ByteArray.
"""
b = ByteArray(to.value, length=8).littleEndian()
b += ByteArray(to.version, length=2).littleEndian()
b += wire.writeVarBytes(pver, to.pkScript)
return b
# def writeTxScriptsToMsgTx(msg, totalScriptSize, serType):
# # msg *MsgTx, totalScriptSize uint64, serType TxSerializeType) {
# """
# writeTxScriptsToMsgTx allocates the memory for variable length fields in a
# MsgTx TxIns, TxOuts, or both as a contiguous chunk of memory, then fills
# in these fields for the MsgTx by copying to a contiguous piece of memory
# and setting the pointer.
# NOTE: It is no longer valid to return any previously borrowed script
# buffers after this function has run because it is already done and the
# scripts in the transaction inputs and outputs no longer point to the
# buffers.
# Create a single allocation to house all of the scripts and set each
# input signature scripts and output public key scripts to the
# appropriate subslice of the overall contiguous buffer. Then, return
# each individual script buffer back to the pool so they can be reused
# for future deserializations. This is done because it significantly
# reduces the number of allocations the garbage collector needs to track,
# which in turn improves performance and drastically reduces the amount
# of runtime overhead that would otherwise be needed to keep track of
# millions of small allocations.
# Closures around writing the TxIn and TxOut scripts are used in Decred
# because, depending on the serialization type desired, only input or
# output scripts may be required.
# """
# offset = 0
# scripts = ByteArray(0, length=totalScriptSize)
# def writeTxIns():
# nonlocal offset, scripts
# for txIn in msg.txIn:
# # Copy the signature script into the contiguous buffer at the
# # appropriate offset.
# signatureScript = txIn.signatureScript
# scripts[offset] = signatureScript
# # Reset the signature script of the transaction input to the
# # slice of the contiguous buffer where the script lives.
# scriptSize = len(signatureScript)
# end = offset + scriptSize
# txIn.signatureScript = scripts[offset:end]
# offset += scriptSize
# def writeTxOuts():
# nonlocal offset, scripts
# for txOut in msg.txOut:
# # Copy the public key script into the contiguous buffer at the
# # appropriate offset.
# pkScript = txOut.pkScript
# scripts[offset] = pkScript
# # Reset the public key script of the transaction output to the
# # slice of the contiguous buffer where the script lives.
# scriptSize = len(pkScript)
# end = offset + scriptSize
# txOut.pkScript = scripts[offset:end:end]
# offset += scriptSize
# # Return the temporary script buffer to the pool.
# scriptPool.Return(pkScript)
# }
# }
# // Handle the serialization types accordingly.
# switch serType {
# case TxSerializeNoWitness:
# writeTxOuts()
# case TxSerializeOnlyWitness:
# fallthrough
# case TxSerializeFull:
# writeTxIns()
# writeTxOuts()
# }
# }
class TxIn:
"""
TxIn defines a Decred transaction input.
"""
def __init__(
self,
previousOutPoint,
sequence=MaxTxInSequenceNum,
valueIn=0,
blockHeight=0,
blockIndex=0,
signatureScript=None,
):
# Non-witness
self.previousOutPoint = previousOutPoint # OutPoint
self.sequence = sequence # uint32
# Witness
self.valueIn = valueIn # int64
self.blockHeight = blockHeight # uint32
self.blockIndex = blockIndex # uint32
self.signatureScript = (
signatureScript if signatureScript else ByteArray(b"")
) # []byte
def serializeSizePrefix(self):
"""
SerializeSizePrefix returns the number of bytes it would take to serialize
the transaction input for a prefix.
Outpoint Hash 32 bytes + Outpoint Index 4 bytes + Outpoint Tree 1 byte +
Sequence 4 bytes.
"""
return 41
def serializeSizeWitness(self):
"""
SerializeSizeWitness returns the number of bytes it would take to serialize the
transaction input for a witness.
ValueIn (8 bytes) + BlockHeight (4 bytes) + BlockIndex (4 bytes) +
serialized varint size for the length of SignatureScript +
SignatureScript bytes.
"""
return (
8
+ 4
+ 4
+ wire.varIntSerializeSize(len(self.signatureScript))
+ len(self.signatureScript)
)
def __eq__(self, ti):
"""
Check whether all fields are equal
"""
a = (
self.previousOutPoint == ti.previousOutPoint
and self.sequence == ti.sequence
and self.valueIn == ti.valueIn
and self.blockHeight == ti.blockHeight
and self.blockIndex == ti.blockIndex
and self.signatureScript == ti.signatureScript
)
return a
class TxOut:
"""
TxOut defines a Decred transaction output.
"""
def __init__(self, value=0, pkScript=None, version=0):
self.value = value # int64
self.version = version # uint16
self.pkScript = pkScript if pkScript else ByteArray(b"") # []byte
def serializeSize(self):
"""
SerializeSize returns the number of bytes it would take to serialize
the transaction output.
Value 8 bytes + Version 2 bytes + serialized varint size for
the length of PkScript + PkScript bytes.
"""
return 8 + 2 + wire.varIntSerializeSize(len(self.pkScript)) + len(self.pkScript)
def __eq__(self, to):
"""
Check for all identical fields.
"""
return (
self.value == to.value
and self.version == to.version
and self.pkScript == to.pkScript
)
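# Illustrative sketch: a TxOut with a 25-byte pkScript serializes to
# 8 (value) + 2 (version) + 1 (varint) + 25 = 36 bytes.
#   out = TxOut(value=5000, pkScript=ByteArray(bytes(25)))
#   assert out.serializeSize() == 36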
class OutPoint:
"""
OutPoint defines a Decred data type that is used to track previous
transaction outputs.
"""
def __init__(self, txHash, idx, tree):
self.hash = (
txHash if txHash else ByteArray(0, length=HASH_SIZE)
) # chainhash.Hash
self.index = idx # uint32
self.tree = tree # int8
def __eq__(self, other):
return (
self.hash == other.hash
and self.index == other.index
and self.tree == other.tree
)
def txid(self):
return reversed(self.hash).hex()
class MsgTx:
"""
MsgTx implements the Message interface and represents a Decred tx message.
It is used to deliver transaction information in response to a getdata
message (MsgGetData) for a given transaction.
Use the AddTxIn and AddTxOut functions to build up the list of transaction
inputs and outputs.
The go types are
cachedHash *chainhash.Hash
serType TxSerializeType
version uint16
txIn []*TxIn
txOut []*TxOut
lockTime uint32
expiry uint32
"""
def __init__(self, cachedHash, serType, version, txIn, txOut, lockTime, expiry):
self.cachedH = cachedHash
self.serType = serType
self.version = version
self.txIn = txIn
self.txOut = txOut
self.lockTime = lockTime
self.expiry = expiry
@staticmethod
def new():
"""
Return a new, empty version 1 transaction that uses the full
serialization type. Python equivalent of NewMsgTx in Go.
"""
return MsgTx(
cachedHash=None,
serType=wire.TxSerializeFull,
version=TxVersion,
txIn=[],
txOut=[],
lockTime=0,
expiry=0,
)
def __eq__(self, tx):
"""
Check equality of all fields. Useful in testing.
"""
return (
self.cachedH == tx.cachedH
and self.serType == tx.serType
and self.version == tx.version
and all((a == b for a, b in zip(self.txIn, tx.txIn)))
and all((a == b for a, b in zip(self.txOut, tx.txOut)))
and self.lockTime == tx.lockTime
and self.expiry == tx.expiry
)
def __repr__(self):
"""
Convert this tx to human readable form.
Returns:
str: The transaction, decoded.
"""
result = [""]
def s(thing, description, nIndent=0):
indent = "".join(" " for _ in range(nIndent))
result.append("{}{}: {}".format(indent, thing, description))
s("txid", self.txid())
s("serType", self.serType)
s("version", self.version)
for i, txIn in enumerate(self.txIn):
s("txIn {}".format(i), "")
s("previousOutPoint".format(i), "", 1)
s("txid", txIn.previousOutPoint.txid(), 2)
s("idx", txIn.previousOutPoint.index, 2)
s("tree", txIn.previousOutPoint.tree, 2)
s("sequence", txIn.sequence, 1)
s("valueIn", txIn.valueIn, 1)
s("blockHeight", txIn.blockHeight, 1)
s("blockIndex", txIn.blockIndex, 1)
s(
"signatureScript",
txIn.signatureScript.hex() if txIn.signatureScript else "None",
1,
)
for i, txOut in enumerate(self.txOut):
s("txOut {}".format(i), "")
s("value", txOut.value, 1)
s("version", txOut.version, 1)
s("pkScript", txOut.pkScript.hex(), 1)
s("lockTime", self.lockTime)
s("expiry", self.expiry)
return "\n".join(result)
def addTxIn(self, txin):
self.txIn.append(txin)
def addTxOut(self, txout):
self.txOut.append(txout)
def hash(self):
"""
TxHash generates the hash for the transaction prefix. Since it does not
contain any witness data, it is not malleable and therefore is stable for
use in unconfirmed transaction chains.
Returns:
ByteArray: The transaction hash.
"""
# TxHash should always calculate a non-witnessed hash.
toHash = self.mustSerialize(wire.TxSerializeNoWitness)
# If this hash is converted to a hex string, it should be reversed first.
return hashH(toHash.bytes())
def cachedHash(self):
"""
Returns the cached hash. If the hash has not been generated, generate
the cache first.
Returns:
ByteArray: The transaction hash.
"""
if self.cachedH:
return self.cachedH
self.cachedH = self.hash()
return self.cachedH
def txHex(self):
return self.serialize().hex()
def txid(self):
"""
Hex encoded, byte-reversed tx hash.
"""
return reversed(self.hash()).hex()
def id(self):
return self.txid()
def command(self):
"""
Command returns the protocol command string for the message. This is part
of the Message interface implementation in go.
"""
return wire.CmdTx
def maxPayloadLength(self, pver):
"""
MaxPayloadLength returns the maximum length the payload can be for the
receiver. This is part of the Message interface implementation.
"""
# Protocol version 3 and lower have a different max block payload.
if pver <= 3:
return wire.MaxBlockPayloadV3
return wire.MaxBlockPayload
def mustSerialize(self, serType):
"""
mustSerialize returns the serialization of the transaction for the provided
serialization type without modifying the original transaction. It will panic
if any errors occur.
"""
ogSerType = self.serType
self.serType = serType
serialized = self.serialize()
self.serType = ogSerType
return serialized
def encodePrefix(self, pver):
"""
encodePrefix encodes the transaction prefix and returns it as a ByteArray.
"""
count = len(self.txIn)
b = wire.writeVarInt(pver, count)
for ti in self.txIn:
b += writeTxInPrefix(pver, self.version, ti)
count = len(self.txOut)
b += wire.writeVarInt(pver, count)
for to in self.txOut:
b += writeTxOut(pver, self.version, to)
b += ByteArray(self.lockTime, length=4).littleEndian()
b += ByteArray(self.expiry, length=4).littleEndian()
return b
def encodeWitness(self, pver):
# w io.Writer, pver uint32) error {
"""
encodeWitness encodes the transaction witness and returns it as a ByteArray.
"""
count = len(self.txIn)
b = wire.writeVarInt(pver, count)
for ti in self.txIn:
b += writeTxInWitness(pver, self.version, ti)
return b
def btcEncode(self, pver):
"""
btcEncode encodes the receiver using the Decred protocol encoding and returns a ByteArray.
This is part of the Message interface implementation.
See Serialize for encoding transactions to be stored to disk, such as in a
database, as opposed to encoding transactions for the wire.
"""
# The serialized encoding of the version includes the real transaction
# version in the lower 16 bits and the transaction serialization type
# in the upper 16 bits.
b = ByteArray(self.version | (self.serType << 16), length=4).littleEndian()
if self.serType == wire.TxSerializeNoWitness:
b += self.encodePrefix(pver)
elif self.serType == wire.TxSerializeOnlyWitness:
b += self.encodeWitness(pver)
elif self.serType == wire.TxSerializeFull:
b += self.encodePrefix(pver)
b += self.encodeWitness(pver)
else:
raise NotImplementedError("MsgTx.BtcEncode: unsupported transaction type")
return b
def serializeSize(self):
"""
SerializeSize returns the number of bytes it would take to serialize
the transaction.
"""
# Unknown type return 0.
n = 0
if self.serType == wire.TxSerializeNoWitness:
# Version 4 bytes + LockTime 4 bytes + Expiry 4 bytes +
# Serialized varint size for the number of transaction
# inputs and outputs.
n = (
12
+ wire.varIntSerializeSize(len(self.txIn))
+ wire.varIntSerializeSize(len(self.txOut))
)
for txIn in self.txIn:
n += txIn.serializeSizePrefix()
for txOut in self.txOut:
n += txOut.serializeSize()
elif self.serType == wire.TxSerializeOnlyWitness:
# Version 4 bytes + Serialized varint size for the
# number of transaction signatures.
n = 4 + wire.varIntSerializeSize(len(self.txIn))
for txIn in self.txIn:
n += txIn.serializeSizeWitness()
elif self.serType == wire.TxSerializeFull:
# Version 4 bytes + LockTime 4 bytes + Expiry 4 bytes + Serialized
# varint size for the number of transaction inputs (x2) and
# outputs. The number of inputs is added twice because it's
# encoded once in both the witness and the prefix.
n = (
12
+ wire.varIntSerializeSize(len(self.txIn))
+ wire.varIntSerializeSize(len(self.txIn))
+ wire.varIntSerializeSize(len(self.txOut))
)
for txIn in self.txIn:
n += txIn.serializeSizePrefix()
for txIn in self.txIn:
n += txIn.serializeSizeWitness()
for txOut in self.txOut:
n += txOut.serializeSize()
return n
def decodePrefix(self, b, pver):
"""
decodePrefix decodes a transaction prefix and stores the contents
in the embedded msgTx.
"""
count = wire.readVarInt(b, pver)
# Prevent more input transactions than could possibly fit into a
# message. It would be possible to cause memory exhaustion and panics
# without a sane upper bound on this count.
if count > maxTxInPerMessage:
raise DecredError(
"MsgTx.decodePrefix: too many input transactions to fit into"
" max message size [count %d, max %d]" % (count, maxTxInPerMessage)
)
# TxIns.
txIns = self.txIn = [TxIn(None, 0) for i in range(count)]
for txIn in txIns:
readTxInPrefix(b, pver, self.serType, self.version, txIn)
count = wire.readVarInt(b, pver)
# Prevent more output transactions than could possibly fit into a
# message. It would be possible to cause memory exhaustion and panics
# without a sane upper bound on this count.
if count > maxTxOutPerMessage:
raise DecredError(
"MsgTx.decodePrefix: too many output transactions to fit into"
" max message size [count %d, max %d]" % (count, maxTxOutPerMessage)
)
# TxOuts.
totalScriptSize = 0
txOuts = self.txOut = [TxOut(None, None) for i in range(count)]
for txOut in txOuts:
# The pointer is set now in case a script buffer is borrowed
# and needs to be returned to the pool on error.
b = readTxOut(b, pver, self.version, txOut)
totalScriptSize += len(txOut.pkScript)
# Locktime and expiry.
self.lockTime = b.pop(4).unLittle().int()
self.expiry = b.pop(4).unLittle().int()
return b, totalScriptSize
def decodeWitness(self, b, pver, isFull):
"""
Witness only; generate the TxIn list and fill out only the sigScripts.
Args:
b ByteArray: the encoded witnesses.
pver int: the protocol version.
isFull bool: whether this is a full transaction.
"""
totalScriptSize = 0
count = wire.readVarInt(b, pver)
# Prevent more input transactions than could possibly fit into a
# message, or memory exhaustion and panics could happen.
if count > maxTxInPerMessage:
raise DecredError(
"MsgTx.decodeWitness: too many input transactions to fit into"
f" max message size [count {count}, max {maxTxInPerMessage}]"
)
if isFull:
# We're decoding witnesses from a full transaction, so make sure
# the number of signature scripts is the same as the number of
# TxIns we currently have, then fill in the signature scripts.
if count != len(self.txIn):
raise DecredError(
"MsgTx.decodeWitness: non equal witness and prefix txin"
f" quantities (witness {count}, prefix {len(self.txIn)})"
)
# Read in the witnesses and copy them into the TxIns already generated
# by decodePrefix.
if self.txIn is None or len(self.txIn) == 0:
self.txIn = [TxIn(None, 0) for i in range(count)]
for txIn in self.txIn:
b = readTxInWitness(b, pver, self.version, txIn)
totalScriptSize += len(txIn.signatureScript)
else:
self.txIn = [TxIn(None, 0) for i in range(count)]
for txIn in self.txIn:
b = readTxInWitness(b, pver, self.version, txIn)
totalScriptSize += len(txIn.signatureScript)
self.txOut = []
return b, totalScriptSize
@staticmethod
def btcDecode(b, pver):
"""
btcDecode decodes b using the Decred protocol encoding and returns a new
MsgTx. This is part of the Message interface implementation in Go.
See deserialize for decoding transactions stored to disk, such as in a
database, as opposed to decoding transactions from the wire.
"""
# The serialized encoding of the version includes the real transaction
# version in the lower 16 bits and the transaction serialization type
# in the upper 16 bits.
b = ByteArray(b)
ver = b.pop(4).unLittle().int()
tx = MsgTx.new()
tx.version = ver & 0xFFFF
tx.serType = ver >> 16
# Serialize the transactions depending on their serialization
# types.
if tx.serType == wire.TxSerializeNoWitness:
b, _ = tx.decodePrefix(b, pver)
elif tx.serType == wire.TxSerializeOnlyWitness:
b, _ = tx.decodeWitness(b, pver, False)
elif tx.serType == wire.TxSerializeFull:
b, _ = tx.decodePrefix(b, pver)
b, _ = tx.decodeWitness(b, pver, True)
else:
raise NotImplementedError("MsgTx.BtcDecode: unsupported transaction type")
return tx
def serialize(self):
"""
Serialize the MsgTx.
Returns:
ByteArray: The serialized MsgTx.
"""
return self.btcEncode(0)
@staticmethod
def deserialize(b):
return MsgTx.btcDecode(b, 0)
@staticmethod
def blob(msgTx):
"""Satisfies the encode.Blobber API"""
return msgTx.serialize().b
@staticmethod
def unblob(b):
"""Satisfies the encode.Blobber API"""
return MsgTx.deserialize(b)
def pkScriptLocs(self): # []int {
"""
PkScriptLocs returns a slice containing the start of each public key script
within the raw serialized transaction. The caller can easily obtain the
length of each script by using len on the script available via the
appropriate transaction output entry.
TODO: Make this work for all serialization types, not just the full
serialization type.
"""
# Return an empty list for a witness-only tx.
numTxOut = len(self.txOut)
if numTxOut == 0:
return []
# The starting offset in the serialized transaction of the first
# transaction output is:
# Version 4 bytes + serialized varint size for the number of
# transaction inputs and outputs + serialized size of each transaction
# input.
n = (
4
+ wire.varIntSerializeSize(len(self.txIn))
+ wire.varIntSerializeSize(numTxOut)
)
for txIn in self.txIn:
n += txIn.serializeSizePrefix()
# Calculate and set the appropriate offset for each public key script.
pkScriptLocs = []
for txOut in self.txOut:
# The offset of the script in the transaction output is:
# Value 8 bytes + version 2 bytes + serialized varint size
# for the length of PkScript.
n += 8 + 2 + wire.varIntSerializeSize(len(txOut.pkScript))
pkScriptLocs.append(n)
n += len(txOut.pkScript)
return pkScriptLocs
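# Illustrative sketch (hypothetical tx instance): for a fully serialized
# MsgTx tx, pkScriptLocs()[i] is the byte offset where txOut[i].pkScript
# begins in serialize(), so
#   locs = tx.pkScriptLocs()
#   raw = tx.serialize()
#   raw[locs[0] : locs[0] + len(tx.txOut[0].pkScript)] == tx.txOut[0].pkScript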
def looksLikeCoinbase(self):
return self.txIn and self.txIn[0].previousOutPoint.hash.iszero()
def isTicket(self):
"""
Whether this transaction is a stake submission.
Returns:
bool: True if ticket.
"""
return len(self.txOut) > 0 and txscript.isStakeSubmissionScript(
0, self.txOut[0].pkScript
)
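# Illustrative sketch (appended example, not part of the original module):
# build a one-input, one-output transaction, serialize it, and decode it back.
#   tx = MsgTx.new()
#   prevOut = OutPoint(ByteArray(0, length=HASH_SIZE), 0, TxTreeRegular)
#   tx.addTxIn(TxIn(prevOut, signatureScript=ByteArray(bytes.fromhex("51"))))
#   tx.addTxOut(TxOut(value=100000, pkScript=ByteArray(bytes.fromhex("76a914"))))
#   blob = tx.serialize()                       # full serialization, ByteArray
#   assert MsgTx.deserialize(blob.bytes()) == tx
#   print(tx.txid())                            # hex, byte-reversed hash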
|
the-stack_106_26050 | #Pranav Mishra
#BioCompute Object Creator Minimum Viable Product
import os
import json
import jsons
import sys
import hashlib
from json import JSONEncoder
from pprint import pprint
from datetime import datetime
try:
import cPickle as pickle
except ModuleNotFoundError:
import pickle
class BCO():
def __init__(self, provenance, usability, description, execution, io, object_id, spec_version, etag = "new", parametric = None, error = None, extension = None):
self.provenance_domain = provenance
self.usability_domain = usability
self.extension_domain = extension
self.description_domain = description
self.execution_domain = execution
self.parametric_domain = parametric
self.io_domain = io
self.error_domain = error
self.object_id = object_id
self.spec_version = spec_version
self.etag = etag
def startCall():
pass
def endCall():
pass
def default(self, object):
if isinstance(object, BCO):
return object.__dict__
else:
return json.JSONEncoder.default(self, object)
class provenance_domain():
def __init__(self, name, version, license, created, modified, contributors, embargo=None, derived_from=None, obsolete_after=None, review=None):
self.embargo = embargo #object
self.created = created #string date-time
self.modified = modified #string date-time
self.name = name #string
self.version = version #string
self.derived_from = derived_from #$ref
self.obsolete_after = obsolete_after #string
self.license = license #string
self.review = review #array of reviews
self.contributors = contributors #array of contributors
class embargo():
def __init__(self, start_time, end_time):
self.start_time = start_time #string
self.end_time = end_time #string
class review():
def __init__(self, reviewer, status, date=None, reviewer_comment=None):
self.reviewer = reviewer #$ref
self.status = status #string from statuses
self.reviewer_comment = reviewer_comment #string
self.date = date #string date-time
class status():
def __init__(self, status):
statuses = ["unreviewed", "in-review", "approved", "rejected", "suspended"]
self.status = status #string
class reviewer():
def __init__(self, name, email=None, contribution=None, orcid=None, affiliation=None):
self.name = name #string
self.affiliation = affiliation #string
self.email = email #string
self.orcid = orcid #string
self.contribution = contribution #array of contributions
class contributor():
def __init__(self, name, contribution, affiliation=None, email=None, orcid=None):
self.name = name #string
self.affiliation = affiliation #string
self.email = email #string
self.orcid = orcid #string
self.contribution = contribution #string
class contribution():
def __init__(self, contribution):
contributions = ["authoredBy", "contributedBy", "createdAt", "createdBy", "createdWith", "curatedBy", "derivedFrom", "importedBy", "importedFrom", "providedBy", "retrievedBy", "retrievedFrom", "sourceAccessedBy"]
self.contribution = contribution #string
class description_domain():
def __init__(self, keywords, pipeline_steps, xref=None, platform=None):
self.keywords = keywords #array of strings
self.xref = xref #array of xrefs
self.platform = platform #array of strings
self.pipeline_steps = pipeline_steps #array of pipeline step objects
# class keyword():
# def __init__(self, keyword):
# self.keyword = keyword
class xref():
def __init__(self, namespace, name, ids, access_time):
self.namespace = namespace #string
self.name = name #string
self.ids = ids #array of ids
self.access_time = access_time #string
# class platform():
# def __init__(self, platform):
# self.platform = platform
class pipeline_step():
def __init__(self, step_number, name, description, input_list, output_list, version=None, prerequisite=None):
self.step_number = step_number #integer
self.name = name #string
self.description = description #string
self.version = version #string
self.prerequisite = prerequisite #array of prerequisite objects
self.input_list = input_list #array of uris, composed of uris ($ref)
self.output_list = output_list #array of uris, composed of uris ($ref)
class prerequisite():
def __init__(self, name, uri = None):
self.uri = uri #$ref, NOT uri object ($ref)
self.name = name #string
class uri():
def __init__(self, uri, filename=None, sha1_checksum=None, access_time=None):
self.filename = filename
self.uri = uri
self.sha1_checksum = sha1_checksum
self.access_time = access_time
# class input(): #item in input_list
# def __init__(self, filename=None, uri=None, sha1_checksum=None, access_time=None):
# self.filename = filename
# self.uri = uri
# self.sha1_checksum = sha1_checksum
# self.access_time = access_time
# class output(): #item in output_list
# def __init__(self, filename=None, uri=None, sha1_checksum=None, access_time=None):
# self.filename = filename
# self.uri = uri
# self.sha1_checksum = sha1_checksum
# self.access_time = access_time
class execution_domain():
def __init__(self, environment_variables, script_driver, software_prerequisites, external_data_endpoints, script):
self.environment_variables = environment_variables #object
self.script = script #array of uris to script objects
self.script_driver = script_driver #string
self.software_prerequisites = software_prerequisites #array of software prerequisite objects
self.external_data_endpoints = external_data_endpoints
class environment_variable():
def __init__(self, key, value):
self.key = key #string
self.value = value #string
class environment_varibles():
def __init__(self, environment_variables):
self.environment_variables = environment_variables
class script_item():
def __init__(self, uri):
self.uri = uri
class software_prerequisite():
def __init__(self, version, name, uri):
self.uri = uri #NOT uri object ($ref)
self.version = version #string
self.name = name #string
class external_data_endpoint():
def __init__(self, name, url):
self.name = name #string
self.url = url #string
class io_domain():
def __init__(self, input_subdomain, output_subdomain):
self.input_subdomain = input_subdomain #array of inputs
self.output_subdomain = output_subdomain #array of outputs
class input_subdomain_item:
def __init__(self, uri):
self.uri = uri
class output_subdomain_item:
def __init__(self, uri, mediatype):
self.uri = uri #NOT uri object ($ref)
self.mediatype = mediatype #string
class error_domain():
def __init__(self, empirical, algorithmic):
self.empirical_error = empirical #object
self.algorithmic_error = algorithmic #object
# class empirical_error():
# def __init__(self, empirical_error):
# self.empirical_error = empirical_error #string
# class algorithmic_error():
# def __init__(self, algorithmic_error):
# self.algorithmic_error = algorithmic_error #string
# class extension_domain():
# def __init__(self, extension_schema):
# self.extension_schema = extension_schema
class usability_domain():
def __init__(self, use):
self.use = use #string
# class parametric_domain():
# def __init__(self, parameters):
# self.parameters= parameters #array
class parameter:
def __init__(self, param, value, step):
self.param = param #string
self.value = value #string
self.step = step #string
class extension_domain_item:
def __init__(self, extension_schema):
self.extension_schema = extension_schema
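# Illustrative sketch (hypothetical values): the classes above mirror the
# IEEE-2791/BCO domains and can be assembled directly, e.g.
#   alice = contributor(name="Alice", contribution=["createdBy"])
#   rev = review(reviewer=reviewer(name="Bob"), status="approved")
#   prov = provenance_domain(name="demo", version="1.0", license="MIT",
#                            created="01/01/2020-00:00:00",
#                            modified="01/01/2020-00:00:00",
#                            contributors=[alice], review=[rev])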
class color:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
def remove_nulls(d):
return {k: v for k, v in d.items() if v is not None}
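# Illustrative sketch: used as a json.loads object_hook, remove_nulls drops
# None-valued keys at every nesting level, e.g.
#   json.loads('{"a": 1, "b": null}', object_hook=remove_nulls)  # -> {"a": 1}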
def main():
# data = {}
# with open('blank_bco.json') as json_file: #testing
# data = json.load(json_file)
#print(data)
#insert code below
#take output of script command
#for finding steps, split by "|"
#metadata
print(color.CYAN + "Welcome to the BioCompute Command Line Tool. With this tool, you can automate some parts of BCO creation and work on creating a BCO directly from the command line. \nIf you make a mistake while creating your BCO, you can always edit that field in the output json file that represents your BCO.\n" + color.END)
input_filename = r'' + input("Enter name of desired input file (Output of script command/typescript): ")
data = ""
try:
with open(input_filename, 'r') as file:
data = file.read()
except FileNotFoundError:
print(color.RED + "error: input file not found. please restart this tool with correct input file." + color.END)
sys.exit()
index_of_first_newline = 0
for x in range(0, len(data)):
if data[x] == "\n":
index_of_first_newline = x
break
data = data[index_of_first_newline+1:]
index_of_start = 0
for x in range(0, len(data)):
if data[x] == "$" or data[x] == "#": #user or root
index_of_start = x
break
data = data[int(index_of_start+1): int(data.index('\n'))]
print("\nPipeline: " + data)
# print("\n")
confirmation = input("Confirm pipeline (y or n): ")
if confirmation.strip().lower() != "y":
data = input("Enter the correct pipeline: ")
pipeline = data.split("|")
for x in range(0, len(pipeline)):
pipeline[x] = pipeline[x].lstrip()
pipeline[x] = pipeline[x].rstrip()
output_filename = input("Enter name of desired output file (e.g. example_bco.json): ")
print(color.BOLD + "\nMetadata Information\n" + color.END)
object_id = input("Enter unique BCO Object ID (e.g. https://portal.aws.biochemistry.gwu.edu/bco/BCO_00026662). Ensure the id part ('00026662') is not in use : ")
etag = "new"
# etag_input = input("Enter etag value (default is 'new') For default, enter nothing.: ")
# if etag_input.strip() != etag.strip() and etag_input.strip() != "":
# etag = etag_input
etag_input = input("What type of hash would you like to hash your file with? Enter the number associated with the hash: MD5 (1), SHA1 (2), or SHA256 (3): ").strip().lower()
spec_version = "https://w3id.org/ieee/ieee-2791-schema/"
spec_version_input = input("Enter version of specification for this IEEE-2791 object (BCO) (Default is: https://w3id.org/ieee/ieee-2791-schema/) For default, enter nothing.: ")
if spec_version_input.strip() != spec_version.strip() and spec_version_input.strip() != "":
spec_version = spec_version_input
# print("\n")
#provenance
print("\n" + color.BOLD + "Provenance Domain Information\n" + color.END)
print(color.CYAN + "You are required to enter name, the license, the version, and the contributors for this BCO." + color.END)
print()
name = input("Enter name of BioCompute Object: ")
now = datetime.now()
created = now.strftime("%m/%d/%Y-%H:%M:%S")
modified = now.strftime("%m/%d/%Y-%H:%M:%S")
license = input("Enter a license if this BCO is not exclusively for personal use: ")
version = "1.0"
version_input = input("Following semantic versioning, enter a version for this BCO (default is 1.0). For default, enter nothing.: ")
if version_input.strip().lower() != version.strip().lower() and version_input.strip().lower() != "":
version = version_input
add_contributor = "y"
contributions = ["authoredBy", "contributedBy", "createdAt", "createdBy", "createdWith", "curatedBy", "derivedFrom", "importedBy", "importedFrom", "providedBy", "retrievedBy", "retrievedFrom", "sourceAccessedBy"]
contributors = []
while(add_contributor.lower().strip() == "y"):
contributor_name = input("Enter name of contributor: ")
print("Contribution Types: ", " ".join(contributions))
contribution_input = input("Enter contribution type: ").strip()
contribution = []
contribution.append(contribution_input)
contributors.append(contributor(name = contributor_name, contribution = contribution))
add_contributor = input("Add another contributor? y/n: ")
provenance = provenance_domain(name = name, version = version, license = license, created = created, modified = modified, contributors = contributors)
# print("\n")
#usability
print("\n" + color.BOLD + "Usability Domain Information \n" + color.END)
use = input("What is the purpose of this computational workflow/analysis?: ")
# usability = usability_domain(use)
usability = []
usability.append(use)
#execution
print(color.BOLD + "\nExecution Domain Information" + color.END)
print()
print(color.CYAN + "You are required to enter the script object uri, script driver, software prerequisites, external data endpoints, and environment variables." + color.END)
print()
print("URIs must begin with 'https://'. Enter script uris in the following format: 'uri1 uri2 uri3 uri4'")
script_input = input("\nEnter the uris for the script objects used to perform computations: ")
script = []
script_list = script_input.lstrip().rstrip().split(" ")
for x in script_list:
script.append(script_item(uri(uri=x)))
script_driver = "shell"
script_driver_input = input("\nEnter script driver, the type of executable to perform commands in script in order to run the pipeline (Default is shell) Enter nothing for default.: ")
if script_driver_input.lower().strip() != script_driver.lower().strip() and script_driver_input.lower().strip() != "":
script_driver = script_driver_input
software_prerequisites = []
add_software_prerequisite = "y"
print("\nSoftware prerequisites are necessary prerequisites such as libraries and tool versions that are needed to run the script to produce this BCO.\nThey have a name, version, and uri.\n")
while(add_software_prerequisite.lower().strip() == "y"):
sp_name = input("Enter software prerequisite name: ")
sp_version = input("Enter software prerequisite version: ")
sp_uri = input("URIs must begin with 'https://'. Enter software prerequisite uri: ")
software_prerequisites.append(software_prerequisite(version = sp_version, name = sp_name, uri = uri(uri=sp_uri)))
add_software_prerequisite = input("Add another software prerequisite? y/n: ")
external_data_endpoints = []
add_external_data_endpoints = "y"
print("\nExternal data endpoints are requirements for network protocol endpoints used by a pipeline.\nThey have a name and a url.\n")
while(add_external_data_endpoints.strip().lower() == "y"):
ede_name = input("Enter external data endpoint name: ")
ede_url = input("Enter external data endpoint url: ")
external_data_endpoints.append(external_data_endpoint(name = ede_name, url = ede_url))
add_external_data_endpoints = input("Add another external data endpoint? y/n: ")
add_environment_variable = "y"
print("\nEnter environment variables in 'key value' format \n")
environment_variables = []
while(add_environment_variable.strip().lower() == "y"):
key_value_pair = input("Enter environmental parameters that are useful to configure the execution environment on the target platform: ").lstrip().rstrip()
try:
env_variable = environment_variable(key_value_pair.split(" ")[0], key_value_pair.split(" ")[1])
environment_variables.append(env_variable)
except:
print("There was an error with your input. Please try again.")
add_environment_variable = input("Add another environment variable? y/n: ")
execution = execution_domain(environment_variables = environment_variables, script_driver = script_driver, software_prerequisites = software_prerequisites, external_data_endpoints = external_data_endpoints, script = script)
#description
print("\n" + color.BOLD + "Description Domain Information \n" + color.END)
print("Format to enter keywords in: 'keywordA keywordB keywordC keywordD'")
keywords_input = input("Enter biology research keywords in the specified format to aid in search-ability and description of this object: ").lstrip().rstrip()# .split(" ")
keywords = []
keywords.append(keywords_input)
pipeline_steps = []
input_list_description_domain = []
output_list_description_domain = []
step_number = 1
input_list_master = []
output_list_master = []
print()
print(color.CYAN + "You are required to enter the name of the tool, the purpose, and the input/output file uris at each step." + color.END)
print()
while(step_number != len(pipeline) + 1): #step number cannot exceed number of steps
print("Current step number: {}".format(step_number))
name = pipeline[step_number-1].split(" ")[0]
print("Name of tool: {}".format(name))
name_input = input("If name of tool is not correct, enter correct name. Else, enter nothing.: ")
if name_input.lower().strip() != name.lower().strip() and name_input.lower().strip() != "":
name = name_input
description = input("Enter purpose of the tool: ")
# version = input("Enter version of the tool: ")
print("\nURIs must begin with 'https://'. Enter input file uris in the following format: 'uri1 uri2 uri3 uri4'\n")
input_list_temp = input("Enter input file uris if this step uses input files: ").lstrip().rstrip().split(" ")
for x in range(len(input_list_temp)):
input_list_temp[x] = uri(uri=input_list_temp[x])
print("\nURIs must begin with 'https://'. Enter output file uris in the following format: 'uri1 uri2 uri3 uri4'\n")
output_list_temp = input("Enter output file uris if this step outputs to files: ").lstrip().rstrip().split(" ")
for x in range(len(output_list_temp)):
output_list_temp[x] = uri(uri=output_list_temp[x])
print()
pipeline_steps.append(pipeline_step(step_number=step_number, name=name, description=description, input_list= input_list_temp, output_list = output_list_temp))
if step_number == 1:
for x in input_list_temp:
if x.uri != "":
input_list_master.append(x)
if step_number == len(pipeline):
for x in output_list_temp:
if x.uri != "":
output_list_master.append(x)
step_number += 1
description = description_domain(keywords = keywords, pipeline_steps = pipeline_steps)
#IO
print(color.BOLD + "Input Output (IO) Domain Information \n" + color.END)
inputs = []
for x in input_list_master:
inputs.append(input_subdomain_item(uri=x))
print("Current input list: ")
for x in inputs:
print(x.uri.uri)
add_input = input("Do you want to add additional input files? y/n: ")
while(add_input.strip().lower() == "y"):
input_file_uri = input("URIs must begin with 'https://'. Enter input file uri: ")
inputs.append(input_subdomain_item(uri = uri(uri=input_file_uri)))
add_input = input("Add an input file? y/n: ")
print("Current output list: ")
for x in output_list_master:
print(x.uri)
print()
outputs = []
for output in output_list_master:
print(output.uri)
output_mediatype = input("Enter output file mediatype for this output file: ")
outputs.append(output_subdomain_item(uri=output, mediatype=output_mediatype))
add_output = input("Do you want to add additional output files? y/n: ")
while(add_output.strip().lower() == "y"):
output = input("URIs must begin with 'https://'. Enter output file uri: ")
output_mediatype = input("Enter output file mediatype: ")
outputs.append(output_subdomain_item(uri=uri(uri=output), mediatype=output_mediatype))
add_output = input("Add an output file? y/n: ")
io = io_domain(inputs, outputs)
#parametric
print(color.BOLD + "\nParametric Domain Information \n" + color.END)
print("This represents the list of NON-default parameters customizing the computational flow which can affect the output of the calculations.\nThese fields can be custom to each kind of analysis and are tied to a particular pipeline implementation.")
print("Parameters are composed of a param (specific variable for computational workflow), a value (non-default param value), and a step (specific step in workflow relevant to param and value)")
parameters = []
option_index = -1
partial_string = ""
param = ""
value = ""
step = ""
edit_parameter = ""
add_parameter = ""
delete_parameter = ""
for x in range(0, len(pipeline)):
print()
partial_string = ""
param = ""
value = ""
step = ""
edit_parameter = ""
add_parameter = ""
delete_parameter = ""
if " -" in pipeline[x]:
option_index = pipeline[x].index(" -")
partial_string = pipeline[x][option_index+1:]
param = pipeline[x].split(" ")[0]
value = partial_string[0: partial_string.index(" ")]
step = str(x+1)
print("Current step number: {}".format(str(x+1)))
print()
print("param: ", param)
print("value: ", value)
print("step: ", step)
print("Current pipeline info: {}".format(pipeline[x]))
if param == "" and value == "" and step == "":
add_parameter = input("No parameter has been found. Is there a non-default parameter you wish to add? y/n: ")
if add_parameter.strip().lower() == "y":
param = input("Enter param: ")
value = input("Enter value: ")
step = input("Enter step: ")
parameters.append(parameter(param=param, value=value, step=step))
continue
else:
continue
delete_parameter = input("Would you like to delete the current parameter? (Note: you can edit parameter in the next step) y/n: ")
if delete_parameter.lower().strip() == "y":
param = ""
value = ""
step = ""
continue
edit_parameter = input("Would you like to edit the current parameter? y/n: ")
if edit_parameter.strip().lower() == "y":
param = input("Enter param: ")
value = input("Enter value: ")
step = input("Enter step: ")
parameters.append(parameter(param=param, value=value, step=step))
continue
parameters.append(parameter(param=param, value=value, step=step))
parametric = parameters# parametric_domain(parameters)
#Error
print(color.BOLD + "\nError Domain Information \n" + color.END)
emp_error = ""
alg_error = ""
empirical_error = {}
algorithmic_error = {}
add_error_domain = input("This domain is optional. Would you like to add an error domain? y/n: ")
add_emp_error = ""
add_alg_error = ""
if add_error_domain.strip().lower() == "y":
print("\nEmpirical error is defined as empirically determined values such as limits of detectability, false positives, false negatives, statistical confidence of outcomes, etc.\nThis can be measured by running the algorithm on multiple data samples of the usability domain or through the use of carefully designed in-silico data.\n")
add_emp_error = input("Would you like to add an empirical error subdomain for this BCO? y/n: ")
while(add_emp_error.strip().lower() == "y"):
print("An example of an input would be: 'false_negative_alignment_hits <0.0010'")
emp_error = input("Enter an empirical error 'key value' pair for this BCO: ")
try:
empirical_error[emp_error.split(" ")[0]] = emp_error.split(" ")[1]
except:
print("Error with input. Please retry or press 'n' at the next command.")
add_emp_error = input("Would you like to add another empirical error 'key value' pair for this BCO? y/n: ")
print("\nAlgorithmic error is defined as descriptive of errors that originate by fuzziness of the algorithms, driven by stochastic processes, in dynamically parallelized multi-threaded executions, or in machine learning methodologies where the state of the machine can affect the outcome.\n")
add_alg_error = input("Would you like to add an algorithmic error subdomain for this BCO? y/n: ")
while(add_alg_error.strip().lower() == "y"):
print("An example of an input would be: 'algorithm description' or 'algorithm json_schema_uri' where the json_schema_uri would be uri for algorithm description.")
alg_error = input("Enter an algorithmic error description for this BCO: ")
try:
algorithmic_error[alg_error.split(" ")[0]] = alg_error.split(" ")[1]
except:
print("Error with input. Please retry or press 'n' at the next command.")
add_alg_error = input("Would you like to add another algorithmic error 'algorithm description' or 'algorithm json_schema_uri' pair for this BCO? y/n: ")
error = error_domain(empirical=empirical_error, algorithmic=algorithmic_error)
#Extension
print(color.BOLD + "\nExtension Domain Information \n" + color.END)
extension = []
add_extension_domain = input("This domain is optional. Would you like to add an extension domain? y/n: ")
if add_extension_domain.strip().lower() == "y":
while add_extension_domain.strip().lower() == "y":
extension.append(extension_domain_item(extension_schema=input("URIs must begin with 'https://'. Enter a uri that points to an extension json schema: ").lstrip().rstrip()))
add_extension_domain = input("Would you like to add another extension json schema? y/n: ")
output_bco = BCO(provenance = provenance, usability = usability, description = description, execution = execution, io = io, object_id = None, spec_version = None, etag = None, parametric = parametric, error=error, extension=extension)
print(color.BOLD + "\nBCO Information" + color.END)
print(color.CYAN + "You can edit the output .json file if you made a mistake or would like to edit any fields in your BioCompute Object." + color.END)
print(color.GREEN + "BCO created" + color.END)
try:
print(color.GREEN + "{}".format(output_bco) + color.END)
print(color.GREEN + "BCO printed" + color.END)
except:
print(color.RED + "error occured with printing"+ color.END)
# try:
# with open(output_filename + ".pkl", 'wb') as output_pickle_file:
# pickle.dump(output_bco, output_pickle_file, pickle.HIGHEST_PROTOCOL)
# print(color.GREEN + "BCO saved to .pkl file" + color.END)
# except:
# print(color.RED + "error with saving BCO to .pkl file" + color.END)
# with open(output_filename + ".pkl", 'rb') input_pickle_file: #to open saved pkl file
# loaded_bco = pickle.load(input_pickle_file)
new_data = ""
with open(output_filename, 'w') as json_output:
try:
new_data = jsons.dumps(output_bco)
res = json.loads(new_data, object_hook=remove_nulls)
json.dump(res, json_output, indent = 4, sort_keys=True)
print(color.GREEN + "BCO initially saved in .json format" + color.END) # CORRECT
except:
print(color.RED + "error with initially saving BCO to .json file" + color.END)
BUF_SIZE = 1000
md5 = hashlib.md5()
sha1 = hashlib.sha1()
sha256 = hashlib.sha256()
with open(output_filename, 'rb') as f:
while True:
data = f.read(BUF_SIZE)
if not data:
break
md5.update(data)
sha1.update(data)
sha256.update(data)
if etag_input == "1":
etag = md5.hexdigest()
elif etag_input == "2":
etag = sha1.hexdigest()
elif etag_input == "3":
etag = sha256.hexdigest()
with open(output_filename, 'w') as f:
f.truncate(0)
f.close()
output_bco = BCO(provenance = provenance, usability = usability, description = description, execution = execution, io = io, object_id = object_id, spec_version = spec_version, etag = etag, parametric = parametric, error=error, extension=extension)
with open(output_filename, 'w') as json_output:
try:
new_data = jsons.dumps(output_bco)
res = json.loads(new_data, object_hook=remove_nulls)
json.dump(res, json_output, indent = 4, sort_keys=True)
print(color.GREEN + "BCO saved in .json format" + color.END) # CORRECT
except:
print(color.RED + "error with saving BCO to .json file" + color.END)
# try:
# json_string = BCOEncoder().encode(output_bco)
# json_output.write(json_string)
# json_output.close()
# json_output.write(json.dumps(output_bco))
# json_output.write(jsons.dumps(output_bco)) # CORRECT
# except:
# new_data = ""
# with open(output_filename, 'r') as json_input:
# new_data = json.loads(json_input, object_hook=remove_nulls)
# with open(output_filename, 'w') as json_output:
# json_output.truncate(0)
# json_output.write(new_data)
# print(color.RED + "error occured with outputting as a json file" + color.END)
if __name__ == "__main__":
main() |
the-stack_106_26051 | import cx_Freeze
import sys
import os
import PySide2
plugins_path = os.path.join(PySide2.__path__[0], "plugins")
base = None
if sys.platform == 'win32':
base = "Win32GUI"
executables = [
cx_Freeze.Executable("emoch.py", base=base)
]
cx_Freeze.setup(
name = "EmoCh- Speech Emotion Analysis",
options = {"build_exe": {
"zip_include_packages": ["PySide2", "platform", "librosa", "soundfile", 'numpy', 'joblib', 'pyaudio', 'wave', 'termcolor'],
"include_files": [os.path.join(plugins_path, "platforms")]}},
version = "1.0.0",
description = "Emotion Analysis from Speech using Python",
executables = executables
)
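# Usage sketch (assuming this file is the project's cx_Freeze setup script,
# e.g. setup.py):
#   python setup.py build
# cx_Freeze then writes the frozen executable into the build/ directory.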
|
the-stack_106_26052 | """2. Train Mask RCNN end-to-end on MS COCO
===========================================
This tutorial goes through the steps for training a Mask R-CNN [He17]_ instance segmentation model
provided by GluonCV.
Mask R-CNN is an extension to the Faster R-CNN [Ren15]_ object detection model.
As such, this tutorial is also an extension to :doc:`../examples_detection/train_faster_rcnn_voc`.
We will focus on the extra work on top of Faster R-CNN to show how to use GluonCV components
to construct a Mask R-CNN model.
It is highly recommended to read the original papers [Girshick14]_, [Girshick15]_, [Ren15]_, [He17]_
to learn more about the ideas behind Mask R-CNN.
Appendix from [He16]_ and experiment detail from [Lin17]_ may also be useful reference.
.. hint::
Please first go through this :ref:`sphx_glr_build_examples_datasets_mscoco.py` tutorial to
setup MSCOCO dataset on your disk.
.. hint::
You can skip the rest of this tutorial and start training your Mask RCNN model
right away by downloading this script:
:download:`Download train_mask_rcnn.py<../../../scripts/instance/mask_rcnn/train_mask_rcnn.py>`
Example usage:
Train a default resnet50_v1b model with COCO dataset on GPU 0:
.. code-block:: bash
python train_mask_rcnn.py --gpus 0
Train on GPU 0,1,2,3:
.. code-block:: bash
python train_mask_rcnn.py --gpus 0,1,2,3
Check the supported arguments:
.. code-block:: bash
python train_mask_rcnn.py --help
"""
##########################################################
# Dataset
# -------
#
# Make sure COCO dataset has been set up on your disk.
# Then, we are ready to load training and validation images.
from gluoncv.data import BDDInstance
# typically we use train2017 (i.e. train2014 + minival35k) split as training data
# COCO dataset actually has images without any objects annotated,
# which must be skipped during training to prevent empty labels
train_dataset = BDDInstance(root='/Volumes/DATASET/BDD100k/bdd100k/', splits='bdd100k_to_coco_labels_images_val2018', skip_empty=True, use_color_maps=False)
# and val2014 (i.e. minival5k) split as validation data
val_dataset = BDDInstance(root='/Volumes/DATASET/BDD100k/bdd100k/', splits='bdd100k_to_coco_labels_images_val2018', skip_empty=False, use_color_maps=False)
print('Training images:', len(train_dataset))
# print('Validation images:', len(val_dataset))
##########################################################
# Data transform
# --------------
# We can read an (image, label, segm) tuple from the training dataset:
train_image, train_label, train_segm = train_dataset[1]
bboxes = train_label[:, :4]
cids = train_label[:, 4:5]
print('image:', train_image.shape)
print('bboxes:', bboxes.shape, 'class ids:', cids.shape)
print('drivable_map:', train_segm.shape)
# segm is a list of polygons which are arrays of points on the object boundary
# print('masks', [[poly.shape for poly in polys] for polys in train_segm])
##############################################################################
# Plot the image with boxes and labels:
from matplotlib import pyplot as plt
from gluoncv.utils import viz
plt.imshow(train_segm.asnumpy() * 80)
# fig = plt.figure(figsize=(10, 10))
# ax = fig.add_subplot(1, 1, 1)
# ax = viz.plot_bbox(train_segm, bboxes, labels=cids, class_names=train_dataset.classes, ax=ax)
plt.show()
##############################################################################
# To actually see the object segmentation, we need to convert polygons to masks
import numpy as np
from gluoncv.data.transforms import mask as tmask
width, height = train_image.shape[1], train_image.shape[0]
# train_masks = np.stack([tmask.to_mask(polys, (width, height)) for polys in train_segm])
plt_image = viz.plot_drivable_map(train_image, [train_segm])
##############################################################################
# Now plot the image with boxes, labels and masks
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(1, 1, 1)
ax = viz.plot_bbox(plt_image, bboxes, labels=cids, class_names=train_dataset.classes, ax=ax)
# plt.show()
##############################################################################
# Data transforms, i.e. decoding and transformation, are identical to Faster R-CNN
# with the exception of segmentation polygons as an additional input.
# :py:class:`gluoncv.data.transforms.presets.rcnn.MaskRCNNDefaultTrainTransform`
# converts the segmentation polygons to binary segmentation mask.
# :py:class:`gluoncv.data.transforms.presets.rcnn.MaskRCNNDefaultValTransform`
# ignores the segmentation polygons and returns image tensor and ``[im_height, im_width, im_scale]``.
from gluoncv.data.transforms import presets
from gluoncv import utils
from mxnet import nd
##############################################################################
short, max_size = 600, 1000 # resize image to short side 600 px, but keep maximum length within 1000
train_transform = presets.rcnn.BDDMaskRCNNDefaultTrainTransform(short, max_size)
val_transform = presets.rcnn.BDDMaskRCNNDefaultValTransform(short, max_size)
##############################################################################
utils.random.seed(233) # fix seed in this tutorial
##############################################################################
# apply transforms to train image
# print('segm', train_segm.shape)
train_image2, train_label2, train_masks2 = train_transform(train_image, train_label, train_segm)
print('tensor shape:', train_image2.shape)
print('box and id shape:', train_label2.shape)
print('drivable map shape', train_masks2.shape)
##############################################################################
# Images in tensor are distorted because they no longer sit in (0, 255) range.
# Let's convert them back so we can see them clearly.
plt_image2 = train_image2.transpose((1, 2, 0)) * nd.array((0.229, 0.224, 0.225)) + nd.array((0.485, 0.456, 0.406))
plt_image2 = (plt_image2 * 255).asnumpy().astype('uint8')
##############################################################################
# The transform already converted polygons to masks and we plot them directly.
width, height = plt_image2.shape[1], plt_image2.shape[0]
plt_image2 = viz.plot_drivable_map(plt_image2, [train_masks2])
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(1, 1, 1)
ax = viz.plot_bbox(plt_image2, train_label2[:, :4],
labels=train_label2[:, 4:5],
class_names=train_dataset.classes,
ax=ax)
plt.show()
##########################################################
# Data Loader
# -----------
# The data loader is identical to Faster R-CNN, except for the additional drivable-map input and output.
from gluoncv.data.batchify import Tuple, Append
from mxnet.gluon.data import DataLoader
batch_size = 2 # for tutorial, we use smaller batch-size
num_workers = 0 # you can make it larger (if your CPU has more cores) to accelerate data loading
train_bfn = Tuple(*[Append() for _ in range(3)])
train_loader = DataLoader(train_dataset.transform(train_transform), batch_size, shuffle=True,
batchify_fn=train_bfn, last_batch='rollover', num_workers=num_workers)
val_bfn = Tuple(*[Append() for _ in range(2)])
val_loader = DataLoader(val_dataset.transform(val_transform), batch_size, shuffle=False,
batchify_fn=val_bfn, last_batch='keep', num_workers=num_workers)
for ib, batch in enumerate(train_loader):
if ib > 3:
break
print('data 0:', batch[0][0].shape, 'label 0:', batch[1][0].shape, 'mask 0:', batch[2][0].shape)
print('data 1:', batch[0][1].shape, 'label 1:', batch[1][1].shape, 'mask 1:', batch[2][1].shape)
##########################################################
# Mask RCNN Network
# -------------------
# In GluonCV, Mask RCNN network :py:class:`gluoncv.model_zoo.MaskRCNN`
# is inherited from Faster RCNN network :py:class:`gluoncv.model_zoo.FasterRCNN`.
#
# `Gluon Model Zoo <../../model_zoo/index.html>`__ has some Mask RCNN pretrained networks.
# You can load your favorite one with one simple line of code:
#
# .. hint::
#
# To avoid downloading the model in this tutorial, we set ``pretrained_base=False``.
# In practice we usually want to load pre-trained ImageNet weights by setting
# ``pretrained_base=True``.
from gluoncv import model_zoo
net = model_zoo.get_model('mask_rcnn_resnet50_v1b_bdd', pretrained_base=False)
print(net)
##############################################################################
# Mask-RCNN has identical inputs but produces an additional output.
# ``cids`` are the class labels,
# ``scores`` are confidence scores of each prediction,
# ``bboxes`` are absolute coordinates of corresponding bounding boxes.
# ``maps`` are the predicted drivable maps corresponding to each image.
import mxnet as mx
x = mx.nd.zeros(shape=(1, 3, 600, 800))
net.initialize()
cids, scores, bboxes, maps = net(x)
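##############################################################################
# A quick look at the output shapes for the dummy input above. The exact
# numbers depend on the network configuration, so treat them as illustrative.
print('cids:', cids.shape, 'scores:', scores.shape)
print('bboxes:', bboxes.shape, 'maps:', maps.shape)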
##############################################################################
# During training, an additional output is returned:
# ``drivable_maps_pred`` holds the predicted drivable maps,
# in addition to ``cls_preds`` and ``box_preds``.
from mxnet import autograd
with autograd.train_mode():
# this time we need ground-truth to generate high quality roi proposals during training
gt_box = mx.nd.zeros(shape=(1, 1, 4))
cls_preds, box_preds, drivable_maps_pred, roi, samples, matches, rpn_score, rpn_box, anchors = net(x, gt_box)
##########################################################
# Training losses
# ----------------
# There is one additional loss compared to Faster R-CNN (here, the drivable-map loss).
# the loss to penalize incorrect foreground/background prediction
rpn_cls_loss = mx.gluon.loss.SigmoidBinaryCrossEntropyLoss(from_sigmoid=False)
# the loss to penalize inaccurate anchor boxes
rpn_box_loss = mx.gluon.loss.HuberLoss(rho=1/9.) # == smoothl1
# the loss to penalize incorrect classification prediction.
rcnn_cls_loss = mx.gluon.loss.SoftmaxCrossEntropyLoss()
# and finally the loss to penalize inaccurate proposals
rcnn_box_loss = mx.gluon.loss.HuberLoss() # == smoothl1
# the loss to penalize incorrect segmentation pixel prediction
# rcnn_mask_loss = mx.gluon.loss.SigmoidBinaryCrossEntropyLoss(from_sigmoid=False)
# the loss to penalize incorrect drivable maps segmentation pixel prediction
drivable_maps_loss = mx.gluon.loss.SigmoidBinaryCrossEntropyLoss(from_sigmoid=False)
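##############################################################################
# A minimal sanity check of the drivable-map loss on dummy tensors. The loss
# expects raw logits (``from_sigmoid=False``) and a same-shaped binary target;
# the shapes used below are made up purely for illustration.
dummy_logits = mx.nd.random.normal(shape=(1, 3, 45, 80))
dummy_target = mx.nd.round(mx.nd.random.uniform(shape=(1, 3, 45, 80)))
print('dummy drivable loss:', drivable_maps_loss(dummy_logits, dummy_target))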
##########################################################
# Training targets
# ----------------
# RPN and RCNN training targets are the same as in :doc:`../examples_detection/train_faster_rcnn_voc`.
##############################################################################
# We also push RPN target computation to the CPU workers, so the network is passed to the transform
train_transform = presets.rcnn.BDDMaskRCNNDefaultTrainTransform(short, max_size, net)
# return images, labels, masks, rpn_cls_targets, rpn_box_targets, rpn_box_masks loosely
batchify_fn = Tuple(*[Append() for _ in range(6)])
# For the next part, we only use batch size 1
batch_size = 1
train_loader = DataLoader(train_dataset.transform(train_transform), batch_size, shuffle=True,
batchify_fn=batchify_fn, last_batch='rollover', num_workers=num_workers)
##########################################################
# The RCNN targets are generated from the intermediate network outputs; the original mask-target step is kept commented out below.
for ib, batch in enumerate(train_loader):
if ib > 0:
break
with autograd.train_mode():
for data, label, drivable_maps, rpn_cls_targets, rpn_box_targets, rpn_box_masks in zip(*batch):
gt_label = label[:, :, 4:5]
gt_box = label[:, :, :4]
# network forward
cls_preds, box_preds, drivable_maps_preds, roi, samples, matches, rpn_score, rpn_box, anchors = net(data, gt_box)
# generate targets for rcnn
cls_targets, box_targets, box_masks = net.target_generator(roi, samples, matches, gt_label, gt_box)
# generate targets for mask head
# mask_targets, mask_masks = net.mask_target(roi, masks, matches, cls_targets)
print('data:', data.shape)
# box and class labels
print('box:', gt_box.shape)
print('label:', gt_label.shape)
# -1 marks ignored label
print('rpn cls label:', rpn_cls_targets.shape)
# mask out ignored box label
print('rpn box label:', rpn_box_targets.shape)
print('rpn box mask:', rpn_box_masks.shape)
# rcnn does not have ignored label
print('rcnn cls label:', cls_targets.shape)
# mask out ignored box label
print('rcnn box label:', box_targets.shape)
print('rcnn box mask:', box_masks.shape)
# print('rcnn mask label:', mask_targets.shape)
# print('rcnn mask mask:', mask_masks.shape)
##########################################################
# Training loop
# -------------
# After we have defined the loss functions and generated training targets, we can write the training loop.
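##############################################################################
# The loop below calls ``trainer.step``, so a ``gluon.Trainer`` must exist.
# The optimizer and hyper-parameters here are illustrative assumptions only;
# the full training script uses its own tuned settings and learning-rate schedule.
from mxnet import gluon
trainer = gluon.Trainer(net.collect_params(), 'sgd',
                        {'learning_rate': 0.00125, 'wd': 1e-4, 'momentum': 0.9})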
for ib, batch in enumerate(train_loader):
if ib > 0:
break
with autograd.record():
for data, label, drivable_maps, rpn_cls_targets, rpn_box_targets, rpn_box_masks in zip(*batch):
gt_label = label[:, :, 4:5]
gt_box = label[:, :, :4]
# network forward
cls_preds, box_preds, drivable_maps_preds, roi, samples, matches, rpn_score, rpn_box, anchors = net(data, gt_box)
# generate targets for rcnn
cls_targets, box_targets, box_masks = net.target_generator(roi, samples, matches, gt_label, gt_box)
# generate targets for mask head
# mask_targets, mask_masks = net.mask_target(roi, masks, matches, cls_targets)
# losses of rpn
rpn_score = rpn_score.squeeze(axis=-1)
num_rpn_pos = (rpn_cls_targets >= 0).sum()
rpn_loss1 = rpn_cls_loss(rpn_score, rpn_cls_targets, rpn_cls_targets >= 0) * rpn_cls_targets.size / num_rpn_pos
rpn_loss2 = rpn_box_loss(rpn_box, rpn_box_targets, rpn_box_masks) * rpn_box.size / num_rpn_pos
# losses of rcnn
num_rcnn_pos = (cls_targets >= 0).sum()
rcnn_loss1 = rcnn_cls_loss(cls_preds, cls_targets, cls_targets >= 0) * cls_targets.size / cls_targets.shape[0] / num_rcnn_pos
rcnn_loss2 = rcnn_box_loss(box_preds, box_targets, box_masks) * box_preds.size / box_preds.shape[0] / num_rcnn_pos
# loss of mask
# mask_loss = rcnn_mask_loss(mask_preds, mask_targets, mask_masks) * mask_targets.size / mask_targets.shape[0] / mask_masks.sum()
# loss of the drivable map; use a new name so the loss function above is not shadowed
drivable_loss = drivable_maps_loss(drivable_maps_preds, drivable_maps)
# some standard gluon training steps:
autograd.backward([rpn_loss1, rpn_loss2, rcnn_loss1, rcnn_loss2, drivable_loss])
trainer.step(batch_size)
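##############################################################################
# As a quick sanity check we can print the scalar loss values from the single
# step above. This is illustrative only; real training tracks running averages.
print('rpn cls loss:', rpn_loss1.sum().asscalar())
print('rpn box loss:', rpn_loss2.sum().asscalar())
print('rcnn cls loss:', rcnn_loss1.sum().asscalar())
print('rcnn box loss:', rcnn_loss2.sum().asscalar())
print('drivable map loss:', drivable_loss.mean().asscalar())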
##############################################################################
# .. hint::
#
# Please check out the full :download:`training script <../../../scripts/instance/mask_rcnn/train_mask_rcnn.py>` for the complete implementation.
##########################################################
# References
# ----------
#
# .. [Girshick14] Ross Girshick and Jeff Donahue and Trevor Darrell and Jitendra Malik. Rich Feature Hierarchies for Accurate Object Detection and Semantic Segmentation. CVPR 2014.
# .. [Girshick15] Ross Girshick. Fast {R-CNN}. ICCV 2015.
# .. [Ren15] Shaoqing Ren and Kaiming He and Ross Girshick and Jian Sun. Faster {R-CNN}: Towards Real-Time Object Detection with Region Proposal Networks. NIPS 2015.
# .. [He16] Kaiming He and Xiangyu Zhang and Shaoqing Ren and Jian Sun. Deep Residual Learning for Image Recognition. CVPR 2016.
# .. [Lin17] Tsung-Yi Lin and Piotr Dollár and Ross Girshick and Kaiming He and Bharath Hariharan and Serge Belongie. Feature Pyramid Networks for Object Detection. CVPR 2017.
# .. [He17] Kaiming He and Georgia Gkioxari and Piotr Dollár and and Ross Girshick. Mask {R-CNN}. ICCV 2017.
the-stack_106_26055
import os
import unittest
import tempfile
from io import StringIO
from pathlib import Path
from robot.utils.asserts import assert_equal
from robot.parsing import get_tokens, get_init_tokens, get_resource_tokens, Token
T = Token
def assert_tokens(source, expected, get_tokens=get_tokens, **config):
tokens = list(get_tokens(source, **config))
assert_equal(len(tokens), len(expected),
'Expected %d tokens:\n%s\n\nGot %d tokens:\n%s'
% (len(expected), format_tokens(expected),
len(tokens), format_tokens(tokens)),
values=False)
for act, exp in zip(tokens, expected):
assert_equal(act, Token(*exp), formatter=repr)
def format_tokens(tokens):
return '\n'.join(repr(t) for t in tokens)
class TestLexSettingsSection(unittest.TestCase):
def test_common_suite_settings(self):
data = '''\
*** Settings ***
Documentation Doc in multiple
... parts
Metadata Name Value
MetaData Multi part Value continues
Suite Setup Log Hello, world!
suite teardown Log <b>The End.</b> WARN html=True
Test Setup None Shall Pass ${NONE}
TEST TEARDOWN No Operation
Test Timeout 1 day
Force Tags foo bar
'''
expected = [
(T.SETTING_HEADER, '*** Settings ***', 1, 0),
(T.EOS, '', 1, 16),
(T.DOCUMENTATION, 'Documentation', 2, 0),
(T.ARGUMENT, 'Doc', 2, 18),
(T.ARGUMENT, 'in multiple', 2, 25),
(T.ARGUMENT, 'parts', 3, 18),
(T.EOS, '', 3, 23),
(T.METADATA, 'Metadata', 4, 0),
(T.NAME, 'Name', 4, 18),
(T.ARGUMENT, 'Value', 4, 33),
(T.EOS, '', 4, 38),
(T.METADATA, 'MetaData', 5, 0),
(T.NAME, 'Multi part', 5, 18),
(T.ARGUMENT, 'Value', 5, 33),
(T.ARGUMENT, 'continues', 5, 42),
(T.EOS, '', 5, 51),
(T.SUITE_SETUP, 'Suite Setup', 6, 0),
(T.NAME, 'Log', 6, 18),
(T.ARGUMENT, 'Hello, world!', 6, 25),
(T.EOS, '', 6, 38),
(T.SUITE_TEARDOWN, 'suite teardown', 7, 0),
(T.NAME, 'Log', 7, 18),
(T.ARGUMENT, '<b>The End.</b>', 7, 25),
(T.ARGUMENT, 'WARN', 7, 44),
(T.ARGUMENT, 'html=True', 7, 52),
(T.EOS, '', 7, 61),
(T.TEST_SETUP, 'Test Setup', 8, 0),
(T.NAME, 'None Shall Pass', 8, 18),
(T.ARGUMENT, '${NONE}', 8, 37),
(T.EOS, '', 8, 44),
(T.TEST_TEARDOWN, 'TEST TEARDOWN', 9, 0),
(T.NAME, 'No Operation', 9, 18),
(T.EOS, '', 9, 30),
(T.TEST_TIMEOUT, 'Test Timeout', 10, 0),
(T.ARGUMENT, '1 day', 10, 18),
(T.EOS, '', 10, 23),
(T.FORCE_TAGS, 'Force Tags', 11, 0),
(T.ARGUMENT, 'foo', 11, 18),
(T.ARGUMENT, 'bar', 11, 25),
(T.EOS, '', 11, 28),
]
assert_tokens(data, expected, get_tokens, data_only=True)
assert_tokens(data, expected, get_init_tokens, data_only=True)
def test_suite_settings_not_allowed_in_init_file(self):
data = '''\
*** Settings ***
Test Template Not allowed in init file
Force Tags Allowed in both
Default Tags Not allowed in init file
'''
expected = [
(T.SETTING_HEADER, '*** Settings ***', 1, 0),
(T.EOS, '', 1, 16),
(T.TEST_TEMPLATE, 'Test Template', 2, 0),
(T.NAME, 'Not allowed in init file', 2, 18),
(T.EOS, '', 2, 42),
(T.FORCE_TAGS, 'Force Tags', 3, 0),
(T.ARGUMENT, 'Allowed in both', 3, 18),
(T.EOS, '', 3, 33),
(T.DEFAULT_TAGS, 'Default Tags', 4, 0),
(T.ARGUMENT, 'Not allowed in init file', 4, 18),
(T.EOS, '', 4, 42)
]
assert_tokens(data, expected, get_tokens, data_only=True)
# Values of invalid settings are ignored with `data_only=True`.
expected = [
(T.SETTING_HEADER, '*** Settings ***', 1, 0),
(T.EOS, '', 1, 16),
(T.ERROR, 'Test Template', 2, 0,
"Setting 'Test Template' is not allowed in suite initialization file."),
(T.EOS, '', 2, 13),
(T.FORCE_TAGS, 'Force Tags', 3, 0),
(T.ARGUMENT, 'Allowed in both', 3, 18),
(T.EOS, '', 3, 33),
(T.ERROR, 'Default Tags', 4, 0,
"Setting 'Default Tags' is not allowed in suite initialization file."),
(T.EOS, '', 4, 12)
]
assert_tokens(data, expected, get_init_tokens, data_only=True)
def test_suite_settings_not_allowed_in_resource_file(self):
data = '''\
*** Settings ***
Metadata Name Value
Suite Setup Log Hello, world!
suite teardown Log <b>The End.</b> WARN html=True
Test Setup None Shall Pass ${NONE}
TEST TEARDOWN No Operation
Test Template NONE
Test Timeout 1 day
Force Tags foo bar
Default Tags zap
Documentation Valid in all data files.
'''
# Values of invalid settings are ignored with `data_only=True`.
expected = [
(T.SETTING_HEADER, '*** Settings ***', 1, 0),
(T.EOS, '', 1, 16),
(T.ERROR, 'Metadata', 2, 0,
"Setting 'Metadata' is not allowed in resource file."),
(T.EOS, '', 2, 8),
(T.ERROR, 'Suite Setup', 3, 0,
"Setting 'Suite Setup' is not allowed in resource file."),
(T.EOS, '', 3, 11),
(T.ERROR, 'suite teardown', 4, 0,
"Setting 'suite teardown' is not allowed in resource file."),
(T.EOS, '', 4, 14),
(T.ERROR, 'Test Setup', 5, 0,
"Setting 'Test Setup' is not allowed in resource file."),
(T.EOS, '', 5, 10),
(T.ERROR, 'TEST TEARDOWN', 6, 0,
"Setting 'TEST TEARDOWN' is not allowed in resource file."),
(T.EOS, '', 6, 13),
(T.ERROR, 'Test Template', 7, 0,
"Setting 'Test Template' is not allowed in resource file."),
(T.EOS, '', 7, 13),
(T.ERROR, 'Test Timeout', 8, 0,
"Setting 'Test Timeout' is not allowed in resource file."),
(T.EOS, '', 8, 12),
(T.ERROR, 'Force Tags', 9, 0,
"Setting 'Force Tags' is not allowed in resource file."),
(T.EOS, '', 9, 10),
(T.ERROR, 'Default Tags', 10, 0,
"Setting 'Default Tags' is not allowed in resource file."),
(T.EOS, '', 10, 12),
(T.DOCUMENTATION, 'Documentation', 11, 0),
(T.ARGUMENT, 'Valid in all data files.', 11, 18),
(T.EOS, '', 11, 42)
]
assert_tokens(data, expected, get_resource_tokens, data_only=True)
def test_imports(self):
data = '''\
*** Settings ***
Library String
LIBRARY XML lxml=True
Resource example.resource
resource
Variables variables.py
VariAbles variables.py arg
'''
expected = [
(T.SETTING_HEADER, '*** Settings ***', 1, 0),
(T.EOS, '', 1, 16),
(T.LIBRARY, 'Library', 2, 0),
(T.NAME, 'String', 2, 18),
(T.EOS, '', 2, 24),
(T.LIBRARY, 'LIBRARY', 3, 0),
(T.NAME, 'XML', 3, 18),
(T.ARGUMENT, 'lxml=True', 3, 25),
(T.EOS, '', 3, 34),
(T.RESOURCE, 'Resource', 4, 0),
(T.NAME, 'example.resource', 4, 18),
(T.EOS, '', 4, 34),
(T.RESOURCE, 'resource', 5, 0),
(T.EOS, '', 5, 8),
(T.VARIABLES, 'Variables', 6, 0),
(T.NAME, 'variables.py', 6, 18),
(T.EOS, '', 6, 30),
(T.VARIABLES, 'VariAbles', 7, 0),
(T.NAME, 'variables.py', 7, 18),
(T.ARGUMENT, 'arg', 7, 34),
(T.EOS, '', 7, 37),
]
assert_tokens(data, expected, get_tokens, data_only=True)
assert_tokens(data, expected, get_init_tokens, data_only=True)
assert_tokens(data, expected, get_resource_tokens, data_only=True)
def test_with_name(self):
data = '''\
*** Settings ***
Library Easter WITH NAME Christmas
Library Arguments arg WITH NAME One argument
Library Arguments arg1 arg2
... arg3 arg4 WITH NAME Four arguments
'''
expected = [
(T.SETTING_HEADER, '*** Settings ***', 1, 0),
(T.EOS, '', 1, 16),
(T.LIBRARY, 'Library', 2, 0),
(T.NAME, 'Easter', 2, 16),
(T.WITH_NAME, 'WITH NAME', 2, 45),
(T.NAME, 'Christmas', 2, 58),
(T.EOS, '', 2, 67),
(T.LIBRARY, 'Library', 3, 0),
(T.NAME, 'Arguments', 3, 16),
(T.ARGUMENT, 'arg', 3, 29),
(T.WITH_NAME, 'WITH NAME', 3, 45),
(T.NAME, 'One argument', 3, 58),
(T.EOS, '', 3, 70),
(T.LIBRARY, 'Library', 4, 0),
(T.NAME, 'Arguments', 4, 16),
(T.ARGUMENT, 'arg1', 4, 29),
(T.ARGUMENT, 'arg2', 4, 37),
(T.ARGUMENT, 'arg3', 5, 29),
(T.ARGUMENT, 'arg4', 5, 37),
(T.WITH_NAME, 'WITH NAME', 5, 45),
(T.NAME, 'Four arguments', 5, 58),
(T.EOS, '', 5, 72)
]
assert_tokens(data, expected, get_tokens, data_only=True)
assert_tokens(data, expected, get_init_tokens, data_only=True)
assert_tokens(data, expected, get_resource_tokens, data_only=True)
def test_invalid_settings(self):
data = '''\
*** Settings ***
Invalid Value
Library Valid
Oops, I dit it again
Libra ry Smallish typo gives us recommendations!
'''
# Values of invalid settings are ignored with `data_only=True`.
expected = [
(T.SETTING_HEADER, '*** Settings ***', 1, 0),
(T.EOS, '', 1, 16),
(T.ERROR, 'Invalid', 2, 0, "Non-existing setting 'Invalid'."),
(T.EOS, '', 2, 7),
(T.LIBRARY, 'Library', 3, 0),
(T.NAME, 'Valid', 3, 14),
(T.EOS, '', 3, 19),
(T.ERROR, 'Oops, I', 4, 0, "Non-existing setting 'Oops, I'."),
(T.EOS, '', 4, 7),
(T.ERROR, 'Libra ry', 5, 0, "Non-existing setting 'Libra ry'. "
"Did you mean:\n Library"),
(T.EOS, '', 5, 8)
]
assert_tokens(data, expected, get_tokens, data_only=True)
assert_tokens(data, expected, get_init_tokens, data_only=True)
assert_tokens(data, expected, get_resource_tokens, data_only=True)
def test_too_many_values_for_single_value_settings(self):
data = '''\
*** Settings ***
Resource Too many values
Test Timeout Too much
Test Template 1 2 3 4 5
'''
# Values of invalid settings are ignored with `data_only=True`.
expected = [
(T.SETTING_HEADER, '*** Settings ***', 1, 0),
(T.EOS, '', 1, 16),
(T.ERROR, 'Resource', 2, 0,
"Setting 'Resource' accepts only one value, got 3."),
(T.EOS, '', 2, 8),
(T.ERROR, 'Test Timeout', 3, 0,
"Setting 'Test Timeout' accepts only one value, got 2."),
(T.EOS, '', 3, 12),
(T.ERROR, 'Test Template', 4, 0,
"Setting 'Test Template' accepts only one value, got 5."),
(T.EOS, '', 4, 13),
]
assert_tokens(data, expected, data_only=True)
def test_setting_too_many_times(self):
data = '''\
*** Settings ***
Documentation Used
Documentation Ignored
Suite Setup Used
Suite Setup Ignored
Suite Teardown Used
Suite Teardown Ignored
Test Setup Used
Test Setup Ignored
Test Teardown Used
Test Teardown Ignored
Test Template Used
Test Template Ignored
Test Timeout Used
Test Timeout Ignored
Force Tags Used
Force Tags Ignored
Default Tags Used
Default Tags Ignored
'''
# Values of invalid settings are ignored with `data_only=True`.
expected = [
(T.SETTING_HEADER, '*** Settings ***', 1, 0),
(T.EOS, '', 1, 16),
(T.DOCUMENTATION, 'Documentation', 2, 0),
(T.ARGUMENT, 'Used', 2, 18),
(T.EOS, '', 2, 22),
(T.ERROR, 'Documentation', 3, 0,
"Setting 'Documentation' is allowed only once. Only the first value is used."),
(T.EOS, '', 3, 13),
(T.SUITE_SETUP, 'Suite Setup', 4, 0),
(T.NAME, 'Used', 4, 18),
(T.EOS, '', 4, 22),
(T.ERROR, 'Suite Setup', 5, 0,
"Setting 'Suite Setup' is allowed only once. Only the first value is used."),
(T.EOS, '', 5, 11),
(T.SUITE_TEARDOWN, 'Suite Teardown', 6, 0),
(T.NAME, 'Used', 6, 18),
(T.EOS, '', 6, 22),
(T.ERROR, 'Suite Teardown', 7, 0,
"Setting 'Suite Teardown' is allowed only once. Only the first value is used."),
(T.EOS, '', 7, 14),
(T.TEST_SETUP, 'Test Setup', 8, 0),
(T.NAME, 'Used', 8, 18),
(T.EOS, '', 8, 22),
(T.ERROR, 'Test Setup', 9, 0,
"Setting 'Test Setup' is allowed only once. Only the first value is used."),
(T.EOS, '', 9, 10),
(T.TEST_TEARDOWN, 'Test Teardown', 10, 0),
(T.NAME, 'Used', 10, 18),
(T.EOS, '', 10, 22),
(T.ERROR, 'Test Teardown', 11, 0,
"Setting 'Test Teardown' is allowed only once. Only the first value is used."),
(T.EOS, '', 11, 13),
(T.TEST_TEMPLATE, 'Test Template', 12, 0),
(T.NAME, 'Used', 12, 18),
(T.EOS, '', 12, 22),
(T.ERROR, 'Test Template', 13, 0,
"Setting 'Test Template' is allowed only once. Only the first value is used."),
(T.EOS, '', 13, 13),
(T.TEST_TIMEOUT, 'Test Timeout', 14, 0),
(T.ARGUMENT, 'Used', 14, 18),
(T.EOS, '', 14, 22),
(T.ERROR, 'Test Timeout', 15, 0,
"Setting 'Test Timeout' is allowed only once. Only the first value is used."),
(T.EOS, '', 15, 12),
(T.FORCE_TAGS, 'Force Tags', 16, 0),
(T.ARGUMENT, 'Used', 16, 18),
(T.EOS, '', 16, 22),
(T.ERROR, 'Force Tags', 17, 0,
"Setting 'Force Tags' is allowed only once. Only the first value is used."),
(T.EOS, '', 17, 10),
(T.DEFAULT_TAGS, 'Default Tags', 18, 0),
(T.ARGUMENT, 'Used', 18, 18),
(T.EOS, '', 18, 22),
(T.ERROR, 'Default Tags', 19, 0,
"Setting 'Default Tags' is allowed only once. Only the first value is used."),
(T.EOS, '', 19, 12)
]
assert_tokens(data, expected, data_only=True)
class TestLexTestAndKeywordSettings(unittest.TestCase):
def test_test_settings(self):
data = '''\
*** Test Cases ***
Name
[Documentation] Doc in multiple
... parts
[Tags] first second
[Setup] Log Hello, world! level=DEBUG
[Teardown] No Operation
[Template] Log Many
[Timeout] ${TIMEOUT}
'''
expected = [
(T.TESTCASE_HEADER, '*** Test Cases ***', 1, 0),
(T.EOS, '', 1, 18),
(T.TESTCASE_NAME, 'Name', 2, 0),
(T.EOS, '', 2, 4),
(T.DOCUMENTATION, '[Documentation]', 3, 4),
(T.ARGUMENT, 'Doc', 3, 23),
(T.ARGUMENT, 'in multiple', 3, 30),
(T.ARGUMENT, 'parts', 4, 23),
(T.EOS, '', 4, 28),
(T.TAGS, '[Tags]', 5, 4),
(T.ARGUMENT, 'first', 5, 23),
(T.ARGUMENT, 'second', 5, 32),
(T.EOS, '', 5, 38),
(T.SETUP, '[Setup]', 6, 4),
(T.NAME, 'Log', 6, 23),
(T.ARGUMENT, 'Hello, world!', 6, 30),
(T.ARGUMENT, 'level=DEBUG', 6, 47),
(T.EOS, '', 6, 58),
(T.TEARDOWN, '[Teardown]', 7, 4),
(T.NAME, 'No Operation', 7, 23),
(T.EOS, '', 7, 35),
(T.TEMPLATE, '[Template]', 8, 4),
(T.NAME, 'Log Many', 8, 23),
(T.EOS, '', 8, 31),
(T.TIMEOUT, '[Timeout]', 9, 4),
(T.ARGUMENT, '${TIMEOUT}', 9, 23),
(T.EOS, '', 9, 33)
]
assert_tokens(data, expected, data_only=True)
def test_keyword_settings(self):
data = '''\
*** Keywords ***
Name
[Arguments] ${arg1} ${arg2}=default @{varargs} &{kwargs}
[Documentation] Doc in multiple
... parts
[Tags] first second
[Teardown] No Operation
[Timeout] ${TIMEOUT}
[Return] Value
'''
expected = [
(T.KEYWORD_HEADER, '*** Keywords ***', 1, 0),
(T.EOS, '', 1, 16),
(T.KEYWORD_NAME, 'Name', 2, 0),
(T.EOS, '', 2, 4),
(T.ARGUMENTS, '[Arguments]', 3, 4),
(T.ARGUMENT, '${arg1}', 3, 23),
(T.ARGUMENT, '${arg2}=default', 3, 34),
(T.ARGUMENT, '@{varargs}', 3, 53),
(T.ARGUMENT, '&{kwargs}', 3, 67),
(T.EOS, '', 3, 76),
(T.DOCUMENTATION, '[Documentation]', 4, 4),
(T.ARGUMENT, 'Doc', 4, 23),
(T.ARGUMENT, 'in multiple', 4, 30),
(T.ARGUMENT, 'parts', 5, 23),
(T.EOS, '', 5, 28),
(T.TAGS, '[Tags]', 6, 4),
(T.ARGUMENT, 'first', 6, 23),
(T.ARGUMENT, 'second', 6, 32),
(T.EOS, '', 6, 38),
(T.TEARDOWN, '[Teardown]', 7, 4),
(T.NAME, 'No Operation', 7, 23),
(T.EOS, '', 7, 35),
(T.TIMEOUT, '[Timeout]', 8, 4),
(T.ARGUMENT, '${TIMEOUT}', 8, 23),
(T.EOS, '', 8, 33),
(T.RETURN, '[Return]', 9, 4),
(T.ARGUMENT, 'Value', 9, 23),
(T.EOS, '', 9, 28)
]
assert_tokens(data, expected, get_tokens, data_only=True)
assert_tokens(data, expected, get_resource_tokens, data_only=True)
def test_too_many_values_for_single_value_test_settings(self):
data = '''\
*** Test Cases ***
Name
[Timeout] This is not good
[Template] This is bad
'''
# Values of invalid settings are ignored with `data_only=True`.
expected = [
(T.TESTCASE_HEADER, '*** Test Cases ***', 1, 0),
(T.EOS, '', 1, 18),
(T.TESTCASE_NAME, 'Name', 2, 0),
(T.EOS, '', 2, 4),
(T.ERROR, '[Timeout]', 3, 4,
"Setting 'Timeout' accepts only one value, got 4."),
(T.EOS, '', 3, 13),
(T.ERROR, '[Template]', 4, 4,
"Setting 'Template' accepts only one value, got 3."),
(T.EOS, '', 4, 14)
]
assert_tokens(data, expected, data_only=True)
def test_too_many_values_for_single_value_keyword_settings(self):
data = '''\
*** Keywords ***
Name
[Timeout] This is not good
'''
# Values of invalid settings are ignored with `data_only=True`.
expected = [
(T.KEYWORD_HEADER, '*** Keywords ***', 1, 0),
(T.EOS, '', 1, 16),
(T.KEYWORD_NAME, 'Name', 2, 0),
(T.EOS, '', 2, 4),
(T.ERROR, '[Timeout]', 3, 4,
"Setting 'Timeout' accepts only one value, got 4."),
(T.EOS, '', 3, 13),
]
assert_tokens(data, expected, data_only=True)
def test_test_settings_too_many_times(self):
data = '''\
*** Test Cases ***
Name
[Documentation] Used
[Documentation] Ignored
[Tags] Used
[Tags] Ignored
[Setup] Used
[Setup] Ignored
[Teardown] Used
[Teardown] Ignored
[Template] Used
[Template] Ignored
[Timeout] Used
[Timeout] Ignored
'''
# Values of invalid settings are ignored with `data_only=True`.
expected = [
(T.TESTCASE_HEADER, '*** Test Cases ***', 1, 0),
(T.EOS, '', 1, 18),
(T.TESTCASE_NAME, 'Name', 2, 0),
(T.EOS, '', 2, 4),
(T.DOCUMENTATION, '[Documentation]', 3, 4),
(T.ARGUMENT, 'Used', 3, 23),
(T.EOS, '', 3, 27),
(T.ERROR, '[Documentation]', 4, 4,
"Setting 'Documentation' is allowed only once. Only the first value is used."),
(T.EOS, '', 4, 19),
(T.TAGS, '[Tags]', 5, 4),
(T.ARGUMENT, 'Used', 5, 23),
(T.EOS, '', 5, 27),
(T.ERROR, '[Tags]', 6, 4,
"Setting 'Tags' is allowed only once. Only the first value is used."),
(T.EOS, '', 6, 10),
(T.SETUP, '[Setup]', 7, 4),
(T.NAME, 'Used', 7, 23),
(T.EOS, '', 7, 27),
(T.ERROR, '[Setup]', 8, 4,
"Setting 'Setup' is allowed only once. Only the first value is used."),
(T.EOS, '', 8, 11),
(T.TEARDOWN, '[Teardown]', 9, 4),
(T.NAME, 'Used', 9, 23),
(T.EOS, '', 9, 27),
(T.ERROR, '[Teardown]', 10, 4,
"Setting 'Teardown' is allowed only once. Only the first value is used."),
(T.EOS, '', 10, 14),
(T.TEMPLATE, '[Template]', 11, 4),
(T.NAME, 'Used', 11, 23),
(T.EOS, '', 11, 27),
(T.ERROR, '[Template]', 12, 4,
"Setting 'Template' is allowed only once. Only the first value is used."),
(T.EOS, '', 12, 14),
(T.TIMEOUT, '[Timeout]', 13, 4),
(T.ARGUMENT, 'Used', 13, 23),
(T.EOS, '', 13, 27),
(T.ERROR, '[Timeout]', 14, 4,
"Setting 'Timeout' is allowed only once. Only the first value is used."),
(T.EOS, '', 14, 13)
]
assert_tokens(data, expected, data_only=True)
def test_keyword_settings_too_many_times(self):
data = '''\
*** Keywords ***
Name
[Documentation] Used
[Documentation] Ignored
[Tags] Used
[Tags] Ignored
[Arguments] Used
[Arguments] Ignored
[Teardown] Used
[Teardown] Ignored
[Timeout] Used
[Timeout] Ignored
[Return] Used
[Return] Ignored
'''
# Values of invalid settings are ignored with `data_only=True`.
expected = [
(T.KEYWORD_HEADER, '*** Keywords ***', 1, 0),
(T.EOS, '', 1, 16),
(T.KEYWORD_NAME, 'Name', 2, 0),
(T.EOS, '', 2, 4),
(T.DOCUMENTATION, '[Documentation]', 3, 4),
(T.ARGUMENT, 'Used', 3, 23),
(T.EOS, '', 3, 27),
(T.ERROR, '[Documentation]', 4, 4,
"Setting 'Documentation' is allowed only once. Only the first value is used."),
(T.EOS, '', 4, 19),
(T.TAGS, '[Tags]', 5, 4),
(T.ARGUMENT, 'Used', 5, 23),
(T.EOS, '', 5, 27),
(T.ERROR, '[Tags]', 6, 4,
"Setting 'Tags' is allowed only once. Only the first value is used."),
(T.EOS, '', 6, 10),
(T.ARGUMENTS, '[Arguments]', 7, 4),
(T.ARGUMENT, 'Used', 7, 23),
(T.EOS, '', 7, 27),
(T.ERROR, '[Arguments]', 8, 4,
"Setting 'Arguments' is allowed only once. Only the first value is used."),
(T.EOS, '', 8, 15),
(T.TEARDOWN, '[Teardown]', 9, 4),
(T.NAME, 'Used', 9, 23),
(T.EOS, '', 9, 27),
(T.ERROR, '[Teardown]', 10, 4,
"Setting 'Teardown' is allowed only once. Only the first value is used."),
(T.EOS, '', 10, 14),
(T.TIMEOUT, '[Timeout]', 11, 4),
(T.ARGUMENT, 'Used', 11, 23),
(T.EOS, '', 11, 27),
(T.ERROR, '[Timeout]', 12, 4,
"Setting 'Timeout' is allowed only once. Only the first value is used."),
(T.EOS, '', 12, 13),
(T.RETURN, '[Return]', 13, 4),
(T.ARGUMENT, 'Used', 13, 23),
(T.EOS, '', 13, 27),
(T.ERROR, '[Return]', 14, 4,
"Setting 'Return' is allowed only once. Only the first value is used."),
(T.EOS, '', 14, 12)
]
assert_tokens(data, expected, data_only=True)
class TestSectionHeaders(unittest.TestCase):
def test_headers_allowed_everywhere(self):
data = '''\
*** Settings ***
*** Setting ***
***variables***
*VARIABLE* ARGS ARGH
*Keywords *** ...
... ***
*** Keyword *** # Comment
*** Comments ***
*** Comment *** 1 2
... 3 4
... 5
'''
expected = [
(T.SETTING_HEADER, '*** Settings ***', 1, 0),
(T.EOS, '', 1, 16),
(T.SETTING_HEADER, '*** Setting ***', 2, 0),
(T.EOS, '', 2, 15),
(T.VARIABLE_HEADER, '***variables***', 3, 0),
(T.EOS, '', 3, 15),
(T.VARIABLE_HEADER, '*VARIABLE*', 4, 0),
(T.VARIABLE_HEADER, 'ARGS', 4, 14),
(T.VARIABLE_HEADER, 'ARGH', 4, 22),
(T.EOS, '', 4, 26),
(T.KEYWORD_HEADER, '*Keywords', 5, 0),
(T.KEYWORD_HEADER, '***', 5, 14),
(T.KEYWORD_HEADER, '...', 5, 21),
(T.KEYWORD_HEADER, '***', 6, 14),
(T.EOS, '', 6, 17),
(T.KEYWORD_HEADER, '*** Keyword ***', 7, 0),
(T.EOS, '', 7, 15)
]
assert_tokens(data, expected, get_tokens, data_only=True)
assert_tokens(data, expected, get_init_tokens, data_only=True)
assert_tokens(data, expected, get_resource_tokens, data_only=True)
def test_test_case_section(self):
assert_tokens('*** Test Cases ***', [
(T.TESTCASE_HEADER, '*** Test Cases ***', 1, 0),
(T.EOS, '', 1, 18),
], data_only=True)
def test_case_section_causes_error_in_init_file(self):
assert_tokens('*** Test Cases ***', [
(T.ERROR, '*** Test Cases ***', 1, 0,
"'Test Cases' section is not allowed in suite initialization file."),
(T.EOS, '', 1, 18),
], get_init_tokens, data_only=True)
def test_case_section_causes_fatal_error_in_resource_file(self):
assert_tokens('*** Test Cases ***', [
(T.FATAL_ERROR, '*** Test Cases ***', 1, 0,
"Resource file with 'Test Cases' section is invalid."),
(T.EOS, '', 1, 18),
], get_resource_tokens, data_only=True)
def test_invalid_section_in_test_case_file(self):
assert_tokens('*** Invalid ***', [
(T.ERROR, '*** Invalid ***', 1, 0,
"Unrecognized section header '*** Invalid ***'. Valid sections: "
"'Settings', 'Variables', 'Test Cases', 'Tasks', 'Keywords' and 'Comments'."),
(T.EOS, '', 1, 15),
], data_only=True)
def test_invalid_section_in_init_file(self):
assert_tokens('*** S e t t i n g s ***', [
(T.ERROR, '*** S e t t i n g s ***', 1, 0,
"Unrecognized section header '*** S e t t i n g s ***'. Valid sections: "
"'Settings', 'Variables', 'Keywords' and 'Comments'."),
(T.EOS, '', 1, 23),
], get_init_tokens, data_only=True)
def test_invalid_section_in_resource_file(self):
assert_tokens('*', [
(T.ERROR, '*', 1, 0,
"Unrecognized section header '*'. Valid sections: "
"'Settings', 'Variables', 'Keywords' and 'Comments'."),
(T.EOS, '', 1, 1),
], get_resource_tokens, data_only=True)
class TestName(unittest.TestCase):
def test_name_on_own_row(self):
self._verify('My Name',
[(T.TESTCASE_NAME, 'My Name', 2, 0), (T.EOL, '', 2, 7), (T.EOS, '', 2, 7)])
self._verify('My Name ',
[(T.TESTCASE_NAME, 'My Name', 2, 0), (T.EOL, ' ', 2, 7), (T.EOS, '', 2, 11)])
self._verify('My Name\n Keyword',
[(T.TESTCASE_NAME, 'My Name', 2, 0), (T.EOL, '\n', 2, 7), (T.EOS, '', 2, 8),
(T.SEPARATOR, ' ', 3, 0), (T.KEYWORD, 'Keyword', 3, 4), (T.EOL, '', 3, 11), (T.EOS, '', 3, 11)])
self._verify('My Name \n Keyword',
[(T.TESTCASE_NAME, 'My Name', 2, 0), (T.EOL, ' \n', 2, 7), (T.EOS, '', 2, 10),
(T.SEPARATOR, ' ', 3, 0), (T.KEYWORD, 'Keyword', 3, 4), (T.EOL, '', 3, 11), (T.EOS, '', 3, 11)])
def test_name_and_keyword_on_same_row(self):
self._verify('Name Keyword',
[(T.TESTCASE_NAME, 'Name', 2, 0), (T.EOS, '', 2, 4), (T.SEPARATOR, ' ', 2, 4),
(T.KEYWORD, 'Keyword', 2, 8), (T.EOL, '', 2, 15), (T.EOS, '', 2, 15)])
self._verify('N K A',
[(T.TESTCASE_NAME, 'N', 2, 0), (T.EOS, '', 2, 1), (T.SEPARATOR, ' ', 2, 1),
(T.KEYWORD, 'K', 2, 3), (T.SEPARATOR, ' ', 2, 4),
(T.ARGUMENT, 'A', 2, 6), (T.EOL, '', 2, 7), (T.EOS, '', 2, 7)])
self._verify('N ${v}= K',
[(T.TESTCASE_NAME, 'N', 2, 0), (T.EOS, '', 2, 1), (T.SEPARATOR, ' ', 2, 1),
(T.ASSIGN, '${v}=', 2, 3), (T.SEPARATOR, ' ', 2, 8),
(T.KEYWORD, 'K', 2, 10), (T.EOL, '', 2, 11), (T.EOS, '', 2, 11)])
def test_name_and_keyword_on_same_continued_rows(self):
self._verify('Name\n... Keyword',
[(T.TESTCASE_NAME, 'Name', 2, 0), (T.EOS, '', 2, 4), (T.EOL, '\n', 2, 4),
(T.CONTINUATION, '...', 3, 0), (T.SEPARATOR, ' ', 3, 3),
(T.KEYWORD, 'Keyword', 3, 7), (T.EOL, '', 3, 14), (T.EOS, '', 3, 14)])
def test_name_and_setting_on_same_row(self):
self._verify('Name [Documentation] The doc.',
[(T.TESTCASE_NAME, 'Name', 2, 0), (T.EOS, '', 2, 4), (T.SEPARATOR, ' ', 2, 4),
(T.DOCUMENTATION, '[Documentation]', 2, 8), (T.SEPARATOR, ' ', 2, 23),
(T.ARGUMENT, 'The doc.', 2, 27), (T.EOL, '', 2, 35), (T.EOS, '', 2, 35)])
def test_name_with_extra(self):
self._verify('Name\n...\n',
[(T.TESTCASE_NAME, 'Name', 2, 0), (T.EOS, '', 2, 4), (T.EOL, '\n', 2, 4),
(T.CONTINUATION, '...', 3, 0), (T.KEYWORD, '', 3, 3), (T.EOL, '\n', 3, 3), (T.EOS, '', 3, 4)])
def _verify(self, data, tokens):
assert_tokens('*** Test Cases ***\n' + data,
[(T.TESTCASE_HEADER, '*** Test Cases ***', 1, 0),
(T.EOL, '\n', 1, 18),
(T.EOS, '', 1, 19)] + tokens)
tokens[0] = (T.KEYWORD_NAME,) + tokens[0][1:]
assert_tokens('*** Keywords ***\n' + data,
[(T.KEYWORD_HEADER, '*** Keywords ***', 1, 0),
(T.EOL, '\n', 1, 16),
(T.EOS, '', 1, 17)] + tokens,
get_tokens=get_resource_tokens)
class TestNameWithPipes(unittest.TestCase):
def test_name_on_own_row(self):
self._verify('| My Name',
[(T.SEPARATOR, '| ', 2, 0), (T.TESTCASE_NAME, 'My Name', 2, 2), (T.EOL, '', 2, 9), (T.EOS, '', 2, 9)])
self._verify('| My Name |',
[(T.SEPARATOR, '| ', 2, 0), (T.TESTCASE_NAME, 'My Name', 2, 2), (T.SEPARATOR, ' |', 2, 9), (T.EOL, '', 2, 11), (T.EOS, '', 2, 11)])
self._verify('| My Name | ',
[(T.SEPARATOR, '| ', 2, 0), (T.TESTCASE_NAME, 'My Name', 2, 2), (T.SEPARATOR, ' |', 2, 9), (T.EOL, ' ', 2, 11), (T.EOS, '', 2, 12)])
def test_name_and_keyword_on_same_row(self):
self._verify('| Name | Keyword',
[(T.SEPARATOR, '| ', 2, 0), (T.TESTCASE_NAME, 'Name', 2, 2), (T.EOS, '', 2, 6),
(T.SEPARATOR, ' | ', 2, 6), (T.KEYWORD, 'Keyword', 2, 9), (T.EOL, '', 2, 16), (T.EOS, '', 2, 16)])
self._verify('| N | K | A |\n',
[(T.SEPARATOR, '| ', 2, 0), (T.TESTCASE_NAME, 'N', 2, 2), (T.EOS, '', 2, 3),
(T.SEPARATOR, ' | ', 2, 3), (T.KEYWORD, 'K', 2, 6), (T.SEPARATOR, ' | ', 2, 7),
(T.ARGUMENT, 'A', 2, 10), (T.SEPARATOR, ' |', 2, 11), (T.EOL, '\n', 2, 13), (T.EOS, '', 2, 14)])
self._verify('| N | ${v} = | K ',
[(T.SEPARATOR, '| ', 2, 0), (T.TESTCASE_NAME, 'N', 2, 5), (T.EOS, '', 2, 6),
(T.SEPARATOR, ' | ', 2, 6), (T.ASSIGN, '${v} =', 2, 11), (T.SEPARATOR, ' | ', 2, 17),
(T.KEYWORD, 'K', 2, 26), (T.EOL, ' ', 2, 27), (T.EOS, '', 2, 31)])
def test_name_and_keyword_on_same_continued_row(self):
self._verify('| Name | \n| ... | Keyword',
[(T.SEPARATOR, '| ', 2, 0), (T.TESTCASE_NAME, 'Name', 2, 2), (T.EOS, '', 2, 6), (T.SEPARATOR, ' |', 2, 6), (T.EOL, ' \n', 2, 8),
(T.SEPARATOR, '| ', 3, 0), (T.CONTINUATION, '...', 3, 2), (T.SEPARATOR, ' | ', 3, 5),
(T.KEYWORD, 'Keyword', 3, 8), (T.EOL, '', 3, 15), (T.EOS, '', 3, 15)])
def test_name_and_setting_on_same_row(self):
self._verify('| Name | [Documentation] | The doc.',
[(T.SEPARATOR, '| ', 2, 0), (T.TESTCASE_NAME, 'Name', 2, 2), (T.EOS, '', 2, 6), (T.SEPARATOR, ' | ', 2, 6),
(T.DOCUMENTATION, '[Documentation]', 2, 9), (T.SEPARATOR, ' | ', 2, 24),
(T.ARGUMENT, 'The doc.', 2, 27), (T.EOL, '', 2, 35), (T.EOS, '', 2, 35)])
def test_name_with_extra(self):
self._verify('| Name | | |\n| ... |',
[(T.SEPARATOR, '| ', 2, 0), (T.TESTCASE_NAME, 'Name', 2, 2), (T.EOS, '', 2, 6),
(T.SEPARATOR, ' | ', 2, 6), (T.SEPARATOR, '| ', 2, 10), (T.SEPARATOR, '|', 2, 14), (T.EOL, '\n', 2, 15),
(T.SEPARATOR, '| ', 3, 0), (T.CONTINUATION, '...', 3, 2), (T.KEYWORD, '', 3, 5), (T.SEPARATOR, ' |', 3, 5),
(T.EOL, '', 3, 7), (T.EOS, '', 3, 7)])
def _verify(self, data, tokens):
assert_tokens('*** Test Cases ***\n' + data,
[(T.TESTCASE_HEADER, '*** Test Cases ***', 1, 0),
(T.EOL, '\n', 1, 18),
(T.EOS, '', 1, 19)] + tokens)
tokens[1] = (T.KEYWORD_NAME,) + tokens[1][1:]
assert_tokens('*** Keywords ***\n' + data,
[(T.KEYWORD_HEADER, '*** Keywords ***', 1, 0),
(T.EOL, '\n', 1, 16),
(T.EOS, '', 1, 17)] + tokens,
get_tokens=get_resource_tokens)
class TestVariables(unittest.TestCase):
def test_valid(self):
data = '''\
*** Variables ***
${SCALAR} value
${LONG} First part ${2} part
... third part
@{LIST} first ${SCALAR} third
&{DICT} key=value &{X}
'''
expected = [
(T.VARIABLE_HEADER, '*** Variables ***', 1, 0),
(T.EOS, '', 1, 17),
(T.VARIABLE, '${SCALAR}', 2, 0),
(T.ARGUMENT, 'value', 2, 13),
(T.EOS, '', 2, 18),
(T.VARIABLE, '${LONG}', 3, 0),
(T.ARGUMENT, 'First part', 3, 13),
(T.ARGUMENT, '${2} part', 3, 27),
(T.ARGUMENT, 'third part', 4, 13),
(T.EOS, '', 4, 23),
(T.VARIABLE, '@{LIST}', 5, 0),
(T.ARGUMENT, 'first', 5, 13),
(T.ARGUMENT, '${SCALAR}', 5, 22),
(T.ARGUMENT, 'third', 5, 35),
(T.EOS, '', 5, 40),
(T.VARIABLE, '&{DICT}', 6, 0),
(T.ARGUMENT, 'key=value', 6, 13),
(T.ARGUMENT, '&{X}', 6, 26),
(T.EOS, '', 6, 30)
]
self._verify(data, expected)
def test_valid_with_assign(self):
data = '''\
*** Variables ***
${SCALAR} = value
${LONG}= First part ${2} part
... third part
@{LIST} = first ${SCALAR} third
&{DICT} = key=value &{X}
'''
expected = [
(T.VARIABLE_HEADER, '*** Variables ***', 1, 0),
(T.EOS, '', 1, 17),
(T.VARIABLE, '${SCALAR} =', 2, 0),
(T.ARGUMENT, 'value', 2, 17),
(T.EOS, '', 2, 22),
(T.VARIABLE, '${LONG}=', 3, 0),
(T.ARGUMENT, 'First part', 3, 17),
(T.ARGUMENT, '${2} part', 3, 31),
(T.ARGUMENT, 'third part', 4, 17),
(T.EOS, '', 4, 27),
(T.VARIABLE, '@{LIST} =', 5, 0),
(T.ARGUMENT, 'first', 5, 17),
(T.ARGUMENT, '${SCALAR}', 5, 26),
(T.ARGUMENT, 'third', 5, 39),
(T.EOS, '', 5, 44),
(T.VARIABLE, '&{DICT} =', 6, 0),
(T.ARGUMENT, 'key=value', 6, 17),
(T.ARGUMENT, '&{X}', 6, 30),
(T.EOS, '', 6, 34)
]
self._verify(data, expected)
def _verify(self, data, expected):
assert_tokens(data, expected, get_tokens, data_only=True)
assert_tokens(data, expected, get_resource_tokens, data_only=True)
class TestForLoop(unittest.TestCase):
def test_for_loop_header(self):
header = 'FOR ${i} IN foo bar'
expected = [
(T.FOR, 'FOR', 3, 4),
(T.VARIABLE, '${i}', 3, 11),
(T.FOR_SEPARATOR, 'IN', 3, 19),
(T.ARGUMENT, 'foo', 3, 25),
(T.ARGUMENT, 'bar', 3, 32),
(T.EOS, '', 3, 35)
]
self._verify(header, expected)
def _verify(self, header, expected_header):
data = '''\
*** %s ***
Name
%s
Keyword
END
'''
body_and_end = [
(T.KEYWORD, 'Keyword', 4, 8),
(T.EOS, '', 4, 15),
(T.END, 'END', 5, 4),
(T.EOS, '', 5, 7)
]
expected = [
(T.TESTCASE_HEADER, '*** Test Cases ***', 1, 0),
(T.EOS, '', 1, 18),
(T.TESTCASE_NAME, 'Name', 2, 0),
(T.EOS, '', 2, 4)
] + expected_header + body_and_end
assert_tokens(data % ('Test Cases', header), expected, data_only=True)
expected = [
(T.KEYWORD_HEADER, '*** Keywords ***', 1, 0),
(T.EOS, '', 1, 16),
(T.KEYWORD_NAME, 'Name', 2, 0),
(T.EOS, '', 2, 4)
] + expected_header + body_and_end
assert_tokens(data % ('Keywords', header), expected, data_only=True)
assert_tokens(data % ('Keywords', header), expected,
get_resource_tokens, data_only=True)
class TestIf(unittest.TestCase):
def test_if_only(self):
block = '''\
IF ${True}
Log Many foo bar
END
'''
expected = [
(T.IF, 'IF', 3, 4),
(T.ARGUMENT, '${True}', 3, 10),
(T.EOS, '', 3, 17),
(T.KEYWORD, 'Log Many', 4, 8),
(T.ARGUMENT, 'foo', 4, 20),
(T.ARGUMENT, 'bar', 4, 27),
(T.EOS, '', 4, 30),
(T.END, 'END', 5, 4),
(T.EOS, '', 5, 7)
]
self._verify(block, expected)
def test_with_else(self):
block = '''\
IF ${False}
Log foo
ELSE
Log bar
END
'''
expected = [
(T.IF, 'IF', 3, 4),
(T.ARGUMENT, '${False}', 3, 10),
(T.EOS, '', 3, 18),
(T.KEYWORD, 'Log', 4, 8),
(T.ARGUMENT, 'foo', 4, 15),
(T.EOS, '', 4, 18),
(T.ELSE, 'ELSE', 5, 4),
(T.EOS, '', 5, 8),
(T.KEYWORD, 'Log', 6, 8),
(T.ARGUMENT, 'bar', 6, 15),
(T.EOS, '', 6, 18),
(T.END, 'END', 7, 4),
(T.EOS, '', 7, 7)
]
self._verify(block, expected)
def test_with_else_if_and_else(self):
block = '''\
IF ${False}
Log foo
ELSE IF ${True}
Log bar
ELSE
Noop
END
'''
expected = [
(T.IF, 'IF', 3, 4),
(T.ARGUMENT, '${False}', 3, 10),
(T.EOS, '', 3, 18),
(T.KEYWORD, 'Log', 4, 8),
(T.ARGUMENT, 'foo', 4, 15),
(T.EOS, '', 4, 18),
(T.ELSE_IF, 'ELSE IF', 5, 4),
(T.ARGUMENT, '${True}', 5, 15),
(T.EOS, '', 5, 22),
(T.KEYWORD, 'Log', 6, 8),
(T.ARGUMENT, 'bar', 6, 15),
(T.EOS, '', 6, 18),
(T.ELSE, 'ELSE', 7, 4),
(T.EOS, '', 7, 8),
(T.KEYWORD, 'Noop', 8, 8),
(T.EOS, '', 8, 12),
(T.END, 'END', 9, 4),
(T.EOS, '', 9, 7)
]
self._verify(block, expected)
def test_multiline_and_comments(self):
block = '''\
IF # 3
... ${False} # 4
Log # 5
... foo # 6
ELSE IF # 7
... ${True} # 8
Log # 9
... bar # 10
ELSE # 11
Log # 12
... zap # 13
END # 14
'''
expected = [
(T.IF, 'IF', 3, 4),
(T.ARGUMENT, '${False}', 4, 11),
(T.EOS, '', 4, 19),
(T.KEYWORD, 'Log', 5, 8),
(T.ARGUMENT, 'foo', 6, 11),
(T.EOS, '', 6, 14),
(T.ELSE_IF, 'ELSE IF', 7, 4),
(T.ARGUMENT, '${True}', 8, 11),
(T.EOS, '', 8, 18),
(T.KEYWORD, 'Log', 9, 8),
(T.ARGUMENT, 'bar', 10, 11),
(T.EOS, '', 10, 14),
(T.ELSE, 'ELSE', 11, 4),
(T.EOS, '', 11, 8),
(T.KEYWORD, 'Log', 12, 8),
(T.ARGUMENT, 'zap', 13, 11),
(T.EOS, '', 13, 14),
(T.END, 'END', 14, 4),
(T.EOS, '', 14, 7)
]
self._verify(block, expected)
def _verify(self, block, expected_header):
data = f'''\
*** Test Cases ***
Name
{block}
'''
expected_tokens = [
(T.TESTCASE_HEADER, '*** Test Cases ***', 1, 0),
(T.EOS, '', 1, 18),
(T.TESTCASE_NAME, 'Name', 2, 0),
(T.EOS, '', 2, 4)
] + expected_header
assert_tokens(data, expected_tokens, data_only=True)
class TestInlineIf(unittest.TestCase):
def test_if_only(self):
header = ' IF ${True} Log Many foo bar'
expected = [
(T.SEPARATOR, ' ', 3, 0),
(T.INLINE_IF, 'IF', 3, 4),
(T.SEPARATOR, ' ', 3, 6),
(T.ARGUMENT, '${True}', 3, 10),
(T.EOS, '', 3, 17),
(T.SEPARATOR, ' ', 3, 17),
(T.KEYWORD, 'Log Many', 3, 21),
(T.SEPARATOR, ' ', 3, 29),
(T.ARGUMENT, 'foo', 3, 32),
(T.SEPARATOR, ' ', 3, 35),
(T.ARGUMENT, 'bar', 3, 39),
(T.EOL, '\n', 3, 42),
(T.EOS, '', 3, 43),
(T.END, '', 3, 43),
(T.EOS, '', 3, 43)
]
self._verify(header, expected)
def test_with_else(self):
# 4 10 22 29 36 43 50
header = ' IF ${False} Log foo ELSE Log bar'
expected = [
(T.SEPARATOR, ' ', 3, 0),
(T.INLINE_IF, 'IF', 3, 4),
(T.SEPARATOR, ' ', 3, 6),
(T.ARGUMENT, '${False}', 3, 10),
(T.EOS, '', 3, 18),
(T.SEPARATOR, ' ', 3, 18),
(T.KEYWORD, 'Log', 3, 22),
(T.SEPARATOR, ' ', 3, 25),
(T.ARGUMENT, 'foo', 3, 29),
(T.SEPARATOR, ' ', 3, 32),
(T.EOS, '', 3, 36),
(T.ELSE, 'ELSE', 3, 36),
(T.EOS, '', 3, 40),
(T.SEPARATOR, ' ', 3, 40),
(T.KEYWORD, 'Log', 3, 43),
(T.SEPARATOR, ' ', 3, 46),
(T.ARGUMENT, 'bar', 3, 50),
(T.EOL, '\n', 3, 53),
(T.EOS, '', 3, 54),
(T.END, '', 3, 54),
(T.EOS, '', 3, 54)
]
self._verify(header, expected)
def test_with_else_if_and_else(self):
# 4 10 22 29 36 47 56 63 70 78
header = ' IF ${False} Log foo ELSE IF ${True} Log bar ELSE Noop'
expected = [
(T.SEPARATOR, ' ', 3, 0),
(T.INLINE_IF, 'IF', 3, 4),
(T.SEPARATOR, ' ', 3, 6),
(T.ARGUMENT, '${False}', 3, 10),
(T.EOS, '', 3, 18),
(T.SEPARATOR, ' ', 3, 18),
(T.KEYWORD, 'Log', 3, 22),
(T.SEPARATOR, ' ', 3, 25),
(T.ARGUMENT, 'foo', 3, 29),
(T.SEPARATOR, ' ', 3, 32),
(T.EOS, '', 3, 36),
(T.ELSE_IF, 'ELSE IF', 3, 36),
(T.SEPARATOR, ' ', 3, 43),
(T.ARGUMENT, '${True}', 3, 47),
(T.EOS, '', 3, 54),
(T.SEPARATOR, ' ', 3, 54),
(T.KEYWORD, 'Log', 3, 56),
(T.SEPARATOR, ' ', 3, 59),
(T.ARGUMENT, 'bar', 3, 63),
(T.SEPARATOR, ' ', 3, 66),
(T.EOS, '', 3, 70),
(T.ELSE, 'ELSE', 3, 70),
(T.EOS, '', 3, 74),
(T.SEPARATOR, ' ', 3, 74),
(T.KEYWORD, 'Noop', 3, 78),
(T.EOL, '\n', 3, 82),
(T.EOS, '', 3, 83),
(T.END, '', 3, 83),
(T.EOS, '', 3, 83)
]
self._verify(header, expected)
def test_else_if_with_non_ascii_space(self):
# 4 10 15 21
header = ' IF 1 K1 ELSE\N{NO-BREAK SPACE}IF 2 K2'
expected = [
(T.SEPARATOR, ' ', 3, 0),
(T.INLINE_IF, 'IF', 3, 4),
(T.SEPARATOR, ' ', 3, 6),
(T.ARGUMENT, '1', 3, 10),
(T.EOS, '', 3, 11),
(T.SEPARATOR, ' ', 3, 11),
(T.KEYWORD, 'K1', 3, 15),
(T.SEPARATOR, ' ', 3, 17),
(T.EOS, '', 3, 21),
(T.ELSE_IF, 'ELSE\N{NO-BREAK SPACE}IF', 3, 21),
(T.SEPARATOR, ' ', 3, 28),
(T.ARGUMENT, '2', 3, 32),
(T.EOS, '', 3, 33),
(T.SEPARATOR, ' ', 3, 33),
(T.KEYWORD, 'K2', 3, 37),
(T.EOL, '\n', 3, 39),
(T.EOS, '', 3, 40),
(T.END, '', 3, 40),
(T.EOS, '', 3, 40)
]
self._verify(header, expected)
def test_assign(self):
# 4 14 20 28 34 42
header = ' ${x} = IF True K1 ELSE K2'
expected = [
(T.SEPARATOR, ' ', 3, 0),
(T.ASSIGN, '${x} =', 3, 4),
(T.SEPARATOR, ' ', 3, 10),
(T.INLINE_IF, 'IF', 3, 14),
(T.SEPARATOR, ' ', 3, 16),
(T.ARGUMENT, 'True', 3, 20),
(T.EOS, '', 3, 24),
(T.SEPARATOR, ' ', 3, 24),
(T.KEYWORD, 'K1', 3, 28),
(T.SEPARATOR, ' ', 3, 30),
(T.EOS, '', 3, 34),
(T.ELSE, 'ELSE', 3, 34),
(T.EOS, '', 3, 38),
(T.SEPARATOR, ' ', 3, 38),
(T.KEYWORD, 'K2', 3, 42),
(T.EOL, '\n', 3, 44),
(T.EOS, '', 3, 45),
(T.END, '', 3, 45),
(T.EOS, '', 3, 45),
]
self._verify(header, expected)
def test_multiline_and_comments(self):
header = '''\
IF # 3
... ${False} # 4
... Log # 5
... foo # 6
... ELSE IF # 7
... ${True} # 8
... Log # 9
... bar # 10
... ELSE # 11
... Log # 12
... zap # 13
'''
expected = [
(T.SEPARATOR, ' ', 3, 0),
(T.INLINE_IF, 'IF', 3, 4),
(T.SEPARATOR, ' ', 3, 6),
(T.COMMENT, '# 3', 3, 23),
(T.EOL, '\n', 3, 26),
(T.SEPARATOR, ' ', 4, 0),
(T.CONTINUATION, '...', 4, 4),
(T.SEPARATOR, ' ', 4, 7),
(T.ARGUMENT, '${False}', 4, 11),
(T.EOS, '', 4, 19),
(T.SEPARATOR, ' ', 4, 19),
(T.COMMENT, '# 4', 4, 23),
(T.EOL, '\n', 4, 26),
(T.SEPARATOR, ' ', 5, 0),
(T.CONTINUATION, '...', 5, 4),
(T.SEPARATOR, ' ', 5, 7),
(T.KEYWORD, 'Log', 5, 11),
(T.SEPARATOR, ' ', 5, 14),
(T.COMMENT, '# 5', 5, 23),
(T.EOL, '\n', 5, 26),
(T.SEPARATOR, ' ', 6, 0),
(T.CONTINUATION, '...', 6, 4),
(T.SEPARATOR, ' ', 6, 7),
(T.ARGUMENT, 'foo', 6, 11),
(T.SEPARATOR, ' ', 6, 14),
(T.COMMENT, '# 6', 6, 23),
(T.EOL, '\n', 6, 26),
(T.SEPARATOR, ' ', 7, 0),
(T.CONTINUATION, '...', 7, 4),
(T.SEPARATOR, ' ', 7, 7),
(T.EOS, '', 7, 11),
(T.ELSE_IF, 'ELSE IF', 7, 11),
(T.SEPARATOR, ' ', 7, 18),
(T.COMMENT, '# 7', 7, 23),
(T.EOL, '\n', 7, 26),
(T.SEPARATOR, ' ', 8, 0),
(T.CONTINUATION, '...', 8, 4),
(T.SEPARATOR, ' ', 8, 7),
(T.ARGUMENT, '${True}', 8, 11),
(T.EOS, '', 8, 18),
(T.SEPARATOR, ' ', 8, 18),
(T.COMMENT, '# 8', 8, 23),
(T.EOL, '\n', 8, 26),
(T.SEPARATOR, ' ', 9, 0),
(T.CONTINUATION, '...', 9, 4),
(T.SEPARATOR, ' ', 9, 7),
(T.KEYWORD, 'Log', 9, 11),
(T.SEPARATOR, ' ', 9, 14),
(T.COMMENT, '# 9', 9, 23),
(T.EOL, '\n', 9, 26),
(T.SEPARATOR, ' ', 10, 0),
(T.CONTINUATION, '...', 10, 4),
(T.SEPARATOR, ' ', 10, 7),
(T.ARGUMENT, 'bar', 10, 11),
(T.SEPARATOR, ' ', 10, 14),
(T.COMMENT, '# 10', 10, 23),
(T.EOL, '\n', 10, 27),
(T.SEPARATOR, ' ', 11, 0),
(T.CONTINUATION, '...', 11, 4),
(T.SEPARATOR, ' ', 11, 7),
(T.EOS, '', 11, 11),
(T.ELSE, 'ELSE', 11, 11),
(T.EOS, '', 11, 15),
(T.SEPARATOR, ' ', 11, 15),
(T.COMMENT, '# 11', 11, 23),
(T.EOL, '\n', 11, 27),
(T.SEPARATOR, ' ', 12, 0),
(T.CONTINUATION, '...', 12, 4),
(T.SEPARATOR, ' ', 12, 7),
(T.KEYWORD, 'Log', 12, 11),
(T.SEPARATOR, ' ', 12, 14),
(T.COMMENT, '# 12', 12, 23),
(T.EOL, '\n', 12, 27),
(T.SEPARATOR, ' ', 13, 0),
(T.CONTINUATION, '...', 13, 4),
(T.SEPARATOR, ' ', 13, 7),
(T.ARGUMENT, 'zap', 13, 11),
(T.SEPARATOR, ' ', 13, 14),
(T.COMMENT, '# 13', 13, 23),
(T.EOL, '\n', 13, 27),
(T.EOS, '', 13, 28),
(T.END, '', 13, 28),
(T.EOS, '', 13, 28),
(T.EOL, '\n', 14, 0),
(T.EOS, '', 14, 1)
]
self._verify(header, expected)
def _verify(self, header, expected_header):
data = f'''\
*** Test Cases ***
Name
{header}
'''
expected_tokens = [
(T.TESTCASE_HEADER, '*** Test Cases ***', 1, 0),
(T.EOL, '\n', 1, 18),
(T.EOS, '', 1, 19),
(T.TESTCASE_NAME, 'Name', 2, 0),
(T.EOL, '\n', 2, 4),
(T.EOS, '', 2, 5),
] + expected_header
assert_tokens(data, expected_tokens)
class TestCommentRowsAndEmptyRows(unittest.TestCase):
def test_between_names(self):
self._verify('Name\n#Comment\n\nName 2',
[(T.TESTCASE_NAME, 'Name', 2, 0),
(T.EOL, '\n', 2, 4),
(T.EOS, '', 2, 5),
(T.COMMENT, '#Comment', 3, 0),
(T.EOL, '\n', 3, 8),
(T.EOS, '', 3, 9),
(T.EOL, '\n', 4, 0),
(T.EOS, '', 4, 1),
(T.TESTCASE_NAME, 'Name 2', 5, 0),
(T.EOL, '', 5, 6),
(T.EOS, '', 5, 6)])
def test_leading(self):
self._verify('\n#Comment\n\nName',
[(T.EOL, '\n', 2, 0),
(T.EOS, '', 2, 1),
(T.COMMENT, '#Comment', 3, 0),
(T.EOL, '\n', 3, 8),
(T.EOS, '', 3, 9),
(T.EOL, '\n', 4, 0),
(T.EOS, '', 4, 1),
(T.TESTCASE_NAME, 'Name', 5, 0),
(T.EOL, '', 5, 4),
(T.EOS, '', 5, 4)])
def test_trailing(self):
self._verify('Name\n#Comment\n\n',
[(T.TESTCASE_NAME, 'Name', 2, 0),
(T.EOL, '\n', 2, 4),
(T.EOS, '', 2, 5),
(T.COMMENT, '#Comment', 3, 0),
(T.EOL, '\n', 3, 8),
(T.EOS, '', 3, 9),
(T.EOL, '\n', 4, 0),
(T.EOS, '', 4, 1)])
self._verify('Name\n#Comment\n# C2\n\n',
[(T.TESTCASE_NAME, 'Name', 2, 0),
(T.EOL, '\n', 2, 4),
(T.EOS, '', 2, 5),
(T.COMMENT, '#Comment', 3, 0),
(T.EOL, '\n', 3, 8),
(T.EOS, '', 3, 9),
(T.COMMENT, '# C2', 4, 0),
(T.EOL, '\n', 4, 4),
(T.EOS, '', 4, 5),
(T.EOL, '\n', 5, 0),
(T.EOS, '', 5, 1)])
def test_on_their_own(self):
self._verify('\n',
[(T.EOL, '\n', 2, 0),
(T.EOS, '', 2, 1)])
self._verify('# comment',
[(T.COMMENT, '# comment', 2, 0),
(T.EOL, '', 2, 9),
(T.EOS, '', 2, 9)])
self._verify('\n#\n#',
[(T.EOL, '\n', 2, 0),
(T.EOS, '', 2, 1),
(T.COMMENT, '#', 3, 0),
(T.EOL, '\n', 3, 1),
(T.EOS, '', 3, 2),
(T.COMMENT, '#', 4, 0),
(T.EOL, '', 4, 1),
(T.EOS, '', 4, 1)])
def _verify(self, data, tokens):
assert_tokens('*** Test Cases ***\n' + data,
[(T.TESTCASE_HEADER, '*** Test Cases ***', 1, 0),
(T.EOL, '\n', 1, 18),
(T.EOS, '', 1, 19)] + tokens)
tokens = [(T.KEYWORD_NAME,) + t[1:] if t[0] == T.TESTCASE_NAME else t
for t in tokens]
assert_tokens('*** Keywords ***\n' + data,
[(T.KEYWORD_HEADER, '*** Keywords ***', 1, 0),
(T.EOL, '\n', 1, 16),
(T.EOS, '', 1, 17)] + tokens,
get_tokens=get_resource_tokens)
class TestGetTokensSourceFormats(unittest.TestCase):
path = os.path.join(os.getenv('TEMPDIR') or tempfile.gettempdir(),
'test_lexer.robot')
data = '''\
*** Settings ***
Library Easter
*** Test Cases ***
Example
None shall pass ${NONE}
'''
tokens = [
(T.SETTING_HEADER, '*** Settings ***', 1, 0),
(T.EOL, '\n', 1, 16),
(T.EOS, '', 1, 17),
(T.LIBRARY, 'Library', 2, 0),
(T.SEPARATOR, ' ', 2, 7),
(T.NAME, 'Easter', 2, 16),
(T.EOL, '\n', 2, 22),
(T.EOS, '', 2, 23),
(T.EOL, '\n', 3, 0),
(T.EOS, '', 3, 1),
(T.TESTCASE_HEADER, '*** Test Cases ***', 4, 0),
(T.EOL, '\n', 4, 18),
(T.EOS, '', 4, 19),
(T.TESTCASE_NAME, 'Example', 5, 0),
(T.EOL, '\n', 5, 7),
(T.EOS, '', 5, 8),
(T.SEPARATOR, ' ', 6, 0),
(T.KEYWORD, 'None shall pass', 6, 4),
(T.SEPARATOR, ' ', 6, 19),
(T.ARGUMENT, '${NONE}', 6, 23),
(T.EOL, '\n', 6, 30),
(T.EOS, '', 6, 31)
]
data_tokens = [
(T.SETTING_HEADER, '*** Settings ***', 1, 0),
(T.EOS, '', 1, 16),
(T.LIBRARY, 'Library', 2, 0),
(T.NAME, 'Easter', 2, 16),
(T.EOS, '', 2, 22),
(T.TESTCASE_HEADER, '*** Test Cases ***', 4, 0),
(T.EOS, '', 4, 18),
(T.TESTCASE_NAME, 'Example', 5, 0),
(T.EOS, '', 5, 7),
(T.KEYWORD, 'None shall pass', 6, 4),
(T.ARGUMENT, '${NONE}', 6, 23),
(T.EOS, '', 6, 30)
]
@classmethod
def setUpClass(cls):
with open(cls.path, 'w') as f:
f.write(cls.data)
@classmethod
def tearDownClass(cls):
os.remove(cls.path)
def test_string_path(self):
self._verify(self.path)
self._verify(self.path, data_only=True)
def test_pathlib_path(self):
self._verify(Path(self.path))
self._verify(Path(self.path), data_only=True)
def test_open_file(self):
with open(self.path) as f:
self._verify(f)
with open(self.path) as f:
self._verify(f, data_only=True)
def test_string_io(self):
self._verify(StringIO(self.data))
self._verify(StringIO(self.data), data_only=True)
def test_string(self):
self._verify(self.data)
self._verify(self.data, data_only=True)
def _verify(self, source, data_only=False):
expected = self.data_tokens if data_only else self.tokens
assert_tokens(source, expected, data_only=data_only)
class TestGetResourceTokensSourceFormats(TestGetTokensSourceFormats):
data = '''\
*** Variable ***
${VAR} Value
*** KEYWORD ***
NOOP No Operation
'''
tokens = [
(T.VARIABLE_HEADER, '*** Variable ***', 1, 0),
(T.EOL, '\n', 1, 16),
(T.EOS, '', 1, 17),
(T.VARIABLE, '${VAR}', 2, 0),
(T.SEPARATOR, ' ', 2, 6),
(T.ARGUMENT, 'Value', 2, 10),
(T.EOL, '\n', 2, 15),
(T.EOS, '', 2, 16),
(T.EOL, '\n', 3, 0),
(T.EOS, '', 3, 1),
(T.KEYWORD_HEADER, '*** KEYWORD ***', 4, 0),
(T.EOL, '\n', 4, 15),
(T.EOS, '', 4, 16),
(T.KEYWORD_NAME, 'NOOP', 5, 0),
(T.EOS, '', 5, 4),
(T.SEPARATOR, ' ', 5, 4),
(T.KEYWORD, 'No Operation', 5, 8),
(T.EOL, '\n', 5, 20),
(T.EOS, '', 5, 21)
]
data_tokens = [
(T.VARIABLE_HEADER, '*** Variable ***', 1, 0),
(T.EOS, '', 1, 16),
(T.VARIABLE, '${VAR}', 2, 0),
(T.ARGUMENT, 'Value', 2, 10),
(T.EOS, '', 2, 15),
(T.KEYWORD_HEADER, '*** KEYWORD ***', 4, 0),
(T.EOS, '', 4, 15),
(T.KEYWORD_NAME, 'NOOP', 5, 0),
(T.EOS, '', 5, 4),
(T.KEYWORD, 'No Operation', 5, 8),
(T.EOS, '', 5, 20)
]
def _verify(self, source, data_only=False):
expected = self.data_tokens if data_only else self.tokens
assert_tokens(source, expected, get_tokens=get_resource_tokens,
data_only=data_only)
class TestTokenizeVariables(unittest.TestCase):
def test_settings(self):
data = '''\
*** Settings ***
Library My${Name} my ${arg} ${x}[0] WITH NAME Your${Name}
${invalid} ${usage}
'''
expected = [(T.SETTING_HEADER, '*** Settings ***', 1, 0),
(T.EOS, '', 1, 16),
(T.LIBRARY, 'Library', 2, 0),
(T.NAME, 'My', 2, 14),
(T.VARIABLE, '${Name}', 2, 16),
(T.ARGUMENT, 'my ', 2, 27),
(T.VARIABLE, '${arg}', 2, 30),
(T.VARIABLE, '${x}[0]', 2, 40),
(T.WITH_NAME, 'WITH NAME', 2, 51),
(T.NAME, 'Your', 2, 64),
(T.VARIABLE, '${Name}', 2, 68),
(T.EOS, '', 2, 75),
(T.ERROR, '${invalid}', 3, 0, "Non-existing setting '${invalid}'."),
(T.EOS, '', 3, 10)]
assert_tokens(data, expected, get_tokens=get_tokens,
data_only=True, tokenize_variables=True)
assert_tokens(data, expected, get_tokens=get_resource_tokens,
data_only=True, tokenize_variables=True)
assert_tokens(data, expected, get_tokens=get_init_tokens,
data_only=True, tokenize_variables=True)
def test_variables(self):
data = '''\
*** Variables ***
${VARIABLE} my ${value}
&{DICT} key=${var}[item][1:] ${key}=${a}${b}[c]${d}
'''
expected = [(T.VARIABLE_HEADER, '*** Variables ***', 1, 0),
(T.EOS, '', 1, 17),
(T.VARIABLE, '${VARIABLE}', 2, 0),
(T.ARGUMENT, 'my ', 2, 17),
(T.VARIABLE, '${value}', 2, 20),
(T.EOS, '', 2, 28),
(T.VARIABLE, '&{DICT}', 3, 0),
(T.ARGUMENT, 'key=', 3, 17),
(T.VARIABLE, '${var}[item][1:]', 3, 21),
(T.VARIABLE, '${key}', 3, 41),
(T.ARGUMENT, '=', 3, 47),
(T.VARIABLE, '${a}', 3, 48),
(T.VARIABLE, '${b}[c]', 3, 52),
(T.VARIABLE, '${d}', 3, 59),
(T.EOS, '', 3, 63)]
assert_tokens(data, expected, get_tokens=get_tokens,
data_only=True, tokenize_variables=True)
assert_tokens(data, expected, get_tokens=get_resource_tokens,
data_only=True, tokenize_variables=True)
assert_tokens(data, expected, get_tokens=get_init_tokens,
data_only=True, tokenize_variables=True)
def test_test_cases(self):
data = '''\
*** Test Cases ***
My ${name}
[Documentation] a ${b} ${c}[d] ${e${f}}
${assign} = Keyword my ${arg}ument
Key${word}
${name}
'''
expected = [(T.TESTCASE_HEADER, '*** Test Cases ***', 1, 0),
(T.EOS, '', 1, 18),
(T.TESTCASE_NAME, 'My ', 2, 0),
(T.VARIABLE, '${name}', 2, 3),
(T.EOS, '', 2, 10),
(T.DOCUMENTATION, '[Documentation]', 3, 4),
(T.ARGUMENT, 'a ', 3, 23),
(T.VARIABLE, '${b}', 3, 25),
(T.ARGUMENT, ' ', 3, 29),
(T.VARIABLE, '${c}[d]', 3, 30),
(T.ARGUMENT, ' ', 3, 37),
(T.VARIABLE, '${e${f}}', 3, 38),
(T.EOS, '', 3, 46),
(T.ASSIGN, '${assign} =', 4, 4),
(T.KEYWORD, 'Keyword', 4, 19),
(T.ARGUMENT, 'my ', 4, 30),
(T.VARIABLE, '${arg}', 4, 33),
(T.ARGUMENT, 'ument', 4, 39),
(T.EOS, '', 4, 44),
(T.KEYWORD, 'Key${word}', 5, 4),
(T.EOS, '', 5, 14),
(T.VARIABLE, '${name}', 6, 0),
(T.EOS, '', 6, 7)]
assert_tokens(data, expected, get_tokens=get_tokens,
data_only=True, tokenize_variables=True)
def test_keywords(self):
data = '''\
*** Keywords ***
My ${name}
[Documentation] a ${b} ${c}[d] ${e${f}}
${assign} = Keyword my ${arg}ument
Key${word}
${name}
'''
expected = [(T.KEYWORD_HEADER, '*** Keywords ***', 1, 0),
(T.EOS, '', 1, 16),
(T.KEYWORD_NAME, 'My ', 2, 0),
(T.VARIABLE, '${name}', 2, 3),
(T.EOS, '', 2, 10),
(T.DOCUMENTATION, '[Documentation]', 3, 4),
(T.ARGUMENT, 'a ', 3, 23),
(T.VARIABLE, '${b}', 3, 25),
(T.ARGUMENT, ' ', 3, 29),
(T.VARIABLE, '${c}[d]', 3, 30),
(T.ARGUMENT, ' ', 3, 37),
(T.VARIABLE, '${e${f}}', 3, 38),
(T.EOS, '', 3, 46),
(T.ASSIGN, '${assign} =', 4, 4),
(T.KEYWORD, 'Keyword', 4, 19),
(T.ARGUMENT, 'my ', 4, 30),
(T.VARIABLE, '${arg}', 4, 33),
(T.ARGUMENT, 'ument', 4, 39),
(T.EOS, '', 4, 44),
(T.KEYWORD, 'Key${word}', 5, 4),
(T.EOS, '', 5, 14),
(T.VARIABLE, '${name}', 6, 0),
(T.EOS, '', 6, 7)]
assert_tokens(data, expected, get_tokens=get_tokens,
data_only=True, tokenize_variables=True)
assert_tokens(data, expected, get_tokens=get_resource_tokens,
data_only=True, tokenize_variables=True)
assert_tokens(data, expected, get_tokens=get_init_tokens,
data_only=True, tokenize_variables=True)
class TestKeywordCallAssign(unittest.TestCase):
def test_valid_assign(self):
data = '''\
*** Keywords ***
do something
${a}
'''
expected = [(T.KEYWORD_HEADER, '*** Keywords ***', 1, 0),
(T.EOS, '', 1, 16),
(T.KEYWORD_NAME, 'do something', 2, 0),
(T.EOS, '', 2, 12),
(T.ASSIGN, '${a}', 3, 4),
(T.EOS, '', 3, 8)]
assert_tokens(data, expected, get_tokens=get_tokens,
data_only=True, tokenize_variables=True)
assert_tokens(data, expected, get_tokens=get_resource_tokens,
data_only=True, tokenize_variables=True)
assert_tokens(data, expected, get_tokens=get_init_tokens,
data_only=True, tokenize_variables=True)
def test_valid_assign_with_keyword(self):
data = '''\
*** Keywords ***
do something
${a} do nothing
'''
expected = [(T.KEYWORD_HEADER, '*** Keywords ***', 1, 0),
(T.EOS, '', 1, 16),
(T.KEYWORD_NAME, 'do something', 2, 0),
(T.EOS, '', 2, 12),
(T.ASSIGN, '${a}', 3, 4),
(T.KEYWORD, 'do nothing', 3, 10),
(T.EOS, '', 3, 20)]
assert_tokens(data, expected, get_tokens=get_tokens,
data_only=True, tokenize_variables=True)
assert_tokens(data, expected, get_tokens=get_resource_tokens,
data_only=True, tokenize_variables=True)
assert_tokens(data, expected, get_tokens=get_init_tokens,
data_only=True, tokenize_variables=True)
def test_invalid_assign_not_closed_should_be_keyword(self):
data = '''\
*** Keywords ***
do something
${a
'''
expected = [(T.KEYWORD_HEADER, '*** Keywords ***', 1, 0),
(T.EOS, '', 1, 16),
(T.KEYWORD_NAME, 'do something', 2, 0),
(T.EOS, '', 2, 12),
(T.KEYWORD, '${a', 3, 4),
(T.EOS, '', 3, 7)]
assert_tokens(data, expected, get_tokens=get_tokens,
data_only=True, tokenize_variables=True)
assert_tokens(data, expected, get_tokens=get_resource_tokens,
data_only=True, tokenize_variables=True)
assert_tokens(data, expected, get_tokens=get_init_tokens,
data_only=True, tokenize_variables=True)
def test_invalid_assign_ends_with_equal_should_be_keyword(self):
data = '''\
*** Keywords ***
do something
${=
'''
expected = [(T.KEYWORD_HEADER, '*** Keywords ***', 1, 0),
(T.EOS, '', 1, 16),
(T.KEYWORD_NAME, 'do something', 2, 0),
(T.EOS, '', 2, 12),
(T.KEYWORD, '${=', 3, 4),
(T.EOS, '', 3, 7)]
assert_tokens(data, expected, get_tokens=get_tokens,
data_only=True, tokenize_variables=True)
assert_tokens(data, expected, get_tokens=get_resource_tokens,
data_only=True, tokenize_variables=True)
assert_tokens(data, expected, get_tokens=get_init_tokens,
data_only=True, tokenize_variables=True)
def test_invalid_assign_variable_and_ends_with_equal_should_be_keyword(self):
data = '''\
*** Keywords ***
do something
${abc def=
'''
expected = [(T.KEYWORD_HEADER, '*** Keywords ***', 1, 0),
(T.EOS, '', 1, 16),
(T.KEYWORD_NAME, 'do something', 2, 0),
(T.EOS, '', 2, 12),
(T.KEYWORD, '${abc def=', 3, 4),
(T.EOS, '', 3, 14)]
assert_tokens(data, expected, get_tokens=get_tokens,
data_only=True, tokenize_variables=True)
assert_tokens(data, expected, get_tokens=get_resource_tokens,
data_only=True, tokenize_variables=True)
assert_tokens(data, expected, get_tokens=get_init_tokens,
data_only=True, tokenize_variables=True)
class TestReturn(unittest.TestCase):
def test_in_keyword(self):
data = ' RETURN'
expected = [(T.RETURN_STATEMENT, 'RETURN', 3, 4),
(T.EOS, '', 3, 10)]
self._verify(data, expected)
def test_in_test(self):
# This is not valid usage but that's not recognized during lexing.
data = ' RETURN'
expected = [(T.RETURN_STATEMENT, 'RETURN', 3, 4),
(T.EOS, '', 3, 10)]
self._verify(data, expected, test=True)
def test_in_if(self):
data = '''\
IF True
RETURN Hello!
END
'''
expected = [(T.IF, 'IF', 3, 4),
(T.ARGUMENT, 'True', 3, 10),
(T.EOS, '', 3, 14),
(T.RETURN_STATEMENT, 'RETURN', 4, 8),
(T.ARGUMENT, 'Hello!', 4, 18),
(T.EOS, '', 4, 24),
(T.END, 'END', 5, 4),
(T.EOS, '', 5, 7)]
self._verify(data, expected)
def test_in_for(self):
data = '''\
FOR ${x} IN @{STUFF}
RETURN ${x}
END
'''
expected = [(T.FOR, 'FOR', 3, 4),
(T.VARIABLE, '${x}', 3, 11),
(T.FOR_SEPARATOR, 'IN', 3, 19),
(T.ARGUMENT, '@{STUFF}', 3, 25),
(T.EOS, '', 3, 33),
(T.RETURN_STATEMENT, 'RETURN', 4, 8),
(T.ARGUMENT, '${x}', 4, 18),
(T.EOS, '', 4, 22),
(T.END, 'END', 5, 4),
(T.EOS, '', 5, 7)]
self._verify(data, expected)
def _verify(self, data, expected, test=False):
if not test:
header = '*** Keywords ***'
header_type = T.KEYWORD_HEADER
name_type = T.KEYWORD_NAME
else:
header = '*** Test Cases ***'
header_type = T.TESTCASE_HEADER
name_type = T.TESTCASE_NAME
data = f'{header}\nName\n{data}'
expected = [(header_type, header, 1, 0),
(T.EOS, '', 1, len(header)),
(name_type, 'Name', 2, 0),
(T.EOS, '', 2, 4)] + expected
assert_tokens(data, expected, data_only=True)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_26056 | '''
This script does all the data preprocessing.
You'll need to install CMU-Multimodal DataSDK
(https://github.com/A2Zadeh/CMU-MultimodalDataSDK) to use this script.
There's a packaged (and more up-to-date) version
of the utils below at https://github.com/Justin1904/tetheras-utils.
Preprocessing multimodal data is really tiring...
'''
from __future__ import print_function
import mmdata
import numpy as np
from torch.utils.data import Dataset
def pad(data, max_len):
"""Pads data without time stamps"""
data = remove_timestamps(data)
n_rows = data.shape[0]
dim = data.shape[1]
if max_len >= n_rows:
diff = max_len - n_rows
padding = np.zeros((diff, dim))
padded = np.concatenate((padding, data))
return padded
else:
return data[-max_len:]
def remove_timestamps(segment_data):
"""Removes the start and end time stamps in the Multimodal Data SDK"""
return np.array([feature[2] for feature in segment_data])
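# --- hedged usage sketch (illustration only; synthetic data, not from the SDK).
# It exercises pad()/remove_timestamps() above and needs nothing beyond numpy,
# although running this module directly still requires the imports at the top
# (e.g. mmdata) to resolve.
if __name__ == '__main__':
    # each entry mimics an SDK segment feature: (start_time, end_time, vector)
    _segment = [(0.0, 0.5, np.array([1.0, 2.0])),
                (0.5, 1.0, np.array([3.0, 4.0]))]
    print(pad(_segment, 4).shape)  # (4, 2): two rows of zero padding prepended
    print(pad(_segment, 1).shape)  # (1, 2): truncated to the last max_len rows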
class ProcessedDataset(Dataset):
"""The class object for processed data, pipelined from CMU-MultimodalDataSDK through MultimodalDataset"""
def __init__(self, audio, visual, text, labels):
self.audio = audio
self.visual = visual
self.text = text
self.labels = labels
def __len__(self):
"""Checks the number of data points are the same across different modalities, and return length"""
assert self.audio.shape[1] == self.visual.shape[1] and self.visual.shape[1] == self.text.shape[1] and self.text.shape[1] == self.labels.shape[0]
return self.audio.shape[1]
def __getitem__(self, idx):
"""Returns the target element by index"""
return [self.audio[:, idx, :], self.visual[:, idx, :], self.text[:, idx, :], self.labels[idx]]
class MultimodalDataset(object):
"""The class object for all multimodal datasets from CMU-MultimodalDataSDK"""
def __init__(self, dataset, visual='facet', audio='covarep', text='embeddings', pivot='words', sentiments=True, emotions=False, max_len=20):
# instantiate a multimodal dataloader
self.dataloader = mmdata.__dict__[dataset]()
self.max_len = max_len
# load the separate modalities, it's silly to access parent class' methods
self.visual = self.dataloader.__class__.__bases__[0].__dict__[visual](self.dataloader)
self.audio = self.dataloader.__class__.__bases__[0].__dict__[audio](self.dataloader)
self.text = self.dataloader.__class__.__bases__[0].__dict__[text](self.dataloader)
# self.pivot = self.dataloader.__class__.__bases__[0].__dict__[pivot](self.dataloader)
# load train/dev/test splits and labels
self.train_vids = self.dataloader.train()
self.valid_vids = self.dataloader.valid()
self.test_vids = self.dataloader.test()
if sentiments:
self.sentiments = self.dataloader.sentiments()
if emotions:
self.emotions = self.dataloader.emotions()
# merge them one by one
self.dataset = mmdata.Dataset.merge(self.visual, self.text)
self.dataset = mmdata.Dataset.merge(self.audio, self.dataset)
# align the modalities
self.aligned = self.dataset.align(text)
# split the training, validation and test sets and preprocess them
train_set_ids = []
for vid in self.train_vids:
for sid in self.dataset[text][vid].keys():
if self.triple_check(vid, sid, audio, visual, text):
train_set_ids.append((vid, sid))
valid_set_ids = []
for vid in self.valid_vids:
for sid in self.dataset[text][vid].keys():
if self.triple_check(vid, sid, audio, visual, text):
valid_set_ids.append((vid, sid))
test_set_ids = []
for vid in self.test_vids:
for sid in self.dataset[text][vid].keys():
if self.triple_check(vid, sid, audio, visual, text):
test_set_ids.append((vid, sid))
self.train_set_audio = np.stack([pad(self.aligned[audio][vid][sid], self.max_len) for (vid, sid) in train_set_ids if self.aligned[audio][vid][sid]], axis=1)
self.valid_set_audio = np.stack([pad(self.aligned[audio][vid][sid], self.max_len) for (vid, sid) in valid_set_ids if self.aligned[audio][vid][sid]], axis=1)
self.test_set_audio = np.stack([pad(self.aligned[audio][vid][sid], self.max_len) for (vid, sid) in test_set_ids if self.aligned[audio][vid][sid]], axis=1)
self.train_set_audio = self.validify(self.train_set_audio)
self.valid_set_audio = self.validify(self.valid_set_audio)
self.test_set_audio = self.validify(self.test_set_audio)
self.train_set_visual = np.stack([pad(self.aligned[visual][vid][sid], self.max_len) for (vid, sid) in train_set_ids], axis=1)
self.valid_set_visual = np.stack([pad(self.aligned[visual][vid][sid], self.max_len) for (vid, sid) in valid_set_ids], axis=1)
self.test_set_visual = np.stack([pad(self.aligned[visual][vid][sid], self.max_len) for (vid, sid) in test_set_ids], axis=1)
self.train_set_visual = self.validify(self.train_set_visual)
self.valid_set_visual = self.validify(self.valid_set_visual)
self.test_set_visual = self.validify(self.test_set_visual)
self.train_set_text = np.stack([pad(self.aligned[text][vid][sid], self.max_len) for (vid, sid) in train_set_ids], axis=1)
self.valid_set_text = np.stack([pad(self.aligned[text][vid][sid], self.max_len) for (vid, sid) in valid_set_ids], axis=1)
self.test_set_text = np.stack([pad(self.aligned[text][vid][sid], self.max_len) for (vid, sid) in test_set_ids], axis=1)
self.train_set_text = self.validify(self.train_set_text)
self.valid_set_text = self.validify(self.valid_set_text)
self.test_set_text = self.validify(self.test_set_text)
self.train_set_labels = np.array([self.sentiments[vid][sid] for (vid, sid) in train_set_ids])
self.valid_set_labels = np.array([self.sentiments[vid][sid] for (vid, sid) in valid_set_ids])
self.test_set_labels = np.array([self.sentiments[vid][sid] for (vid, sid) in test_set_ids])
self.train_set_labels = self.validify(self.train_set_labels)
self.valid_set_labels = self.validify(self.valid_set_labels)
self.test_set_labels = self.validify(self.test_set_labels)
self.train_set = ProcessedDataset(self.train_set_audio, self.train_set_visual, self.train_set_text, self.train_set_labels)
self.valid_set = ProcessedDataset(self.valid_set_audio, self.valid_set_visual, self.valid_set_text, self.valid_set_labels)
self.test_set = ProcessedDataset(self.test_set_audio, self.test_set_visual, self.test_set_text, self.test_set_labels)
def triple_check(self, vid, sid, audio, visual, text):
"""Checks if this segment data is intact"""
if self.aligned[audio][vid][sid] and self.aligned[visual][vid][sid] and self.aligned[text][vid][sid]:
return True
else:
print("Video {} segment {} has incomplete data and has been discarded!".format(vid, sid))
return False
def validify(self, array, dummy=0):
"""Check and remove NaN values in the data!"""
array[array != array] = dummy
return array
|
the-stack_106_26057 | # Copyright (c) 2016 Universidade Federal Fluminense (UFF)
# Copyright (c) 2016 Polytechnic Institute of New York University.
# Copyright (c) 2018, 2019, 2020 President and Fellows of Harvard College.
# This file is part of ProvBuild.
"""Lightweight objects for storage during collection"""
from __future__ import (absolute_import, print_function,
division, unicode_literals)
from datetime import datetime
from future.utils import viewitems, viewvalues
from . import content
class ObjectStore(object):
"""Temporary storage for LW objects"""
def __init__(self, cls):
"""Initialize Object Store
Arguments:
cls -- LW object class
"""
self.cls = cls
self.store = {}
self.id = 0 # pylint: disable=invalid-name
self.count = 0
def __getitem__(self, index):
return self.store[index]
def __delitem__(self, index):
self.store[index] = None
self.count -= 1
def add(self, *args):
"""Add object using its __init__ arguments and return id"""
self.id += 1
self.count += 1
self.store[self.id] = self.cls(self.id, *args)
return self.id
def add_object(self, *args):
"""Add object using its __init__ arguments and return object"""
self.id += 1
self.count += 1
self.store[self.id] = self.cls(self.id, *args)
return self.store[self.id]
def dry_add(self, *args):
"""Return object that would be added by add_object
Do not add it to storage
"""
return self.cls(-1, *args)
def remove(self, value):
"""Remove object from storage"""
        for key, val in list(viewitems(self.store)):  # copy: entries are deleted while iterating
if val == value:
del self.store[key]
self.count -= 1
def __iter__(self):
"""Iterate on objects, and not ids"""
        return iter(viewvalues(self.store))
def items(self):
"""Iterate on both ids and objects"""
for key, value in viewitems(self.store):
yield key, value
def iteritems(self):
"""Iterate on both ids and objects"""
for key, value in viewitems(self.store):
yield key, value
def values(self):
"""Iterate on objects if they exist"""
for value in viewvalues(self.store):
if value is not None:
yield value
def clear(self):
"""Remove deleted objects from storage"""
self.store = {key: val for key, val in viewitems(self.store) if val}
self.count = len(self.store)
def generator(self, trial_id, partial=False):
"""Generator used for storing objects in database"""
for obj in self.values():
if partial and obj.is_complete():
del self[obj.id]
obj.trial_id = trial_id
yield obj
if partial:
self.clear()
def has_items(self):
"""Return true if it has items"""
return bool(self.count)
def define_attrs(required, extra=[]): # pylint: disable=dangerous-default-value
"""Create __slots__ by adding extra attributes to required ones"""
slots = tuple(required + extra)
attributes = tuple(required)
return slots, attributes
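# e.g. define_attrs(["id", "name"], ["code"]) -> (("id", "name", "code"), ("id", "name")):
# the extras become instance slots but are not listed in ``attributes``, so
# keys()/dict(obj) only expose the persisted fields.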
class BaseLW: # pylint: disable=too-few-public-methods
"""Lightweight modules base class"""
def keys(self):
"""Return attributes that should be saved"""
return self.attributes # pylint: disable=no-member
def __iter__(self):
return iter(self.attributes) # pylint: disable=no-member
def __getitem__(self, key):
if key in self.special and getattr(self, key) == -1: # pylint: disable=no-member
return None
return getattr(self, key)
# Deployment
class ModuleLW(BaseLW):
"""Module lightweight object
There are type definitions on lightweight.pxd
"""
__slots__, attributes = define_attrs(
["id", "name", "path", "version", "code_hash"],
["trial_id"]
)
special = set()
def __init__(self, oid, name, version, path, code_hash): # pylint: disable=too-many-arguments
self.trial_id = -1
self.id = oid # pylint: disable=invalid-name
self.name = name
self.version = version
self.path = path
self.code_hash = code_hash
def is_complete(self): # pylint: disable=no-self-use
"""Module can always be removed from object store"""
return True
def __repr__(self):
return ("Module(id={}, name={}, version={})").format(
self.id, self.name, self.version)
class DependencyLW(BaseLW):
"""Dependency lightweight object
There are type definitions on lightweight.pxd
"""
__slots__, attributes = define_attrs(
["trial_id", "module_id"], ["id"]
)
special = set()
def __init__(self, oid, module_id):
self.trial_id = -1
self.id = oid # pylint: disable=invalid-name
self.module_id = module_id
def is_complete(self): # pylint: disable=no-self-use
"""Dependency can always be removed from object store"""
return True
def __repr__(self):
return ("Dependency(module_id={})").format(self.module_id)
class EnvironmentAttrLW(BaseLW):
"""EnvironmentAttr lightweight object
There are type definitions on lightweight.pxd
"""
__slots__, attributes = define_attrs(
["trial_id", "id", "name", "value"]
)
special = set()
def __init__(self, oid, name, value):
self.trial_id = -1
self.id = oid # pylint: disable=invalid-name
self.name = name
self.value = value
def is_complete(self): # pylint: disable=no-self-use
"""EnvironmentAttr can always be removed from object store"""
return True
def __repr__(self):
return ("EnvironmentAttr(id={}, name={}, value={})").format(
self.id, self.name, self.value)
# Definition
class DefinitionLW(BaseLW): # pylint: disable=too-many-instance-attributes
"""Definition lightweight object
May represent files, classes and function definitions
There are type definitions on lightweight.pxd
"""
__slots__, attributes = define_attrs(
["id", "name", "code_hash", "trial_id", "first_line", "last_line",
"docstring"],
["type", "code", "parent", "namespace"],
)
special = set()
def __init__(self, aid, previous_namespace, name, code, dtype, parent, # pylint: disable=too-many-arguments
first_line, last_line, docstring):
self.trial_id = -1
self.id = aid # pylint: disable=invalid-name
self.namespace = (
previous_namespace +
("." if previous_namespace else "") +
name
)
self.name = self.namespace
self.parent = (parent if parent is not None else -1)
self.type = dtype
self.code = code
self.code_hash = content.put(code.encode("utf-8"))
self.first_line = first_line
self.last_line = last_line
self.docstring = docstring or ""
def is_complete(self): # pylint: disable=no-self-use
"""DefinitionLW can always be removed from object store"""
return True
def __repr__(self):
return ("DefinitionLW(id={}, name={}, type={})").format(
self.id, self.name, self.type)
class ObjectLW(BaseLW):
"""Object lightweight object
There are type definitions on lightweight.pxd
"""
__slots__, attributes = define_attrs(
["trial_id", "id", "name", "type", "function_def_id"]
)
special = set()
def __init__(self, oid, name, otype, function_def_id):
self.trial_id = -1
self.id = oid # pylint: disable=invalid-name
self.name = name
self.type = otype
self.function_def_id = function_def_id
def is_complete(self): # pylint: disable=no-self-use
"""Object can always be removed from object store"""
return True
def __repr__(self):
return (
"Object(id={}, name={}, type={}, "
"function_def={})"
).format(self.id, self.name, self.type, self.function_def_id)
# Profiler
class ActivationLW(BaseLW): # pylint: disable=too-many-instance-attributes
"""Activation lightweight object
There are type definitions on lightweight.pxd
"""
__slots__, attributes = define_attrs(
["id", "name", "line", "return_value", "start", "finish", "caller_id",
"trial_id"],
["file_accesses", "context", "slice_stack", "lasti", "definition_file",
"args", "kwargs", "starargs", "with_definition", "filename",
"is_main", "has_parameters",
"loops", "conditions", "permanent_conditions",
"temp_context", "temp_line"]
)
special = {"caller_id"}
def __init__(self, aid, definition_file, filename, name, line, lasti, # pylint: disable=too-many-arguments
caller_id, with_definition):
self.trial_id = aid
self.id = aid # pylint: disable=invalid-name
self.name = name
self.line = line
self.start = datetime.now()
self.finish = None
self.caller_id = (caller_id if caller_id else -1)
self.return_value = None
# Name of the script with the call
self.filename = filename
# Name of the script with the function definition
self.definition_file = definition_file
# Activation has definition or not
self.with_definition = with_definition
# Activation is __main__
self.is_main = aid == 1
# Activation has parameters. Use only for slicing!
self.has_parameters = True
# File accesses. Used to get the content after the activation
self.file_accesses = []
# Variable context. Used in the slicing lookup
self.context = {}
# Temporary variables
self.temp_context = set()
self.temp_line = None
# Line execution stack.
# Used to evaluate function calls before execution line
self.slice_stack = []
self.lasti = lasti
self.args = []
self.kwargs = []
self.starargs = []
self.loops = []
self.conditions = []
self.permanent_conditions = []
def is_complete(self):
"""Activation can be removed from object store after setting finish"""
return self.finish is not None
def is_comprehension(self):
"""Check if activation is comprehension"""
return self.name in [
"<setcomp>", "<dictcomp>", "<genexpr>", "<listcomp>"
]
def __repr__(self):
return (
"Activation(id={}, line={}, lasti={}, filename={}, "
" name={}, start={}, finish={}, return={}, caller_id={})"
).format(
self.id, self.line, self.lasti, self.filename, self.name,
self.start, self.finish, self.return_value, self.caller_id
)
class ObjectValueLW(BaseLW):
"""ObjectValue lightweight object
There are type definitions on lightweight.pxd
"""
__slots__, attributes = define_attrs(
["trial_id", "id", "name", "value", "type", "function_activation_id"]
)
special = set()
def __init__(self, oid, name, value, otype, function_activation_id): # pylint: disable=too-many-arguments
self.trial_id = -1
self.id = oid # pylint: disable=invalid-name
self.name = name
self.value = value
self.type = otype
self.function_activation_id = function_activation_id
def is_complete(self): # pylint: disable=no-self-use
"""ObjectValue can always be removed"""
return True
def __repr__(self):
return (
"ObjectValue(id={}, name={}, value={}, type={}, "
"activation={})"
).format(
self.id, self.name,
self.value, self.type, self.function_activation_id
)
class FileAccessLW(BaseLW): # pylint: disable=too-many-instance-attributes
"""FileAccess lightweight object
There are type definitions on lightweight.pxd
"""
__slots__, attributes = define_attrs(
["id", "name", "mode", "buffering", "timestamp", "trial_id",
"content_hash_before", "content_hash_after",
"function_activation_id"],
["done"]
)
special = {"function_activation_id"}
def __init__(self, fid, name):
self.trial_id = -1
self.id = fid # pylint: disable=invalid-name
self.name = name
self.mode = "r"
self.buffering = "default"
self.content_hash_before = None
self.content_hash_after = None
self.timestamp = datetime.now()
self.function_activation_id = -1
self.done = False
def update(self, variables):
"""Update file access with dict"""
for key, value in viewitems(variables):
setattr(self, key, value)
def is_complete(self):
"""FileAccess can be removed once it is tagged as done"""
return self.done
def __repr__(self):
return ("FileAccess(id={}, name={}").format(self.id, self.name)
# Slicing
class VariableLW(BaseLW):
"""Variable lightweight object
There are type definitions on lightweight.pxd
"""
__slots__, attributes = define_attrs(
["id", "activation_id", "name", "line", "value", "time", "trial_id",
"type"]
)
special = set()
def __init__(self, vid, activation_id, name, line, value, time, _type): # pylint: disable=too-many-arguments
self.id = vid # pylint: disable=invalid-name
self.activation_id = activation_id
self.name = name
self.line = line
self.value = value
self.time = time
self.type = _type
def is_complete(self): # pylint: disable=no-self-use
"""Variable can never be removed"""
return False
def __repr__(self):
return ("Variable(id={}, activation_id={}, name={}, line={}, type={},"
"value={})").format(self.id, self.activation_id, self.name,
self.line, self.type, self.value)
class VariableDependencyLW(BaseLW):
"""Variable Dependency lightweight object
There are type definitions on lightweight.pxd
"""
__slots__, attributes = define_attrs(
["id", "source_activation_id", "source_id",
"target_activation_id", "target_id", "trial_id", "type"]
)
special = set()
def __init__(self, vid, source_activation_id, source_id, # pylint: disable=too-many-arguments
target_activation_id, target_id, _type):
self.id = vid # pylint: disable=invalid-name
self.source_activation_id = source_activation_id
self.source_id = source_id
self.target_activation_id = target_activation_id
self.target_id = target_id
self.trial_id = -1
self.type = _type
def is_complete(self): # pylint: disable=no-self-use
"""Variable Dependency can always be removed"""
return True
def __repr__(self):
return (
"Dependent(id={}, "
"sact_id={}, source_id={}, "
"tact_id={}, target_id={}, type={})"
).format(
self.id,
self.source_activation_id, self.source_id,
self.target_activation_id, self.target_id, self.type
)
class VariableUsageLW(BaseLW):
"""Variable Usage lightweight object
There are type definitions on lightweight.pxd
"""
__slots__, attributes = define_attrs(
["id", "activation_id", "variable_id",
"line", "ctx", "trial_id"]
)
special = set()
def __init__(self, vid, activation_id, variable_id, line, ctx): # pylint: disable=too-many-arguments
self.id = vid # pylint: disable=invalid-name
self.activation_id = activation_id
self.variable_id = variable_id
self.line = line
self.ctx = ctx
self.trial_id = -1
def is_complete(self): # pylint: disable=no-self-use
"""Variable Usage can always be removed"""
return True
def __repr__(self):
return (
"Usage(id={}, variable_id={}, line={}, ctx={})"
).format(self.id, self.variable_id, self.line, self.ctx)
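# --- hedged usage sketch (illustration only; not part of the collector). Run
# via ``python -m`` so the relative ``content`` import above resolves.
if __name__ == '__main__':
    _attrs = ObjectStore(EnvironmentAttrLW)
    _oid = _attrs.add('OS', 'Linux')                # returns the new id
    print(_attrs[_oid])                             # EnvironmentAttr(id=1, ...)
    for _attr in _attrs.generator(trial_id=1):      # stamps trial_id, then yields
        print(_attr.trial_id, _attr.name, _attr.value)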
|
the-stack_106_26061 | import json
import os
import sys
import numpy as np
import random
import math
import time
from graph import GraphBatch
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
from env import R2RBatch, R2RBatch_neg
from utils import padding_idx, add_idx, Tokenizer
import utils
import model
import param
from param import args
from collections import defaultdict
from copy import copy, deepcopy
from torch import multiprocessing as mp
# from mp import Queue
# from torch.multiprocessing import Queue
from torch.multiprocessing import Process, Queue
# import imp
# imp.reload(model)
from speaker import Speaker_v2
from sklearn.svm import SVC
class SF(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return (input>0.5).float()
@staticmethod
def backward(ctx, grad_output):
return grad_output
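# --- hedged illustration (added for clarity): SF is a straight-through
# threshold -- hard 0/1 output in the forward pass, identity gradient in the
# backward pass, so a binarised decision stays trainable. The snippet needs
# only torch, but running this script still requires the project imports
# above to resolve.
if __name__ == '__main__':
    _p = torch.tensor([0.2, 0.7], requires_grad=True)
    _hard = SF.apply(_p)           # -> values 0. and 1.
    _hard.sum().backward()         # gradient passes straight through
    print(_hard, _p.grad)          # _p.grad == tensor([1., 1.])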
def obs_process(batch):
res = []
for item in batch:
res.append({
'instr_id' : item['instr_id'],
'scan' : item['scan'],
'viewpoint' : item['viewpoint'],
'viewIndex' : item['viewIndex'],
'heading' : item['heading'],
'elevation' : item['elevation'],
# 'navigableLocations' : item['navigableLocations'],
'instructions' : item['instructions'],
'path_id' : item['path_id']
})
return res
class BaseAgent(object):
''' Base class for an R2R agent to generate and save trajectories. '''
def __init__(self, env, results_path):
self.env = env
self.results_path = results_path
random.seed(1)
self.results = {}
self.losses = [] # For learning agents
def write_results(self):
output = [{'instr_id':k, 'trajectory': v} for k,v in self.results.items()]
with open(self.results_path, 'w') as f:
json.dump(output, f)
def get_results(self):
output = [{'instr_id': k, 'trajectory': v} for k, v in self.results.items()]
return output
def rollout(self, **args):
''' Return a list of dicts containing instr_id:'xx', path:[(viewpointId, heading_rad, elevation_rad)] '''
raise NotImplementedError
@staticmethod
def get_agent(name):
return globals()[name+"Agent"]
def test(self, iters=None, **kwargs):
self.env.reset_epoch(shuffle=(iters is not None)) # If iters is not none, shuffle the env batch
self.losses = []
self.results = {}
# We rely on env showing the entire batch before repeating anything
looped = False
self.loss = 0
if iters is not None:
# For each time, it will run the first 'iters' iterations. (It was shuffled before)
for i in range(iters):
# print('iter',i)
for traj in self.rollout(**kwargs):
self.loss = 0
self.results[traj['instr_id']] = traj['path']
else: # Do a full round
while True:
for traj in self.rollout(**kwargs):
if traj['instr_id'] in self.results:
looped = True
else:
self.loss = 0
self.results[traj['instr_id']] = traj['path']
if looped:
break
class ActiveExplore_v1(BaseAgent): # this label is from entropy
''' An agent based on an LSTM seq2seq model with attention. '''
# For now, the agent can't pick which forward move to make - just the one in the middle
env_actions = {
'left': (0,-1, 0), # left
'right': (0, 1, 0), # right
'up': (0, 0, 1), # up
'down': (0, 0,-1), # down
'forward': (1, 0, 0), # forward
'<end>': (0, 0, 0), # <end>
'<start>': (0, 0, 0), # <start>
'<ignore>': (0, 0, 0) # <ignore>
}
def __init__(self, env, results_path, tok, episode_len=20, scorer=None):
super(ActiveExplore_v1, self).__init__(env, results_path)
self.tok = tok
self.episode_len = episode_len
self.feature_size = self.env.feature_size
# self.env_exp = R2RBatch(self.env.env.features, args.batchSize)
# self.queue = Queue()
self.queue = Queue()
# Models
enc_hidden_size = args.rnn_dim//2 if args.bidir else args.rnn_dim
self.encoder = model.EncoderLSTM(tok.vocab_size(), args.wemb, enc_hidden_size, padding_idx,
args.dropout, bidirectional=args.bidir).cuda()
self.decoder = model.AttnDecoderLSTM(args.aemb, args.rnn_dim, args.dropout, feature_size=self.feature_size + args.angle_feat_size).cuda()
self.policy = model.AttnGobalPolicyLSTM_v4(args.rnn_dim, args.dropout, feature_size=self.feature_size + args.angle_feat_size).cuda()
self.explorer = model.AttnDecoderLSTM(args.aemb, args.rnn_dim, args.dropout, feature_size=self.feature_size + args.angle_feat_size).cuda()
self.linear = model.FullyConnected2(args.rnn_dim, self.feature_size).cuda()
self.critic = model.Critic().cuda()
self.critic_exp = model.Critic().cuda()
self.critic_policy = model.Critic(self.feature_size + args.angle_feat_size + args.rnn_dim).cuda()
# self.scorer = model.AlignScoring_combine(self.feature_size + args.angle_feat_size, args.rnn_dim).cuda()
if not scorer is None:
self.scorer = scorer
self.models = (self.encoder, self.decoder, self.policy, self.critic, self.explorer, self.linear, self.critic_exp, self.critic_policy)
self.models_part = (self.encoder, self.decoder, self.critic)
# Optimizers
self.encoder_optimizer = args.optimizer(self.encoder.parameters(), lr=args.lr * 0.05)
self.decoder_optimizer = args.optimizer(self.decoder.parameters(), lr=args.lr * 0.05)
self.critic_optimizer = args.optimizer(self.critic.parameters(), lr=args.lr)
self.policy_optimizer = args.optimizer(self.policy.parameters(), lr=args.lr)
self.explorer_optimizer = args.optimizer(self.explorer.parameters(), lr=args.lr)
self.critic_exp_optimizer = args.optimizer(self.critic_exp.parameters(), lr=args.lr)
self.critic_policy_optimizer = args.optimizer(self.critic_policy.parameters(), lr=args.lr)
self.linear_optimizer = args.optimizer(self.linear.parameters(), lr=args.lr)
self.optimizers = (self.encoder_optimizer, self.decoder_optimizer, self.policy_optimizer, self.critic_optimizer, self.explorer_optimizer, self.linear_optimizer,self.critic_exp_optimizer,self.critic_policy_optimizer)
# Evaluations
self.losses = []
self.criterion = nn.CrossEntropyLoss(ignore_index=args.ignoreid, size_average=False, reduce=False)
self.criterion_gate = nn.CrossEntropyLoss(ignore_index=2, size_average=False, reduce=False)
# Logs
sys.stdout.flush()
self.logs = defaultdict(list)
print('Initialization finished')
def _sort_batch(self, obs):
''' Extract instructions from a list of observations and sort by descending
sequence length (to enable PyTorch packing). '''
seq_tensor = np.array([ob['instr_encoding'] for ob in obs])
seq_lengths = np.argmax(seq_tensor == padding_idx, axis=1)
seq_lengths[seq_lengths == 0] = seq_tensor.shape[1] # Full length
seq_tensor = torch.from_numpy(seq_tensor)
seq_lengths = torch.from_numpy(seq_lengths)
# Sort sequences by lengths
seq_lengths, perm_idx = seq_lengths.sort(0, True) # True -> descending
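        # (e.g. lengths [3, 5, 2] sort to [5, 3, 2] with perm_idx [1, 0, 2];
        #  perm_idx is reused throughout rollout() to keep obs aligned with
        #  this packable ordering.)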
sorted_tensor = seq_tensor[perm_idx]
mask = (sorted_tensor == padding_idx)[:,:seq_lengths[0]] # seq_lengths[0] is the Maximum length
return Variable(sorted_tensor, requires_grad=False).long().cuda(), \
mask.byte().cuda(), \
list(seq_lengths), list(perm_idx)
def _feature_variable(self, obs):
''' Extract precomputed features into variable. '''
features = np.empty((len(obs), args.views, self.feature_size + args.angle_feat_size), dtype=np.float32)
for i, ob in enumerate(obs):
features[i, :, :] = ob['feature'] # Image feat
return Variable(torch.from_numpy(features), requires_grad=False).cuda()
def _candidate_variable(self, obs):
candidate_leng = [len(ob['candidate']) + 1 for ob in obs] # +1 is for the end
candidate_feat = np.zeros((len(obs), max(candidate_leng), self.feature_size + args.angle_feat_size), dtype=np.float32) # [batch, max_candidat_length, feature_size]
# Note: The candidate_feat at len(ob['candidate']) is the feature for the END
# which is zero in my implementation
for i, ob in enumerate(obs):
for j, c in enumerate(ob['candidate']):
candidate_feat[i, j, :] = c['feature'] # Image feat
return torch.from_numpy(candidate_feat).cuda(), candidate_leng
def get_viewpoint(self, obs):
viewpoints = []
for i, ob in enumerate(obs):
viewpoints.append(ob['viewpoint'])
return viewpoints
def get_input_feat(self, obs):
input_a_t = np.zeros((len(obs), args.angle_feat_size), np.float32)
for i, ob in enumerate(obs):
input_a_t[i] = utils.angle_feature(ob['heading'], ob['elevation'])
input_a_t = torch.from_numpy(input_a_t).cuda()
f_t = self._feature_variable(obs) # Image features from obs
candidate_feat, candidate_leng = self._candidate_variable(obs)
return input_a_t, f_t, candidate_feat, candidate_leng
def _teacher_action(self, obs, ended):
"""
Extract teacher actions into variable.
:param obs: The observation.
:param ended: Whether the action seq is ended
:return:
"""
a = np.zeros(len(obs), dtype=np.int64)
for i, ob in enumerate(obs):
if ended[i]: # Just ignore this index
a[i] = args.ignoreid
else:
for k, candidate in enumerate(ob['candidate']):
if candidate['viewpointId'] == ob['teacher']: # Next view point
a[i] = k
break
else: # Stop here
assert ob['teacher'] == ob['viewpoint'] # The teacher action should be "STAY HERE"
a[i] = len(ob['candidate'])
return torch.from_numpy(a).cuda()
def make_equiv_action(self, env, a_t, perm_obs, perm_idx=None, traj=None):
"""
Interface between Panoramic view and Egocentric view
It will convert the action panoramic view action a_t to equivalent egocentric view actions for the simulator
"""
def take_action(i, idx, name):
if type(name) is int: # Go to the next view
env.env.sims[idx].makeAction(name, 0, 0)
else: # Adjust
env.env.sims[idx].makeAction(*self.env_actions[name])
state = env.env.sims[idx].getState()
if traj is not None:
traj[i]['path'].append((state.location.viewpointId, state.heading, state.elevation))
if perm_idx is None:
perm_idx = range(len(perm_obs))
for i, idx in enumerate(perm_idx):
action = a_t[i]
if action != -1: # -1 is the <stop> action
# print(action, len(perm_obs[i]['candidate']))
select_candidate = perm_obs[i]['candidate'][action]
src_point = perm_obs[i]['viewIndex']
trg_point = select_candidate['pointId']
src_level = (src_point ) // 12 # The point idx started from 0
trg_level = (trg_point ) // 12
while src_level < trg_level: # Tune up
take_action(i, idx, 'up')
src_level += 1
while src_level > trg_level: # Tune down
take_action(i, idx, 'down')
src_level -= 1
while env.env.sims[idx].getState().viewIndex != trg_point: # Turn right until the target
take_action(i, idx, 'right')
assert select_candidate['viewpointId'] == \
env.env.sims[idx].getState().navigableLocations[select_candidate['idx']].viewpointId
take_action(i, idx, select_candidate['idx'])
def make_reward(self, cpu_a_t_after, cpu_a_t_before, perm_idx):
obs = np.array(self.env._get_obs())
scanIds = [ob['scan'] for ob in obs]
viewpoints = [ob['viewpoint'] for ob in obs]
headings = [ob['heading'] for ob in obs]
elevations = [ob['elevation'] for ob in obs]
perm_obs = obs[perm_idx]
self.make_equiv_action(self.env, cpu_a_t_after, perm_obs, perm_idx)
obs_temp = np.array(self.env._get_obs())
perm_obs_temp = obs_temp[perm_idx]
dist_after = np.array([ob['distance'] for ob in perm_obs_temp])
self.env.env.newEpisodes(scanIds,viewpoints,headings,elevations)
self.make_equiv_action(self.env, cpu_a_t_before, perm_obs, perm_idx)
obs_temp = np.array(self.env._get_obs())
perm_obs_temp = obs_temp[perm_idx]
dist_before = np.array([ob['distance'] for ob in perm_obs_temp])
self.env.env.newEpisodes(scanIds,viewpoints,headings,elevations)
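        # Reward shaping: +1 if the exploration-informed action lands closer to
        # the goal than the original action, -1 if farther; when the new action
        # is STOP, add +3 if within 3m of the goal, otherwise -3.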
reward = (dist_before > dist_after).astype(np.float32) - (dist_before < dist_after).astype(np.float32) + 3 * ((dist_after < 3).astype(np.float32) - (dist_after > 3).astype(np.float32)) * (cpu_a_t_after == -1).astype(np.float32)
# return torch.from_numpy(reward).cuda().float()
return reward
def make_label(self, cpu_a_t_after, cpu_a_t_before, perm_idx):
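        # Per-instance 1.0/0.0 label: 1.0 where the exploration-informed action
        # strictly reduces the remaining distance compared with the original one.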
obs = np.array(self.env._get_obs())
scanIds = [ob['scan'] for ob in obs]
viewpoints = [ob['viewpoint'] for ob in obs]
headings = [ob['heading'] for ob in obs]
elevations = [ob['elevation'] for ob in obs]
perm_obs = obs[perm_idx]
self.make_equiv_action(self.env, cpu_a_t_after, perm_obs, perm_idx)
obs_temp = np.array(self.env._get_obs())
perm_obs_temp = obs_temp[perm_idx]
dist_after = np.array([ob['distance'] for ob in perm_obs_temp])
self.env.env.newEpisodes(scanIds,viewpoints,headings,elevations)
self.make_equiv_action(self.env, cpu_a_t_before, perm_obs, perm_idx)
obs_temp = np.array(self.env._get_obs())
perm_obs_temp = obs_temp[perm_idx]
dist_before = np.array([ob['distance'] for ob in perm_obs_temp])
self.env.env.newEpisodes(scanIds,viewpoints,headings,elevations)
return (dist_before > dist_after).astype(np.float32)
def exploration(self, explore_env, cand_feat, mark_cand,
h_t, h1, c_t,
ctx, ctx_mask, batch_size,
perm_idx, speaker, explore_flag, noise, explore_length=4):
# mark_cand: 2-D tensor, shape: batch_size x max_candidate_length + 1
# explore_length = 4
others = 0.
exp_loss = 0.
rewards = []
masks = []
hidden_states = []
policy_log_probs = []
dim = h1.shape[1]
obs_exp = np.array(explore_env._get_obs())
perm_obs_exp = obs_exp[perm_idx]
scanIds = [ob['scan'] for ob in obs_exp]
viewpoints = [ob['viewpoint'] for ob in obs_exp]
headings = [ob['heading'] for ob in obs_exp]
elevations = [ob['elevation'] for ob in obs_exp]
# Record starting point
traj = [{
'instr_id': ob['instr_id'],
'path': [(ob['viewpoint'], ob['heading'], ob['elevation'])]
} for ob in perm_obs_exp]
input_a_t, f_t, candidate_feat, candidate_leng = self.get_input_feat(perm_obs_exp)
candidate_leng_ori = candidate_leng
for i,l in enumerate(candidate_leng):
            mark_cand[i,l-1] = True # while unexplored directions remain, stop cannot be chosen
            if mark_cand[i].all():
                mark_cand[i,l-1] = False # once every direction has been explored, allow stop
max_candidate_length = max(candidate_leng) - 1
if speaker is not None: # Apply the env drop mask to the feat
f_t[..., :-args.angle_feat_size] *= noise
h_t, c_t, logit, h1, _, _ = self.explorer(input_a_t, f_t, cand_feat,
h_t, h1, c_t,
ctx, ctx_mask,
already_dropfeat=(speaker is not None)
)
# print('markcand',mark_cand)
logit.masked_fill_(mark_cand, -float('inf'))
hidden_states.append(h_t)
if self.feedback == 'argmax':
_, a_t = logit.max(1) # student forcing - argmax
a_t = a_t.detach()
log_probs = F.log_softmax(logit, 1)
policy_log_probs.append(log_probs.gather(1, a_t.unsqueeze(1)))
else:
probs = F.softmax(logit, 1) # sampling an action from model
c = torch.distributions.Categorical(probs)
a_t = c.sample().detach()
policy_log_probs.append(c.log_prob(a_t))
# print('mark_cand', mark_cand)
maxx, a_t = logit.max(1) # student forcing - argmax
a_t = a_t.detach()
cpu_a_t = a_t.cpu().numpy()
probs = F.softmax(logit, 1) # sampling an action from model
c = torch.distributions.Categorical(probs)
policy_log_probs.append(c.log_prob(a_t))
for i, next_id in enumerate(cpu_a_t):
if next_id == (candidate_leng[i]-1) or next_id == args.ignoreid: # The last action is <end>
cpu_a_t[i] = -1 # Change the <end> and ignore action to -1
self.make_equiv_action(explore_env, cpu_a_t, perm_obs_exp, perm_idx, traj)
maxx = maxx.repeat(max_candidate_length + 1,1).permute(1,0) # batch_size x max_candidate_length + 1
# print(maxx.shape, explore_flag.shape)
dir_mark = (logit == maxx) & explore_flag.repeat(max_candidate_length + 1, 1).permute(1,0) # the direction of max value will be explored
mark_cand = mark_cand | dir_mark
# print('dir',dir_mark)
# print('mark_later',mark_cand)
length = torch.zeros(batch_size).cuda() # the length of exploration
ended = (~explore_flag) | torch.from_numpy(cpu_a_t == -1).cuda()
h1_final = torch.zeros_like(h1).cuda()
reward = np.ones(batch_size, np.float32)
mask = np.ones(batch_size, np.float32)
for i in range(batch_size):
if ended[i]:
reward[i] = 0
mask[i] = 0
rewards.append(reward)
masks.append(mask)
for l in range(explore_length):
if ended.all():
break
obs_exp = np.array(explore_env._get_obs())
perm_obs_exp = obs_exp[perm_idx] # Perm the obs for the res
self.graphs.add_edge(perm_obs_exp)
input_a_t, f_t, candidate_feat, candidate_leng = self.get_input_feat(perm_obs_exp)
if speaker is not None: # Apply the env drop mask to the feat
candidate_feat[..., :-args.angle_feat_size] *= noise
f_t[..., :-args.angle_feat_size] *= noise
candidate_mask = utils.length2mask(candidate_leng)
h_t, c_t, logit, h1, _, _ = self.explorer(input_a_t, f_t, candidate_feat,
h_t, h1, c_t,
ctx, ctx_mask,
already_dropfeat=(speaker is not None)
)
logit.masked_fill_(candidate_mask, -float('inf'))
hidden_states.append(h_t)
if self.feedback == 'argmax':
_, a_t = logit.max(1) # student forcing - argmax
a_t = a_t.detach()
log_probs = F.log_softmax(logit, 1)
policy_log_probs.append(log_probs.gather(1, a_t.unsqueeze(1)))
else:
probs = F.softmax(logit, 1) # sampling an action from model
c = torch.distributions.Categorical(probs)
a_t = c.sample().detach()
policy_log_probs.append(c.log_prob(a_t))
cpu_a_t = a_t.cpu().numpy()
for i, next_id in enumerate(cpu_a_t):
if next_id == (candidate_leng[i]-1) or next_id == args.ignoreid or ended[i]: # The last action is <end>
cpu_a_t[i] = -1 # Change the <end> and ignore action to -1
finish_new = (~ended) & torch.from_numpy(cpu_a_t == -1).cuda() # just finished
h1_final = h1_final + h1 * finish_new.repeat(args.rnn_dim,1).permute(1,0).float()
self.make_equiv_action(explore_env, cpu_a_t, perm_obs_exp, perm_idx, traj)
reward = np.ones(batch_size, np.float32)
mask = np.ones(batch_size, np.float32)
for i in range(batch_size):
if ended[i]:
reward[i] = 0
mask[i] = 0
rewards.append(reward)
masks.append(mask)
length = length + (l+1) * finish_new.float()
ended = ended | finish_new
if ended.all():
break
length = length + explore_length * (~ended).float()
length = length.detach().cpu().numpy()
h1_final = h1_final + h1 * (~ended).repeat(args.rnn_dim,1).permute(1,0).float()
self.logs['explore_length'].append(length)
obs_exp = np.array(explore_env._get_obs())
perm_obs_exp = obs_exp[perm_idx] # Perm the obs for the res
input_a_t, f_t, candidate_feat, candidate_leng = self.get_input_feat(perm_obs_exp)
if speaker is not None: # Apply the env drop mask to the feat
candidate_feat[..., :-args.angle_feat_size] *= noise
f_t[..., :-args.angle_feat_size] *= noise
h_t, _, _, _, _, _ = self.explorer(input_a_t, f_t, candidate_feat,
h_t, h1, c_t,
ctx, ctx_mask,
already_dropfeat=(speaker is not None)
)
hidden_states.append(h_t)
explore_env.env.newEpisodes(scanIds,viewpoints,headings,elevations)
candidate_res_comp = self.linear(h1_final)
dir_mark_np = dir_mark.detach().cpu().numpy()
cand_res = []
for i, d in enumerate(dir_mark_np): # batch size
cand_res_comp = []
for j,_ in enumerate(d): # max_candidate
if _ and (j != candidate_leng_ori[i] - 1):
cand_res_comp.append(candidate_res_comp[i])
else:
cand_res_comp.append(torch.zeros_like(candidate_res_comp[i]).cuda())
cand_res_comp = torch.stack(cand_res_comp, 0) # max_candidate x dim
cand_res.append(cand_res_comp)
cand_res = torch.stack(cand_res, 0) # batch size x max_candidate x dim
for i,l in enumerate(candidate_leng_ori):
            mark_cand[i,l-1] = True # while unexplored directions remain, stop cannot be chosen
return cand_res, mark_cand, rewards, masks, hidden_states, policy_log_probs, traj
def rollout(self, train_ml=None, train_rl=True, reset=True, speaker=None, filter_loss=False, train_exp=False, train_exp_flag=True, traj_flag=True, cand_dir_num=100,ths=None):
"""
:param train_ml: The weight to train with maximum likelihood
:param train_rl: whether use RL in training
:param reset: Reset the environment
:param speaker: Speaker used in back translation.
If the speaker is not None, use back translation.
O.w., normal training
:return:
"""
if self.feedback == 'teacher' or self.feedback == 'argmax':
train_rl = False
if reset:
# Reset env
obs = np.array(self.env.reset())
else:
obs = np.array(self.env._get_obs())
batch_size = len(obs)
self.graphs = GraphBatch(batch_size)
if speaker is not None: # Trigger the self_train mode!
noise = self.decoder.drop_env(torch.ones(self.feature_size).cuda())
batch = self.env.batch.copy()
speaker.env = self.env
insts = speaker.infer_batch(featdropmask=noise) # Use the same drop mask in speaker
# Create fake environments with the generated instruction
boss = np.ones((batch_size, 1), np.int64) * self.tok.word_to_index['<BOS>'] # First word is <BOS>
insts = np.concatenate((boss, insts), 1)
for i, (datum, inst) in enumerate(zip(batch, insts)):
if inst[-1] != self.tok.word_to_index['<PAD>']: # The inst is not ended!
inst[-1] = self.tok.word_to_index['<EOS>']
datum.pop('instructions')
datum.pop('instr_encoding')
datum['instructions'] = self.tok.decode_sentence(inst)
datum['instr_encoding'] = inst
obs = np.array(self.env.reset(batch))
else:
noise = None
# Reorder the language input for the encoder (do not ruin the original code)
seq, seq_mask, seq_lengths, perm_idx = self._sort_batch(obs)
perm_obs = obs[perm_idx]
self.graphs.add_edge(perm_obs)
ctx, h_t, c_t = self.encoder(seq, seq_lengths)
ctx_mask = seq_mask
# Init the reward shaping
last_dist = np.zeros(batch_size, np.float32)
for i, ob in enumerate(perm_obs): # The init distance from the view point to the target
last_dist[i] = ob['distance']
# Record starting point
traj = [{
'instr_id': ob['instr_id'],
'path': [(ob['viewpoint'], ob['heading'], ob['elevation'])]
} for ob in perm_obs]
# For test result submission
visited = [set() for _ in perm_obs]
# Initialization the tracking state
ended = np.array([False] * batch_size) # Indices match permuation of the model, not env
# Init the logs
rewards = []
hidden_states = []
policy_log_probs = []
masks = []
entropys = []
ml_loss_list = []
ml_loss = 0.
ml_loss_policy = 0.
rl_loss_exp = 0.
rl_loss_policy = 0.
policy_entropy = 0.
if ths is None:
ths = np.zeros(self.episode_len)
h1 = h_t
traj_length = np.zeros(batch_size).astype(np.int32)
cnt = 0
explore_cnt = 0
policy_state = []
masks_policy = []
rewards_policy = []
log_prob_policy = []
total_chance = 0
total_explore = 0
exp_traj_all = [[] for i in range(batch_size)]
for t in range(self.episode_len):
# print('t',t)
input_a_t, f_t, candidate_feat, candidate_leng = self.get_input_feat(perm_obs)
# Mask outputs where agent can't move forward
# Here the logit is [b, max_candidate]
candidate_mask = utils.length2mask(candidate_leng)
max_candidate_length = max(candidate_leng) - 1
if speaker is not None: # Apply the env drop mask to the feat
candidate_feat[..., :-args.angle_feat_size] *= noise
f_t[..., :-args.angle_feat_size] *= noise
h_t_temp, c_t_temp, logit_before, h1_temp, _ ,_ = self.decoder(input_a_t, f_t, candidate_feat,
h_t, h1, c_t,
ctx, ctx_mask,
already_dropfeat=(speaker is not None)
)
logit_before.masked_fill_(candidate_mask, -float('inf'))
probs = F.softmax(logit_before, 1) # sampling an action from model
c = torch.distributions.Categorical(probs)
entropy = c.entropy().detach() # batch
_, a_t_before = logit_before.max(1) # student forcing - argmax
a_t_before = a_t_before.detach()
cpu_a_t_before = a_t_before.cpu().numpy()
for i, next_id in enumerate(cpu_a_t_before):
if next_id == (candidate_leng[i]-1) or next_id == args.ignoreid: # The last action is <end>
cpu_a_t_before[i] = -1 # Change the <end> and ignore action to -1
mark_cand = candidate_mask.clone().detach() # batch_size x max_candidate_length + 1, 1 for has been explored.
for i,l in enumerate(candidate_leng):
                mark_cand[i,l-1] = True # while unexplored directions remain, stop cannot be chosen
cand_feat = candidate_feat.detach().clone()
ended_exp = torch.from_numpy(np.array(ended)).cuda()
cnt += (((~candidate_mask).float().sum(-1)-1) * (~ended_exp).float()).sum().item()
temp_traj = [[] for i in range(batch_size)]
exp_traj = [[] for i in range(batch_size)]
# print('before',logit_before)
times = 0
label_list = []
mask_list = []
prob_list = []
teacher = []
while True:
times += 1
if times > cand_dir_num:
break
if times > max_candidate_length + 1:
print('error')
### whether to explore
prob = self.policy(h1_temp, entropy)
if self.feedback == 'argmax':
a_t = prob > 0.5
# a_t = torch.ones_like(prob).cuda() > 0 # for training,
else:
# c = torch.distributions.Categorical(torch.stack([prob,1-prob],-1))
# a = c.sample().detach()
# a_t = (a == 0)
# print(a_t)
a_t = torch.ones_like(prob).cuda() > 0 # for training, execute each exploration
label = (entropy > ths[t]).detach().cpu().numpy()
teacher.append(label)
# print(a_t.shape)
explore_flag = (entropy > ths[t]) & (~ended_exp) # if already stop should not explore
self.logs['prob'].append((prob.detach()*(~ended_exp).float()).cpu().numpy())
# print('end',ended_exp)
# if ended_exp.all():
# break
# print('explore_flag',explore_flag)
# print('mark_cand',mark_cand)
cand_res, mark_cand, rewards_exp, masks, hidden_states_exp, policy_log_probs_exp, tj = self.exploration(self.env, cand_feat, mark_cand,
h_t, h1, c_t,
ctx, ctx_mask, batch_size,
perm_idx, speaker, explore_flag, noise)
# print('mark_cand_after',mark_cand)
# print('cand_res', cand_res)
f = cand_feat[..., :-args.angle_feat_size] + cand_res
a = cand_feat[..., -args.angle_feat_size:]
cand_feat = torch.cat([f, a],-1)
# print('cand_res',cand_res)
_, _, logit_after, _, _ ,_ = self.decoder(input_a_t, f_t, cand_feat,
h_t, h1, c_t,
ctx, ctx_mask,
already_dropfeat=(speaker is not None)
)
logit_after.masked_fill_(candidate_mask, -float('inf'))
_, a_t_after = logit_after.max(1) # student forcing - argmax
cpu_a_t_after = a_t_after.detach().cpu().numpy()
for i, next_id in enumerate(cpu_a_t_after):
if next_id == (candidate_leng[i]-1) or next_id == args.ignoreid: # The last action is <end>
cpu_a_t_after[i] = -1 # Change the <end> and ignore action to -1
# print('mark',times,mark_cand)
rewards_change = self.make_reward(cpu_a_t_after
,cpu_a_t_before, perm_idx) # batch_size
probs = F.softmax(logit_after, 1) # sampling an action from model
c = torch.distributions.Categorical(probs)
entropy = c.entropy().detach() # batch
for i in range(len(rewards_exp)): # explorer reward
for j in range(batch_size):
rewards_exp[i][j] *= rewards_change[j]
self.logs['rewards'].append(rewards_change*((~ended_exp).cpu().numpy() & (~ended)).astype(np.float32))
# if traj_flag:
for i, _ in enumerate(ended_exp):
if not _:
temp_traj[i] += tj[i]['path']
exp_traj[i].append(tj[i]['path'])
label = self.make_label(cpu_a_t_after ,cpu_a_t_before, perm_idx)
label_list.append(label)
prob_list.append(prob)
if self.feedback != 'argmax':
##########################################
### ###
### RL for explorer start. ###
### ###
##########################################
last_value__ = self.critic_exp(hidden_states_exp[-1]).detach()
discount_reward = np.zeros(batch_size, np.float32) # The inital reward is zero
mask_ = masks[-1] # if mask is 1, not finish yet
for i in range(batch_size):
if mask_[i]: # If the action is not ended, use the value function as the last reward
discount_reward[i] = last_value__[i]
length = len(rewards_exp)
total = 0
temp_loss = 0.
for i in range(length-1, -1, -1):
discount_reward = discount_reward * args.gamma + rewards_exp[i]
# If it ended, the reward will be 0
mask_ = Variable(torch.from_numpy(masks[i]), requires_grad=False).cuda()
clip_reward = discount_reward.copy()
r_ = Variable(torch.from_numpy(clip_reward), requires_grad=False).cuda()
v_ = self.critic_exp(hidden_states_exp[i])
a_ = (r_ - v_).detach()
a_ = torch.clamp(a_,-50,50) # clip the advatange
self.logs['advantage'].append(a_.cpu().numpy())
# r_: The higher, the better. -ln(p(action)) * (discount_reward - value)
temp_loss += (-policy_log_probs_exp[i] * a_ * mask_)
temp_loss += (((r_ - v_) ** 2) * mask_) * 0.5 # 1/2 L2 loss
self.logs['critic_exp_loss'].append((((r_ - v_) ** 2) * mask_).sum().item())
rl_loss_exp += (temp_loss * explore_flag.float()).sum()
##########################################
### ###
### RL for policy start. ###
### ###
##########################################
policy_state.append(torch.cat([attn_cand_feat,h1_temp],-1))
mask = np.array((~ended_exp).cpu().numpy()).astype(np.float32) # policy not finished yet
reward = np.array(rewards_change).astype(np.float32) * mask - 0 * explore_flag.cpu().numpy() # if decide to explore , get -0.5
masks_policy.append(mask)
rewards_policy.append(reward)
log_prob = a_t.float()*torch.log(prob+1e-6) + (1-a_t.float())*torch.log((1-prob)+1e-6)
log_prob_policy.append(log_prob)
mask_list.append(mask)
# rl_loss_policy += (- torch.log(prob+1e-6) * torch.from_numpy(rewards_change).cuda().float() * (~ended_exp).float()).sum()
# print('rl_loss',rl_loss_policy,'prob',prob)
ended_exp = ended_exp | (~explore_flag) # if decided not to explore
ended_exp = ended_exp | mark_cand.all(1) # if all direction has been explored
if ended_exp.all():
break
if self.feedback != 'argmax':
# p = np.ones(batch_size)*(-1)
# for i, label in enumerate(label_list): # length
# for j, _ in enumerate(label): # batch
# if _ and p[j] == -1:
# p[j] = i
# teacher = []
# for i in range(len(label_list)):
# a_t = np.zeros(batch_size)
# for j, position in enumerate(p): # batch
# if i <= position:
# a_t[j] = 1
# teacher.append(a_t)
for i, a_t in enumerate(teacher):
total_chance += mask_list[i].sum()
total_explore += (a_t*mask_list[i]).sum()
prob = prob_list[i]
mask = torch.from_numpy(mask_list[i]).cuda().float()
a_t = torch.from_numpy(a_t).cuda().float()
ml_loss_policy += (-(torch.log(prob+1e-6) * a_t + torch.log(1-prob+1e-6) * (1-a_t)) * mask).sum()
c = torch.distributions.Categorical(torch.stack([prob,1-prob],-1))
policy_entropy += (c.entropy() * mask).sum()
p = np.ones(batch_size).astype(np.int32)*(-1)
for i, label in enumerate(label_list): # length
for j, _ in enumerate(label): # batch
if _ and p[j] == -1:
p[j] = i
for i, tj_ in enumerate(exp_traj):
exp_traj_all[i] += tj_[:int(p[i]+1)]
explore_cnt += (((mark_cand ^ candidate_mask).float().sum(-1)-1) * (~torch.from_numpy(np.array(ended)).cuda()).float()).sum().item()
self.logs['dirs'].append((((mark_cand ^ candidate_mask).float().sum(-1)) * (~torch.from_numpy(np.array(ended)).cuda()).float()).cpu().numpy())
h_t, c_t, logit, h1, _, _ = self.decoder(input_a_t, f_t, cand_feat,
h_t, h1, c_t,
ctx, ctx_mask,
already_dropfeat=(speaker is not None)
)
hidden_states.append(h_t)
if args.submit: # Avoding cyclic path
for ob_id, ob in enumerate(perm_obs):
visited[ob_id].add(ob['viewpoint'])
for c_id, c in enumerate(ob['candidate']):
if c['viewpointId'] in visited[ob_id]:
candidate_mask[ob_id][c_id] = 1
logit.masked_fill_(candidate_mask, -float('inf'))
# print('after_final',logit)
# Supervised training
target = self._teacher_action(perm_obs, ended)
ml_loss_list.append(self.criterion(logit, target))
# Determine next model inputs
if self.feedback == 'teacher':
a_t = target # teacher forcing
elif self.feedback == 'argmax':
_, a_t = logit.max(1) # student forcing - argmax
a_t = a_t.detach()
log_probs = F.log_softmax(logit, 1) # Calculate the log_prob here
policy_log_probs.append(log_probs.gather(1, a_t.unsqueeze(1))) # Gather the log_prob for each batch
elif self.feedback == 'sample':
probs = F.softmax(logit, 1) # sampling an action from model
c = torch.distributions.Categorical(probs)
# self.logs['entropy'].append(c.entropy().sum().item()) # For log
entropys.append(c.entropy()) # For optimization
a_t = c.sample().detach()
# print(a_t)
policy_log_probs.append(c.log_prob(a_t))
else:
print(self.feedback)
sys.exit('Invalid feedback option')
# Prepare environment action
# NOTE: Env action is in the perm_obs space
cpu_a_t = a_t.cpu().numpy()
for i, next_id in enumerate(cpu_a_t):
if next_id == (candidate_leng[i]-1) or next_id == args.ignoreid: # The last action is <end>
cpu_a_t[i] = -1 # Change the <end> and ignore action to -1
if traj_flag:
for i, _ in enumerate(ended):
if (not _) and (cpu_a_t[i] != -1):
traj[i]['path'] += temp_traj[i]
# Make action and get the new state
self.make_equiv_action(self.env, cpu_a_t, perm_obs, perm_idx, traj)
obs = np.array(self.env._get_obs())
            perm_obs = obs[perm_idx] # Perm the obs for the result
self.graphs.add_edge(perm_obs)
# Calculate the mask and reward
dist = np.zeros(batch_size, np.float32)
reward = np.zeros(batch_size, np.float32)
mask = np.ones(batch_size, np.float32)
for i, ob in enumerate(perm_obs):
dist[i] = ob['distance']
if ended[i]: # If the action is already finished BEFORE THIS ACTION.
reward[i] = 0.
mask[i] = 0.
else: # Calculate the reward
action_idx = cpu_a_t[i]
if action_idx == -1: # If the action now is end
if dist[i] < 3: # Correct
reward[i] = 2.
else: # Incorrect
reward[i] = -2.
else: # The action is not end
reward[i] = - (dist[i] - last_dist[i]) # Change of distance
if reward[i] > 0: # Quantification
reward[i] = 1
elif reward[i] < 0:
reward[i] = -1
else:
raise NameError("The action doesn't change the move")
rewards.append(reward)
masks.append(mask)
last_dist[:] = dist
# Update the finished actions
# -1 means ended or ignored (already ended)
traj_length += (t+1) * np.logical_and(ended == 0, (cpu_a_t == -1))
ended[:] = np.logical_or(ended, (cpu_a_t == -1))
# Early exit if all ended
if ended.all():
break
traj_length += self.episode_len * (ended == 0)
loss_mask = utils.length2mask(traj_length)
ml_loss_seq = torch.stack(ml_loss_list,1)
loss_weights = 1.0 - loss_mask.float()
ml_loss += (ml_loss_seq * loss_weights).sum()
explore_rate = explore_cnt / cnt
self.logs['explore_rate'].append(explore_rate)
self.logs['explore_cnt'].append(explore_cnt)
self.logs['all_cnt'].append(cnt)
if self.feedback != 'argmax':
discount_reward = np.zeros(batch_size, np.float32) # The inital reward is zero
length = len(rewards_policy)
for t in range(length-1,-1,-1):
mask = masks_policy[t]
discount_reward = discount_reward * (args.gamma*mask + (1-mask)) + rewards_policy[t]
mask_ = Variable(torch.from_numpy(mask), requires_grad=False).cuda()
clip_reward = discount_reward.copy()
r_ = Variable(torch.from_numpy(clip_reward), requires_grad=False).cuda().float()
v_ = self.critic_policy(policy_state[t])
a_ = (r_-v_).detach()
rl_loss_policy += (-log_prob_policy[t] * a_ * mask_).sum()
rl_loss_policy += (((r_ - v_) ** 2) * mask_).sum() * 0.5 # 1/2 L2 loss
self.logs['critic_policy_loss'].append((((r_ - v_) ** 2) * mask_).sum().item())
self.logs['discount_reward'].append(discount_reward)
if train_rl:
# Last action in A2C
input_a_t, f_t, candidate_feat, candidate_leng = self.get_input_feat(perm_obs)
if speaker is not None:
candidate_feat[..., :-args.angle_feat_size] *= noise
f_t[..., :-args.angle_feat_size] *= noise
last_h_, _, _, _,_,_ = self.decoder(input_a_t, f_t, candidate_feat,
h_t, h1, c_t,
ctx, ctx_mask,
speaker is not None)
rl_loss = 0.
# NOW, A2C!!!
# Calculate the final discounted reward
            last_value__ = self.critic(last_h_).detach()        # The value estimate of the last state, remove the grad for safety
            discount_reward = np.zeros(batch_size, np.float32)  # The initial reward is zero
for i in range(batch_size):
if not ended[i]: # If the action is not ended, use the value function as the last reward
discount_reward[i] = last_value__[i]
length = len(rewards)
total = 0
for t in range(length-1, -1, -1):
discount_reward = discount_reward * args.gamma + rewards[t] # If it ended, the reward will be 0
mask_ = Variable(torch.from_numpy(masks[t]), requires_grad=False).cuda()
clip_reward = discount_reward.copy()
r_ = Variable(torch.from_numpy(clip_reward), requires_grad=False).cuda()
v_ = self.critic(hidden_states[t])
a_ = (r_ - v_).detach()
# r_: The higher, the better. -ln(p(action)) * (discount_reward - value)
rl_loss += (-policy_log_probs[t] * a_ * mask_).sum()
rl_loss += (((r_ - v_) ** 2) * mask_).sum() * 0.5 # 1/2 L2 loss
if self.feedback == 'sample':
rl_loss += (- 0.01 * entropys[t] * mask_).sum()
self.logs['critic_loss'].append((((r_ - v_) ** 2) * mask_).sum().item())
total = total + np.sum(masks[t])
self.logs['total'].append(total)
# Normalize the loss function
if args.normalize_loss == 'total':
rl_loss /= total
elif args.normalize_loss == 'batch':
rl_loss /= batch_size
else:
assert args.normalize_loss == 'none'
self.loss += rl_loss
if train_ml is not None:
print('explore_rate', explore_rate)
self.loss += ml_loss * train_ml / batch_size
self.logs['losses_ml'].append((ml_loss * train_ml / batch_size).item() / self.episode_len)
rl_loss_exp /= batch_size
rl_loss_policy /= batch_size
if self.feedback != 'argmax':
print('label explore rate',total_explore/total_chance)
# if np.isnan(rl_loss_exp.item()):
# print('warning, nan is detected.','rl_loss_exp',rl_loss_exp)
# if np.isnan(rl_loss_policy.item()):
# print('warning, nan is detected.','rl_loss_policy',rl_loss_policy)
# self.loss += rl_loss_exp + rl_loss_policy
# self.loss += rl_loss_policy
ml_loss_policy = ml_loss_policy / batch_size
policy_entropy = policy_entropy / batch_size
self.loss += ml_loss_policy + policy_entropy
traj = self.late_action_taking(traj, self.graphs)
# self.loss += rl_loss_exp
if type(policy_entropy) is float or type(policy_entropy) is int:
self.logs['policy_entropy'].append(0.)
else:
self.logs['policy_entropy'].append(policy_entropy.item())
if type(ml_loss_policy) is float or type(ml_loss_policy) is int:
self.logs['ml_loss_policy'].append(0.)
else:
self.logs['ml_loss_policy'].append(ml_loss_policy.item())
if type(rl_loss_exp) is float or type(rl_loss_exp) is int:
self.logs['rl_loss_exp'].append(0.)
else:
self.logs['rl_loss_exp'].append(rl_loss_exp.item())
if type(rl_loss_policy) is float or type(rl_loss_policy) is int:
self.logs['rl_loss_policy'].append(0.)
else:
self.logs['rl_loss_policy'].append(rl_loss_policy.item())
if type(self.loss) is int or type(self.loss) is float: # For safety, it will be activated if no losses are added
self.losses.append(0.)
else:
self.losses.append(self.loss.item() / self.episode_len) # This argument is useless.
self.logs['traj'].append(traj)
self.logs['exp_traj'].append(exp_traj_all)
return traj
def _dijkstra(self):
"""
The dijkstra algorithm.
Was called beam search to be consistent with existing work.
        But it actually finds the exact K paths with the highest listener log_prob (i.e. the smallest negative log-probability).
:return:
[{
"scan": XXX
"instr_id":XXX,
        "instr_encoding": XXX
        "dijk_path": [v1, v2, ..., vn] (The path used to find all the candidates)
"paths": {
"trajectory": [viewpoint_id1, viewpoint_id2, ..., ],
"action": [act_1, act_2, ..., ],
"listener_scores": [log_prob_act1, log_prob_act2, ..., ],
"visual_feature": [(f1_step1, f2_step2, ...), (f1_step2, f2_step2, ...)
}
}]
"""
def make_state_id(viewpoint, action): # Make state id
return "%s_%s" % (viewpoint, str(action))
def decompose_state_id(state_id): # Make state id
viewpoint, action = state_id.split("_")
action = int(action)
return viewpoint, action
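        # Illustrative example (hypothetical viewpoint id): make_state_id("vpA", 3) -> "vpA_3",
        # decompose_state_id("vpA_3") -> ("vpA", 3); the start state below uses the dummy action -95.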
# Get first obs
obs = self.env._get_obs()
# Prepare the state id
batch_size = len(obs)
results = [{"scan": ob['scan'],
"instr_id": ob['instr_id'],
"instr_encoding": ob["instr_encoding"],
"dijk_path": [ob['viewpoint']],
"paths": []} for ob in obs]
# Encoder
seq, seq_mask, seq_lengths, perm_idx = self._sort_batch(obs)
recover_idx = np.zeros_like(perm_idx)
for i, idx in enumerate(perm_idx):
recover_idx[idx] = i
ctx, h_t, c_t = self.encoder(seq, seq_lengths)
ctx, h_t, c_t, ctx_mask = ctx[recover_idx], h_t[recover_idx], c_t[recover_idx], seq_mask[recover_idx] # Recover the original order
# Dijk Graph States:
id2state = [
{make_state_id(ob['viewpoint'], -95):
{"next_viewpoint": ob['viewpoint'],
"running_state": (h_t[i], h_t[i], c_t[i]),
"location": (ob['viewpoint'], ob['heading'], ob['elevation']),
"feature": None,
"from_state_id": None,
"score": 0,
"scores": [],
"actions": [],
}
}
for i, ob in enumerate(obs)
] # -95 is the start point
visited = [set() for _ in range(batch_size)]
finished = [set() for _ in range(batch_size)]
graphs = [utils.FloydGraph() for _ in range(batch_size)] # For the navigation path
ended = np.array([False] * batch_size)
# Dijk Algorithm
for _ in range(300):
            # Get the unvisited state with the highest accumulated score (log-probability) for each batch element.
            # If the batch element has not ended, pick that best item;
            # otherwise take an arbitrary item from the dict (it always exists).
smallest_idXstate = [
max(((state_id, state) for state_id, state in id2state[i].items() if state_id not in visited[i]),
key=lambda item: item[1]['score'])
if not ended[i]
else
next(iter(id2state[i].items()))
for i in range(batch_size)
]
# Set the visited and the end seqs
for i, (state_id, state) in enumerate(smallest_idXstate):
assert (ended[i]) or (state_id not in visited[i])
if not ended[i]:
viewpoint, action = decompose_state_id(state_id)
visited[i].add(state_id)
if action == -1:
finished[i].add(state_id)
if len(finished[i]) >= args.candidates: # Get enough candidates
ended[i] = True
# Gather the running state in the batch
h_ts, h1s, c_ts = zip(*(idXstate[1]['running_state'] for idXstate in smallest_idXstate))
h_t, h1, c_t = torch.stack(h_ts), torch.stack(h1s), torch.stack(c_ts)
# Recover the env and gather the feature
for i, (state_id, state) in enumerate(smallest_idXstate):
next_viewpoint = state['next_viewpoint']
scan = results[i]['scan']
from_viewpoint, heading, elevation = state['location']
self.env.env.sims[i].newEpisode(scan, next_viewpoint, heading, elevation) # Heading, elevation is not used in panoramic
obs = self.env._get_obs()
# Update the floyd graph
# Only used to shorten the navigation length
            # Will not affect the result
for i, ob in enumerate(obs):
viewpoint = ob['viewpoint']
if not graphs[i].visited(viewpoint): # Update the Graph
for c in ob['candidate']:
next_viewpoint = c['viewpointId']
dis = self.env.distances[ob['scan']][viewpoint][next_viewpoint]
graphs[i].add_edge(viewpoint, next_viewpoint, dis)
graphs[i].update(viewpoint)
results[i]['dijk_path'].extend(graphs[i].path(results[i]['dijk_path'][-1], viewpoint))
input_a_t, f_t, candidate_feat, candidate_leng = self.get_input_feat(obs)
# Run one decoding step
h_t, c_t, alpha, logit, h1 = self.decoder(input_a_t, f_t, candidate_feat,
h_t, h1, c_t,
ctx, ctx_mask,
False)
# Update the dijk graph's states with the newly visited viewpoint
candidate_mask = utils.length2mask(candidate_leng)
logit.masked_fill_(candidate_mask, -float('inf'))
log_probs = F.log_softmax(logit, 1) # Calculate the log_prob here
_, max_act = log_probs.max(1)
for i, ob in enumerate(obs):
current_viewpoint = ob['viewpoint']
candidate = ob['candidate']
current_state_id, current_state = smallest_idXstate[i]
old_viewpoint, from_action = decompose_state_id(current_state_id)
assert ob['viewpoint'] == current_state['next_viewpoint']
if from_action == -1 or ended[i]: # If the action is <end> or the batch is ended, skip it
continue
for j in range(len(ob['candidate']) + 1): # +1 to include the <end> action
# score + log_prob[action]
modified_log_prob = log_probs[i][j].detach().cpu().item()
new_score = current_state['score'] + modified_log_prob
if j < len(candidate): # A normal action
next_id = make_state_id(current_viewpoint, j)
next_viewpoint = candidate[j]['viewpointId']
trg_point = candidate[j]['pointId']
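                            # pointId indexes the discretized panorama (apparently 12 headings x 3 elevations,
                            # 30 degrees apart), so heading and elevation are recovered via % 12 and // 12 - 1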
heading = (trg_point % 12) * math.pi / 6
elevation = (trg_point // 12 - 1) * math.pi / 6
location = (next_viewpoint, heading, elevation)
else: # The end action
next_id = make_state_id(current_viewpoint, -1) # action is -1
next_viewpoint = current_viewpoint # next viewpoint is still here
location = (current_viewpoint, ob['heading'], ob['elevation'])
if next_id not in id2state[i] or new_score > id2state[i][next_id]['score']:
id2state[i][next_id] = {
"next_viewpoint": next_viewpoint,
"location": location,
"running_state": (h_t[i], h1[i], c_t[i]),
"from_state_id": current_state_id,
"feature": (f_t[i].detach().cpu(), candidate_feat[i][j].detach().cpu()),
"score": new_score,
"scores": current_state['scores'] + [modified_log_prob],
"actions": current_state['actions'] + [len(candidate)+1],
}
            # If no unvisited (i.e. active) state remains after this update, mark the batch element as ended
for i in range(batch_size):
if len(visited[i]) == len(id2state[i]): # It's the last active state
ended[i] = True
# End?
if ended.all():
break
# Move back to the start point
for i in range(batch_size):
results[i]['dijk_path'].extend(graphs[i].path(results[i]['dijk_path'][-1], results[i]['dijk_path'][0]))
"""
"paths": {
"trajectory": [viewpoint_id1, viewpoint_id2, ..., ],
"action": [act_1, act_2, ..., ],
"listener_scores": [log_prob_act1, log_prob_act2, ..., ],
"visual_feature": [(f1_step1, f2_step2, ...), (f1_step2, f2_step2, ...)
}
"""
# Gather the Path
for i, result in enumerate(results):
assert len(finished[i]) <= args.candidates
for state_id in finished[i]:
path_info = {
"trajectory": [],
"action": [],
"listener_scores": id2state[i][state_id]['scores'],
"listener_actions": id2state[i][state_id]['actions'],
"visual_feature": []
}
viewpoint, action = decompose_state_id(state_id)
while action != -95:
state = id2state[i][state_id]
path_info['trajectory'].append(state['location'])
path_info['action'].append(action)
path_info['visual_feature'].append(state['feature'])
state_id = id2state[i][state_id]['from_state_id']
viewpoint, action = decompose_state_id(state_id)
state = id2state[i][state_id]
path_info['trajectory'].append(state['location'])
for need_reverse_key in ["trajectory", "action", "visual_feature"]:
path_info[need_reverse_key] = path_info[need_reverse_key][::-1]
result['paths'].append(path_info)
return results
def _dijkstra_exp(self):
"""
The dijkstra algorithm.
Was called beam search to be consistent with existing work.
        But it actually finds the exact K paths with the highest listener log_prob (i.e. the smallest negative log-probability).
:return:
[{
"scan": XXX
"instr_id":XXX,
        "instr_encoding": XXX
        "dijk_path": [v1, v2, ..., vn] (The path used to find all the candidates)
"paths": {
"trajectory": [viewpoint_id1, viewpoint_id2, ..., ],
"action": [act_1, act_2, ..., ],
"listener_scores": [log_prob_act1, log_prob_act2, ..., ],
"visual_feature": [(f1_step1, f2_step2, ...), (f1_step2, f2_step2, ...)
}
}]
"""
def make_state_id(viewpoint, action): # Make state id
return "%s_%s" % (viewpoint, str(action))
def decompose_state_id(state_id): # Make state id
viewpoint, action = state_id.split("_")
action = int(action)
return viewpoint, action
self.feedback='argmax'
# Get first obs
obs = self.env._get_obs()
# Prepare the state id
batch_size = len(obs)
results = [{"scan": ob['scan'],
"instr_id": ob['instr_id'],
"instr_encoding": ob["instr_encoding"],
"dijk_path": [ob['viewpoint']],
"paths": []} for ob in obs]
# Encoder
seq, seq_mask, seq_lengths, perm_idx = self._sort_batch(obs)
recover_idx = np.zeros_like(perm_idx)
for i, idx in enumerate(perm_idx):
recover_idx[idx] = i
ctx, h_t, c_t = self.encoder(seq, seq_lengths)
ctx, h_t, c_t, ctx_mask = ctx[recover_idx], h_t[recover_idx], c_t[recover_idx], seq_mask[recover_idx] # Recover the original order
# Dijk Graph States:
id2state = [
{make_state_id(ob['viewpoint'], -95):
{"next_viewpoint": ob['viewpoint'],
"running_state": (h_t[i], h_t[i], c_t[i]),
"location": (ob['viewpoint'], ob['heading'], ob['elevation']),
"feature": None,
"from_state_id": None,
"score": 0,
"scores": [],
"actions": [],
}
}
for i, ob in enumerate(obs)
] # -95 is the start point
visited = [set() for _ in range(batch_size)]
finished = [set() for _ in range(batch_size)]
graphs = [utils.FloydGraph() for _ in range(batch_size)] # For the navigation path
ended = np.array([False] * batch_size)
# Dijk Algorithm
for _ in range(300):
            # Get the unvisited state with the highest accumulated score (log-probability) for each batch element.
            # If the batch element has not ended, pick that best item;
            # otherwise take an arbitrary item from the dict (it always exists).
smallest_idXstate = [
max(((state_id, state) for state_id, state in id2state[i].items() if state_id not in visited[i]),
key=lambda item: item[1]['score'])
if not ended[i]
else
next(iter(id2state[i].items()))
for i in range(batch_size)
]
# Set the visited and the end seqs
for i, (state_id, state) in enumerate(smallest_idXstate):
assert (ended[i]) or (state_id not in visited[i])
if not ended[i]:
viewpoint, action = decompose_state_id(state_id)
visited[i].add(state_id)
if action == -1:
finished[i].add(state_id)
if len(finished[i]) >= args.candidates: # Get enough candidates
ended[i] = True
# Gather the running state in the batch
h_ts, h1s, c_ts = zip(*(idXstate[1]['running_state'] for idXstate in smallest_idXstate))
h_t, h1, c_t = torch.stack(h_ts), torch.stack(h1s), torch.stack(c_ts)
# Recover the env and gather the feature
for i, (state_id, state) in enumerate(smallest_idXstate):
next_viewpoint = state['next_viewpoint']
scan = results[i]['scan']
from_viewpoint, heading, elevation = state['location']
self.env.env.sims[i].newEpisode(scan, next_viewpoint, heading, elevation) # Heading, elevation is not used in panoramic
obs = self.env._get_obs()
# Update the floyd graph
# Only used to shorten the navigation length
            # Will not affect the result
for i, ob in enumerate(obs):
viewpoint = ob['viewpoint']
if not graphs[i].visited(viewpoint): # Update the Graph
for c in ob['candidate']:
next_viewpoint = c['viewpointId']
dis = self.env.distances[ob['scan']][viewpoint][next_viewpoint]
graphs[i].add_edge(viewpoint, next_viewpoint, dis)
graphs[i].update(viewpoint)
results[i]['dijk_path'].extend(graphs[i].path(results[i]['dijk_path'][-1], viewpoint))
candidate_res = self.exploration(self.env,
h_t, h1, c_t,
ctx, ctx_mask,
True, batch_size,
perm_idx, None, None, None, True)
input_a_t, f_t, candidate_feat, candidate_leng = self.get_input_feat(obs)
f = candidate_feat[..., :-args.angle_feat_size] + candidate_res # bs x length x dim
a = candidate_feat[..., -args.angle_feat_size:]
cand_feat = torch.cat([f, a],-1)
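            # The exploration output is added only to the visual part of each candidate feature;
            # the trailing angle features are kept unchanged and concatenated back on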
# Run one decoding step
h_t, c_t, logit, h1, _, _ = self.decoder(input_a_t, f_t, cand_feat,
h_t, h1, c_t,
ctx, ctx_mask,
False)
# Update the dijk graph's states with the newly visited viewpoint
candidate_mask = utils.length2mask(candidate_leng)
logit.masked_fill_(candidate_mask, -float('inf'))
log_probs = F.log_softmax(logit, 1) # Calculate the log_prob here
_, max_act = log_probs.max(1)
for i, ob in enumerate(obs):
current_viewpoint = ob['viewpoint']
candidate = ob['candidate']
current_state_id, current_state = smallest_idXstate[i]
old_viewpoint, from_action = decompose_state_id(current_state_id)
assert ob['viewpoint'] == current_state['next_viewpoint']
if from_action == -1 or ended[i]: # If the action is <end> or the batch is ended, skip it
continue
for j in range(len(ob['candidate']) + 1): # +1 to include the <end> action
# score + log_prob[action]
modified_log_prob = log_probs[i][j].detach().cpu().item()
new_score = current_state['score'] + modified_log_prob
if j < len(candidate): # A normal action
next_id = make_state_id(current_viewpoint, j)
next_viewpoint = candidate[j]['viewpointId']
trg_point = candidate[j]['pointId']
heading = (trg_point % 12) * math.pi / 6
elevation = (trg_point // 12 - 1) * math.pi / 6
location = (next_viewpoint, heading, elevation)
else: # The end action
next_id = make_state_id(current_viewpoint, -1) # action is -1
next_viewpoint = current_viewpoint # next viewpoint is still here
location = (current_viewpoint, ob['heading'], ob['elevation'])
if next_id not in id2state[i] or new_score > id2state[i][next_id]['score']:
id2state[i][next_id] = {
"next_viewpoint": next_viewpoint,
"location": location,
"running_state": (h_t[i], h1[i], c_t[i]),
"from_state_id": current_state_id,
"feature": (f_t[i].detach().cpu(), candidate_feat[i][j].detach().cpu()),
"score": new_score,
"scores": current_state['scores'] + [modified_log_prob],
"actions": current_state['actions'] + [len(candidate)+1],
}
            # If no unvisited (i.e. active) state remains after this update, mark the batch element as ended
for i in range(batch_size):
if len(visited[i]) == len(id2state[i]): # It's the last active state
ended[i] = True
# End?
if ended.all():
break
# Move back to the start point
for i in range(batch_size):
results[i]['dijk_path'].extend(graphs[i].path(results[i]['dijk_path'][-1], results[i]['dijk_path'][0]))
"""
"paths": {
"trajectory": [viewpoint_id1, viewpoint_id2, ..., ],
"action": [act_1, act_2, ..., ],
"listener_scores": [log_prob_act1, log_prob_act2, ..., ],
"visual_feature": [(f1_step1, f2_step2, ...), (f1_step2, f2_step2, ...)
}
"""
# Gather the Path
for i, result in enumerate(results):
assert len(finished[i]) <= args.candidates
for state_id in finished[i]:
path_info = {
"trajectory": [],
"action": [],
"listener_scores": id2state[i][state_id]['scores'],
"listener_actions": id2state[i][state_id]['actions'],
"visual_feature": []
}
viewpoint, action = decompose_state_id(state_id)
while action != -95:
state = id2state[i][state_id]
path_info['trajectory'].append(state['location'])
path_info['action'].append(action)
path_info['visual_feature'].append(state['feature'])
state_id = id2state[i][state_id]['from_state_id']
viewpoint, action = decompose_state_id(state_id)
state = id2state[i][state_id]
path_info['trajectory'].append(state['location'])
for need_reverse_key in ["trajectory", "action", "visual_feature"]:
path_info[need_reverse_key] = path_info[need_reverse_key][::-1]
result['paths'].append(path_info)
return results
def beam_search(self, speaker):
"""
:param speaker: The speaker to be used in searching.
:return:
{
"scan": XXX
"instr_id":XXX,
"instr_encoding": XXX
"dijk_path": [v1, v2, ...., vn]
"paths": [{
"trajectory": [viewoint_id0, viewpoint_id1, viewpoint_id2, ..., ],
"action": [act_1, act_2, ..., ],
"listener_scores": [log_prob_act1, log_prob_act2, ..., ],
"speaker_scores": [log_prob_word1, log_prob_word2, ..., ],
}]
}
"""
self.env.reset()
# results = self._dijkstra()
results = self._dijkstra_exp()
"""
return from self._dijkstra()
[{
"scan": XXX
"instr_id":XXX,
"instr_encoding": XXX
"dijk_path": [v1, v2, ...., vn]
"paths": [{
"trajectory": [viewoint_id0, viewpoint_id1, viewpoint_id2, ..., ],
"action": [act_1, act_2, ..., ],
"listener_scores": [log_prob_act1, log_prob_act2, ..., ],
"visual_feature": [(f1_step1, f2_step2, ...), (f1_step2, f2_step2, ...)
}]
}]
"""
# Compute the speaker scores:
for result in results:
lengths = []
num_paths = len(result['paths'])
for path in result['paths']:
assert len(path['trajectory']) == (len(path['visual_feature']) + 1)
lengths.append(len(path['visual_feature']))
max_len = max(lengths)
img_feats = torch.zeros(num_paths, max_len, 36, self.feature_size + args.angle_feat_size)
can_feats = torch.zeros(num_paths, max_len, self.feature_size + args.angle_feat_size)
for j, path in enumerate(result['paths']):
for k, feat in enumerate(path['visual_feature']):
img_feat, can_feat = feat
img_feats[j][k] = img_feat
can_feats[j][k] = can_feat
img_feats, can_feats = img_feats.cuda(), can_feats.cuda()
features = ((img_feats, can_feats), lengths)
insts = np.array([result['instr_encoding'] for _ in range(num_paths)])
seq_lengths = np.argmax(insts == self.tok.word_to_index['<EOS>'], axis=1) # len(seq + 'BOS') == len(seq + 'EOS')
insts = torch.from_numpy(insts).cuda()
speaker_scores = speaker.teacher_forcing(train=True, features=features, insts=insts, for_listener=True)
for j, path in enumerate(result['paths']):
path.pop("visual_feature")
path['speaker_scores'] = -speaker_scores[j].detach().cpu().numpy()[:seq_lengths[j]]
return results
def beam_search_test(self, speaker):
self.encoder.eval()
self.decoder.eval()
self.linear.eval()
self.critic.eval()
looped = False
self.results = {}
while True:
for traj in self.beam_search(speaker):
if traj['instr_id'] in self.results:
looped = True
else:
self.results[traj['instr_id']] = traj
if looped:
break
def test(self, use_dropout=False, feedback='argmax', allow_cheat=False, iters=None, **kwargs):
''' Evaluate once on each instruction in the current environment '''
self.feedback = feedback
if use_dropout:
self.encoder.train()
self.decoder.train()
self.linear.train()
self.explorer.train()
self.policy.train()
self.critic.train()
self.critic_exp.train()
self.critic_policy.train()
else:
self.encoder.eval()
self.decoder.eval()
self.linear.eval()
self.explorer.eval()
self.policy.eval()
self.critic.eval()
self.critic_exp.eval()
self.critic_policy.eval()
with torch.no_grad():
super(ActiveExplore_v1, self).test(iters, **kwargs)
def zero_grad(self):
self.loss = 0.
self.losses = []
for model, optimizer in zip(self.models, self.optimizers):
model.train()
optimizer.zero_grad()
def accumulate_gradient(self, feedback='teacher', **kwargs):
if feedback == 'teacher':
self.feedback = 'teacher'
self.rollout(train_ml=args.teacher_weight, train_rl=False, **kwargs)
elif feedback == 'sample':
self.feedback = 'teacher'
self.rollout(train_ml=args.ml_weight, train_rl=False, **kwargs)
self.feedback = 'sample'
self.rollout(train_ml=None, train_rl=True, **kwargs)
else:
assert False
def optim_step(self):
self.loss.backward()
torch.nn.utils.clip_grad_norm(self.encoder.parameters(), 40.)
torch.nn.utils.clip_grad_norm(self.decoder.parameters(), 40.)
torch.nn.utils.clip_grad_norm(self.explorer.parameters(), 40.)
self.encoder_optimizer.step()
self.decoder_optimizer.step()
self.explorer_optimizer.step()
self.critic_optimizer.step()
self.critic_exp_optimizer.step()
def train(self, n_iters, feedback='teacher', **kwargs):
''' Train for a given number of iterations '''
self.feedback = feedback
self.encoder.train()
self.decoder.train()
self.linear.train()
self.explorer.train()
self.policy.train()
self.critic.train()
self.critic_exp.train()
self.critic_policy.train()
self.losses = []
for iter in range(1, n_iters + 1):
# print()
# print('======================= start rollout ',iter,' ===========================')
# print()
self.encoder_optimizer.zero_grad()
self.decoder_optimizer.zero_grad()
self.explorer_optimizer.zero_grad()
self.linear_optimizer.zero_grad()
self.policy_optimizer.zero_grad()
self.critic_optimizer.zero_grad()
self.critic_exp_optimizer.zero_grad()
self.critic_policy_optimizer.zero_grad()
self.loss = 0
if feedback == 'teacher':
self.feedback = 'teacher'
self.rollout(train_ml=args.teacher_weight, train_rl=False, train_exp=True, **kwargs)
elif feedback == 'sample':
if args.ml_weight != 0:
self.feedback = 'teacher'
self.rollout(train_ml=args.ml_weight, train_rl=False, train_exp=True, **kwargs)
# self.feedback = 'sample'
# self.rollout(train_ml=None, train_rl=True, **kwargs)
else:
assert False
self.loss.backward()
torch.nn.utils.clip_grad_norm(self.encoder.parameters(), 40.)
torch.nn.utils.clip_grad_norm(self.decoder.parameters(), 40.)
torch.nn.utils.clip_grad_norm(self.explorer.parameters(), 40.)
# self.encoder_optimizer.step()
# self.decoder_optimizer.step()
self.explorer_optimizer.step()
self.linear_optimizer.step()
# self.policy_optimizer.step()
# self.critic_optimizer.step()
# self.critic_exp_optimizer.step()
# self.critic_policy_optimizer.step()
# torch.cuda.empty_cache()
def save(self, epoch, path):
''' Snapshot models '''
the_dir, _ = os.path.split(path)
os.makedirs(the_dir, exist_ok=True)
states = {}
def create_state(name, model, optimizer):
states[name] = {
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}
all_tuple = [("encoder", self.encoder, self.encoder_optimizer),
("decoder", self.decoder, self.decoder_optimizer),
("policy", self.policy, self.policy_optimizer),
("explorer", self.explorer, self.explorer_optimizer),
("linear", self.linear, self.linear_optimizer),
("critic", self.critic, self.critic_optimizer),
("critic_exp", self.critic_exp, self.critic_exp_optimizer),
("critic_policy", self.critic_policy, self.critic_policy_optimizer)
]
for param in all_tuple:
create_state(*param)
torch.save(states, path)
def load(self, path, part=False):
''' Loads parameters (but not training state) '''
states = torch.load(path)
def recover_state(name, model, optimizer):
state = model.state_dict()
model_keys = set(state.keys())
load_keys = set(states[name]['state_dict'].keys())
if model_keys != load_keys:
print("NOTICE: DIFFERENT KEYS IN THE LISTEREN")
state.update(states[name]['state_dict'])
model.load_state_dict(state)
if args.loadOptim:
optimizer.load_state_dict(states[name]['optimizer'])
if part:
all_tuple = [("encoder", self.encoder, self.encoder_optimizer),
("decoder", self.decoder, self.decoder_optimizer),
("critic", self.critic, self.critic_optimizer)
]
else:
all_tuple = [("encoder", self.encoder, self.encoder_optimizer),
("decoder", self.decoder, self.decoder_optimizer),
# ("policy", self.policy, self.policy_optimizer),
("decoder", self.explorer, self.explorer_optimizer),
("linear", self.linear, self.linear_optimizer),
# ("critic", self.critic, self.critic_optimizer),
# ("critic_exp", self.critic_exp, self.critic_exp_optimizer),
# ("critic_policy", self.critic_policy, self.critic_policy_optimizer)
]
for param in all_tuple:
recover_state(*param)
return states['encoder']['epoch'] - 1
def load_eval(self, path, part=False):
''' Loads parameters (but not training state) '''
states = torch.load(path)
def recover_state(name, model, optimizer):
state = model.state_dict()
model_keys = set(state.keys())
load_keys = set(states[name]['state_dict'].keys())
if model_keys != load_keys:
print("NOTICE: DIFFERENT KEYS IN THE LISTEREN")
state.update(states[name]['state_dict'])
model.load_state_dict(state)
if args.loadOptim:
optimizer.load_state_dict(states[name]['optimizer'])
if part:
all_tuple = [("encoder", self.encoder, self.encoder_optimizer),
("decoder", self.decoder, self.decoder_optimizer),
("critic", self.critic, self.critic_optimizer)
]
else:
all_tuple = [("encoder", self.encoder, self.encoder_optimizer),
("decoder", self.decoder, self.decoder_optimizer),
("policy", self.policy, self.policy_optimizer),
("explorer", self.explorer, self.explorer_optimizer),
("linear", self.linear, self.linear_optimizer),
("critic", self.critic, self.critic_optimizer),
("critic_exp", self.critic_exp, self.critic_exp_optimizer),
("critic_policy", self.critic_policy, self.critic_policy_optimizer)
]
for param in all_tuple:
recover_state(*param)
return states['encoder']['epoch'] - 1
def late_action_taking(self, traj, graph: GraphBatch):
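        # Rewrites each trajectory: consecutive, not-yet-visited viewpoints are reconnected
        # through the shortest paths stored in the graph batch (paths[i][u][v])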
res = []
paths = graph.get_paths()
for i, case in enumerate(traj):
data = {}
data['instr_id'] = case['instr_id']
path = case['path']
tmp = [path[0]]
vs = set([path[0][0]])
for p in path:
if p[0] in vs:
continue
if p[0] == path[-1][0]:
break
u = tmp[-1][0]
v = p[0]
# print(u, v)
tunnel = paths[i][u][v]
# tmp.append(p)
for vp in tunnel:
tmp.append((vp,0,0))
vs.add(p[0])
tmp.append(path[-1])
data['path'] = tmp
res.append(data)
return res
|
the-stack_106_26062 | class Node:
def __init__(self, key):
self.data = key
self.left = None
self.right = None
def find_lca(root, n1, n2):
path1 = []
path2 = []
# Find paths from root to n1 and root to n2.
# if either n1 or n2 not present return -1
if not find_path(root, path1, n1) or not find_path(root, path2, n2):
return -1
i = 0
# compare paths to get first different value
while i < len(path1):
if path1[i] != path2[i]:
break
i += 1
return path1[i - 1]
"""
Function to find the path from root node to
given root of the tree, Stores the path in a
list path[], returns true if path exists
otherwise false
"""
def find_path(root, path, k):
# base case
if not root:
return False
"""
Store this node in path list.
The node will be removed if
not in path from root to k
"""
path.append(root.data)
if root.data == k:
return True
# Check if k is found in left or right sub-tree
if (root.left and find_path(root.left, path, k)) or (root.right and find_path(root.right, path, k)):
return True
# If not present in subtree rooted with root,
# remove root from path[] and return false
path.pop()
return False
"""
If we assume that the keys are present in Binary Tree,
we can find LCA using single traversal of Binary Tree
and without extra storage for path arrays.
The idea is to traverse the tree starting from the root node.
If any of the given keys (n1 and n2) matches with root,
then root is LCA (assuming that both keys are present).
If root doesn't match with any of the keys, we recur for left and right subtrees.
The node which has one key present in its left subtree
and the other key present in the right subtree is the LCA.
If both keys lie in left subtree, then left subtree has LCA also, otherwise,
LCA lies in the right subtree
"""
def find_lca_method2(root, n1, n2):
if not root:
return
# if n1 or n2 matches root key then that would be LCA
if root.data == n1 or root.data == n2:
return root.data
left_lca = find_lca_method2(root.left, n1, n2)
right_lca = find_lca_method2(root.right, n1, n2)
if left_lca and right_lca:
return root.data
return left_lca if left_lca else right_lca
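# Note: find_lca_method2 assumes both keys exist in the tree; if only one of n1/n2 is present,
# that key itself is returned, so check for presence separately when that matters.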
if __name__ == '__main__':
root = Node(1)
root.left = Node(2)
root.right = Node(3)
root.left.left = Node(4)
root.left.right = Node(5)
root.right.left = Node(6)
root.right.right = Node(7)
print("LCA(4, 5) = {}".format(find_lca(root, 4, 5)))
print("LCA(4, 6) = {}".format(find_lca(root, 4, 6)))
print("LCA(3, 4) = {}".format(find_lca(root, 3, 4)))
print("LCA(2, 4) = {}".format(find_lca(root, 2, 4)))
print("Using method2")
print("LCA(4, 5) = {}".format(find_lca_method2(root, 4, 5)))
print("LCA(4, 6) = {}".format(find_lca_method2(root, 4, 6)))
print("LCA(3, 4) = {}".format(find_lca_method2(root, 3, 4)))
print("LCA(2, 4) = {}".format(find_lca_method2(root, 2, 4)))
"""
The time complexity of the above solution is O(N)
where N is the number of nodes in the given Tree
and the above solution also takes O(N) extra space
So, basically it requires two tree traversals (one find_path call per key) plus extra space for the path arrays.
"""
|
the-stack_106_26065 | import tensorflow as tf
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10, activation='softmax')
])
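# A small feed-forward classifier: 28x28 images are flattened to 784 values, passed through a
# 128-unit ReLU layer with 20% dropout, and mapped to a 10-way softmax over the digit classes.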
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5)
model.evaluate(x_test, y_test, verbose=2) |
the-stack_106_26067 | # coding=utf-8
import logging
__author__ = 'ThucNC'
import os
from PIL import Image
from tinify import tinify
_logger = logging.getLogger(__name__)
class ImageOptimizer:
"""
For best result:
1. use png as source file,
2. make cover and thumbnail and optimize them,
3. convert to jpg if needed as the last step
For facebook Images in Link Shares
Use images that are at least 1200 x 630 pixels for the best display on high resolution devices.
At the minimum, you should use images that are 600 x 315
"""
def __init__(self, tinify_api_key=None):
self._tinify_api_key = tinify_api_key
pass
def make_thumbnail(self, from_file_path, to_file_path=None, suffix="_thumb", width=640, height=360):
if not to_file_path:
filename, file_extension = os.path.splitext(from_file_path)
to_file_path = filename + suffix + file_extension
self.image_optimize(from_file_path, to_file_path, width=width, height=height, make_thumb=True)
def make_cover(self, from_file_path, to_file_path=None, suffix="_cover", width=1280, height=720):
if not to_file_path:
filename, file_extension = os.path.splitext(from_file_path)
to_file_path = filename + suffix + file_extension
self.image_optimize(from_file_path, to_file_path, width=width, height=height, make_cover=True)
def image_optimize(self, from_file_path, to_file_path=None,
width=None, height=None,
make_thumb=False, make_cover=False):
"""
        Optimize (and optionally resize) an image using the tinify backend.
        Supports png and jpeg.
        :param from_file_path: source image path
        :param to_file_path: output path; defaults to overwriting the source file
        :return: None
"""
if self._tinify_api_key:
tf = tinify.get_instance()
tf.key = self._tinify_api_key
source = tf.from_file(from_file_path)
if not to_file_path:
to_file_path = from_file_path
if make_thumb:
source = source.resize(
method="thumb",
width=width,
height=height
)
elif make_cover:
source = source.resize(
method="cover",
width=width,
height=height
)
else:
if width or height:
if width and height:
source = source.resize(
method="fit",
width=width,
height=height
)
elif width:
source = source.resize(
method="scale",
width=width,
)
else:
source = source.resize(
method="scale",
height=height
)
source.to_file(to_file_path)
else:
raise Exception("Only tinify backend is currently supported!")
def image_to_jpg(self, from_file_path, to_file_path=None, max_width=0, max_height=0, quality=70):
if not to_file_path:
filename, file_extension = os.path.splitext(from_file_path)
to_file_path = filename + '.jpg'
return self.image_convert(from_file_path, to_file_path, max_width, max_height, quality)
def image_convert(self, from_file_path, to_file_path=None, max_width=0, max_height=0, quality=70):
image = Image.open(from_file_path)
to_file_name, to_file_extension = os.path.splitext(to_file_path)
if to_file_extension == ".jpg":
if image.mode != 'RGB':
image = image.convert('RGB')
if not to_file_path:
to_file_path = from_file_path
if max_width or max_height:
image.thumbnail((max_width, max_height))
image.save(to_file_path, quality=quality)
return to_file_path
if __name__ == "__main__":
img_opti = ImageOptimizer(tinify_api_key="8kStJZxfd9FprXb9cDL2mtkmN421XCqD")
ifile = "/home/thuc/Documents/blog.png"
# ofile = "/home/thuc/Documents/blog_opti.png"
# image_convert(ifile, ofile, 1280, 360)
# img_opti.image_to_jpg(ifile)
img_opti.make_cover(ifile)
img_opti.make_thumbnail(ifile)
jpg_file = img_opti.image_to_jpg(ifile)
img_opti.make_cover(jpg_file)
img_opti.make_thumbnail(jpg_file)
# img_opti.image_to_jpg("/home/thuc/Documents/blog.png", quality=50)
# img_opti.image_to_jpg("/home/thuc/Documents/blog_thumb.png", quality=50)
# img_opti.image_optimize(ifile, ofile, width=630, height=480, make_thumb=True)
|
the-stack_106_26068 | #!/usr/bin/env python
#ckwg +28
# Copyright 2011-2013 by Kitware, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither name of Kitware, Inc. nor the names of any contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def test_import():
try:
import sprokit.pipeline.process
except:
test_error("Failed to import the process module")
def test_create():
from sprokit.pipeline import datum
from sprokit.pipeline import process
process.ProcessTypes()
process.ProcessNames()
process.ProcessProperties()
process.PortFrequency(1)
process.PortFrequency(1, 1)
process.Ports()
process.PortFlags()
process.PortAddr()
process.PortAddrs()
process.Connection()
process.Connections()
process.PortInfo('type', process.PortFlags(), 'desc', process.PortFrequency(1, 1))
process.ConfInfo('default', 'desc', False)
process.DataInfo(True, datum.DatumType.invalid)
process.DataCheck.none
process.DataCheck.sync
process.DataCheck.valid
def test_api_calls():
from sprokit.pipeline import datum
from sprokit.pipeline import process
a = process.PortAddr()
a.process
a.port
a.process = ''
a.port = ''
f = process.PortFrequency(1, 1)
a = process.PortInfo('type', process.PortFlags(), 'desc', f)
a.type
a.flags
a.description
a.frequency
a = process.ConfInfo('default', 'desc', False)
a.default
a.description
a.tunable
a = process.DataInfo(True, datum.DatumType.invalid)
a.in_sync
a.max_status
process.PythonProcess.property_no_threads
process.PythonProcess.property_no_reentrancy
process.PythonProcess.property_unsync_input
process.PythonProcess.property_unsync_output
process.PythonProcess.port_heartbeat
process.PythonProcess.config_name
process.PythonProcess.config_type
process.PythonProcess.type_any
process.PythonProcess.type_none
process.PythonProcess.type_data_dependent
process.PythonProcess.type_flow_dependent
process.PythonProcess.flag_output_const
process.PythonProcess.flag_output_shared
process.PythonProcess.flag_input_static
process.PythonProcess.flag_input_mutable
process.PythonProcess.flag_input_nodep
process.PythonProcess.flag_required
def test_flags_as_set():
from sprokit.pipeline import process
# TODO: Make tests more rigorous (check more than just len()).
a = process.PortFlags()
# adding to the set
a.add(process.PythonProcess.flag_required)
a.add(process.PythonProcess.flag_input_mutable)
a.add(process.PythonProcess.flag_input_nodep)
a.add(process.PythonProcess.flag_input_static)
# length
if not len(a) == 4:
test_error("len() does not work: expected 4, got %d" % len(a))
# adding duplicate values
a.add(process.PythonProcess.flag_required)
if not len(a) == 4:
test_error(".add() added a duplicate item: expected 4, got %d" % len(a))
# adding invalid objects
expect_exception('adding a value of an invalid type', TypeError,
process.PortFlags.add, a, True),
# 'in' keyword
if process.PythonProcess.flag_required not in a:
test_error("a value in the set is 'not in' the set")
if process.PythonProcess.flag_output_const in a:
test_error("a value not in the set is 'in' the set")
# iteration
for value in a:
pass
# boolean casting
if not a:
test_error("a non-empty set is False-like")
b = process.PortFlags()
if b:
test_error("an empty set is True-like")
# removal
expect_exception('.pop() on an empty set', KeyError,
process.PortFlags.pop, b)
expect_exception('.remove() with an item that does not exist in the set', KeyError,
process.PortFlags.remove, a, process.PythonProcess.flag_output_const)
a.discard(process.PythonProcess.flag_output_const)
if not len(a) == 4:
test_error(".discard() removed an item not in the set")
a.discard(process.PythonProcess.flag_input_static)
if not len(a) == 3:
test_error(".discard() did not remove an item from the set")
a.remove(process.PythonProcess.flag_input_nodep)
if not len(a) == 2:
test_error(".remove() did not remove an item from the set")
a.pop()
if not len(a) == 1:
test_error(".pop() did not remove an item from the set")
a.clear()
if a:
test_error(".clear() did not make a False-like set")
# copy
b.add(process.PythonProcess.flag_required)
c = b.copy()
b.clear()
if not c:
test_error(".clear() on a set modified a set created using .copy()")
c = b.copy()
b.add(process.PythonProcess.flag_required)
if c:
test_error(".add() on a set modified a set created using .copy()")
# set vs. set queries
a.add(process.PythonProcess.flag_input_nodep)
a.add(process.PythonProcess.flag_input_static)
if not b.isdisjoint(a):
test_error(".isdisjoint() does not work")
if b.issubset(a):
test_error(".issubset() does not work")
if a.issuperset(b):
test_error(".issuperset() does not work")
a.add(process.PythonProcess.flag_required)
if b.isdisjoint(a):
test_error(".isdisjoint() does not work")
if not b.issubset(a):
test_error(".issubset() does not work")
if not a.issuperset(b):
test_error(".issuperset() does not work")
u = a.union(b)
if not len(u) == 3:
test_error(".union() does not work: expected 3, got %d" % len(u))
d = a.difference(b)
if not len(d) == 2:
test_error(".difference() does not work: expected 2, got %d" % len(d))
i = a.intersection(b)
if not len(i) == 1:
test_error(".intersection() does not work: expected 1, got %d" % len(i))
b.add(process.PythonProcess.flag_output_const)
s = a.symmetric_difference(b)
if not len(s) == 3:
test_error(".symmetric_difference() does not work: expected 3, got %d" % len(s))
a.update(b)
if not len(a) == 4:
test_error(".update() does not work: expected 4, got %d" % len(a))
if __name__ == '__main__':
import os
import sys
if not len(sys.argv) == 4:
test_error("Expected three arguments( test-name, data-dir, path")
sys.exit(1)
testname = sys.argv[1]
os.chdir(sys.argv[2])
sys.path.append(sys.argv[3])
from sprokit.test.test import *
run_test(testname, find_tests(locals()))
|
the-stack_106_26069 | """This module contains the meta information of ConfigGetEstimateImpact ExternalMethod."""
import sys, os
from ..ucscoremeta import MethodMeta, MethodPropertyMeta
method_meta = MethodMeta("ConfigGetEstimateImpact", "configGetEstimateImpact", "Version142b")
prop_meta = {
"cookie": MethodPropertyMeta("Cookie", "cookie", "Xs:string", "Version142b", "InputOutput", False),
"in_configs": MethodPropertyMeta("InConfigs", "inConfigs", "ConfigMap", "Version142b", "Input", True),
"in_deleted_dns": MethodPropertyMeta("InDeletedDns", "inDeletedDns", "DnSet", "Version142b", "Input", True),
"in_impact_analyzer_id": MethodPropertyMeta("InImpactAnalyzerId", "inImpactAnalyzerId", "DateTime", "Version142b", "Input", False),
"in_is_policy_full_config": MethodPropertyMeta("InIsPolicyFullConfig", "inIsPolicyFullConfig", "Xs:string", "Version142b", "Input", False),
"in_source_connector_id": MethodPropertyMeta("InSourceConnectorId", "inSourceConnectorId", "Xs:unsignedInt", "Version142b", "Input", False),
"out_app_impact_response": MethodPropertyMeta("OutAppImpactResponse", "outAppImpactResponse", "ConfigConfig", "Version142b", "Output", True),
"out_retry": MethodPropertyMeta("OutRetry", "outRetry", "Xs:unsignedShort", "Version142b", "Output", False),
}
prop_map = {
"cookie": "cookie",
"inConfigs": "in_configs",
"inDeletedDns": "in_deleted_dns",
"inImpactAnalyzerId": "in_impact_analyzer_id",
"inIsPolicyFullConfig": "in_is_policy_full_config",
"inSourceConnectorId": "in_source_connector_id",
"outAppImpactResponse": "out_app_impact_response",
"outRetry": "out_retry",
}
|
the-stack_106_26070 | from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from error.models import Error
from issues.models import Issue
from notifications.models import Notification
from appengine_django.auth.models import User as AppUser
from google.appengine.api.users import User
from profiles.utils import get_profile
from app.tests import test_data
from django.core import mail
def create_user():
return AppUser(user=User(email="[email protected]"),
username="test",
email="[email protected]",
is_staff=True).save()
class ErrorTests(TestCase):
# test the view for writing errors
def setUp(self):
for error in Error.all(): error.delete()
for notification in Notification.all(): notification.delete()
for user in AppUser.all(): user.delete()
for issue in Issue.all(): issue.delete()
def testBasic(self):
c = Client()
assert not Error.all().count()
c.post(reverse("error-post"), test_data)
assert test_data["priority"] < 5, test_data["priority"]
assert Error.all().count() == 1
c.post(reverse("error-post"), test_data)
assert test_data["priority"] < 5, test_data["priority"]
assert Error.all().count() == 2
def testNoNotification(self):
c = Client()
assert not Error.all().count()
data = test_data.copy()
data["priority"] = 6
c.post(reverse("error-post"), data)
assert data["priority"] > 5, data["priority"]
assert Error.all().count() == 1
assert Notification.all().count() == 0
def testProfile(self):
user = create_user()
c = Client()
data = test_data.copy()
data["priority"] = 6
c.post(reverse("error-post"), data)
assert Notification.all().count() == 0, Notification.all().count()
data["priority"] = 5
c.post(reverse("error-post"), data)
assert Notification.all().count() == 1
profile = get_profile(user)
profile.notification = 8
data["priority"] = 5
c.post(reverse("error-post"), data)
assert Notification.all().count() == 2
data["priority"] = 8
c.post(reverse("error-post"), data)
assert Notification.all().count() == 2
data["priority"] = 9
c.post(reverse("error-post"), data)
assert Notification.all().count() == 2
def testNotificationNoUsers(self):
c = Client()
c.post(reverse("error-post"), test_data)
assert Notification.all().count() == 0
def testIssueAndErrorNotification(self):
user = create_user()
issue = Issue()
issue.description = "This is a test"
issue.save()
assert Issue.all().count() == 1
c = Client()
c.post(reverse("error-post"), test_data)
#assert Notification.all().count() == 2
# this would be 2 when issues are turned on
assert Notification.all().count() == 1
c = Client()
res = c.get(reverse("notification-send"))
self.assertEquals(len(mail.outbox), 1)
# this is to check that at the moment issue notifications don't get sent
def testIssueNotification(self):
user = create_user()
issue = Issue()
issue.description = "This is a test"
issue.save()
assert Issue.all().count() == 1
#assert Notification.all().count() == 1
#assert Notification.all()[0].type == "Issue"
def testCron(self):
user = create_user()
self.testBasic()
# now test our sending actually works
c = Client()
res = c.get(reverse("notification-send"))
self.assertEquals(len(mail.outbox), 1) |
the-stack_106_26071 | # Copyright (C) 2017, Anthony Oteri
# All rights reserved.
"""Control panel for the time clock."""
from __future__ import absolute_import
import logging
import time
import Tkinter as tk
import ttk
from datetime import datetime
from chronos import event
from chronos.db import ProjectService, RecordService
from chronos.utils import human_time
log = logging.getLogger(__name__)
class Clock(ttk.Frame):
"""Frame which allows the user to start/stop the timeclock."""
POLLING_INTERVAL_MS = 1000
def __init__(self, master):
"""Construct the frame and initialize the internal state."""
ttk.Frame.__init__(self, master)
self.project_list = set()
self.active_project_start_ts = None
self.clock_status = tk.StringVar()
self.active_project = tk.StringVar()
self.elapsed_time = tk.StringVar()
# TODO: Use dependency injection for these services.
self.project_service = ProjectService()
self.record_service = RecordService()
self.configure_layout()
self.create_widgets()
self.poll()
self.on_startup()
event.register(self.update)
def configure_layout(self):
for row in xrange(50):
self.rowconfigure(row, weight=1)
for col in xrange(24):
self.columnconfigure(col, weight=1)
def create_widgets(self):
"""Construct and place the UI widgets on screen."""
ttk.Label(self, text='Clock Status').grid(row=0,
column=0,
columnspan=8,
sticky='w')
ttk.Label(self, textvariable=self.clock_status).grid(row=0,
column=14,
columnspan=10,
sticky='w')
ttk.Label(self, text='Project').grid(row=1,
column=0,
columnspan=8,
sticky='w')
ttk.Label(self, textvariable=self.active_project).grid(row=1,
column=14,
columnspan=10,
sticky='w')
ttk.Label(self, text='Elapsed').grid(row=2,
column=0,
columnspan=8,
sticky='w')
ttk.Label(self, textvariable=self.elapsed_time).grid(row=2,
column=14,
columnspan=10,
sticky='w')
ttk.Label(self, text="Select Project").grid(row=48,
column=0,
columnspan=24,
sticky='w')
self.box = ttk.Combobox(self,
textvariable=self.active_project,
width=24)
self.box.grid(row=49, column=0, columnspan=24, sticky='e')
self.start_button = ttk.Button(self,
text='Start',
command=self.on_start)
self.start_button.grid(row=50, column=12, columnspan=6, sticky='e')
self.stop_button = ttk.Button(self, text='Stop', command=self.on_stop)
self.stop_button.grid(row=50, column=18, columnspan=6, sticky='e')
@event.notify
def on_startup(self):
"""Determine status of last exit and set the state accordingly."""
last_ongoing = self.record_service.ongoing()
if last_ongoing is not None:
log.info("Resuming active project %s, started at %s",
last_ongoing['project'],
str(datetime.utcfromtimestamp(last_ongoing['start'])))
self.active_project.set(last_ongoing['project'])
self.active_project_start_ts = last_ongoing['start']
def load(self):
"""Load the project list from the database."""
self.project_list.clear()
for row in self.project_service.list():
try:
self.project_list.add(row['name'])
except KeyError:
log.debug("The database may not be ready.")
continue
def update(self):
"""Refresh the internal state of the object from the database."""
self.load()
# Reset the list of projects displayed in the dropdown.
self.box['values'] = sorted(self.project_list)
# Handle the case where the active project has been deleted, in that
# case, we need to clear the active project field.
if self.active_project.get() not in self.project_list:
self.active_project.set('')
# If the active project field is not set, but we have a list of
# projects, set the active project to the first project in the list.
if not self.active_project.get() and self.project_list:
self.active_project.set([v for v in self.project_list][0])
# Toggle the enabled/disabled state of the buttons depending on if
# the clock is currently running.
if self.running:
self.start_button['state'] = tk.DISABLED
self.stop_button['state'] = tk.NORMAL
self.box['state'] = tk.DISABLED
else:
self.start_button['state'] = tk.NORMAL
self.stop_button['state'] = tk.DISABLED
self.box['state'] = tk.NORMAL
def poll(self):
"""Update the displayed times so that the fields work in real-time."""
if self.running:
elapsed = time.time() - self.active_project_start_ts
self.elapsed_time.set(human_time(elapsed, 0))
self.clock_status.set("Running")
else:
self.elapsed_time.set('')
self.clock_status.set("Stopped")
self.after(Clock.POLLING_INTERVAL_MS, self.poll)
@property
def running(self):
"""Return true if an active project has been started."""
return (self.active_project.get() and
self.active_project_start_ts is not None)
@event.notify
def on_start(self):
"""Start the clock on the current active project."""
if not self.running:
log.info("Starting work on project %s at %s",
self.active_project.get(), str(datetime.now()))
self.active_project_start_ts = int(time.time())
log.debug("active project start timestamp %d",
self.active_project_start_ts)
self.record_service.start(project=self.active_project.get(),
ts=self.active_project_start_ts)
@event.notify
def on_stop(self):
"""Stop the clock on the current active project."""
if self.running:
log.info("Stopping work on project %s at %s",
self.active_project.get(), str(datetime.now()))
stop_ts = int(time.time())
log.debug("active project stop timestamp %d", stop_ts)
self.record_service.stop(project=self.active_project.get(),
start_ts=self.active_project_start_ts,
stop_ts=stop_ts, )
self.active_project_start_ts = None
|
the-stack_106_26073 | from PIL import Image
import torch
import torch.backends.cudnn as cudnn
import torch.utils.data
import torch.nn.functional as F
import torchvision.transforms as transforms
import numpy as np
from collections import OrderedDict
import importlib
from .utils import CTCLabelConverter
import math
def custom_mean(x):
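    # Length-normalized confidence: the product of per-character probabilities raised to
    # 2/sqrt(n), which penalizes long predictions less harshly than a plain product would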
return x.prod()**(2.0/np.sqrt(len(x)))
def contrast_grey(img):
high = np.percentile(img, 90)
low = np.percentile(img, 10)
return (high-low)/np.maximum(10, high+low), high, low
def adjust_contrast_grey(img, target = 0.4):
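    # If the spread between the 10th and 90th grey-level percentiles is below `target`,
    # linearly stretch the grey levels (with a small offset) and clip back into [0, 255]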
contrast, high, low = contrast_grey(img)
if contrast < target:
img = img.astype(int)
ratio = 200./np.maximum(10, high-low)
img = (img - low + 25)*ratio
img = np.maximum(np.full(img.shape, 0) ,np.minimum(np.full(img.shape, 255), img)).astype(np.uint8)
return img
class NormalizePAD(object):
def __init__(self, max_size, PAD_type='right'):
self.toTensor = transforms.ToTensor()
self.max_size = max_size
self.max_width_half = math.floor(max_size[2] / 2)
self.PAD_type = PAD_type
def __call__(self, img):
img = self.toTensor(img)
img.sub_(0.5).div_(0.5)
c, h, w = img.size()
Pad_img = torch.FloatTensor(*self.max_size).fill_(0)
Pad_img[:, :, :w] = img # right pad
if self.max_size[2] != w: # add border Pad
Pad_img[:, :, w:] = img[:, :, w - 1].unsqueeze(2).expand(c, h, self.max_size[2] - w)
return Pad_img
class ListDataset(torch.utils.data.Dataset):
def __init__(self, image_list):
self.image_list = image_list
self.nSamples = len(image_list)
def __len__(self):
return self.nSamples
def __getitem__(self, index):
img = self.image_list[index]
return Image.fromarray(img, 'L')
class AlignCollate(object):
def __init__(self, imgH=32, imgW=100, keep_ratio_with_pad=False, adjust_contrast = 0.):
self.imgH = imgH
self.imgW = imgW
self.keep_ratio_with_pad = keep_ratio_with_pad
self.adjust_contrast = adjust_contrast
def __call__(self, batch):
batch = filter(lambda x: x is not None, batch)
images = batch
resized_max_w = self.imgW
input_channel = 1
transform = NormalizePAD((input_channel, self.imgH, resized_max_w))
resized_images = []
for image in images:
w, h = image.size
#### augmentation here - change contrast
if self.adjust_contrast > 0:
image = np.array(image.convert("L"))
image = adjust_contrast_grey(image, target = self.adjust_contrast)
image = Image.fromarray(image, 'L')
ratio = w / float(h)
if math.ceil(self.imgH * ratio) > self.imgW:
resized_w = self.imgW
else:
resized_w = math.ceil(self.imgH * ratio)
resized_image = image.resize((resized_w, self.imgH), Image.BICUBIC)
resized_images.append(transform(resized_image))
image_tensors = torch.cat([t.unsqueeze(0) for t in resized_images], 0)
return image_tensors
def recognizer_predict(model, converter, test_loader, batch_max_length,\
ignore_idx, char_group_idx, decoder = 'greedy', beamWidth= 5, device = 'cpu'):
model.eval()
result = []
with torch.no_grad():
for image_tensors in test_loader:
batch_size = image_tensors.size(0)
image = image_tensors.to(device)
# For max length prediction
length_for_pred = torch.IntTensor([batch_max_length] * batch_size).to(device)
text_for_pred = torch.LongTensor(batch_size, batch_max_length + 1).fill_(0).to(device)
preds = model(image, text_for_pred)
# Select max probabilty (greedy decoding) then decode index to character
preds_size = torch.IntTensor([preds.size(1)] * batch_size)
######## filter ignore_char, rebalance
preds_prob = F.softmax(preds, dim=2)
preds_prob = preds_prob.cpu().detach().numpy()
preds_prob[:,:,ignore_idx] = 0.
pred_norm = preds_prob.sum(axis=2)
preds_prob = preds_prob/np.expand_dims(pred_norm, axis=-1)
preds_prob = torch.from_numpy(preds_prob).float().to(device)
letter_prob_indices = None
preds_index = None
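            # Three decoding options: 'greedy' takes the per-step argmax and keeps the character
            # indices/probabilities; 'beamsearch' and 'wordbeamsearch' decode with a beam of width
            # beamWidth and do not return per-character indices here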
if decoder == 'greedy':
# Select max probabilty (greedy decoding) then decode index to character
_, preds_index = preds_prob.max(2)
preds_index = preds_index.view(-1)
preds_str_indices = converter.decode_greedy(preds_index.data.cpu().detach().numpy(), preds_size.data)
preds_index = [preds_index]
preds_str = [s for s, _ in preds_str_indices]
letter_prob_indices = [i for _, i in preds_str_indices]
elif decoder == 'beamsearch':
k = preds_prob.cpu().detach().numpy()
preds_str = converter.decode_beamsearch(k, beamWidth=beamWidth)
letter_prob_indices = [None]*len(preds_str)
preds_index = [None]*len(preds_str)
elif decoder == 'wordbeamsearch':
k = preds_prob.cpu().detach().numpy()
preds_str = converter.decode_wordbeamsearch(k, beamWidth=beamWidth)
letter_prob_indices = [None]*len(preds_str)
preds_index = [None]*len(preds_str)
preds_prob = preds_prob.cpu().detach().numpy()
values = preds_prob.max(axis=2)
indices = preds_prob.argmax(axis=2)
preds_max_prob = []
for v,i in zip(values, indices):
max_probs = v[i!=0]
if len(max_probs)>0:
preds_max_prob.append(max_probs)
else:
preds_max_prob.append(np.array([0]))
for pred, pred_max_prob, letter_prob_idx, pred_idx in zip(preds_str, preds_max_prob, letter_prob_indices, preds_index):
confidence_score = custom_mean(pred_max_prob)
probs = None
if letter_prob_idx is not None:
prob_indices = pred_idx[letter_prob_idx[0]].cpu()
tmp = preds_prob[0][letter_prob_idx]
prob_distributions = [tmp[i] for i, _ in enumerate(prob_indices)]
result.append([pred, confidence_score, prob_indices, np.array(prob_distributions)])
return result
def get_recognizer(recog_network, network_params, character,\
separator_list, dict_list, model_path,\
device = 'cpu', quantize = True):
converter = CTCLabelConverter(character, separator_list, dict_list)
num_class = len(converter.character)
if recog_network == 'generation1':
model_pkg = importlib.import_module("easyocr.model.model")
elif recog_network == 'generation2':
model_pkg = importlib.import_module("easyocr.model.vgg_model")
else:
model_pkg = importlib.import_module(recog_network)
model = model_pkg.Model(num_class=num_class, **network_params)
if device == 'cpu':
state_dict = torch.load(model_path, map_location=device)
new_state_dict = OrderedDict()
for key, value in state_dict.items():
            new_key = key[7:]  # strip the 'module.' prefix added by nn.DataParallel checkpoints
new_state_dict[new_key] = value
model.load_state_dict(new_state_dict)
if quantize:
try:
torch.quantization.quantize_dynamic(model, dtype=torch.qint8, inplace=True)
            except Exception:
                # Dynamic quantization is best-effort; fall back to the fp32 model on failure.
                pass
else:
model = torch.nn.DataParallel(model).to(device)
model.load_state_dict(torch.load(model_path, map_location=device))
return model, converter
def get_text(character, imgH, imgW, recognizer, converter, image_list,\
ignore_char = '',decoder = 'greedy', beamWidth =5, batch_size=1, contrast_ths=0.1,\
adjust_contrast=0.5, filter_ths = 0.003, workers = 1, device = 'cpu'):
batch_max_length = int(imgW/10)
char_group_idx = {}
ignore_idx = []
for char in ignore_char:
        try: ignore_idx.append(character.index(char)+1)
        except ValueError: pass  # char is not in the recognizer's character set
coord = [item[0] for item in image_list]
img_list = [item[1] for item in image_list]
AlignCollate_normal = AlignCollate(imgH=imgH, imgW=imgW, keep_ratio_with_pad=True)
test_data = ListDataset(img_list)
test_loader = torch.utils.data.DataLoader(
test_data, batch_size=batch_size, shuffle=False,
num_workers=int(workers), collate_fn=AlignCollate_normal, pin_memory=True)
# predict first round
result1 = recognizer_predict(recognizer, converter, test_loader,batch_max_length,\
ignore_idx, char_group_idx, decoder, beamWidth, device = device)
# predict second round
low_confident_idx = [i for i,item in enumerate(result1) if (item[1] < contrast_ths)]
if len(low_confident_idx) > 0:
img_list2 = [img_list[i] for i in low_confident_idx]
AlignCollate_contrast = AlignCollate(imgH=imgH, imgW=imgW, keep_ratio_with_pad=True, adjust_contrast=adjust_contrast)
test_data = ListDataset(img_list2)
test_loader = torch.utils.data.DataLoader(
test_data, batch_size=batch_size, shuffle=False,
num_workers=int(workers), collate_fn=AlignCollate_contrast, pin_memory=True)
result2 = recognizer_predict(recognizer, converter, test_loader, batch_max_length,\
ignore_idx, char_group_idx, decoder, beamWidth, device = device)
result = []
for i, zipped in enumerate(zip(coord, result1)):
box, pred1 = zipped
if i in low_confident_idx:
pred2 = result2[low_confident_idx.index(i)]
if pred1[1]>pred2[1]:
result.append( (box, pred1[0], *pred1[1:]) )
else:
result.append( (box, pred2[0], *pred2[1:]) )
else:
result.append( (box, pred1[0], *pred1[1:]) )
return result
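# --- Usage sketch (not part of the original module) ---
# Illustrates, under stated assumptions, how get_recognizer() and get_text() fit
# together. The model path, character set and network parameters below are
# placeholders, and `image_list` would normally hold crops produced by a text
# detector rather than the single blank crop used here.
if __name__ == '__main__':
    characters = '0123456789abcdefghijklmnopqrstuvwxyz'
    network_params = {'input_channel': 1, 'output_channel': 256, 'hidden_size': 256}  # placeholder values
    recognizer, converter = get_recognizer(
        'generation2', network_params, characters,
        separator_list={}, dict_list={}, model_path='english_g2.pth',  # hypothetical checkpoint path
        device='cpu', quantize=True)
    # (box, grayscale crop) pairs; the blank crop is only a placeholder.
    crops = [([[0, 0], [100, 0], [100, 32], [0, 32]],
              np.zeros((32, 100), dtype=np.uint8))]
    results = get_text(characters, imgH=64, imgW=600, recognizer=recognizer,
                       converter=converter, image_list=crops, device='cpu')
    for box, text, confidence, *extras in results:
        print(box, text, confidence)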
|
the-stack_106_26076 | import sys
import collections
def input():
return sys.stdin.readline()[:-1]
N = int(input())
A = []
for i in range(N):
tmp = list(input())
c = collections.Counter(tmp)
A.append(c)
ans = ''
for i in range(N//2):
for k in A[i].keys():
if k in A[(i+1)*-1]:
ans = ans+k
break
else:
print(-1)
exit(0)
if N % 2 == 1:
tmp = ''
for k in A[N//2].keys():
tmp = k
break
ans = ans+tmp+ans[::-1]
else:
ans = ans+ans[::-1]
print(ans)
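# Worked example (assumed I/O format: first line N, then N strings, one per line):
#   input : 3 / "ab" / "xyz" / "cb"
#   lines 1 and 3 share the character 'b', so the outer characters become 'b...b';
#   N is odd, so any character of the middle string (here 'x') fills the centre.
#   output: "bxb"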
|
the-stack_106_26077 | import os
from fifteen_api import FifteenAPI
# initialization
tts_api = FifteenAPI(show_debug=True)
# be aware that there is a serverside max text length. If text is too long, it will be trimmed.
print(tts_api.max_text_len)
### valid usage examples
# get tts raw bytes (well, assuming that Fluttershy is not currently disabled)
response = tts_api.get_tts_raw("Fluttershy", "Neutral", "This is a test")
assert response["status"] == "OK"
assert len(response["data"]) > 100000 # those are .wav audiofile bytes
# save tts to file with generated filename
response = tts_api.save_to_file("Fluttershy", "Neutral", "This is another test")
assert response["status"] == "OK"
assert response["filename"] != None # this is a generated filename of TTS file
print(response)
os.remove(response["filename"])
# save tts to file with target filename.
response = tts_api.save_to_file("Fluttershy", "Neutral", "One more test", "tts.wav")
assert response["status"] == "OK"
assert response["filename"] == "tts.wav"
print(response)
os.remove("tts.wav")
# if filename doesn't end with '.wav', it will be added automatically
response = tts_api.save_to_file("Fluttershy", "Neutral", "Last one valid test", "randomfilename")
assert response["status"] == "OK"
assert response["filename"] == "randomfilename.wav"
print(response)
os.remove("randomfilename.wav")
### invalid usage examples
# unavailable character
response = tts_api.save_to_file("random character or an incorrect name", "Neutral", "Test?", "tts.wav")
assert response["status"] != "OK"
assert response["filename"] == None
print(response)
# emotion that doesn't exist
response = tts_api.save_to_file("Fluttershy", "Super extra angry!", "Angry test!!!", "tts.wav")
assert response["status"] != "OK"
assert response["filename"] == None
print(response)
# assume that 15.ai api is currently broken
tts_api.tts_url = "https://example.com/brokenapi"
response = tts_api.save_to_file("Fluttershy", "Neutral", "...test?", "tts.wav")
assert response["status"] != "OK"
assert response["filename"] == None
print(response)
|
the-stack_106_26078 | from configs.config import Config
def load_config(dataset_name):
cfg = Config()
''' Experiment '''
cfg.experiment_idx = 1
cfg.trial_id = None
cfg.train_mode = 'train'
''' Dataset '''
cfg.dataset_name = dataset_name
cfg.set_hint_patch_shape((96, 96, 96))
cfg.num_classes = 4
''' Model '''
cfg.model_name = 'unet'
cfg.first_layer_channels = 32
cfg.num_input_channel = 1
cfg.step_count = 4
''' Training '''
cfg.numb_of_epochs = 25000
cfg.eval_every = 1
cfg.lamda_ce = 1
cfg.batch_size = 1
cfg.learning_rate = 1e-4
''' Priors '''
cfg.priors = None
cfg.augmentation_shift_range = 15
''' Save at '''
cfg.save_path = '/cvlabdata1/cvlab/datasets_udaranga/experiments/miccai2019/'
cfg.save_dir_prefix = 'Experiment_'
return cfg
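# Usage sketch (the dataset name below is a placeholder; the returned Config is
# consumed by the training entry point elsewhere in the project):
#   cfg = load_config('hippocampus')
#   print(cfg.dataset_name, cfg.num_classes, cfg.learning_rate)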
|
the-stack_106_26079 | from plotly.basedatatypes import BaseTraceType
import copy
class Scattermapbox(BaseTraceType):
# connectgaps
# -----------
@property
def connectgaps(self):
"""
Determines whether or not gaps (i.e. {nan} or missing values)
in the provided data arrays are connected.
The 'connectgaps' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['connectgaps']
@connectgaps.setter
def connectgaps(self, val):
self['connectgaps'] = val
# customdata
# ----------
@property
def customdata(self):
"""
        Assigns extra data to each datum. This may be useful when
        listening to hover, click and selection events. Note that
        "scatter" traces also append customdata items to the markers'
        DOM elements.
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self['customdata']
@customdata.setter
def customdata(self, val):
self['customdata'] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on plot.ly for customdata .
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['customdatasrc']
@customdatasrc.setter
def customdatasrc(self, val):
self['customdatasrc'] = val
# fill
# ----
@property
def fill(self):
"""
Sets the area to fill with a solid color. Use with `fillcolor`
if not "none". "toself" connects the endpoints of the trace (or
each segment of the trace if it has gaps) into a closed shape.
The 'fill' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'toself']
Returns
-------
Any
"""
return self['fill']
@fill.setter
def fill(self, val):
self['fill'] = val
# fillcolor
# ---------
@property
def fillcolor(self):
"""
Sets the fill color. Defaults to a half-transparent variant of
the line color, marker color, or marker line color, whichever
is available.
The 'fillcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
Returns
-------
str
"""
return self['fillcolor']
@fillcolor.setter
def fillcolor(self, val):
self['fillcolor'] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['lon', 'lat', 'text', 'name', 'name'] joined with '+' characters
(e.g. 'lon+lat')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self['hoverinfo']
@hoverinfo.setter
def hoverinfo(self, val):
self['hoverinfo'] = val
# hoverinfosrc
# ------------
@property
def hoverinfosrc(self):
"""
Sets the source reference on plot.ly for hoverinfo .
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['hoverinfosrc']
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self['hoverinfosrc'] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of plotly.graph_objs.scattermapbox.Hoverlabel
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on plot.ly for
bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on plot.ly for
bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the length (in number of characters) of
the trace name in the hover labels for this
trace. -1 shows the whole name regardless of
length. 0-3 shows the first 0-3 characters, and
an integer >3 will show the whole name if it is
less than that many characters, but if it is
longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on plot.ly for
namelength .
Returns
-------
plotly.graph_objs.scattermapbox.Hoverlabel
"""
return self['hoverlabel']
@hoverlabel.setter
def hoverlabel(self, val):
self['hoverlabel'] = val
# hovertext
# ---------
@property
def hovertext(self):
"""
        Sets hover text elements associated with each (lon,lat) pair. If
        a single string, the same string appears over all the data
        points. If an array of strings, the items are mapped in order to
        this trace's (lon,lat) coordinates. To be seen, trace
`hoverinfo` must contain a "text" flag.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self['hovertext']
@hovertext.setter
def hovertext(self, val):
self['hovertext'] = val
# hovertextsrc
# ------------
@property
def hovertextsrc(self):
"""
Sets the source reference on plot.ly for hovertext .
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['hovertextsrc']
@hovertextsrc.setter
def hovertextsrc(self, val):
self['hovertextsrc'] = val
# ids
# ---
@property
def ids(self):
"""
        Assigns id labels to each datum. These ids are used for object
        constancy of data points during animation. Should be an array of
        strings, not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self['ids']
@ids.setter
def ids(self, val):
self['ids'] = val
# idssrc
# ------
@property
def idssrc(self):
"""
Sets the source reference on plot.ly for ids .
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['idssrc']
@idssrc.setter
def idssrc(self, val):
self['idssrc'] = val
# lat
# ---
@property
def lat(self):
"""
Sets the latitude coordinates (in degrees North).
The 'lat' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self['lat']
@lat.setter
def lat(self, val):
self['lat'] = val
# latsrc
# ------
@property
def latsrc(self):
"""
Sets the source reference on plot.ly for lat .
The 'latsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['latsrc']
@latsrc.setter
def latsrc(self, val):
self['latsrc'] = val
# legendgroup
# -----------
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces part of the same
legend group hide/show at the same time when toggling legend
items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['legendgroup']
@legendgroup.setter
def legendgroup(self, val):
self['legendgroup'] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of plotly.graph_objs.scattermapbox.Line
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
color
Sets the line color.
width
Sets the line width (in px).
Returns
-------
plotly.graph_objs.scattermapbox.Line
"""
return self['line']
@line.setter
def line(self, val):
self['line'] = val
# lon
# ---
@property
def lon(self):
"""
Sets the longitude coordinates (in degrees East).
The 'lon' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self['lon']
@lon.setter
def lon(self, val):
self['lon'] = val
# lonsrc
# ------
@property
def lonsrc(self):
"""
Sets the source reference on plot.ly for lon .
The 'lonsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['lonsrc']
@lonsrc.setter
def lonsrc(self, val):
self['lonsrc'] = val
# marker
# ------
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of plotly.graph_objs.scattermapbox.Marker
- A dict of string/value properties that will be passed
to the Marker constructor
Supported dict properties:
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.colorscale`. Has an
effect only if in `marker.color`is set to a
numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the
default palette will be chosen according to
whether numbers in the `color` array are all
positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `marker.color`) or the bounds set in
`marker.cmin` and `marker.cmax` Has an effect
only if in `marker.color`is set to a numerical
array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has
an effect only if in `marker.color`is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmin` must be set as well.
cmin
Sets the lower bound of the color domain. Has
an effect only if in `marker.color`is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmax` must be set as well.
color
                Sets the marker color. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
colorbar
plotly.graph_objs.scattermapbox.marker.ColorBar
instance or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color`is set to a numerical array. The
colorscale must be an array containing arrays
mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
                `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To
control the bounds of the colorscale in color
space, use`marker.cmin` and `marker.cmax`.
Alternatively, `colorscale` may be a palette
name string of the following list: Greys,YlGnBu
,Greens,YlOrRd,Bluered,RdBu,Reds,Blues,Picnic,R
ainbow,Portland,Jet,Hot,Blackbody,Earth,Electri
c,Viridis,Cividis.
colorsrc
Sets the source reference on plot.ly for color
.
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on plot.ly for
opacity .
reversescale
Reverses the color mapping if true. Has an
effect only if in `marker.color`is set to a
numerical array. If true, `marker.cmin` will
correspond to the last color in the array and
`marker.cmax` will correspond to the first
color.
showscale
Determines whether or not a colorbar is
displayed for this trace. Has an effect only if
in `marker.color`is set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px)
of the rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the
data in `size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points.
Use with `sizemin` and `sizemode`.
sizesrc
Sets the source reference on plot.ly for size
.
symbol
Sets the marker symbol. Full list:
https://www.mapbox.com/maki-icons/ Note that
the array `marker.color` and `marker.size` are
only available for "circle" symbols.
symbolsrc
Sets the source reference on plot.ly for
symbol .
Returns
-------
plotly.graph_objs.scattermapbox.Marker
"""
return self['marker']
@marker.setter
def marker(self, val):
self['marker'] = val
# mode
# ----
@property
def mode(self):
"""
Determines the drawing mode for this scatter trace. If the
provided `mode` includes "text" then the `text` elements appear
at the coordinates. Otherwise, the `text` elements appear on
hover.
The 'mode' property is a flaglist and may be specified
as a string containing:
- Any combination of ['lines', 'markers', 'text'] joined with '+' characters
(e.g. 'lines+markers')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self['mode']
@mode.setter
def mode(self, val):
self['mode'] = val
# name
# ----
@property
def name(self):
"""
        Sets the trace name. The trace name appears as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['name']
@name.setter
def name(self, val):
self['name'] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the trace.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self['opacity']
@opacity.setter
def opacity(self, val):
self['opacity'] = val
# selected
# --------
@property
def selected(self):
"""
The 'selected' property is an instance of Selected
that may be specified as:
- An instance of plotly.graph_objs.scattermapbox.Selected
- A dict of string/value properties that will be passed
to the Selected constructor
Supported dict properties:
marker
plotly.graph_objs.scattermapbox.selected.Marker
instance or dict with compatible properties
Returns
-------
plotly.graph_objs.scattermapbox.Selected
"""
return self['selected']
@selected.setter
def selected(self, val):
self['selected'] = val
# selectedpoints
# --------------
@property
def selectedpoints(self):
"""
Array containing integer indices of selected points. Has an
effect only for traces that support selections. Note that an
        empty array means an empty selection where the `unselected` styles
        are turned on for all points, whereas any other non-array value
        means no selection at all, in which case the `selected` and
        `unselected` styles have no effect.
The 'selectedpoints' property accepts values of any type
Returns
-------
Any
"""
return self['selectedpoints']
@selectedpoints.setter
def selectedpoints(self, val):
self['selectedpoints'] = val
# showlegend
# ----------
@property
def showlegend(self):
"""
Determines whether or not an item corresponding to this trace
is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['showlegend']
@showlegend.setter
def showlegend(self, val):
self['showlegend'] = val
# stream
# ------
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of plotly.graph_objs.scattermapbox.Stream
- A dict of string/value properties that will be passed
to the Stream constructor
Supported dict properties:
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See
https://plot.ly/settings for more details.
Returns
-------
plotly.graph_objs.scattermapbox.Stream
"""
return self['stream']
@stream.setter
def stream(self, val):
self['stream'] = val
# subplot
# -------
@property
def subplot(self):
"""
Sets a reference between this trace's data coordinates and a
mapbox subplot. If "mapbox" (the default value), the data refer
to `layout.mapbox`. If "mapbox2", the data refer to
`layout.mapbox2`, and so on.
The 'subplot' property is an identifier of a particular
subplot, of type 'mapbox', that may be specified as the string 'mapbox'
optionally followed by an integer >= 1
(e.g. 'mapbox', 'mapbox1', 'mapbox2', 'mapbox3', etc.)
Returns
-------
str
"""
return self['subplot']
@subplot.setter
def subplot(self, val):
self['subplot'] = val
# text
# ----
@property
def text(self):
"""
        Sets text elements associated with each (lon,lat) pair. If a
        single string, the same string appears over all the data
        points. If an array of strings, the items are mapped in order to
        this trace's (lon,lat) coordinates. If trace `hoverinfo`
contains a "text" flag and "hovertext" is not set, these
elements will be seen in the hover labels.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self['text']
@text.setter
def text(self, val):
self['text'] = val
# textfont
# --------
@property
def textfont(self):
"""
Sets the icon text font. Has an effect only when `type` is set
to "symbol".
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of plotly.graph_objs.scattermapbox.Textfont
- A dict of string/value properties that will be passed
to the Textfont constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
                installed and supported. These include "Arial",
                "Balto", "Courier New", "Droid Sans", "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.scattermapbox.Textfont
"""
return self['textfont']
@textfont.setter
def textfont(self, val):
self['textfont'] = val
# textposition
# ------------
@property
def textposition(self):
"""
        Sets the positions of the `text` elements with respect to the
(x,y) coordinates.
The 'textposition' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top left', 'top center', 'top right', 'middle left',
'middle center', 'middle right', 'bottom left', 'bottom
center', 'bottom right']
Returns
-------
Any
"""
return self['textposition']
@textposition.setter
def textposition(self, val):
self['textposition'] = val
# textsrc
# -------
@property
def textsrc(self):
"""
Sets the source reference on plot.ly for text .
The 'textsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['textsrc']
@textsrc.setter
def textsrc(self, val):
self['textsrc'] = val
# uid
# ---
@property
def uid(self):
"""
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['uid']
@uid.setter
def uid(self, val):
self['uid'] = val
# unselected
# ----------
@property
def unselected(self):
"""
The 'unselected' property is an instance of Unselected
that may be specified as:
- An instance of plotly.graph_objs.scattermapbox.Unselected
- A dict of string/value properties that will be passed
to the Unselected constructor
Supported dict properties:
marker
plotly.graph_objs.scattermapbox.unselected.Mark
er instance or dict with compatible properties
Returns
-------
plotly.graph_objs.scattermapbox.Unselected
"""
return self['unselected']
@unselected.setter
def unselected(self, val):
self['unselected'] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self['visible']
@visible.setter
def visible(self, val):
self['visible'] = val
# type
# ----
@property
def type(self):
return self._props['type']
# property parent name
# --------------------
@property
def _parent_path_str(self):
return ''
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
connectgaps
Determines whether or not gaps (i.e. {nan} or missing
values) in the provided data arrays are connected.
customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that "scatter" traces also append customdata items to
            the markers' DOM elements.
customdatasrc
Sets the source reference on plot.ly for customdata .
fill
Sets the area to fill with a solid color. Use with
`fillcolor` if not "none". "toself" connects the
endpoints of the trace (or each segment of the trace if
it has gaps) into a closed shape.
fillcolor
Sets the fill color. Defaults to a half-transparent
variant of the line color, marker color, or marker line
color, whichever is available.
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on plot.ly for hoverinfo .
hoverlabel
plotly.graph_objs.scattermapbox.Hoverlabel instance or
dict with compatible properties
hovertext
            Sets hover text elements associated with each (lon,lat)
            pair. If a single string, the same string appears over
            all the data points. If an array of strings, the items
            are mapped in order to this trace's (lon,lat)
            coordinates. To be seen, trace `hoverinfo` must contain
a "text" flag.
hovertextsrc
Sets the source reference on plot.ly for hovertext .
ids
            Assigns id labels to each datum. These ids are used for
            object constancy of data points during animation. Should
            be an array of strings, not numbers or any other type.
idssrc
Sets the source reference on plot.ly for ids .
lat
Sets the latitude coordinates (in degrees North).
latsrc
Sets the source reference on plot.ly for lat .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
line
plotly.graph_objs.scattermapbox.Line instance or dict
with compatible properties
lon
Sets the longitude coordinates (in degrees East).
lonsrc
Sets the source reference on plot.ly for lon .
marker
plotly.graph_objs.scattermapbox.Marker instance or dict
with compatible properties
mode
Determines the drawing mode for this scatter trace. If
the provided `mode` includes "text" then the `text`
elements appear at the coordinates. Otherwise, the
`text` elements appear on hover.
name
            Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
selected
plotly.graph_objs.scattermapbox.Selected instance or
dict with compatible properties
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
            the `unselected` styles are turned on for all points, whereas
            any other non-array value means no selection at all, in which
            case the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
plotly.graph_objs.scattermapbox.Stream instance or dict
with compatible properties
subplot
Sets a reference between this trace's data coordinates
and a mapbox subplot. If "mapbox" (the default value),
the data refer to `layout.mapbox`. If "mapbox2", the
data refer to `layout.mapbox2`, and so on.
text
            Sets text elements associated with each (lon,lat) pair.
            If a single string, the same string appears over all
            the data points. If an array of strings, the items are
            mapped in order to this trace's (lon,lat)
coordinates. If trace `hoverinfo` contains a "text"
flag and "hovertext" is not set, these elements will be
seen in the hover labels.
textfont
Sets the icon text font. Has an effect only when `type`
is set to "symbol".
textposition
            Sets the positions of the `text` elements with respect
to the (x,y) coordinates.
textsrc
Sets the source reference on plot.ly for text .
uid
unselected
plotly.graph_objs.scattermapbox.Unselected instance or
dict with compatible properties
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
"""
def __init__(
self,
arg=None,
connectgaps=None,
customdata=None,
customdatasrc=None,
fill=None,
fillcolor=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
lat=None,
latsrc=None,
legendgroup=None,
line=None,
lon=None,
lonsrc=None,
marker=None,
mode=None,
name=None,
opacity=None,
selected=None,
selectedpoints=None,
showlegend=None,
stream=None,
subplot=None,
text=None,
textfont=None,
textposition=None,
textsrc=None,
uid=None,
unselected=None,
visible=None,
**kwargs
):
"""
Construct a new Scattermapbox object
The data visualized as scatter point, lines or marker symbols
on a Mapbox GL geographic map is provided by longitude/latitude
pairs in `lon` and `lat`.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.Scattermapbox
connectgaps
Determines whether or not gaps (i.e. {nan} or missing
values) in the provided data arrays are connected.
customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that "scatter" traces also append customdata items to
            the markers' DOM elements.
customdatasrc
Sets the source reference on plot.ly for customdata .
fill
Sets the area to fill with a solid color. Use with
`fillcolor` if not "none". "toself" connects the
endpoints of the trace (or each segment of the trace if
it has gaps) into a closed shape.
fillcolor
Sets the fill color. Defaults to a half-transparent
variant of the line color, marker color, or marker line
color, whichever is available.
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on plot.ly for hoverinfo .
hoverlabel
plotly.graph_objs.scattermapbox.Hoverlabel instance or
dict with compatible properties
hovertext
            Sets hover text elements associated with each (lon,lat)
            pair. If a single string, the same string appears over
            all the data points. If an array of strings, the items
            are mapped in order to this trace's (lon,lat)
            coordinates. To be seen, trace `hoverinfo` must contain
a "text" flag.
hovertextsrc
Sets the source reference on plot.ly for hovertext .
ids
            Assigns id labels to each datum. These ids are used for
            object constancy of data points during animation. Should
            be an array of strings, not numbers or any other type.
idssrc
Sets the source reference on plot.ly for ids .
lat
Sets the latitude coordinates (in degrees North).
latsrc
Sets the source reference on plot.ly for lat .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
line
plotly.graph_objs.scattermapbox.Line instance or dict
with compatible properties
lon
Sets the longitude coordinates (in degrees East).
lonsrc
Sets the source reference on plot.ly for lon .
marker
plotly.graph_objs.scattermapbox.Marker instance or dict
with compatible properties
mode
Determines the drawing mode for this scatter trace. If
the provided `mode` includes "text" then the `text`
elements appear at the coordinates. Otherwise, the
`text` elements appear on hover.
name
            Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
selected
plotly.graph_objs.scattermapbox.Selected instance or
dict with compatible properties
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
            the `unselected` styles are turned on for all points, whereas
            any other non-array value means no selection at all, in which
            case the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
plotly.graph_objs.scattermapbox.Stream instance or dict
with compatible properties
subplot
Sets a reference between this trace's data coordinates
and a mapbox subplot. If "mapbox" (the default value),
the data refer to `layout.mapbox`. If "mapbox2", the
data refer to `layout.mapbox2`, and so on.
text
            Sets text elements associated with each (lon,lat) pair.
            If a single string, the same string appears over all
            the data points. If an array of strings, the items are
            mapped in order to this trace's (lon,lat)
coordinates. If trace `hoverinfo` contains a "text"
flag and "hovertext" is not set, these elements will be
seen in the hover labels.
textfont
Sets the icon text font. Has an effect only when `type`
is set to "symbol".
textposition
            Sets the positions of the `text` elements with respect
to the (x,y) coordinates.
textsrc
Sets the source reference on plot.ly for text .
uid
unselected
plotly.graph_objs.scattermapbox.Unselected instance or
dict with compatible properties
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
Returns
-------
Scattermapbox
"""
super(Scattermapbox, self).__init__('scattermapbox')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Scattermapbox
constructor must be a dict or
an instance of plotly.graph_objs.Scattermapbox"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators import (scattermapbox as v_scattermapbox)
# Initialize validators
# ---------------------
self._validators['connectgaps'
] = v_scattermapbox.ConnectgapsValidator()
self._validators['customdata'] = v_scattermapbox.CustomdataValidator()
self._validators['customdatasrc'
] = v_scattermapbox.CustomdatasrcValidator()
self._validators['fill'] = v_scattermapbox.FillValidator()
self._validators['fillcolor'] = v_scattermapbox.FillcolorValidator()
self._validators['hoverinfo'] = v_scattermapbox.HoverinfoValidator()
self._validators['hoverinfosrc'
] = v_scattermapbox.HoverinfosrcValidator()
self._validators['hoverlabel'] = v_scattermapbox.HoverlabelValidator()
self._validators['hovertext'] = v_scattermapbox.HovertextValidator()
self._validators['hovertextsrc'
] = v_scattermapbox.HovertextsrcValidator()
self._validators['ids'] = v_scattermapbox.IdsValidator()
self._validators['idssrc'] = v_scattermapbox.IdssrcValidator()
self._validators['lat'] = v_scattermapbox.LatValidator()
self._validators['latsrc'] = v_scattermapbox.LatsrcValidator()
self._validators['legendgroup'
] = v_scattermapbox.LegendgroupValidator()
self._validators['line'] = v_scattermapbox.LineValidator()
self._validators['lon'] = v_scattermapbox.LonValidator()
self._validators['lonsrc'] = v_scattermapbox.LonsrcValidator()
self._validators['marker'] = v_scattermapbox.MarkerValidator()
self._validators['mode'] = v_scattermapbox.ModeValidator()
self._validators['name'] = v_scattermapbox.NameValidator()
self._validators['opacity'] = v_scattermapbox.OpacityValidator()
self._validators['selected'] = v_scattermapbox.SelectedValidator()
self._validators['selectedpoints'
] = v_scattermapbox.SelectedpointsValidator()
self._validators['showlegend'] = v_scattermapbox.ShowlegendValidator()
self._validators['stream'] = v_scattermapbox.StreamValidator()
self._validators['subplot'] = v_scattermapbox.SubplotValidator()
self._validators['text'] = v_scattermapbox.TextValidator()
self._validators['textfont'] = v_scattermapbox.TextfontValidator()
self._validators['textposition'
] = v_scattermapbox.TextpositionValidator()
self._validators['textsrc'] = v_scattermapbox.TextsrcValidator()
self._validators['uid'] = v_scattermapbox.UidValidator()
self._validators['unselected'] = v_scattermapbox.UnselectedValidator()
self._validators['visible'] = v_scattermapbox.VisibleValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('connectgaps', None)
self['connectgaps'] = connectgaps if connectgaps is not None else _v
_v = arg.pop('customdata', None)
self['customdata'] = customdata if customdata is not None else _v
_v = arg.pop('customdatasrc', None)
self['customdatasrc'
] = customdatasrc if customdatasrc is not None else _v
_v = arg.pop('fill', None)
self['fill'] = fill if fill is not None else _v
_v = arg.pop('fillcolor', None)
self['fillcolor'] = fillcolor if fillcolor is not None else _v
_v = arg.pop('hoverinfo', None)
self['hoverinfo'] = hoverinfo if hoverinfo is not None else _v
_v = arg.pop('hoverinfosrc', None)
self['hoverinfosrc'] = hoverinfosrc if hoverinfosrc is not None else _v
_v = arg.pop('hoverlabel', None)
self['hoverlabel'] = hoverlabel if hoverlabel is not None else _v
_v = arg.pop('hovertext', None)
self['hovertext'] = hovertext if hovertext is not None else _v
_v = arg.pop('hovertextsrc', None)
self['hovertextsrc'] = hovertextsrc if hovertextsrc is not None else _v
_v = arg.pop('ids', None)
self['ids'] = ids if ids is not None else _v
_v = arg.pop('idssrc', None)
self['idssrc'] = idssrc if idssrc is not None else _v
_v = arg.pop('lat', None)
self['lat'] = lat if lat is not None else _v
_v = arg.pop('latsrc', None)
self['latsrc'] = latsrc if latsrc is not None else _v
_v = arg.pop('legendgroup', None)
self['legendgroup'] = legendgroup if legendgroup is not None else _v
_v = arg.pop('line', None)
self['line'] = line if line is not None else _v
_v = arg.pop('lon', None)
self['lon'] = lon if lon is not None else _v
_v = arg.pop('lonsrc', None)
self['lonsrc'] = lonsrc if lonsrc is not None else _v
_v = arg.pop('marker', None)
self['marker'] = marker if marker is not None else _v
_v = arg.pop('mode', None)
self['mode'] = mode if mode is not None else _v
_v = arg.pop('name', None)
self['name'] = name if name is not None else _v
_v = arg.pop('opacity', None)
self['opacity'] = opacity if opacity is not None else _v
_v = arg.pop('selected', None)
self['selected'] = selected if selected is not None else _v
_v = arg.pop('selectedpoints', None)
self['selectedpoints'
] = selectedpoints if selectedpoints is not None else _v
_v = arg.pop('showlegend', None)
self['showlegend'] = showlegend if showlegend is not None else _v
_v = arg.pop('stream', None)
self['stream'] = stream if stream is not None else _v
_v = arg.pop('subplot', None)
self['subplot'] = subplot if subplot is not None else _v
_v = arg.pop('text', None)
self['text'] = text if text is not None else _v
_v = arg.pop('textfont', None)
self['textfont'] = textfont if textfont is not None else _v
_v = arg.pop('textposition', None)
self['textposition'] = textposition if textposition is not None else _v
_v = arg.pop('textsrc', None)
self['textsrc'] = textsrc if textsrc is not None else _v
_v = arg.pop('uid', None)
self['uid'] = uid if uid is not None else _v
_v = arg.pop('unselected', None)
self['unselected'] = unselected if unselected is not None else _v
_v = arg.pop('visible', None)
self['visible'] = visible if visible is not None else _v
# Read-only literals
# ------------------
from _plotly_utils.basevalidators import LiteralValidator
self._props['type'] = 'scattermapbox'
self._validators['type'] = LiteralValidator(
plotly_name='type',
parent_name='scattermapbox',
val='scattermapbox'
)
arg.pop('type', None)
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
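# --- Usage sketch (not part of the generated class) ---
# A minimal Scattermapbox trace; the coordinates and labels are illustrative.
# Rendering a figure with this trace additionally requires a Mapbox access
# token on the layout (layout.mapbox.accesstoken), which is omitted here.
if __name__ == '__main__':
    trace = Scattermapbox(
        lat=[45.5017, 43.6532],      # degrees North
        lon=[-73.5673, -79.3832],    # degrees East
        mode='markers+text',
        marker=dict(size=9),
        text=['Montreal', 'Toronto'],
        textposition='top center',
    )
    print(trace.to_plotly_json())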
|
the-stack_106_26081 | from __future__ import print_function
import sys
import os
# This is important to allow access to the CTFd application factory
sys.path.append(os.getcwd())
import datetime
import hashlib
import netaddr
from flask_sqlalchemy import SQLAlchemy
from passlib.hash import bcrypt_sha256
from sqlalchemy.sql.expression import union_all
from CTFd import config, create_app
from sqlalchemy_utils import (
drop_database,
)
from six.moves import input
import dataset
if __name__ == '__main__':
    print("/*\\ Migrating your database to 2.0.0 can potentially lose data. /*\\")
print("""/*\\ Please be sure to back up all data by:
* creating a CTFd export
* creating a dump of your actual database
* and backing up the CTFd source code directory""")
print("/*\\ CTFd maintainers are not responsible for any data loss! /*\\")
if input('Run database migrations (Y/N)').lower().strip() == 'y':
pass
else:
print('/*\\ Aborting database migrations... /*\\')
print('/*\\ Exiting... /*\\')
exit(1)
db_url = config.Config.SQLALCHEMY_DATABASE_URI
old_data = {}
old_conn = dataset.connect(config.Config.SQLALCHEMY_DATABASE_URI)
tables = old_conn.tables
for table in tables:
old_data[table] = old_conn[table].all()
if 'alembic_version' in old_data:
old_data.pop('alembic_version')
print('Current Tables:')
for table in old_data.keys():
print('\t', table)
old_conn.executable.close()
print('DROPPING DATABASE')
drop_database(db_url)
app = create_app()
new_conn = dataset.connect(config.Config.SQLALCHEMY_DATABASE_URI)
print('MIGRATING Challenges')
for challenge in old_data['challenges']:
hidden = challenge.pop('hidden')
challenge['state'] = 'hidden' if hidden else 'visible'
new_conn['challenges'].insert(dict(challenge))
del old_data['challenges']
print('MIGRATING Teams')
for team in old_data['teams']:
admin = team.pop('admin')
team['type'] = 'admin' if admin else 'user'
team['hidden'] = bool(team.pop('banned'))
team['banned'] = False
team['verified'] = bool(team.pop('verified'))
new_conn['users'].insert(dict(team))
del old_data['teams']
print('MIGRATING Pages')
for page in old_data['pages']:
page['content'] = page.pop('html')
new_conn['pages'].insert(dict(page))
del old_data['pages']
print('MIGRATING Keys')
for key in old_data['keys']:
key['challenge_id'] = key.pop('chal')
key['content'] = key.pop('flag')
new_conn['flags'].insert(dict(key))
del old_data['keys']
print('MIGRATING Tags')
for tag in old_data['tags']:
tag['challenge_id'] = tag.pop('chal')
tag['value'] = tag.pop('tag')
new_conn['tags'].insert(dict(tag))
del old_data['tags']
print('MIGRATING Files')
for f in old_data['files']:
challenge_id = f.pop('chal')
if challenge_id:
f['challenge_id'] = challenge_id
f['type'] = 'challenge'
else:
f['page_id'] = None
f['type'] = 'page'
new_conn['files'].insert(dict(f))
del old_data['files']
print('MIGRATING Hints')
for hint in old_data['hints']:
hint['type'] = 'standard'
hint['challenge_id'] = hint.pop('chal')
hint['content'] = hint.pop('hint')
new_conn['hints'].insert(dict(hint))
del old_data['hints']
print('MIGRATING Unlocks')
for unlock in old_data['unlocks']:
unlock['user_id'] = unlock.pop('teamid') # This is intentional as previous CTFds are effectively in user mode
unlock['target'] = unlock.pop('item_id')
unlock['type'] = unlock.pop('model')
new_conn['unlocks'].insert(dict(unlock))
del old_data['unlocks']
print('MIGRATING Awards')
for award in old_data['awards']:
award['user_id'] = award.pop('teamid') # This is intentional as previous CTFds are effectively in user mode
new_conn['awards'].insert(dict(award))
del old_data['awards']
submissions = []
for solve in old_data['solves']:
solve.pop('id') # ID of a solve doesn't really matter
solve['challenge_id'] = solve.pop('chalid')
solve['user_id'] = solve.pop('teamid')
solve['provided'] = solve.pop('flag')
solve['type'] = 'correct'
solve['model'] = 'solve'
submissions.append(solve)
for wrong_key in old_data['wrong_keys']:
wrong_key.pop('id') # ID of a fail doesn't really matter.
wrong_key['challenge_id'] = wrong_key.pop('chalid')
wrong_key['user_id'] = wrong_key.pop('teamid')
wrong_key['provided'] = wrong_key.pop('flag')
wrong_key['type'] = 'incorrect'
wrong_key['model'] = 'wrong_key'
submissions.append(wrong_key)
submissions = sorted(submissions, key=lambda k: k['date'])
print('MIGRATING Solves & WrongKeys')
for submission in submissions:
model = submission.pop('model')
if model == 'solve':
new_id = new_conn['submissions'].insert(dict(submission))
submission['id'] = new_id
new_conn['solves'].insert(dict(submission))
elif model == 'wrong_key':
new_conn['submissions'].insert(dict(submission))
del old_data['solves']
del old_data['wrong_keys']
print('MIGRATING Tracking')
for tracking in old_data['tracking']:
tracking['user_id'] = tracking.pop('team')
new_conn['tracking'].insert(dict(tracking))
del old_data['tracking']
print('MIGRATING Config')
banned = [
'ctf_version',
'setup'
]
for config in old_data['config']:
config.pop('id')
if config['key'] not in banned:
new_conn['config'].insert(dict(config))
new_conn['config'].insert({
'key': 'user_mode',
'value': 'users'
})
del old_data['config']
manual = []
not_created = []
print('MIGRATING extra tables')
for table in old_data.keys():
print('MIGRATING', table)
new_conn.create_table(table, primary_id=False)
data = old_data[table]
ran = False
for row in data:
new_conn[table].insert(dict(row))
ran = True
else: # We finished inserting
if ran:
manual.append(table)
if ran is False:
not_created.append(table)
print('Migration completed.')
print('The following tables require manual setting of primary keys and manual inspection')
for table in manual:
print('\t', table)
print('For example you can use the following commands if you know that the PRIMARY KEY for the table is `id`:')
for table in manual:
print('\t', 'ALTER TABLE `{table}` ADD PRIMARY KEY(id)'.format(table=table))
print('The following tables were not created because they were empty and must be manually recreated (e.g. app.db.create_all()')
for table in not_created:
print('\t', table)
|
the-stack_106_26082 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import torch
class WelfordCovariance(object):
"""
Implements Welford's online scheme for estimating (co)variance (see :math:`[1]`).
Useful for adapting diagonal and dense mass structures for HMC.
**References**
[1] `The Art of Computer Programming`,
Donald E. Knuth
"""
def __init__(self, diagonal=True):
self.diagonal = diagonal
self.reset()
def reset(self):
self._mean = 0.
self._m2 = 0.
self.n_samples = 0
def update(self, sample):
self.n_samples += 1
delta_pre = sample - self._mean
self._mean = self._mean + delta_pre / self.n_samples
delta_post = sample - self._mean
if self.diagonal:
self._m2 += delta_pre * delta_post
else:
self._m2 += torch.ger(delta_post, delta_pre)
def get_covariance(self, regularize=True):
if self.n_samples < 2:
raise RuntimeError('Insufficient samples to estimate covariance')
cov = self._m2 / (self.n_samples - 1)
if regularize:
# Regularization from stan
scaled_cov = (self.n_samples / (self.n_samples + 5.)) * cov
shrinkage = 1e-3 * (5. / (self.n_samples + 5.0))
if self.diagonal:
cov = scaled_cov + shrinkage
else:
scaled_cov.view(-1)[::scaled_cov.size(0) + 1] += shrinkage
cov = scaled_cov
return cov
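# --- Usage sketch (not part of the original module) ---
# Streams correlated 2-D samples through the estimator and compares the dense
# covariance estimate with the direct sample covariance computed in torch.
if __name__ == '__main__':
    torch.manual_seed(0)
    samples = torch.randn(500, 2) @ torch.tensor([[1.0, 0.0], [0.5, 2.0]])
    welford = WelfordCovariance(diagonal=False)
    for sample in samples:
        welford.update(sample)
    print(welford.get_covariance(regularize=False))
    centered = samples - samples.mean(0)
    print(centered.t() @ centered / (samples.size(0) - 1))  # reference estimate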
|
the-stack_106_26083 | from domain.ErrorTypes import ErrorTypes
from validity import IncomingEdgeValidityChecker, DataSourceValidityChecker
from utils import CodeGenerationUtils
import os
# Consider adding offset option...
def generate_code(args):
node = args["node"]
requireds_info = args["requireds_info"]
edges = args["edges"]
checklist={"df_count": {0}, "model_count": {0}}
error, extra=IncomingEdgeValidityChecker.check_validity(node["id"], requireds_info, edges, checklist)
code=[]
if(error == ErrorTypes.NO_ERROR):
error, is_schema_appropriate=DataSourceValidityChecker.check_validity(node)
if(error == ErrorTypes.NO_ERROR):
# Must be a valid schema at this point.
code.append("schema_" + node["id"] + "=")
            code.extend([CodeGenerationUtils.arrange_schema(node["parameters"]["schema"]), os.linesep])
code.append("df_" + node["id"] + ' = spark.readStream.format("kafka").option("kafka.bootstrap.servers", ')
code.append(CodeGenerationUtils.arrange_parameter_value(node["parameters"]["host"] + ":" + node["parameters"]["port"])+")")
            code.append('.option("subscribe", ' + CodeGenerationUtils.arrange_parameter_value(node["parameters"]["topic"]) + ")")
code.append('.load().select(from_json(col("value").cast("string"), schema_'+node["id"]+")")
# For streams, we will use timestamp as a key while writing to kafka topic in case.
code.extend(['.alias("value"), "timestamp").select("value.*", "timestamp")', os.linesep])
return code, error |
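# Usage sketch (hypothetical node payload; key names mirror the accesses above,
# and the validity checkers may require richer `requireds_info`/`edges` data):
#   args = {"node": {"id": "3",
#                    "parameters": {"schema": [...],
#                                   "host": "localhost", "port": "9092",
#                                   "topic": "sensor-readings"}},
#           "requireds_info": {}, "edges": {}}
#   code_lines, err = generate_code(args)
#   if err == ErrorTypes.NO_ERROR:
#       print("".join(code_lines))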
the-stack_106_26085 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from scipy import interpolate
import hitomipy
import pycuba
import os
class ClassBiSpectrum():
def __init__(self):
self.initialize()
def initialize(self):
self.k_temp = np.zeros(1)
self.P_temp = np.zeros(1)
self.P_nw_temp = np.zeros(1)
self.sigma8_norm = 1.0
self.alpha_perp = 1.0
self.alpha_parallel = 1.0
self.sigma8 = 0.0
self.fz = 0.0
self.b1 = 0.0
self.b2 = 0.0
self.b3 = 0.0
self.bK2 = 0.0
self.bK3 = 0.0
self.bDK = 0.0
self.bO = 0.0
def set_input_pk(self, k_in, P_in):
self.k_temp = k_in
self.P_temp = P_in
def set_input_pk_nw(self, k_in, P_nw_in):
self.k_temp = k_in
self.P_nw_temp = P_nw_in
def set_normalization(self, sigma8_norm):
self.sigma8_norm = sigma8_norm
def set_params(self, params):
        # Copy any recognised parameter into the corresponding attribute;
        # keys absent from `params` keep their current values.
        for name in ("alpha_perp", "alpha_parallel", "sigma8", "fz",
                     "b1", "b2", "b3", "bK2", "bK3", "bDK", "bO"):
            if name in params:
                setattr(self, name, params[name])
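    # Example parameter payload accepted by set_params() (values are illustrative):
    #   params = {"alpha_perp": 1.01, "alpha_parallel": 0.99, "sigma8": 0.8,
    #             "fz": 0.77, "b1": 2.0, "b2": 0.0, "bK2": 0.0}
    #   spectrum = ClassBiSpectrum()
    #   spectrum.set_params(params)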
def select_B(self, name):
n_kbin = len(self.kbin)
if name == "Tree":
return hitomipy.integrand_B_Tree_py(
self.xx_in, self.ndim, self.ff_out, self.ncomp,
self.kbin, n_kbin, self.ell1, self.ell2, self.ELL, self.kmag1,
self.alpha_perp, self.alpha_parallel,
self.sigma8, self.fz, self.b1,
self.b2, self.bK2)
elif name == "Tree_NoWiggle":
return hitomipy.integrand_B_Tree_NoWiggle_py(
self.xx_in, self.ndim, self.ff_out, self.ncomp,
self.kbin, n_kbin, self.ell1, self.ell2, self.ELL, self.kmag1,
self.alpha_perp, self.alpha_parallel,
self.sigma8, self.fz, self.b1,
self.b2, self.bK2)
elif name == "Tree_BAO":
return hitomipy.integrand_B_Tree_BAO_py(
self.xx_in, self.ndim, self.ff_out, self.ncomp,
self.kbin, n_kbin, self.ell1, self.ell2, self.ELL, self.kmag1,
self.alpha_perp, self.alpha_parallel,
self.sigma8, self.fz, self.b1,
self.b2, self.bK2,
self.sigma2_perp, self.sigma2_para)
elif name == "Tree_BAO_Reconstructed":
return hitomipy.integrand_B_Tree_BAO_Reconstructed_py(
self.xx_in, self.ndim, self.ff_out, self.ncomp,
self.kbin, n_kbin, self.ell1, self.ell2, self.ELL, self.kmag1,
self.alpha_perp, self.alpha_parallel,
self.sigma8, self.fz, self.b1,
self.b2, self.bK2,
self.one_over_b1_fid, self.R,
self.sigma2_perp, self.sigma2_para)
elif name == "Tree_BAO_Template":
return hitomipy.integrand_B_Tree_BAO_Template_py(
self.xx_in, self.ndim, self.ff_out, self.ncomp,
self.kbin, n_kbin, self.ell1, self.ell2, self.ELL, self.kmag1,
self.alpha_perp, self.alpha_parallel,
self.sigma2_perp, self.sigma2_para,
self.param_name)
elif name == "Tree_NonGaussian_Local":
return hitomipy.integrand_B_Tree_NonGaussian_Local_py(
self.xx_in, self.ndim, self.ff_out, self.ncomp,
self.kbin, n_kbin, self.ell1, self.ell2, self.ELL, self.kmag1,
self.alpha_perp, self.alpha_parallel,
self.sigma8, self.fz, self.b1)
elif name == "Tree_NonGaussian_Equilateral":
return hitomipy.integrand_B_Tree_NonGaussian_Equilateral_py(
self.xx_in, self.ndim, self.ff_out, self.ncomp,
self.kbin, n_kbin, self.ell1, self.ell2, self.ELL, self.kmag1,
self.alpha_perp, self.alpha_parallel,
self.sigma8, self.fz, self.b1)
elif name == "Tree_NonGaussian_Orthogonal":
return hitomipy.integrand_B_Tree_NonGaussian_Orthogonal_py(
self.xx_in, self.ndim, self.ff_out, self.ncomp,
self.kbin, n_kbin, self.ell1, self.ell2, self.ELL, self.kmag1,
self.alpha_perp, self.alpha_parallel,
self.sigma8, self.fz, self.b1)
else:
print("select_B: ERROR")
return 0.0
def select_ndim(self, name):
name_dim3 = []
name_dim3.append("Tree")
name_dim3.append("Tree_NoWiggle")
name_dim3.append("Tree_BAO")
name_dim3.append("Tree_BAO_Reconstructed")
name_dim3.append("Tree_BAO_Template")
name_dim3.append("Tree_NonGaussian_Local")
name_dim3.append("Tree_NonGaussian_Equilateral")
name_dim3.append("Tree_NonGaussian_Orthogonal")
if name in name_dim3:
return 3
def check_flag_BAO(self, name, flag_BAO, sigma8_fid, fz_fid):
flag = 0
name_BAO = []
name_BAO.append("Tree_BAO")
name_BAO.append("Tree_BAO_Reconstructed")
name_BAO.append("Tree_BAO_Template")
name_no_BAO = []
name_no_BAO.append("Tree")
name_no_BAO.append("Tree_NoWiggle")
name_no_BAO.append("Tree_NonGaussian_Local")
name_no_BAO.append("Tree_NonGaussian_Equilateral")
name_no_BAO.append("Tree_NonGaussian_Orthogonal")
if name in name_BAO:
if flag_BAO and sigma8_fid >= 0.0 and fz_fid >= 0.0:
flag = 0
self.sigma8_fid = sigma8_fid
self.fz_fid = fz_fid
elif flag_BAO and sigma8_fid < 0.0 and fz_fid < 0.0:
flag = 0
self.sigma8_fid = self.sigma8
self.fz_fid = self.fz
else:
flag = -1
elif name in name_no_BAO:
if flag_BAO:
flag = -1
else:
flag = 0
else:
flag = -1
return flag
def check_flag_recon(self, name, flag_BAO, flag_recon, one_over_b1_fid, R):
flag = 0
name_recon = []
name_recon.append("Tree_BAO_Reconstructed")
name_no_recon = []
name_no_recon.append("Tree")
name_no_recon.append("Tree_NoWiggle")
name_no_recon.append("Tree_BAO")
name_no_recon.append("Tree_BAO_Template")
name_no_recon.append("Tree_NonGaussian_Local")
name_no_recon.append("Tree_NonGaussian_Equilateral")
name_no_recon.append("Tree_NonGaussian_Orthogonal")
if name in name_recon:
if flag_recon and one_over_b1_fid >= 0.0 and R >= 0.0:
flag = 0
self.one_over_b1_fid = one_over_b1_fid
self.R = R
else:
flag = - 1
elif name in name_no_recon:
if flag_recon:
flag = -1
else:
flag = 0
else:
flag = -1
return flag
def check_flag_damping(self, name, flag_BAO, flag_damping, sigma2_perp, sigma2_para):
flag = 0
name_damping = []
name_damping.append("Tree_BAO")
name_damping.append("Tree_BAO_Reconstructed")
name_damping.append("Tree_BAO_Template")
name_no_damping = []
name_no_damping.append("Tree")
name_no_damping.append("Tree_NoWiggle")
name_no_damping.append("Tree_NonGaussian_Local")
name_no_damping.append("Tree_NonGaussian_Equilateral")
name_no_damping.append("Tree_NonGaussian_Orthogonal")
if name in name_damping:
if flag_BAO and flag_damping and sigma2_perp >= 0.0 and sigma2_para >= 0.0:
flag = 0
self.sigma2_perp = sigma2_perp
self.sigma2_para = sigma2_para
elif flag_BAO and flag_damping == False:
flag = 0
self.sigma2_perp = 1.0e-10
self.sigma2_para = 1.0e-10
else:
flag = - 1
elif name in name_no_damping:
if flag_damping:
flag = -1
else:
flag = 0
else:
flag = -1
return flag
def check_flag_PNG(self, name, flag_PNG, k_pri, mk_pri):
flag = 0
name_PNG = []
name_PNG.append("Tree_NonGaussian_Local")
name_PNG.append("Tree_NonGaussian_Equilateral")
name_PNG.append("Tree_NonGaussian_Orthogonal")
if name in name_PNG:
if flag_PNG and len(k_pri) > 1 and np.sum(mk_pri) > 0.0:
flag = 0
else:
flag = -1
return flag
def Integrand_B(self, ndim, xx, ncomp, ff, userdata):
self.xx_in = np.zeros(ndim[0])
self.ff_out = np.zeros(ncomp[0])
for i in range(ndim[0]):
self.xx_in[i] = xx[i]
self.ndim = ndim[0]
self.ncomp = ncomp[0]
self.select_B(self.name)
for i in range(ncomp[0]):
ff[i] = self.ff_out[i]
return 0
def calc_B(
self, name,
kbin=np.linspace(0.01, 0.2, 20), ell1=0, ell2=0, ELL=0,
flag_3pcf=False,
flag_BAO=False, sigma8_fid = - 1.0, fz_fid = - 1.0,
flag_recon = False, one_over_b1_fid = - 1.0, R = - 1.0,
flag_damping = False, sigma2_perp = -1.0, sigma2_para = -1.0,
flag_PNG = False, k_pri=np.zeros(1), mk_pri=np.zeros(1),
param_name = None):
(kbin2_out, kbin1_out) = np.meshgrid(kbin, kbin)
## flags ##
output_dict_ini = {
"kbin1": kbin1_out,
"kbin2": kbin2_out,
"B": np.zeros((len(kbin), len(kbin))),
"kbin1_fft": kbin1_out,
"kbin2_fft": kbin2_out,
"B_fft": np.zeros((len(kbin), len(kbin))),
"ell1": ell1,
"ell2": ell2,
"ELL" : ELL,
"flag_3pcf": flag_3pcf,
"flag_BAO": flag_BAO,
"flag_recon": flag_recon,
"sigma2_perp" : -1.0,
"sigma2_para" : -1.0
}
check_bao = self.check_flag_BAO(name, flag_BAO, sigma8_fid, fz_fid)
check_recon = self.check_flag_recon(name, flag_BAO, flag_recon, one_over_b1_fid, R)
check_damping = self.check_flag_damping(name, flag_BAO, flag_damping, sigma2_perp, sigma2_para)
check_png = self.check_flag_PNG(name, flag_PNG, k_pri, mk_pri)
if check_bao < 0:
print("FLAG_BAO: ERROR")
return output_dict_ini
if check_recon < 0:
print("FLAG_RECON: ERROR")
return output_dict_ini
if check_damping < 0:
print("FLAG_DAMPING: ERROR")
return output_dict_ini
if check_png < 0:
print("FLAG_PNG: ERROR")
return output_dict_ini
##################
self.name = name
self.param_name = param_name
## set kbin ##
if not flag_3pcf:
self.kbin = kbin
elif flag_3pcf:
kbin0 = np.logspace(np.log(3.0e-4), np.log(0.2), 100, base=np.e)
kbin1 = np.logspace(np.log(0.201), np.log(10.0), 50, base=np.e)
self.kbin = np.hstack([kbin0, kbin1])
## set multipole indices ##
self.ell1 = ell1
self.ell2 = ell2
self.ELL = ELL
## initialization ##
hitomipy.initializeInputPowerSpectrum_py()
## calc. wigner 3j symbols ##
hitomipy.setWigner3j_py()
## read linear power spectrum ##
hitomipy.readInputPowerSpectrum_py(
self.k_temp, self.P_temp, len(self.k_temp))
hitomipy.readInputNoWigglePowerSpectrum_py(
self.k_temp, self.P_nw_temp, len(self.k_temp))
if flag_PNG:
hitomipy.readInputTransferFunctionM_py(k_pri, mk_pri, len(k_pri))
## normalization ##
hitomipy.calcNormalizationUsingSigma8_py(self.sigma8_norm)
hitomipy.calcNormalizationNoWiggle_py(1.0)
## sigma2_perp and sigma2_para ##
if flag_BAO == True and flag_damping == False:
if flag_recon == False:
self.sigma2_perp = hitomipy.calcSigma_dd_py(self.sigma8_fid)
self.sigma2_para = (1.0 + self.fz_fid) * (1.0 + self.fz_fid) * self.sigma2_perp
elif flag_recon == True:
self.sigma2_perp = pycuba.Cuhre(
self.Integrand_P_sigma2_perp_Reconstructed,
2,
ncomp=1,
key=0, verbose=0 | 4)["results"][0]['integral']
self.sigma2_para = pycuba.Cuhre(
self.Integrand_P_sigma2_para_Reconstructed,
2,
ncomp=1,
key=0, verbose=0 | 4)["results"][0]['integral']
elif flag_BAO == False:
self.sigma2_perp = sigma2_perp
self.sigma2_para = sigma2_para
## compute bispectra ##
NDIM = self.select_ndim(self.name)
NCOMP = len(self.kbin)
if NCOMP > 1024:
print("# of NCOMP should be <= 1024, otherwise results become zero.")
return output_dict_ini
AA = []
for i in range(NCOMP):
print("k1 = %.6f [h/Mpc]" % self.kbin[i])
self.kmag1 = self.kbin[i]
if NDIM > 3:
NNEW = 5000
NMIN = 2
FLATNESS = 50
MAXEVAL = 5000
AA.append(pycuba.Suave(self.Integrand_B, NDIM, NNEW, NMIN, FLATNESS, ncomp=NCOMP, maxeval = MAXEVAL, verbose=0 | 4)["results"])
else:
AA.append(pycuba.Cuhre(self.Integrand_B, NDIM, ncomp=NCOMP, key=0, verbose=0 | 4)["results"])
bk_temp = np.zeros((NCOMP,NCOMP))
for i in range(NCOMP):
for j in range(NCOMP):
bk_temp[i,j] = AA[i][j]["integral"]
## finalize parameters ##
hitomipy.finalizeInputPowerSpectrum_py()
#########################
if flag_3pcf:
f_bk = interpolate.interp2d(self.kbin, self.kbin, bk_temp, kind="cubic")
(kbin2_out, kbin1_out) = np.meshgrid(kbin, kbin)
bk_out = f_bk(kbin, kbin)
(kbin2_fft, kbin1_fft) = np.meshgrid(self.kbin, self.kbin)
bk_fft = bk_temp
else:
(kbin2_out, kbin1_out) = np.meshgrid(self.kbin, self.kbin)
bk_out = bk_temp
(kbin2_fft, kbin1_fft) = np.meshgrid(self.kbin, self.kbin)
bk_fft = bk_temp
## output dict ##
output_dict = {
"kbin1": kbin1_out,
"kbin2": kbin2_out,
"B": bk_out,
"kbin1_fft": kbin1_fft,
"kbin2_fft": kbin2_fft,
"B_fft": bk_fft,
"ell1": ell1,
"ell2": ell2,
"ELL" : ELL,
"flag_3pcf": flag_3pcf,
"flag_BAO": flag_BAO,
"flag_recon": flag_recon,
"sigma2_perp": self.sigma2_perp,
"sigma2_para": self.sigma2_para,
}
return output_dict
def calc_B_to_3PCF(self, bk_in, rbin = np.linspace(0.0, 200, 41), N_fftlog = 1000):
if not bk_in["flag_3pcf"]:
(rbin2_out, rbin1_out) = np.meshgrid(rbin, rbin)
output_dict_ini = {
"rbin1": rbin1_out,
"rbin2": rbin2_out,
"3pcf": np.zeros((len(rbin),len(rbin))),
"rbin1_fft": rbin1_out,
"rbin2_fft": rbin2_out,
"3pcf_fft": np.zeros((len(rbin),len(rbin))),
"ell1": bk_in["ell1"],
"ell2": bk_in["ell2"],
"ELL": bk_in["ELL"],
"flag_3pcf": bk_in["flag_3pcf"],
"flag_BAO": bk_in["flag_BAO"],
"flag_recon": bk_in["flag_recon"],
"N_fftlog": N_fftlog}
return output_dict_ini
## set bispec. ##
BB = bk_in["B_fft"]
## set kbin ##
self.kbin = bk_in["kbin1_fft"][:,0]
## set multipole indices ##
self.ell1 = bk_in["ell1"]
self.ell2 = bk_in["ell2"]
self.ELL = bk_in["ELL"]
## set rbin ##
self.rbin = rbin
## input parameter for fftlog ##
NNN = N_fftlog
## compute 3PCF ##
kbin_for_zeta = np.logspace(np.log(self.kbin[0]), np.log(self.kbin[-1]), NNN, base=np.e)
CC = np.zeros((len(self.kbin), NNN))
for i in range(len(self.kbin)):
f_bk = interpolate.interp1d(self.kbin, BB[i,:], fill_value = "extrapolate", kind="cubic")
bk_for_zeta = f_bk(kbin_for_zeta)
r_temp = np.zeros(NNN)
zeta_temp = np.zeros(NNN)
hitomipy.hankel_py(self.ell2, 2, NNN, kbin_for_zeta, bk_for_zeta, r_temp, zeta_temp)
f_zeta = interpolate.interp1d(r_temp, zeta_temp, fill_value = "extrapolate", kind="cubic")
CC[i,:] = f_zeta(r_temp[:])
DD = np.zeros((NNN, NNN))
for j in range(NNN):
f_bk = interpolate.interp1d(self.kbin, CC[:,j], fill_value = "extrapolate", kind="cubic")
bk_for_zeta = f_bk(kbin_for_zeta)
r_temp = np.zeros(NNN)
zeta_temp = np.zeros(NNN)
hitomipy.hankel_py(self.ell1, 2, NNN, kbin_for_zeta, bk_for_zeta, r_temp, zeta_temp)
f_zeta = interpolate.interp1d(r_temp, zeta_temp, fill_value = "extrapolate", kind="cubic")
DD[:,j] = f_zeta(r_temp[:])
sign = np.real(1.0j**(self.ell1+self.ell2))
zeta_fft = sign * DD
f_zeta = interpolate.interp2d(r_temp, r_temp, zeta_fft, kind="cubic")
zeta_out = np.zeros((len(self.rbin),len(self.rbin)))
zeta_out[:,:] = f_zeta(self.rbin[:], self.rbin[:])
## rbin_out ##
(rbin2_out, rbin1_out) = np.meshgrid(self.rbin, self.rbin)
(rbin2_fft, rbin1_fft) = np.meshgrid(r_temp, r_temp)
## output dict ##
output_dict = {
"rbin1": rbin1_out,
"rbin2": rbin2_out,
"3pcf": zeta_out,
"rbin1_fft": rbin1_fft,
"rbin2_fft": rbin2_fft,
"3pcf_fft": zeta_fft,
"ell1": bk_in["ell1"],
"ell2": bk_in["ell2"],
"ELL": bk_in["ELL"],
"flag_3pcf": bk_in["flag_3pcf"],
"flag_BAO": bk_in["flag_BAO"],
"flag_recon": bk_in["flag_recon"],
"N_fftlog": N_fftlog}
return output_dict
def calc_3PCF_to_B(self, zeta_in, kbin = np.linspace(0.01, 0.2, 20)):
if not zeta_in["flag_3pcf"]:
(kbin2_out, kbin1_out) = np.meshgrid(kbin, kbin)
output_dict_ini = {
"kbin1": kbin1_out,
"kbin2": kbin2_out,
"B": np.zeros((len(kbin),len(kbin))),
"ell1": zeta_in["ell1"],
"ell2": zeta_in["ell2"],
"ELL": zeta_in["ELL"],
"flag_3pcf": zeta_in["flag_3pcf"],
"flag_BAO": zeta_in["flag_BAO"],
"flag_recon": zeta_in["flag_recon"],
"N_fftlog": zeta_in["N_fftlog"]}
return output_dict_ini
## set 3pcf ##
BB = zeta_in["3pcf_fft"]
## set rbin ##
self.rbin = zeta_in["rbin1_fft"][:,0]
## set multipole indices ##
self.ell1 = zeta_in["ell1"]
self.ell2 = zeta_in["ell2"]
self.ELL = zeta_in["ELL"]
## set kbin ##
self.kbin = kbin
## input parameter for fftlog ##
NNN = zeta_in["N_fftlog"]
## compute bispec ##
rbin_for_bk = np.logspace(np.log(self.rbin[0]), np.log(self.rbin[-1]), NNN, base=np.e)
CC = np.zeros((len(self.rbin), len(self.kbin)))
for i in range(len(self.rbin)):
f_zeta = interpolate.interp1d(self.rbin, BB[i,:], fill_value = "extrapolate", kind="cubic")
zeta_for_bk = f_zeta(rbin_for_bk)
k_temp = np.zeros(NNN)
bk_temp = np.zeros(NNN)
hitomipy.hankel_py(self.ell2, 2, NNN, rbin_for_bk, zeta_for_bk, k_temp, bk_temp)
f_bk = interpolate.interp1d(k_temp, bk_temp, fill_value = "extrapolate", kind="cubic")
CC[i,:] = f_bk(self.kbin[:])
DD = np.zeros((len(self.kbin), len(self.kbin)))
for j in range(len(self.kbin)):
f_zeta = interpolate.interp1d(self.rbin, CC[:,j], fill_value = "extrapolate", kind="cubic")
zeta_for_bk = f_zeta(rbin_for_bk)
k_temp = np.zeros(NNN)
bk_temp = np.zeros(NNN)
hitomipy.hankel_py(self.ell1, 2, NNN, rbin_for_bk, zeta_for_bk, k_temp, bk_temp)
f_bk = interpolate.interp1d(k_temp, bk_temp, fill_value = "extrapolate", kind="cubic")
DD[:,j] = f_bk(self.kbin[:])
sign = np.real((-1.0j)**(self.ell1+self.ell2)) * (2.0*np.pi)**6
bk_out = sign * DD
        ## kbin_out ##
(kbin2_out, kbin1_out) = np.meshgrid(self.kbin, self.kbin)
## output dict ##
output_dict = {
"kbin1": kbin1_out,
"kbin2": kbin2_out,
"B": bk_out,
"ell1": self.ell1,
"ell2": self.ell2,
"ELL": self.ELL,
"flag_3pcf": zeta_in["flag_3pcf"],
"flag_BAO": zeta_in["flag_BAO"],
"flag_recon": zeta_in["flag_recon"],
"N_fftlog": zeta_in["N_fftlog"]}
return output_dict
def Integrand_P_sigma2_perp_Reconstructed(self, ndim, xx, ncomp, ff, userdata):
self.xx_in = np.zeros(ndim[0])
self.ff_out = np.zeros(ncomp[0])
for i in range(ndim[0]):
self.xx_in[i] = xx[i]
self.ndim = ndim[0]
self.ncomp = ncomp[0]
hitomipy.integrand_P_sigma2_perp_Reconstructed_py(
self.xx_in, self.ndim, self.ff_out, self.ncomp,
self.kbin, len(self.kbin),
self.sigma8_fid, self.fz_fid, self.b1,
self.one_over_b1_fid, self.R)
for i in range(ncomp[0]):
ff[i] = self.ff_out[i]
return 0
def Integrand_P_sigma2_para_Reconstructed(self, ndim, xx, ncomp, ff, userdata):
self.xx_in = np.zeros(ndim[0])
self.ff_out = np.zeros(ncomp[0])
for i in range(ndim[0]):
self.xx_in[i] = xx[i]
self.ndim = ndim[0]
self.ncomp = ncomp[0]
hitomipy.integrand_P_sigma2_para_Reconstructed_py(
self.xx_in, self.ndim, self.ff_out, self.ncomp,
self.kbin, len(self.kbin),
self.sigma8_fid, self.fz_fid, self.b1,
self.one_over_b1_fid, self.R)
for i in range(ncomp[0]):
ff[i] = self.ff_out[i]
return 0
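if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module). It assumes a
    # plain-text table "linear_pk.dat" with columns (k, P(k)) and placeholder
    # parameter values; the no-wiggle spectrum is reused from P(k) purely for
    # demonstration. Only methods defined above are called.
    k_in, pk_in = np.loadtxt("linear_pk.dat", unpack=True)  # hypothetical input file
    bisp = ClassBiSpectrum()
    bisp.set_input_pk(k_in, pk_in)
    bisp.set_input_pk_nw(k_in, pk_in)  # placeholder: a proper no-wiggle P(k) should go here
    bisp.set_normalization(0.8)        # placeholder sigma8 normalization
    bisp.set_params({"sigma8": 0.8, "fz": 0.77, "b1": 2.0, "b2": 0.0, "bK2": 0.0})
    result = bisp.calc_B("Tree", kbin=np.linspace(0.01, 0.2, 20), ell1=0, ell2=0, ELL=0)
    print(result["B"].shape)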
|
the-stack_106_26087 | # import json
import json
from django.db.models import QuerySet, Q
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from apps.ticket.models import TicketCustomField
from apps.workflow.models import State
from service.account.account_base_service import AccountBaseService
from service.base_service import BaseService
from service.common.constant_service import CONSTANT_SERVICE
from service.common.log_service import auto_log
from service.workflow.workflow_custom_field_service import WorkflowCustomFieldService
from service.workflow.workflow_runscript_service import WorkflowRunScriptService
from service.workflow.workflow_transition_service import WorkflowTransitionService
class WorkflowStateService(BaseService):
def __init__(self):
pass
@staticmethod
@auto_log
def get_workflow_states(workflow_id):
"""
        Get the list of states for a workflow. Each workflow has only a few states, so pagination is not needed.
:param workflow_id:
:return:
"""
if not workflow_id:
return False, 'except workflow_id but not provided'
else:
workflow_states = State.objects.filter(workflow_id=workflow_id, is_deleted=False).order_by('order_id')
return workflow_states, ''
@staticmethod
@auto_log
def get_workflow_states_serialize(workflow_id, per_page=10, page=1, query_value=''):
"""
        Get serialized workflow state records.
:param workflow_id:
:param per_page:
:param page:
        :param query_value:
:return:
"""
if not workflow_id:
return False, 'except workflow_id but not provided'
query_params = Q(workflow_id=workflow_id, is_deleted=False)
if query_value:
query_params &= Q(name__contains=query_value)
workflow_states = State.objects.filter(query_params).order_by('order_id')
paginator = Paginator(workflow_states, per_page)
try:
workflow_states_result_paginator = paginator.page(page)
except PageNotAnInteger:
workflow_states_result_paginator = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results
workflow_states_result_paginator = paginator.page(paginator.num_pages)
workflow_states_object_list = workflow_states_result_paginator.object_list
workflow_states_restful_list = []
for workflow_states_object in workflow_states_object_list:
participant_info, msg = WorkflowStateService.get_format_participant_info(workflow_states_object.participant_type_id, workflow_states_object.participant)
result_dict = dict(id=workflow_states_object.id, name=workflow_states_object.name, workflow_id=workflow_states_object.workflow_id,
sub_workflow_id=workflow_states_object.sub_workflow_id, is_hidden=workflow_states_object.is_hidden,
order_id=workflow_states_object.order_id, type_id=workflow_states_object.type_id,
participant_type_id=workflow_states_object.participant_type_id, participant=workflow_states_object.participant,
distribute_type_id=workflow_states_object.distribute_type_id,
state_field_str=json.loads(workflow_states_object.state_field_str), label=json.loads(workflow_states_object.label),
creator=workflow_states_object.creator, participant_info=participant_info,
remember_last_man_enable=1 if workflow_states_object.remember_last_man_enable else 0,
gmt_created=str(workflow_states_object.gmt_created)[:19])
workflow_states_restful_list.append(result_dict)
return workflow_states_restful_list, dict(per_page=per_page, page=page, total=paginator.count)
@staticmethod
@auto_log
def get_workflow_state_by_id(state_id):
"""
        Get state details.
:param state_id:
:return:
"""
if not state_id:
return False, 'except state_id but not provided'
else:
workflow_state = State.objects.filter(id=state_id, is_deleted=False).first()
if not workflow_state:
return False, '工单状态不存在或已被删除'
return workflow_state, ''
@classmethod
@auto_log
def get_restful_state_info_by_id(cls, state_id):
if not state_id:
return False, 'except state_id but not provided'
else:
workflow_state = State.objects.filter(id=state_id, is_deleted=False).first()
if not workflow_state:
return False, '工单状态不存在或已被删除'
state_info_dict = dict(id=workflow_state.id, name=workflow_state.name, workflow_id=workflow_state.workflow_id,
sub_workflow_id=workflow_state.sub_workflow_id, distribute_type_id=workflow_state.distribute_type_id,
is_hidden=workflow_state.is_hidden, order_id=workflow_state.order_id, type_id=workflow_state.type_id,
participant_type_id=workflow_state.participant_type_id, participant=workflow_state.participant,
state_field=json.loads(workflow_state.state_field_str), label=json.loads(workflow_state.label),
creator=workflow_state.creator, gmt_created=str(workflow_state.gmt_created)[:19]
)
return state_info_dict, ''
@classmethod
@auto_log
def get_workflow_start_state(cls, workflow_id):
"""
        Get the initial state of a workflow.
:param workflow_id:
:return:
"""
workflow_state_obj = State.objects.filter(
is_deleted=0, workflow_id=workflow_id, type_id=CONSTANT_SERVICE.STATE_TYPE_START).first()
if workflow_state_obj:
return workflow_state_obj, ''
else:
return None, '该工作流未配置初始状态,请检查工作流配置'
@classmethod
@auto_log
def get_states_info_by_state_id_list(cls, state_id_list):
state_queryset = State.objects.filter(is_deleted=0, id__in=state_id_list).all()
state_info_dict = {}
for state in state_queryset:
state_info_dict[state.id] = state.name
return state_info_dict, ''
@classmethod
@auto_log
def get_workflow_init_state(cls, workflow_id):
"""
        Get the workflow's initial state info, including the allowed transitions.
:param workflow_id:
:return:
"""
init_state_obj = State.objects.filter(workflow_id=workflow_id, is_deleted=False, type_id=CONSTANT_SERVICE.STATE_TYPE_START).first()
if not init_state_obj:
return False, '该工作流尚未配置初始状态'
transition_queryset, msg = WorkflowTransitionService.get_state_transition_queryset(init_state_obj.id)
transition_info_list = []
for transition in transition_queryset:
transition_info_list.append(dict(transition_id=transition.id, transition_name=transition.name))
        # base ticket fields and their attributes
field_list = []
field_list.append(dict(field_key='title', field_name=u'标题', field_value=None, order_id=20,
field_type_id=CONSTANT_SERVICE.FIELD_TYPE_STR,
field_attribute=CONSTANT_SERVICE.FIELD_ATTRIBUTE_RO, description='工单的标题',
field_choice={}, boolean_field_display={}, default_value=None, field_template='', label={}))
custom_field_dict, msg = WorkflowCustomFieldService.get_workflow_custom_field(workflow_id)
for key, value in custom_field_dict.items():
field_list.append(dict(field_key=key, field_name=custom_field_dict[key]['field_name'], field_value=None, order_id=custom_field_dict[key]['order_id'],
field_type_id=custom_field_dict[key]['field_type_id'],
field_attribute=CONSTANT_SERVICE.FIELD_ATTRIBUTE_RO,
default_value=custom_field_dict[key]['default_value'],
description=custom_field_dict[key]['description'],
field_template=custom_field_dict[key]['field_template'],
                                   boolean_field_display=json.loads(custom_field_dict[key]['boolean_field_display']) if custom_field_dict[key]['boolean_field_display'] else {},  # the model previously allowed empty values; kept this way for backward compatibility
field_choice=json.loads(custom_field_dict[key]['field_choice']),
label=json.loads(custom_field_dict[key]['label'])
))
state_field_dict = json.loads(init_state_obj.state_field_str)
state_field_key_list = state_field_dict.keys()
new_field_list = []
for field0 in field_list:
if field0['field_key'] in state_field_key_list:
field0['field_attribute'] = state_field_dict[field0['field_key']]
new_field_list.append(field0)
        # sort the fields by order_id
new_field_list = sorted(new_field_list, key=lambda r: r['order_id'])
state_info_dict = init_state_obj.get_dict()
state_info_dict.update(field_list=new_field_list, label=json.loads(init_state_obj.label), transition=transition_info_list)
state_info_dict.pop('state_field_str')
return state_info_dict, ''
@classmethod
@auto_log
def get_format_participant_info(cls, participant_type_id, participant):
"""
        Get formatted participant information.
:param participant_type_id:
:param participant:
:return:
"""
participant_name = participant
participant_type_name = ''
participant_alias = ''
if participant_type_id == CONSTANT_SERVICE.PARTICIPANT_TYPE_PERSONAL:
participant_type_name = '个人'
participant_user_obj, msg = AccountBaseService.get_user_by_username(participant)
if not participant_user_obj:
participant_alias = participant
else:
participant_alias = participant_user_obj.alias
elif participant_type_id == CONSTANT_SERVICE.PARTICIPANT_TYPE_MULTI:
participant_type_name = '多人'
            # look up each user's information in turn
participant_name_list = participant_name.split(',')
participant_alias_list = []
for participant_name0 in participant_name_list:
participant_user_obj, msg = AccountBaseService.get_user_by_username(participant_name0)
if not participant_user_obj:
participant_alias_list.append(participant_name0)
else:
participant_alias_list.append(participant_user_obj.alias)
participant_alias = ','.join(participant_alias_list)
elif participant_type_id == CONSTANT_SERVICE.PARTICIPANT_TYPE_DEPT:
participant_type_name = '部门'
dept_obj, msg = AccountBaseService.get_dept_by_id(int(participant))
if not dept_obj:
                return False, 'dept does not exist or has been deleted'
participant_name = dept_obj.name
participant_alias = participant_name
elif participant_type_id == CONSTANT_SERVICE.PARTICIPANT_TYPE_ROLE:
participant_type_name = '角色'
role_obj, msg = AccountBaseService.get_role_by_id(int(participant))
if not role_obj:
                return False, 'role does not exist or has been deleted'
participant_name = role_obj.name
participant_alias = participant_name
elif participant_type_id == CONSTANT_SERVICE.PARTICIPANT_TYPE_VARIABLE:
participant_type_name = '变量'
if participant_name == 'creator':
participant_alias = '工单创建人'
elif participant_name == 'creator_tl':
participant_alias = '工单创建人的tl'
elif participant_type_id == CONSTANT_SERVICE.PARTICIPANT_TYPE_ROBOT:
if participant:
flag, result = WorkflowRunScriptService.get_run_script_by_id(int(participant))
if flag:
participant_alias = result.name
elif participant_type_id == CONSTANT_SERVICE.PARTICIPANT_TYPE_HOOK:
participant_type_name = 'hook'
participant_alias = participant_name
return dict(participant=participant, participant_name=participant_name, participant_type_id=participant_type_id,
participant_type_name=participant_type_name, participant_alias=participant_alias), ''
@classmethod
@auto_log
def add_workflow_state(cls, workflow_id, name, sub_workflow_id, is_hidden, order_id, type_id, remember_last_man_enable,
participant_type_id, participant, distribute_type_id, state_field_str, label, creator):
"""
        Add a new workflow state.
:param workflow_id:
:param name:
:param sub_workflow_id:
:param is_hidden:
:param order_id:
:param type_id:
:param remember_last_man_enable:
:param participant_type_id:
:param participant:
:param distribute_type_id:
:param state_field_str:
:param label:
:param creator:
:return:
"""
workflow_state_obj = State(workflow_id=workflow_id, name=name, sub_workflow_id=sub_workflow_id, is_hidden=is_hidden,
order_id=order_id, type_id=type_id, remember_last_man_enable=remember_last_man_enable,
participant_type_id=participant_type_id, participant=participant, distribute_type_id=distribute_type_id,
state_field_str=state_field_str, label=label, creator=creator)
workflow_state_obj.save()
return workflow_state_obj.id, ''
@classmethod
@auto_log
def edit_workflow_state(cls, state_id, workflow_id, name, sub_workflow_id, is_hidden, order_id, type_id,
remember_last_man_enable,
participant_type_id, participant, distribute_type_id, state_field_str, label, creator):
"""
        Edit an existing workflow state.
:param state_id:
:param workflow_id:
:param name:
:param sub_workflow_id:
:param is_hidden:
:param order_id:
:param type_id:
:param remember_last_man_enable:
:param participant_type_id:
:param participant:
:param distribute_type_id:
:param state_field_str:
:param label:
:param creator:
:return:
"""
state_obj = State.objects.filter(id=state_id, is_deleted=0)
if state_obj:
state_obj.update(workflow_id=workflow_id, name=name, sub_workflow_id=sub_workflow_id,
is_hidden=is_hidden, order_id=order_id, type_id=type_id,
remember_last_man_enable=remember_last_man_enable, participant_type_id=participant_type_id,
participant=participant, distribute_type_id=distribute_type_id,
state_field_str=state_field_str, label=label)
return state_id, ''
@classmethod
@auto_log
def del_workflow_state(cls, state_id):
"""
        Delete a workflow state.
:param state_id:
:return:
"""
state_obj = State.objects.filter(id=state_id, is_deleted=0)
if state_obj:
state_obj.update(is_deleted=1)
return state_id, ''
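# Illustrative usage sketch (not part of the original module). Within a Django
# context where this app is installed and migrated, the service methods are
# static/class methods and can be called directly; workflow_id=1 below is a
# placeholder value:
#
#     states, page_info = WorkflowStateService.get_workflow_states_serialize(
#         workflow_id=1, per_page=10, page=1, query_value='')
#     init_state_info, msg = WorkflowStateService.get_workflow_init_state(1)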
|
the-stack_106_26088 | # Copyright 2019 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import math
from functools import lru_cache
from heapq import merge
from itertools import zip_longest
from typing import Callable
from typing import cast
from typing import Generic
from typing import Iterable
from typing import Optional
from typing import Tuple
from typing import TypeVar
from sortedcontainers import SortedDict
from clusterman.math.piecewise_types import XValue
from clusterman.math.piecewise_types import XValueDiff
_LRU_CACHE_SIZE = 5
T = TypeVar("T")
def hour_transform(td: datetime.timedelta) -> float:
return td.total_seconds() / 3600
def piecewise_breakpoint_generator(breakpoints, start_time, end_time):
for x in breakpoints.irange(start_time, end_time):
yield x
yield end_time
class PiecewiseConstantFunction(Generic[T]):
def __init__(self, initial_value: float = 0) -> None:
""" Initialize the constant function to a particular value
:param initial_value: the starting value for the function
"""
self.breakpoints = SortedDict()
self._initial_value: float = initial_value
def add_breakpoint(self, xval: XValue[T], yval: float, squash: bool = True) -> None:
""" Add a breakpoint to the function and update the value
Let f(x) be the original function, and next_bp be the first breakpoint > xval; after calling
this method, the function will be modified to f'(x) = yval for x \in [xval, next_bp)
:param xval: the x-position of the breakpoint to add/modify
:param yval: the value to set the function to at xval
:param squash: if True and f(xval) = yval before calling this method, the function will remain unchanged
"""
if squash and self.call(xval) == yval:
return
self.breakpoints[xval] = yval
def add_delta(self, xval: XValue[T], delta: float) -> None:
""" Modify the function value for x >= xval
Let f(x) be the original function; After calling this method,
the function will be modified to f'(x) = f(x) + delta for all x >= xval
:param xval: the x-position of the breakpoint to add/modify
:param delta: the amount to shift the function value by at xval
"""
if delta == 0:
return
if xval not in self.breakpoints:
self.breakpoints[xval] = self.call(xval)
for x in self.breakpoints.irange(xval):
self.breakpoints[x] += delta
self.values.cache_clear()
self.integrals.cache_clear()
def call(self, xval: XValue[T]) -> float:
""" Compute the output of the function at a point
:param xval: the x-position to compute
:returns: f(xval)
"""
if len(self.breakpoints) == 0 or xval < self.breakpoints.keys()[0]:
return self._initial_value
else:
lower_index = self.breakpoints.bisect(xval) - 1
return self.breakpoints.values()[lower_index]
def _breakpoint_info(self, index: Optional[int]) -> Tuple[Optional[int], Optional[XValue[T]], float]:
""" Helper function for computing breakpoint information
:param index: index of the breakpoint to compute
:returns: (index, breakpoint, value)
* index is the breakpoint index (if it exists), or None if we're off the end
* breakpoint is the x-value of the breakpoint, or None if we're off the end
* value is f(breakpoint), or f(last_breakpoint) if we're off the end
"""
try:
breakpoint, value = self.breakpoints.peekitem(index)
except IndexError:
index = None
breakpoint, value = None, self.breakpoints.values()[-1]
return (index, breakpoint, value)
@lru_cache(maxsize=_LRU_CACHE_SIZE) # cache results of calls to this function
def values(self, start: XValue[T], stop: XValue[T], step: XValueDiff[T]) -> "SortedDict[XValue[T], float]":
""" Compute a sequence of values of the function
This is more efficient than [self.call(xval) for xval in range(start, stop, step)] because each self.call(..)
takes O(log n) time due to the binary tree structure of self._breakpoints. This method can compute the range
of values in linear time in the range, which is significantly faster for large value ranges.
:param start: lower bound of value sequence
:param stop: upper bound of value sequence
:param step: width between points in the sequence
:returns: a SortedDict of the values of the function between start and stop, with the x-distance between
each data-point equal to `step`; like normal "range" functions the right endpoint is not included
"""
step = step or (stop - start)
if len(self.breakpoints) == 0:
num_values = int(math.ceil((stop - start) / step))
return SortedDict([(start + step * i, self._initial_value) for i in range(num_values)])
curr_xval = start
curr_value = self.call(start)
next_index, next_breakpoint, next_value = self._breakpoint_info(self.breakpoints.bisect(start))
sequence = SortedDict()
while curr_xval < stop:
sequence[curr_xval] = curr_value
next_xval = min(stop, curr_xval + step)
while next_breakpoint and next_xval >= next_breakpoint:
assert next_index is not None # if next_breakpoint is set, next_index should also be set
curr_value = next_value
next_index, next_breakpoint, next_value = self._breakpoint_info(next_index + 1)
curr_xval = next_xval
return sequence
@lru_cache(maxsize=_LRU_CACHE_SIZE) # cache results of calls to this function
def integrals(
self,
start: XValue[T],
stop: XValue[T],
step: XValueDiff[T],
transform: Callable[[XValueDiff[T]], float] = lambda x: cast(float, x),
) -> "SortedDict[XValue[T], float]":
""" Compute a sequence of integrals of the function
:param start: lower bound of integral sequence
:param stop: upper bound of integral sequence
:param step: width of each "chunk" of the integral sequence
:param transform: function to apply to x-widths before computing the integral
:returns: a SortedDict of the numeric integral values of the function between start and stop;
each integral has a range of size `step`, and the key-value is the left endpoint of the chunk
"""
step = step or (stop - start)
if len(self.breakpoints) == 0:
# If there are no breakpoints, just split up the range into even widths and compute
# (width * self._initial_value) for each chunk.
step_width = transform(step)
range_width = transform(stop - start)
num_full_chunks = int(range_width // step_width)
sequence = SortedDict(
[(start + step * i, step_width * self._initial_value) for i in range(num_full_chunks)]
)
# If the width does not evenly divide the range, compute the last chunk separately
if range_width % step_width != 0:
sequence[start + step * num_full_chunks] = range_width % step_width * self._initial_value
return sequence
# Set up starting loop parameters
curr_xval = start
curr_value = self.call(start)
next_index, next_breakpoint, next_value = self._breakpoint_info(self.breakpoints.bisect(start))
# Loop through the entire range and compute the integral of each chunk
sequence = SortedDict()
while curr_xval < stop:
orig_xval = curr_xval
next_xval = min(stop, curr_xval + step)
# For each breakpoint in [curr_xval, next_xval), compute the area of that sub-chunk
next_integral: float = 0
while next_breakpoint and next_xval >= next_breakpoint:
assert next_index is not None # if next_breakpoint is set, next_index should also be set
next_integral += transform(next_breakpoint - curr_xval) * curr_value
curr_xval = next_breakpoint
curr_value = next_value
next_index, next_breakpoint, next_value = self._breakpoint_info(next_index + 1)
# Handle any remaining width between the last breakpoint and the end of the chunk
next_integral += transform(next_xval - curr_xval) * curr_value
sequence[orig_xval] = next_integral
curr_xval = next_xval
return sequence
def integral(
self, start: XValue[T], stop: XValue[T], transform: Callable[[XValueDiff[T]], float] = lambda x: cast(float, x),
) -> float:
""" Helper function to compute the integral of the whole specified range
:param start: lower bound of the integral
:param stop: upper bound of the integral
:returns: the integral of the function between start and stop
"""
return self.integrals(start, stop, (stop - start), transform).values()[0]
def __str__(self) -> str:
ret = f"{self._initial_value}, x < {self.breakpoints.keys()[0]}\n"
for xval, yval in self.breakpoints.items():
ret += f"{yval}, x >= {xval}\n"
return ret
def __add__(self, other: "PiecewiseConstantFunction[T]") -> "PiecewiseConstantFunction[T]":
new_func: "PiecewiseConstantFunction[T]" = PiecewiseConstantFunction(self._initial_value + other._initial_value)
for xval, y0, y1 in _merged_breakpoints(self, other):
new_func.add_breakpoint(xval, y0 + y1)
return new_func
def __sub__(self, other: "PiecewiseConstantFunction[T]") -> "PiecewiseConstantFunction[T]":
new_func: "PiecewiseConstantFunction[T]" = PiecewiseConstantFunction(self._initial_value - other._initial_value)
for xval, y0, y1 in _merged_breakpoints(self, other):
new_func.add_breakpoint(xval, y0 - y1)
return new_func
def __mul__(self, other: "PiecewiseConstantFunction[T]") -> "PiecewiseConstantFunction[T]":
new_func: "PiecewiseConstantFunction[T]" = PiecewiseConstantFunction(self._initial_value * other._initial_value)
for xval, y0, y1 in _merged_breakpoints(self, other):
new_func.add_breakpoint(xval, y0 * y1)
return new_func
def __truediv__(self, other: "PiecewiseConstantFunction[T]") -> "PiecewiseConstantFunction[T]":
try:
new_func: "PiecewiseConstantFunction[T]" = PiecewiseConstantFunction(
self._initial_value / other._initial_value
)
except ZeroDivisionError:
new_func = PiecewiseConstantFunction()
for xval, y0, y1 in _merged_breakpoints(self, other):
try:
new_func.add_breakpoint(xval, y0 / y1)
except ZeroDivisionError:
new_func.add_breakpoint(xval, 0)
return new_func
def piecewise_max(
fn0: PiecewiseConstantFunction[T], fn1: PiecewiseConstantFunction[T],
) -> PiecewiseConstantFunction[T]:
new_func: PiecewiseConstantFunction[T] = PiecewiseConstantFunction(max(fn0._initial_value, fn1._initial_value))
for xval, y0, y1 in _merged_breakpoints(fn0, fn1):
new_func.add_breakpoint(xval, max(y0, y1))
return new_func
def _merged_breakpoints(
fn0: PiecewiseConstantFunction[T], fn1: PiecewiseConstantFunction[T],
) -> Iterable[Tuple[XValue[T], float, float]]:
bp0 = zip_longest(fn0.breakpoints.items(), [], fillvalue=0)
bp1 = zip_longest(fn1.breakpoints.items(), [], fillvalue=1)
yprev0, yprev1 = fn0._initial_value, fn1._initial_value
for (x, y), fnnum in merge(bp0, bp1):
if fnnum == 0:
yield x, y, yprev1
yprev0 = y
elif fnnum == 1:
yield x, yprev0, y
yprev1 = y
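if __name__ == "__main__":
    # Illustrative sketch (not part of the original module) showing the basic API
    # on integer x-values: breakpoints, deltas, pointwise evaluation and a numeric
    # integral. The values used here are arbitrary.
    fn: PiecewiseConstantFunction[int] = PiecewiseConstantFunction(initial_value=1)
    fn.add_breakpoint(5, 3)    # f(x) = 3 for x >= 5 (until the next breakpoint)
    fn.add_delta(8, -2)        # shift f(x) down by 2 for all x >= 8
    print(fn.call(0), fn.call(6), fn.call(9))   # -> 1 3 1
    print(fn.integral(0, 10))                   # 5*1 + 3*3 + 2*1 -> 16
    g = piecewise_max(fn, PiecewiseConstantFunction(2))
    print(g.call(0), g.call(6))                 # -> 2 3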
|
the-stack_106_26090 | # coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Trainer for RL environments.
For now we only support PPO as RL algorithm.
Sample invocation:
TRAIN_BATCH_SIZE=32
python trax/rl_trainer.py \
--config_file=trax/rl/configs/acrobot.gin \
--train_batch_size=${TRAIN_BATCH_SIZE} \
--output_dir=${HOME}/ppo_acrobot \
--vmodule=*/tensor2tensor/*=1 \
--alsologtostderr
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
from absl import app
from absl import flags
from absl import logging
import gin
import jax
from jax.config import config
from tensor2tensor import envs # pylint: disable=unused-import
from tensor2tensor.envs import env_problem_utils
from tensor2tensor.rl.google import atari_utils # GOOGLE-INTERNAL:
from tensor2tensor.trax import rl # pylint: disable=unused-import
from tensor2tensor.trax.rl import envs as rl_envs # pylint: disable=unused-import
from tensor2tensor.trax.rl import trainers as rl_trainers
FLAGS = flags.FLAGS
flags.DEFINE_boolean(
"jax_debug_nans", False,
"Setting to true will help to debug nans and disable jit.")
flags.DEFINE_boolean("disable_jit", False, "Setting to true will disable jit.")
flags.DEFINE_string("output_dir", "", "Output dir.")
flags.DEFINE_multi_string("config_file", None,
"Configuration file with parameters (.gin).")
flags.DEFINE_multi_string("config", None,
"Configuration parameters (gin string).")
flags.DEFINE_bool("use_tpu", False, "Whether we're running on TPU.")
flags.DEFINE_bool("xm", False, "Copy atari roms?")
flags.DEFINE_integer("train_batch_size", 32,
"Number of parallel environments during training.")
flags.DEFINE_integer("eval_batch_size", 4, "Batch size for evaluation.")
flags.DEFINE_boolean("parallelize_envs", False,
"If true, sets parallelism to number of cpu cores.")
flags.DEFINE_string("trajectory_dump_dir", "",
"Directory to dump trajectories to.")
# TODO(afrozm): Find a better way to do these configurations.
flags.DEFINE_string("train_server_bns", "", "Train Server's BNS.")
flags.DEFINE_string("eval_server_bns", "", "Eval Server's BNS.")
# Not just "train" to avoid a conflict with trax.train in GIN files.
@gin.configurable(blacklist=[
"output_dir", "train_batch_size", "eval_batch_size", "trajectory_dump_dir"
])
def train_rl(
output_dir,
train_batch_size,
eval_batch_size,
env_name="Acrobot-v1",
max_timestep=None,
clip_rewards=False,
rendered_env=False,
resize_dims=(105, 80),
trainer_class=rl_trainers.PPO,
n_epochs=10000,
trajectory_dump_dir=None,
):
"""Train the RL agent.
Args:
output_dir: Output directory.
train_batch_size: Number of parallel environments to use for training.
eval_batch_size: Number of parallel environments to use for evaluation.
env_name: Name of the environment.
max_timestep: Int or None, the maximum number of timesteps in a trajectory.
The environment is wrapped in a TimeLimit wrapper.
clip_rewards: Whether to clip and discretize the rewards.
rendered_env: Whether the environment has visual input. If so, a
RenderedEnvProblem will be used.
resize_dims: Pair (height, width), dimensions to resize the visual
observations to.
trainer_class: RLTrainer class to use.
    n_epochs: Number of epochs to run the training for.
trajectory_dump_dir: Directory to dump trajectories to.
"""
if FLAGS.jax_debug_nans:
config.update("jax_debug_nans", True)
if FLAGS.use_tpu:
config.update("jax_platform_name", "tpu")
else:
config.update("jax_platform_name", "gpu")
# TODO(pkozakowski): Find a better way to determine this.
train_env_kwargs = {}
eval_env_kwargs = {}
if "OnlineTuneEnv" in env_name:
# TODO(pkozakowski): Separate env output dirs by train/eval and epoch.
train_env_kwargs = {"output_dir": os.path.join(output_dir, "envs/train")}
eval_env_kwargs = {"output_dir": os.path.join(output_dir, "envs/eval")}
if "ClientEnv" in env_name:
train_env_kwargs["per_env_kwargs"] = [{
"remote_env_address": os.path.join(FLAGS.train_server_bns, str(replica))
} for replica in range(train_batch_size)]
eval_env_kwargs["per_env_kwargs"] = [{
"remote_env_address": os.path.join(FLAGS.eval_server_bns, str(replica))
} for replica in range(eval_batch_size)]
# TODO(afrozm): Should we leave out some cores?
parallelism = multiprocessing.cpu_count() if FLAGS.parallelize_envs else 1
train_env = env_problem_utils.make_env(
batch_size=train_batch_size,
env_problem_name=env_name,
resize=rendered_env,
resize_dims=resize_dims,
max_timestep=max_timestep,
clip_rewards=clip_rewards,
parallelism=parallelism,
use_tpu=FLAGS.use_tpu,
**train_env_kwargs)
assert train_env
eval_env = env_problem_utils.make_env(
batch_size=eval_batch_size,
env_problem_name=env_name,
resize=rendered_env,
resize_dims=resize_dims,
max_timestep=max_timestep,
clip_rewards=clip_rewards,
parallelism=parallelism,
use_tpu=FLAGS.use_tpu,
**eval_env_kwargs)
assert eval_env
def run_training_loop():
"""Runs the training loop."""
logging.info("Starting the training loop.")
trainer = trainer_class(
output_dir=output_dir,
train_env=train_env,
eval_env=eval_env,
trajectory_dump_dir=trajectory_dump_dir,
)
trainer.training_loop(n_epochs=n_epochs)
if FLAGS.jax_debug_nans or FLAGS.disable_jit:
with jax.disable_jit():
run_training_loop()
else:
run_training_loop()
def main(argv):
del argv
logging.info("Starting RL training.")
gin_configs = FLAGS.config or []
gin.parse_config_files_and_bindings(FLAGS.config_file, gin_configs)
train_rl(
output_dir=FLAGS.output_dir,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
trajectory_dump_dir=(FLAGS.trajectory_dump_dir or None),
)
if __name__ == "__main__":
app.run(main)
|
the-stack_106_26092 | # -*- coding: utf-8 -*-
"""
lantz.drivers.legacy.kentech.hri
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Implements the driver for Kentech High Repetition Rate Image Intensifier
revisions 1 and 2.
Implementation Notes
--------------------
The set of commands is cumbersome and inconsistent. Moreover, each revision
introduces backward incompatible changes. The Lantz driver abstracts those
differences.
Sources::
- LaVision PicoStar HR12
- HRI Commands obtained from Kentech
- HRI.cl from DaVis
- Lantz reverse engineering team
:copyright: 2015 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from lantz import Feat, Action, Q_
from lantz.drivers.legacy.serial import SerialDriver
from lantz import errors
def between(s, before, after):
ndx1 = s.index(before)
ndx2 = s.index(after)
return s[ndx1+len(before):ndx2]
class HRI(SerialDriver):
"""Kentech High Repetition Rate Image Intensifier.
"""
SEND_TERMINATION = '\r'
RECV_TERMINATION = '\n'
ENCODING = 'ascii'
def query(self, command, *, send_args=(None, None), recv_args=(None, None)):
"""Send query to the instrument and return the answer.
Set remote mode if needed.
"""
if command and not self.recall('remote'):
self.log_info('Setting Remote.')
self.remote = True
return super().query(command, send_args=send_args, recv_args=recv_args)
def query_expect(self, command, recv_termination=None, expected='ok'):
ans = self.query(command, recv_args=(recv_termination, HRI.ENCODING))
if expected and not expected in ans:
raise errors.InstrumentError("'{}' not in '{}'".format(expected, ans))
return ans
@Action()
def clear(self):
"""Clear the buffer.
"""
self.send('\r\r')
@Feat(None, values={True, False})
def remote(self, value):
"""Remote or local.
"""
if value:
#self.query_expect('', None, None)
self.query_expect('\r', expected=None)
self.recv()
else:
return self.query_expect('LOCAL', chr(0), None)
@Feat(read_once=True)
def revision(self):
"""Revision.
"""
ans = self.query_expect('.REV', expected=None)
print(ans)
if 'UNDEFINED' in ans:
ans = '1.0'
else:
ans = self.recv()
ans = ans.split()[1]
        return float(ans)
@Feat(None, values={'ecl': 'ECLTRIG', 'ttl': 'TTLTRIG'})
def trigger_logic(self, value):
"""Trigger logic.
"""
self.query_expect(value)
    @Feat(None, values={'high': 'HITRIG', '50ohm': '50TRIG'})
def trigger_ttl_termination(self, value):
"""Trigger termination for TTL logic (for ECL is fixed to 50 ohm).
"""
if self.recall('trigger_type') == 'ecl':
raise errors.InstrumentError('Level triggering only with ECL')
self.query_expect(value)
    @Feat(None, values={'rising': '+VETRIG', 'falling': '-VETRIG'})
def trigger_edge(self, value):
"""Trigger on rising or falling edge.
"""
self.query_expect(value)
    @Feat(None, values={'level': 'LVLTRIG', 'log': 'LOGTRIG'})
def trigger_ecl_mode(self, value):
"""Trigger mode for ECL logic.
"""
if self.recall('trigger_type') == 'ttl':
raise errors.InstrumentError('Level triggering only with ECL')
self.query_expect(value)
@Feat(units='centivolt', limits=(-40, 40, 1))
def trigger_ecl_level(self):
"""Trigger level for ECL logic, mode level.
"""
if self.revision >= 2.0:
ans = self.query_expect('THRESHV ?')
ans = between(ans, 'THRESHV ?', 'ok')
return float(ans.strip())
else:
ans = self.query_expect('THRESHV @ .')[8:]
try:
pos = ans.index('.')
except ValueError:
raise errors.InstrumentError('Unsupported operation.')
return float(ans[pos+2:pos+7])
@trigger_ecl_level.setter
def trigger_ecl_level(self, value):
if self.revision >= 2.0:
self.query_expect('{:d} !THRESH'.format(value))
else:
value = 40 * value + 2000.0
self.query_expect('{:d} THRESH ! TRIG+RF>HW'.format(value))
@Feat(units='volt', limits=(-50, 50))
def clamp_voltage(self):
"""Most negative value of the gate pulse.
"""
if self.revision >= 2.0:
ans = self.query_expect('CLAMP ?')
ans = between(ans, 'CLAMP ?', 'ok').strip()
return float(ans)
else:
ans = self.query_expect('CLAMP @ .')
try:
pos = ans.index('.')
except ValueError:
raise errors.InstrumentError('Unsupported operation.')
return float(ans[pos+2:pos+7]) / 10.0
@clamp_voltage.setter
def clamp_voltage(self, value):
average = self.recall('average_voltage')
        mn, mx = average - Q_(60, 'volt'), average
        if not (mn < value < mx):
            raise ValueError('Invalid clamp voltage. Not in range {}-{}'.format(mn, mx))
self.query_expect('{:d} CLAMP ! CLAMP>HW'.format(value * 10))
@Feat(units='volt', limits=(-50, 50))
def average_voltage(self):
"""Cathode potential bias with respect of MCP.
"""
if self.revision >= 2.0:
ans = self.query_expect('AVE ?')
ans = between(ans, 'AVE ?', 'ok')
return float(ans.strip()) / 10.
else:
ans = self.query_expect('THRESHV @ .')[8:]
try:
pos = ans.index('.')
except ValueError:
raise errors.InstrumentError('Unsupported operation.')
return float(ans[pos+2:pos+7]) / 10.
@average_voltage.setter
def average_voltage(self, value):
self.query_expect('{:d} AVE ! AVE>HW'.format(value * 10))
@Feat()
def status(self):
"""Get status.
"""
return self.query_expect(".STATUS", chr(0))
@Feat(None, units='volt', limits=(0, 1700))
def mcp(self, value):
"""MCP Voltage.
"""
        if self.revision >= 2.0:
return self.query_expect('{} !MCP'.format(value))
else:
return self.query_expect('{} !MCPVOLTS'.format(value))
@Feat(None, values={'inhibit': 0, 'rf': 21, 'ldc': 22, 'hdc': 23, 'dc': 24,
'user1': 25, 'user2': 26, 'user3': 27, 'user4': 28})
def mode(self, mode):
"""Gain modulation mode.
HRI Machine Modes and Mode Indices
None Mode
0 INHIBIT
2-10 COMB modes 200 ps to 1 ns inclusive (High rate operation)
11-20 COMB modes 100 ps to 3 ns inclusive (Low rate (+GOI) operation)
21 RF
22 logic low duty cycle (LDC)
23 logic high duty cycle
24 DC
25-28 user modes 1 to 4
"""
#TODO: Modes [11-20] not available in rev < 2.0
return self.query_expect("{} !MODE".format(mode))
@Feat(None)
def rfgain(self, value):
"""RF Gain.
"""
return self.query("{} !RFGAIN".format(value))
@Feat()
def temperature(self):
"""Temperature.
"""
if self.revision == 2.0:
return self.query("@TEMP .")
return 0
@Feat(None, values={True, False})
def enabled(self, value):
"""MCP Enabled
"""
if self.revision < 2:
if value:
self.query_expect('+M')
else:
self.query_expect('-M')
else:
if value:
self.mode = self.__dict__.get('_last_mode', 21)
else:
self._last_mode = self.recall('mode')
self.mode = 0
if __name__ == '__main__':
import argparse
import lantz.log
parser = argparse.ArgumentParser(description='Test Kentech HRI')
parser.add_argument('-i', '--interactive', action='store_true',
default=False, help='Show interactive GUI')
parser.add_argument('-p', '--port', type=str, default='17',
help='Serial port to connect to')
args = parser.parse_args()
lantz.log.log_to_socket(lantz.log.DEBUG)
with HRI(args.port, baudrate=9600) as inst:
if args.interactive:
from lantz.ui.app import start_test_app
start_test_app(inst)
else:
#inst.clear()
inst.remote = True
print(inst.revision)
inst.mode = "inhibit"
inst.mcp = 350
inst.rfgain = 99
#print(inst.status)
inst.mode = "rf"
#print(inst.status)
inst.remote = False
|
the-stack_106_26094 | import unittest
from flask import current_app
from flask_testing import TestCase
from api import app
from app.main.config import *
class TestDevelopmentConfig(TestCase):
def create_app(self):
app.config.from_object('app.main.config.DevelopmentConfig')
return app
def test_app_is_development(self):
self.assertTrue(app.config['DEBUG'] is True)
self.assertFalse(current_app is None)
self.assertTrue(
app.config['SQLALCHEMY_DATABASE_URI'] == DevelopmentConfig.SQLALCHEMY_DATABASE_URI
)
class TestTestingConfig(TestCase):
def create_app(self):
app.config.from_object('app.main.config.TestingConfig')
return app
def test_app_is_testing(self):
self.assertTrue(app.config['DEBUG'])
self.assertTrue(
app.config['SQLALCHEMY_DATABASE_URI'] == TestingConfig.SQLALCHEMY_DATABASE_URI
)
class TestProductionConfig(TestCase):
def create_app(self):
app.config.from_object('app.main.config.ProductionConfig')
return app
def test_app_is_production(self):
self.assertTrue(app.config['DEBUG'] is False)
self.assertTrue(
app.config['SQLALCHEMY_DATABASE_URI'] == ProductionConfig.SQLALCHEMY_DATABASE_URI
)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_26095 | # adapted from: https://github.com/lucidrains/vit-pytorch/blob/main/vit_pytorch/vit.py
import torch
import torch.nn.functional as F
from torch import nn
from torch.utils import checkpoint
from einops import rearrange, repeat
import triton
import triton.language as tl
import time
@triton.jit
def blub_kernel(
q_ptr, k_ptr, c_ptr,
M, D, # M: num_queries, D: d_model
B, S, H, W, # 3D key dimensions: time, height, width
kS, kH, kW, # 3D kernel sizes
stride_qm, stride_qd, # query strides
stride_kB, stride_kS, stride_kH, stride_kW, stride_k_ks, stride_k_kh, stride_k_kw, stride_kd,
stride_cm, stride_cw, # output strides
**meta
):
"""
C = Q x K
Q: (M, D)
K: ... it's complicated
C: (M, W)
"""
BLOCK_SIZE_M = meta['BLOCK_SIZE_M'] # num queries we process at once
BLOCK_SIZE_D = meta['BLOCK_SIZE_D'] # num elements of embedding we process at once
pid = tl.program_id(axis=0)
wnd = kS * kH * kW
base_m = (pid // wnd) * BLOCK_SIZE_M
base_w = pid % wnd
    # decode each query's (batch, time, y, x) coordinates and this program's kernel offset (s, h, w)
base_ws = tl.arange(0, BLOCK_SIZE_M) + base_m
b = base_ws // (W * H * S)
z = base_ws // (W * H) % S
y = (base_ws // W) % H
x = base_ws % W
s = base_w // (kH * kW)
h = (base_w // kW) % kH
w = base_w % kW
# compute source key pointers
offs_k = b * stride_kB + z * stride_kS + y * stride_kH + x * stride_kW + w * stride_kW + h * stride_kH + s * stride_kS
offs_d = tl.arange(0, BLOCK_SIZE_D)
offs_q = base_m + tl.arange(0, BLOCK_SIZE_M)
q_ptrs = q_ptr + offs_q[:, None] * stride_qm + offs_d[None, :] * stride_qd # (M, D)
k_ptrs = k_ptr + offs_k[:, None] + offs_d[None, :] * stride_kd # (M, D)
accumulator = tl.zeros((BLOCK_SIZE_M,), dtype=tl.float32)
for d in range(0, D, BLOCK_SIZE_D):
q = tl.load(q_ptrs) # (BLOCK_SIZE_M, BLOCK_SIZE_D)
k = tl.load(k_ptrs) # (BLOCK_SIZE_M, BLOCK_SIZE_D)
accumulator += tl.sum(q * k, axis=1)
q_ptrs += BLOCK_SIZE_D * stride_qd
k_ptrs += BLOCK_SIZE_D * stride_kd
# write result
offs_cm = base_m + tl.arange(0, BLOCK_SIZE_M)
c_ptrs = c_ptr + offs_cm * stride_cm + base_w * stride_cw
c_mask = offs_cm < M
tl.store(c_ptrs, accumulator, mask=c_mask)
def blub(q, k):
B, S, H, W, D, kS, kH, kW = k.shape
M,D = q.shape
# allocate output tensor
window_size = kS * kH * kW
c = torch.zeros(M, window_size, device=q.device, dtype=q.dtype)
stride_qm,stride_qd = q.stride()
stride_kB,stride_kS,stride_kH,stride_kW,stride_kd,stride_k_ks,stride_k_kh,stride_k_kw = k.stride()
stride_cm,stride_cw = c.stride()
#print('c.stride()', c.stride())
# grid based on output elements (number of queries times local windows size)
grid = lambda meta: (
triton.cdiv(M, meta['BLOCK_SIZE_M']) * window_size, # cdiv = ceil_div
)
blub_kernel[grid](
q, k, c,
M, D,
B, S, H, W, # 3D key dimensions: frame, width, height
kS, kH, kW, # 3D kernel sizes
stride_qm, stride_qd, # query strides
stride_kB, stride_kS, stride_kH, stride_kW, stride_k_ks, stride_k_kh, stride_k_kw, stride_kd,
stride_cm, stride_cw, # output strides
BLOCK_SIZE_M=64, # TODO: tuning
BLOCK_SIZE_D=64,
)
return c
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
class FeedForward(nn.Module):
def __init__(self, dim, hidden_dim, dropout=0.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, hidden_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(hidden_dim, dim),
nn.Dropout(dropout)
)
def forward(self, x):
return self.net(x)
class Local3dAttention(nn.Module):
def __init__(self, extents, dim, heads=8, dim_head=64, dropout=.0, use_checkpointing=True, use_triton=True):
super().__init__()
self.extents = extents
inner_dim = dim_head * heads
project_out = not (heads == 1 and dim_head == dim)
self.heads = heads
self.scale = dim_head ** -0.5
self.attend = nn.Softmax(dim = -1)
self.to_q = nn.Linear(dim, inner_dim, bias=False)
self.to_k = nn.Linear(dim, inner_dim, bias=False)
self.to_v = nn.Linear(dim, inner_dim, bias=True)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim),
nn.Dropout(dropout)
) if project_out else nn.Identity()
self.use_checkpointing = use_checkpointing
self.use_triton = use_triton
def pad(self, x, pad_value=0, mask=False):
padding = ()
if not mask:
padding += (0, 0) # 'skip' embedding dim
for i in reversed(range(3)):
padding += (self.extents[i], self.extents[i])
return F.pad(x, pad=padding, value=pad_value)
def unfold(self, x):
for i in range(3):
kernel_size = self.extents[i] * 2 + 1
x = x.unfold(dimension=i+1, size=kernel_size, step=1)
return x
def get_mask(self, batch_shape):
_,s,h,w,_ = batch_shape
m = torch.zeros(1, s, h, w, dtype=torch.bool)
m = self.pad(m, pad_value=True, mask=True)
m = self.unfold(m)
return m
def local_attention(self, k, v, q):
batch_size = v.shape[0]
mask = self.get_mask(k.shape).to(k.device)
k = self.unfold(self.pad(k)) # pad border cases to get equal sizes
v = self.unfold(self.pad(v))
# print('k', k.size(), k.stride(), k.numel(), k.storage().size())
# print('v', v.size(), v.stride(), v.numel(), v.storage().size())
# print('q', q.size(), q.stride(), q.numel(), q.storage().size())
if self.heads == 1 and self.use_triton:
#print('triton')
dots = blub(q.view(-1, q.size(-1)), k) * self.scale
dots = dots.unsqueeze(-2).unsqueeze(-2)
else:
q = rearrange(q, 'b s h w (H d) -> (b s h w) H 1 d', H = self.heads)
k = rearrange(k, 'b s h w (H d) i j k -> (b s h w) H (i j k) d', H = self.heads)
dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale
v = rearrange(v, 'b s h w (H d) i j k -> (b s h w) H (i j k) d', H = self.heads)
# q = rearrange(q, 'b s h w (H d) -> (b s h w) H 1 d', H = self.heads)
# v = rearrange(v, 'b s h w (H d) i j k -> (b s h w) H (i j k) d', H = self.heads)
# k = rearrange(k, 'b s h w (H d) i j k -> (b s h w) H (i j k) d', H = self.heads)
# dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale
# masking
mask_value = -1e9
mask = repeat(mask, '1 s h w i j k -> (b s h w) heads 1 (i j k)', b=batch_size, heads=self.heads)
dots.masked_fill_(mask, mask_value)
attn = self.attend(dots)
out = torch.matmul(attn, v)
return out
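    # Shape note (descriptive sketch, not authoritative): with window = (2*e+1)^3 local
    # positions per query, `dots` ends up as ((b s h w), heads, 1, window) in both branches
    # and `out` as ((b s h w), heads, 1, dim_head); forward() flattens this back to the
    # original query shape after the output projection.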
# todo: add causal masking
def forward(self, x, q):
q_shape = q.shape
# key & value projections
k = self.to_k(x)
v = self.to_v(x)
q = self.to_q(q)
if self.use_checkpointing:
out = checkpoint.checkpoint(self.local_attention, k, v, q)
else:
out = self.local_attention(k, v, q)
out = rearrange(out, 'b h n d -> b n (h d)')
out = self.to_out(out)
return out.reshape(q_shape)
class Local3dAttentionTransformer(nn.Module):
def __init__(self, *, data_shape, dim, num_classes, extents, depth, heads, dim_head, mlp_dim, dropout=.0, use_triton=True):
super().__init__()
self.num_classes = num_classes
self.embedding = nn.Embedding(num_classes, dim)
# position embeddings
self.pos_emb_s = nn.Embedding(data_shape[0], dim)
self.pos_emb_h = nn.Embedding(data_shape[1], dim)
self.pos_emb_w = nn.Embedding(data_shape[2], dim)
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
PreNorm(dim, Local3dAttention(extents, dim, heads=heads, dim_head=dim_head, dropout=dropout, use_triton=use_triton)),
PreNorm(dim, FeedForward(dim, mlp_dim, dropout=dropout))
]))
def get_pos_embedding(self, batch_shape):
        _,s,h,w = batch_shape
device = self.pos_emb_s.weight.device
indices = torch.arange(s*h*w, device=device).view(1, s, h, w)
w_pos = indices % w
h_pos = indices.div(w, rounding_mode='trunc') % h
s_pos = indices.div(h * w, rounding_mode='trunc')
return (self.pos_emb_s(s_pos.expand(batch_shape))
+ self.pos_emb_h(h_pos.expand(batch_shape))
+ self.pos_emb_w(w_pos.expand(batch_shape)))
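    # Worked example for the decomposition above (illustrative numbers only): with
    # h = w = 16, flat index 273 gives
    #   w_pos = 273 % 16         = 1
    #   h_pos = (273 // 16) % 16 = 1
    #   s_pos = 273 // (16 * 16) = 1
    # so each axis gets its own learned embedding and the three are summed.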
def forward(self, img_z):
batch_shape = img_z.shape
x = self.embedding(img_z)
x = x + self.get_pos_embedding(batch_shape)
for attn, ff in self.layers:
x = attn(x, q=x) + x
x = ff(x) + x
return x
@torch.no_grad()
def run_test(device, w, x, n, use_triton=True):
net = Local3dAttentionTransformer(data_shape=(10,16,16), dim=128, num_classes=1000, extents=(2,2,2), depth=10, mlp_dim=256, heads=1, dim_head=128, dropout=.0, use_triton=use_triton)
net.load_state_dict(w)
net = net.to(device)
for i in range(n):
y = net.forward(x)
print(i, y.size(), torch.cuda.max_memory_allocated(device))
return y
def get_weights():
net = Local3dAttentionTransformer(data_shape=(10,16,16), dim=128, num_classes=1000, extents=(2,2,2), depth=10, mlp_dim=256, heads=1, dim_head=128, dropout=.0)
return net.state_dict()
@torch.no_grad()
def test():
device = torch.device('cuda', 0)
w = get_weights() # generate state dict to use for tests
x = torch.randint(0, 99, (10,6,16,16), device=device)
torch.cuda.empty_cache()
tic = time.time_ns()
run_test(device, w, x, 10, use_triton=True)
toc = time.time_ns()
time_with = toc-tic
print('with triton: {}ms'.format(time_with/(1e6)))
torch.cuda.empty_cache()
tic = time.time_ns()
run_test(device, w, x, 10, use_triton=False)
toc = time.time_ns()
time_without = toc-tic
print('without triton: {}ms'.format(time_without/(1e6)))
print('ratio:', time_without/time_with)
# simple output comparison
a = run_test(device, w, x, 1, use_triton=True)
b = run_test(device, w, x, 1, use_triton=False)
print('diff:', torch.abs(a-b).sum(), a.numel(), a.std())
if __name__ == '__main__':
test()
|
the-stack_106_26097 | import numpy as np
import pandas as pd
import streamlit as st
import os
import src
from datetime import datetime
from pandas.util import hash_pandas_object
import boto3
s3 = boto3.resource(
service_name='s3',
region_name=st.secrets["region_name"],
aws_access_key_id=st.secrets["AWS_ACCESS_KEY_ID"],
aws_secret_access_key=st.secrets["AWS_SECRET_ACCESS_KEY"]
)
def main():
PATH = str(os.path.realpath('..')) + '/HopeCheckIn/'
DATA_PATH = PATH + 'data/'
st.set_page_config(layout="wide")
st.title("Hope Check in")
#df_people = src.load_table(DATA_PATH + 'all_people_directory.csv')
s3 = boto3.client('s3')
df_people = src.load_table("s3://hope-bucket/all_people_directory.csv")
#st.write(df_people)
lastname = st.text_input("Please enter your last name", "", key = "lastname")
families = src.search_families(df_people, lastname)
st.write("--------")
for family in families:
left, _, right = st.columns(3)
left.write("##### People in this family")
right.button("This is my family", key=hash_pandas_object(family))
#my_family = right.button("This is my family", key=family)
src.display_family(family, df_people)
st.write("--------")
if 'newcomer' not in st.session_state:
st.session_state.newcomer = 0
if st.session_state.newcomer == 0:
st.session_state.newcomer = st.button("New to Hope?")
if st.session_state.newcomer:
st.write("### Newcomer details")
lastname = st.text_input("Last name", key = "newcomer_lastname")
firstname = st.text_input("First name", key = "newcomer_firstname")
phone = st.text_input("Phone", key = "newcomer_phone")
save = st.button("Save")
if save:
df_new = pd.DataFrame({"Member ID": firstname+lastname,
"First Name": firstname,
"Last Name": lastname,
"Mobile Number": phone,
"Family Members": firstname,
"Family Relationship": 'Primary',
"Checked In": datetime.now(tz=None),
"Printed": 0},
index=[0])
st.write(df_new)
            df_people = pd.concat([df_people, df_new])  # DataFrame.append was removed in pandas 2.0; concat is equivalent here
done = st.button("Done")
#src.save_table(DATA_PATH + "all_people_directory.csv", df_people)
src.save_table("s3://hope-bucket/all_people_directory.csv", df_people)
if __name__ == '__main__':
main() |
the-stack_106_26099 | #!/usr/bin/env python3
# Copyright (c) 2021 The Khronos Group Inc.
# Copyright (c) 2021 Valve Corporation
# Copyright (c) 2021 LunarG, Inc.
# Copyright (c) 2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Mike Schuchardt <[email protected]>
import argparse
import filecmp
import os
import shutil
import subprocess
import sys
import tempfile
import difflib
import common_codegen
# files to exclude from --verify check
verify_exclude = ['.clang-format']
def main(argv):
parser = argparse.ArgumentParser(description='Generate source code for this repository')
parser.add_argument('registry', metavar='REGISTRY_PATH', help='path to the Vulkan-Headers registry directory')
group = parser.add_mutually_exclusive_group()
group.add_argument('-i', '--incremental', action='store_true', help='only update repo files that change')
group.add_argument('-v', '--verify', action='store_true', help='verify repo files match generator output')
args = parser.parse_args(argv)
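    # Example invocations (a sketch; the script name and registry path are illustrative):
    #   python scripts/generate_source.py external/Vulkan-Headers/registry --incremental
    #   python scripts/generate_source.py external/Vulkan-Headers/registry --verify
    # --incremental copies only changed files into layers/generated; --verify regenerates
    # into a temporary directory and diffs it against the repo.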
gen_cmds = [*[[common_codegen.repo_relative('scripts/lvl_genvk.py'),
'-registry', os.path.abspath(os.path.join(args.registry, 'vk.xml')),
'-quiet',
filename] for filename in ["chassis.cpp",
"chassis.h",
"chassis_dispatch_helper.h",
"layer_chassis_dispatch.cpp",
"layer_chassis_dispatch.h",
"object_tracker.cpp",
"object_tracker.h",
"parameter_validation.cpp",
"parameter_validation.h",
"synchronization_validation_types.cpp",
"synchronization_validation_types.h",
"thread_safety.cpp",
"thread_safety.h",
"vk_dispatch_table_helper.h",
"vk_enum_string_helper.h",
"vk_extension_helper.h",
"vk_layer_dispatch_table.h",
"vk_object_types.h",
"vk_safe_struct.cpp",
"vk_safe_struct.h",
"lvt_function_pointers.cpp",
"lvt_function_pointers.h",
"vk_typemap_helper.h",
"best_practices.h",
"best_practices.cpp",
"spirv_validation_helper.cpp",
"command_validation.cpp",
"command_validation.h",
"corechecks_optick_instrumentation.cpp",
"corechecks_optick_instrumentation.h"]],
[common_codegen.repo_relative('scripts/vk_validation_stats.py'),
os.path.abspath(os.path.join(args.registry, 'validusage.json')),
'-export_header'],
[common_codegen.repo_relative('scripts/external_revision_generator.py'),
'--json_file', common_codegen.repo_relative('scripts/known_good.json'),
'--json_keys', 'repos,0,commit',
'-s', 'SPIRV_TOOLS_COMMIT_ID',
'-o', 'spirv_tools_commit_id.h']]
repo_dir = common_codegen.repo_relative('layers/generated')
# get directory where generators will run
if args.verify or args.incremental:
# generate in temp directory so we can compare or copy later
temp_obj = tempfile.TemporaryDirectory(prefix='VulkanVL_generated_source_')
temp_dir = temp_obj.name
gen_dir = temp_dir
else:
# generate directly in the repo
gen_dir = repo_dir
# run each code generator
for cmd in gen_cmds:
print(' '.join(cmd))
try:
subprocess.check_call([sys.executable] + cmd, cwd=gen_dir)
except Exception as e:
print('ERROR:', str(e))
return 1
# optional post-generation steps
if args.verify:
# compare contents of temp dir and repo
temp_files = set(os.listdir(temp_dir))
repo_files = set(os.listdir(repo_dir))
files_match = True
for filename in sorted((temp_files | repo_files) - set(verify_exclude)):
temp_filename = os.path.join(temp_dir, filename)
repo_filename = os.path.join(repo_dir, filename)
if filename not in repo_files:
print('ERROR: Missing repo file', filename)
files_match = False
elif filename not in temp_files:
print('ERROR: Missing generator for', filename)
files_match = False
elif not filecmp.cmp(temp_filename, repo_filename, shallow=False):
print('ERROR: Repo files do not match generator output for', filename)
files_match = False
# print line diff on file mismatch
with open(temp_filename) as temp_file, open(repo_filename) as repo_file:
print(''.join(difflib.unified_diff(temp_file.readlines(),
repo_file.readlines(),
fromfile='temp/' + filename,
tofile= 'repo/' + filename)))
# return code for test scripts
if files_match:
print('SUCCESS: Repo files match generator output')
return 0
return 1
elif args.incremental:
# copy missing or differing files from temp directory to repo
for filename in os.listdir(temp_dir):
temp_filename = os.path.join(temp_dir, filename)
repo_filename = os.path.join(repo_dir, filename)
if not os.path.exists(repo_filename) or \
not filecmp.cmp(temp_filename, repo_filename, shallow=False):
print('update', repo_filename)
shutil.copyfile(temp_filename, repo_filename)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
the-stack_106_26102 | # Copyright 2020 The Cirq Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tool to generate external api_docs for Cirq.
In order to publish to our site, devsite runs two jobs for us: stable and nightly.
The stable one downloads the latest cirq release from pypi and uses that to generate the reference
API docs.
The nightly one downloads the latest cirq pre-release (pip install cirq --pre) and uses that to
generate the "nightly diff".
This script needs to cater for both of these cases.
"""
import os
import types
import networkx
from absl import app
from absl import flags
from tensorflow_docs.api_generator import doc_controls
from tensorflow_docs.api_generator import generate_lib
from tensorflow_docs.api_generator import public_api
import cirq
import cirq_google
from cirq import _doc
flags.DEFINE_string("output_dir", "docs/api_docs", "Where to output the docs")
flags.DEFINE_string(
"code_url_prefix",
"https://github.com/quantumlib/Cirq/blob/master",
"The url prefix for links to code.",
)
flags.DEFINE_bool("search_hints", True, "Include metadata search hints in the generated files")
flags.DEFINE_string("site_path", "reference/python", "Path prefix in the _toc.yaml")
FLAGS = flags.FLAGS
def filter_unwanted_inherited_methods(path, parent, children):
"""Filter the unwanted inherited methods.
CircuitDag inherits a lot of methods from `networkx.DiGraph` and `Graph`.
This filter removes these, as it creates a lot of noise in the API docs.
"""
if parent.__name__ != "CircuitDag":
return children
filtered_children = []
for name, obj in children:
if isinstance(obj, types.FunctionType):
if obj.__module__.startswith('cirq'):
filtered_children.append((name, obj))
return filtered_children
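# Note (descriptive only): `children` arrives as a list of (name, object) pairs from the
# doc generator; keeping only functions whose __module__ starts with 'cirq' drops the
# methods CircuitDag inherits from networkx from the generated page.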
def main(unused_argv):
generate_cirq()
generate_cirq_google()
generate_cirq_aqt()
def generate_cirq():
doc_generator = generate_lib.DocGenerator(
root_title="Cirq",
py_modules=[("cirq", cirq)],
base_dir=os.path.dirname(cirq.__file__),
code_url_prefix=FLAGS.code_url_prefix + "/cirq-core/cirq",
search_hints=FLAGS.search_hints,
site_path=FLAGS.site_path,
callbacks=[public_api.local_definitions_filter, filter_unwanted_inherited_methods],
extra_docs=_doc.RECORDED_CONST_DOCS,
)
doc_controls.decorate_all_class_attributes(
doc_controls.do_not_doc_inheritable, networkx.DiGraph, skip=[]
)
doc_generator.build(output_dir=FLAGS.output_dir)
def generate_cirq_aqt():
# This try-catch can go after v0.12 is released
try:
# should be present in the nightly (pre-release) build
import cirq_aqt
except ImportError:
# as cirq.aqt is currently not being generated anyway
# we won't handle this case (the stable build)
return
doc_generator = generate_lib.DocGenerator(
root_title="Cirq-aqt",
py_modules=[("cirq_aqt", cirq_aqt)],
base_dir=os.path.dirname(cirq_aqt.__file__),
code_url_prefix=FLAGS.code_url_prefix + "/cirq-aqt/cirq_aqt",
search_hints=FLAGS.search_hints,
site_path=FLAGS.site_path,
callbacks=[public_api.local_definitions_filter, filter_unwanted_inherited_methods],
extra_docs=_doc.RECORDED_CONST_DOCS,
)
doc_controls.decorate_all_class_attributes(
doc_controls.do_not_doc_inheritable, networkx.DiGraph, skip=[]
)
doc_generator.build(output_dir=FLAGS.output_dir)
def generate_cirq_google():
doc_generator = generate_lib.DocGenerator(
root_title="Cirq-google",
py_modules=[("cirq_google", cirq_google)],
base_dir=os.path.dirname(cirq_google.__file__),
code_url_prefix=FLAGS.code_url_prefix + "/cirq-google/cirq_google",
search_hints=FLAGS.search_hints,
site_path=FLAGS.site_path,
callbacks=[public_api.local_definitions_filter, filter_unwanted_inherited_methods],
private_map={
# Opt to not build docs for these paths for now since they error.
"cirq_google.engine.client.quantum.QuantumEngineServiceClient": ["enums"],
"cirq_google.engine.client.quantum_v1alpha1.QuantumEngineServiceClient": ["enums"],
"cirq_google.api": ["v1"],
},
extra_docs=_doc.RECORDED_CONST_DOCS,
)
doc_generator.build(output_dir=FLAGS.output_dir)
if __name__ == "__main__":
app.run(main)
|
the-stack_106_26109 | """
Basic unit test module for the at module.
"""
import os
import sys
import unittest
cwd = os.getcwd()
if cwd not in sys.path:
sys.path.insert(0, os.getcwd())
import at
class TestATParsing(unittest.TestCase):
"""Defines unit tests for verifying parsing functionality."""
TEST_CMDS = [('AT+CEMODE=0',
{'cmd':'+CEMODE', 'type':'SET', 'params':[0]}),
('AT+CSIM=14,"A0A40000027F20"',
{'cmd':'+CSIM', 'type':'SET', 'params':[14, "A0A40000027F20"]}),
('AT%XSUDO=7,"c2lnbmF0dXJl";%CMNG=1',
[{'cmd':'%XSUDO', 'type':'SET', 'params':[7, "c2lnbmF0dXJl"]},
{'cmd':'%CMNG', 'type':'SET', 'params':[1]}]),
('AT+CRSM=176,28539,0,0,12',
{'cmd':'+CRSM', 'type':'SET', 'params':[176, 28539, 0, 0, 12]}),
('AT+CFUN?',
{'cmd':'+CFUN', 'type':'READ', 'params':[]}),
('AT%XSIM?',
{'cmd':'%XSIM', 'type':'READ', 'params':[]}),
('AT+CGEREP=?',
{'cmd':'+CGEREP', 'type':'TEST', 'params':[]}),
('AT%XCBAND=?',
{'cmd':'%XCBAND', 'type':'TEST', 'params':[]}),
('AT%FOO=7,"c2lnbmF0dXJl";+BAR=(1,2,3)',
[{'cmd':'%FOO', 'type':'SET', 'params':[7, "c2lnbmF0dXJl"]},
{'cmd':'+BAR', 'type':'SET', 'params':[[1, 2, 3]]}]),
('AT%XMODEMUUID',
{'cmd':'%XMODEMUUID', 'type':'SET', 'params':[]}),
('AT%XVBAT',
{'cmd':'%XVBAT', 'type':'SET', 'params':[]})]
TEST_RSPS = [('ERROR',
{'response':'ERROR', 'type':'RESPONSE', 'error':True, 'params':[]}),
('OK',
{'response':'OK', 'type':'RESPONSE', 'error':False, 'params':[]}),
('+CME ERROR: 513',
{'response':'+CME ERROR',
'type':'RESPONSE', 'error':True, 'params':[513]}),
('+CGSN: "352656100032138"',
{'response':'+CGSN',
'type':'RESPONSE', 'error':False, 'params':["352656100032138"]}),
('+CMEE: 1',
{'response':"+CMEE", 'type':'RESPONSE', 'error':False, 'params':[1]}),
('+CMS ERROR: 128',
{'response':'+CMS ERROR',
'type':'RESPONSE', 'error':True, 'params':[128]}),
('+CNUM: ,"+1234567891234",145',
{'response':'+CNUM', 'type':'RESPONSE', 'error':False,
'params':[None, '+1234567891234', 145]}),
('+CLCK: ("SC")',
{'response':'+CLCK',
'type':'RESPONSE', 'error':False, 'params':[['SC']]}),
('%FOO: ("A", "B", 10)',
{'response':'%FOO',
'type':'RESPONSE', 'error':False, 'params':[['A', 'B', 10]]}),
('Manufacturer',
{'type':'RESPONSE', 'response':None, 'error':False, 'params':['Manufacturer']}),
('%CMNG: 16842753,0,"000000000000000000000000000000000' +
'0000000000000000000000000000000"',
{'response':'%CMNG',
'type':'RESPONSE', 'error':False,
'params':[16842753,
0,
"000000000000000000000000000000000000000000" +
"0000000000000000000000"]}),
('%XMODEMUUID: 072fa1c7-304e-4dcf-adcc-76a1601c7192',
{'response':'%XMODEMUUID',
'type':'RESPONSE', 'error':False,
'params':["072fa1c7-304e-4dcf-adcc-76a1601c7192"]}),
('%XVBAT: 1234',
{'response':'%XVBAT',
'type':'RESPONSE', 'error':False,
'params':[1234]})]
def test_command_encoding(self):
"""Encode command dicts and compare them to the original string."""
for cmd_str, cmd_dict in self.TEST_CMDS:
result = at.encode_command(cmd_dict)
self.assertEqual(result, cmd_str)
def test_command_parsing(self):
"""Parse command strings and compare them to dicts."""
for cmd_str, cmd_dict in self.TEST_CMDS:
result = at.parse_string(cmd_str)
self.assertEqual(result, cmd_dict)
def test_responses(self):
"""Iterate through sample response strings."""
for cmd_str, params in self.TEST_RSPS:
result = at.parse_string(cmd_str)
self.assertEqual(result, params)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_26110 | """
Convert case data into format that can be used to construct model instance
"""
from nemde.core.casefile.lookup import convert_to_list, get_intervention_status
from nemde.core.casefile.algorithms import get_parsed_interconnector_loss_model_segments
from nemde.core.casefile.algorithms import get_interconnector_loss_estimate
from nemde.core.model.utils import fcas
def find(path, data):
"""
Extract element from nested dictionary given a path using dot notation
Parameters
----------
path : str
Path to nested element using dot notation
E.g. 'NEMSPDCaseFile.NemSpdInputs.RegionCollection.Region'
data : dict
Nested dictionary
Returns
-------
output : list or int or str or float
Value corresponding to path in nested dictionary
"""
keys = path.split('.')
output = data
for key in keys:
output = output[key]
return output
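# Illustrative usage of find() (hypothetical data, doctest-style sketch):
#   >>> find('NEMSPDCaseFile.NemSpdInputs', {'NEMSPDCaseFile': {'NemSpdInputs': {'Case': {}}}})
#   {'Case': {}}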
def get_region_index(data) -> list:
"""Get NEM region index"""
return [i['@RegionID'] for i in (data.get('NEMSPDCaseFile')
.get('NemSpdInputs')
.get('RegionCollection')
.get('Region'))]
def get_trader_index(data) -> list:
"""Get trader index"""
return [i['@TraderID'] for i in (data.get('NEMSPDCaseFile')
.get('NemSpdInputs')
.get('PeriodCollection')
.get('Period')
.get('TraderPeriodCollection')
.get('TraderPeriod'))]
def get_trader_semi_dispatch_index(data) -> list:
"""Get index of semi-dispatchable plant"""
return [i['@TraderID'] for i in data.get('NEMSPDCaseFile')
.get('NemSpdInputs')
.get('TraderCollection')
.get('Trader')
if i['@SemiDispatch'] == '1']
def get_trader_offer_index(data) -> list:
"""Get trader offer index"""
return [(i['@TraderID'], j['@TradeType'])
for i in (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('PeriodCollection').get('Period')
.get('TraderPeriodCollection').get('TraderPeriod'))
for j in convert_to_list(i.get('TradeCollection').get('Trade'))]
def get_trader_fcas_offer_index(data) -> list:
"""Get trader FCAS offers"""
return [(i['@TraderID'], j['@TradeType'])
for i in (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('PeriodCollection').get('Period')
.get('TraderPeriodCollection').get('TraderPeriod'))
for j in convert_to_list(i.get('TradeCollection').get('Trade'))
if j['@TradeType'] not in ['ENOF', 'LDOF']]
def get_trader_energy_offer_index(data) -> list:
"""Get trader energy offers"""
return [(i['@TraderID'], j['@TradeType'])
for i in (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('PeriodCollection').get('Period')
.get('TraderPeriodCollection').get('TraderPeriod'))
for j in convert_to_list(i.get('TradeCollection').get('Trade'))
if j['@TradeType'] in ['ENOF', 'LDOF']]
def get_trader_fast_start_index(data) -> list:
"""Get fast start units"""
traders = (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('TraderCollection').get('Trader'))
# Fast start unit IDs
return [i['@TraderID'] for i in traders if i.get('@FastStart') == '1']
def get_generic_constraint_index(data) -> list:
"""Get generic constraint index"""
return [i['@ConstraintID'] for i in (data.get('NEMSPDCaseFile')
.get('NemSpdInputs')
.get('PeriodCollection')
.get('Period')
.get('GenericConstraintPeriodCollection')
.get('GenericConstraintPeriod'))]
def get_generic_constraint_trader_variable_index(data) -> list:
"""Get all trader variables within generic constraints"""
constraints = (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('GenericConstraintCollection')
.get('GenericConstraint'))
# Container for all trader variables
trader_variables = []
for i in constraints:
collection = i.get('LHSFactorCollection')
# Continue if no LHS factors or no trader factors
if (collection is None) or (collection.get('TraderFactor') is None):
continue
for j in convert_to_list(collection.get('TraderFactor')):
trader_variables.append((j['@TraderID'], j['@TradeType']))
# Retain unique indices
return list(set(trader_variables))
def get_generic_constraint_interconnector_variable_index(data) -> list:
"""Get all interconnector variables within generic constraints"""
constraints = (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('GenericConstraintCollection')
.get('GenericConstraint'))
# Container for all interconnector variables
interconnector_variables = []
for i in constraints:
collection = i.get('LHSFactorCollection')
# Continue if no LHS factors or no interconnector factors
if (collection is None) or (collection.get('InterconnectorFactor') is None):
continue
for j in convert_to_list(collection.get('InterconnectorFactor')):
interconnector_variables.append(j['@InterconnectorID'])
# Retain unique indices
return list(set(interconnector_variables))
def get_generic_constraint_region_variable_index(data) -> list:
"""Get generic constraint region variable indices"""
constraints = (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('GenericConstraintCollection')
.get('GenericConstraint'))
# Container for all region variables
region_variables = []
for i in constraints:
collection = i.get('LHSFactorCollection')
# Continue if no LHS factors or no region factors
if (collection is None) or (collection.get('RegionFactor') is None):
continue
for j in convert_to_list(collection.get('RegionFactor')):
region_variables.append((j['@RegionID'], j['@TradeType']))
# Retain unique indices
return list(set(region_variables))
def get_mnsp_index(data) -> list:
"""Get MNSP index"""
interconnectors = (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('PeriodCollection').get('Period')
.get('InterconnectorPeriodCollection')
.get('InterconnectorPeriod'))
# Only retain MNSPs
return [i['@InterconnectorID'] for i in interconnectors if i['@MNSP'] == '1']
def get_mnsp_offer_index(data) -> list:
"""Get MNSP offer index"""
interconnectors = (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('PeriodCollection').get('Period')
.get('InterconnectorPeriodCollection')
.get('InterconnectorPeriod'))
# Container for offer index
offer_index = []
for i in interconnectors:
# Non-MNSP interconnectors do not have an MNSPOfferCollection attribute
if i.get('MNSPOfferCollection') is None:
continue
# Extract InterconnectorID and RegionID for each offer entry
for j in i.get('MNSPOfferCollection').get('MNSPOffer'):
offer_index.append((i['@InterconnectorID'], j['@RegionID']))
return offer_index
def get_interconnector_index(data) -> list:
"""Get interconnector index"""
return [i['@InterconnectorID'] for i in (data.get('NEMSPDCaseFile')
.get('NemSpdInputs')
.get('PeriodCollection')
.get('Period')
.get('InterconnectorPeriodCollection')
.get('InterconnectorPeriod'))]
def get_interconnector_loss_model_breakpoint_index(data) -> list:
"""Get interconnector loss model breakpoint index"""
interconnectors = (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('InterconnectorCollection')
.get('Interconnector'))
# Container for indices
values = []
for i in interconnectors:
# Loss model segments
segments = i.get('LossModelCollection').get(
'LossModel').get('SegmentCollection').get('Segment')
for j in range(len(segments) + 1):
# Append index to container
values.append((i['@InterconnectorID'], j))
return values
def get_interconnector_loss_model_interval_index(data) -> list:
"""Get interconnector loss model interval index"""
interconnectors = (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('InterconnectorCollection')
.get('Interconnector'))
# Container for indices
values = []
for i in interconnectors:
# Loss model segments
segments = i.get('LossModelCollection').get(
'LossModel').get('SegmentCollection').get('Segment')
for j in range(len(segments)):
# Append index to container
values.append((i['@InterconnectorID'], j))
return values
def get_trader_price_bands(data) -> dict:
"""Trader price bands"""
traders = (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('TraderCollection').get('Trader'))
# Container for all price bands
price_bands = {}
for i in traders:
# All trade types for a given trader
trade_types = (i.get('TradePriceStructureCollection')
.get('TradePriceStructure')
.get('TradeTypePriceStructureCollection')
.get('TradeTypePriceStructure'))
for j in convert_to_list(trade_types):
# Price bands
for k in range(1, 11):
key = (i['@TraderID'], j['@TradeType'], k)
price_bands[key] = float(j.get(f'@PriceBand{k}'))
return price_bands
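# Shape of the returned dictionary (trader ID and prices are hypothetical):
#   {('GEN1', 'ENOF', 1): -1000.0, ('GEN1', 'ENOF', 2): 50.0, ...}
# i.e. keys are (TraderID, TradeType, price band 1-10) and values are offer prices.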
def get_trader_quantity_bands(data) -> dict:
"""Get trader quantity bands"""
traders = (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('PeriodCollection').get('Period')
.get('TraderPeriodCollection').get('TraderPeriod'))
# Container for quantity bands
quantity_bands = {}
for i in traders:
for j in convert_to_list(i.get('TradeCollection').get('Trade')):
# Quantity bands
for k in range(1, 11):
key = (i['@TraderID'], j['@TradeType'], k)
quantity_bands[key] = float(j[f'@BandAvail{k}'])
return quantity_bands
def get_trader_initial_condition_attribute(data, attribute, func) -> dict:
"""Get trader initial MW"""
traders = (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('TraderCollection').get('Trader'))
# Container for initial MW data
values = {}
for i in traders:
# Initial conditions
for j in i.get('TraderInitialConditionCollection').get('TraderInitialCondition'):
# Check matching attribute and extract value
if j.get('@InitialConditionID') == attribute:
values[i.get('@TraderID')] = func(j.get('@Value'))
return values
def get_trader_period_attribute(data, attribute, func) -> dict:
"""Get trader period attribute"""
return {i['@TraderID']: func(i[attribute])
for i in (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('PeriodCollection').get('Period')
.get('TraderPeriodCollection').get('TraderPeriod'))
if i.get(attribute) is not None}
def get_trader_collection_attribute(data, attribute, func) -> dict:
"""Get trader collection attribute"""
return {i['@TraderID']: func(i[attribute]) for i in (data.get('NEMSPDCaseFile')
.get('NemSpdInputs')
.get('TraderCollection')
.get('Trader'))}
def get_trader_period_trade_attribute(data, attribute, func) -> dict:
"""Get trader quantity band attribute"""
return {(i['@TraderID'], j['@TradeType']): func(j[attribute])
for i in (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('PeriodCollection').get('Period')
.get('TraderPeriodCollection').get('TraderPeriod'))
for j in convert_to_list(i.get('TradeCollection').get('Trade'))
if j.get(attribute) is not None}
def get_trader_fast_start_attribute(data, attribute, func) -> dict:
"""Get trader fast start attribute"""
traders = (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('TraderCollection').get('Trader'))
# Fast start traders
fast_start_traders = get_trader_fast_start_index(data)
# CurrentModeTime and CurrentMode may be missing for some traders (seem to
# be fast-start units). Set to 0 if missing.
if attribute == '@CurrentModeTime':
return {i['@TraderID']: func(i.get(attribute))
if i.get(attribute) is not None else 0.0
for i in traders if i['@TraderID'] in fast_start_traders}
if attribute == '@CurrentMode':
return {i['@TraderID']: func(i.get(attribute))
if i.get(attribute) is not None else 0
for i in traders if i['@TraderID'] in fast_start_traders}
else:
return {i['@TraderID']: func(i.get(attribute))
if i.get(attribute) is not None else i.get(attribute)
for i in traders if i['@TraderID'] in fast_start_traders}
def get_interconnector_collection_attribute(data, attribute, func) -> dict:
"""Get interconnector collection attribute"""
interconnectors = (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('InterconnectorCollection')
.get('Interconnector'))
# Container for extract values
values = {}
for i in interconnectors:
initial_conditions = (i.get('InterconnectorInitialConditionCollection')
.get('InterconnectorInitialCondition'))
for j in initial_conditions:
if j['@InitialConditionID'] == attribute:
values[i['@InterconnectorID']] = func(j['@Value'])
return values
def get_interconnector_period_collection_attribute(data, attribute, func) -> dict:
"""
Get interconnector period collection attribute
Parameters
----------
data : dict
NEMDE case file dictionary
attribute : str
Name of attribute to extract for each interconnector
func : function
Function used to parse attribute values e.g. float or str
Returns
-------
values : dict
Dictionary of extracted interconnector period collection attributes
"""
interconnectors = (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('PeriodCollection').get('Period')
.get('InterconnectorPeriodCollection')
.get('InterconnectorPeriod'))
# Container for extracted values
values = {}
for i in interconnectors:
values[i['@InterconnectorID']] = func(i[attribute])
return values
def get_interconnector_loss_model_attribute(data, attribute, func) -> dict:
"""
Get interconnector loss model attribute
Parameters
----------
data : dict
NEMDE case file dictionary
attribute : str
Name of attribute to extract
func : function
Function used to parse attribute values e.g. convert to float or string
"""
return {i['@InterconnectorID']: func(i.get('LossModelCollection').get('LossModel')[attribute])
for i in (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('InterconnectorCollection').get('Interconnector'))}
def get_interconnector_loss_model_segments(data, interconnector_id) -> list:
"""Get segments corresponding to interconnector loss model"""
interconnectors = (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('InterconnectorCollection')
.get('Interconnector'))
# Container for loss model segments
segments = []
for i in interconnectors:
if i['@InterconnectorID'] == interconnector_id:
loss_model_segments = (i.get('LossModelCollection')
.get('LossModel').get('SegmentCollection')
.get('Segment'))
for segment in loss_model_segments:
s = {j: int(k) if j == '@Limit' else float(k) for j, k in segment.items()}
segments.append(s)
return segments
def get_interconnector_loss_model_segment_attribute(data, attribute, func) -> dict:
"""Get interconnector loss model segment collection"""
interconnectors = (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('InterconnectorCollection').get('Interconnector'))
# Container for values
values = {}
for i in interconnectors:
segment_attributes = (i.get('LossModelCollection').get('LossModel')
.get('SegmentCollection').get('Segment'))
for j, k in enumerate(segment_attributes):
# Extract loss model segment attribute
values[(i['@InterconnectorID'], j)] = func(k[attribute])
return values
def get_standardised_interconnector_loss_model_segments(data) -> dict:
"""Use breakpoints and segment factors to construct a new start-end representation for the MLF curve"""
return {i: get_parsed_interconnector_loss_model_segments(data, i)
for i in get_interconnector_index(data)}
def get_mnsp_price_bands(data) -> dict:
"""Get MNSP price bands"""
interconnectors = (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('InterconnectorCollection')
.get('Interconnector'))
# Container for price band information
price_bands = {}
for i in interconnectors:
if i.get('MNSPPriceStructureCollection') is None:
continue
# MNSP price structure
price_structure = (i.get('MNSPPriceStructureCollection')
.get('MNSPPriceStructure')
.get('MNSPRegionPriceStructureCollection')
.get('MNSPRegionPriceStructure'))
for j in price_structure:
for k in range(1, 11):
key = (i['@InterconnectorID'], j['@RegionID'], k)
price_bands[key] = float(j[f'@PriceBand{k}'])
return price_bands
def get_mnsp_quantity_bands(data) -> dict:
"""Get MNSP quantity bands"""
interconnectors = (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('PeriodCollection').get('Period')
.get('InterconnectorPeriodCollection')
.get('InterconnectorPeriod'))
# Container for quantity band information
quantity_bands = {}
for i in interconnectors:
if i.get('MNSPOfferCollection') is None:
continue
# MNSP offers
offers = i.get('MNSPOfferCollection').get('MNSPOffer')
for j in offers:
for k in range(1, 11):
key = (i['@InterconnectorID'], j['@RegionID'], k)
quantity_bands[key] = float(j[f'@BandAvail{k}'])
return quantity_bands
def get_mnsp_offer_attribute(data, attribute) -> dict:
"""MNSP offer attribute"""
interconnectors = (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('PeriodCollection').get('Period')
.get('InterconnectorPeriodCollection')
.get('InterconnectorPeriod'))
# Container for MNSP offer attributes
values = {}
for i in interconnectors:
if i.get('MNSPOfferCollection') is None:
continue
# MNSP offers
offers = i.get('MNSPOfferCollection').get('MNSPOffer')
for j in offers:
for k in range(1, 11):
key = (i['@InterconnectorID'], j['@RegionID'], k)
values[key] = float(j[f'@{attribute}'])
return values
def get_mnsp_quantity_band_attribute(data, attribute, func) -> dict:
"""Get MNSP max available"""
interconnectors = (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('PeriodCollection').get('Period')
.get('InterconnectorPeriodCollection')
.get('InterconnectorPeriod'))
# Container for max available data
max_available = {}
for i in interconnectors:
if i.get('MNSPOfferCollection') is None:
continue
for j in i.get('MNSPOfferCollection').get('MNSPOffer'):
max_available[(i['@InterconnectorID'], j['@RegionID'])] = func(j[attribute])
return max_available
def get_mnsp_period_collection_attribute(data, attribute, func) -> dict:
"""
Get MNSP period collection attribute
Parameters
----------
data : dict
NEMDE case file dictionary
attribute : str
Name of attribute to extract
func : function
Function used to parse extracted attribute
Returns
-------
values : dict
MNSP period collection attribute
"""
interconnectors = (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('PeriodCollection').get('Period')
.get('InterconnectorPeriodCollection')
.get('InterconnectorPeriod'))
# Container for extracted values
values = {}
for i in interconnectors:
if i['@MNSP'] != '1':
continue
# Append to container
values[i['@InterconnectorID']] = func(i[attribute])
return values
def get_region_initial_condition_attribute(data, attribute, func) -> dict:
"""
Get region initial condition attribute
Parameters
----------
data : dict
NEMDE case file dictionary
attribute : str
Name of attribute to extract
func : function
Function used to parse attribute value
Returns
-------
values : dict
Extract attribute values for each NEM region
"""
regions = (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('RegionCollection').get('Region'))
# Container for extracted values
values = {}
for i in regions:
initial_conditions = (i.get('RegionInitialConditionCollection')
.get('RegionInitialCondition'))
for j in initial_conditions:
if j['@InitialConditionID'] == attribute:
values[i['@RegionID']] = func(j['@Value'])
return values
def get_region_period_collection_attribute(data, attribute, func) -> dict:
"""Get region period collection attribute"""
return {i['@RegionID']: func(i[attribute])
for i in (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('PeriodCollection').get('Period')
.get('RegionPeriodCollection').get('RegionPeriod'))}
def get_generic_constraint_rhs(data, intervention) -> dict:
"""
Get generic constraint right-hand-side term
Parameters
----------
data : dict
NEMDE case file dictionary
intervention : str
Intervention flag - '0' -> no intervention constraints, '1' -> intervention constraints included
Returns
-------
rhs : dict
Dictionary with keys = ConstraintIDs, values = constraint RHS
"""
constraints = (data.get('NEMSPDCaseFile').get('NemSpdOutputs')
.get('ConstraintSolution'))
# Container for constraint RHS terms
rhs = {}
for i in constraints:
# Check intervention flag
if i['@Intervention'] == intervention:
rhs[i['@ConstraintID']] = float(i['@RHS'])
return rhs
def get_generic_constraint_collection_attribute(data, attribute, func) -> dict:
"""
Get generic constraint collection attribute
Parameters
----------
data : dict
NEMDE case file data
attribute : str
Name of attribute to extract
func : function
Function used to parse attribute e.g. float, or str
Returns
-------
values : dict
Extracted generic constraint collection values
"""
constraints = (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('GenericConstraintCollection')
.get('GenericConstraint'))
# Container for extract values
values = {}
for i in constraints:
# Skip constraints with missing LHS factors
if i.get('LHSFactorCollection') is None:
continue
values[i['@ConstraintID']] = func(i[f'{attribute}'])
return values
def parse_constraint(constraint_data):
"""Constraint data"""
lhs = constraint_data.get('LHSFactorCollection')
# Trader factors
traders = {(i['@TraderID'], i['@TradeType']): float(i['@Factor'])
for i in convert_to_list(lhs.get('TraderFactor', []))}
# Interconnector factors
interconnectors = {(i['@InterconnectorID']): float(i['@Factor'])
for i in convert_to_list(lhs.get('InterconnectorFactor', []))}
# Region factors
regions = {(i['@RegionID'], i['@TradeType']): float(i['@Factor'])
for i in convert_to_list(lhs.get('RegionFactor', []))}
# Combine constraint terms into single dictionary
terms = {'traders': traders, 'interconnectors': interconnectors, 'regions': regions}
return terms
def get_generic_constraint_lhs_terms(data) -> dict:
"""
Generic constraint LHS terms - if no LHS terms then constraint is skipped
"""
constraints = (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('GenericConstraintCollection')
.get('GenericConstraint'))
return {i.get('@ConstraintID'): parse_constraint(i) for i in constraints
if i.get('LHSFactorCollection') is not None}
def get_case_attribute(data, attribute, func):
"""Extract case attribute"""
return func(data.get('NEMSPDCaseFile').get('NemSpdInputs').get('Case')[attribute])
def reorder_tuple(input_tuple) -> tuple:
"""Sort tuples alphabetically"""
if input_tuple[0][0] > input_tuple[1][0]:
return tuple((input_tuple[1], input_tuple[0]))
else:
return tuple((input_tuple[0], input_tuple[1]))
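# Illustrative behaviour (hypothetical offer keys):
#   reorder_tuple((('B1', 'ENOF', 3), ('A1', 'ENOF', 5)))
#   -> (('A1', 'ENOF', 5), ('B1', 'ENOF', 3))
# so each price-tied pair is ordered by trader ID before duplicates are removed.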
def get_price_tied_bands(data, trade_type):
"""
Get price-tied generators and loads. 'trade_type'=ENOF for generators,
'trade_type'=LDOF for loads.
"""
# Price and quantity bands
price_bands = get_trader_price_bands(data)
quantity_bands = get_trader_quantity_bands(data)
# Generator energy offer price bands
filtered_price_bands = {k: v for k, v in price_bands.items() if k[1] == trade_type}
# Trader region
trader_region = get_trader_period_attribute(data, '@RegionID', str)
# Container for price tied bands
price_tied = []
# For each price band
for i, j in filtered_price_bands.items():
# Compare it to every other price band
for m, n in filtered_price_bands.items():
# Price bands must be in same region (also ignore the input trader - will of course match)
if (m == i) or (trader_region[i[0]] != trader_region[m[0]]):
continue
# Check if price difference less than threshold - append to container if so
if abs(j - n) < 1e-6:
if (quantity_bands[m[0], m[1], m[2]] != 0) and (quantity_bands[i[0], i[1], i[2]] != 0):
price_tied.append((i, m))
# # Can break early if price > input price band - monotonically increase prices
# elif n > j:
# break
# Re-order tuples, get unique price-tied combinations, and sort alphabetically
price_tied_reordered = [reorder_tuple(i) for i in price_tied]
price_tied_unique = list(set(price_tied_reordered))
price_tied_unique.sort()
# Flatten to produce one tuple for a given pair of price-tied generators
price_tied_flattened = [(i[0][0], i[0][1], i[0][2], i[1][0],
i[1][1], i[1][2]) for i in price_tied_unique]
return price_tied_flattened
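# Each element of the returned list is a flattened price-tied pair (hypothetical IDs):
#   ('GEN_A', 'ENOF', 2, 'GEN_B', 'ENOF', 4)
# meaning band 2 of GEN_A and band 4 of GEN_B sit in the same region, both have non-zero
# availability, and their prices differ by less than 1e-6.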
def get_trader_effective_initial_mw(data, mode):
"""
Effective InitialMW depends on run mode. If a pricing run for an
intervention pricing period, then WhatIfInitialMW should be used. Otherwise
should use InitialMW.
"""
# Get intervention flag
intervention_flag = get_case_attribute(data, '@Intervention', str)
# Use 'What If' if an intervention pricing period and run mode is 'pricing'
if (intervention_flag == 'True') and (mode == 'pricing'):
return get_trader_initial_condition_attribute(data, 'WhatIfInitialMW', float)
else:
return get_trader_initial_condition_attribute(data, 'InitialMW', float)
def get_interconnector_effective_initial_mw(data, mode):
"""
Effective InitialMW depends on run mode. If a pricing run for an
intervention pricing period, then WhatIfInitialMW should be used. Otherwise
should use InitialMW.
"""
# Get intervention flag
intervention_flag = get_case_attribute(data, '@Intervention', str)
# Use 'What If' if an intervention pricing period and run mode is 'pricing'
if (intervention_flag == 'True') and (mode == 'pricing'):
return get_interconnector_collection_attribute(data, 'WhatIfInitialMW', float)
else:
return get_interconnector_collection_attribute(data, 'InitialMW', float)
def get_mnsp_region_loss_indicator(data, mode) -> dict:
"""
Get region loss indicator. =1 if FromRegion and InitialMW >= 0,
or if ToRegion and InitialMW < 0, else =0
"""
# MNSP and region index
mnsp_index = get_mnsp_index(data)
region_index = get_region_index(data)
# MNSP attributes # TODO: this needs to change if intervention pricing case is considered
initial_mw = get_interconnector_effective_initial_mw(data=data, mode=mode)
to_region = get_interconnector_period_collection_attribute(data, '@ToRegion', str)
from_region = get_interconnector_period_collection_attribute(data, '@FromRegion', str)
# Container for output
out = {}
for i in mnsp_index:
for j in region_index:
# Loss applied to FromRegion
if (j == from_region[i]) and (initial_mw[i] >= 0):
out[(i, j)] = 1
# Loss applied to ToRegion
elif (j == to_region[i]) and (initial_mw[i] < 0):
out[(i, j)] = 1
else:
out[(i, j)] = 0
return out
def get_interconnector_initial_loss_estimate(data, mode) -> dict:
"""Get initial loss estimate for each interconnector"""
# Initial MW for all interconnectors
interconnectors = get_interconnector_index(data=data)
# Depends on intervention pricing period status
initial_mw = get_interconnector_effective_initial_mw(data=data, mode=mode)
return {i: get_interconnector_loss_estimate(
data=data, interconnector_id=i, flow=initial_mw[i])
for i in interconnectors}
def get_interconnector_loss_model_breakpoints_y(data) -> dict:
"""Get interconnector loss model breakpoints - y-coordinate (estimated loss)"""
# Get loss model segments
interconnectors = get_interconnector_index(data=data)
limit = get_interconnector_loss_model_segment_attribute(data, '@Limit', float)
lower_limit = get_interconnector_loss_model_attribute(data, '@LossLowerLimit', float)
# segments = get_standardised_interconnector_loss_model_segments(data=data)
# Break point values - offset segment ID - first segment should be loss lower limit
values = {(i, s + 1):
get_interconnector_loss_estimate(data=data, interconnector_id=i, flow=v)
for (i, s), v in limit.items()}
# Add loss lower limit with zero index (corresponds to first segment)
for i in interconnectors:
values[(i, 0)] = get_interconnector_loss_estimate(
data=data, interconnector_id=i, flow=-lower_limit[i])
return values
def get_interconnector_loss_model_breakpoints_x(data) -> dict:
"""Get interconnector loss model breakpoints - x-coordinate (power output)"""
# Get loss model segments and lower limits for each interconnector
limit = get_interconnector_loss_model_segment_attribute(data, '@Limit', float)
# limit = data['P_INTERCONNECTOR_LOSS_SEGMENT_LIMIT']
lower_limit = get_interconnector_loss_model_attribute(data, '@LossLowerLimit', float)
# lower_limit = data['P_INTERCONNECTOR_LOSS_LOWER_LIMIT']
# Container for break point values - offset segment ID - first segment should be loss lower limit
values = {(interconnector_id, segment_id + 1): flow
for (interconnector_id, segment_id), flow in limit.items()}
# Add loss lower limit with zero index (corresponds to first segment)
for i in get_interconnector_index(data=data):
values[(i, 0)] = -lower_limit[i]
return values
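# Breakpoint indexing convention used above (a sketch with hypothetical numbers): for an
# interconnector with LossLowerLimit=100 and segment limits [-50, 0, 50], the x breakpoints
# become {(ID, 0): -100.0, (ID, 1): -50.0, (ID, 2): 0.0, (ID, 3): 50.0}, i.e. index 0 is the
# negated loss lower limit and index s+1 holds segment s's limit.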
def get_trader_fcas_info(data, mode) -> dict:
"""Extract parameter used in FCAS availability calculations - convert to standard format"""
# FCAS trade types
fcas_trade_types = ['R6SE', 'R60S', 'R5MI', 'R5RE', 'L6SE', 'L60S', 'L5MI', 'L5RE']
# Extract data used for FCAS calculations
trader_quantity_bands = get_trader_quantity_bands(data=data)
trader_type = get_trader_collection_attribute(data, '@TraderType', str)
max_avail = get_trader_period_trade_attribute(data, '@MaxAvail', float)
enablement_min = get_trader_period_trade_attribute(data, '@EnablementMin', float)
low_breakpoint = get_trader_period_trade_attribute(data, '@LowBreakpoint', float)
high_breakpoint = get_trader_period_trade_attribute(data, '@HighBreakpoint', float)
enablement_max = get_trader_period_trade_attribute(data, '@EnablementMax', float)
effective_initial_mw = get_trader_effective_initial_mw(data=data, mode=mode)
uigf = get_trader_period_attribute(data, '@UIGF', float)
hmw = get_trader_initial_condition_attribute(data, 'HMW', float)
lmw = get_trader_initial_condition_attribute(data, 'LMW', float)
agc_status = get_trader_initial_condition_attribute(data, 'AGCStatus', str)
scada_ramp_up_rate = get_trader_initial_condition_attribute(
data, 'SCADARampUpRate', float)
scada_ramp_dn_rate = get_trader_initial_condition_attribute(
data, 'SCADARampDnRate', float)
semi_dispatch = get_trader_collection_attribute(data, '@SemiDispatch', str)
# Container for output
out = {}
for trader_id, trade_type in get_trader_offer_index(data=data):
if trade_type in fcas_trade_types:
# Extract trader quantity bands for given service
quantity_bands = {k: v for k, v in trader_quantity_bands.items()
if k[0] == trader_id and k[1] == trade_type}
# Energy offer trade type depends on whether trader is a generator or a load
if trader_type[trader_id] == 'GENERATOR':
energy_offer_type = 'ENOF'
elif trader_type[trader_id] in ['LOAD', 'NORMALLY_ON_LOAD']:
energy_offer_type = 'LDOF'
else:
raise Exception('Unexpected trader type:',
trader_id, trader_type[trader_id])
# Compile output into single dictionary
out[(trader_id, trade_type)] = {
'trader_id': trader_id,
'trade_type': trade_type,
'quantity_bands': quantity_bands,
'energy_max_avail': max_avail.get((trader_id, energy_offer_type)),
'enablement_min': enablement_min[(trader_id, trade_type)],
'low_breakpoint': low_breakpoint[(trader_id, trade_type)],
'high_breakpoint': high_breakpoint[(trader_id, trade_type)],
'enablement_max': enablement_max[(trader_id, trade_type)],
'max_avail': max_avail[(trader_id, trade_type)],
'initial_mw': effective_initial_mw.get(trader_id),
'uigf': uigf.get(trader_id),
'hmw': hmw.get(trader_id),
'lmw': lmw.get(trader_id),
'agc_status': agc_status.get(trader_id),
'agc_ramp_up': scada_ramp_up_rate.get(trader_id),
'agc_ramp_dn': scada_ramp_dn_rate.get(trader_id),
'trader_type': trader_type.get(trader_id),
'semi_dispatch': semi_dispatch.get(trader_id),
}
return out
def get_trader_fcas_availability_status(data, mode) -> dict:
"""Get FCAS availability"""
# Extract trade FCAS parameters into single dictionary to assist with availability calculations
fcas_info = get_trader_fcas_info(data=data, mode=mode)
# Container for FCAS availability
fcas_status = {}
for (trader_id, trade_type), params in fcas_info.items():
# Get FCAS availability status
fcas_status[(trader_id, trade_type)
] = fcas.get_trader_fcas_availability_status(params)
return fcas_status
def get_trader_energy_offer_ramp_rate(trader_id, ramp_rates):
"""
Given dictionary of trader offer ramp rates, extract the energy offer ramp
rate for a given trader. Not all traders participate in the energy market
so the function may return if no energy offer ramp rate exists.
"""
# Check that a trader doesn't have both energy and load offers. Will
# not know which offer ramp rate should be used. This case shouldn't
# occur in practice.
has_generation_offer = (trader_id, 'ENOF') in ramp_rates.keys()
has_load_offer = (trader_id, 'LDOF') in ramp_rates.keys()
if has_generation_offer and has_load_offer:
raise Exception('Trader has both generation and load offers')
# Ramp rate corresponding to energy offer
if (trader_id, 'ENOF') in ramp_rates.keys():
return ramp_rates[(trader_id, 'ENOF')]
elif (trader_id, 'LDOF') in ramp_rates.keys():
return ramp_rates[(trader_id, 'LDOF')]
else:
return None
def get_trader_scada_ramp_rate(trader_id, ramp_rates):
"""
Extract SCADA ramp rate for a given trader. If the SCADA ramp rate is 0
or missing return None.
"""
if (trader_id in ramp_rates.keys()) and (ramp_rates[trader_id] > 0):
return ramp_rates[trader_id]
else:
return None
def get_trader_effective_ramp_rate(data, direction) -> dict:
"""
Compute effective ramp-up rate. Min of energy offer ramp rate and SCADA
ramp rate. Some traders do not have ramp rates specified and have None
corresponding to their ramp rate.
"""
traders = get_trader_index(data=data)
# Get attributes corresponding to ramp direction (up or down)
if direction == 'up':
offer_attribute = '@RampUpRate'
scada_attribute = 'SCADARampUpRate'
elif direction == 'down':
offer_attribute = '@RampDnRate'
scada_attribute = 'SCADARampDnRate'
else:
raise ValueError("'direction' must be either 'up' or 'down'")
# Extract ramp rates defined in trader offers and SCADA initial conditions
offers = get_trader_period_trade_attribute(
data=data, attribute=offer_attribute, func=float)
scada = get_trader_initial_condition_attribute(
data=data, attribute=scada_attribute, func=float)
out = {}
for i in traders:
offer_ramp = get_trader_energy_offer_ramp_rate(trader_id=i, ramp_rates=offers)
scada_ramp = get_trader_scada_ramp_rate(trader_id=i, ramp_rates=scada)
# Non-none ramp rates
ramp_rates = [i for i in [offer_ramp, scada_ramp] if i is not None]
# Effective ramp rate is the min of the offer and SCADA ramp rates
if ramp_rates:
out[i] = min(ramp_rates)
return out
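# Illustrative outcome (hypothetical values): if a trader's energy offer declares
# RampUpRate=120 MW/min while its SCADA initial condition reports SCADARampUpRate=90,
# the effective ramp-up rate is min(120, 90) = 90; traders with neither value available
# are simply omitted from the returned dictionary.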
def construct_case(data, mode) -> dict:
"""
Parse json data
Parameters
----------
data : dict
NEMDE casefile
    mode : str
        Run mode used to determine the intervention status (e.g. 'pricing')
Returns
-------
case : dict
Dictionary containing case data to be read into model
"""
# Get intervention status
intervention = get_intervention_status(data=data, mode=mode)
case = {
'S_REGIONS': get_region_index(data),
'S_TRADERS': get_trader_index(data),
'S_TRADERS_SEMI_DISPATCH': get_trader_semi_dispatch_index(data),
'S_TRADER_OFFERS': get_trader_offer_index(data),
'S_TRADER_ENERGY_OFFERS': get_trader_energy_offer_index(data),
'S_TRADER_FCAS_OFFERS': get_trader_fcas_offer_index(data),
'S_TRADER_FAST_START': get_trader_fast_start_index(data),
'S_TRADER_PRICE_TIED_GENERATORS': get_price_tied_bands(data, trade_type='ENOF'),
'S_TRADER_PRICE_TIED_LOADS': get_price_tied_bands(data, trade_type='LDOF'),
'S_GENERIC_CONSTRAINTS': get_generic_constraint_index(data),
'S_GC_TRADER_VARS': get_generic_constraint_trader_variable_index(data),
'S_GC_INTERCONNECTOR_VARS': get_generic_constraint_interconnector_variable_index(data),
'S_GC_REGION_VARS': get_generic_constraint_region_variable_index(data),
'S_MNSPS': get_mnsp_index(data),
'S_MNSP_OFFERS': get_mnsp_offer_index(data),
'S_INTERCONNECTORS': get_interconnector_index(data),
'S_INTERCONNECTOR_LOSS_MODEL_BREAKPOINTS': get_interconnector_loss_model_breakpoint_index(data),
'S_INTERCONNECTOR_LOSS_MODEL_INTERVALS': get_interconnector_loss_model_interval_index(data),
'P_CASE_ID': get_case_attribute(data, '@CaseID', str),
'P_INTERVENTION_STATUS': intervention,
'P_TRADER_PRICE_BAND': get_trader_price_bands(data),
'P_TRADER_QUANTITY_BAND': get_trader_quantity_bands(data),
'P_TRADER_MAX_AVAIL': get_trader_period_trade_attribute(data, '@MaxAvail', float),
'P_TRADER_UIGF': get_trader_period_attribute(data, '@UIGF', float),
'P_TRADER_INITIAL_MW': get_trader_initial_condition_attribute(data, 'InitialMW', float),
'P_TRADER_WHAT_IF_INITIAL_MW': get_trader_initial_condition_attribute(data, 'WhatIfInitialMW', float),
'P_TRADER_HMW': get_trader_initial_condition_attribute(data, 'HMW', float),
'P_TRADER_LMW': get_trader_initial_condition_attribute(data, 'LMW', float),
'P_TRADER_AGC_STATUS': get_trader_initial_condition_attribute(data, 'AGCStatus', str),
'P_TRADER_SEMI_DISPATCH_STATUS': get_trader_collection_attribute(data, '@SemiDispatch', str),
'P_TRADER_REGION': get_trader_period_attribute(data, '@RegionID', str),
'P_TRADER_PERIOD_RAMP_UP_RATE': get_trader_period_trade_attribute(data, '@RampUpRate', float),
'P_TRADER_PERIOD_RAMP_DN_RATE': get_trader_period_trade_attribute(data, '@RampDnRate', float),
'P_TRADER_TYPE': get_trader_collection_attribute(data, '@TraderType', str),
'P_TRADER_SCADA_RAMP_UP_RATE': get_trader_initial_condition_attribute(data, 'SCADARampUpRate', float),
'P_TRADER_SCADA_RAMP_DN_RATE': get_trader_initial_condition_attribute(data, 'SCADARampDnRate', float),
'P_TRADER_MIN_LOADING_MW': get_trader_fast_start_attribute(data, '@MinLoadingMW', float),
'P_TRADER_CURRENT_MODE': get_trader_fast_start_attribute(data, '@CurrentMode', int),
'P_TRADER_CURRENT_MODE_TIME': get_trader_fast_start_attribute(data, '@CurrentModeTime', float),
'P_TRADER_T1': get_trader_fast_start_attribute(data, '@T1', float),
'P_TRADER_T2': get_trader_fast_start_attribute(data, '@T2', float),
'P_TRADER_T3': get_trader_fast_start_attribute(data, '@T3', float),
'P_TRADER_T4': get_trader_fast_start_attribute(data, '@T4', float),
'P_TRADER_ENABLEMENT_MIN': get_trader_period_trade_attribute(data, '@EnablementMin', float),
'P_TRADER_LOW_BREAKPOINT': get_trader_period_trade_attribute(data, '@LowBreakpoint', float),
'P_TRADER_HIGH_BREAKPOINT': get_trader_period_trade_attribute(data, '@HighBreakpoint', float),
'P_TRADER_ENABLEMENT_MAX': get_trader_period_trade_attribute(data, '@EnablementMax', float),
'P_TRADER_EFFECTIVE_INITIAL_MW': get_trader_effective_initial_mw(data=data, mode=mode),
'P_TRADER_FCAS_AVAILABILITY_STATUS': get_trader_fcas_availability_status(data=data, mode=mode),
'P_TRADER_EFFECTIVE_RAMP_UP_RATE': get_trader_effective_ramp_rate(data=data, direction='up'),
'P_TRADER_EFFECTIVE_RAMP_DN_RATE': get_trader_effective_ramp_rate(data=data, direction='down'),
'P_INTERCONNECTOR_INITIAL_MW': get_interconnector_collection_attribute(data, 'InitialMW', float),
'P_INTERCONNECTOR_TO_REGION': get_interconnector_period_collection_attribute(data, '@ToRegion', str),
'P_INTERCONNECTOR_FROM_REGION': get_interconnector_period_collection_attribute(data, '@FromRegion', str),
'P_INTERCONNECTOR_LOWER_LIMIT': get_interconnector_period_collection_attribute(data, '@LowerLimit', float),
'P_INTERCONNECTOR_UPPER_LIMIT': get_interconnector_period_collection_attribute(data, '@UpperLimit', float),
'P_INTERCONNECTOR_MNSP_STATUS': get_interconnector_period_collection_attribute(data, '@MNSP', str),
'P_INTERCONNECTOR_LOSS_SHARE': get_interconnector_loss_model_attribute(data, '@LossShare', float),
'P_INTERCONNECTOR_LOSS_LOWER_LIMIT': get_interconnector_loss_model_attribute(data, '@LossLowerLimit', float),
'P_INTERCONNECTOR_LOSS_SEGMENT_LIMIT': get_interconnector_loss_model_segment_attribute(data, '@Limit', float),
'P_INTERCONNECTOR_LOSS_SEGMENT_FACTOR': get_interconnector_loss_model_segment_attribute(data, '@Factor', float),
'P_INTERCONNECTOR_EFFECTIVE_INITIAL_MW': get_interconnector_effective_initial_mw(data=data, mode=mode),
'P_INTERCONNECTOR_INITIAL_LOSS_ESTIMATE': get_interconnector_initial_loss_estimate(data=data, mode=mode),
'P_INTERCONNECTOR_LOSS_MODEL_BREAKPOINT_Y': get_interconnector_loss_model_breakpoints_y(data=data),
'P_INTERCONNECTOR_LOSS_MODEL_BREAKPOINT_X': get_interconnector_loss_model_breakpoints_x(data=data),
'P_MNSP_PRICE_BAND': get_mnsp_price_bands(data),
'P_MNSP_QUANTITY_BAND': get_mnsp_quantity_bands(data),
'P_MNSP_MAX_AVAILABLE': get_mnsp_quantity_band_attribute(data, '@MaxAvail', float),
'P_MNSP_TO_REGION_LF': get_mnsp_period_collection_attribute(data, '@ToRegionLF', float),
'P_MNSP_TO_REGION_LF_EXPORT': get_mnsp_period_collection_attribute(data, '@ToRegionLFExport', float),
'P_MNSP_TO_REGION_LF_IMPORT': get_mnsp_period_collection_attribute(data, '@ToRegionLFImport', float),
'P_MNSP_FROM_REGION_LF': get_mnsp_period_collection_attribute(data, '@FromRegionLF', float),
'P_MNSP_FROM_REGION_LF_EXPORT': get_mnsp_period_collection_attribute(data, '@FromRegionLFExport', float),
'P_MNSP_FROM_REGION_LF_IMPORT': get_mnsp_period_collection_attribute(data, '@FromRegionLFImport', float),
'P_MNSP_LOSS_PRICE': get_case_attribute(data, '@MNSPLossesPrice', float),
'P_MNSP_RAMP_UP_RATE': get_mnsp_quantity_band_attribute(data, '@RampUpRate', float),
'P_MNSP_RAMP_DOWN_RATE': get_mnsp_quantity_band_attribute(data, '@RampDnRate', float),
'P_MNSP_REGION_LOSS_INDICATOR': get_mnsp_region_loss_indicator(data=data, mode=mode),
'P_REGION_INITIAL_DEMAND': get_region_initial_condition_attribute(data, 'InitialDemand', float),
'P_REGION_ADE': get_region_initial_condition_attribute(data, 'ADE', float),
'P_REGION_DF': get_region_period_collection_attribute(data, '@DF', float),
'P_GC_RHS': get_generic_constraint_rhs(data, intervention),
'P_GC_TYPE': get_generic_constraint_collection_attribute(data, '@Type', str),
'P_CVF_GC': get_generic_constraint_collection_attribute(data, '@ViolationPrice', float),
'P_CVF_VOLL': get_case_attribute(data, '@VoLL', float),
'P_CVF_ENERGY_DEFICIT_PRICE': get_case_attribute(data, '@EnergyDeficitPrice', float),
'P_CVF_ENERGY_SURPLUS_PRICE': get_case_attribute(data, '@EnergySurplusPrice', float),
'P_CVF_UIGF_SURPLUS_PRICE': get_case_attribute(data, '@UIGFSurplusPrice', float),
'P_CVF_RAMP_RATE_PRICE': get_case_attribute(data, '@RampRatePrice', float),
'P_CVF_CAPACITY_PRICE': get_case_attribute(data, '@CapacityPrice', float),
'P_CVF_OFFER_PRICE': get_case_attribute(data, '@OfferPrice', float),
'P_CVF_MNSP_OFFER_PRICE': get_case_attribute(data, '@MNSPOfferPrice', float),
'P_CVF_MNSP_RAMP_RATE_PRICE': get_case_attribute(data, '@MNSPRampRatePrice', float),
'P_CVF_MNSP_CAPACITY_PRICE': get_case_attribute(data, '@MNSPCapacityPrice', float),
'P_CVF_AS_PROFILE_PRICE': get_case_attribute(data, '@ASProfilePrice', float),
'P_CVF_AS_MAX_AVAIL_PRICE': get_case_attribute(data, '@ASMaxAvailPrice', float),
'P_CVF_AS_ENABLEMENT_MIN_PRICE': get_case_attribute(data, '@ASEnablementMinPrice', float),
'P_CVF_AS_ENABLEMENT_MAX_PRICE': get_case_attribute(data, '@ASEnablementMaxPrice', float),
'P_CVF_INTERCONNECTOR_PRICE': get_case_attribute(data, '@InterconnectorPrice', float),
'P_CVF_FAST_START_PRICE': get_case_attribute(data, '@FastStartPrice', float),
'P_CVF_GENERIC_CONSTRAINT_PRICE': get_case_attribute(data, '@GenericConstraintPrice', float),
'P_CVF_SATISFACTORY_NETWORK_PRICE': get_case_attribute(data, '@Satisfactory_Network_Price', float),
'P_TIE_BREAK_PRICE': get_case_attribute(data, '@TieBreakPrice', float),
'P_FAST_START_THRESHOLD': get_case_attribute(data, '@FastStartThreshold', float),
'intermediate': {
'generic_constraint_lhs_terms': get_generic_constraint_lhs_terms(data),
'loss_model_segments': get_standardised_interconnector_loss_model_segments(data),
},
}
return case
|
the-stack_106_26111 | # Natural Language Toolkit: Twitter client
#
# Copyright (C) 2001-2021 NLTK Project
# Author: Ewan Klein <[email protected]>
# Lorenzo Rubio <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Utility functions for the :mod:`twitterclient` module which do not require
the `twython` library to have been installed.
"""
import csv
import gzip
import json
from nltkma.internals import deprecated
HIER_SEPARATOR = "."
def extract_fields(tweet, fields):
"""
Extract field values from a full tweet and return them as a list
:param json tweet: The tweet in JSON format
:param list fields: The fields to be extracted from the tweet
:rtype: list(str)
"""
out = []
for field in fields:
try:
_add_field_to_out(tweet, field, out)
except TypeError as e:
            raise RuntimeError(
                "Fatal error when extracting fields. Cannot find field %s" % field
            ) from e
return out
def _add_field_to_out(json, field, out):
if _is_composed_key(field):
key, value = _get_key_value_composed(field)
_add_field_to_out(json[key], value, out)
else:
out += [json[field]]
def _is_composed_key(field):
return HIER_SEPARATOR in field
def _get_key_value_composed(field):
out = field.split(HIER_SEPARATOR)
# there could be up to 3 levels
key = out[0]
value = HIER_SEPARATOR.join(out[1:])
return key, value
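# Illustrative doctest-style sketch (added for clarity, not part of the
# original NLTK module): a "composed" field names a nested object with
# HIER_SEPARATOR, and splitting keeps everything after the first dot intact.
#
#     >>> _is_composed_key('user.followers_count')
#     True
#     >>> _get_key_value_composed('user.followers_count')
#     ('user', 'followers_count')
#     >>> _get_key_value_composed('place.bounding_box.coordinates')
#     ('place', 'bounding_box.coordinates')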
def _get_entity_recursive(json, entity):
if not json:
return None
elif isinstance(json, dict):
for key, value in json.items():
if key == entity:
return value
# 'entities' and 'extended_entities' are wrappers in Twitter json
# structure that contain other Twitter objects. See:
# https://dev.twitter.com/overview/api/entities-in-twitter-objects
if key == "entities" or key == "extended_entities":
candidate = _get_entity_recursive(value, entity)
if candidate is not None:
return candidate
return None
elif isinstance(json, list):
for item in json:
candidate = _get_entity_recursive(item, entity)
if candidate is not None:
return candidate
return None
else:
return None
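# Illustrative sketch (an assumption about typical input, not part of the
# original module): entities are found even when nested inside the
# 'entities'/'extended_entities' wrappers of a tweet dict.
#
#     >>> tweet = {'id': 1, 'entities': {'hashtags': [{'text': 'nltk'}]}}
#     >>> _get_entity_recursive(tweet, 'hashtags')
#     [{'text': 'nltk'}]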
def json2csv(
fp, outfile, fields, encoding="utf8", errors="replace", gzip_compress=False
):
"""
Extract selected fields from a file of line-separated JSON tweets and
write to a file in CSV format.
This utility function allows a file of full tweets to be easily converted
to a CSV file for easier processing. For example, just TweetIDs or
just the text content of the Tweets can be extracted.
Additionally, the function allows combinations of fields of other Twitter
objects (mainly the users, see below).
For Twitter entities (e.g. hashtags of a Tweet), and for geolocation, see
`json2csv_entities`
    :param fp: The file-like object containing full tweets
:param str outfile: The name of the text file where results should be\
written
:param list fields: The list of fields to be extracted. Useful examples\
are 'id_str' for the tweetID and 'text' for the text of the tweet. See\
<https://dev.twitter.com/overview/api/tweets> for a full list of fields.\
e. g.: ['id_str'], ['id', 'text', 'favorite_count', 'retweet_count']\
Additionally, it allows IDs from other Twitter objects, e. g.,\
['id', 'text', 'user.id', 'user.followers_count', 'user.friends_count']
    :param errors: Behaviour for encoding errors, see\
https://docs.python.org/3/library/codecs.html#codec-base-classes
:param gzip_compress: if `True`, output files are compressed with gzip
"""
(writer, outf) = _outf_writer(outfile, encoding, errors, gzip_compress)
# write the list of fields as header
writer.writerow(fields)
# process the file
for line in fp:
tweet = json.loads(line)
row = extract_fields(tweet, fields)
writer.writerow(row)
outf.close()
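# Minimal usage sketch for json2csv (the file names here are hypothetical
# placeholders, not files shipped with this module):
#
#     >>> with open('tweets.jsonl') as fp:                    # doctest: +SKIP
#     ...     json2csv(fp, 'tweets.csv', ['id_str', 'text', 'user.id'])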
@deprecated("Use open() and csv.writer() directly instead.")
def outf_writer_compat(outfile, encoding, errors, gzip_compress=False):
"""Get a CSV writer with optional compression."""
return _outf_writer(outfile, encoding, errors, gzip_compress)
def _outf_writer(outfile, encoding, errors, gzip_compress=False):
if gzip_compress:
outf = gzip.open(outfile, "wt", encoding=encoding, errors=errors)
else:
outf = open(outfile, "w", encoding=encoding, errors=errors)
writer = csv.writer(outf)
return (writer, outf)
def json2csv_entities(
tweets_file,
outfile,
main_fields,
entity_type,
entity_fields,
encoding="utf8",
errors="replace",
gzip_compress=False,
):
"""
Extract selected fields from a file of line-separated JSON tweets and
write to a file in CSV format.
This utility function allows a file of full Tweets to be easily converted
to a CSV file for easier processing of Twitter entities. For example, the
hashtags or media elements of a tweet can be extracted.
It returns one line per entity of a Tweet, e.g. if a tweet has two hashtags
there will be two lines in the output file, one per hashtag
:param tweets_file: the file-like object containing full Tweets
:param str outfile: The path of the text file where results should be\
written
:param list main_fields: The list of fields to be extracted from the main\
object, usually the tweet. Useful examples: 'id_str' for the tweetID. See\
<https://dev.twitter.com/overview/api/tweets> for a full list of fields.
e. g.: ['id_str'], ['id', 'text', 'favorite_count', 'retweet_count']
If `entity_type` is expressed with hierarchy, then it is the list of\
fields of the object that corresponds to the key of the entity_type,\
(e.g., for entity_type='user.urls', the fields in the main_fields list\
belong to the user object; for entity_type='place.bounding_box', the\
files in the main_field list belong to the place object of the tweet).
    :param str entity_type: The name of the entity: 'hashtags', 'media',\
'urls' and 'user_mentions' for the tweet object. For a user object,\
this needs to be expressed with a hierarchy: `'user.urls'`. For the\
bounding box of the Tweet location, use `'place.bounding_box'`.
:param list entity_fields: The list of fields to be extracted from the\
entity. E.g. `['text']` (of the Tweet)
    :param errors: Behaviour for encoding errors, see\
https://docs.python.org/3/library/codecs.html#codec-base-classes
:param gzip_compress: if `True`, output files are compressed with gzip
"""
(writer, outf) = _outf_writer(outfile, encoding, errors, gzip_compress)
header = get_header_field_list(main_fields, entity_type, entity_fields)
writer.writerow(header)
for line in tweets_file:
tweet = json.loads(line)
if _is_composed_key(entity_type):
key, value = _get_key_value_composed(entity_type)
object_json = _get_entity_recursive(tweet, key)
if not object_json:
# this can happen in the case of "place"
continue
object_fields = extract_fields(object_json, main_fields)
items = _get_entity_recursive(object_json, value)
_write_to_file(object_fields, items, entity_fields, writer)
else:
tweet_fields = extract_fields(tweet, main_fields)
items = _get_entity_recursive(tweet, entity_type)
_write_to_file(tweet_fields, items, entity_fields, writer)
outf.close()
def get_header_field_list(main_fields, entity_type, entity_fields):
if _is_composed_key(entity_type):
key, value = _get_key_value_composed(entity_type)
main_entity = key
sub_entity = value
else:
main_entity = None
sub_entity = entity_type
if main_entity:
output1 = [HIER_SEPARATOR.join([main_entity, x]) for x in main_fields]
else:
output1 = main_fields
output2 = [HIER_SEPARATOR.join([sub_entity, x]) for x in entity_fields]
return output1 + output2
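# Doctest-style sketch of the header construction (added for clarity, not
# part of the original module):
#
#     >>> get_header_field_list(['id_str'], 'hashtags', ['text'])
#     ['id_str', 'hashtags.text']
#     >>> get_header_field_list(['id'], 'user.urls', ['url'])
#     ['user.id', 'urls.url']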
def _write_to_file(object_fields, items, entity_fields, writer):
if not items:
# it could be that the entity is just not present for the tweet
# e.g. tweet hashtag is always present, even as [], however
# tweet media may not be present
return
if isinstance(items, dict):
# this happens e.g. for "place" of a tweet
row = object_fields
        # there might be composed keys in the list of required fields
entity_field_values = [x for x in entity_fields if not _is_composed_key(x)]
entity_field_composed = [x for x in entity_fields if _is_composed_key(x)]
for field in entity_field_values:
value = items[field]
if isinstance(value, list):
row += value
else:
row += [value]
# now check required dictionaries
for d in entity_field_composed:
kd, vd = _get_key_value_composed(d)
json_dict = items[kd]
if not isinstance(json_dict, dict):
                raise RuntimeError(
                    "Key {0} does not contain a dictionary in the json file".format(kd)
                )
row += [json_dict[vd]]
writer.writerow(row)
return
# in general it is a list
for item in items:
row = object_fields + extract_fields(item, entity_fields)
writer.writerow(row)
|
the-stack_106_26112 | """Determine the status of a nix build as lazily as possible in a
bisect-friendly format"""
import sys
import argparse
from pathlib import Path
from nix_bisect import nix, exceptions, git_bisect
from nix_bisect.derivation import Derivation
def drvish_to_drv(drvish, nix_file, nix_options, nix_argstr):
"""No-op on drv files, otherwise evaluate in the context of nix_file"""
path = Path(drvish)
if path.exists() and path.name.endswith(".drv"):
return str(path)
else:
return nix.instantiate(
drvish, nix_file, nix_options=nix_options, nix_argstr=nix_argstr
)
def build_status(
drvish,
nix_file,
nix_options,
nix_argstr,
failure_line=None,
max_rebuilds=None,
rebuild_blacklist=(),
):
"""Determine the status of `drvish` and return the result as indicated"""
try:
drv = drvish_to_drv(
drvish, nix_file, nix_options=nix_options, nix_argstr=nix_argstr
)
except nix.InstantiationFailure:
return "instantiation_failure"
print(f"Querying status of {drv}.")
try:
drv = Derivation(
drv,
nix_options=nix_options,
max_rebuilds=max_rebuilds,
rebuild_blacklist=rebuild_blacklist,
)
if not drv.can_build_deps():
failed = drv.sample_dependency_failure()
print(f"Dependency {failed} failed to build.")
return f"dependency_failure"
if drv.can_build():
return "success"
else:
if failure_line is None or drv.log_contains(failure_line):
return "failure"
else:
return "failure_without_line"
except exceptions.ResourceConstraintException as e:
print(e)
return "resource_limit"
class _ActionChoices(list):
def __init__(self):
self.named_choices = ["good", "bad", "skip", "skip-range"]
# Add a dummy choice that will only show up in --help but will not
# actually be accepted.
choice_list = self.named_choices + ["<int>"]
super().__init__(choice_list)
# An extension of list that just pretends every integer is a member. Used
# to accept arbitrary return codes as choices (in addition to named
# actions).
def __contains__(self, other):
if self.named_choices.__contains__(other):
return True
try:
_retcode = int(other)
return True
except ValueError:
return False
def _main():
def to_exit_code(action):
try:
return int(action)
except ValueError:
return {"good": 0, "bad": 1, "skip": 125, "skip-range": 129, "abort": 128,}[
action
]
action_choices = _ActionChoices()
parser = argparse.ArgumentParser(
description="Build a package with nix, suitable for git-bisect."
)
parser.add_argument(
"drvish",
type=str,
help="Derivation or an attribute/expression that can be resolved to a derivation in the context of the nix file",
)
parser.add_argument(
"--file",
"-f",
help="Nix file that contains the attribute",
type=str,
default=".",
)
parser.add_argument(
"--option",
nargs=2,
metavar=("name", "value"),
action="append",
default=[],
help="Set the Nix configuration option `name` to `value`.",
)
parser.add_argument(
"--argstr",
nargs=2,
metavar=("name", "value"),
action="append",
default=[],
help="Passed on to `nix instantiate`",
)
parser.add_argument(
"--max-rebuilds", type=int, help="Number of builds to allow.", default=None,
)
parser.add_argument(
"--failure-line",
help="Line required in the build logs to count as a failure.",
default=None,
)
parser.add_argument(
"--on-success",
default="good",
choices=action_choices,
help="Bisect action if the expression can be successfully built",
)
parser.add_argument(
"--on-failure",
default="bad",
choices=action_choices,
help="Bisect action if the expression can be successfully built",
)
parser.add_argument(
"--on-dependency-failure",
default="skip-range",
choices=action_choices,
help="Bisect action if the expression can be successfully built",
)
parser.add_argument(
"--on-failure-without-line",
default="skip-range",
choices=action_choices,
help="Bisect action if the expression can be successfully built",
)
parser.add_argument(
"--on-instantiation-failure",
default="skip-range",
choices=action_choices,
help="Bisect action if the expression cannot be instantiated",
)
parser.add_argument(
"--on-resource-limit",
default="skip",
choices=action_choices,
help="Bisect action if a resource limit like rebuild count is exceeded",
)
parser.add_argument(
"--rebuild-blacklist",
action="append",
help="If any derivation matching this regex needs to be rebuilt, the build is skipped",
)
try:
args = parser.parse_args()
except SystemExit:
git_bisect.abort()
status = build_status(
args.drvish,
args.file,
nix_options=args.option,
nix_argstr=args.argstr,
failure_line=args.failure_line,
max_rebuilds=args.max_rebuilds,
rebuild_blacklist=args.rebuild_blacklist
if args.rebuild_blacklist is not None
else (),
)
action_on_status = {
"success": args.on_success,
"failure": args.on_failure,
"dependency_failure": args.on_dependency_failure,
"failure_without_line": args.on_failure_without_line,
"instantiation_failure": args.on_instantiation_failure,
"resource_limit": args.on_resource_limit,
}
print(f"Build status: {status}")
sys.exit(to_exit_code(action_on_status[status]))
if __name__ == "__main__":
sys.exit(_main())
|
the-stack_106_26113 | # Enter an interactive TensorFlow Session.
import tensorflow as tf
sess = tf.InteractiveSession()
x = tf.Variable([1.0, 2.0])
a = tf.constant([3.0, 3.0])
# Initialize 'x' using the run() method of its initializer op.
x.initializer.run()
# Add an op to subtract 'a' from 'x'. Run it and print the result
# Note: `tf.sub` was renamed to `tf.subtract` in TensorFlow 1.0.
sub = tf.subtract(x, a)
print(sub.eval())
# ==> [-2. -1.]
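# A rough eager-mode equivalent for TensorFlow 2.x (an added sketch, not part
# of the original example): operations execute immediately, so no session is
# needed at all.
#
#   x2 = tf.Variable([1.0, 2.0])
#   a2 = tf.constant([3.0, 3.0])
#   print(tf.subtract(x2, a2).numpy())   # ==> [-2. -1.]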
# Close the Session when we're done.
sess.close() |
the-stack_106_26114 | # Copyright 2021 University College London (UCL) Research Software Development
# Group. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: Apache-2.0
import os.path as path
import sys
import reframe as rfm
import reframe.utility.sanity as sn
# Add top-level directory to `sys.path` so we can easily import extra modules
# from any directory.
sys.path.append(path.join(path.dirname(__file__), '..', '..'))
# `identify_build_environment` will be used to identify the Spack environment
# to be used when running the benchmark.
from modules.utils import identify_build_environment
# See ReFrame documentation about writing tests for more details. In
# particular:
# * https://reframe-hpc.readthedocs.io/en/stable/tutorials.html (this is a
# walkthrough guide for writing your first tests)
# * https://reframe-hpc.readthedocs.io/en/stable/regression_test_api.html
# (reference about the regression tests API)
# Class to define the benchmark. See
# https://reframe-hpc.readthedocs.io/en/stable/regression_test_api.html#the-reframe-module
# for more information about the API of ReFrame tests.
@rfm.simple_test
class SombreroBenchmark(rfm.RegressionTest):
# Systems and programming environments where to run this benchmark. We
# typically run them on all systems ('*'), unless there are particular
# constraints.
valid_systems = ['*']
valid_prog_environs = ['*']
# The build system to use. We always use Spack.
build_system = 'Spack'
# Number of (MPI) tasks and CPUs per task. Here we hard-code 1, but in
# other cases you may want to use something different. Note: ReFrame will
# automatically launch MPI with the given number of tasks, using the
    # launcher specified in the config for the current system.
num_tasks = 1
num_tasks_per_node = 1
num_cpus_per_task = 1
# The program for running the benchmarks.
executable = 'sombrero1'
# Arguments to pass to the program above to run the benchmarks.
executable_opts = ['-w', '-s', 'small']
# Time limit of the job, automatically set in the job script.
time_limit = '2m'
# With `variables` you can set environment variables to be used in the job.
# For example with `OMP_NUM_THREADS` we set the number of OpenMP threads
# (not actually used in this specific benchmark).
variables = {
'OMP_NUM_THREADS': f'{num_cpus_per_task}',
}
# These extra resources are needed for when using the SGE scheduler.
extra_resources = {
'mpi': {'num_slots': num_tasks * num_cpus_per_task}
}
# Reference values for the performance benchmarks, see the
# `set_perf_patterns` function below.
reference = {
'*': {
# 1 is the reference value, second and third elements are the lower
# and upper bound (if `None`, there are no bounds), last element is
# the unit.
'flops': (1, None, None, 'Gflops/seconds'),
}
}
@run_before('compile')
def setup_build_system(self):
# Spack spec(s) to install the desired package(s). It is recommended
# to specify also the version number for reproducibility.
self.build_system.specs = ['sombrero@2021-08-16']
# Identify the Spack environment for the current system. Keep this
# setting as is.
self.build_system.environment = identify_build_environment(
self.current_system.name)
# Function defining a sanity check. See
# https://reframe-hpc.readthedocs.io/en/stable/regression_test_api.html
# for the API of ReFrame tests, including performance ones.
@run_before('sanity')
def set_sanity_patterns(self):
        # Check that the string `[RESULT][0]` appears in the standard output of
# the program.
self.sanity_patterns = sn.assert_found(r'\[RESULT\]\[0\]', self.stdout)
# A performance benchmark.
@run_before('performance')
def set_perf_patterns(self):
# This performance pattern parses the output of the program to extract
# the desired figure of merit.
self.perf_patterns = {
'flops': sn.extractsingle(
r'\[RESULT\]\[0\] Case 1 (\S+) Gflops/seconds',
self.stdout, 1, float),
}
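# Illustrative command for running this check with ReFrame (a sketch assuming
# a standard ReFrame setup; the config and check file paths are hypothetical):
#
#   reframe -C config.py -c benchmarks/examples/sombrero/sombrero.py -r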
|
the-stack_106_26116 | """Tests for the storage helper."""
from datetime import timedelta
from unittest.mock import patch
import pytest
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.helpers import storage
from homeassistant.util import dt
from tests.common import async_fire_time_changed, mock_coro
MOCK_VERSION = 1
MOCK_KEY = 'storage-test'
MOCK_DATA = {'hello': 'world'}
@pytest.fixture
def mock_save():
"""Fixture to mock JSON save."""
written = []
with patch('homeassistant.util.json.save_json',
side_effect=lambda *args: written.append(args)):
yield written
@pytest.fixture
def mock_load(mock_save):
"""Fixture to mock JSON read."""
with patch('homeassistant.util.json.load_json',
side_effect=lambda *args: mock_save[-1][1]):
yield
@pytest.fixture
def store(hass):
"""Fixture of a store that prevents writing on HASS stop."""
store = storage.Store(hass, MOCK_VERSION, MOCK_KEY)
store._async_ensure_stop_listener = lambda: None
yield store
async def test_loading(hass, store, mock_save, mock_load):
"""Test we can save and load data."""
await store.async_save(MOCK_DATA)
data = await store.async_load()
assert data == MOCK_DATA
async def test_loading_non_existing(hass, store):
"""Test we can save and load data."""
with patch('homeassistant.util.json.open', side_effect=FileNotFoundError):
data = await store.async_load()
assert data is None
async def test_saving_with_delay(hass, store, mock_save):
"""Test saving data after a delay."""
await store.async_save(MOCK_DATA, delay=1)
assert len(mock_save) == 0
async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=1))
await hass.async_block_till_done()
assert len(mock_save) == 1
async def test_saving_on_stop(hass, mock_save):
"""Test delayed saves trigger when we quit Home Assistant."""
store = storage.Store(hass, MOCK_VERSION, MOCK_KEY)
await store.async_save(MOCK_DATA, delay=1)
assert len(mock_save) == 0
hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
await hass.async_block_till_done()
assert len(mock_save) == 1
async def test_loading_while_delay(hass, store, mock_save, mock_load):
"""Test we load new data even if not written yet."""
await store.async_save({'delay': 'no'})
assert len(mock_save) == 1
await store.async_save({'delay': 'yes'}, delay=1)
assert len(mock_save) == 1
data = await store.async_load()
assert data == {'delay': 'yes'}
async def test_writing_while_writing_delay(hass, store, mock_save, mock_load):
"""Test a write while a write with delay is active."""
await store.async_save({'delay': 'yes'}, delay=1)
assert len(mock_save) == 0
await store.async_save({'delay': 'no'})
assert len(mock_save) == 1
async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=1))
await hass.async_block_till_done()
assert len(mock_save) == 1
data = await store.async_load()
assert data == {'delay': 'no'}
async def test_migrator_no_existing_config(hass, store, mock_save):
"""Test migrator with no existing config."""
with patch('os.path.isfile', return_value=False), \
patch.object(store, 'async_load',
return_value=mock_coro({'cur': 'config'})):
data = await storage.async_migrator(
hass, 'old-path', store)
assert data == {'cur': 'config'}
assert len(mock_save) == 0
async def test_migrator_existing_config(hass, store, mock_save):
"""Test migrating existing config."""
with patch('os.path.isfile', return_value=True), \
patch('os.remove') as mock_remove, \
patch('homeassistant.util.json.load_json',
return_value={'old': 'config'}):
data = await storage.async_migrator(
hass, 'old-path', store)
assert len(mock_remove.mock_calls) == 1
assert data == {'old': 'config'}
assert len(mock_save) == 1
assert mock_save[0][1] == {
'key': MOCK_KEY,
'version': MOCK_VERSION,
'data': data,
}
async def test_migrator_transforming_config(hass, store, mock_save):
"""Test migrating config to new format."""
async def old_conf_migrate_func(old_config):
"""Migrate old config to new format."""
return {'new': old_config['old']}
with patch('os.path.isfile', return_value=True), \
patch('os.remove') as mock_remove, \
patch('homeassistant.util.json.load_json',
return_value={'old': 'config'}):
data = await storage.async_migrator(
hass, 'old-path', store,
old_conf_migrate_func=old_conf_migrate_func)
assert len(mock_remove.mock_calls) == 1
assert data == {'new': 'config'}
assert len(mock_save) == 1
assert mock_save[0][1] == {
'key': MOCK_KEY,
'version': MOCK_VERSION,
'data': data,
}
|
the-stack_106_26118 | from urllib.parse import urljoin
import requests
from cypherpunkpay.common import *
from cypherpunkpay import disable_unverified_certificate_warnings
from cypherpunkpay.bitcoin.ln_invoice import LnInvoice
from cypherpunkpay.net.http_client.clear_http_client import ClearHttpClient
class LightningException(Exception):
pass
class InvalidMacaroonLightningException(LightningException):
pass
class UnknownInvoiceLightningException(LightningException):
pass
class LndClient(object):
def __init__(self, lnd_node_url, invoice_macaroon=None, http_client=None):
self._lnd_node_url = lnd_node_url
self._invoice_macaroon = invoice_macaroon
self._http_client = http_client if http_client else ClearHttpClient()
# Returns payment request string
def addinvoice(self, total_btc: [Decimal, None] = None, memo: str = None, expiry_seconds: [int, None] = None) -> str:
# URL
lnd_node_url = urljoin(self._lnd_node_url, 'v1/invoices')
# Headers
headers_d = self._auth_header()
# Body
body_d = {'private': True}
if memo:
body_d['memo'] = memo
if total_btc:
total_satoshi = round(total_btc * 10 ** 8)
body_d['value'] = str(total_satoshi)
if expiry_seconds:
body_d['expiry'] = str(expiry_seconds)
body_s = json.dumps(body_d)
try:
log.debug(f'Calling LND REST API: POST {lnd_node_url} with body={body_s}')
disable_unverified_certificate_warnings()
res = self._http_client.post_accepting_linkability(
url=lnd_node_url,
headers=headers_d,
body=body_s,
set_tor_browser_headers=False,
verify=False
)
except requests.exceptions.RequestException as e:
log.error(f'Error connecting to LND: {e}')
raise LightningException()
try:
res_json = json.loads(res.text)
except JSONDecodeError as e:
log.error(f'Non-json response from LND: {res.text}')
raise LightningException()
if 'error' in res_json:
if 'signature mismatch' in res_json['error']:
log.error(f'Error authenticating to LND, check btc_lightning_lnd_invoice_macaroon option in your cypherpunkpay.conf file')
raise InvalidMacaroonLightningException()
log.error(f'LND returned error: {res_json["error"]}')
raise LightningException()
return res_json['payment_request']
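    # Illustrative usage sketch (the URL and macaroon value are hypothetical
    # placeholders, not part of this module):
    #
    #   client = LndClient('https://127.0.0.1:8080/', invoice_macaroon='0201...')
    #   payment_request = client.addinvoice(
    #       total_btc=Decimal('0.0001'), memo='example', expiry_seconds=600)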
def lookupinvoice(self, r_hash: bytes) -> LnInvoice:
# Sanity check
assert isinstance(r_hash, bytes)
assert len(r_hash) == 32
# URL
lnd_node_url = urljoin(self._lnd_node_url, f'v1/invoice/{r_hash.hex()}')
# Headers
headers_d = self._auth_header()
try:
log.debug(f'Calling LND REST API: GET {lnd_node_url}')
disable_unverified_certificate_warnings()
res = self._http_get(headers_d, lnd_node_url)
except requests.exceptions.RequestException as e:
log.error(f'Error connecting to LND: {e}')
raise LightningException()
try:
res_json = json.loads(res.text)
except JSONDecodeError as e:
log.error(f'Non-json response from LND: {res.text}')
raise LightningException()
if 'error' in res_json:
log.error(f'LND returned error: {res_json["error"]}')
if res_json['code'] == 2:
raise UnknownInvoiceLightningException()
raise LightningException()
ln_invoice = LnInvoice()
if res_json['settled']:
ln_invoice.is_settled = True
ln_invoice.amt_paid_msat = int(res_json['amt_paid_msat'])
return ln_invoice
# private
def _auth_header(self):
if self._invoice_macaroon:
return {'Grpc-Metadata-macaroon': self._invoice_macaroon}
else:
return {}
# mock me
def _http_get(self, headers_d, lnd_node_url) -> requests.Response:
return self._http_client.get_accepting_linkability(
url=lnd_node_url,
headers=headers_d,
set_tor_browser_headers=False,
verify=False
)
|
the-stack_106_26119 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for interoperability between JAX and pickling libraries."""
import pickle
import unittest
from absl.testing import absltest
try:
import cloudpickle
except ImportError:
cloudpickle = None
import jax
from jax.config import config
from jax import test_util as jtu
config.parse_flags_with_absl()
class CloudpickleTest(jtu.JaxTestCase):
@unittest.skipIf(cloudpickle is None, "Requires cloudpickle")
@unittest.skipIf(jax.lib._xla_extension_version < 31,
"Requires jaxlib 0.1.71")
def testPickleOfJittedFunctions(self):
@jax.jit
def f(x, y):
return x * y
@jax.jit
def g(z):
return f(z, z + 77) # noqa: F821
expected = g(32)
s = cloudpickle.dumps(g)
del f, g
g_unpickled = pickle.loads(s)
actual = g_unpickled(32)
self.assertEqual(expected, actual)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
|
the-stack_106_26120 | #
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
import copy
import torch
import torch.utils.data
import torchvision
from PIL import Image
import os
from pycocotools import mask as coco_mask
from transforms import Compose
class FilterAndRemapCocoCategories(object):
def __init__(self, categories, remap=True):
self.categories = categories
self.remap = remap
def __call__(self, image, anno):
anno = [obj for obj in anno if obj["category_id"] in self.categories]
if not self.remap:
return image, anno
anno = copy.deepcopy(anno)
for obj in anno:
obj["category_id"] = self.categories.index(obj["category_id"])
return image, anno
def convert_coco_poly_to_mask(segmentations, height, width):
masks = []
for polygons in segmentations:
rles = coco_mask.frPyObjects(polygons, height, width)
mask = coco_mask.decode(rles)
if len(mask.shape) < 3:
mask = mask[..., None]
mask = torch.as_tensor(mask, dtype=torch.uint8)
mask = mask.any(dim=2)
masks.append(mask)
if masks:
masks = torch.stack(masks, dim=0)
else:
masks = torch.zeros((0, height, width), dtype=torch.uint8)
return masks
class ConvertCocoPolysToMask(object):
def __call__(self, image, anno):
w, h = image.size
segmentations = [obj["segmentation"] for obj in anno]
cats = [obj["category_id"] for obj in anno]
if segmentations:
masks = convert_coco_poly_to_mask(segmentations, h, w)
cats = torch.as_tensor(cats, dtype=masks.dtype)
# merge all instance masks into a single segmentation map
# with its corresponding categories
target, _ = (masks * cats[:, None, None]).max(dim=0)
# discard overlapping instances
target[masks.sum(0) > 1] = 255
else:
target = torch.zeros((h, w), dtype=torch.uint8)
target = Image.fromarray(target.numpy())
return image, target
def _coco_remove_images_without_annotations(dataset, cat_list=None):
def _has_valid_annotation(anno):
# if it's empty, there is no annotation
if len(anno) == 0:
return False
# if more than 1k pixels occupied in the image
return sum(obj["area"] for obj in anno) > 1000
assert isinstance(dataset, torchvision.datasets.CocoDetection)
ids = []
for ds_idx, img_id in enumerate(dataset.ids):
ann_ids = dataset.coco.getAnnIds(imgIds=img_id, iscrowd=None)
anno = dataset.coco.loadAnns(ann_ids)
if cat_list:
anno = [obj for obj in anno if obj["category_id"] in cat_list]
if _has_valid_annotation(anno):
ids.append(ds_idx)
dataset = torch.utils.data.Subset(dataset, ids)
return dataset
def get_coco(root, image_set, transforms):
PATHS = {
"train": ("train2017", os.path.join("annotations", "instances_train2017.json")),
"val": ("val2017", os.path.join("annotations", "instances_val2017.json")),
# "train": ("val2017", os.path.join("annotations", "instances_val2017.json"))
}
CAT_LIST = [0, 5, 2, 16, 9, 44, 6, 3, 17, 62, 21, 67, 18, 19, 4,
1, 64, 20, 63, 7, 72]
transforms = Compose([
FilterAndRemapCocoCategories(CAT_LIST, remap=True),
ConvertCocoPolysToMask(),
transforms
])
img_folder, ann_file = PATHS[image_set]
img_folder = os.path.join(root, img_folder)
ann_file = os.path.join(root, ann_file)
dataset = torchvision.datasets.CocoDetection(img_folder, ann_file, transforms=transforms)
if image_set == "train":
dataset = _coco_remove_images_without_annotations(dataset, CAT_LIST)
return dataset
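# Illustrative usage sketch (the dataset root and `extra_transforms` are
# hypothetical placeholders; what each sample looks like after loading
# depends on the transforms you pass in):
#
#   dataset = get_coco('/data/coco', 'train', extra_transforms)
#   image, target = dataset[0]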
|
the-stack_106_26122 | # -*- encoding: utf-8 -*-
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
PXE Driver and supporting meta-classes.
"""
import os
from oslo.config import cfg
from ironic.common import exception
from ironic.common import image_service as service
from ironic.common import images
from ironic.common import keystone
from ironic.common import neutron
from ironic.common import paths
from ironic.common import states
from ironic.common import tftp
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers import base
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import image_cache
from ironic.drivers import utils as driver_utils
from ironic.openstack.common import fileutils
from ironic.openstack.common import log as logging
from ironic.openstack.common import strutils
pxe_opts = [
cfg.StrOpt('pxe_append_params',
default='nofb nomodeset vga=normal',
help='Additional append parameters for baremetal PXE boot.'),
cfg.StrOpt('pxe_config_template',
default=paths.basedir_def(
'drivers/modules/pxe_config.template'),
help='Template file for PXE configuration.'),
cfg.StrOpt('default_ephemeral_format',
default='ext4',
help='Default file system format for ephemeral partition, '
'if one is created.'),
cfg.StrOpt('images_path',
default='/var/lib/ironic/images/',
help='Directory where images are stored on disk.'),
cfg.StrOpt('tftp_master_path',
default='/tftpboot/master_images',
help='Directory where master tftp images are stored on disk.'),
cfg.StrOpt('instance_master_path',
default='/var/lib/ironic/master_images',
help='Directory where master instance images are stored on '
'disk.'),
# NOTE(dekehn): Additional boot files options may be created in the event
# other architectures require different boot files.
cfg.StrOpt('pxe_bootfile_name',
default='pxelinux.0',
help='Neutron bootfile DHCP parameter.'),
cfg.IntOpt('image_cache_size',
default=1024,
help='Maximum size (in MiB) of cache for master images, '
'including those in use'),
cfg.IntOpt('image_cache_ttl',
default=60,
help='Maximum TTL (in minutes) for old master images in cache'),
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(pxe_opts, group='pxe')
CONF.import_opt('use_ipv6', 'ironic.netconf')
def _check_for_missing_params(info_dict, param_prefix=''):
missing_info = []
for label, value in info_dict.items():
if not value:
missing_info.append(param_prefix + label)
if missing_info:
raise exception.InvalidParameterValue(_(
"Can not validate PXE bootloader. The following parameters "
"were not passed to ironic: %s") % missing_info)
def _parse_driver_info(node):
"""Gets the driver specific Node deployment info.
This method validates whether the 'driver_info' property of the
supplied node contains the required information for this driver to
deploy images to the node.
:param node: a single Node.
:returns: A dict with the driver_info values.
"""
info = node.driver_info
d_info = {}
d_info['deploy_kernel'] = info.get('pxe_deploy_kernel')
d_info['deploy_ramdisk'] = info.get('pxe_deploy_ramdisk')
_check_for_missing_params(d_info, 'pxe_')
return d_info
def _parse_instance_info(node):
"""Gets the instance specific Node deployment info.
This method validates whether the 'instance_info' property of the
supplied node contains the required information for this driver to
deploy images to the node.
:param node: a single Node.
:returns: A dict with the instance_info values.
"""
info = node.instance_info
i_info = {}
i_info['image_source'] = info.get('image_source')
i_info['root_gb'] = info.get('root_gb')
_check_for_missing_params(i_info)
# Internal use only
i_info['deploy_key'] = info.get('deploy_key')
i_info['swap_mb'] = info.get('swap_mb', 0)
i_info['ephemeral_gb'] = info.get('ephemeral_gb', 0)
i_info['ephemeral_format'] = info.get('ephemeral_format')
err_msg_invalid = _("Can not validate PXE bootloader. Invalid parameter "
"%(param)s. Reason: %(reason)s")
for param in ('root_gb', 'swap_mb', 'ephemeral_gb'):
try:
int(i_info[param])
except ValueError:
reason = _("'%s' is not an integer value.") % i_info[param]
raise exception.InvalidParameterValue(err_msg_invalid %
{'param': param, 'reason': reason})
if i_info['ephemeral_gb'] and not i_info['ephemeral_format']:
i_info['ephemeral_format'] = CONF.pxe.default_ephemeral_format
preserve_ephemeral = info.get('preserve_ephemeral', False)
try:
i_info['preserve_ephemeral'] = strutils.bool_from_string(
preserve_ephemeral, strict=True)
except ValueError as e:
raise exception.InvalidParameterValue(err_msg_invalid %
{'param': 'preserve_ephemeral', 'reason': e})
return i_info
def _parse_deploy_info(node):
"""Gets the instance and driver specific Node deployment info.
This method validates whether the 'instance_info' and 'driver_info'
property of the supplied node contains the required information for
this driver to deploy images to the node.
:param node: a single Node.
:returns: A dict with the instance_info and driver_info values.
"""
info = {}
info.update(_parse_instance_info(node))
info.update(_parse_driver_info(node))
return info
def _build_pxe_config_options(node, pxe_info, ctx):
"""Build the PXE config options for a node
This method builds the PXE boot options for a node,
given all the required parameters.
The options should then be passed to tftp.create_pxe_config to create
the actual config files.
:param node: a single Node.
:param pxe_info: a dict of values to set on the configuration file
:param ctx: security context
:returns: A dictionary of pxe options to be used in the pxe bootfile
template.
"""
# NOTE: we should strip '/' from the end because this is intended for
# hardcoded ramdisk script
ironic_api = (CONF.conductor.api_url or
keystone.get_service_url()).rstrip('/')
deploy_key = utils.random_alnum(32)
i_info = node.instance_info
i_info['deploy_key'] = deploy_key
node.instance_info = i_info
node.save(ctx)
pxe_options = {
'deployment_id': node['uuid'],
'deployment_key': deploy_key,
'deployment_iscsi_iqn': "iqn-%s" % node.uuid,
'deployment_aki_path': pxe_info['deploy_kernel'][1],
'deployment_ari_path': pxe_info['deploy_ramdisk'][1],
'aki_path': pxe_info['kernel'][1],
'ari_path': pxe_info['ramdisk'][1],
'ironic_api_url': ironic_api,
'pxe_append_params': CONF.pxe.pxe_append_params,
}
return pxe_options
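# For illustration only (the values below are hypothetical), the returned
# options feed the pxe_config_template and end up looking roughly like:
#
#   {'deployment_id': '<node-uuid>',
#    'deployment_key': '<32-char random string>',
#    'deployment_iscsi_iqn': 'iqn-<node-uuid>',
#    'deployment_aki_path': '/tftpboot/<node-uuid>/deploy_kernel',
#    'deployment_ari_path': '/tftpboot/<node-uuid>/deploy_ramdisk',
#    'aki_path': '/tftpboot/<node-uuid>/kernel',
#    'ari_path': '/tftpboot/<node-uuid>/ramdisk',
#    'ironic_api_url': 'http://<ironic-api>:6385',
#    'pxe_append_params': 'nofb nomodeset vga=normal'}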
def _get_image_dir_path(node_uuid):
"""Generate the dir for an instances disk."""
return os.path.join(CONF.pxe.images_path, node_uuid)
def _get_image_file_path(node_uuid):
"""Generate the full path for an instances disk."""
return os.path.join(_get_image_dir_path(node_uuid), 'disk')
def _get_token_file_path(node_uuid):
"""Generate the path for PKI token file."""
return os.path.join(CONF.tftp.tftp_root, 'token-' + node_uuid)
class PXEImageCache(image_cache.ImageCache):
def __init__(self, master_dir, image_service=None):
super(PXEImageCache, self).__init__(
master_dir,
# MiB -> B
cache_size=CONF.pxe.image_cache_size * 1024 * 1024,
# min -> sec
cache_ttl=CONF.pxe.image_cache_ttl * 60,
image_service=image_service)
class TFTPImageCache(PXEImageCache):
def __init__(self, image_service=None):
super(TFTPImageCache, self).__init__(CONF.pxe.tftp_master_path)
class InstanceImageCache(PXEImageCache):
def __init__(self, image_service=None):
super(InstanceImageCache, self).__init__(CONF.pxe.instance_master_path)
def _free_disk_space_for(path):
"""Get free disk space on a drive where path is located."""
stat = os.statvfs(path)
return stat.f_frsize * stat.f_bavail
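# Illustrative arithmetic (hypothetical numbers): with a 4096-byte fragment
# size and 1,000,000 blocks available to unprivileged callers, this reports
# 4096 * 1000000 = 4,096,000,000 bytes (roughly 3.8 GiB) of free space.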
def _cleanup_caches_if_required(ctx, cache, images_info):
# NOTE(dtantsur): I'd prefer to have this code inside ImageCache. But:
# To reclaim disk space efficiently, this code needs to be aware of
# all existing caches (e.g. cleaning instance image cache can be
# much more efficient, than cleaning TFTP cache).
total_size = sum(images.download_size(ctx, uuid)
for (uuid, path) in images_info)
free = _free_disk_space_for(cache.master_dir)
if total_size >= free:
# NOTE(dtantsur): instance cache is larger - always clean it first
# NOTE(dtantsur): filter caches, whose directory is on the same device
st_dev = os.stat(cache.master_dir).st_dev
caches = [c for c in (InstanceImageCache(), TFTPImageCache())
if os.stat(c.master_dir).st_dev == st_dev]
for cache_to_clean in caches:
# NOTE(dtantsur): multiplying by 2 is an attempt to account for
# images converting to raw format
cache_to_clean.clean_up(amount=(2 * total_size - free))
free = _free_disk_space_for(cache.master_dir)
if total_size < free:
break
else:
msg = _("Disk volume where '%(path)s' is located doesn't have "
"enough disk space. Required %(required)d MiB, "
"only %(actual)d MiB available space present.")
raise exception.InstanceDeployFailure(reason=msg % {
'path': cache.master_dir,
'required': total_size / 1024 / 1024,
'actual': free / 1024 / 1024
})
def _fetch_images(ctx, cache, images_info):
"""Check for available disk space and fetch images using ImageCache.
:param ctx: context
:param cache: ImageCache instance to use for fetching
:param images_info: list of tuples (image uuid, destination path)
:raises: InstanceDeployFailure if unable to find enough disk space
"""
_cleanup_caches_if_required(ctx, cache, images_info)
# NOTE(dtantsur): This code can suffer from race condition,
# if disk space is used between the check and actual download.
# This is probably unavoidable, as we can't control other
# (probably unrelated) processes
for uuid, path in images_info:
cache.fetch_image(uuid, path, ctx=ctx)
def _cache_tftp_images(ctx, node, pxe_info):
"""Fetch the necessary kernels and ramdisks for the instance."""
fileutils.ensure_tree(
os.path.join(CONF.tftp.tftp_root, node.uuid))
LOG.debug("Fetching kernel and ramdisk for node %s",
node.uuid)
_fetch_images(ctx, TFTPImageCache(), pxe_info.values())
def _cache_instance_image(ctx, node):
"""Fetch the instance's image from Glance
This method pulls the relevant AMI and associated kernel and ramdisk,
and the deploy kernel and ramdisk from Glance, and writes them
to the appropriate places on local disk.
Both sets of kernel and ramdisk are needed for PXE booting, so these
are stored under CONF.tftp.tftp_root.
At present, the AMI is cached and certain files are injected.
Debian/ubuntu-specific assumptions are made regarding the injected
files. In a future revision, this functionality will be replaced by a
more scalable and os-agnostic approach: the deployment ramdisk will
fetch from Glance directly, and write its own last-mile configuration.
"""
i_info = _parse_instance_info(node)
fileutils.ensure_tree(_get_image_dir_path(node.uuid))
image_path = _get_image_file_path(node.uuid)
uuid = i_info['image_source']
LOG.debug("Fetching image %(ami)s for node %(uuid)s" %
{'ami': uuid, 'uuid': node.uuid})
_fetch_images(ctx, InstanceImageCache(), [(uuid, image_path)])
return (uuid, image_path)
def _get_tftp_image_info(node, ctx):
"""Generate the paths for tftp files for this instance
Raises IronicException if
- instance does not contain kernel_id or ramdisk_id
- deploy_kernel_id or deploy_ramdisk_id can not be read from
driver_info and defaults are not set
"""
d_info = _parse_deploy_info(node)
image_info = {}
for label in ('deploy_kernel', 'deploy_ramdisk'):
image_info[label] = (
str(d_info[label]).split('/')[-1],
os.path.join(CONF.tftp.tftp_root, node.uuid, label)
)
i_info = node.instance_info
labels = ('kernel', 'ramdisk')
if not (i_info.get('kernel') and i_info.get('ramdisk')):
glance_service = service.Service(version=1, context=ctx)
iproperties = glance_service.show(d_info['image_source'])['properties']
for label in labels:
i_info[label] = str(iproperties[label + '_id']).split('/')[-1]
node.instance_info = i_info
node.save(ctx)
for label in labels:
image_info[label] = (
i_info[label],
os.path.join(CONF.tftp.tftp_root, node.uuid, label)
)
return image_info
def _destroy_images(d_info, node_uuid):
"""Delete instance's image file."""
utils.unlink_without_raise(_get_image_file_path(node_uuid))
utils.rmtree_without_raise(_get_image_dir_path(node_uuid))
InstanceImageCache().clean_up()
def _create_token_file(task):
"""Save PKI token to file."""
token_file_path = _get_token_file_path(task.node.uuid)
token = task.context.auth_token
if token:
utils.write_to_file(token_file_path, token)
else:
utils.unlink_without_raise(token_file_path)
def _destroy_token_file(node):
"""Delete PKI token file."""
token_file_path = _get_token_file_path(node['uuid'])
utils.unlink_without_raise(token_file_path)
def _check_image_size(task):
"""Check if the requested image is larger than the root partition size."""
i_info = _parse_instance_info(task.node)
image_path = _get_image_file_path(task.node.uuid)
image_mb = deploy_utils.get_image_mb(image_path)
root_mb = 1024 * int(i_info['root_gb'])
if image_mb > root_mb:
msg = (_('Root partition is too small for requested image. '
'Image size: %(image_mb)d MB, Root size: %(root_mb)d MB')
% {'image_mb': image_mb, 'root_mb': root_mb})
raise exception.InstanceDeployFailure(msg)
def _validate_glance_image(ctx, deploy_info):
"""Validate the image in Glance.
Check if the image exist in Glance and if it contains the
'kernel_id' and 'ramdisk_id' properties.
:raises: InvalidParameterValue.
"""
image_id = deploy_info['image_source']
try:
glance_service = service.Service(version=1, context=ctx)
image_props = glance_service.show(image_id)['properties']
except (exception.GlanceConnectionFailed,
exception.ImageNotAuthorized,
exception.Invalid):
raise exception.InvalidParameterValue(_(
"Failed to connect to Glance to get the properties "
"of the image %s") % image_id)
except exception.ImageNotFound:
raise exception.InvalidParameterValue(_(
"Image %s not found in Glance") % image_id)
missing_props = []
for prop in ('kernel_id', 'ramdisk_id'):
if not image_props.get(prop):
missing_props.append(prop)
if missing_props:
props = ', '.join(missing_props)
raise exception.InvalidParameterValue(_(
"Image %(image)s is missing the following properties: "
"%(properties)s") % {'image': image_id, 'properties': props})
class PXEDeploy(base.DeployInterface):
"""PXE Deploy Interface: just a stub until the real driver is ported."""
def validate(self, task):
"""Validate the deployment information for the task's node.
:param task: a TaskManager instance containing the node to act on.
:raises: InvalidParameterValue.
"""
node = task.node
if not driver_utils.get_node_mac_addresses(task):
raise exception.InvalidParameterValue(_("Node %s does not have "
"any port associated with it.") % node.uuid)
d_info = _parse_deploy_info(node)
# Try to get the URL of the Ironic API
try:
# TODO(lucasagomes): Validate the format of the URL
CONF.conductor.api_url or keystone.get_service_url()
except (exception.CatalogFailure,
exception.CatalogNotFound,
exception.CatalogUnauthorized):
raise exception.InvalidParameterValue(_(
"Couldn't get the URL of the Ironic API service from the "
"configuration file or keystone catalog."))
_validate_glance_image(task.context, d_info)
@task_manager.require_exclusive_lock
def deploy(self, task):
"""Start deployment of the task's node'.
Fetches instance image, creates a temporary keystone token file,
updates the Neutron DHCP port options for next boot, and issues a
reboot request to the power driver.
This causes the node to boot into the deployment ramdisk and triggers
the next phase of PXE-based deployment via
VendorPassthru._continue_deploy().
:param task: a TaskManager instance containing the node to act on.
:returns: deploy state DEPLOYING.
"""
_cache_instance_image(task.context, task.node)
_check_image_size(task)
# TODO(yuriyz): more secure way needed for pass auth token
# to deploy ramdisk
_create_token_file(task)
neutron.update_neutron(task, CONF.pxe.pxe_bootfile_name)
manager_utils.node_set_boot_device(task, 'pxe', persistent=True)
manager_utils.node_power_action(task, states.REBOOT)
return states.DEPLOYWAIT
@task_manager.require_exclusive_lock
def tear_down(self, task):
"""Tear down a previous deployment on the task's node.
Power off the node. All actual clean-up is done in the clean_up()
method which should be called separately.
:param task: a TaskManager instance containing the node to act on.
:returns: deploy state DELETED.
"""
manager_utils.node_power_action(task, states.POWER_OFF)
return states.DELETED
def prepare(self, task):
"""Prepare the deployment environment for this task's node.
Generates the TFTP configuration for PXE-booting both the deployment
and user images, fetches the TFTP image from Glance and add it to the
local cache.
:param task: a TaskManager instance containing the node to act on.
"""
# TODO(deva): optimize this if rerun on existing files
pxe_info = _get_tftp_image_info(task.node, task.context)
pxe_options = _build_pxe_config_options(task.node, pxe_info,
task.context)
tftp.create_pxe_config(task, pxe_options, CONF.pxe.pxe_config_template)
_cache_tftp_images(task.context, task.node, pxe_info)
def clean_up(self, task):
"""Clean up the deployment environment for the task's node.
Unlinks TFTP and instance images and triggers image cache cleanup.
Removes the TFTP configuration files for this node. As a precaution,
this method also ensures the keystone auth token file was removed.
:param task: a TaskManager instance containing the node to act on.
"""
node = task.node
pxe_info = _get_tftp_image_info(node, task.context)
d_info = _parse_deploy_info(node)
for label in pxe_info:
path = pxe_info[label][1]
utils.unlink_without_raise(path)
TFTPImageCache().clean_up()
tftp.clean_up_pxe_config(task)
_destroy_images(d_info, node.uuid)
_destroy_token_file(node)
def take_over(self, task):
neutron.update_neutron(task, CONF.pxe.pxe_bootfile_name)
class VendorPassthru(base.VendorInterface):
"""Interface to mix IPMI and PXE vendor-specific interfaces."""
def _get_deploy_info(self, node, **kwargs):
d_info = _parse_deploy_info(node)
deploy_key = kwargs.get('key')
if d_info['deploy_key'] != deploy_key:
raise exception.InvalidParameterValue(_("Deploy key does not"
" match"))
params = {'address': kwargs.get('address'),
'port': kwargs.get('port', '3260'),
'iqn': kwargs.get('iqn'),
'lun': kwargs.get('lun', '1'),
'image_path': _get_image_file_path(node.uuid),
'pxe_config_path':
tftp.get_pxe_config_file_path(node.uuid),
'root_mb': 1024 * int(d_info['root_gb']),
'swap_mb': int(d_info['swap_mb']),
'ephemeral_mb': 1024 * int(d_info['ephemeral_gb']),
'preserve_ephemeral': d_info['preserve_ephemeral'],
'node_uuid': node.uuid,
}
missing = [key for key in params.keys() if params[key] is None]
if missing:
raise exception.InvalidParameterValue(_(
"Parameters %s were not passed to ironic"
" for deploy.") % missing)
# ephemeral_format is nullable
params['ephemeral_format'] = d_info.get('ephemeral_format')
return params
def validate(self, task, **kwargs):
method = kwargs['method']
if method == 'pass_deploy_info':
self._get_deploy_info(task.node, **kwargs)
else:
raise exception.InvalidParameterValue(_(
"Unsupported method (%s) passed to PXE driver.")
% method)
@task_manager.require_exclusive_lock
def _continue_deploy(self, task, **kwargs):
"""Resume a deployment upon getting POST data from deploy ramdisk.
This method raises no exceptions because it is intended to be
invoked asynchronously as a callback from the deploy ramdisk.
"""
node = task.node
driver_info = _parse_driver_info(node)
def _set_failed_state(msg):
node.provision_state = states.DEPLOYFAIL
node.target_provision_state = states.NOSTATE
node.save(task.context)
try:
manager_utils.node_power_action(task, states.POWER_OFF)
except Exception:
msg = (_('Node %s failed to power off while handling deploy '
'failure. This may be a serious condition. Node '
'should be removed from Ironic or put in maintenance '
'mode until the problem is resolved.') % node.uuid)
LOG.error(msg)
finally:
# NOTE(deva): node_power_action() erases node.last_error
# so we need to set it again here.
node.last_error = msg
node.save(task.context)
if node.provision_state != states.DEPLOYWAIT:
LOG.error(_('Node %s is not waiting to be deployed.') %
node.uuid)
return
node.provision_state = states.DEPLOYING
node.save(task.context)
# remove cached keystone token immediately
_destroy_token_file(node)
params = self._get_deploy_info(node, **kwargs)
ramdisk_error = kwargs.get('error')
if ramdisk_error:
LOG.error(_('Error returned from PXE deploy ramdisk: %s')
% ramdisk_error)
_set_failed_state(_('Failure in PXE deploy ramdisk.'))
_destroy_images(driver_info, node.uuid)
return
LOG.info(_('Continuing deployment for node %(node)s, params '
'%(params)s') % {'node': node.uuid, 'params': params})
try:
deploy_utils.deploy(**params)
except Exception as e:
LOG.error(_('PXE deploy failed for instance %(instance)s. '
'Error: %(error)s') % {'instance': node.instance_uuid,
'error': e})
_set_failed_state(_('PXE driver failed to continue deployment.'))
else:
LOG.info(_('Deployment to node %s done') % node.uuid)
node.provision_state = states.ACTIVE
node.target_provision_state = states.NOSTATE
node.save(task.context)
_destroy_images(driver_info, node.uuid)
def vendor_passthru(self, task, **kwargs):
method = kwargs['method']
if method == 'pass_deploy_info':
self._continue_deploy(task, **kwargs)
|
the-stack_106_26128 | # (C) Datadog, Inc. 2013-2016
# (C) Justin Slattery <[email protected]> 2013
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
import re
# 3rd party
import requests
# project
from checks import AgentCheck
from util import headers
# Constants
COUCHBASE_STATS_PATH = '/pools/default'
COUCHBASE_VITALS_PATH = '/admin/vitals'
DEFAULT_TIMEOUT = 10
class Couchbase(AgentCheck):
"""Extracts stats from Couchbase via its REST API
http://docs.couchbase.com/couchbase-manual-2.0/#using-the-rest-api
"""
SERVICE_CHECK_NAME = 'couchbase.can_connect'
# Selected metrics to send amongst all the bucket stats, after name normalization
BUCKET_STATS = set([
"avg_bg_wait_time",
"avg_disk_commit_time",
"avg_disk_update_time",
"bg_wait_total",
"bytes_read",
"bytes_written",
"cas_badval",
"cas_hits",
"cas_misses",
"cmd_get",
"cmd_set",
"couch_docs_actual_disk_size",
"couch_docs_data_size",
"couch_docs_disk_size",
"couch_docs_fragmentation",
"couch_spatial_data_size",
"couch_spatial_disk_size",
"couch_spatial_ops",
"couch_total_disk_size",
"couch_views_data_size",
"couch_views_disk_size",
"couch_views_fragmentation",
"couch_views_ops",
"cpu_idle_ms",
"cpu_utilization_rate",
"curr_connections",
"curr_items_tot",
"curr_items",
"decr_hits",
"decr_misses",
"delete_hits",
"delete_misses",
"disk_commit_count",
"disk_update_count",
"disk_write_queue",
"ep_bg_fetched",
"ep_cache_miss_rate",
"ep_cache_miss_ratio",
"ep_dcp_fts_backoff",
"ep_dcp_fts_count",
"ep_dcp_fts_items_remaining",
"ep_dcp_fts_items_sent",
"ep_dcp_fts_producer_count",
"ep_dcp_fts_total_bytes",
"ep_dcp_2i_backoff",
"ep_dcp_2i_count",
"ep_dcp_2i_items_remaining",
"ep_dcp_2i_items_sent",
"ep_dcp_2i_producer_count",
"ep_dcp_2i_total_bytes",
"ep_dcp_other_backoff",
"ep_dcp_other_count",
"ep_dcp_other_items_remaining",
"ep_dcp_other_items_sent",
"ep_dcp_other_producer_count",
"ep_dcp_other_total_bytes",
"ep_dcp_replica_backoff",
"ep_dcp_replica_count",
"ep_dcp_replica_items_remaining",
"ep_dcp_replica_items_sent",
"ep_dcp_replica_producer_count",
"ep_dcp_replica_total_bytes",
"ep_dcp_views_backoff",
"ep_dcp_views_count",
"ep_dcp_views_items_remaining",
"ep_dcp_views_items_sent",
"ep_dcp_views_producer_count",
"ep_dcp_views_total_bytes",
"ep_dcp_xdcr_backoff",
"ep_dcp_xdcr_count",
"ep_dcp_xdcr_items_remaining",
"ep_dcp_xdcr_items_sent",
"ep_dcp_xdcr_producer_count",
"ep_dcp_xdcr_total_bytes",
"ep_diskqueue_drain",
"ep_diskqueue_fill",
"ep_diskqueue_items",
"ep_flusher_todo",
"ep_item_commit_failed",
"ep_kv_size",
"ep_max_size",
"ep_mem_high_wat",
"ep_mem_low_wat",
"ep_meta_data_memory",
"ep_num_non_resident",
"ep_num_ops_del_meta",
"ep_num_ops_del_ret_meta",
"ep_num_ops_get_meta",
"ep_num_ops_set_meta",
"ep_num_ops_set_ret_meta",
"ep_num_value_ejects",
"ep_oom_errors",
"ep_ops_create",
"ep_ops_update",
"ep_overhead",
"ep_queue_size",
"ep_resident_items_rate",
"ep_tap_replica_queue_drain",
"ep_tap_total_queue_drain",
"ep_tap_total_queue_fill",
"ep_tap_total_total_backlog_size",
"ep_tmp_oom_errors",
"ep_vb_total",
"evictions",
"get_hits",
"get_misses",
"hibernated_requests",
"hibernated_waked",
"hit_ratio",
"incr_hits",
"incr_misses",
"mem_actual_free",
"mem_actual_used",
"mem_free",
"mem_total",
"mem_used",
"mem_used_sys",
"misses",
"ops",
"page_faults",
"replication_docs_rep_queue",
"replication_meta_latency_aggr",
"rest_requests",
"swap_total",
"swap_used",
"vb_active_eject",
"vb_active_itm_memory",
"vb_active_meta_data_memory",
"vb_active_num_non_resident",
"vb_active_num",
"vb_active_ops_create",
"vb_active_ops_update",
"vb_active_queue_age",
"vb_active_queue_drain",
"vb_active_queue_fill",
"vb_active_queue_size",
"vb_active_resident_items_ratio",
"vb_avg_active_queue_age",
"vb_avg_pending_queue_age",
"vb_avg_replica_queue_age",
"vb_avg_total_queue_age",
"vb_pending_curr_items",
"vb_pending_eject",
"vb_pending_itm_memory",
"vb_pending_meta_data_memory",
"vb_pending_num_non_resident",
"vb_pending_num",
"vb_pending_ops_create",
"vb_pending_ops_update",
"vb_pending_queue_age",
"vb_pending_queue_drain",
"vb_pending_queue_fill",
"vb_pending_queue_size",
"vb_pending_resident_items_ratio",
"vb_replica_curr_items",
"vb_replica_eject",
"vb_replica_itm_memory",
"vb_replica_meta_data_memory",
"vb_replica_num_non_resident",
"vb_replica_num",
"vb_replica_ops_create",
"vb_replica_ops_update",
"vb_replica_queue_age",
"vb_replica_queue_drain",
"vb_replica_queue_fill",
"vb_replica_queue_size",
"vb_replica_resident_items_ratio",
"vb_total_queue_age",
"xdc_ops",
])
# Selected metrics of the query monitoring API
# See https://developer.couchbase.com/documentation/server/4.5/tools/query-monitoring.html
QUERY_STATS = set([
'cores',
'cpu_sys_percent',
'cpu_user_percent',
'gc_num',
'gc_pause_percent',
'gc_pause_time',
'memory_system',
'memory_total',
'memory_usage',
'request_active_count',
'request_completed_count',
'request_per_sec_15min',
'request_per_sec_1min',
'request_per_sec_5min',
'request_prepared_percent',
'request_time_80percentile',
'request_time_95percentile',
'request_time_99percentile',
'request_time_mean',
'request_time_median',
'total_threads',
])
TO_SECONDS = {
'ns': 1e9,
'us': 1e6,
'ms': 1e3,
's': 1,
}
    seconds_value_pattern = re.compile(r'(\d+(\.\d+)?)(\D+)')
def _create_metrics(self, data, tags=None):
storage_totals = data['stats']['storageTotals']
for key, storage_type in storage_totals.items():
for metric_name, val in storage_type.items():
if val is not None:
metric_name = '.'.join(['couchbase', key, self.camel_case_to_joined_lower(metric_name)])
self.gauge(metric_name, val, tags=tags)
for bucket_name, bucket_stats in data['buckets'].items():
for metric_name, val in bucket_stats.items():
if val is not None:
norm_metric_name = self.camel_case_to_joined_lower(metric_name)
if norm_metric_name in self.BUCKET_STATS:
full_metric_name = '.'.join(['couchbase', 'by_bucket', norm_metric_name])
metric_tags = list(tags)
metric_tags.append('bucket:%s' % bucket_name)
self.gauge(full_metric_name, val[0], tags=metric_tags, device_name=bucket_name)
for node_name, node_stats in data['nodes'].items():
for metric_name, val in node_stats['interestingStats'].items():
if val is not None:
metric_name = '.'.join(['couchbase', 'by_node', self.camel_case_to_joined_lower(metric_name)])
metric_tags = list(tags)
metric_tags.append('node:%s' % node_name)
self.gauge(metric_name, val, tags=metric_tags, device_name=node_name)
for metric_name, val in data['query'].items():
if val is not None:
# for query times, the unit is part of the value, we need to extract it
if isinstance(val, basestring):
val = self.extract_seconds_value(val)
norm_metric_name = self.camel_case_to_joined_lower(metric_name)
if norm_metric_name in self.QUERY_STATS:
full_metric_name = '.'.join(['couchbase', 'query', self.camel_case_to_joined_lower(norm_metric_name)])
self.gauge(full_metric_name, val, tags=tags)
def _get_stats(self, url, instance):
""" Hit a given URL and return the parsed json. """
self.log.debug('Fetching Couchbase stats at url: %s' % url)
timeout = float(instance.get('timeout', DEFAULT_TIMEOUT))
auth = None
if 'user' in instance and 'password' in instance:
auth = (instance['user'], instance['password'])
r = requests.get(url, auth=auth, headers=headers(self.agentConfig),
timeout=timeout)
r.raise_for_status()
return r.json()
def check(self, instance):
server = instance.get('server', None)
if server is None:
raise Exception("The server must be specified")
tags = instance.get('tags', [])
# Clean up tags in case there was a None entry in the instance
# e.g. if the yaml contains tags: but no actual tags
if tags is None:
tags = []
else:
tags = list(set(tags))
tags.append('instance:%s' % server)
data = self.get_data(server, instance)
self._create_metrics(data, tags=list(set(tags)))
def get_data(self, server, instance):
# The dictionary to be returned.
couchbase = {
'stats': None,
'buckets': {},
'nodes': {},
'query': {},
}
# build couchbase stats entry point
url = '%s%s' % (server, COUCHBASE_STATS_PATH)
# Fetch initial stats and capture a service check based on response.
service_check_tags = instance.get('tags', [])
if service_check_tags is None:
service_check_tags = []
else:
service_check_tags = list(set(service_check_tags))
service_check_tags.append('instance:%s' % server)
try:
overall_stats = self._get_stats(url, instance)
# No overall stats? bail out now
if overall_stats is None:
raise Exception("No data returned from couchbase endpoint: %s" % url)
except requests.exceptions.HTTPError as e:
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
tags=service_check_tags, message=str(e.message))
raise
except Exception as e:
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
tags=service_check_tags, message=str(e))
raise
else:
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,
tags=service_check_tags)
couchbase['stats'] = overall_stats
nodes = overall_stats['nodes']
# Next, get all the nodes
if nodes is not None:
for node in nodes:
couchbase['nodes'][node['hostname']] = node
# Next, get all buckets .
endpoint = overall_stats['buckets']['uri']
url = '%s%s' % (server, endpoint)
buckets = self._get_stats(url, instance)
if buckets is not None:
for bucket in buckets:
bucket_name = bucket['name']
# Fetch URI for the stats bucket
endpoint = bucket['stats']['uri']
url = '%s%s' % (server, endpoint)
try:
bucket_stats = self._get_stats(url, instance)
except requests.exceptions.HTTPError:
url_backup = '%s/pools/nodes/buckets/%s/stats' % (server, bucket_name)
bucket_stats = self._get_stats(url_backup, instance)
bucket_samples = bucket_stats['op']['samples']
if bucket_samples is not None:
couchbase['buckets'][bucket['name']] = bucket_samples
# Next, get the query monitoring data
query_monitoring_url = instance.get('query_monitoring_url', None)
if query_monitoring_url is not None:
try:
url = '%s%s' % (query_monitoring_url, COUCHBASE_VITALS_PATH)
query = self._get_stats(url, instance)
if query is not None:
couchbase['query'] = query
except requests.exceptions.HTTPError:
self.log.error("Error accessing the endpoint %s, make sure you're running at least "
"couchbase 4.5 to collect the query monitoring metrics", url)
return couchbase
# Takes a camelCased variable and returns a joined_lower equivalent.
# Returns input if non-camelCase variable is detected.
def camel_case_to_joined_lower(self, variable):
# replace non-word with _
        converted_variable = re.sub(r'\W+', '_', variable)
# insert _ in front of capital letters and lowercase the string
        converted_variable = re.sub(r'([A-Z])', r'_\g<1>', converted_variable).lower()
# remove duplicate _
converted_variable = re.sub('_+', '_', converted_variable)
# handle special case of starting/ending underscores
converted_variable = re.sub('^_|_$', '', converted_variable)
return converted_variable
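    # Illustrative examples (assumed inputs, not part of the original check):
    #   camel_case_to_joined_lower('couchDocsDataSize') -> 'couch_docs_data_size'
    #   camel_case_to_joined_lower('mem_used')          -> 'mem_used'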
# Takes a string with a time and a unit (e.g '3.45ms') and returns the value in seconds
def extract_seconds_value(self, value):
match = self.seconds_value_pattern.search(value)
val, unit = match.group(1, 3)
# They use the 'micro' symbol for microseconds so there is an encoding problem
# so let's assume it's microseconds if we don't find the key in unit
if unit not in self.TO_SECONDS:
unit = 'us'
return float(val)/self.TO_SECONDS[unit]
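    # Illustrative examples (assumed inputs, not part of the original check):
    #   extract_seconds_value('3.45ms') -> 0.00345
    #   extract_seconds_value('18.2µs') -> 1.82e-05  (unknown units fall back to microseconds)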
|
the-stack_106_26130 | import operator
from operator import methodcaller
import dask.array as da
import dask.dataframe as dd
import numpy as np
import numpy.testing as npt
import pandas as pd
import pytest
from dask.dataframe.utils import tm
import ibis
import ibis.expr.datatypes as dt
from ... import connect, execute
pytestmark = pytest.mark.dask
def test_table_column(t, df):
expr = t.plain_int64
result = expr.execute()
expected = df.plain_int64
tm.assert_series_equal(result.compute(), expected.compute())
def test_literal(client):
assert client.execute(ibis.literal(1)) == 1
def test_read_with_undiscoverable_type(client):
with pytest.raises(TypeError):
client.table('df')
def test_selection(t, df):
expr = t[
((t.plain_strings == 'a') | (t.plain_int64 == 3))
& (t.dup_strings == 'd')
]
result = expr.execute()
expected = df[
((df.plain_strings == 'a') | (df.plain_int64 == 3))
& (df.dup_strings == 'd')
].reset_index(drop=True)
tm.assert_frame_equal(
result[expected.columns].compute(), expected.compute()
)
def test_mutate(t, df):
expr = t.mutate(x=t.plain_int64 + 1, y=t.plain_int64 * 2)
result = expr.execute()
expected = df.assign(x=df.plain_int64 + 1, y=df.plain_int64 * 2)
tm.assert_frame_equal(
result[expected.columns].compute(), expected.compute()
)
@pytest.mark.xfail(reason='TODO - windowing - #2553')
def test_project_scope_does_not_override(t, df):
col = t.plain_int64
expr = t[
[
col.name('new_col'),
col.sum()
.over(ibis.window(group_by='dup_strings'))
.name('grouped'),
]
]
result = expr.execute()
expected = dd.concat(
[
df[['plain_int64', 'dup_strings']].rename(
columns={'plain_int64': 'new_col'}
),
df.groupby('dup_strings')
.plain_int64.transform('sum')
.reset_index(drop=True)
.rename('grouped'),
],
axis=1,
)[['new_col', 'grouped']]
tm.assert_frame_equal(result.compute(), expected.compute())
@pytest.mark.xfail(reason="TODO - aggregations - #2553")
@pytest.mark.parametrize(
'where',
[
lambda t: None,
lambda t: t.dup_strings == 'd',
lambda t: (t.dup_strings == 'd') | (t.plain_int64 < 100),
],
)
@pytest.mark.parametrize(
('ibis_func', 'dask_func'),
[
(methodcaller('abs'), np.abs),
(methodcaller('ceil'), np.ceil),
(methodcaller('exp'), np.exp),
(methodcaller('floor'), np.floor),
(methodcaller('ln'), np.log),
(methodcaller('log10'), np.log10),
(methodcaller('log', 2), lambda x: np.log(x) / np.log(2)),
(methodcaller('log2'), np.log2),
(methodcaller('round', 0), lambda x: x.round(0).astype('int64')),
(methodcaller('round', -2), methodcaller('round', -2)),
(methodcaller('round', 2), methodcaller('round', 2)),
(methodcaller('round'), lambda x: x.round().astype('int64')),
(methodcaller('sign'), np.sign),
(methodcaller('sqrt'), np.sqrt),
],
)
def test_aggregation_group_by(t, df, where, ibis_func, dask_func):
ibis_where = where(t)
expr = t.group_by(t.dup_strings).aggregate(
avg_plain_int64=t.plain_int64.mean(where=ibis_where),
sum_plain_float64=t.plain_float64.sum(where=ibis_where),
mean_float64_positive=ibis_func(t.float64_positive).mean(
where=ibis_where
),
neg_mean_int64_with_zeros=(-t.int64_with_zeros).mean(where=ibis_where),
nunique_dup_ints=t.dup_ints.nunique(),
)
result = expr.execute()
dask_where = where(df)
mask = slice(None) if dask_where is None else dask_where
expected = (
df.groupby('dup_strings')
.agg(
{
'plain_int64': lambda x, mask=mask: x[mask].mean(),
'plain_float64': lambda x, mask=mask: x[mask].sum(),
'dup_ints': 'nunique',
'float64_positive': (
lambda x, mask=mask, func=dask_func: func(x[mask]).mean()
),
'int64_with_zeros': lambda x, mask=mask: (-x[mask]).mean(),
}
)
.reset_index()
.rename(
columns={
'plain_int64': 'avg_plain_int64',
'plain_float64': 'sum_plain_float64',
'dup_ints': 'nunique_dup_ints',
'float64_positive': 'mean_float64_positive',
'int64_with_zeros': 'neg_mean_int64_with_zeros',
}
)
)
# TODO(phillipc): Why does pandas not return floating point values here?
expected['avg_plain_int64'] = expected.avg_plain_int64.astype('float64')
result['avg_plain_int64'] = result.avg_plain_int64.astype('float64')
expected[
'neg_mean_int64_with_zeros'
] = expected.neg_mean_int64_with_zeros.astype('float64')
result[
'neg_mean_int64_with_zeros'
] = result.neg_mean_int64_with_zeros.astype('float64')
expected['mean_float64_positive'] = expected.mean_float64_positive.astype(
'float64'
)
result['mean_float64_positive'] = result.mean_float64_positive.astype(
'float64'
)
lhs = result[expected.columns]
rhs = expected
tm.assert_frame_equal(lhs.compute(), rhs.compute())
@pytest.mark.xfail(reason="TODO - aggregations - #2553")
def test_aggregation_without_group_by(t, df):
expr = t.aggregate(
avg_plain_int64=t.plain_int64.mean(),
sum_plain_float64=t.plain_float64.sum(),
)
result = expr.execute()[['avg_plain_int64', 'sum_plain_float64']]
new_names = {
'plain_float64': 'sum_plain_float64',
'plain_int64': 'avg_plain_int64',
}
expected = (
dd.from_array(
[df['plain_int64'].mean(), df['plain_float64'].sum()],
index=['plain_int64', 'plain_float64'],
)
.to_frame()
.T.rename(columns=new_names)
)
tm.assert_frame_equal(
result[expected.columns].compute(), expected.compute()
)
def test_group_by_with_having(t, df):
expr = (
t.group_by(t.dup_strings)
.having(t.plain_float64.sum() == 5)
.aggregate(avg_a=t.plain_int64.mean(), sum_c=t.plain_float64.sum())
)
result = expr.execute()
expected = (
df.groupby('dup_strings')
.agg({'plain_int64': 'mean', 'plain_float64': 'sum'})
.reset_index()
.rename(columns={'plain_int64': 'avg_a', 'plain_float64': 'sum_c'})
)
expected = expected.loc[expected.sum_c == 5, ['avg_a', 'sum_c']]
tm.assert_frame_equal(
result[expected.columns].compute(), expected.compute()
)
def test_group_by_rename_key(t, df):
expr = t.groupby(t.dup_strings.name('foo')).aggregate(
dup_string_count=t.dup_strings.count()
)
assert 'foo' in expr.schema()
result = expr.execute()
assert 'foo' in result.columns
expected = (
df.groupby('dup_strings')
.dup_strings.count()
.rename('dup_string_count')
.reset_index()
.rename(columns={'dup_strings': 'foo'})
)
tm.assert_frame_equal(result.compute(), expected.compute())
@pytest.mark.parametrize('reduction', ['mean', 'sum', 'count', 'std', 'var'])
@pytest.mark.parametrize(
'where',
[
lambda t: (t.plain_strings == 'a') | (t.plain_strings == 'c'),
lambda t: (t.dup_strings == 'd')
& ((t.plain_int64 == 1) | (t.plain_int64 == 3)),
lambda t: None,
],
)
def test_reduction(t, df, reduction, where):
func = getattr(t.plain_int64, reduction)
mask = where(t)
expr = func(where=mask)
result = expr.execute()
df_mask = where(df)
expected_func = getattr(
df.loc[df_mask if df_mask is not None else slice(None), 'plain_int64'],
reduction,
)
expected = expected_func()
assert result.compute() == expected.compute()
@pytest.mark.xfail(raises=NotImplementedError, reason="TODO - aggregations - #2553")
@pytest.mark.parametrize(
'reduction',
[
lambda x: x.any(),
lambda x: x.all(),
lambda x: ~(x.any()),
lambda x: ~(x.all()),
],
)
def test_boolean_aggregation(t, df, reduction):
expr = reduction(t.plain_int64 == 1)
result = expr.execute()
expected = reduction(df.plain_int64 == 1)
assert result.compute() == expected.compute()
@pytest.mark.parametrize('column', ['float64_with_zeros', 'int64_with_zeros'])
def test_null_if_zero(t, df, column):
expr = t[column].nullifzero()
result = expr.execute()
expected = df[column].replace(0, np.nan)
tm.assert_series_equal(result.compute(), expected.compute())
@pytest.mark.parametrize(
('left', 'right', 'expected', 'compare'),
[
pytest.param(
lambda t: ibis.literal(1),
lambda t: ibis.literal(1),
lambda df: np.nan,
np.testing.assert_array_equal, # treats NaNs as equal
id='literal_literal_equal',
),
pytest.param(
lambda t: ibis.literal(1),
lambda t: ibis.literal(2),
lambda df: 1,
np.testing.assert_equal,
id='literal_literal_not_equal',
),
pytest.param(
lambda t: t.dup_strings,
lambda t: ibis.literal('a'),
lambda df: df.dup_strings.where(df.dup_strings != 'a'),
tm.assert_series_equal,
id='series_literal',
),
pytest.param(
lambda t: t.dup_strings,
lambda t: t.dup_strings,
lambda df: df.dup_strings.where(df.dup_strings != df.dup_strings),
tm.assert_series_equal,
id='series_series',
),
pytest.param(
lambda t: ibis.literal('a'),
lambda t: t.dup_strings,
lambda df: dd.from_array(
da.where(df.dup_strings.eq('a').values, np.nan, 'a')
),
tm.assert_series_equal,
id='literal_series',
),
],
)
def test_nullif(t, df, left, right, expected, compare):
expr = left(t).nullif(right(t))
result = execute(expr)
if isinstance(result, (dd.Series, dd.DataFrame)):
compare(result.compute(), expected(df).compute())
else:
compare(result, expected(df))
def test_nullif_inf():
df = dd.from_pandas(
pd.DataFrame({'a': [np.inf, 3.14, -np.inf, 42.0]}), npartitions=1,
)
con = connect(dict(t=df))
t = con.table('t')
expr = t.a.nullif(np.inf).nullif(-np.inf)
result = expr.execute()
expected = dd.from_pandas(
pd.Series([np.nan, 3.14, np.nan, 42.0], name='a'), npartitions=1,
)
tm.assert_series_equal(result.compute(), expected.compute())
def test_group_concat(t, df):
expr = t.groupby(t.dup_strings).aggregate(
foo=t.plain_int64.group_concat(',')
)
result = expr.execute()
expected = (
df.groupby('dup_strings')
.apply(lambda df: ','.join(df.plain_int64.astype(str)))
.reset_index()
.rename(columns={0: 'foo'})
)
tm.assert_frame_equal(
result[expected.columns].compute(), expected.compute()
)
@pytest.mark.parametrize('offset', [0, 2])
def test_frame_limit(t, df, offset):
n = 5
df_expr = t.limit(n, offset=offset)
result = df_expr.execute()
expected = df.loc[offset : offset + n].reset_index(drop=True)
tm.assert_frame_equal(
result[expected.columns].compute(), expected.compute()
)
@pytest.mark.xfail(
raises=AttributeError, reason='TableColumn does not implement limit'
)
@pytest.mark.parametrize('offset', [0, 2])
def test_series_limit(t, df, offset):
n = 5
s_expr = t.plain_int64.limit(n, offset=offset)
result = s_expr.execute()
tm.assert_series_equal(result, df.plain_int64.iloc[offset : offset + n])
@pytest.mark.xfail(reason="TODO - sorting - #2553")
@pytest.mark.parametrize(
('key', 'dask_by', 'dask_ascending'),
[
(lambda t, col: [ibis.desc(t[col])], lambda col: [col], False),
(
lambda t, col: [t[col], ibis.desc(t.plain_int64)],
lambda col: [col, 'plain_int64'],
[True, False],
),
(
lambda t, col: [ibis.desc(t.plain_int64 * 2)],
lambda col: ['plain_int64'],
False,
),
],
)
@pytest.mark.parametrize(
'column',
['plain_datetimes_naive', 'plain_datetimes_ny', 'plain_datetimes_utc'],
)
def test_sort_by(t, df, column, key, dask_by, dask_ascending):
expr = t.sort_by(key(t, column))
result = expr.execute()
expected = (
df.compute()
.sort_values(dask_by(column), ascending=dask_ascending)
.reset_index(drop=True)
)
tm.assert_frame_equal(result[expected.columns].compute(), expected)
@pytest.mark.xfail(reason="TODO - sorting - #2553")
def test_complex_sort_by(t, df):
expr = t.sort_by(
[ibis.desc(t.plain_int64 * t.plain_float64), t.plain_float64]
)
result = expr.execute()
expected = (
df.assign(foo=df.plain_int64 * df.plain_float64)
.sort_values(['foo', 'plain_float64'], ascending=[False, True])
.drop(['foo'], axis=1)
.reset_index(drop=True)
)
tm.assert_frame_equal(
result[expected.columns].compute(), expected.compute()
)
def test_distinct(t, df):
expr = t.dup_strings.distinct()
result = expr.execute()
expected = df.dup_strings.unique()
tm.assert_series_equal(result.compute(), expected.compute())
def test_count_distinct(t, df):
expr = t.dup_strings.nunique()
result = expr.execute()
expected = df.dup_strings.nunique()
assert result.compute() == expected.compute()
def test_value_counts(t, df):
expr = t.dup_strings.value_counts()
result = expr.execute()
expected = (
df.compute()
.dup_strings.value_counts()
.reset_index()
.rename(columns={'dup_strings': 'count'})
.rename(columns={'index': 'dup_strings'})
.sort_values(['dup_strings'])
.reset_index(drop=True)
)
tm.assert_frame_equal(result[expected.columns].compute(), expected)
def test_table_count(t, df):
expr = t.count()
result = expr.execute()
expected = len(df)
assert result == expected
@pytest.mark.xfail(reason="TODO - grouping - #2553")
def test_weighted_average(t, df):
expr = t.groupby(t.dup_strings).aggregate(
avg=(t.plain_float64 * t.plain_int64).sum() / t.plain_int64.sum()
)
result = expr.execute()
expected = (
df.groupby('dup_strings')
.apply(
lambda df: (df.plain_int64 * df.plain_float64).sum()
/ df.plain_int64.sum()
)
.reset_index()
.rename(columns={0: 'avg'})
)
tm.assert_frame_equal(
result[expected.columns].compute(), expected.compute()
)
def test_group_by_multiple_keys(t, df):
expr = t.groupby([t.dup_strings, t.dup_ints]).aggregate(
avg_plain_float64=t.plain_float64.mean()
)
result = expr.execute()
expected = (
df.groupby(['dup_strings', 'dup_ints'])
.agg({'plain_float64': 'mean'})
.reset_index()
.rename(columns={'plain_float64': 'avg_plain_float64'})
)
tm.assert_frame_equal(
result[expected.columns].compute(), expected.compute()
)
def test_mutate_after_group_by(t, df):
gb = t.groupby(t.dup_strings).aggregate(
avg_plain_float64=t.plain_float64.mean()
)
expr = gb.mutate(x=gb.avg_plain_float64)
result = expr.execute()
expected = (
df.groupby('dup_strings')
.agg({'plain_float64': 'mean'})
.reset_index()
.rename(columns={'plain_float64': 'avg_plain_float64'})
)
expected = expected.assign(x=expected.avg_plain_float64)
tm.assert_frame_equal(
result[expected.columns].compute(), expected.compute()
)
@pytest.mark.xfail(reason="TODO - grouping - #2553")
def test_groupby_with_unnamed_arithmetic(t, df):
expr = t.groupby(t.dup_strings).aggregate(
naive_variance=(
(t.plain_float64 ** 2).sum() - t.plain_float64.mean() ** 2
)
/ t.plain_float64.count()
)
result = expr.execute()
expected = (
df.compute()
.groupby('dup_strings')
.agg(
{
'plain_float64': lambda x: ((x ** 2).sum() - x.mean() ** 2)
/ x.count()
}
)
.reset_index()
.rename(columns={'plain_float64': 'naive_variance'})
)
tm.assert_frame_equal(result[expected.columns].compute(), expected)
def test_isnull(t, df):
expr = t.strings_with_nulls.isnull()
result = expr.execute()
expected = df.strings_with_nulls.isnull()
tm.assert_series_equal(result.compute(), expected.compute())
def test_notnull(t, df):
expr = t.strings_with_nulls.notnull()
result = expr.execute()
expected = df.strings_with_nulls.notnull()
tm.assert_series_equal(result.compute(), expected.compute())
@pytest.mark.parametrize('raw_value', [0.0, 1.0])
def test_scalar_parameter(t, df, raw_value):
value = ibis.param(dt.double)
expr = t.float64_with_zeros == value
result = expr.execute(params={value: raw_value})
expected = df.float64_with_zeros == raw_value
tm.assert_series_equal(result.compute(), expected.compute())
@pytest.mark.parametrize('elements', [[1], (1,), {1}, frozenset({1})])
def test_isin(t, df, elements):
expr = t.plain_float64.isin(elements)
expected = df.plain_float64.isin(elements)
result = expr.execute()
tm.assert_series_equal(result.compute(), expected.compute())
@pytest.mark.parametrize('elements', [[1], (1,), {1}, frozenset({1})])
def test_notin(t, df, elements):
expr = t.plain_float64.notin(elements)
expected = ~df.plain_float64.isin(elements)
result = expr.execute()
tm.assert_series_equal(result.compute(), expected.compute())
@pytest.mark.xfail(reason="TODO - grouping - #2553")
def test_cast_on_group_by(t, df):
expr = t.groupby(t.dup_strings).aggregate(
casted=(t.float64_with_zeros == 0).cast('int64').sum()
)
result = expr.execute()
expected = (
df.compute()
.groupby('dup_strings')
.float64_with_zeros.apply(lambda s: (s == 0).astype('int64').sum())
.reset_index()
.rename(columns={'float64_with_zeros': 'casted'})
)
tm.assert_frame_equal(result.compute(), expected)
@pytest.mark.parametrize(
'op',
[
operator.add,
operator.mul,
operator.sub,
operator.truediv,
operator.floordiv,
operator.mod,
operator.pow,
],
ids=operator.attrgetter('__name__'),
)
@pytest.mark.parametrize('args', [lambda c: (1.0, c), lambda c: (c, 1.0)])
def test_left_binary_op(t, df, op, args):
expr = op(*args(t.float64_with_zeros))
result = expr.execute()
expected = op(*args(df.float64_with_zeros))
tm.assert_series_equal(result.compute(), expected.compute())
@pytest.mark.xfail(reason="TODO - aggregations - #2553")
@pytest.mark.parametrize(
'op',
[
operator.add,
operator.mul,
operator.sub,
operator.truediv,
operator.floordiv,
operator.mod,
operator.pow,
],
ids=operator.attrgetter('__name__'),
)
@pytest.mark.parametrize('argfunc', [lambda c: (1.0, c), lambda c: (c, 1.0)])
def test_left_binary_op_gb(t, df, op, argfunc):
expr = t.groupby('dup_strings').aggregate(
foo=op(*argfunc(t.float64_with_zeros)).sum()
)
result = expr.execute()
expected = (
df.groupby('dup_strings')
.float64_with_zeros.apply(lambda s: op(*argfunc(s)).sum())
.reset_index()
.rename(columns={'float64_with_zeros': 'foo'})
)
tm.assert_frame_equal(result.compute(), expected.compute())
def test_where_series(t, df):
col_expr = t['plain_int64']
result = ibis.where(col_expr > col_expr.mean(), col_expr, 0.0).execute()
ser = df['plain_int64']
expected = ser.where(ser > ser.mean(), other=0.0)
tm.assert_series_equal(result.compute(), expected.compute())
@pytest.mark.parametrize(
('cond', 'expected_func'),
[
(True, lambda df: df['plain_int64']),
(False, lambda df: dd.from_array(np.repeat(3.0, len(df)))),
],
)
def test_where_scalar(t, df, cond, expected_func):
expr = ibis.where(cond, t['plain_int64'], 3.0)
result = expr.execute()
expected = expected_func(df)
tm.assert_series_equal(result.compute(), expected.compute())
def test_where_long(batting, batting_df):
col_expr = batting['AB']
result = ibis.where(col_expr > col_expr.mean(), col_expr, 0.0).execute()
ser = batting_df['AB']
expected = ser.where(ser > ser.mean(), other=0.0)
tm.assert_series_equal(result.compute(), expected.compute())
def test_round(t, df):
precision = 2
mult = 3.33333
result = (t.count() * mult).round(precision).execute()
expected = np.around(len(df) * mult, precision)
npt.assert_almost_equal(result, expected, decimal=precision)
def test_quantile_groupby(batting, batting_df):
def q_fun(x, quantile, interpolation):
res = x.quantile(quantile, interpolation=interpolation).tolist()
return [res for _ in range(len(x))]
frac = 0.2
intp = 'linear'
result = (
batting.groupby('teamID')
.mutate(res=lambda x: x.RBI.quantile([frac, 1 - frac], intp))
.res.execute()
)
expected = (
batting_df.groupby('teamID')
.RBI.transform(q_fun, quantile=[frac, 1 - frac], interpolation=intp)
.rename('res')
)
tm.assert_series_equal(result.compute(), expected.compute())
def test_summary_numeric(batting, batting_df):
expr = batting.G.summary()
result = expr.execute()
assert len(result) == 1
G = batting_df.G
expected = dict(
count=G.count(),
nulls=G.isnull().sum(),
min=G.min(),
max=G.max(),
sum=G.sum(),
mean=G.mean(),
approx_nunique=G.nunique(),
)
assert dict(result.iloc[0]) == expected
def test_summary_numeric_group_by(batting, batting_df):
expr = batting.groupby('teamID').G.summary()
result = expr.execute()
expected = (
batting_df.groupby('teamID')
.G.apply(
lambda s: dd.from_pandas(
pd.DataFrame(
dict(
count=s.count(),
nulls=s.isnull().sum(),
min=s.min(),
max=s.max(),
sum=s.sum(),
mean=s.mean(),
approx_nunique=s.nunique(),
),
index=[0],
),
npartitions=1,
)
)
.reset_index(level=1, drop=True)
.reset_index()
)
columns = expected.columns
tm.assert_frame_equal(result[columns], expected)
def test_summary_non_numeric(batting, batting_df):
expr = batting.teamID.summary()
result = expr.execute()
assert len(result) == 1
assert len(result.columns) == 3
expected = dict(
count=batting_df.teamID.count(),
nulls=batting_df.teamID.isnull().sum(),
uniques=batting_df.teamID.nunique(),
)
assert dict(result.iloc[0]) == expected
def test_summary_non_numeric_group_by(batting, batting_df):
expr = batting.groupby('teamID').playerID.summary()
result = expr.execute()
expected = (
batting_df.groupby('teamID')
.playerID.apply(
lambda s: dd.from_pandas(
pd.DataFrame(
dict(
count=s.count(),
nulls=s.isnull().sum(),
uniques=s.nunique(),
),
index=[0],
),
npartitions=1,
)
)
.reset_index(level=1, drop=True)
.reset_index()
)
columns = expected.columns
tm.assert_frame_equal(result[columns], expected, check_dtype=False)
def test_searched_case_scalar(client):
expr = ibis.case().when(True, 1).when(False, 2).end()
result = client.execute(expr)
expected = np.int8(1)
assert result == expected
def test_searched_case_column(batting, batting_df):
t = batting
df = batting_df
expr = (
ibis.case()
.when(t.RBI < 5, 'really bad team')
.when(t.teamID == 'PH1', 'ph1 team')
.else_(t.teamID)
.end()
)
result = expr.execute()
expected = dd.from_array(
np.select(
[df.RBI < 5, df.teamID == 'PH1'],
['really bad team', 'ph1 team'],
df.teamID,
)
)
tm.assert_series_equal(result.compute(), expected.compute())
def test_simple_case_scalar(client):
x = ibis.literal(2)
expr = x.case().when(2, x - 1).when(3, x + 1).when(4, x + 2).end()
result = client.execute(expr)
expected = np.int8(1)
assert result == expected
def test_simple_case_column(batting, batting_df):
t = batting
df = batting_df
expr = (
t.RBI.case()
.when(5, 'five')
.when(4, 'four')
.when(3, 'three')
.else_('could be good?')
.end()
)
result = expr.execute()
expected = dd.from_array(
np.select(
[df.RBI == 5, df.RBI == 4, df.RBI == 3],
['five', 'four', 'three'],
'could be good?',
)
)
tm.assert_series_equal(result.compute(), expected.compute())
def test_table_distinct(t, df):
expr = t[['dup_strings']].distinct()
result = expr.execute()
expected = df[['dup_strings']].drop_duplicates()
tm.assert_frame_equal(result.compute(), expected.compute())
@pytest.mark.parametrize("distinct", [True, False])
def test_union(client, df1, distinct):
t = client.table('df1')
expr = t.union(t, distinct=distinct)
result = expr.execute()
expected = (
df1 if distinct else dd.concat([df1, df1], axis=0, ignore_index=True)
)
tm.assert_frame_equal(result.compute(), expected.compute())
def test_intersect(client, df1, intersect_df2):
t1 = client.table('df1')
t2 = client.table('intersect_df2')
expr = t1.intersect(t2)
result = expr.execute()
expected = df1.merge(intersect_df2, on=list(df1.columns))
tm.assert_frame_equal(result.compute(), expected.compute())
def test_difference(client, df1, intersect_df2):
t1 = client.table('df1')
t2 = client.table('intersect_df2')
expr = t1.difference(t2)
result = expr.execute()
merged = df1.merge(
intersect_df2, on=list(df1.columns), how="outer", indicator=True
)
expected = merged[merged["_merge"] != "both"].drop("_merge", 1)
tm.assert_frame_equal(result.compute(), expected.compute())
@pytest.mark.parametrize(
"distinct",
[
pytest.param(
True,
marks=pytest.mark.xfail(
raises=TypeError,
reason=(
"dask cannot compute the distinct element of an "
"array column"
),
),
),
False,
],
)
def test_union_with_list_types(t, df, distinct):
expr = t.union(t, distinct=distinct)
result = expr.execute()
expected = (
df if distinct else dd.concat([df, df], axis=0, ignore_index=True)
)
tm.assert_frame_equal(result.compute(), expected.compute())
|
the-stack_106_26131 | # Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
KFServing
Python SDK for KFServing # noqa: E501
OpenAPI spec version: v0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client import V1ResourceRequirements # noqa: F401,E501
class V1alpha2AlibiExplainerSpec(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'config': 'dict(str, str)',
'resources': 'V1ResourceRequirements',
'runtime_version': 'str',
'storage_uri': 'str',
'type': 'str'
}
attribute_map = {
'config': 'config',
'resources': 'resources',
'runtime_version': 'runtimeVersion',
'storage_uri': 'storageUri',
'type': 'type'
}
def __init__(self, config=None, resources=None, runtime_version=None, storage_uri=None, type=None): # noqa: E501
"""V1alpha2AlibiExplainerSpec - a model defined in Swagger""" # noqa: E501
self._config = None
self._resources = None
self._runtime_version = None
self._storage_uri = None
self._type = None
self.discriminator = None
if config is not None:
self.config = config
if resources is not None:
self.resources = resources
if runtime_version is not None:
self.runtime_version = runtime_version
if storage_uri is not None:
self.storage_uri = storage_uri
self.type = type
@property
def config(self):
"""Gets the config of this V1alpha2AlibiExplainerSpec. # noqa: E501
Inline custom parameter settings for explainer # noqa: E501
:return: The config of this V1alpha2AlibiExplainerSpec. # noqa: E501
:rtype: dict(str, str)
"""
return self._config
@config.setter
def config(self, config):
"""Sets the config of this V1alpha2AlibiExplainerSpec.
Inline custom parameter settings for explainer # noqa: E501
:param config: The config of this V1alpha2AlibiExplainerSpec. # noqa: E501
:type: dict(str, str)
"""
self._config = config
@property
def resources(self):
"""Gets the resources of this V1alpha2AlibiExplainerSpec. # noqa: E501
Defaults to requests and limits of 1CPU, 2Gb MEM. # noqa: E501
:return: The resources of this V1alpha2AlibiExplainerSpec. # noqa: E501
:rtype: V1ResourceRequirements
"""
return self._resources
@resources.setter
def resources(self, resources):
"""Sets the resources of this V1alpha2AlibiExplainerSpec.
Defaults to requests and limits of 1CPU, 2Gb MEM. # noqa: E501
:param resources: The resources of this V1alpha2AlibiExplainerSpec. # noqa: E501
:type: V1ResourceRequirements
"""
self._resources = resources
@property
def runtime_version(self):
"""Gets the runtime_version of this V1alpha2AlibiExplainerSpec. # noqa: E501
Defaults to latest Alibi Version. # noqa: E501
:return: The runtime_version of this V1alpha2AlibiExplainerSpec. # noqa: E501
:rtype: str
"""
return self._runtime_version
@runtime_version.setter
def runtime_version(self, runtime_version):
"""Sets the runtime_version of this V1alpha2AlibiExplainerSpec.
Defaults to latest Alibi Version. # noqa: E501
:param runtime_version: The runtime_version of this V1alpha2AlibiExplainerSpec. # noqa: E501
:type: str
"""
self._runtime_version = runtime_version
@property
def storage_uri(self):
"""Gets the storage_uri of this V1alpha2AlibiExplainerSpec. # noqa: E501
The location of a trained explanation model # noqa: E501
:return: The storage_uri of this V1alpha2AlibiExplainerSpec. # noqa: E501
:rtype: str
"""
return self._storage_uri
@storage_uri.setter
def storage_uri(self, storage_uri):
"""Sets the storage_uri of this V1alpha2AlibiExplainerSpec.
The location of a trained explanation model # noqa: E501
:param storage_uri: The storage_uri of this V1alpha2AlibiExplainerSpec. # noqa: E501
:type: str
"""
self._storage_uri = storage_uri
@property
def type(self):
"""Gets the type of this V1alpha2AlibiExplainerSpec. # noqa: E501
The type of Alibi explainer # noqa: E501
:return: The type of this V1alpha2AlibiExplainerSpec. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V1alpha2AlibiExplainerSpec.
The type of Alibi explainer # noqa: E501
:param type: The type of this V1alpha2AlibiExplainerSpec. # noqa: E501
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(V1alpha2AlibiExplainerSpec, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha2AlibiExplainerSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
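# Illustrative usage (assumed values; this snippet is not part of the generated model):
#   spec = V1alpha2AlibiExplainerSpec(type='AnchorTabular',
#                                     storage_uri='gs://models/explainer')
#   spec.to_dict()  # -> dict keyed by 'config', 'resources', 'runtime_version',
#                   #    'storage_uri' and 'type'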
|
the-stack_106_26132 | import json
HTML_TEMPLATE = """
<h1>Resource</h1>
<h2>Attributes</h2>
<pre>
<code>
{attributes}
</code>
</pre>
<h2>Links</h2>
<ul>
{links}
</ul>
"""
def deserialize_html(resource):
def render(accumulator, link):
return accumulator + '<li><a href="{0}">{0}</a></li>\n'.format(link)
def render_links(accumulator, relation):
resource = relation[1]
if isinstance(resource, list):
get_uri = lambda resource: resource.get_uri()
links = reduce(render, map(get_uri, resource), '')
return accumulator + '<li>{} <ul>{}</ul></li>'.format(relation[0], links)
return accumulator + '<li><a href="{}">{}</a></li>\n'.format(resource.get_uri(), relation[0])
return HTML_TEMPLATE.format(
attributes=json.dumps(resource.get_attributes(), sort_keys=True, indent=4),
links=reduce(render_links, resource.get_relations().items(), ''),
)
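# Illustrative usage (assumes a hypothetical `resource` object exposing
# get_attributes(), get_relations() and get_uri(), as the code above expects):
#   page_html = deserialize_html(resource)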
|
the-stack_106_26134 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import platform
import sys
import unittest
from blinkpy.common.system.executive import Executive
from blinkpy.common.system.executive_mock import MockExecutive
from blinkpy.common.system.filesystem import FileSystem
from blinkpy.common.system.filesystem_mock import MockFileSystem
from blinkpy.common.system.platform_info import PlatformInfo
def fake_sys(platform_str='darwin', windows_version_tuple=None):
class FakeSysModule(object):
platform = platform_str
if windows_version_tuple:
getwindowsversion = lambda x: windows_version_tuple
return FakeSysModule()
def fake_platform(mac_version_string='10.12.3',
release_string='bar',
linux_version='trusty',
win_version_string=None):
class FakePlatformModule(object):
def mac_ver(self):
return tuple([mac_version_string, tuple(['', '', '']), 'i386'])
def linux_distribution(self):
return tuple([None, None, linux_version])
def platform(self):
return 'foo'
def release(self):
return release_string
def win32_ver(self):
return tuple([None, win_version_string])
return FakePlatformModule()
def fake_executive(output=None):
if output:
return MockExecutive(output=output)
return MockExecutive(exception=SystemError)
class TestPlatformInfo(unittest.TestCase):
def make_info(self,
sys_module=None,
platform_module=None,
filesystem_module=None,
executive=None):
return PlatformInfo(sys_module or fake_sys(), platform_module
or fake_platform(), filesystem_module
or MockFileSystem(), executive or fake_executive())
def test_real_code(self):
# This test makes sure the real (unmocked) code actually works.
info = PlatformInfo(sys, platform, FileSystem(), Executive())
self.assertNotEquals(info.os_name, '')
self.assertNotEquals(info.os_version, '')
self.assertNotEquals(info.display_name(), '')
self.assertTrue(info.is_mac() or info.is_win() or info.is_linux()
or info.is_freebsd())
self.assertIsNotNone(info.terminal_width())
if info.is_linux():
self.assertIsNotNone(info.linux_distribution())
if info.is_mac():
self.assertTrue(info.total_bytes_memory() > 0)
else:
self.assertIsNone(info.total_bytes_memory())
def test_os_name_and_wrappers(self):
info = self.make_info(fake_sys('linux2'))
self.assertTrue(info.is_linux())
self.assertFalse(info.is_mac())
self.assertFalse(info.is_win())
self.assertFalse(info.is_freebsd())
info = self.make_info(fake_sys('linux3'))
self.assertTrue(info.is_linux())
self.assertFalse(info.is_mac())
self.assertFalse(info.is_win())
self.assertFalse(info.is_freebsd())
info = self.make_info(fake_sys('darwin'), fake_platform('10.12.3'))
self.assertEqual(info.os_name, 'mac')
self.assertFalse(info.is_linux())
self.assertTrue(info.is_mac())
self.assertFalse(info.is_win())
self.assertFalse(info.is_freebsd())
info = self.make_info(fake_sys('win32', tuple([6, 1, 7600])),
fake_platform(win_version_string="6.1.7600"))
self.assertEqual(info.os_name, 'win')
self.assertFalse(info.is_linux())
self.assertFalse(info.is_mac())
self.assertTrue(info.is_win())
self.assertFalse(info.is_freebsd())
info = self.make_info(fake_sys('freebsd8'))
self.assertEqual(info.os_name, 'freebsd')
self.assertFalse(info.is_linux())
self.assertFalse(info.is_mac())
self.assertFalse(info.is_win())
self.assertTrue(info.is_freebsd())
with self.assertRaises(AssertionError):
self.make_info(fake_sys('vms'))
def test_os_version(self):
with self.assertRaises(AssertionError):
self.make_info(fake_sys('darwin'), fake_platform('10.6.3'))
self.assertEqual(
self.make_info(fake_sys('darwin'),
fake_platform('10.10.0')).os_version, 'mac10.10')
self.assertEqual(
self.make_info(fake_sys('darwin'),
fake_platform('10.11.0')).os_version, 'mac10.11')
self.assertEqual(
self.make_info(fake_sys('darwin'),
fake_platform('10.12.0')).os_version, 'mac10.12')
self.assertEqual(
self.make_info(fake_sys('darwin'),
fake_platform('10.13.0')).os_version, 'mac10.13')
self.assertEqual(
self.make_info(fake_sys('darwin'),
fake_platform('10.14.0')).os_version, 'mac10.14')
self.assertEqual(
self.make_info(fake_sys('darwin'),
fake_platform('10.15.0')).os_version, 'mac10.15')
self.assertEqual(
self.make_info(fake_sys('darwin'),
fake_platform('10.16.0')).os_version, 'mac10.16')
self.assertEqual(
self.make_info(fake_sys('darwin'),
fake_platform('11.0.0')).os_version, 'mac11')
with self.assertRaises(AssertionError):
self.make_info(fake_sys('darwin'), fake_platform('10.20.0'))
self.assertEqual(
self.make_info(fake_sys('linux2')).os_version, 'trusty')
info = self.make_info(
fake_sys('linux2'), fake_platform(linux_version='utopic'))
self.assertEqual(info.os_version, 'trusty')
self.assertEqual(
self.make_info(
fake_sys('freebsd8'), fake_platform(
'', '8.3-PRERELEASE')).os_version, '8.3-PRERELEASE')
self.assertEqual(
self.make_info(
fake_sys('freebsd9'),
fake_platform('', '9.0-RELEASE')).os_version, '9.0-RELEASE')
with self.assertRaises(AssertionError):
self.make_info(fake_sys('win32', tuple([5, 0, 1234])),
fake_platform(win_version_string="5.0.1234"))
with self.assertRaises(AssertionError):
self.make_info(fake_sys('win32', tuple([6, 1, 1234])),
fake_platform(win_version_string="6.1.1234"))
self.assertEqual(
self.make_info(
fake_sys('win32', tuple([10, 1, 1234])),
fake_platform(win_version_string="10.1.1234")).os_version,
'future')
self.assertEqual(
self.make_info(
fake_sys('win32', tuple([10, 0, 1234])),
fake_platform(win_version_string="10.0.1234")).os_version,
'10.1909')
self.assertEqual(
self.make_info(
fake_sys('win32', tuple([10, 0, 19042])),
fake_platform(win_version_string="10.0.19042")).os_version,
'10.20h2')
self.assertEqual(
self.make_info(
fake_sys('win32', tuple([6, 3, 1234])),
fake_platform(win_version_string="6.3.1234")).os_version,
'8.1')
self.assertEqual(
self.make_info(
fake_sys('win32', tuple([6, 2, 1234])),
fake_platform(win_version_string="6.2.1234")).os_version, '8')
self.assertEqual(
self.make_info(
fake_sys('win32', tuple([6, 1, 7601])),
fake_platform(win_version_string="6.1.7601")).os_version,
'7sp1')
self.assertEqual(
self.make_info(
fake_sys('win32', tuple([6, 1, 7600])),
fake_platform(win_version_string="6.1.7600")).os_version,
'7sp0')
self.assertEqual(
self.make_info(
fake_sys('win32', tuple([6, 0, 1234])),
fake_platform(win_version_string="6.0.1234")).os_version,
'vista')
self.assertEqual(
self.make_info(
fake_sys('win32', tuple([5, 1, 1234])),
fake_platform(win_version_string="5.1.1234")).os_version, 'xp')
with self.assertRaises(AssertionError):
self.make_info(
fake_sys('win32'), executive=fake_executive('5.0.1234'))
with self.assertRaises(AssertionError):
self.make_info(
fake_sys('win32'), executive=fake_executive('6.1.1234'))
def _assert_files_imply_linux_distribution(self, file_paths, distribution):
fs_module = MockFileSystem({file_path: '' for file_path in file_paths})
info = self.make_info(
sys_module=fake_sys('linux2'), filesystem_module=fs_module)
self.assertEqual(info.linux_distribution(), distribution)
def test_linux_distro_detection(self):
self._assert_files_imply_linux_distribution(['/etc/arch-release'],
'arch')
self._assert_files_imply_linux_distribution(['/etc/debian_version'],
'debian')
self._assert_files_imply_linux_distribution(['/etc/fedora-release'],
'fedora')
self._assert_files_imply_linux_distribution(
['/etc/fedora-release', '/etc/redhat-release'], 'fedora')
self._assert_files_imply_linux_distribution(['/etc/redhat-release'],
'redhat')
self._assert_files_imply_linux_distribution(['/etc/mock-release'],
'unknown')
def test_display_name(self):
info = self.make_info(fake_sys('darwin'))
self.assertNotEquals(info.display_name(), '')
info = self.make_info(fake_sys('win32', tuple([6, 1, 7600])),
fake_platform(win_version_string="6.1.7600"))
self.assertNotEquals(info.display_name(), '')
info = self.make_info(fake_sys('linux2'))
self.assertNotEquals(info.display_name(), '')
info = self.make_info(fake_sys('freebsd9'))
self.assertNotEquals(info.display_name(), '')
def test_total_bytes_memory(self):
info = self.make_info(
fake_sys('darwin'),
fake_platform('10.12.3'),
executive=fake_executive('1234'))
self.assertEqual(info.total_bytes_memory(), 1234)
info = self.make_info(fake_sys('win32', tuple([6, 1, 7600])),
fake_platform(win_version_string="6.1.7600"))
self.assertIsNone(info.total_bytes_memory())
info = self.make_info(fake_sys('linux2'))
self.assertIsNone(info.total_bytes_memory())
info = self.make_info(fake_sys('freebsd9'))
self.assertIsNone(info.total_bytes_memory())
def test_unsupported_platform(self):
with self.assertRaises(AssertionError):
self.make_info(fake_sys('cygwin'))
|
the-stack_106_26135 | try:
import uerrno
try:
import uos_vfs as uos
open = uos.vfs_open
except ImportError:
import uos
except ImportError:
print("SKIP")
raise SystemExit
try:
uos.mkfat
except AttributeError:
print("SKIP")
raise SystemExit
class RAMFS:
SEC_SIZE = 512
def __init__(self, blocks):
self.data = bytearray(blocks * self.SEC_SIZE)
def readblocks(self, n, buf):
#print("readblocks(%s, %x(%d))" % (n, id(buf), len(buf)))
for i in range(len(buf)):
buf[i] = self.data[n * self.SEC_SIZE + i]
def writeblocks(self, n, buf):
#print("writeblocks(%s, %x)" % (n, id(buf)))
for i in range(len(buf)):
self.data[n * self.SEC_SIZE + i] = buf[i]
def ioctl(self, op, arg):
#print("ioctl(%d, %r)" % (op, arg))
if op == 4: # BP_IOCTL_SEC_COUNT
return len(self.data) // self.SEC_SIZE
if op == 5: # BP_IOCTL_SEC_SIZE
return self.SEC_SIZE
try:
bdev = RAMFS(50)
except MemoryError:
print("SKIP")
raise SystemExit
uos.mkfat.mkfs(bdev)
vfs = uos.mkfat(bdev)
uos.mount(vfs, '/ramdisk')
uos.chdir('/ramdisk')
try:
vfs.mkdir("foo_dir")
except OSError as e:
print(e.args[0] == uerrno.EEXIST)
try:
vfs.remove("foo_dir")
except OSError as e:
print(e.args[0] == uerrno.EISDIR)
try:
vfs.remove("no_file.txt")
except OSError as e:
print(e.args[0] == uerrno.ENOENT)
try:
vfs.rename("foo_dir", "/null/file")
except OSError as e:
print(e.args[0] == uerrno.ENOENT)
# file in dir
with open("foo_dir/file-in-dir.txt", "w+t") as f:
f.write("data in file")
with open("foo_dir/file-in-dir.txt", "r+b") as f:
print(f.read())
with open("foo_dir/sub_file.txt", "w") as f:
f.write("subdir file")
# directory not empty
try:
vfs.rmdir("foo_dir")
except OSError as e:
print(e.args[0] == uerrno.EACCES)
# trim full path
vfs.rename("foo_dir/file-in-dir.txt", "foo_dir/file.txt")
print(list(vfs.ilistdir("foo_dir")))
vfs.rename("foo_dir/file.txt", "moved-to-root.txt")
print(list(vfs.ilistdir()))
# check that renaming to existing file will overwrite it
with open("temp", "w") as f:
f.write("new text")
vfs.rename("temp", "moved-to-root.txt")
print(list(vfs.ilistdir()))
with open("moved-to-root.txt") as f:
print(f.read())
# valid removes
vfs.remove("foo_dir/sub_file.txt")
vfs.rmdir("foo_dir")
print(list(vfs.ilistdir()))
# disk full
try:
bsize = vfs.statvfs("/ramdisk")[0]
free = vfs.statvfs("/ramdisk")[2] + 1
f = open("large_file.txt", "wb")
f.write(bytearray(bsize * free))
except OSError as e:
print("ENOSPC:", e.args[0] == 28) # uerrno.ENOSPC
|
the-stack_106_26138 | import json
import time
from os import listdir
from os.path import isfile, join
import requests
from lxml import html, cssselect
from lxml.html.clean import clean_html
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def get_gov(tax_code, write_to_file=True):
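    """Look up a business on dangkyquamang.dkkd.gov.vn by its tax code.

    Returns a dict with the extracted fields (an empty dict on failure) and,
    when write_to_file is True, also writes them to data/<tax_code>.yaml.
    """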
try:
start_time = time.time()
        # Call the home page to initialize the session and obtain the form parameters
home_url = 'https://dangkyquamang.dkkd.gov.vn/inf/default.aspx'
request_session = requests.sessions.Session()
home_page = request_session.get(home_url, verify=False)
home_tree = html.fromstring(home_page.content)
event_validation = home_tree.get_element_by_id('__EVENTVALIDATION').value
hd_param = home_tree.get_element_by_id('ctl00_hdParameter').value
        # Call the search endpoint to get the enterprise's secret ID
search_url = 'https://dangkyquamang.dkkd.gov.vn/inf/Public/Srv.aspx/GetSearch'
search_form = {
"searchField": tax_code,
"h": hd_param
}
try_times = 0
while try_times < 20:
try:
search_headers = {'content-type': 'application/json', 'charset': 'UTF-8'}
search_response = request_session.post(search_url, json=search_form, headers=search_headers)
search_data = json.loads(search_response.text)['d']
break
except:
time.sleep(1)
try_times += 1
continue
secret_id = ''
for org in search_data:
if org['Enterprise_Gdt_Code'] == tax_code:
secret_id = org['Id']
        # Call the search results page and extract the information
form_data = {
'__EVENTTARGET': '',
'__EVENTARGUMENT': '',
'__VIEWSTATE': '',
'__EVENTVALIDATION': event_validation,
'ctl00$nonceKeyFld': '',
'ctl00$hdParameter': hd_param,
'ctl00$FldSearch': tax_code,
'ctl00$FldSearchID': secret_id,
'ctl00$btnSearch': 'Tìm kiếm >>',
'ctl00$searchtype': 1
}
result_page = request_session.post(home_url, data=form_data, verify=False)
result_tree = html.fromstring(clean_html(result_page.text.replace('</br>', '<br>').replace('\r', '').replace('\n', '').replace('\t', '')))
#
# for br in result_tree.cssselect("br"):
# br.tail = "\n" + br.tail if br.tail else "\n"
name = normalize(result_tree.get_element_by_id('ctl00_C_NAMEFld', ''))
if name == '':
return {}
eng_name = normalize(result_tree.get_element_by_id('ctl00_C_NAME_FFld', ''))
short_name = normalize(result_tree.get_element_by_id('ctl00_C_SHORT_NAMEFld', ''))
active_status = normalize(result_tree.get_element_by_id('ctl00_C_STATUSNAMEFld', ''))
active_status = 1 if active_status == 'Đang hoạt động' else 0
enterprise_type = normalize(result_tree.get_element_by_id('ctl00_C_ENTERPRISE_TYPEFld', ''))
founding_date = normalize(result_tree.get_element_by_id('ctl00_C_FOUNDING_DATE', ''))
founding_date = change_date_type(founding_date)
legal_representative = normalize(result_tree.cssselect('#ctl00_C_Tab1 > div:nth-child(8) > p:nth-child(1) > span.viewInput.viewSearch'))
address = normalize(result_tree.get_element_by_id('ctl00_C_HO_ADDRESS', ''))
i_table = result_tree.get_element_by_id('i_Data', {})
if write_to_file:
with open('data/' + tax_code + '.yaml', 'w') as f:
f.write('tax_code: "' + tax_code + '"\n')
f.write('name: "' + name + '"\n')
f.write('eng_name: "' + eng_name + '"\n')
f.write('short_name: "' + short_name + '"\n')
f.write('active_status: "' + str(active_status) + '"\n')
f.write('enterprise_type: "' + enterprise_type + '"\n')
f.write('founding_date: "' + founding_date + '"\n')
f.write('legal_representative: "' + legal_representative + '"\n')
f.write('address: "' + address + '"\n')
f.write('time: "' + str(time.time() - start_time) + '"')
f.close()
request_session.close()
return {
'tax_code': tax_code,
'name': name,
'eng_name': eng_name,
'short_name': short_name,
'active_status': str(active_status),
'enterprise_type': enterprise_type,
'founding_date': founding_date,
'legal_representative': legal_representative,
'address': address
}
except Exception as e:
return {}
def normalize(data):
if isinstance(data, str):
return data
if isinstance(data, list):
return data[0].text.strip() if len(data) > 0 else ''
return data.text.strip() if data.text is not None else ''
def change_date_type(date: str):
if date is None or date == '':
return ''
date_lst = date.split('/')
return date_lst[2] + '-' + date_lst[1] + '-' + date_lst[0]
if __name__ == '__main__':
data = get_gov('0101243151', False)
    print(data)
# directory = '/home/misa/Desktop/CrawlerORGINFO/thongtincongty.co/storage/decription'
# files = [f for f in listdir(directory) if isfile(join(directory, f))]
# files.sort()
# error_list = []
# for file in files:
# try:
# get(file[:-4])
# print(file)
# except Exception as e:
# error_list.append(file[:-4])
# print('Sai: ' + file)
# print(e)
# continue
# error_list2 = []
# for e in error_list:
# try:
# get(e)
# except:
# error_list2.append(e)
# print(error_list2)
|
the-stack_106_26139 | """CFNgin blueprint representing raw template module."""
import hashlib
import json
import os
import sys
from jinja2 import Template
from ..exceptions import InvalidConfig, UnresolvedVariable
from ..util import parse_cloudformation_template
from .base import Blueprint
def get_template_path(filename):
"""Find raw template in working directory or in sys.path.
template_path from config may refer to templates colocated with the Stacker
config, or files in remote package_sources. Here, we emulate python module
loading to find the path to the template.
Args:
filename (str): Template filename.
Returns:
Optional[str]: Path to file, or None if no file found
"""
if os.path.isfile(filename):
return os.path.abspath(filename)
for i in sys.path:
if os.path.isfile(os.path.join(i, filename)):
return os.path.abspath(os.path.join(i, filename))
return None
def get_template_params(template):
"""Parse a CFN template for defined parameters.
Args:
template (dict): Parsed CFN template.
Returns:
dict: Template parameters.
"""
params = {}
if 'Parameters' in template:
params = template['Parameters']
return params
def resolve_variable(provided_variable, blueprint_name):
"""Resolve a provided variable value against the variable definition.
This acts as a subset of resolve_variable logic in the base module, leaving
out everything that doesn't apply to CFN parameters.
Args:
provided_variable (:class:`runway.cfngin.variables.Variable`):
The variable value provided to the blueprint.
blueprint_name (str): The name of the blueprint that the variable is
being applied to.
Returns:
object: The resolved variable string value.
Raises:
UnresolvedVariable: Raised when the provided variable is not already
resolved.
"""
value = None
if provided_variable:
if not provided_variable.resolved:
raise UnresolvedVariable(blueprint_name, provided_variable)
value = provided_variable.value
return value
class RawTemplateBlueprint(Blueprint): # pylint: disable=abstract-method
"""Blueprint class for blueprints auto-generated from raw templates."""
def __init__(self, name, context, # pylint: disable=super-init-not-called
raw_template_path, mappings=None, description=None):
"""Instantiate class."""
self.name = name
self.context = context
self.mappings = mappings
self.resolved_variables = None
self.raw_template_path = raw_template_path
self._rendered = None
self._version = None
def to_json(self, variables=None):
"""Return the template in JSON.
Args:
variables (dict):
Unused in this subclass (variables won't affect the template).
Returns:
str: the rendered CFN JSON template
"""
# load -> dumps will produce json from json or yaml templates
return json.dumps(self.to_dict(), sort_keys=True, indent=4)
def to_dict(self):
"""Return the template as a python dictionary.
Returns:
dict: the loaded template as a python dictionary
"""
return parse_cloudformation_template(self.rendered)
def render_template(self):
"""Load template and generate its md5 hash."""
return (self.version, self.rendered)
def get_parameter_definitions(self):
"""Get the parameter definitions to submit to CloudFormation.
Returns:
Dict[str, Any]: parameter definitions. Keys are parameter names,
the values are dicts containing key/values for various parameter
properties.
"""
return get_template_params(self.to_dict())
def get_output_definitions(self):
"""Get the output definitions.
Returns:
Dict[str, Any]: output definitions. Keys are output names, the
values are dicts containing key/values for various output
properties.
"""
return self.to_dict().get('Outputs', {})
def resolve_variables(self, provided_variables):
"""Resolve the values of the blueprint variables.
This will resolve the values of the template parameters with values
from the env file, the config, and any lookups resolved. The
resolution is run twice, in case the blueprint is jinja2 templated
and requires provided variables to render.
Args:
provided_variables (List[:class:`runway.cfngin.variables.Variable`]):
List of provided variables.
"""
# Pass 1 to set resolved_variables to provided variables
self.resolved_variables = {}
variable_dict = dict((var.name, var) for var in provided_variables)
for var_name, _var_def in variable_dict.items():
value = resolve_variable(
variable_dict.get(var_name),
self.name
)
if value is not None:
self.resolved_variables[var_name] = value
# Pass 2 to render the blueprint and set resolved_variables according
# to defined variables
defined_variables = self.get_parameter_definitions()
self.resolved_variables = {}
variable_dict = dict((var.name, var) for var in provided_variables)
for var_name, _var_def in defined_variables.items():
value = resolve_variable(
variable_dict.get(var_name),
self.name
)
if value is not None:
self.resolved_variables[var_name] = value
def get_parameter_values(self):
"""Return a dictionary of variables with `type` :class:`CFNType`.
Returns:
Dict[str, Any]: variables that need to be submitted as
CloudFormation Parameters. Will be a dictionary of
``<parameter name>: <parameter value>``.
"""
return self.resolved_variables
@property
def requires_change_set(self):
"""Return True if the underlying template has transforms."""
return bool("Transform" in self.to_dict())
@property
def rendered(self):
"""Return (generating first if needed) rendered template."""
if not self._rendered:
template_path = get_template_path(self.raw_template_path)
if template_path:
with open(template_path, 'r') as template:
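                    # Templates with a .j2 extension are rendered through Jinja2 with the CFNgin context, mappings, and resolved variables; any other file is used verbatim.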
if len(os.path.splitext(template_path)) == 2 and (
os.path.splitext(template_path)[1] == '.j2'):
self._rendered = Template(template.read()).render(
context=self.context,
mappings=self.mappings,
name=self.name,
variables=self.resolved_variables
)
else:
self._rendered = template.read()
else:
raise InvalidConfig(
'Could not find template %s' % self.raw_template_path
)
return self._rendered
@property
def version(self):
"""Return (generating first if needed) version hash."""
if not self._version:
self._version = hashlib.md5(self.rendered.encode()).hexdigest()[:8]
return self._version
|
the-stack_106_26141 | import sys, os, glob, shutil
from subprocess import check_call
from scrapy import version_info
def build(suffix):
for ifn in glob.glob("debian/scrapy.*"):
s = open(ifn).read()
s = s.replace('SUFFIX', suffix)
pre, suf = ifn.split('.', 1)
ofn = "%s-%s.%s" % (pre, suffix, suf)
with open(ofn, 'w') as of:
of.write(s)
for ifn in ['debian/control', 'debian/changelog']:
s = open(ifn).read()
s = s.replace('SUFFIX', suffix)
with open(ifn, 'w') as of:
of.write(s)
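    # Append a timestamped entry to the changelog, then build an unsigned binary-only package.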
check_call('debchange -m -D unstable --force-distribution -v $(python setup.py --version)+$(date +%s) "Automatic build"', \
shell=True)
check_call('debuild -us -uc -b', shell=True)
def clean(suffix):
for f in glob.glob("debian/python-scrapy%s*" % suffix):
if os.path.isdir(f):
shutil.rmtree(f)
else:
os.remove(f)
def main():
cmd = sys.argv[1]
suffix = '%s.%s' % version_info[:2]
if cmd == 'build':
build(suffix)
elif cmd == 'clean':
clean(suffix)
if __name__ == '__main__':
main()
|
the-stack_106_26146 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Set axis label
"""
import numpy as np
import matplotlib.pyplot as plt
# Build datas ###############
x = np.arange(-10, 10, 0.01)
y = np.sin(x)
# Plot data #################
fig = plt.figure(figsize=(8.0, 8.0))
ax = fig.add_subplot(111)
ax.plot(x, y)
# Set labels ################
ax.set_xlabel(r"$x$", fontsize=32)
ax.set_ylabel(r"$f(x)$", fontsize=32)
# Save file and plot ########
plt.savefig("ax_set_label.png")
plt.show()
|
the-stack_106_26147 | """
Preprocess the SNLI dataset and word embeddings to be used by the ESIM model.
"""
# Aurelien Coet, 2018.
import os
import pickle
import argparse
import fnmatch
import json
from esim.data import Preprocessor
def preprocess_SNLI_data(inputdir,
embeddings_file,
targetdir,
lowercase=False,
ignore_punctuation=False,
num_words=None,
stopwords=[],
labeldict={},
bos=None,
eos=None):
"""
Preprocess the data from the SNLI corpus so it can be used by the
ESIM model.
Compute a worddict from the train set, and transform the words in
the sentences of the corpus to their indices, as well as the labels.
Build an embedding matrix from pretrained word vectors.
The preprocessed data is saved in pickled form in some target directory.
Args:
inputdir: The path to the directory containing the NLI corpus.
embeddings_file: The path to the file containing the pretrained
word vectors that must be used to build the embedding matrix.
targetdir: The path to the directory where the preprocessed data
must be saved.
lowercase: Boolean value indicating whether to lowercase the premises
            and hypotheses in the input data. Defaults to False.
ignore_punctuation: Boolean value indicating whether to remove
punctuation from the input data. Defaults to False.
num_words: Integer value indicating the size of the vocabulary to use
for the word embeddings. If set to None, all words are kept.
Defaults to None.
stopwords: A list of words that must be ignored when preprocessing
the data. Defaults to an empty list.
bos: A string indicating the symbol to use for beginning of sentence
tokens. If set to None, bos tokens aren't used. Defaults to None.
eos: A string indicating the symbol to use for end of sentence tokens.
If set to None, eos tokens aren't used. Defaults to None.
"""
if not os.path.exists(targetdir):
os.makedirs(targetdir)
# Retrieve the train, dev and test data files from the dataset directory.
train_file = ""
dev_file = ""
test_file = ""
for file in os.listdir(inputdir):
if fnmatch.fnmatch(file, "*_train.txt"):
train_file = file
elif fnmatch.fnmatch(file, "*_dev.txt"):
dev_file = file
elif fnmatch.fnmatch(file, "*_test.txt"):
test_file = file
# -------------------- Train data preprocessing -------------------- #
preprocessor = Preprocessor(lowercase=lowercase,
ignore_punctuation=ignore_punctuation,
num_words=num_words,
stopwords=stopwords,
labeldict=labeldict,
bos=bos,
eos=eos)
print(20*"=", " Preprocessing train set ", 20*"=")
print("\t* Reading data...")
data = preprocessor.read_data(os.path.join(inputdir, train_file))
print("\t* Computing worddict and saving it...")
preprocessor.build_worddict(data)
with open(os.path.join(targetdir, "worddict.pkl"), "wb") as pkl_file:
pickle.dump(preprocessor.worddict, pkl_file)
print("\t* Transforming words in premises and hypotheses to indices...")
transformed_data = preprocessor.transform_to_indices(data)
print("\t* Saving result...")
with open(os.path.join(targetdir, "train_data.pkl"), "wb") as pkl_file:
pickle.dump(transformed_data, pkl_file)
# -------------------- Validation data preprocessing -------------------- #
print(20*"=", " Preprocessing dev set ", 20*"=")
print("\t* Reading data...")
data = preprocessor.read_data(os.path.join(inputdir, dev_file))
print("\t* Transforming words in premises and hypotheses to indices...")
transformed_data = preprocessor.transform_to_indices(data)
print("\t* Saving result...")
with open(os.path.join(targetdir, "dev_data.pkl"), "wb") as pkl_file:
pickle.dump(transformed_data, pkl_file)
# -------------------- Test data preprocessing -------------------- #
print(20*"=", " Preprocessing test set ", 20*"=")
print("\t* Reading data...")
data = preprocessor.read_data(os.path.join(inputdir, test_file))
print("\t* Transforming words in premises and hypotheses to indices...")
transformed_data = preprocessor.transform_to_indices(data)
print("\t* Saving result...")
with open(os.path.join(targetdir, "test_data.pkl"), "wb") as pkl_file:
pickle.dump(transformed_data, pkl_file)
# -------------------- Embeddings preprocessing -------------------- #
print(20*"=", " Preprocessing embeddings ", 20*"=")
print("\t* Building embedding matrix and saving it...")
embed_matrix = preprocessor.build_embedding_matrix(embeddings_file)
with open(os.path.join(targetdir, "embeddings.pkl"), "wb") as pkl_file:
pickle.dump(embed_matrix, pkl_file)
if __name__ == "__main__":
default_config = "../../config/preprocessing/snli_preprocessing.json"
parser = argparse.ArgumentParser(description="Preprocess the SNLI dataset")
parser.add_argument(
"--config",
default=default_config,
help="Path to a configuration file for preprocessing SNLI"
)
args = parser.parse_args()
script_dir = os.path.dirname(os.path.realpath(__file__))
if args.config == default_config:
config_path = os.path.join(script_dir, args.config)
else:
config_path = args.config
with open(os.path.normpath(config_path), "r") as cfg_file:
config = json.load(cfg_file)
preprocess_SNLI_data(
os.path.normpath(os.path.join(script_dir, config["data_dir"])),
os.path.normpath(os.path.join(script_dir, config["embeddings_file"])),
os.path.normpath(os.path.join(script_dir, config["target_dir"])),
lowercase=config["lowercase"],
ignore_punctuation=config["ignore_punctuation"],
num_words=config["num_words"],
stopwords=config["stopwords"],
labeldict=config["labeldict"],
bos=config["bos"],
eos=config["eos"]
)
|
the-stack_106_26148 | import logging
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional, Union
from transformers import Pipeline
from transformers.commands import BaseTransformersCLICommand
from transformers.pipelines import SUPPORTED_TASKS, pipeline
try:
from uvicorn import run
from fastapi import FastAPI, HTTPException, Body
from pydantic import BaseModel
_serve_dependancies_installed = True
except (ImportError, AttributeError):
BaseModel = object
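    # Provide minimal stand-ins so this module can still be imported when the serving extras (FastAPI, uvicorn, pydantic) are not installed.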
def Body(*x, **y):
pass
_serve_dependancies_installed = False
logger = logging.getLogger("transformers-cli/serving")
def serve_command_factory(args: Namespace):
"""
Factory function used to instantiate serving server from provided command line arguments.
:return: ServeCommand
"""
nlp = pipeline(
task=args.task,
model=args.model if args.model else None,
config=args.config,
tokenizer=args.tokenizer,
device=args.device,
)
return ServeCommand(nlp, args.host, args.port)
class ServeModelInfoResult(BaseModel):
"""
Expose model information
"""
infos: dict
class ServeTokenizeResult(BaseModel):
"""
Tokenize result model
"""
tokens: List[str]
tokens_ids: Optional[List[int]]
class ServeDeTokenizeResult(BaseModel):
"""
DeTokenize result model
"""
text: str
class ServeForwardResult(BaseModel):
"""
Forward result model
"""
output: Any
class ServeCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
"""
Register this command to argparse so it's available for the transformer-cli
:param parser: Root parser to register command-specific arguments
:return:
"""
serve_parser = parser.add_parser(
"serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
)
serve_parser.add_argument(
"--task", type=str, choices=SUPPORTED_TASKS.keys(), help="The task to run the pipeline on"
)
serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
serve_parser.add_argument(
"--device",
type=int,
default=-1,
help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
)
serve_parser.set_defaults(func=serve_command_factory)
def __init__(self, pipeline: Pipeline, host: str, port: int):
self._pipeline = pipeline
self._host = host
self._port = port
if not _serve_dependancies_installed:
raise ImportError(
"Using serve command requires FastAPI and unicorn. "
"Please install transformers with [serving]: pip install transformers[serving]."
"Or install FastAPI and unicorn separatly."
)
else:
logger.info("Serving model over {}:{}".format(host, port))
self._app = FastAPI()
# Register routes
self._app.add_api_route("/", self.model_info, response_model=ServeModelInfoResult, methods=["GET"])
self._app.add_api_route("/tokenize", self.tokenize, response_model=ServeTokenizeResult, methods=["POST"])
self._app.add_api_route(
"/detokenize", self.detokenize, response_model=ServeDeTokenizeResult, methods=["POST"]
)
self._app.add_api_route("/forward", self.forward, response_model=ServeForwardResult, methods=["POST"])
def run(self):
run(self._app, host=self._host, port=self._port)
def model_info(self):
return ServeModelInfoResult(infos=vars(self._pipeline.model.config))
def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
"""
Tokenize the provided input and eventually returns corresponding tokens id:
- **text_input**: String to tokenize
- **return_ids**: Boolean flags indicating if the tokens have to be converted to their integer mapping.
"""
try:
tokens_txt = self._pipeline.tokenizer.tokenize(text_input)
if return_ids:
tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
else:
return ServeTokenizeResult(tokens=tokens_txt)
except Exception as e:
raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})
def detokenize(
self,
tokens_ids: List[int] = Body(None, embed=True),
skip_special_tokens: bool = Body(False, embed=True),
cleanup_tokenization_spaces: bool = Body(True, embed=True),
):
"""
Detokenize the provided tokens ids to readable text:
- **tokens_ids**: List of tokens ids
- **skip_special_tokens**: Flag indicating to not try to decode special tokens
- **cleanup_tokenization_spaces**: Flag indicating to remove all leading/trailing spaces and intermediate ones.
"""
try:
decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
return ServeDeTokenizeResult(model="", text=decoded_str)
except Exception as e:
raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})
def forward(self, inputs: Union[str, dict, List[str], List[int], List[dict]] = Body(None, embed=True)):
"""
**inputs**:
**attention_mask**:
**tokens_type_ids**:
"""
# Check we don't have empty string
if len(inputs) == 0:
return ServeForwardResult(output=[], attention=[])
try:
# Forward through the model
output = self._pipeline(inputs)
return ServeForwardResult(output=output)
except Exception as e:
raise HTTPException(500, {"error": str(e)})
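# Minimal client sketch (a non-authoritative example; it assumes the server above is
# running on the default host/port and that the `requests` package is available;
# the payload keys mirror the Body(..., embed=True) parameters):
#   import requests
#   print(requests.get("http://localhost:8888/").json())
#   print(requests.post("http://localhost:8888/tokenize",
#                       json={"text_input": "Hello world", "return_ids": True}).json())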
|
the-stack_106_26151 | import logging
import sastvd.helpers.tokenise as svdt
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
def train_d2v(
train_corpus,
vector_size=300,
window=2,
min_count=5,
workers=4,
epochs=100,
dm_concat=1,
dm=1,
):
"""Train Doc2Vec model.
Doc2Vec.load(savedir / "d2v.model")
"""
logging.basicConfig(
format="%(asctime)s : %(levelname)s : %(message)s", level=logging.INFO
)
train_corpus = [
TaggedDocument(doc.split(), [i]) for i, doc in enumerate(train_corpus)
]
model = Doc2Vec(
vector_size=vector_size,
window=window,
min_count=min_count,
workers=workers,
epochs=epochs,
dm_concat=dm_concat,
dm=dm,
)
model.build_vocab(train_corpus)
model.train(train_corpus, total_examples=model.corpus_count, epochs=model.epochs)
return model
def load_d2v(path: str):
"""Load Doc2Vec model.
path = svd.processed_dir() / "bigvul/d2v_False"
"""
path = str(path)
if path.split("/")[-1] != "d2v.model":
path += "/d2v.model"
return Doc2Vec.load(path)
class D2V:
"""Doc2Vec class."""
def __init__(self, path: str):
"""Init class."""
self.model = load_d2v(path)
def infer(self, text: str):
"""Infer vector."""
text = svdt.tokenise(text)
return self.model.infer_vector(text.split())
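# Minimal usage sketch (a hedged example, not part of the original module; the toy
# corpus strings and the "d2v_example" output directory are hypothetical):
#   model = train_d2v(["int main ( )", "void foo ( int x )"], min_count=1, epochs=5)
#   model.save("d2v_example/d2v.model")   # the directory must already exist
#   vec = D2V("d2v_example").infer("int main ( )")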
|
the-stack_106_26152 | import asyncio
import time
import pandas as pd
def extract(file):
dtype_dict = {"Nr": "int",
"Kommunenavn": "string",
"Adm. senter": "string",
"Fylke": "category",
"Målform": "category",
"Domene": "string"
}
df = pd.read_csv(file, dtype=dtype_dict,low_memory=False)
return df
async def transform(df):
df["Lenke"] = "https://" + df["Domene"]
await load(df)
async def load(tdf):
tdf.to_csv("kommuner_lenker.csv", index=False)
await asyncio.sleep(0)
async def main():
pd.set_option("mode.chained_assignment", None)
file = "https://raw.githubusercontent.com/tobiasmcvey/kommunale-nettsider/main/kommuner.csv"
df = extract(file)
chunk_size = int(df.shape[0] / 4)
for start in range(0, df.shape[0], chunk_size):
df_subset = df.iloc[start:start + chunk_size]
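        # Each chunk is wrapped in a task and awaited immediately, so chunks are processed one at a time rather than concurrently.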
x = asyncio.create_task(transform(df_subset))
await x
start = time.time()
asyncio.run(main())
end=time.time()-start
print("execution time {} sec".format(end))
|
the-stack_106_26153 | import PIL
from PIL import Image, ImageOps, ImageDraw
import pandas as pd
import shutil
import os.path
import random
from pathlib import Path
############### CONFIGURE ########################
# Table Configure Variables
# Image Size Configuration
IMAGE_START_NUMBER = 1
IMAGE_END_NUMBER = 200
TABLE_IM_PIXEL = 480
TABLE_IM_WIDTH_NUMBER = 4
TABLE_IM_HEIGHT_NUMBER = 4
# Image Background Configuration
BACKGROUND_START_NUMBER = 1
BACKGROUND_END_NUMBER = 16
BACKGROUND_FOLDER = 'backgrounds'
BACKGROUND_IMAGE_FILE_NAME = '{}_background.jpg'
# Set input path and output path
INPUT_FOLDER = 'images'
INPUT_IMAGE_FILE_NAME = '{}_crop.png'
OUTPUT_FOLDER = 'data'
OUTPUT_IMAGE_FILE_NAME = '{}_table{}.jpg'
OUTPUT_CLONE_FOLDER = 'data/clone'
# Set REPETITION number of extraction
EXTRACT_OUTPUT_INDEX_MIN = 181
EXTRACT_OUTPUT_INDEX_MAX = 240
# REPETITION NUMBER = EXTRACT_OUTPUT_INDEX_MAX - EXTRACT_OUTPUT_INDEX_MIN + 1
# Toggle options
TOGGLE_BACKGROUND = True
TOGGLE_SHUFFLE_BACKGROUND = False
TOGGLE_SHUFFLE_IMAGE = True
TOGGLE_CSV_TO_SAVE_INDIVIDUAL = False
TOGGLE_CLONE_IMAGE_TO_SHOW = False
TOGGLE_CLONE_IMAGE_TO_SAVE = True
OUTPUT_CLONE_IMAGE_FILE_NAME = 'include_boundaries_{}_table{}.jpg'
# Set index of EXTRACT_MODE to OUTPUT_IMAGE_EXTRACT_MODE
# Default is same as 'all'
EXTRACT_MODE = ['default', 'all', 'odd', 'even' , 'random']
RANDOM_START_RANGE_MIN = 0
RANDOM_START_RANGE_MAX = 3
RANDOM_INCREMENT_RANGE_MIN = 2
RANDOM_INCREMENT_RANGE_MAX = 6
OUTPUT_IMAGE_EXTRACT_MODE = EXTRACT_MODE[4]
# Table Boundary Configure
BOUNDARY_PADDING_PIXEL = {'top': 4, 'bottom': 4, 'left': 4, 'right': 4}
# CSV Configure
LABEL = 'face'
OUTPUT_CSV_FILE_NAME = '{}_labels{}.csv'
# Extract Training(True) or Testing(False)?
DATA_USAGE = True
###################################################
start_step = 0
increment_step = 1
def check_image_with_pil(path):
try:
Image.open(path)
except IOError:
return False
return True
def show_table_image(tableImg):
tableImg.show()
def save_table_image(path , tableImg):
tableImg.save(path)
def save_boundaries_to_csv(path, input_image_list):
column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
images_df = pd.DataFrame(input_image_list, columns=column_name)
images_df.to_csv(path, index=None)
def append_boundary_to_csv(output_image_list, filename, width, height, label, xmin, ymin, xmax, ymax):
value = (filename, width, height, label, xmin, ymin, xmax, ymax)
output_image_list.append(value)
def extract(repeat_index, background_index, all_image_list):
if DATA_USAGE:
usage = 'train'
else:
usage = 'test'
image_list = []
    SOURCE_IM_PIXEL = TABLE_IM_PIXEL // TABLE_IM_WIDTH_NUMBER  # integer cell size so PIL paste/crop offsets stay ints
tableImage = Image.new('RGB', (TABLE_IM_PIXEL,TABLE_IM_PIXEL))
IMAGES_COUNT = IMAGE_START_NUMBER
clone_tableImage = Image.new('RGB', (TABLE_IM_PIXEL,TABLE_IM_PIXEL))
DrawImg = ImageDraw.Draw(clone_tableImage)
if TOGGLE_BACKGROUND:
background = Image.open('{}/{}'.format(BACKGROUND_FOLDER, BACKGROUND_IMAGE_FILE_NAME.format(background_index)))
background = background.resize((TABLE_IM_PIXEL, TABLE_IM_PIXEL), PIL.Image.ANTIALIAS)
tableImage.paste(background, (0, 0))
clone_tableImage.paste(background, (0, 0))
if not RANDOM_INCREMENT_RANGE_MIN > 0 or not RANDOM_INCREMENT_RANGE_MAX > RANDOM_INCREMENT_RANGE_MIN:
print('RANDOM_INCREMENT_RANGE should be set properly')
return
for directory in [INPUT_FOLDER]:
for i in range(0, TABLE_IM_WIDTH_NUMBER):
start_step = 0
increment_step = 1
if OUTPUT_IMAGE_EXTRACT_MODE == 'all':
start_step = 0
increment_step = 1
elif OUTPUT_IMAGE_EXTRACT_MODE == 'odd':
if i % 2 == 0:
start_step = 1
else:
start_step = 0
increment_step = 2
elif OUTPUT_IMAGE_EXTRACT_MODE == 'even':
if i % 2 == 0:
start_step = 0
else:
start_step = 1
increment_step = 2
elif OUTPUT_IMAGE_EXTRACT_MODE == 'random':
start_step = random.randrange(RANDOM_START_RANGE_MIN, RANDOM_START_RANGE_MAX)
increment_step = random.randrange(RANDOM_INCREMENT_RANGE_MIN, RANDOM_INCREMENT_RANGE_MAX)
for j in range(start_step, TABLE_IM_HEIGHT_NUMBER, increment_step):
# Open image on images directory
if TOGGLE_SHUFFLE_IMAGE:
IMAGES_COUNT = random.randrange(IMAGE_START_NUMBER, IMAGE_END_NUMBER)
else:
IMAGES_COUNT = IMAGES_COUNT + 1
# If image is not exist on folder
while not check_image_with_pil('{}/{}'.format(directory, INPUT_IMAGE_FILE_NAME.format(IMAGES_COUNT))):
# Skip to next index
if TOGGLE_SHUFFLE_IMAGE:
IMAGES_COUNT = random.randrange(IMAGE_START_NUMBER, IMAGE_END_NUMBER)
else:
IMAGES_COUNT = IMAGES_COUNT + 1
# If image index is overwhelmed the end number
if IMAGES_COUNT > IMAGE_END_NUMBER:
                        # Save process
save_table_image('{}/{}'.format(OUTPUT_FOLDER, OUTPUT_IMAGE_FILE_NAME.format(usage,repeat_index)), tableImage)
print('Successfully save images to table')
if TOGGLE_CSV_TO_SAVE_INDIVIDUAL:
csv_path = '{}/{}'.format(OUTPUT_FOLDER, OUTPUT_CSV_FILE_NAME.format(usage,repeat_index))
save_boundaries_to_csv(csv_path, image_list)
print('Successfully save boundaries to csv')
if TOGGLE_CLONE_IMAGE_TO_SAVE:
save_table_image('{}/{}'.format(OUTPUT_CLONE_FOLDER, OUTPUT_CLONE_IMAGE_FILE_NAME.format(usage,repeat_index)), clone_tableImage)
# Show process
if TOGGLE_CLONE_IMAGE_TO_SHOW:
show_table_image(clone_tableImage)
print('End of file is {}'.format(INPUT_IMAGE_FILE_NAME.format(IMAGES_COUNT)))
# End of script
return
im = Image.open('{}/{}'.format(directory, INPUT_IMAGE_FILE_NAME.format(IMAGES_COUNT)))
im = ImageOps.expand(im, border=(int)(SOURCE_IM_PIXEL*0.01), fill='white')
im = im.resize((TABLE_IM_PIXEL, TABLE_IM_PIXEL), PIL.Image.ANTIALIAS)
im.thumbnail((SOURCE_IM_PIXEL, SOURCE_IM_PIXEL))
xmin = (j * SOURCE_IM_PIXEL) + BOUNDARY_PADDING_PIXEL['left']
ymin = (i * SOURCE_IM_PIXEL) + BOUNDARY_PADDING_PIXEL['top']
xmax = (j * SOURCE_IM_PIXEL) + SOURCE_IM_PIXEL - BOUNDARY_PADDING_PIXEL['right']
ymax = (i * SOURCE_IM_PIXEL) + SOURCE_IM_PIXEL - BOUNDARY_PADDING_PIXEL['bottom']
append_boundary_to_csv(image_list,
OUTPUT_IMAGE_FILE_NAME.format(usage, repeat_index),
TABLE_IM_PIXEL,
TABLE_IM_PIXEL,
LABEL,
xmin,
ymin,
xmax,
ymax)
append_boundary_to_csv(all_image_list,
OUTPUT_IMAGE_FILE_NAME.format(usage, repeat_index),
TABLE_IM_PIXEL,
TABLE_IM_PIXEL,
LABEL,
xmin,
ymin,
xmax,
ymax)
tableImage.paste(im, ((j * SOURCE_IM_PIXEL),(i * SOURCE_IM_PIXEL)))
clone_tableImage.paste(im, ((j * SOURCE_IM_PIXEL),(i * SOURCE_IM_PIXEL)))
DrawImg.rectangle([(xmin, ymin), (xmax, ymax)], fill=None, outline='green')
# Save process
save_table_image('{}/{}'.format(OUTPUT_FOLDER, OUTPUT_IMAGE_FILE_NAME.format(usage,repeat_index)), tableImage)
print('Successfully save images to table')
if TOGGLE_CSV_TO_SAVE_INDIVIDUAL:
csv_path = '{}/{}'.format(OUTPUT_FOLDER, OUTPUT_CSV_FILE_NAME.format(usage,repeat_index))
save_boundaries_to_csv(csv_path, image_list)
print('Successfully save boundaries to csv')
if TOGGLE_CLONE_IMAGE_TO_SAVE:
save_table_image('{}/{}'.format(OUTPUT_CLONE_FOLDER, OUTPUT_CLONE_IMAGE_FILE_NAME.format(usage,repeat_index)), clone_tableImage)
# Show process
if TOGGLE_CLONE_IMAGE_TO_SHOW:
show_table_image(clone_tableImage)
print('End of file is {}'.format(INPUT_IMAGE_FILE_NAME.format(IMAGES_COUNT)))
# End of Script
def main():
if not EXTRACT_OUTPUT_INDEX_MIN > 0 or not EXTRACT_OUTPUT_INDEX_MAX >= EXTRACT_OUTPUT_INDEX_MIN:
print('EXTRACT_OUTPUT_INDEX should be set properly')
return
background_index = 0
image_list = []
for i in range(EXTRACT_OUTPUT_INDEX_MIN, EXTRACT_OUTPUT_INDEX_MAX + 1):
if TOGGLE_SHUFFLE_BACKGROUND:
background_index = random.randrange(BACKGROUND_START_NUMBER, BACKGROUND_END_NUMBER)
else:
background_index = background_index + 1;
if(background_index >= BACKGROUND_END_NUMBER):
background_index = BACKGROUND_START_NUMBER
extract(i, background_index, image_list)
if DATA_USAGE:
usage = 'train'
else:
usage = 'test'
csv_path = '{}/{}'.format(OUTPUT_FOLDER, OUTPUT_CSV_FILE_NAME.format(usage, ''))
save_boundaries_to_csv(csv_path, image_list)
main()
|
the-stack_106_26154 | from typing import Any, Callable, Dict, List, Optional, Type, TypeVar, Union
import attr
from ..types import UNSET, Unset
from ..util.serialization import is_not_none
T = TypeVar("T", bound="EndpointAddresses")
@attr.s(auto_attribs=True)
class EndpointAddresses:
"""Addresses at which an endpoint is reachable over the network.
Attributes:
grpc (Union[Unset, str]):
rpcq (Union[Unset, str]):
"""
grpc: Union[Unset, str] = UNSET
rpcq: Union[Unset, str] = UNSET
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self, pick_by_predicate: Optional[Callable[[Any], bool]] = is_not_none) -> Dict[str, Any]:
grpc = self.grpc
rpcq = self.rpcq
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update({})
if grpc is not UNSET:
field_dict["grpc"] = grpc
if rpcq is not UNSET:
field_dict["rpcq"] = rpcq
field_dict = {k: v for k, v in field_dict.items() if v != UNSET}
if pick_by_predicate is not None:
field_dict = {k: v for k, v in field_dict.items() if pick_by_predicate(v)}
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
grpc = d.pop("grpc", UNSET)
rpcq = d.pop("rpcq", UNSET)
endpoint_addresses = cls(
grpc=grpc,
rpcq=rpcq,
)
endpoint_addresses.additional_properties = d
return endpoint_addresses
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
|
the-stack_106_26156 | from PySide2.QtUiTools import QUiLoader
from PySide2.QtCore import QFile
from PyQt5 import QtCore, QtGui, uic, QtWidgets
import sys
import cv2
import threading
import queue
app = QtWidgets.QApplication(sys.argv)
running = False
capture_thread = None
q = queue.Queue()
form_class = uic.loadUiType("test.ui")
print(form_class)
form_class = form_class[0]
"""
print(form_class)
ui_file = QFile("test.ui")
print(ui_file)
ui_file.open(QFile.ReadOnly)
loader = QUiLoader()
print(loader)
window = loader.load(ui_file)
print(window)
"""
def grab(cam, queue, width, height, fps):
global running #sets global variable running
capture = cv2.VideoCapture(cam)
#grabs properties of the camera
capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
capture.set(cv2.CAP_PROP_FPS, fps)
while(running):
frame = {}
capture.grab()
retval, img = capture.retrieve(0)
frame["img"] = img
if queue.qsize() < 10:
queue.put(frame)
else:
print(queue.qsize())
class OwnImageWidget(QtWidgets.QWidget):
def __init__(self, parent=None):
super(OwnImageWidget, self).__init__(parent) #inherits from QtWidgets.QWidget class
self.image = None
def setImage(self, image):
self.image = image
sz = image.size() #gets QSize
self.setMinimumSize(sz) #The widget cannot be resized to a smaller size than the minimum widget size
self.update() #This function does not cause an immediate repaint; instead it schedules a PAINTEVENT for processing when Qt returns to the main event loop.
def paintEvent(self, event):
qp = QtGui.QPainter() #QPainter provides highly optimized functions to do most of the drawing GUI programs require, this builts the object
qp.begin(self) #Begins painting the paint device and returns true if successful; otherwise returns false.
if self.image:
qp.drawImage(QtCore.QPoint(0, 0), self.image) #Draws the given image(self.image) at the given point (0,0)
qp.end()
class MyWindowClass(QtWidgets.QMainWindow, form_class):
def __init__(self, parent=None):
QtWidgets.QMainWindow.__init__(self, parent) #parent = QMainWindow, inherits its methods
self.setupUi(self) #setup MyWindowClass by reading properties of QMainWindow
self.startButton.clicked.connect(self.start_clicked) #Call start_clicked method if you click the button
#As MyWindowClass object has inherited everything from QMainWindow, we can modify this "cloned" object
self.window_width = self.ImgWidget.frameSize().width() #Read QSize width property frameSize of ImgWidget object
self.window_height = self.ImgWidget.frameSize().height() #Read QSize height property frameSize of ImgWidget object
self.ImgWidget = OwnImageWidget(self.ImgWidget) #Initiate OwnImageWidget object from ImgWidget object
self.timer = QtCore.QTimer(self) #Setup Qtimer object
self.timer.timeout.connect(self.update_frame) #Sets signal connection of Timeout to run update_frame method
self.timer.start(1) #Sets the timer to start with a timeout of 1 milisecond
def start_clicked(self):
global running #Sets running var as global
running = True #Sets it to True
capture_thread.start() #Starts capture_thread threading object (with args and all)
self.startButton.setEnabled(False) #Greys out the startButton
self.startButton.setText('Starting...') #Sets the text to Starting before the camera feed connects
def update_frame(self):
if not q.empty(): #Checks if the algorithm has some frames grabbed in the buffer
self.startButton.setText('Camera is live') #Sets the startButton text to different text
frame = q.get() #Grabs the frame from the queue buffer
img = frame["img"] #Gets the array under "img" key
img_height, img_width, img_colors = img.shape #reads array shape
scale_w = float(self.window_width) / float(img_width) #scales img_width based on window_width
scale_h = float(self.window_height) / float(img_height) #scales img_height based on windwo_height
scale = min([scale_w, scale_h]) #scales the minimum of width/height to preserve aspect ratio
if scale == 0:
scale = 1 #prevents the image to congregate upon a point
img = cv2.resize(img, None, fx=scale, fy=scale, interpolation = cv2.INTER_CUBIC) #resizes the img by scale
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) #convert to RGB
height, width, bpc = img.shape #bpc Bytes per channel
bpl = bpc * width #bpl Bytes per line
image = QtGui.QImage(img.data, width, height, bpl, QtGui.QImage.Format_RGB888) #creates an QImage object
self.ImgWidget.setImage(image) #displays an QImage object
def closeEvent(self, event):
global running
running = False
capture_thread = threading.Thread(target=grab, args = (0, q, 1920, 1080, 30))
w = MyWindowClass(None)
w.setWindowTitle('USB camera Test')
w.show()
app.exec_()
|
the-stack_106_26157 | """Tests queue_info.py"""
from __future__ import absolute_import
import tempfile
import unittest
import mock
from botocore.exceptions import ClientError
from kale import queue_info
from kale import settings
from kale import sqs
class QueueInfoTest(unittest.TestCase):
"""Tests for QueueInfo class."""
_previous_region = None
def setUp(self):
self._previous_region = settings.AWS_REGION
settings.AWS_REGION = 'us-east-1'
def tearDown(self):
settings.AWS_REGION = self._previous_region
test_string = ('default: \n'
' name: default\n'
' priority: 10\n'
' batch_size: 10\n'
' visibility_timeout_sec: 5\n'
' long_poll_time_sec: 5\n'
' num_iterations: 2\n'
'digest:\n'
' name: digest\n'
' priority: 22\n'
' batch_size: 11\n'
' visibility_timeout_sec: 55\n'
' long_poll_time_sec: 65\n'
' num_iterations: 13\n'
'lowp:\n'
' name: lowp\n'
' priority: 1\n'
' batch_size: 10\n'
' visibility_timeout_sec: 5\n'
' long_poll_time_sec: 5\n'
' num_iterations: 2\n')
def test_get_queues_from_config(self):
"""Success case for get_queues_from_config.
        No separate failure case is tested; a bad config is expected to fail loudly.
"""
queue_config = tempfile.NamedTemporaryFile(delete=True)
queue_config.write(self.test_string.encode('utf8'))
queue_config.seek(0)
queues = queue_info.QueueInfo._get_queues_from_config(
queue_config.name, queue_info.TaskQueue)
queue_config.close()
self.assertEquals(len(queues), 3)
self.assertEquals(queues[0].name, 'digest')
self.assertEquals(queues[0].priority, 22)
self.assertEquals(queues[0].batch_size, 11)
self.assertEquals(queues[0].visibility_timeout_sec, 55)
self.assertEquals(queues[0].long_poll_time_sec, 65)
self.assertEquals(queues[0].num_iterations, 13)
self.assertEquals(queues[1].name, 'default')
self.assertEquals(queues[2].name, 'lowp')
def _build_queue_info(self):
sqs_inst = sqs.SQSTalk()
queue_config = tempfile.NamedTemporaryFile(delete=True)
queue_config.write(self.test_string.encode('utf8'))
queue_config.seek(0)
queue_info.QueueInfo._queues = None
queue_info.QueueInfo._simple_name_queues_map = None
qinfo = queue_info.QueueInfo(queue_config.name, sqs_inst,
queue_info.TaskQueue)
return qinfo
def test_queues(self):
qinfo = self._build_queue_info()
queues = qinfo.get_queues()
self.assertEquals(len(queues), 3)
# TODO (wenbin): add a separate test case for
# get_highest_priority_non_empty_queue.
def test_not_implemented_ops(self):
queue_info_base = queue_info.QueueInfoBase()
with self.assertRaises(NotImplementedError):
queue_info_base.get_queues()
with self.assertRaises(NotImplementedError):
queue_info_base.get_highest_priority_queue_that_needs_work()
with self.assertRaises(NotImplementedError):
queue_info_base.is_queue_empty(mock.MagicMock())
with self.assertRaises(NotImplementedError):
queue_info_base.does_queue_need_work(mock.MagicMock())
def test_does_queue_need_work_empty(self):
with mock.patch.object(queue_info.QueueInfo, 'is_queue_empty', return_value=True):
qinfo = self._build_queue_info()
self.assertFalse(qinfo.does_queue_need_work(None))
def test_does_queue_need_work_non_empty(self):
with mock.patch.object(queue_info.QueueInfo, 'is_queue_empty', return_value=False):
qinfo = self._build_queue_info()
self.assertTrue(qinfo.does_queue_need_work(None))
def test_does_queue_need_work_rate_limited(self):
rate_limit_exception = ClientError(
{'Error': {'Code': 'ThrottlingException'}}, 'get_queue_url')
with mock.patch.object(
queue_info.QueueInfo, 'is_queue_empty', side_effect=rate_limit_exception):
qinfo = self._build_queue_info()
self.assertTrue(qinfo.does_queue_need_work(None))
|
the-stack_106_26158 | import json
import stat
import datetime
import base64
import re
import tarfile
import io
from connexion import request
from anchore_engine import utils
import anchore_engine.apis
from anchore_engine.apis.authorization import get_authorizer, RequestingAccountValue, ActionBoundPermission
from anchore_engine.apis.context import ApiRequestContextProxy
from anchore_engine.apis import exceptions as api_exceptions
from anchore_engine.clients.services.policy_engine import PolicyEngineClient
from anchore_engine.clients.services.catalog import CatalogClient
from anchore_engine.clients.services import internal_client_for
import anchore_engine.common
from anchore_engine.common.helpers import make_response_error, make_anchore_exception, make_eval_record, make_policy_record, make_response_routes
import anchore_engine.common.images
import anchore_engine.configuration.localconfig
from anchore_engine.subsys import taskstate, logger
import anchore_engine.subsys.metrics
from anchore_engine.utils import parse_dockerimage_string
from anchore_engine.services.apiext.api.controllers.utils import normalize_image_add_source, validate_image_add_source
from anchore_engine.services.apiext.api import helpers
from anchore_engine.subsys.metrics import flask_metrics
authorizer = get_authorizer()
def make_cvss_scores(metrics):
"""
[
{
"cvss_v2": {
"base_metrics": {
...
},
"vector_string": "AV:N/AC:L/Au:N/C:P/I:P/A:P",
"version": "2.0"
},
"cvss_v3": {
"base_metrics": {
...
},
"vector_string": "CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H",
"version": "3.0"
},
"id": "CVE-2019-1234"
},
{
"cvss_v2": {
"base_metrics": {
...
},
"vector_string": "AV:N/AC:L/Au:N/C:P/I:P/A:P",
"version": "2.0"
},
"cvss_v3": {
"base_metrics": {
...
},
"vector_string": "CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H",
"version": "3.0"
},
"id": "CVE-2019-3134"
},
]
:param metrics:
:return:
"""
score_list = []
for metric in metrics:
new_score_packet = {
'id': metric.get('id'),
}
score_list.append(new_score_packet)
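        # Collect base/exploitability/impact scores for CVSS v3 and v2, falling back to -1.0 when a score is missing.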
for i in [3, 2]:
cvss_dict = metric.get('cvss_v{}'.format(i), {})
base_metrics = cvss_dict.get('base_metrics', {}) if cvss_dict else {}
tmp = base_metrics.get('base_score', -1.0)
base_score = float(tmp) if tmp else -1.0
tmp = base_metrics.get('exploitability_score', -1.0)
exploitability_score = float(tmp) if tmp else -1.0
tmp = base_metrics.get('impact_score', -1.0)
impact_score = float(tmp) if tmp else -1.0
new_score_packet['cvss_v{}'.format(i)] = {
'base_score': base_score,
'exploitability_score': exploitability_score,
'impact_score': impact_score
}
return score_list
def make_response_vulnerability(vulnerability_type, vulnerability_data):
ret = []
if not vulnerability_data:
logger.warn("empty query data given to format - returning empty result")
return ret
eltemplate = {
'vuln': 'None',
'severity': 'None',
'url': 'None',
'fix': 'None',
'package': 'None',
'package_name': 'None',
'package_version': 'None',
'package_type': 'None',
'package_cpe': 'None',
'package_cpe23': 'None',
'package_path': 'None',
'feed': 'None',
'feed_group': 'None',
'nvd_data': 'None',
'vendor_data': 'None'
}
osvulns = []
nonosvulns = []
keymap = {
'vuln': 'CVE_ID',
'severity': 'Severity',
'package': 'Vulnerable_Package',
'fix': 'Fix_Available',
'url': 'URL',
'package_type': 'Package_Type',
'feed': 'Feed',
'feed_group': 'Feed_Group',
'package_name': 'Package_Name',
'package_path': 'Package_Path',
'package_version': 'Package_Version',
}
id_cves_map = {}
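    # NVD ids already covered by OS rows or vulndb records; matching non-OS entries are dropped in the de-dup pass below.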
scan_result = vulnerability_data['legacy_report']
try:
for imageId in list(scan_result.keys()):
header = scan_result[imageId]['result']['header']
rows = scan_result[imageId]['result']['rows']
for row in rows:
el = {}
el.update(eltemplate)
for k in list(keymap.keys()):
try:
el[k] = row[header.index(keymap[k])]
except:
el[k] = 'None'
# conversions
if el[k] == 'N/A':
el[k] = 'None'
if el['package_type'].lower() in anchore_engine.common.os_package_types:
osvulns.append(el)
else:
nonosvulns.append(el)
el['nvd_data'] = []
el['vendor_data'] = []
if row[header.index('CVES')]:
all_data = json.loads(row[header.index('CVES')]) # {'nvd_data': [], 'vendor_data': []}
el['nvd_data'] = make_cvss_scores(all_data.get('nvd_data', []))
el['vendor_data'] = make_cvss_scores(all_data.get('vendor_data', []))
for nvd_el in el['nvd_data']:
id_cves_map[nvd_el.get('id')] = el.get('vuln')
except Exception as err:
logger.exception('could not prepare query response')
logger.warn("could not prepare query response - exception: " + str(err))
ret = []
#non-os CPE search
keymap = {
'vuln': 'vulnerability_id',
'severity': 'severity',
'package_name': 'name',
'package_version': 'version',
'package_path': 'pkg_path',
'package_type': 'pkg_type',
'package_cpe': 'cpe',
'package_cpe23': 'cpe23',
'url': 'link',
'feed': 'feed_name',
'feed_group': 'feed_namespace',
}
scan_result = vulnerability_data['cpe_report']
for vuln in scan_result:
el = {}
el.update(eltemplate)
for k in list(keymap.keys()):
el[k] = vuln[keymap[k]]
if vuln['name'] != vuln['version']:
pkg_final = "{}-{}".format(vuln['name'], vuln['version'])
else:
pkg_final = vuln['name']
el['package'] = pkg_final
# get nvd scores
el['nvd_data'] = []
el['nvd_data'] = make_cvss_scores(vuln.get('nvd_data', []))
# get vendor scores
el['vendor_data'] = []
el['vendor_data'] = make_cvss_scores(vuln.get('vendor_data', []))
fixed_in = vuln.get('fixed_in', [])
el['fix'] = ', '.join(fixed_in) if fixed_in else 'None'
# dedup logic for filtering nvd cpes that are referred by vulndb
if vuln.get('feed_name') == 'vulndb':
for nvd_item in vuln.get('nvd_data', []):
try:
id_cves_map[nvd_item.get('id')] = el.get('vuln')
except Exception as err:
logger.warn('failure during vulnerability dedup check (vulndbs over nvd) with {}'.format(err))
nonosvulns.append(el)
# perform a de-dup pass
final_nonosvulns = []
for v in nonosvulns:
include = True
try:
if v.get('vuln') in id_cves_map:
include = False
except Exception as err:
logger.warn("failure during vulnerability dedup check: {}".format(str(err)))
if include:
final_nonosvulns.append(v)
if vulnerability_type == 'os':
ret = osvulns
elif vulnerability_type == 'non-os':
ret = final_nonosvulns
elif vulnerability_type == 'all':
ret = osvulns + final_nonosvulns
else:
ret = vulnerability_data
return ret
def make_response_policyeval(eval_record, params, catalog_client):
ret = {}
try:
tag = eval_record['tag']
ret[tag] = {}
if eval_record['evalId'] and eval_record['policyId']:
ret[tag]['detail'] = {}
if params and 'detail' in params and params['detail']:
eval_data = eval_record['result']
ret[tag]['detail']['result'] = eval_data
bundle_data = catalog_client.get_document('policy_bundles', eval_record['policyId'])
ret[tag]['detail']['policy'] = bundle_data
ret[tag]['policyId'] = eval_record['policyId']
if eval_record['final_action'].upper() in ['GO', 'WARN']:
ret[tag]['status'] = 'pass'
else:
ret[tag]['status'] = 'fail'
ret[tag]['last_evaluation'] = datetime.datetime.utcfromtimestamp(eval_record['created_at']).isoformat() + 'Z'
else:
ret[tag]['policyId'] = "N/A"
ret[tag]['final_action'] = "fail"
ret[tag]['last_evaluation'] = "N/A"
ret[tag]['detail'] = {}
except Exception as err:
raise Exception("failed to format policy eval response: " + str(err))
return ret
def make_response_image(image_record, include_detail=True):
ret = image_record
image_content = {'metadata': {}}
for key in ['arch', 'distro', 'distro_version', 'dockerfile_mode', 'image_size', 'layer_count']:
val = image_record.pop(key, None)
image_content['metadata'][key] = val
image_record['image_content'] = image_content
if image_record['annotations']:
try:
annotation_data = json.loads(image_record['annotations'])
image_record['annotations'] = annotation_data
except:
pass
# try to assemble full strings
if image_record and 'image_detail' in image_record:
for image_detail in image_record['image_detail']:
try:
image_detail['fulldigest'] = image_detail['registry'] + "/" + image_detail['repo'] + "@" + image_detail[
'digest']
image_detail['fulltag'] = image_detail['registry'] + "/" + image_detail['repo'] + ":" + image_detail[
'tag']
except:
image_detail['fulldigest'] = None
image_detail['fulltag'] = None
for removekey in ['record_state_val', 'record_state_key']:
image_detail.pop(removekey, None)
for datekey in ['last_updated', 'created_at', 'tag_detected_at']:
try:
image_detail[datekey] = datetime.datetime.utcfromtimestamp(image_detail[datekey]).isoformat() + 'Z'
except:
pass
if not include_detail:
image_record['image_detail'] = []
for datekey in ['last_updated', 'created_at', 'analyzed_at']:
try:
image_record[datekey] = datetime.datetime.utcfromtimestamp(image_record[datekey]).isoformat() +'Z'
except:
pass
for removekey in ['record_state_val', 'record_state_key']:
image_record.pop(removekey, None)
return ret
def lookup_imageDigest_from_imageId(request_inputs, imageId):
user_auth = request_inputs['auth']
method = request_inputs['method']
bodycontent = request_inputs['bodycontent']
params = request_inputs['params']
userId = request_inputs['userId']
ret = None
try:
client = internal_client_for(CatalogClient, request_inputs['userId'])
image_records = client.get_image_by_id(imageId=imageId)
if image_records:
image_record = image_records[0]
imageDigest = image_record['imageDigest']
ret = imageDigest
except Exception as err:
logger.debug("operation exception: " + str(err))
raise err
return ret
def vulnerability_query(account, digest, vulnerability_type, force_refresh=False, vendor_only=True, doformat=False):
# user_auth = request_inputs['auth']
# method = request_inputs['method']
# bodycontent = request_inputs['bodycontent']
# params = request_inputs['params']
return_object = {}
httpcode = 500
# userId = request_inputs['userId']
localconfig = anchore_engine.configuration.localconfig.get_config()
system_user_auth = localconfig['system_user_auth']
verify = localconfig['internal_ssl_verify']
# force_refresh = params.get('force_refresh', False)
# vendor_only = params.get('vendor_only', True)
try:
if vulnerability_type not in anchore_engine.common.image_vulnerability_types + ['all']:
httpcode = 404
raise Exception("content type ("+str(vulnerability_type)+") not available")
# tag = params.pop('tag', None)
# imageDigest = params.pop('imageDigest', None)
# digest = params.pop('digest', None)
catalog_client = internal_client_for(CatalogClient, account)
image_report = catalog_client.get_image(digest)
if image_report and image_report['analysis_status'] != taskstate.complete_state('analyze'):
httpcode = 404
raise Exception("image is not analyzed - analysis_status: " + image_report['analysis_status'])
imageDigest = image_report['imageDigest']
try:
image_detail = image_report['image_detail'][0]
imageId = image_detail['imageId']
client = internal_client_for(PolicyEngineClient, account)
resp = client.get_image_vulnerabilities(user_id=account, image_id=imageId, force_refresh=force_refresh, vendor_only=vendor_only)
if doformat:
ret = make_response_vulnerability(vulnerability_type, resp)
return_object[imageDigest] = ret
else:
return_object[imageDigest] = resp
httpcode = 200
except Exception as err:
httpcode = 500
raise Exception("could not fetch vulnerabilities - exception: " + str(err))
httpcode = 200
except Exception as err:
return_object = make_response_error(err, in_httpcode=httpcode)
httpcode = return_object['httpcode']
return return_object, httpcode
def get_content(request_inputs, content_type, doformat=False):
user_auth = request_inputs['auth']
method = request_inputs['method']
bodycontent = request_inputs['bodycontent']
params = request_inputs['params']
return_object = {}
httpcode = 500
userId, pw = user_auth
try:
localconfig = anchore_engine.configuration.localconfig.get_config()
all_content_types = localconfig.get('image_content_types', []) + localconfig.get('image_metadata_types', [])
if content_type not in all_content_types:
httpcode = 404
raise Exception("content type ("+str(content_type)+") not available")
tag = params.pop('tag', None)
imageDigest = params.pop('imageDigest', None)
digest = params.pop('digest', None)
logger.debug('Request inputs: {}'.format(request_inputs))
client = internal_client_for(CatalogClient, request_inputs['userId'])
image_report = client.get_image(imageDigest)
if image_report and image_report['analysis_status'] != taskstate.complete_state('analyze'):
httpcode = 404
raise Exception("image is not analyzed - analysis_status: " + image_report['analysis_status'])
imageDigest = image_report['imageDigest']
if content_type == 'manifest':
try:
image_manifest_data = client.get_document('manifest_data', imageDigest)
except Exception as err:
raise make_anchore_exception(err, input_message="cannot fetch content data {} from archive".format(content_type), input_httpcode=500)
image_content_data = {
'manifest': image_manifest_data
}
else:
try:
image_content_data = client.get_document('image_content_data', imageDigest)
except Exception as err:
raise make_anchore_exception(err, input_message="cannot fetch content data from archive", input_httpcode=500)
# special handler for dockerfile contents from old method to new
if content_type == 'dockerfile' and not image_content_data.get('dockerfile', None):
try:
if image_report.get('dockerfile_mode', None) == 'Actual':
for image_detail in image_report.get('image_detail', []):
if image_detail.get('dockerfile', None):
logger.debug("migrating old dockerfile content form into new")
image_content_data['dockerfile'] = utils.ensure_str(base64.decodebytes(utils.ensure_bytes(image_detail.get('dockerfile', ""))))
client.put_document(user_auth, 'image_content_data', imageDigest, image_content_data)
break
except Exception as err:
logger.warn("cannot fetch/decode dockerfile contents from image_detail - {}".format(err))
if content_type not in image_content_data:
httpcode = 404
raise Exception("image content of type ("+str(content_type)+") was not an available type at analysis time for this image")
return_object[imageDigest] = helpers.make_image_content_response(content_type, image_content_data[content_type])
httpcode = 200
except Exception as err:
logger.exception('Failed content lookup')
return_object = make_response_error(err, in_httpcode=httpcode)
httpcode = return_object['httpcode']
return return_object, httpcode
# repositories
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def add_repository(repository=None, autosubscribe=False, dryrun=False):
try:
request_inputs = anchore_engine.apis.do_request_prep(request, default_params={'autosubscribe':autosubscribe, 'repository':repository, 'dryrun':dryrun})
return_object, httpcode = repositories(request_inputs)
except Exception as err:
httpcode = 500
return_object = str(err)
return return_object, httpcode
def repositories(request_inputs):
method = request_inputs['method']
bodycontent = request_inputs['bodycontent']
params = request_inputs['params']
return_object = {}
httpcode = 500
input_repo = None
if params and 'repository' in params:
input_repo = params['repository']
autosubscribe = False
if params and 'autosubscribe' in params:
autosubscribe = params['autosubscribe']
lookuptag = None
if params and 'lookuptag' in params:
lookuptag = params['lookuptag']
dryrun = False
if params and 'dryrun' in params:
dryrun = params['dryrun']
try:
if method == 'POST':
logger.debug("handling POST: ")
try:
client = internal_client_for(CatalogClient, request_inputs['userId'])
return_object = []
repo_records = client.add_repo(regrepo=input_repo, autosubscribe=autosubscribe, lookuptag=lookuptag, dryrun=dryrun)
for repo_record in repo_records:
return_object.append(repo_record)
httpcode = 200
except Exception as err:
raise err
except Exception as err:
logger.debug("operation exception: " + str(err))
return_object = make_response_error(err, in_httpcode=httpcode)
httpcode = return_object['httpcode']
return return_object, httpcode
# images CRUD
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def list_imagetags():
try:
request_inputs = anchore_engine.apis.do_request_prep(request, default_params={})
user_auth = request_inputs['auth']
method = request_inputs['method']
bodycontent = request_inputs['bodycontent']
params = request_inputs['params']
return_object = {}
httpcode = 500
client = internal_client_for(CatalogClient, request_inputs['userId'])
return_object = client.get_imagetags()
httpcode = 200
except Exception as err:
httpcode = 500
return_object = str(err)
return return_object, httpcode
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def import_image_archive(archive_file):
httpcode = 500
try:
request_inputs = anchore_engine.apis.do_request_prep(request, default_params={})
request_account = request_inputs['userId']
# TODO perform the archive format validation here, for now just a READ
try:
archive_buffer = archive_file.read()
except Exception as err:
httpcode = 409
raise Exception("invalid archive format (must be an image archive tar.gz generated by anchore) - exception: {}".format(err))
# get some information out of the archive for input validation
archive_account = None
archive_digest = None
with tarfile.open(fileobj=io.BytesIO(archive_buffer), format=tarfile.PAX_FORMAT) as TFH:
try:
with TFH.extractfile("archive_manifest") as AMFH:
archive_manifest = json.loads(utils.ensure_str(AMFH.read()))
archive_account = archive_manifest['account']
archive_digest = archive_manifest['image_digest']
except Exception as err:
httpcode = 409
                raise Exception("cannot extract/parse archive_manifest from archive file - exception: {}".format(err))
        # removed the below validation check as the checks are now performed in the archiving subsystem, based on the authenticated account
# perform verification that the account set in the archive matches the calling account namespace
# if (not request_account or not archive_account) or (request_account != archive_account):
# httpcode = 409
# raise Exception ("account in import archive ({}) does not match API request account ({})".format(archive_account, request_account))
# make the import call to the catalog
client = internal_client_for(CatalogClient, request_inputs['userId'])
catalog_import_return_object = client.import_archive(archive_digest, io.BytesIO(archive_buffer))
        # finally grab the image record from the catalog, prep the response and return
image_record = client.get_image(archive_digest)
return_object = [make_response_image(image_record, include_detail=True)]
httpcode = 200
except api_exceptions.AnchoreApiError as err:
return_object = make_response_error(err, in_httpcode=err.__response_code__)
httpcode = err.__response_code__
except Exception as err:
logger.debug("operation exception: " + str(err))
return_object = make_response_error(err, in_httpcode=httpcode)
httpcode = return_object['httpcode']
return return_object, httpcode
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def list_images(history=None, image_to_get=None, fulltag=None, detail=False):
httpcode = 500
try:
if image_to_get and not fulltag:
fulltag = image_to_get.get('tag')
digest = image_to_get.get('digest')
else:
digest = None
return_object = do_list_images(account=ApiRequestContextProxy.namespace(), filter_digest=digest, filter_tag=fulltag, history=history)
httpcode = 200
except api_exceptions.AnchoreApiError as err:
return_object = make_response_error(err, in_httpcode=err.__response_code__)
httpcode = err.__response_code__
except Exception as err:
logger.debug("operation exception: " + str(err))
return_object = make_response_error(err, in_httpcode=httpcode)
httpcode = return_object['httpcode']
return return_object, httpcode
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def delete_images_async(imageDigests, force=False):
return_object = {}
httpcode = 500
try:
logger.debug('Handling DELETE on imageDigests: %s' % imageDigests)
client = internal_client_for(CatalogClient, ApiRequestContextProxy.namespace())
rc = client.delete_images_async(imageDigests, force=force)
if rc:
return_object = rc
httpcode = 200
else:
httpcode = 500
raise Exception('Operation failed due to an error/connectivity issue with catalog')
except Exception as err:
logger.exception('Error in asynchronous deletion of images')
return_object = make_response_error(err, in_httpcode=httpcode)
httpcode = return_object['httpcode']
return return_object, httpcode
def validate_pullstring_is_tag(pullstring):
try:
parsed = parse_dockerimage_string(pullstring)
return parsed.get('tag') is not None
except Exception as e:
logger.debug_exception('Error parsing pullstring {}. Err = {}'.format(pullstring, e))
raise ValueError('Error parsing pullstring {}'.format(pullstring))
def validate_pullstring_is_digest(pullstring):
try:
parsed = parse_dockerimage_string(pullstring)
return parsed.get('digest') is not None
except Exception as e:
logger.debug_exception('Error parsing pullstring {}. Err = {}'.format(pullstring, e))
raise ValueError('Error parsing pullstring {}'.format(pullstring))
digest_regex = re.compile('sha256:[a-fA-F0-9]{64}')
def validate_archive_digest(digest: str):
return digest is not None and digest_regex.match(digest.strip())
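# Illustrative usage of the validators above (assumption: pullstrings follow the usual
# registry/repo:tag or registry/repo@sha256:<64 hex chars> forms):
#   validate_pullstring_is_tag('docker.io/library/nginx:latest')                -> True
#   validate_pullstring_is_digest('docker.io/library/nginx@sha256:' + 'a' * 64) -> True
#   validate_archive_digest('sha256:' + 'a' * 64)                               -> truthy (regex match)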
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def add_image(image, force=False, autosubscribe=False):
# TODO: use for validation pass
spec = ApiRequestContextProxy.get_service().api_spec
httpcode = 500
try:
request_inputs = anchore_engine.apis.do_request_prep(request, default_params={'force': force})
try:
normalized = normalize_image_add_source(analysis_request_dict=image)
validate_image_add_source(normalized, spec)
except api_exceptions.AnchoreApiError:
raise
except Exception as e:
raise api_exceptions.BadRequest('Could not validate request due to error', detail={'validation_error': str(e)})
enable_subscriptions = [
'analysis_update'
]
if autosubscribe:
enable_subscriptions.append('tag_update')
source = normalized['source']
return_object = analyze_image(ApiRequestContextProxy.namespace(), source, force, enable_subscriptions, image.get('annotations'))
httpcode = 200
except api_exceptions.AnchoreApiError as err:
raise err
# httpcode = err.__response_code__
# return_object = make_response_error(err.message, details=err.detail, in_httpcode=httpcode)
except ValueError as err:
httpcode = 400
return_object = make_response_error(str(err), in_httpcode=400)
except Exception as err:
logger.debug("operation exception: {}".format(str(err)))
return_object = make_response_error(err, in_httpcode=httpcode)
httpcode = return_object['httpcode']
return return_object, httpcode
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def delete_image(imageDigest, force=False):
try:
request_inputs = anchore_engine.apis.do_request_prep(request, default_params={'force': force})
return_object, httpcode = images_imageDigest(request_inputs, imageDigest)
except Exception as err:
httpcode = 500
return_object = str(err)
return return_object, httpcode
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def get_image(imageDigest, history=None):
try:
request_inputs = anchore_engine.apis.do_request_prep(request, default_params={'history': False})
return_object, httpcode = images_imageDigest(request_inputs, imageDigest)
except Exception as err:
httpcode = 500
return_object = str(err)
return return_object, httpcode
@flask_metrics.do_not_track()
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def get_image_by_imageId(imageId, history=None):
try:
request_inputs = anchore_engine.apis.do_request_prep(request, default_params={'history': False})
try:
imageDigest = lookup_imageDigest_from_imageId(request_inputs, imageId)
except:
imageDigest = imageId
return_object, httpcode = images_imageDigest(request_inputs, imageDigest)
except Exception as err:
httpcode = 500
return_object = str(err)
return return_object, httpcode
@flask_metrics.do_not_track()
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def delete_image_by_imageId(imageId, force=False):
try:
request_inputs = anchore_engine.apis.do_request_prep(request, default_params={'force': force})
try:
imageDigest = lookup_imageDigest_from_imageId(request_inputs, imageId)
except:
imageDigest = imageId
request_inputs = anchore_engine.apis.do_request_prep(request, default_params={})
return_object, httpcode = images_imageDigest(request_inputs, imageDigest)
except Exception as err:
httpcode = 500
return_object = str(err)
return return_object, httpcode
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def get_image_policy_check(imageDigest, policyId=None, tag=None, detail=True, history=False):
try:
request_inputs = anchore_engine.apis.do_request_prep(request, default_params={'tag':None, 'detail':True, 'history':False, 'policyId':None})
return_object, httpcode = images_imageDigest_check(request_inputs, imageDigest)
except Exception as err:
httpcode = 500
return_object = str(err)
return return_object, httpcode
@flask_metrics.do_not_track()
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def get_image_policy_check_by_imageId(imageId, policyId=None, tag=None, detail=None, history=None):
try:
request_inputs = anchore_engine.apis.do_request_prep(request, default_params={})
try:
imageDigest = lookup_imageDigest_from_imageId(request_inputs, imageId)
except:
imageDigest = imageId
request_inputs = anchore_engine.apis.do_request_prep(request, default_params={'tag':None, 'detail':True, 'history':False, 'policyId':None})
return_object, httpcode = images_imageDigest_check(request_inputs, imageDigest)
except Exception as err:
httpcode = 500
return_object = str(err)
return return_object, httpcode
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def list_image_metadata(imageDigest):
try:
localconfig = anchore_engine.configuration.localconfig.get_config()
return_object = localconfig.get('image_metadata_types', [])
httpcode = 200
except Exception as err:
httpcode = 500
return_object = str(err)
return return_object, httpcode
@flask_metrics.do_not_track()
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def get_image_metadata_by_type(imageDigest, mtype):
try:
request_inputs = anchore_engine.apis.do_request_prep(request, default_params={'imageDigest':imageDigest})
return_object, httpcode = get_content(request_inputs, mtype, doformat=True)
if httpcode == 200:
return_object = {
'imageDigest': imageDigest,
'metadata_type': mtype,
'metadata': list(return_object.values())[0]
}
except Exception as err:
httpcode = 500
return_object = str(err)
return return_object, httpcode
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def list_image_content(imageDigest):
try:
localconfig = anchore_engine.configuration.localconfig.get_config()
return_object = localconfig.get('image_content_types', [])
httpcode = 200
except Exception as err:
httpcode = 500
return_object = str(err)
return return_object, httpcode
@flask_metrics.do_not_track()
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def list_image_content_by_imageid(imageId):
try:
localconfig = anchore_engine.configuration.localconfig.get_config()
return_object = localconfig.get('image_content_types', [])
httpcode = 200
except Exception as err:
httpcode = 500
return_object = str(err)
return return_object, httpcode
@flask_metrics.do_not_track()
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def get_image_content_by_type(imageDigest, ctype):
try:
request_inputs = anchore_engine.apis.do_request_prep(request, default_params={'imageDigest':imageDigest})
return_object, httpcode = get_content(request_inputs, ctype, doformat=True)
if httpcode == 200:
return_object = {
'imageDigest': imageDigest,
'content_type': ctype,
'content': list(return_object.values())[0]
}
except Exception as err:
httpcode = 500
return_object = str(err)
return return_object, httpcode
@flask_metrics.do_not_track()
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def get_image_content_by_type_files(imageDigest):
return get_image_content_by_type(imageDigest, 'files')
@flask_metrics.do_not_track()
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def get_image_content_by_type_javapackage(imageDigest):
return get_image_content_by_type(imageDigest, 'java')
@flask_metrics.do_not_track()
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def get_image_content_by_type_imageId(imageId, ctype):
try:
request_inputs = anchore_engine.apis.do_request_prep(request, default_params={})
try:
imageDigest = lookup_imageDigest_from_imageId(request_inputs, imageId)
except:
imageDigest = imageId
return_object, httpcode = get_image_content_by_type(imageDigest, ctype)
except Exception as err:
httpcode = 500
return_object = str(err)
return return_object, httpcode
@flask_metrics.do_not_track()
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def get_image_content_by_type_imageId_files(imageId):
return get_image_content_by_type_imageId(imageId, 'files')
@flask_metrics.do_not_track()
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def get_image_content_by_type_imageId_javapackage(imageId):
return get_image_content_by_type_imageId(imageId, 'java')
@flask_metrics.do_not_track()
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def get_image_vulnerability_types(imageDigest):
try:
return_object = anchore_engine.common.image_vulnerability_types + ['all']
httpcode = 200
except Exception as err:
httpcode = 500
return_object = str(err)
return return_object, httpcode
@flask_metrics.do_not_track()
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def get_image_vulnerability_types_by_imageId(imageId):
try:
request_inputs = anchore_engine.apis.do_request_prep(request, default_params={})
try:
imageDigest = lookup_imageDigest_from_imageId(request_inputs, imageId)
except:
imageDigest = imageId
return_object, httpcode = get_image_vulnerability_types(imageDigest)
except Exception as err:
httpcode = 500
return_object = str(err)
return return_object, httpcode
@flask_metrics.do_not_track()
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def get_image_vulnerabilities_by_type(imageDigest, vtype, force_refresh=False, vendor_only=True):
try:
vulnerability_type = vtype
return_object, httpcode = vulnerability_query(ApiRequestContextProxy.namespace(), imageDigest, vulnerability_type, force_refresh, vendor_only, doformat=True)
if httpcode == 200:
return_object = {
'imageDigest': imageDigest,
'vulnerability_type': vulnerability_type,
'vulnerabilities': list(return_object.values())[0]
}
except Exception as err:
logger.exception('Exception getting vulns')
httpcode = 500
return_object = str(err)
return return_object, httpcode
@flask_metrics.do_not_track()
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def get_image_vulnerabilities_by_type_imageId(imageId, vtype):
try:
vulnerability_type = vtype
request_inputs = anchore_engine.apis.do_request_prep(request, default_params={})
try:
imageDigest = lookup_imageDigest_from_imageId(request_inputs, imageId)
except:
imageDigest = imageId
return_object, httpcode = get_image_vulnerabilities_by_type(imageDigest, vulnerability_type)
except Exception as err:
httpcode = 500
return_object = str(err)
return return_object, httpcode
#@flask_metrics.do_not_track()
#@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
#def import_image(analysis_report):
# try:
# request_inputs = anchore_engine.apis.do_request_prep(request, default_params={})
# return_object, httpcode = do_import_image(request_inputs, analysis_report)
#
# except Exception as err:
# httpcode = 500
# return_object = str(err)
#
# return return_object, httpcode
#def do_import_image(request_inputs, importRequest):
# user_auth = request_inputs['auth']
# method = request_inputs['method']
# bodycontent = request_inputs['bodycontent']
# params = request_inputs['params']
#
# return_object = {}
# httpcode = 500
#
# userId, pw = user_auth
#
# try:
# client = internal_client_for(CatalogClient, request_inputs['userId'])
# return_object = []
# image_records = client.import_image(json.loads(bodycontent))
# for image_record in image_records:
# return_object.append(make_response_image(image_record))
# httpcode = 200
#
# except Exception as err:
# logger.debug("operation exception: " + str(err))
# return_object = make_response_error(err, in_httpcode=httpcode)
# httpcode = return_object['httpcode']
#
# return(return_object, httpcode)
def do_list_images(account, filter_tag=None, filter_digest=None, history=False):
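    """
    List images for the given account via the catalog service, optionally filtered by tag and/or
    digest, and convert each catalog record into the API response format.
    """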
client = internal_client_for(CatalogClient, account)
try:
# Query param fulltag has precedence for search
image_records = client.list_images(tag=filter_tag, digest=filter_digest, history=history)
return [make_response_image(image_record, include_detail=True) for image_record in image_records]
except Exception as err:
logger.debug("operation exception: " + str(err))
raise err
def analyze_image(account, source, force=False, enable_subscriptions=None, annotations=None):
"""
Analyze an image from a source where a source can be one of:
'digest': {
'pullstring': str, (digest or tag, e.g docker.io/alpine@sha256:abc),
'tag': str, the tag itself to associate (e.g. docker.io/alpine:latest),
'creation_timestamp_override: str, rfc3339 format. necessary only if not doing a force re-analysis of existing image,
'dockerfile': str, the base64 encoded dockerfile content to associate with this tag at analysis time. optional
}
'tag': {
'pullstring': str, the full tag-style pull string for docker (e.g. docker.io/nginx:latest),
'dockerfile': str optional base-64 encoded dockerfile content to associate with this tag at analysis time. optional
}
'archive': {
'digest': str, the digest to restore from the analysis archive
}
:param account: str account id
:param source: dict source object with keys: 'tag', 'digest', and 'archive', with associated config for pulling source from each. See the api spec for schema details
:param force: bool, if true re-analyze existing image
:param enable_subscriptions: the list of subscriptions to enable at add time. Optional
:param annotations: Dict of k/v annotations. Optional.
:return: resulting image record
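    Example source payloads (illustrative only; see the api spec for the authoritative schema):
        {'tag': {'pullstring': 'docker.io/library/nginx:latest'}}
        {'digest': {'pullstring': 'docker.io/library/nginx@sha256:<64 hex>',
                    'tag': 'docker.io/library/nginx:latest',
                    'creation_timestamp_override': '2019-01-01T00:00:00Z'}}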
"""
if not source:
raise Exception('Must have source to fetch image or analysis from')
client = internal_client_for(CatalogClient, account)
tag = None
digest = None
ts = None
is_from_archive = False
dockerfile = None
image_check = None
try:
logger.debug("handling POST: source={}, force={}, enable_subscriptions={}, annotations={}".format(source, force, enable_subscriptions, annotations))
# if not, add it and set it up to be analyzed
if source.get('archive'):
img_source = source.get('archive')
# Do archive-based add
digest = img_source['digest']
is_from_archive = True
elif source.get('tag'):
# Do tag-based add
            img_source = source.get('tag')
tag = img_source['pullstring']
dockerfile = img_source.get('dockerfile')
elif source.get('digest'):
# Do digest-based add
img_source = source.get('digest')
tag = img_source['tag']
digest_info = anchore_engine.utils.parse_dockerimage_string(img_source['pullstring'])
digest = digest_info['digest']
dockerfile = img_source.get('dockerfile')
ts = img_source.get('creation_timestamp_override')
if ts:
try:
ts = utils.rfc3339str_to_epoch(ts)
except Exception as err:
raise api_exceptions.InvalidDateFormat('source.creation_timestamp_override', ts)
if force:
# Grab the trailing digest sha section and ensure it exists
try:
image_check = client.get_image(digest)
if not image_check:
raise Exception('No image found for digest {}'.format(digest))
except Exception as err:
raise ValueError("image digest must already exist to force re-analyze using tag+digest")
elif not ts:
# If a new analysis of an image by digest + tag, we need a timestamp to insert into the tag history properly
raise ValueError("must supply creation_timestamp_override when adding a new image by tag+digest")
else:
raise ValueError("The source property must have at least one of tag, digest, or archive set to non-null")
# add the image to the catalog
image_record = client.add_image(tag=tag, digest=digest, dockerfile=dockerfile, annotations=annotations,
created_at=ts, from_archive=is_from_archive, allow_dockerfile_update=force)
imageDigest = image_record['imageDigest']
# finally, do any state updates and return
if image_record:
logger.debug("added image: " + str(imageDigest))
# auto-subscribe for NOW
for image_detail in image_record['image_detail']:
fulltag = image_detail['registry'] + "/" + image_detail['repo'] + ":" + image_detail['tag']
foundtypes = []
try:
subscription_records = client.get_subscription(subscription_key=fulltag)
except Exception as err:
subscription_records = []
for subscription_record in subscription_records:
if subscription_record['subscription_key'] == fulltag:
foundtypes.append(subscription_record['subscription_type'])
sub_types = anchore_engine.common.subscription_types
for sub_type in sub_types:
if sub_type in ['repo_update']:
continue
if sub_type not in foundtypes:
try:
default_active = False
if enable_subscriptions and sub_type in enable_subscriptions:
logger.debug("auto-subscribing image: " + str(sub_type))
default_active = True
client.add_subscription({'active': default_active, 'subscription_type': sub_type,
'subscription_key': fulltag})
except:
try:
client.update_subscription({'subscription_type': sub_type, 'subscription_key': fulltag})
except:
pass
else:
if enable_subscriptions and sub_type in enable_subscriptions:
client.update_subscription({'active': True, 'subscription_type': sub_type, 'subscription_key': fulltag})
# set the state of the image appropriately
currstate = image_record['analysis_status']
if not currstate:
newstate = taskstate.init_state('analyze', None)
elif force or currstate == taskstate.fault_state('analyze'):
newstate = taskstate.reset_state('analyze')
elif image_record['image_status'] == 'deleted':
newstate = taskstate.reset_state('analyze')
else:
newstate = currstate
if (currstate != newstate) or (force):
logger.debug("state change detected: " + str(currstate) + " : " + str(newstate))
image_record.update({'image_status': 'active', 'analysis_status': newstate})
updated_image_record = client.update_image(imageDigest, image_record)
if updated_image_record:
image_record = updated_image_record[0]
else:
logger.debug("no state change detected: " + str(currstate) + " : " + str(newstate))
return [make_response_image(image_record, include_detail=True)]
except Exception as err:
logger.debug("operation exception: " + str(err))
raise err
def images_imageDigest(request_inputs, imageDigest):
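    """
    Handle GET (fetch a single image record) and DELETE (remove an image) for the image
    identified by imageDigest. Returns a (return_object, httpcode) tuple like the other handlers.
    """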
user_auth = request_inputs['auth']
method = request_inputs['method']
bodycontent = request_inputs['bodycontent']
params = request_inputs.get('params', {})
return_object = {}
httpcode = 500
username, pw = user_auth
userId = request_inputs['userId']
try:
client = internal_client_for(CatalogClient, request_inputs['userId'])
if method == 'GET':
logger.debug("handling GET on imageDigest: " + str(imageDigest))
image_record = client.get_image(imageDigest)
if image_record:
if 'detail' in params and not params.get('detail'):
detail = False
else:
detail = True
return_object = [make_response_image(image_record, include_detail=detail)]
httpcode = 200
else:
httpcode = 404
raise Exception("cannot locate specified image")
elif method == 'DELETE':
logger.debug("handling DELETE on imageDigest: " + str(imageDigest))
rc = False
try:
rc = client.delete_image(imageDigest, force=params['force'])
except Exception as err:
raise err
if rc:
return_object = rc
httpcode = 200
else:
httpcode = 500
raise Exception("failed to delete")
except Exception as err:
logger.debug("operation exception: " + str(err))
return_object = make_response_error(err, in_httpcode=httpcode)
httpcode = return_object['httpcode']
return return_object, httpcode
def images_check_impl(request_inputs, image_records):
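    """
    Evaluate the supplied image records against a policy bundle (params['policyId'] if given,
    otherwise the account's active policy) and collect per-tag evaluation results.
    Returns a (return_object, httpcode) tuple.
    """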
method = request_inputs['method']
bodycontent = request_inputs['bodycontent']
params = request_inputs['params']
return_object = []
httpcode = 500
userId = request_inputs['userId']
try:
client = internal_client_for(CatalogClient, request_inputs['userId'])
if 'policyId' in params and params['policyId']:
bundle_records = client.get_policy(policyId=params['policyId'])
policyId = params['policyId']
else:
bundle_records = client.get_active_policy()
policyId = None
if not bundle_records:
httpcode = 404
raise Exception("user has no active policy to evaluate: " + str(userId))
# this is to check that we got at least one evaluation in the response, otherwise routine should throw a 404
atleastone = False
if image_records:
for image_record in image_records:
imageDigest = image_record['imageDigest']
return_object_el = {}
return_object_el[imageDigest] = {}
tags = []
if params and 'tag' in params and params['tag']:
image_info = anchore_engine.common.images.get_image_info(userId, "docker", params['tag'], registry_lookup=False,
registry_creds=[])
if 'fulltag' in image_info and image_info['fulltag']:
params['tag'] = image_info['fulltag']
tags.append(params['tag'])
else:
for image_detail in image_record['image_detail']:
fulltag = image_detail['registry'] + "/" + image_detail['repo'] + ":" + image_detail['tag']
tags.append(fulltag)
for tag in tags:
if tag not in return_object_el[imageDigest]:
return_object_el[imageDigest][tag] = []
try:
if params and params.get('history', False):
results = client.get_evals(imageDigest=imageDigest, tag=tag, policyId=policyId)
elif params and params.get('interactive', False):
results = [client.get_eval_interactive(imageDigest=imageDigest, tag=tag, policyId=policyId)]
else:
results = [client.get_eval_latest(imageDigest=imageDigest, tag=tag, policyId=policyId)]
except Exception as err:
results = []
httpcode = 200
for result in results:
fresult = make_response_policyeval(result, params, client)
return_object_el[imageDigest][tag].append(fresult[tag])
atleastone = True
if return_object_el:
return_object.append(return_object_el)
else:
httpcode = 404
raise Exception("could not find image record(s) input imageDigest(s)")
if not atleastone:
httpcode = 404
raise Exception("could not find any evaluations for input images")
except Exception as err:
logger.debug("operation exception: " + str(err))
return_object = make_response_error(err, in_httpcode=httpcode)
httpcode = return_object['httpcode']
return return_object, httpcode
def images_imageDigest_check(request_inputs, imageDigest):
user_auth = request_inputs['auth']
method = request_inputs['method']
bodycontent = request_inputs['bodycontent']
params = request_inputs['params']
return_object = {}
httpcode = 500
username, pw = user_auth
userId = request_inputs['userId']
try:
client = internal_client_for(CatalogClient, request_inputs['userId'])
image_record = client.get_image(imageDigest)
if image_record and image_record['analysis_status'] != taskstate.complete_state('analyze'):
httpcode = 404
raise Exception("image is not analyzed - analysis_status: " + str(image_record['analysis_status']))
# Use a list of records here for backwards compat of api
return_object, httpcode = images_check_impl(request_inputs, [image_record])
except Exception as err:
logger.debug("operation exception: " + str(err))
return_object = make_response_error(err, in_httpcode=httpcode)
httpcode = return_object['httpcode']
return return_object, httpcode
def _get_image_ok(account, imageDigest):
"""
Get the image id if the image exists and is analyzed, else raise error
:param account:
:param imageDigest:
:return:
"""
catalog_client = internal_client_for(CatalogClient, account)
image_report = catalog_client.get_image(imageDigest)
if image_report and image_report['analysis_status'] != taskstate.complete_state('analyze'):
raise api_exceptions.ResourceNotFound('artifacts', detail={"details": "image is not analyzed - analysis_status: " + image_report['analysis_status']})
elif not image_report:
raise api_exceptions.ResourceNotFound(imageDigest, detail={})
image_detail = image_report['image_detail'][0]
imageId = image_detail['imageId']
return imageId
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def list_retrieved_files(imageDigest):
"""
GET /images/{imageDigest}/artifacts/retrieved_files
:param imageDigest:
:param artifactType:
:return:
"""
account = ApiRequestContextProxy.namespace()
try:
imageId = _get_image_ok(account, imageDigest)
client = internal_client_for(PolicyEngineClient, account)
resp = client.list_image_analysis_artifacts(user_id=account, image_id=imageId, artifact_type='retrieved_files')
return resp, 200
except api_exceptions.AnchoreApiError:
raise
except Exception as err:
raise api_exceptions.InternalError(str(err), detail={})
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def list_file_content_search_results(imageDigest):
"""
GET /images/{imageDigest}/artifacts/file_content_search
:param imageDigest:
:param artifactType:
:return:
"""
account = ApiRequestContextProxy.namespace()
try:
imageId = _get_image_ok(account, imageDigest)
client = internal_client_for(PolicyEngineClient, account)
resp = client.list_image_analysis_artifacts(user_id=account, image_id=imageId, artifact_type='file_content_search')
return resp, 200
except api_exceptions.AnchoreApiError:
raise
except Exception as err:
raise api_exceptions.InternalError(str(err), detail={})
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def list_secret_search_results(imageDigest):
"""
GET /images/{imageDigest}/artifacts/secret_search
:param imageDigest:
:param artifactType:
:return:
"""
account = ApiRequestContextProxy.namespace()
try:
imageId = _get_image_ok(account, imageDigest)
client = internal_client_for(PolicyEngineClient, account)
resp = client.list_image_analysis_artifacts(user_id=account, image_id=imageId, artifact_type='secret_search')
return resp, 200
except api_exceptions.AnchoreApiError:
raise
except Exception as err:
raise api_exceptions.InternalError(str(err), detail={})
|
the-stack_106_26160 | # -*- coding: utf8 -*-
# Imports. {{{1
import sys
# Try to load the required modules from Python's standard library.
try:
import os
import traceback
import argparse
from time import time
import hashlib
except ImportError as e:
msg = "Error: Failed to load one of the required Python modules! (%s)\n"
sys.stderr.write(msg % str(e))
sys.exit(1)
from dedupsqlfs.lib import constants
from dedupsqlfs.db import check_engines
from dedupsqlfs.log import logging
from dedupsqlfs.fs import which
from dedupsqlfs.argp import SmartFormatter
from dedupsqlfs.my_formats import format_size
import dedupsqlfs
def fuse_mount(options, compression_methods=None):
from dedupsqlfs.fuse.dedupfs import DedupFS
from dedupsqlfs.fuse.operations import DedupOperations
ops = None
ret = -1
try:
ops = DedupOperations()
_fuse = DedupFS(
ops, options.mountpoint,
options,
fsname="dedupsqlfs", allow_root=True)
logger = ops.getApplication().getLogger()
logger.info("Mount: DeDupSQLfs %s/%s, llFuse %s, Python %s" % (dedupsqlfs.__version__, dedupsqlfs.__fsversion__, dedupsqlfs.fuse.dedupfs.fuse.__version__, sys.version.split()[0]))
if not _fuse.checkIfLocked():
_fuse.saveCompressionMethods(compression_methods)
for modname in compression_methods:
_fuse.appendCompression(modname)
ret = _fuse.main()
except Exception:
import traceback
err_str = traceback.format_exc()
if ops:
logger = ops.getApplication().getLogger()
logger.error(err_str)
print(err_str)
if ops:
ops.getManager().close()
ops.getApplication().stopCacheFlusher()
if options.memory_usage:
import resource
kbytes = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
logger = ops.getApplication().getLogger()
logger.important("\n ~~ MOUNT ~~")
logger.important("-= Memory statistics: =-")
logger.important("Peak memory usage: %s\n" % format_size(kbytes * 1024))
return ret
def main(): # {{{1
"""
This function enables using mount.dedupsqlfs.py as a shell script that creates FUSE
mount points. Execute "mount.dedupsqlfs -h" for a list of valid command line options.
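    Example invocation (illustrative; flag names are defined by the argument parser below, and the
    compression method must be one of the locally available modules, e.g. zlib if importable):
        mount.dedupsqlfs --data ~/data --name backups --compress zlib /mnt/dedup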
"""
logger = logging.getLogger("mount.dedupsqlfs/main")
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler(sys.stderr))
parser = argparse.ArgumentParser(
prog="%s/%s mount/%s python/%s" % (dedupsqlfs.__name__, dedupsqlfs.__version__, dedupsqlfs.__fsversion__, sys.version.split()[0]),
formatter_class = SmartFormatter,
conflict_handler="resolve")
# Register some custom command line options with the option parser.
option_stored_in_db = " (this option is only useful when creating a new database, because your choice is stored in the database and can't be changed after that)"
generic = parser.add_argument_group('Generic')
generic.add_argument('-h', '--help', action='help', help="show this help message followed by the command line options defined by the Python FUSE binding and exit")
generic.add_argument('-v', '--verbose', action='count', dest='verbosity', default=0, help="increase verbosity: 0 - error, 1 - warning, 2 - info, 3 - debug, 4 - trace")
    generic.add_argument('--verbose-stats', dest='verbose_stats', action='store_true', help="Enable FS operations statistics output. Verbosity level must be 2+.")
    generic.add_argument('--verbose-stats-detailed', dest='verbose_stats_detailed', action='store_true', help="Enable FS operations statistics output, very detailed - timings, operation counts. Verbosity level must be 2+.")
generic.add_argument('--log-file', dest='log_file', help="specify log file location")
generic.add_argument('--log-file-only', dest='log_file_only', action='store_true',
help="Don't send log messages to stderr.")
    generic.add_argument('--lock-file', dest='lock_file', help="Specify lock file location. Useful to check fs status via content or existence.")
generic.add_argument('--data', dest='data', metavar='DIRECTORY', default="~/data", help="Specify the base location for the files in which metadata and blocks data is stored. Defaults to ~/data")
generic.add_argument('--name', dest='name', metavar='DATABASE', default="dedupsqlfs", help="Specify the name for the database directory in which metadata and blocks data is stored. Defaults to dedupsqlfs")
generic.add_argument('--temp', dest='temp', metavar='DIRECTORY', help="Specify the location for the files in which temporary data is stored. By default honour TMPDIR environment variable value.")
generic.add_argument('-b', '--block-size', dest='block_size', metavar='BYTES', default=1024*128, type=int, help="Specify the maximum block size in bytes" + option_stored_in_db + ". Defaults to 128kB.")
generic.add_argument('--mount-subvolume', dest='mounted_subvolume', metavar='NAME', default=None, help="Use subvolume NAME as root fs.")
generic.add_argument('--memory-limit', dest='memory_limit', action='store_true', help="Use some lower values for less memory consumption.")
generic.add_argument('--cpu-limit', dest='cpu_limit', metavar='NUMBER', default=0, type=int, help="Specify the maximum CPU count to use in multiprocess compression. Defaults to 0 (auto).")
generic.add_argument('--multi-cpu', dest='multi_cpu', metavar='TYPE', default="single", choices=("single", "process", "thread",), help="Specify type of compression tool: single process, multi-process or multi-thread. Choices are: 'single', 'process', 'thread'. Defaults to 'single'.")
grp_data = parser.add_argument_group('Data')
engines, msg = check_engines()
if not engines:
logger.error("No storage engines available! Please install sqlite or pymysql python module!")
return 1
grp_data.add_argument('--storage-engine', dest='storage_engine', metavar='ENGINE', choices=engines, default=engines[0],
help=msg)
if "mysql" in engines:
from dedupsqlfs.db.mysql import get_table_engines
table_engines = get_table_engines()
msg = "One of MySQL table engines: "+", ".join(table_engines)+". Default: %r. Aria and TokuDB engine can be used only with MariaDB or Percona server." % table_engines[0]
grp_data.add_argument('--table-engine', dest='table_engine', metavar='ENGINE',
choices=table_engines, default=table_engines[0],
help=msg)
grp_data.add_argument('--no-transactions', dest='use_transactions', action='store_false', help="Don't use transactions when making multiple related changes, this might make the file system faster or slower (?).")
grp_data.add_argument('--no-sync', dest='synchronous', action='store_false', help="Disable SQLite's normal synchronous behavior which guarantees that data is written to disk immediately, because it slows down the file system too much (this means you might lose data when the mount point isn't cleanly unmounted).")
# Dynamically check for supported hashing algorithms.
msg = "Specify the hashing algorithm that will be used to recognize duplicate data blocks: one of %s. Choose wisely - it can't be changed on the fly."
hash_functions = list({}.fromkeys([h.lower() for h in hashlib.algorithms_available]).keys())
hash_functions.sort()
work_hash_funcs = set(hash_functions) & constants.WANTED_HASH_FUCTIONS
msg %= ', '.join('%r' % fun for fun in work_hash_funcs)
defHash = 'md5' # Hope it will be there always. Stupid.
msg += ". Defaults to %r." % defHash
grp_data.add_argument('--hash', dest='hash_function', metavar='FUNCTION', choices=work_hash_funcs, default=defHash, help=msg)
    grp_data.add_argument('--collision-check', dest='collision_check_enabled', action='store_true', help="Check for hash collisions on written data.")
grp_cache = parser.add_argument_group('Cache')
grp_cache.add_argument('--no-cache', dest='use_cache', action='store_false', help="Don't use cache in memory and delayed writes to storage.")
grp_cache.add_argument('--no-cache-flusher', dest='use_cache_flusher', action='store_false', help="Don't use separate cache flusher process. It touches file in mount_point directory. This may prevent FS to umount cleanly.")
    grp_cache.add_argument('--cache-meta-timeout', dest='cache_meta_timeout', metavar='SECONDS', type=int, default=10, help="Delay flushing written/unused metadata from memory for NUMBER of seconds. Defaults to 10 seconds.")
    grp_cache.add_argument('--cache-block-write-timeout', dest='cache_block_write_timeout', metavar='SECONDS', type=int, default=10, help="Expire written/unused data and flush it from memory after NUMBER of seconds. Defaults to 10 seconds.")
grp_cache.add_argument('--cache-block-write-size', dest='cache_block_write_size', metavar='BYTES', type=int,
default=1024*1024*1024,
help="Write cache for blocks: potential size in BYTES. Set to -1 for infinite. Defaults to ~1024 MB.")
    grp_cache.add_argument('--cache-block-read-timeout', dest='cache_block_read_timeout', metavar='SECONDS', type=int, default=10, help="Expire read/unused data and flush it from memory after NUMBER of seconds. Defaults to 10 seconds.")
grp_cache.add_argument('--cache-block-read-size', dest='cache_block_read_size', metavar='BYTES', type=int,
default=1024*1024*1024,
help="Readed cache for blocks: potential size in BYTES. Set to -1 for infinite. Defaults to ~1024 MB.")
    grp_cache.add_argument('--flush-interval', dest='flush_interval', metavar="SECONDS", type=int, default=5, help="Call the expired/flushed cache collector every N seconds on FUSE operations. Defaults to 5.")
grp_compress = parser.add_argument_group('Compression')
# Dynamically check for supported compression methods.
compression_methods = [constants.COMPRESSION_TYPE_NONE]
compression_methods_cmd = [constants.COMPRESSION_TYPE_NONE]
for modname in constants.COMPRESSION_SUPPORTED:
try:
module = __import__(modname)
if hasattr(module, 'compress') and hasattr(module, 'decompress'):
compression_methods.append(modname)
if modname not in constants.COMPRESSION_READONLY:
compression_methods_cmd.append(modname)
except ImportError:
pass
if len(compression_methods) > 1:
compression_methods_cmd.append(constants.COMPRESSION_TYPE_BEST)
compression_methods_cmd.append(constants.COMPRESSION_TYPE_DEFAULT)
compression_methods_cmd.append(constants.COMPRESSION_TYPE_FAST)
msg = "R|Enable compression of data blocks using one or more of the supported compression methods: %s"
msg %= ', '.join('%r' % mth for mth in compression_methods_cmd[:-3])
msg += ".\n- To use two or more methods select this option in command line for each compression method."
msg += "\n- You can use <method>:<level> syntax, <level> can be integer or value from --compression-level."
if len(compression_methods_cmd) > 1:
msg += "\n- Method %r will try all compression methods with 'best' level and choose one with smaller result data." % constants.COMPRESSION_TYPE_BEST
msg += "\n- Method %r will try all compression methods with 'default' level and choose one with smaller result data." % constants.COMPRESSION_TYPE_DEFAULT
msg += "\n- Method %r will try all compression methods with 'fast' level and choose one with smaller result data." % constants.COMPRESSION_TYPE_FAST
msg += "\nDefaults to %r." % constants.COMPRESSION_TYPE_NONE
grp_compress.add_argument('--compress', dest='compression', metavar='METHOD', action="append",
default=[constants.COMPRESSION_TYPE_NONE], help=msg)
grp_compress.add_argument('--force-compress', dest='compression_forced', action="store_true", help="Force compression even if resulting data is bigger than original.")
grp_compress.add_argument('--minimal-compress-size', dest='compression_minimal_size', metavar='BYTES', type=int, default=512, help="Minimal block data size for compression. Defaults to 512 bytes. Value -1 means auto - per method absolute minimum. Do not compress if data size is less than BYTES long. If not forced to.")
grp_compress.add_argument('--minimal-compress-ratio', dest='compression_minimal_ratio', metavar='RATIO', type=float, default=0.01, help="Minimal data compression ratio. Defaults to 0.01 (1%%). Do not store block compressed if ratio is less than RATIO. If not forced to.")
levels = (constants.COMPRESSION_LEVEL_DEFAULT, constants.COMPRESSION_LEVEL_FAST, constants.COMPRESSION_LEVEL_NORM, constants.COMPRESSION_LEVEL_BEST)
grp_compress.add_argument('--compression-level', dest='compression_level', metavar="LEVEL", default=constants.COMPRESSION_LEVEL_DEFAULT,
help="Compression level ratio: one of %s; or INT. Defaults to %r. Not all methods support this option." % (
', '.join('%r' % lvl for lvl in levels), constants.COMPRESSION_LEVEL_DEFAULT
))
# Dynamically check for supported compression programs
compression_progs = [constants.COMPRESSION_PROGS_NONE]
for pname, opts in constants.COMPRESSION_PROGS.items():
if which(pname):
compression_progs.append(pname)
msg = "R|Enable compression of snapshot sqlite database files using one of the supported compression programs: %s"
msg %= ', '.join('%r' % mth for mth in compression_progs)
msg += ".\nDefaults to %r." % constants.COMPRESSION_PROGS_DEFAULT
grp_compress.add_argument('--sqlite-compression-prog', dest='sqlite_compression_prog', metavar='PROGNAME',
choices=compression_progs,
default=constants.COMPRESSION_PROGS_DEFAULT, help=msg)
    grp_compress.add_argument('--recompress-on-fly', dest='compression_recompress_now', action="store_true", help="Recompress blocks that were compressed with a deprecated compression method.")
    grp_compress.add_argument('--recompress-not-current', dest='compression_recompress_current', action="store_true",
                              help="Recompress blocks that were compressed with a method other than the currently selected one.")
grp_compress.add_argument('--decompress-try-all', dest='decompress_try_all', action="store_true", help="Try to decompress blocks with every available method if stored one fails.")
grp_profile = parser.add_argument_group('Profiling')
# Dynamically check for profiling support.
try:
# Using __import__() here because of pyflakes.
for p in 'cProfile', 'pstats': __import__(p)
grp_profile.add_argument('--profile', action='store_true', default=False, help="Use the Python modules cProfile and pstats to create a profile of time spent in various function calls and print out a table of the slowest functions at exit (of course this slows everything down but it can nevertheless give a good indication of the hot spots).")
except ImportError:
logger.warning("No profiling support available, --profile option disabled.")
logger.warning("If you're on Ubuntu try 'sudo apt-get install python-profiler'.")
pass
grp_profile.add_argument('-M', '--memory-usage', dest='memory_usage', help="Output into stderr memory statistics at the exit of process", action="store_true")
grp_mount = parser.add_argument_group('Mounting')
grp_mount.add_argument('-o', '--mountoption', help="specify mount option", action="append")
grp_mount.add_argument('mountpoint', help="specify mount point")
args = parser.parse_args()
if args.profile:
sys.stderr.write("Enabling profiling..\n")
import cProfile, pstats
profile = '.dedupsqlfs.cprofile-%i' % time()
profiler = cProfile.Profile()
result = profiler.runcall(fuse_mount, args, compression_methods)
profiler.dump_stats(profile)
sys.stderr.write("\n Profiling statistics:\n\n")
s = pstats.Stats(profile)
s.sort_stats('calls').print_stats(0.1)
s.sort_stats('cumtime').print_stats(0.1)
s.sort_stats('tottime').print_stats(0.1)
os.unlink(profile)
else:
result = fuse_mount(args, compression_methods)
return result
if __name__ == '__main__':
sys.exit(main())
# vim: ts=2 sw=2 et
|
the-stack_106_26161 | import copy
import torch.nn as nn
from .layer_config import default_layer_config, LayerConfig, NormType
from .flatten import Flatten
from typing import Any, Sequence, List, Optional
def denses(
sizes: Sequence[int],
dropout_probability: float = None,
activation: Any = nn.ReLU,
normalization_type: Optional[NormType] = NormType.BatchNorm,
last_layer_is_output: bool = False,
with_flatten: bool = True,
config: LayerConfig = default_layer_config(dimensionality=None)) -> nn.Module:
"""
Args:
sizes: the size of the linear layers_legacy. The format is [linear1_input, linear1_output, ..., linearN_output]
dropout_probability: the probability of the dropout layer. If `None`, no dropout layer is added.
activation: the activation to be used
normalization_type: the normalization to be used between dense layers_legacy. If `None`, no normalization added
        last_layer_is_output: This must be set to `True` if the last dense layer is actually an output layer.
            If the last layer is an output, no batch norm, dropout or
            activation is added after the last `nn.Linear`
with_flatten: if True, the input will be flattened
config: defines the available operations
Returns:
a nn.Module
"""
config = copy.copy(config)
if activation is not None:
config.activation = activation
config.norm_type = normalization_type
config.set_dim(1)
ops: List[nn.Module] = []
if with_flatten:
ops.append(Flatten())
for n in range(len(sizes) - 1):
current = sizes[n]
next = sizes[n + 1]
ops.append(nn.Linear(current, next))
if n + 2 == len(sizes) and last_layer_is_output:
pass
else:
if config.norm_type is not None:
ops.append(nn.BatchNorm1d(next, **config.norm_kwargs))
ops.append(activation(**config.activation_kwargs))
if dropout_probability is not None and config.dropout is not None:
ops.append(config.dropout(p=dropout_probability, **config.dropout_kwargs))
return nn.Sequential(*ops)
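# Minimal usage sketch (an illustrative addition, not part of the original module): it assumes
# torch is importable and that this package's `default_layer_config`/`Flatten` behave as they
# are used in `denses` above.
if __name__ == '__main__':
    import torch
    # Build a small MLP: 784 -> 256 -> 10, with dropout and no norm/activation on the output layer.
    model = denses([784, 256, 10], dropout_probability=0.5, last_layer_is_output=True)
    logits = model(torch.randn(32, 1, 28, 28))  # Flatten() reshapes the input to (32, 784)
    print(logits.shape)  # expected: torch.Size([32, 10])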
|
the-stack_106_26162 | '''
Created on 5 nov. 2018
@author: david
'''
import logging
import time
from engine.motor import Motor
from sensor.wheel import WheelMotion
logging.basicConfig(level=logging.INFO)
THROTTLE = 80.0
MAX_STEPS = 20
TIMEOUT = 0.02
done = False
def onStep():
if sensor.getTravelSteps() >= MAX_STEPS:
global done
done = True
def travel(throttle):
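    """Drive the motor at the given throttle until MAX_STEPS wheel pulses have been counted."""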
global done
sensor.start()
done = False
motor.setThrottle(throttle)
while not done:
time.sleep(TIMEOUT)
logging.info("Total steps = {0}".format(sensor.getTravelSteps()))
sensor.stop()
sensor = WheelMotion(67)
sensor.onStep += onStep
motor = Motor(1)
motor.start()
try:
travel(THROTTLE)
travel(THROTTLE/2.0)
travel(THROTTLE/4.0)
motor.setNeutralThrottle()
time.sleep(1)
travel(-THROTTLE)
travel(-THROTTLE/2.0)
travel(-THROTTLE/4.0)
motor.setNeutralThrottle()
time.sleep(1)
finally:
motor.stop()
sensor.stop()
|
the-stack_106_26164 | import socket
import threading
def recv_data(sock):
while True:
data = sock.recv(1024)
print('\r' + data.decode() + '\n' + 'You: ', end='')
host = '127.0.0.1'
port = input('Input port: ')
# Default to port 3000 when nothing is entered; connect only once the port is known.
if not port:
    port = 3000
else:
    port = int(port)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect((host, port))
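# Nickname handshake (as implemented below): the client sends '<nick_check>=<nickname>' and the
# server is expected to reply '<nick_check_true>' or '<nick_check_false>'.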
while True:
    nick = input('Input nickname: ')
nickname = '<nick_check>='+nick
if nick == 'exit':
sock.close()
print('Disconnection')
break
sock.send(nickname.encode())
data = sock.recv(1024)
data = data.decode()
if data == '<nick_check_true>':
        print(f'Welcome to the chat, {nick}!')
        break
    elif data == '<nick_check_false>':
        print('Please choose a different nickname')
tread = threading.Thread(target=recv_data, args=(sock,), daemon=True)
tread.start()
sock.send('enter'.encode())
while True:
    data = input('You: ')
sock.send(data.encode())
if data == 'exit':
sock.close()
print('Disconnection')
break
|
the-stack_106_26166 | """
**************************************************************************
Script Name : Expunge_eMails_Utilities.py
Author : SS. Kanagal.
Description : This file contains all the utilities required by
: Expunge_eMail_v1.5.py.
Input Parameters: None.
Version History : Dates Version Description
: 30 May,2021 1.0 Initial Release.
: 01 Jun,2021 1.5 Final release.
: 27 Jun,2021 1.6 Added Command Line Parameters.
**************************************************************************
"""
import Expunge_eMails_Constants as CONST
import imaplib as IMAP_LIB
import logging as LOGFILE
import datetime as SYSDT
import os as SYSTEM
import re
"""
**************************************************************************
Function Name : CreateLogFile()
Author : SS. Kanagal.
Description : This creates a logfile in current working directory
: with supplied name in the logfilename parameter which
: is optional. It holds default value from constant
: LOG_FILE_NAME_PREFIX. To have a different log filename
: name change the value of this constant in Constants.py
Input Parameters: logfilename (Optional).
Return Value : logfile.
Version History : Dates Version Description
: 30 May,2021 1.0 Initial Release.
**************************************************************************
"""
def CreateLogFile(logfilename=CONST.LOG_FILE_NAME_PREFIX):
logfile = SYSTEM.getcwd() + logfilename + SYSDT.datetime.now().strftime("%d%m%Y_%H%M") + \
CONST.LOG_FILE_NAME_EXTENSION
LOGFILE.basicConfig(format = '%(levelname)s:%(message)s', filename = logfile, level = LOGFILE.INFO)
return logfile
"""
**************************************************************************
Function Name : WriteLogFile()
Author : SS. Kanagal.
Description : This function updates the log file with supplied
: message based on message type. By Default INFO is
: set as log type.
Input Parameters: log_msg, log_type=CONST.INFO
Return Value : None.
Version History : Dates Version Description
: 31 May,2021 1.0 Initial Release.
**************************************************************************
"""
def WriteLogFile(log_msg, log_type=CONST.INFO):
if log_type.lower() == CONST.INFO:LOGFILE.info(log_msg)
if log_type.lower() == CONST.WARNING:LOGFILE.warning(log_msg)
if log_type.lower() == CONST.CRITICAL:LOGFILE.critical(log_msg)
if log_type.lower() == CONST.ERR:LOGFILE.error(log_msg)
return
"""
**************************************************************************
Function Name : User_Quit()
Author : SS. Kanagal.
Description : This function updates log file upon user termination.
Input Parameters: custom_msg=False, custommsg=""
Return Value : None.
Version History : Dates Version Description
: 31 May,2021 1.0 Initial Release.
**************************************************************************
"""
def User_Quit(custom_msg=False, custommsg=""):
if not custom_msg:
WriteLogFile(CONST.USER_QUIT + str(SYSDT.datetime.now().strftime("%d%m%Y_%H%M")) + "]\n")
WriteLogFile(CONST.FINISHED + str(SYSDT.datetime.now().strftime("%d%m%Y_%H%M")) + "]\n")
else:
WriteLogFile(CONST.SYS_QUIT + str(SYSDT.datetime.now().strftime("%d%m%Y_%H%M")) +
custommsg + "]\n", CONST.WARNING)
WriteLogFile(CONST.FINISHED + str(SYSDT.datetime.now().strftime("%d%m%Y_%H%M")) + "]\n")
return
"""
**************************************************************************
Function Name : isValid_eMailAddress()
Author : SS. Kanagal.
Description : This function checks for invalid eMail address input.
Input Parameters: email_addr
Return Value : Boolean.
Version History : Dates Version Description
: 31 May,2021 1.0 Initial Release
**************************************************************************
"""
def isValid_eMailAddress(email_addr):
    regex = r"^(\w|\.|\_|\-)+[@](\w|\_|\-|\.)+[.]\w{2,3}$"
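    # Illustrative behaviour of the pattern above: 'user.name@example.com' matches, while
    # 'user@@example.com' does not. Note the TLD group only accepts 2-3 word characters.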
return bool(re.search(regex, email_addr))
"""
**************************************************************************
Function Name : isValid_Date()
Author : SS. Kanagal.
Description : This function will check for valid date inputs.
Input Parameters: ip_date2Chk, start_dt="", ChkEndDate=False.
Return Value : Boolean.
Version History : Dates Version Description
: 31 May,2021 1.0 Initial Release
**************************************************************************
"""
def isValid_Date(ip_date2Chk, start_dt="", ChkEndDate=False):
try:
today_dt = SYSDT.date.today()
today_dt = str(today_dt.day) + "-" + str(today_dt.month) + "-" + str(today_dt.year)
ip_date2Chk = SYSDT.datetime.strptime(ip_date2Chk, "%d-%m-%Y")
today_dt = SYSDT.datetime.strptime(today_dt, "%d-%m-%Y")
if ip_date2Chk > today_dt:
print(CONST.INVALID_END_DATE_GR)
return False
else:
if ChkEndDate:
start_dt = SYSDT.datetime.strptime(start_dt, "%d-%m-%Y")
if start_dt > ip_date2Chk:
print(CONST.INVALID_END_DATE_LR)
return False
else:
return True
else:
return True
except ValueError:
return False
"""
**************************************************************************
Function Name : Expunge_eMails()
Author : SS. Kanagal.
Description     : This function deletes/expunges email messages found in
                : the supplied date range.
Input Parameters: email_addr, email_pwd, start_date, end_date,
: foldername=CONST.FOLDER_NAME
Return Value : None.
Version History : Dates Version Description
: 31 May,2021 1.0 Initial Release
: 26 Jun,2021 1.6 Final release.
**************************************************************************
"""
def Expunge_eMails(email_addr, email_pwd, start_date, end_date, foldername=CONST.FOLDER_NAME):
# Build email host server name.
if CONST.YAHOO_DOMAIN_NAME.lower() in email_addr[email_addr.index("@") + 1:].lower():
email_host_addr = CONST.YAHOO_IMAP + email_addr[email_addr.index("@") + 1:]
else:
email_host_addr = CONST.IMAP_PREFIX + email_addr[email_addr.index("@") + 1:]
# Convert and change start & end date format as required.
start_date = SYSDT.datetime.strptime(start_date, "%d-%m-%Y")
start_date = SYSDT.datetime.strftime(start_date, "%d-%b-%Y")
end_date = SYSDT.datetime.strptime(end_date, "%d-%m-%Y")
end_date = SYSDT.datetime.strftime(end_date, "%d-%b-%Y")
# Login to email host.
try:
WriteLogFile(CONST.SYS_CONNECT + "<" + email_addr + ">")
email_server = IMAP_LIB.IMAP4_SSL(email_host_addr)
email_server.login(email_addr, email_pwd)
WriteLogFile(CONST.SYS_CONNECTED)
WriteLogFile(CONST.SYS_SELE_FOLDER + "<" + foldername + ">")
email_server.select(foldername)
WriteLogFile(CONST.SYS_SELED_FOLDER + "<" + foldername + ">")
except Exception as e:
ex = str(e.args[0])
if CONST.IMAP_AUTHEN_FAILED.lower() in ex.lower():
print(CONST.IMAP_AUTHENTICATION_FAILED)
User_Quit(True, "\n" + CONST.IMAP_AUTHENTICATION_FAILED)
exit()
if CONST.WRONG_IMAP_HOST.lower() in e.args[1].lower():
print(CONST.WRONG_IMAP_HOST_MSG + " >>> [" + email_host_addr + "]")
WriteLogFile(CONST.WRONG_IMAP_HOST_MSG + " >>> [" + email_host_addr + "]")
User_Quit(True, "\n" + CONST.IMAP_AUTHENTICATION_FAILED)
exit()
# Build email search string.
srch_str1 = '(SINCE {0})'.format(start_date)
srch_str2 = '(BEFORE {0})'.format(end_date)
srch_str = srch_str1 + " " + srch_str2
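    # Example of the resulting IMAP SEARCH criteria (illustrative): '(SINCE 01-Jun-2021) (BEFORE 30-Jun-2021)'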
# Selected messages to delete permanently based on dates entered.
WriteLogFile(CONST.SYS_DT_SEARCH_CRITERIA + "From: " + start_date + " To: " + end_date)
rep, my_mailbox = email_server.search(None, srch_str)
# Check if there are any email messages returned from the search.
if int(len(my_mailbox[0].split())) > 0:
print(CONST.SYS_MSG_FOUND, int(len(my_mailbox[0].split())))
WriteLogFile(CONST.SYS_MSG_FOUND + str(len(my_mailbox[0].split())))
# ---------------------------------------------------------------------------------------
for items in my_mailbox[0].split():
# Delete selected messages permanently and log it.
return_response, deleteitems = email_server.store(items, '+FLAGS', '(\\Deleted)')
WriteLogFile("Message ID: " + str(items) + " - return_response: " + return_response)
# ---------------------------------------------------------------------------------------
WriteLogFile(str(len(my_mailbox[0].split())) + CONST.SYS_EXPUNGED_SUCCESSFULLY + "\n")
else:
print(CONST.SYS_MSG_NOTFOUND)
WriteLogFile(CONST.SYS_MSG_NOTFOUND + "\n")
pass
"""
**************************************************************************
Function Name : GetInputs()
Author : SS. Kanagal.
Description : This function is called to collect inputs from user
                : when no Command Line Parameters are passed.
Input Parameters: None.
Return Value : List of inputs collected from user.
Version History : Dates Version Description
: 27 Jun,2021 1.0 Initial Release.
**************************************************************************
"""
def GetInputs():
# -------------------------------------------------------------------------------------------
# Get all user inputs. - Start.
# Get eMail Address.-------------------------------------------------------------------
no_input = True
while no_input:
ip_email_address = input(CONST.EMAIL_ADDR_PRMT)
if len(ip_email_address) > 0:
if ip_email_address.lower() == CONST.STOP_CHAR.lower():
User_Quit()
exit()
if not isValid_eMailAddress(ip_email_address):
print(CONST.INVALID_EMAIL_ADDR)
else:
no_input = False
else:
print(CONST.EMPTY_EMAIL_ADDR)
# Get email password.------------------------------------------------------------------
no_input = True
while no_input:
ip_pwd = input(CONST.EMAIL_PWD_PRMT)
if len(ip_pwd) > 0:
if ip_pwd.lower() == CONST.STOP_CHAR.lower():
User_Quit()
exit()
no_input = False
else:
print(CONST.EMPTY_EMAIL_PWD)
# Get start date.----------------------------------------------------------------------
no_input = True
while no_input:
ip_start_date = input(CONST.EMAIL_SRCH_START_DT_PRMT) # '2020-10-31'
if len(ip_start_date) > 0:
if ip_start_date.lower() == CONST.STOP_CHAR.lower():
User_Quit()
exit()
# Validate start date.
if not isValid_Date(ip_start_date):
print(CONST.INVALID_START_DATE)
else:
no_input = False
else:
print(CONST.EMPTY_START_DT)
# Get end date.------------------------------------------------------------------------
no_input = True
while no_input:
print(CONST.END_DATE_RANGE_MSGS)
ip_end_date = input(CONST.EMAIL_SRCH_END_DT_PRMT) # '2020-10-31'
if len(ip_end_date) > 0:
if ip_end_date.lower() == CONST.STOP_CHAR.lower():
User_Quit()
exit()
# Validate end date.
if not isValid_Date(ip_end_date, ip_start_date, True):
pass
else:
no_input = False
else:
print(CONST.EMPTY_END_DT)
# Get all user inputs. - End.
# -------------------------------------------------------------------------------------------
return (ip_email_address, ip_pwd, ip_start_date, ip_end_date)
"""
**************************************************************************
Function Name : DisplayHelpText()
Author : SS. Kanagal.
Description : This function displays help text on console.
Input Parameters: None.
Return Value : None.
Version History : Dates Version Description
: 27 Jun,2021 1.0 Initial Release.
**************************************************************************
"""
def DisplayHelpText():
print(CONST.HELP_TEXT)
pass
|
the-stack_106_26169 | # coding=utf-8
# Copyright 2021 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various ops for TransformerVaeFlowPrior."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import transformer_glow_layers_ops as gops
from tensor2tensor.models.transformer import transformer_decoder_layer
from tensor2tensor.models.transformer import transformer_encoder
from tensor2tensor.models.transformer import transformer_prepare_encoder
from tensor2tensor.utils import learning_rate as lr
from tensor2tensor.utils import mlperf_log
import tensorflow.compat.v1 as tf
def _mixed_precision_is_enabled(hparams):
"""Should be the same as in common_attention, avoiding import."""
activation_dtype = hparams.activation_dtype
weight_dtype = hparams.weight_dtype
return activation_dtype == tf.float16 and weight_dtype == tf.float32
def encoder(name, hparams, inputs, target_space):
"""Compute encoder outputs and attention bias."""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
(encoder_input,
encoder_self_attention_bias,
encoder_decoder_attention_bias) = (
transformer_prepare_encoder(inputs, target_space, hparams))
encoder_input = tf.nn.dropout(encoder_input,
rate=hparams.layer_prepostprocess_dropout)
encoder_output = transformer_encoder(encoder_input,
encoder_self_attention_bias,
hparams)
return encoder_output, encoder_decoder_attention_bias
def transformer_decoder_layers(name,
n_layers,
decoder_input,
**kwargs):
"""A transformation block composed of transformer decoder layers."""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
hparams = kwargs["hparams"]
outputs = decoder_input
with tf.variable_scope("decoder", reuse=tf.AUTO_REUSE):
for layer_idx in range(n_layers):
outputs = transformer_decoder_layer(
decoder_input=outputs,
layer_idx=layer_idx,
**kwargs)
outputs = common_layers.layer_preprocess(outputs, hparams)
return outputs
def posterior(
name, hparams, targets, targets_mask, decoder_self_attention_bias,
**kwargs):
"""Compute mu and sigma for diagonal normal posterior q(z|x,y)."""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
decoder_input = drop_2d(targets, hparams.mode, hparams.posterior_2d_dropout)
decoder_input = common_attention.add_timing_signal_1d(decoder_input)
decoder_input = tf.nn.dropout(decoder_input,
rate=hparams.layer_prepostprocess_dropout)
decoder_output = transformer_decoder_layers(
"block",
n_layers=hparams.n_posterior_layers,
decoder_input=decoder_input,
hparams=hparams,
decoder_self_attention_bias=decoder_self_attention_bias,
**kwargs)
decoder_output = gops.dense_weightnorm(
"h2o_out", decoder_output, hparams.latent_size * 2, targets_mask,
init_scale=0.0, init=False)
return decoder_output
def cond_prior(
name, hparams, decoder_input, targets_mask, output_size,
decoder_self_attention_bias, init_scale=0.0, **kwargs):
"""Compute hidden states for parameters for conditional prior."""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
decoder_input = common_attention.add_timing_signal_1d(decoder_input)
decoder_input = tf.nn.dropout(decoder_input,
rate=hparams.layer_prepostprocess_dropout)
decoder_output = transformer_decoder_layers(
"block",
n_layers=hparams.n_posterior_layers,
decoder_input=decoder_input,
hparams=hparams,
decoder_self_attention_bias=decoder_self_attention_bias,
**kwargs)
decoder_output = gops.dense_weightnorm(
"h2o_out", decoder_output, output_size, targets_mask,
init_scale=init_scale, init=False)
return decoder_output
def decoder(name, latents, hparams, decoder_self_attention_bias, **kwargs):
"""Compute final hidden states for p(y|z,x)."""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
decoder_input = drop_2d(latents, hparams.mode, hparams.decoder_2d_dropout)
if hparams.pos_attn:
decoder_input = gops.positional_attention(
"pos_attn", decoder_input, decoder_self_attention_bias, hparams)
else:
decoder_input = common_attention.add_timing_signal_1d(decoder_input)
if common_layers.shape_list(latents)[-1] != hparams.hidden_size:
decoder_input = gops.dense("lat2hid", latents, hparams.hidden_size)
decoder_output = transformer_decoder_layers(
"block",
n_layers=hparams.n_decoder_layers,
decoder_input=decoder_input,
hparams=hparams,
decoder_self_attention_bias=decoder_self_attention_bias,
**kwargs)
batch_size, targets_length = common_layers.shape_list(decoder_output)[:2]
decoder_output = tf.reshape(
decoder_output, [batch_size, targets_length, 1, hparams.hidden_size])
# Expand since t2t expects 4d tensors.
return decoder_output
def drop_2d(targets, mode, dropout_p):
"""Dropout in 2D."""
if dropout_p > 0 and mode == tf.estimator.ModeKeys.TRAIN:
batch_size, targets_length, hidden_size = common_layers.shape_list(targets)
mask_prob = tf.random_uniform(
shape=(batch_size, targets_length), minval=0.0, maxval=1.0)
mask_prob = tf.tile(mask_prob[..., tf.newaxis], [1, 1, hidden_size])
scale = 1 / (1 - dropout_p)
targets_noisy = tf.where(
mask_prob > dropout_p, targets * scale, tf.zeros_like(targets))
return targets_noisy
return targets
def sequence_mask(length, hparams):
dtype = get_dtype(hparams)
return tf.sequence_mask(length, dtype=dtype)
def get_padding(mask, hparams):
dtype = get_dtype(hparams)
return tf.cast(tf.equal(mask, 0.0), dtype=dtype)
def get_dtype(hparams):
if hparams.activation_dtype == "float32":
return tf.float32
elif hparams.activation_dtype == "float64":
return tf.float64
elif hparams.activation_dtype == "bfloat16":
return tf.bfloat16
else:
return None
def lenpred_mlp(name, logits, hidden_size, bound):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
logits = tf.layers.dense(logits, hidden_size)
logits = tf.nn.elu(logits)
logits = tf.layers.dense(logits, hidden_size)
logits = tf.nn.elu(logits)
logits = tf.layers.dense(logits, bound * 2 + 1)
return logits
def predict_target_lengths(
encoder_output, inputs_mask, hparams, length_diff=None):
"""Predict target lengths."""
bound = hparams.lendiff_bound
inputs_length = tf.cast(tf.reduce_sum(inputs_mask, 1), tf.int32)
targets_length = inputs_length
loss = None
if hparams.predict_target_length:
encoder_output = gops.reduce_mean_over_l(encoder_output, inputs_mask)
logits = tf.stop_gradient(encoder_output)
logits = lenpred_mlp("lenpred", logits, hparams.hidden_size, bound)
if length_diff is not None:
labels = tf.maximum(tf.minimum(length_diff, bound), -bound)
labels = tf.cast(labels + bound, tf.int32)
labels = tf.stop_gradient(labels)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
loss = tf.reduce_mean(loss)
diff_pred = tf.argmax(logits, 1)
diff_pred = tf.cast(diff_pred - bound, tf.int32)
targets_length = inputs_length + diff_pred
targets_length = tf.maximum(targets_length, 1)
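    # Round the predicted length up to the next multiple of 4; the latent flow
    # squeezes along the length dimension, so lengths are assumed to need to
    # divide evenly by this factor.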
divi = 4
targets_length = tf.ceil(targets_length / divi) * divi
targets_length = tf.cast(targets_length, tf.int32)
return targets_length, loss
def lenpred_stats(targets_length_pred, targets_length):
lenpred_diff = tf.abs(targets_length_pred - tf.cast(targets_length, tf.int32))
lenpred_acc = tf.cast(tf.equal(lenpred_diff, 0), tf.float32)
lenpred_acc = tf.reduce_mean(lenpred_acc)
lenpred_acc5 = tf.cast(tf.less_equal(lenpred_diff, 5), tf.float32)
lenpred_acc5 = tf.reduce_mean(lenpred_acc5)
return lenpred_acc, lenpred_acc5
def save_log_loss(
hparams, targets_mask, numerator, denominator, log_q_z, log_abs_det,
log_p_z_base, z_q, lenpred_loss, targets_length_pred, targets_length):
"""Populate loss dictionary and summary."""
anneal, kl_mask = get_anneal_mask(hparams)
lenpred_acc, lenpred_acc5 = (
lenpred_stats(targets_length_pred, targets_length))
batch_length = tf.reduce_sum(targets_mask)
z_q_norm = gops.reduce_mean_over_bl(
tf.norm(z_q, axis=2, keepdims=True), targets_mask)[0]
log_q_z = gops.reduce_mean_over_bl_sum_over_c(log_q_z, targets_mask)
log_p_z_base = tf.reduce_sum(log_p_z_base, axis=0) / batch_length
log_abs_det = tf.reduce_sum(log_abs_det, axis=0) / batch_length
log_p_z_reg = gops.standard_normal_density(z_q, targets_mask, reduce_sum=True)
log_p_x = -1 * numerator / denominator
log_p_z = log_p_z_base + log_abs_det
kl = log_q_z - log_p_z
kl_reg = log_p_z - log_p_z_reg
elbo = log_p_x - kl
monitor = {
"elbo": elbo,
"kl": kl,
"kl_reg": kl_reg,
"log_p_x": log_p_x,
"log_q_z": log_q_z,
"log_p_z": log_p_z,
"log_p_z_base": log_p_z_base,
"log_abs_det": log_abs_det,
"anneal": anneal,
"z_q_norm": z_q_norm,
"lenpred_acc": lenpred_acc,
"lenpred_acc5": lenpred_acc5,
}
kl = kl * anneal
kl_reg = hparams.kl_reg * kl_reg * anneal
loss_dict = {
"training": -1 * log_p_x,
"kl": kl * kl_mask,
"kl_reg": kl_reg * kl_mask,
}
if lenpred_loss is not None:
monitor["lenpred_loss"] = lenpred_loss
loss_dict["lenpred_loss"] = lenpred_loss
return loss_dict, monitor
def get_anneal_mask(hparams):
"""Get anneal and kl mask."""
startup = hparams.kl_startup_steps
anneal = hparams.kl_anneal_steps
global_step = tf.train.get_global_step()
min_value = hparams.anneal_min_value
step = tf.maximum(global_step - startup, 0)
anneal = common_layers.inverse_lin_decay(
anneal, min_value=min_value, step=step)
kl_mask = tf.less(startup, tf.to_int32(global_step))
kl_mask = tf.cast(kl_mask, tf.float32)
return anneal, kl_mask
def embedding_to_non_padding(emb, dtype=tf.float32):
"""Calculates the padding mask based on which embeddings are not zero."""
emb_sum = tf.reduce_sum(tf.abs(emb), axis=-1)
return tf.cast(tf.not_equal(emb_sum, 0.0), dtype=dtype)
def save_summary(monitor, name):
with tf.name_scope(name):
for key in list(monitor.keys()):
tf.summary.scalar(key, monitor[key])
def _global_step(hparams):
"""Adjust global step if a multi-step optimizer is used."""
step = tf.cast(tf.train.get_or_create_global_step(), tf.float32)
multiplier = hparams.optimizer_multistep_accumulate_steps
if not multiplier:
return step
tf.logging.info("Dividing global step by %d for multi-step optimizer."
% multiplier)
return step / tf.cast(multiplier, tf.float32)
def learning_rate_schedule(hparams):
"""Learning rate schedule based on hparams."""
mlperf_log.transformer_print(key=mlperf_log.OPT_LR, deferred=True)
mlperf_log.transformer_print(
key=mlperf_log.OPT_LR_WARMUP_STEPS,
value=hparams.learning_rate_warmup_steps)
step_num = _global_step(hparams)
# Simulate pretraining the encoder, decoder and posterior with the same
# learning rate schedule, and then restoring the parameters.
  # Using `warm_start_from` is not compatible with actnorm DDI on TPUs.
step_num = tf.where(
step_num < hparams.kl_startup_steps,
step_num,
step_num - hparams.kl_startup_steps)
schedule_string = hparams.learning_rate_schedule
names = schedule_string.split("*")
names = [name.strip() for name in names if name.strip()]
ret = tf.constant(1.0)
for name in names:
ret *= lr.learning_rate_factor(name, step_num, hparams)
return ret
def prepare_for_iw(x, k):
"""Prepare feature for importance sampling."""
batch_size = common_layers.shape_list(x)[0]
remaining_shape = common_layers.shape_list(x)[1:]
multiplier = [1] * x.shape.rank
x = tf.tile(x[tf.newaxis, ...], [k] + multiplier)
x = tf.reshape(x, [k * batch_size] + remaining_shape)
return x
def unprepare_for_iw(x, k):
"""Unprepare feature for importance sampling."""
batch_size_times_k = common_layers.shape_list(x)[0]
remaining_shape = common_layers.shape_list(x)[1:]
x = tf.reshape(x, [k, batch_size_times_k // k] + remaining_shape)
return x
def generic_loss(top_out, targets, model_hparams, vocab_size, weights_fn):
"""Compute loss numerator and denominator for one shard of output."""
del vocab_size # unused arg
logits = top_out
logits = common_attention.maybe_upcast(logits, hparams=model_hparams)
cutoff = getattr(model_hparams, "video_modality_loss_cutoff", 0.0)
return common_layers.padded_cross_entropy(
logits,
targets,
model_hparams.label_smoothing,
cutoff=cutoff,
weights_fn=weights_fn,
reduce_sum=False)
|
the-stack_106_26172 | # Copyright 2017 the pycolab Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example implementation of the classic four-rooms scenario."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import curses
import sys
from pycolab import ascii_art
from pycolab import human_ui
from pycolab.prefab_parts import sprites as prefab_sprites
GAME_ART = ['#############',
'# # #',
'# # #',
'# # #',
'# #',
'# # #',
'#### ###### #',
'# # #',
'# # #',
'# #',
'# # #',
'# P # #',
'#############']
def make_game():
"""Builds and returns a four-rooms game."""
return ascii_art.ascii_art_to_game(
GAME_ART, what_lies_beneath=' ',
sprites={'P': PlayerSprite})
class PlayerSprite(prefab_sprites.MazeWalker):
"""A `Sprite` for our player.
This `Sprite` ties actions to going in the four cardinal directions. If we
reach a magical location (in this example, (4, 3)), the agent receives a
  reward of 1 and the episode terminates.
"""
def __init__(self, corner, position, character):
"""Inform superclass that we can't walk through walls."""
super(PlayerSprite, self).__init__(
corner, position, character, impassable='#')
def update(self, actions, board, layers, backdrop, things, the_plot):
del layers, backdrop, things # Unused.
# Apply motion commands.
if actions == 0: # walk upward?
self._north(board, the_plot)
elif actions == 1: # walk downward?
self._south(board, the_plot)
elif actions == 2: # walk leftward?
self._west(board, the_plot)
elif actions == 3: # walk rightward?
self._east(board, the_plot)
# See if we've found the mystery spot.
if self.position == (4, 3):
the_plot.add_reward(1.0)
the_plot.terminate_episode()
def main(argv=()):
del argv # Unused.
# Build a four-rooms game.
game = make_game()
# Make a CursesUi to play it with.
ui = human_ui.CursesUi(
keys_to_actions={curses.KEY_UP: 0, curses.KEY_DOWN: 1,
curses.KEY_LEFT: 2, curses.KEY_RIGHT: 3,
-1: 4},
delay=200)
# Let the game begin!
ui.play(game)
if __name__ == '__main__':
main(sys.argv)
|
the-stack_106_26173 | from hw2skeleton import cluster
from hw2skeleton import io
import os
def test_similarity():
filename_a = os.path.join("data", "276.pdb")
filename_b = os.path.join("data", "4629.pdb")
activesite_a = io.read_active_site(filename_a)
activesite_b = io.read_active_site(filename_b)
# update this assertion
assert cluster.compute_similarity(activesite_a, activesite_b) == 0.0
def test_partition_clustering():
# tractable subset
pdb_ids = [276, 4629, 10701]
active_sites = []
for id in pdb_ids:
filepath = os.path.join("data", "%i.pdb"%id)
active_sites.append(io.read_active_site(filepath))
# update this assertion
assert cluster.cluster_by_partitioning(active_sites) == []
def test_hierarchical_clustering():
# tractable subset
pdb_ids = [276, 4629, 10701]
active_sites = []
for id in pdb_ids:
filepath = os.path.join("data", "%i.pdb"%id)
active_sites.append(io.read_active_site(filepath))
# update this assertion
assert cluster.cluster_hierarchically(active_sites) == []
|
the-stack_106_26175 |
import pytest
from OpenSSL.SSL import TLSv1_2_METHOD
from OpenSSL.SSL import Error, WantReadError
from OpenSSL.SSL import Context, Connection
from openssl_psk import patch_context
patch_context()
def interact_in_memory(client_conn, server_conn):
"""
Try to read application bytes from each of the two `Connection` objects.
Copy bytes back and forth between their send/receive buffers for as long
as there is anything to copy. When there is nothing more to copy,
return `None`. If one of them actually manages to deliver some application
bytes, return a two-tuple of the connection from which the bytes were read
and the bytes themselves.
"""
wrote = True
while wrote:
# Loop until neither side has anything to say
wrote = False
# Copy stuff from each side's send buffer to the other side's
# receive buffer.
for (read, write) in [(client_conn, server_conn),
(server_conn, client_conn)]:
# Give the side a chance to generate some more bytes, or succeed.
try:
data = read.recv(2 ** 16)
except WantReadError:
# It didn't succeed, so we'll hope it generated some output.
pass
else:
# It did succeed, so we'll stop now and let the caller deal
# with it.
return (read, data)
while True:
# Keep copying as long as there's more stuff there.
try:
dirty = read.bio_read(4096)
except WantReadError:
# Okay, nothing more waiting to be sent. Stop
# processing this send buffer.
break
else:
# Keep track of the fact that someone generated some
# output.
wrote = True
write.bio_write(dirty)
def handshake_in_memory(client_conn, server_conn):
"""
Perform the TLS handshake between two `Connection` instances connected to
each other via memory BIOs.
"""
client_conn.set_connect_state()
server_conn.set_accept_state()
for conn in [client_conn, server_conn]:
try:
conn.do_handshake()
except WantReadError:
pass
interact_in_memory(client_conn, server_conn)
class TestPSK(object):
"""
Tests for PyOpenSSL's PSK support.
"""
def _client_connection(self, callback):
"""
Builds a client connection suitable for using PSK.
:param callback: The callback to register for PSK.
"""
ctx = Context(TLSv1_2_METHOD)
ctx.set_psk_client_callback(callback)
ctx.set_cipher_list(b'PSK')
client = Connection(ctx)
client.set_connect_state()
return client
def _server_connection(self, callback, hint=b'identity_hint'):
"""
Builds a server connection suitable for using PSK.
:param callback: The callback to register for PSK.
:param hint: The server PSK identity hint.
"""
ctx = Context(TLSv1_2_METHOD)
ctx.use_psk_identity_hint(hint)
ctx.set_psk_server_callback(callback)
ctx.set_cipher_list(b'PSK')
server = Connection(ctx)
server.set_accept_state()
return server
def test_valid_handshake(self):
"""
        The client sends its PSK and is verified by the server.
"""
PSK_MAP = {
b'pre_shared_key_identity': b'pre_shared_key',
b'pre_shared_key_identity1': b'pre_shared_key1',
b'pre_shared_key_identity2': b'pre_shared_key2',
b'pre_shared_key_identity3': b'pre_shared_key3',
}
def server_callback(conn, client_identity):
return PSK_MAP[client_identity]
for identity, secret in PSK_MAP.items():
def client_callback(conn, identity_hint):
assert identity_hint == b'identity_hint'
return (identity, secret)
client = self._client_connection(callback=client_callback)
server = self._server_connection(callback=server_callback)
handshake_in_memory(client, server)
def client_callback_bad_identity(conn, identity_hint):
return (secret, secret)
client = self._client_connection(
callback=client_callback_bad_identity)
server = self._server_connection(callback=server_callback)
with pytest.raises(Error):
handshake_in_memory(client, server)
def client_callback_bad_psk(conn, identity_hint):
return (identity, identity)
client = self._client_connection(callback=client_callback_bad_psk)
server = self._server_connection(callback=server_callback)
with pytest.raises(Error):
handshake_in_memory(client, server)
def test_bad_callbacks(self):
"""
If the callbacks are not callable,
raise error.
"""
with pytest.raises(TypeError):
self._server_connection(callback=3)
with pytest.raises(TypeError):
self._client_connection(callback=3)
def test_server_returns_empty_string_terminates_handshake(self):
"""
If the server returns empty string from its callback,
the handshake fails.
"""
def server_callback(*args):
return b''
def client_callback(*args):
return (b'identity', b'psk')
client = self._client_connection(callback=client_callback)
server = self._server_connection(callback=server_callback)
with pytest.raises(Error):
handshake_in_memory(client, server)
def test_empty_string_server_identity_hint(self):
"""
If the server can send an empty identity hint.
"""
def server_callback(conn, client_identity):
assert client_identity == b'client_identity'
return b'pre_shared_key'
def client_callback(conn, identity_hint):
assert identity_hint == b''
return (b'client_identity', b'pre_shared_key')
client = self._client_connection(callback=client_callback)
server = self._server_connection(callback=server_callback, hint=b'')
handshake_in_memory(client, server)
client = self._client_connection(callback=client_callback)
server = self._server_connection(callback=server_callback, hint=b'')
handshake_in_memory(client, server)
def test_non_bytestring_server_identity_hint(self):
"""
        If the server identity hint is not convertible to bytestrings,
raise error.
"""
with pytest.raises(TypeError):
self._server_connection(callback=None, hint=3)
def test_psk_mismatch_terminates_handshake(self):
"""
If the PSKs do not match,
the handshake fails.
"""
def server_callback(*args):
return b'good_psk'
def client_callback(*args):
return (b'identity', b'bad_psk')
client = self._client_connection(callback=client_callback)
server = self._server_connection(callback=server_callback)
with pytest.raises(Error):
handshake_in_memory(client, server)
def test_non_bytestring_terminates_handshakes(self):
"""
        If the PSK info is not convertible to bytestrings,
the handshake fails.
"""
def client_callback(*args):
return (b'identity', b'psk')
def bad_server_callback(*args):
return 3
def bad_identity_client_callback(*args):
return (3, b'bad_psk')
def bad_psk_client_callback(*args):
return (b'identity', 3)
client = self._client_connection(callback=client_callback)
server = self._server_connection(callback=bad_server_callback)
with pytest.raises(Error):
handshake_in_memory(client, server)
client = self._client_connection(callback=bad_identity_client_callback)
server = self._server_connection(callback=bad_server_callback)
with pytest.raises(Error):
handshake_in_memory(client, server)
client = self._client_connection(callback=bad_psk_client_callback)
server = self._server_connection(callback=bad_server_callback)
with pytest.raises(Error):
handshake_in_memory(client, server)
|
the-stack_106_26177 | # This is the default command line for PMARS, with the settings we've specified
PMARS_CLI = "pmars -k -p 8000 -c 80000 -p 8000 -l 100 -d 100"
# This addition to the command line makes PMARS run with just validation
PMARS_NO_GRAPHICS = " -r 0 -v 000"
class MetadataNotFoundException (Exception):
pass
def check_for_name(source):
# Look through the source, trying to find a ;name
for line in source.split('\n'):
if len(line.split()) >= 2 and line.split()[0] == ";name":
            # str.strip(";name") strips characters, not a prefix; take the
            # text after the ";name" token instead.
            return line.split(None, 1)[1].strip()
# If execution reaches this point, there is no ;name line
raise MetadataNotFoundException
def check_for_author(source):
# Look through the source, trying to find a ;author
for line in source.split('\n'):
if len(line.split()) >= 2 and line.split()[0] == ";author":
            # As above, take the text after the ";author" token rather than
            # stripping its characters.
            return line.split(None, 1)[1].strip()
    # If execution reaches this point, there is no ;author line
raise MetadataNotFoundException
def validate(source):
import subprocess
program_data = {'name': "",
'author': "",
'source': ""}
# First, check if ;name, ;author, and ;assert are all present
try:
name = check_for_name(source)
except MetadataNotFoundException:
return (-1, "NAME metaline not found.\nYour program needs to have a" +
" line that starts with ;name and includes" +
" a program name.",
program_data)
try:
author = check_for_author(source)
except MetadataNotFoundException:
return (-1, "AUTHOR metaline not found.\nYour program needs to have" +
"a line that starts with ;author and includes your name.",
program_data)
    # If we've reached this point, the required metadata is present
program_data = {'name': name,
'author': author,
'source': source}
# Open a PMARS process process we can communicate with
p = subprocess.Popen((PMARS_CLI + PMARS_NO_GRAPHICS + ' -').split(),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
# Send the program we need to validate, and get the output
out = str(p.communicate(input=bytes(source, encoding="UTF-8"))[0],
encoding="UTF-8") + "<br />"
retval = p.returncode
return (retval, out, program_data)
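# Minimal usage sketch (hypothetical warrior source), assuming the pmars binary
# referenced by PMARS_CLI is installed and on the PATH; validate() returns the
# pmars exit code, its combined output, and the parsed program metadata.
if __name__ == "__main__":
    sample_source = ";name Imp\n;author A. K. Dewdney\nMOV 0, 1\n"
    exit_code, output, metadata = validate(sample_source)
    print(exit_code, metadata["name"], metadata["author"])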
|
the-stack_106_26178 | # --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Xinlei Chen, based on code from Ross Girshick
# --------------------------------------------------------
import tensorflow as tf
import numpy as np
import os
import cv2
import sys
from tqdm import tqdm
import json
from .model.test import im_detect, im_detect_fast
from .newnms.nms import soft_nms
from .nets.vgg16 import vgg16
from .nets.resnet_v1 import resnetv1
def render(class_name, dets, thresh=0.5):
inds = np.where(dets[:, -1] >= thresh)[0]
for i in inds:
bbox = dets[i, :4]
score = dets[i, -1]
yield {'class': class_name, 'bbox': list(bbox.astype(int)), 'score': float(score)}
def detect(sess, net, im_file, mode='normal', cls='person', cls_ind=1):
"""Detect all objects of a single class in an image using pre-computed object proposals."""
im = cv2.imread(im_file)
if mode == 'fast':
scores, boxes = im_detect_fast(sess, net, im)
else:
scores, boxes = im_detect(sess, net, im)
cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes,
cls_scores[:, np.newaxis])).astype(np.float32)
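    # Apply soft-NMS; method=2 selects the Gaussian decay variant in the
    # reference implementation (assumed), re-scoring overlapping boxes instead
    # of discarding them outright.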
dets = soft_nms(dets, method=2)
return dict(image=im_file, objects=list(render(cls, dets)))
tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth = True
NETS = {'vgg16': ('vgg16_faster_rcnn_iter_70000.ckpt',),
'res101': ('res101_faster_rcnn_iter_110000.ckpt',),
'res152':('res152.ckpt',)}
DATASETS= {'pascal_voc': ('voc_2007_trainval',),
'pascal_voc_0712': ('voc_2007_trainval+voc_2012_trainval',),
'coco':('coco_2014_train+coco_2014_valminusminival',)}
demonet = 'res152'
dataset = 'coco'
tfmodel = os.environ.get('TFMODEL', os.path.join('output', demonet, DATASETS[dataset][0], 'default', NETS[demonet][0]))
sess = tf.Session(config=tfconfig)
net = resnetv1(num_layers=152)
net.create_architecture("TEST", 81, tag='default', anchor_scales=[2, 4, 8, 16, 32])
saver = tf.train.Saver()
saver.restore(sess, tfmodel)
im_names = sys.argv[1:]
for im_name in im_names:
json.dump(detect(sess, net, im_name), sys.stdout)
sys.stdout.write('\n')
|
the-stack_106_26179 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Defines the LeapfrogIntegrator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.mcmc.internal import util as mcmc_util
__all__ = [
'LeapfrogIntegrator',
'SimpleLeapfrogIntegrator',
'process_args',
]
@six.add_metaclass(abc.ABCMeta)
class LeapfrogIntegrator(object):
"""Base class for all leapfrog integrators.
[Leapfrog integrators](https://en.wikipedia.org/wiki/Leapfrog_integration)
numerically integrate differential equations of the form:
```none
v' = dv/dt = F(x)
x' = dx/dt = v
```
This class defines minimal requirements for leapfrog integration calculations.
"""
@abc.abstractmethod
def __call__(self, momentum_parts, state_parts, target=None,
target_grad_parts=None, kinetic_energy_fn=None, name=None):
"""Computes the integration.
Args:
momentum_parts: Python `list` of `Tensor`s representing momentum for each
state part.
state_parts: Python `list` of `Tensor`s which collectively representing
the state.
target: Batch of scalar `Tensor` representing the target (i.e.,
unnormalized log prob) evaluated at `state_parts`.
target_grad_parts: Python `list` of `Tensor`s representing the gradient of
`target` with respect to each of `state_parts`.
kinetic_energy_fn: Python callable that can evaluate the kinetic energy
of the given momentum.
name: Python `str` used to group ops created by this function.
Returns:
next_momentum_parts: Python `list` of `Tensor`s representing new momentum.
next_state_parts: Python `list` of `Tensor`s which collectively
representing the new state.
next_target: Batch of scalar `Tensor` representing the target (i.e.,
unnormalized log prob) evaluated at `next_state_parts`.
next_target_grad_parts: Python `list` of `Tensor`s representing the
gradient of `next_target` with respect to each of `next_state_parts`.
"""
raise NotImplementedError('Integrate logic not implemented.')
class SimpleLeapfrogIntegrator(LeapfrogIntegrator):
# pylint: disable=line-too-long
"""Simple leapfrog integrator.
Calling this functor is conceptually equivalent to:
```none
def leapfrog(x, v, eps, L, f, M):
g = lambda x: gradient(f, x)
v[0] = v + eps/2 g(x)
for l = 1...L:
x[l] = x[l-1] + eps * inv(M) @ v[l-1]
v[l] = v[l-1] + eps * g(x[l])
v = v[L] - eps/2 * g(x[L])
return x[L], v
```
where `M = eye(dims(x))`.
(In the future we may support arbitrary covariance `M`.)
#### Examples:
```python
import matplotlib.pyplot as plt
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.mcmc.internal import leapfrog_integrator as leapfrog_impl
tf.enable_v2_behavior()
dims = 10
dtype = tf.float32
target_fn = tfp.distributions.MultivariateNormalDiag(
loc=tf.zeros(dims, dtype)).log_prob
integrator = leapfrog_impl.SimpleLeapfrogIntegrator(
target_fn,
step_sizes=[0.1],
num_steps=3)
momentum = [tf.random.normal([dims], dtype=dtype)]
position = [tf.random.normal([dims], dtype=dtype)]
target = None
target_grad_parts = None
num_iter = int(1e3)
positions = tf.zeros([num_iter, dims], dtype)
for i in range(num_iter):
[momentum, position, target, target_grad_parts] = integrator(
momentum, position, target, target_grad_parts)
positions = tf.tensor_scatter_nd_update(positions, [[i]], position)
plt.plot(positions[:, 0]); # Sinusoidal.
```
"""
# pylint: enable=line-too-long
def __init__(self, target_fn, step_sizes, num_steps):
"""Constructs the LeapfrogIntegrator.
Assumes a simple quadratic kinetic energy function: `0.5 ||momentum||**2`.
Args:
target_fn: Python callable which takes an argument like `*state_parts` and
returns its (possibly unnormalized) log-density under the target
distribution.
step_sizes: Python `list` of `Tensor`s representing the step size for the
leapfrog integrator. Must broadcast with the shape of
`current_state_parts`. Larger step sizes lead to faster progress, but
too-large step sizes make rejection exponentially more likely. When
possible, it's often helpful to match per-variable step sizes to the
standard deviations of the target distribution in each variable.
num_steps: `int` `Tensor` representing number of steps to run
the leapfrog integration. Total progress is roughly proportional to
`step_size * num_steps`.
"""
# Note on per-variable step sizes:
#
# Using per-variable step sizes is equivalent to using the same step
# size for all variables and adding a diagonal mass matrix in the
# kinetic energy term of the Hamiltonian being integrated. This is
# hinted at by Neal (2011) but not derived in detail there.
#
# Let x and v be position and momentum variables respectively.
# Let g(x) be the gradient of `target_fn(x)`.
# Let S be a diagonal matrix of per-variable step sizes.
# Let the Hamiltonian H(x, v) = -target_fn(x) + 0.5 * ||v||**2.
#
# Using per-variable step sizes gives the updates:
#
# v' = v0 + 0.5 * S @ g(x0)
# x1 = x0 + S @ v'
# v1 = v' + 0.5 * S @ g(x1)
#
# Let,
#
# u = inv(S) @ v
#
# for "u'", "u0", and "u1". Multiplying v by inv(S) in the updates above
# gives the transformed dynamics:
#
# u' = inv(S) @ v'
# = inv(S) @ v0 + 0.5 * g(x)
# = u0 + 0.5 * g(x)
#
# x1 = x0 + S @ v'
# = x0 + S @ S @ u'
#
# u1 = inv(S) @ v1
# = inv(S) @ v' + 0.5 * g(x1)
# = u' + 0.5 * g(x1)
#
# These are exactly the leapfrog updates for the Hamiltonian
#
# H'(x, u) = -target_fn(x) + 0.5 * (S @ u).T @ (S @ u)
# = -target_fn(x) + 0.5 * ||v||**2
# = H(x, v).
#
# To summarize:
#
# * Using per-variable step sizes implicitly simulates the dynamics
# of the Hamiltonian H' (which are energy-conserving in H'). We
# keep track of v instead of u, but the underlying dynamics are
# the same if we transform back.
# * The value of the Hamiltonian H'(x, u) is the same as the value
# of the original Hamiltonian H(x, v) after we transform back from
# u to v.
# * Sampling v ~ N(0, I) is equivalent to sampling u ~ N(0, S**-2).
#
# So using per-variable step sizes in HMC will give results that are
# exactly identical to explicitly using a diagonal mass matrix.
self._target_fn = target_fn
self._step_sizes = step_sizes
self._num_steps = num_steps
@property
def target_fn(self):
return self._target_fn
@property
def step_sizes(self):
return self._step_sizes
@property
def num_steps(self):
return self._num_steps
def __call__(self,
momentum_parts,
state_parts,
target=None,
target_grad_parts=None,
kinetic_energy_fn=None,
name=None):
"""Applies `num_steps` of the leapfrog integrator.
Args:
momentum_parts: Python `list` of `Tensor`s representing momentum for each
state part.
state_parts: Python `list` of `Tensor`s which collectively representing
the state.
target: Batch of scalar `Tensor` representing the target (i.e.,
unnormalized log prob) evaluated at `state_parts`.
target_grad_parts: Python `list` of `Tensor`s representing the gradient of
`target` with respect to each of `state_parts`.
kinetic_energy_fn: Python callable that can evaluate the kinetic energy
of the given momentum. This is typically the negative log probability of
the distribution over the momentum.
name: Python `str` used to group ops created by this function.
Returns:
next_momentum_parts: Python `list` of `Tensor`s representing new momentum.
next_state_parts: Python `list` of `Tensor`s which collectively
representing the new state.
next_target: Batch of scalar `Tensor` representing the target (i.e.,
unnormalized log prob) evaluated at `next_state_parts`.
next_target_grad_parts: Python `list` of `Tensor`s representing the
gradient of `next_target` with respect to each of `next_state_parts`.
"""
with tf.name_scope(name or 'leapfrog_integrate'):
[
momentum_parts,
state_parts,
target,
target_grad_parts,
] = process_args(
self.target_fn,
momentum_parts,
state_parts,
target,
target_grad_parts)
if kinetic_energy_fn is None:
# Avoid adding ops and taking grads, when the implied kinetic energy
# is just 0.5 * ||x||^2, so the gradient is x
get_velocity_parts = lambda x: x
else:
def get_velocity_parts(half_next_momentum_parts):
_, velocity_parts = mcmc_util.maybe_call_fn_and_grads(
kinetic_energy_fn, half_next_momentum_parts)
return velocity_parts
# See Algorithm 1 of "Faster Hamiltonian Monte Carlo by Learning Leapfrog
# Scale", https://arxiv.org/abs/1810.04449.
half_next_momentum_parts = [
v + tf.cast(0.5 * eps, v.dtype) * tf.cast(g, v.dtype)
for v, eps, g
in zip(momentum_parts, self.step_sizes, target_grad_parts)]
[
_,
next_half_next_momentum_parts,
next_state_parts,
next_target,
next_target_grad_parts,
] = tf.while_loop(
cond=lambda i, *_: i < self.num_steps,
body=lambda i, *args: [i + 1] + list(_one_step( # pylint: disable=no-value-for-parameter,g-long-lambda
self.target_fn, self.step_sizes, get_velocity_parts, *args)),
loop_vars=[
tf.zeros_like(self.num_steps, name='iter'),
half_next_momentum_parts,
state_parts,
target,
target_grad_parts,
])
next_momentum_parts = [
v - tf.cast(0.5 * eps, v.dtype) * tf.cast(g, v.dtype) # pylint: disable=g-complex-comprehension
for v, eps, g
in zip(next_half_next_momentum_parts,
self.step_sizes,
next_target_grad_parts)
]
return (
next_momentum_parts,
next_state_parts,
next_target,
next_target_grad_parts,
)
def _one_step(
target_fn,
step_sizes,
get_velocity_parts,
half_next_momentum_parts,
state_parts,
target,
target_grad_parts):
"""Body of integrator while loop."""
with tf.name_scope('leapfrog_integrate_one_step'):
velocity_parts = get_velocity_parts(half_next_momentum_parts)
next_state_parts = []
for state_part, eps, velocity_part in zip(
state_parts, step_sizes, velocity_parts):
next_state_parts.append(
state_part + tf.cast(eps, state_part.dtype) *
tf.cast(velocity_part, state_part.dtype))
[next_target, next_target_grad_parts] = mcmc_util.maybe_call_fn_and_grads(
target_fn, next_state_parts)
if any(g is None for g in next_target_grad_parts):
raise ValueError(
'Encountered `None` gradient.\n'
' state_parts: {}\n'
' next_state_parts: {}\n'
' next_target_grad_parts: {}'.format(
state_parts,
next_state_parts,
next_target_grad_parts))
tensorshape_util.set_shape(next_target, target.shape)
for ng, g in zip(next_target_grad_parts, target_grad_parts):
tensorshape_util.set_shape(ng, g.shape)
next_half_next_momentum_parts = [
v + tf.cast(eps, v.dtype) * tf.cast(g, v.dtype) # pylint: disable=g-complex-comprehension
for v, eps, g
in zip(half_next_momentum_parts, step_sizes, next_target_grad_parts)]
return [
next_half_next_momentum_parts,
next_state_parts,
next_target,
next_target_grad_parts,
]
def process_args(target_fn, momentum_parts, state_parts,
target=None, target_grad_parts=None):
"""Sanitize inputs to `__call__`."""
with tf.name_scope('process_args'):
momentum_parts = [
tf.convert_to_tensor(
v, dtype_hint=tf.float32, name='momentum_parts')
for v in momentum_parts]
state_parts = [
tf.convert_to_tensor(
v, dtype_hint=tf.float32, name='state_parts')
for v in state_parts]
if target is None or target_grad_parts is None:
[target, target_grad_parts] = mcmc_util.maybe_call_fn_and_grads(
target_fn, state_parts)
else:
target = tf.convert_to_tensor(
target, dtype_hint=tf.float32, name='target')
target_grad_parts = [
tf.convert_to_tensor(
g, dtype_hint=tf.float32, name='target_grad_part')
for g in target_grad_parts]
return momentum_parts, state_parts, target, target_grad_parts
|
the-stack_106_26180 | #! /usr/bin/env python3
# GPTune Copyright (c) 2019, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory (subject to receipt of any
# required approvals from the U.S.Dept. of Energy) and the University of
# California, Berkeley. All rights reserved.
#
# If you have questions about your rights to use or distribute this software,
# please contact Berkeley Lab's Intellectual Property Office at [email protected].
#
# NOTICE. This Software was developed under funding from the U.S. Department
# of Energy and the U.S. Government consequently retains certain rights.
# As such, the U.S. Government has been granted for itself and others acting
# on its behalf a paid-up, nonexclusive, irrevocable, worldwide license in
# the Software to reproduce, distribute copies to the public, prepare
# derivative works, and perform publicly and display publicly, and to permit
# other to do so.
#
################################################################################
"""
Example of invocation of this script:
mpirun -n 1 python <this script> -ntask 1 -nrun 20 -optimization GPTune
where:
    -ntask is the number of tuning tasks
    -nrun is the number of optimization samples per task
    -optimization is the tuner to use: GPTune, opentuner or hpbandster
"""
################################################################################
import sys
import os
import numpy as np
import argparse
import pickle
import mpi4py
from mpi4py import MPI
from array import array
import math
import matplotlib.pyplot as plt
sys.path.insert(0, os.path.abspath(__file__ + "/../../../GPTune/"))
from gptune import * # import all
from autotune.problem import *
from autotune.space import *
from autotune.search import *
from callopentuner import OpenTuner
from callhpbandster import HpBandSter
import math
################################################################################
def objectives(point): # should always use this name for user-defined objective function
#########################################
##### constants defined in TuningProblem
nodes = point['nodes']
cores = point['cores']
#########################################
# # retval = max(-18,np.log(point['quad']**2.0)/np.log(10.0))
# # retval = np.log(point['quad']**2.0)/np.log(10.0)
# retval = point['quad']**2.0
# print(point['quad'], ' impact-z objective: ', retval)
# return retval
quad1 = 0.0 # point['quad']
quad2 = 0.0 # point['quad']
quad3 = point['quad']
quad4 = 0.0 # point['quad']
quad5 = 0.0 # point['quad']
inputfile = point['inputfile']
controlfile = point['controlfile']
nproc = nodes*cores
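    # Round the MPI process count down to the nearest power of two; the
    # spawned ImpactZ executable is assumed to require a power-of-two layout
    # (see the hardcoded note below).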
nproc = 2**(math.floor(math.log(nproc, 2)))
# nproc =16 # hardcoded now, nproc=32 will make the objective function slightly different ...
nthreads = 1
npernode = cores
params = [inputfile, controlfile, 'quad1', quad1, 'quad2', quad2, 'quad3', quad3, 'quad4', quad4, 'quad5', quad5]
os.system("cp "+inputfile+" ./ImpactZ0.in")
os.system("cp "+controlfile+" ./matchquad.in")
""" pass some parameters through environment variables """
info = MPI.Info.Create()
envstr= 'OMP_NUM_THREADS=%d\n' %(nthreads)
# envstr+= 'NREL=%d\n' %(NREL)
info.Set('env',envstr)
info.Set('npernode','%d'%(npernode)) # YL: npernode is deprecated in openmpi 4.0, but no other parameter (e.g. 'map-by') works
""" use MPI spawn to call the executable, and pass the other parameters and inputs through command line """
print('exec', "./ImpactZexe-mpi", 'args', ['%s'%(quad1), '%s'%(quad2), '%s'%(quad3), '%s'%(quad4), '%s'%(quad5)], 'nproc', nproc, 'env', 'OMP_NUM_THREADS=%d' %(nthreads))
comm = MPI.COMM_SELF.Spawn("./ImpactZexe-mpi", args=['%s'%(quad1), '%s'%(quad2), '%s'%(quad3), '%s'%(quad4), '%s'%(quad5)], maxprocs=nproc,info=info)
""" gather the return value using the inter-communicator, also refer to the INPUTDIR/pddrive_spawn.c to see how the return value are communicated """
tmpdata = np.array([0, 0],dtype=np.float64)
comm.Reduce(sendbuf=None, recvbuf=[tmpdata,MPI.DOUBLE],op=MPI.MIN,root=mpi4py.MPI.ROOT)
comm.Disconnect()
# retval = np.log(tmpdata[0])/np.log(10.0)
retval = tmpdata[0]
print(params, ' impact-z objective: ', retval)
return [retval]
def predict_aug(modeler, gt, point,tid): # point is the orginal space
x =point['quad']
xNorm = gt.problem.PS.transform([[x]])
xi0 = gt.problem.PS.inverse_transform(np.array(xNorm, ndmin=2))
xi=xi0[0]
IOrig = gt.data.I[tid]
# point0 = gt.data.D
point2 = {gt.problem.IS[k].name: IOrig[k] for k in range(gt.problem.DI)}
point = {gt.problem.PS[k].name: xi[k] for k in range(gt.problem.DP)}
# point.update(point0)
point.update(point2)
# print("point", point)
xNorm = gt.problem.PS.transform(xi0)[0]
if(gt.problem.models is not None):
if(gt.problem.driverabspath is not None):
modulename = Path(gt.problem.driverabspath).stem # get the driver name excluding all directories and extensions
sys.path.append(gt.problem.driverabspath) # add path to sys
module = importlib.import_module(modulename) # import driver name as a module
else:
raise Exception('performance models require passing driverabspath to GPTune')
# modeldata= self.problem.models(point)
modeldata= module.models(point)
xNorm = np.hstack((xNorm,modeldata)) # YL: here tmpdata in the normalized space, but modeldata is the in the original space
# print(xNorm)
(mu, var) = modeler[0].predict(xNorm, tid=tid)
return (mu, var)
def main():
# Parse command line arguments
args = parse_args()
# Extract arguments
ntask = args.ntask
optimization = args.optimization
nrun = args.nrun
TUNER_NAME = args.optimization
(machine, processor, nodes, cores) = GetMachineConfiguration()
print ("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))
os.environ['MACHINE_NAME'] = machine
os.environ['TUNER_NAME'] = TUNER_NAME
os.system("cp ./IMPACT-Z/build/ImpactZexe-mpi .")
nprocmax = nodes*cores
inputfiles = ["ImpactZ.in_test1","ImpactZ.in_test2"]
controlfiles = ["matchquad.in_test1","matchquad.in_test2"]
# Task parameters
inputfile = Categoricalnorm (inputfiles, transform="onehot", name="inputfile")
controlfile = Categoricalnorm (controlfiles, transform="onehot", name="controlfile")
# Input parameters
# we know that XX = x00*(1+quad) has range [-50,50], so adjust range of quad accordingly
file1 = open('matchquad.in_test1', 'r')
Lines = file1.readlines()
npara = int(Lines[0].split()[0])
res = [i for i in Lines[-1].split()]
b1 = [-50.0/float(res[i])-1.0 for i in range(npara)]
b2 = [50.0/float(res[i])-1.0 for i in range(npara)]
lb = [min(b1[i],b2[i]) for i in range(npara)]
ub = [max(b1[i],b2[i]) for i in range(npara)]
# quad1 = Real (lb[0], ub[0], transform="normalize", name="quad1")
# quad2 = Real (lb[1], ub[1], transform="normalize", name="quad2")
# quad3 = Real (lb[2], ub[2], transform="normalize", name="quad3")
# quad4 = Real (lb[3], ub[3], transform="normalize", name="quad4")
# quad5 = Real (lb[4], ub[4], transform="normalize", name="quad5")
quad = Real (-1, 1, transform="normalize", name="quad")
# quad2 = Real (-1, 1, transform="normalize", name="quad2")
# quad3 = Real (-1, 1, transform="normalize", name="quad3")
# quad4 = Real (-1, 1, transform="normalize", name="quad4")
# quad5 = Real (-1, 1, transform="normalize", name="quad5")
# Output parameters
mismatch = Real (float("-Inf") , float("Inf"),name="mismatch")
IS = Space([inputfile,controlfile])
# PS = Space([quad1, quad2, quad3, quad4, quad5])
PS = Space([quad])
OS = Space([mismatch])
constraints = {}
models = {}
constants={"nodes":nodes,"cores":cores}
""" Print all input and parameter samples """
print(IS, PS, OS, constraints, models)
problem = TuningProblem(IS, PS, OS, objectives, constraints, None, constants=constants)
computer = Computer(nodes = nodes, cores = cores, hosts = None)
""" Set and validate options """
options = Options()
options['model_processes'] = 1
# options['model_threads'] = 1
options['model_restarts'] = 1
# options['search_multitask_processes'] = 1
# options['model_restart_processes'] = 1
options['distributed_memory_parallelism'] = False
options['shared_memory_parallelism'] = False
options['model_class'] = 'Model_LCM' # 'Model_GPy_LCM'
options['verbose'] = False
options['search_pop_size'] = 10000
options['sample_class'] = 'SampleOpenTURNS'
options.validate(computer = computer)
# """ Building MLA with the given list of tasks """
# giventask = [[np.random.choice(matrices,size=1)[0]] for i in range(ntask)]
giventask = [["ImpactZ.in_test1","matchquad.in_test1"]]
# giventask = [["big.rua"]]
data = Data(problem)
# Pdefault = [0,0,0,0,0]
# data.P = [[Pdefault]] * ntask
if(TUNER_NAME=='GPTune'):
gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__))
NI = len(giventask)
NS = nrun
(data, model, stats) = gt.MLA(NS=NS, NI=NI, Igiven=giventask, NS1=int(NS/2))
print("stats: ", stats)
""" Print all input and parameter samples """
for tid in range(NI):
print("tid: %d"%(tid))
print(" inputfile:%s controlfile:%s"%(data.I[tid][0],data.I[tid][1]))
print(" Ps ", data.P[tid])
print(" Os ", data.O[tid].tolist())
print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
# fig = plt.figure(figsize=[12.8, 9.6])
x = np.arange(-1, 1., 0.0001)
for tid in range(len(data.I)):
fig = plt.figure(figsize=[12.8, 9.6])
p = data.I[tid]
t = p[0]
I_orig=p
kwargst = {IS[k].name: I_orig[k] for k in range(len(IS))}
# y=np.zeros([len(x),1])
y_mean=np.zeros([len(x)])
y_std=np.zeros([len(x)])
for i in range(len(x)):
P_orig=[x[i]]
kwargs = {PS[k].name: P_orig[k] for k in range(len(PS))}
kwargs.update(kwargst)
kwargs.update(constants)
# y[i]=objectives(kwargs)
if(TUNER_NAME=='GPTune'):
(y_mean[i],var) = predict_aug(model, gt, kwargs,tid)
y_std[i]=np.sqrt(var)
# print(y_mean[i],y_std[i],y[i])
fontsize=40
plt.rcParams.update({'font.size': 40})
# plt.plot(x, y, 'b',lw=2,label='true')
plt.plot(x, y_mean, 'k', lw=3, zorder=9, label='prediction')
plt.fill_between(x, y_mean - y_std, y_mean + y_std,alpha=0.2, color='k')
# print(data.P[tid])
plt.scatter(data.P[tid], data.O[tid], c='r', s=50, zorder=10, edgecolors=(0, 0, 0),label='sample')
plt.xlabel('x',fontsize=fontsize+2)
# plt.ylabel('log(y)',fontsize=fontsize+2)
plt.ylabel('y',fontsize=fontsize+2)
# plt.title('t=%f'%t,fontsize=fontsize+2)
# print('t:',t,'x:',x[np.argmin(y)],'ymin:',y.min())
# legend = plt.legend(loc='upper center', shadow=True, fontsize='x-large')
legend = plt.legend(loc='upper right', shadow=False, fontsize=fontsize)
# annot_min(x,y)
# plt.show()
plt.show(block=False)
plt.pause(0.5)
# input("Press [enter] to continue.")
fig.savefig('surrogate1D.pdf')
if(TUNER_NAME=='opentuner'):
NI = len(giventask)
NS = nrun
(data,stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer, run_id="OpenTuner", niter=1, technique=None)
print("stats: ", stats)
""" Print all input and parameter samples """
for tid in range(NI):
print("tid: %d"%(tid))
print(" inputfile:%s controlfile:%s"%(data.I[tid][0],data.I[tid][1]))
print(" Ps ", data.P[tid])
print(" Os ", data.O[tid].tolist())
print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
if(TUNER_NAME=='hpbandster'):
NI = len(giventask)
NS = nrun
(data,stats)=HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, run_id="HpBandSter", niter=1)
print("stats: ", stats)
""" Print all input and parameter samples """
for tid in range(NI):
print("tid: %d"%(tid))
print(" inputfile:%s controlfile:%s"%(data.I[tid][0],data.I[tid][1]))
print(" Ps ", data.P[tid])
print(" Os ", data.O[tid].tolist())
print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
def parse_args():
parser = argparse.ArgumentParser()
# Algorithm related arguments
parser.add_argument('-optimization', type=str,default='GPTune',help='Optimization algorithm (opentuner, hpbandster, GPTune)')
parser.add_argument('-ntask', type=int, default=-1, help='Number of tasks')
parser.add_argument('-nrun', type=int, help='Number of runs per task')
args = parser.parse_args()
return args
if __name__ == "__main__":
main()
|
the-stack_106_26182 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for kws_streaming.layers.svdf."""
from absl import logging
import numpy as np
from kws_streaming.layers import modes
from kws_streaming.layers import svdf
from kws_streaming.layers.compat import tf
from kws_streaming.layers.compat import tf1
import kws_streaming.layers.test_utils as tu
from kws_streaming.models import utils
tf1.disable_eager_execution()
class SvdfTest(tu.TestBase):
def _run_non_stream_model(self):
# below model expects that input_data are already initialized in tu.TestBase
# in setUp, by default input_data should have 3 dimensions.
# size of each dimesnion is constant and is defiend by self.weights
mode = modes.Modes.TRAINING
input_tf = tf.keras.layers.Input(shape=(
None,
self.input_data.shape[2],
))
svdf_layer = svdf.Svdf(
units1=self.weights[0].shape[1],
memory_size=self.memory_size,
units2=self.weights[3].shape[1],
activation="linear",
inference_batch_size=self.batch_size,
mode=mode)
output_tf = svdf_layer(inputs=input_tf)
svdf_layer.dense1.set_weights([self.weights[0]])
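    # Reshape the 2-D SVDF time-filter weights into the 4-D depthwise-conv
    # kernel layout expected by the streaming cell; the two expand_dims calls
    # below add the singleton width and channel-multiplier axes (assumed layout).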
depth_cnn_weight = self.weights[1]
depth_cnn_weight = np.expand_dims(depth_cnn_weight, 1)
depth_cnn_weight = np.expand_dims(depth_cnn_weight, 3)
svdf_layer.depth_cnn1.cell.set_weights([depth_cnn_weight, self.weights[2]])
svdf_layer.dense2.set_weights([self.weights[3], self.weights[4]])
model_tf = tf.keras.models.Model(input_tf, output_tf)
# run inference in non streaming mode
output_non_stream_np = model_tf.predict(self.input_data)
return output_non_stream_np, model_tf
def test_streaming_inference_internal_state(self):
output_non_stream_np, _ = self._run_non_stream_model()
mode = modes.Modes.STREAM_INTERNAL_STATE_INFERENCE
input_tf = tf.keras.layers.Input(shape=(
1,
self.input_data.shape[2],
), batch_size=None)
svdf_layer = svdf.Svdf(
units1=self.weights[0].shape[1],
memory_size=self.memory_size,
units2=self.weights[3].shape[1],
activation="linear",
inference_batch_size=self.batch_size,
mode=mode)
output_tf = svdf_layer(inputs=input_tf)
svdf_layer.dense1.set_weights([self.weights[0]])
depth_cnn_weight = self.weights[1]
depth_cnn_weight = np.expand_dims(depth_cnn_weight, 1)
depth_cnn_weight = np.expand_dims(depth_cnn_weight, 3)
input_states_np = np.zeros(svdf_layer.depth_cnn1.get_weights()[2].shape)
svdf_layer.depth_cnn1.set_weights(
[depth_cnn_weight, self.weights[2], input_states_np])
svdf_layer.dense2.set_weights([self.weights[3], self.weights[4]])
model = tf.keras.models.Model(input_tf, output_tf)
for i in range(self.input_data.shape[1]): # loop over every element in time
input_batch_np = self.input_data[:, i, :]
input_batch_np = np.expand_dims(input_batch_np, 1)
output_np = model.predict(input_batch_np)
for b in range(self.input_data.shape[0]): # loop over batch
self.assertAllClose(output_np[b][0], output_non_stream_np[b][i])
def test_streaming_inference_external_state(self):
with tf1.Session() as sess:
output_non_stream_np, model_tf = self._run_non_stream_model()
# input data for streaming stateless model
input_tensors = [
tf.keras.layers.Input(
shape=(
1,
self.input_data.shape[2],
),
batch_size=self.batch_size,
dtype=tf.float32)
]
# convert non streaming model to streaming one with external state
mode = modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE
model_stream = utils.convert_to_inference_model(model_tf, input_tensors,
mode)
      # validate that the model is convertible to tflite
converter = tf1.lite.TFLiteConverter.from_session(
sess, model_stream.inputs, model_stream.outputs)
self.assertTrue(converter.convert())
inputs = []
for s in range(len(model_stream.inputs)):
inputs.append(np.zeros(model_stream.inputs[s].shape, dtype=np.float32))
# streaming emulation: loop over every element in time
for i in range(self.input_data.shape[1]):
input_batch_np = self.input_data[:, i, :]
input_batch_np = np.expand_dims(input_batch_np, 1)
inputs[0] = input_batch_np
outputs = model_stream.predict(inputs)
        # feed the output states back in as the input states for the next step
for s in range(1, len(model_stream.inputs)):
inputs[s] = outputs[s]
for b in range(self.input_data.shape[0]): # loop over batch
self.assertAllClose(outputs[0][b][0], output_non_stream_np[b][i])
def test_training(self):
# Test stateful svdf layer in training mode.
# create training model and run inference
output_np, model = self._run_non_stream_model()
# compile and train model
model.compile(
optimizer=tf.keras.optimizers.RMSprop(
lr=0.001, rho=0.9, epsilon=None, decay=0.0),
loss="mse")
model.summary()
res = model.fit(self.input_data, output_np)
logging.info("%f", res.history["loss"][0])
self.assertLess(res.history["loss"][0], 0.1)
if __name__ == "__main__":
tf.test.main()
|
the-stack_106_26183 | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core.bbox.iou_calculators import bbox_overlaps
from mmdet.core.bbox.transforms import bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh
from .builder import MATCH_COST
@MATCH_COST.register_module()
class BBoxL1Cost:
"""BBoxL1Cost.
Args:
weight (int | float, optional): loss_weight
        box_format (str, optional): 'xyxy' for DETR, 'xywh' for Sparse_RCNN
        smooth (bool, optional): If True, apply a smooth-L1 (Huber-style)
            transform to the elementwise L1 cost. Defaults to False.
        beta (float, optional): Transition point of the smooth-L1 transform.
            Defaults to 1.0.
Examples:
>>> from mmdet.core.bbox.match_costs.match_cost import BBoxL1Cost
>>> import torch
>>> self = BBoxL1Cost()
>>> bbox_pred = torch.rand(1, 4)
        >>> gt_bboxes = torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]])
        >>> self(bbox_pred, gt_bboxes)
tensor([[1.6172, 1.6422]])
"""
    def __init__(self, weight=1., box_format='xyxy', smooth=False, beta=1.0):
self.weight = weight
assert box_format in ['xyxy', 'xywh']
self.box_format = box_format
self.smooth = smooth
self.beta = beta
def __call__(self, bbox_pred, gt_bboxes):
"""
Args:
bbox_pred (Tensor): Predicted boxes with normalized coordinates
(cx, cy, w, h), which are all in range [0, 1]. Shape
[num_query, 4].
gt_bboxes (Tensor): Ground truth boxes with normalized
coordinates (x1, y1, x2, y2). Shape [num_gt, 4].
Returns:
torch.Tensor: bbox_cost value with weight
"""
if self.box_format == 'xywh':
gt_bboxes = bbox_xyxy_to_cxcywh(gt_bboxes)
elif self.box_format == 'xyxy':
bbox_pred = bbox_cxcywh_to_xyxy(bbox_pred)
bbox_cost = torch.cdist(bbox_pred, gt_bboxes, p=1)
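        # Optionally soften the elementwise L1 distances with a smooth-L1
        # (Huber-style) transform before applying the loss weight.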
if self.smooth:
bbox_cost = torch.where(bbox_cost < self.beta, 0.5 * bbox_cost * bbox_cost / self.beta, bbox_cost - 0.5 * self.beta)
return bbox_cost * self.weight
@MATCH_COST.register_module()
class FocalLossCost:
"""FocalLossCost.
Args:
weight (int | float, optional): loss_weight
alpha (int | float, optional): focal_loss alpha
gamma (int | float, optional): focal_loss gamma
eps (float, optional): default 1e-12
Examples:
>>> from mmdet.core.bbox.match_costs.match_cost import FocalLossCost
>>> import torch
>>> self = FocalLossCost()
>>> cls_pred = torch.rand(4, 3)
>>> gt_labels = torch.tensor([0, 1, 2])
>>> factor = torch.tensor([10, 8, 10, 8])
>>> self(cls_pred, gt_labels)
tensor([[-0.3236, -0.3364, -0.2699],
[-0.3439, -0.3209, -0.4807],
[-0.4099, -0.3795, -0.2929],
[-0.1950, -0.1207, -0.2626]])
"""
def __init__(self, weight=1., alpha=0.25, gamma=2, eps=1e-12):
self.weight = weight
self.alpha = alpha
self.gamma = gamma
self.eps = eps
def __call__(self, cls_pred, gt_labels):
"""
Args:
cls_pred (Tensor): Predicted classification logits, shape
[num_query, num_class].
gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).
Returns:
torch.Tensor: cls_cost value with weight
"""
cls_pred = cls_pred.sigmoid()
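        # Matching cost for each (query, gt) pair: the focal term for
        # predicting the gt class as positive minus the term for predicting
        # it as negative.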
neg_cost = -(1 - cls_pred + self.eps).log() * (1 - self.alpha) * cls_pred.pow(self.gamma)
pos_cost = -(cls_pred + self.eps).log() * self.alpha * (1 - cls_pred).pow(self.gamma)
cls_cost = pos_cost[:, gt_labels] - neg_cost[:, gt_labels]
return cls_cost * self.weight
@MATCH_COST.register_module()
class ClassificationCost:
"""ClsSoftmaxCost.
Args:
weight (int | float, optional): loss_weight
Examples:
>>> from mmdet.core.bbox.match_costs.match_cost import \
... ClassificationCost
>>> import torch
>>> self = ClassificationCost()
>>> cls_pred = torch.rand(4, 3)
>>> gt_labels = torch.tensor([0, 1, 2])
>>> factor = torch.tensor([10, 8, 10, 8])
>>> self(cls_pred, gt_labels)
tensor([[-0.3430, -0.3525, -0.3045],
[-0.3077, -0.2931, -0.3992],
[-0.3664, -0.3455, -0.2881],
[-0.3343, -0.2701, -0.3956]])
"""
def __init__(self, weight=1.):
self.weight = weight
def __call__(self, cls_pred, gt_labels):
"""
Args:
cls_pred (Tensor): Predicted classification logits, shape
[num_query, num_class].
gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).
Returns:
torch.Tensor: cls_cost value with weight
"""
        # Following the official DETR repo, instead of the NLL used in the
        # loss, we approximate it with 1 - cls_score[gt_label].
        # The 1 is a constant that doesn't change the matching,
        # so it can be omitted.
cls_score = cls_pred.softmax(-1)
cls_cost = -cls_score[:, gt_labels]
return cls_cost * self.weight
@MATCH_COST.register_module()
class IoUCost:
"""IoUCost.
Args:
iou_mode (str, optional): iou mode such as 'iou' | 'giou'
weight (int | float, optional): loss weight
Examples:
>>> from mmdet.core.bbox.match_costs.match_cost import IoUCost
>>> import torch
>>> self = IoUCost()
>>> bboxes = torch.FloatTensor([[1,1, 2, 2], [2, 2, 3, 4]])
>>> gt_bboxes = torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]])
>>> self(bboxes, gt_bboxes)
tensor([[-0.1250, 0.1667],
[ 0.1667, -0.5000]])
"""
def __init__(self, iou_mode='giou', weight=1.):
self.weight = weight
self.iou_mode = iou_mode
def __call__(self, bboxes, gt_bboxes):
"""
Args:
bboxes (Tensor): Predicted boxes with unnormalized coordinates
(x1, y1, x2, y2). Shape [num_query, 4].
gt_bboxes (Tensor): Ground truth boxes with unnormalized
coordinates (x1, y1, x2, y2). Shape [num_gt, 4].
Returns:
torch.Tensor: iou_cost value with weight
"""
# overlaps: [num_bboxes, num_gt]
overlaps = bbox_overlaps(bboxes, gt_bboxes, mode=self.iou_mode, is_aligned=False)
# The 1 is a constant that doesn't change the matching, so omitted.
iou_cost = -overlaps
return iou_cost * self.weight
|
the-stack_106_26184 | import unittest
import numpy as np
from fastestimator.op.numpyop.univariate import WordtoId
from fastestimator.test.unittest_util import is_equal
class TestWordToId(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.map_dict = {'a': 0, 'b': 11, 'test': 90, 'op': 25, 'c': 100, 'id': 10, 'word': 55, 'to': 5}
cls.single_input = [['a', 'b', 'test', 'op']]
cls.single_output = [np.array([0, 11, 90, 25])]
cls.multi_input = [['test', 'op', 'c'], ['word', 'to', 'id']]
cls.multi_output = [np.array([90, 25, 100]), np.array([55, 5, 10])]
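    # Callable mapping used as an alternative to the dict above;
    # out-of-vocabulary tokens are mapped to -1.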
def mapping_func(self, seq):
seq_ids = []
for token in seq:
if token in self.map_dict:
seq_ids.append(self.map_dict[token])
else:
seq_ids.append(-1)
return seq_ids
def test_single_input(self):
op = WordtoId(inputs='x', outputs='x', mapping=self.map_dict)
data = op.forward(data=self.single_input, state={})
self.assertTrue(is_equal(data, self.single_output))
def test_single_input_mapping_function(self):
op = WordtoId(inputs='x', outputs='x', mapping=self.mapping_func)
data = op.forward(data=self.single_input, state={})
self.assertTrue(is_equal(data, self.single_output))
def test_multi_input(self):
op = WordtoId(inputs='x', outputs='x', mapping=self.map_dict)
data = op.forward(data=self.multi_input, state={})
self.assertTrue(is_equal(data, self.multi_output))
|
the-stack_106_26185 | from django import forms
from .models import Snippet
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit, ButtonHolder, Field, Div, HTML
class SnippetForm(forms.ModelForm):
helper = FormHelper()
    class Meta:
model = Snippet
widgets = {
'title': forms.TextInput(attrs={'placeholder': 'Snippet title'}),
'code': forms.Textarea(attrs={'placeholder': 'Your snippet code'}),
}
        fields = ['title', 'code', 'lang']  # list keeps the field order deterministic
labels = {
'title': '',
'code': '',
'lang': '',
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
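        # Build the crispy-forms layout: Tailwind-styled fields, a reCAPTCHA
        # widget and a submit button wrapped in a rounded container.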
self.helper.layout = Layout(
Div(
Field('title', css_class='focus:outline-none focus:ring-2 focus:ring-indigo-500 focus:border-transparent', ),
Field('code', css_class='focus:outline-none focus:ring-2 focus:ring-indigo-500 focus:border-transparent'),
Field('lang', css_class='focus:ring-2 focus:ring-indigo-500'),
Div(
HTML(
'<div class="g-recaptcha" data-sitekey="6LfzZlAaAAAAANHl7mevl6PaGUzu1S9YquYL77jX"></div>'),
Submit('submit', 'Submit', css_class='w-full py-6 border-2 bg-white border-indigo-500 text-indigo-500 font-bold rounded-md transition duration-300 ease-in-out hover:bg-indigo-500 hover:text-white'),
css_class='flex items-center buttons'
)
,
css_class='rounded-3xl mx-auto w-10/12 flex flex-col text-gray-800 border border-gray-300 p-4 shadow-lg max-w-2xl'
)
)
|
the-stack_106_26186 | # -*- coding: utf-8 -*-
from addons.base.models import (BaseOAuthNodeSettings, BaseOAuthUserSettings,
BaseStorageAddon)
from django.db import models
from framework.auth.core import Auth
from osf.models.files import File, Folder, BaseFileNode
from addons.base import exceptions
from addons.s3compat.provider import S3CompatProvider
from addons.s3compat.serializer import S3CompatSerializer
from addons.s3compat.settings import ENCRYPT_UPLOADS_DEFAULT
from addons.s3compat.utils import (bucket_exists,
get_bucket_location_or_error,
get_bucket_names,
find_service_by_host)
class S3CompatFileNode(BaseFileNode):
_provider = 's3compat'
class S3CompatFolder(S3CompatFileNode, Folder):
pass
class S3CompatFile(S3CompatFileNode, File):
version_identifier = 'version'
class UserSettings(BaseOAuthUserSettings):
oauth_provider = S3CompatProvider
serializer = S3CompatSerializer
class NodeSettings(BaseOAuthNodeSettings, BaseStorageAddon):
oauth_provider = S3CompatProvider
serializer = S3CompatSerializer
folder_id = models.TextField(blank=True, null=True)
folder_name = models.TextField(blank=True, null=True)
folder_location = models.TextField(blank=True, null=True)
encrypt_uploads = models.BooleanField(default=ENCRYPT_UPLOADS_DEFAULT)
user_settings = models.ForeignKey(UserSettings, null=True, blank=True, on_delete=models.CASCADE)
@property
def folder_path(self):
return self.folder_name
@property
def display_name(self):
return u'{0}: {1}'.format(self.config.full_name, self.folder_id)
def set_folder(self, folder_id, auth):
if not bucket_exists(self.external_account.provider_id.split('\t')[0],
self.external_account.oauth_key,
self.external_account.oauth_secret, folder_id):
error_message = ('We are having trouble connecting to that bucket. '
'Try a different one.')
raise exceptions.InvalidFolderError(error_message)
self.folder_id = str(folder_id)
host = self.external_account.provider_id.split('\t')[0]
bucket_location = get_bucket_location_or_error(
host,
self.external_account.oauth_key,
self.external_account.oauth_secret,
folder_id
)
self.folder_location = bucket_location
try:
service = find_service_by_host(host)
bucket_location = service['bucketLocations'][bucket_location]['name']
except KeyError:
# Unlisted location, Default to the key.
pass
if bucket_location is None or bucket_location == '':
bucket_location = 'Default'
self.folder_name = '{} ({})'.format(folder_id, bucket_location)
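        # NOTE: this assumes find_service_by_host(host) succeeded above; if
        # that lookup itself raised KeyError, `service` would be undefined here.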
self.encrypt_uploads = service.get('serverSideEncryption', True)
self.save()
self.nodelogger.log(action='bucket_linked', extra={'bucket': str(folder_id)}, save=True)
def get_folders(self, **kwargs):
# This really gets only buckets, not subfolders,
# as that's all we want to be linkable on a node.
try:
buckets = get_bucket_names(self)
except Exception:
raise exceptions.InvalidAuthError()
return [
{
'addon': 's3compat',
'kind': 'folder',
'id': bucket,
'name': bucket,
'path': bucket,
'urls': {
'folders': ''
}
}
for bucket in buckets
]
@property
def complete(self):
return self.has_auth and self.folder_id is not None
def authorize(self, user_settings, save=False):
self.user_settings = user_settings
self.nodelogger.log(action='node_authorized', save=save)
def clear_settings(self):
self.folder_id = None
self.folder_name = None
self.folder_location = None
def deauthorize(self, auth=None, log=True):
"""Remove user authorization from this node and log the event."""
self.clear_settings()
self.clear_auth() # Also performs a save
if log:
self.nodelogger.log(action='node_deauthorized', save=True)
def delete(self, save=True):
self.deauthorize(log=False)
super(NodeSettings, self).delete(save=save)
def serialize_waterbutler_credentials(self):
if not self.has_auth:
raise exceptions.AddonError('Cannot serialize credentials for S3 Compatible Storage addon')
host = self.external_account.provider_id.split('\t')[0]
if self.folder_location is not None and len(self.folder_location) > 0:
try:
service = find_service_by_host(host)
host = service['bucketLocations'][self.folder_location]['host']
except KeyError:
# Unlisted location, use default host
pass
return {
'host': host,
'access_key': self.external_account.oauth_key,
'secret_key': self.external_account.oauth_secret,
}
def serialize_waterbutler_settings(self):
if not self.folder_id:
raise exceptions.AddonError('Cannot serialize settings for S3 Compatible Storage addon')
return {
'bucket': self.folder_id,
'encrypt_uploads': self.encrypt_uploads
}
def create_waterbutler_log(self, auth, action, metadata):
url = self.owner.web_url_for('addon_view_or_download_file', path=metadata['path'], provider='s3compat')
self.owner.add_log(
's3compat_{0}'.format(action),
auth=auth,
params={
'project': self.owner.parent_id,
'node': self.owner._id,
'path': metadata['materialized'],
'bucket': self.folder_id,
'urls': {
'view': url,
'download': url + '?action=download'
}
},
)
def after_delete(self, user):
self.deauthorize(Auth(user=user), log=True)
|
the-stack_106_26191 | import abc
import sys
import time
from collections import OrderedDict
from functools import reduce
import numba
import numpy as np
from shapely.geometry import Polygon
from second.core import box_np_ops
from second.core.geometry import (points_in_convex_polygon_3d_jit,
points_in_convex_polygon_jit)
import copy
class BatchSampler:
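    """Draw items from a list in batches, optionally shuffling and reshuffling once the list is exhausted."""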
def __init__(self, sampled_list, name=None, epoch=None, shuffle=True, drop_reminder=False):
self._sampled_list = sampled_list
self._indices = np.arange(len(sampled_list))
if shuffle:
np.random.shuffle(self._indices)
self._idx = 0
self._example_num = len(sampled_list)
self._name = name
self._shuffle = shuffle
self._epoch = epoch
self._epoch_counter = 0
self._drop_reminder = drop_reminder
def _sample(self, num):
if self._idx + num >= self._example_num:
ret = self._indices[self._idx:].copy()
self._reset()
else:
ret = self._indices[self._idx:self._idx + num]
self._idx += num
return ret
def _reset(self):
if self._name is not None:
print("reset", self._name)
if self._shuffle:
np.random.shuffle(self._indices)
self._idx = 0
def sample(self, num):
indices = self._sample(num)
return [self._sampled_list[i] for i in indices]
# return np.random.choice(self._sampled_list, num)
class DataBasePreprocessing:
def __call__(self, db_infos):
return self._preprocess(db_infos)
@abc.abstractclassmethod
def _preprocess(self, db_infos):
pass
class DBFilterByDifficulty(DataBasePreprocessing):
def __init__(self, removed_difficulties):
self._removed_difficulties = removed_difficulties
print(removed_difficulties)
def _preprocess(self, db_infos):
new_db_infos = {}
for key, dinfos in db_infos.items():
new_db_infos[key] = [
info for info in dinfos
if info["difficulty"] not in self._removed_difficulties
]
return new_db_infos
class DBFilterByMinNumPoint(DataBasePreprocessing):
def __init__(self, min_gt_point_dict):
self._min_gt_point_dict = min_gt_point_dict
print(min_gt_point_dict)
def _preprocess(self, db_infos):
for name, min_num in self._min_gt_point_dict.items():
if min_num > 0:
filtered_infos = []
for info in db_infos[name]:
if info["num_points_in_gt"] >= min_num:
filtered_infos.append(info)
db_infos[name] = filtered_infos
return db_infos
class DataBasePreprocessor:
def __init__(self, preprocessors):
self._preprocessors = preprocessors
def __call__(self, db_infos):
for prepor in self._preprocessors:
db_infos = prepor(db_infos)
return db_infos
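# A minimal composition sketch (db_infos assumed to be loaded elsewhere):
#
#   db_prep = DataBasePreprocessor([
#       DBFilterByDifficulty([-1]),
#       DBFilterByMinNumPoint({"Car": 5}),
#   ])
#   db_infos = db_prep(db_infos)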
def random_crop_frustum(bboxes,
rect,
Trv2c,
P2,
max_crop_height=1.0,
max_crop_width=0.9):
num_gt = bboxes.shape[0]
crop_minxy = np.random.uniform(
[1 - max_crop_width, 1 - max_crop_height], [0.3, 0.3],
size=[num_gt, 2])
crop_maxxy = np.ones([num_gt, 2], dtype=bboxes.dtype)
crop_bboxes = np.concatenate([crop_minxy, crop_maxxy], axis=1)
left = np.random.choice([False, True], replace=False, p=[0.5, 0.5])
if left:
crop_bboxes[:, [0, 2]] -= crop_bboxes[:, 0:1]
# crop_relative_bboxes to real bboxes
crop_bboxes *= np.tile(bboxes[:, 2:] - bboxes[:, :2], [1, 2])
crop_bboxes += np.tile(bboxes[:, :2], [1, 2])
C, R, T = box_np_ops.projection_matrix_to_CRT_kitti(P2)
frustums = box_np_ops.get_frustum_v2(crop_bboxes, C)
frustums -= T
# frustums = np.linalg.inv(R) @ frustums.T
frustums = np.einsum('ij, akj->aki', np.linalg.inv(R), frustums)
frustums = box_np_ops.camera_to_lidar(frustums, rect, Trv2c)
return frustums
def filter_gt_box_outside_range(gt_boxes, limit_range):
"""remove gtbox outside training range.
this function should be applied after other prep functions
Args:
gt_boxes ([type]): [description]
limit_range ([type]): [description]
"""
gt_boxes_bv = box_np_ops.center_to_corner_box2d(
gt_boxes[:, [0, 1]], gt_boxes[:, [3, 3 + 1]], gt_boxes[:, 6])
bounding_box = box_np_ops.minmax_to_corner_2d(
np.asarray(limit_range)[np.newaxis, ...])
ret = points_in_convex_polygon_jit(
gt_boxes_bv.reshape(-1, 2), bounding_box)
return np.any(ret.reshape(-1, 4), axis=1)
def filter_gt_box_outside_range_by_center(gt_boxes, limit_range):
"""remove gtbox outside training range.
this function should be applied after other prep functions
Args:
gt_boxes ([type]): [description]
limit_range ([type]): [description]
"""
gt_box_centers = gt_boxes[:, :2]
bounding_box = box_np_ops.minmax_to_corner_2d(
np.asarray(limit_range)[np.newaxis, ...])
ret = points_in_convex_polygon_jit(gt_box_centers, bounding_box)
return ret.reshape(-1)
def filter_gt_low_points(gt_boxes,
points,
num_gt_points,
point_num_threshold=2):
points_mask = np.ones([points.shape[0]], np.bool)
gt_boxes_mask = np.ones([gt_boxes.shape[0]], np.bool)
for i, num in enumerate(num_gt_points):
if num <= point_num_threshold:
masks = box_np_ops.points_in_rbbox(points, gt_boxes[i:i + 1])
masks = masks.reshape([-1])
points_mask &= np.logical_not(masks)
gt_boxes_mask[i] = False
return gt_boxes[gt_boxes_mask], points[points_mask]
def remove_points_in_boxes(points, boxes):
masks = box_np_ops.points_in_rbbox(points, boxes)
points = points[np.logical_not(masks.any(-1))]
return points
def remove_points_outside_boxes(points, boxes):
masks = box_np_ops.points_in_rbbox(points, boxes)
points = points[masks.any(-1)]
return points
def mask_points_in_corners(points, box_corners):
surfaces = box_np_ops.corner_to_surfaces_3d(box_corners)
mask = points_in_convex_polygon_3d_jit(points[:, :3], surfaces)
return mask
@numba.njit
def _rotation_matrix_3d_(rot_mat_T, angle, axis):
rot_sin = np.sin(angle)
rot_cos = np.cos(angle)
rot_mat_T[:] = np.eye(3)
if axis == 1:
rot_mat_T[0, 0] = rot_cos
rot_mat_T[0, 2] = -rot_sin
rot_mat_T[2, 0] = rot_sin
rot_mat_T[2, 2] = rot_cos
elif axis == 2 or axis == -1:
rot_mat_T[0, 0] = rot_cos
rot_mat_T[0, 1] = -rot_sin
rot_mat_T[1, 0] = rot_sin
rot_mat_T[1, 1] = rot_cos
elif axis == 0:
rot_mat_T[1, 1] = rot_cos
rot_mat_T[1, 2] = -rot_sin
rot_mat_T[2, 1] = rot_sin
rot_mat_T[2, 2] = rot_cos
@numba.njit
def _rotation_box2d_jit_(corners, angle, rot_mat_T):
rot_sin = np.sin(angle)
rot_cos = np.cos(angle)
rot_mat_T[0, 0] = rot_cos
rot_mat_T[0, 1] = -rot_sin
rot_mat_T[1, 0] = rot_sin
rot_mat_T[1, 1] = rot_cos
corners[:] = corners @ rot_mat_T
@numba.jit(nopython=True)
def _box_single_to_corner_jit(boxes):
num_box = boxes.shape[0]
corners_norm = np.zeros((4, 2), dtype=boxes.dtype)
corners_norm[1, 1] = 1.0
corners_norm[2] = 1.0
corners_norm[3, 0] = 1.0
corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype)
corners = boxes.reshape(num_box, 1, 5)[:, :, 2:4] * corners_norm.reshape(
1, 4, 2)
rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype)
box_corners = np.zeros((num_box, 4, 2), dtype=boxes.dtype)
for i in range(num_box):
rot_sin = np.sin(boxes[i, -1])
rot_cos = np.cos(boxes[i, -1])
rot_mat_T[0, 0] = rot_cos
rot_mat_T[0, 1] = -rot_sin
rot_mat_T[1, 0] = rot_sin
rot_mat_T[1, 1] = rot_cos
box_corners[i] = corners[i] @ rot_mat_T + boxes[i, :2]
return box_corners
@numba.njit
def noise_per_box(boxes, valid_mask, loc_noises, rot_noises):
# boxes: [N, 5]
# valid_mask: [N]
# loc_noises: [N, M, 3]
# rot_noises: [N, M]
num_boxes = boxes.shape[0]
num_tests = loc_noises.shape[1]
box_corners = box_np_ops.box2d_to_corner_jit(boxes)
current_corners = np.zeros((4, 2), dtype=boxes.dtype)
rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype)
success_mask = -np.ones((num_boxes, ), dtype=np.int64)
# print(valid_mask)
for i in range(num_boxes):
if valid_mask[i]:
for j in range(num_tests):
current_corners[:] = box_corners[i]
current_corners -= boxes[i, :2]
_rotation_box2d_jit_(current_corners, rot_noises[i, j],
rot_mat_T)
current_corners += boxes[i, :2] + loc_noises[i, j, :2]
coll_mat = box_collision_test(
current_corners.reshape(1, 4, 2), box_corners)
coll_mat[0, i] = False
# print(coll_mat)
if not coll_mat.any():
success_mask[i] = j
box_corners[i] = current_corners
break
return success_mask
@numba.njit
def noise_per_box_group(boxes, valid_mask, loc_noises, rot_noises, group_nums):
    # WARNING: this function needs boxes to be sorted by group id.
# boxes: [N, 5]
# valid_mask: [N]
# loc_noises: [N, M, 3]
# rot_noises: [N, M]
num_groups = group_nums.shape[0]
num_boxes = boxes.shape[0]
num_tests = loc_noises.shape[1]
box_corners = box_np_ops.box2d_to_corner_jit(boxes)
max_group_num = group_nums.max()
current_corners = np.zeros((max_group_num, 4, 2), dtype=boxes.dtype)
rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype)
success_mask = -np.ones((num_boxes, ), dtype=np.int64)
# print(valid_mask)
idx = 0
for num in group_nums:
if valid_mask[idx]:
for j in range(num_tests):
for i in range(num):
current_corners[i] = box_corners[i + idx]
current_corners[i] -= boxes[i + idx, :2]
_rotation_box2d_jit_(current_corners[i],
rot_noises[idx + i, j], rot_mat_T)
current_corners[
i] += boxes[i + idx, :2] + loc_noises[i + idx, j, :2]
coll_mat = box_collision_test(
current_corners[:num].reshape(num, 4, 2), box_corners)
for i in range(num): # remove self-coll
coll_mat[i, idx:idx + num] = False
if not coll_mat.any():
for i in range(num):
success_mask[i + idx] = j
box_corners[i + idx] = current_corners[i]
break
idx += num
return success_mask
@numba.njit
def noise_per_box_group_v2_(boxes, valid_mask, loc_noises, rot_noises,
group_nums, global_rot_noises):
    # WARNING: this function needs boxes to be sorted by group id.
# boxes: [N, 5]
# valid_mask: [N]
# loc_noises: [N, M, 3]
# rot_noises: [N, M]
num_boxes = boxes.shape[0]
num_tests = loc_noises.shape[1]
box_corners = box_np_ops.box2d_to_corner_jit(boxes)
max_group_num = group_nums.max()
current_box = np.zeros((1, 5), dtype=boxes.dtype)
current_corners = np.zeros((max_group_num, 4, 2), dtype=boxes.dtype)
dst_pos = np.zeros((max_group_num, 2), dtype=boxes.dtype)
current_grot = np.zeros((max_group_num, ), dtype=boxes.dtype)
dst_grot = np.zeros((max_group_num, ), dtype=boxes.dtype)
rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype)
success_mask = -np.ones((num_boxes, ), dtype=np.int64)
corners_norm = np.zeros((4, 2), dtype=boxes.dtype)
corners_norm[1, 1] = 1.0
corners_norm[2] = 1.0
corners_norm[3, 0] = 1.0
corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype)
corners_norm = corners_norm.reshape(4, 2)
# print(valid_mask)
idx = 0
for num in group_nums:
if valid_mask[idx]:
for j in range(num_tests):
for i in range(num):
current_box[0, :] = boxes[i + idx]
current_radius = np.sqrt(current_box[0, 0]**2 +
current_box[0, 1]**2)
current_grot[i] = np.arctan2(current_box[0, 0],
current_box[0, 1])
dst_grot[
i] = current_grot[i] + global_rot_noises[idx + i, j]
dst_pos[i, 0] = current_radius * np.sin(dst_grot[i])
dst_pos[i, 1] = current_radius * np.cos(dst_grot[i])
current_box[0, :2] = dst_pos[i]
current_box[0, -1] += (dst_grot[i] - current_grot[i])
rot_sin = np.sin(current_box[0, -1])
rot_cos = np.cos(current_box[0, -1])
rot_mat_T[0, 0] = rot_cos
rot_mat_T[0, 1] = -rot_sin
rot_mat_T[1, 0] = rot_sin
rot_mat_T[1, 1] = rot_cos
current_corners[
i] = current_box[0, 2:
4] * corners_norm @ rot_mat_T + current_box[0, :
2]
current_corners[i] -= current_box[0, :2]
_rotation_box2d_jit_(current_corners[i],
rot_noises[idx + i, j], rot_mat_T)
current_corners[
i] += current_box[0, :2] + loc_noises[i + idx, j, :2]
coll_mat = box_collision_test(
current_corners[:num].reshape(num, 4, 2), box_corners)
for i in range(num): # remove self-coll
coll_mat[i, idx:idx + num] = False
if not coll_mat.any():
for i in range(num):
success_mask[i + idx] = j
box_corners[i + idx] = current_corners[i]
loc_noises[i + idx, j, :2] += (
dst_pos[i] - boxes[i + idx, :2])
rot_noises[i + idx, j] += (
dst_grot[i] - current_grot[i])
break
idx += num
return success_mask
@numba.njit
def noise_per_box_v2_(boxes, valid_mask, loc_noises, rot_noises,
global_rot_noises):
# boxes: [N, 5]
# valid_mask: [N]
# loc_noises: [N, M, 3]
# rot_noises: [N, M]
num_boxes = boxes.shape[0]
num_tests = loc_noises.shape[1]
box_corners = box_np_ops.box2d_to_corner_jit(boxes)
current_corners = np.zeros((4, 2), dtype=boxes.dtype)
current_box = np.zeros((1, 5), dtype=boxes.dtype)
rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype)
dst_pos = np.zeros((2, ), dtype=boxes.dtype)
success_mask = -np.ones((num_boxes, ), dtype=np.int64)
corners_norm = np.zeros((4, 2), dtype=boxes.dtype)
corners_norm[1, 1] = 1.0
corners_norm[2] = 1.0
corners_norm[3, 0] = 1.0
corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype)
corners_norm = corners_norm.reshape(4, 2)
for i in range(num_boxes):
if valid_mask[i]:
for j in range(num_tests):
current_box[0, :] = boxes[i]
current_radius = np.sqrt(boxes[i, 0]**2 + boxes[i, 1]**2)
current_grot = np.arctan2(boxes[i, 0], boxes[i, 1])
dst_grot = current_grot + global_rot_noises[i, j]
dst_pos[0] = current_radius * np.sin(dst_grot)
dst_pos[1] = current_radius * np.cos(dst_grot)
current_box[0, :2] = dst_pos
current_box[0, -1] += (dst_grot - current_grot)
rot_sin = np.sin(current_box[0, -1])
rot_cos = np.cos(current_box[0, -1])
rot_mat_T[0, 0] = rot_cos
rot_mat_T[0, 1] = -rot_sin
rot_mat_T[1, 0] = rot_sin
rot_mat_T[1, 1] = rot_cos
current_corners[:] = current_box[0, 2:
4] * corners_norm @ rot_mat_T + current_box[0, :
2]
current_corners -= current_box[0, :2]
_rotation_box2d_jit_(current_corners, rot_noises[i, j],
rot_mat_T)
current_corners += current_box[0, :2] + loc_noises[i, j, :2]
coll_mat = box_collision_test(
current_corners.reshape(1, 4, 2), box_corners)
coll_mat[0, i] = False
if not coll_mat.any():
success_mask[i] = j
box_corners[i] = current_corners
loc_noises[i, j, :2] += (dst_pos - boxes[i, :2])
rot_noises[i, j] += (dst_grot - current_grot)
break
return success_mask
@numba.njit
def points_transform_(points, centers, point_masks, loc_transform,
rot_transform, valid_mask):
num_box = centers.shape[0]
num_points = points.shape[0]
rot_mat_T = np.zeros((num_box, 3, 3), dtype=points.dtype)
for i in range(num_box):
_rotation_matrix_3d_(rot_mat_T[i], rot_transform[i], 2)
for i in range(num_points):
for j in range(num_box):
if valid_mask[j]:
if point_masks[i, j] == 1:
points[i, :3] -= centers[j, :3]
points[i:i + 1, :3] = points[i:i + 1, :3] @ rot_mat_T[j]
points[i, :3] += centers[j, :3]
points[i, :3] += loc_transform[j]
break # only apply first box's transform
@numba.njit
def box3d_transform_(boxes, loc_transform, rot_transform, valid_mask):
num_box = boxes.shape[0]
for i in range(num_box):
if valid_mask[i]:
boxes[i, :3] += loc_transform[i]
boxes[i, 6] += rot_transform[i]
def _select_transform(transform, indices):
result = np.zeros(
(transform.shape[0], *transform.shape[2:]), dtype=transform.dtype)
for i in range(transform.shape[0]):
if indices[i] != -1:
result[i] = transform[i, indices[i]]
return result
@numba.njit
def group_transform_(loc_noise, rot_noise, locs, rots, group_center,
valid_mask):
# loc_noise: [N, M, 3], locs: [N, 3]
# rot_noise: [N, M]
# group_center: [N, 3]
num_try = loc_noise.shape[1]
r = 0.0
x = 0.0
y = 0.0
rot_center = 0.0
for i in range(loc_noise.shape[0]):
if valid_mask[i]:
x = locs[i, 0] - group_center[i, 0]
y = locs[i, 1] - group_center[i, 1]
r = np.sqrt(x**2 + y**2)
# calculate rots related to group center
rot_center = np.arctan2(x, y)
for j in range(num_try):
loc_noise[i, j, 0] += r * (
np.sin(rot_center + rot_noise[i, j]) - np.sin(rot_center))
loc_noise[i, j, 1] += r * (
np.cos(rot_center + rot_noise[i, j]) - np.cos(rot_center))
@numba.njit
def group_transform_v2_(loc_noise, rot_noise, locs, rots, group_center,
grot_noise, valid_mask):
# loc_noise: [N, M, 3], locs: [N, 3]
# rot_noise: [N, M]
# group_center: [N, 3]
num_try = loc_noise.shape[1]
r = 0.0
x = 0.0
y = 0.0
rot_center = 0.0
for i in range(loc_noise.shape[0]):
if valid_mask[i]:
x = locs[i, 0] - group_center[i, 0]
y = locs[i, 1] - group_center[i, 1]
r = np.sqrt(x**2 + y**2)
# calculate rots related to group center
rot_center = np.arctan2(x, y)
for j in range(num_try):
loc_noise[i, j, 0] += r * (
np.sin(rot_center + rot_noise[i, j] + grot_noise[i, j]) -
np.sin(rot_center + grot_noise[i, j]))
loc_noise[i, j, 1] += r * (
np.cos(rot_center + rot_noise[i, j] + grot_noise[i, j]) -
np.cos(rot_center + grot_noise[i, j]))
def set_group_noise_same_(loc_noise, rot_noise, group_ids):
gid_to_index_dict = {}
for i, gid in enumerate(group_ids):
if gid not in gid_to_index_dict:
gid_to_index_dict[gid] = i
for i in range(loc_noise.shape[0]):
loc_noise[i] = loc_noise[gid_to_index_dict[group_ids[i]]]
rot_noise[i] = rot_noise[gid_to_index_dict[group_ids[i]]]
def set_group_noise_same_v2_(loc_noise, rot_noise, grot_noise, group_ids):
gid_to_index_dict = {}
for i, gid in enumerate(group_ids):
if gid not in gid_to_index_dict:
gid_to_index_dict[gid] = i
for i in range(loc_noise.shape[0]):
loc_noise[i] = loc_noise[gid_to_index_dict[group_ids[i]]]
rot_noise[i] = rot_noise[gid_to_index_dict[group_ids[i]]]
grot_noise[i] = grot_noise[gid_to_index_dict[group_ids[i]]]
def get_group_center(locs, group_ids):
num_groups = 0
group_centers = np.zeros_like(locs)
group_centers_ret = np.zeros_like(locs)
group_id_dict = {}
group_id_num_dict = OrderedDict()
for i, gid in enumerate(group_ids):
if gid >= 0:
if gid in group_id_dict:
group_centers[group_id_dict[gid]] += locs[i]
group_id_num_dict[gid] += 1
else:
group_id_dict[gid] = num_groups
num_groups += 1
group_id_num_dict[gid] = 1
group_centers[group_id_dict[gid]] = locs[i]
for i, gid in enumerate(group_ids):
group_centers_ret[
i] = group_centers[group_id_dict[gid]] / group_id_num_dict[gid]
return group_centers_ret, group_id_num_dict
def noise_per_object_v3_(gt_boxes,
points=None,
valid_mask=None,
rotation_perturb=np.pi / 4,
center_noise_std=1.0,
global_random_rot_range=np.pi / 4,
num_try=100,
group_ids=None):
"""random rotate or remove each groundtrutn independently.
use kitti kittiviewer to test this function points_transform_
Args:
gt_boxes: [N, 7], gt box in lidar.points_transform_
points: [M, 4], point cloud in lidar.
"""
num_boxes = gt_boxes.shape[0]
if not isinstance(rotation_perturb, (list, tuple, np.ndarray)):
rotation_perturb = [-rotation_perturb, rotation_perturb]
if not isinstance(global_random_rot_range, (list, tuple, np.ndarray)):
global_random_rot_range = [
-global_random_rot_range, global_random_rot_range
]
enable_grot = np.abs(global_random_rot_range[0] -
global_random_rot_range[1]) >= 1e-3
if not isinstance(center_noise_std, (list, tuple, np.ndarray)):
center_noise_std = [
center_noise_std, center_noise_std, center_noise_std
]
if valid_mask is None:
valid_mask = np.ones((num_boxes, ), dtype=np.bool_)
center_noise_std = np.array(center_noise_std, dtype=gt_boxes.dtype)
loc_noises = np.random.normal(
scale=center_noise_std, size=[num_boxes, num_try, 3])
# loc_noises = np.random.uniform(
# -center_noise_std, center_noise_std, size=[num_boxes, num_try, 3])
rot_noises = np.random.uniform(
rotation_perturb[0], rotation_perturb[1], size=[num_boxes, num_try])
gt_grots = np.arctan2(gt_boxes[:, 0], gt_boxes[:, 1])
grot_lowers = global_random_rot_range[0] - gt_grots
grot_uppers = global_random_rot_range[1] - gt_grots
global_rot_noises = np.random.uniform(
grot_lowers[..., np.newaxis],
grot_uppers[..., np.newaxis],
size=[num_boxes, num_try])
if group_ids is not None:
if enable_grot:
set_group_noise_same_v2_(loc_noises, rot_noises, global_rot_noises,
group_ids)
else:
set_group_noise_same_(loc_noises, rot_noises, group_ids)
group_centers, group_id_num_dict = get_group_center(
gt_boxes[:, :3], group_ids)
if enable_grot:
group_transform_v2_(loc_noises, rot_noises, gt_boxes[:, :3],
gt_boxes[:, 6], group_centers,
global_rot_noises, valid_mask)
else:
group_transform_(loc_noises, rot_noises, gt_boxes[:, :3],
gt_boxes[:, 6], group_centers, valid_mask)
group_nums = np.array(list(group_id_num_dict.values()), dtype=np.int64)
origin = [0.5, 0.5, 0]
gt_box_corners = box_np_ops.center_to_corner_box3d(
gt_boxes[:, :3],
gt_boxes[:, 3:6],
gt_boxes[:, 6],
origin=origin,
axis=2)
if group_ids is not None:
if not enable_grot:
selected_noise = noise_per_box_group(gt_boxes[:, [0, 1, 3, 4, 6]],
valid_mask, loc_noises,
rot_noises, group_nums)
else:
selected_noise = noise_per_box_group_v2_(
gt_boxes[:, [0, 1, 3, 4, 6]], valid_mask, loc_noises,
rot_noises, group_nums, global_rot_noises)
else:
if not enable_grot:
selected_noise = noise_per_box(gt_boxes[:, [0, 1, 3, 4, 6]],
valid_mask, loc_noises, rot_noises)
else:
selected_noise = noise_per_box_v2_(gt_boxes[:, [0, 1, 3, 4, 6]],
valid_mask, loc_noises,
rot_noises, global_rot_noises)
loc_transforms = _select_transform(loc_noises, selected_noise)
rot_transforms = _select_transform(rot_noises, selected_noise)
surfaces = box_np_ops.corner_to_surfaces_3d_jit(gt_box_corners)
if points is not None:
point_masks = points_in_convex_polygon_3d_jit(points[:, :3], surfaces)
points_transform_(points, gt_boxes[:, :3], point_masks, loc_transforms,
rot_transforms, valid_mask)
box3d_transform_(gt_boxes, loc_transforms, rot_transforms, valid_mask)
def noise_per_object_v2_(gt_boxes,
points=None,
valid_mask=None,
rotation_perturb=np.pi / 4,
center_noise_std=1.0,
global_random_rot_range=np.pi / 4,
num_try=100):
"""random rotate or remove each groundtrutn independently.
use kitti kittiviewer to test this function points_transform_
Args:
gt_boxes: [N, 7], gt box in lidar.points_transform_
points: [M, 4], point cloud in lidar.
"""
num_boxes = gt_boxes.shape[0]
if not isinstance(rotation_perturb, (list, tuple, np.ndarray)):
rotation_perturb = [-rotation_perturb, rotation_perturb]
if not isinstance(global_random_rot_range, (list, tuple, np.ndarray)):
global_random_rot_range = [
-global_random_rot_range, global_random_rot_range
]
if not isinstance(center_noise_std, (list, tuple, np.ndarray)):
center_noise_std = [
center_noise_std, center_noise_std, center_noise_std
]
if valid_mask is None:
valid_mask = np.ones((num_boxes, ), dtype=np.bool_)
center_noise_std = np.array(center_noise_std, dtype=gt_boxes.dtype)
loc_noises = np.random.normal(
scale=center_noise_std, size=[num_boxes, num_try, 3])
# loc_noises = np.random.uniform(
# -center_noise_std, center_noise_std, size=[num_boxes, num_try, 3])
rot_noises = np.random.uniform(
rotation_perturb[0], rotation_perturb[1], size=[num_boxes, num_try])
gt_grots = np.arctan2(gt_boxes[:, 0], gt_boxes[:, 1])
grot_lowers = global_random_rot_range[0] - gt_grots
grot_uppers = global_random_rot_range[1] - gt_grots
global_rot_noises = np.random.uniform(
grot_lowers[..., np.newaxis],
grot_uppers[..., np.newaxis],
size=[num_boxes, num_try])
origin = [0.5, 0.5, 0]
gt_box_corners = box_np_ops.center_to_corner_box3d(
gt_boxes[:, :3],
gt_boxes[:, 3:6],
gt_boxes[:, 6],
origin=origin,
axis=2)
if np.abs(global_random_rot_range[0] - global_random_rot_range[1]) < 1e-3:
selected_noise = noise_per_box(gt_boxes[:, [0, 1, 3, 4, 6]],
valid_mask, loc_noises, rot_noises)
else:
selected_noise = noise_per_box_v2_(gt_boxes[:, [0, 1, 3, 4, 6]],
valid_mask, loc_noises, rot_noises,
global_rot_noises)
loc_transforms = _select_transform(loc_noises, selected_noise)
rot_transforms = _select_transform(rot_noises, selected_noise)
if points is not None:
surfaces = box_np_ops.corner_to_surfaces_3d_jit(gt_box_corners)
point_masks = points_in_convex_polygon_3d_jit(points[:, :3], surfaces)
points_transform_(points, gt_boxes[:, :3], point_masks, loc_transforms,
rot_transforms, valid_mask)
box3d_transform_(gt_boxes, loc_transforms, rot_transforms, valid_mask)
def global_scaling(gt_boxes, points, scale=0.05):
if not isinstance(scale, list):
scale = [-scale, scale]
noise_scale = np.random.uniform(scale[0] + 1, scale[1] + 1)
points[:, :3] *= noise_scale
gt_boxes[:, :6] *= noise_scale
return gt_boxes, points
def global_rotation(gt_boxes, points, rotation=np.pi / 4):
if not isinstance(rotation, list):
rotation = [-rotation, rotation]
noise_rotation = np.random.uniform(rotation[0], rotation[1])
points[:, :3] = box_np_ops.rotation_points_single_angle(
points[:, :3], noise_rotation, axis=2)
gt_boxes[:, :3] = box_np_ops.rotation_points_single_angle(
gt_boxes[:, :3], noise_rotation, axis=2)
gt_boxes[:, 6] += noise_rotation
return gt_boxes, points
def random_flip(gt_boxes, points, probability=0.5):
enable = np.random.choice(
[False, True], replace=False, p=[1 - probability, probability])
if enable:
gt_boxes[:, 1] = -gt_boxes[:, 1]
gt_boxes[:, 6] = -gt_boxes[:, 6] + np.pi
points[:, 1] = -points[:, 1]
return gt_boxes, points
def global_scaling_v2(gt_boxes, points, min_scale=0.95, max_scale=1.05):
noise_scale = np.random.uniform(min_scale, max_scale)
points[:, :3] *= noise_scale
gt_boxes[:, :6] *= noise_scale
return gt_boxes, points
def global_rotation_v2(gt_boxes, points, min_rad=-np.pi / 4,
max_rad=np.pi / 4):
noise_rotation = np.random.uniform(min_rad, max_rad)
points[:, :3] = box_np_ops.rotation_points_single_angle(
points[:, :3], noise_rotation, axis=2)
gt_boxes[:, :3] = box_np_ops.rotation_points_single_angle(
gt_boxes[:, :3], noise_rotation, axis=2)
gt_boxes[:, 6] += noise_rotation
return gt_boxes, points
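# A minimal usage sketch (hypothetical array shapes) chaining the global
# augmentations above on a single training sample:
#
#   gt_boxes, points = random_flip(gt_boxes, points)
#   gt_boxes, points = global_rotation_v2(gt_boxes, points, -np.pi / 4, np.pi / 4)
#   gt_boxes, points = global_scaling_v2(gt_boxes, points, 0.95, 1.05)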
@numba.jit(nopython=True)
def box_collision_test(boxes, qboxes, clockwise=True):
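    # Strategy: first prune pairs whose axis-aligned (standup) boxes do not
    # overlap, then test all 4x4 edge pairs for segment intersection, and
    # finally check whether one box fully contains the other.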
N = boxes.shape[0]
K = qboxes.shape[0]
ret = np.zeros((N, K), dtype=np.bool_)
slices = np.array([1, 2, 3, 0])
lines_boxes = np.stack(
(boxes, boxes[:, slices, :]), axis=2) # [N, 4, 2(line), 2(xy)]
lines_qboxes = np.stack((qboxes, qboxes[:, slices, :]), axis=2)
# vec = np.zeros((2,), dtype=boxes.dtype)
boxes_standup = box_np_ops.corner_to_standup_nd_jit(boxes)
qboxes_standup = box_np_ops.corner_to_standup_nd_jit(qboxes)
for i in range(N):
for j in range(K):
# calculate standup first
iw = (min(boxes_standup[i, 2], qboxes_standup[j, 2]) - max(
boxes_standup[i, 0], qboxes_standup[j, 0]))
if iw > 0:
ih = (min(boxes_standup[i, 3], qboxes_standup[j, 3]) - max(
boxes_standup[i, 1], qboxes_standup[j, 1]))
if ih > 0:
for k in range(4):
for l in range(4):
A = lines_boxes[i, k, 0]
B = lines_boxes[i, k, 1]
C = lines_qboxes[j, l, 0]
D = lines_qboxes[j, l, 1]
acd = (D[1] - A[1]) * (C[0] - A[0]) > (
C[1] - A[1]) * (D[0] - A[0])
bcd = (D[1] - B[1]) * (C[0] - B[0]) > (
C[1] - B[1]) * (D[0] - B[0])
if acd != bcd:
abc = (C[1] - A[1]) * (B[0] - A[0]) > (
B[1] - A[1]) * (C[0] - A[0])
abd = (D[1] - A[1]) * (B[0] - A[0]) > (
B[1] - A[1]) * (D[0] - A[0])
if abc != abd:
ret[i, j] = True # collision.
break
if ret[i, j] is True:
break
if ret[i, j] is False:
# now check complete overlap.
# box overlap qbox:
box_overlap_qbox = True
for l in range(4): # point l in qboxes
for k in range(4): # corner k in boxes
vec = boxes[i, k] - boxes[i, (k + 1) % 4]
if clockwise:
vec = -vec
cross = vec[1] * (
boxes[i, k, 0] - qboxes[j, l, 0])
cross -= vec[0] * (
boxes[i, k, 1] - qboxes[j, l, 1])
if cross >= 0:
box_overlap_qbox = False
break
if box_overlap_qbox is False:
break
if box_overlap_qbox is False:
qbox_overlap_box = True
for l in range(4): # point l in boxes
for k in range(4): # corner k in qboxes
vec = qboxes[j, k] - qboxes[j, (k + 1) % 4]
if clockwise:
vec = -vec
cross = vec[1] * (
qboxes[j, k, 0] - boxes[i, l, 0])
cross -= vec[0] * (
qboxes[j, k, 1] - boxes[i, l, 1])
if cross >= 0: #
qbox_overlap_box = False
break
if qbox_overlap_box is False:
break
if qbox_overlap_box:
ret[i, j] = True # collision.
else:
ret[i, j] = True # collision.
return ret
def global_translate(gt_boxes, points, noise_translate_std):
"""
Apply global translation to gt_boxes and points.
"""
if not isinstance(noise_translate_std, (list, tuple, np.ndarray)):
noise_translate_std = np.array([noise_translate_std, noise_translate_std, noise_translate_std])
    noise_translate = np.array([np.random.normal(0, noise_translate_std[0], 1),
                                np.random.normal(0, noise_translate_std[1], 1),
                                np.random.normal(0, noise_translate_std[2], 1)]).T
points[:, :3] += noise_translate
gt_boxes[:, :3] += noise_translate
return gt_boxes, points
|
the-stack_106_26192 | # -*- coding: utf-8 -*-
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
__author__ = 'christoph.statz <at> tu-dresden.de'
from tqdm import tqdm
from maui import context
from maui.field import Field
from desolvex.helper import ObjectSwapper
class ExplicitSolver(object):
def __init__(self, actions, stop_criterion, step_wrapper=None, show_progress=True):
self.__actions = actions
self.__stop_criterion = stop_criterion
self.__show_progress = show_progress
self.__step_wrapper = step_wrapper
def solve(self):
if self.__show_progress:
self.__progress = tqdm(unit='Time Steps', unit_scale=True, miniters=1, desc='Solver Progress', file=context.stderr)
while True:
if self.__step_wrapper is not None:
abort = self.__step_wrapper(self.__step)
if abort:
break
else:
self.__step()
if self.__show_progress:
if self.__progress.total != self.__stop_criterion.int_maximum:
self.__progress.total = self.__stop_criterion.int_maximum
self.__progress.update()
if self.__stop_criterion.met():
break
if self.__show_progress:
self.__progress.close()
def __step(self):
for el in self.__actions:
func = el[0]
if len(el) > 1:
args = el[1:]
else:
args = None
domains = None
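            # Inspect the arguments for a Field (directly, inside an
            # ObjectSwapper, or inside a list) whose partition supplies the
            # domain decomposition used to dispatch this action.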
if args is not None:
for arg in args:
if isinstance(arg, Field): # or isinstance(arg, View):
domains = arg.partition.domains.keys()
break
elif isinstance(arg, ObjectSwapper): # TODO: hasattr('swap')
if isinstance(arg[0], Field): # or isinstance(arg[0], View):
domains = arg[0].partition.domains.keys()
break
elif isinstance(arg, list):
if isinstance(arg[0], ObjectSwapper):
if isinstance(arg[0][0], Field): #or isinstance(arg[0][0], View):
domains = arg[0][0].partition.domains.keys()
break
if domains is not None:
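                # Distributed case: call func once per domain, replacing each
                # Field-like argument with its per-domain data view.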
for domain in domains:
domain_args = []
for arg in args:
if isinstance(arg, Field): # or isinstance(arg, View):
domain_args.append(arg.d[domain])
elif isinstance(arg, ObjectSwapper):
for ar in arg[:]:
if isinstance(ar, Field): # or isinstance(ar, View):
domain_args.append(ar.d[domain])
else:
domain_args.append(ar)
elif isinstance(arg, list):
if len(arg) < 2:
raise ValueError("At least two list elements expected!")
# TODO: Extend to dict(field)!
if isinstance(arg[0], ObjectSwapper):
if isinstance(arg[0][0], Field): # or isinstance(arg[0][0], View):
domain_args.append(arg[0][arg[1]].d[domain])
else:
domain_args.append(arg[0][arg[1:]])
elif isinstance(arg[0], dict):
                                if isinstance(next(iter(arg[0].values())), Field):  # or View
domain_args.append(arg[0][arg[1]].d[domain])
else:
domain_args.append(arg[0][arg[1:]])
                            elif callable(arg[0]):
# TODO: Sort out fields and objectswapper!
domain_args.append(arg[0](*arg[1:]))
else:
raise ValueError("Expected Field or ObjectSwapper of Function!")
else:
domain_args.append(arg)
func(*domain_args)
else:
domain_args = []
for arg in args:
if isinstance(arg, ObjectSwapper):
for ar in arg[:]:
domain_args.append(ar)
if isinstance(arg, list):
if len(arg) < 2:
raise ValueError("At least two list elements expected!")
# TODO: Extend to dict(field)!
if isinstance(arg[0], ObjectSwapper):
domain_args.append(arg[0][arg[1:]])
elif isinstance(arg[0], dict):
domain_args.append(arg[0][arg[1]])
                        elif callable(arg[0]):
domain_args.append(arg[0](*arg[1:]))
else:
raise ValueError("Expected Field or ObjectSwapper of Function!")
else:
domain_args.append(arg)
func(*domain_args)
else:
func()
if args is not None:
for arg in args:
if isinstance(arg, Field): # or isinstance(arg, View):
arg.sync()
self.__stop_criterion.update()
def step(self):
if self.__step_wrapper is not None:
self.__step_wrapper.step()
else:
self.__step()
|
the-stack_106_26193 | # Copyright (c) Microsoft Corporation and Fairlearn contributors.
# Licensed under the MIT License.
"""
==============================
Metrics with Multiple Features
==============================
"""
# %%
# This notebook demonstrates the new API for metrics, which supports
# multiple sensitive and conditional features. This example does not
# contain a proper discussion of how fairness relates to the dataset
# used, although it does highlight issues which users may want to
# consider when analysing their datasets.
#
# We are going to consider a lending scenario, supposing that we have
# a model which predicts whether or not a particular customer will
# repay a loan. This could be used as the basis of deciding whether
# or not to offer that customer a loan. With traditional metrics,
# we would assess the model using:
#
# - The 'true' values from the test set
# - The model predictions from the test set
#
# Our fairness metrics compute group-based fairness statistics.
# To use these, we also need categorical columns from the test
# set. For this example, we will include:
#
# - The sex of each individual (two unique values)
# - The race of each individual (three unique values)
# - The credit score band of each individual (three unique values)
# - Whether the loan is considered 'large' or 'small'
#
# An individual's sex and race should not affect a lending decision,
# but it would be legitimate to consider an individual's credit score
# and the relative size of the loan which they desired.
#
# A real scenario will be more complicated, but this will serve to
# illustrate the use of the new metrics.
#
# Getting the Data
# ================
#
# *This section may be skipped. It simply creates a dataset for
# illustrative purposes*
#
# We will use the well-known UCI 'Adult' dataset as the basis of this
# demonstration. This is not for a lending scenario, but we will regard
# it as one for the purposes of this example. We will use the existing
# 'race' and 'sex' columns (trimming the former to three unique values),
# and manufacture credit score bands and loan sizes from other columns.
# We start with some uncontroversial `import` statements:
import functools
import numpy as np
import sklearn.metrics as skm
from sklearn.compose import ColumnTransformer
from sklearn.datasets import fetch_openml
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import make_column_selector as selector
from sklearn.pipeline import Pipeline
from fairlearn.metrics import MetricFrame
from fairlearn.metrics import selection_rate, count
# %%
# Next, we import the data:
data = fetch_openml(data_id=1590, as_frame=True)
X_raw = data.data
y = (data.target == '>50K') * 1
# %%
# For purposes of clarity, we consolidate the 'race' column to have
# three unique values:
def race_transform(input_str):
"""Reduce values to White, Black and Other."""
result = 'Other'
if input_str == 'White' or input_str == 'Black':
result = input_str
return result
X_raw['race'] = X_raw['race'].map(race_transform).fillna('Other').astype('category')
print(np.unique(X_raw['race']))
# %%
# Now, we manufacture the columns for the credit score band and
# requested loan size. These are wholly constructed, and not
# part of the actual dataset in any way. They are simply for
# illustrative purposes.
def marriage_transform(m_s_string):
"""Perform some simple manipulations."""
result = 'Low'
if m_s_string.startswith("Married"):
result = 'Medium'
elif m_s_string.startswith("Widowed"):
result = 'High'
return result
def occupation_transform(occ_string):
"""Perform some simple manipulations."""
result = 'Small'
if occ_string.startswith("Machine"):
result = 'Large'
return result
col_credit = X_raw['marital-status'].map(marriage_transform).fillna('Low')
col_credit.name = "Credit Score"
col_loan_size = X_raw['occupation'].map(occupation_transform).fillna('Small')
col_loan_size.name = "Loan Size"
A = X_raw[['race', 'sex']].copy()
A['Credit Score'] = col_credit
A['Loan Size'] = col_loan_size
A
# %%
# Now that we have imported our dataset and manufactured a few features, we
# can perform some more conventional processing. To avoid the problem of
# `data leakage <https://en.wikipedia.org/wiki/Leakage_(machine_learning)>`_,
# we need to split the data into training and test sets before applying
# any transforms or scaling:
(X_train, X_test, y_train, y_test, A_train, A_test) = train_test_split(
X_raw, y, A, test_size=0.3, random_state=54321, stratify=y
)
# Ensure indices are aligned between X, y and A,
# after all the slicing and splitting of DataFrames
# and Series
X_train = X_train.reset_index(drop=True)
X_test = X_test.reset_index(drop=True)
y_train = y_train.reset_index(drop=True)
y_test = y_test.reset_index(drop=True)
A_train = A_train.reset_index(drop=True)
A_test = A_test.reset_index(drop=True)
# %%
# Next, we build two :class:`~sklearn.pipeline.Pipeline` objects
# to process the columns, one for numeric data, and the other
# for categorical data. Both impute missing values; the difference
# is whether the data are scaled (numeric columns) or
# one-hot encoded (categorical columns). Imputation of missing
# values should generally be done with care, since it could
# potentially introduce biases. Of course, removing rows with
# missing data could also cause trouble, if particular subgroups
# have poorer data quality.
numeric_transformer = Pipeline(
steps=[
("impute", SimpleImputer()),
("scaler", StandardScaler())
]
)
categorical_transformer = Pipeline(
[
("impute", SimpleImputer(strategy="most_frequent")),
("ohe", OneHotEncoder(handle_unknown="ignore"))
]
)
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, selector(dtype_exclude="category")),
("cat", categorical_transformer, selector(dtype_include="category"))
]
)
# %%
# With our preprocessor defined, we can now build a
# new pipeline which includes an Estimator:
unmitigated_predictor = Pipeline(
steps=[
("preprocessor", preprocessor),
(
"classifier",
LogisticRegression(solver="liblinear", fit_intercept=True)
)
]
)
# %%
# With the pipeline fully defined, we can first train it
# with the training data, and then generate predictions
# from the test data.
unmitigated_predictor.fit(X_train, y_train)
y_pred = unmitigated_predictor.predict(X_test)
# %%
# Analysing the Model with Metrics
# ================================
#
# After our data manipulations and model training, we have the following
# from our test set:
#
# - A vector of true values called ``y_test``
# - A vector of model predictions called ``y_pred``
# - A DataFrame of categorical features relevant to fairness called ``A_test``
#
# In a traditional model analysis, we would now look at some metrics
# evaluated on the entire dataset. Suppose in this case, the relevant
# metrics are :func:`fairlearn.metrics.selection_rate` and
# :func:`sklearn.metrics.fbeta_score` (with
# ``beta=0.6``).
# We can evaluate these metrics directly:
print("Selection Rate:", selection_rate(y_test, y_pred))
print("fbeta:", skm.fbeta_score(y_test, y_pred, beta=0.6))
# %%
# We know that there are sensitive features in our data, and we want to
# ensure that we're not harming individuals due to membership in any of
# these groups. For this purpose, Fairlearn provides the
# :class:`fairlearn.metrics.MetricFrame`
# class. Let us construct an instance of this class, and then look at
# its capabilities:
fbeta_06 = functools.partial(skm.fbeta_score, beta=0.6)
metric_fns = {'selection_rate': selection_rate, 'fbeta_06': fbeta_06, 'count': count}
grouped_on_sex = MetricFrame(metrics=metric_fns,
y_true=y_test,
y_pred=y_pred,
sensitive_features=A_test['sex'])
# %%
# The :class:`fairlearn.metrics.MetricFrame` object requires a
# minimum of four arguments:
#
# 1. The underlying metric function(s) to be evaluated
# 2. The true values
# 3. The predicted values
# 4. The sensitive feature values
#
# These are all passed as arguments to the constructor. If more than
# one underlying metric is required (as in this case), then we must
# provide them in a dictionary.
#
# The underlying metrics must have a signature ``fn(y_true, y_pred)``,
# so we have to use :func:`functools.partial` on ``fbeta_score()`` to
# furnish ``beta=0.6`` (we will show how to pass in extra array
# arguments such as sample weights shortly).
#
# We will now take a closer look at the :class:`fairlearn.metrics.MetricFrame`
# object. First, there is the ``overall`` property, which contains
# the metrics evaluated on the entire dataset. We see that this contains the
# same values calculated above:
assert grouped_on_sex.overall['selection_rate'] == selection_rate(y_test, y_pred)
assert grouped_on_sex.overall['fbeta_06'] == skm.fbeta_score(y_test, y_pred, beta=0.6)
print(grouped_on_sex.overall)
# %%
# The other property in the :class:`fairlearn.metrics.MetricFrame` object
# is ``by_group``. This contains the metrics evaluated on each subgroup defined
# by the categories in the ``sensitive_features=`` argument. Note that
# :func:`fairlearn.metrics.count` can be used to display the number of
# data points in each subgroup. In this case, we have results for males and females:
grouped_on_sex.by_group
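# %%
# As a minimal sanity check (not part of the original analysis), each entry
# of ``by_group`` should agree with the metric evaluated directly on the
# corresponding subset of rows:

for group in grouped_on_sex.by_group.index:
    mask = (A_test['sex'] == group)
    direct = selection_rate(y_test[mask], y_pred[mask])
    print(group, np.isclose(grouped_on_sex.by_group.loc[group, 'selection_rate'], direct))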
# %%
# We can immediately see a substantial disparity in the selection rate between
# males and females.
#
# We can also create another :class:`fairlearn.metrics.MetricFrame` object
# using race as the sensitive feature:
grouped_on_race = MetricFrame(metrics=metric_fns,
y_true=y_test,
y_pred=y_pred,
sensitive_features=A_test['race'])
# %%
# The ``overall`` property is unchanged:
assert (grouped_on_sex.overall == grouped_on_race.overall).all()
# %%
# The ``by_group`` property now contains the metrics evaluated based on the 'race'
# column:
grouped_on_race.by_group
# %%
# We see that there is also a significant disparity in selection rates when
# grouping by race.
# %%
# Sample weights and other arrays
# -------------------------------
#
# We noted above that the underlying metric functions passed to the
# :class:`fairlearn.metrics.MetricFrame` constructor need to be of
# the form ``fn(y_true, y_pred)`` - we do not support scalar arguments
# such as ``pos_label=`` or ``beta=`` in the constructor. Such
# arguments should be bound into a new function using
# :func:`functools.partial`, and the result passed in. However, we do
# support arguments which have one entry for each sample, with an array
# of sample weights being the most common example. These are divided
# into subgroups along with ``y_true`` and ``y_pred``, and passed along
# to the underlying metric.
#
# To use these arguments, we pass in a dictionary as the ``sample_params=``
# argument of the constructor. Let us generate some random weights, and
# pass these along:
random_weights = np.random.rand(len(y_test))
example_sample_params = {
'selection_rate': {'sample_weight': random_weights},
'fbeta_06': {'sample_weight': random_weights}
}
grouped_with_weights = MetricFrame(metrics=metric_fns,
y_true=y_test,
y_pred=y_pred,
sensitive_features=A_test['sex'],
sample_params=example_sample_params)
# %%
# We can inspect the overall values, and check they are as expected:
assert grouped_with_weights.overall['selection_rate'] == \
selection_rate(y_test, y_pred, sample_weight=random_weights)
assert grouped_with_weights.overall['fbeta_06'] == \
skm.fbeta_score(y_test, y_pred, beta=0.6, sample_weight=random_weights)
print(grouped_with_weights.overall)
# %%
# We can also see the effect on the metric being evaluated on the subgroups:
grouped_with_weights.by_group
# %%
# Quantifying Disparities
# =======================
#
# We now know that our model is selecting individuals who are female far less
# often than individuals who are male. There is a similar effect when
# examining the results by race, with blacks being selected far less often than
# whites (and those classified as 'other'). However, there are many cases where
# presenting all these numbers at once will not be useful (for example, a high
# level dashboard which is monitoring model performance). Fairlearn provides
# several means of aggregating metrics across the subgroups, so that disparities
# can be readily quantified.
#
# The simplest of these aggregations is ``group_min()``, which reports the
# minimum value seen for a subgroup for each underlying metric (we also provide
# ``group_max()``). This is
# useful if there is a mandate that "no subgroup should have an ``fbeta_score()``
# of less than 0.6." We can evaluate the minimum values easily:
grouped_on_race.group_min()
# %%
# As noted above, the selection rates vary greatly by race and by sex.
# This can be quantified in terms of a difference between the subgroup with
# the highest value of the metric, and the subgroup with the lowest value.
# For this, we provide the method ``difference(method='between_groups')``:
grouped_on_race.difference(method='between_groups')
# %%
# We can also evaluate the difference relative to the corresponding overall
# value of the metric. In this case we take the absolute value, so that the
# result is always positive:
grouped_on_race.difference(method='to_overall')
# %%
# There are situations where knowing the ratios of the metrics evaluated on
# the subgroups is more useful. For this we have the ``ratio()`` method.
# We can take the ratios between the minimum and maximum values of each metric:
grouped_on_race.ratio(method='between_groups')
# %%
# We can also compute the ratios relative to the overall value for each
# metric. Analogous to the differences, the ratios are always in the range
# :math:`[0,1]`:
grouped_on_race.ratio(method='to_overall')
# %%
# Intersections of Features
# =========================
#
# So far we have only considered a single sensitive feature at a time,
# and we have already found some serious issues in our example data.
# However, sometimes serious issues can be hiding in intersections of
# features. For example, the
# `Gender Shades project <https://www.media.mit.edu/projects/gender-shades/overview/>`_
# found that facial recognition algorithms performed worse for blacks
# than whites, and also worse for women than men (despite overall high
# accuracy score). Moreover, performance on black females was *terrible*.
# We can examine the intersections of sensitive features by passing
# multiple columns to the :class:`fairlearn.metrics.MetricFrame`
# constructor:
grouped_on_race_and_sex = MetricFrame(metrics=metric_fns,
y_true=y_test,
y_pred=y_pred,
sensitive_features=A_test[['race', 'sex']])
# %%
# The overall values are unchanged, but the ``by_group`` table now
# shows the intersections between subgroups:
assert (grouped_on_race_and_sex.overall == grouped_on_race.overall).all()
grouped_on_race_and_sex.by_group
# %%
# The aggregations are still performed across all subgroups for each metric,
# so each continues to reduce to a single value. If we look at the
# ``group_min()``, we see that we violate the mandate suggested above for
# ``fbeta_score()`` (specifically, for females with a race of 'Other'):
grouped_on_race_and_sex.group_min()
# %%
# Looking at the ``ratio()`` method, we see that the disparity is worse
# (specifically between white males and black females, if we check in
# the ``by_group`` table):
grouped_on_race_and_sex.ratio(method='between_groups')
# %%
# Control Features
# ================
#
# There is a further way we can slice up our data. We have (*completely
# made up*) features for the individuals' credit scores (in three bands)
# and also the size of the loan requested (large or small). In our loan
# scenario, it is acceptable that individuals with high credit scores
# are selected more often than individuals with low credit scores.
# However, within each credit score band, we do not want a disparity
# between (say) black females and white males. To examine these cases,
# we have the concept of *control features*.
#
# Control features are introduced by the ``control_features=``
# argument to the :class:`fairlearn.metrics.MetricFrame` object:
cond_credit_score = MetricFrame(metrics=metric_fns,
y_true=y_test,
y_pred=y_pred,
sensitive_features=A_test[['race', 'sex']],
control_features=A_test['Credit Score'])
# %%
# This has an immediate effect on the ``overall`` property. Instead
# of having one value for each metric, we now have a value for each
# unique value of the control feature:
cond_credit_score.overall
# %%
# The ``by_group`` property is similarly expanded:
cond_credit_score.by_group
# %%
# The aggregates are also evaluated once for each group identified
# by the control feature:
cond_credit_score.group_min()
# %%
# And:
cond_credit_score.ratio(method='between_groups')
# %%
# In our data, we see that we have a dearth of positive results
# for non-whites with high credit scores, which significantly affects the
# aggregates.
#
# We can continue adding more control features:
cond_both = MetricFrame(metrics=metric_fns,
y_true=y_test,
y_pred=y_pred,
sensitive_features=A_test[['race', 'sex']],
control_features=A_test[['Loan Size', 'Credit Score']])
# %%
# The ``overall`` property now splits into more values:
cond_both.overall
# %%
# As does the ``by_group`` property, where ``NaN`` values
# indicate that there were no samples in the cell:
cond_both.by_group
# %%
# The aggregates behave similarly. By this point, we are having significant issues
# with under-populated intersections. Consider:
def member_counts(y_true, y_pred):
assert len(y_true) == len(y_pred)
return len(y_true)
counts = MetricFrame(metrics=member_counts,
y_true=y_test,
y_pred=y_pred,
sensitive_features=A_test[['race', 'sex']],
control_features=A_test[['Loan Size', 'Credit Score']])
counts.by_group
# %%
# Recall that ``NaN`` indicates that there were no individuals
# in a cell - ``member_counts()`` will not even have been called.
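# %%
# If a downstream report cannot cope with missing values, one simple option
# (shown purely as an illustration) is to treat those empty cells as zero
# counts:
counts.by_group.fillna(0)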
# %%
# Exporting from MetricFrame
# ==========================
#
# Sometimes, we need to extract our data for use in other tools.
# For this, we can use the :py:meth:`pandas.DataFrame.to_csv` method,
# since the :py:meth:`~fairlearn.metrics.MetricFrame.by_group` property
# will be a :class:`pandas.DataFrame` (or in a few cases, it will be
# a :class:`pandas.Series`, but that has a similar
# :py:meth:`~pandas.Series.to_csv` method):
csv_output = cond_credit_score.by_group.to_csv()
print(csv_output)
# %%
# The :py:meth:`pandas.DataFrame.to_csv` method has a large number of
# arguments to control the exported CSV. For example, it can write
# directly to a CSV file, rather than returning a string (as shown
# above).
#
# The :meth:`~fairlearn.metrics.MetricFrame.overall` property can
# be handled similarly, in cases where it is not a scalar.
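# %%
# For example, the conditioned ``overall`` values computed above form a
# :class:`pandas.DataFrame`, so the same export pattern applies (the variable
# name below is purely illustrative):
csv_overall = cond_credit_score.overall.to_csv()
print(csv_overall)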
|
the-stack_106_26196 | from docsvr import DocReqCmd
class doFILEVERIFY(DocReqCmd):
def processCommand(self):
src=self.translatePath(self.path)
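        # validatePFN() is assumed to return an index into this status list
        # (so 2 == "EXISTS"; compare doVERIFY below)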
r=["BAD DIR","NOT EXISTS","EXISTS","BAD CALL"]
try:
r=r[self.validatePFN(src)]
except:
r="BAD CALL"
self.request.send_ok("OK|%s" % r)
return
class doVERIFY(DocReqCmd):
def processCommand(self):
src=self.translatePath(self.path)
if self.validatePFN(src) == 2:
# non-standard response, but client will hang if this is not just "OK"
self.request.send_ok("OK")
else:
self.request.send_error(400,"ERROR: SOURCE FILE NOT FOUND")
return
|
the-stack_106_26198 | import os
from google.cloud import storage
def load_data():
gcsBucket = "continuous-intelligence"
key = "store47-2016.csv"
if not os.path.exists('data/raw'):
os.makedirs('data/raw')
if not os.path.exists("data/raw/" + key):
client = storage.Client()
bucket = client.get_bucket(gcsBucket)
blob = bucket.get_blob(key)
blob.download_to_filename('data/raw/store47-2016.csv')
def main():
print("Loading data...")
load_data()
print("Finished downloading")
if __name__ == "__main__":
main()
|
the-stack_106_26199 | import abc
from collections import OrderedDict
import time
import gtimer as gt
import numpy as np
from rlkit.core import logger, eval_util
from rlkit.data_management.env_replay_buffer import MultiTaskReplayBuffer
from rlkit.data_management.path_builder import PathBuilder
from rlkit.samplers.in_place import InPlacePathSampler
from rlkit.torch import pytorch_util as ptu
class MetaRLAlgorithm(metaclass=abc.ABCMeta):
def __init__(
self,
env,
agent,
train_tasks,
eval_tasks,
meta_batch=64,
num_iterations=100,
num_train_steps_per_itr=1000,
num_initial_steps=100,
num_tasks_sample=100,
num_steps_prior=100,
num_steps_posterior=100,
num_extra_rl_steps_posterior=100,
num_evals=10,
num_steps_per_eval=1000,
batch_size=1024,
embedding_batch_size=1024,
embedding_mini_batch_size=1024,
max_path_length=1000,
discount=0.99,
replay_buffer_size=1000000,
reward_scale=1,
num_exp_traj_eval=1,
update_post_train=1,
eval_deterministic=True,
render=False,
save_replay_buffer=False,
save_algorithm=False,
save_environment=False,
render_eval_paths=False,
dump_eval_paths=False,
plotter=None,
):
"""
:param env: training env
:param agent: agent that is conditioned on a latent variable z that rl_algorithm is responsible for feeding in
:param train_tasks: list of tasks used for training
:param eval_tasks: list of tasks used for eval
see default experiment config file for descriptions of the rest of the arguments
"""
self.env = env
self.agent = agent
self.exploration_agent = agent # Can potentially use a different policy purely for exploration rather than also solving tasks, currently not being used
self.train_tasks = train_tasks
self.eval_tasks = eval_tasks
self.meta_batch = meta_batch
self.num_iterations = num_iterations
self.num_train_steps_per_itr = num_train_steps_per_itr
self.num_initial_steps = num_initial_steps
self.num_tasks_sample = num_tasks_sample
self.num_steps_prior = num_steps_prior
self.num_steps_posterior = num_steps_posterior
self.num_extra_rl_steps_posterior = num_extra_rl_steps_posterior
self.num_evals = num_evals
self.num_steps_per_eval = num_steps_per_eval
self.batch_size = batch_size
self.embedding_batch_size = embedding_batch_size
self.embedding_mini_batch_size = embedding_mini_batch_size
self.max_path_length = max_path_length
self.discount = discount
self.replay_buffer_size = replay_buffer_size
self.reward_scale = reward_scale
self.update_post_train = update_post_train
self.num_exp_traj_eval = num_exp_traj_eval
self.eval_deterministic = eval_deterministic
self.render = render
self.save_replay_buffer = save_replay_buffer
self.save_algorithm = save_algorithm
self.save_environment = save_environment
self.eval_statistics = None
self.render_eval_paths = render_eval_paths
self.dump_eval_paths = dump_eval_paths
self.plotter = plotter
self.sampler = InPlacePathSampler(
env=env,
policy=agent,
max_path_length=self.max_path_length,
)
# separate replay buffers for
# - training RL update
# - training encoder update
self.replay_buffer = MultiTaskReplayBuffer(
self.replay_buffer_size,
env,
self.train_tasks,
)
self.enc_replay_buffer = MultiTaskReplayBuffer(
self.replay_buffer_size,
env,
self.train_tasks,
)
self._n_env_steps_total = 0
self._n_train_steps_total = 0
self._n_rollouts_total = 0
self._do_train_time = 0
self._epoch_start_time = None
self._algo_start_time = None
self._old_table_keys = None
self._current_path_builder = PathBuilder()
self._exploration_paths = []
def make_exploration_policy(self, policy):
return policy
def make_eval_policy(self, policy):
return policy
def sample_task(self, is_eval=False):
'''
sample task randomly
'''
if is_eval:
idx = np.random.randint(len(self.eval_tasks))
else:
idx = np.random.randint(len(self.train_tasks))
return idx
def train(self):
'''
meta-training loop
'''
self.pretrain()
params = self.get_epoch_snapshot(-1)
logger.save_itr_params(-1, params)
gt.reset()
gt.set_def_unique(False)
self._current_path_builder = PathBuilder()
# at each iteration, we first collect data from tasks, perform meta-updates, then try to evaluate
for it_ in gt.timed_for(
range(self.num_iterations),
save_itrs=True,
):
self._start_epoch(it_)
self.training_mode(True)
if it_ == 0:
print('collecting initial pool of data for train and eval')
# temp for evaluating #3-9
                for idx in self.train_tasks:  # for each training task
                    self.task_idx = idx  # switch the current task id
                    self.env.reset_task(idx)  # reset the environment to this task
                    self.collect_data(self.num_initial_steps, 1, np.inf)  # collect num_initial_steps transitions before training, updating z after each collection
# Sample data from train tasks.
            for i in range(self.num_tasks_sample):  # randomly draw num_tasks_sample tasks (steps 3-10)
idx = np.random.randint(len(self.train_tasks))
self.task_idx = idx
self.env.reset_task(idx)
self.enc_replay_buffer.task_buffers[idx].clear()
# collect some trajectories with z ~ prior
                if self.num_steps_prior > 0:  # how much context c to collect for inferring the prior z
                    self.collect_data(self.num_steps_prior, 1, np.inf)
# collect some trajectories with z ~ posterior
                if self.num_steps_posterior > 0:  # how much context c to collect for inferring the posterior z
self.collect_data(self.num_steps_posterior, 1, self.update_post_train)
# even if encoder is trained only on samples from the prior, the policy needs to learn to handle z ~ posterior
                if self.num_extra_rl_steps_posterior > 0:  # extra data collected for RL training only, not used for the encoder
self.collect_data(self.num_extra_rl_steps_posterior, 1, self.update_post_train, add_to_enc_buffer=False)
# Sample train tasks and compute gradient updates on parameters.
            for train_step in range(self.num_train_steps_per_itr):  # number of gradient steps per iteration (step 11)
                indices = np.random.choice(self.train_tasks, self.meta_batch)  # randomly choose meta_batch tasks from train_tasks (step 13)
self._do_training(indices) #14-21
self._n_train_steps_total += 1
gt.stamp('train')
self.training_mode(False)
# eval
self._try_to_eval(it_)
gt.stamp('eval')
self._end_epoch()
def pretrain(self):
"""
Do anything before the main training phase.
"""
pass
    def collect_data(self, num_samples, resample_z_rate, update_posterior_rate, add_to_enc_buffer=True):  # how many transitions to collect in total, and how often to resample/update z
'''
get trajectories from current env in batch mode with given policy
collect complete trajectories until the number of collected transitions >= num_samples
:param agent: policy to rollout
:param num_samples: total number of transitions to sample
:param resample_z_rate: how often to resample latent context z (in units of trajectories)
:param update_posterior_rate: how often to update q(z | c) from which z is sampled (in units of trajectories)
:param add_to_enc_buffer: whether to add collected data to encoder replay buffer
'''
# start from the prior
self.agent.clear_z()#reset q(z|c) to the prior,sample a new z from the prior 6
num_transitions = 0
while num_transitions < num_samples:
paths, n_samples = self.sampler.obtain_samples(max_samples=num_samples - num_transitions,
max_trajs=update_posterior_rate,
accum_context=False,
resample=resample_z_rate)
            # paths collected, number of steps
num_transitions += n_samples
            self.replay_buffer.add_paths(self.task_idx, paths)  # store the collected transitions in the replay buffer (step 7)
if add_to_enc_buffer:
self.enc_replay_buffer.add_paths(self.task_idx, paths)
if update_posterior_rate != np.inf:
context = self.sample_context(self.task_idx) # 8
self.agent.infer_posterior(context)
self._n_env_steps_total += num_transitions
gt.stamp('sample')
def _try_to_eval(self, epoch):
logger.save_extra_data(self.get_extra_data_to_save(epoch))
if self._can_evaluate():
self.evaluate(epoch)
params = self.get_epoch_snapshot(epoch)
logger.save_itr_params(epoch, params)
table_keys = logger.get_table_key_set()
if self._old_table_keys is not None:
assert table_keys == self._old_table_keys, (
"Table keys cannot change from iteration to iteration."
)
self._old_table_keys = table_keys
logger.record_tabular(
"Number of train steps total",
self._n_train_steps_total,
)
logger.record_tabular(
"Number of env steps total",
self._n_env_steps_total,
)
logger.record_tabular(
"Number of rollouts total",
self._n_rollouts_total,
)
times_itrs = gt.get_times().stamps.itrs
train_time = times_itrs['train'][-1]
sample_time = times_itrs['sample'][-1]
eval_time = times_itrs['eval'][-1] if epoch > 0 else 0
epoch_time = train_time + sample_time + eval_time
total_time = gt.get_times().total
logger.record_tabular('Train Time (s)', train_time)
logger.record_tabular('(Previous) Eval Time (s)', eval_time)
logger.record_tabular('Sample Time (s)', sample_time)
logger.record_tabular('Epoch Time (s)', epoch_time)
logger.record_tabular('Total Train Time (s)', total_time)
logger.record_tabular("Epoch", epoch)
logger.dump_tabular(with_prefix=False, with_timestamp=False)
else:
logger.log("Skipping eval for now.")
def _can_evaluate(self):
"""
One annoying thing about the logger table is that the keys at each
iteration need to be the exact same. So unless you can compute
everything, skip evaluation.
A common example for why you might want to skip evaluation is that at
the beginning of training, you may not have enough data for a
validation and training set.
:return:
"""
# eval collects its own context, so can eval any time
return True
def _can_train(self):
return all([self.replay_buffer.num_steps_can_sample(idx) >= self.batch_size for idx in self.train_tasks])
def _get_action_and_info(self, agent, observation):
"""
Get an action to take in the environment.
:param observation:
:return:
"""
agent.set_num_steps_total(self._n_env_steps_total)
return agent.get_action(observation,)
def _start_epoch(self, epoch):
self._epoch_start_time = time.time()
self._exploration_paths = []
self._do_train_time = 0
logger.push_prefix('Iteration #%d | ' % epoch)
def _end_epoch(self):
logger.log("Epoch Duration: {0}".format(
time.time() - self._epoch_start_time
))
logger.log("Started Training: {0}".format(self._can_train()))
logger.pop_prefix()
##### Snapshotting utils #####
def get_epoch_snapshot(self, epoch):
data_to_save = dict(
epoch=epoch,
            exploration_policy=self.exploration_agent,
)
if self.save_environment:
            data_to_save['env'] = self.env
return data_to_save
def get_extra_data_to_save(self, epoch):
"""
Save things that shouldn't be saved every snapshot but rather
overwritten every time.
:param epoch:
:return:
"""
if self.render:
            self.env.render(close=True)
data_to_save = dict(
epoch=epoch,
)
if self.save_environment:
            data_to_save['env'] = self.env
if self.save_replay_buffer:
data_to_save['replay_buffer'] = self.replay_buffer
if self.save_algorithm:
data_to_save['algorithm'] = self
return data_to_save
def collect_paths(self, idx, epoch, run):
self.task_idx = idx
self.env.reset_task(idx)
self.agent.clear_z()
paths = []
num_transitions = 0
num_trajs = 0
while num_transitions < self.num_steps_per_eval:
path, num = self.sampler.obtain_samples(deterministic=self.eval_deterministic, max_samples=self.num_steps_per_eval - num_transitions, max_trajs=1, accum_context=True)
paths += path
num_transitions += num
num_trajs += 1
if num_trajs >= self.num_exp_traj_eval:
self.agent.infer_posterior(self.agent.context)
if self.sparse_rewards:
for p in paths:
                sparse_rewards = np.stack([e['sparse_reward'] for e in p['env_infos']]).reshape(-1, 1)
p['rewards'] = sparse_rewards
goal = self.env._goal
for path in paths:
path['goal'] = goal # goal
# save the paths for visualization, only useful for point mass
if self.dump_eval_paths:
logger.save_extra_data(paths, path='eval_trajectories/task{}-epoch{}-run{}'.format(idx, epoch, run))
return paths
def _do_eval(self, indices, epoch):
final_returns = []
online_returns = []
for idx in indices:
all_rets = []
for r in range(self.num_evals):
paths = self.collect_paths(idx, epoch, r)
all_rets.append([eval_util.get_average_returns([p]) for p in paths])
final_returns.append(np.mean([a[-1] for a in all_rets]))
# record online returns for the first n trajectories
n = min([len(a) for a in all_rets])
all_rets = [a[:n] for a in all_rets]
all_rets = np.mean(np.stack(all_rets), axis=0) # avg return per nth rollout
online_returns.append(all_rets)
n = min([len(t) for t in online_returns])
online_returns = [t[:n] for t in online_returns]
return final_returns, online_returns
def evaluate(self, epoch):
if self.eval_statistics is None:
self.eval_statistics = OrderedDict()
### sample trajectories from prior for debugging / visualization
if self.dump_eval_paths:
# 100 arbitrarily chosen for visualizations of point_robot trajectories
# just want stochasticity of z, not the policy
self.agent.clear_z()#sample z
prior_paths, _ = self.sampler.obtain_samples(deterministic=self.eval_deterministic, max_samples=self.max_path_length * 20,
accum_context=False,
resample=1)
logger.save_extra_data(prior_paths, path='eval_trajectories/prior-epoch{}'.format(epoch))
### train tasks
# eval on a subset of train tasks for speed
        indices = np.random.choice(self.train_tasks, len(self.eval_tasks))  # randomly choose a subset of train tasks for evaluation
eval_util.dprint('evaluating on {} train tasks'.format(len(indices)))
### eval train tasks with posterior sampled from the training replay buffer
train_returns = []
for idx in indices:
self.task_idx = idx
            self.env.reset_task(idx)  # initialize the task
paths = []
for _ in range(self.num_steps_per_eval // self.max_path_length):
                context = self.sample_context(idx)  # collect context C
                self.agent.infer_posterior(context)  # infer the posterior z and update self.z
p, _ = self.sampler.obtain_samples(deterministic=self.eval_deterministic, max_samples=self.max_path_length,
accum_context=False,
max_trajs=1,
                                                        resample=np.inf)  # collect rollout paths
paths += p
if self.sparse_rewards:
for p in paths:
                    sparse_rewards = np.stack([e['sparse_reward'] for e in p['env_infos']]).reshape(-1, 1)
p['rewards'] = sparse_rewards
train_returns.append(eval_util.get_average_returns(paths))
train_returns = np.mean(train_returns)
### eval train tasks with on-policy data to match eval of test tasks
train_final_returns, train_online_returns = self._do_eval(indices, epoch)
eval_util.dprint('train online returns')
eval_util.dprint(train_online_returns)
### test tasks
eval_util.dprint('evaluating on {} test tasks'.format(len(self.eval_tasks)))
test_final_returns, test_online_returns = self._do_eval(self.eval_tasks, epoch)
eval_util.dprint('test online returns')
eval_util.dprint(test_online_returns)
# save the final posterior
self.agent.log_diagnostics(self.eval_statistics)
if hasattr(self.env, "log_diagnostics"):
self.env.log_diagnostics(paths)
#self.env.log_diagnostics(paths, prefix=None)
avg_train_return = np.mean(train_final_returns)
avg_test_return = np.mean(test_final_returns)
avg_train_online_return = np.mean(np.stack(train_online_returns), axis=0)
avg_test_online_return = np.mean(np.stack(test_online_returns), axis=0)
self.eval_statistics['AverageTrainReturn_all_train_tasks'] = train_returns
self.eval_statistics['AverageReturn_all_train_tasks'] = avg_train_return
self.eval_statistics['AverageReturn_all_test_tasks'] = avg_test_return
logger.save_extra_data(avg_train_online_return, path='online-train-epoch{}'.format(epoch))
logger.save_extra_data(avg_test_online_return, path='online-test-epoch{}'.format(epoch))
for key, value in self.eval_statistics.items():
logger.record_tabular(key, value)
self.eval_statistics = None
if self.render_eval_paths:
self.env.render_paths(paths)
if self.plotter:
self.plotter.draw()
@abc.abstractmethod
def training_mode(self, mode):
"""
Set training mode to `mode`.
:param mode: If True, training will happen (e.g. set the dropout
probabilities to not all ones).
"""
pass
@abc.abstractmethod
def _do_training(self):
"""
Perform some update, e.g. perform one gradient step.
:return:
"""
pass
|
the-stack_106_26202 | import sys
import cPickle
import csv
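# usage (illustrative): python <this script> <input.gtf> <output.tsv>
# writes a tab-separated table of chromosome, feature type, start, end,
# gene_id and gene_name for the feature types listed in keep_list below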
gtf_file = sys.argv[1]
out_file = sys.argv[2]
#######################################
keep_list = ["gene", "CDS", "start_codon", "stop_codon", "five_prime_utr", "three_prime_utr", "exon"]
with open(gtf_file, "r") as gtf, open(out_file, "wb") as table_file:
writer = csv.writer(table_file, delimiter="\t")
for line in gtf:
if not line.startswith("#"):
chromosome, source, type, start, end, score, strand, frame, attributes = line.strip().split("\t")
if type in keep_list: # filters out additional annotations
gene_id = "N/A"
gene_name = "N/A"
attribute_list = attributes.split("; ")
for attribute in attribute_list:
if attribute.startswith("gene_id"):
gene_id = attribute.split(None, 1)[1]
elif attribute.startswith("gene_name"):
gene_name = attribute.split(None, 1)[1]
writer.writerow([chromosome, type, start, end, gene_id, gene_name]) |
the-stack_106_26206 | import bblfsh_sonar_checks.utils as utils
import bblfsh
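# Reports binary (infix) expressions whose left- and right-hand operands are
# structurally identical (e.g. ``x == x``); expressions containing '=', '*'
# or '+' tokens are skipped below, since repeating an operand there can be
# intentional.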
def check(uast):
findings = []
binexpr_nodes = bblfsh.filter(uast, "//InfixExpression[@roleBinary and @roleExpression]")
for node in binexpr_nodes:
left = None
right = None
for c in node.children:
if bblfsh.role_id("LEFT") in c.roles:
left = c
elif bblfsh.role_id("RIGHT") in c.roles:
right = c
elif c.token in ["=", "*", "+"]:
left = None
right = None
break
if left and right:
break
if not left or not right:
continue
if utils.hash_node(left).hexdigest() == utils.hash_node(right).hexdigest():
findings.append({"msg": "Equal terms on both sides of binary expression, ",
"pos": node.start_position})
return findings
if __name__ == '__main__': utils.run_default_fixture(__file__, check)
|
the-stack_106_26211 |
import torch.nn as nn
import torch.nn.functional as F
class LeNet5(nn.Module):
def __init__(self):
super(LeNet5, self).__init__()
self.conv1 = nn.Conv2d(1, 6, (5,5), padding=0)
self.conv2 = nn.Conv2d(6, 16, (5,5))
self.fc1 = nn.Linear(16*5*5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = F.max_pool2d(self.conv1(x), (2,2))
# import pdb; pdb.set_trace()
x = F.max_pool2d(self.conv2(x), (2,2))
x = x.view(-1, 16*5*5)
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
output = x
return output
class CustomMLP(nn.Module):
def __init__(self):
super(CustomMLP, self).__init__()
self.fc1 = nn.Linear(1*32*32, 56)
self.fc2 = nn.Linear(56, 44)
self.fc3 = nn.Linear(44, 32)
self.fc4 = nn.Linear(32, 10)
def forward(self, x):
x = x.view(-1, 1024)
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
x = self.fc4(x)
output = x
return output
if __name__ == "__main__" :
from torchsummary import summary
net = LeNet5()
summary(net.cuda(), input_size=(1, 32, 32), batch_size=1)
net = CustomMLP()
summary(net.cuda(), input_size=(1, 32, 32), batch_size=1)
|
the-stack_106_26212 | import sys
import click
import six
from pyfiglet import figlet_format
from termcolor import colored
import docker
from dstools.launcher import launch_tool
def log(string, color, font="slant", figlet=False):
if colored:
if not figlet:
six.print_(colored(string, color))
else:
six.print_(colored(figlet_format(
string, font=font), color))
else:
six.print_(string)
@click.group()
@click.version_option("0.1.0")
def main():
"""
A Data Science Tool Launcher CLI
"""
log("Data Science Tools!", color="blue", figlet=True)
log("Welcome to the Data Science Tools CLI", "green")
try:
client = docker.from_env()
    except Exception:
log("Docker is not installed! Visit https://docs.docker.com/get-docker/",color="red")
@main.command()
@click.argument('keyword', required=False)
def menu(**kwargs):
"""Select from a menu of tools"""
launch_tool()
@main.command()
@click.argument('name', required=False)
def id(**kwargs):
"""Launch a tool if you know the ID"""
pass
if __name__ == '__main__':
args = sys.argv
if "--help" in args or len(args) == 1:
print("Data Science Tools!")
main()
|
the-stack_106_26213 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 11 13:50:08 2018
@author: Salomon Wollenstein
"""
import numpy as np
import pandas as pd
import os
'Parameters'
dir_shpfile = 'G:/My Drive/Github/PoA/shp/Jing/journal.shp'
dir_data = 'G:/Team Drives/MPO 2015/INRIX_byQuarter/4-6' # Will take all of the csv files contained in folders and subfolders
files_ID = '_cdc_apr_2015'
dir_capacity_data = 'G:/Team Drives/MPO 2012/capacity data/'
out_dir = '../results/' + files_ID + '/'
filtered_data_dir = out_dir + 'filtered_tmc_data' + '/'
if os.path.isdir(out_dir) == False:
os.mkdir(out_dir)
os.mkdir(filtered_data_dir)
# Filtering by date range and tmc
dates_input = [#{'id':'Jan','start_date': '2015-01-01' , 'end_date':'2015-01-10'},
#{'id':'Feb','start_date': '2015-02-01' , 'end_date':'2015-02-15'}]
{'id':'Apr','start_date': '2015-04-01' , 'end_date':'2015-05-01'}]
#{'id':'Aug','start_date': '2015-08-01' , 'end_date':'2015-08-10'},
#{'id':'Nov','start_date': '2015-11-01' , 'end_date':'2015-11-10'}]
''' ALWAYS WRITE ONE MORE DAY'''
# Select of you want to analyze weekends or weekdays
days_of_week = 'weekdays'
dates = pd.DataFrame(dates_input)
percentile_free_flow = 85
# Time instances
time_instances_input = [{'id':'AM','start_time':'7:00', 'end_time':'8:59'},
{'id':'MD','start_time':'11:00', 'end_time':'12:59'},
{'id':'PM','start_time':'17:00', 'end_time':'18:59'},
{'id':'NT','start_time':'21:00', 'end_time':'22:59'}]
instances_ID = ["AM", "MD", "PM", "NT"]
time_instances = pd.DataFrame(time_instances_input)
data_granularity = '1min'
#start_time = '9:00'
#end_time = '11:00'
c_value_min = 0
confidence_score_min = 0
#OD Pairs, in this case, all combinations
od_pairs = []
for i in range(9)[1:]:
for j in range(9)[1:]:
if i != j:
od_pairs.append([i, j])
od_pairs
number_of_routes_per_od = 6
theta = 0.8
lower_bound_route = 0.02
average_over_time_p = 5 # averaging the samples to find OD Demand *min
#Inverse optimization
month_w = 'Apr'
month = 4
year = 2015
n_zones = 8
week_day_list_1 = [1 ,2 ,3 ,6 ,7 ,8 ,9 ,10] # train_idx = 1
week_day_list_2 = [13, 14, 15, 16, 17, 20, 21] # train_idx = 2
week_day_list_3 = [22, 23, 24, 27, 28 ,29, 30] # train_idx = 3
week_day_list = [1 ,2 ,3 ,6 ,7 ,8 ,9 ,10, 13, 14, 15, 16, 17, 20, 21, 22, 23, 24, 27, 28 ,29, 30]
train_idx = range(1, 4)
#Machine learning parameters
deg_grid = range(4, 9)
c_grid = list(np.arange(.5, 3, .5))
lamb_grid = [10. ** m for m in range(-3, 4)]
# Density or Flow
var = "flow"
|
the-stack_106_26216 | import komand
import time
from .schema import GetNewAlertsInput, GetNewAlertsOutput, Input, Output, Component
# Custom imports below
from datetime import datetime
class GetNewAlerts(komand.Trigger):
def __init__(self):
super(self.__class__, self).__init__(
name='get_new_alerts',
description=Component.DESCRIPTION,
input=GetNewAlertsInput(),
output=GetNewAlertsOutput())
def run(self, params={}):
interval = params.get(Input.FREQUENCY)
now = datetime.now()
while True:
then = now
now = datetime.now()
            # triggered = [2017-07-30,)
            # same as 7/30/2017 <= triggered
params = {
"triggered": f"[{then.isoformat()},)"
}
response = self.connection.client.search_alerts(**params)
alerts = komand.helper.clean(response.result.get('data').get('results'))
            if alerts:
                for alert in alerts:
                    self.send({Output.ALERT: alert})
            else:
                self.logger.info("No new alerts found.")
self.logger.info(f"Sleeping for {interval}")
time.sleep(interval)
|
the-stack_106_26217 | import os
import re
import external.cclib as cclib
import logging
from subprocess import Popen, PIPE
from qmdata import CCLibData
from molecule import QMMolecule
class Mopac:
"""
A base class for all QM calculations that use MOPAC.
Classes such as :class:`MopacMol` will inherit from this class.
"""
inputFileExtension = '.mop'
outputFileExtension = '.out'
mopacEnv = os.getenv('MOPAC_DIR', default="/opt/mopac")
if os.path.exists(os.path.join(mopacEnv , 'MOPAC2012.exe')):
executablePath = os.path.join(mopacEnv , 'MOPAC2012.exe')
elif os.path.exists(os.path.join(mopacEnv , 'MOPAC2009.exe')):
executablePath = os.path.join(mopacEnv , 'MOPAC2009.exe')
else:
executablePath = os.path.join(mopacEnv , '(MOPAC 2009 or 2012)')
usePolar = False #use polar keyword in MOPAC
"Keywords for the multiplicity"
multiplicityKeywords = {
1: '',
2: 'uhf doublet',
3: 'uhf triplet',
4: 'uhf quartet',
5: 'uhf quintet',
6: 'uhf sextet',
7: 'uhf septet',
8: 'uhf octet',
9: 'uhf nonet',
}
#: List of phrases that indicate failure
#: NONE of these must be present in a succesful job.
failureKeys = [
'IMAGINARY FREQUENCIES',
'EXCESS NUMBER OF OPTIMIZATION CYCLES',
'NOT ENOUGH TIME FOR ANOTHER CYCLE',
]
#: List of phrases to indicate success.
#: ALL of these must be present in a successful job.
successKeys = [
'DESCRIPTION OF VIBRATIONS',
'MOPAC DONE'
]
def testReady(self):
if not os.path.exists(self.executablePath):
raise Exception("Couldn't find MOPAC executable at {0}. Try setting your MOPAC_DIR environment variable.".format(self.executablePath))
def run(self):
self.testReady()
# submits the input file to mopac
process = Popen([self.executablePath, self.inputFilePath])
process.communicate()# necessary to wait for executable termination!
return self.verifyOutputFile()
def verifyOutputFile(self):
"""
Check's that an output file exists and was successful.
Returns a boolean flag that states whether a successful MOPAC simulation already exists for the molecule with the
given (augmented) InChI Key.
The definition of finding a successful simulation is based on these criteria:
1) finding an output file with the file name equal to the InChI Key
        2) NOT finding any of the keywords that denote a calculation failure
3) finding all the keywords that denote a calculation success.
4) finding a match between the InChI of the given molecule and the InchI found in the calculation files
If any of the above criteria is not matched, False will be returned and the procedures to start a new calculation
will be initiated.
"""
if not os.path.exists(self.outputFilePath):
logging.debug("Output file {0} does not (yet) exist.".format(self.outputFilePath))
return False
        InChIMatch=False #flag (1 or 0) indicating whether the InChI in the file matches InChIaug; this can only be 1 if InChIFound is also 1
InChIFound=False #flag (1 or 0) indicating whether an InChI was found in the log file
# Initialize dictionary with "False"s
successKeysFound = dict([(key, False) for key in self.successKeys])
with open(self.outputFilePath) as outputFile:
for line in outputFile:
line = line.strip()
for element in self.failureKeys: #search for failure keywords
if element in line:
logging.error("MOPAC output file contains the following error: {0}".format(element) )
return False
for element in self.successKeys: #search for success keywords
if element in line:
successKeysFound[element] = True
if "InChI=" in line:
logFileInChI = line #output files should take up to 240 characters of the name in the input file
InChIFound = True
if logFileInChI == self.uniqueIDlong:
InChIMatch = True
else:
logging.warning("InChI in log file ({0}) didn't match that in geometry ({1}).".format(logFileInChI, self.uniqueIDlong))
# Use only up to first 80 characters to match due to MOPAC bug which deletes 81st character of InChI string
if self.uniqueIDlong.startswith(logFileInChI[:80]):
logging.warning("but the beginning matches so it's probably just a truncation problem.")
InChIMatch = True
# Check that ALL 'success' keywords were found in the file.
if not all( successKeysFound.values() ):
logging.error('Not all of the required keywords for success were found in the output file!')
return False
if not InChIFound:
logging.error("No InChI was found in the MOPAC output file {0}".format(self.outputFilePath))
return False
if InChIMatch:
logging.info("Successful MOPAC quantum result found in {0}".format(self.outputFilePath))
# " + self.molfile.name + " ("+self.molfile.InChIAug+") has been found. This log file will be used.")
return True
#InChIs do not match (most likely due to limited name length mirrored in log file (240 characters), but possibly due to a collision)
return self.checkForInChiKeyCollision(logFileInChI) # Not yet implemented!
def parse(self):
"""
Parses the results of the Mopac calculation, and returns a CCLibData object.
"""
parser = cclib.parser.Mopac(self.outputFilePath)
parser.logger.setLevel(logging.ERROR) #cf. http://cclib.sourceforge.net/wiki/index.php/Using_cclib#Additional_information
cclibData = parser.parse()
radicalNumber = sum([i.radicalElectrons for i in self.molecule.atoms])
qmData = CCLibData(cclibData, radicalNumber+1)
return qmData
class MopacMol(QMMolecule, Mopac):
"""
A base Class for calculations of molecules using MOPAC.
Inherits from both :class:`QMMolecule` and :class:`Mopac`.
"""
def writeInputFile(self, attempt):
"""
Using the :class:`Geometry` object, write the input file
for the `attmept`th attempt.
"""
molfile = self.getMolFilePathForCalculation(attempt)
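        # each atom line of the mol file holds "x y z element"; the regex below
        # captures the three coordinates and the element symbol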
atomline = re.compile('\s*([\- ][0-9.]+)\s+([\- ][0-9.]+)+\s+([\- ][0-9.]+)\s+([A-Za-z]+)')
output = [ self.geometry.uniqueIDlong, '' ]
atomCount = 0
with open(molfile) as molinput:
for line in molinput:
match = atomline.match(line)
if match:
output.append("{0:4s} {1} 1 {2} 1 {3} 1".format(match.group(4), match.group(1), match.group(2), match.group(3)))
atomCount += 1
assert atomCount == len(self.molecule.atoms)
output.append('')
input_string = '\n'.join(output)
top_keys, bottom_keys, polar_keys = self.inputFileKeywords(attempt)
with open(self.inputFilePath, 'w') as mopacFile:
mopacFile.write(top_keys)
mopacFile.write('\n')
mopacFile.write(input_string)
mopacFile.write('\n')
mopacFile.write(bottom_keys)
if self.usePolar:
mopacFile.write('\n\n\n')
mopacFile.write(polar_keys)
def inputFileKeywords(self, attempt):
"""
Return the top, bottom, and polar keywords.
"""
raise NotImplementedError("Should be defined by subclass, eg. MopacMolPM3")
def generateQMData(self):
"""
Calculate the QM data and return a QMData object, or None if it fails.
"""
if self.verifyOutputFile():
logging.info("Found a successful output file already; using that.")
source = "QM MOPAC result file found from previous run."
else:
self.createGeometry()
success = False
for attempt in range(1, self.maxAttempts+1):
self.writeInputFile(attempt)
logging.info('Trying {3} attempt {0} of {1} on molecule {2}.'.format(attempt, self.maxAttempts, self.molecule.toSMILES(), self.__class__.__name__))
success = self.run()
if success:
source = "QM {0} calculation attempt {1}".format(self.__class__.__name__, attempt )
break
else:
logging.error('QM thermo calculation failed for {0}.'.format(self.molecule.toAugmentedInChI()))
return None
result = self.parse() # parsed in cclib
result.source = source
return result # a CCLibData object
class MopacMolPM3(MopacMol):
#: Keywords that will be added at the top and bottom of the qm input file
keywords = [
{'top':"precise nosym", 'bottom':"oldgeo thermo nosym precise "},
{'top':"precise nosym gnorm=0.0 nonr", 'bottom':"oldgeo thermo nosym precise "},
{'top':"precise nosym gnorm=0.0", 'bottom':"oldgeo thermo nosym precise "},
{'top':"precise nosym gnorm=0.0 bfgs", 'bottom':"oldgeo thermo nosym precise "},
{'top':"precise nosym recalc=10 dmax=0.10 nonr cycles=2000 t=2000", 'bottom':"oldgeo thermo nosym precise "},
]
@property
def scriptAttempts(self):
"The number of attempts with different script keywords"
return len(self.keywords)
@property
def maxAttempts(self):
"The total number of attempts to try"
return 2 * len(self.keywords)
def inputFileKeywords(self, attempt):
"""
Return the top, bottom, and polar keywords for attempt number `attempt`.
NB. `attempt`s begin at 1, not 0.
"""
assert attempt <= self.maxAttempts
if attempt > self.scriptAttempts:
attempt -= self.scriptAttempts
multiplicity_keys = self.multiplicityKeywords[self.geometry.multiplicity]
top_keys = "pm3 {0} {1}".format(
multiplicity_keys,
self.keywords[attempt-1]['top'],
)
bottom_keys = "{0} pm3 {1}".format(
self.keywords[attempt-1]['bottom'],
multiplicity_keys,
)
polar_keys = "oldgeo {0} nosym precise pm3 {1}".format(
'polar' if self.geometry.multiplicity == 1 else 'static',
multiplicity_keys,
)
return top_keys, bottom_keys, polar_keys |
the-stack_106_26218 | # pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for dm_c19_modelling.evaluation.baseline_models."""
import datetime
import functools
from absl.testing import absltest
from absl.testing import parameterized
from dm_c19_modelling.evaluation import baseline_models
from dm_c19_modelling.evaluation import dataset_factory
import numpy as np
def _get_dataset(num_training_dates, num_forecast_dates, num_sites):
sites = ["site_1", "site_2", "site_3"]
training_datetimes = [
datetime.datetime.strptime("2020-05-07", "%Y-%m-%d") +
datetime.timedelta(days=i) for i in range(num_training_dates)
]
eval_dates = range(num_training_dates,
num_training_dates + num_forecast_dates)
eval_datetimes = [
datetime.datetime.strptime("2020-05-07", "%Y-%m-%d") +
datetime.timedelta(days=i) for i in eval_dates
]
training_targets = np.random.randint(
0, 100, (num_training_dates, num_sites, 1))
eval_targets = np.random.randint(
0, 100, (num_forecast_dates, num_sites, 1))
sum_past_targets = np.random.randint(0, 100, (len(sites), 1))
return dataset_factory.Dataset(
training_targets=training_targets,
evaluation_targets=eval_targets,
sum_past_targets=sum_past_targets,
training_features=[],
target_names=["new_confirmed"],
feature_names=[],
training_dates=[
datetime.datetime.strftime(date, "%Y-%m-%d")
for date in training_datetimes
],
evaluation_dates=[
datetime.datetime.strftime(date, "%Y-%m-%d")
for date in eval_datetimes
],
sites=sites,
dataset_index_key="12345",
cadence=1
)
class BaselineModelsTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self._num_training_dates = 20
self._num_forecast_dates = 14
self._num_sites = 3
self._dataset = _get_dataset(
self._num_training_dates, self._num_forecast_dates, self._num_sites)
@parameterized.named_parameters([
("logistic", baseline_models.Logistic),
("gompertz", baseline_models.Gompertz),
("quadratic", functools.partial(
baseline_models.PolynomialFit,
polynomial_degree=2, num_context_dates=2, fit_cumulatives=False)),])
def test_curve_fitting_model_predict(self, model_class):
"""Checks that predictions are the correct shape, defined, and positive."""
model = model_class()
predictions = model.predict(self._dataset)
self.assertEqual(
predictions.shape, (self._num_forecast_dates, self._num_sites, 1))
if isinstance(model_class,
(baseline_models.Logistic, baseline_models.Gompertz)):
self.assertFalse(np.any(predictions < 0))
self.assertFalse(np.any(np.isnan(predictions)))
def test_repeat_weekly_model_insufficient_data_raises_value_error(self):
"""Checks that the repeat weekly model fails with only 6 days of data."""
model = baseline_models.RepeatLastWeek()
dataset = _get_dataset(6, self._num_forecast_dates, self._num_sites)
with self.assertRaisesRegex(ValueError,
"At least 1 week of training data required *"):
model.predict(dataset)
def test_repeat_weekly_deaths_model_6_day_horizon_outputs_correctly(self):
"""Checks predictions from the repeating model with horizon < 1 week."""
model = baseline_models.RepeatLastWeek()
dataset = _get_dataset(self._num_training_dates, 6, self._num_sites)
predictions = model.predict(dataset)
np.testing.assert_array_equal(predictions, dataset.training_targets[-7:-1])
def test_repeat_weekly_deaths_model_12_day_horizon_outputs_correctly(self):
"""Checks predictions from the repeating model with horizon > 1 week."""
model = baseline_models.RepeatLastWeek()
dataset = _get_dataset(self._num_training_dates, 12, self._num_sites)
predictions = model.predict(dataset)
np.testing.assert_array_equal(predictions[:7],
dataset.training_targets[-7:])
np.testing.assert_array_equal(predictions[7:],
dataset.training_targets[-7:-2])
if __name__ == "__main__":
absltest.main()
|
the-stack_106_26219 | import argparse
import os
import scipy.io as scio
import cv2 as cv
import numpy as np
import utils
import resnet_image
import pickle
import paddle.optimizer as optim
import paddle
import datetime
import sys
from paddle.io import DataLoader,TensorDataset
def net_train(net,data_loader,opt,loss_func,cur_e,args):
# 父类方法
net.train()
begin_time = datetime.datetime.now()
train_loss = 0.0
batch_num = int(len(data_loader.dataset) / args.batch_size)
for i,data in enumerate(data_loader,0):
#print('batch:%d/%d' % (i,batch_num))
img,label = data
img,label = img.cuda(),label.cuda()
        # zero the gradients
opt.clear_grad()
        # forward output
output = net(img)[1]
loss = loss_func(output, label)
        loss.backward()  # backward pass
opt.step()
# loss
train_loss += loss
end_time = datetime.datetime.now()
delta_time = (end_time-begin_time)
delta_seconds = (delta_time.seconds*1000 + delta_time.microseconds)/1000
print('epoch:%d loss:%.4f time:%.4f'% (cur_e,train_loss.cpu(),(delta_seconds)))
return net
def net_test(net,data_loader):
num = len(data_loader.dataset)
correct = 0
net.eval()
with paddle.no_grad():
for i, data in enumerate(data_loader, 0):
img, label = data # cpu
img, label = img.cuda(), label.cuda() # gpu
output = net(img)[1]
predict_label = paddle.argmax(output,axis=1)
correct += (predict_label == label).sum()
return correct.cpu().numpy()/num
def main():
parser = argparse.ArgumentParser(description='AID_PRETRAIN')
parser.add_argument('--dataset', type=str, default='../data/proc_aid.pkl', help='the path of aid dataset')
parser.add_argument('--batch_size', type=int, default=32,help='training batch size')
parser.add_argument('--learning_rate', type=float, default=1e-3, help='training batch size')
parser.add_argument('--epoch',type=int,default=50,help='training epoch')
parser.add_argument('--gpu_ids', type=str, default='0', help='USING GPU IDS e.g.\'[0,4]\'')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help = 'SGD momentum (default: 0.9)')
args = parser.parse_args()
    # load the data
print('loading data from:'+args.dataset)
data_dict = pickle.load(open(args.dataset,mode='rb'))
print('data loaded.')
    train_dataset = TensorDataset(paddle.to_tensor(data_dict['tr_X']), paddle.to_tensor(data_dict['tr_Y']))  # no need to convert labels to one-hot
test_dataset = TensorDataset(paddle.to_tensor(data_dict['te_X']), paddle.to_tensor(data_dict['te_Y']))
train_dataloader = DataLoader(train_dataset,batch_size=args.batch_size,shuffle=True,num_workers=4)
test_dataloader = DataLoader(test_dataset,batch_size=args.batch_size,shuffle=False,num_workers=4)
loss_func = paddle.nn.CrossEntropyLoss()
    # initialize the model
net = resnet_image.resnet101(False,num_classes= 30)
    # move the model to CUDA for multi-GPU parallel training
gpu_ids = [int(e) for e in args.gpu_ids.split(',')]
net = paddle.DataParallel(net, device_ids=[0,1,2,3]).cuda()
    # optimizer
optimizer = optim.SGD(params=net.parameters(),lr=args.learning_rate, momentum=args.momentum)
#optimizer = optim.Adam(params=net.parameters(), lr=args.learning_rate, betas=(0.9, 0.999), weight_decay=0.0001)
max_acc = 0.0
for e in range(args.epoch):
net.train()
begin_time = datetime.datetime.now()
train_loss = 0.0
batch_num = int(len(train_dataloader.dataset) / args.batch_size)
for i, data in enumerate(train_dataloader, 0):
# print('batch:%d/%d' % (i,batch_num))
img, label = data
img, label = img.cuda(), label.cuda()
            # zero the gradients
            optimizer.clear_grad()
            # forward output
            output = net(img)[1]
            loss = loss_func(output, label)
            loss.backward()  # backward pass
optimizer.step()
# loss
train_loss += loss
end_time = datetime.datetime.now()
delta_time = (end_time - begin_time)
delta_seconds = (delta_time.seconds * 1000 + delta_time.microseconds) / 1000
test_acc = net_test(net,test_dataloader)
print('epoch:%d loss:%.4f time:%.4f test acc:%f' % (e, train_loss.cpu(), (delta_seconds), test_acc))
sys.stdout.flush()
if test_acc > max_acc:
max_acc = test_acc
try:
state_dict = net.module.state_dict()
except AttributeError:
state_dict = net.state_dict()
paddle.save(state_dict, '../model/visual_model_pretrain.pt')
if __name__ == '__main__':
main()
|
the-stack_106_26222 | import argparse
import os
import random
from glob import glob
from pathlib import Path
import numpy as np
import tifffile
from PIL import Image
from tqdm import tqdm
def img_loader(fp):
if Path(fp).suffix.lower() in [".jpg", ".jpeg", ".png"]:
arr = np.array(Image.open(fp))
else:
arr = tifffile.imread(fp)
return arr
def main(args):
files = glob(os.path.join(args.root, "**/*"), recursive=True)
files = [x for x in files if Path(x).suffix.replace(".", "") in args.fmt]
random.shuffle(files)
files = files[0 : int(len(files) * args.percent)]
if not files:
print("INFO: No Image Found!")
return
pixel_num = 0 # store all pixel number in the dataset
channel_sum = np.zeros(args.channels)
channel_sum_squared = np.zeros(args.channels)
for item in tqdm(files):
arr = img_loader(item)
arr = arr / args.max
pixel_num += arr.shape[0] * arr.shape[1]
channel_sum += np.sum(arr, axis=(0, 1))
channel_sum_squared += np.sum(np.square(arr), axis=(0, 1))
mean = channel_sum / pixel_num
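    # per-channel standard deviation via E[x^2] - (E[x])^2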
std = np.sqrt(channel_sum_squared / pixel_num - np.square(mean))
print("scaled mean:{} \nscaled std: {} ".format(mean, std))
print("orginal mean: {} \norginal std: {}".format(mean * args.max, std * args.max))
def parse_args():
parser = argparse.ArgumentParser(
description="calcuate the datasets mean and std value"
)
parser.add_argument(
"--root",default="/media/wtl/00077CE80009E4AD/tw/AID/flow55_1/train", type=str, help="root dir of image datasets"
)
parser.add_argument(
"--fmt",
default=["png"],
nargs="+",
help="file suffix to calcuate, default{jpg}, support suffix: jpg, jpeg, png, tif, tiff",
)
parser.add_argument("--percent", default=1, help="percent of images to calcuate")
parser.add_argument("--channels", default=3, help="datasets image channels")
parser.add_argument(
"--max", default=255, type=float, help="max value of all images default: {255}"
)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
main(args) |
the-stack_106_26224 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from qap.cli import QAProtocolCLI
if __name__ == "__main__":
obj = QAProtocolCLI()
obj.run()
|
the-stack_106_26226 | import random
import numpy as np
import time
import torch as T
import os.path
from tqdm import tqdm
#device = T.device("cpu") # apply to Tensor or Module
# -----------------------------------------------------------
class InputDataset(T.utils.data.Dataset):
def __init__(self, src_file,n_rows=None):
all_xy= np.loadtxt(src_file,max_rows=n_rows,
usecols=[ 1, 2, 3, 4, 5, 6, 7,8,9,10,11,12,13,14,15,16,17,18,19, 20, 21], delimiter="\t",
dtype=np.float32)
n = len(all_xy)
        tmp_x = all_xy[0:n, 0:19]  # all rows, cols [0,19)
tmp_y = all_xy[0:n, 19:21]
self.x_data = \
T.tensor(tmp_x, dtype=T.float32)#.to(device)
self.y_data = \
T.tensor(tmp_y, dtype=T.float32)#.to(device)
#self.truedata = \
# T.tensor(true_d, dtype=T.int64)
def __len__(self):
return len(self.x_data)
def __getitem__(self, idx):
preds = self.x_data[idx]
trgts = self.y_data[idx]
#true = self.truedata[idx]
sample = {
'predictors': preds,
'targets': trgts
}
return sample
# -----------------------------------------------------------
class Net(T.nn.Module):
def __init__(self):
super(Net, self).__init__()
self.hid1 = T.nn.Linear(19, 38) # 18-(15-15)-11
self.hid2 = T.nn.Linear(38,76)
self.oupt = T.nn.Linear(76, 2)
self.relu = T.nn.ReLU()
T.nn.init.xavier_uniform_(self.hid1.weight)
T.nn.init.zeros_(self.hid1.bias)
T.nn.init.xavier_uniform_(self.hid2.weight)
T.nn.init.zeros_(self.hid2.bias)
T.nn.init.xavier_uniform_(self.oupt.weight)
T.nn.init.zeros_(self.oupt.bias)
def forward(self, x):
z = T.tanh(self.hid1(x))
z = self.relu(z)
z = T.tanh(self.hid2(z))
z = self.relu(z)
z = T.sigmoid(self.oupt(z))
return z
def accuracy(model, ds):
# assumes model.eval()
# granular but slow approach
n_correct = 0
n_wrong = 0
for i in range(len(ds)):
X = ds[i]['predictors']
Y = ds[i]['targets']
with T.no_grad():
oupt = model(X) # logits form
oupt = oupt.numpy()
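        # midpoint of the predicted [lower, upper] interval, snapped down to the
        # 0.1 grid; counted as correct if it lies inside the target bounds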
max_index = int((oupt[0] + oupt[1]) / 2 * 10) / 10
if Y[0] <= max_index <= Y[1]:
n_correct += 1
else:
n_wrong += 1
acc = n_correct/(n_wrong+n_correct)
return acc
# -----------------------------------------------------------
def main():
# 0. get started
base_dir = os.getcwd() + '/'
    file_dir = base_dir + "Msmarco/ance/"  # input folder; modify this if you want to test on another dataset
if not os.path.exists(file_dir+"model"):
os.mkdir(file_dir+"model")
print("\nBegin predict alpha value \n")
np.random.seed(1)
T.manual_seed(1)
result_dict = {}
for input_index in range(0, 11):
doc = file_dir + 'eval/mrr10/' + str(input_index / 10) + '.eval'
current_input = open(doc, 'r')
current_lines = current_input.readlines()
for current_line in current_lines:
current_item = current_line.split()
qid = current_item[1]
current_score = current_item[2]
if qid in result_dict.keys():
previous = result_dict.get(qid)
previous.append(current_score)
result_dict[qid] = previous
else:
result_dict[qid] = [current_score]
# 1. create DataLoader objects
print("Creating train and test datasets ")
test_file = file_dir + "feature_out/train_bound.txt"
testing_xdict = {}
test_lines = open(test_file, 'r').readlines()
for line in test_lines:
items = line.split(sep='\t')
xitem = [float(items[k]) for k in range(1, 20)]
testing_xdict[items[0]] = T.tensor([xitem], dtype=T.float32)
bat_size = 100
#print(train_ldr)
# 2. create network
net = Net()#.to(device)
# 3. train model
max_epochs = 1000
ep_log_interval = 10
lrn_rate = 0.01
# -----------------------------------------------------------
loss_func = T.nn.BCELoss() # apply log-softmax()
optimizer = T.optim.SGD(net.parameters(), lr=lrn_rate)
print("\nbat_size = %3d " % bat_size)
print("loss = " + str(loss_func))
print("optimizer = SGD")
print("max_epochs = %3d " % max_epochs)
print("lrn_rate = %0.3f " % lrn_rate)
print("\nStarting train with saved checkpoints")
net.train()
max_line_length = 0
train_file_name = file_dir + 'feature_out/train_bound.txt'
for epoch in tqdm(range(0, max_epochs)):
        T.manual_seed(1 + epoch)  # for reproducibility
epoch_loss = 0 # for one full epoch
train_ds = InputDataset(train_file_name)
train_ldr = T.utils.data.DataLoader(train_ds,
batch_size=bat_size, shuffle=False)
for (batch_idx, batch) in enumerate(train_ldr):
X = batch['predictors'] # inputs
Y = batch['targets'] # shape [10,3] (!)
optimizer.zero_grad()
oupt = net(X) # shape [10] (!)
loss_val = loss_func(oupt, Y) # avg loss in batch
epoch_loss += loss_val.item() # a sum of averages
loss_val.backward()
optimizer.step()
if epoch % ep_log_interval == 0:
print("epoch = %4d loss = %0.4f" % \
(epoch, epoch_loss))
# checkpoint after 0-based epoch 100, 200, etc.
print("Computing model accuracy")
eval_results = []
another = []
maxi = []
net.eval()
acc_train = accuracy(net, train_ds) # item-by-item
print("Accuracy on training data = %0.4f" % acc_train)
fn = file_dir + "model/model_bce_bound.pth"
# output_model = open(fn, 'w')
T.save(net.state_dict(), fn)
for testing_element in testing_xdict.keys():
inpt = testing_xdict.get(testing_element)
        with T.no_grad():
            logits = net(inpt[0:19])
            logits = logits[0]
            max_index = int((logits[0] + logits[1]) / 2 * 10)
eval_result = float(result_dict.get(testing_element)[max_index])
another_result = float(result_dict.get(testing_element)[2])
maxi_reuslt = float(max(result_dict.get(testing_element)))
eval_results.append(eval_result)
another.append(another_result)
maxi.append(maxi_reuslt)
print("Predicted mrr10 value is : " + str(sum(eval_results) / len(eval_results)) + " vs " + str(
sum(another) / len(another)) + " vs " + str(sum(maxi)/ len(maxi)))
print("Training complete ")
# 4. evaluate model accuracy
#acc_test = accuracy(net, test_ds) # en masse
# acc_test = accuracy_quick(net, test_ds) # en masse
#print("Accuracy on test data = %0.4f" % acc_test)
print("\nComputing model accuracy")
net.eval()
acc_train = accuracy(net, train_ds) # item-by-item
print("Accuracy on training data = %0.4f" % acc_train)
# 5. make a prediction
eval_results = []
another = []
maxi = []
print("\nPredicting: ")
count_p=0
count_n=0
count_e=0
count_reach_m = 0
count_not_reach_m = 0
for testing_element in testing_xdict.keys():
inpt = testing_xdict.get(testing_element)
        with T.no_grad():
            logits = net(inpt[0:19]) # values do not sum to 1.0
            logits = logits[0]
            max_index = int((logits[0] + logits[1]) / 2 * 10)
eval_result = float(result_dict.get(testing_element)[max_index])
another_result = float(result_dict.get(testing_element)[2])
maxi_reuslt = float(max(result_dict.get(testing_element)))
if eval_result > another_result:
count_p +=1
print("The predicted appears at: " + str(max_index/10) + " with value of "+ result_dict.get(testing_element)[max_index] + " > fixed and max=" + max(result_dict.get(testing_element)))
elif eval_result == another_result:
count_e +=1
print("The predicted appears at: " + str(max_index/10) + " with value of "+ result_dict.get(testing_element)[max_index] + " = fixed and max=" + max(result_dict.get(testing_element)))
else:
count_n +=1
print("The predicted appears at: " + str(max_index / 10) + " with value of "+ result_dict.get(testing_element)[max_index] + " < fixed and max=" + max(result_dict.get(testing_element)))
eval_results.append(eval_result)
another.append(another_result)
maxi.append(maxi_reuslt)
print("Predicted mrr10 value is : " + str(sum(eval_results)/len(eval_results)) + " vs " + str(sum(another)/len(another)) + " vs " + str(sum(maxi)/ len(maxi)))
ratio_p = count_p / (count_e + count_p + count_n)
ratio_n = count_n / (count_e + count_p + count_n)
ratio_e = 1 - ratio_n - ratio_p
print("\nHigher ratio = " + str(ratio_p) + " Equal ratio = " + str(ratio_e) + " Lower ratio = " + str(ratio_n) )
# 6. save model (state_dict approach)
print("\nSaving trained model ")
fn = file_dir+ "model/model_bce_bound.pth"
#output_model = open(fn, 'w')
T.save(net.state_dict(), fn)
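# Hedged sketch (not in the original script): reloading the saved weights for
# later inference would look roughly like this, assuming the same network
# class (called Net here for illustration) is importable and fn is the path
# used above:
#   net2 = Net()
#   net2.load_state_dict(T.load(fn))
#   net2.eval()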
print("\nEnd predict the output alpha value")
if __name__ == "__main__":
main()
|
the-stack_106_26227 | # Copyright 2020-2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for deterministic image op gradient functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from absl.testing import parameterized
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import image_grad_test_base as test_base
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
class ResizeNearestNeighborOpDeterminismExceptionsTest(test.TestCase,
parameterized.TestCase):
"""Test d9m-unimplemented exceptions from ResizeNearestNeighborOpGrad.
Test that tf.errors.UnimplementedError is thrown, as appropriate, by the
GPU-specific code-path through ResizeNearestNeighborOpGrad when deterministic
ops are enabled.
This test assumes that image_grad_test.py runs equivalent test cases when
deterministic ops are not enabled and will therefore detect erroneous
exception throwing in those cases.
"""
@parameterized.parameters(
{
'align_corners': False,
'half_pixel_centers': False,
'data_type': dtypes.float16
}, {
'align_corners': False,
'half_pixel_centers': False,
'data_type': dtypes.float32
}, {
'align_corners': False,
'half_pixel_centers': False,
'data_type': dtypes.float64
}, {
'align_corners': True,
'half_pixel_centers': False,
'data_type': dtypes.float32
}, {
'align_corners': False,
'half_pixel_centers': True,
'data_type': dtypes.float32
})
@test_util.run_gpu_only
@test_util.run_all_in_graph_and_eager_modes
def testExceptionThrowing(self, align_corners, half_pixel_centers, data_type):
with self.session(), test_util.force_gpu():
input_image = array_ops.zeros((1, 2, 2, 1), dtype=data_type)
with backprop.GradientTape() as tape:
tape.watch(input_image)
output_image = image_ops.resize_nearest_neighbor(
input_image, (3, 3),
align_corners=align_corners,
half_pixel_centers=half_pixel_centers)
with self.assertRaisesRegex(
errors.UnimplementedError,
'A deterministic GPU implementation of ResizeNearestNeighborGrad' +
' is not currently available.'):
gradient = tape.gradient(output_image, input_image)
self.evaluate(gradient)
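# Hedged illustration (not part of the original test): with the public TF API
# and op determinism enabled, the failure mode exercised above would look
# roughly like this on a GPU:
#   import tensorflow as tf
#   tf.config.experimental.enable_op_determinism()
#   x = tf.zeros((1, 2, 2, 1))
#   with tf.GradientTape() as tape:
#     tape.watch(x)
#     y = tf.image.resize(x, (3, 3), method='nearest')
#   tape.gradient(y, x)  # expected to raise tf.errors.UnimplementedError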
class ResizeBilinearOpDeterministicTest(test_base.ResizeBilinearOpTestBase):
"""Test that ResizeBilinearGrad operates reproducibly.
Inheriting from test_base.ResizeBilinearOpTestBase ensures that regular op
functionality is correct when the deterministic code-path is selected.
"""
def _randomNDArray(self, shape):
return 2 * np.random.random_sample(shape) - 1
def _randomDataOp(self, shape, data_type):
return constant_op.constant(self._randomNDArray(shape), dtype=data_type)
@parameterized.parameters(
# Note that there is no 16-bit floating point format registered for GPU
{
'align_corners': False,
'half_pixel_centers': False,
'data_type': dtypes.float32
},
{
'align_corners': False,
'half_pixel_centers': False,
'data_type': dtypes.float64
},
{
'align_corners': True,
'half_pixel_centers': False,
'data_type': dtypes.float32
},
{
'align_corners': False,
'half_pixel_centers': True,
'data_type': dtypes.float32
})
@test_util.run_in_graph_and_eager_modes
@test_util.run_gpu_only
def testDeterministicGradients(self, align_corners, half_pixel_centers,
data_type):
if not align_corners and test_util.is_xla_enabled():
# Align corners is deprecated in TF2.0, but align_corners==False is not
# supported by XLA.
self.skipTest('align_corners==False not currently supported by XLA')
with self.session(force_gpu=True):
seed = (
hash(align_corners) % 256 + hash(half_pixel_centers) % 256 +
hash(data_type) % 256)
np.random.seed(seed)
input_shape = (1, 25, 12, 3) # NHWC
output_shape = (1, 200, 250, 3)
input_image = self._randomDataOp(input_shape, data_type)
repeat_count = 3
if context.executing_eagerly():
def resize_bilinear_gradients(local_seed):
np.random.seed(local_seed)
upstream_gradients = self._randomDataOp(output_shape, dtypes.float32)
with backprop.GradientTape(persistent=True) as tape:
tape.watch(input_image)
output_image = image_ops.resize_bilinear(
input_image,
output_shape[1:3],
align_corners=align_corners,
half_pixel_centers=half_pixel_centers)
gradient_injector_output = output_image * upstream_gradients
return tape.gradient(gradient_injector_output, input_image)
for i in range(repeat_count):
local_seed = seed + i # select different upstream gradients
result_a = resize_bilinear_gradients(local_seed)
result_b = resize_bilinear_gradients(local_seed)
self.assertAllEqual(result_a, result_b)
else: # graph mode
upstream_gradients = array_ops.placeholder(
dtypes.float32, shape=output_shape, name='upstream_gradients')
output_image = image_ops.resize_bilinear(
input_image,
output_shape[1:3],
align_corners=align_corners,
half_pixel_centers=half_pixel_centers)
gradient_injector_output = output_image * upstream_gradients
# The gradient function behaves as if grad_ys is multiplied by the op
# gradient result, not passing the upstream gradients through the op's
# gradient generation graph. This is the reason for using the
# gradient injector
resize_bilinear_gradients = gradients_impl.gradients(
gradient_injector_output,
input_image,
grad_ys=None,
colocate_gradients_with_ops=True)[0]
for i in range(repeat_count):
feed_dict = {upstream_gradients: self._randomNDArray(output_shape)}
result_a = resize_bilinear_gradients.eval(feed_dict=feed_dict)
result_b = resize_bilinear_gradients.eval(feed_dict=feed_dict)
self.assertAllEqual(result_a, result_b)
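# Hedged sketch (names here are illustrative, not from the original test) of
# the "gradient injector" idea used above, written with the public API: the
# upstream gradients are multiplied into the forward output so that
# tape.gradient effectively backpropagates them through the op under test.
#   with tf.GradientTape() as tape:
#     tape.watch(x)
#     y = tf.image.resize(x, out_hw, method='bilinear')
#     injected = y * upstream_gradients
#   grad = tape.gradient(injected, x)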
class CropAndResizeOpDeterminismExceptionsTest(test.TestCase):
"""Test d9m-unimplemented exceptions from CropAndResizeBackprop{Image|Boxes}.
Test that tf.errors.UnimplementedError is thrown or not thrown, as
appropriate, by the GPU code-paths for CropAndResizeBackprop{Image|Boxes} when
deterministic ops are enabled.
This test assumes that test_base.CropAndResizeOpTestBase runs all the same
test cases when deterministic ops are not enabled and will therefore detect
erroneous exception throwing in those cases.
"""
def _genParams(self, dtype=dtypes.float32):
batch_size = 1
image_height = 10
image_width = 10
channels = 1
image_shape = (batch_size, image_height, image_width, channels)
num_boxes = 3
boxes_shape = (num_boxes, 4)
random_seed.set_seed(123)
image = random_ops.random_normal(shape=image_shape, dtype=dtype)
boxes = random_ops.random_uniform(shape=boxes_shape, dtype=dtypes.float32)
box_indices = random_ops.random_uniform(
shape=(num_boxes,), minval=0, maxval=batch_size, dtype=dtypes.int32)
crop_size = constant_op.constant([3, 3], dtype=dtypes.int32)
return image, boxes, box_indices, crop_size
@test_util.run_in_graph_and_eager_modes
@test_util.run_gpu_only
def testExceptionThrowing(self):
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
image, boxes, box_indices, crop_size = self._genParams(dtype)
with backprop.GradientTape(persistent=True) as tape:
tape.watch(image)
tape.watch(boxes)
op_output = image_ops.crop_and_resize_v2(image, boxes, box_indices,
crop_size)
image_error_message = ('Deterministic GPU implementation of' +
' CropAndResizeBackpropImage not available')
with self.assertRaisesRegex(errors_impl.UnimplementedError,
image_error_message):
result = tape.gradient(op_output, image)
self.evaluate(result)
expected_error_message = ('Deterministic GPU implementation of' +
' CropAndResizeBackpropBoxes not available')
if context.executing_eagerly():
# With eager execution, the backprop-to-image code is apparently
# executed (first), even when its output is never used.
expected_error_message = image_error_message
with self.assertRaisesRegex(errors_impl.UnimplementedError,
expected_error_message):
result = tape.gradient(op_output, boxes)
self.evaluate(result)
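# Hedged illustration (not in the original test): the forward op under test is
# available publicly as tf.image.crop_and_resize; its gradients with respect
# to `image` and `boxes` are what the determinism checks above target.
#   crops = tf.image.crop_and_resize(image, boxes, box_indices, crop_size)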
class CropAndResizeOpDeterministicTest(test_base.CropAndResizeOpTestBase):
"""Test that CropAndResizeBackprop{Image|Boxes} operates reproducibly.
Inheriting from test_base.CropAndResizeOpTestBase ensures that regular op
functionality is correct when the deterministic code-path is selected.
"""
def _randomFloats(self, shape, low=0.0, high=1.0, dtype=dtypes.float32):
"""Generate a tensor of random floating-point values.
Values will be continuously distributed in the range [low, high).
Note that we use numpy to generate random numbers and then feed the result
through a constant op to avoid the re-rolling of TensorFlow random ops on
each run in graph mode.
Args:
shape: The output shape.
low: Lower bound of random numbers generated, inclusive.
high: Upper bound of random numbers generated, exclusive.
dtype: The output dtype.
Returns:
A random tensor
"""
val = np.random.random_sample(
shape) # float64 continuous uniform [0.0, 1.0)
diff = high - low
val *= diff
val += low
return constant_op.constant(val, dtype=dtype)
def _randomInts(self, shape, low, high):
"""Generate a tensor of random 32-bit integer values.
Note that we use numpy to generate random numbers and then feed the result
through a constant op to avoid the re-rolling of TensorFlow random ops on
each run in graph mode.
Args:
shape: The output shape.
low: Lower bound of random numbers generated, inclusive.
high: Upper bound of random numbers generated, exclusive.
Returns:
A random tensor
"""
val = np.random.randint(low=low, high=high, size=shape)
return constant_op.constant(val, dtype=dtypes.int32)
def _genParams(self, dtype=dtypes.float32):
batch_size = 16
input_height = 64
input_width = 64
depth = 1
input_shape = (batch_size, input_height, input_width, depth)
np.random.seed(456)
image = self._randomFloats(input_shape, low=-1.0, high=1.0, dtype=dtype)
box_count = 4 * batch_size
boxes = self._randomFloats((box_count, 4),
low=0.0,
high=1.01,
dtype=dtypes.float32)
box_indices = self._randomInts((box_count,), low=0, high=batch_size)
crop_size = [input_height * 2, input_width * 2]
output_shape = (box_count, *crop_size, depth)
# The output of this op is always float32, regardless of image data type
injected_gradients = self._randomFloats(
output_shape, low=-0.001, high=0.001, dtype=dtypes.float32)
return image, boxes, box_indices, crop_size, injected_gradients
def _testReproducibleBackprop(self, test_image_not_boxes):
with test_util.force_cpu():
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
params = self._genParams(dtype)
image, boxes, box_indices, crop_size, injected_gradients = params
with backprop.GradientTape(persistent=True) as tape:
tape.watch([image, boxes])
output = image_ops.crop_and_resize_v2(
image, boxes, box_indices, crop_size, method='bilinear')
upstream = output * injected_gradients
image_gradients_a, boxes_gradients_a = tape.gradient(
upstream, [image, boxes])
for _ in range(5):
image_gradients_b, boxes_gradients_b = tape.gradient(
upstream, [image, boxes])
if test_image_not_boxes:
self.assertAllEqual(image_gradients_a, image_gradients_b)
else:
self.assertAllEqual(boxes_gradients_a, boxes_gradients_b)
@test_util.run_in_graph_and_eager_modes
def testReproducibleBackpropToImage(self):
"""Test that backprop to image is reproducible.
With non-reproducible ordering of reduction operations, upsampling of a
crop, leading to three or more output pixels being derived from an input
pixel, can contribute to nondeterminism in the gradient associated with that
input pixel location.
Note that the number of boxes can be less than, equal to, or greater than
the batch size. With non-reproducible ordering of reduction operations, three
or more crops overlapping on the same input image pixel can independently
contribute to nondeterminism in the image gradient associated with that
input pixel location. This is independent of contributions caused by the
upsampling of any given crop.
"""
self._testReproducibleBackprop(test_image_not_boxes=True)
@test_util.run_in_graph_and_eager_modes
def testReproducibleBackpropToBoxes(self):
"""Test that backprop to boxes is reproducible.
If the input and output dimensions are the same, then the boxes gradients
will be deterministically zero. Otherwise, in the presence of
non-reproducible ordering of reduction operations, nondeterminism can be
introduced, whether there is upsampling or downsampling and whether or not
there are overlapping crops.
"""
self._testReproducibleBackprop(test_image_not_boxes=False)
if __name__ == '__main__':
# TODO(reedwm): Merge this file with image_grad_test.py and
# image_grad_test_base.py
config.enable_op_determinism()
test.main()
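# Hedged usage note (not in the original file): config.enable_op_determinism()
# above is the internal counterpart of the public switch available in recent
# TensorFlow releases:
#   tf.config.experimental.enable_op_determinism()
# Without it, the deterministic code paths and the UnimplementedError checks
# exercised by these tests are not selected.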
|