#!/usr/bin/env python3
from testUtils import Utils
import signal
import time
from Cluster import Cluster
from Cluster import NamedAccounts
from core_symbol import CORE_SYMBOL
from WalletMgr import WalletMgr
from Node import Node
from TestHelper import TestHelper
from TestHelper import AppArgs
import json
###############################################################
# nodeos_high_transaction_test
#
# This test sets up <-p> producing node(s) and <-n minus -p>
# non-producing node(s). The non-producing node(s) are sent
# many transfers. When the transfers are complete it verifies
# that all of the transactions made it into blocks.
#
###############################################################
Print=Utils.Print
appArgs = AppArgs()
minTotalAccounts = 20
extraArgs = appArgs.add(flag="--transaction-time-delta", type=int, help="How many seconds seconds behind an earlier sent transaction should be received after a later one", default=5)
extraArgs = appArgs.add(flag="--num-transactions", type=int, help="How many total transactions should be sent", default=10000)
extraArgs = appArgs.add(flag="--max-transactions-per-second", type=int, help="How many transactions per second should be sent", default=500)
extraArgs = appArgs.add(flag="--total-accounts", type=int, help="How many accounts should be involved in sending transfers. Must be greater than %d" % (minTotalAccounts), default=100)
extraArgs = appArgs.add_bool(flag="--send-duplicates", help="If identical transactions should be sent to all nodes")
args = TestHelper.parse_args({"-p", "-n","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run"}, applicationSpecificArgs=appArgs)
Utils.Debug=args.v
totalProducerNodes=args.p
totalNodes=args.n
if totalNodes<=totalProducerNodes:
totalNodes=totalProducerNodes+1
totalNonProducerNodes=totalNodes-totalProducerNodes
maxActiveProducers=totalProducerNodes
totalProducers=totalProducerNodes
cluster=Cluster(walletd=True)
dumpErrorDetails=args.dump_error_details
keepLogs=args.keep_logs
dontKill=args.leave_running
killAll=args.clean_run
walletPort=TestHelper.DEFAULT_WALLET_PORT
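# EOSIO produces one block every 0.5 seconds, so --transaction-time-delta seconds corresponds to transaction_time_delta * 2 blocks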
blocksPerSec=2
transBlocksBehind=args.transaction_time_delta * blocksPerSec
numTransactions = args.num_transactions
maxTransactionsPerSecond = args.max_transactions_per_second
assert args.total_accounts >= minTotalAccounts, Print("ERROR: Only %d was selected for --total-accounts, must have at least %d" % (args.total_accounts, minTotalAccounts))
if numTransactions % args.total_accounts > 0:
oldNumTransactions = numTransactions
numTransactions = int((oldNumTransactions + args.total_accounts - 1)/args.total_accounts) * args.total_accounts
Print("NOTE: --num-transactions passed as %d, but rounding to %d so each of the %d accounts gets the same number of transactions" %
(oldNumTransactions, numTransactions, args.total_accounts))
numRounds = int(numTransactions / args.total_accounts)
walletMgr=WalletMgr(True, port=walletPort)
testSuccessful=False
killEosInstances=not dontKill
killWallet=not dontKill
WalletdName=Utils.EosWalletName
ClientName="cleos"
maxTransactionAttempts = 2 # max number of attempts to try to send a transaction
maxTransactionAttemptsNoSend = 1 # max number of attempts to try to create a transaction to be sent as a duplicate
try:
TestHelper.printSystemInfo("BEGIN")
cluster.setWalletMgr(walletMgr)
cluster.killall(allInstances=killAll)
cluster.cleanup()
Print("Stand up cluster")
if cluster.launch(pnodes=totalProducerNodes,
totalNodes=totalNodes, totalProducers=totalProducers,
useBiosBootFile=False, topo="ring") is False:
Utils.cmdError("launcher")
Utils.errorExit("Failed to stand up eos cluster.")
# *** create accounts to vote in desired producers ***
Print("creating %d accounts" % (args.total_accounts))
namedAccounts=NamedAccounts(cluster,args.total_accounts)
accounts=namedAccounts.accounts
accountsToCreate = [cluster.eosioAccount]
for account in accounts:
accountsToCreate.append(account)
testWalletName="test"
Print("Creating wallet \"%s\"." % (testWalletName))
testWallet=walletMgr.create(testWalletName, accountsToCreate)
for _, account in cluster.defProducerAccounts.items():
walletMgr.importKey(account, testWallet, ignoreDupKeyWarning=True)
Print("Wallet \"%s\" password=%s." % (testWalletName, testWallet.password.encode("utf-8")))
for account in accounts:
walletMgr.importKey(account, testWallet)
# *** identify each node (producers and non-producing node) ***
nonProdNodes=[]
prodNodes=[]
allNodes=cluster.getNodes()
for i in range(0, totalNodes):
node=allNodes[i]
nodeProducers=Cluster.parseProducers(i)
numProducers=len(nodeProducers)
Print("node has producers=%s" % (nodeProducers))
if numProducers==0:
nonProdNodes.append(node)
else:
prodNodes.append(node)
nonProdNodeCount = len(nonProdNodes)
# *** delegate bandwidth to accounts ***
node=nonProdNodes[0]
checkTransIds = []
startTime = time.perf_counter()
Print("Create new accounts via %s" % (cluster.eosioAccount.name))
# create accounts via eosio as otherwise a bid is needed
for account in accounts:
trans = node.createInitializeAccount(account, cluster.eosioAccount, stakedDeposit=0, waitForTransBlock=False, stakeNet=1000, stakeCPU=1000, buyRAM=1000, exitOnError=True)
checkTransIds.append(Node.getTransId(trans))
nextTime = time.perf_counter()
Print("Create new accounts took %s sec" % (nextTime - startTime))
startTime = nextTime
Print("Transfer funds to new accounts via %s" % (cluster.eosioAccount.name))
for account in accounts:
transferAmount="1000.0000 {0}".format(CORE_SYMBOL)
Print("Transfer funds %s from account %s to %s" % (transferAmount, cluster.eosioAccount.name, account.name))
trans = node.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer", waitForTransBlock=False, reportStatus=False, sign = True)
checkTransIds.append(Node.getTransId(trans))
nextTime = time.perf_counter()
Print("Transfer funds took %s sec" % (nextTime - startTime))
startTime = nextTime
Print("Delegate Bandwidth to new accounts")
for account in accounts:
trans=node.delegatebw(account, 200.0000, 200.0000, waitForTransBlock=False, exitOnError=True, reportStatus=False)
checkTransIds.append(Node.getTransId(trans))
nextTime = time.perf_counter()
Print("Delegate Bandwidth took %s sec" % (nextTime - startTime))
startTime = nextTime
lastIrreversibleBlockNum = None
def cacheTransIdInBlock(transId, transToBlock, node):
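# Look up transId on the given node and wait (bounded by LIB progress) for its block, then cache every
# transaction in that block into transToBlock. Returns (block, trans) on success, (None, None) if the
# transaction never shows up, or (trans, None) if its block cannot be retrieved before LIB stops advancing.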
global lastIrreversibleBlockNum
lastPassLIB = None
blockWaitTimeout = 60
transTimeDelayed = False
while True:
trans = node.getTransaction(transId, delayedRetry=False)
if trans is None:
if transTimeDelayed:
return (None, None)
else:
if Utils.Debug:
Print("Transaction not found for trans id: %s. Will wait %d seconds to see if it arrives in a block." %
(transId, args.transaction_time_delta))
transTimeDelayed = True
node.waitForTransInBlock(transId, timeout = args.transaction_time_delta)
continue
lastIrreversibleBlockNum = trans["last_irreversible_block"]
blockNum = Node.getTransBlockNum(trans)
assert blockNum is not None, Print("ERROR: could not retrieve block num from transId: %s, trans: %s" % (transId, json.dumps(trans, indent=2)))
block = node.getBlock(blockNum)
if block is not None:
transactions = block["transactions"]
for trans_receipt in transactions:
btrans = trans_receipt["trx"]
assert btrans is not None, Print("ERROR: could not retrieve \"trx\" from transaction_receipt: %s, from transId: %s that led to block: %s" % (json.dumps(trans_receipt, indent=2), transId, json.dumps(block, indent=2)))
btransId = btrans["id"]
assert btransId is not None, Print("ERROR: could not retrieve \"id\" from \"trx\": %s, from transId: %s that led to block: %s" % (json.dumps(btrans, indent=2), transId, json.dumps(block, indent=2)))
assert btransId not in transToBlock, Print("ERROR: transaction_id: %s found in block: %d, but originally seen in block number: %d" % (btransId, blockNum, transToBlock[btransId]["block_num"]))
transToBlock[btransId] = block
break
if lastPassLIB is not None and lastPassLIB >= lastIrreversibleBlockNum:
Print("ERROR: could not find block number: %d from transId: %s, waited %d seconds for LIB to advance but it did not. trans: %s" % (blockNum, transId, blockWaitTimeout, json.dumps(trans, indent=2)))
return (trans, None)
if Utils.Debug:
extra = "" if lastPassLIB is None else " and since it progressed from %d in our last pass" % (lastPassLIB)
Print("Transaction returned for trans id: %s indicated it was in block num: %d, but that block could not be found. LIB is %d%s, we will wait to see if the block arrives." %
(transId, blockNum, lastIrreversibleBlockNum, extra))
lastPassLIB = lastIrreversibleBlockNum
node.waitForBlock(blockNum, timeout = blockWaitTimeout)
return (block, trans)
def findTransInBlock(transId, transToBlock, node):
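# Ensure transId is present in the transToBlock cache, fetching and caching its block if needed;
# asserts (failing the test) if the transaction or its block cannot be located.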
if transId in transToBlock:
return
(block, trans) = cacheTransIdInBlock(transId, transToBlock, node)
assert trans is not None, Print("ERROR: could not find transaction for transId: %s" % (transId))
assert block is not None, Print("ERROR: could not retrieve block with block num: %d, from transId: %s, trans: %s" % (blockNum, transId, json.dumps(trans, indent=2)))
transToBlock = {}
for transId in checkTransIds:
findTransInBlock(transId, transToBlock, node)
nextTime = time.perf_counter()
Print("Verifying transactions took %s sec" % (nextTime - startTime))
startTransferTime = nextTime
#verify nodes are in sync and advancing
cluster.waitOnClusterSync(blockAdvancing=5)
nodeOrder = []
if args.send_duplicates:
# kill bios, since it prevents the ring topology from really being a ring
cluster.biosNode.kill(signal.SIGTERM)
nodeOrder.append(0)
# jump to node furthest in ring from node 0
next = int((totalNodes + 1) / 2)
nodeOrder.append(next)
# then just fill in the rest of the nodes
for i in range(1, next):
nodeOrder.append(i)
for i in range(next + 1, totalNodes):
nodeOrder.append(i)
Print("Sending %d transfers" % (numTransactions))
delayAfterRounds = int(maxTransactionsPerSecond / args.total_accounts)
history = []
startTime = time.perf_counter()
startRound = None
for round in range(0, numRounds):
# ensure we are not sending too fast
startRound = time.perf_counter()
timeDiff = startRound - startTime
expectedTransactions = maxTransactionsPerSecond * timeDiff
sentTransactions = round * args.total_accounts
if sentTransactions > expectedTransactions:
excess = sentTransactions - expectedTransactions
# round up to a second
delayTime = int((excess + maxTransactionsPerSecond - 1) / maxTransactionsPerSecond)
Utils.Print("Delay %d seconds to keep max transactions under %d per second" % (delayTime, maxTransactionsPerSecond))
time.sleep(delayTime)
transferAmount = Node.currencyIntToStr(round + 1, CORE_SYMBOL)
Print("Sending round %d, transfer: %s" % (round, transferAmount))
for accountIndex in range(0, args.total_accounts):
fromAccount = accounts[accountIndex]
toAccountIndex = accountIndex + 1 if accountIndex + 1 < args.total_accounts else 0
toAccount = accounts[toAccountIndex]
node = nonProdNodes[accountIndex % nonProdNodeCount]
trans = None
attempts = 0
maxAttempts = maxTransactionAttempts if not args.send_duplicates else maxTransactionAttemptsNoSend # for send_duplicates we are just constructing a transaction, so should never require a second attempt
# can try up to maxAttempts times to send the transfer
while trans is None and attempts < maxAttempts:
if attempts > 0:
# delay and see if transfer is accepted now
Utils.Print("Transfer rejected, delay 1 second and see if it is then accepted")
time.sleep(1)
trans=node.transferFunds(fromAccount, toAccount, transferAmount, "transfer round %d" % (round), exitOnError=False, reportStatus=False, sign = True, dontSend = args.send_duplicates)
attempts += 1
if args.send_duplicates:
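# The transfer above was only constructed (dontSend), so push the identical signed transaction
# directly to every node in nodeOrder; the first accepted response supplies the transaction id
# used later for verification, and numAccepted counts how many nodes accepted the duplicate.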
sendTrans = trans
trans = None
numAccepted = 0
attempts = 0
while trans is None and attempts < maxTransactionAttempts:
for node in map(lambda ordinal: allNodes[ordinal], nodeOrder):
repeatTrans = node.pushTransaction(sendTrans, silentErrors=True)
if repeatTrans is not None:
if trans is None and repeatTrans[0]:
trans = repeatTrans[1]
transId = Node.getTransId(trans)
numAccepted += 1
attempts += 1
assert trans is not None, Print("ERROR: failed round: %d, fromAccount: %s, toAccount: %s" % (round, accountIndex, toAccountIndex))
transId = Node.getTransId(trans)
history.append(transId)
nextTime = time.perf_counter()
Print("Sending transfers took %s sec" % (nextTime - startTransferTime))
startTransferValidationTime = nextTime
blocks = {}
transToBlock = {}
missingTransactions = []
transBlockOrderWeird = []
newestBlockNum = None
newestBlockNumTransId = None
newestBlockNumTransOrder = None
lastBlockNum = None
lastTransId = None
transOrder = 0
lastPassLIB = None
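# Walk the send history in order: look up the block for each transfer and flag any transfer whose block
# is more than transBlocksBehind blocks away from the previous transfer's block (i.e. delayed more than
# --transaction-time-delta seconds), as well as any transfer that never made it into a block.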
for transId in history:
blockNum = None
block = None
transDesc = None
if transId not in transToBlock:
(block, trans) = cacheTransIdInBlock(transId, transToBlock, node)
if trans is None or block is None:
missingTransactions.append({
"newer_trans_id" : transId,
"newer_trans_index" : transOrder,
"newer_bnum" : None,
"last_trans_id" : lastTransId,
"last_trans_index" : transOrder - 1,
"last_bnum" : lastBlockNum,
})
if newestBlockNum is not None and (lastBlockNum is None or newestBlockNum > lastBlockNum):
missingTransactions[-1]["highest_block_seen"] = newestBlockNum
blockNum = Node.getTransBlockNum(trans)
assert blockNum is not None, Print("ERROR: could not retrieve block num from transId: %s, trans: %s" % (transId, json.dumps(trans, indent=2)))
else:
block = transToBlock[transId]
blockNum = block["block_num"]
assert blockNum is not None, Print("ERROR: could not retrieve block num for block retrieved for transId: %s, block: %s" % (transId, json.dumps(block, indent=2)))
if lastBlockNum is not None:
if blockNum > lastBlockNum + transBlocksBehind or blockNum + transBlocksBehind < lastBlockNum:
transBlockOrderWeird.append({
"newer_trans_id" : transId,
"newer_trans_index" : transOrder,
"newer_bnum" : blockNum,
"last_trans_id" : lastTransId,
"last_trans_index" : transOrder - 1,
"last_bnum" : lastBlockNum
})
if newestBlockNum > lastBlockNum:
last = transBlockOrderWeird[-1]
last["older_trans_id"] = newestBlockNumTransId
last["older_trans_index"] = newestBlockNumTransOrder
last["older_bnum"] = newestBlockNum
if newestBlockNum is None:
newestBlockNum = blockNum
newestBlockNumTransId = transId
newestBlockNumTransOrder = transOrder
elif blockNum > newestBlockNum:
newestBlockNum = blockNum
newestBlockNumTransId = transId
newestBlockNumTransOrder = transOrder
lastTransId = transId
transOrder += 1
lastBlockNum = blockNum
nextTime = time.perf_counter()
Print("Validating transfers took %s sec" % (nextTime - startTranferValidationTime))
delayedReportError = False
if len(missingTransactions) > 0:
verboseOutput = "Missing transaction information: [" if Utils.Debug else "Missing transaction ids: ["
verboseOutput = ", ".join([missingTrans if Utils.Debug else missingTrans["newer_trans_id"] for missingTrans in missingTransactions])
verboseOutput += "]"
Utils.Print("ERROR: There are %d missing transactions. %s" % (len(missingTransactions), verboseOutput))
delayedReportError = True
if len(transBlockOrderWeird) > 0:
verboseOutput = "Delayed transaction information: [" if Utils.Debug else "Delayed transaction ids: ["
verboseOutput = ", ".join([json.dumps(trans, indent=2) if Utils.Debug else trans["newer_trans_id"] for trans in transBlockOrderWeird])
verboseOutput += "]"
Utils.Print("ERROR: There are %d transactions delayed more than %d seconds. %s" % (len(transBlockOrderWeird), args.transaction_time_delta, verboseOutput))
delayedReportError = True
testSuccessful = not delayedReportError
finally:
TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails)
if not testSuccessful:
Print(Utils.FileDivider)
Print("Compare Blocklog")
cluster.compareBlockLogs()
Print(Utils.FileDivider)
Print("Print Blocklog")
cluster.printBlockLog()
Print(Utils.FileDivider)
errorCode = 0 if testSuccessful else 1
exit(errorCode)
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
''' tracker.py '''
import json
import sys
import traceback
import collections
from functools import partial
from heron.common.src.python.utils.log import Log
from heron.proto import topology_pb2
from heron.statemgrs.src.python import statemanagerfactory
from heron.tools.tracker.src.python.topology import Topology
from heron.tools.tracker.src.python import javaobj
from heron.tools.tracker.src.python import pyutils
from heron.tools.tracker.src.python import utils
def convert_pb_kvs(kvs, include_non_primitives=True):
"""
converts pb kvs to dict
"""
config = {}
for kv in kvs:
if kv.value:
config[kv.key] = kv.value
elif kv.serialized_value:
# add serialized_value support for python values (fixme)
# is this a serialized java object
if topology_pb2.JAVA_SERIALIZED_VALUE == kv.type:
jv = _convert_java_value(kv, include_non_primitives=include_non_primitives)
if jv is not None:
config[kv.key] = jv
else:
config[kv.key] = _raw_value(kv)
return config
def _convert_java_value(kv, include_non_primitives=True):
try:
pobj = javaobj.loads(kv.serialized_value)
if pyutils.is_str_instance(pobj):
return pobj
if pobj.is_primitive():
return pobj.value
if include_non_primitives:
# java objects that are not strings return both the value and an encoded value:
# a hexadecimal byte array for the serialized object
return {
'value' : json.dumps(pobj,
default=lambda custom_field: custom_field.__dict__,
sort_keys=True,
indent=2),
'raw' : utils.hex_escape(kv.serialized_value)}
return None
except Exception:
Log.exception("Failed to parse data as java object")
if include_non_primitives:
return _raw_value(kv)
else:
return None
def _raw_value(kv):
return {
# The value should be a valid json object
'value' : '{}',
'raw' : utils.hex_escape(kv.serialized_value)}
class Tracker(object):
"""
Tracker is a stateless cache of all the topologies
for the given state managers. It watches for
any changes on the topologies, like submission,
killing, movement of containers, etc..
This class caches all the data and is accessed
by handlers.
"""
def __init__(self, config):
self.config = config
self.topologies = []
self.state_managers = []
# A map from a tuple of form
# (topologyName, state_manager_name) to its
# info, which is its representation
# exposed through the APIs.
# The state_manager_name helps when we
# want to remove the topology,
# since other info cannot be relied upon.
self.topologyInfos = {}
def synch_topologies(self):
"""
Sync the topologies with the statemgrs.
"""
self.state_managers = statemanagerfactory.get_all_state_managers(self.config.statemgr_config)
try:
for state_manager in self.state_managers:
state_manager.start()
except Exception as ex:
Log.error("Found exception while initializing state managers: %s. Bailing out..." % ex)
traceback.print_exc()
sys.exit(1)
# pylint: disable=deprecated-lambda
def on_topologies_watch(state_manager, topologies):
"""watch topologies"""
Log.info("State watch triggered for topologies.")
Log.debug("Topologies: " + str(topologies))
existingTopologies = self.getTopologiesForStateLocation(state_manager.name)
existingTopNames = map(lambda t: t.name, existingTopologies)
Log.debug("Existing topologies: " + str(existingTopNames))
for name in existingTopNames:
if name not in topologies:
Log.info("Removing topology: %s in rootpath: %s",
name, state_manager.rootpath)
self.removeTopology(name, state_manager.name)
for name in topologies:
if name not in existingTopNames:
self.addNewTopology(state_manager, name)
for state_manager in self.state_managers:
# The callback function with the bound
# state_manager as first variable.
onTopologiesWatch = partial(on_topologies_watch, state_manager)
state_manager.get_topologies(onTopologiesWatch)
def stop_sync(self):
for state_manager in self.state_managers:
state_manager.stop()
# pylint: disable=deprecated-lambda
def getTopologyByClusterRoleEnvironAndName(self, cluster, role, environ, topologyName):
"""
Find and return the topology given its cluster, environ, topology name, and
an optional role.
Raises exception if topology is not found, or more than one are found.
"""
topologies = list(filter(lambda t: t.name == topologyName
and t.cluster == cluster
and (not role or t.execution_state.role == role)
and t.environ == environ, self.topologies))
if not topologies or len(topologies) > 1:
if role is not None:
raise Exception("Topology not found for {0}, {1}, {2}, {3}".format(
cluster, role, environ, topologyName))
else:
raise Exception("Topology not found for {0}, {1}, {2}".format(
cluster, environ, topologyName))
# There is only one topology which is returned.
return topologies[0]
def getTopologiesForStateLocation(self, name):
"""
Returns all the topologies for a given state manager.
"""
return filter(lambda t: t.state_manager_name == name, self.topologies)
def addNewTopology(self, state_manager, topologyName):
"""
Adds a topology in the local cache, and sets a watch
on any changes on the topology.
"""
topology = Topology(topologyName, state_manager.name)
Log.info("Adding new topology: %s, state_manager: %s",
topologyName, state_manager.name)
self.topologies.append(topology)
# Register a watch on topology and change
# the topologyInfo on any new change.
topology.register_watch(self.setTopologyInfo)
def on_topology_pplan(data):
"""watch physical plan"""
Log.info("Watch triggered for topology pplan: " + topologyName)
topology.set_physical_plan(data)
if not data:
Log.debug("No data to be set")
def on_topology_packing_plan(data):
"""watch packing plan"""
Log.info("Watch triggered for topology packing plan: " + topologyName)
topology.set_packing_plan(data)
if not data:
Log.debug("No data to be set")
def on_topology_execution_state(data):
"""watch execution state"""
Log.info("Watch triggered for topology execution state: " + topologyName)
topology.set_execution_state(data)
if not data:
Log.debug("No data to be set")
def on_topology_tmaster(data):
"""set tmaster"""
Log.info("Watch triggered for topology tmaster: " + topologyName)
topology.set_tmaster(data)
if not data:
Log.debug("No data to be set")
def on_topology_scheduler_location(data):
"""set scheduler location"""
Log.info("Watch triggered for topology scheduler location: " + topologyName)
topology.set_scheduler_location(data)
if not data:
Log.debug("No data to be set")
# Set watches on the pplan, execution_state, tmaster and scheduler_location.
state_manager.get_pplan(topologyName, on_topology_pplan)
state_manager.get_packing_plan(topologyName, on_topology_packing_plan)
state_manager.get_execution_state(topologyName, on_topology_execution_state)
state_manager.get_tmaster(topologyName, on_topology_tmaster)
state_manager.get_scheduler_location(topologyName, on_topology_scheduler_location)
def removeTopology(self, topology_name, state_manager_name):
"""
Removes the topology from the local cache.
"""
topologies = []
for top in self.topologies:
if (top.name == topology_name and
top.state_manager_name == state_manager_name):
# Remove topologyInfo
if (topology_name, state_manager_name) in self.topologyInfos:
self.topologyInfos.pop((topology_name, state_manager_name))
else:
topologies.append(top)
self.topologies = topologies
def extract_execution_state(self, topology):
"""
Returns the representation of execution state that will
be returned from Tracker.
"""
execution_state = topology.execution_state
executionState = {
"cluster": execution_state.cluster,
"environ": execution_state.environ,
"role": execution_state.role,
"jobname": topology.name,
"submission_time": execution_state.submission_time,
"submission_user": execution_state.submission_user,
"release_username": execution_state.release_state.release_username,
"release_tag": execution_state.release_state.release_tag,
"release_version": execution_state.release_state.release_version,
"has_physical_plan": None,
"has_tmaster_location": None,
"has_scheduler_location": None,
}
viz_url = self.config.get_formatted_viz_url(executionState)
executionState["viz"] = viz_url
return executionState
def extract_metadata(self, topology):
"""
Returns metadata that will
be returned from Tracker.
"""
execution_state = topology.execution_state
metadata = {
"cluster": execution_state.cluster,
"environ": execution_state.environ,
"role": execution_state.role,
"jobname": topology.name,
"submission_time": execution_state.submission_time,
"submission_user": execution_state.submission_user,
"release_username": execution_state.release_state.release_username,
"release_tag": execution_state.release_state.release_tag,
"release_version": execution_state.release_state.release_version,
}
# refactor get_formatted_viz_url
viz_url = self.config.get_formatted_viz_url(metadata)
metadata["viz"] = viz_url
return metadata
@staticmethod
def extract_runtime_state(topology):
runtime_state = {}
runtime_state["has_physical_plan"] = \
True if topology.physical_plan else False
runtime_state["has_packing_plan"] = \
True if topology.packing_plan else False
runtime_state["has_tmaster_location"] = \
True if topology.tmaster else False
runtime_state["has_scheduler_location"] = \
True if topology.scheduler_location else False
# "stmgrs" listed runtime state for each stream manager
# however it is possible that physical plan is not complete
# yet and we do not know how many stmgrs there are. That said,
# we should not set any key below (stream manager name)
runtime_state["stmgrs"] = {}
return runtime_state
# pylint: disable=no-self-use
def extract_scheduler_location(self, topology):
"""
Returns the representation of scheduler location that will
be returned from Tracker.
"""
schedulerLocation = {
"name": None,
"http_endpoint": None,
"job_page_link": None,
}
if topology.scheduler_location:
schedulerLocation["name"] = topology.scheduler_location.topology_name
schedulerLocation["http_endpoint"] = topology.scheduler_location.http_endpoint
schedulerLocation["job_page_link"] = \
topology.scheduler_location.job_page_link[0] \
if len(topology.scheduler_location.job_page_link) > 0 else ""
return schedulerLocation
def extract_tmaster(self, topology):
"""
Returns the representation of tmaster that will
be returned from Tracker.
"""
tmasterLocation = {
"name": None,
"id": None,
"host": None,
"controller_port": None,
"master_port": None,
"stats_port": None,
}
if topology.tmaster:
tmasterLocation["name"] = topology.tmaster.topology_name
tmasterLocation["id"] = topology.tmaster.topology_id
tmasterLocation["host"] = topology.tmaster.host
tmasterLocation["controller_port"] = topology.tmaster.controller_port
tmasterLocation["master_port"] = topology.tmaster.master_port
tmasterLocation["stats_port"] = topology.tmaster.stats_port
return tmasterLocation
def extract_logical_plan(self, topology):
"""
Returns the representation of logical plan that will
be returned from Tracker.
"""
logicalPlan = {
"spouts": {},
"bolts": {},
}
# Add spouts.
for spout in topology.spouts():
spoutName = spout.comp.name
spoutType = "default"
spoutSource = "NA"
spoutVersion = "NA"
spoutConfigs = spout.comp.config.kvs
for kvs in spoutConfigs:
if kvs.key == "spout.type":
spoutType = javaobj.loads(kvs.serialized_value)
elif kvs.key == "spout.source":
spoutSource = javaobj.loads(kvs.serialized_value)
elif kvs.key == "spout.version":
spoutVersion = javaobj.loads(kvs.serialized_value)
spoutPlan = {
"config": convert_pb_kvs(spoutConfigs, include_non_primitives=False),
"type": spoutType,
"source": spoutSource,
"version": spoutVersion,
"outputs": []
}
for outputStream in list(spout.outputs):
spoutPlan["outputs"].append({
"stream_name": outputStream.stream.id
})
logicalPlan["spouts"][spoutName] = spoutPlan
# Add bolts.
for bolt in topology.bolts():
boltName = bolt.comp.name
boltPlan = {
"config": convert_pb_kvs(bolt.comp.config.kvs, include_non_primitives=False),
"outputs": [],
"inputs": []
}
for outputStream in list(bolt.outputs):
boltPlan["outputs"].append({
"stream_name": outputStream.stream.id
})
for inputStream in list(bolt.inputs):
boltPlan["inputs"].append({
"stream_name": inputStream.stream.id,
"component_name": inputStream.stream.component_name,
"grouping": topology_pb2.Grouping.Name(inputStream.gtype)
})
logicalPlan["bolts"][boltName] = boltPlan
return logicalPlan
# pylint: disable=too-many-locals
def extract_physical_plan(self, topology):
"""
Returns the representation of physical plan that will
be returned from Tracker.
"""
physicalPlan = {
"instances": {},
"instance_groups": {},
"stmgrs": {},
"spouts": {},
"bolts": {},
"config": {},
"components": {}
}
if not topology.physical_plan:
return physicalPlan
spouts = topology.spouts()
bolts = topology.bolts()
stmgrs = None
instances = None
# Physical Plan
stmgrs = list(topology.physical_plan.stmgrs)
instances = list(topology.physical_plan.instances)
# Configs
if topology.physical_plan.topology.topology_config:
physicalPlan["config"] = convert_pb_kvs(topology.physical_plan.topology.topology_config.kvs)
for spout in spouts:
spout_name = spout.comp.name
physicalPlan["spouts"][spout_name] = []
if spout_name not in physicalPlan["components"]:
physicalPlan["components"][spout_name] = {
"config": convert_pb_kvs(spout.comp.config.kvs)
}
for bolt in bolts:
bolt_name = bolt.comp.name
physicalPlan["bolts"][bolt_name] = []
if bolt_name not in physicalPlan["components"]:
physicalPlan["components"][bolt_name] = {
"config": convert_pb_kvs(bolt.comp.config.kvs)
}
for stmgr in stmgrs:
host = stmgr.host_name
cwd = stmgr.cwd
shell_port = stmgr.shell_port if stmgr.HasField("shell_port") else None
physicalPlan["stmgrs"][stmgr.id] = {
"id": stmgr.id,
"host": host,
"port": stmgr.data_port,
"shell_port": shell_port,
"cwd": cwd,
"pid": stmgr.pid,
"joburl": utils.make_shell_job_url(host, shell_port, cwd),
"logfiles": utils.make_shell_logfiles_url(host, shell_port, cwd),
"instance_ids": []
}
instance_groups = collections.OrderedDict()
for instance in instances:
instance_id = instance.instance_id
stmgrId = instance.stmgr_id
name = instance.info.component_name
stmgrInfo = physicalPlan["stmgrs"][stmgrId]
host = stmgrInfo["host"]
cwd = stmgrInfo["cwd"]
shell_port = stmgrInfo["shell_port"]
# instance_id format container_<index>_component_1
# group name is container_<index>
group_name = instance_id.rsplit("_", 2)[0]
igroup = instance_groups.get(group_name, list())
igroup.append(instance_id)
instance_groups[group_name] = igroup
physicalPlan["instances"][instance_id] = {
"id": instance_id,
"name": name,
"stmgrId": stmgrId,
"logfile": utils.make_shell_logfiles_url(host, shell_port, cwd, instance.instance_id),
}
physicalPlan["stmgrs"][stmgrId]["instance_ids"].append(instance_id)
if name in physicalPlan["spouts"]:
physicalPlan["spouts"][name].append(instance_id)
else:
physicalPlan["bolts"][name].append(instance_id)
physicalPlan["instance_groups"] = instance_groups
return physicalPlan
# pylint: disable=too-many-locals
def extract_packing_plan(self, topology):
"""
Returns the representation of packing plan that will
be returned from Tracker.
"""
packingPlan = {
"id": "",
"container_plans": []
}
if not topology.packing_plan:
return packingPlan
container_plans = topology.packing_plan.container_plans
containers = []
for container_plan in container_plans:
instances = []
for instance_plan in container_plan.instance_plans:
instance_resources = {"cpu": instance_plan.resource.cpu,
"ram": instance_plan.resource.ram,
"disk": instance_plan.resource.disk}
instance = {"component_name" : instance_plan.component_name,
"task_id" : instance_plan.task_id,
"component_index": instance_plan.component_index,
"instance_resources": instance_resources}
instances.append(instance)
required_resource = {"cpu": container_plan.requiredResource.cpu,
"ram": container_plan.requiredResource.ram,
"disk": container_plan.requiredResource.disk}
scheduled_resource = {}
if container_plan.scheduledResource:
scheduled_resource = {"cpu": container_plan.scheduledResource.cpu,
"ram": container_plan.scheduledResource.ram,
"disk": container_plan.scheduledResource.disk}
container = {"id": container_plan.id,
"instances": instances,
"required_resources": required_resource,
"scheduled_resources": scheduled_resource}
containers.append(container)
packingPlan["id"] = topology.packing_plan.id
packingPlan["container_plans"] = containers
return json.dumps(packingPlan)
def setTopologyInfo(self, topology):
"""
Extracts info from the stored proto states and
convert it into representation that is exposed using
the API.
This method is called on any change for the topology.
For example, when a container moves and its host or some
port changes. All the information is parsed all over
again and cache is updated.
"""
# Execution state is the most basic info.
# If there is no execution state, just return
# as the rest of the things don't matter.
if not topology.execution_state:
Log.info("No execution state found for: " + topology.name)
return
Log.info("Setting topology info for topology: " + topology.name)
has_physical_plan = True
if not topology.physical_plan:
has_physical_plan = False
Log.info("Setting topology info for topology: " + topology.name)
has_packing_plan = True
if not topology.packing_plan:
has_packing_plan = False
has_tmaster_location = True
if not topology.tmaster:
has_tmaster_location = False
has_scheduler_location = True
if not topology.scheduler_location:
has_scheduler_location = False
topologyInfo = {
"name": topology.name,
"id": topology.id,
"logical_plan": None,
"physical_plan": None,
"packing_plan": None,
"execution_state": None,
"tmaster_location": None,
"scheduler_location": None,
}
executionState = self.extract_execution_state(topology)
executionState["has_physical_plan"] = has_physical_plan
executionState["has_packing_plan"] = has_packing_plan
executionState["has_tmaster_location"] = has_tmaster_location
executionState["has_scheduler_location"] = has_scheduler_location
executionState["status"] = topology.get_status()
topologyInfo["metadata"] = self.extract_metadata(topology)
topologyInfo["runtime_state"] = self.extract_runtime_state(topology)
topologyInfo["execution_state"] = executionState
topologyInfo["logical_plan"] = self.extract_logical_plan(topology)
topologyInfo["physical_plan"] = self.extract_physical_plan(topology)
topologyInfo["packing_plan"] = self.extract_packing_plan(topology)
topologyInfo["tmaster_location"] = self.extract_tmaster(topology)
topologyInfo["scheduler_location"] = self.extract_scheduler_location(topology)
self.topologyInfos[(topology.name, topology.state_manager_name)] = topologyInfo
def getTopologyInfo(self, topologyName, cluster, role, environ):
"""
Returns the JSON representation of a topology
by its name, cluster, environ, and an optional role parameter.
Raises exception if no such topology is found.
"""
# Iterate over the values to filter the desired topology.
for (topology_name, _), topologyInfo in self.topologyInfos.items():
executionState = topologyInfo["execution_state"]
if (topologyName == topology_name and
cluster == executionState["cluster"] and
environ == executionState["environ"]):
# If role is specified, first try to match "role" field. If "role" field
# does not exist, try to match "submission_user" field.
if not role or executionState.get("role") == role:
return topologyInfo
if role is not None:
Log.info("Could not find topology info for topology: %s," \
"cluster: %s, role: %s, and environ: %s",
topologyName, cluster, role, environ)
else:
Log.info("Could not find topology info for topology: %s," \
"cluster: %s and environ: %s", topologyName, cluster, environ)
raise Exception("No topology found")
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string
import zipfile
from datetime import date
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
required_module_keys = ['architectures', 'name','version','moduleid','description','copyright','license','platform','minsdk']
module_defaults = {
'description':'My module',
'author': 'Your Name',
'license' : 'Specify your license',
'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
sdk = config['TITANIUM_SDK']
return os.path.expandvars(os.path.expanduser(sdk))
def replace_vars(config,token):
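# Expand $(KEY) placeholders in token using values already present in config; expansion stops at the first unknown key.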
idx = token.find('$(')
while idx != -1:
idx2 = token.find(')',idx+2)
if idx2 == -1: break
key = token[idx+2:idx2]
if not config.has_key(key): break
token = token.replace('$(%s)' % key, config[key])
idx = token.find('$(')
return token
def read_ti_xcconfig():
contents = open(os.path.join(cwd,'titanium.xcconfig')).read()
config = {}
for line in contents.splitlines(False):
line = line.strip()
if line[0:2]=='//': continue
idx = line.find('=')
if idx > 0:
key = line[0:idx].strip()
value = line[idx+1:].strip()
config[key] = replace_vars(config,value)
return config
def generate_doc(config):
docdir = os.path.join(cwd,'documentation')
if not os.path.exists(docdir):
docdir = os.path.join(cwd,'..','documentation')
if not os.path.exists(docdir):
print "Couldn't find documentation file at: %s" % docdir
return None
try:
import markdown2 as markdown
except ImportError:
import markdown
documentation = []
for file in os.listdir(docdir):
if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
continue
md = open(os.path.join(docdir,file)).read()
html = markdown.markdown(md)
documentation.append({file:html});
return documentation
def compile_js(manifest,config):
js_file = os.path.join(cwd,'assets','com.crissmoldovan.tisip.js')
if not os.path.exists(js_file):
js_file = os.path.join(cwd,'..','assets','com.crissmoldovan.tisip.js')
if not os.path.exists(js_file): return
from compiler import Compiler
try:
import json
except:
import simplejson as json
compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
root_asset, module_assets = compiler.compile_module()
root_asset_content = """
%s
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
module_asset_content = """
%s
NSNumber *index = [map objectForKey:path];
if (index == nil) {
return nil;
}
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
from tools import splice_code
assets_router = os.path.join(cwd,'Classes','ComCrissmoldovanTisipModuleAssets.m')
splice_code(assets_router, 'asset', root_asset_content)
splice_code(assets_router, 'resolve_asset', module_asset_content)
# Generate the exports after crawling all of the available JS source
exports = open('metadata.json','w')
json.dump({'exports':compiler.exports }, exports)
exports.close()
def die(msg):
print msg
sys.exit(1)
def warn(msg):
print "[WARN] %s" % msg
def error(msg):
print "[ERROR] %s" % msg
def validate_license():
license_file = os.path.join(cwd,'LICENSE')
if not os.path.exists(license_file):
license_file = os.path.join(cwd,'..','LICENSE')
if os.path.exists(license_file):
c = open(license_file).read()
if c.find(module_license_default)!=-1:
warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
path = os.path.join(cwd,'manifest')
if not os.path.exists(path): die("missing %s" % path)
f = open(path)
manifest = {}
for line in f.readlines():
line = line.strip()
if line[0:1]=='#': continue
if line.find(':') < 0: continue
key,value = line.split(':', 1)
manifest[key.strip()]=value.strip()
for key in required_module_keys:
if not manifest.has_key(key): die("missing required manifest key '%s'" % key)
if manifest[key].strip() == '': die("manifest key '%s' missing required value" % key)
if module_defaults.has_key(key):
defvalue = module_defaults[key]
curvalue = manifest[key]
if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
return manifest,path
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignore=[],includeJSFiles=False):
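# Recursively add the contents of dir to the zip under basepath, skipping ignored directories/files,
# compiled .pyc files and (unless includeJSFiles is set) .js files.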
for root, dirs, files in os.walk(dir):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles: continue
e = os.path.splitext(file)
if len(e) == 2 and e[1] == '.pyc': continue
if not includeJSFiles and len(e) == 2 and e[1] == '.js': continue
from_ = os.path.join(root, file)
to_ = from_.replace(dir, basepath, 1)
zf.write(from_, to_)
def glob_libfiles():
files = []
for libfile in glob.glob('build/**/*.a'):
if libfile.find('Release-')!=-1:
files.append(libfile)
return files
def build_module(manifest,config):
from tools import ensure_dev_path
ensure_dev_path()
rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
if rc != 0:
die("xcodebuild failed")
rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
if rc != 0:
die("xcodebuild failed")
# build the merged library using lipo
moduleid = manifest['moduleid']
libpaths = ''
for libfile in glob_libfiles():
libpaths+='%s ' % libfile
os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def verify_build_arch(manifest, config):
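# Compare the architectures declared in the manifest against those actually present in the built
# static library (queried with 'xcrun lipo -info'); warn when arm64 is missing and fail on any mismatch.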
binaryname = 'lib%s.a' % manifest['moduleid']
binarypath = os.path.join('build', binaryname)
manifestarch = set(manifest['architectures'].split(' '))
output = subprocess.check_output('xcrun lipo -info %s' % binarypath, shell=True)
builtarch = set(output.split(':')[-1].strip().split(' '))
print 'Check build architectures\n'
if ('arm64' not in builtarch):
warn('built module is missing 64-bit support.')
if (manifestarch != builtarch):
warn('architectures in manifest: %s' % ', '.join(manifestarch))
warn('compiled binary architectures: %s' % ', '.join(builtarch))
print '\nMODULE BUILD FAILED'
error('there is a discrepancy between the architectures specified in the module manifest and the compiled binary.')
error('Please update the manifest to match the module binary architectures.')
die('')
def package_module(manifest,mf,config):
name = manifest['name'].lower()
moduleid = manifest['moduleid'].lower()
version = manifest['version']
modulezip = '%s-iphone-%s.zip' % (moduleid,version)
if os.path.exists(modulezip): os.remove(modulezip)
zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
zf.write(mf,'%s/manifest' % modulepath)
libname = 'lib%s.a' % moduleid
zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
docs = generate_doc(config)
if docs!=None:
for doc in docs:
for file, html in doc.iteritems():
filename = string.replace(file,'.md','.html')
zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
p = os.path.join(cwd, 'assets')
if not os.path.exists(p):
p = os.path.join(cwd, '..', 'assets')
if os.path.exists(p):
zip_dir(zf,p,'%s/%s' % (modulepath,'assets'),['README'])
for dn in ('example','platform'):
p = os.path.join(cwd, dn)
if not os.path.exists(p):
p = os.path.join(cwd, '..', dn)
if os.path.exists(p):
zip_dir(zf,p,'%s/%s' % (modulepath,dn),['README'],True)
license_file = os.path.join(cwd,'LICENSE')
if not os.path.exists(license_file):
license_file = os.path.join(cwd,'..','LICENSE')
if os.path.exists(license_file):
zf.write(license_file,'%s/LICENSE' % modulepath)
zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
exports_file = 'metadata.json'
if os.path.exists(exports_file):
zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
zf.close()
if __name__ == '__main__':
manifest,mf = validate_manifest()
validate_license()
config = read_ti_xcconfig()
sdk = find_sdk(config)
sys.path.insert(0,os.path.join(sdk,'iphone'))
sys.path.append(os.path.join(sdk, "common"))
compile_js(manifest,config)
build_module(manifest,config)
verify_build_arch(manifest, config)
package_module(manifest,mf,config)
sys.exit(0)
from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
from django.core.urlresolvers import reverse
from django.conf import settings
from smart_selects.db_fields import ChainedForeignKey
from datospersonalesapp.models import Paciente
from django.contrib.auth.models import User
from .validators import valid_extension
import os
# Create your models here.
"""
A continuacion se presenta una serie de campos que son propios de un exemen de laboratorio.
OA-NA-NP:Campos obligatorios-Automatico, no modifica admin o paciente
OB-SA-NP:Campos obligatorios, cambios realizados por el administrador
OP-SA-NP:Campos opcionales, cambios realizados por el administrador
OB-SA-SP:Campos obligatorios, cambios realizados por paciente
OP-SA-SP:Campos opcionales, cambios realizados por paciente
"""
NITRITOS_OPCIONES = (
('NEGATIVO','NEGATIVO'),
('POSITIVO','POSITIVO'),
)
OPCIONES = (
('ESCASAS','ESCASAS'),
('MODERADAS','MODERADAS'),
('ABUNDANTES','ABUNDANTES')
)
OPCIONES_RESTOS = (
('E','ESCASOS'),
('M','MODERADOS'),
('A','ABUNDANTES')
)
COLOR = (
('A','AMARILLO'),
('C','CAFE'),
('N','NEGRO'),
('R','ROJO'),
('V','VERDE')
)
CONSISTENCIA = (
('P','PASTOSA'),
('D','DURA'),
('B','BLANDA'),
('L','LIQUIDA')
)
CILINDROS_OPCIONES = (
('NO SE OBSERVAN','NO SE OBSERVAN'),
('CL','CILINDRO LEUCOCIDARIO'),
('CG','CILINDRO GRANULOSO'),
('CE','CILINDRO ERITROCITARIO')
)
CRISTALES_OPCIONES = (
('NO SE OBSERVAN','NO SE OBSERVAN'),
('SC','SULFATOS DE CALCIO'),
('OC','OXALATOS DE CALCIO'),
('UA','URATOS AMORFOS'),
('AU','ACIDO URICO'),
('FT','FOSFATOS TRIPLES'),
('FA','FOSFATOS AMORFOS')
)
def generar_path(instancia, nombreArchivo):
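# Build the upload path "Adjuntos/modelo_<user>/<filename>" for files attached by a user.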
folder = "modelo_" + str(instancia.user)
return os.path.join("Adjuntos", folder, nombreArchivo)
class Examen_Hematologia(models.Model):
codExamen_Hematologia=models.AutoField(primary_key=True, null=False)#OA-NA-NP
paciente = models.ForeignKey(Paciente, on_delete=models.SET_NULL, verbose_name="PACIENTE", default=1, null=True, blank=True) #OB-SA-NP
edad = models.PositiveIntegerField(default=0, null = True, blank=True)#OB-SA-NP
fechaIngreso=models.DateTimeField("Fecha de Creacion",auto_now_add=True, null=True, blank=True) #OA-NA-NP default=timezone.now
fechaModificacion=models.DateTimeField("Fecha de Modificacion",auto_now_add=True, null = True, blank=True) #OA-NA-NP default=timezone.now
nombreRecibido=models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name="Tomo informacion", editable=False, default=1, blank=True) #OA-NA-NP
hemoglobina = models.DecimalField(max_digits=5, decimal_places=2, null=True, blank=True)#OB-SA-NP
mcv = models.DecimalField(max_digits=5, decimal_places=2, null=True, blank=True)#OB-SA-NP
mch = models.DecimalField(max_digits=5, decimal_places=2, null=True, blank=True)#OB-SA-NP
mchc = models.DecimalField(max_digits=5, decimal_places=2, null=True, blank=True)#OB-SA-NP
hematocrito = models.PositiveIntegerField(null=True, blank=True)#OB-SA-NP
globulosBlancos = models.PositiveIntegerField(null=True, blank=True)#OB-SA-NP
globulosRojos = models.PositiveIntegerField(null=True, blank=True)#OB-SA-NP
plaquetas = models.PositiveIntegerField(null=True, blank=True)#OB-SA-NP
neutrofilos = models.PositiveIntegerField(null=True, blank=True)#OB-SA-NP
linfocitos = models.PositiveIntegerField(null=True, blank=True)#OB-SA-NP
monocitos = models.PositiveIntegerField(null=True, blank=True)#OB-SA-NP
eosinofilos = models.PositiveIntegerField(null=True, blank=True)#OB-SA-NP
basofilos = models.PositiveIntegerField(null=True, blank=True)#OB-SA-NP
eritrosedimentacion=models.CharField("ERITROSEDIMENTACION", max_length=500, null=True, blank=True) #OB-SA-NP
gotaGruesa=models.CharField("GOTA GRUESA", max_length=500, null=True, blank=True) #OB-SA-NP
archivo = models.FileField(blank=True, null=True, upload_to='hematologia/')
def __str__(self):
return '%s %s' % (self.codExamen_Hematologia, self.paciente)
class Examen_Orina(models.Model):
codExamen_Orina=models.AutoField(primary_key=True, null=False)#OA-NA-NP
paciente = models.ForeignKey(Paciente, on_delete=models.SET_NULL, verbose_name="PACIENTE", default=1, null=True, blank=True) #OB-SA-NP
edad = models.PositiveIntegerField(default=0,null=True,blank=True)#OB-SA-NP
fechaIngreso=models.DateTimeField("Fecha de Inscripcion",auto_now_add=True,null=True,blank=True) #OA-NA-NP default=timezone.now
fechaModificacion=models.DateTimeField("Fecha de Inscripcion",auto_now_add=True, null=True,blank=True) #OA-NA-NP default=timezone.now
nombreRecibido=models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name="Tomo informacion", editable=False, default=1, blank=True) #OA-NA-NP
color=models.CharField("COLOR", max_length=25,null=True,blank=True) #OB-SA-NP
aspecto=models.CharField("ASPECTO", max_length=25,null=True,blank=True) #OB-SA-NP
ph = models.DecimalField(max_digits=5, decimal_places=2, null=True, blank=True)#OB-SA-NP
nitritos = models.CharField("NITRITOS", max_length=10, choices=NITRITOS_OPCIONES,null=True,blank=True) #OB-SA-NP
densidad = models.PositiveIntegerField(null=True,blank=True)#OB-SA-NP
proteinas=models.CharField("PROTEINAS", max_length=30,null=True,blank=True) #OB-SA-NP
cetonicos=models.CharField("C. CETONICOS", max_length=30,null=True,blank=True) #OB-SA-NP
glucosa = models.PositiveIntegerField(null=True,blank=True)#OB-SA-NP
bilirrubina = models.DecimalField(max_digits=5, decimal_places=2, null=True, blank=True)#OB-SA-NP
urobilinigeno = models.DecimalField(max_digits=5, decimal_places=2, null=True, blank=True)#OB-SA-NP
leucocitos = models.PositiveIntegerField(null=True,blank=True)#OB-SA-NP
hematies = models.DecimalField(max_digits=5, decimal_places=2, null=True, blank=True)#OB-SA-NP
epiteliales = models.CharField("CELULAS EPITELIALES", max_length=15, choices=OPCIONES,null=True,blank=True) #OB-SA-NP
cristales = models.CharField("CRISTALES", max_length=20, choices=CRISTALES_OPCIONES,null=True,blank=True) #OB-SA-NP
cilindros = models.CharField("CILINDROS", max_length=20, choices=CILINDROS_OPCIONES,null=True,blank=True) #OB-SA-NP
sangre = models.PositiveIntegerField(null=True,blank=True)#OB-SA-NP
esterasaLeucocitaria = models.PositiveIntegerField(null=True,blank=True)#OB-SA-NP
bacterias = models.CharField("BACTERIAS Y LEVADURAS", max_length=15, choices=OPCIONES,null=True,blank=True) #OB-SA-NP
observaciones= models.CharField("OBSERVACIONES",max_length=1000,null=True,blank=True)#OB-SA-NP
archivo = models.FileField(blank=True, null=True, upload_to='orina/')
def __str__(self):
return '%s %s' % (self.codExamen_Orina, self.paciente)
class Examen_Heces(models.Model):
codExamen_Heces=models.AutoField(primary_key=True, null=False)#OA-NA-NP
paciente = models.ForeignKey(Paciente, on_delete=models.SET_NULL, verbose_name="PACIENTE", default=1, null=True, blank=True) #OB-SA-NP
edad = models.PositiveIntegerField(default=0,null=True,blank=True)#OB-SA-NP
fechaIngreso=models.DateTimeField("Fecha de Inscripcion",auto_now_add=True,null=True,blank=True) #OA-NA-NP default=timezone.now
fechaModificacion=models.DateTimeField("Fecha de Inscripcion",auto_now_add=True, null=True,blank=True) #OA-NA-NP default=timezone.now
nombreRecibido=models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name="Tomo informacion", editable=False, default=1) #OA-NA-NP
color=models.CharField("COLOR", max_length=25,null=True,blank=True) #OB-SA-NP
consistencia=models.CharField("COLOR", max_length=25,null=True,blank=True) #OB-SA-NP
mucus=models.CharField("MUCUS", max_length=20,null=True,blank=True) #OB-SA-NP
protoActivos=models.CharField("ACTIVOS", max_length=40,null=True,blank=True) #OB-SA-NP
protoQuistes=models.CharField("QUISTES", max_length=40,null=True,blank=True) #OB-SA-NP
metazoarios=models.CharField("METAZOARIOS", max_length=40,null=True,blank=True) #OB-SA-NP
hematies = models.DecimalField(max_digits=5, decimal_places=2, null=True, blank=True)#OB-SA-NP
leucocitos = models.PositiveIntegerField(null=True,blank=True)#OB-SA-NP
macro=models.CharField("MACRO", max_length=40,null=True,blank=True) #OB-SA-NP
micro=models.CharField("MICRO", max_length=40,null=True,blank=True) #OB-SA-NP
observaciones= models.CharField("OBSERVACIONES",max_length=1000,null=True,blank=True)#OB-SA-NP
archivo = models.FileField(blank=True, null=True, upload_to='heces/')
def __str__(self):
return '%s %s' % (self.codExamen_Heces, self.paciente)
class Examen_General(models.Model):
codExamen_General = models.AutoField(primary_key=True, null=False)#OA-NA-NP
paciente = models.ForeignKey(Paciente, on_delete=models.SET_NULL, verbose_name="PACIENTE", default=1, null=True, blank=True) #OB-SA-NP
edad = models.PositiveIntegerField(default=0,null=True,blank=True)#OB-SA-NP
fechaIngreso=models.DateTimeField("Fecha de Inscripcion",auto_now_add=True,null=True,blank=True) #OA-NA-NP default=timezone.now
fechaModificacion=models.DateTimeField("Fecha de Inscripcion",auto_now_add=True, null=True,blank=True) #OA-NA-NP default=timezone.now
nombreRecibido=models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name="Tomo informacion", editable=False, default=1) #OA-NA-NP
colorHeces = models.CharField("COLOR HECES", max_length=25,null=True,blank=True) #OB-SA-NP
consistencia = models.CharField("CONSISTENCIA", max_length=25,null=True,blank=True) #OB-SA-NP
mucus=models.CharField("MUCUS", max_length=20,null=True,blank=True) #OB-SA-NP
protoActivos=models.CharField("ACTIVOS", max_length=40,null=True,blank=True) #OB-SA-NP
protoQuistes=models.CharField("QUISTES", max_length=40,null=True,blank=True) #OB-SA-NP
metazoarios=models.CharField("METAZOARIOS", max_length=40,null=True,blank=True) #OB-SA-NP
hematiesHeces = models.DecimalField(max_digits=5, decimal_places=2, null=True, blank=True)#OB-SA-NP
leucocitosHeces = models.PositiveIntegerField(null=True,blank=True)#OB-SA-NP
macro = models.CharField("MACRO", max_length=25,null=True,blank=True) #OB-SA-NP
micro = models.CharField("MICRO", max_length=25,null=True,blank=True) #OB-SA-NP
colorOrina=models.CharField("COLOR", max_length=25,null=True,blank=True) #OB-SA-NP
aspecto=models.CharField("ASPECTO", max_length=25,null=True,blank=True) #OB-SA-NP
ph = models.DecimalField(max_digits=5, decimal_places=2, null=True, blank=True)#OB-SA-NP
nitritos = models.CharField("NITRITOS", max_length=10, choices=NITRITOS_OPCIONES,null=True,blank=True) #OB-SA-NP
densidad = models.PositiveIntegerField(null=True,blank=True)#OB-SA-NP
proteinas = models.PositiveIntegerField(null=True,blank=True)#OB-SA-NP
cetonicos=models.CharField("C. CETONICOS", max_length=30,null=True,blank=True) #OB-SA-NP
glucosa = models.PositiveIntegerField(null=True,blank=True)#OB-SA-NP
bilirrubina = models.DecimalField(max_digits=5, decimal_places=2, null=True, blank=True)#OB-SA-NP
urobilinigeno = models.DecimalField(max_digits=5, decimal_places=2, null=True, blank=True)#OB-SA-NP
leucocitosOrina = models.PositiveIntegerField(null=True,blank=True)#OB-SA-NP
hematiesOrina = models.DecimalField(max_digits=5, decimal_places=2, null=True, blank=True)#OB-SA-NP
epiteliales = models.CharField("CELULAS EPITELIALES", max_length=15, choices=OPCIONES,null=True,blank=True) #OB-SA-NP
cristales = models.CharField("CRISTALES", max_length=20, choices=CRISTALES_OPCIONES,null=True,blank=True) #OB-SA-NP
cilindros = models.CharField("CILINDROS", max_length=20, choices=CILINDROS_OPCIONES,null=True,blank=True) #OB-SA-NP
sangre=models.CharField("SANGRE", max_length=30,null=True,blank=True) #OB-SA-NP
esterasaLeucocitaria=models.CharField("ESTERASA LEUCOCITARIA", max_length=30,null=True,blank=True) #OB-SA-NP
bacterias = models.CharField("BACTERIAS", max_length=15, choices=OPCIONES,null=True,blank=True) #OB-SA-NP
levaduras = models.CharField("LEVADURAS", max_length=15, choices=OPCIONES,null=True,blank=True) #OB-SA-NP
otros= models.CharField("OTROS",max_length=500,null=True,blank=True)#OB-SA-NP
hematocrito = models.PositiveIntegerField(null=True, blank=True)#OB-SA-NP
hemoglobina = models.DecimalField(max_digits=5, decimal_places=2, null=True, blank=True)#OB-SA-NP
serologia=models.CharField("SEROLOGIA", max_length=500,null=True,blank=True) #OB-SA-NP
archivo = models.FileField(blank=True, null=True, upload_to='GeneralNuevoIngreso/')
def __str__(self):
return '%s %s' % (self.codExamen_General, self.paciente)
class Examen_Quimica_Sanguinea(models.Model):
codExamen_Quimica = models.AutoField(primary_key=True, null=False)#OA-NA-NP
paciente = models.ForeignKey(Paciente, on_delete=models.SET_NULL, verbose_name="PACIENTE", default=1, null=True, blank=True) #OB-SA-NP
edad = models.PositiveIntegerField(default=0,null=True,blank=True)#OB-SA-NP
fechaIngreso=models.DateTimeField("Fecha de Inscripcion",auto_now_add=True,null=True,blank=True) #OA-NA-NP default=timezone.now
    fechaModificacion=models.DateTimeField("Fecha de Modificacion", auto_now=True, null=True, blank=True) #OA-NA-NP updated automatically on every save
    nombreRecibido=models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_DEFAULT, verbose_name="Tomo informacion", editable=False, default=1) #OA-NA-NP on_delete (required in Django 2+) set to SET_DEFAULT to match default=1
glucosa = models.PositiveIntegerField(null=True,blank=True)#OB-SA-NP
glucosaPospandrial = models.PositiveIntegerField(null=True,blank=True)#OB-SA-NP
colesterol = models.PositiveIntegerField(null=True,blank=True)#OB-SA-NP
trigliceridos = models.PositiveIntegerField(null=True,blank=True)#OB-SA-NP
acidoUrico = models.DecimalField(max_digits=5, decimal_places=2, null=True, blank=True)#OB-SA-NP
creatinina = models.DecimalField(max_digits=5, decimal_places=2, null=True, blank=True)#OB-SA-NP
hdl = models.PositiveIntegerField(null=True,blank=True)#OB-SA-NP
ldl = models.PositiveIntegerField(null=True,blank=True)#OB-SA-NP
tgo = models.PositiveIntegerField(null=True,blank=True)#OB-SA-NP
tgp = models.PositiveIntegerField(null=True,blank=True)#OB-SA-NP
bilirrubinaTotal = models.DecimalField(max_digits=5, decimal_places=2, null=True, blank=True)#OB-SA-NP
bilirrubinaDirecta = models.DecimalField(max_digits=5, decimal_places=2, null=True, blank=True)#OB-SA-NP
bilirrubinaIndirecta = models.DecimalField(max_digits=5, decimal_places=2, null=True, blank=True)#OB-SA-NP
tiempoProtrombina = models.PositiveIntegerField(null=True,blank=True)#OB-SA-NP
tiempoTromboplastina = models.PositiveIntegerField(null=True,blank=True)#OB-SA-NP
tiempoSangramiento = models.PositiveIntegerField(null=True,blank=True)#OB-SA-NP
tiempoCoagulacion = models.PositiveIntegerField(null=True,blank=True)#OB-SA-NP
eritrosedimentacion = models.PositiveIntegerField(null=True,blank=True)#OB-SA-NP
glucosaAjustada=models.CharField(max_length=25,null=True,blank=True) #OB-SA-NP
pospandrialAjustada=models.CharField(max_length=25,null=True,blank=True) #OB-SA-NP
colesterolAjustada=models.CharField(max_length=25,null=True,blank=True) #OB-SA-NP
trigliceridosAjustada=models.CharField(max_length=25,null=True,blank=True) #OB-SA-NP
uricoAjustada=models.CharField(max_length=25,null=True,blank=True) #OB-SA-NP
creatininaAjustada=models.CharField(max_length=25,null=True,blank=True) #OB-SA-NP
hdlAjustada=models.CharField(max_length=25,null=True,blank=True) #OB-SA-NP
ldlAjustada=models.CharField(max_length=25,null=True,blank=True) #OB-SA-NP
tgoAjustada=models.CharField(max_length=25,null=True,blank=True) #OB-SA-NP
tgpAjustada=models.CharField(max_length=25,null=True,blank=True) #OB-SA-NP
bTotalAjustada=models.CharField(max_length=25,null=True,blank=True) #OB-SA-NP
bDirectaAjustada=models.CharField(max_length=25,null=True,blank=True) #OB-SA-NP
bIndirectaAjustada=models.CharField(max_length=25,null=True,blank=True) #OB-SA-NP
protrombinaAjustada=models.CharField(max_length=25,null=True,blank=True) #OB-SA-NP
tromboplastinaAjustada=models.CharField(max_length=25,null=True,blank=True) #OB-SA-NP
sangramientoAjustada=models.CharField(max_length=25,null=True,blank=True) #OB-SA-NP
coagulacionAjustada=models.CharField(max_length=25,null=True,blank=True) #OB-SA-NP
eritrosedimentacionAjustada=models.CharField(max_length=25,null=True,blank=True) #OB-SA-NP
archivo = models.FileField(blank=True, null=True, upload_to='QuimicaSanguinea/')
def __str__(self):
return '%s %s' % (self.codExamen_Quimica, self.paciente)
class Examen_Especiales(models.Model):
codExamen_Especiales = models.AutoField(primary_key=True, null=False)#OA-NA-NP
paciente = models.ForeignKey(Paciente, on_delete=models.SET_NULL, verbose_name="PACIENTE", default=1, null=True, blank=True) #OB-SA-NP
edad = models.PositiveIntegerField(default=0,null=True,blank=True)#OB-SA-NP
fechaIngreso=models.DateTimeField("Fecha de Inscripcion",auto_now_add=True,null=True,blank=True) #OA-NA-NP default=timezone.now
    fechaModificacion=models.DateTimeField("Fecha de Modificacion", auto_now=True, null=True, blank=True) #OA-NA-NP updated automatically on every save
    nombreRecibido=models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_DEFAULT, verbose_name="Tomo informacion", editable=False, default=1) #OA-NA-NP on_delete (required in Django 2+) set to SET_DEFAULT to match default=1
tipoExamen= models.CharField("TIPO DE EXAMEN",max_length=25,null=True,blank=True)#OB-SA-NP
resultado= models.CharField("RESULTADOS",max_length=100,null=True,blank=True)#OB-SA-NP
archivo = models.FileField(blank=True, null=True, upload_to='PruebasEspeciales/')
def __str__(self):
return '%s %s' % (self.codExamen_Especiales, self.paciente)
|
|
import os
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from scripts.lib.zulip_tools import deport
from .config import DEVELOPMENT, PRODUCTION, get_secret
if TYPE_CHECKING:
from django_auth_ldap.config import LDAPSearch
from typing_extensions import TypedDict
from zerver.lib.types import SAMLIdPConfigDict
if PRODUCTION:
from .prod_settings import EXTERNAL_HOST, ZULIP_ADMINISTRATOR
else:
from .dev_settings import EXTERNAL_HOST, ZULIP_ADMINISTRATOR
DEBUG = DEVELOPMENT
EXTERNAL_HOST_WITHOUT_PORT = deport(EXTERNAL_HOST)
# These settings are intended for the server admin to set. We document them in
# prod_settings_template.py, and in the initial /etc/zulip/settings.py on a new
# install of the Zulip server.
# Extra HTTP "Host" values to allow (standard ones added in computed_settings.py)
ALLOWED_HOSTS: List[str] = []
# Basic email settings
NOREPLY_EMAIL_ADDRESS = "noreply@" + EXTERNAL_HOST_WITHOUT_PORT
ADD_TOKENS_TO_NOREPLY_ADDRESS = True
TOKENIZED_NOREPLY_EMAIL_ADDRESS = "noreply-{token}@" + EXTERNAL_HOST_WITHOUT_PORT
PHYSICAL_ADDRESS = ""
FAKE_EMAIL_DOMAIN = EXTERNAL_HOST_WITHOUT_PORT
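# Illustration only (hypothetical host): with EXTERNAL_HOST_WITHOUT_PORT set to
# "zulip.example.com", NOREPLY_EMAIL_ADDRESS would be "noreply@zulip.example.com",
# and the tokenized form above would render as
#   TOKENIZED_NOREPLY_EMAIL_ADDRESS.format(token="abc123")
#   -> "noreply-abc123@zulip.example.com"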
# SMTP settings
EMAIL_HOST: Optional[str] = None
# Other settings, like EMAIL_HOST_USER, EMAIL_PORT, and EMAIL_USE_TLS,
# we leave up to Django's defaults.
# LDAP auth
AUTH_LDAP_SERVER_URI = ""
AUTH_LDAP_BIND_DN = ""
AUTH_LDAP_USER_SEARCH: Optional["LDAPSearch"] = None
LDAP_APPEND_DOMAIN: Optional[str] = None
LDAP_EMAIL_ATTR: Optional[str] = None
AUTH_LDAP_REVERSE_EMAIL_SEARCH: Optional["LDAPSearch"] = None
AUTH_LDAP_USERNAME_ATTR: Optional[str] = None
# AUTH_LDAP_USER_ATTR_MAP is uncommented in prod_settings_template.py,
# so the value here mainly serves to help document the default.
AUTH_LDAP_USER_ATTR_MAP: Dict[str, str] = {
"full_name": "cn",
}
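# A hypothetical richer mapping, for illustration only; the LDAP attribute names
# on the right depend entirely on your directory's schema:
# AUTH_LDAP_USER_ATTR_MAP = {
#     "full_name": "cn",
#     "avatar": "thumbnailPhoto",
# }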
# Automatically deactivate users not found by the AUTH_LDAP_USER_SEARCH query.
LDAP_DEACTIVATE_NON_MATCHING_USERS: Optional[bool] = None
# AUTH_LDAP_CONNECTION_OPTIONS: we set ldap.OPT_REFERRALS in settings.py if unset.
AUTH_LDAP_CONNECTION_OPTIONS: Dict[int, object] = {}
# Disable django-auth-ldap caching, to prevent problems with OU changes.
AUTH_LDAP_CACHE_TIMEOUT = 0
# Disable syncing user data on each login; using the sync_ldap_user_data cron job is recommended.
AUTH_LDAP_ALWAYS_UPDATE_USER = False
# Development-only settings for fake LDAP authentication; used to
# support local development of LDAP auth without an LDAP server.
# Detailed docs in zproject/dev_settings.py.
FAKE_LDAP_MODE: Optional[str] = None
FAKE_LDAP_NUM_USERS = 8
AUTH_LDAP_ADVANCED_REALM_ACCESS_CONTROL: Optional[Dict[str, Any]] = None
# Social auth; we support providing values for some of these
# settings in zulip-secrets.conf instead of settings.py in development.
SOCIAL_AUTH_GITHUB_KEY = get_secret("social_auth_github_key", development_only=True)
SOCIAL_AUTH_GITHUB_ORG_NAME: Optional[str] = None
SOCIAL_AUTH_GITHUB_TEAM_ID: Optional[str] = None
SOCIAL_AUTH_GITLAB_KEY = get_secret("social_auth_gitlab_key", development_only=True)
SOCIAL_AUTH_SUBDOMAIN: Optional[str] = None
SOCIAL_AUTH_AZUREAD_OAUTH2_SECRET = get_secret("azure_oauth2_secret")
SOCIAL_AUTH_GOOGLE_KEY = get_secret("social_auth_google_key", development_only=True)
# SAML:
SOCIAL_AUTH_SAML_SP_ENTITY_ID: Optional[str] = None
SOCIAL_AUTH_SAML_SP_PUBLIC_CERT = ""
SOCIAL_AUTH_SAML_SP_PRIVATE_KEY = ""
SOCIAL_AUTH_SAML_ORG_INFO: Optional[Dict[str, Dict[str, str]]] = None
SOCIAL_AUTH_SAML_TECHNICAL_CONTACT: Optional[Dict[str, str]] = None
SOCIAL_AUTH_SAML_SUPPORT_CONTACT: Optional[Dict[str, str]] = None
SOCIAL_AUTH_SAML_ENABLED_IDPS: Dict[str, "SAMLIdPConfigDict"] = {}
SOCIAL_AUTH_SAML_SECURITY_CONFIG: Dict[str, Any] = {}
# Set this to True to enforce that any configured IdP needs to specify
# the limit_to_subdomains setting to be considered valid:
SAML_REQUIRE_LIMIT_TO_SUBDOMAINS = False
# Historical name for SOCIAL_AUTH_GOOGLE_KEY; still allowed in production.
GOOGLE_OAUTH2_CLIENT_ID: Optional[str] = None
# Apple:
SOCIAL_AUTH_APPLE_SERVICES_ID = get_secret("social_auth_apple_services_id", development_only=True)
SOCIAL_AUTH_APPLE_APP_ID = get_secret("social_auth_apple_app_id", development_only=True)
SOCIAL_AUTH_APPLE_KEY = get_secret("social_auth_apple_key", development_only=True)
SOCIAL_AUTH_APPLE_TEAM = get_secret("social_auth_apple_team", development_only=True)
SOCIAL_AUTH_APPLE_SCOPE = ["name", "email"]
SOCIAL_AUTH_APPLE_EMAIL_AS_USERNAME = True
# Generic OpenID Connect:
SOCIAL_AUTH_OIDC_ENABLED_IDPS: Dict[str, Dict[str, Optional[str]]] = {}
SOCIAL_AUTH_OIDC_FULL_NAME_VALIDATED = False
SOCIAL_AUTH_SYNC_CUSTOM_ATTRS_DICT: Dict[str, Dict[str, Dict[str, str]]] = {}
# Other auth
SSO_APPEND_DOMAIN: Optional[str] = None
VIDEO_ZOOM_CLIENT_ID = get_secret("video_zoom_client_id", development_only=True)
VIDEO_ZOOM_CLIENT_SECRET = get_secret("video_zoom_client_secret")
# Email gateway
EMAIL_GATEWAY_PATTERN = ""
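# Illustration only (hypothetical domain): the pattern is a %s-style format
# string in which %s is replaced by the address token for the target stream, e.g.
# EMAIL_GATEWAY_PATTERN = "%s@emaildomain.example.com"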
EMAIL_GATEWAY_LOGIN: Optional[str] = None
EMAIL_GATEWAY_IMAP_SERVER: Optional[str] = None
EMAIL_GATEWAY_IMAP_PORT: Optional[int] = None
EMAIL_GATEWAY_IMAP_FOLDER: Optional[str] = None
# Not documented in /etc/zulip/settings.py, since it's rarely needed.
EMAIL_GATEWAY_EXTRA_PATTERN_HACK: Optional[str] = None
# Error reporting
ERROR_REPORTING = True
BROWSER_ERROR_REPORTING = False
LOGGING_SHOW_MODULE = False
LOGGING_SHOW_PID = False
# Sentry.io error reporting; off by default.
SENTRY_DSN: Optional[str] = None
# File uploads and avatars
# TODO: Rename MAX_FILE_UPLOAD_SIZE to have unit in name.
DEFAULT_AVATAR_URI = "/static/images/default-avatar.png"
DEFAULT_LOGO_URI = "/static/images/logo/zulip-org-logo.svg"
S3_AVATAR_BUCKET = ""
S3_AUTH_UPLOADS_BUCKET = ""
S3_REGION: Optional[str] = None
S3_ENDPOINT_URL: Optional[str] = None
LOCAL_UPLOADS_DIR: Optional[str] = None
MAX_FILE_UPLOAD_SIZE = 25
# Jitsi Meet video call integration; set to None to disable integration.
JITSI_SERVER_URL = "https://meet.jit.si"
# GIPHY API key.
GIPHY_API_KEY = get_secret("giphy_api_key")
# Allow setting BigBlueButton settings in zulip-secrets.conf in
# development; this is useful since there are no public BigBlueButton servers.
BIG_BLUE_BUTTON_URL = get_secret("big_blue_button_url", development_only=True)
# Max state storage per user
# TODO: Add this to zproject/prod_settings_template.py once stateful bots are fully functional.
USER_STATE_SIZE_LIMIT = 10000000
# Max size of a single configuration entry of an embedded bot.
BOT_CONFIG_SIZE_LIMIT = 10000
# External service configuration
CAMO_URI = ""
MEMCACHED_LOCATION = "127.0.0.1:11211"
MEMCACHED_USERNAME = None if get_secret("memcached_password") is None else "zulip@localhost"
RABBITMQ_HOST = "127.0.0.1"
RABBITMQ_USERNAME = "zulip"
REDIS_HOST = "127.0.0.1"
REDIS_PORT = 6379
REMOTE_POSTGRES_HOST = ""
REMOTE_POSTGRES_PORT = ""
REMOTE_POSTGRES_SSLMODE = ""
THUMBNAIL_IMAGES = False
SENDFILE_BACKEND: Optional[str] = None
TORNADO_PORTS: List[int] = []
USING_TORNADO = True
# ToS/Privacy templates
POLICIES_DIRECTORY: str = "zerver/policies_absent"
# Security
ENABLE_FILE_LINKS = False
ENABLE_GRAVATAR = True
INLINE_IMAGE_PREVIEW = True
INLINE_URL_EMBED_PREVIEW = True
NAME_CHANGES_DISABLED = False
AVATAR_CHANGES_DISABLED = False
PASSWORD_MIN_LENGTH = 6
PASSWORD_MIN_GUESSES = 10000
PUSH_NOTIFICATION_BOUNCER_URL: Optional[str] = None
PUSH_NOTIFICATION_REDACT_CONTENT = False
SUBMIT_USAGE_STATISTICS = True
PROMOTE_SPONSORING_ZULIP = True
RATE_LIMITING = True
RATE_LIMITING_AUTHENTICATE = True
RATE_LIMIT_TOR_TOGETHER = False
SEND_LOGIN_EMAILS = True
EMBEDDED_BOTS_ENABLED = False
# Two factor authentication is not yet implementation-complete
TWO_FACTOR_AUTHENTICATION_ENABLED = False
# This is used to send all hotspots for convenient manual testing
# in development mode.
ALWAYS_SEND_ALL_HOTSPOTS = False
# The new user tutorial is enabled by default, but can be disabled for
# self-hosters who want to disable the tutorial entirely on their system.
TUTORIAL_ENABLED = True
# In-development search pills feature.
SEARCH_PILLS_ENABLED = False
# We log emails in development environment for accessing
# them easily through /emails page
DEVELOPMENT_LOG_EMAILS = DEVELOPMENT
# These settings are not documented in prod_settings_template.py.
# They should either be documented here, or documented there.
#
# Settings that it makes sense to document here instead of in
# prod_settings_template.py are those that
# * don't make sense to change in production, but rather are intended
# for dev and test environments; or
# * don't make sense to change on a typical production server with
# one or a handful of realms, though they might on an installation
# like Zulip Cloud or to work around a problem on another server.
NOTIFICATION_BOT = "notification-bot@zulip.com"
EMAIL_GATEWAY_BOT = "emailgateway@zulip.com"
NAGIOS_SEND_BOT = "nagios-send-bot@zulip.com"
NAGIOS_RECEIVE_BOT = "nagios-receive-bot@zulip.com"
WELCOME_BOT = "welcome-bot@zulip.com"
REMINDER_BOT = "reminder-bot@zulip.com"
# The following bots are optional system bots not enabled by
# default. The default ones are defined in INTERNAL_BOTS, in settings.py.
# ERROR_BOT sends Django exceptions to an "errors" stream in the
# system realm.
ERROR_BOT: Optional[str] = None
# These are extra bot users for our end-to-end Nagios message
# sending tests.
NAGIOS_STAGING_SEND_BOT = "nagios-staging-send-bot@zulip.com" if PRODUCTION else None
NAGIOS_STAGING_RECEIVE_BOT = "nagios-staging-receive-bot@zulip.com" if PRODUCTION else None
# SYSTEM_BOT_REALM would be a constant always set to 'zulip',
# except that it isn't that on Zulip Cloud. We will likely do a
# migration and eliminate this parameter in the future.
SYSTEM_BOT_REALM = "zulipinternal"
# Structurally, we will probably eventually merge
# analytics into part of the main server, rather
# than a separate app.
EXTRA_INSTALLED_APPS = ["analytics"]
# Used to construct URLs to point to the Zulip server. Since we
# only support HTTPS in production, this is just for development.
EXTERNAL_URI_SCHEME = "https://"
# Whether anyone can create a new organization on the Zulip server.
OPEN_REALM_CREATION = False
# Whether it's possible to create web-public streams on this server.
WEB_PUBLIC_STREAMS_ENABLED = False
# Setting for where the system bot users are. Likely has no
# purpose now that the REALMS_HAVE_SUBDOMAINS migration is finished.
SYSTEM_ONLY_REALMS = {"zulip"}
# Default deadline for demo organizations
DEMO_ORG_DEADLINE_DAYS = 30
# Alternate hostnames to serve particular realms on, in addition to
# their usual subdomains. Keys are realm string_ids (aka subdomains),
# and values are alternate hosts.
# The values will also be added to ALLOWED_HOSTS.
REALM_HOSTS: Dict[str, str] = {}
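# Hypothetical example: serve the realm with string_id "engineering" on its own
# hostname (the value is also added to ALLOWED_HOSTS automatically):
# REALM_HOSTS = {"engineering": "chat.engineering.example.com"}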
# Map used to rewrite the URIs for certain realms during mobile
# authentication. This, combined with adding the relevant hosts to
# ALLOWED_HOSTS, can be used for environments where security policies
# mean that a different hostname must be used for mobile access.
REALM_MOBILE_REMAP_URIS: Dict[str, str] = {}
# Whether the server is using the PGroonga full-text search
# backend. Plan is to turn this on for everyone after further
# testing.
USING_PGROONGA = False
# How Django should send emails. Set for most contexts in settings.py, but
# available for sysadmin override in unusual cases.
EMAIL_BACKEND: Optional[str] = None
# Whether to give admins a warning in the web app that email isn't set up.
# Set in settings.py when email isn't configured.
WARN_NO_EMAIL = False
# If True, disable rate-limiting and other filters on sending error messages
# to admins, and enable logging on the error-reporting itself. Useful
# mainly in development.
DEBUG_ERROR_REPORTING = False
# Whether to flush memcached after data migrations. Because of
# how we do deployments in a way that avoids reusing memcached,
# this is disabled in production, but we need it in development.
POST_MIGRATION_CACHE_FLUSHING = False
# Settings for APNS. Only needed on push.zulipchat.com or if
# rebuilding the mobile app with a different push notifications
# server.
APNS_CERT_FILE: Optional[str] = None
APNS_SANDBOX = True
APNS_TOPIC = "org.zulip.Zulip"
ZULIP_IOS_APP_ID = "org.zulip.Zulip"
# Limits related to the size of file uploads; the first is in bytes, the rest in MiB.
DATA_UPLOAD_MAX_MEMORY_SIZE = 25 * 1024 * 1024
MAX_AVATAR_FILE_SIZE_MIB = 5
MAX_ICON_FILE_SIZE_MIB = 5
MAX_LOGO_FILE_SIZE_MIB = 5
MAX_EMOJI_FILE_SIZE_MIB = 5
# Limits to help prevent spam, in particular by sending invitations.
#
# A non-admin user who's joined an open realm this recently can't invite at all.
INVITES_MIN_USER_AGE_DAYS = 3
# Default for a realm's `max_invites`; which applies per day,
# and only applies if OPEN_REALM_CREATION is true.
INVITES_DEFAULT_REALM_DAILY_MAX = 100
# Global rate-limit (list of pairs (days, max)) on invites from new realms.
# Only applies if OPEN_REALM_CREATION is true.
INVITES_NEW_REALM_LIMIT_DAYS = [(1, 100)]
# Definition of a new realm for INVITES_NEW_REALM_LIMIT.
INVITES_NEW_REALM_DAYS = 7
# Controls for which links are published in portico footers/headers/etc.
REGISTER_LINK_DISABLED: Optional[bool] = None
LOGIN_LINK_DISABLED = False
FIND_TEAM_LINK_DISABLED = True
# What domains to treat like the root domain
ROOT_SUBDOMAIN_ALIASES = ["www"]
# Whether the root domain is a landing page or can host a realm.
ROOT_DOMAIN_LANDING_PAGE = False
# If using the Zephyr mirroring supervisord configuration, the
# hostname to connect to in order to transfer credentials from webathena.
PERSONAL_ZMIRROR_SERVER: Optional[str] = None
# When security-relevant links in emails expire.
CONFIRMATION_LINK_DEFAULT_VALIDITY_DAYS = 1
INVITATION_LINK_VALIDITY_DAYS = 10
REALM_CREATION_LINK_VALIDITY_DAYS = 7
# Version number for ToS. Change this if you want to force every
# user to click through to re-accept terms of service before using
# Zulip again on the web.
TERMS_OF_SERVICE_VERSION: Optional[str] = None
# Template to use when bumping TERMS_OF_SERVICE_VERSION to explain situation.
FIRST_TIME_TERMS_OF_SERVICE_TEMPLATE: Optional[str] = None
# Hostname used for Zulip's statsd logging integration.
STATSD_HOST = ""
# Configuration for JWT auth.
if TYPE_CHECKING:
class JwtAuthKey(TypedDict):
key: str
# See https://pyjwt.readthedocs.io/en/latest/algorithms.html for a list
# of supported algorithms.
algorithms: List[str]
JWT_AUTH_KEYS: Dict[str, "JwtAuthKey"] = {}
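# Hypothetical example entry (keyed here by realm subdomain in this sketch; the
# secret is a placeholder), matching the JwtAuthKey shape declared above:
# JWT_AUTH_KEYS = {
#     "zulip": {"key": "<shared-secret>", "algorithms": ["HS256"]},
# }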
# https://docs.djangoproject.com/en/3.2/ref/settings/#std:setting-SERVER_EMAIL
# Django setting for what from address to use in error emails.
SERVER_EMAIL = ZULIP_ADMINISTRATOR
# Django setting for who receives error emails.
ADMINS = (("Zulip Administrator", ZULIP_ADMINISTRATOR),)
# From address for welcome emails.
WELCOME_EMAIL_SENDER: Optional[Dict[str, str]] = None
# Whether we should use users' own email addresses as the from
# address when sending missed-message emails. Off by default
# because some transactional email providers reject sending such
# emails since they can look like spam.
SEND_MISSED_MESSAGE_EMAILS_AS_USER = False
# Whether to send periodic digests of activity.
SEND_DIGEST_EMAILS = True
# Used to change the Zulip logo in portico pages.
CUSTOM_LOGO_URL: Optional[str] = None
# Random salt used when deterministically generating passwords in
# development.
INITIAL_PASSWORD_SALT: Optional[str] = None
# Settings configuring the special instrumention of the send_event
# code path used in generating API documentation for /events.
LOG_API_EVENT_TYPES = False
# Used to control whether certain management commands are run on
# the server.
# TODO: Replace this with a smarter "run on only one server" system.
STAGING = False
# How long to wait before presence should treat a user as offline.
# TODO: Figure out why this is different from the corresponding
# value in static/js/presence.js. Also, probably move it out of
# default_settings, since it likely isn't usefully user-configurable.
OFFLINE_THRESHOLD_SECS = 5 * 60
# Specifies the number of active users in the realm
# above which sending of presence update events will be disabled.
USER_LIMIT_FOR_SENDING_PRESENCE_UPDATE_EVENTS = 100
# How many days deleted messages data should be kept before being
# permanently deleted.
ARCHIVED_DATA_VACUUMING_DELAY_DAYS = 30
# Enables billing pages and plan-based feature gates. If False, all features
# are available to all realms.
BILLING_ENABLED = False
FREE_TRIAL_DAYS: Optional[int] = int(get_secret("free_trial_days", "0"))
# Custom message (supports HTML) to be shown in the navbar of landing pages. Used mainly for
# making announcements.
LANDING_PAGE_NAVBAR_MESSAGE: Optional[str] = None
# Automatically catch-up soft deactivated users when running the
# `soft-deactivate-users` cron. Turn this off if the server has 10Ks of
# users, and you would like to save some disk space. Soft-deactivated
# returning users would still be caught-up normally.
AUTO_CATCH_UP_SOFT_DEACTIVATED_USERS = True
# Enables Google Analytics on selected portico pages.
GOOGLE_ANALYTICS_ID: Optional[str] = None
# This is overridden by dev_settings.py for droplets.
IS_DEV_DROPLET = False
# Used by puppet/zulip_ops/files/cron.d/check_send_receive_time.
NAGIOS_BOT_HOST = EXTERNAL_HOST
# Use half of the available CPUs for data import purposes.
DEFAULT_DATA_EXPORT_IMPORT_PARALLELISM = (len(os.sched_getaffinity(0)) // 2) or 1
# How long after the last upgrade to nag users that the server needs
# to be upgraded because of likely security releases in the meantime.
# Default is 18 months, constructed as 12 months before someone should
# upgrade, plus 6 months for the system administrator to get around to it.
SERVER_UPGRADE_NAG_DEADLINE_DAYS = 30 * 18
# How long servers have to respond to outgoing webhook requests
OUTGOING_WEBHOOK_TIMEOUT_SECONDS = 10
# Maximum length of message content allowed.
# Any message content exceeding this limit will be truncated.
# See: `_internal_prep_message` function in zerver/lib/actions.py.
MAX_MESSAGE_LENGTH = 10000
# The maximum number of drafts to send in the response to /register.
# More drafts, should they exist for some crazy reason, could be
# fetched in a separate request.
MAX_DRAFTS_IN_REGISTER_RESPONSE = 1000
|
|
# coding: utf-8
from __future__ import unicode_literals
import os
import uuid
import time
import datetime
from PIL import Image
import requests
import feedparser
from pyquery import PyQuery as pq
from django.utils import timezone
from django.conf import settings
from django.core.mail import EmailMessage
from django.utils.lru_cache import lru_cache
from .models import Rss, RssItem, RssUser
from tools.type_tool import has_list_key, has_str_key, has_dict_key
from tools.template_tool import render_django_template
OUTPUT_DIR = settings.OUTPUT_DIR
IMG_DIR = os.path.join(OUTPUT_DIR, 'img')
LOCK_FILE = os.path.join(OUTPUT_DIR, 'LOCK')
KINDLEGEN_BIN_PATH = settings.KINDLEGEN_BIN_PATH
if not os.path.exists(IMG_DIR):
os.mkdir(IMG_DIR)
def render_and_write(template_name, context, output_name):
content = render_django_template(template_name, context=context)
with open(os.path.join(OUTPUT_DIR, output_name), 'w') as f:
f.write(content.encode('utf-8'))
class RssUpdateError(Exception):
pass
def get_entry_unique_id(entry):
if has_str_key(entry, 'id') and entry['id']:
return entry['id']
return entry['link']
def get_entry_content(entry):
if has_list_key(entry, 'content'):
for content in entry['content']:
if has_str_key(content, 'value'):
return content['value']
if has_str_key(entry, 'description'):
return entry['description']
def get_entry_published(entry):
try:
date = datetime.datetime.fromtimestamp(time.mktime(entry.published_parsed))
return timezone.make_aware(date)
except (KeyError, TypeError):
return None
def update_rss(rss):
feed = feedparser.parse(rss.url)
if feed.status != 200:
raise RssUpdateError()
ids = (get_entry_unique_id(i) for i in feed.entries)
exists_ids = RssItem.objects.filter(unique_id__in=ids).values_list('unique_id', flat=True)
exists_ids = set(exists_ids)
    new_entries = (i for i in feed.entries if get_entry_unique_id(i) not in exists_ids)
    rssitems = []
    for entry in new_entries:
rssitem = RssItem(
rss=rss,
name=entry.title,
unique_id=get_entry_unique_id(entry),
url=entry.link,
content=get_entry_content(entry),
published=get_entry_published(entry),
)
rssitems.append(rssitem)
if rssitems:
RssItem.objects.bulk_create(rssitems)
return rssitems
@lru_cache(maxsize=None)
def download_img(url):
try:
r = requests.get(url, stream=True, timeout=10)
except Exception:
return
filename = str(uuid.uuid4())
path = os.path.join(IMG_DIR, filename)
if r.status_code == 200:
with open(path, 'wb') as f:
for chunk in r:
f.write(chunk)
try:
im = Image.open(path)
im.thumbnail((500, 500), Image.ANTIALIAS)
im = im.convert('L')
im.save(path + '.jpeg', "JPEG")
except IOError:
return
finally:
os.remove(path)
return 'img/{}.jpeg'.format(filename)
def update_rss_coent(content):
if not content:
return content
try:
document = pq(content)
except ValueError:
return content
for img in document('img'):
src = img.attrib.get('src', '')
if src:
try:
path = download_img(src)
except Exception:
continue
if path:
img.attrib['src'] = path
img.attrib['middle'] = 'true'
return str(document)
def build_mobi(rsses):
if os.path.exists(LOCK_FILE):
return
os.mknod(LOCK_FILE)
data = []
feed_number = 1
play_order = 1
for feed in rsses:
feed_number += 1
play_order += 1
local = {
'number': feed_number,
'play_order': play_order,
'entries': [],
'title': feed['rss'].name,
}
entry_number = 0
for entry in feed['rssitems']:
play_order += 1
entry_number += 1
local_entry = {
'number': entry_number,
'play_order': play_order,
'title': entry.name,
'description': update_rss_coent(entry.content),
}
local['entries'].append(local_entry)
data.append(local)
wrap = {
'date': datetime.date.today().isoformat(),
'feeds': data,
}
# Render and output templates
render_and_write('mobi/toc.xml', wrap, 'toc.ncx')
render_and_write('mobi/toc.html', wrap, 'toc.html')
render_and_write('mobi/opf.xml', wrap, 'daily.opf')
for feed in data:
render_and_write('mobi/feed.html', feed, '%s.html' % feed['number'])
os.system('{} {}'.format(KINDLEGEN_BIN_PATH, os.path.join(OUTPUT_DIR, 'daily.opf')))
os.remove(LOCK_FILE)
def send_file(to, subject, file_path):
email = EmailMessage(
subject,
subject,
settings.EMAIL_HOST_USER,
to,
)
email.attach_file(file_path)
email.send()
def build_user_rss(user):
rssusers = RssUser.objects.filter(user=user)
if not rssusers.exists():
return
rsses = []
for rssuser in rssusers:
rssitems = rssuser.rss.rssitem_set.all()
"""
if rssuser.last_rssitem:
rssitems = rssitems.filter(id__gt=rssuser.last_rssitem_id)
"""
        yesterday = datetime.date.today() - datetime.timedelta(days=1)
        rssitems = rssitems.filter(
            published__year=yesterday.year,
            published__month=yesterday.month,
            published__day=yesterday.day,
        )
rssitems = list(rssitems)
if rssitems:
rsses.append(dict(rssuser=rssuser, rss=rssuser.rss, rssitems=rssitems))
if rsses:
build_mobi(rsses)
for feed in rsses:
feed['rssuser'].last_rssitem_id = max([i.id for i in feed['rssitems']])
feed['rssuser'].save()
return rsses
def send_user_file(user):
build_user_rss(user)
send_file(settings.TEST_EMAILS, 'hello', settings.MOBI_PATH)
|
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import unittest
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
import time
import rosunit
from subprocess import Popen, PIPE, check_call, call
from contextlib import contextmanager
@contextmanager
def fakestdout():
realstdout = sys.stdout
fakestdout = StringIO()
sys.stdout = fakestdout
    try:
        yield fakestdout
    finally:
        # Restore the real stdout even if the with-block raises (e.g. on a test failure).
        sys.stdout = realstdout
def todict(s):
d = {}
for l in s.split('\n'):
key, p, val = l.partition(':')
if p:
d[key] = val.strip()
return d
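# Illustrative example: todict("Node: /a2iserver\nType: test_rosmaster/AddTwoInts")
# returns {'Node': '/a2iserver', 'Type': 'test_rosmaster/AddTwoInts'};
# lines without a ':' separator are ignored.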
class TestRosservice(unittest.TestCase):
def setUp(self):
pass
def test_get_service_headers(self):
import rosservice
orig_uri = os.environ['ROS_MASTER_URI']
os.environ['ROS_MASTER_URI'] = 'http://fake_host:12356'
try:
c = 'rosservice'
# test error conditions, integration tests cover success cases
try:
rosservice.get_service_headers('/add_two_ints', 'fake://localhost:1234')
self.fail("should have raised")
except rosservice.ROSServiceException: pass
try:
rosservice.get_service_headers('/add_two_ints', 'rosrpc://fake_host:1234')
self.fail("should have raised IO exc")
except rosservice.ROSServiceIOException: pass
finally:
os.environ['ROS_MASTER_URI'] = orig_uri
def test_get_service_type(self):
import rosservice
self.assertEquals('test_rosmaster/AddTwoInts', rosservice.get_service_type('/add_two_ints'))
self.assertEquals(None, rosservice.get_service_type('/fake_add_two_ints'))
def test_offline(self):
import rosservice
orig_uri = os.environ['ROS_MASTER_URI']
os.environ['ROS_MASTER_URI'] = 'http://fake_host:12356'
try:
c = 'rosservice'
try:
rosservice.get_service_type('/add_two_ints')
self.fail("should have raised ROSServiceIOException")
except rosservice.ROSServiceIOException:
pass
try:
rosservice._rosservice_cmd_list([c, 'list'])
self.fail("should have raised ROSServiceIOException")
except rosservice.ROSServiceIOException: pass
try:
rosservice._rosservice_cmd_info([c, 'info', '/add_two_ints'])
self.fail("should have raised ROSServiceIOException")
except rosservice.ROSServiceIOException: pass
try:
rosservice._rosservice_cmd_type([c, 'type', '/add_two_ints'])
self.fail("should have raised ROSServiceIOException")
except rosservice.ROSServiceIOException: pass
try:
rosservice._rosservice_cmd_uri([c, 'uri', '/add_two_ints'])
self.fail("should have raised ROSServiceIOException")
except rosservice.ROSServiceIOException: pass
try:
rosservice._rosservice_cmd_find([c, 'find', 'test_ros/AddTwoInts'])
self.fail("should have raised ROSServiceIOException")
except rosservice.ROSServiceIOException: pass
try:
rosservice._rosservice_cmd_call([c, 'call', '/add_two_ints', '1', '2'])
self.fail("should have raised ROSServiceIOException")
except rosservice.ROSServiceIOException: pass
finally:
os.environ['ROS_MASTER_URI'] = orig_uri
def test_cmd_type(self):
import rosservice
cmd = 'rosservice'
s = '/add_two_ints'
try:
rosservice.rosservicemain([cmd, 'type', '/fake_service'])
self.fail("should have triggered error exit")
except SystemExit:
pass
for s in ['/add_two_ints', 'add_two_ints', 'foo/add_two_ints']:
with fakestdout() as b:
rosservice.rosservicemain([cmd, 'type', s])
v = b.getvalue().strip()
self.assertEquals('test_rosmaster/AddTwoInts', v)
def test_cmd_uri(self):
import rosservice
cmd = 'rosservice'
with fakestdout() as b:
try:
rosservice.rosservicemain([cmd, 'uri', '/fake_service'])
self.fail("should have triggered error exit")
except SystemExit:
pass
for s in ['/add_two_ints', 'add_two_ints', 'foo/add_two_ints']:
with fakestdout() as b:
rosservice.rosservicemain([cmd, 'uri', s])
v = b.getvalue().strip()
self.assert_(v.startswith('rosrpc://'), v)
def test_cmd_node(self):
import rosservice
cmd = 'rosservice'
for s in ['/add_two_ints', 'add_two_ints', 'foo/add_two_ints']:
with fakestdout() as b:
rosservice.rosservicemain([cmd, 'node', s])
v = b.getvalue().strip()
if 'foo' in s:
self.assertEquals('/foo/a2iserver', v)
else:
self.assertEquals('/a2iserver', v)
try:
rosservice.rosservicemain([cmd, 'node', '/fake_two_ints'])
self.fail("should have exited with error")
except SystemExit: pass
def test_full_usage(self):
import rosservice
try:
rosservice._fullusage()
self.fail("should have caused system exit")
except SystemExit: pass
def test_cmd_info(self):
import rosservice
cmd = 'rosservice'
try:
rosservice.rosservicemain([cmd, 'info'])
self.fail("should have exited with error")
except SystemExit: pass
try:
rosservice.rosservicemain([cmd, 'info', '/fake_service'])
self.fail("should have exited with error")
except SystemExit: pass
try:
rosservice.rosservicemain([cmd, 'info', '/add_two_ints', '/foo/add_two_ints'])
self.fail("should have exited with error")
except SystemExit: pass
for s in ['/add_two_ints', 'add_two_ints', 'foo/add_two_ints']:
with fakestdout() as b:
rosservice.rosservicemain([cmd, 'info', s])
d = todict(b.getvalue())
if 'foo' in s:
self.assertEquals('/foo/a2iserver', d['Node'])
else:
self.assertEquals('/a2iserver', d['Node'], repr(d['Node']))
self.assertEquals('test_rosmaster/AddTwoInts', d['Type'])
self.assertEquals('a b', d['Args'])
self.assert_('URI' in d)
def test_cmd_find(self):
import rosservice
cmd = 'rosservice'
try:
rosservice.rosservicemain([cmd, 'find'])
self.fail("arg parsing should have failed")
except SystemExit: pass
try:
rosservice.rosservicemain([cmd, 'find', 'test_ros/AddTwoInts', 'test/AddThreeInts'])
self.fail("arg parsing should have failed")
except SystemExit: pass
v = set(['/add_two_ints', '/bar/add_two_ints', '/foo/add_two_ints'])
with fakestdout() as b:
rosservice.rosservicemain([cmd, 'find', 'test_rosmaster/AddTwoInts'])
d = set([x for x in b.getvalue().split('\n') if x.strip()])
self.assertEquals(v, d)
with fakestdout() as b:
rosservice.rosservicemain([cmd, 'find', 'fake/AddTwoInts'])
self.assertEquals('', b.getvalue().strip())
def test_get_service_class_by_name(self):
import rosservice
try:
rosservice.get_service_class_by_name('fake')
self.fail("should have raised")
except rosservice.ROSServiceException as e:
self.assertEquals("Service [fake] is not available.", str(e))
def test_cmd_call(self):
import rosservice
cmd = 'rosservice'
try:
rosservice.rosservicemain([cmd, 'call'])
self.fail("arg parsing should have failed")
except SystemExit: pass
try:
rosservice.rosservicemain([cmd, 'call', 'add_two_ints', '1', '2', '3'])
self.fail("should have failed with too many args")
except SystemExit: pass
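    # NOTE: this second setUp definition overrides the trivial one near the top of
    # the class; it blocks until the test services have been advertised so that the
    # list/info tests below see a fully populated master.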
def setUp(self):
# wait for all services to come up
import rosservice
services = ['/add_two_ints',
'/foo/add_two_ints',
'/bar/add_two_ints',
]
import time
timeout_t = time.time() + 10.
while time.time() < timeout_t:
with fakestdout() as b:
rosservice._rosservice_cmd_list(['rosservice', 'list'])
v = [x.strip() for x in b.getvalue().split('\n') if x.strip()]
if not (set(services) - set(v) ):
return
self.fail("timeout")
def test_cmd_list(self):
import rosservice
cmd = 'rosservice'
s = '/add_two_ints'
# test main entry
rosservice.rosservicemain([cmd, 'list'])
# test directly
services = ['/add_two_ints',
'/foo/add_two_ints',
'/bar/add_two_ints',
'/header_echo',
]
services_nodes = ['/add_two_ints /a2iserver',
'/foo/add_two_ints /foo/a2iserver',
'/bar/add_two_ints /bar/a2iserver',
'/header_echo /headerserver',
]
with fakestdout() as b:
rosservice._rosservice_cmd_list([cmd, 'list'])
v = [x.strip() for x in b.getvalue().split('\n') if x.strip()]
v = [x for x in v if not x.startswith('/rosout/')]
v = [x for x in v if not x.endswith('/get_loggers') and not x.endswith('/set_logger_level')]
self.assertEquals(set(services), set(v))
with fakestdout() as b:
rosservice._rosservice_cmd_list([cmd, 'list', '-n'])
v = [x.strip() for x in b.getvalue().split('\n') if x.strip()]
v = [x for x in v if not x.startswith('/rosout/')]
v = [x for x in v if x.find('/get_loggers ') == -1 and x.find('/set_logger_level ') == -1]
self.assertEquals(set(services_nodes), set(v))
with fakestdout() as b:
rosservice._rosservice_cmd_list([cmd, 'list', '--nodes'])
v = [x.strip() for x in b.getvalue().split('\n') if x.strip()]
v = [x for x in v if not x.startswith('/rosout/')]
v = [x for x in v if x.find('/get_loggers ') == -1 and x.find('/set_logger_level ') == -1]
self.assertEquals(set(services_nodes), set(v))
# test with multiple service names
try:
rosservice._rosservice_cmd_list([cmd, 'list', s, s])
self.fail("should have caused parser error")
except SystemExit:
pass
# test with resolved service names
for s in services:
with fakestdout() as b:
rosservice._rosservice_cmd_list([cmd, 'list', s])
self.assertEquals(s, b.getvalue().strip())
# test with relative service names
s = 'add_two_ints'
with fakestdout() as b:
rosservice._rosservice_cmd_list([cmd, 'list', s])
self.assertEquals('/add_two_ints', b.getvalue().strip())
with fakestdout() as b:
rosservice._rosservice_cmd_list([cmd, 'list', s, '-n'])
self.assertEquals('/add_two_ints /a2iserver', b.getvalue().strip())
with fakestdout() as b:
rosservice._rosservice_cmd_list([cmd, 'list', s, '--nodes'])
self.assertEquals('/add_two_ints /a2iserver', b.getvalue().strip())
# test with namespaces
s = '/foo'
rosservice._rosservice_cmd_list([cmd, 'list', s])
rosservice._rosservice_cmd_list([cmd, 'list', s, '-n'])
rosservice._rosservice_cmd_list([cmd, 'list', s, '--nodes'])
s = 'foo'
rosservice._rosservice_cmd_list([cmd, 'list', s])
rosservice._rosservice_cmd_list([cmd, 'list', s, '-n'])
rosservice._rosservice_cmd_list([cmd, 'list', s, '--nodes'])
NAME = 'test_rosservice'
if __name__ == '__main__':
rosunit.unitrun('test_rosservice', NAME, TestRosservice, sys.argv, coverage_packages=['rosservice'])
|
|
""" A neural chatbot using sequence to sequence model with
attentional decoder.
This is based on Google Translate Tensorflow model
https://github.com/tensorflow/models/blob/master/tutorials/rnn/translate/
This file contains the code to run the model.
See readme.md for instructions on how to run the starter code.
"""
from __future__ import division
from __future__ import print_function
import argparse
import os
import random
import sys
import time
import numpy as np
import tensorflow as tf
from model import ChatBotModel
import config
import data
def _get_random_bucket(train_buckets_scale):
""" Get a random bucket from which to choose a training sample """
rand = random.random()
return min([i for i in xrange(len(train_buckets_scale))
if train_buckets_scale[i] > rand])
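# Illustrative example: if train_buckets_scale == [0.3, 0.7, 1.0] (a cumulative
# distribution over three buckets) and random.random() returns 0.5, the smallest
# index whose scale exceeds 0.5 is 1, so bucket 1 is chosen.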
def _assert_lengths(encoder_size, decoder_size, encoder_inputs, decoder_inputs, decoder_masks):
""" Assert that the encoder inputs, decoder inputs, and decoder masks are
of the expected lengths """
if len(encoder_inputs) != encoder_size:
raise ValueError("Encoder length must be equal to the one in bucket,"
" %d != %d." % (len(encoder_inputs), encoder_size))
if len(decoder_inputs) != decoder_size:
raise ValueError("Decoder length must be equal to the one in bucket,"
" %d != %d." % (len(decoder_inputs), decoder_size))
if len(decoder_masks) != decoder_size:
raise ValueError("Weights length must be equal to the one in bucket,"
" %d != %d." % (len(decoder_masks), decoder_size))
def run_step(sess, model, encoder_inputs, decoder_inputs, decoder_masks, bucket_id, forward_only):
""" Run one step in training.
@forward_only: boolean value to decide whether a backward path should be created
forward_only is set to True when you just want to evaluate on the test set,
    or when you want the bot to be in chat mode. """
encoder_size, decoder_size = config.BUCKETS[bucket_id]
_assert_lengths(encoder_size, decoder_size, encoder_inputs, decoder_inputs, decoder_masks)
# input feed: encoder inputs, decoder inputs, target_weights, as provided.
input_feed = {}
for step in xrange(encoder_size):
input_feed[model.encoder_inputs[step].name] = encoder_inputs[step]
for step in xrange(decoder_size):
input_feed[model.decoder_inputs[step].name] = decoder_inputs[step]
input_feed[model.decoder_masks[step].name] = decoder_masks[step]
last_target = model.decoder_inputs[decoder_size].name
input_feed[last_target] = np.zeros([model.batch_size], dtype=np.int32)
# output feed: depends on whether we do a backward step or not.
if not forward_only:
output_feed = [model.train_ops[bucket_id], # update op that does SGD.
model.gradient_norms[bucket_id], # gradient norm.
model.losses[bucket_id]] # loss for this batch.
else:
output_feed = [model.losses[bucket_id]] # loss for this batch.
for step in xrange(decoder_size): # output logits.
output_feed.append(model.outputs[bucket_id][step])
outputs = sess.run(output_feed, input_feed)
if not forward_only:
return outputs[1], outputs[2], None # Gradient norm, loss, no outputs.
else:
return None, outputs[0], outputs[1:] # No gradient norm, loss, outputs.
def _get_buckets():
""" Load the dataset into buckets based on their lengths.
    train_buckets_scale is the interval scale (a cumulative distribution) that'll
    help us choose a random bucket later on.
"""
test_buckets = data.load_data('test_ids.enc', 'test_ids.dec')
data_buckets = data.load_data('train_ids.enc', 'train_ids.dec')
train_bucket_sizes = [len(data_buckets[b]) for b in xrange(len(config.BUCKETS))]
print("Number of samples in each bucket:\n", train_bucket_sizes)
train_total_size = sum(train_bucket_sizes)
# list of increasing numbers from 0 to 1 that we'll use to select a bucket.
train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size
for i in xrange(len(train_bucket_sizes))]
print("Bucket scale:\n", train_buckets_scale)
return test_buckets, data_buckets, train_buckets_scale
def _get_skip_step(iteration):
""" How many steps should the model train before it saves all the weights. """
if iteration < 100:
return 30
return 100
def _check_restore_parameters(sess, saver):
""" Restore the previously trained parameters if there are any. """
ckpt = tf.train.get_checkpoint_state(os.path.dirname(config.CPT_PATH + '/checkpoint'))
if ckpt and ckpt.model_checkpoint_path:
print("Loading parameters for the Chatbot")
saver.restore(sess, ckpt.model_checkpoint_path)
else:
print("Initializing fresh parameters for the Chatbot")
def _eval_test_set(sess, model, test_buckets):
""" Evaluate on the test set. """
for bucket_id in xrange(len(config.BUCKETS)):
if len(test_buckets[bucket_id]) == 0:
print(" Test: empty bucket %d" % (bucket_id))
continue
start = time.time()
encoder_inputs, decoder_inputs, decoder_masks = data.get_batch(test_buckets[bucket_id],
bucket_id,
batch_size=config.BATCH_SIZE)
_, step_loss, _ = run_step(sess, model, encoder_inputs, decoder_inputs,
decoder_masks, bucket_id, True)
print('Test bucket {}: loss {}, time {}'.format(bucket_id, step_loss, time.time() - start))
def train():
""" Train the bot """
test_buckets, data_buckets, train_buckets_scale = _get_buckets()
    # In train mode, we need to create the backward path, so forward_only is False.
model = ChatBotModel(False, config.BATCH_SIZE)
model.build_graph()
saver = tf.train.Saver()
with tf.Session() as sess:
print('Running session')
sess.run(tf.global_variables_initializer())
_check_restore_parameters(sess, saver)
iteration = model.global_step.eval()
total_loss = 0
while True:
skip_step = _get_skip_step(iteration)
bucket_id = _get_random_bucket(train_buckets_scale)
encoder_inputs, decoder_inputs, decoder_masks = data.get_batch(data_buckets[bucket_id],
bucket_id,
batch_size=config.BATCH_SIZE)
start = time.time()
_, step_loss, _ = run_step(sess, model, encoder_inputs, decoder_inputs, decoder_masks, bucket_id, False)
total_loss += step_loss
iteration += 1
if iteration % skip_step == 0:
print('Iter {}: loss {}, time {}'.format(iteration, total_loss/skip_step, time.time() - start))
start = time.time()
total_loss = 0
saver.save(sess, os.path.join(config.CPT_PATH, 'chatbot'), global_step=model.global_step)
if iteration % (10 * skip_step) == 0:
# Run evals on development set and print their loss
_eval_test_set(sess, model, test_buckets)
start = time.time()
sys.stdout.flush()
def _get_user_input():
""" Get user's input, which will be transformed into encoder input later """
print("> ", end="")
sys.stdout.flush()
return sys.stdin.readline()
def _find_right_bucket(length):
""" Find the proper bucket for an encoder input based on its length """
return min([b for b in xrange(len(config.BUCKETS))
if config.BUCKETS[b][0] >= length])
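# Illustrative example with hypothetical config.BUCKETS = [(8, 10), (12, 14), (16, 19)]:
# an encoder input of length 9 does not fit bucket 0 (8 < 9) but fits bucket 1
# (12 >= 9), so _find_right_bucket(9) returns 1.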
def _construct_response(output_logits, inv_dec_vocab):
""" Construct a response to the user's encoder input.
@output_logits: the outputs from sequence to sequence wrapper.
output_logits is decoder_size np array, each of dim 1 x DEC_VOCAB
This is a greedy decoder - outputs are just argmaxes of output_logits.
"""
print(output_logits[0])
outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]
# If there is an EOS symbol in outputs, cut them at that point.
if config.EOS_ID in outputs:
outputs = outputs[:outputs.index(config.EOS_ID)]
# Print out sentence corresponding to outputs.
return " ".join([tf.compat.as_str(inv_dec_vocab[output]) for output in outputs])
def chat():
""" in test mode, we don't to create the backward path
"""
_, enc_vocab = data.load_vocab(os.path.join(config.PROCESSED_PATH, 'vocab.enc'))
inv_dec_vocab, _ = data.load_vocab(os.path.join(config.PROCESSED_PATH, 'vocab.dec'))
model = ChatBotModel(True, batch_size=1)
model.build_graph()
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
_check_restore_parameters(sess, saver)
output_file = open(os.path.join(config.PROCESSED_PATH, config.OUTPUT_FILE), 'a+')
# Decode from standard input.
max_length = config.BUCKETS[-1][0]
print('Welcome to TensorBro. Say something. Enter to exit. Max length is', max_length)
while True:
line = _get_user_input()
if len(line) > 0 and line[-1] == '\n':
line = line[:-1]
if line == '':
break
output_file.write('HUMAN ++++ ' + line + '\n')
# Get token-ids for the input sentence.
token_ids = data.sentence2id(enc_vocab, str(line))
if (len(token_ids) > max_length):
print('Max length I can handle is:', max_length)
line = _get_user_input()
continue
# Which bucket does it belong to?
bucket_id = _find_right_bucket(len(token_ids))
# Get a 1-element batch to feed the sentence to the model.
encoder_inputs, decoder_inputs, decoder_masks = data.get_batch([(token_ids, [])],
bucket_id,
batch_size=1)
# Get output logits for the sentence.
_, _, output_logits = run_step(sess, model, encoder_inputs, decoder_inputs,
decoder_masks, bucket_id, True)
response = _construct_response(output_logits, inv_dec_vocab)
print(response)
output_file.write('BOT ++++ ' + response + '\n')
output_file.write('=============================================\n')
output_file.close()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--mode', choices={'train', 'chat'},
default='train', help="mode. if not specified, it's in the train mode")
args = parser.parse_args()
if not os.path.isdir(config.PROCESSED_PATH):
data.prepare_raw_data()
data.process_data()
print('Data ready!')
# create checkpoints folder if there isn't one already
data.make_dir(config.CPT_PATH)
if args.mode == 'train':
train()
elif args.mode == 'chat':
chat()
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
import time
import datetime
import mock
from factory import SubFactory
from factory.fuzzy import FuzzyDateTime, FuzzyAttribute, FuzzyChoice
from mock import patch, Mock
import factory
import pytz
import factory.django
from factory.django import DjangoModelFactory
from django.apps import apps
from django.utils import timezone
from django.contrib.contenttypes.models import ContentType
from django.db.utils import IntegrityError
from faker import Factory
from waffle.models import Flag, Sample, Switch
from website.notifications.constants import NOTIFICATION_TYPES
from osf.utils import permissions
from website.archiver import ARCHIVER_SUCCESS
from website.identifiers.utils import parse_identifiers
from website.settings import FAKE_EMAIL_NAME, FAKE_EMAIL_DOMAIN
from framework.auth.core import Auth
from osf import models
from osf.models.sanctions import Sanction
from osf.models.storage import PROVIDER_ASSET_NAME_CHOICES
from osf.utils.names import impute_names_model
from osf.utils.workflows import DefaultStates, DefaultTriggers
from addons.osfstorage.models import OsfStorageFile, Region
fake = Factory.create()
# If tests are run on really old processors without high precision this might fail. Unlikely to occur.
fake_email = lambda: '{}+{}@{}'.format(FAKE_EMAIL_NAME, int(time.clock() * 1000000), FAKE_EMAIL_DOMAIN)
# Do this out of a cls context to avoid setting "t" as a local
PROVIDER_ASSET_NAME_CHOICES = tuple([t[0] for t in PROVIDER_ASSET_NAME_CHOICES])
def get_default_metaschema():
"""This needs to be a method so it gets called after the test database is set up"""
return models.RegistrationSchema.objects.first()
def FakeList(provider, n, *args, **kwargs):
func = getattr(fake, provider)
return [func(*args, **kwargs) for _ in range(n)]
class UserFactory(DjangoModelFactory):
# TODO: Change this to only generate long names and see what breaks
fullname = factory.Sequence(lambda n: 'Freddie Mercury{0}'.format(n))
username = factory.LazyFunction(fake_email)
password = factory.PostGenerationMethodCall('set_password',
'queenfan86')
is_registered = True
date_confirmed = factory.Faker('date_time_this_decade', tzinfo=pytz.utc)
merged_by = None
verification_key = None
class Meta:
model = models.OSFUser
@classmethod
def _build(cls, target_class, *args, **kwargs):
emails = kwargs.pop('emails', [])
instance = super(DjangoModelFactory, cls)._build(target_class, *args, **kwargs)
if emails:
# Save for M2M population
instance.set_unusable_password()
instance.save()
for email in emails:
instance.emails.create(address=email)
return instance
@classmethod
def _create(cls, target_class, *args, **kwargs):
emails = kwargs.pop('emails', [])
instance = super(DjangoModelFactory, cls)._create(target_class, *args, **kwargs)
if emails and not instance.pk:
# Save for M2M population
instance.set_unusable_password()
instance.save()
for email in emails:
instance.emails.create(address=email)
return instance
@factory.post_generation
def set_names(self, create, extracted):
parsed = impute_names_model(self.fullname)
for key, value in parsed.items():
setattr(self, key, value)
@factory.post_generation
def set_emails(self, create, extracted):
if not self.emails.filter(address=self.username).exists():
if not self.id:
if create:
# Perform implicit save to populate M2M
self.save(clean=False)
else:
# This might lead to strange behavior
return
self.emails.create(address=str(self.username).lower())
class AuthUserFactory(UserFactory):
"""A user that automatically has an api key, for quick authentication.
Example: ::
user = AuthUserFactory()
res = self.app.get(url, auth=user.auth) # user is "logged in"
"""
@factory.post_generation
def add_auth(self, create, extracted):
self.auth = (self.username, 'queenfan86')
class AuthFactory(factory.base.Factory):
class Meta:
model = Auth
user = factory.SubFactory(UserFactory)
class UnregUserFactory(DjangoModelFactory):
email = factory.LazyFunction(fake_email)
fullname = factory.Sequence(lambda n: 'Freddie Mercury{0}'.format(n))
date_registered = factory.Faker('date_time', tzinfo=pytz.utc)
class Meta:
model = models.OSFUser
@classmethod
def _build(cls, target_class, *args, **kwargs):
"""Build an object without saving it."""
ret = target_class.create_unregistered(email=kwargs.pop('email'), fullname=kwargs.pop('fullname'))
for key, val in kwargs.items():
setattr(ret, key, val)
return ret
@classmethod
def _create(cls, target_class, *args, **kwargs):
ret = target_class.create_unregistered(email=kwargs.pop('email'), fullname=kwargs.pop('fullname'))
for key, val in kwargs.items():
setattr(ret, key, val)
ret.save()
return ret
class UnconfirmedUserFactory(DjangoModelFactory):
"""Factory for a user that has not yet confirmed their primary email
address (username).
"""
class Meta:
model = models.OSFUser
username = factory.LazyFunction(fake_email)
fullname = factory.Sequence(lambda n: 'Freddie Mercury{0}'.format(n))
password = 'lolomglgt'
@classmethod
def _build(cls, target_class, username, password, fullname):
"""Build an object without saving it."""
instance = target_class.create_unconfirmed(
username=username, password=password, fullname=fullname
)
instance.date_registered = fake.date_time(tzinfo=pytz.utc)
return instance
@classmethod
def _create(cls, target_class, username, password, fullname):
instance = target_class.create_unconfirmed(
username=username, password=password, fullname=fullname
)
instance.date_registered = fake.date_time(tzinfo=pytz.utc)
instance.save()
return instance
class BaseNodeFactory(DjangoModelFactory):
title = factory.Faker('catch_phrase')
description = factory.Faker('sentence')
created = factory.LazyFunction(timezone.now)
creator = factory.SubFactory(AuthUserFactory)
class Meta:
model = models.Node
class ProjectFactory(BaseNodeFactory):
category = 'project'
class ProjectWithAddonFactory(ProjectFactory):
"""Factory for a project that has an addon. The addon will be added to
both the Node and the creator records. ::
p = ProjectWithAddonFactory(addon='github')
p.get_addon('github') # => github node settings object
p.creator.get_addon('github') # => github user settings object
"""
# TODO: Should use mock addon objects
@classmethod
def _build(cls, target_class, addon='s3', *args, **kwargs):
"""Build an object without saving it."""
instance = ProjectFactory._build(target_class, *args, **kwargs)
auth = Auth(user=instance.creator)
instance.add_addon(addon, auth)
instance.creator.add_addon(addon)
return instance
@classmethod
def _create(cls, target_class, addon='s3', *args, **kwargs):
instance = ProjectFactory._create(target_class, *args, **kwargs)
auth = Auth(user=instance.creator)
instance.add_addon(addon, auth)
instance.creator.add_addon(addon)
instance.save()
return instance
class NodeFactory(BaseNodeFactory):
category = 'hypothesis'
parent = factory.SubFactory(ProjectFactory)
class InstitutionFactory(DjangoModelFactory):
name = factory.Faker('company')
login_url = factory.Faker('url')
logout_url = factory.Faker('url')
domains = FakeList('url', n=3)
email_domains = FakeList('domain_name', n=1)
logo_name = factory.Faker('file_name')
class Meta:
model = models.Institution
class NodeLicenseRecordFactory(DjangoModelFactory):
year = factory.Faker('year')
copyright_holders = FakeList('name', n=3)
class Meta:
model = models.NodeLicenseRecord
@classmethod
def _create(cls, *args, **kwargs):
kwargs['node_license'] = kwargs.get(
'node_license',
models.NodeLicense.objects.get(name='No license')
)
return super(NodeLicenseRecordFactory, cls)._create(*args, **kwargs)
class NodeLogFactory(DjangoModelFactory):
class Meta:
model = models.NodeLog
action = 'file_added'
params = {'path': '/'}
user = SubFactory(UserFactory)
class PrivateLinkFactory(DjangoModelFactory):
class Meta:
model = models.PrivateLink
name = factory.Faker('word')
key = factory.Faker('md5')
anonymous = False
creator = factory.SubFactory(UserFactory)
class CollectionFactory(DjangoModelFactory):
class Meta:
model = models.Collection
is_bookmark_collection = False
title = factory.Faker('catch_phrase')
creator = factory.SubFactory(UserFactory)
@classmethod
def _create(cls, *args, **kwargs):
collected_types = kwargs.pop('collected_types', ContentType.objects.filter(app_label='osf', model__in=['abstractnode', 'basefilenode', 'collection', 'preprintservice']))
obj = cls._build(*args, **kwargs)
obj.save()
# M2M, requires initial save
obj.collected_types = collected_types
return obj
class BookmarkCollectionFactory(CollectionFactory):
is_bookmark_collection = True
class CollectionProviderFactory(DjangoModelFactory):
name = factory.Faker('company')
description = factory.Faker('bs')
external_url = factory.Faker('url')
class Meta:
model = models.CollectionProvider
@classmethod
def _create(cls, *args, **kwargs):
user = kwargs.pop('creator', None)
obj = cls._build(*args, **kwargs)
obj._creator = user or UserFactory() # Generates primary_collection
obj.save()
return obj
class RegistrationProviderFactory(DjangoModelFactory):
name = factory.Faker('company')
description = factory.Faker('bs')
external_url = factory.Faker('url')
class Meta:
model = models.RegistrationProvider
@classmethod
def _create(cls, *args, **kwargs):
user = kwargs.pop('creator', None)
obj = cls._build(*args, **kwargs)
obj._creator = user or models.OSFUser.objects.first() or UserFactory() # Generates primary_collection
obj.save()
return obj
class RegistrationFactory(BaseNodeFactory):
creator = None
# Default project is created if not provided
category = 'project'
@classmethod
def _build(cls, target_class, *args, **kwargs):
raise Exception('Cannot build registration without saving.')
@classmethod
def _create(cls, target_class, project=None, is_public=False,
schema=None, data=None,
archive=False, embargo=None, registration_approval=None, retraction=None,
provider=None,
*args, **kwargs):
user = None
if project:
user = project.creator
user = kwargs.pop('user', None) or kwargs.get('creator') or user or UserFactory()
kwargs['creator'] = user
provider = provider or models.RegistrationProvider.objects.first() or RegistrationProviderFactory(_id='osf')
# Original project to be registered
project = project or target_class(*args, **kwargs)
if project.has_permission(user, 'admin'):
project.add_contributor(
contributor=user,
permissions=permissions.CREATOR_PERMISSIONS,
log=False,
save=False
)
project.save()
# Default registration parameters
schema = schema or get_default_metaschema()
data = data or {'some': 'data'}
auth = Auth(user=user)
register = lambda: project.register_node(
schema=schema,
auth=auth,
data=data,
provider=provider,
)
def add_approval_step(reg):
if embargo:
reg.embargo = embargo
elif registration_approval:
reg.registration_approval = registration_approval
elif retraction:
reg.retraction = retraction
else:
reg.require_approval(reg.creator)
reg.save()
reg.sanction.add_authorizer(reg.creator, reg)
reg.sanction.save()
with patch('framework.celery_tasks.handlers.enqueue_task'):
reg = register()
add_approval_step(reg)
if not archive:
with patch.object(reg.archive_job, 'archive_tree_finished', Mock(return_value=True)):
archive_job = reg.archive_job
archive_job.status = ARCHIVER_SUCCESS
archive_job.done = True
reg.sanction.state = Sanction.APPROVED
reg.sanction.save()
if is_public:
reg.is_public = True
reg.save()
return reg
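# Hedged usage sketch (not part of the original factories module): how the
# RegistrationFactory above is typically combined with the other factories in a
# test. The helper is illustrative only and is never called at import time; the
# keyword arguments mirror the _create signature defined above.
def _example_registration_factory_usage():
    # Register a brand-new ProjectFactory project with default schema/data.
    reg = RegistrationFactory()
    # Register an existing project, approve it, and make it public.
    project = ProjectFactory()
    public_reg = RegistrationFactory(project=project, is_public=True)
    # Wrap a registration in a withdrawal (see WithdrawnRegistrationFactory below).
    withdrawal = WithdrawnRegistrationFactory(registration=RegistrationFactory())
    return reg, public_reg, withdrawal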
class WithdrawnRegistrationFactory(BaseNodeFactory):
@classmethod
def _create(cls, *args, **kwargs):
registration = kwargs.pop('registration', None)
registration.is_public = True
user = kwargs.pop('user', registration.creator)
registration.retract_registration(user)
withdrawal = registration.retraction
token = withdrawal.approval_state.values()[0]['approval_token']
with patch('osf.models.AbstractNode.update_search'):
withdrawal.approve_retraction(user, token)
withdrawal.save()
return withdrawal
class SanctionFactory(DjangoModelFactory):
class Meta:
abstract = True
@classmethod
def _create(cls, target_class, initiated_by=None, approve=False, *args, **kwargs):
user = kwargs.pop('user', None) or UserFactory()
kwargs['initiated_by'] = initiated_by or user
sanction = super(SanctionFactory, cls)._create(target_class, *args, **kwargs)
reg_kwargs = {
'creator': user,
'user': user,
sanction.SHORT_NAME: sanction
}
RegistrationFactory(**reg_kwargs)
if not approve:
sanction.state = Sanction.UNAPPROVED
sanction.save()
return sanction
class RetractionFactory(SanctionFactory):
class Meta:
model = models.Retraction
user = factory.SubFactory(UserFactory)
class EmbargoFactory(SanctionFactory):
class Meta:
model = models.Embargo
user = factory.SubFactory(UserFactory)
class RegistrationApprovalFactory(SanctionFactory):
class Meta:
model = models.RegistrationApproval
user = factory.SubFactory(UserFactory)
class EmbargoTerminationApprovalFactory(DjangoModelFactory):
@classmethod
def create(cls, registration=None, user=None, embargo=None, *args, **kwargs):
if registration:
if not user:
user = registration.creator
else:
user = user or UserFactory()
if not embargo:
embargo = EmbargoFactory(state=models.Sanction.APPROVED, approve=True)
registration = embargo._get_registration()
else:
registration = RegistrationFactory(creator=user, user=user, embargo=embargo)
with mock.patch('osf.models.sanctions.TokenApprovableSanction.ask', mock.Mock()):
approval = registration.request_embargo_termination(Auth(user))
return approval
class DraftRegistrationFactory(DjangoModelFactory):
class Meta:
model = models.DraftRegistration
@classmethod
def _create(cls, *args, **kwargs):
branched_from = kwargs.get('branched_from')
initiator = kwargs.get('initiator')
registration_schema = kwargs.get('registration_schema')
registration_metadata = kwargs.get('registration_metadata')
provider = kwargs.get('provider')
if not branched_from:
project_params = {}
if initiator:
project_params['creator'] = initiator
branched_from = ProjectFactory(**project_params)
initiator = branched_from.creator
registration_schema = registration_schema or models.RegistrationSchema.objects.first()
registration_metadata = registration_metadata or {}
provider = provider or models.RegistrationProvider.objects.first() or RegistrationProviderFactory(_id='osf')
draft = models.DraftRegistration.create_from_node(
branched_from,
user=initiator,
schema=registration_schema,
data=registration_metadata,
provider=provider,
)
return draft
class CommentFactory(DjangoModelFactory):
class Meta:
model = models.Comment
content = factory.Sequence(lambda n: 'Comment {0}'.format(n))
@classmethod
def _build(cls, target_class, *args, **kwargs):
node = kwargs.pop('node', None) or NodeFactory()
user = kwargs.pop('user', None) or node.creator
target = kwargs.pop('target', None) or models.Guid.load(node._id)
content = kwargs.pop('content', None) or 'Test comment.'
instance = target_class(
node=node,
user=user,
target=target,
content=content,
*args, **kwargs
)
if isinstance(target.referent, target_class):
instance.root_target = target.referent.root_target
else:
instance.root_target = target
return instance
@classmethod
def _create(cls, target_class, *args, **kwargs):
node = kwargs.pop('node', None) or NodeFactory()
user = kwargs.pop('user', None) or node.creator
target = kwargs.pop('target', None) or models.Guid.load(node._id)
content = kwargs.pop('content', None) or 'Test comment.'
instance = target_class(
node=node,
user=user,
target=target,
content=content,
*args, **kwargs
)
if isinstance(target.referent, target_class):
instance.root_target = target.referent.root_target
else:
instance.root_target = target
instance.save()
return instance
class SubjectFactory(DjangoModelFactory):
text = factory.Sequence(lambda n: 'Example Subject #{}'.format(n))
class Meta:
model = models.Subject
@classmethod
def _create(cls, target_class, parent=None, provider=None, bepress_subject=None, *args, **kwargs):
provider = provider or models.PreprintProvider.objects.first() or PreprintProviderFactory(_id='osf')
if provider._id != 'osf' and not bepress_subject:
osf = models.PreprintProvider.load('osf') or PreprintProviderFactory(_id='osf')
bepress_subject = SubjectFactory(provider=osf)
try:
ret = super(SubjectFactory, cls)._create(target_class, parent=parent, provider=provider, bepress_subject=bepress_subject, *args, **kwargs)
except IntegrityError:
ret = models.Subject.objects.get(text=kwargs['text'])
if parent:
ret.parent = parent
return ret
class PreprintProviderFactory(DjangoModelFactory):
name = factory.Faker('company')
description = factory.Faker('bs')
external_url = factory.Faker('url')
class Meta:
model = models.PreprintProvider
@classmethod
def _build(cls, target_class, *args, **kwargs):
instance = super(PreprintProviderFactory, cls)._build(target_class, *args, **kwargs)
if not instance.share_title:
instance.share_title = instance._id
return instance
@classmethod
def _create(cls, target_class, *args, **kwargs):
instance = super(PreprintProviderFactory, cls)._create(target_class, *args, **kwargs)
if not instance.share_title:
instance.share_title = instance._id
instance.save()
return instance
def sync_set_identifiers(preprint):
from website.identifiers.clients import EzidClient
from website import settings
client = preprint.get_doi_client()
if isinstance(client, EzidClient):
doi_value = settings.DOI_FORMAT.format(prefix=settings.EZID_DOI_NAMESPACE, guid=preprint._id)
ark_value = '{ark}osf.io/{guid}'.format(ark=settings.EZID_ARK_NAMESPACE, guid=preprint._id)
return_value = {'success': '{} | {}'.format(doi_value, ark_value)}
else:
return_value = {'doi': settings.DOI_FORMAT.format(prefix=preprint.provider.doi_prefix, guid=preprint._id)}
doi_client_return_value = {
'response': return_value,
'already_exists': False
}
id_dict = parse_identifiers(doi_client_return_value)
preprint.set_identifier_values(doi=id_dict['doi'])
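# Hedged illustration (not part of the original module): for a non-EZID DOI client,
# sync_set_identifiers() above builds the DOI from the provider prefix and the
# preprint guid via settings.DOI_FORMAT. Assuming a format string such as
# '{prefix}/osf.io/{guid}', a provider doi_prefix of '10.31219' and a guid of
# 'abc12' would yield a stored DOI value like '10.31219/osf.io/abc12'.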
class PreprintFactory(DjangoModelFactory):
class Meta:
model = models.PreprintService
doi = factory.Sequence(lambda n: '10.123/{}'.format(n))
provider = factory.SubFactory(PreprintProviderFactory)
@classmethod
def _build(cls, target_class, *args, **kwargs):
creator = kwargs.pop('creator', None) or UserFactory()
project = kwargs.pop('project', None) or ProjectFactory(creator=creator)
provider = kwargs.pop('provider', None) or PreprintProviderFactory()
instance = target_class(node=project, provider=provider)
return instance
@classmethod
def _create(cls, target_class, *args, **kwargs):
update_task_patcher = mock.patch('website.preprints.tasks.on_preprint_updated.si')
update_task_patcher.start()
finish = kwargs.pop('finish', True)
set_doi = kwargs.pop('set_doi', True)
is_published = kwargs.pop('is_published', True)
instance = cls._build(target_class, *args, **kwargs)
doi = kwargs.pop('doi', None)
license_details = kwargs.pop('license_details', None)
filename = kwargs.pop('filename', None) or 'preprint_file.txt'
subjects = kwargs.pop('subjects', None) or [[SubjectFactory()._id]]
instance.node.preprint_article_doi = doi
instance.machine_state = kwargs.pop('machine_state', 'initial')
user = kwargs.pop('creator', None) or instance.node.creator
if not instance.node.is_contributor(user):
instance.node.add_contributor(
contributor=user,
permissions=permissions.CREATOR_PERMISSIONS,
log=False,
save=True
)
preprint_file = OsfStorageFile.create(
target=instance.node,
path='/{}'.format(filename),
name=filename,
materialized_path='/{}'.format(filename))
preprint_file.save()
from addons.osfstorage import settings as osfstorage_settings
preprint_file.create_version(user, {
'object': '06d80e',
'service': 'cloud',
osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
}, {
'size': 1337,
'contentType': 'img/png'
}).save()
update_task_patcher.stop()
if finish:
auth = Auth(user)
instance.set_primary_file(preprint_file, auth=auth, save=True)
instance.set_subjects(subjects, auth=auth)
if license_details:
instance.set_preprint_license(license_details, auth=auth)
create_task_patcher = mock.patch('website.identifiers.utils.request_identifiers')
mock_create_identifier = create_task_patcher.start()
if is_published and set_doi:
mock_create_identifier.side_effect = sync_set_identifiers(instance)
instance.set_published(is_published, auth=auth)
create_task_patcher.stop()
if not instance.is_published:
instance.node._has_abandoned_preprint = True
instance.node.save()
instance.save()
return instance
class TagFactory(DjangoModelFactory):
class Meta:
model = models.Tag
name = factory.Faker('word')
system = False
class DismissedAlertFactory(DjangoModelFactory):
class Meta:
model = models.DismissedAlert
@classmethod
def _create(cls, *args, **kwargs):
kwargs['_id'] = kwargs.get('_id', 'adblock')
kwargs['user'] = kwargs.get('user', UserFactory())
kwargs['location'] = kwargs.get('location', 'iver/settings')
return super(DismissedAlertFactory, cls)._create(*args, **kwargs)
class ApiOAuth2PersonalTokenFactory(DjangoModelFactory):
class Meta:
model = models.ApiOAuth2PersonalToken
owner = factory.SubFactory(UserFactory)
scopes = 'osf.full_write osf.full_read'
name = factory.Sequence(lambda n: 'Example OAuth2 Personal Token #{}'.format(n))
class ApiOAuth2ApplicationFactory(DjangoModelFactory):
class Meta:
model = models.ApiOAuth2Application
owner = factory.SubFactory(UserFactory)
name = factory.Sequence(lambda n: 'Example OAuth2 Application #{}'.format(n))
home_url = 'ftp://ftp.ncbi.nlm.nimh.gov/'
callback_url = 'http://example.uk'
class ForkFactory(DjangoModelFactory):
class Meta:
model = models.Node
@classmethod
def _create(cls, *args, **kwargs):
project = kwargs.pop('project', None)
user = kwargs.pop('user', project.creator)
title = kwargs.pop('title', None)
fork = project.fork_node(auth=Auth(user), title=title)
fork.save()
return fork
class IdentifierFactory(DjangoModelFactory):
class Meta:
model = models.Identifier
referent = factory.SubFactory(RegistrationFactory)
value = factory.Sequence(lambda n: 'carp:/2460{}'.format(n))
@classmethod
def _create(cls, *args, **kwargs):
kwargs['category'] = kwargs.get('category', 'carpid')
return super(IdentifierFactory, cls)._create(*args, **kwargs)
class NodeRelationFactory(DjangoModelFactory):
class Meta:
model = models.NodeRelation
child = factory.SubFactory(NodeFactory)
parent = factory.SubFactory(NodeFactory)
class ExternalAccountFactory(DjangoModelFactory):
class Meta:
model = models.ExternalAccount
oauth_key = 'some-silly-key'
oauth_secret = 'some-super-secret'
provider = 'mock2'
provider_id = factory.Sequence(lambda n: 'user-{0}'.format(n))
provider_name = 'Fake Provider'
display_name = factory.Sequence(lambda n: 'user-{0}'.format(n))
profile_url = 'http://wutwut.com/'
refresh_token = 'some-sillier-key'
class MockOAuth2Provider(models.ExternalProvider):
name = 'Mock OAuth 2.0 Provider'
short_name = 'mock2'
client_id = 'mock2_client_id'
client_secret = 'mock2_client_secret'
auth_url_base = 'https://mock2.com/auth'
callback_url = 'https://mock2.com/callback'
auto_refresh_url = 'https://mock2.com/callback'
refresh_time = 300
expiry_time = 9001
def handle_callback(self, response):
return {
'provider_id': 'mock_provider_id'
}
class NotificationSubscriptionFactory(DjangoModelFactory):
class Meta:
model = models.NotificationSubscription
def make_node_lineage():
node1 = NodeFactory()
node2 = NodeFactory(parent=node1)
node3 = NodeFactory(parent=node2)
node4 = NodeFactory(parent=node3)
return [node1._id, node2._id, node3._id, node4._id]
class NotificationDigestFactory(DjangoModelFactory):
timestamp = FuzzyDateTime(datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC))
node_lineage = FuzzyAttribute(fuzzer=make_node_lineage)
user = factory.SubFactory(UserFactory)
send_type = FuzzyChoice(choices=NOTIFICATION_TYPES.keys())
message = fake.text(max_nb_chars=2048)
event = fake.text(max_nb_chars=50)
class Meta:
model = models.NotificationDigest
class ConferenceFactory(DjangoModelFactory):
class Meta:
model = models.Conference
endpoint = factory.Sequence(lambda n: 'conference{0}'.format(n))
name = factory.Faker('catch_phrase')
active = True
is_meeting = True
@factory.post_generation
def admins(self, create, extracted, **kwargs):
self.admins = extracted or [UserFactory()]
class SessionFactory(DjangoModelFactory):
class Meta:
model = models.Session
@classmethod
def _build(cls, target_class, *args, **kwargs):
user = kwargs.pop('user', None)
instance = target_class(*args, **kwargs)
if user:
instance.data['auth_user_username'] = user.username
instance.data['auth_user_id'] = user._primary_key
instance.data['auth_user_fullname'] = user.fullname
return instance
@classmethod
def _create(cls, target_class, *args, **kwargs):
instance = cls._build(target_class, *args, **kwargs)
instance.save()
return instance
class ArchiveJobFactory(DjangoModelFactory):
class Meta:
model = models.ArchiveJob
class ReviewActionFactory(DjangoModelFactory):
class Meta:
model = models.ReviewAction
trigger = FuzzyChoice(choices=DefaultTriggers.values())
comment = factory.Faker('text')
from_state = FuzzyChoice(choices=DefaultStates.values())
to_state = FuzzyChoice(choices=DefaultStates.values())
target = factory.SubFactory(PreprintFactory)
creator = factory.SubFactory(AuthUserFactory)
is_deleted = False
class ScheduledBannerFactory(DjangoModelFactory):
# Banners are set for 24 hours from start_date if no end date is given
class Meta:
model = models.ScheduledBanner
name = factory.Faker('name')
default_alt_text = factory.Faker('text')
mobile_alt_text = factory.Faker('text')
default_photo = factory.Faker('file_name')
mobile_photo = factory.Faker('file_name')
license = factory.Faker('name')
color = 'white'
start_date = timezone.now()
end_date = factory.LazyAttribute(lambda o: o.start_date)
class FlagFactory(DjangoModelFactory):
name = factory.Faker('catch_phrase')
everyone = True
note = 'This is a waffle test flag'
class Meta:
model = Flag
class SampleFactory(DjangoModelFactory):
name = factory.Faker('catch_phrase')
percent = 100
note = 'This is a waffle test sample'
class Meta:
model = Sample
class SwitchFactory(DjangoModelFactory):
name = factory.Faker('catch_phrase')
active = True
note = 'This is a waffle test switch'
class Meta:
model = Switch
class NodeRequestFactory(DjangoModelFactory):
class Meta:
model = models.NodeRequest
comment = factory.Faker('text')
class PreprintRequestFactory(DjangoModelFactory):
class Meta:
model = models.PreprintRequest
comment = factory.Faker('text')
osfstorage_settings = apps.get_app_config('addons_osfstorage')
generic_location = {
'service': 'cloud',
osfstorage_settings.WATERBUTLER_RESOURCE: 'resource',
'object': '1615307',
}
generic_waterbutler_settings = {
'storage': {
'provider': 'glowcloud',
'container': 'osf_storage',
'use_public': True,
}
}
generic_waterbutler_credentials = {
'storage': {
'region': 'PartsUnknown',
'username': 'mankind',
'token': 'heresmrsocko'
}
}
class RegionFactory(DjangoModelFactory):
class Meta:
model = Region
name = factory.Sequence(lambda n: 'Region {0}'.format(n))
_id = factory.Sequence(lambda n: 'us_east_{0}'.format(n))
waterbutler_credentials = generic_waterbutler_credentials
waterbutler_settings = generic_waterbutler_settings
waterbutler_url = 'http://123.456.test.woo'
class ProviderAssetFileFactory(DjangoModelFactory):
class Meta:
model = models.ProviderAssetFile
name = FuzzyChoice(choices=PROVIDER_ASSET_NAME_CHOICES)
file = factory.django.FileField(filename=factory.Faker('text'))
@classmethod
def _create(cls, target_class, *args, **kwargs):
providers = kwargs.pop('providers', [])
instance = super(ProviderAssetFileFactory, cls)._create(target_class, *args, **kwargs)
instance.providers = providers
instance.save()
return instance
|
|
#!/usr/bin/env python
import subprocess
import praw
import datetime
import pyperclip
from hashlib import sha1
from flask import Flask
from flask import Response
from flask import request
from cStringIO import StringIO
from base64 import b64encode
from base64 import b64decode
from ConfigParser import ConfigParser
import OAuth2Util
import os
import markdown
import bleach
# encoding=utf8
import sys
from participantCollection import ParticipantCollection
reload(sys)
sys.setdefaultencoding('utf8')
# Edit Me!
challengePageSubmissionId = 'gukebb'
flaskport = 8938
thisMonthName = "June"
nextMonthName = "July"
readAllCommentsWhichCanBeSlower = False
sorryTooLateToSignUpReplyText = "Sorry, but the late signup grace period for " + thisMonthName + " is over, so you can't officially join this challenge. But feel free to follow along anyway, and comment all you want. And be sure to join us for the " + nextMonthName + " challenge. Signup posts for " + nextMonthName + " will begin during the last week of " + thisMonthName + "."
reinstatedReplyText = "OK, I've reinstated you. You should start showing up on the list again starting tomorrow."
app = Flask(__name__)
app.debug = True
commentHashesAndComments = {}
submission = None
def loginAndReturnRedditSession():
config = ConfigParser()
config.read("../reddit-password-credentials.cfg")
user = config.get("Reddit", "user")
password = config.get("Reddit", "password")
# TODO: password auth is going away, and we will soon need to do oauth.
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
redditSession.login(user, password, disable_warning=True)
# submissions = redditSession.get_subreddit('pornfree').get_hot(limit=5)
# print [str(x) for x in submissions]
return redditSession
def loginOAuthAndReturnRedditSession():
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
# New version of praw does not require explicit use of the OAuth2Util object. Presumably because reddit now REQUIRES oauth.
# o = OAuth2Util.OAuth2Util(redditSession, print_log=True, configfile="../reddit-oauth-credentials.cfg")
# TODO: Testing comment of refresh. We authenticate fresh every time, so presumably no need to do o.refresh().
# o.refresh(force=True)
return redditSession
def getSubmissionForRedditSession(redditSession):
# submission = redditSession.get_submission(submission_id=challengePageSubmissionId)
submission = redditSession.submission(id=challengePageSubmissionId)
if readAllCommentsWhichCanBeSlower:
submission.comments.replace_more(limit=None)
# submission.replace_more_comments(limit=None, threshold=0)
return submission
def getCommentsForSubmission(submission):
# return [comment for comment in praw.helpers.flatten_tree(submission.comments) if comment.__class__ == praw.models.Comment]
commentForest = submission.comments
# commentForest.replace_more(limit=None, threshold=0)
return [comment for comment in commentForest.list() if comment.__class__ == praw.models.Comment]
def retireCommentHash(commentHash):
with open("retiredcommenthashes.txt", "a") as commentHashFile:
commentHashFile.write(commentHash + '\n')
def retiredCommentHashes():
with open("retiredcommenthashes.txt", "r") as commentHashFile:
# return commentHashFile.readlines()
return commentHashFile.read().splitlines()
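# Hedged sketch (not part of the original script): the dedupe scheme used below
# hashes each Reddit comment over its fullname plus body; once a moderator has
# acted on it, the hash is appended to retiredcommenthashes.txt so the comment
# stops appearing on the moderation page. This helper just mirrors that inline
# logic from moderatechallenge() and is not wired into any route.
def commentHashFor(comment):
    commentHash = sha1()
    commentHash.update(comment.fullname)
    commentHash.update(comment.body.encode('utf-8'))
    return commentHash.hexdigest()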
@app.route('/moderatechallenge.html')
def moderatechallenge():
currentDayOfMonthIndex = datetime.date.today().day
lateCheckinGracePeriodIsInEffect = currentDayOfMonthIndex <= 3
global commentHashesAndComments
global submission
commentHashesAndComments = {}
stringio = StringIO()
stringio.write('<html>\n<head>\n</head>\n\n')
# redditSession = loginAndReturnRedditSession()
redditSession = loginOAuthAndReturnRedditSession()
submission = getSubmissionForRedditSession(redditSession)
flat_comments = getCommentsForSubmission(submission)
retiredHashes = retiredCommentHashes()
i = 1
stringio.write('<iframe name="invisibleiframe" style="display:none;"></iframe>\n')
stringio.write("<h3>")
stringio.write(os.getcwd())
stringio.write("<br>\n")
stringio.write(submission.title)
stringio.write("</h3>\n\n")
stringio.write('<form action="copydisplaytoclipboard.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" name="actiontotake" value="Copy display.py stdout to clipboard">')
stringio.write('<input type="submit" name="actiontotake" value="Automatically post display.py stdout">')
stringio.write('</form>')
stringio.write('<form action="updategooglechart.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" value="update-google-chart.py">')
stringio.write('</form>')
for comment in flat_comments:
# print comment.is_root
# print comment.score
i += 1
commentHash = sha1()
commentHash.update(comment.fullname)
commentHash.update(comment.body.encode('utf-8'))
commentHash = commentHash.hexdigest()
if commentHash not in retiredHashes:
commentHashesAndComments[commentHash] = comment
            authorName = str(comment.author)  # comment.author can be None if the author was deleted; str() keeps this loop from crashing on those comments.
participant = ParticipantCollection().participantNamed(authorName)
stringio.write("<hr>\n")
stringio.write('<font color="blue"><b>')
stringio.write(authorName)
stringio.write('</b></font><br>')
if ParticipantCollection().hasParticipantNamed(authorName):
stringio.write(' <small><font color="green">(member)</font></small>')
if participant.isStillIn:
stringio.write(' <small><font color="green">(still in)</font></small>')
else:
stringio.write(' <small><font color="red">(out)</font></small>')
if participant.hasCheckedIn:
stringio.write(' <small><font color="green">(checked in)</font></small>')
else:
stringio.write(' <small><font color="orange">(not checked in)</font></small>')
if participant.hasRelapsed:
stringio.write(' <small><font color="red">(relapsed)</font></small>')
else:
stringio.write(' <small><font color="green">(not relapsed)</font></small>')
else:
stringio.write(' <small><font color="red">(not a member)</font></small>')
stringio.write('<form action="takeaction.html" method="post" target="invisibleiframe">')
if lateCheckinGracePeriodIsInEffect:
stringio.write('<input type="submit" name="actiontotake" value="Checkin">')
stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin" style="color:white;background-color:green">')
else:
stringio.write('<input type="submit" name="actiontotake" value="Checkin" style="color:white;background-color:green">')
stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin">')
stringio.write('<input type="submit" name="actiontotake" value="Relapse" style="color:white;background-color:red">')
stringio.write('<input type="submit" name="actiontotake" value="Reinstate with automatic comment">')
stringio.write('<input type="submit" name="actiontotake" value="Reply with sorry-too-late comment">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment and don\'t upvote">')
stringio.write('<input type="hidden" name="username" value="' + b64encode(authorName) + '">')
stringio.write('<input type="hidden" name="bodyencodedformlcorpus" value="' + b64encode(comment.body.encode('utf-8')) + '">')
stringio.write('<input type="hidden" name="commenthash" value="' + commentHash + '">')
# stringio.write('<input type="hidden" name="commentpermalink" value="' + comment.permalink + '">')
stringio.write('</form>')
stringio.write(bleach.clean(markdown.markdown(comment.body.encode('utf-8')), tags=['p']))
stringio.write("\n<br><br>\n\n")
stringio.write('</html>')
pageString = stringio.getvalue()
stringio.close()
return Response(pageString, mimetype='text/html')
@app.route('/takeaction.html', methods=["POST"])
def takeaction():
username = b64decode(request.form["username"])
commentHash = str(request.form["commenthash"])
bodyEncodedForMLCorpus = str(request.form["bodyencodedformlcorpus"])
# commentPermalink = request.form["commentpermalink"]
actionToTake = request.form["actiontotake"]
# print commentHashesAndComments
comment = commentHashesAndComments[commentHash]
# print "comment: " + str(comment)
if actionToTake == 'Checkin':
print "checkin - " + username
subprocess.call(['./checkin.py', username])
comment.upvote()
retireCommentHash(commentHash)
recordMLCorpusCheckin(bodyEncodedForMLCorpus)
    elif actionToTake == 'Signup and checkin':
print "signup and checkin - " + username
subprocess.call(['./signup-and-checkin.sh', username])
comment.upvote()
retireCommentHash(commentHash)
recordMLCorpusSignupAndCheckin(bodyEncodedForMLCorpus)
elif actionToTake == 'Relapse':
print "relapse - " + username
subprocess.call(['./relapse.py', username])
comment.upvote()
retireCommentHash(commentHash)
recordMLCorpusRelapse(bodyEncodedForMLCorpus)
elif actionToTake == 'Reinstate with automatic comment':
print "reinstate - " + username
subprocess.call(['./reinstate.py', username])
comment.reply(reinstatedReplyText)
comment.upvote()
retireCommentHash(commentHash)
recordMLCorpusReinstate(bodyEncodedForMLCorpus)
elif actionToTake == 'Reply with sorry-too-late comment':
print "reply with sorry-too-late comment - " + username
comment.reply(sorryTooLateToSignUpReplyText)
comment.upvote()
retireCommentHash(commentHash)
recordMLCorpusTooLate(bodyEncodedForMLCorpus)
elif actionToTake == 'Skip comment':
print "Skip comment - " + username
comment.upvote()
retireCommentHash(commentHash)
recordMLCorpusSkip(bodyEncodedForMLCorpus)
elif actionToTake == "Skip comment and don't upvote":
print "Skip comment and don't upvote - " + username
retireCommentHash(commentHash)
recordMLCorpusSkip(bodyEncodedForMLCorpus)
return Response("hello", mimetype='text/html')
@app.route('/copydisplaytoclipboard.html', methods=["POST"])
def copydisplaytoclipboard():
actionToTake = request.form["actiontotake"]
if actionToTake == 'Copy display.py stdout to clipboard':
subprocess.call(['./display.py'])
if actionToTake == 'Automatically post display.py stdout':
subprocess.call(['./display.py'])
submissionText = pyperclip.paste()
submission.edit(submissionText)
return Response("hello", mimetype='text/html')
@app.route('/updategooglechart.html', methods=["POST"])
def updategooglechart():
print "TODO: Copy display to clipboard"
subprocess.call(['./update-google-chart.py'])
return Response("hello", mimetype='text/html')
def recordMLCorpusCheckin(aString):
with open("../new-ml-corpus-monthly-checkin.txt", "a") as f:
f.write(aString)
f.write("\n")
def recordMLCorpusSignupAndCheckin(aString):
with open("../new-ml-corpus-monthly-signup-and-checkin.txt", "a") as f:
f.write(aString)
f.write("\n")
def recordMLCorpusRelapse(aString):
with open("../new-ml-corpus-monthly-relapse.txt", "a") as f:
f.write(aString)
f.write("\n")
def recordMLCorpusReinstate(aString):
with open("../new-ml-corpus-monthly-reinstate.txt", "a") as f:
f.write(aString)
f.write("\n")
def recordMLCorpusTooLate(aString):
with open("../new-ml-corpus-monthly-too-late.txt", "a") as f:
f.write(aString)
f.write("\n")
def recordMLCorpusSkip(aString):
with open("../new-ml-corpus-monthly-skip.txt", "a") as f:
f.write(aString)
f.write("\n")
if __name__ == '__main__':
app.run(host='127.0.0.1', port=flaskport)
|
|
# -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, 2017 2018 Caleb Bell <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from __future__ import division
from math import log10, log, exp, isnan, pi
from fluids.numerics import secant, logspace, linspace, assert_close, assert_close1d, assert_close2d, assert_close3d
import fluids.friction
from fluids.friction import *
import pytest
from fluids.friction import _roughness, _Farshad_roughness
try:
import fuzzywuzzy
has_fuzzywuzzy = True
except ImportError:
has_fuzzywuzzy = False
try:
import mpmath
has_mpmath = True
except ImportError:
has_mpmath = False
def test_friction_basic():
assert_close(Moody(1E5, 1E-4), 0.01809185666808665)
assert_close(Alshul_1952(1E5, 1E-4), 0.018382997825686878)
assert_close(Wood_1966(1E5, 1E-4), 0.021587570560090762)
assert_close(Churchill_1973(1E5, 1E-4), 0.01846708694482294)
assert_close(Eck_1973(1E5, 1E-4), 0.01775666973488564)
assert_close(Jain_1976(1E5, 1E-4), 0.018436560312693327)
assert_close(Swamee_Jain_1976(1E5, 1E-4), 0.018452424431901808)
assert_close(Churchill_1977(1E5, 1E-4), 0.018462624566280075)
assert_close(Chen_1979(1E5, 1E-4), 0.018552817507472126)
assert_close(Round_1980(1E5, 1E-4), 0.01831475391244354)
assert_close(Shacham_1980(1E5, 1E-4), 0.01860641215097828)
assert_close(Barr_1981(1E5, 1E-4), 0.01849836032779929)
assert_close(Zigrang_Sylvester_1(1E5, 1E-4), 0.018646892425980794)
assert_close(Zigrang_Sylvester_2(1E5, 1E-4), 0.01850021312358548)
assert_close(Haaland(1E5, 1E-4), 0.018265053014793857)
assert_close(Serghides_1(1E5, 1E-4), 0.01851358983180063)
assert_close(Serghides_2(1E5, 1E-4), 0.018486377560664482)
assert_close(Tsal_1989(1E5, 1E-4), 0.018382997825686878)
assert_close(Tsal_1989(1E8, 1E-4), 0.012165854627780102)
assert_close(Manadilli_1997(1E5, 1E-4), 0.01856964649724108)
assert_close(Romeo_2002(1E5, 1E-4), 0.018530291219676177)
assert_close(Sonnad_Goudar_2006(1E5, 1E-4), 0.0185971269898162)
assert_close(Rao_Kumar_2007(1E5, 1E-4), 0.01197759334600925)
assert_close(Buzzelli_2008(1E5, 1E-4), 0.018513948401365277)
assert_close(Avci_Karagoz_2009(1E5, 1E-4), 0.01857058061066499)
assert_close(Papaevangelo_2010(1E5, 1E-4), 0.015685600818488177)
assert_close(Brkic_2011_1(1E5, 1E-4), 0.01812455874141297)
assert_close(Brkic_2011_2(1E5, 1E-4), 0.018619745410688716)
assert_close(Fang_2011(1E5, 1E-4), 0.018481390682985432)
assert_close(Clamond(1E5, 1E-4), 0.01851386607747165)
assert_close(Clamond(1E5, 1E-4, fast=True), 0.01851486771096876)
assert_close(friction_laminar(128), 0.5)
assert_close(Blasius(10000.0), 0.03164)
fd = ft_Crane(.1)
assert_close(fd, 0.01628845962146481)
assert_close(ft_Crane(1e-5), 604.8402578042682)
def test_friction():
assert_close(sum(_roughness.values()), 0.01504508)
assert_close(friction_factor(Re=1E5, eD=1E-4), 0.01851386607747165)
methods_1 = friction_factor_methods(Re=1E5, eD=1E-4)
methods_1.sort()
methods_2 = ['Clamond', 'Colebrook', 'Manadilli_1997', 'Haaland', 'Alshul_1952', 'Avci_Karagoz_2009', 'Rao_Kumar_2007', 'Zigrang_Sylvester_2', 'Eck_1973', 'Buzzelli_2008', 'Tsal_1989', 'Papaevangelo_2010', 'Barr_1981', 'Jain_1976', 'Moody', 'Brkic_2011_2', 'Brkic_2011_1', 'Swamee_Jain_1976', 'Wood_1966', 'Shacham_1980', 'Romeo_2002', 'Chen_1979', 'Fang_2011', 'Round_1980', 'Sonnad_Goudar_2006', 'Churchill_1973', 'Churchill_1977', 'Serghides_2', 'Serghides_1', 'Zigrang_Sylvester_1']
methods_2.sort()
assert methods_1 == methods_2
assert_close(friction_factor(Re=1E5, eD=1E-4, Darcy=False), 0.01851386607747165/4)
assert_close(friction_factor(Re=128), 0.5)
assert_close(friction_factor(Re=1E5, eD=0, Method=None), 0.01798977308427384)
assert_close(friction_factor(20000, eD=0.0, Method='laminar'), 0.0032)
with pytest.raises(ValueError):
friction_factor(Re=1E5, eD=0, Method='BADMETHOD')
assert ['laminar'] == friction_factor_methods(200, 0, True)
assert 31 == len(friction_factor_methods(200, 0, False))
for m in friction_factor_methods(200, 0, False):
friction_factor(Re=1E5, eD=1e-6, Method=m)
fd = ft_Crane(.1)
Di = 0.1
fd_act = Clamond(7.5E6*Di, eD=roughness_Farshad(ID='Carbon steel, bare', D=Di)/Di)
assert_close(fd, fd_act, rtol=5e-6)
def test_friction_Colebrook():
assert_close(Colebrook(1E5, 1E-4), 0.018513866077471648)
    # Test that Colebrook matches Clamond when tol=-1
assert Colebrook(1E5, 1E-4, -1) == Clamond(1E5, 1E-4)
    # Test that Colebrook uses the analytical solution when Re < 10,
    # even when the Clamond solution is specified
assert Colebrook(1, 1E-4, -1) == Colebrook(1, 1e-4)
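# Hedged cross-check (not part of the original test suite): the Colebrook equation
# is implicit in the Darcy friction factor fd,
#     1/sqrt(fd) = -2*log10(eD/3.7 + 2.51/(Re*sqrt(fd)))
# so a brute-force secant solve of that residual should agree with the library's
# closed-form Colebrook() under ordinary turbulent conditions, e.g. Re=1E5, eD=1E-4.
def Colebrook_secant_sketch(Re, eD):
    def to_solve(fd):
        return fd**-0.5 + 2.0*log10(eD/3.7 + 2.51/(Re*fd**0.5))
    return secant(to_solve, 0.02)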
@pytest.mark.slow
@pytest.mark.mpmath
@pytest.mark.skipif(not has_mpmath, reason='mpmath is not installed')
def test_Colebrook_numerical_mpmath():
# tested at n=500 for both Re and eD
Res = logspace(log10(1e-12), log10(1E12), 30) # 1E12 is too large for sympy - it slows down too much
eDs = logspace(log10(1e-20), log10(.1), 21) # 1-1e-9
for Re in Res:
for eD in eDs:
fd_exact = Colebrook(Re, eD, tol=0)
fd_numerical = Colebrook(Re, eD, tol=1e-12)
assert_close(fd_exact, fd_numerical, rtol=1e-5)
@pytest.mark.slow
@pytest.mark.mpmath
@pytest.mark.skipif(not has_mpmath, reason='mpmath is not installed')
def test_Colebrook_scipy_mpmath():
    # Fairly grueling test - check that the lambertw implementations mostly match
# NOTE the test is to Re = 1E7; at higher Res the numerical solver is almost
# always used
Res = logspace(log10(1e-12), log10(1e7), 20) # 1E12 is too large for sympy
eDs = logspace(log10(1e-20), log10(.1), 19) # 1-1e-9
for Re in Res:
for eD in eDs:
Re = float(Re)
eD = float(eD)
fd_exact = Colebrook(Re, eD, tol=0)
fd_scipy = Colebrook(Re, eD)
assert_close(fd_exact, fd_scipy, rtol=1e-9)
@pytest.mark.slow
def test_Colebrook_vs_Clamond():
Res = logspace(log10(10), log10(1E50), 40)
eDs = logspace(log10(1e-20), log10(1), 40)
for Re in Res:
for eD in eDs:
fd_exact = Colebrook(Re, eD)
fd_clamond = Clamond(Re, eD)
# Interestingly, matches to rtol=1e-9 vs. numerical solver
# But does not have such accuracy compared to mpmath
if isnan(fd_exact) or isnan(fd_clamond):
continue # older scipy on 3.4 returns a nan sometimes
assert_close(fd_exact, fd_clamond, rtol=1e-9)
# If rtol is moved to 1E-7, eD can be increased to 1
@pytest.mark.mpmath
def test_Colebrook_hard_regimes():
fd_inf_regime = Colebrook(104800000000, 2.55e-08)
assert_close(fd_inf_regime, 0.0037751087365339906, rtol=1e-10)
def test_one_phase_dP():
dP = one_phase_dP(10.0, 1000., 1E-5, .1, L=1.000)
assert_close(dP, 63.43447321097365)
def test_one_phase_dP_gravitational():
dP = one_phase_dP_gravitational(angle=90., rho=2.6)
assert_close(dP, 25.49729)
dP = one_phase_dP_gravitational(angle=90, rho=2.6, L=2.)
assert_close(dP, 25.49729*2)
def test_one_phase_dP_dz_acceleration():
dP = one_phase_dP_dz_acceleration(m=1., D=0.1, rho=827.1, dv_dP=-1.1E-5, dP_dL=5E5, dA_dL=0.0001)
assert_close(dP, 89162.89116373913)
@pytest.mark.slow
@pytest.mark.thermo
@pytest.mark.skip
def test_one_phase_dP_dz_acceleration_example():
# This requires thermo!
from thermo import Stream, Vm_to_rho
from fluids import one_phase_dP, one_phase_dP_acceleration
import numpy as np
from scipy.integrate import odeint
from fluids.numerics import assert_close
P0 = 1E5
s = Stream(['nitrogen', 'methane'], T=300, P=P0, zs=[0.5, 0.5], m=1)
rho0 = s.rho
D = 0.1
def dP_dz(P, L, acc=False):
s.flash(P=float(P), Hm=s.Hm)
        dPf = one_phase_dP(m=s.m, rho=s.rhog, mu=s.mug, D=D, roughness=0, L=1.0)
if acc:
G = 4.0*s.m/(pi*D*D)
der = s.VolumeGasMixture.property_derivative_P(P=s.P, T=s.T, zs=s.zs, ws=s.ws)
der = 1/Vm_to_rho(der, s.MW)
factor = G*G*der
dP = dPf/(1.0 + factor)
return -dP
return -dPf
ls = linspace(0, .01)
dP_noacc = odeint(dP_dz, s.P, ls, args=(False,))[-1]
s.flash(P=float(P0), Hm=s.Hm) # Reset the stream object
profile = odeint(dP_dz, s.P, ls, args=(True,))
dP_acc = profile[-1]
s.flash(P=dP_acc, Hm=s.Hm)
rho1 = s.rho
dP_acc_numerical = dP_noacc - dP_acc
dP_acc_basic = one_phase_dP_acceleration(m=s.m, D=D, rho_o=rho1, rho_i=rho0)
assert_close(dP_acc_basic, dP_acc_numerical, rtol=1E-4)
del test_one_phase_dP_dz_acceleration_example
def test_transmission_factor():
assert_close(transmission_factor(fd=0.0185), 14.704292441876154)
assert_close(transmission_factor(F=14.704292441876154), 0.0185)
assert_close(transmission_factor(0.0185), 14.704292441876154)
# Example in [1]_, lists answer as 12.65
assert_close(transmission_factor(fd=0.025), 12.649110640673516)
with pytest.raises(Exception):
transmission_factor()
def test_roughness_Farshad():
e = roughness_Farshad('Cr13, bare', 0.05)
assert_close(e, 5.3141677781137006e-05)
e = roughness_Farshad('Cr13, bare')
assert_close(e, 5.5e-05)
e = roughness_Farshad(coeffs=(0.0021, -1.0055), D=0.05)
assert_close(e, 5.3141677781137006e-05)
tot = sum([abs(j) for i in _Farshad_roughness.values() for j in i])
assert_close(tot, 7.0729095)
with pytest.raises(Exception):
roughness_Farshad('BADID', 0.05)
@pytest.mark.skipif(not has_fuzzywuzzy, reason='missing fuzzywuzzy')
def test_nearest_material_roughness():
hit1 = nearest_material_roughness('condensate pipes', clean=False)
assert hit1 == 'Seamless steel tubes, Condensate pipes in open systems or periodically operated steam pipelines'
hit2 = nearest_material_roughness('Plastic', clean=True)
assert hit2 == 'Plastic coated'
@pytest.mark.skipif(not has_fuzzywuzzy, reason='missing fuzzywuzzy')
def test_material_roughness():
e1 = material_roughness('Plastic coated')
assert_close(e1, 5e-06)
e2 = material_roughness('Plastic coated', D=1E-3)
assert_close(e2, 5.243618447826409e-06)
e3 = material_roughness('Brass')
assert_close(e3, 1.52e-06)
e4 = material_roughness('condensate pipes')
assert_close(e4, 0.0005)
ID = 'Old, poor fitting and manufacture; with an overgrown surface'
e5 = [material_roughness(ID, optimism=i) for i in (True, False)]
assert_close1d(e5, [0.001, 0.004])
def test_von_Karman():
f = von_Karman(1E-4)
f_precalc = 0.01197365149564789
assert_close(f, f_precalc)
def Prandtl_von_Karman_Nikuradse_numeric(Re):
rat = 2.51/Re
def to_solve(f):
# Good to 1E75, down to 1E-17
v = f**-0.5
return v + 2.0*log10(rat*v)
return secant(to_solve, 0.000001)
def test_Prandtl_von_Karman_Nikuradse():
Re = 200
assert_close(Prandtl_von_Karman_Nikuradse_numeric(Re), Prandtl_von_Karman_Nikuradse(Re))
def test_Prandtl_von_Karman_Nikuradse_full():
# Tested to a very high number of points
fds = []
fds_numeric = []
for Re in logspace(1E-15, 30, 40):
fds.append(Prandtl_von_Karman_Nikuradse_numeric(Re))
fds_numeric.append(Prandtl_von_Karman_Nikuradse(Re))
assert_close1d(fds, fds_numeric)
def test_helical_laminar_fd_White():
fd = helical_laminar_fd_White(250., .02, .1)
assert_close(fd, 0.4063281817830202)
assert_close(helical_laminar_fd_White(250, .02, 100), 0.256)
def test_helical_laminar_fd_Mori_Nakayama():
fd = helical_laminar_fd_Mori_Nakayama(250., .02, .1)
assert_close(fd, 0.4222458285779544)
assert_close(4.4969472, helical_laminar_fd_Mori_Nakayama(20, .02, .1))
def test_helical_laminar_fd_Schmidt():
fd = helical_laminar_fd_Schmidt(250., .02, .1)
assert_close(fd, 0.47460725672835236)
# Test convergence at low curvature
assert_close(helical_laminar_fd_Schmidt(250., 1, 1E10), friction_laminar(250))
def test_helical_turbulent_fd_Srinivasan():
fd = helical_turbulent_fd_Srinivasan(1E4, 0.01, .02)
assert_close(fd, 0.0570745212117107)
def test_helical_turbulent_fd_Schmidt():
fd = helical_turbulent_fd_Schmidt(1E4, 0.01, .02)
assert_close(fd, 0.08875550767040916)
fd = helical_turbulent_fd_Schmidt(1E4, 0.01, .2)
assert_close(fd, 0.04476560991345504)
assert_close(friction_factor(1E4), helical_turbulent_fd_Schmidt(1E4, 0.01, 1E11))
fd = helical_turbulent_fd_Schmidt(1E6, 0.01, .02)
assert_close(fd, 0.04312877383550924)
def test_helical_turbulent_fd_Mori_Nakayama():
# Formula in [1]_ is hard to read, but the powers have been confirmed in
# two sources to be 1/5. [3]_ butchers the formula's brackets/power raising,
# but is otherwise correct.
fd = helical_turbulent_fd_Mori_Nakayama(1E4, 0.01, .2)
assert_close(fd, 0.037311802071379796)
def test_helical_turbulent_fd_Prasad():
# Checks out, formula in [2]_ is the same as in [1]_!
fd = helical_turbulent_fd_Prasad(1E4, 0.01, .2)
assert_close(fd, 0.043313098093994626)
assert_close(helical_turbulent_fd_Prasad(1E4, 0.01, 1E20), friction_factor(1E4))
def test_helical_turbulent_fd_Czop():
fd = helical_turbulent_fd_Czop(1E4, 0.01, .2)
assert_close(fd, 0.02979575250574106)
def test_helical_turbulent_fd_Guo():
fd = helical_turbulent_fd_Guo(2E5, 0.01, .2)
assert_close(fd, 0.022189161013253147)
def test_helical_turbulent_fd_Ju():
fd = helical_turbulent_fd_Ju(1E4, 0.01, .2)
assert_close(fd, 0.04945959480770937)
assert_close(helical_turbulent_fd_Ju(1E4, 0.01, 1E80), friction_factor(1E4))
def test_helical_turbulent_fd_Mandal_Nigam():
fd = helical_turbulent_fd_Mandal_Nigam(1E4, 0.01, .2)
assert_close(fd, 0.03831658117115902)
assert_close(helical_turbulent_fd_Mandal_Nigam(1E4, 0.01, 1E80), friction_factor(1E4))
def test_helical_transition_Re_Seth_Stahel():
# Read the original
assert_close(helical_transition_Re_Seth_Stahel(1, 7.), 7645.0599897402535)
assert_close(helical_transition_Re_Seth_Stahel(1, 1E20), 1900)
def test_helical_transition_Re_Ito():
assert_close(helical_transition_Re_Ito(1, 7.), 10729.972844697186)
def test_helical_transition_Re_Kubair_Kuloor():
assert_close(helical_transition_Re_Kubair_Kuloor(1, 7), 8625.986927588123)
def test_helical_transition_Re_Kutateladze_Borishanskii():
assert_close(helical_transition_Re_Kutateladze_Borishanskii(1, 7.), 7121.143774574058)
assert_close(helical_transition_Re_Kutateladze_Borishanskii(1, 1E20), 2300)
def test_helical_transition_Re_Schmidt():
assert_close(helical_transition_Re_Schmidt(1, 7.), 10540.094061770815)
assert_close(helical_transition_Re_Schmidt(1, 1E20), 2300)
def test_helical_transition_Re_Srinivasan():
assert_close(helical_transition_Re_Srinivasan(1, 7.), 11624.704719832524,)
assert_close(helical_transition_Re_Srinivasan(1, 1E20), 2100)
def test_friction_factor_curved():
fd = friction_factor_curved(2E4, 0.01, .02)
assert_close(fd, 0.050134646621603024)
fd = friction_factor_curved(250, .02, .1)
assert_close(fd, 0.47460725672835236)
fd_transition = [friction_factor_curved(i, 0.01, .02) for i in [16779, 16780]]
assert_close1d(fd_transition, [0.03323676794260526, 0.057221855744623344])
with pytest.raises(Exception):
friction_factor_curved(16779, 0.01, .02, Method='BADMETHOD')
with pytest.raises(Exception):
friction_factor_curved(16779, 0.01, .02, Rec_method='BADMETHOD')
fd_rough_false = friction_factor_curved(20000, 0.01, .02, roughness=.0001, turbulent_method='Guo')
assert_close(fd_rough_false, 0.1014240343662085)
methods = friction_factor_curved_methods(20000, 0.01, .02, check_ranges=True)
assert sorted(methods) == sorted(['Guo','Ju','Schmidt turbulent','Prasad','Mandel Nigam','Mori Nakayama turbulent','Czop', 'Srinivasan turbulent'])
methods = friction_factor_curved_methods(2000, 0.01, .02, check_ranges=True)
assert sorted(methods) == sorted(['White', 'Schmidt laminar', 'Mori Nakayama laminar'])
assert 'Schmidt turbulent' in friction_factor_curved_methods(Re=1E5, Di=0.02, Dc=0.5)
assert 11 == len(friction_factor_curved_methods(Re=1E5, Di=0.02, Dc=0.5, check_ranges=False))
for m in friction_factor_curved_methods(Re=1E5, Di=0.02, Dc=0.5, check_ranges=False):
friction_factor_curved(2000, 0.01, .02, Method=m)
# Test the Fanning case
fd = friction_factor_curved(2E4, 0.01, .02, Darcy=False)
assert_close(fd, 0.012533661655400756)
for m in ['Seth Stahel', 'Ito', 'Kubair Kuloor', 'Kutateladze Borishanskii', 'Schmidt', 'Srinivasan']:
helical_Re_crit(Di=0.02, Dc=0.5, Method=m)
def test_friction_plate():
fd = friction_plate_Martin_1999(Re=20000., plate_enlargement_factor=1.15)
assert_close(fd, 2.284018089834134)
fd = friction_plate_Martin_1999(Re=1999., plate_enlargement_factor=1.15)
assert_close(fd, 2.749383588479863)
fd = friction_plate_Martin_VDI(Re=20000., plate_enlargement_factor=1.15)
assert_close(fd, 2.702534119024076)
fd = friction_plate_Martin_VDI(Re=1999., plate_enlargement_factor=1.15)
assert_close(fd, 3.294294334690556)
fd = friction_plate_Muley_Manglik(Re=2000., chevron_angle=45., plate_enlargement_factor=1.2)
assert_close(fd, 1.0880870804075413)
def test_friction_Kumar():
from fluids.friction import Kumar_beta_list, Kumar_fd_Res
fd = friction_plate_Kumar(2000, 30)
assert_close(fd, 2.9760669055634517)
all_ans_expect = [[[22.22222222222222, 18.900854099814858, 5.181226661414687, 5.139730745446174],
[20.88888888888889, 17.09090909090909, 3.656954441625244, 3.609575756782771]],
[[13.428571428571427, 12.000171923243482, 1.7788367041690634, 1.7788497785371564],
[9.714285714285714, 8.5, 1.2332865464612235, 1.2320492987599356]],
[[7.157894736842104, 6.590102034105372, 1.2332865464612235, 1.2320492987599356],
[5.052631578947368, 4.571428571428571, 0.9576862861589914, 0.9547729646969146]],
[[2.4615384615384617, 2.374448634025773, 0.8393834232628009, 0.8379103279437352],
[2.4615384615384617, 2.3414634146341466, 0.7519331759748705, 0.7502394735017442]],
[[1.9591836734693877, 1.9015330284979595, 0.6797898512309091, 0.6799788644298855],
[1.9591836734693877, 1.9015330284979595, 0.6797898512309091, 0.6799788644298855]]]
all_ans = []
for i, beta_main in enumerate(Kumar_beta_list):
beta_ans = []
for beta in (beta_main-1, beta_main+1):
Re_ans = []
for Re_main in Kumar_fd_Res[i]:
for Re in [Re_main-1, Re_main+1]:
ans = friction_plate_Kumar(Re, beta)
Re_ans.append(ans)
beta_ans.append(Re_ans)
all_ans.append(beta_ans)
assert_close3d(all_ans, all_ans_expect)
|
|
import copy
import datetime
import json
import os
from typing import Dict, List, Optional
import jinja2
import jsonschema
import yaml
from ray_release.anyscale_util import find_cloud_by_name
from ray_release.exception import ReleaseTestConfigError
from ray_release.logger import logger
from ray_release.util import deep_update
class Test(dict):
pass
DEFAULT_WHEEL_WAIT_TIMEOUT = 7200 # Two hours
DEFAULT_COMMAND_TIMEOUT = 1800
DEFAULT_BUILD_TIMEOUT = 1800
DEFAULT_CLUSTER_TIMEOUT = 1800
DEFAULT_CLOUD_ID = "cld_4F7k8814aZzGG8TNUGPKnc"
DEFAULT_ENV = {
"DATESTAMP": str(datetime.datetime.now().strftime("%Y%m%d")),
"TIMESTAMP": str(int(datetime.datetime.now().timestamp())),
"EXPIRATION_1D": str(
(datetime.datetime.now() + datetime.timedelta(days=1)).strftime("%Y-%m-%d")
),
"EXPIRATION_2D": str(
(datetime.datetime.now() + datetime.timedelta(days=2)).strftime("%Y-%m-%d")
),
"EXPIRATION_3D": str(
(datetime.datetime.now() + datetime.timedelta(days=3)).strftime("%Y-%m-%d")
),
}
RELEASE_PACKAGE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
RELEASE_TEST_SCHEMA_FILE = os.path.join(
RELEASE_PACKAGE_DIR, "ray_release", "schema.json"
)
class TestEnvironment(dict):
pass
_test_env = None
def get_test_environment():
global _test_env
if _test_env:
return _test_env
_test_env = TestEnvironment(**DEFAULT_ENV)
return _test_env
def set_test_env_var(key: str, value: str):
test_env = get_test_environment()
test_env[key] = value
def get_test_env_var(key: str, default: Optional[str] = None):
test_env = get_test_environment()
return test_env.get(key, default)
def read_and_validate_release_test_collection(config_file: str) -> List[Test]:
"""Read and validate test collection from config file"""
with open(config_file, "rt") as fp:
test_config = yaml.safe_load(fp)
validate_release_test_collection(test_config)
return test_config
def load_schema_file(path: Optional[str] = None) -> Dict:
path = path or RELEASE_TEST_SCHEMA_FILE
with open(path, "rt") as fp:
return json.load(fp)
def validate_release_test_collection(test_collection: List[Test]):
try:
schema = load_schema_file()
except Exception as e:
raise ReleaseTestConfigError(
f"Could not load release test validation schema: {e}"
) from e
num_errors = 0
for test in test_collection:
error = validate_test(test, schema)
if error:
logger.error(
f"Failed to validate test {test.get('name', '(unnamed)')}: {error}"
)
num_errors += 1
if num_errors > 0:
raise ReleaseTestConfigError(
f"Release test configuration error: Found {num_errors} test "
f"validation errors."
)
def validate_test(test: Test, schema: Optional[Dict] = None) -> Optional[str]:
schema = schema or load_schema_file()
try:
jsonschema.validate(test, schema=schema)
except (jsonschema.ValidationError, jsonschema.SchemaError) as e:
return str(e.message)
except Exception as e:
return str(e)
def find_test(test_collection: List[Test], test_name: str) -> Optional[Test]:
"""Find test with `test_name` in `test_collection`"""
for test in test_collection:
if test["name"] == test_name:
return test
return None
def as_smoke_test(test: Test) -> Test:
if "smoke_test" not in test:
logger.warning(
f"Requested smoke test, but test with name {test['name']} does "
f"not have any smoke test configuration."
)
return test
smoke_test_config = test.pop("smoke_test")
new_test = deep_update(test, smoke_test_config)
return new_test
def get_wheels_sanity_check(commit: Optional[str] = None):
if not commit:
cmd = (
"python -c 'import ray; print("
'"No commit sanity check available, but this is the '
"Ray wheel commit:\", ray.__commit__)'"
)
else:
cmd = (
f"python -c 'import ray; "
f'assert ray.__commit__ == "{commit}", ray.__commit__\''
)
return cmd
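# Hedged example (not part of the original module): with a pinned commit, the helper
# above returns a shell snippet roughly of the form
#   python -c 'import ray; assert ray.__commit__ == "<sha>", ray.__commit__'
# while without a commit it only prints the wheel's commit for manual inspection.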
def load_and_render_yaml_template(
template_path: str, env: Optional[Dict] = None
) -> Optional[Dict]:
if not template_path:
return None
if not os.path.exists(template_path):
raise ReleaseTestConfigError(
f"Cannot load yaml template from {template_path}: Path not found."
)
with open(template_path, "rt") as f:
content = f.read()
render_env = copy.deepcopy(os.environ)
if env:
render_env.update(env)
try:
        content = jinja2.Template(content).render(env=render_env)
return yaml.safe_load(content)
except Exception as e:
raise ReleaseTestConfigError(
f"Error rendering/loading yaml template: {e}"
) from e
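# Hedged sketch (not part of the original module): the templates rendered above are
# plain YAML with Jinja2 expressions that read from the `env` mapping. A
# hypothetical cluster env template could contain, for example,
#   env_vars:
#     RAY_WHEELS: "{{ env['RAY_WHEELS'] }}"
#     RAY_WHEELS_SANITY_CHECK: "{{ env['RAY_WHEELS_SANITY_CHECK'] }}"
# which load_test_cluster_env() below fills in before the cluster is built.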
def load_test_cluster_env(test: Test, ray_wheels_url: str) -> Optional[Dict]:
cluster_env_file = test["cluster"]["cluster_env"]
cluster_env_path = os.path.join(
RELEASE_PACKAGE_DIR, test.get("working_dir", ""), cluster_env_file
)
env = get_test_environment()
commit = env.get("RAY_COMMIT", None)
env["RAY_WHEELS_SANITY_CHECK"] = get_wheels_sanity_check(commit)
env["RAY_WHEELS"] = ray_wheels_url
return load_and_render_yaml_template(cluster_env_path, env=env)
def load_test_cluster_compute(test: Test) -> Optional[Dict]:
cluster_compute_file = test["cluster"]["cluster_compute"]
cluster_compute_path = os.path.join(
RELEASE_PACKAGE_DIR, test.get("working_dir", ""), cluster_compute_file
)
env = get_test_environment()
cloud_id = get_test_cloud_id(test)
env["ANYSCALE_CLOUD_ID"] = cloud_id
return load_and_render_yaml_template(cluster_compute_path, env=env)
def get_test_cloud_id(test: Test) -> str:
cloud_id = test["cluster"].get("cloud_id", None)
cloud_name = test["cluster"].get("cloud_name", None)
if cloud_id and cloud_name:
raise RuntimeError(
f"You can't supply both a `cloud_name` ({cloud_name}) and a "
f"`cloud_id` ({cloud_id}) in the test cluster configuration. "
f"Please provide only one."
)
elif cloud_name and not cloud_id:
cloud_id = find_cloud_by_name(cloud_name)
if not cloud_id:
raise RuntimeError(f"Couldn't find cloud with name `{cloud_name}`.")
else:
cloud_id = cloud_id or DEFAULT_CLOUD_ID
return cloud_id
|
|
import math
import paddle.v2 as paddle
import reader
__all__ = ["training_net", "inference_net", "feeding"]
feeding = {
reader.Q_IDS_STR: reader.Q_IDS,
reader.E_IDS_STR: reader.E_IDS,
reader.QE_COMM_STR: reader.QE_COMM,
reader.EE_COMM_STR: reader.EE_COMM,
reader.LABELS_STR: reader.LABELS
}
def get_embedding(input, word_vec_dim, wordvecs):
"""
Define word embedding
:param input: layer input
:type input: LayerOutput
:param word_vec_dim: dimension of the word embeddings
:type word_vec_dim: int
:param wordvecs: word embedding matrix
:type wordvecs: numpy array
:return: embedding
:rtype: LayerOutput
"""
return paddle.layer.embedding(
input=input,
size=word_vec_dim,
param_attr=paddle.attr.ParamAttr(
name="wordvecs", is_static=True, initializer=lambda _: wordvecs))
def encoding_question(question, q_lstm_dim, latent_chain_dim, word_vec_dim,
drop_rate, wordvecs, default_init_std, default_l2_rate):
"""
Define network for encoding question
:param question: question token ids
:type question: LayerOutput
:param q_lstm_dim: dimension of the question LSTM
:type q_lstm_dim: int
:param latent_chain_dim: dimension of the attention layer
:type latent_chain_dim: int
:param word_vec_dim: dimension of the word embeddings
:type word_vec_dim: int
:param drop_rate: dropout rate
:type drop_rate: float
:param wordvecs: word embedding matrix
:type wordvecs: numpy array
:param default_init_std: default initial standard deviation
:type default_init_std: float
:param default_l2_rate: default l2 rate
:type default_l2_rate: float
:return: question encoding
:rtype: LayerOutput
"""
# word embedding
emb = get_embedding(question, word_vec_dim, wordvecs)
# question LSTM
wx = paddle.layer.fc(act=paddle.activation.Linear(),
size=q_lstm_dim * 4,
input=emb,
param_attr=paddle.attr.ParamAttr(
name="_q_hidden1.w0",
initial_std=default_init_std,
l2_rate=default_l2_rate),
bias_attr=paddle.attr.ParamAttr(
name="_q_hidden1.wbias",
initial_std=0,
l2_rate=default_l2_rate))
q_rnn = paddle.layer.lstmemory(
input=wx,
bias_attr=paddle.attr.ParamAttr(
name="_q_rnn1.wbias", initial_std=0, l2_rate=default_l2_rate),
param_attr=paddle.attr.ParamAttr(
name="_q_rnn1.w0",
initial_std=default_init_std,
l2_rate=default_l2_rate))
q_rnn = paddle.layer.dropout(q_rnn, drop_rate)
# self attention
fc = paddle.layer.fc(act=paddle.activation.Tanh(),
size=latent_chain_dim,
input=q_rnn,
param_attr=paddle.attr.ParamAttr(
name="_attention_layer1.w0",
initial_std=default_init_std,
l2_rate=default_l2_rate),
bias_attr=False)
weight = paddle.layer.fc(size=1,
act=paddle.activation.SequenceSoftmax(),
input=fc,
param_attr=paddle.attr.ParamAttr(
name="_attention_weight.w0",
initial_std=default_init_std,
l2_rate=default_l2_rate),
bias_attr=False)
scaled_q_rnn = paddle.layer.scaling(input=q_rnn, weight=weight)
q_encoding = paddle.layer.pooling(
input=scaled_q_rnn, pooling_type=paddle.pooling.Sum())
return q_encoding
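# Hedged note (not part of the original model code): the self-attention block above
# scores every question position with a sequence softmax (the size-1 `weight`
# layer), scales the matching LSTM outputs by those scores, and sum-pools the
# scaled sequence, so q_encoding is a single fixed-length vector regardless of the
# question length.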
def encoding_evidence(evidence, qe_comm, ee_comm, q_encoding, e_lstm_dim,
word_vec_dim, com_vec_dim, drop_rate, wordvecs,
default_init_std, default_l2_rate):
"""
Define network for encoding evidence
    :param evidence: evidence token ids
    :type evidence: LayerOutput
    :param qe_comm: qe.comm features
    :type qe_comm: LayerOutput
    :param ee_comm: ee.comm features
    :type ee_comm: LayerOutput
:param q_encoding: question encoding, a fixed-length vector
:type q_encoding: LayerOutput
:param e_lstm_dim: dimension of the evidence LSTMs
:type e_lstm_dim: int
:param word_vec_dim: dimension of the word embeddings
:type word_vec_dim: int
:param com_vec_dim: dimension of the qe.comm and ee.comm feature embeddings
:type com_vec_dim: int
:param drop_rate: dropout rate
:type drop_rate: float
:param wordvecs: word embedding matrix
:type wordvecs: numpy array
:param default_init_std: default initial standard deviation
:type default_init_std: float
:param default_l2_rate: default l2 rate
:type default_l2_rate: float
:return: evidence encoding
:rtype: LayerOutput
"""
def lstm(idx, reverse, inputs):
"""LSTM wrapper"""
bias_attr = paddle.attr.ParamAttr(
name="_e_hidden%d.wbias" % idx,
initial_std=0,
l2_rate=default_l2_rate)
with paddle.layer.mixed(size=e_lstm_dim * 4, bias_attr=bias_attr) as wx:
for i, input in enumerate(inputs):
param_attr = paddle.attr.ParamAttr(
name="_e_hidden%d.w%d" % (idx, i),
initial_std=default_init_std,
l2_rate=default_l2_rate)
wx += paddle.layer.full_matrix_projection(
input=input, param_attr=param_attr)
e_rnn = paddle.layer.lstmemory(
input=wx,
reverse=reverse,
bias_attr=paddle.attr.ParamAttr(
name="_e_rnn%d.wbias" % idx,
initial_std=0,
l2_rate=default_l2_rate),
param_attr=paddle.attr.ParamAttr(
name="_e_rnn%d.w0" % idx,
initial_std=default_init_std,
l2_rate=default_l2_rate))
e_rnn = paddle.layer.dropout(e_rnn, drop_rate)
return e_rnn
# share word embeddings with question
emb = get_embedding(evidence, word_vec_dim, wordvecs)
# copy q_encoding len(evidence) times
q_encoding_expand = paddle.layer.expand(
input=q_encoding, expand_as=evidence)
# feature embeddings
comm_initial_std = 1 / math.sqrt(64.0)
qe_comm_emb = paddle.layer.embedding(
input=qe_comm,
size=com_vec_dim,
param_attr=paddle.attr.ParamAttr(
name="_cw_embedding.w0",
initial_std=comm_initial_std,
l2_rate=default_l2_rate))
ee_comm_emb = paddle.layer.embedding(
input=ee_comm,
size=com_vec_dim,
param_attr=paddle.attr.ParamAttr(
name="_eecom_embedding.w0",
initial_std=comm_initial_std,
l2_rate=default_l2_rate))
# evidence LSTMs
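    # three stacked LSTMs: a forward pass over the word/feature embeddings,
    # a backward pass over its output, and a second forward pass that also
    # receives the first layer's output as a cross-layer (skip) connection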
first_layer_extra_inputs = [q_encoding_expand, qe_comm_emb, ee_comm_emb]
e_rnn1 = lstm(1, False, [emb] + first_layer_extra_inputs)
e_rnn2 = lstm(2, True, [e_rnn1])
e_rnn3 = lstm(3, False, [e_rnn2, e_rnn1]) # with cross layer links
return e_rnn3
def define_data(dict_dim, label_num):
"""
Define data layers
:param dict_dim: number of words in the vocabulary
:type dict_dim: int
    :param label_num: number of labels (BIO: 3, BIO2: 4)
:type label_num: int
:return: data layers
:rtype: tuple of LayerOutput
"""
question = paddle.layer.data(
name=reader.Q_IDS_STR,
type=paddle.data_type.integer_value_sequence(dict_dim))
evidence = paddle.layer.data(
name=reader.E_IDS_STR,
type=paddle.data_type.integer_value_sequence(dict_dim))
qe_comm = paddle.layer.data(
name=reader.QE_COMM_STR,
type=paddle.data_type.integer_value_sequence(2))
ee_comm = paddle.layer.data(
name=reader.EE_COMM_STR,
type=paddle.data_type.integer_value_sequence(2))
label = paddle.layer.data(
name=reader.LABELS_STR,
type=paddle.data_type.integer_value_sequence(label_num),
layer_attr=paddle.attr.ExtraAttr(device=-1))
return question, evidence, qe_comm, ee_comm, label
def define_common_network(conf):
"""
Define common network
:param conf: network conf
:return: CRF features, golden labels
:rtype: tuple
"""
# define data layers
question, evidence, qe_comm, ee_comm, label = \
define_data(conf.dict_dim, conf.label_num)
# encode question
q_encoding = encoding_question(question, conf.q_lstm_dim,
conf.latent_chain_dim, conf.word_vec_dim,
conf.drop_rate, conf.wordvecs,
conf.default_init_std, conf.default_l2_rate)
# encode evidence
e_encoding = encoding_evidence(
evidence, qe_comm, ee_comm, q_encoding, conf.e_lstm_dim,
conf.word_vec_dim, conf.com_vec_dim, conf.drop_rate, conf.wordvecs,
conf.default_init_std, conf.default_l2_rate)
# pre-compute CRF features
crf_feats = paddle.layer.fc(act=paddle.activation.Linear(),
input=e_encoding,
size=conf.label_num,
param_attr=paddle.attr.ParamAttr(
name="_output.w0",
initial_std=conf.default_init_std,
l2_rate=conf.default_l2_rate),
bias_attr=False)
return crf_feats, label
def training_net(conf):
"""
Define training network
:param conf: network conf
:return: CRF cost
:rtype: LayerOutput
"""
e_encoding, label = define_common_network(conf)
crf = paddle.layer.crf(input=e_encoding,
label=label,
size=conf.label_num,
param_attr=paddle.attr.ParamAttr(
name="_crf.w0",
initial_std=conf.default_init_std,
l2_rate=conf.default_l2_rate),
layer_attr=paddle.attr.ExtraAttr(device=-1))
return crf
def inference_net(conf):
"""
    Define inference network
    :param conf: network conf
    :return: CRF Viterbi decoding result
:rtype: LayerOutput
"""
e_encoding, label = define_common_network(conf)
ret = paddle.layer.crf_decoding(
input=e_encoding,
size=conf.label_num,
param_attr=paddle.attr.ParamAttr(name="_crf.w0"),
layer_attr=paddle.attr.ExtraAttr(device=-1))
return ret
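if __name__ == "__main__":
    # Minimal wiring sketch (assumed values, not part of the original model):
    # it builds the training cost from a hypothetical configuration object; a
    # real run would load pre-trained `wordvecs` and the vocabulary from disk.
    import numpy as np
    class _DemoConf(object):
        dict_dim = 1000            # vocabulary size (assumed)
        label_num = 4              # BIO2 tagging scheme
        word_vec_dim = 64
        q_lstm_dim = 64
        e_lstm_dim = 64
        latent_chain_dim = 64
        com_vec_dim = 2
        drop_rate = 0.05
        default_init_std = 0.01
        default_l2_rate = 8e-4
        wordvecs = np.zeros((dict_dim, word_vec_dim), dtype="float32")
    paddle.init(use_gpu=False, trainer_count=1)
    cost = training_net(_DemoConf())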
|
|
import pytest
from pytest import approx
from math import radians, nan, sqrt, isnan
import pymap3d as pm
lla0 = (42, -82, 200)
rlla0 = (radians(lla0[0]), radians(lla0[1]), lla0[2])
xyz0 = (660675.2518247, -4700948.68316, 4245737.66222)
ELL = pm.Ellipsoid()
A = ELL.semimajor_axis
B = ELL.semiminor_axis
atol_dist = 1e-6 # 1 micrometer
@pytest.mark.parametrize("lla", [(42, -82, 200), ([42], [-82], [200])], ids=("scalar", "list"))
def test_scalar_geodetic2ecef(lla):
"""
    verify we can handle the wide variety of input data types users might use
"""
if isinstance(lla[0], list):
pytest.importorskip("numpy")
x0, y0, z0 = pm.geodetic2ecef(*lla)
assert (x0, y0, z0) == approx(xyz0)
def test_3d_geodetic2ecef():
np = pytest.importorskip("numpy")
lla = (np.atleast_3d(42), np.atleast_3d(-82), np.atleast_3d(200))
x0, y0, z0 = pm.geodetic2ecef(*lla)
assert (x0, y0, z0) == approx(xyz0)
def test_scalar_ecef2geodetic():
"""
    verify we can handle the wide variety of input data types users might use
"""
lat, lon, alt = pm.ecef2geodetic(xyz0[0], xyz0[1], xyz0[2])
assert [lat, lon, alt] == approx(lla0, rel=1e-4)
def test_3d_ecef2geodetic():
np = pytest.importorskip("numpy")
xyz = (np.atleast_3d(xyz0[0]), np.atleast_3d(xyz0[1]), np.atleast_3d(xyz0[2]))
lat, lon, alt = pm.ecef2geodetic(*xyz)
assert [lat, lon, alt] == approx(lla0, rel=1e-4)
def test_array_ecef2geodetic():
"""
    tests that ecef2geodetic can handle numpy array data in addition to scalar floats
"""
np = pytest.importorskip("numpy")
# test values with no points inside ellipsoid
lla0_array = (
np.array([lla0[0], lla0[0]]),
np.array([lla0[1], lla0[1]]),
np.array([lla0[2], lla0[2]]),
)
xyz = pm.geodetic2ecef(*lla0_array)
lats, lons, alts = pm.ecef2geodetic(*xyz)
assert lats == approx(lla0_array[0])
assert lons == approx(lla0_array[1])
assert alts == approx(lla0_array[2])
# test values with some (but not all) points inside ellipsoid
lla0_array_inside = (
np.array([lla0[0], lla0[0]]),
np.array([lla0[1], lla0[1]]),
np.array([lla0[2], -lla0[2]]),
)
xyz = pm.geodetic2ecef(*lla0_array_inside)
lats, lons, alts = pm.ecef2geodetic(*xyz)
assert lats == approx(lla0_array_inside[0])
assert lons == approx(lla0_array_inside[1])
assert alts == approx(lla0_array_inside[2])
def test_xarray():
xarray = pytest.importorskip("xarray")
xr_lla = xarray.DataArray(list(lla0))
xyz = pm.geodetic2ecef(*xr_lla)
assert xyz == approx(xyz0)
# %%
xr_xyz = xarray.DataArray(list(xyz0))
lla = pm.ecef2geodetic(*xr_xyz)
assert lla == approx(lla0)
def test_pandas():
pandas = pytest.importorskip("pandas")
pd_lla = pandas.Series(lla0)
xyz = pm.geodetic2ecef(*pd_lla)
assert xyz == approx(xyz0)
# %% dataframe degenerates to series
pd_lla = pandas.DataFrame([[*lla0], [*lla0]], columns=["lat", "lon", "alt_m"])
xyz = pm.geodetic2ecef(pd_lla["lat"], pd_lla["lon"], pd_lla["alt_m"])
assert xyz[0].values == approx(xyz0[0])
assert xyz[1].values == approx(xyz0[1])
assert xyz[2].values == approx(xyz0[2])
def test_ecef():
xyz = pm.geodetic2ecef(*lla0)
assert xyz == approx(xyz0)
x, y, z = pm.geodetic2ecef(*rlla0, deg=False)
assert x == approx(xyz[0])
assert y == approx(xyz[1])
assert z == approx(xyz[2])
with pytest.raises(ValueError):
pm.geodetic2ecef(-100, lla0[1], lla0[2])
assert pm.ecef2geodetic(*xyz) == approx(lla0)
assert pm.ecef2geodetic(*xyz, deg=False) == approx(rlla0)
assert pm.ecef2geodetic((A - 1) / sqrt(2), (A - 1) / sqrt(2), 0) == approx([0, 45, -1])
@pytest.mark.parametrize(
"lla, xyz",
[
((0, 0, -1), (A - 1, 0, 0)),
((0, 90, -1), (0, A - 1, 0)),
((0, -90, -1), (0, -A + 1, 0)),
((90, 0, -1), (0, 0, B - 1)),
((90, 15, -1), (0, 0, B - 1)),
((-90, 0, -1), (0, 0, -B + 1)),
],
)
def test_geodetic2ecef(lla, xyz):
assert pm.geodetic2ecef(*lla) == approx(xyz, abs=atol_dist)
@pytest.mark.parametrize(
"xyz, lla",
[
((A - 1, 0, 0), (0, 0, -1)),
((0, A - 1, 0), (0, 90, -1)),
((0, 0, B - 1), (90, 0, -1)),
((0, 0, -B + 1), (-90, 0, -1)),
((-A + 1, 0, 0), (0, 180, -1)),
],
)
def test_ecef2geodetic(xyz, lla):
lat, lon, alt = pm.ecef2geodetic(*xyz)
assert lat == approx(lla[0])
assert lon == approx(lla[1])
assert alt == approx(lla[2])
@pytest.mark.parametrize(
"aer,lla,lla0",
[
((33, 77, 1000), (42.0016981935, -81.99852, 1174.374035), (42, -82, 200)),
((0, 90, 10000), (0, 0, 10000), (0, 0, 0)),
],
)
def test_aer_geodetic(aer, lla, lla0):
lat1, lon1, alt1 = pm.aer2geodetic(*aer, *lla0)
assert lat1 == approx(lla[0])
assert lon1 == approx(lla[1])
assert alt1 == approx(lla[2])
assert isinstance(lat1, float)
assert isinstance(lon1, float)
assert isinstance(alt1, float)
raer = (radians(aer[0]), radians(aer[1]), aer[2])
rlla0 = (radians(lla0[0]), radians(lla0[1]), lla0[2])
assert pm.aer2geodetic(*raer, *rlla0, deg=False) == approx(
(radians(lla[0]), radians(lla[1]), lla[2])
)
with pytest.raises(ValueError):
pm.aer2geodetic(aer[0], aer[1], -1, *lla0)
assert pm.geodetic2aer(*lla, *lla0) == approx(aer, rel=1e-3)
assert pm.geodetic2aer(radians(lla[0]), radians(lla[1]), lla[2], *rlla0, deg=False) == approx(
raer, rel=1e-3
)
def test_scalar_nan():
a, e, r = pm.geodetic2aer(nan, nan, nan, *lla0)
assert isnan(a) and isnan(e) and isnan(r)
lat, lon, alt = pm.aer2geodetic(nan, nan, nan, *lla0)
assert isnan(lat) and isnan(lon) and isnan(alt)
def test_allnan():
np = pytest.importorskip("numpy")
anan = np.empty((10, 10))
anan.fill(nan)
assert np.isnan(pm.geodetic2aer(anan, anan, anan, *lla0)).all()
assert np.isnan(pm.aer2geodetic(anan, anan, anan, *lla0)).all()
def test_somenan():
np = pytest.importorskip("numpy")
xyz = np.stack((xyz0, (nan, nan, nan)))
lat, lon, alt = pm.ecef2geodetic(xyz[:, 0], xyz[:, 1], xyz[:, 2])
assert (lat[0], lon[0], alt[0]) == approx(lla0)
|
|
from __future__ import absolute_import, print_function
import logging
from types import LambdaType
from sentry.models import Organization
from sentry.web.frontend.base import BaseView
from sentry.utils.session_store import RedisSessionStore
from sentry.utils.hashlib import md5_text
from sentry.web.helpers import render_to_response
from sentry import analytics
class PipelineProvider(object):
"""
    A class implementing the PipelineProvider interface provides the pipeline
    views that the Pipeline will traverse.
"""
def get_pipeline_views(self):
"""
Returns a list of instantiated views which implement the PipelineView
interface. Each view will be dispatched in order.
>>> return [OAuthInitView(), OAuthCallbackView()]
"""
raise NotImplementedError
def set_config(self, config):
"""
        Use set_config to allow additional provider configuration to be
        assigned to the provider instance. This is useful, for example, when
        nesting pipelines and the provider needs to be configured differently.
"""
self.config = config
def set_pipeline(self, pipeline):
"""
Used by the pipeline to give the provider access to the executing pipeline.
"""
self.pipeline = pipeline
class PipelineView(BaseView):
"""
    A class implementing the PipelineView interface may be used in a
    PipelineProvider's get_pipeline_views list.
"""
def dispatch(self, request, pipeline):
"""
        Called on each request; the active pipeline is passed in and can and
        should be used to bind data and to traverse the pipeline.
"""
raise NotImplementedError
class NestedPipelineView(PipelineView):
"""
    A NestedPipelineView can be used within other pipelines to process another
    pipeline within a pipeline. Note that the nested pipeline's finish_pipeline
    will NOT be called; instead its data is bound into the parent pipeline and
    the parent pipeline is moved to the next step.
    Useful for embedding an identity authentication pipeline.
"""
def __init__(self, bind_key, pipeline_cls, provider_key, config=None):
self.provider_key = provider_key
self.config = config or {}
class NestedPipeline(pipeline_cls):
def set_parent_pipeline(self, parent_pipeline):
self.parent_pipeline = parent_pipeline
def finish_pipeline(self):
self.parent_pipeline.bind_state(bind_key, self.fetch_state())
self.clear_session()
return self.parent_pipeline.next_step()
self.pipeline_cls = NestedPipeline
def dispatch(self, request, pipeline):
nested_pipeline = self.pipeline_cls(
organization=pipeline.organization,
request=request,
provider_key=self.provider_key,
config=self.config,
)
nested_pipeline.set_parent_pipeline(pipeline)
# nested_pipeline.bind_state('_parent', pipeline.fetch_state())
if not nested_pipeline.is_valid():
nested_pipeline.initialize()
return nested_pipeline.current_step()
class Pipeline(object):
"""
Pipeline provides a mechanism to guide the user through a request
'pipeline', where each view may be completed by calling the ``next_step``
pipeline method to traverse through the pipe.
The pipeline works with a PipelineProvider object which provides the
pipeline views and is made available to the views through the passed in
pipeline.
:provider_manager:
        A class property that must be specified to allow for lookup of a provider
        implementation object given its key.
:provider_model_cls:
        The Provider model class whose instances represent objects implementing
        the PipelineProvider interface. This is used to look up the instance
        when constructing an in-progress pipeline (get_for_request).
:config:
        An object that specifies additional pipeline and provider runtime
        configuration. An example usage is for OAuth identity providers, for
        overriding the scopes. The config object will be passed into the
        provider using the ``set_config`` method.
"""
pipeline_name = None
provider_manager = None
provider_model_cls = None
@classmethod
def get_for_request(cls, request):
state = RedisSessionStore(request, cls.pipeline_name)
if not state.is_valid():
return None
provider_model = None
if state.provider_model_id:
provider_model = cls.provider_model_cls.objects.get(id=state.provider_model_id)
organization = None
if state.org_id:
organization = Organization.objects.get(id=state.org_id)
provider_key = state.provider_key
config = state.config
return cls(
request,
organization=organization,
provider_key=provider_key,
provider_model=provider_model,
config=config,
)
def __init__(self, request, provider_key, organization=None, provider_model=None, config=None):
if config is None:
config = {}
self.request = request
self.organization = organization
self.state = RedisSessionStore(request, self.pipeline_name)
self.provider = self.provider_manager.get(provider_key)
self.provider_model = provider_model
self.config = config
self.provider.set_pipeline(self)
self.provider.set_config(config)
self.pipeline_views = self.get_pipeline_views()
# we serialize the pipeline to be ['fqn.PipelineView', ...] which
# allows us to determine if the pipeline has changed during the auth
# flow or if the user is somehow circumventing a chunk of it
pipe_ids = [
u"{}.{}".format(type(v).__module__, type(v).__name__) for v in self.pipeline_views
]
self.signature = md5_text(*pipe_ids).hexdigest()
def get_pipeline_views(self):
"""
Retrieve the pipeline views from the provider.
You may wish to override this method to provide views that all
providers should inherit, or customize the provider method called to
retrieve the views.
"""
return self.provider.get_pipeline_views()
def is_valid(self):
return self.state.is_valid() and self.state.signature == self.signature
def initialize(self):
self.state.regenerate(
{
"uid": self.request.user.id if self.request.user.is_authenticated() else None,
"provider_model_id": self.provider_model.id if self.provider_model else None,
"provider_key": self.provider.key,
"org_id": self.organization.id if self.organization else None,
"step_index": 0,
"signature": self.signature,
"config": self.config,
"data": {},
}
)
def clear_session(self):
self.state.clear()
def current_step(self):
"""
Render the current step.
"""
step_index = self.state.step_index
if step_index == len(self.pipeline_views):
return self.finish_pipeline()
step = self.pipeline_views[step_index]
# support late binding steps
if isinstance(step, LambdaType):
step = step()
return step.dispatch(request=self.request, pipeline=self)
def error(self, message):
context = {"error": message}
extra = {
"organization_id": self.organization.id if self.organization else None,
"provider": self.provider.key,
"error": message,
}
logger = self.get_logger()
# log error
logger.error("pipeline error", extra=extra)
return render_to_response("sentry/pipeline-error.html", context, self.request)
def next_step(self, step_size=1):
"""
Render the next step.
"""
self.state.step_index += step_size
if self.organization:
analytics.record(
"integrations.pipeline_step",
user_id=self.request.user.id,
organization_id=self.organization.id,
integration=self.provider.key,
step_index=self.state.step_index,
pipeline_type="reauth" if self.fetch_state("integration_id") else "install",
)
return self.current_step()
def finish_pipeline(self):
"""
Called when the pipeline completes the final step.
"""
raise NotImplementedError
def bind_state(self, key, value):
data = self.state.data
data[key] = value
self.state.data = data
def fetch_state(self, key=None):
data = self.state.data
if not data:
return None
return data if key is None else data.get(key)
def get_logger(self):
return logging.getLogger("sentry.integration.%s" % (self.provider.key,))
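# Minimal usage sketch (hypothetical names, not part of this module): a
# concrete pipeline supplies a provider manager and a finish_pipeline
# implementation, and each view advances the flow via pipeline.next_step().
#
#     class ExampleSetupView(PipelineView):
#         def dispatch(self, request, pipeline):
#             pipeline.bind_state("example_option", request.GET.get("option"))
#             return pipeline.next_step()
#
#     class ExampleProvider(PipelineProvider):
#         key = "example"
#         def get_pipeline_views(self):
#             return [ExampleSetupView()]
#
#     class ExamplePipeline(Pipeline):
#         pipeline_name = "example_pipeline"
#         provider_manager = example_registry  # hypothetical registry with .get(key)
#         def finish_pipeline(self):
#             data = self.fetch_state()
#             self.clear_session()
#             return render_to_response("sentry/pipeline-done.html", data, self.request)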
|
|
#!/usr/bin/python
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
from nose import SkipTest
from uuid import uuid4
from swift.common.constraints import MAX_META_COUNT, MAX_META_NAME_LENGTH, \
MAX_META_OVERALL_SIZE, MAX_META_VALUE_LENGTH
from swift_testing import check_response, retry, skip, skip2, skip3, \
swift_test_perm, web_front_end, requires_acls, swift_test_user
class TestContainer(unittest.TestCase):
def setUp(self):
if skip:
raise SkipTest
self.name = uuid4().hex
def put(url, token, parsed, conn):
conn.request('PUT', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
def tearDown(self):
if skip:
raise SkipTest
def get(url, token, parsed, conn):
conn.request('GET', parsed.path + '/' + self.name + '?format=json',
'', {'X-Auth-Token': token})
return check_response(conn)
def delete(url, token, parsed, conn, obj):
conn.request('DELETE',
'/'.join([parsed.path, self.name, obj['name']]), '',
{'X-Auth-Token': token})
return check_response(conn)
while True:
resp = retry(get)
body = resp.read()
self.assert_(resp.status // 100 == 2, resp.status)
objs = json.loads(body)
if not objs:
break
for obj in objs:
resp = retry(delete, obj)
resp.read()
self.assertEqual(resp.status, 204)
def delete(url, token, parsed, conn):
conn.request('DELETE', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(delete)
resp.read()
self.assertEqual(resp.status, 204)
def test_multi_metadata(self):
if skip:
raise SkipTest
def post(url, token, parsed, conn, name, value):
conn.request('POST', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token, name: value})
return check_response(conn)
def head(url, token, parsed, conn):
conn.request('HEAD', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(post, 'X-Container-Meta-One', '1')
resp.read()
self.assertEqual(resp.status, 204)
resp = retry(head)
resp.read()
self.assert_(resp.status in (200, 204), resp.status)
self.assertEqual(resp.getheader('x-container-meta-one'), '1')
resp = retry(post, 'X-Container-Meta-Two', '2')
resp.read()
self.assertEqual(resp.status, 204)
resp = retry(head)
resp.read()
self.assert_(resp.status in (200, 204), resp.status)
self.assertEqual(resp.getheader('x-container-meta-one'), '1')
self.assertEqual(resp.getheader('x-container-meta-two'), '2')
def test_unicode_metadata(self):
if skip:
raise SkipTest
def post(url, token, parsed, conn, name, value):
conn.request('POST', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token, name: value})
return check_response(conn)
def head(url, token, parsed, conn):
conn.request('HEAD', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token})
return check_response(conn)
uni_key = u'X-Container-Meta-uni\u0E12'
uni_value = u'uni\u0E12'
if (web_front_end == 'integral'):
resp = retry(post, uni_key, '1')
resp.read()
self.assertEqual(resp.status, 204)
resp = retry(head)
resp.read()
self.assert_(resp.status in (200, 204), resp.status)
self.assertEqual(resp.getheader(uni_key.encode('utf-8')), '1')
resp = retry(post, 'X-Container-Meta-uni', uni_value)
resp.read()
self.assertEqual(resp.status, 204)
resp = retry(head)
resp.read()
self.assert_(resp.status in (200, 204), resp.status)
self.assertEqual(resp.getheader('X-Container-Meta-uni'),
uni_value.encode('utf-8'))
if (web_front_end == 'integral'):
resp = retry(post, uni_key, uni_value)
resp.read()
self.assertEqual(resp.status, 204)
resp = retry(head)
resp.read()
self.assert_(resp.status in (200, 204), resp.status)
self.assertEqual(resp.getheader(uni_key.encode('utf-8')),
uni_value.encode('utf-8'))
def test_PUT_metadata(self):
if skip:
raise SkipTest
def put(url, token, parsed, conn, name, value):
conn.request('PUT', parsed.path + '/' + name, '',
{'X-Auth-Token': token,
'X-Container-Meta-Test': value})
return check_response(conn)
def head(url, token, parsed, conn, name):
conn.request('HEAD', parsed.path + '/' + name, '',
{'X-Auth-Token': token})
return check_response(conn)
def get(url, token, parsed, conn, name):
conn.request('GET', parsed.path + '/' + name, '',
{'X-Auth-Token': token})
return check_response(conn)
def delete(url, token, parsed, conn, name):
conn.request('DELETE', parsed.path + '/' + name, '',
{'X-Auth-Token': token})
return check_response(conn)
name = uuid4().hex
resp = retry(put, name, 'Value')
resp.read()
self.assertEqual(resp.status, 201)
resp = retry(head, name)
resp.read()
self.assert_(resp.status in (200, 204), resp.status)
self.assertEqual(resp.getheader('x-container-meta-test'), 'Value')
resp = retry(get, name)
resp.read()
self.assert_(resp.status in (200, 204), resp.status)
self.assertEqual(resp.getheader('x-container-meta-test'), 'Value')
resp = retry(delete, name)
resp.read()
self.assertEqual(resp.status, 204)
name = uuid4().hex
resp = retry(put, name, '')
resp.read()
self.assertEqual(resp.status, 201)
resp = retry(head, name)
resp.read()
self.assert_(resp.status in (200, 204), resp.status)
self.assertEqual(resp.getheader('x-container-meta-test'), None)
resp = retry(get, name)
resp.read()
self.assert_(resp.status in (200, 204), resp.status)
self.assertEqual(resp.getheader('x-container-meta-test'), None)
resp = retry(delete, name)
resp.read()
self.assertEqual(resp.status, 204)
def test_POST_metadata(self):
if skip:
raise SkipTest
def post(url, token, parsed, conn, value):
conn.request('POST', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token,
'X-Container-Meta-Test': value})
return check_response(conn)
def head(url, token, parsed, conn):
conn.request('HEAD', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token})
return check_response(conn)
def get(url, token, parsed, conn):
conn.request('GET', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(head)
resp.read()
self.assert_(resp.status in (200, 204), resp.status)
self.assertEqual(resp.getheader('x-container-meta-test'), None)
resp = retry(get)
resp.read()
self.assert_(resp.status in (200, 204), resp.status)
self.assertEqual(resp.getheader('x-container-meta-test'), None)
resp = retry(post, 'Value')
resp.read()
self.assertEqual(resp.status, 204)
resp = retry(head)
resp.read()
self.assert_(resp.status in (200, 204), resp.status)
self.assertEqual(resp.getheader('x-container-meta-test'), 'Value')
resp = retry(get)
resp.read()
self.assert_(resp.status in (200, 204), resp.status)
self.assertEqual(resp.getheader('x-container-meta-test'), 'Value')
def test_PUT_bad_metadata(self):
if skip:
raise SkipTest
def put(url, token, parsed, conn, name, extra_headers):
headers = {'X-Auth-Token': token}
headers.update(extra_headers)
conn.request('PUT', parsed.path + '/' + name, '', headers)
return check_response(conn)
def delete(url, token, parsed, conn, name):
conn.request('DELETE', parsed.path + '/' + name, '',
{'X-Auth-Token': token})
return check_response(conn)
name = uuid4().hex
resp = retry(
put, name,
{'X-Container-Meta-' + ('k' * MAX_META_NAME_LENGTH): 'v'})
resp.read()
self.assertEqual(resp.status, 201)
resp = retry(delete, name)
resp.read()
self.assertEqual(resp.status, 204)
name = uuid4().hex
resp = retry(
put, name,
{'X-Container-Meta-' + ('k' * (MAX_META_NAME_LENGTH + 1)): 'v'})
resp.read()
self.assertEqual(resp.status, 400)
resp = retry(delete, name)
resp.read()
self.assertEqual(resp.status, 404)
name = uuid4().hex
resp = retry(
put, name,
{'X-Container-Meta-Too-Long': 'k' * MAX_META_VALUE_LENGTH})
resp.read()
self.assertEqual(resp.status, 201)
resp = retry(delete, name)
resp.read()
self.assertEqual(resp.status, 204)
name = uuid4().hex
resp = retry(
put, name,
{'X-Container-Meta-Too-Long': 'k' * (MAX_META_VALUE_LENGTH + 1)})
resp.read()
self.assertEqual(resp.status, 400)
resp = retry(delete, name)
resp.read()
self.assertEqual(resp.status, 404)
name = uuid4().hex
headers = {}
for x in xrange(MAX_META_COUNT):
headers['X-Container-Meta-%d' % x] = 'v'
resp = retry(put, name, headers)
resp.read()
self.assertEqual(resp.status, 201)
resp = retry(delete, name)
resp.read()
self.assertEqual(resp.status, 204)
name = uuid4().hex
headers = {}
for x in xrange(MAX_META_COUNT + 1):
headers['X-Container-Meta-%d' % x] = 'v'
resp = retry(put, name, headers)
resp.read()
self.assertEqual(resp.status, 400)
resp = retry(delete, name)
resp.read()
self.assertEqual(resp.status, 404)
name = uuid4().hex
headers = {}
header_value = 'k' * MAX_META_VALUE_LENGTH
size = 0
x = 0
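        # fill headers until just under MAX_META_OVERALL_SIZE; each entry
        # counts its 4-character name suffix plus its value toward the limit,
        # and a final 'k' header then tops the total off to exactly the limit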
while size < MAX_META_OVERALL_SIZE - 4 - MAX_META_VALUE_LENGTH:
size += 4 + MAX_META_VALUE_LENGTH
headers['X-Container-Meta-%04d' % x] = header_value
x += 1
if MAX_META_OVERALL_SIZE - size > 1:
headers['X-Container-Meta-k'] = \
'v' * (MAX_META_OVERALL_SIZE - size - 1)
resp = retry(put, name, headers)
resp.read()
self.assertEqual(resp.status, 201)
resp = retry(delete, name)
resp.read()
self.assertEqual(resp.status, 204)
name = uuid4().hex
headers['X-Container-Meta-k'] = \
'v' * (MAX_META_OVERALL_SIZE - size)
resp = retry(put, name, headers)
resp.read()
self.assertEqual(resp.status, 400)
resp = retry(delete, name)
resp.read()
self.assertEqual(resp.status, 404)
def test_POST_bad_metadata(self):
if skip:
raise SkipTest
def post(url, token, parsed, conn, extra_headers):
headers = {'X-Auth-Token': token}
headers.update(extra_headers)
conn.request('POST', parsed.path + '/' + self.name, '', headers)
return check_response(conn)
resp = retry(
post,
{'X-Container-Meta-' + ('k' * MAX_META_NAME_LENGTH): 'v'})
resp.read()
self.assertEqual(resp.status, 204)
resp = retry(
post,
{'X-Container-Meta-' + ('k' * (MAX_META_NAME_LENGTH + 1)): 'v'})
resp.read()
self.assertEqual(resp.status, 400)
resp = retry(
post,
{'X-Container-Meta-Too-Long': 'k' * MAX_META_VALUE_LENGTH})
resp.read()
self.assertEqual(resp.status, 204)
resp = retry(
post,
{'X-Container-Meta-Too-Long': 'k' * (MAX_META_VALUE_LENGTH + 1)})
resp.read()
self.assertEqual(resp.status, 400)
def test_POST_bad_metadata2(self):
if skip:
raise SkipTest
def post(url, token, parsed, conn, extra_headers):
headers = {'X-Auth-Token': token}
headers.update(extra_headers)
conn.request('POST', parsed.path + '/' + self.name, '', headers)
return check_response(conn)
headers = {}
for x in xrange(MAX_META_COUNT):
headers['X-Container-Meta-%d' % x] = 'v'
resp = retry(post, headers)
resp.read()
self.assertEqual(resp.status, 204)
headers = {}
for x in xrange(MAX_META_COUNT + 1):
headers['X-Container-Meta-%d' % x] = 'v'
resp = retry(post, headers)
resp.read()
self.assertEqual(resp.status, 400)
def test_POST_bad_metadata3(self):
if skip:
raise SkipTest
def post(url, token, parsed, conn, extra_headers):
headers = {'X-Auth-Token': token}
headers.update(extra_headers)
conn.request('POST', parsed.path + '/' + self.name, '', headers)
return check_response(conn)
headers = {}
header_value = 'k' * MAX_META_VALUE_LENGTH
size = 0
x = 0
while size < MAX_META_OVERALL_SIZE - 4 - MAX_META_VALUE_LENGTH:
size += 4 + MAX_META_VALUE_LENGTH
headers['X-Container-Meta-%04d' % x] = header_value
x += 1
if MAX_META_OVERALL_SIZE - size > 1:
headers['X-Container-Meta-k'] = \
'v' * (MAX_META_OVERALL_SIZE - size - 1)
resp = retry(post, headers)
resp.read()
self.assertEqual(resp.status, 204)
headers['X-Container-Meta-k'] = \
'v' * (MAX_META_OVERALL_SIZE - size)
resp = retry(post, headers)
resp.read()
self.assertEqual(resp.status, 400)
def test_public_container(self):
if skip:
raise SkipTest
def get(url, token, parsed, conn):
conn.request('GET', parsed.path + '/' + self.name)
return check_response(conn)
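        # with no X-Auth-Token the anonymous GET is refused, so retry() gives
        # up and raises an exception beginning with 'No result after '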
try:
resp = retry(get)
raise Exception('Should not have been able to GET')
except Exception as err:
self.assert_(str(err).startswith('No result after '), err)
def post(url, token, parsed, conn):
conn.request('POST', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token,
'X-Container-Read': '.r:*,.rlistings'})
return check_response(conn)
resp = retry(post)
resp.read()
self.assertEqual(resp.status, 204)
resp = retry(get)
resp.read()
self.assertEqual(resp.status, 204)
def post(url, token, parsed, conn):
conn.request('POST', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token, 'X-Container-Read': ''})
return check_response(conn)
resp = retry(post)
resp.read()
self.assertEqual(resp.status, 204)
try:
resp = retry(get)
raise Exception('Should not have been able to GET')
except Exception as err:
self.assert_(str(err).startswith('No result after '), err)
def test_cross_account_container(self):
if skip or skip2:
raise SkipTest
# Obtain the first account's string
first_account = ['unknown']
def get1(url, token, parsed, conn):
first_account[0] = parsed.path
conn.request('HEAD', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(get1)
resp.read()
# Ensure we can't access the container with the second account
def get2(url, token, parsed, conn):
conn.request('GET', first_account[0] + '/' + self.name, '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(get2, use_account=2)
resp.read()
self.assertEqual(resp.status, 403)
# Make the container accessible by the second account
def post(url, token, parsed, conn):
conn.request('POST', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token,
'X-Container-Read': swift_test_perm[1],
'X-Container-Write': swift_test_perm[1]})
return check_response(conn)
resp = retry(post)
resp.read()
self.assertEqual(resp.status, 204)
# Ensure we can now use the container with the second account
resp = retry(get2, use_account=2)
resp.read()
self.assertEqual(resp.status, 204)
# Make the container private again
def post(url, token, parsed, conn):
conn.request('POST', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token, 'X-Container-Read': '',
'X-Container-Write': ''})
return check_response(conn)
resp = retry(post)
resp.read()
self.assertEqual(resp.status, 204)
# Ensure we can't access the container with the second account again
resp = retry(get2, use_account=2)
resp.read()
self.assertEqual(resp.status, 403)
def test_cross_account_public_container(self):
if skip or skip2:
raise SkipTest
# Obtain the first account's string
first_account = ['unknown']
def get1(url, token, parsed, conn):
first_account[0] = parsed.path
conn.request('HEAD', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(get1)
resp.read()
# Ensure we can't access the container with the second account
def get2(url, token, parsed, conn):
conn.request('GET', first_account[0] + '/' + self.name, '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(get2, use_account=2)
resp.read()
self.assertEqual(resp.status, 403)
# Make the container completely public
def post(url, token, parsed, conn):
conn.request('POST', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token,
'X-Container-Read': '.r:*,.rlistings'})
return check_response(conn)
resp = retry(post)
resp.read()
self.assertEqual(resp.status, 204)
# Ensure we can now read the container with the second account
resp = retry(get2, use_account=2)
resp.read()
self.assertEqual(resp.status, 204)
# But we shouldn't be able to write with the second account
def put2(url, token, parsed, conn):
conn.request('PUT', first_account[0] + '/' + self.name + '/object',
'test object', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(put2, use_account=2)
resp.read()
self.assertEqual(resp.status, 403)
# Now make the container also writeable by the second account
def post(url, token, parsed, conn):
conn.request('POST', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token,
'X-Container-Write': swift_test_perm[1]})
return check_response(conn)
resp = retry(post)
resp.read()
self.assertEqual(resp.status, 204)
# Ensure we can still read the container with the second account
resp = retry(get2, use_account=2)
resp.read()
self.assertEqual(resp.status, 204)
# And that we can now write with the second account
resp = retry(put2, use_account=2)
resp.read()
self.assertEqual(resp.status, 201)
def test_nonadmin_user(self):
if skip or skip3:
raise SkipTest
# Obtain the first account's string
first_account = ['unknown']
def get1(url, token, parsed, conn):
first_account[0] = parsed.path
conn.request('HEAD', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(get1)
resp.read()
# Ensure we can't access the container with the third account
def get3(url, token, parsed, conn):
conn.request('GET', first_account[0] + '/' + self.name, '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(get3, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# Make the container accessible by the third account
def post(url, token, parsed, conn):
conn.request('POST', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token,
'X-Container-Read': swift_test_perm[2]})
return check_response(conn)
resp = retry(post)
resp.read()
self.assertEqual(resp.status, 204)
# Ensure we can now read the container with the third account
resp = retry(get3, use_account=3)
resp.read()
self.assertEqual(resp.status, 204)
# But we shouldn't be able to write with the third account
def put3(url, token, parsed, conn):
conn.request('PUT', first_account[0] + '/' + self.name + '/object',
'test object', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(put3, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# Now make the container also writeable by the third account
def post(url, token, parsed, conn):
conn.request('POST', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token,
'X-Container-Write': swift_test_perm[2]})
return check_response(conn)
resp = retry(post)
resp.read()
self.assertEqual(resp.status, 204)
# Ensure we can still read the container with the third account
resp = retry(get3, use_account=3)
resp.read()
self.assertEqual(resp.status, 204)
# And that we can now write with the third account
resp = retry(put3, use_account=3)
resp.read()
self.assertEqual(resp.status, 201)
@requires_acls
def test_read_only_acl_listings(self):
if skip3:
raise SkipTest
def get(url, token, parsed, conn):
conn.request('GET', parsed.path, '', {'X-Auth-Token': token})
return check_response(conn)
def post_account(url, token, parsed, conn, headers):
new_headers = dict({'X-Auth-Token': token}, **headers)
conn.request('POST', parsed.path, '', new_headers)
return check_response(conn)
def put(url, token, parsed, conn, name):
conn.request('PUT', parsed.path + '/%s' % name, '',
{'X-Auth-Token': token})
return check_response(conn)
# cannot list containers
resp = retry(get, use_account=3)
resp.read()
self.assertEquals(resp.status, 403)
# grant read-only access
acl_user = swift_test_user[2]
acl = {'read-only': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post_account, headers=headers, use_account=1)
resp.read()
self.assertEqual(resp.status, 204)
# read-only can list containers
resp = retry(get, use_account=3)
listing = resp.read()
self.assertEquals(resp.status, 200)
self.assert_(self.name in listing)
# read-only can not create containers
new_container_name = str(uuid4())
resp = retry(put, new_container_name, use_account=3)
resp.read()
self.assertEquals(resp.status, 403)
# but it can see newly created ones
resp = retry(put, new_container_name, use_account=1)
resp.read()
self.assertEquals(resp.status, 201)
resp = retry(get, use_account=3)
listing = resp.read()
self.assertEquals(resp.status, 200)
self.assert_(new_container_name in listing)
@requires_acls
def test_read_only_acl_metadata(self):
if skip3:
raise SkipTest
def get(url, token, parsed, conn, name):
conn.request('GET', parsed.path + '/%s' % name, '',
{'X-Auth-Token': token})
return check_response(conn)
def post_account(url, token, parsed, conn, headers):
new_headers = dict({'X-Auth-Token': token}, **headers)
conn.request('POST', parsed.path, '', new_headers)
return check_response(conn)
def post(url, token, parsed, conn, name, headers):
new_headers = dict({'X-Auth-Token': token}, **headers)
conn.request('POST', parsed.path + '/%s' % name, '', new_headers)
return check_response(conn)
# add some metadata
value = str(uuid4())
headers = {'x-container-meta-test': value}
resp = retry(post, self.name, headers=headers, use_account=1)
resp.read()
self.assertEqual(resp.status, 204)
resp = retry(get, self.name, use_account=1)
resp.read()
self.assertEquals(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), value)
# cannot see metadata
resp = retry(get, self.name, use_account=3)
resp.read()
self.assertEquals(resp.status, 403)
# grant read-only access
acl_user = swift_test_user[2]
acl = {'read-only': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post_account, headers=headers, use_account=1)
resp.read()
self.assertEqual(resp.status, 204)
# read-only can NOT write container metadata
new_value = str(uuid4())
headers = {'x-container-meta-test': new_value}
resp = retry(post, self.name, headers=headers, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# read-only can read container metadata
resp = retry(get, self.name, use_account=3)
resp.read()
self.assertEquals(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), value)
@requires_acls
def test_read_write_acl_listings(self):
if skip3:
raise SkipTest
def get(url, token, parsed, conn):
conn.request('GET', parsed.path, '', {'X-Auth-Token': token})
return check_response(conn)
def post(url, token, parsed, conn, headers):
new_headers = dict({'X-Auth-Token': token}, **headers)
conn.request('POST', parsed.path, '', new_headers)
return check_response(conn)
def put(url, token, parsed, conn, name):
conn.request('PUT', parsed.path + '/%s' % name, '',
{'X-Auth-Token': token})
return check_response(conn)
def delete(url, token, parsed, conn, name):
conn.request('DELETE', parsed.path + '/%s' % name, '',
{'X-Auth-Token': token})
return check_response(conn)
# cannot list containers
resp = retry(get, use_account=3)
resp.read()
self.assertEquals(resp.status, 403)
# grant read-write access
acl_user = swift_test_user[2]
acl = {'read-write': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post, headers=headers, use_account=1)
resp.read()
self.assertEqual(resp.status, 204)
# can list containers
resp = retry(get, use_account=3)
listing = resp.read()
self.assertEquals(resp.status, 200)
self.assert_(self.name in listing)
# can create new containers
new_container_name = str(uuid4())
resp = retry(put, new_container_name, use_account=3)
resp.read()
self.assertEquals(resp.status, 201)
resp = retry(get, use_account=3)
listing = resp.read()
self.assertEquals(resp.status, 200)
self.assert_(new_container_name in listing)
# can also delete them
resp = retry(delete, new_container_name, use_account=3)
resp.read()
self.assertEquals(resp.status, 204)
resp = retry(get, use_account=3)
listing = resp.read()
self.assertEquals(resp.status, 200)
self.assert_(new_container_name not in listing)
# even if they didn't create them
empty_container_name = str(uuid4())
resp = retry(put, empty_container_name, use_account=1)
resp.read()
self.assertEquals(resp.status, 201)
resp = retry(delete, empty_container_name, use_account=3)
resp.read()
self.assertEquals(resp.status, 204)
@requires_acls
def test_read_write_acl_metadata(self):
if skip3:
raise SkipTest
def get(url, token, parsed, conn, name):
conn.request('GET', parsed.path + '/%s' % name, '',
{'X-Auth-Token': token})
return check_response(conn)
def post_account(url, token, parsed, conn, headers):
new_headers = dict({'X-Auth-Token': token}, **headers)
conn.request('POST', parsed.path, '', new_headers)
return check_response(conn)
def post(url, token, parsed, conn, name, headers):
new_headers = dict({'X-Auth-Token': token}, **headers)
conn.request('POST', parsed.path + '/%s' % name, '', new_headers)
return check_response(conn)
# add some metadata
value = str(uuid4())
headers = {'x-container-meta-test': value}
resp = retry(post, self.name, headers=headers, use_account=1)
resp.read()
self.assertEqual(resp.status, 204)
resp = retry(get, self.name, use_account=1)
resp.read()
self.assertEquals(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), value)
# cannot see metadata
resp = retry(get, self.name, use_account=3)
resp.read()
self.assertEquals(resp.status, 403)
# grant read-write access
acl_user = swift_test_user[2]
acl = {'read-write': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post_account, headers=headers, use_account=1)
resp.read()
self.assertEqual(resp.status, 204)
# read-write can read container metadata
resp = retry(get, self.name, use_account=3)
resp.read()
self.assertEquals(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), value)
# read-write can also write container metadata
new_value = str(uuid4())
headers = {'x-container-meta-test': new_value}
resp = retry(post, self.name, headers=headers, use_account=3)
resp.read()
self.assertEquals(resp.status, 204)
resp = retry(get, self.name, use_account=3)
resp.read()
self.assertEquals(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), new_value)
# and remove it
headers = {'x-remove-container-meta-test': 'true'}
resp = retry(post, self.name, headers=headers, use_account=3)
resp.read()
self.assertEquals(resp.status, 204)
resp = retry(get, self.name, use_account=3)
resp.read()
self.assertEquals(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), None)
@requires_acls
def test_admin_acl_listing(self):
if skip3:
raise SkipTest
def get(url, token, parsed, conn):
conn.request('GET', parsed.path, '', {'X-Auth-Token': token})
return check_response(conn)
def post(url, token, parsed, conn, headers):
new_headers = dict({'X-Auth-Token': token}, **headers)
conn.request('POST', parsed.path, '', new_headers)
return check_response(conn)
def put(url, token, parsed, conn, name):
conn.request('PUT', parsed.path + '/%s' % name, '',
{'X-Auth-Token': token})
return check_response(conn)
def delete(url, token, parsed, conn, name):
conn.request('DELETE', parsed.path + '/%s' % name, '',
{'X-Auth-Token': token})
return check_response(conn)
# cannot list containers
resp = retry(get, use_account=3)
resp.read()
self.assertEquals(resp.status, 403)
# grant admin access
acl_user = swift_test_user[2]
acl = {'admin': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post, headers=headers, use_account=1)
resp.read()
self.assertEqual(resp.status, 204)
# can list containers
resp = retry(get, use_account=3)
listing = resp.read()
self.assertEquals(resp.status, 200)
self.assert_(self.name in listing)
# can create new containers
new_container_name = str(uuid4())
resp = retry(put, new_container_name, use_account=3)
resp.read()
self.assertEquals(resp.status, 201)
resp = retry(get, use_account=3)
listing = resp.read()
self.assertEquals(resp.status, 200)
self.assert_(new_container_name in listing)
# can also delete them
resp = retry(delete, new_container_name, use_account=3)
resp.read()
self.assertEquals(resp.status, 204)
resp = retry(get, use_account=3)
listing = resp.read()
self.assertEquals(resp.status, 200)
self.assert_(new_container_name not in listing)
# even if they didn't create them
empty_container_name = str(uuid4())
resp = retry(put, empty_container_name, use_account=1)
resp.read()
self.assertEquals(resp.status, 201)
resp = retry(delete, empty_container_name, use_account=3)
resp.read()
self.assertEquals(resp.status, 204)
@requires_acls
def test_admin_acl_metadata(self):
if skip3:
raise SkipTest
def get(url, token, parsed, conn, name):
conn.request('GET', parsed.path + '/%s' % name, '',
{'X-Auth-Token': token})
return check_response(conn)
def post_account(url, token, parsed, conn, headers):
new_headers = dict({'X-Auth-Token': token}, **headers)
conn.request('POST', parsed.path, '', new_headers)
return check_response(conn)
def post(url, token, parsed, conn, name, headers):
new_headers = dict({'X-Auth-Token': token}, **headers)
conn.request('POST', parsed.path + '/%s' % name, '', new_headers)
return check_response(conn)
# add some metadata
value = str(uuid4())
headers = {'x-container-meta-test': value}
resp = retry(post, self.name, headers=headers, use_account=1)
resp.read()
self.assertEqual(resp.status, 204)
resp = retry(get, self.name, use_account=1)
resp.read()
self.assertEquals(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), value)
# cannot see metadata
resp = retry(get, self.name, use_account=3)
resp.read()
self.assertEquals(resp.status, 403)
# grant access
acl_user = swift_test_user[2]
acl = {'admin': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post_account, headers=headers, use_account=1)
resp.read()
self.assertEqual(resp.status, 204)
# can read container metadata
resp = retry(get, self.name, use_account=3)
resp.read()
self.assertEquals(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), value)
# can also write container metadata
new_value = str(uuid4())
headers = {'x-container-meta-test': new_value}
resp = retry(post, self.name, headers=headers, use_account=3)
resp.read()
self.assertEquals(resp.status, 204)
resp = retry(get, self.name, use_account=3)
resp.read()
self.assertEquals(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), new_value)
# and remove it
headers = {'x-remove-container-meta-test': 'true'}
resp = retry(post, self.name, headers=headers, use_account=3)
resp.read()
self.assertEquals(resp.status, 204)
resp = retry(get, self.name, use_account=3)
resp.read()
self.assertEquals(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), None)
@requires_acls
def test_protected_container_sync(self):
if skip3:
raise SkipTest
def get(url, token, parsed, conn, name):
conn.request('GET', parsed.path + '/%s' % name, '',
{'X-Auth-Token': token})
return check_response(conn)
def post_account(url, token, parsed, conn, headers):
new_headers = dict({'X-Auth-Token': token}, **headers)
conn.request('POST', parsed.path, '', new_headers)
return check_response(conn)
def post(url, token, parsed, conn, name, headers):
new_headers = dict({'X-Auth-Token': token}, **headers)
conn.request('POST', parsed.path + '/%s' % name, '', new_headers)
return check_response(conn)
# add some metadata
value = str(uuid4())
headers = {
'x-container-sync-key': 'secret',
'x-container-meta-test': value,
}
resp = retry(post, self.name, headers=headers, use_account=1)
resp.read()
self.assertEqual(resp.status, 204)
resp = retry(get, self.name, use_account=1)
resp.read()
self.assertEquals(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Sync-Key'), 'secret')
self.assertEqual(resp.getheader('X-Container-Meta-Test'), value)
# grant read-only access
acl_user = swift_test_user[2]
acl = {'read-only': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post_account, headers=headers, use_account=1)
resp.read()
self.assertEqual(resp.status, 204)
# can read container metadata
resp = retry(get, self.name, use_account=3)
resp.read()
self.assertEquals(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), value)
# but not sync-key
self.assertEqual(resp.getheader('X-Container-Sync-Key'), None)
# and can not write
headers = {'x-container-sync-key': str(uuid4())}
resp = retry(post, self.name, headers=headers, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# grant read-write access
acl_user = swift_test_user[2]
acl = {'read-write': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post_account, headers=headers, use_account=1)
resp.read()
self.assertEqual(resp.status, 204)
# can read container metadata
resp = retry(get, self.name, use_account=3)
resp.read()
self.assertEquals(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), value)
# but not sync-key
self.assertEqual(resp.getheader('X-Container-Sync-Key'), None)
# sanity check sync-key w/ account1
resp = retry(get, self.name, use_account=1)
resp.read()
self.assertEquals(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Sync-Key'), 'secret')
# and can write
new_value = str(uuid4())
headers = {
'x-container-sync-key': str(uuid4()),
'x-container-meta-test': new_value,
}
resp = retry(post, self.name, headers=headers, use_account=3)
resp.read()
self.assertEqual(resp.status, 204)
resp = retry(get, self.name, use_account=1) # validate w/ account1
resp.read()
self.assertEquals(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), new_value)
# but can not write sync-key
self.assertEqual(resp.getheader('X-Container-Sync-Key'), 'secret')
# grant admin access
acl_user = swift_test_user[2]
acl = {'admin': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post_account, headers=headers, use_account=1)
resp.read()
self.assertEqual(resp.status, 204)
# admin can read container metadata
resp = retry(get, self.name, use_account=3)
resp.read()
self.assertEquals(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), new_value)
# and ALSO sync-key
self.assertEqual(resp.getheader('X-Container-Sync-Key'), 'secret')
# admin tester3 can even change sync-key
new_secret = str(uuid4())
headers = {'x-container-sync-key': new_secret}
resp = retry(post, self.name, headers=headers, use_account=3)
resp.read()
self.assertEqual(resp.status, 204)
resp = retry(get, self.name, use_account=3)
resp.read()
self.assertEquals(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Sync-Key'), new_secret)
@requires_acls
def test_protected_container_acl(self):
if skip3:
raise SkipTest
def get(url, token, parsed, conn, name):
conn.request('GET', parsed.path + '/%s' % name, '',
{'X-Auth-Token': token})
return check_response(conn)
def post_account(url, token, parsed, conn, headers):
new_headers = dict({'X-Auth-Token': token}, **headers)
conn.request('POST', parsed.path, '', new_headers)
return check_response(conn)
def post(url, token, parsed, conn, name, headers):
new_headers = dict({'X-Auth-Token': token}, **headers)
conn.request('POST', parsed.path + '/%s' % name, '', new_headers)
return check_response(conn)
# add some container acls
value = str(uuid4())
headers = {
'x-container-read': 'jdoe',
'x-container-write': 'jdoe',
'x-container-meta-test': value,
}
resp = retry(post, self.name, headers=headers, use_account=1)
resp.read()
self.assertEqual(resp.status, 204)
resp = retry(get, self.name, use_account=1)
resp.read()
self.assertEquals(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Read'), 'jdoe')
self.assertEqual(resp.getheader('X-Container-Write'), 'jdoe')
self.assertEqual(resp.getheader('X-Container-Meta-Test'), value)
# grant read-only access
acl_user = swift_test_user[2]
acl = {'read-only': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post_account, headers=headers, use_account=1)
resp.read()
self.assertEqual(resp.status, 204)
# can read container metadata
resp = retry(get, self.name, use_account=3)
resp.read()
self.assertEquals(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), value)
# but not container acl
self.assertEqual(resp.getheader('X-Container-Read'), None)
self.assertEqual(resp.getheader('X-Container-Write'), None)
# and can not write
headers = {
'x-container-read': 'frank',
'x-container-write': 'frank',
}
resp = retry(post, self.name, headers=headers, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# grant read-write access
acl_user = swift_test_user[2]
acl = {'read-write': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post_account, headers=headers, use_account=1)
resp.read()
self.assertEqual(resp.status, 204)
# can read container metadata
resp = retry(get, self.name, use_account=3)
resp.read()
self.assertEquals(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), value)
# but not container acl
self.assertEqual(resp.getheader('X-Container-Read'), None)
self.assertEqual(resp.getheader('X-Container-Write'), None)
# sanity check container acls with account1
resp = retry(get, self.name, use_account=1)
resp.read()
self.assertEquals(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Read'), 'jdoe')
self.assertEqual(resp.getheader('X-Container-Write'), 'jdoe')
# and can write
new_value = str(uuid4())
headers = {
'x-container-read': 'frank',
'x-container-write': 'frank',
'x-container-meta-test': new_value,
}
resp = retry(post, self.name, headers=headers, use_account=3)
resp.read()
self.assertEqual(resp.status, 204)
resp = retry(get, self.name, use_account=1) # validate w/ account1
resp.read()
self.assertEquals(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), new_value)
# but can not write container acls
self.assertEqual(resp.getheader('X-Container-Read'), 'jdoe')
self.assertEqual(resp.getheader('X-Container-Write'), 'jdoe')
# grant admin access
acl_user = swift_test_user[2]
acl = {'admin': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post_account, headers=headers, use_account=1)
resp.read()
self.assertEqual(resp.status, 204)
# admin can read container metadata
resp = retry(get, self.name, use_account=3)
resp.read()
self.assertEquals(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), new_value)
# and ALSO container acls
self.assertEqual(resp.getheader('X-Container-Read'), 'jdoe')
self.assertEqual(resp.getheader('X-Container-Write'), 'jdoe')
# admin tester3 can even change container acls
new_value = str(uuid4())
headers = {
'x-container-read': '.r:*',
}
resp = retry(post, self.name, headers=headers, use_account=3)
resp.read()
self.assertEqual(resp.status, 204)
resp = retry(get, self.name, use_account=3)
resp.read()
self.assertEquals(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Read'), '.r:*')
def test_long_name_content_type(self):
if skip:
raise SkipTest
def put(url, token, parsed, conn):
container_name = 'X' * 2048
conn.request('PUT', '%s/%s' % (parsed.path, container_name),
'there', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 400)
self.assertEqual(resp.getheader('Content-Type'),
'text/html; charset=UTF-8')
def test_null_name(self):
if skip:
raise SkipTest
def put(url, token, parsed, conn):
conn.request('PUT', '%s/abc%%00def' % parsed.path, '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(put)
if (web_front_end == 'apache2'):
self.assertEqual(resp.status, 404)
else:
self.assertEqual(resp.read(), 'Invalid UTF8 or contains NULL')
self.assertEqual(resp.status, 412)
if __name__ == '__main__':
unittest.main()
|
|
from __future__ import absolute_import
import pytest
from .fixtures import *
from blitzdb.tests.helpers.movie_data import Actor, Director, Movie
def test_basic_delete(backend, small_test_data):
backend.filter(Actor, {}).delete()
backend.commit()
assert len(backend.filter(Actor, {})) == 0
def test_basic_storage(backend, small_test_data):
(movies, actors, directors) = small_test_data
assert len(backend.filter(Movie, {})) == len(movies)
assert len(backend.filter(Actor, {})) == len(actors)
def test_keys_with_dots(backend):
actor = Actor({'some.key.with.nasty.dots': [{'some.more.nasty.dots': 100}], 'pk': 'test'})
backend.save(actor)
backend.commit()
assert actor == backend.get(Actor, {'pk': 'test'})
def test_negative_indexing(backend, small_test_data):
(movies, actors, directors) = small_test_data
actors = backend.filter(Actor, {})
assert actors[-1] == actors[len(actors) - 1]
assert actors[-10:-1] == actors[len(actors) - 10:len(actors) - 1]
assert actors[-len(actors):-1] == actors[0:len(actors) - 1]
# To do: Make step tests for file backend (MongoDB does not support this)
# assert actors[-10:-1:2] == actors[len(actors)-10:len(actors)-1:2]
def test_missing_keys_in_slice(backend, small_test_data):
(movies, actors, directors) = small_test_data
actors = backend.filter(Actor, {})
assert actors[:] == actors
assert actors[1:] == actors[1:len(actors)]
assert actors[:len(actors)] == actors[0:len(actors)]
def test_query_set(backend):
actors = [Actor({'foo': 'bar', 'value': 10}),
Actor({'foo': 'baz', 'value': 10}),
Actor({'foo': 'baz', 'value': 11}),
Actor({'foo': 'bar', 'value': 11})
]
for actor in actors:
backend.save(actor)
backend.commit()
queryset = backend.filter(Actor, {'foo': 'bar', 'value': 10})
assert queryset.next() == actors[0]
def test_and_queries(backend):
backend.save(Actor({'foo': 'bar', 'value': 10}))
backend.save(Actor({'foo': 'baz', 'value': 10}))
backend.save(Actor({'foo': 'baz', 'value': 11}))
backend.save(Actor({'foo': 'bar', 'value': 11}))
backend.commit()
assert len(backend.filter(Actor, {'foo': 'bar'})) == 2
assert len(backend.filter(Actor, {'value': 10})) == 2
assert len(backend.filter(Actor, {'foo': 'bar', 'value': 10})) == 1
assert len(backend.filter(Actor, {'foo': 'baz', 'value': 10})) == 1
assert len(backend.filter(Actor, {'foo': 'bar', 'value': 11})) == 1
assert len(backend.filter(Actor, {'foo': 'baz', 'value': 11})) == 1
def test_composite_queries(backend):
backend.filter(Actor, {}).delete()
backend.save(Actor({'values': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}))
backend.save(Actor({'values': [7, 6, 5, 4, 3, 2, 1]}))
backend.save(Actor({'values': [1, 2, 3, 4]}))
backend.save(Actor({'values': [1, 2, 3, 4, {'foo': 'bar'}]}))
backend.save(Actor({'values': 'foobar'}))
backend.commit()
for f in (lambda: True, lambda: backend.create_index(Actor, 'values')):
f()  # first pass runs without an index, second pass after creating one on 'values'
assert len(backend.filter(Actor, {})) == 5
assert len(backend.filter(Actor, {'values': [1, 2, 3, 4]})) == 1
assert len(backend.filter(Actor, {'values': [1, 2, 3, 4, {'foo': 'bar'}]})) == 1
assert len(backend.filter(Actor, {'values': [1, 2, 3, {'foo': 'bar'}, 4]})) == 0
assert len(backend.filter(Actor, {'values': [1, 2, 3, 4, 5]})) == 0
assert len(backend.filter(Actor, {'values': [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]})) == 0
assert len(backend.filter(Actor, {'values': {'$all': [4, 3, 2, 1]}})) == 4
assert len(backend.filter(Actor, {'values': {'$all': [4, 3, 2, 1, {'foo': 'bar'}]}})) == 1
assert len(backend.filter(Actor, {'values': {'$all': [{'foo': 'bar'}]}})) == 1
assert len(backend.filter(Actor, {'values': {'$all': [4, 3, 2, 1, 14]}})) == 0
assert len(backend.filter(Actor, {'values': {'$all': [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]}})) == 1
assert len(backend.filter(Actor, {'values': {'$in': [[1, 2, 3, 4], [7, 6, 5, 4, 3, 2, 1], [1, 2, 3, 5], 'foobar']}})) == 3
def test_operators(backend):
backend.filter(Actor, {}).delete()
marlon_brando = Actor({'name': 'Marlon Brando', 'gross_income_m': 1.453, 'appearances': 78, 'is_funny': False, 'birth_year': 1924})
leonardo_di_caprio = Actor({'name': 'Leonardo di Caprio', 'gross_income_m': 12.453, 'appearances': 34, 'is_funny': 'it depends', 'birth_year': 1974})
david_hasselhoff = Actor({'name': 'David Hasselhoff', 'gross_income_m': 12.453, 'appearances': 173, 'is_funny': True, 'birth_year': 1952})
charlie_chaplin = Actor({'name': 'Charlie Chaplin', 'gross_income_m': 0.371, 'appearances': 473, 'is_funny': True, 'birth_year': 1889})
backend.save(marlon_brando)
backend.save(leonardo_di_caprio)
backend.save(david_hasselhoff)
backend.save(charlie_chaplin)
backend.commit()
assert len(backend.filter(Actor, {})) == 4
for op, results in (('$gt', [david_hasselhoff]), ('$gte', [david_hasselhoff]), ('$lt', [charlie_chaplin]), ('$lte', [charlie_chaplin])):
query = {
'$and':
[
{'gross_income_m': {op: 1.0}},
{'is_funny': True}
]
}
assert len(backend.filter(Actor, query)) == len(results)
assert results in backend.filter(Actor, query)
for op, results in (('$gt', [david_hasselhoff, charlie_chaplin, marlon_brando]), ('$gte', [marlon_brando, david_hasselhoff, charlie_chaplin]), ('$lt', [charlie_chaplin]), ('$lte', [charlie_chaplin])):
query = {
'$and':
[
{'$or': [
{'gross_income_m': {op: 1.0}},
{'birth_year': {'$lt': 1900}},
]},
{'$or': [
{'is_funny': True},
{'name': 'Marlon Brando'},
]},
]
}
assert len(backend.filter(Actor, query)) == len(results)
assert results in backend.filter(Actor, query)
assert len(backend.filter(Actor, {'name': {'$ne': 'David Hasselhoff'}})) == 3
assert len(backend.filter(Actor, {'name': 'David Hasselhoff'})) == 1
assert len(backend.filter(Actor, {'name': {'$not': {'$in': ['David Hasselhoff', 'Marlon Brando', 'Charlie Chaplin']}}})) == 1
assert len(backend.filter(Actor, {'name': {'$in': ['Marlon Brando', 'Leonardo di Caprio']}})) == 2
def test_regex_operator(backend, small_test_data):
backend.filter(Actor, {}).delete()
marlon_brando = Actor({'name': 'Marlon Brando', 'gross_income_m': 1.453, 'appearances': 78, 'is_funny': False, 'birth_year': 1924})
marlon_wayans = Actor({'name': 'Marlon Wayans'})
backend.save(marlon_brando)
backend.save(marlon_wayans)
backend.commit()
assert backend.get(Actor, {'name': {'$regex': r'^Marlon\s+(?!Wayans)[\w]+$'}}) == marlon_brando
assert len(backend.filter(Actor, {'name': {'$regex': r'^Marlon\s+.*$'}})) == 2
assert len(backend.filter(Actor, {'name': {'$regex': r'^.*\s+Brando$'}})) == 1
def test_list_query(backend, small_test_data):
(movies, actors, directors) = small_test_data
movie = None
i = 0
while not movie or len(movie.cast) < 4:
movie = movies[i]
i += 1
actor = movie.cast[0]['actor']
other_movie = movies[i % len(movies)]
while other_movie in actor.movies:
other_movie = movies[i % len(movies)]
i += 1
assert actor in backend.filter(Actor, {'movies': movie})
assert actor not in backend.filter(Actor, {'movies': other_movie})
def test_list_query_multiple_items(backend, small_test_data):
(movies, actors, directors) = small_test_data
actor = None
i = 0
while not actor or len(actor.movies) < 2:
actor = actors[i]
i += 1
assert actor in backend.filter(Actor, {'movies': actor.movies})
def test_indexed_delete(backend, small_test_data):
all_movies = backend.filter(Movie, {})
for movie in all_movies:
backend.filter(Actor, {'movies': movie}).delete()
backend.commit()
for actor in backend.filter(Actor, {}):
assert actor.movies == []
def test_non_indexed_delete(backend, small_test_data):
(movies, actors, directors) = small_test_data
for movie in movies:
backend.filter(Director, {'movies': {'$all': [movie]}}).delete()
backend.commit()
for director in backend.filter(Director, {}):
assert director.movies == []
def test_positional_query(backend, small_test_data):
"""
We test a search query which explicitly references a given list item in an object
"""
(movies, actors, directors) = small_test_data
movie = None
i = 0
while not movie or len(movie.cast) < 3:
if len(movies[i].cast):
movie = movies[i]
actor = movie.cast[0]['actor']
index = actor.movies.index(movie)
i += 1
assert actor in backend.filter(Actor, {'movies.%d' % index: movie})
def test_default_backend(backend, small_test_data):
movies = backend.filter(Movie, {})
old_len = len(movies)
movie = movies[0]
movie.delete()
backend.commit()
with pytest.raises(Movie.DoesNotExist):
backend.get(Movie, {'pk': movie.pk})
assert old_len == len(backend.filter(Movie, {})) + 1
def test_index_reloading(backend, small_test_data):
(movies, actors, directors) = small_test_data
backend.filter(Actor, {'movies': movies[0]}).delete()
backend.commit()
assert list(backend.filter(Actor, {'movies': movies[0]})) == []
|
|
# Copyright 2014-2016 Morgan Delahaye-Prat. All Rights Reserved.
#
# Licensed under the Simplified BSD License (the "License");
# you may not use this file except in compliance with the License.
"""Test basic CRUD operations of the CRUDProvider."""
import json
import pytest
from hypr.providers import CRUDProvider
def deserialize(data, model):
"""Deserialize JSON data."""
data = json.loads(data)
if 'content' in data and 'count' in data:
return data['count'], [model.load(r) for r in data['content']]
return model.load(data)
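# Illustrative payload shapes handled by deserialize() above (shapes inferred
# from the checks in the function, not from the hypr documentation):
#   '{"count": 2, "content": [{...}, {...}]}'  ->  (2, [model, model])   # collection
#   '{"id": 1, "value": "foo"}'                ->  model                 # single resource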
@pytest.fixture
def app(app, model):
"""All the tests are conducted with application/json as default mime."""
provider = type('IntestProvider', (CRUDProvider,), {'__model__': model})
app.add_provider(provider, '/test', '/test/<int:id>')
return app
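# Route conventions used by the tests below (inferred from the test code, not
# from the hypr documentation): '/test' addresses the collection,
# '/test/<int:id>' a single resource, and the '?_bulk=1' query flag switches
# POST/PUT/DELETE into bulk mode operating on a JSON list payload.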
class TestModelCreate:
"""Test create."""
models = 'SQLiteModel',
def test_create(self, app, model):
"""Create one resource."""
payload = json.dumps({'value': 'foo'})
with app.test_client() as client:
rv = client.post('/test', data=payload)
assert rv.status == 201
data = deserialize(rv.text, model)
assert data == model.one(data.id)
def test_bulk_create(self, app, model):
"""Create multiple resources at once."""
payload = json.dumps([
{'value': 'foo'},
{'value': 'bar'}
])
with app.test_client() as client:
rv = client.post('/test?_bulk=1', data=payload)
assert rv.status == 201
count, resources = deserialize(rv.text, model)
for resource in resources:
assert resource == model.one(resource.id)
@pytest.mark.populate(5)
class TestProviderRead:
"""Test read."""
models = 'SQLiteModel',
def test_get_collection(self, app, model):
"""Test."""
with app.test_client() as client:
rv = client.get('/test')
assert rv.status == 200
count, resources = deserialize(rv.text, model)
assert count == model.count() == 5
assert sorted(resources) == sorted(model.get())
def test_get_one(self, app, model):
"""Test."""
with app.test_client() as client:
rv = client.get('/test/1')
assert rv.status == 200
resource = deserialize(rv.text, model)
assert resource == model.one(1)
@pytest.mark.populate(5)
class TestModelUpdate:
"""Test update."""
models = 'SQLiteModel',
def test_update(self, app, model):
"""Update an instance with PATCH."""
ref = model.one(1)
payload = json.dumps({'value': 'test_ok'})
with app.test_client() as client:
rv = client.patch('/test/1', data=payload)
assert rv.status == 200
resource = deserialize(rv.text, model)
assert resource != ref
assert resource == model.one(1)
def test_update_alt(self, app, model):
"""Update an instance with PUT."""
ref = model.one(2)
payload = json.dumps({'value': 'test_ok'})
with app.test_client() as client:
rv = client.put('/test/2', data=payload)
assert rv.status == 200
resource = deserialize(rv.text, model)
assert resource != ref
assert resource == model.one(2)
def test_bulk_update(self, app, model):
"""Update multiple resources at once."""
ref = [model.one(3), model.one(4)]
payload = json.dumps([
{'id': 3, 'value': 'test_ok0'},
{'id': 4, 'value': 'test_ok1'}
])
with app.test_client() as client:
rv = client.put('/test?_bulk=1', data=payload)
assert rv.status == 200
count, data = deserialize(rv.text, model)
for instance in ref:
assert instance != model.one(instance.id)
for resource in data:
assert resource == model.one(resource.id)
@pytest.mark.populate(5)
class TestModelDelete:
"""Test delete."""
models = 'SQLiteModel',
def test_delete(self, app, model):
"""Delete a resource."""
with app.test_client() as client:
rv = client.delete('/test/1')
assert rv.status == 204
assert model.one(1) is None
def test_bulk_delete(self, app, model):
"""Delete multiple resources at once."""
ref = [model.one(3), model.one(4)]
payload = json.dumps([
{'id': 3},
{'id': 4}
])
with app.test_client() as client:
rv = client.delete('/test?_bulk=1', data=payload)
assert rv.status == 204
for instance in ref:
assert model.one(instance.id) is None
@pytest.mark.populate(5)
class TestMissingPayloadException:
"""Test requests with missing payload."""
models = 'SQLiteModel',
def test_create(self, app, model):
"""Create one resource."""
with app.test_client() as client:
rv = client.post('/test')
assert rv.status == 400
def test_bulk_create(self, app, model):
"""Create multiple resources at once."""
with app.test_client() as client:
rv = client.post('/test?_bulk=1')
assert rv.status == 400
def test_update(self, app, model):
"""Update an instance."""
with app.test_client() as client:
rv = client.patch('/test/1')
assert rv.status == 400
def test_bulk_update(self, app, model):
"""Update multiple resources at once."""
with app.test_client() as client:
rv = client.put('/test?_bulk=1')
assert rv.status == 400
def test_bulk_delete(self, app, model):
"""Delete multiple resources at once."""
with app.test_client() as client:
rv = client.delete('/test?_bulk=1')
assert rv.status == 400
@pytest.mark.populate(5)
class TestInvalidPayloadException:
"""Test requests with invalid payload."""
models = 'SQLiteModel',
def test_create(self, app):
"""Create one resource."""
payload = json.dumps({'invalid': 'property'})
with app.test_client() as client:
rv = client.post('/test', data=payload)
assert rv.status == 400
def test_update(self, app, model):
"""Update one resource."""
ref = model.one(1)
payload = json.dumps({'invalid': 'property'})
with app.test_client() as client:
rv = client.patch('/test/1', data=payload)
assert rv.status == 400
assert ref == model.one(1)
@pytest.mark.populate(5)
class TestInvalidBulkRequest:
"""Test invalid bulk requests."""
models = 'SQLiteModel',
def test_bulk_create_missing_flag(self, app, model):
"""A missing bulk flag returns an error 400."""
payload = json.dumps([
{'value': 'foo'},
{'value': 'bar'}
])
with app.test_client() as client:
rv = client.post('/test', data=payload)
assert rv.status == 400
assert model.count() == 5
def test_bulk_update_missing_flag(self, app, model):
"""Update multiple resources at once."""
ref = model.get()
payload = json.dumps([
{'id': 3, 'value': 'test_ok0'},
{'id': 4, 'value': 'test_ok1'}
])
with app.test_client() as client:
rv = client.put('/test', data=payload)
assert rv.status == 400
assert sorted(ref) == sorted(model.get())
def test_bulk_delete_missing_flag(self, app, model):
"""Delete multiple resources at once."""
ref = model.get()
payload = json.dumps([
{'id': 3},
{'id': 4}
])
with app.test_client() as client:
rv = client.delete('/test', data=payload)
assert rv.status == 400
assert sorted(ref) == sorted(model.get())
def test_bulk_update_on_single_resource(self, app, model):
"""Update multiple resources at once."""
ref = model.get()
payload = json.dumps([
{'id': 3, 'value': 'test_ok0'},
{'id': 4, 'value': 'test_ok1'}
])
with app.test_client() as client:
rv = client.put('/test/1?_bulk=1', data=payload)
assert rv.status == 400
assert sorted(ref) == sorted(model.get())
def test_bulk_delete_on_single_resource(self, app, model):
"""Delete multiple resources at once."""
ref = model.get()
payload = json.dumps([
{'id': 3},
{'id': 4}
])
with app.test_client() as client:
rv = client.delete('/test/1?_bulk=1', data=payload)
assert rv.status == 400
assert sorted(ref) == sorted(model.get())
def test_bulk_update_unknown_resource(self, app, model):
"""Update multiple resources at once."""
ref = model.get()
payload = json.dumps([
{'id': 3, 'value': 'test_ok0'},
{'id': 100, 'value': 'test_ok1'}  # unknown resource
])
with app.test_client() as client:
rv = client.put('/test?_bulk=1', data=payload)
assert rv.status == 400
assert sorted(ref) == sorted(model.get())
def test_bulk_delete_unknown_resource(self, app, model):
"""Delete multiple resources at once."""
ref = model.get()
payload = json.dumps([
{'id': 3},
{'id': 100}  # unknown resource
])
with app.test_client() as client:
rv = client.delete('/test?_bulk=1', data=payload)
assert rv.status == 400
assert sorted(ref) == sorted(model.get())
def test_bulk_create_invalid_property(self, app, model):
"""Create multiple resources at once."""
payload = json.dumps([
{'value': 'foo'},
{'invalid': 'property'}
])
with app.test_client() as client:
rv = client.post('/test?_bulk=1', data=payload)
assert rv.status == 400
assert model.count() == 5
def test_bulk_update_invalid_property(self, app, model):
"""Update multiple resources at once."""
ref = model.get()
payload = json.dumps([
{'id': 3, 'value': 'test_ok0'},
{'id': 4, 'invalid': 'property'}
])
with app.test_client() as client:
rv = client.put('/test?_bulk=1', data=payload)
assert rv.status == 400
assert sorted(ref) == sorted(model.get())
def test_bulk_update_missing_id(self, app, model):
"""Update multiple resources at once."""
ref = model.get()
payload = json.dumps([
{'id': 3, 'value': 'test_ok0'},
{'value': 'test_ok1'} # missing id
])
with app.test_client() as client:
rv = client.put('/test?_bulk=1', data=payload)
assert rv.status == 400
assert sorted(ref) == sorted(model.get())
def test_bulk_delete_missing_id(self, app, model):
"""Delete multiple resources at once."""
ref = model.get()
payload = json.dumps([
{'id': 3},
{} # missing id
])
with app.test_client() as client:
rv = client.delete('/test?_bulk=1', data=payload)
assert rv.status == 400
assert sorted(ref) == sorted(model.get())
class TestEmptySet:
"""Crud operations (except create) on an empty database."""
models = 'SQLiteModel',
def test_get_collection(self, app, model):
"""Get an empty set."""
with app.test_client() as client:
rv = client.get('/test')
assert rv.status == 200
count, resources = deserialize(rv.text, model)
assert count == 0
assert resources == []
def test_get_one(self, app, model):
"""Get an unknown resource."""
with app.test_client() as client:
rv = client.get('/test/1')
assert rv.status == 404
def test_update(self, app, model):
"""Update an unknown resource."""
payload = json.dumps({'value': 'test_ok'})
with app.test_client() as client:
rv = client.patch('/test/1', data=payload)
assert rv.status == 404
def test_delete(self, app, model):
"""Delete an unknown resource."""
with app.test_client() as client:
rv = client.delete('/test/1')
assert rv.status == 404
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script Language Operators."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
# Used by py_util.cc to get tracebacks.
import traceback # pylint: disable=unused-import
import weakref
import numpy as np
import six
from tensorflow.python import _pywrap_py_func
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_script_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util import lazy_loader
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
autograph = lazy_loader.LazyLoader(
"autograph", globals(),
"tensorflow.python.autograph.impl.api")
# Map from EagerPyFunc token to tuple (tape, eager args, eager outputs);
# used for differentiation.
tape_cache = {}
def _maybe_copy_to_context_device(tensor, device_name):
"""Copy an EagerTensor to the current device if it's not on `device_name`."""
in_device = tensor.backing_device
if device_name == in_device:
return tensor
else:
# Note that EagerTensor._copy bypasses the placer and copies to the context
# device, which means e.g. int32 Tensors which would normally be forced onto
# the CPU can instead be placed on the GPU. This is necessary so that the
# PyFunc kernel always returns Tensors on the device it's executing on.
return tensor._copy() # pylint: disable=protected-access
class EagerFunc(object):
"""A wrapper for a function owned by an EagerPyFunc."""
def __init__(self, func, Tout, is_grad_func, use_tape_cache=True):
"""Constructs an EagerFunc.
Args:
func: The function to wrap.
Tout: A list of datatypes for the output; an empty list if the output is
None.
is_grad_func: Whether this EagerFunc is the gradient of another
EagerPyFunc.
use_tape_cache: (Optional.) Whether to cache `func` in the `tape_cache`.
For additional information, see description of `_eager_py_func`.
This parameter should be removed once the #35084 issue is fixed.
"""
self._func = func
self._out_dtypes = Tout
self._is_grad_func = is_grad_func
self._use_tape_cache = use_tape_cache
def _convert(self, value, dtype):
"""Converts `value` to a tensor of type `dtype`, with error checking.
Args:
value: The tensor to convert.
dtype: The desired dtype.
Returns:
A tensor of type `dtype`, or a zeros tensor if value is None and
this function is in fact a gradient function.
Raises:
RuntimeError: if `value` is a variable.
"""
if isinstance(value, resource_variable_ops.ResourceVariable):
raise RuntimeError(
"Attempting to return a variable from an eagerly executed py_func. "
"Only numeric data structures like Tensors or NumPy arrays should "
"be returned; to return the value of a variable, make sure to obtain "
"the Tensor backing it by calling `.read_value()` on the variable in "
"question: %s" % value)
if value is None and self._is_grad_func:
# Gradient functions may legitimately return a list that contains
# both Tensors and Python Nones. Unfortunately this breaks the
# OpKernel, so for now we replace None objects with zeros, which is
# mathematically correct but will prevent short-circuiting gradient
# computations.
#
# TODO(akshayka): Make it possible to return a list of both Tensors and
# Nones from an EagerPyFunc.
return constant_op.constant(0.0, dtype=dtype)
return ops.convert_to_tensor(value, dtype=dtype)
def __call__(self, device, token, args):
"""Passes `args` to `self._func`, which is executed eagerly."""
with context.eager_mode(), backprop.GradientTape() as tape:
# Only watch tensors with a floating or complex dtype.
for tensor in args:
for t in nest.flatten(tensor):
if t.dtype.is_floating or t.dtype.is_complex:
tape.watch(t)
ret = self._func(*args)
# copy the returned tensors to the PyFunc op's device if necessary.
device_name = device
if device_name is None:
# "None" here means "CPU", from the nullptr convention with C++ device
# pointers.
device_name = "/job:localhost/replica:0/task:0/device:CPU:0"
with ops.device(device):
if isinstance(ret, (tuple, list)):
outputs = [
_maybe_copy_to_context_device(self._convert(x, dtype=dtype),
device_name)
for (x, dtype) in zip(ret, self._out_dtypes)
]
elif ret is None:
outputs = None
else:
outputs = _maybe_copy_to_context_device(
self._convert(ret, dtype=self._out_dtypes[0]), device_name)
if self._use_tape_cache:
tape_cache[compat.as_bytes(token)] = (tape, args, outputs)
return outputs
class FuncRegistry(object):
"""A helper class to keep track of registered py functions.
FuncRegistry keeps a map from unique tokens (string) to python
functions, which takes numpy arrays and outputs numpy arrays.
"""
def __init__(self):
self._lock = threading.Lock()
self._unique_id = 0 # GUARDED_BY(self._lock)
# Only store weakrefs to the functions. The strong reference is stored in
# the graph.
self._funcs = weakref.WeakValueDictionary()
@property
def _ctx(self):
# N.B. This is needed to support calling py_func with GPU tensors,
# which must be transferred to CPU if used in any of the NumPy APIs.
context.ensure_initialized()
return context.context()._handle # pylint: disable=protected-access
def insert(self, func):
"""Registers `func` and returns a unique token for this entry."""
token = self._next_unique_token()
# Store a weakref to the function
self._funcs[token] = func
return token
def remove(self, token):
"""Removes the registered function corresponding to `token`."""
self._funcs.pop(token, None)
@staticmethod
def _convert(value, dtype=None):
"""Converts an arg to numpy, avoiding dangerous string and unicode dtypes.
Numpy pads with zeros when using string and unicode dtypes if different
components of a tensor have different lengths. This is bad: ignoring the
padding is wrong for text data, and removing the padding is wrong for binary
data. To avoid this bug, we redo the conversion using an object dtype.
Additionally, we convert unicode strings to (byte-)strings for
compatibility.
Args:
value: Value to convert to a numpy array.
dtype: (Optional.) Desired NumPy type for the returned value.
Returns:
A numpy array.
"""
result = np.asarray(value, dtype=dtype, order="C")
if result.dtype.char == "S" and result is not value:
return np.asarray(value, order="C", dtype=object)
elif result.dtype.char == "U" and result is not value:
value = np.vectorize(lambda x: x.encode("utf8"))(value)
return np.asarray(value, order="C", dtype=object)
elif result.dtype.char == "U":
return result.astype(np.bytes_)
else:
return result
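# A rough sketch of the fallback above (assuming standard NumPy behaviour;
# illustrative only, not part of the public API):
#   np.asarray([b'ab', b'c'])               # fixed-width 'S2' dtype, would zero-pad
#   FuncRegistry._convert([b'ab', b'c'])    # -> array([b'ab', b'c'], dtype=object)
#   FuncRegistry._convert([u'ab', u'c'])    # -> array([b'ab', b'c'], dtype=object)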
def __call__(self, token, device, args):
"""Calls the registered function for `token` with args.
Args:
token: A key into this `FuncRegistry` identifying which function to call.
device: Name of the device on which outputs of `token`'s corresponding
operation should be placed. Used iff the function registered for `token`
is an EagerPyFunc.
args: The arguments to pass to the function registered for `token`.
Returns:
The output of the function registered for `token`.
Raises:
ValueError: if no function is registered for `token`.
"""
func = self._funcs.get(token, None)
if func is None:
raise ValueError("callback %s is not found" % token)
if isinstance(func, EagerFunc):
# NB: Different invocations of the same py_func will share the same
# token, and the entries they stash in the tape_cache will collide.
# In practice, when executing a graph, this should only happen if
# the py_func is in a while_loop whose iterations are run in parallel
# or if the graph is being driven by concurrent session.run() calls.
#
# TODO(akshayka): Key the tape cache in a thread-safe way.
return func(device, token, args)
else:
ret = func(*args)
# Strings seem to lead to a memory leak here if they're not wrapped in a
# list.
if isinstance(ret, six.binary_type):
ret = [ret]
# Ensures that we return either a single numpy array or a list of numpy
# arrays.
if isinstance(ret, (tuple, list)):
return [self._convert(x) for x in ret]
else:
return self._convert(ret)
def size(self):
"""Returns how many functions are currently registered."""
return len(self._funcs)
def _next_unique_token(self):
"""Returns a unique token."""
with self._lock:
uid = self._unique_id
self._unique_id += 1
return "pyfunc_%d" % uid
# Global registry for py functions.
_py_funcs = FuncRegistry()
_pywrap_py_func.initialize_py_trampoline(_py_funcs)
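# A minimal usage sketch of the registry (illustrative only; in practice tokens
# are allocated by _internal_py_func below and calls come from the C++
# trampoline registered above):
#   fn = lambda x: x * 2                          # keep a strong ref; storage is weak
#   token = _py_funcs.insert(fn)                  # e.g. "pyfunc_0"
#   _py_funcs(token, None, [np.array([1, 2])])    # -> array([2, 4])
#   _py_funcs.remove(token)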
def _internal_py_func(func,
inp,
Tout,
stateful=None,
eager=False,
is_grad_func=False,
name=None,
use_tape_cache=True):
"""See documentation for py_func and eager_py_func."""
if not callable(func):
raise ValueError("Expected func to be callable, got func of type {}".format(
type(func)))
original_func = func
func = autograph.do_not_convert(func)
is_list_or_tuple = False
if isinstance(Tout, (list, tuple)):
is_list_or_tuple = True
else:
Tout = [Tout]
if eager:
func = EagerFunc(func, Tout, is_grad_func, use_tape_cache=use_tape_cache)
# Tying the registered function's lifetime with the current default graph is
# not reliable. For example, Estimator-based binaries may switch graphs in
between model training and evaluation, via saved_model. Those binaries work
# because the original function is global, and break once the registered
# function is an anonymous lambda, like the one produced by do_not_convert.
# To avoid breaking those cases, we attach the wrapper to the original
# function so that their lifetime is connected.
# TODO(b/144286616): Remove this.
if tf_inspect.isfunction(original_func):
# Note: this check is needed because original_func may be a descriptor
# (https://docs.python.org/3/howto/descriptor.html)
# and we can't attach attributes to those.
original_func.ag_dnc_wrapper__ = func
token = _py_funcs.insert(func)
# We tie the registered function's lifetime with the current default graph,
# i.e., when the current graph is destroyed, we remove its py funcs.
graph = ops.get_default_graph()
while True:
current_graph = graph
if isinstance(graph, function._FuncGraph): # pylint: disable=protected-access
graph = graph._outer_graph # pylint: disable=protected-access
elif isinstance(graph, func_graph.FuncGraph):
graph = graph.outer_graph
if graph is current_graph:
break
# TODO(zhifengc): Consider adding a Graph method to collect
# `cleanup` objects in one of its members.
if not hasattr(graph, "_py_funcs_used_in_graph"):
graph._py_funcs_used_in_graph = [] # pylint: disable=protected-access
# Store a reference to the function in the graph to ensure it stays alive
# as long as the graph lives. When the graph is destroyed, the function
# is left to the garbage collector for destruction as well.
graph._py_funcs_used_in_graph.append(func) # pylint: disable=protected-access
if eager:
result = gen_script_ops.eager_py_func(
input=inp,
token=token,
is_async=context.is_async(),
Tout=Tout,
name=name)
else:
if stateful:
result = gen_script_ops.py_func(
input=inp, token=token, Tout=Tout, name=name)
else:
result = gen_script_ops.py_func_stateless(
input=inp, token=token, Tout=Tout, name=name)
return result if is_list_or_tuple else result[0]
# TODO(akshayka): Implement higher-order derivatives.
@ops.RegisterGradient("EagerPyFunc")
def _EagerPyFuncGrad(op, *dy):
"""Computes the gradient of an EagerPyFunc."""
token = op.get_attr("token")
def eagerly_executed_grad(*dy):
tape, eager_inputs, eager_outputs = tape_cache.pop(compat.as_bytes(token))
return tape.gradient(eager_outputs, eager_inputs, output_gradients=dy)
with ops.control_dependencies(op.outputs):
return _internal_py_func(
func=eagerly_executed_grad,
inp=dy,
Tout=[tensor.dtype for tensor in op.inputs],
eager=True,
is_grad_func=True)
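# Summary of the flow above (no new behaviour): the forward EagerPyFunc stashes
# (tape, inputs, outputs) in `tape_cache` under its token, and this gradient op
# pops that entry and replays it through `tape.gradient`, which is why the op is
# only once-differentiable.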
def _eager_py_func(func, inp, Tout, name=None, use_tape_cache=True):
"""Wraps a python function into a TensorFlow op that executes it eagerly.
This function is the internal implementation for `eager_py_func`, see the
`eager_py_func` docstring for the full description.
Note: this function was added as a layer of indirection for one specific
purpose: as a workaround for GitHub issue #35084. It does everything
`eager_py_func` used to do, with one difference: it can instruct the
underlying EagerFunc not to use `tape_cache`, to avoid a memory leak. Once
issue #35084 is fixed, this function should be removed, its body moved back
into `eager_py_func`, and all call sites reverted to calling `eager_py_func`
without any `use_tape_cache` argument.
Args:
func: A Python function which accepts a list of `Tensor` objects having
element types that match the corresponding `tf.Tensor` objects in `inp`
and returns a list of `Tensor` objects (or a single `Tensor`, or `None`)
having element types that match the corresponding values in `Tout`.
inp: A list of `Tensor` objects.
Tout: A list or tuple of tensorflow data types or a single tensorflow data
type if there is only one, indicating what `func` returns; an empty list
if no value is returned (i.e., if the return value is `None`).
name: A name for the operation (optional).
use_tape_cache: (Optional.) Whether to cache `func` in the `tape_cache`.
For additional information, see description of `_eager_py_func`.
This parameter should be removed once the #35084 issue is fixed.
Returns:
A list of `Tensor` or a single `Tensor` which `func` computes; an empty list
if `func` returns None.
"""
if ops.executing_eagerly_outside_functions():
with ops.device(context.context().host_address_space()):
return _internal_py_func(
func=func,
inp=inp,
Tout=Tout,
eager=True,
name=name,
use_tape_cache=use_tape_cache)
return _internal_py_func(
func=func,
inp=inp,
Tout=Tout,
eager=True,
name=name,
use_tape_cache=use_tape_cache)
@tf_export("py_function")
@dispatch.add_dispatch_support
def eager_py_func(func, inp, Tout, name=None):
"""Wraps a python function into a TensorFlow op that executes it eagerly.
This function allows expressing computations in a TensorFlow graph as
Python functions. In particular, it wraps a Python function `func`
in a once-differentiable TensorFlow operation that executes it with eager
execution enabled. As a consequence, `tf.py_function` makes it
possible to express control flow using Python constructs (`if`, `while`,
`for`, etc.), instead of TensorFlow control flow constructs (`tf.cond`,
`tf.while_loop`). For example, you might use `tf.py_function` to
implement the log huber function:
```python
def log_huber(x, m):
if tf.abs(x) <= m:
return x**2
else:
return m**2 * (1 - 2 * tf.math.log(m) + tf.math.log(x**2))
x = tf.compat.v1.placeholder(tf.float32)
m = tf.compat.v1.placeholder(tf.float32)
y = tf.py_function(func=log_huber, inp=[x, m], Tout=tf.float32)
dy_dx = tf.gradients(y, x)[0]
with tf.compat.v1.Session() as sess:
# The session executes `log_huber` eagerly. Given the feed values below,
# it will take the first branch, so `y` evaluates to 1.0 and
# `dy_dx` evaluates to 2.0.
y, dy_dx = sess.run([y, dy_dx], feed_dict={x: 1.0, m: 2.0})
```
You can also use `tf.py_function` to debug your models at runtime
using Python tools, i.e., you can isolate portions of your code that
you want to debug, wrap them in Python functions and insert `pdb` tracepoints
or print statements as desired, and wrap those functions in
`tf.py_function`.
For more information on eager execution, see the
[Eager guide](https://tensorflow.org/guide/eager).
`tf.py_function` is similar in spirit to `tf.compat.v1.py_func`, but unlike
the latter, the former lets you use TensorFlow operations in the wrapped
Python function. In particular, while `tf.compat.v1.py_func` only runs on
CPUs and wraps functions that take NumPy arrays as inputs and return NumPy
arrays as outputs, `tf.py_function` can be placed on GPUs and wraps functions
that take Tensors as inputs, execute TensorFlow operations in their bodies,
and return Tensors as outputs.
Like `tf.compat.v1.py_func`, `tf.py_function` has the following limitations
with respect to serialization and distribution:
* The body of the function (i.e. `func`) will not be serialized in a
`GraphDef`. Therefore, you should not use this function if you need to
serialize your model and restore it in a different environment.
* The operation must run in the same address space as the Python program
that calls `tf.py_function()`. If you are using distributed
TensorFlow, you must run a `tf.distribute.Server` in the same process as the
program that calls `tf.py_function()` and you must pin the created
operation to a device in that server (e.g. using `with tf.device():`).
Args:
func: A Python function which accepts a list of `Tensor` objects having
element types that match the corresponding `tf.Tensor` objects in `inp`
and returns a list of `Tensor` objects (or a single `Tensor`, or `None`)
having element types that match the corresponding values in `Tout`.
inp: A list of `Tensor` objects.
Tout: A list or tuple of tensorflow data types or a single tensorflow data
type if there is only one, indicating what `func` returns; an empty list
if no value is returned (i.e., if the return value is `None`).
name: A name for the operation (optional).
Returns:
A list of `Tensor` or a single `Tensor` which `func` computes; an empty list
if `func` returns None.
"""
return _eager_py_func(
func=func, inp=inp, Tout=Tout, name=name, use_tape_cache=True)
def py_func_common(func, inp, Tout, stateful=True, name=None):
"""Wraps a python function and uses it as a TensorFlow op.
Given a python function `func`, which takes numpy arrays as its
arguments and returns numpy arrays as its outputs, wrap this function as an
operation in a TensorFlow graph. The following snippet constructs a simple
TensorFlow graph that invokes the `np.sinh()` NumPy function as a operation
in the graph:
```python
def my_func(x):
# x will be a numpy array with the contents of the placeholder below
return np.sinh(x)
input = tf.compat.v1.placeholder(tf.float32)
y = tf.compat.v1.py_func(my_func, [input], tf.float32)
```
**N.B.** The `tf.compat.v1.py_func()` operation has the following known
limitations:
* The body of the function (i.e. `func`) will not be serialized in a
`GraphDef`. Therefore, you should not use this function if you need to
serialize your model and restore it in a different environment.
* The operation must run in the same address space as the Python program
that calls `tf.compat.v1.py_func()`. If you are using distributed
TensorFlow, you must run a `tf.distribute.Server` in the same process as the
program that calls `tf.compat.v1.py_func()` and you must pin the created
operation to a device in that server (e.g. using `with tf.device():`).
Args:
func: A Python function, which accepts `ndarray` objects as arguments and
returns a list of `ndarray` objects (or a single `ndarray`). This function
must accept as many arguments as there are tensors in `inp`, and these
argument types will match the corresponding `tf.Tensor` objects in `inp`.
The returned `ndarray`s must match the number and types defined in `Tout`.
Important Note: Input and output numpy `ndarray`s of `func` are not
guaranteed to be copies. In some cases their underlying memory will be
shared with the corresponding TensorFlow tensors. In-place modification
or storing `func` input or return values in python datastructures
without explicit (np.)copy can have non-deterministic consequences.
inp: A list of `Tensor` objects.
Tout: A list or tuple of tensorflow data types or a single tensorflow data
type if there is only one, indicating what `func` returns.
stateful: (Boolean.) If True, the function should be considered stateful. If
a function is stateless, when given the same input it will return the same
output and have no observable side effects. Optimizations such as common
subexpression elimination are only performed on stateless operations.
name: A name for the operation (optional).
Returns:
A list of `Tensor` or a single `Tensor` which `func` computes.
"""
if context.executing_eagerly():
result = func(*[np.array(x) for x in inp])
result = nest.flatten(result)
result = [x if x is None else ops.convert_to_tensor(x) for x in result]
if len(result) == 1:
# Mimic the automatic unwrapping in graph-mode py_func
result, = result
return result
if ops.executing_eagerly_outside_functions():
with ops.device(context.context().host_address_space()):
return _internal_py_func(
func=func,
inp=inp,
Tout=Tout,
stateful=stateful,
eager=False,
name=name)
return _internal_py_func(
func=func, inp=inp, Tout=Tout, stateful=stateful, eager=False, name=name)
@deprecation.deprecated(
date=None,
instructions="""tf.py_func is deprecated in TF V2. Instead, there are two
options available in V2.
- tf.py_function takes a python function which manipulates tf eager
tensors instead of numpy arrays. It's easy to convert a tf eager tensor to
an ndarray (just call tensor.numpy()) but having access to eager tensors
means `tf.py_function`s can use accelerators such as GPUs as well as
being differentiable using a gradient tape.
- tf.numpy_function maintains the semantics of the deprecated tf.py_func
(it is not differentiable, and manipulates numpy arrays). It drops the
stateful argument making all functions stateful.
""")
@tf_export(v1=["py_func"])
@dispatch.add_dispatch_support
def py_func(func, inp, Tout, stateful=True, name=None):
return py_func_common(func, inp, Tout, stateful, name=name)
py_func.__doc__ = "%s" % py_func_common.__doc__
@tf_export("numpy_function")
@dispatch.add_dispatch_support
def numpy_function(func, inp, Tout, name=None):
"""Wraps a python function and uses it as a TensorFlow op.
Given a python function `func` wrap this function as an operation in a
TensorFlow function. `func` must take numpy arrays as its arguments and
return numpy arrays as its outputs.
The following example creates a TensorFlow graph with `np.sinh()` as an
operation in the graph:
>>> def my_numpy_func(x):
... # x will be a numpy array with the contents of the input to the
... # tf.function
... return np.sinh(x)
>>> @tf.function(input_signature=[tf.TensorSpec(None, tf.float32)])
... def tf_function(input):
... y = tf.numpy_function(my_numpy_func, [input], tf.float32)
... return y * y
>>> tf_function(tf.constant(1.))
<tf.Tensor: shape=(), dtype=float32, numpy=1.3810978>
Comparison to `tf.py_function`:
`tf.py_function` and `tf.numpy_function` are very similar, except that
`tf.numpy_function` takes numpy arrays, and not `tf.Tensor`s. If you want the
function to contain `tf.Tensors`, and have any TensorFlow operations executed
in the function be differentiable, please use `tf.py_function`.
Note: The `tf.numpy_function` operation has the following known
limitations:
* The body of the function (i.e. `func`) will not be serialized in a
`tf.SavedModel`. Therefore, you should not use this function if you need to
serialize your model and restore it in a different environment.
* The operation must run in the same address space as the Python program
that calls `tf.numpy_function()`. If you are using distributed
TensorFlow, you must run a `tf.distribute.Server` in the same process as the
program that calls `tf.numpy_function`, and you must pin the created
operation to a device in that server (e.g. using `with tf.device():`).
* Since the function takes numpy arrays, you cannot take gradients
through a numpy_function. If you require something that is differentiable,
please consider using tf.py_function.
* The resulting function is assumed stateful and will never be optimized.
Args:
func: A Python function, which accepts `numpy.ndarray` objects as arguments
and returns a list of `numpy.ndarray` objects (or a single
`numpy.ndarray`). This function must accept as many arguments as there are
tensors in `inp`, and these argument types will match the corresponding
`tf.Tensor` objects in `inp`. The returned `numpy.ndarray`s must match the
number and types defined in `Tout`.
Important Note: Input and output `numpy.ndarray`s of `func` are not
guaranteed to be copies. In some cases their underlying memory will be
shared with the corresponding TensorFlow tensors. In-place modification
or storing `func` input or return values in python datastructures
without explicit (np.)copy can have non-deterministic consequences.
inp: A list of `tf.Tensor` objects.
Tout: A list or tuple of tensorflow data types or a single tensorflow data
type if there is only one, indicating what `func` returns.
name: (Optional) A name for the operation.
Returns:
Single or list of `tf.Tensor` which `func` computes.
"""
return py_func_common(func, inp, Tout, stateful=True, name=name)
ops.NotDifferentiable("PyFunc")
ops.NotDifferentiable("PyFuncStateless")
|
|
##
# Copyright (c) 2012-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Tests for L{txdav.common.datastore.sql}.
"""
from uuid import UUID
from twext.enterprise.dal.syntax import Insert
from twext.enterprise.dal.syntax import Select
from twisted.internet.defer import Deferred
from twisted.internet.defer import inlineCallbacks, returnValue, succeed
from twisted.internet.task import Clock
from twisted.trial.unittest import TestCase
# from twistedcaldav.vcard import Component as VCard
from txdav.common.datastore.sql import (
log, CommonStoreTransactionMonitor,
CommonHome, CommonHomeChild, ECALENDARTYPE
)
from txdav.common.datastore.sql_tables import schema
from txdav.common.datastore.sql_util import _normalizeColumnUUIDs, \
fixUUIDNormalization
from txdav.common.datastore.test.util import CommonCommonTests
from txdav.common.icommondatastore import AllRetriesFailed
from txdav.xml import element as davxml
exampleUID = UUID("a" * 32)
denormalizedUID = unicode(exampleUID)
normalizedUID = denormalizedUID.upper()
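# For reference, the values implied by the definitions above are:
#   denormalizedUID == u"aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
#   normalizedUID == u"AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAAA"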
class CommonSQLStoreTests(CommonCommonTests, TestCase):
"""
Tests for shared functionality in L{txdav.common.datastore.sql}.
"""
@inlineCallbacks
def setUp(self):
"""
Set up the store and directory used by these tests.
"""
yield super(CommonSQLStoreTests, self).setUp()
yield self.buildStoreAndDirectory(
extraUids=(denormalizedUID, normalizedUID, u"uid")
)
@inlineCallbacks
def test_logging(self):
"""
txn.execSQL works with all logging options on.
"""
# Patch config to turn on logging then rebuild the store
self.patch(self.store, "logLabels", True)
self.patch(self.store, "logStats", True)
self.patch(self.store, "logSQL", True)
txn = self.transactionUnderTest()
cs = schema.CALENDARSERVER
version = (yield Select(
[cs.VALUE],
From=cs,
Where=cs.NAME == "VERSION",
).on(txn))
self.assertNotEqual(version, None)
self.assertEqual(len(version), 1)
self.assertEqual(len(version[0]), 1)
def test_logWaits(self):
"""
CommonStoreTransactionMonitor logs waiting transactions.
"""
c = Clock()
self.patch(CommonStoreTransactionMonitor, "callLater", c.callLater)
# Patch config to turn on log waits then rebuild the store
self.patch(self.store, "logTransactionWaits", 1)
ctr = [0]
def counter(*args, **kwargs):
ctr[0] += 1
self.patch(log, "error", counter)
txn = self.transactionUnderTest()
c.advance(2)
self.assertNotEqual(ctr[0], 0)
txn.abort()
def test_txnTimeout(self):
"""
CommonStoreTransactionMonitor terminates long transactions.
"""
c = Clock()
self.patch(CommonStoreTransactionMonitor, "callLater", c.callLater)
# Patch config to turn on transaction timeouts then rebuild the store
self.patch(self.store, "timeoutTransactions", 1)
ctr = [0]
def counter(*args, **kwargs):
ctr[0] += 1
self.patch(log, "error", counter)
txn = self.transactionUnderTest()
self.assertFalse(txn.timedout)
c.advance(2)
self.assertNotEqual(ctr[0], 0)
self.assertTrue(txn._sqlTxn._completed)
self.assertTrue(txn.timedout)
def test_logWaitsAndTxnTimeout(self):
"""
CommonStoreTransactionMonitor logs waiting transactions and terminates
long transactions.
"""
c = Clock()
self.patch(CommonStoreTransactionMonitor, "callLater", c.callLater)
# Patch config to turn on log waits then rebuild the store
self.patch(self.store, "logTransactionWaits", 1)
self.patch(self.store, "timeoutTransactions", 2)
ctr = [0, 0]
def counter(logStr, *args, **kwargs):
if "wait" in logStr:
ctr[0] += 1
elif "abort" in logStr:
ctr[1] += 1
self.patch(log, "error", counter)
txn = self.transactionUnderTest()
c.advance(2)
self.assertNotEqual(ctr[0], 0)
self.assertNotEqual(ctr[1], 0)
self.assertTrue(txn._sqlTxn._completed)
@inlineCallbacks
def test_subtransactionOK(self):
"""
txn.subtransaction runs loop once.
"""
txn = self.transactionUnderTest()
ctr = [0]
def _test(subtxn):
ctr[0] += 1
cs = schema.CALENDARSERVER
return Select(
[cs.VALUE, ],
From=cs,
Where=cs.NAME == 'VERSION',
).on(subtxn)
(yield txn.subtransaction(_test, retries=0))[0][0]
self.assertEqual(ctr[0], 1)
@inlineCallbacks
def test_subtransactionOKAfterRetry(self):
"""
txn.subtransaction runs loop twice when one failure.
"""
txn = self.transactionUnderTest()
ctr = [0]
def _test(subtxn):
ctr[0] += 1
if ctr[0] == 1:
raise ValueError
cs = schema.CALENDARSERVER
return Select(
[cs.VALUE, ],
From=cs,
Where=cs.NAME == 'VERSION',
).on(subtxn)
(yield txn.subtransaction(_test, retries=1))[0][0]
self.assertEqual(ctr[0], 2)
@inlineCallbacks
def test_subtransactionFailNoRetry(self):
"""
txn.subtransaction runs loop once when one failure and no retries.
"""
txn = self.transactionUnderTest()
ctr = [0]
def _test(subtxn):
ctr[0] += 1
raise ValueError
cs = schema.CALENDARSERVER
return Select(
[cs.VALUE, ],
From=cs,
Where=cs.NAME == 'VERSION',
).on(subtxn)
try:
(yield txn.subtransaction(_test, retries=0))[0][0]
except AllRetriesFailed:
pass
else:
self.fail("AllRetriesFailed not raised")
self.assertEqual(ctr[0], 1)
@inlineCallbacks
def test_subtransactionFailSomeRetries(self):
"""
txn.subtransaction runs loop three times when all fail and two retries
requested.
"""
txn = self.transactionUnderTest()
ctr = [0]
def _test(subtxn):
ctr[0] += 1
raise ValueError
cs = schema.CALENDARSERVER
return Select(
[cs.VALUE, ],
From=cs,
Where=cs.NAME == 'VERSION',
).on(subtxn)
try:
(yield txn.subtransaction(_test, retries=2))[0][0]
except AllRetriesFailed:
pass
else:
self.fail("AllRetriesFailed not raised")
self.assertEqual(ctr[0], 3)
@inlineCallbacks
def test_subtransactionAbortOuterTransaction(self):
"""
If an outer transaction that is holding a subtransaction open is
aborted, then the L{Deferred} returned by L{subtransaction} raises
L{AllRetriesFailed}.
"""
txn = self.transactionUnderTest()
cs = schema.CALENDARSERVER
yield Select([cs.VALUE], From=cs).on(txn)
waitAMoment = Deferred()
@inlineCallbacks
def later(subtxn):
yield waitAMoment
value = yield Select([cs.VALUE], From=cs).on(subtxn)
returnValue(value)
started = txn.subtransaction(later)
txn.abort()
waitAMoment.callback(True)
try:
result = yield started
except AllRetriesFailed:
pass
else:
self.fail("AllRetriesFailed not raised, %r returned instead" %
(result,))
@inlineCallbacks
def test_changeRevision(self):
"""
CommonHomeChild._changeRevision actions.
"""
class TestCommonHome(CommonHome):
pass
class TestCommonHomeChild(CommonHomeChild):
_homeChildSchema = schema.CALENDAR
_homeChildMetaDataSchema = schema.CALENDAR_METADATA
_bindSchema = schema.CALENDAR_BIND
_revisionsSchema = schema.CALENDAR_OBJECT_REVISIONS
def resourceType(self):
return davxml.ResourceType.calendar
txn = self.transactionUnderTest()
home = yield txn.homeWithUID(ECALENDARTYPE, "uid", create=True)
homeChild = yield TestCommonHomeChild.create(home, "B")
# insert test
token = yield homeChild.syncToken()
yield homeChild._changeRevision("insert", "C")
changed = yield homeChild.resourceNamesSinceToken(token)
self.assertEqual(changed, (["C"], [], [],))
# update test
token = yield homeChild.syncToken()
yield homeChild._changeRevision("update", "C")
changed = yield homeChild.resourceNamesSinceToken(token)
self.assertEqual(changed, (["C"], [], [],))
# delete test
token = yield homeChild.syncToken()
yield homeChild._changeRevision("delete", "C")
changed = yield homeChild.resourceNamesSinceToken(token)
self.assertEqual(changed, ([], ["C"], [],))
# missing update test
token = yield homeChild.syncToken()
yield homeChild._changeRevision("update", "D")
changed = yield homeChild.resourceNamesSinceToken(token)
self.assertEqual(changed, (["D"], [], [],))
# missing delete test
token = yield homeChild.syncToken()
yield homeChild._changeRevision("delete", "E")
changed = yield homeChild.resourceNamesSinceToken(token)
self.assertEqual(changed, ([], ["E"], [],))
yield txn.abort()
@inlineCallbacks
def test_normalizeColumnUUIDs(self):
"""
L{_normalizeColumnUUIDs} upper-cases only UUIDs in a given column.
"""
rp = schema.RESOURCE_PROPERTY
txn = self.transactionUnderTest()
# setup
yield Insert({
rp.RESOURCE_ID: 1,
rp.NAME: "asdf",
rp.VALUE: "property-value",
rp.VIEWER_UID: "not-a-uuid"}).on(txn)
yield Insert({
rp.RESOURCE_ID: 2,
rp.NAME: "fdsa",
rp.VALUE: "another-value",
rp.VIEWER_UID: "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"}
).on(txn)
# test
yield _normalizeColumnUUIDs(txn, rp.VIEWER_UID)
self.assertEqual(
map(
list,
(
yield Select(
[rp.RESOURCE_ID, rp.NAME, rp.VALUE, rp.VIEWER_UID],
From=rp,
OrderBy=rp.RESOURCE_ID, Ascending=True,
).on(txn)
)
),
[
[1, "asdf", "property-value", "not-a-uuid"],
[
2, "fdsa",
"another-value", "AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAAA"
]
]
)
@inlineCallbacks
def allHomeUIDs(self, table=schema.CALENDAR_HOME):
"""
Get a listing of all UIDs in the current store.
"""
results = yield (Select([table.OWNER_UID], From=table)
.on(self.transactionUnderTest()))
yield self.commit()
returnValue(results)
@inlineCallbacks
def test_fixUUIDNormalization_lowerToUpper(self):
"""
L{fixUUIDNormalization} will fix the normalization of UUIDs. If a home
is found with the wrong case but no duplicate, it will simply be
upper-cased.
"""
t1 = self.transactionUnderTest()
yield t1.calendarHomeWithUID(denormalizedUID, create=True)
yield self.commit()
yield fixUUIDNormalization(self.storeUnderTest())
self.assertEqual(
map(list, (yield self.allHomeUIDs())),
[[normalizedUID]]
)
@inlineCallbacks
def test_fixUUIDNormalization_lowerToUpper_notification(self):
"""
L{fixUUIDNormalization} will fix the normalization of UUIDs. If a home
is found with the wrong case but no duplicate, it will simply be
upper-cased.
"""
t1 = self.transactionUnderTest()
yield t1.notificationsWithUID(denormalizedUID, create=True)
yield self.commit()
yield fixUUIDNormalization(self.storeUnderTest())
self.assertEqual(
map(list, (yield self.allHomeUIDs(schema.NOTIFICATION_HOME))),
[[normalizedUID]]
)
@inlineCallbacks
def test_fixUUIDNormalization_lowerToUpper_addressbook(self):
"""
L{fixUUIDNormalization} will fix the normalization of UUIDs. If a home
is found with the wrong case but no duplicate, it will simply be
upper-cased.
"""
t1 = self.transactionUnderTest()
yield t1.addressbookHomeWithUID(denormalizedUID, create=True)
yield self.commit()
yield fixUUIDNormalization(self.storeUnderTest())
self.assertEqual(
map(list, (yield self.allHomeUIDs(schema.ADDRESSBOOK_HOME))),
[[normalizedUID]]
)
@inlineCallbacks
def test_inTransaction(self):
"""
Make sure a successful operation commits the transaction while an
unsuccessful operation (raised an exception) aborts the transaction.
"""
store = self.storeUnderTest()
def txnCreator(label):
self.txn = StubTransaction(label)
return self.txn
def goodOperation(txn):
return succeed(None)
def badOperation(txn):
1 / 0
return succeed(None)
yield store.inTransaction("good", goodOperation, txnCreator)
self.assertEquals(self.txn.action, "committed")
self.assertEquals(self.txn.label, "good")
try:
yield store.inTransaction("bad", badOperation, txnCreator)
except:
pass
self.assertEquals(self.txn.action, "aborted")
self.assertEquals(self.txn.label, "bad")
class StubTransaction(object):
def __init__(self, label):
self.label = label
self.action = None
def commit(self):
self.action = "committed"
return succeed(None)
def abort(self):
self.action = "aborted"
return succeed(None)
|
|
"""The tests for the demo climate component."""
import unittest
from homeassistant.util.unit_system import (
METRIC_SYSTEM
)
from homeassistant.setup import setup_component
from homeassistant.components import climate
from tests.common import get_test_home_assistant
ENTITY_CLIMATE = 'climate.hvac'
ENTITY_ECOBEE = 'climate.ecobee'
ENTITY_HEATPUMP = 'climate.heatpump'
class TestDemoClimate(unittest.TestCase):
"""Test the demo climate hvac."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.hass.config.units = METRIC_SYSTEM
self.assertTrue(setup_component(self.hass, climate.DOMAIN, {
'climate': {
'platform': 'demo',
}}))
def tearDown(self): # pylint: disable=invalid-name
"""Stop down everything that was started."""
self.hass.stop()
def test_setup_params(self):
"""Test the initial parameters."""
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual(21, state.attributes.get('temperature'))
self.assertEqual('on', state.attributes.get('away_mode'))
self.assertEqual(22, state.attributes.get('current_temperature'))
self.assertEqual("On High", state.attributes.get('fan_mode'))
self.assertEqual(67, state.attributes.get('humidity'))
self.assertEqual(54, state.attributes.get('current_humidity'))
self.assertEqual("Off", state.attributes.get('swing_mode'))
self.assertEqual("cool", state.attributes.get('operation_mode'))
self.assertEqual('off', state.attributes.get('aux_heat'))
def test_default_setup_params(self):
"""Test the setup with default parameters."""
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual(7, state.attributes.get('min_temp'))
self.assertEqual(35, state.attributes.get('max_temp'))
self.assertEqual(30, state.attributes.get('min_humidity'))
self.assertEqual(99, state.attributes.get('max_humidity'))
def test_set_only_target_temp_bad_attr(self):
"""Test setting the target temperature without required attribute."""
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual(21, state.attributes.get('temperature'))
climate.set_temperature(self.hass, None, ENTITY_CLIMATE)
self.hass.block_till_done()
self.assertEqual(21, state.attributes.get('temperature'))
def test_set_only_target_temp(self):
"""Test the setting of the target temperature."""
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual(21, state.attributes.get('temperature'))
climate.set_temperature(self.hass, 30, ENTITY_CLIMATE)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual(30.0, state.attributes.get('temperature'))
def test_set_only_target_temp_with_convert(self):
"""Test the setting of the target temperature."""
state = self.hass.states.get(ENTITY_HEATPUMP)
self.assertEqual(20, state.attributes.get('temperature'))
climate.set_temperature(self.hass, 21, ENTITY_HEATPUMP)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_HEATPUMP)
self.assertEqual(21.0, state.attributes.get('temperature'))
def test_set_target_temp_range(self):
"""Test the setting of the target temperature with range."""
state = self.hass.states.get(ENTITY_ECOBEE)
self.assertEqual(None, state.attributes.get('temperature'))
self.assertEqual(21.0, state.attributes.get('target_temp_low'))
self.assertEqual(24.0, state.attributes.get('target_temp_high'))
climate.set_temperature(self.hass, target_temp_high=25,
target_temp_low=20, entity_id=ENTITY_ECOBEE)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_ECOBEE)
self.assertEqual(None, state.attributes.get('temperature'))
self.assertEqual(20.0, state.attributes.get('target_temp_low'))
self.assertEqual(25.0, state.attributes.get('target_temp_high'))
def test_set_target_temp_range_bad_attr(self):
"""Test setting the target temperature range without attribute."""
state = self.hass.states.get(ENTITY_ECOBEE)
self.assertEqual(None, state.attributes.get('temperature'))
self.assertEqual(21.0, state.attributes.get('target_temp_low'))
self.assertEqual(24.0, state.attributes.get('target_temp_high'))
climate.set_temperature(self.hass, temperature=None,
entity_id=ENTITY_ECOBEE, target_temp_low=None,
target_temp_high=None)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_ECOBEE)
self.assertEqual(None, state.attributes.get('temperature'))
self.assertEqual(21.0, state.attributes.get('target_temp_low'))
self.assertEqual(24.0, state.attributes.get('target_temp_high'))
def test_set_target_humidity_bad_attr(self):
"""Test setting the target humidity without required attribute."""
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual(67, state.attributes.get('humidity'))
climate.set_humidity(self.hass, None, ENTITY_CLIMATE)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual(67, state.attributes.get('humidity'))
def test_set_target_humidity(self):
"""Test the setting of the target humidity."""
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual(67, state.attributes.get('humidity'))
climate.set_humidity(self.hass, 64, ENTITY_CLIMATE)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual(64.0, state.attributes.get('humidity'))
def test_set_fan_mode_bad_attr(self):
"""Test setting fan mode without required attribute."""
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual("On High", state.attributes.get('fan_mode'))
climate.set_fan_mode(self.hass, None, ENTITY_CLIMATE)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual("On High", state.attributes.get('fan_mode'))
def test_set_fan_mode(self):
"""Test setting of new fan mode."""
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual("On High", state.attributes.get('fan_mode'))
climate.set_fan_mode(self.hass, "On Low", ENTITY_CLIMATE)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual("On Low", state.attributes.get('fan_mode'))
def test_set_swing_mode_bad_attr(self):
"""Test setting swing mode without required attribute."""
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual("Off", state.attributes.get('swing_mode'))
climate.set_swing_mode(self.hass, None, ENTITY_CLIMATE)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual("Off", state.attributes.get('swing_mode'))
def test_set_swing(self):
"""Test setting of new swing mode."""
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual("Off", state.attributes.get('swing_mode'))
climate.set_swing_mode(self.hass, "Auto", ENTITY_CLIMATE)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual("Auto", state.attributes.get('swing_mode'))
def test_set_operation_bad_attr_and_state(self):
"""Test setting operation mode without required attribute.
Also check the state.
"""
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual("cool", state.attributes.get('operation_mode'))
self.assertEqual("cool", state.state)
climate.set_operation_mode(self.hass, None, ENTITY_CLIMATE)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual("cool", state.attributes.get('operation_mode'))
self.assertEqual("cool", state.state)
def test_set_operation(self):
"""Test setting of new operation mode."""
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual("cool", state.attributes.get('operation_mode'))
self.assertEqual("cool", state.state)
climate.set_operation_mode(self.hass, "heat", ENTITY_CLIMATE)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual("heat", state.attributes.get('operation_mode'))
self.assertEqual("heat", state.state)
def test_set_away_mode_bad_attr(self):
"""Test setting the away mode without required attribute."""
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual('on', state.attributes.get('away_mode'))
climate.set_away_mode(self.hass, None, ENTITY_CLIMATE)
self.hass.block_till_done()
self.assertEqual('on', state.attributes.get('away_mode'))
def test_set_away_mode_on(self):
"""Test setting the away mode on/true."""
climate.set_away_mode(self.hass, True, ENTITY_CLIMATE)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual('on', state.attributes.get('away_mode'))
def test_set_away_mode_off(self):
"""Test setting the away mode off/false."""
climate.set_away_mode(self.hass, False, ENTITY_CLIMATE)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual('off', state.attributes.get('away_mode'))
def test_set_hold_mode_home(self):
"""Test setting the hold mode home."""
climate.set_hold_mode(self.hass, 'home', ENTITY_ECOBEE)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_ECOBEE)
self.assertEqual('home', state.attributes.get('hold_mode'))
def test_set_hold_mode_away(self):
"""Test setting the hold mode away."""
climate.set_hold_mode(self.hass, 'away', ENTITY_ECOBEE)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_ECOBEE)
self.assertEqual('away', state.attributes.get('hold_mode'))
def test_set_hold_mode_none(self):
"""Test setting the hold mode off/false."""
climate.set_hold_mode(self.hass, None, ENTITY_ECOBEE)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_ECOBEE)
self.assertEqual(None, state.attributes.get('hold_mode'))
def test_set_aux_heat_bad_attr(self):
"""Test setting the auxillary heater without required attribute."""
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual('off', state.attributes.get('aux_heat'))
climate.set_aux_heat(self.hass, None, ENTITY_CLIMATE)
self.hass.block_till_done()
self.assertEqual('off', state.attributes.get('aux_heat'))
def test_set_aux_heat_on(self):
"""Test setting the axillary heater on/true."""
climate.set_aux_heat(self.hass, True, ENTITY_CLIMATE)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual('on', state.attributes.get('aux_heat'))
def test_set_aux_heat_off(self):
"""Test setting the auxillary heater off/false."""
climate.set_aux_heat(self.hass, False, ENTITY_CLIMATE)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY_CLIMATE)
self.assertEqual('off', state.attributes.get('aux_heat'))
|
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
#
"""Shared utilities used by various classes, all placed here to avoid circular imports.
This file INTENTIONALLY has NO module dependencies!
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import imp
import itertools
import os
import re
import sys
import zipfile
import io
import string
import subprocess
import csv
import shutil
import tempfile
from h2o.exceptions import H2OValueError
from h2o.utils.compatibility import * # NOQA
from h2o.utils.typechecks import assert_is_type, is_type, numeric
from h2o.backend.server import H2OLocalServer
_id_ctr = 0
# The set of characters allowed in frame IDs. Since frame ids are used within REST API urls, they may
# only contain characters allowed within the "segment" part of the URL (see RFC 3986). Additionally, we
# forbid all characters that are declared as "illegal" in Key.java.
_id_allowed_characters = set("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-._~")
__all__ = ('mojo_predict_csv', 'mojo_predict_pandas')
def _py_tmp_key(append):
global _id_ctr
_id_ctr += 1
return "py_" + str(_id_ctr) + append
def check_frame_id(frame_id):
"""Check that the provided frame id is valid in Rapids language."""
if frame_id is None:
return
if frame_id.strip() == "":
raise H2OValueError("Frame id cannot be an empty string: %r" % frame_id)
for i, ch in enumerate(frame_id):
# '$' character has special meaning at the beginning of the string; and prohibited anywhere else
if ch == "$" and i == 0: continue
if ch not in _id_allowed_characters:
raise H2OValueError("Character '%s' is illegal in frame id: %s" % (ch, frame_id))
if re.match(r"-?[0-9]", frame_id):
raise H2OValueError("Frame id cannot start with a number: %s" % frame_id)
def temp_ctr():
return _id_ctr
def can_use_pandas():
try:
imp.find_module('pandas')
return True
except ImportError:
return False
def can_use_numpy():
try:
imp.find_module('numpy')
return True
except ImportError:
return False
_url_safe_chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~"
_url_chars_map = [chr(i) if chr(i) in _url_safe_chars else "%%%02X" % i for i in range(256)]
def url_encode(s):
# Note: type cast str(s) will not be needed once all code is made compatible
return "".join(_url_chars_map[c] for c in bytes_iterator(s))
def quote(s):
return url_encode(s)
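# Illustrative examples (added for clarity, not part of the original module):
# characters outside _url_safe_chars are percent-encoded, so e.g.
#
#     url_encode("a b/c")   ->  'a%20b%2Fc'
#     quote("100%")         ->  '100%25'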
def urlopen():
if PY3:
from urllib import request
return request.urlopen
else:
import urllib2
return urllib2.urlopen
def clamp(x, xmin, xmax):
"""Return the value of x, clamped from below by `xmin` and from above by `xmax`."""
return max(xmin, min(x, xmax))
def _gen_header(cols):
return ["C" + str(c) for c in range(1, cols + 1, 1)]
def _check_lists_of_lists(python_obj):
    # check we have a list of flat lists
# returns longest length of sublist
most_cols = 0
for l in python_obj:
# All items in the list must be a list!
if not isinstance(l, (tuple, list)):
raise ValueError("`python_obj` is a mixture of nested lists and other types.")
most_cols = max(most_cols, len(l))
for ll in l:
# in fact, we must have a list of flat lists!
if isinstance(ll, (tuple, list)):
raise ValueError("`python_obj` is not a list of flat lists!")
return most_cols
def _handle_python_lists(python_obj, check_header):
# convert all inputs to lol
if _is_list_of_lists(python_obj): # do we have a list of lists: [[...], ..., [...]] ?
ncols = _check_lists_of_lists(python_obj) # must be a list of flat lists, raise ValueError if not
elif isinstance(python_obj, (list, tuple)): # single list
ncols = 1
python_obj = [[e] for e in python_obj]
else: # scalar
python_obj = [[python_obj]]
ncols = 1
# create the header
if check_header == 1:
header = python_obj[0]
python_obj = python_obj[1:]
else:
header = _gen_header(ncols)
# shape up the data for csv.DictWriter
# data_to_write = [dict(list(zip(header, row))) for row in python_obj]
return header, python_obj
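# Illustrative example (added for clarity, not part of the original module): a flat
# list is promoted to a single-column list-of-lists, and a default header is
# generated whenever check_header != 1:
#
#     _handle_python_lists([1, 2, 3], check_header=-1)
#     ->  (['C1'], [[1], [2], [3]])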
def stringify_list(arr):
return "[%s]" % ",".join(stringify_list(item) if isinstance(item, list) else _str(item)
for item in arr)
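# Illustrative example (added for clarity, not part of the original module): nested
# lists are rendered recursively, other items via _str():
#
#     stringify_list([1, [2, 3], 4.5])   ->  '[1,[2,3],4.5]'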
def _str(item):
return _str_tuple(item) if isinstance(item, tuple) else str(item)
def _str_tuple(t):
return "{%s}" % ",".join(["%s: %s" % (ti[0], str(ti[1])) for ti in zip(list(string.ascii_lowercase), t)])
def _is_list(l):
return isinstance(l, (tuple, list))
def _is_str_list(l):
return is_type(l, [str])
def _is_num_list(l):
return is_type(l, [numeric])
def _is_list_of_lists(o):
return any(isinstance(l, (tuple, list)) for l in o)
def _handle_numpy_array(python_obj, header):
return _handle_python_lists(python_obj.tolist(), header)
def _handle_pandas_data_frame(python_obj, header):
data = _handle_python_lists(python_obj.as_matrix().tolist(), -1)[1]
return list(python_obj.columns), data
def _handle_python_dicts(python_obj, check_header):
header = list(python_obj.keys())
is_valid = all(re.match(r"^[a-zA-Z_][a-zA-Z0-9_.]*$", col) for col in header) # is this a valid header?
if not is_valid:
raise ValueError(
"Did not get a valid set of column names! Must match the regular expression: ^[a-zA-Z_][a-zA-Z0-9_.]*$ ")
for k in python_obj: # check that each value entry is a flat list/tuple or single int, float, or string
v = python_obj[k]
if isinstance(v, (tuple, list)): # if value is a tuple/list, then it must be flat
if _is_list_of_lists(v):
raise ValueError("Values in the dictionary must be flattened!")
elif is_type(v, str, numeric):
python_obj[k] = [v]
else:
raise ValueError("Encountered invalid dictionary value when constructing H2OFrame. Got: {0}".format(v))
zipper = getattr(itertools, "zip_longest", None) or getattr(itertools, "izip_longest", None) or zip
rows = list(map(list, zipper(*list(python_obj.values()))))
data_to_write = [dict(list(zip(header, row))) for row in rows]
return header, data_to_write
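# Illustrative example (added for clarity, not part of the original module), assuming
# an insertion-ordered dict: keys become the header and values are zipped row-wise
# into per-row dicts suitable for csv.DictWriter:
#
#     _handle_python_dicts({"a": [1, 2], "b": [3, 4]}, check_header=1)
#     ->  (['a', 'b'], [{'a': 1, 'b': 3}, {'a': 2, 'b': 4}])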
def _is_fr(o):
return o.__class__.__name__ == "H2OFrame" # hack to avoid circular imports
def _quoted(key):
if key is None: return "\"\""
# mimic behavior in R to replace "%" and "&" characters, which break the call to /Parse, with "."
# key = key.replace("%", ".")
# key = key.replace("&", ".")
is_quoted = len(re.findall(r'\"(.+?)\"', key)) != 0
key = key if is_quoted else '"' + key + '"'
return key
def _locate(path):
"""Search for a relative path and turn it into an absolute path.
This is handy when hunting for data files to be passed into h2o and used by import file.
Note: This function is for unit testing purposes only.
Parameters
----------
path : str
Path to search for
    :return: Absolute path if it is found; raises ValueError otherwise.
"""
tmp_dir = os.path.realpath(os.getcwd())
possible_result = os.path.join(tmp_dir, path)
while True:
if os.path.exists(possible_result):
return possible_result
next_tmp_dir = os.path.dirname(tmp_dir)
if next_tmp_dir == tmp_dir:
raise ValueError("File not found: " + path)
tmp_dir = next_tmp_dir
possible_result = os.path.join(tmp_dir, path)
def _colmean(column):
"""Return the mean of a single-column frame."""
assert column.ncols == 1
return column.mean(return_frame=True).flatten()
def get_human_readable_bytes(size):
"""
    Convert given number of bytes into a human readable representation, i.e. add a prefix such as kb, Mb, Gb,
etc. The `size` argument must be a non-negative integer.
:param size: integer representing byte size of something
:return: string representation of the size, in human-readable form
"""
if size == 0: return "0"
if size is None: return ""
assert_is_type(size, int)
assert size >= 0, "`size` cannot be negative, got %d" % size
suffixes = "PTGMk"
maxl = len(suffixes)
for i in range(maxl + 1):
shift = (maxl - i) * 10
if size >> shift == 0: continue
ndigits = 0
for nd in [3, 2, 1]:
if size >> (shift + 12 - nd * 3) == 0:
ndigits = nd
break
if ndigits == 0 or size == (size >> shift) << shift:
rounded_val = str(size >> shift)
else:
rounded_val = "%.*f" % (ndigits, size / (1 << shift))
return "%s %sb" % (rounded_val, suffixes[i] if i < maxl else "")
def get_human_readable_time(time_ms):
"""
Convert given duration in milliseconds into a human-readable representation, i.e. hours, minutes, seconds,
etc. More specifically, the returned string may look like following:
1 day 3 hours 12 mins
3 days 0 hours 0 mins
8 hours 12 mins
34 mins 02 secs
13 secs
541 ms
In particular, the following rules are applied:
* milliseconds are printed only if the duration is less than a second;
* seconds are printed only if the duration is less than an hour;
* for durations greater than 1 hour we print days, hours and minutes keeping zeros in the middle (i.e. we
return "4 days 0 hours 12 mins" instead of "4 days 12 mins").
:param time_ms: duration, as a number of elapsed milliseconds.
:return: human-readable string representation of the provided duration.
"""
millis = time_ms % 1000
secs = (time_ms // 1000) % 60
mins = (time_ms // 60000) % 60
hours = (time_ms // 3600000) % 24
days = (time_ms // 86400000)
res = ""
if days > 1:
res += "%d days" % days
elif days == 1:
res += "1 day"
if hours > 1 or (hours == 0 and res):
res += " %d hours" % hours
elif hours == 1:
res += " 1 hour"
if mins > 1 or (mins == 0 and res):
res += " %d mins" % mins
elif mins == 1:
res += " 1 min"
if days == 0 and hours == 0:
res += " %02d secs" % secs
if not res:
res = " %d ms" % millis
return res.strip()
def print2(msg, flush=False, end="\n"):
"""
This function exists here ONLY because Sphinx.ext.autodoc gets into a bad state when seeing the print()
function. When in that state, autodoc doesn't display any errors or warnings, but instead completely
ignores the "bysource" member-order option.
"""
print(msg, end=end)
if flush: sys.stdout.flush()
def normalize_slice(s, total):
"""
Return a "canonical" version of slice ``s``.
:param slice s: the original slice expression
    :param int total: total number of elements in the collection sliced by ``s``
:return slice: a slice equivalent to ``s`` but not containing any negative indices or Nones.
"""
newstart = 0 if s.start is None else max(0, s.start + total) if s.start < 0 else min(s.start, total)
newstop = total if s.stop is None else max(0, s.stop + total) if s.stop < 0 else min(s.stop, total)
newstep = 1 if s.step is None else s.step
return slice(newstart, newstop, newstep)
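# Illustrative examples (added for clarity, not part of the original module), for a
# collection of 10 elements:
#
#     normalize_slice(slice(-3, None), 10)       ->  slice(7, 10, 1)
#     normalize_slice(slice(None, None, 2), 10)  ->  slice(0, 10, 2)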
def slice_is_normalized(s):
"""Return True if slice ``s`` in "normalized" form."""
return (s.start is not None and s.stop is not None and s.step is not None and s.start <= s.stop)
gen_header = _gen_header
py_tmp_key = _py_tmp_key
locate = _locate
quoted = _quoted
is_list = _is_list
is_fr = _is_fr
handle_python_dicts = _handle_python_dicts
handle_pandas_data_frame = _handle_pandas_data_frame
handle_numpy_array = _handle_numpy_array
is_list_of_lists = _is_list_of_lists
is_num_list = _is_num_list
is_str_list = _is_str_list
handle_python_lists = _handle_python_lists
check_lists_of_lists = _check_lists_of_lists
gen_model_file_name = "h2o-genmodel.jar"
h2o_predictor_class = "hex.genmodel.tools.PredictCsv"
def mojo_predict_pandas(dataframe, mojo_zip_path, genmodel_jar_path=None, classpath=None, java_options=None, verbose=False):
"""
MOJO scoring function to take a Pandas frame and use MOJO model as zip file to score.
:param dataframe: Pandas frame to score.
:param mojo_zip_path: Path to MOJO zip downloaded from H2O.
:param genmodel_jar_path: Optional, path to genmodel jar file. If None (default) then the h2o-genmodel.jar in the same
folder as the MOJO zip will be used.
:param classpath: Optional, specifies custom user defined classpath which will be used when scoring. If None
(default) then the default classpath for this MOJO model will be used.
    :param java_options: Optional, custom user defined options for Java. By default '-Xmx4g -XX:ReservedCodeCacheSize=256m' is used.
:param verbose: Optional, if True, then additional debug information will be printed. False by default.
:return: Pandas frame with predictions
"""
tmp_dir = tempfile.mkdtemp()
try:
if not can_use_pandas():
            raise RuntimeError('Cannot import pandas')
import pandas
assert_is_type(dataframe, pandas.DataFrame)
input_csv_path = os.path.join(tmp_dir, 'input.csv')
prediction_csv_path = os.path.join(tmp_dir, 'prediction.csv')
dataframe.to_csv(input_csv_path)
mojo_predict_csv(input_csv_path=input_csv_path, mojo_zip_path=mojo_zip_path,
output_csv_path=prediction_csv_path, genmodel_jar_path=genmodel_jar_path,
classpath=classpath, java_options=java_options, verbose=verbose)
return pandas.read_csv(prediction_csv_path)
finally:
shutil.rmtree(tmp_dir)
def mojo_predict_csv(input_csv_path, mojo_zip_path, output_csv_path=None, genmodel_jar_path=None, classpath=None, java_options=None, verbose=False):
"""
MOJO scoring function to take a CSV file and use MOJO model as zip file to score.
:param input_csv_path: Path to input CSV file.
:param mojo_zip_path: Path to MOJO zip downloaded from H2O.
:param output_csv_path: Optional, name of the output CSV file with computed predictions. If None (default), then
predictions will be saved as prediction.csv in the same folder as the MOJO zip.
:param genmodel_jar_path: Optional, path to genmodel jar file. If None (default) then the h2o-genmodel.jar in the same
folder as the MOJO zip will be used.
:param classpath: Optional, specifies custom user defined classpath which will be used when scoring. If None
(default) then the default classpath for this MOJO model will be used.
:param java_options: Optional, custom user defined options for Java. By default '-Xmx4g -XX:ReservedCodeCacheSize=256m' is used.
:param verbose: Optional, if True, then additional debug information will be printed. False by default.
:return: List of computed predictions
"""
default_java_options = '-Xmx4g -XX:ReservedCodeCacheSize=256m'
prediction_output_file = 'prediction.csv'
# Checking java
java = H2OLocalServer._find_java()
H2OLocalServer._check_java(java=java, verbose=verbose)
# Ensure input_csv exists
if verbose:
print("input_csv:\t%s" % input_csv_path)
if not os.path.isfile(input_csv_path):
raise RuntimeError("Input csv cannot be found at %s" % input_csv_path)
# Ensure mojo_zip exists
mojo_zip_path = os.path.abspath(mojo_zip_path)
if verbose:
print("mojo_zip:\t%s" % mojo_zip_path)
if not os.path.isfile(mojo_zip_path):
raise RuntimeError("MOJO zip cannot be found at %s" % mojo_zip_path)
parent_dir = os.path.dirname(mojo_zip_path)
# Set output_csv if necessary
if output_csv_path is None:
output_csv_path = os.path.join(parent_dir, prediction_output_file)
# Set path to h2o-genmodel.jar if necessary and check it's valid
if genmodel_jar_path is None:
genmodel_jar_path = os.path.join(parent_dir, gen_model_file_name)
if verbose:
print("genmodel_jar:\t%s" % genmodel_jar_path)
if not os.path.isfile(genmodel_jar_path):
raise RuntimeError("Genmodel jar cannot be found at %s" % genmodel_jar_path)
if verbose and output_csv_path is not None:
print("output_csv:\t%s" % output_csv_path)
# Set classpath if necessary
if classpath is None:
classpath = genmodel_jar_path
if verbose:
print("classpath:\t%s" % classpath)
# Set java_options if necessary
if java_options is None:
java_options = default_java_options
if verbose:
print("java_options:\t%s" % java_options)
# Construct command to invoke java
cmd = [java]
for option in java_options.split(' '):
cmd += [option]
cmd += ["-cp", classpath, h2o_predictor_class, "--mojo", mojo_zip_path, "--input", input_csv_path,
'--output', output_csv_path, '--decimal']
if verbose:
cmd_str = " ".join(cmd)
print("java cmd:\t%s" % cmd_str)
# invoke the command
subprocess.check_call(cmd, shell=False)
# load predictions in form of a dict
with open(output_csv_path) as csv_file:
result = list(csv.DictReader(csv_file))
return result
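# Hedged usage sketch (added for clarity, not part of the original module); the file
# paths below are hypothetical and only illustrate the call shape of the two
# scoring helpers defined above:
#
#     rows = mojo_predict_csv(input_csv_path="/tmp/input.csv",
#                             mojo_zip_path="/tmp/gbm_model.zip",
#                             verbose=True)               # list of per-row dicts
#
#     import pandas
#     df = pandas.read_csv("/tmp/input.csv")
#     preds = mojo_predict_pandas(df, "/tmp/gbm_model.zip")  # pandas.DataFrame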
def deprecated(message):
"""The decorator to mark deprecated functions."""
from traceback import extract_stack
assert message, "`message` argument in @deprecated is required."
def deprecated_decorator(fun):
def decorator_invisible(*args, **kwargs):
stack = extract_stack()
assert len(stack) >= 2 and stack[-1][2] == "decorator_invisible", "Got confusing stack... %r" % stack
print("[WARNING] in %s line %d:" % (stack[-2][0], stack[-2][1]))
print(" >>> %s" % (stack[-2][3] or "????"))
print(" ^^^^ %s" % message)
return fun(*args, **kwargs)
decorator_invisible.__doc__ = message
decorator_invisible.__name__ = fun.__name__
decorator_invisible.__module__ = fun.__module__
decorator_invisible.__deprecated__ = True
return decorator_invisible
return deprecated_decorator
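# Hedged usage sketch (added for clarity, not part of the original module); the
# function names are hypothetical:
#
#     @deprecated("Use new_frame_summary() instead.")
#     def old_frame_summary(fr):
#         ...
#
# Each call to old_frame_summary() then prints a warning with the caller's file,
# line and source text before delegating to the original function.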
class InMemoryZipArch(object):
def __init__(self, file_name = None, compression = zipfile.ZIP_DEFLATED):
self._data = io.BytesIO()
self._arch = zipfile.ZipFile(self._data, "w", compression, False)
self._file_name = file_name
def append(self, filename_in_zip, file_contents):
self._arch.writestr(filename_in_zip, file_contents)
return self
def write_to_file(self, filename):
# Mark the files as having been created on Windows so that
# Unix permissions are not inferred as 0000
for zfile in self._arch.filelist:
zfile.create_system = 0
self._arch.close()
with open(filename, 'wb') as f:
f.write(self._data.getvalue())
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if self._file_name is None:
return
self.write_to_file(self._file_name)
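# Hedged usage sketch (added for clarity, not part of the original module); the file
# names are illustrative only:
#
#     with InMemoryZipArch("artifacts.zip") as arch:
#         arch.append("model.ini", "[info]\nversion=1\n")
#         arch.append("domains/d0.txt", "a\nb\nc\n")
#     # leaving the `with` block writes artifacts.zip to the current directory;
#     # passing file_name=None keeps the archive purely in memory.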
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import os.path
from copy import deepcopy
from django.conf import settings
from django.http import SimpleCookie
from django.template import RequestContext, Template
from django.urls import reverse
from django.utils.encoding import force_text
from django.utils.six import StringIO
from cms import api
from cms.apphook_pool import apphook_pool
from cms.utils.conf import get_cms_setting
from djangocms_helper.base_test import BaseTestCase
from ..utils import get_app_instance, get_apphook_configs, get_apphook_field_names
from .utils.example.models import (
AnotherExampleConfig, Article, ExampleConfig, News, NotApphookedModel, TranslatableArticle,
)
class AppHookConfigTestCase(BaseTestCase):
def setUp(self):
self.template = get_cms_setting('TEMPLATES')[0][0]
self.language = settings.LANGUAGES[0][0]
self.root_page = api.create_page(
'root page', self.template, self.language, published=True)
# This is needed in django CMS 3.5+ to keep the same tree across
# all django CMS versions
if hasattr(self.root_page, 'set_as_homepage'):
self.root_page.set_as_homepage()
self.ns_app_1 = ExampleConfig.objects.create(namespace='app1')
self.ns_app_1.app_data.config.property = 'app1_property'
self.ns_app_1.app_data.config.published_default = False
self.ns_app_1.save()
self.ns_app_2 = ExampleConfig.objects.create(namespace='app2')
self.ns_app_2.app_data.config.property = 'app2_property'
self.ns_app_2.app_data.config.published_default = True
self.ns_app_2.save()
self.ns_app_3 = AnotherExampleConfig.objects.create(namespace='app3')
self.ns_app_3.app_data.config.property = 'app3_property'
self.ns_app_3.app_data.config.published_default = True
self.ns_app_3.save()
self.page_1 = api.create_page(
'page_1', self.template, self.language, published=True,
parent=self.root_page,
apphook='ExampleApp',
apphook_namespace=self.ns_app_1.namespace)
self.page_2 = api.create_page(
'page_2', self.template, self.language, published=True,
parent=self.root_page,
apphook='ExampleApp',
apphook_namespace=self.ns_app_2.namespace)
self.page_3 = api.create_page(
'page_3', self.template, self.language, published=True,
parent=self.root_page,
apphook='SampleApp')
for page in self.root_page, self.page_1, self.page_2:
for language, _ in settings.LANGUAGES[1:]:
api.create_title(language, page.get_slug(), page)
page.publish(language)
self.reload_urlconf()
def test_configs(self):
app = apphook_pool.get_apphook(self.page_1.application_urls)
self.assertEqual(app.get_configs().count(), 2)
def test_wrong_ns(self):
app = apphook_pool.get_apphook(self.page_1.application_urls)
self.assertIsNone(app.get_config('no_app'))
def test_bad_property(self):
with self.assertRaises(AttributeError):
self.ns_app_1.no_property
def test_app_no_ns(self):
request = self.get_page_request(self.page_3, self.user)
config = get_app_instance(request)
# when config is requested on a non-config apphook, just return empty data
self.assertEqual(('', None), config)
def test_no_page(self):
request = self.request_factory.get('/en/sample/login/')
request.user = self.user
request.session = {}
request.cookies = SimpleCookie()
request.errors = StringIO()
# when config is requested on a non-CMS url, just return empty data
with self.settings(ROOT_URLCONF='cms.test_utils.project.urls'):
config = get_app_instance(request)
self.assertEqual(('', None), config)
def test_config_str(self):
app = apphook_pool.get_apphook(self.page_1.application_urls)
self.assertEqual('%s / %s' % (force_text(app.name), self.ns_app_1.namespace), force_text(self.ns_app_1))
def test_admin_url(self):
app = apphook_pool.get_apphook(self.page_1.application_urls)
url = app.get_config_add_url()
try:
self.assertEqual(url, reverse('admin:%s_%s_add' % (ExampleConfig._meta.app_label,
ExampleConfig._meta.model_name)))
except AttributeError: # noqa
self.assertEqual(url, reverse('admin:%s_%s_add' % (ExampleConfig._meta.app_label,
ExampleConfig._meta.module_name)))
def test_app_1_list_empty(self):
response = self.client.get('/en/page_1/')
self.assertContains(response, 'namespace:app1')
self.assertContains(response, 'property:app1_property')
self.assertContains(response, 'objects:0')
def test_app_2_list_empty(self):
response = self.client.get('/en/page_2/')
self.assertContains(response, 'namespace:app2')
self.assertContains(response, 'property:app2_property')
self.assertContains(response, 'objects:0')
def test_app_1_list_items(self):
Article.objects.create(title='article_app_1',
slug='article_app_1', section=self.ns_app_1)
response = self.client.get('/en/page_1/')
self.assertContains(response, 'namespace:app1')
self.assertContains(response, 'property:app1_property')
self.assertContains(response, 'objects:1')
def test_app_2_list_items(self):
Article.objects.create(title='article_app_2',
slug='article_app_2', section=self.ns_app_2)
response = self.client.get('/en/page_2/')
self.assertContains(response, 'namespace:app2')
self.assertContains(response, 'property:app2_property')
self.assertContains(response, 'objects:1')
def test_apphook_manager_on_simple_model(self):
ns_app_3 = ExampleConfig.objects.create(namespace='app3')
ns_app_3.app_data.config.property = 'app3_property'
ns_app_3.save()
Article.objects.create(title='article_1_app_1',
slug='article_1_app_1',
section=self.ns_app_1)
Article.objects.create(title='article_2_app_1',
slug='article_2_app_1',
section=self.ns_app_1)
Article.objects.create(title='article_1_app_2',
slug='article_1_app_2',
section=self.ns_app_2)
self.assertEqual(
2, Article.objects.namespace(self.ns_app_1.namespace).count()
)
self.assertEqual(
1, Article.objects.namespace(self.ns_app_2.namespace).count()
)
self.assertEqual(
0, Article.objects.namespace(ns_app_3.namespace).count()
)
self.assertEqual(
0, Article.objects.namespace('').count()
)
def test_apphook_manager_on_model_with_two_configs(self):
ans_config_1 = AnotherExampleConfig.objects.create(namespace='config1')
ans_config_2 = AnotherExampleConfig.objects.create(namespace='config2')
News.objects.create(title='news_1_app_1_config1',
slug='news_1_app_1_config1',
section=self.ns_app_1,
config=ans_config_1)
News.objects.create(title='news_2_app_1_config2',
slug='news_2_app_1_config2',
section=self.ns_app_1,
config=ans_config_2)
msg = ('"{0}" has {1} relations to an ApphookConfig model.'
' Please, specify which one to use in argument "to".'
' Choices are: {2}'.format('News', '2', 'section, config'))
self.assertRaisesMessage(
ValueError, msg, News.objects.namespace, ans_config_1.namespace
)
self.assertEqual(
1, News.objects.namespace(ans_config_1.namespace,
to='config').count()
)
self.assertEqual(
2, News.objects.namespace(self.ns_app_1.namespace,
to='section').count()
)
def test_translatable_apphook_manager(self):
t1 = TranslatableArticle.objects.language('en').create(
title='article_1_app_1_en', slug='article_1_app_1_en',
section=self.ns_app_1
)
self.assertEqual(t1.get_current_language(), 'en')
t2 = TranslatableArticle.objects.language('de').create(
title='article_2_app_1_de', slug='article_2_app_1_de',
section=self.ns_app_1
)
self.assertEqual(t2.get_current_language(), 'de')
self.assertEqual(
2, TranslatableArticle.objects.namespace(self.ns_app_1.namespace)
.count()
)
self.assertEqual(
1,
TranslatableArticle.objects.namespace(self.ns_app_1.namespace)
.translated('en')
.count()
)
self.assertEqual(
1,
TranslatableArticle.objects.namespace(self.ns_app_1.namespace)
.translated('de')
.count()
)
def test_get_config_data(self):
from django.contrib import admin
article = Article.objects.create(title='news_1_app_1_config1',
slug='news_1_app_1_config1',
section=self.ns_app_1)
admin.autodiscover()
admin_instance = admin.site._registry[Article]
# correct parameter passed by the request
request = self.get_page_request(self.page_3, self.user)
request.GET = deepcopy(request.GET)
request.GET['section'] = self.ns_app_1.pk
retrieved = admin_instance.get_config_data(request, article, 'property')
self.assertEqual(retrieved, self.ns_app_1.property)
# correct parameter passed by the request - no existing object
request = self.get_page_request(self.page_3, self.user)
request.GET = deepcopy(request.GET)
request.GET['section'] = self.ns_app_1.pk
retrieved = admin_instance.get_config_data(request, Article(), 'property')
self.assertEqual(retrieved, self.ns_app_1.property)
        # no parameter from request - config retrieved from existing instance
request = self.get_page_request(self.page_3, self.user)
retrieved = admin_instance.get_config_data(request, article, 'property')
self.assertEqual(retrieved, self.ns_app_1.property)
def test_config_select(self):
from django.contrib import admin
article = Article.objects.create(title='news_1_app_1_config1',
slug='news_1_app_1_config1',
section=self.ns_app_1)
admin.autodiscover()
admin_instance = admin.site._registry[Article]
# no object is set, no parameter passed through the request, two namespaces
request = self.get_page_request(self.page_3, self.user)
value = admin_instance._app_config_select(request, None)
self.assertEqual(value, None)
# object is set, no parameter passed through the request, two namespaces
request = self.get_page_request(self.page_3, self.user)
value = admin_instance._app_config_select(request, article)
self.assertEqual(value, article.section)
self.ns_app_2.delete()
# no object is set, no parameter passed through the request, one namespace
request = self.get_page_request(self.page_3, self.user)
value = admin_instance._app_config_select(request, None)
self.assertEqual(value, self.ns_app_1)
def test_get_config_form(self):
from django.contrib import admin
article = Article.objects.create(title='news_1_app_1_config1',
slug='news_1_app_1_config1',
section=self.ns_app_1)
admin.autodiscover()
admin_instance = admin.site._registry[Article]
# no object is set, no parameter passed through the request, two namespaces
request = self.get_page_request(self.page_3, self.user)
form = admin_instance.get_form(request, None)
self.assertEqual(list(form.base_fields.keys()), ['section'])
self.assertEqual(form.base_fields['section'].initial, None)
# object is set, normal form is used
request = self.get_page_request(self.page_3, self.user)
request.GET = deepcopy(request.GET)
request.GET['section'] = self.ns_app_1.pk
form = admin_instance.get_form(request, article)
self.assertEqual(list(form.base_fields.keys()), ['title', 'slug', 'section', 'published'])
self.assertEqual(form.base_fields['section'].initial, self.ns_app_1)
# no object is set, parameter passed through the request
request = self.get_page_request(self.page_3, self.user)
request.GET = deepcopy(request.GET)
request.GET['section'] = self.ns_app_1.pk
form = admin_instance.get_form(request, None)
self.assertEqual(list(form.base_fields.keys()), ['title', 'slug', 'section', 'published'])
self.assertEqual(form.base_fields['section'].initial, self.ns_app_1)
self.ns_app_2.delete()
request = self.get_page_request(self.page_3, self.user)
app_config_default = admin_instance._app_config_select(request, None)
self.assertEqual(app_config_default, self.ns_app_1)
# no object is set, no parameter passed through the request, one namespace
request = self.get_page_request(self.page_3, self.user)
form = admin_instance.get_form(request, None)
self.assertEqual(list(form.base_fields.keys()), ['title', 'slug', 'section', 'published'])
self.assertEqual(form.base_fields['section'].initial, self.ns_app_1)
def test_apphook_admin(self):
from django.contrib import admin
admin.autodiscover()
admin_instance = admin.site._registry[ExampleConfig]
request = self.get_page_request(self.page_3, self.user)
# Testing Readonly field
self.assertEqual(
admin_instance.get_readonly_fields(request), ('type',)
)
self.assertEqual(
admin_instance.get_readonly_fields(request, self.ns_app_1), ('type', 'namespace')
)
# Testing admin output for sample app specific implementation
response = admin_instance.change_view(request, str(self.ns_app_1.pk))
try:
self.assertContains(
response,
'<div class="readonly">aldryn_apphooks_config.tests.utils.example.cms_appconfig.ExampleConfig</div>'
)
self.assertContains(response, '<div class="readonly">app1</div>')
self.assertContains(response, 'value="app1_property"')
except AssertionError:
self.assertContains(
response,
'<p>aldryn_apphooks_config.tests.utils.example.cms_appconfig.ExampleConfig</p>'
)
self.assertContains(response, '<p>app1</p>')
self.assertContains(response, 'name="config-property" type="text" value="app1_property"')
def test_admin(self):
from django.contrib import admin
admin.autodiscover()
admin_instance = admin.site._registry[Article]
# testing behavior when more than 1 namespace instance exists - the selection form
# should be shown
request = self.get_page_request(self.page_3, self.user)
response = admin_instance.add_view(request)
self.assertContains(response, '$(this).apphook_reload_admin')
self.assertContains(response, 'var sel = $(\'#id_section\');')
self.assertContains(response, 'aldryn_apphooks_config')
self.assertContains(response, '<option value="1">%s</option>' % self.ns_app_1)
self.assertContains(response, '<option value="2">%s</option>' % self.ns_app_2)
self.assertContains(response, '<h2>Select app config</h2>')
# only one namespace instance exists, the normal changeform is used
self.ns_app_2.delete()
response = admin_instance.add_view(request)
self.assertContains(response, '$(this).apphook_reload_admin')
self.assertContains(response, 'aldryn_apphooks_config')
self.assertRegexpMatches(
force_text(response.content),
'(<option value="1" selected="selected">%s</option>|<option value="1" selected>%s</option>)' % (
self.ns_app_1, self.ns_app_1
)
)
self.assertContains(response, 'id="id_published"')
self.ns_app_1.app_data.config.published_default = True
self.ns_app_1.save()
response = admin_instance.add_view(request)
response.render()
self.assertRegexpMatches(
force_text(response.content),
'(checked id="id_published"|id="id_published" checked|<input checked="checked" id="id_published")'
)
def test_templatetag(self):
article = Article.objects.create(title='news_1_app_1_config1',
slug='news_1_app_1_config1',
section=self.ns_app_1)
request = self.get_page_request(self.page_1, self.user)
context = RequestContext(request, {'object': article, 'current_app': self.ns_app_1.namespace})
template = Template('{% load apphooks_config_tags %}{% namespace_url "example_detail" object.slug %}')
response = template.render(context)
self.assertEqual(response, os.path.join(self.page_1.get_absolute_url(), article.slug, ''))
template = Template('{% load apphooks_config_tags %}{% namespace_url "example_detail" slug=object.slug %}')
response = template.render(context)
self.assertEqual(response, os.path.join(self.page_1.get_absolute_url(), article.slug, ''))
template = Template('{% load apphooks_config_tags %}{% namespace_url "example_list" %}')
response = template.render(context)
self.assertEqual(response, self.page_1.get_absolute_url())
request = self.get_page_request(self.page_2, self.user)
context = RequestContext(request, {'object': article, 'current_app': self.ns_app_2.namespace})
template = Template('{% load apphooks_config_tags %}{% namespace_url "example_list" %}')
response = template.render(context)
self.assertEqual(response, self.page_2.get_absolute_url())
def test_apphook_field_name_discovery(self):
field_names = get_apphook_field_names(Article)
self.assertEqual(field_names, ['section'])
field_names = get_apphook_field_names(TranslatableArticle)
self.assertEqual(field_names, ['section'])
field_names = get_apphook_field_names(News)
self.assertEqual(set(field_names), set(['config', 'section']))
field_names = get_apphook_field_names(NotApphookedModel)
self.assertEqual(field_names, [])
def test_apphook_field_name_discovery_from_objects(self):
field_names = get_apphook_field_names(Article())
self.assertEqual(field_names, ['section'])
field_names = get_apphook_field_names(TranslatableArticle())
self.assertEqual(field_names, ['section'])
field_names = get_apphook_field_names(News())
self.assertEqual(set(field_names), set(['config', 'section']))
field_names = get_apphook_field_names(NotApphookedModel())
self.assertEqual(field_names, [])
def test_apphook_config_objects_discovery(self):
obj = Article(section=self.ns_app_1)
configs = get_apphook_configs(obj)
self.assertEqual(configs, [self.ns_app_1])
obj = TranslatableArticle(section=self.ns_app_1)
configs = get_apphook_configs(obj)
self.assertEqual(configs, [self.ns_app_1])
obj = News(section=self.ns_app_1, config=self.ns_app_3)
configs = get_apphook_configs(obj)
self.assertEqual(set(configs), set([self.ns_app_1, self.ns_app_3]))
obj = NotApphookedModel()
configs = get_apphook_configs(obj)
self.assertEqual(configs, [])
|
|
from itertools import chain
import json
import logging
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from django.http import HttpResponseRedirect, Http404, HttpResponse
from django.views.decorators.csrf import csrf_protect
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render, redirect
from django.conf import settings
from haystack.forms import SearchForm
from haystack.query import ValuesSearchQuerySet
from core.utils import get_context
from .utils import get_simple_query, get_correct_url_query, get_voucher_code_list
from public_interface.models import Vouchers, FlickrImages, LocalImages, Sequences, Primers
from .forms import AdvancedSearchForm, BatchChangesForm
log = logging.getLogger(__name__)
def index(request):
context = get_context(request)
return render(request, 'public_interface/index.html', context)
def browse(request):
context = get_context(request)
queryset = Vouchers.objects.order_by('-modified')[:10]
vouchers_with_images = []
# Lookups that span relationships
# https://docs.djangoproject.com/en/1.8/topics/db/queries/#lookups-that-span-relationships
for i in Vouchers.objects.filter(flickrimages__voucher_id__isnull=False):
vouchers_with_images.append(i.code)
for i in Vouchers.objects.filter(localimages__voucher_id__isnull=False):
vouchers_with_images.append(i.code)
context["results"] = queryset
context['vouchers_with_images'] = set(vouchers_with_images)
return render(request, 'public_interface/browse.html', context)
def search(request):
"""Simple search tool"""
context = get_context(request)
context['simple_query'] = get_simple_query(request)
if 'q' not in request.GET:
return redirect('/')
query = request.GET['q'].strip()
if query == '':
return redirect('/')
form = SearchForm(request.GET)
page = request.GET.get('page')
if settings.ELASTICSEARCH is True:
sqs = form.search()
sqs.spelling_suggestion()
results = ""
paginator = ""
if sqs:
paginator = Paginator(sqs, 25)
try:
results = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
results = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
results = paginator.page(paginator.num_pages)
context['page'] = results
context['paginator'] = paginator
context['results'] = results
context['voucher_code_list'] = get_voucher_code_list(sqs)
context['url_encoded_query'] = get_correct_url_query(request.GET.urlencode())
context['result_count'] = len(sqs)
return render(request, 'public_interface/search_results.html', context)
else:
sqs = Vouchers.objects.filter(
Q(orden__icontains=query) |
Q(genus__icontains=query) | Q(species__icontains=query) | Q(code__icontains=query),
)
results = ""
paginator = ""
if sqs:
paginator = Paginator(sqs, 25)
try:
results = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
results = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
results = paginator.page(paginator.num_pages)
context["result_count"] = sqs.count()
context['page'] = results
context['paginator'] = paginator
context['results'] = results
return render(request, 'public_interface/search_results.html', context)
def autocomplete(request):
"""Used for JSON queries from javascript to fill autocomplete values in
input boxes of advanced searches.
:param request:
:return:
"""
try:
field = request.GET['field']
except KeyError:
raise Http404("Value for <b>field</b> is missing.")
try:
term = request.GET['term']
except KeyError:
raise Http404("Value for <b>term</b> query is missing.")
field_term = {field: term}
sqs = ValuesSearchQuerySet().using('autocomplete').autocomplete(**field_term).values(field)[:5]
suggestions = set()
for result in sqs:
suggestions.add(result[field])
suggestions = list(suggestions)
the_data = json.dumps(suggestions)
return HttpResponse(the_data, content_type='application/json')
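# Illustrative request/response sketch (added for clarity, not part of the original
# module); the URL, field and term values are hypothetical:
#
#     GET /autocomplete/?field=genus&term=Mor
#     ->  200 OK, body: ["Morpho", "Mormolyce"]   (at most 5 unique suggestions)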
def search_advanced(request):
"""Uses the haystack index `advanced_search` to find values based on a
combination of queries for one or more fields.
Works in a similar way to **genus:Mopho AND species:helenor**
:param request: HTTP request from the url dispatcher.
:return: response to html template.
"""
context = get_context(request)
if request.method == 'GET' and bool(request.GET) is not False:
form = AdvancedSearchForm(request.GET)
page = request.GET.get('page')
if form.is_valid():
sqs = form.search()
results = ""
paginator = ""
if sqs:
paginator = Paginator(sqs, 25)
try:
results = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
results = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
results = paginator.page(paginator.num_pages)
if sqs is not None:
context['page'] = results
context['paginator'] = paginator
context['results'] = results
context['voucher_code_list'] = get_voucher_code_list(sqs)
context['simple_query'] = get_simple_query(request)
context['url_encoded_query'] = get_correct_url_query(request.GET.urlencode())
context['result_count'] = len(sqs)
return render(request, 'public_interface/search_results.html', context)
else:
context["form"] = form
return render(request, 'public_interface/search_results.html', context)
else:
context["form"] = form
return render(request, 'public_interface/search.html', context)
else:
form = AdvancedSearchForm()
context["form"] = form
return render(request, 'public_interface/search.html', context)
def show_voucher(request, voucher_code):
context = get_context(request)
try:
voucher_queryset = Vouchers.objects.get(code__iexact=voucher_code)
except Vouchers.DoesNotExist:
raise Http404
flickr_images_queryset = FlickrImages.objects.filter(voucher=voucher_code)
local_images_queryset = LocalImages.objects.filter(voucher=voucher_code)
images_queryset = list(chain(flickr_images_queryset, local_images_queryset))
seqs_queryset = Sequences.objects.filter(code=voucher_queryset).order_by('gene__gene_code')
context['voucher'] = voucher_queryset
context['images'] = images_queryset
context['sequences'] = seqs_queryset
context['google_maps_api_key'] = settings.GOOGLE_MAPS_API_KEY
return render(request, 'public_interface/show_voucher.html', context)
@login_required
def show_sequence(request, voucher_code, gene_code):
context = get_context(request)
try:
queryset = Vouchers.objects.get(code__iexact=voucher_code)
except Vouchers.DoesNotExist:
raise Http404
seqs_queryset = Sequences.objects.filter(
code=voucher_code, gene__gene_code=gene_code
).first()
images_queryset = FlickrImages.objects.filter(voucher=voucher_code)
primers_queryset = Primers.objects.filter(for_sequence=seqs_queryset)
context['voucher'] = queryset
context['sequence'] = seqs_queryset
context['images'] = images_queryset
context['primers'] = primers_queryset
return render(request, 'public_interface/show_sequence.html', context)
@csrf_protect
def change_selected(request, selected):
"""
Changes field values from Vouchers in batch.
This action first displays a change form page whichs shows all the
fields of a Vouchers type.
Next, it changes all selected objects and redirects back to the changed list.
The action that calls this function should raise a PermissionDenied
if the user has no rights for changes.
"""
# The user has already proposed the changes.
    # Apply the changes and redirect back to the change list.
if request.method == 'POST':
form = BatchChangesForm(request.POST)
ids = selected.split(",")
queryset = Vouchers.objects.filter(pk__in=ids)
n = queryset.count()
if n and form.is_valid():
# do changes
keywords = {}
for field, value in form.cleaned_data.items():
if value:
keywords[field] = value
queryset.update(**keywords)
return HttpResponseRedirect('/admin/public_interface/vouchers/')
else:
form = BatchChangesForm()
# Display the changes page
context = {'form': form, 'selected': selected}
return render(request, 'admin/public_interface/vouchers/batch_changes.html', context)
|
|
import datetime
from collections import Counter
from unittest import mock
from django.core.exceptions import ValidationError
from django.forms import (
BaseForm, CharField, DateField, FileField, Form, IntegerField,
SplitDateTimeField, formsets,
)
from django.forms.formsets import BaseFormSet, all_valid, formset_factory
from django.forms.utils import ErrorList
from django.forms.widgets import HiddenInput
from django.test import SimpleTestCase
class Choice(Form):
choice = CharField()
votes = IntegerField()
ChoiceFormSet = formset_factory(Choice)
class FavoriteDrinkForm(Form):
name = CharField()
class BaseFavoriteDrinksFormSet(BaseFormSet):
def clean(self):
seen_drinks = []
for drink in self.cleaned_data:
if drink['name'] in seen_drinks:
raise ValidationError('You may only specify a drink once.')
seen_drinks.append(drink['name'])
# A FormSet that takes a list of favorite drinks and raises an error if
# there are any duplicates.
FavoriteDrinksFormSet = formset_factory(FavoriteDrinkForm, formset=BaseFavoriteDrinksFormSet, extra=3)
class CustomKwargForm(Form):
def __init__(self, *args, custom_kwarg, **kwargs):
self.custom_kwarg = custom_kwarg
super().__init__(*args, **kwargs)
class FormsFormsetTestCase(SimpleTestCase):
def make_choiceformset(
self, formset_data=None, formset_class=ChoiceFormSet,
total_forms=None, initial_forms=0, max_num_forms=0, min_num_forms=0, **kwargs):
"""
Make a ChoiceFormset from the given formset_data.
The data should be given as a list of (choice, votes) tuples.
"""
kwargs.setdefault('prefix', 'choices')
kwargs.setdefault('auto_id', False)
if formset_data is None:
return formset_class(**kwargs)
if total_forms is None:
total_forms = len(formset_data)
def prefixed(*args):
args = (kwargs['prefix'],) + args
return '-'.join(args)
data = {
prefixed('TOTAL_FORMS'): str(total_forms),
prefixed('INITIAL_FORMS'): str(initial_forms),
prefixed('MAX_NUM_FORMS'): str(max_num_forms),
prefixed('MIN_NUM_FORMS'): str(min_num_forms),
}
for i, (choice, votes) in enumerate(formset_data):
data[prefixed(str(i), 'choice')] = choice
data[prefixed(str(i), 'votes')] = votes
return formset_class(data, **kwargs)
def test_basic_formset(self):
"""
A FormSet constructor takes the same arguments as Form. Create a
FormSet for adding data. By default, it displays 1 blank form.
"""
formset = self.make_choiceformset()
self.assertHTMLEqual(
str(formset),
"""<input type="hidden" name="choices-TOTAL_FORMS" value="1">
<input type="hidden" name="choices-INITIAL_FORMS" value="0">
<input type="hidden" name="choices-MIN_NUM_FORMS" value="0">
<input type="hidden" name="choices-MAX_NUM_FORMS" value="1000">
<tr><th>Choice:</th><td><input type="text" name="choices-0-choice"></td></tr>
<tr><th>Votes:</th><td><input type="number" name="choices-0-votes"></td></tr>"""
)
        # FormSets are treated similarly to Forms. FormSet has an is_valid()
# method, and a cleaned_data or errors attribute depending on whether
# all the forms passed validation. However, unlike a Form, cleaned_data
# and errors will be a list of dicts rather than a single dict.
formset = self.make_choiceformset([('Calexico', '100')])
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'choice': 'Calexico'}])
# If a FormSet wasn't passed any data, is_valid() and has_changed()
# return False.
formset = self.make_choiceformset()
self.assertFalse(formset.is_valid())
self.assertFalse(formset.has_changed())
def test_form_kwargs_formset(self):
"""
Custom kwargs set on the formset instance are passed to the
underlying forms.
"""
FormSet = formset_factory(CustomKwargForm, extra=2)
formset = FormSet(form_kwargs={'custom_kwarg': 1})
for form in formset:
self.assertTrue(hasattr(form, 'custom_kwarg'))
self.assertEqual(form.custom_kwarg, 1)
def test_form_kwargs_formset_dynamic(self):
"""Form kwargs can be passed dynamically in a formset."""
class DynamicBaseFormSet(BaseFormSet):
def get_form_kwargs(self, index):
return {'custom_kwarg': index}
DynamicFormSet = formset_factory(CustomKwargForm, formset=DynamicBaseFormSet, extra=2)
formset = DynamicFormSet(form_kwargs={'custom_kwarg': 'ignored'})
for i, form in enumerate(formset):
self.assertTrue(hasattr(form, 'custom_kwarg'))
self.assertEqual(form.custom_kwarg, i)
def test_form_kwargs_empty_form(self):
FormSet = formset_factory(CustomKwargForm)
formset = FormSet(form_kwargs={'custom_kwarg': 1})
self.assertTrue(hasattr(formset.empty_form, 'custom_kwarg'))
self.assertEqual(formset.empty_form.custom_kwarg, 1)
def test_formset_validation(self):
# FormSet instances can also have an error attribute if validation failed for
# any of the forms.
formset = self.make_choiceformset([('Calexico', '')])
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{'votes': ['This field is required.']}])
def test_formset_validation_count(self):
"""
A formset's ManagementForm is validated once per FormSet.is_valid()
call and each form of the formset is cleaned once.
"""
def make_method_counter(func):
"""Add a counter to func for the number of times it's called."""
counter = Counter()
counter.call_count = 0
def mocked_func(*args, **kwargs):
counter.call_count += 1
return func(*args, **kwargs)
return mocked_func, counter
mocked_is_valid, is_valid_counter = make_method_counter(formsets.ManagementForm.is_valid)
mocked_full_clean, full_clean_counter = make_method_counter(BaseForm.full_clean)
formset = self.make_choiceformset([('Calexico', '100'), ('Any1', '42'), ('Any2', '101')])
with mock.patch('django.forms.formsets.ManagementForm.is_valid', mocked_is_valid), \
mock.patch('django.forms.forms.BaseForm.full_clean', mocked_full_clean):
self.assertTrue(formset.is_valid())
self.assertEqual(is_valid_counter.call_count, 1)
self.assertEqual(full_clean_counter.call_count, 4)
def test_formset_has_changed(self):
"""
FormSet.has_changed() is True if any data is passed to its forms, even
if the formset didn't validate.
"""
blank_formset = self.make_choiceformset([('', '')])
self.assertFalse(blank_formset.has_changed())
# invalid formset
invalid_formset = self.make_choiceformset([('Calexico', '')])
self.assertFalse(invalid_formset.is_valid())
self.assertTrue(invalid_formset.has_changed())
# valid formset
valid_formset = self.make_choiceformset([('Calexico', '100')])
self.assertTrue(valid_formset.is_valid())
self.assertTrue(valid_formset.has_changed())
def test_formset_initial_data(self):
"""
A FormSet can be prefilled with existing data by providing a list of
dicts to the `initial` argument. By default, an extra blank form is
included.
"""
formset = self.make_choiceformset(initial=[{'choice': 'Calexico', 'votes': 100}])
self.assertHTMLEqual(
'\n'.join(form.as_ul() for form in formset.forms),
"""<li>Choice: <input type="text" name="choices-0-choice" value="Calexico"></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100"></li>
<li>Choice: <input type="text" name="choices-1-choice"></li>
<li>Votes: <input type="number" name="choices-1-votes"></li>"""
)
def test_blank_form_unfilled(self):
"""A form that's displayed as blank may be submitted as blank."""
formset = self.make_choiceformset([('Calexico', '100'), ('', '')], initial_forms=1)
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'choice': 'Calexico'}, {}])
def test_second_form_partially_filled(self):
"""
If at least one field is filled out on a blank form, it will be
validated.
"""
formset = self.make_choiceformset([('Calexico', '100'), ('The Decemberists', '')], initial_forms=1)
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{}, {'votes': ['This field is required.']}])
def test_delete_prefilled_data(self):
"""
Deleting prefilled data is an error. Removing data from form fields
isn't the proper way to delete it.
"""
formset = self.make_choiceformset([('', ''), ('', '')], initial_forms=1)
self.assertFalse(formset.is_valid())
self.assertEqual(
formset.errors,
[{'votes': ['This field is required.'], 'choice': ['This field is required.']}, {}]
)
def test_displaying_more_than_one_blank_form(self):
"""
More than 1 empty form can be displayed using formset_factory's
`extra` argument.
"""
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(auto_id=False, prefix='choices')
self.assertHTMLEqual(
'\n'.join(form.as_ul() for form in formset.forms),
"""<li>Choice: <input type="text" name="choices-0-choice"></li>
<li>Votes: <input type="number" name="choices-0-votes"></li>
<li>Choice: <input type="text" name="choices-1-choice"></li>
<li>Votes: <input type="number" name="choices-1-votes"></li>
<li>Choice: <input type="text" name="choices-2-choice"></li>
<li>Votes: <input type="number" name="choices-2-votes"></li>"""
)
# Since every form was displayed as blank, they are also accepted as
# blank. This may seem a little strange, but min_num is used to require
# a minimum number of forms to be completed.
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': '',
'choices-0-votes': '',
'choices-1-choice': '',
'choices-1-votes': '',
'choices-2-choice': '',
'choices-2-votes': '',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{}, {}, {}])
def test_min_num_displaying_more_than_one_blank_form(self):
"""
More than 1 empty form can also be displayed using formset_factory's
min_num argument. It will (essentially) increment the extra argument.
"""
ChoiceFormSet = formset_factory(Choice, extra=1, min_num=1)
formset = ChoiceFormSet(auto_id=False, prefix='choices')
# Min_num forms are required; extra forms can be empty.
self.assertFalse(formset.forms[0].empty_permitted)
self.assertTrue(formset.forms[1].empty_permitted)
self.assertHTMLEqual(
'\n'.join(form.as_ul() for form in formset.forms),
"""<li>Choice: <input type="text" name="choices-0-choice"></li>
<li>Votes: <input type="number" name="choices-0-votes"></li>
<li>Choice: <input type="text" name="choices-1-choice"></li>
<li>Votes: <input type="number" name="choices-1-votes"></li>"""
)
def test_min_num_displaying_more_than_one_blank_form_with_zero_extra(self):
"""More than 1 empty form can be displayed using min_num."""
ChoiceFormSet = formset_factory(Choice, extra=0, min_num=3)
formset = ChoiceFormSet(auto_id=False, prefix='choices')
self.assertHTMLEqual(
'\n'.join(form.as_ul() for form in formset.forms),
"""<li>Choice: <input type="text" name="choices-0-choice"></li>
<li>Votes: <input type="number" name="choices-0-votes"></li>
<li>Choice: <input type="text" name="choices-1-choice"></li>
<li>Votes: <input type="number" name="choices-1-votes"></li>
<li>Choice: <input type="text" name="choices-2-choice"></li>
<li>Votes: <input type="number" name="choices-2-votes"></li>"""
)
def test_single_form_completed(self):
"""Just one form may be completed."""
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-1-choice': '',
'choices-1-votes': '',
'choices-2-choice': '',
'choices-2-votes': '',
}
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'choice': 'Calexico'}, {}, {}])
def test_formset_validate_max_flag(self):
"""
If validate_max is set and max_num is less than TOTAL_FORMS in the
data, a ValidationError is raised. MAX_NUM_FORMS in the data is
irrelevant here (it's output as a hint for the client but its value
in the returned data is not checked).
"""
data = {
'choices-TOTAL_FORMS': '2', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '2', # max number of forms - should be ignored
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
}
ChoiceFormSet = formset_factory(Choice, extra=1, max_num=1, validate_max=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), ['Please submit at most 1 form.'])
def test_formset_validate_min_flag(self):
"""
If validate_min is set and min_num is more than TOTAL_FORMS in the
data, a ValidationError is raised. MIN_NUM_FORMS in the data is
irrelevant here (it's output as a hint for the client but its value
in the returned data is not checked).
"""
data = {
'choices-TOTAL_FORMS': '2', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms - should be ignored
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
}
ChoiceFormSet = formset_factory(Choice, extra=1, min_num=3, validate_min=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), ['Please submit at least 3 forms.'])
def test_formset_validate_min_unchanged_forms(self):
"""
min_num validation doesn't consider unchanged forms with initial data
as "empty".
"""
initial = [
{'choice': 'Zero', 'votes': 0},
{'choice': 'One', 'votes': 0},
]
data = {
'choices-TOTAL_FORMS': '2',
'choices-INITIAL_FORMS': '2',
'choices-MIN_NUM_FORMS': '0',
'choices-MAX_NUM_FORMS': '2',
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1', # changed from initial
}
ChoiceFormSet = formset_factory(Choice, min_num=2, validate_min=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices', initial=initial)
self.assertFalse(formset.forms[0].has_changed())
self.assertTrue(formset.forms[1].has_changed())
self.assertTrue(formset.is_valid())
def test_formset_validate_min_excludes_empty_forms(self):
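"""Blank extra forms don't count toward min_num when validate_min is set."""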
data = {
'choices-TOTAL_FORMS': '2',
'choices-INITIAL_FORMS': '0',
}
ChoiceFormSet = formset_factory(Choice, extra=2, min_num=1, validate_min=True, can_delete=True)
formset = ChoiceFormSet(data, prefix='choices')
self.assertFalse(formset.has_changed())
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), ['Please submit at least 1 form.'])
def test_second_form_partially_filled_2(self):
"""A partially completed form is invalid."""
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-1-choice': 'The Decemberists',
'choices-1-votes': '', # missing value
'choices-2-choice': '',
'choices-2-votes': '',
}
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{}, {'votes': ['This field is required.']}, {}])
def test_more_initial_data(self):
"""
The extra argument works when the formset is pre-filled with initial
data.
"""
initial = [{'choice': 'Calexico', 'votes': 100}]
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
self.assertHTMLEqual(
'\n'.join(form.as_ul() for form in formset.forms),
"""<li>Choice: <input type="text" name="choices-0-choice" value="Calexico"></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100"></li>
<li>Choice: <input type="text" name="choices-1-choice"></li>
<li>Votes: <input type="number" name="choices-1-votes"></li>
<li>Choice: <input type="text" name="choices-2-choice"></li>
<li>Votes: <input type="number" name="choices-2-votes"></li>
<li>Choice: <input type="text" name="choices-3-choice"></li>
<li>Votes: <input type="number" name="choices-3-votes"></li>"""
)
# Retrieving an empty form works. It shows up in the form list.
self.assertTrue(formset.empty_form.empty_permitted)
self.assertHTMLEqual(
formset.empty_form.as_ul(),
"""<li>Choice: <input type="text" name="choices-__prefix__-choice"></li>
<li>Votes: <input type="number" name="choices-__prefix__-votes"></li>"""
)
def test_formset_with_deletion(self):
"""
formset_factory's can_delete argument adds a boolean "delete" field to
each form. When that boolean field is True, the form will be in
formset.deleted_forms.
"""
ChoiceFormSet = formset_factory(Choice, can_delete=True)
initial = [{'choice': 'Calexico', 'votes': 100}, {'choice': 'Fergie', 'votes': 900}]
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
self.assertHTMLEqual(
'\n'.join(form.as_ul() for form in formset.forms),
"""<li>Choice: <input type="text" name="choices-0-choice" value="Calexico"></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100"></li>
<li>Delete: <input type="checkbox" name="choices-0-DELETE"></li>
<li>Choice: <input type="text" name="choices-1-choice" value="Fergie"></li>
<li>Votes: <input type="number" name="choices-1-votes" value="900"></li>
<li>Delete: <input type="checkbox" name="choices-1-DELETE"></li>
<li>Choice: <input type="text" name="choices-2-choice"></li>
<li>Votes: <input type="number" name="choices-2-votes"></li>
<li>Delete: <input type="checkbox" name="choices-2-DELETE"></li>"""
)
# To delete something, set that form's special delete field to 'on'.
# Let's go ahead and delete Fergie.
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '2', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-0-DELETE': '',
'choices-1-choice': 'Fergie',
'choices-1-votes': '900',
'choices-1-DELETE': 'on',
'choices-2-choice': '',
'choices-2-votes': '',
'choices-2-DELETE': '',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual(
[form.cleaned_data for form in formset.forms],
[
{'votes': 100, 'DELETE': False, 'choice': 'Calexico'},
{'votes': 900, 'DELETE': True, 'choice': 'Fergie'},
{},
]
)
self.assertEqual(
[form.cleaned_data for form in formset.deleted_forms],
[{'votes': 900, 'DELETE': True, 'choice': 'Fergie'}]
)
def test_formset_with_deletion_remove_deletion_flag(self):
"""
If a form is filled with something and can_delete is also checked, that
form's errors shouldn't make the entire formset invalid since it's
going to be deleted.
"""
class CheckForm(Form):
field = IntegerField(min_value=100)
data = {
'check-TOTAL_FORMS': '3', # the number of forms rendered
'check-INITIAL_FORMS': '2', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'check-MAX_NUM_FORMS': '0', # max number of forms
'check-0-field': '200',
'check-0-DELETE': '',
'check-1-field': '50',
'check-1-DELETE': 'on',
'check-2-field': '',
'check-2-DELETE': '',
}
CheckFormSet = formset_factory(CheckForm, can_delete=True)
formset = CheckFormSet(data, prefix='check')
self.assertTrue(formset.is_valid())
# If the deletion flag is removed, validation is enabled.
data['check-1-DELETE'] = ''
formset = CheckFormSet(data, prefix='check')
self.assertFalse(formset.is_valid())
def test_formset_with_deletion_invalid_deleted_form(self):
"""
deleted_forms works on a valid formset even if a deleted form would
have been invalid.
"""
FavoriteDrinkFormset = formset_factory(form=FavoriteDrinkForm, can_delete=True)
formset = FavoriteDrinkFormset({
'form-0-name': '',
'form-0-DELETE': 'on', # no name!
'form-TOTAL_FORMS': 1,
'form-INITIAL_FORMS': 1,
'form-MIN_NUM_FORMS': 0,
'form-MAX_NUM_FORMS': 1,
})
self.assertTrue(formset.is_valid())
self.assertEqual(formset._errors, [])
self.assertEqual(len(formset.deleted_forms), 1)
def test_formsets_with_ordering(self):
"""
formset_factory's can_order argument adds an integer field to each
form. When form validation succeeds, [form.cleaned_data for form in formset.forms]
will have the data in the correct order specified by the ordering
fields. If a number is duplicated in the set of ordering fields, for
instance form 0 and form 3 are both marked as 1, then the form index is
used as a secondary ordering criterion. In order to put something at the
front of the list, you'd need to set its order to 0.
"""
ChoiceFormSet = formset_factory(Choice, can_order=True)
initial = [{'choice': 'Calexico', 'votes': 100}, {'choice': 'Fergie', 'votes': 900}]
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
self.assertHTMLEqual(
'\n'.join(form.as_ul() for form in formset.forms),
"""<li>Choice: <input type="text" name="choices-0-choice" value="Calexico"></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100"></li>
<li>Order: <input type="number" name="choices-0-ORDER" value="1"></li>
<li>Choice: <input type="text" name="choices-1-choice" value="Fergie"></li>
<li>Votes: <input type="number" name="choices-1-votes" value="900"></li>
<li>Order: <input type="number" name="choices-1-ORDER" value="2"></li>
<li>Choice: <input type="text" name="choices-2-choice"></li>
<li>Votes: <input type="number" name="choices-2-votes"></li>
<li>Order: <input type="number" name="choices-2-ORDER"></li>"""
)
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '2', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-0-ORDER': '1',
'choices-1-choice': 'Fergie',
'choices-1-votes': '900',
'choices-1-ORDER': '2',
'choices-2-choice': 'The Decemberists',
'choices-2-votes': '500',
'choices-2-ORDER': '0',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual(
[form.cleaned_data for form in formset.ordered_forms],
[
{'votes': 500, 'ORDER': 0, 'choice': 'The Decemberists'},
{'votes': 100, 'ORDER': 1, 'choice': 'Calexico'},
{'votes': 900, 'ORDER': 2, 'choice': 'Fergie'},
],
)
def test_formsets_with_order_custom_widget(self):
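"""
The ORDER field's widget can be customized via the formset's
ordering_widget attribute or its get_ordering_widget() method.
"""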
class OrderingAttributFormSet(BaseFormSet):
ordering_widget = HiddenInput
class OrderingMethodFormSet(BaseFormSet):
def get_ordering_widget(self):
return HiddenInput(attrs={'class': 'ordering'})
tests = (
(OrderingAttributFormSet, '<input type="hidden" name="form-0-ORDER">'),
(OrderingMethodFormSet, '<input class="ordering" type="hidden" name="form-0-ORDER">'),
)
for formset_class, order_html in tests:
with self.subTest(formset_class=formset_class.__name__):
ArticleFormSet = formset_factory(ArticleForm, formset=formset_class, can_order=True)
formset = ArticleFormSet(auto_id=False)
self.assertHTMLEqual(
'\n'.join(form.as_ul() for form in formset.forms),
(
'<li>Title: <input type="text" name="form-0-title"></li>'
'<li>Pub date: <input type="text" name="form-0-pub_date">'
'%s</li>' % order_html
),
)
def test_empty_ordered_fields(self):
"""
Ordering fields are allowed to be left blank. If they are left blank,
they'll be sorted below everything else.
"""
data = {
'choices-TOTAL_FORMS': '4', # the number of forms rendered
'choices-INITIAL_FORMS': '3', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-0-ORDER': '1',
'choices-1-choice': 'Fergie',
'choices-1-votes': '900',
'choices-1-ORDER': '2',
'choices-2-choice': 'The Decemberists',
'choices-2-votes': '500',
'choices-2-ORDER': '',
'choices-3-choice': 'Basia Bulat',
'choices-3-votes': '50',
'choices-3-ORDER': '',
}
ChoiceFormSet = formset_factory(Choice, can_order=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual(
[form.cleaned_data for form in formset.ordered_forms],
[
{'votes': 100, 'ORDER': 1, 'choice': 'Calexico'},
{'votes': 900, 'ORDER': 2, 'choice': 'Fergie'},
{'votes': 500, 'ORDER': None, 'choice': 'The Decemberists'},
{'votes': 50, 'ORDER': None, 'choice': 'Basia Bulat'},
],
)
def test_ordering_blank_fieldsets(self):
"""Ordering works with blank fieldsets."""
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
}
ChoiceFormSet = formset_factory(Choice, can_order=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual(formset.ordered_forms, [])
def test_formset_with_ordering_and_deletion(self):
"""FormSets with ordering + deletion."""
ChoiceFormSet = formset_factory(Choice, can_order=True, can_delete=True)
initial = [
{'choice': 'Calexico', 'votes': 100},
{'choice': 'Fergie', 'votes': 900},
{'choice': 'The Decemberists', 'votes': 500},
]
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
self.assertHTMLEqual(
'\n'.join(form.as_ul() for form in formset.forms),
"""<li>Choice: <input type="text" name="choices-0-choice" value="Calexico"></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100"></li>
<li>Order: <input type="number" name="choices-0-ORDER" value="1"></li>
<li>Delete: <input type="checkbox" name="choices-0-DELETE"></li>
<li>Choice: <input type="text" name="choices-1-choice" value="Fergie"></li>
<li>Votes: <input type="number" name="choices-1-votes" value="900"></li>
<li>Order: <input type="number" name="choices-1-ORDER" value="2"></li>
<li>Delete: <input type="checkbox" name="choices-1-DELETE"></li>
<li>Choice: <input type="text" name="choices-2-choice" value="The Decemberists"></li>
<li>Votes: <input type="number" name="choices-2-votes" value="500"></li>
<li>Order: <input type="number" name="choices-2-ORDER" value="3"></li>
<li>Delete: <input type="checkbox" name="choices-2-DELETE"></li>
<li>Choice: <input type="text" name="choices-3-choice"></li>
<li>Votes: <input type="number" name="choices-3-votes"></li>
<li>Order: <input type="number" name="choices-3-ORDER"></li>
<li>Delete: <input type="checkbox" name="choices-3-DELETE"></li>"""
)
# Let's delete Fergie, and put The Decemberists ahead of Calexico.
data = {
'choices-TOTAL_FORMS': '4', # the number of forms rendered
'choices-INITIAL_FORMS': '3', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-0-ORDER': '1',
'choices-0-DELETE': '',
'choices-1-choice': 'Fergie',
'choices-1-votes': '900',
'choices-1-ORDER': '2',
'choices-1-DELETE': 'on',
'choices-2-choice': 'The Decemberists',
'choices-2-votes': '500',
'choices-2-ORDER': '0',
'choices-2-DELETE': '',
'choices-3-choice': '',
'choices-3-votes': '',
'choices-3-ORDER': '',
'choices-3-DELETE': '',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual(
[form.cleaned_data for form in formset.ordered_forms],
[
{'votes': 500, 'DELETE': False, 'ORDER': 0, 'choice': 'The Decemberists'},
{'votes': 100, 'DELETE': False, 'ORDER': 1, 'choice': 'Calexico'},
],
)
self.assertEqual(
[form.cleaned_data for form in formset.deleted_forms],
[{'votes': 900, 'DELETE': True, 'ORDER': 2, 'choice': 'Fergie'}]
)
def test_invalid_deleted_form_with_ordering(self):
"""
Can get ordered_forms from a valid formset even if a deleted form
would have been invalid.
"""
FavoriteDrinkFormset = formset_factory(form=FavoriteDrinkForm, can_delete=True, can_order=True)
formset = FavoriteDrinkFormset({
'form-0-name': '',
'form-0-DELETE': 'on', # no name!
'form-TOTAL_FORMS': 1,
'form-INITIAL_FORMS': 1,
'form-MIN_NUM_FORMS': 0,
'form-MAX_NUM_FORMS': 1
})
self.assertTrue(formset.is_valid())
self.assertEqual(formset.ordered_forms, [])
def test_clean_hook(self):
"""
FormSets have a clean() hook for doing extra validation that isn't tied
to any form. It follows the same pattern as the clean() hook on Forms.
"""
# Start out with some duplicate data.
data = {
'drinks-TOTAL_FORMS': '2', # the number of forms rendered
'drinks-INITIAL_FORMS': '0', # the number of forms with initial data
'drinks-MIN_NUM_FORMS': '0', # min number of forms
'drinks-MAX_NUM_FORMS': '0', # max number of forms
'drinks-0-name': 'Gin and Tonic',
'drinks-1-name': 'Gin and Tonic',
}
formset = FavoriteDrinksFormSet(data, prefix='drinks')
self.assertFalse(formset.is_valid())
# Any errors raised by formset.clean() are available via the
# formset.non_form_errors() method.
for error in formset.non_form_errors():
self.assertEqual(str(error), 'You may only specify a drink once.')
# The valid case still works.
data['drinks-1-name'] = 'Bloody Mary'
formset = FavoriteDrinksFormSet(data, prefix='drinks')
self.assertTrue(formset.is_valid())
self.assertEqual(formset.non_form_errors(), [])
def test_limiting_max_forms(self):
"""Limiting the maximum number of forms with max_num."""
# When not passed, max_num will take a high default value, leaving the
# number of forms only controlled by the value of the extra parameter.
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=3)
formset = LimitedFavoriteDrinkFormSet()
self.assertHTMLEqual(
'\n'.join(str(form) for form in formset.forms),
"""<tr><th><label for="id_form-0-name">Name:</label></th>
<td><input type="text" name="form-0-name" id="id_form-0-name"></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th>
<td><input type="text" name="form-1-name" id="id_form-1-name"></td></tr>
<tr><th><label for="id_form-2-name">Name:</label></th>
<td><input type="text" name="form-2-name" id="id_form-2-name"></td></tr>"""
)
# If max_num is 0 then no form is rendered at all.
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=3, max_num=0)
formset = LimitedFavoriteDrinkFormSet()
self.assertEqual(formset.forms, [])
def test_limited_max_forms_two(self):
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=5, max_num=2)
formset = LimitedFavoriteDrinkFormSet()
self.assertHTMLEqual(
'\n'.join(str(form) for form in formset.forms),
"""<tr><th><label for="id_form-0-name">Name:</label></th><td>
<input type="text" name="form-0-name" id="id_form-0-name"></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th>
<td><input type="text" name="form-1-name" id="id_form-1-name"></td></tr>"""
)
def test_limiting_extra_lest_than_max_num(self):
"""max_num has no effect when extra is less than max_num."""
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=2)
formset = LimitedFavoriteDrinkFormSet()
self.assertHTMLEqual(
'\n'.join(str(form) for form in formset.forms),
"""<tr><th><label for="id_form-0-name">Name:</label></th>
<td><input type="text" name="form-0-name" id="id_form-0-name"></td></tr>"""
)
def test_max_num_with_initial_data(self):
# When not passed, max_num will take a high default value, leaving the
# number of forms only controlled by the value of the initial and extra
# parameters.
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1)
formset = LimitedFavoriteDrinkFormSet(initial=[{'name': 'Fernet and Coke'}])
self.assertHTMLEqual(
'\n'.join(str(form) for form in formset.forms),
"""<tr><th><label for="id_form-0-name">Name:</label></th>
<td><input type="text" name="form-0-name" value="Fernet and Coke" id="id_form-0-name"></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th>
<td><input type="text" name="form-1-name" id="id_form-1-name"></td></tr>"""
)
def test_max_num_zero(self):
"""
If max_num is 0 then no form is rendered at all, regardless of extra,
unless initial data is present.
"""
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=0)
formset = LimitedFavoriteDrinkFormSet()
self.assertEqual(formset.forms, [])
def test_max_num_zero_with_initial(self):
# initial trumps max_num
initial = [
{'name': 'Fernet and Coke'},
{'name': 'Bloody Mary'},
]
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=0)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
self.assertHTMLEqual(
'\n'.join(str(form) for form in formset.forms),
"""<tr><th><label for="id_form-0-name">Name:</label></th>
<td><input id="id_form-0-name" name="form-0-name" type="text" value="Fernet and Coke"></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th>
<td><input id="id_form-1-name" name="form-1-name" type="text" value="Bloody Mary"></td></tr>"""
)
def test_more_initial_than_max_num(self):
"""
More initial forms than max_num results in all initial forms being
displayed (but no extra forms).
"""
initial = [
{'name': 'Gin Tonic'},
{'name': 'Bloody Mary'},
{'name': 'Jack and Coke'},
]
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=2)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
self.assertHTMLEqual(
'\n'.join(str(form) for form in formset.forms),
"""<tr><th><label for="id_form-0-name">Name:</label></th>
<td><input id="id_form-0-name" name="form-0-name" type="text" value="Gin Tonic"></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th>
<td><input id="id_form-1-name" name="form-1-name" type="text" value="Bloody Mary"></td></tr>
<tr><th><label for="id_form-2-name">Name:</label></th>
<td><input id="id_form-2-name" name="form-2-name" type="text" value="Jack and Coke"></td></tr>"""
)
def test_default_absolute_max(self):
# absolute_max defaults to 2 * DEFAULT_MAX_NUM if max_num is None.
data = {
'form-TOTAL_FORMS': 2001,
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '0',
}
formset = FavoriteDrinksFormSet(data=data)
self.assertIs(formset.is_valid(), False)
self.assertEqual(
formset.non_form_errors(),
['Please submit at most 1000 forms.'],
)
self.assertEqual(formset.absolute_max, 2000)
def test_absolute_max(self):
data = {
'form-TOTAL_FORMS': '2001',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '0',
}
AbsoluteMaxFavoriteDrinksFormSet = formset_factory(
FavoriteDrinkForm,
absolute_max=3000,
)
formset = AbsoluteMaxFavoriteDrinksFormSet(data=data)
self.assertIs(formset.is_valid(), True)
self.assertEqual(len(formset.forms), 2001)
# absolute_max provides a hard limit.
data['form-TOTAL_FORMS'] = '3001'
formset = AbsoluteMaxFavoriteDrinksFormSet(data=data)
self.assertIs(formset.is_valid(), False)
self.assertEqual(len(formset.forms), 3000)
self.assertEqual(
formset.non_form_errors(),
['Please submit at most 1000 forms.'],
)
def test_absolute_max_with_max_num(self):
data = {
'form-TOTAL_FORMS': '1001',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '0',
}
LimitedFavoriteDrinksFormSet = formset_factory(
FavoriteDrinkForm,
max_num=30,
absolute_max=1000,
)
formset = LimitedFavoriteDrinksFormSet(data=data)
self.assertIs(formset.is_valid(), False)
self.assertEqual(len(formset.forms), 1000)
self.assertEqual(
formset.non_form_errors(),
['Please submit at most 30 forms.'],
)
def test_absolute_max_invalid(self):
msg = "'absolute_max' must be greater or equal to 'max_num'."
for max_num in [None, 31]:
with self.subTest(max_num=max_num):
with self.assertRaisesMessage(ValueError, msg):
formset_factory(FavoriteDrinkForm, max_num=max_num, absolute_max=30)
def test_more_initial_form_result_in_one(self):
"""
One form from initial and extra=3 with max_num=2 results in the one
initial form and one extra.
"""
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=3, max_num=2)
formset = LimitedFavoriteDrinkFormSet(initial=[{'name': 'Gin Tonic'}])
self.assertHTMLEqual(
'\n'.join(str(form) for form in formset.forms),
"""<tr><th><label for="id_form-0-name">Name:</label></th>
<td><input type="text" name="form-0-name" value="Gin Tonic" id="id_form-0-name"></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th>
<td><input type="text" name="form-1-name" id="id_form-1-name"></td></tr>"""
)
def test_management_form_prefix(self):
"""The management form has the correct prefix."""
formset = FavoriteDrinksFormSet()
self.assertEqual(formset.management_form.prefix, 'form')
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MIN_NUM_FORMS': '0',
'form-MAX_NUM_FORMS': '0',
}
formset = FavoriteDrinksFormSet(data=data)
self.assertEqual(formset.management_form.prefix, 'form')
formset = FavoriteDrinksFormSet(initial={})
self.assertEqual(formset.management_form.prefix, 'form')
def test_non_form_errors(self):
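"""non_form_errors() contains errors raised by the formset's clean() hook."""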
data = {
'drinks-TOTAL_FORMS': '2', # the number of forms rendered
'drinks-INITIAL_FORMS': '0', # the number of forms with initial data
'drinks-MIN_NUM_FORMS': '0', # min number of forms
'drinks-MAX_NUM_FORMS': '0', # max number of forms
'drinks-0-name': 'Gin and Tonic',
'drinks-1-name': 'Gin and Tonic',
}
formset = FavoriteDrinksFormSet(data, prefix='drinks')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), ['You may only specify a drink once.'])
def test_formset_iteration(self):
"""Formset instances are iterable."""
ChoiceFormset = formset_factory(Choice, extra=3)
formset = ChoiceFormset()
# An iterated formset yields formset.forms.
forms = list(formset)
self.assertEqual(forms, formset.forms)
self.assertEqual(len(formset), len(forms))
# A formset may be indexed to retrieve its forms.
self.assertEqual(formset[0], forms[0])
with self.assertRaises(IndexError):
formset[3]
# Formsets can override the default iteration order
class BaseReverseFormSet(BaseFormSet):
def __iter__(self):
return reversed(self.forms)
def __getitem__(self, idx):
return super().__getitem__(len(self) - idx - 1)
ReverseChoiceFormset = formset_factory(Choice, BaseReverseFormSet, extra=3)
reverse_formset = ReverseChoiceFormset()
# __iter__() modifies the rendering order.
# Compare forms from "reverse" formset with forms from original formset
self.assertEqual(str(reverse_formset[0]), str(forms[-1]))
self.assertEqual(str(reverse_formset[1]), str(forms[-2]))
self.assertEqual(len(reverse_formset), len(forms))
def test_formset_nonzero(self):
"""A formsets without any forms evaluates as True."""
ChoiceFormset = formset_factory(Choice, extra=0)
formset = ChoiceFormset()
self.assertEqual(len(formset.forms), 0)
self.assertTrue(formset)
def test_formset_splitdatetimefield(self):
"""
Formset works with SplitDateTimeField(initial=datetime.datetime.now).
"""
class SplitDateTimeForm(Form):
when = SplitDateTimeField(initial=datetime.datetime.now)
SplitDateTimeFormSet = formset_factory(SplitDateTimeForm)
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-0-when_0': '1904-06-16',
'form-0-when_1': '15:51:33',
}
formset = SplitDateTimeFormSet(data)
self.assertTrue(formset.is_valid())
def test_formset_error_class(self):
"""Formset's forms use the formset's error_class."""
class CustomErrorList(ErrorList):
pass
formset = FavoriteDrinksFormSet(error_class=CustomErrorList)
self.assertEqual(formset.forms[0].error_class, CustomErrorList)
def test_formset_calls_forms_is_valid(self):
"""Formsets call is_valid() on each form."""
class AnotherChoice(Choice):
def is_valid(self):
self.is_valid_called = True
return super().is_valid()
AnotherChoiceFormSet = formset_factory(AnotherChoice)
data = {
'choices-TOTAL_FORMS': '1', # number of forms rendered
'choices-INITIAL_FORMS': '0', # number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
}
formset = AnotherChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertTrue(all(form.is_valid_called for form in formset.forms))
def test_hard_limit_on_instantiated_forms(self):
"""A formset has a hard limit on the number of forms instantiated."""
# reduce the default limit of 1000 temporarily for testing
_old_DEFAULT_MAX_NUM = formsets.DEFAULT_MAX_NUM
try:
formsets.DEFAULT_MAX_NUM = 2
ChoiceFormSet = formset_factory(Choice, max_num=1)
# someone fiddles with the mgmt form data...
formset = ChoiceFormSet(
{
'choices-TOTAL_FORMS': '4',
'choices-INITIAL_FORMS': '0',
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '4',
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
'choices-2-choice': 'Two',
'choices-2-votes': '2',
'choices-3-choice': 'Three',
'choices-3-votes': '3',
},
prefix='choices',
)
# But we still only instantiate 3 forms
self.assertEqual(len(formset.forms), 3)
# and the formset isn't valid
self.assertFalse(formset.is_valid())
finally:
formsets.DEFAULT_MAX_NUM = _old_DEFAULT_MAX_NUM
def test_increase_hard_limit(self):
"""Can increase the built-in forms limit via a higher max_num."""
# reduce the default limit of 1000 temporarily for testing
_old_DEFAULT_MAX_NUM = formsets.DEFAULT_MAX_NUM
try:
formsets.DEFAULT_MAX_NUM = 3
# for this form, we want a limit of 4
ChoiceFormSet = formset_factory(Choice, max_num=4)
formset = ChoiceFormSet(
{
'choices-TOTAL_FORMS': '4',
'choices-INITIAL_FORMS': '0',
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '4',
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
'choices-2-choice': 'Two',
'choices-2-votes': '2',
'choices-3-choice': 'Three',
'choices-3-votes': '3',
},
prefix='choices',
)
# Four forms are instantiated and no exception is raised
self.assertEqual(len(formset.forms), 4)
finally:
formsets.DEFAULT_MAX_NUM = _old_DEFAULT_MAX_NUM
def test_non_form_errors_run_full_clean(self):
"""
If non_form_errors() is called without calling is_valid() first,
it should ensure that full_clean() is called.
"""
class BaseCustomFormSet(BaseFormSet):
def clean(self):
raise ValidationError("This is a non-form error")
ChoiceFormSet = formset_factory(Choice, formset=BaseCustomFormSet)
data = {
'choices-TOTAL_FORMS': '1',
'choices-INITIAL_FORMS': '0',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertIsInstance(formset.non_form_errors(), ErrorList)
self.assertEqual(list(formset.non_form_errors()), ['This is a non-form error'])
def test_validate_max_ignores_forms_marked_for_deletion(self):
class CheckForm(Form):
field = IntegerField()
data = {
'check-TOTAL_FORMS': '2',
'check-INITIAL_FORMS': '0',
'check-MAX_NUM_FORMS': '1',
'check-0-field': '200',
'check-0-DELETE': '',
'check-1-field': '50',
'check-1-DELETE': 'on',
}
CheckFormSet = formset_factory(CheckForm, max_num=1, validate_max=True, can_delete=True)
formset = CheckFormSet(data, prefix='check')
self.assertTrue(formset.is_valid())
def test_formset_total_error_count(self):
"""A valid formset should have 0 total errors."""
data = [ # formset_data, expected error count
([('Calexico', '100')], 0),
([('Calexico', '')], 1),
([('', 'invalid')], 2),
([('Calexico', '100'), ('Calexico', '')], 1),
([('Calexico', ''), ('Calexico', '')], 2),
]
for formset_data, expected_error_count in data:
formset = self.make_choiceformset(formset_data)
self.assertEqual(formset.total_error_count(), expected_error_count)
def test_formset_total_error_count_with_non_form_errors(self):
data = {
'choices-TOTAL_FORMS': '2', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '2', # max number of forms - should be ignored
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
}
ChoiceFormSet = formset_factory(Choice, extra=1, max_num=1, validate_max=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertEqual(formset.total_error_count(), 1)
data['choices-1-votes'] = ''
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertEqual(formset.total_error_count(), 2)
def test_html_safe(self):
formset = self.make_choiceformset()
self.assertTrue(hasattr(formset, '__html__'))
self.assertEqual(str(formset), formset.__html__())
def test_can_delete_extra_formset_forms(self):
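"""By default, can_delete adds a DELETE field to extra (unbound) forms too."""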
ChoiceFormFormset = formset_factory(form=Choice, can_delete=True, extra=2)
formset = ChoiceFormFormset()
self.assertEqual(len(formset), 2)
self.assertIn('DELETE', formset.forms[0].fields)
self.assertIn('DELETE', formset.forms[1].fields)
def test_disable_delete_extra_formset_forms(self):
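"""can_delete_extra=False omits the DELETE field from extra forms."""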
ChoiceFormFormset = formset_factory(
form=Choice,
can_delete=True,
can_delete_extra=False,
extra=2,
)
formset = ChoiceFormFormset()
self.assertEqual(len(formset), 2)
self.assertNotIn('DELETE', formset.forms[0].fields)
self.assertNotIn('DELETE', formset.forms[1].fields)
formset = ChoiceFormFormset(initial=[{'choice': 'Zero', 'votes': '1'}])
self.assertEqual(len(formset), 3)
self.assertIn('DELETE', formset.forms[0].fields)
self.assertNotIn('DELETE', formset.forms[1].fields)
self.assertNotIn('DELETE', formset.forms[2].fields)
formset = ChoiceFormFormset(data={
'form-0-choice': 'Zero',
'form-0-votes': '0',
'form-0-DELETE': 'on',
'form-1-choice': 'One',
'form-1-votes': '1',
'form-2-choice': '',
'form-2-votes': '',
'form-TOTAL_FORMS': '3',
'form-INITIAL_FORMS': '1',
}, initial=[{'choice': 'Zero', 'votes': '1'}])
self.assertEqual(formset.cleaned_data, [
{'choice': 'Zero', 'votes': 0, 'DELETE': True},
{'choice': 'One', 'votes': 1},
{},
])
self.assertIs(formset._should_delete_form(formset.forms[0]), True)
self.assertIs(formset._should_delete_form(formset.forms[1]), False)
self.assertIs(formset._should_delete_form(formset.forms[2]), False)
class FormsetAsTagTests(SimpleTestCase):
def setUp(self):
data = {
'choices-TOTAL_FORMS': '1',
'choices-INITIAL_FORMS': '0',
'choices-MIN_NUM_FORMS': '0',
'choices-MAX_NUM_FORMS': '0',
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
}
self.formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.management_form_html = (
'<input type="hidden" name="choices-TOTAL_FORMS" value="1">'
'<input type="hidden" name="choices-INITIAL_FORMS" value="0">'
'<input type="hidden" name="choices-MIN_NUM_FORMS" value="0">'
'<input type="hidden" name="choices-MAX_NUM_FORMS" value="0">'
)
def test_as_table(self):
self.assertHTMLEqual(
self.formset.as_table(),
self.management_form_html + (
'<tr><th>Choice:</th><td>'
'<input type="text" name="choices-0-choice" value="Calexico"></td></tr>'
'<tr><th>Votes:</th><td>'
'<input type="number" name="choices-0-votes" value="100"></td></tr>'
)
)
def test_as_p(self):
self.assertHTMLEqual(
self.formset.as_p(),
self.management_form_html + (
'<p>Choice: <input type="text" name="choices-0-choice" value="Calexico"></p>'
'<p>Votes: <input type="number" name="choices-0-votes" value="100"></p>'
)
)
def test_as_ul(self):
self.assertHTMLEqual(
self.formset.as_ul(),
self.management_form_html + (
'<li>Choice: <input type="text" name="choices-0-choice" value="Calexico"></li>'
'<li>Votes: <input type="number" name="choices-0-votes" value="100"></li>'
)
)
class ArticleForm(Form):
title = CharField()
pub_date = DateField()
ArticleFormSet = formset_factory(ArticleForm)
class TestIsBoundBehavior(SimpleTestCase):
def test_no_data_error(self):
formset = ArticleFormSet({})
self.assertIs(formset.is_valid(), False)
self.assertEqual(
formset.non_form_errors(),
[
'ManagementForm data is missing or has been tampered with. '
'Missing fields: form-TOTAL_FORMS, form-INITIAL_FORMS. '
'You may need to file a bug report if the issue persists.',
],
)
self.assertEqual(formset.errors, [])
# Can still render the formset.
self.assertEqual(
str(formset),
'<tr><td colspan="2">'
'<ul class="errorlist nonfield">'
'<li>(Hidden field TOTAL_FORMS) This field is required.</li>'
'<li>(Hidden field INITIAL_FORMS) This field is required.</li>'
'</ul>'
'<input type="hidden" name="form-TOTAL_FORMS" id="id_form-TOTAL_FORMS">'
'<input type="hidden" name="form-INITIAL_FORMS" id="id_form-INITIAL_FORMS">'
'<input type="hidden" name="form-MIN_NUM_FORMS" id="id_form-MIN_NUM_FORMS">'
'<input type="hidden" name="form-MAX_NUM_FORMS" id="id_form-MAX_NUM_FORMS">'
'</td></tr>\n'
)
def test_management_form_invalid_data(self):
data = {
'form-TOTAL_FORMS': 'two',
'form-INITIAL_FORMS': 'one',
}
formset = ArticleFormSet(data)
self.assertIs(formset.is_valid(), False)
self.assertEqual(
formset.non_form_errors(),
[
'ManagementForm data is missing or has been tampered with. '
'Missing fields: form-TOTAL_FORMS, form-INITIAL_FORMS. '
'You may need to file a bug report if the issue persists.',
],
)
self.assertEqual(formset.errors, [])
# Can still render the formset.
self.assertEqual(
str(formset),
'<tr><td colspan="2">'
'<ul class="errorlist nonfield">'
'<li>(Hidden field TOTAL_FORMS) Enter a whole number.</li>'
'<li>(Hidden field INITIAL_FORMS) Enter a whole number.</li>'
'</ul>'
'<input type="hidden" name="form-TOTAL_FORMS" value="two" id="id_form-TOTAL_FORMS">'
'<input type="hidden" name="form-INITIAL_FORMS" value="one" id="id_form-INITIAL_FORMS">'
'<input type="hidden" name="form-MIN_NUM_FORMS" id="id_form-MIN_NUM_FORMS">'
'<input type="hidden" name="form-MAX_NUM_FORMS" id="id_form-MAX_NUM_FORMS">'
'</td></tr>\n',
)
def test_customize_management_form_error(self):
formset = ArticleFormSet({}, error_messages={'missing_management_form': 'customized'})
self.assertIs(formset.is_valid(), False)
self.assertEqual(formset.non_form_errors(), ['customized'])
self.assertEqual(formset.errors, [])
def test_with_management_data_attrs_work_fine(self):
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
}
formset = ArticleFormSet(data)
self.assertEqual(0, formset.initial_form_count())
self.assertEqual(1, formset.total_form_count())
self.assertTrue(formset.is_bound)
self.assertTrue(formset.forms[0].is_bound)
self.assertTrue(formset.is_valid())
self.assertTrue(formset.forms[0].is_valid())
self.assertEqual([{}], formset.cleaned_data)
def test_form_errors_are_caught_by_formset(self):
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-0-title': 'Test',
'form-0-pub_date': '1904-06-16',
'form-1-title': 'Test',
'form-1-pub_date': '', # <-- this date is missing but required
}
formset = ArticleFormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual([{}, {'pub_date': ['This field is required.']}], formset.errors)
def test_empty_forms_are_unbound(self):
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-0-title': 'Test',
'form-0-pub_date': '1904-06-16',
}
unbound_formset = ArticleFormSet()
bound_formset = ArticleFormSet(data)
empty_forms = [
unbound_formset.empty_form,
bound_formset.empty_form
]
# Empty forms should be unbound
self.assertFalse(empty_forms[0].is_bound)
self.assertFalse(empty_forms[1].is_bound)
# The empty forms should be equal.
self.assertHTMLEqual(empty_forms[0].as_p(), empty_forms[1].as_p())
class TestEmptyFormSet(SimpleTestCase):
def test_empty_formset_is_valid(self):
"""An empty formset still calls clean()"""
class EmptyFsetWontValidate(BaseFormSet):
def clean(self):
raise ValidationError('Clean method called')
EmptyFsetWontValidateFormset = formset_factory(FavoriteDrinkForm, extra=0, formset=EmptyFsetWontValidate)
formset = EmptyFsetWontValidateFormset(
data={'form-INITIAL_FORMS': '0', 'form-TOTAL_FORMS': '0'},
prefix="form",
)
formset2 = EmptyFsetWontValidateFormset(
data={'form-INITIAL_FORMS': '0', 'form-TOTAL_FORMS': '1', 'form-0-name': 'bah'},
prefix="form",
)
self.assertFalse(formset.is_valid())
self.assertFalse(formset2.is_valid())
def test_empty_formset_media(self):
"""Media is available on empty formset."""
class MediaForm(Form):
class Media:
js = ('some-file.js',)
self.assertIn('some-file.js', str(formset_factory(MediaForm, extra=0)().media))
def test_empty_formset_is_multipart(self):
"""is_multipart() works with an empty formset."""
class FileForm(Form):
file = FileField()
self.assertTrue(formset_factory(FileForm, extra=0)().is_multipart())
class AllValidTests(SimpleTestCase):
def test_valid(self):
data = {
'choices-TOTAL_FORMS': '2',
'choices-INITIAL_FORMS': '0',
'choices-MIN_NUM_FORMS': '0',
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
}
ChoiceFormSet = formset_factory(Choice)
formset1 = ChoiceFormSet(data, auto_id=False, prefix='choices')
formset2 = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertIs(all_valid((formset1, formset2)), True)
expected_errors = [{}, {}]
self.assertEqual(formset1._errors, expected_errors)
self.assertEqual(formset2._errors, expected_errors)
def test_invalid(self):
"""all_valid() validates all forms, even when some are invalid."""
data = {
'choices-TOTAL_FORMS': '2',
'choices-INITIAL_FORMS': '0',
'choices-MIN_NUM_FORMS': '0',
'choices-0-choice': 'Zero',
'choices-0-votes': '',
'choices-1-choice': 'One',
'choices-1-votes': '',
}
ChoiceFormSet = formset_factory(Choice)
formset1 = ChoiceFormSet(data, auto_id=False, prefix='choices')
formset2 = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertIs(all_valid((formset1, formset2)), False)
expected_errors = [{'votes': ['This field is required.']}, {'votes': ['This field is required.']}]
self.assertEqual(formset1._errors, expected_errors)
self.assertEqual(formset2._errors, expected_errors)
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from tempest.api.volume import base
from tempest.common.utils import data_utils
from tempest.common import waiters
from tempest.lib import exceptions as lib_exc
from tempest import test
class VolumesV2NegativeTest(base.BaseVolumeTest):
@classmethod
def setup_clients(cls):
super(VolumesV2NegativeTest, cls).setup_clients()
cls.client = cls.volumes_client
@classmethod
def resource_setup(cls):
super(VolumesV2NegativeTest, cls).resource_setup()
cls.name_field = cls.special_fields['name_field']
# Create a test shared instance and volume for attach/detach tests
cls.volume = cls.create_volume()
cls.mountpoint = "/dev/vdc"
@test.attr(type=['negative'])
@test.idempotent_id('f131c586-9448-44a4-a8b0-54ca838aa43e')
def test_volume_get_nonexistent_volume_id(self):
# Should not be able to get a non-existent volume
self.assertRaises(lib_exc.NotFound, self.client.show_volume,
str(uuid.uuid4()))
@test.attr(type=['negative'])
@test.idempotent_id('555efa6e-efcd-44ef-8a3b-4a7ca4837a29')
def test_volume_delete_nonexistent_volume_id(self):
# Should not be able to delete a non-existent Volume
self.assertRaises(lib_exc.NotFound, self.client.delete_volume,
str(uuid.uuid4()))
@test.attr(type=['negative'])
@test.idempotent_id('1ed83a8a-682d-4dfb-a30e-ee63ffd6c049')
def test_create_volume_with_invalid_size(self):
# Should not be able to create volume with invalid size
# in request
v_name = data_utils.rand_name('Volume')
metadata = {'Type': 'work'}
self.assertRaises(lib_exc.BadRequest, self.client.create_volume,
size='#$%', display_name=v_name, metadata=metadata)
@test.attr(type=['negative'])
@test.idempotent_id('9387686f-334f-4d31-a439-33494b9e2683')
def test_create_volume_with_out_passing_size(self):
# Should not be able to create volume without passing size
# in request
v_name = data_utils.rand_name('Volume')
metadata = {'Type': 'work'}
self.assertRaises(lib_exc.BadRequest, self.client.create_volume,
size='', display_name=v_name, metadata=metadata)
@test.attr(type=['negative'])
@test.idempotent_id('41331caa-eaf4-4001-869d-bc18c1869360')
def test_create_volume_with_size_zero(self):
# Should not be able to create volume with size zero
v_name = data_utils.rand_name('Volume')
metadata = {'Type': 'work'}
self.assertRaises(lib_exc.BadRequest, self.client.create_volume,
size='0', display_name=v_name, metadata=metadata)
@test.attr(type=['negative'])
@test.idempotent_id('8b472729-9eba-446e-a83b-916bdb34bef7')
def test_create_volume_with_size_negative(self):
# Should not be able to create volume with size negative
v_name = data_utils.rand_name('Volume')
metadata = {'Type': 'work'}
self.assertRaises(lib_exc.BadRequest, self.client.create_volume,
size='-1', display_name=v_name, metadata=metadata)
@test.attr(type=['negative'])
@test.idempotent_id('10254ed8-3849-454e-862e-3ab8e6aa01d2')
def test_create_volume_with_nonexistent_volume_type(self):
# Should not be able to create volume with non-existent volume type
v_name = data_utils.rand_name('Volume')
metadata = {'Type': 'work'}
self.assertRaises(lib_exc.NotFound, self.client.create_volume,
size='1', volume_type=str(uuid.uuid4()),
display_name=v_name, metadata=metadata)
@test.attr(type=['negative'])
@test.idempotent_id('0c36f6ae-4604-4017-b0a9-34fdc63096f9')
def test_create_volume_with_nonexistent_snapshot_id(self):
# Should not be able to create volume with non-existent snapshot
v_name = data_utils.rand_name('Volume')
metadata = {'Type': 'work'}
self.assertRaises(lib_exc.NotFound, self.client.create_volume,
size='1', snapshot_id=str(uuid.uuid4()),
display_name=v_name, metadata=metadata)
@test.attr(type=['negative'])
@test.idempotent_id('47c73e08-4be8-45bb-bfdf-0c4e79b88344')
def test_create_volume_with_nonexistent_source_volid(self):
# Should not be able to create volume with non-existent source volume
v_name = data_utils.rand_name('Volume')
metadata = {'Type': 'work'}
self.assertRaises(lib_exc.NotFound, self.client.create_volume,
size='1', source_volid=str(uuid.uuid4()),
display_name=v_name, metadata=metadata)
@test.attr(type=['negative'])
@test.idempotent_id('0186422c-999a-480e-a026-6a665744c30c')
def test_update_volume_with_nonexistent_volume_id(self):
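# Should not be able to update a non-existent volume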
v_name = data_utils.rand_name('Volume')
metadata = {'Type': 'work'}
self.assertRaises(lib_exc.NotFound, self.client.update_volume,
volume_id=str(uuid.uuid4()), display_name=v_name,
metadata=metadata)
@test.attr(type=['negative'])
@test.idempotent_id('e66e40d6-65e6-4e75-bdc7-636792fa152d')
def test_update_volume_with_invalid_volume_id(self):
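# Should not be able to update a volume with an invalid volume id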
v_name = data_utils.rand_name('Volume')
metadata = {'Type': 'work'}
self.assertRaises(lib_exc.NotFound, self.client.update_volume,
volume_id='#$%%&^&^', display_name=v_name,
metadata=metadata)
@test.attr(type=['negative'])
@test.idempotent_id('72aeca85-57a5-4c1f-9057-f320f9ea575b')
def test_update_volume_with_empty_volume_id(self):
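# Should not be able to update a volume when an empty id is passed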
v_name = data_utils.rand_name('Volume')
metadata = {'Type': 'work'}
self.assertRaises(lib_exc.NotFound, self.client.update_volume,
volume_id='', display_name=v_name,
metadata=metadata)
@test.attr(type=['negative'])
@test.idempotent_id('30799cfd-7ee4-446c-b66c-45b383ed211b')
def test_get_invalid_volume_id(self):
# Should not be able to get volume with invalid id
self.assertRaises(lib_exc.NotFound, self.client.show_volume,
'#$%%&^&^')
@test.attr(type=['negative'])
@test.idempotent_id('c6c3db06-29ad-4e91-beb0-2ab195fe49e3')
def test_get_volume_without_passing_volume_id(self):
# Should not be able to get volume when empty ID is passed
self.assertRaises(lib_exc.NotFound, self.client.show_volume, '')
@test.attr(type=['negative'])
@test.idempotent_id('1f035827-7c32-4019-9240-b4ec2dbd9dfd')
def test_delete_invalid_volume_id(self):
# Should not be able to delete volume when invalid ID is passed
self.assertRaises(lib_exc.NotFound, self.client.delete_volume,
'!@#$%^&*()')
@test.attr(type=['negative'])
@test.idempotent_id('441a1550-5d44-4b30-af0f-a6d402f52026')
def test_delete_volume_without_passing_volume_id(self):
# Should not be able to delete volume when empty ID is passed
self.assertRaises(lib_exc.NotFound, self.client.delete_volume, '')
@test.attr(type=['negative'])
@test.idempotent_id('f5e56b0a-5d02-43c1-a2a7-c9b792c2e3f6')
@test.services('compute')
def test_attach_volumes_with_nonexistent_volume_id(self):
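# Should not be able to attach a non-existent volume to a server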
srv_name = data_utils.rand_name('Instance')
server = self.create_server(
name=srv_name,
wait_until='ACTIVE')
self.addCleanup(waiters.wait_for_server_termination,
self.servers_client, server['id'])
self.addCleanup(self.servers_client.delete_server, server['id'])
self.assertRaises(lib_exc.NotFound,
self.client.attach_volume,
str(uuid.uuid4()),
instance_uuid=server['id'],
mountpoint=self.mountpoint)
@test.attr(type=['negative'])
@test.idempotent_id('9f9c24e4-011d-46b5-b992-952140ce237a')
def test_detach_volumes_with_invalid_volume_id(self):
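# Should not be able to detach a volume with an invalid volume id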
self.assertRaises(lib_exc.NotFound,
self.client.detach_volume,
'xxx')
@test.attr(type=['negative'])
@test.idempotent_id('e0c75c74-ee34-41a9-9288-2a2051452854')
def test_volume_extend_with_size_smaller_than_original_size(self):
# Extend volume with smaller size than original size.
extend_size = 0
self.assertRaises(lib_exc.BadRequest, self.client.extend_volume,
self.volume['id'], new_size=extend_size)
@test.attr(type=['negative'])
@test.idempotent_id('5d0b480d-e833-439f-8a5a-96ad2ed6f22f')
def test_volume_extend_with_non_number_size(self):
# Extend volume when size is non number.
extend_size = 'abc'
self.assertRaises(lib_exc.BadRequest, self.client.extend_volume,
self.volume['id'], new_size=extend_size)
@test.attr(type=['negative'])
@test.idempotent_id('355218f1-8991-400a-a6bb-971239287d92')
def test_volume_extend_with_None_size(self):
# Extend volume with None size.
extend_size = None
self.assertRaises(lib_exc.BadRequest, self.client.extend_volume,
self.volume['id'], new_size=extend_size)
@test.attr(type=['negative'])
@test.idempotent_id('8f05a943-013c-4063-ac71-7baf561e82eb')
def test_volume_extend_with_nonexistent_volume_id(self):
# Extend volume size when volume is nonexistent.
extend_size = int(self.volume['size']) + 1
self.assertRaises(lib_exc.NotFound, self.client.extend_volume,
str(uuid.uuid4()), new_size=extend_size)
@test.attr(type=['negative'])
@test.idempotent_id('aff8ba64-6d6f-4f2e-bc33-41a08ee9f115')
def test_volume_extend_without_passing_volume_id(self):
# Extend volume size when the volume id passed is None.
extend_size = int(self.volume['size']) + 1
self.assertRaises(lib_exc.NotFound, self.client.extend_volume,
None, new_size=extend_size)
@test.attr(type=['negative'])
@test.idempotent_id('ac6084c0-0546-45f9-b284-38a367e0e0e2')
def test_reserve_volume_with_nonexistent_volume_id(self):
self.assertRaises(lib_exc.NotFound,
self.client.reserve_volume,
str(uuid.uuid4()))
@test.attr(type=['negative'])
@test.idempotent_id('eb467654-3dc1-4a72-9b46-47c29d22654c')
def test_unreserve_volume_with_nonexistent_volume_id(self):
self.assertRaises(lib_exc.NotFound,
self.client.unreserve_volume,
str(uuid.uuid4()))
@test.attr(type=['negative'])
@test.idempotent_id('449c4ed2-ecdd-47bb-98dc-072aeccf158c')
def test_reserve_volume_with_negative_volume_status(self):
# Mark volume as reserved.
self.client.reserve_volume(self.volume['id'])
# Mark volume which is marked as reserved before
self.assertRaises(lib_exc.BadRequest,
self.client.reserve_volume,
self.volume['id'])
# Unmark volume as reserved.
self.client.unreserve_volume(self.volume['id'])
@test.attr(type=['negative'])
@test.idempotent_id('0f4aa809-8c7b-418f-8fb3-84c7a5dfc52f')
def test_list_volumes_with_nonexistent_name(self):
v_name = data_utils.rand_name('Volume')
params = {self.name_field: v_name}
fetched_volume = self.client.list_volumes(params=params)['volumes']
self.assertEqual(0, len(fetched_volume))
@test.attr(type=['negative'])
@test.idempotent_id('9ca17820-a0e7-4cbd-a7fa-f4468735e359')
def test_list_volumes_detail_with_nonexistent_name(self):
v_name = data_utils.rand_name('Volume')
params = {self.name_field: v_name}
fetched_volume = \
self.client.list_volumes(detail=True, params=params)['volumes']
self.assertEqual(0, len(fetched_volume))
@test.attr(type=['negative'])
@test.idempotent_id('143b279b-7522-466b-81be-34a87d564a7c')
def test_list_volumes_with_invalid_status(self):
params = {'status': 'null'}
fetched_volume = self.client.list_volumes(params=params)['volumes']
self.assertEqual(0, len(fetched_volume))
@test.attr(type=['negative'])
@test.idempotent_id('ba94b27b-be3f-496c-a00e-0283b373fa75')
def test_list_volumes_detail_with_invalid_status(self):
params = {'status': 'null'}
fetched_volume = \
self.client.list_volumes(detail=True, params=params)['volumes']
self.assertEqual(0, len(fetched_volume))
class VolumesV1NegativeTest(VolumesV2NegativeTest):
_api_version = 1
_name = 'display_name'
|
|
import datetime
import xml.etree.ElementTree as ET
from .shared import SepaPaymentInitn
from .utils import int_to_decimal_str, make_id
class SepaTransfer(SepaPaymentInitn):
"""
This class creates a Sepa transfer XML File.
"""
root_el = "CstmrCdtTrfInitn"
def __init__(self, config, schema="pain.001.001.03", clean=True):
super().__init__(config, schema, clean)
def check_config(self, config):
"""
Check the config file for required fields and validity.
@param config: The config dict.
        @return: True if valid; raises an Exception if invalid parameters were
                 encountered.
"""
validation = ""
required = ["name", "currency", "IBAN"]
for config_item in required:
if config_item not in config:
validation += config_item.upper() + "_MISSING "
if not validation:
return True
else:
raise Exception("Config file did not validate. " + validation)
def check_payment(self, payment):
"""
Check the payment for required fields and validity.
@param payment: The payment dict
        @return: True if valid; raises an Exception if invalid parameters were
                 encountered.
"""
validation = ""
required = ["name", "IBAN", "amount", "description", "execution_date"]
for config_item in required:
if config_item not in payment:
validation += config_item.upper() + "_MISSING "
if not isinstance(payment['amount'], int):
validation += "AMOUNT_NOT_INTEGER "
        if 'execution_date' in payment:
            if not isinstance(payment['execution_date'], datetime.date):
                validation += "EXECUTION_DATE_INVALID_OR_NOT_DATETIME_INSTANCE"
            else:
                payment['execution_date'] = payment['execution_date'].isoformat()
if validation == "":
return True
else:
raise Exception('Payment did not validate: ' + validation)
def add_payment(self, payment):
"""
Function to add payments
@param payment: The payment dict
@raise exception: when payment is invalid
"""
# Validate the payment
self.check_payment(payment)
if self.clean:
from text_unidecode import unidecode
payment['name'] = unidecode(payment['name'])[:70]
payment['description'] = unidecode(payment['description'])[:140]
        # Get the CstmrCdtTrfInitn node
if not self._config['batch']:
# Start building the non batch payment
PmtInf_nodes = self._create_PmtInf_node()
PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
PmtInf_nodes['PmtMtdNode'].text = "TRF"
PmtInf_nodes['BtchBookgNode'].text = "false"
PmtInf_nodes['NbOfTxsNode'].text = "1"
PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(
payment['amount']
)
PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
if 'execution_date' in payment:
PmtInf_nodes['ReqdExctnDtNode'].text = payment['execution_date']
else:
del PmtInf_nodes['ReqdExctnDtNode']
PmtInf_nodes['Nm_Dbtr_Node'].text = self._config['name']
PmtInf_nodes['IBAN_DbtrAcct_Node'].text = self._config['IBAN']
if 'BIC' in self._config:
PmtInf_nodes['BIC_DbtrAgt_Node'].text = self._config['BIC']
PmtInf_nodes['ChrgBrNode'].text = "SLEV"
if 'BIC' in payment:
bic = True
else:
bic = False
TX_nodes = self._create_TX_node(bic)
TX_nodes['InstdAmtNode'].set("Ccy", self._config['currency'])
TX_nodes['InstdAmtNode'].text = int_to_decimal_str(payment['amount'])
TX_nodes['EndToEnd_PmtId_Node'].text = payment.get('endtoend_id', 'NOTPROVIDED')
if bic:
TX_nodes['BIC_CdtrAgt_Node'].text = payment['BIC']
TX_nodes['Nm_Cdtr_Node'].text = payment['name']
TX_nodes['IBAN_CdtrAcct_Node'].text = payment['IBAN']
TX_nodes['UstrdNode'].text = payment['description']
if self._config['batch']:
self._add_batch(TX_nodes, payment)
else:
self._add_non_batch(TX_nodes, PmtInf_nodes)
def _create_header(self):
"""
Function to create the GroupHeader (GrpHdr) in the
CstmrCdtTrfInitn Node
"""
# Retrieve the node to which we will append the group header.
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
# Create the header nodes.
GrpHdr_node = ET.Element("GrpHdr")
MsgId_node = ET.Element("MsgId")
CreDtTm_node = ET.Element("CreDtTm")
NbOfTxs_node = ET.Element("NbOfTxs")
CtrlSum_node = ET.Element("CtrlSum")
InitgPty_node = ET.Element("InitgPty")
Nm_node = ET.Element("Nm")
# Add data to some header nodes.
MsgId_node.text = self.msg_id
CreDtTm_node.text = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
Nm_node.text = self._config['name']
# Append the nodes
InitgPty_node.append(Nm_node)
GrpHdr_node.append(MsgId_node)
GrpHdr_node.append(CreDtTm_node)
GrpHdr_node.append(NbOfTxs_node)
GrpHdr_node.append(CtrlSum_node)
GrpHdr_node.append(InitgPty_node)
# Append the header to its parent
CstmrCdtTrfInitn_node.append(GrpHdr_node)
def _create_PmtInf_node(self):
"""
Method to create the blank payment information nodes as a dict.
"""
ED = dict() # ED is element dict
ED['PmtInfNode'] = ET.Element("PmtInf")
ED['PmtInfIdNode'] = ET.Element("PmtInfId")
ED['PmtMtdNode'] = ET.Element("PmtMtd")
ED['BtchBookgNode'] = ET.Element("BtchBookg")
ED['NbOfTxsNode'] = ET.Element("NbOfTxs")
ED['CtrlSumNode'] = ET.Element("CtrlSum")
ED['PmtTpInfNode'] = ET.Element("PmtTpInf")
ED['SvcLvlNode'] = ET.Element("SvcLvl")
ED['Cd_SvcLvl_Node'] = ET.Element("Cd")
ED['ReqdExctnDtNode'] = ET.Element("ReqdExctnDt")
ED['DbtrNode'] = ET.Element("Dbtr")
ED['Nm_Dbtr_Node'] = ET.Element("Nm")
ED['DbtrAcctNode'] = ET.Element("DbtrAcct")
ED['Id_DbtrAcct_Node'] = ET.Element("Id")
ED['IBAN_DbtrAcct_Node'] = ET.Element("IBAN")
ED['DbtrAgtNode'] = ET.Element("DbtrAgt")
ED['FinInstnId_DbtrAgt_Node'] = ET.Element("FinInstnId")
if 'BIC' in self._config:
ED['BIC_DbtrAgt_Node'] = ET.Element("BIC")
ED['ChrgBrNode'] = ET.Element("ChrgBr")
return ED
def _create_TX_node(self, bic=True):
"""
Method to create the blank transaction nodes as a dict. If bic is True,
the BIC node will also be created.
"""
ED = dict()
ED['CdtTrfTxInfNode'] = ET.Element("CdtTrfTxInf")
ED['PmtIdNode'] = ET.Element("PmtId")
ED['EndToEnd_PmtId_Node'] = ET.Element("EndToEndId")
ED['AmtNode'] = ET.Element("Amt")
ED['InstdAmtNode'] = ET.Element("InstdAmt")
ED['CdtrNode'] = ET.Element("Cdtr")
ED['Nm_Cdtr_Node'] = ET.Element("Nm")
ED['CdtrAgtNode'] = ET.Element("CdtrAgt")
ED['FinInstnId_CdtrAgt_Node'] = ET.Element("FinInstnId")
if bic:
ED['BIC_CdtrAgt_Node'] = ET.Element("BIC")
ED['CdtrAcctNode'] = ET.Element("CdtrAcct")
ED['Id_CdtrAcct_Node'] = ET.Element("Id")
ED['IBAN_CdtrAcct_Node'] = ET.Element("IBAN")
ED['RmtInfNode'] = ET.Element("RmtInf")
ED['UstrdNode'] = ET.Element("Ustrd")
return ED
def _add_non_batch(self, TX_nodes, PmtInf_nodes):
"""
Method to add a transaction as non batch, will fold the transaction
together with the payment info node and append to the main xml.
"""
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
if 'ReqdExctnDtNode' in PmtInf_nodes:
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdExctnDtNode'])
PmtInf_nodes['DbtrNode'].append(PmtInf_nodes['Nm_Dbtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrNode'])
PmtInf_nodes['Id_DbtrAcct_Node'].append(PmtInf_nodes['IBAN_DbtrAcct_Node'])
PmtInf_nodes['DbtrAcctNode'].append(PmtInf_nodes['Id_DbtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_DbtrAgt_Node'].append(PmtInf_nodes['BIC_DbtrAgt_Node'])
PmtInf_nodes['DbtrAgtNode'].append(PmtInf_nodes['FinInstnId_DbtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEnd_PmtId_Node'])
TX_nodes['AmtNode'].append(TX_nodes['InstdAmtNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['PmtIdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['AmtNode'])
if 'BIC_CdtrAgt_Node' in TX_nodes and TX_nodes['BIC_CdtrAgt_Node'].text is not None:
TX_nodes['FinInstnId_CdtrAgt_Node'].append(
TX_nodes['BIC_CdtrAgt_Node'])
TX_nodes['CdtrAgtNode'].append(TX_nodes['FinInstnId_CdtrAgt_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAgtNode'])
TX_nodes['CdtrNode'].append(TX_nodes['Nm_Cdtr_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrNode'])
TX_nodes['Id_CdtrAcct_Node'].append(TX_nodes['IBAN_CdtrAcct_Node'])
TX_nodes['CdtrAcctNode'].append(TX_nodes['Id_CdtrAcct_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAcctNode'])
TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['RmtInfNode'])
PmtInf_nodes['PmtInfNode'].append(TX_nodes['CdtTrfTxInfNode'])
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
CstmrCdtTrfInitn_node.append(PmtInf_nodes['PmtInfNode'])
def _add_batch(self, TX_nodes, payment):
"""
Method to add a payment as a batch. The transaction details are already
        present. Will fold the nodes accordingly and then call the
_add_to_batch_list function to store the batch.
"""
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEnd_PmtId_Node'])
TX_nodes['AmtNode'].append(TX_nodes['InstdAmtNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['PmtIdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['AmtNode'])
if 'BIC_CdtrAgt_Node' in TX_nodes and TX_nodes['BIC_CdtrAgt_Node'].text is not None:
TX_nodes['FinInstnId_CdtrAgt_Node'].append(
TX_nodes['BIC_CdtrAgt_Node'])
TX_nodes['CdtrAgtNode'].append(TX_nodes['FinInstnId_CdtrAgt_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAgtNode'])
TX_nodes['CdtrNode'].append(TX_nodes['Nm_Cdtr_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrNode'])
TX_nodes['Id_CdtrAcct_Node'].append(TX_nodes['IBAN_CdtrAcct_Node'])
TX_nodes['CdtrAcctNode'].append(TX_nodes['Id_CdtrAcct_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAcctNode'])
TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['RmtInfNode'])
self._add_to_batch_list(TX_nodes, payment)
def _add_to_batch_list(self, TX, payment):
"""
Method to add a transaction to the batch list. The correct batch will
        be determined by the payment dict, and the batch will be created if it
        does not exist yet. This will also add the payment amount to the respective
batch total.
"""
batch_key = payment.get('execution_date', None)
if batch_key in self._batches.keys():
self._batches[batch_key].append(TX['CdtTrfTxInfNode'])
else:
self._batches[batch_key] = []
self._batches[batch_key].append(TX['CdtTrfTxInfNode'])
if batch_key in self._batch_totals:
self._batch_totals[batch_key] += payment['amount']
else:
self._batch_totals[batch_key] = payment['amount']
def _finalize_batch(self):
"""
Method to finalize the batch, this will iterate over the _batches dict
and create a PmtInf node for each batch. The correct information (from
the batch_key and batch_totals) will be inserted and the batch
transaction nodes will be folded. Finally, the batches will be added to
the main XML.
"""
for batch_meta, batch_nodes in self._batches.items():
PmtInf_nodes = self._create_PmtInf_node()
PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
PmtInf_nodes['PmtMtdNode'].text = "TRF"
PmtInf_nodes['BtchBookgNode'].text = "true"
PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
if batch_meta:
PmtInf_nodes['ReqdExctnDtNode'].text = batch_meta
else:
del PmtInf_nodes['ReqdExctnDtNode']
PmtInf_nodes['Nm_Dbtr_Node'].text = self._config['name']
PmtInf_nodes['IBAN_DbtrAcct_Node'].text = self._config['IBAN']
if 'BIC' in self._config:
PmtInf_nodes['BIC_DbtrAgt_Node'].text = self._config['BIC']
PmtInf_nodes['ChrgBrNode'].text = "SLEV"
PmtInf_nodes['NbOfTxsNode'].text = str(len(batch_nodes))
PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(self._batch_totals[batch_meta])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])
PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])
if 'ReqdExctnDtNode' in PmtInf_nodes:
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdExctnDtNode'])
PmtInf_nodes['DbtrNode'].append(PmtInf_nodes['Nm_Dbtr_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrNode'])
PmtInf_nodes['Id_DbtrAcct_Node'].append(PmtInf_nodes['IBAN_DbtrAcct_Node'])
PmtInf_nodes['DbtrAcctNode'].append(PmtInf_nodes['Id_DbtrAcct_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAcctNode'])
if 'BIC' in self._config:
PmtInf_nodes['FinInstnId_DbtrAgt_Node'].append(PmtInf_nodes['BIC_DbtrAgt_Node'])
PmtInf_nodes['DbtrAgtNode'].append(PmtInf_nodes['FinInstnId_DbtrAgt_Node'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAgtNode'])
PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])
for txnode in batch_nodes:
PmtInf_nodes['PmtInfNode'].append(txnode)
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
CstmrCdtTrfInitn_node.append(PmtInf_nodes['PmtInfNode'])
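# A minimal usage sketch (not part of this module). The config keys follow
# check_config() above plus the 'batch' flag read in add_payment(); the payment
# keys follow check_payment(). The final export() call is assumed to come from
# the shared SepaPaymentInitn base class.
#
#     config = {
#         "name": "Example Debtor",
#         "IBAN": "NL50BANK1234567890",
#         "BIC": "BANKNL2A",            # optional
#         "currency": "EUR",
#         "batch": False,
#     }
#     sepa = SepaTransfer(config, schema="pain.001.001.03")
#     sepa.add_payment({
#         "name": "Example Creditor",
#         "IBAN": "NL50BANK0987654321",
#         "amount": 1050,               # integer, rendered via int_to_decimal_str()
#         "description": "Invoice 2023-001",
#         "execution_date": datetime.date.today(),
#     })
#     xml_output = sepa.export()        # assumed base-class method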
|
|
"""
test_indexing tests the following Index methods:
__getitem__
get_loc
get_value
__contains__
take
where
get_indexer
get_indexer_for
slice_locs
asof_locs
The corresponding tests.indexes.[index_type].test_indexing files
contain tests for the corresponding methods specific to those Index subclasses.
"""
import numpy as np
import pytest
from pandas.errors import InvalidIndexError
from pandas import (
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
NaT,
PeriodIndex,
RangeIndex,
Series,
TimedeltaIndex,
)
import pandas._testing as tm
from pandas.core.api import (
Float64Index,
Int64Index,
UInt64Index,
)
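# A quick illustration (not part of the test suite) of the behaviour exercised by
# TestGetIndexer below: for a unique Index, get_indexer maps each target label to
# its position in the index, e.g.
#
#     >>> Index([10, 20, 30]).get_indexer([20, 30, 10])
#     array([1, 2, 0])
#
# Non-unique indexes raise InvalidIndexError and must go through get_indexer_non_unique.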
class TestTake:
def test_take_invalid_kwargs(self, index):
indices = [1, 2]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
index.take(indices, foo=2)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
index.take(indices, out=indices)
msg = "the 'mode' parameter is not supported"
with pytest.raises(ValueError, match=msg):
index.take(indices, mode="clip")
def test_take(self, index):
indexer = [4, 3, 0, 2]
if len(index) < 5:
# not enough elements; ignore
return
result = index.take(indexer)
expected = index[indexer]
assert result.equals(expected)
if not isinstance(index, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
# GH 10791
msg = r"'(.*Index)' object has no attribute 'freq'"
with pytest.raises(AttributeError, match=msg):
index.freq
def test_take_minus1_without_fill(self, index):
# -1 does not get treated as NA unless allow_fill=True is passed
if len(index) == 0:
# Test is not applicable
return
result = index.take([0, 0, -1])
expected = index.take([0, 0, len(index) - 1])
tm.assert_index_equal(result, expected)
class TestContains:
@pytest.mark.parametrize(
"index,val",
[
(Index([0, 1, 2]), 2),
(Index([0, 1, "2"]), "2"),
(Index([0, 1, 2, np.inf, 4]), 4),
(Index([0, 1, 2, np.nan, 4]), 4),
(Index([0, 1, 2, np.inf]), np.inf),
(Index([0, 1, 2, np.nan]), np.nan),
],
)
def test_index_contains(self, index, val):
assert val in index
@pytest.mark.parametrize(
"index,val",
[
(Index([0, 1, 2]), "2"),
(Index([0, 1, "2"]), 2),
(Index([0, 1, 2, np.inf]), 4),
(Index([0, 1, 2, np.nan]), 4),
(Index([0, 1, 2, np.inf]), np.nan),
(Index([0, 1, 2, np.nan]), np.inf),
# Checking if np.inf in Int64Index should not cause an OverflowError
# Related to GH 16957
(Int64Index([0, 1, 2]), np.inf),
(Int64Index([0, 1, 2]), np.nan),
(UInt64Index([0, 1, 2]), np.inf),
(UInt64Index([0, 1, 2]), np.nan),
],
)
def test_index_not_contains(self, index, val):
assert val not in index
@pytest.mark.parametrize(
"index,val", [(Index([0, 1, "2"]), 0), (Index([0, 1, "2"]), "2")]
)
def test_mixed_index_contains(self, index, val):
# GH#19860
assert val in index
@pytest.mark.parametrize(
"index,val", [(Index([0, 1, "2"]), "1"), (Index([0, 1, "2"]), 2)]
)
def test_mixed_index_not_contains(self, index, val):
# GH#19860
assert val not in index
def test_contains_with_float_index(self):
# GH#22085
integer_index = Int64Index([0, 1, 2, 3])
uinteger_index = UInt64Index([0, 1, 2, 3])
float_index = Float64Index([0.1, 1.1, 2.2, 3.3])
for index in (integer_index, uinteger_index):
assert 1.1 not in index
assert 1.0 in index
assert 1 in index
assert 1.1 in float_index
assert 1.0 not in float_index
assert 1 not in float_index
def test_contains_requires_hashable_raises(self, index):
if isinstance(index, MultiIndex):
return # TODO: do we want this to raise?
msg = "unhashable type: 'list'"
with pytest.raises(TypeError, match=msg):
[] in index
msg = "|".join(
[
r"unhashable type: 'dict'",
r"must be real number, not dict",
r"an integer is required",
r"\{\}",
r"pandas\._libs\.interval\.IntervalTree' is not iterable",
]
)
with pytest.raises(TypeError, match=msg):
{} in index._engine
class TestGetValue:
@pytest.mark.parametrize(
"index", ["string", "int", "datetime", "timedelta"], indirect=True
)
def test_get_value(self, index):
# TODO(2.0): can remove once get_value deprecation is enforced GH#19728
values = np.random.randn(100)
value = index[67]
with pytest.raises(AttributeError, match="has no attribute '_values'"):
# Index.get_value requires a Series, not an ndarray
with tm.assert_produces_warning(FutureWarning):
index.get_value(values, value)
with tm.assert_produces_warning(FutureWarning):
result = index.get_value(Series(values, index=values), value)
tm.assert_almost_equal(result, values[67])
class TestGetLoc:
def test_get_loc_non_hashable(self, index):
# MultiIndex and Index raise TypeError, others InvalidIndexError
with pytest.raises((TypeError, InvalidIndexError), match="slice"):
index.get_loc(slice(0, 1))
def test_get_loc_generator(self, index):
exc = KeyError
if isinstance(
index,
(DatetimeIndex, TimedeltaIndex, PeriodIndex, RangeIndex, IntervalIndex),
):
# TODO: make these more consistent?
exc = InvalidIndexError
with pytest.raises(exc, match="generator object"):
# MultiIndex specifically checks for generator; others for scalar
index.get_loc(x for x in range(5))
class TestGetIndexer:
def test_get_indexer_base(self, index):
if index._index_as_unique:
expected = np.arange(index.size, dtype=np.intp)
actual = index.get_indexer(index)
tm.assert_numpy_array_equal(expected, actual)
else:
msg = "Reindexing only valid with uniquely valued Index objects"
with pytest.raises(InvalidIndexError, match=msg):
index.get_indexer(index)
with pytest.raises(ValueError, match="Invalid fill method"):
index.get_indexer(index, method="invalid")
def test_get_indexer_consistency(self, index):
# See GH#16819
if index._index_as_unique:
indexer = index.get_indexer(index[0:2])
assert isinstance(indexer, np.ndarray)
assert indexer.dtype == np.intp
else:
msg = "Reindexing only valid with uniquely valued Index objects"
with pytest.raises(InvalidIndexError, match=msg):
index.get_indexer(index[0:2])
indexer, _ = index.get_indexer_non_unique(index[0:2])
assert isinstance(indexer, np.ndarray)
assert indexer.dtype == np.intp
class TestConvertSliceIndexer:
def test_convert_almost_null_slice(self, index):
# slice with None at both ends, but not step
key = slice(None, None, "foo")
if isinstance(index, IntervalIndex):
msg = "label-based slicing with step!=1 is not supported for IntervalIndex"
with pytest.raises(ValueError, match=msg):
index._convert_slice_indexer(key, "loc")
else:
msg = "'>=' not supported between instances of 'str' and 'int'"
with pytest.raises(TypeError, match=msg):
index._convert_slice_indexer(key, "loc")
class TestPutmask:
def test_putmask_with_wrong_mask(self, index):
# GH#18368
if not len(index):
return
fill = index[0]
msg = "putmask: mask and data must be the same size"
with pytest.raises(ValueError, match=msg):
index.putmask(np.ones(len(index) + 1, np.bool_), fill)
with pytest.raises(ValueError, match=msg):
index.putmask(np.ones(len(index) - 1, np.bool_), fill)
with pytest.raises(ValueError, match=msg):
index.putmask("foo", fill)
@pytest.mark.parametrize(
"idx", [Index([1, 2, 3]), Index([0.1, 0.2, 0.3]), Index(["a", "b", "c"])]
)
def test_getitem_deprecated_float(idx):
# https://github.com/pandas-dev/pandas/issues/34191
with tm.assert_produces_warning(FutureWarning):
result = idx[1.0]
expected = idx[1]
assert result == expected
def test_maybe_cast_slice_bound_kind_deprecated(index):
if not len(index):
return
with tm.assert_produces_warning(FutureWarning):
# passed as keyword
index._maybe_cast_slice_bound(index[0], "left", kind="loc")
with tm.assert_produces_warning(FutureWarning):
# pass as positional
index._maybe_cast_slice_bound(index[0], "left", "loc")
@pytest.mark.parametrize(
"idx,target,expected",
[
([np.nan, "var1", np.nan], [np.nan], np.array([0, 2], dtype=np.intp)),
(
[np.nan, "var1", np.nan],
[np.nan, "var1"],
np.array([0, 2, 1], dtype=np.intp),
),
(
np.array([np.nan, "var1", np.nan], dtype=object),
[np.nan],
np.array([0, 2], dtype=np.intp),
),
(
DatetimeIndex(["2020-08-05", NaT, NaT]),
[NaT],
np.array([1, 2], dtype=np.intp),
),
(["a", "b", "a", np.nan], [np.nan], np.array([3], dtype=np.intp)),
(
np.array(["b", np.nan, float("NaN"), "b"], dtype=object),
Index([np.nan], dtype=object),
np.array([1, 2], dtype=np.intp),
),
],
)
def test_get_indexer_non_unique_multiple_nans(idx, target, expected):
# GH 35392
axis = Index(idx)
actual = axis.get_indexer_for(target)
tm.assert_numpy_array_equal(actual, expected)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
#
# topology change pvr
# or at least the components to get there
"""
Use cases:
play forward/backward
fast forward/backward
pause
jump
jump to 'now'
stepping
jumping through checkpoints? (checkpoints could be marked up in the sequence)
Generalisations of properties:
current position
playing/paused
play direction and speed
Generalisation of functions
    plug in a stream interpreter that generates the 'reverse' direction stream
playout control
reverse stream generation
store
I'm thinking:
Generate the stream tagged with 'how to reverse the stream' data
First need a more generic chooser - one where you can add items to the list dynamically
- using the chooser as the recorder and the means to step through the data
- then build a recorder around this that does the time based stuff
"""
from Axon.Component import component
from Axon.Ipc import producerFinished, shutdownMicroprocess
import time
class RecordingChooser(component):
"""A chooser where you add to (either end of) the list of items being iterated over at any time.
RecordingChooser is a bit of a rubbish name, need a better one
"""
Inboxes = { "nextItems" : "New items to go on the end of the list",
"prevItems" : "New items prepend to the front of the list",
"inbox" : "'NEXT', 'PREV', 'FIRST', 'LAST'",
"control" : "",
}
Outboxes = { "outbox" : "outputs items",
"signal" : "",
}
def __init__(self, winding = False):
"""Initialisation.
winding = True causes all items to be enumerated in order when jumping to FIRST or LAST
next and prev requests are auto queued if you try to go past the endstops
next/prev requests are cancelled out by each other or flushed by FIRST/LAST requests
SAME requests are not supported
"""
super(RecordingChooser, self).__init__()
self.winding = winding
def main(self):
# we don't yet have a starting position in the data, this will depend on whether the initial request
# is a NEXT/FIRST implying starting at the start
# or a PREV/LAST implying starting at the end
self.buffer = []
moved = False
self.initialpos = 0
while not moved:
yield 1
self.handleNewDataNoPos()
moved = self.handleRequestsNoPos()
if self.shutdown():
return
while 1:
yield 1
self.handleNewData()
self.handleRequests()
if self.shutdown():
return
def shutdown(self):
"""Checks for and passes on shutdown messages"""
if self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, producerFinished) or isinstance(msg, shutdownMicroprocess):
self.send(msg, "signal")
return True
return False
def handleNewDataNoPos(self):
# new items for the front of the set
while self.dataReady("nextItems"):
data = self.recv("nextItems")
self.buffer.append(data)
# new items for the back
while self.dataReady("prevItems"):
data = self.recv("prevItems")
self.buffer.insert(0, data)
self.initialpos += 1
def handleRequestsNoPos(self):
if self.dataReady("inbox"):
cmd = self.recv("inbox").upper()
if cmd == "SAME":
return False
elif cmd == "FIRST":
self.pos = 0
self.emit()
return True
elif cmd == "LAST":
self.pos = len(self.buffer)-1
self.emit()
return True
elif cmd == "NEXT":
self.pos = self.initialpos
self.emit()
return True
elif cmd == "PREV":
self.pos = self.initialpos-1
self.emit()
return True
else:
return False
return False
def handleNewData(self):
# new items for the front of the set
while self.dataReady("nextItems"):
data = self.recv("nextItems")
self.buffer.append(data)
# 0 1 2 3 4 new
# ^
# waiting to emit
if len(self.buffer)-1 <= self.pos:
self.send(data, "outbox")
# new items for the back
while self.dataReady("prevItems"):
data = self.recv("prevItems")
self.buffer.insert(0, data)
if self.pos < 0:
self.send(data, "outbox") # emit if we're waiting for catchup
self.pos += 1
def handleRequests(self):
while self.dataReady("inbox"):
cmd = self.recv("inbox").upper()
if cmd == "SAME":
self.emit()
elif cmd == "FIRST":
if self.winding and self.pos >= 0:
while self.pos > 0:
self.pos -= 1
self.emit()
else:
self.pos = 0
self.emit()
elif cmd == "LAST":
if self.winding and self.pos <= len(self.buffer)-1:
while self.pos < len(self.buffer)-1:
self.pos += 1
self.emit()
else:
self.pos = len(self.buffer)-1
self.emit()
elif cmd == "NEXT":
self.pos += 1
if self.pos != 0:
self.emit()
elif cmd == "PREV":
self.pos -= 1
if self.pos != len(self.buffer)-1:
self.emit()
else:
pass
def emit(self):
if self.pos >= 0 and self.pos < len(self.buffer):
self.send( self.buffer[self.pos], "outbox")
class timestamper(component):
"""Timestamps data.
If 'data' arrives in the inbox, (timestamp, data) is sent out of the outbox
As data is passed through this component, the timestamps are guaranteed
to be unique and ordered.
The timestamp is a tuple (system_timestamp, sequencenumber)
The combination if these two values is guaranteed to be unique, and
for two timestamps A and B, where B was generated after A, it is guaranteed
that A < B.
"""
def main(self):
seqcount = 0
oldt = time.time()
while not self.shutdown():
t = time.time()
while self.dataReady("inbox"):
if t == oldt:
seqcount += 1
else:
seqcount = 0
oldt = t
timestamp = (t, seqcount)
data = self.recv("inbox")
self.send( (timestamp, data), "outbox" )
self.pause()
yield 1
def shutdown(self):
"""Checks for and passes on shutdown messages"""
if self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, producerFinished) or isinstance(msg, shutdownMicroprocess):
self.send(msg, "signal")
return True
return False
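# Note (illustration only): because the timestamp is the tuple (system_timestamp,
# sequencenumber), Python's element-wise tuple comparison preserves ordering even
# when time.time() returns the same value for consecutive items:
#
#     >>> (1000.0, 0) < (1000.0, 1) < (1000.5, 0)
#     True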
class detuple(component):
"""Detuples data"""
def __init__(self, index):
super(detuple, self).__init__()
self.index = index
def main(self):
while not self.shutdown():
while self.dataReady("inbox"):
data = self.recv("inbox")
self.send( data[self.index], "outbox")
self.pause()
yield 1
def shutdown(self):
"""Checks for and passes on shutdown messages"""
if self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, producerFinished) or isinstance(msg, shutdownMicroprocess):
self.send(msg, "signal")
return True
return False
class directionNormaliser(component):
"""Takes data in the form (timestamp, A, B).
If timestamp > previous received timestamp, return A
If timestamp < previous received timestamp, return B
If timestamp == previous received timestamp, shouldn't happen
For the very first item received, A will be returned
"""
def main(self):
while not self.shutdown() and not self.dataReady("inbox"):
self.pause()
yield 1
if not self.dataReady("inbox"):
return
data = self.recv("inbox")
timestamp = data[0]
self.send( data[1], "outbox" )
direction = +1
while not self.shutdown():
while self.dataReady("inbox"):
data = self.recv("inbox")
if data[0] > timestamp:
direction = +1
self.send( data[1], "outbox" )
elif data[0] < timestamp:
direction = -1
self.send( data[2], "outbox" )
else:
                    raise RuntimeError("directionNormaliser component entered unsafe state")
self.pause()
yield 1
def shutdown(self):
"""Checks for and passes on shutdown messages"""
if self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, producerFinished) or isinstance(msg, shutdownMicroprocess):
self.send(msg, "signal")
return True
return False
class PlayControl(component):
"""A PVR like playback interface. Uses an external (ordered) datasource like a chooser.
Requires data to be timestamped - in the form (timestamp, data....)
Fetches one in advance, so needs behaviour semantics as defined in RecordingChooser.
"""
    Inboxes = { "inbox" : "control 'PLAY', 'STOP', 'NEXT', 'PREV', 'RPLAY'",
"control" : "",
"playspeed" : "speed of play (must be >0 ... 0.5=halfspeed, 1.0=realtime, 2.0=doublespeed...)",
"data" : "items of timestamped data, returned from the datasource",
}
Outboxes = { "outbox" : "outgoing data",
"signal" : "",
"datareq" : "requests for data 'NEXT', 'PREV'"
}
    def __init__(self, initialMode="PLAY", initialPlaySpeed=1.0):
        super(PlayControl, self).__init__()
if initialPlaySpeed <= 0:
raise AttributeError("Play speed must be > 0.0")
self.speed = initialPlaySpeed
if initialMode == "PLAY":
self.direction = "NEXT"
elif initialMode == "RPLAY":
self.direction = "PREV"
else:
self.direction = None
def main(self):
self.pendingRequests = [] # 'NEXT' and 'PREV' requests that have not yet been fulfilled by the datasource
self.prevTimestamp = None # the timestamp of the most recently received from the datasource
self.waitingData = [] # data waiting for the right time to be sent
self.lastSentTimestamp = None # timestamp of last piece of data sent from the waitingData queue
self.lastSentMoment = None # moment in 'real' time at which this was done
# get the ball rolling
if self.direction:
self.issue(self.direction)
while not self.shutdown():
self.handleData()
self.handleCommands()
self.emitData()
self.makeRequests()
yield 1
    def issue(self, cmd):
"""Issue a data request and also add it to the list of pending requests"""
self.pendingRequests.append( cmd )
self.send(cmd, "datareq")
def handleData(self):
"""Deal with data returned from the chooser style component"""
while self.dataReady("data"):
data = self.recv("data")
timestamp = data[0]
#payload = data[1:]
# lets work out whether this item is a NEXT or PREV step
# if its the first step, then we assume its the same as the pending request
direction = None
            if self.prevTimestamp is None:
                direction = self.pendingRequests[0]
            elif timestamp > self.prevTimestamp:
                direction = "NEXT"
            elif timestamp < self.prevTimestamp:
                direction = "PREV"
            else:
                raise RuntimeError("Two items received with the same timestamp. Can't handle.")
            self.prevTimestamp = timestamp
# pop items off until we find one that matches the direction the data has moved in
while self.pendingRequests:
req = self.pendingRequests[0]
del self.pendingRequests[0]
if req == direction:
break
# add to the 'to be sent out' queue
self.waitingData.append( data )
def emitData(self):
"""Emit data at the right time"""
# if the mode is PLAY or RPLAY we know what to do - if the data is in the right direction, emit, otherwise
# discard
nothingtodo=False
while self.waitingData and not nothingtodo:
data = self.waitingData[0]
            timestamp = data[0]   # payload is data[1:], not needed here
timenow = time.time()
if self.direction == None:
# if not in a 'play' or 'rplay' mode then this must be stepping data, so send it immediately
del self.waitingData[0]
self.send( data, "outbox")
elif self.direction == "NEXT":
if (self.lastSentTimestamp == None): # bootstrap the process
self.send( data, "outbox" )
del self.waitingData[0]
self.lastSentTimestamp = timestamp
self.lastSentMoment = timenow
elif (self.lastSentTimestamp > timestamp):
del self.waitingData[0]
else:
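                    # pace playback: scale the recorded gap between timestamps by 1/speed to
                    # find the real-time moment at which this item becomes due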
nextMoment = self.lastSentMoment + (timestamp - self.lastSentTimestamp)/self.speed
if timenow >= nextMoment:
self.send( data, "outbox" )
del self.waitingData[0]
self.lastSentTimestamp = timestamp
self.lastSentMoment = nextMoment # could be timenow, but this hopefully avoids gradual drift
else:
nothingtodo = True
elif self.direction == "PREV":
if (self.lastSentTimestamp == None): # bootstrap the process
self.send( data, "outbox" )
del self.waitingData[0]
self.lastSentTimestamp = timestamp
self.lastSentMoment = timenow
elif (self.lastSentTimestamp < timestamp):
del self.waitingData[0]
else:
nextMoment = self.lastSentMoment + (self.lastSentTimestamp - timestamp)/self.speed # time running backwards
if timenow >= nextMoment:
self.send( data, "outbox" )
del self.waitingData[0]
self.lastSentTimestamp = timestamp
self.lastSentMoment = nextMoment # could be timenow, but this hopefully avoids gradual drift
else:
nothingtodo = True
else:
                raise RuntimeError("Unsafe state - self.direction set to invalid value")
def makeRequests(self):
"""Ensure a steady flow of requests to the data source if in play mode"""
if self.direction: # if in PLAY or RPLAY mode
# note we achieve rate limiting by taking into account the waitingData queue
            if len(self.pendingRequests) + len(self.waitingData) < 1: # threshold
self.issue(self.direction)
def handleCommands(self):
"""Handle incoming commands to change the play mode (PLAY, STOP, RPLAY, NEXT, PREV)"""
while self.dataReady("playspeed"):
newspeed = self.recv("playspeed")
if newspeed > 0 and newspeed != self.speed:
# could do with sorting along the lines of the variable rate control component
# - to handle speed changes mid stride
self.speed = newspeed
while self.dataReady("inbox"):
cmd = self.recv("inbox").upper()
if cmd == "STOP":
self.direction = None
elif cmd == "PLAY" or cmd == "RPLAY":
newdirection = "NEXT"
if cmd == "RPLAY":
newdirection = "PREV"
if newdirection != self.direction:
self.direction = newdirection
# look at the pending request queue, tally up how far the
# pending requests will offset us from the direction we need to
# move
#
# eg. if newdirection=PREV, and pending=[NEXT, PREV, NEXT]
# then tally = +1-1+1 = +1
#
# then move that much to compensate and move one further
offset = 0
for req in self.pendingRequests:
if req == self.direction:
offset -= 1
else:
offset += 1
offset += 1
for i in range(0, offset):
self.issue(self.direction)
self.lastSentTimestamp = None # reset time syncing for output
elif cmd == "NEXT" or cmd == "PREV":
if self.direction == None: # if not in play mode, otherwise do nothing
self.issue(cmd)
def shutdown(self):
"""Checks for and passes on shutdown messages"""
if self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, producerFinished) or isinstance(msg, shutdownMicroprocess):
self.send(msg, "signal")
return True
return False
class pairer(component):
"""Takes values coming in from two inboxes and pairs them as a tuple (inbox, inbox2)"""
Inboxes = { "inbox" : "first item in the tuple",
"inbox2" : "second item in the tuple",
"control" : "",
}
def main(self):
self.queues = [ ("inbox", []),
("inbox2", [])
]
while not self.shutdown():
# read incoming data
for (box, q) in self.queues:
while self.dataReady(box):
q.append( self.recv(box) )
# send out stuff
while 0 not in [len(q) for (box,q) in self.queues]:
data = tuple([ q[0] for (box,q) in self.queues ])
self.send(data, "outbox")
for (box,q) in self.queues:
del q[0]
yield 1
self.pause()
class topologyReverser(component):
"""Takes topology commands in lists and outputs reverse commands in lists
eg.
[ ("ADD","NODE",blah) ] --> [ ("DEL","NODE",blah) ]
[ ("DEL","NODE",blah) ] --> [ ("ADD","NODE",blah), ("ADD","LINK",blah) ]
"""
def __init__(self):
super(topologyReverser, self).__init__()
self.nodes = {} # id -> (id, name, other data)
self.links = {} # from,to -> (from, to, other data)
def main(self):
while not self.shutdown():
while self.dataReady("inbox"):
cmds = self.recv("inbox")
output = []
for cmd in cmds:
output.extend( list(self.reverse(cmd)) )
if output:
self.send(output, "outbox")
yield 1
self.pause()
def shutdown(self):
"""Checks for and passes on shutdown messages"""
if self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, producerFinished) or isinstance(msg, shutdownMicroprocess):
self.send(msg, "signal")
return True
return False
def reverse(self, msg):
"""Reverse a topology command"""
        try:
            if len(msg) >= 2:
                cmd = msg[0].upper(), msg[1].upper()
                if cmd == ("ADD", "NODE"):
                    self.nodes[msg[2]] = list(msg[2:])   # add to table of nodes
                    yield ["DEL", "NODE"] + list(msg[2:])
                    return
                elif cmd == ("DEL", "NODE"):
                    yield ["ADD", "NODE"] + self.nodes[msg[2]]
                    del self.nodes[msg[2]]
                    # also re-add any links that touched the deleted node
                    for link in list(self.links.keys()):
                        if link[0] == msg[2] or link[1] == msg[2]:
                            yield ["ADD", "LINK"] + self.links[link]
                            del self.links[link]
                    return
                elif cmd == ("ADD", "LINK"):
                    self.links[(msg[2], msg[3])] = list(msg[2:])   # add to table of links
                    yield ["DEL", "LINK"] + list(msg[2:])
                    return
                elif cmd == ("DEL", "LINK"):
                    yield ["ADD", "LINK"] + self.links[(msg[2], msg[3])]
                    del self.links[(msg[2], msg[3])]   # remove from table of links
                    return
                elif cmd == ("DEL", "ALL") and len(msg) == 2:
                    for node in self.nodes:
                        yield ["ADD", "NODE"] + self.nodes[node]
                    for link in self.links:
                        yield ["ADD", "LINK"] + self.links[link]
                    self.nodes = {}
                    self.links = {}
                    return
            yield list(msg)   # pass through anything else
        except KeyError:
            # assumed handling for the previously bare 'try': ignore reversals that
            # reference unknown node or link ids rather than crashing the component
            pass
|
|
'''
Analyses the Oslo algorithm.
'''
import numpy as np
import matplotlib as m
import matplotlib.pylab as plt
import matplotlib.ticker as mtk
import matplotlib.cm as cm
import log_bin_CN_2016 as lb
import oslo as o
font = {'size' : 19}
m.rc('font', **font)
all_sizes = np.array([8, 16, 32, 64, 128, 256, 512, 1024, 2048])
class Analysis(object):
"""
    Includes plots and analysis tools for a system that behaves according to
    the Oslo model.
"""
def __init__(self, counts, sizes=all_sizes):
'''
The number of counts given defines the interval over which averages
are calculated.
'''
self.counts = int(counts)
self.sizes = sizes
def set_counts(self, counts):
self.counts = int(counts)
# Get data
def get_data(self, data, generate='ready'):
'''
Gets data to be analysed. Parameter generate is set to 'ready' to
load existing data files or 'auto' to generate data for sizes in
sizes with iterations equal to (size^2 + self.counts) and threshold
probability 0.5. Parameter data is 1 for heights, 2 for avalanches and
3 for grain drops
'''
if data not in [1, 2, 3]:
raise ValueError('data = 1, 2 or 3')
for size in self.sizes:
print size
if generate=='ready':
string = lambda n: str(n).zfill(4)
if data==1:
y = np.load('s'+string(size)+'_h.npy')
if data==2:
y = np.load('s'+string(size)+'_s.npy')
if data==3:
y = np.load('s'+string(size)+'_d.npy')
elif generate=='auto':
lat = o.System(size, 0.5, self.counts)
lat.iterate()
if data==1:
y = lat.heights
if data==2:
y = lat.avalanches
if data==3:
y = lat.drops
if size==self.sizes[0]:
self.y = y
else:
self.y = np.vstack((self.y, y))
self.x = np.arange(len(self.y[0]))
# Calculate quantities
def moving_avg(self, data, w=25):
'''
Performs a moving average smoothing over the given data.
'''
ww= 2*w+1
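        # cumulative-sum trick: differencing the cumsum at lag ww gives the sum over each
        # window of ww consecutive samples; dividing by ww then yields the moving average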
mov_avg = np.cumsum(data, dtype=float)
mov_avg[ww:] = (mov_avg[ww:] - mov_avg[:-ww])
return mov_avg[ww-1:]/ww
def calc_havg(self):
'''
Calculates height average after cross over time
'''
self.havg = np.around(self.y[:, -self.counts:].mean(1), 1)
def calc_hstd(self):
'''
        Calculates the height standard deviation after the cross-over time
'''
self.hstd = np.around(self.y[:, -self.counts:].std(1), 2)
def calc_tc(self):
'''
Estimates cross over time
'''
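        # heuristic: split the first 2*L^2 height samples into sections of length L/2 and
        # take the first section whose mean comes within one unit of the steady-state
        # average height, scaled back to a time by the section length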
tc = []
for i in range(len(self.sizes)):
diff = []
heights = self.y[i, :2*self.sizes[i]*self.sizes[i]]
section = self.sizes[i]/2
splits = len(heights)/section
for arr in np.split(heights, splits):
diff.append(self.havg[i] - arr.mean())
tc.append(int(np.argmin(np.array(diff)>1) + 0.5)*section)
self.tc = np.array(tc)
def calc_moment(self, i, k, drops):
'''
Calculates k-th moment of avalanches in given interval.
'''
if drops:
y= self.y[i,:]
y = y[np.nonzero(y)]
else:
y = self.y[i, -self.counts:]
return 1./y.size * np.sum(np.power(y, float(k)))
def calc_prob(self, size, counts=None, drops=True):
'''
        Calculates the probability of heights, avalanches or grain drops.
'''
if counts is None: counts = self.counts
i = np.where(self.sizes==size)[0][0]
if drops:
y= self.y[i,:]
y = y[np.nonzero(y)]
else:
y = self.y[i, -counts:]
return np.bincount(y)/float(y.size)
# Plots and analysis
def test_btw(self, size, p=None):
'''
Tests consistency of Oslo model for given size.
'''
if p is None: p = [0., 0.25, 0.5, 0.75, 1.]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel("$sites$")
ax.set_ylabel("$lattice\ height\ h\ /L$")
c = cm.rainbow(np.linspace(0, 1, len(p)))
x = np.arange(size)
heights = []
for j in xrange(len(p)):
lat = o.System(size, p[j], size*size + self.counts)
lat.iterate()
s = lat.slope
h = np.zeros(len(s)); h[0] = lat.h
heights.append(lat.h/64.)
for i in range(1, len(s)):
h[i] = h[i-1] - s[i-1]
ax.plot(x, h/64., color=c[j], lw=2.,
label='p={0:.2f}'.format(p[j]))
ax.legend(loc='upper right', prop={'size':17})
return heights
def plot_heights(self):
'''
Plots height against time for all system sizes.
'''
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel("$number\ of\ grains\ \log_2{t}$")
ax.set_ylabel("$height\ \log_2{h}$")
c = cm.rainbow(np.linspace(0, 1, len(self.sizes)))
h = np.log2(self.havg)
t = np.log2(self.tc)
for i in xrange(len(self.sizes)):
x, y = np.log2(self.x), np.log2(self.y[i,:])
ax.plot(x, y, '-', lw=0.8, color = c[i], label=self.sizes[i])
ax.axhline(h[i], color='k', lw=0.3, linestyle='--')
ax.axvline(t[i], color='k', lw=0.3, linestyle='--')
leg = ax.legend(loc='upper left', ncol=2, prop={'size':15})
for lg in leg.legendHandles:
lg.set_linewidth(1.5)
def plot_crossover(self):
'''
Plots dependence of average height and crossover time on system size.
'''
fig = plt.figure()
ax = fig.add_subplot(111)
z = np.polyfit(self.sizes, self.havg, 1)
ax.set_xlabel("$system\ size\ L$")
ax.set_ylabel("$average\ height$")
ax.loglog(self.sizes, self.havg, 'rx', markersize=20.)
ax.loglog(self.sizes, z[0]*self.sizes+z[1], 'b-', lw=2.,
label='slope {0:.3f}'.format(z[0]))
ax.legend(loc='upper left', prop={'size':15})
fig = plt.figure()
ss = self.sizes*self.sizes
z = np.polyfit(ss, self.tc, 1)
ax = fig.add_subplot(111)
ax.set_xlabel("$system\ size\ L^2$")
ax.set_ylabel("$crossover\ time\ t_c$")
ax.loglog(ss, self.tc, 'rx', markersize=20.)
ax.loglog(ss, z[0]*ss+z[1], 'b-', lw=2.,
label='slope {0:.3f}'.format(z[0]))
ax.legend(loc='upper left', prop={'size':15})
def collapse_heights(self, w=25):
'''
Plots the collapsed heights against time.
'''
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel(r"$scaled\ time\ t/L^2$")
ax.set_ylabel(r"$scaled\ height\ h/L$")
c = cm.rainbow(np.linspace(0, 1, len(self.sizes)))
for i in xrange(len(self.sizes)):
heights = self.moving_avg(self.y[i,:], w=w)
s = float(self.sizes[i])
x = self.x[w:int(10*self.sizes[i]**2)]/(s*s)
y = heights[:int(10*self.sizes[i]**2-w)]/s
ax.loglog(x, y, color=c[i], lw=1.5, label=int(s))
ax.legend(loc='upper left', ncol=2, prop={'size':13})
fig = plt.figure()
ax = fig.add_subplot(111)
tc = self.tc[-1]
x, y = np.log2(self.x[1:tc]), np.log2(self.y[-1, 1:tc])
z = np.polyfit(x, y, 1)
ax.plot(x, y, 'gx')
ax.plot(x, z[0]*x+z[1], 'k-', label='slope %g'%(z[0]))
ax.legend(loc='upper left')
        return z[0], 2**z[1], tc/(s*s), self.havg[-1]/s  # fit is in log2 space, so the prefactor is 2**intercept
def scale_havg(self, a=None):
'''
Plots average height with corrections.
'''
if a is None: a = np.linspace(1.7328, 1.7343, 5)
fig = plt.figure()
fig.suptitle("Height against Size with Corrections")
for i in range(len(a)):
ax = fig.add_subplot(2,4,i+1)
x = np.log(self.sizes)
y = np.log(1-self.havg/(a[i]*self.sizes))
z = np.polyfit(x, y, 1)
ax.plot(x, y)
ax.plot(x, z[0]*x+z[1])
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel("$system\ size\ L$")
ax.set_ylabel("$transformed\ height\ h'$")
xx, yy = self.sizes, 1-self.havg/(a.mean()*self.sizes)
x = np.log2(xx)
y = np.log2(yy)
z = np.polyfit(x, y, 1)
ax.plot(x, y, 'rx', markersize=15.)
ax.plot(x, z[0]*x+z[1], 'b-')
a0, a1 = np.around(a.mean(), 4), np.around(np.exp(z[1]), 4)
w1 = np.around(-z[0], 4)
return a0, a1, w1
def scale_hstd(self):
'''
Plots scaling of height standard deviation.
'''
x, y = self.sizes, self.hstd
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel("$system\ size\ L/10^3$")
ax.set_ylabel("$standard\ deviation\ \sigma_h$")
ax.plot(x/1e3, y, lw=2.)
fig = plt.figure()
ax = fig.add_subplot(111)
x, y = np.log10(x), np.log10(y)
z = np.polyfit(x, y, 1)
ax.plot(x, y, 'rx', markersize=15.)
ax.plot(x, z[0]*x+z[1], 'b-', label='slope {0:.2}'.format(z[0]))
ax.set_xlabel("$system\ size\ \log{L}$")
ax.set_ylabel("$standard\ deviation\ \log{\sigma_h}$")
ax.legend(loc='upper left', prop={'size':16})
fig = plt.figure()
ax = fig.add_subplot(111)
x, y = self.sizes, self.hstd/self.sizes**z[0]
ax.loglog(x, y, 'rx')
ax.axhline(y=0.5838, color='k', linestyle='--')
ax.set_xlabel("$L$")
ax.set_ylabel("$\sigma_h/L^{0.24}$")
        return z[0], 10**z[1]  # fit is in log10 space, so the prefactor is 10**intercept
def avg_slope(self):
'''
Plots average slope and its std.
'''
zavg, zstd = self.havg/self.sizes, self.hstd/self.sizes
fig = plt.figure()
ax = fig.add_subplot(121)
ax.axhline(y=1.733, color='k', linestyle='--')
ax.plot(self.sizes, zavg)
ax = fig.add_subplot(122)
ax.plot(self.sizes, zstd)
def prob_height(self, collapse=True):
'''
Plots Gaussian height probabilities before or after collapse.
'''
fig = plt.figure()
ax = fig.add_subplot(111)
c = cm.rainbow(np.linspace(0, 1, len(self.sizes)))
if collapse:
ax.set_xlabel("$scaled\ height\ z$")
ax.set_ylabel('$probability\ \tilde{P}(z;L)$')
else:
ax.set_xlabel("$height\ h$")
ax.set_ylabel('$probability\ P(h;L)$')
for i in range(len(self.sizes)):
prob = self.calc_prob(self.sizes[i])
x, y = np.arange(len(prob)), prob
#x = np.nonzero(prob)[0]
#y = prob[x]
if collapse:
x, y = (x-self.havg[i])/self.hstd[i], self.hstd[i]*y
#if self.sizes[i]==2048: x, y = 0, 0
ax.plot(x, y, color=c[i], lw=2., label=self.sizes[i])
else:
ax.semilogx(x, y, color=c[i], lw=2., label=self.sizes[i])
ax.legend(loc='upper right', ncol=2, prop={'size':13})
def prob_aval(self, counts, base=1.2, size=all_sizes[5], drops=False):
'''
Plots the log-binning of avalanche probability.
'''
prob = self.calc_prob(size=size, counts=counts, drops=drops)
fig = plt.figure()
x = np.nonzero(prob)[0]
y = prob[x]
j = np.where(self.sizes==size)[0][0]
if base is None: base = np.linspace(1.15, 1.25, 4)
ax = fig.add_subplot(111)
if drops:
s= self.y[j,:]
s = s[np.nonzero(s)]
ax.set_xlabel('$drop\ size\ d$')
ax.set_ylabel('$probability\ P(d;L)$')
else:
s = self.y[j, -counts:]
ax.set_xlabel('$avalanche\ size\ s$')
ax.set_ylabel('$probability\ P(s;L)$')
xx, yy = lb.log_bin(s, 0., 1., base, 'integer', False)
ax.loglog(x, y, 'x')
ax.loglog(xx, yy, 'o', lw=2., label='a='+str(base))
ax.legend(loc='upper right')
def scale_aval(self, collapse=True, drops=False):
'''
Plots probability of avalanches or drops before or after collapse for
all sizes.
'''
fig = plt.figure()
ax = fig.add_subplot(111)
c = cm.rainbow(np.linspace(0, 1, len(self.sizes)))
for i in xrange(len(self.sizes)):
print self.sizes[i]
if drops:
y= self.y[i,:]
y = y[np.nonzero(y)]
k = [1.002, 1.249]
else:
y = self.y[i, -self.counts:]
k = [1.55, 2.25]
xx, yy = lb.log_bin(y, 0, 1., 1.2, 'integer', False)
xx, yy = np.array(xx), np.array(yy)
if collapse:
y = (xx**k[0])*yy
x = xx/(self.sizes[i]**k[1])
ax.loglog(x, y, '-', lw=1.5, color=c[i], label=self.sizes[i])
ax.legend(loc='lower left', ncol=2, prop={'size':13})
if drops:
ax.set_xlabel('$d/L^{1.25}$')
ax.set_ylabel('$d^{1.01} P(d;L)$')
else:
ax.set_xlabel('$s/L^{2.24}$')
ax.set_ylabel('$ s^{1.55} P(s;L)$')
else:
#x, y = np.log2(xx), np.log2(yy)
ax.loglog(xx, yy, '-', lw=1.5, color=c[i], label=self.sizes[i])
ax.legend(loc='upper right', ncol=2, prop={'size':13})
if drops:
ax.set_xlabel('$drop\ size\ d$')
ax.set_ylabel('$probability\ P(d;L)$')
else:
ax.set_xlabel('$avalanche\ size\ s$')
ax.set_ylabel('$probability\ P(s;L)$')
def moments(self, points=5, check=5, drops=False):
'''
Performs moment analysis for avalanche or drop probability.
'''
moments = np.zeros((len(self.sizes), points))
coefs, ycept = [], []
for k in xrange(points):
for i in xrange(len(self.sizes)):
moments[i,k] = self.calc_moment(i, k+1, drops=drops)
z = np.polyfit(np.log(self.sizes[4:]), np.log(moments[4:,k]), 1)
coefs.append(z[0])
ycept.append(np.exp(z[1]))
fig = plt.figure()
ax = fig.add_subplot(111)
k = check-1
y = moments[:,k]/self.sizes**(coefs[k])
ax.plot(self.sizes, y, 'rx', markersize=15., label='M'+str(k+1))
ax.axhline(y=ycept[k], color='k', linestyle='--')
ax.set_xlabel("$system\ size\ L$")
ax.set_ylabel(r"$s^k/L^{1+k-{\tau}_s}$")
ax.legend(loc='upper right', prop={'size':15})
ks = np.arange(1, points+1)
fig = plt.figure()
ax = fig.add_subplot(111)
z = np.polyfit(ks, coefs, 1)
ax.plot(ks, coefs, 'x', markersize=15.)
ax.plot(ks, z[0]*ks+z[1], '-', lw=2., label='D = %.3g'%(z[0]))
ax.set_xlabel("$moment\ order\ k$")
ax.set_ylabel("$Dk+D(1-\tau_s)$")
ax.legend(loc='upper left', prop={'size':15})
d = z[0]
tau = 1. - z[1]/d
return d, tau
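# A minimal usage sketch (illustrative; assumes either the pre-generated data files
# named like 's0256_h.npy' or the oslo.System class imported above):
#
#     an = Analysis(counts=100000, sizes=np.array([8, 16, 32, 64]))
#     an.get_data(1, generate='ready')   # 1 = heights, 2 = avalanches, 3 = drops
#     an.calc_havg()
#     an.calc_hstd()
#     an.calc_tc()
#     an.plot_heights()
#     plt.show()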
|
|
# Copyright 2011-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the replica_set_connection module."""
import contextlib
import copy
import pickle
import random
import sys
sys.path[0:0] = [""]
from test import IntegrationTest, SkipTest, client_context, unittest
from test.utils import (
OvertCommandListener,
connected,
one,
rs_client,
single_client,
wait_until,
)
from test.version import Version
from bson.son import SON
from pymongo.errors import ConfigurationError, OperationFailure
from pymongo.message import _maybe_add_read_preference
from pymongo.mongo_client import MongoClient
from pymongo.read_preferences import (
MovingAverage,
Nearest,
Primary,
PrimaryPreferred,
ReadPreference,
Secondary,
SecondaryPreferred,
)
from pymongo.server_description import ServerDescription
from pymongo.server_selectors import Selection, readable_server_selector
from pymongo.server_type import SERVER_TYPE
from pymongo.write_concern import WriteConcern
class TestSelections(IntegrationTest):
@client_context.require_connection
def test_bool(self):
client = single_client()
wait_until(lambda: client.address, "discover primary")
selection = Selection.from_topology_description(client._topology.description)
self.assertTrue(selection)
self.assertFalse(selection.with_server_descriptions([]))
class TestReadPreferenceObjects(unittest.TestCase):
prefs = [
Primary(),
PrimaryPreferred(),
Secondary(),
Nearest(tag_sets=[{"a": 1}, {"b": 2}]),
SecondaryPreferred(max_staleness=30),
]
def test_pickle(self):
for pref in self.prefs:
self.assertEqual(pref, pickle.loads(pickle.dumps(pref)))
def test_copy(self):
for pref in self.prefs:
self.assertEqual(pref, copy.copy(pref))
def test_deepcopy(self):
for pref in self.prefs:
self.assertEqual(pref, copy.deepcopy(pref))
class TestReadPreferencesBase(IntegrationTest):
@classmethod
@client_context.require_secondaries_count(1)
def setUpClass(cls):
super(TestReadPreferencesBase, cls).setUpClass()
def setUp(self):
super(TestReadPreferencesBase, self).setUp()
# Insert some data so we can use cursors in read_from_which_host
self.client.pymongo_test.test.drop()
self.client.get_database(
"pymongo_test", write_concern=WriteConcern(w=client_context.w)
).test.insert_many([{"_id": i} for i in range(10)])
self.addCleanup(self.client.pymongo_test.test.drop)
def read_from_which_host(self, client):
"""Do a find() on the client and return which host was used"""
cursor = client.pymongo_test.test.find()
next(cursor)
return cursor.address
def read_from_which_kind(self, client):
"""Do a find() on the client and return 'primary' or 'secondary'
depending on which the client used.
"""
address = self.read_from_which_host(client)
if address == client.primary:
return "primary"
elif address in client.secondaries:
return "secondary"
else:
self.fail(
"Cursor used address %s, expected either primary "
"%s or secondaries %s" % (address, client.primary, client.secondaries)
)
def assertReadsFrom(self, expected, **kwargs):
c = rs_client(**kwargs)
wait_until(lambda: len(c.nodes - c.arbiters) == client_context.w, "discovered all nodes")
used = self.read_from_which_kind(c)
self.assertEqual(expected, used, "Cursor used %s, expected %s" % (used, expected))
class TestSingleSecondaryOk(TestReadPreferencesBase):
def test_reads_from_secondary(self):
host, port = next(iter(self.client.secondaries))
# Direct connection to a secondary.
client = single_client(host, port)
self.assertFalse(client.is_primary)
# Regardless of read preference, we should be able to do
# "reads" with a direct connection to a secondary.
# See server-selection.rst#topology-type-single.
self.assertEqual(client.read_preference, ReadPreference.PRIMARY)
db = client.pymongo_test
coll = db.test
# Test find and find_one.
self.assertIsNotNone(coll.find_one())
self.assertEqual(10, len(list(coll.find())))
# Test some database helpers.
self.assertIsNotNone(db.list_collection_names())
self.assertIsNotNone(db.validate_collection("test"))
self.assertIsNotNone(db.command("ping"))
# Test some collection helpers.
self.assertEqual(10, coll.count_documents({}))
self.assertEqual(10, len(coll.distinct("_id")))
self.assertIsNotNone(coll.aggregate([]))
self.assertIsNotNone(coll.index_information())
class TestReadPreferences(TestReadPreferencesBase):
def test_mode_validation(self):
for mode in (
ReadPreference.PRIMARY,
ReadPreference.PRIMARY_PREFERRED,
ReadPreference.SECONDARY,
ReadPreference.SECONDARY_PREFERRED,
ReadPreference.NEAREST,
):
self.assertEqual(mode, rs_client(read_preference=mode).read_preference)
self.assertRaises(TypeError, rs_client, read_preference="foo")
def test_tag_sets_validation(self):
S = Secondary(tag_sets=[{}])
self.assertEqual([{}], rs_client(read_preference=S).read_preference.tag_sets)
S = Secondary(tag_sets=[{"k": "v"}])
self.assertEqual([{"k": "v"}], rs_client(read_preference=S).read_preference.tag_sets)
S = Secondary(tag_sets=[{"k": "v"}, {}])
self.assertEqual([{"k": "v"}, {}], rs_client(read_preference=S).read_preference.tag_sets)
self.assertRaises(ValueError, Secondary, tag_sets=[])
# One dict not ok, must be a list of dicts
self.assertRaises(TypeError, Secondary, tag_sets={"k": "v"})
self.assertRaises(TypeError, Secondary, tag_sets="foo")
self.assertRaises(TypeError, Secondary, tag_sets=["foo"])
def test_threshold_validation(self):
self.assertEqual(
17, rs_client(localThresholdMS=17, connect=False).options.local_threshold_ms
)
self.assertEqual(
42, rs_client(localThresholdMS=42, connect=False).options.local_threshold_ms
)
self.assertEqual(
666, rs_client(localThresholdMS=666, connect=False).options.local_threshold_ms
)
self.assertEqual(0, rs_client(localThresholdMS=0, connect=False).options.local_threshold_ms)
self.assertRaises(ValueError, rs_client, localthresholdms=-1)
def test_zero_latency(self):
ping_times: set = set()
# Generate unique ping times.
while len(ping_times) < len(self.client.nodes):
ping_times.add(random.random())
for ping_time, host in zip(ping_times, self.client.nodes):
ServerDescription._host_to_round_trip_time[host] = ping_time
try:
client = connected(rs_client(readPreference="nearest", localThresholdMS=0))
wait_until(lambda: client.nodes == self.client.nodes, "discovered all nodes")
host = self.read_from_which_host(client)
for _ in range(5):
self.assertEqual(host, self.read_from_which_host(client))
finally:
ServerDescription._host_to_round_trip_time.clear()
def test_primary(self):
self.assertReadsFrom("primary", read_preference=ReadPreference.PRIMARY)
def test_primary_with_tags(self):
# Tags not allowed with PRIMARY
self.assertRaises(ConfigurationError, rs_client, tag_sets=[{"dc": "ny"}])
def test_primary_preferred(self):
self.assertReadsFrom("primary", read_preference=ReadPreference.PRIMARY_PREFERRED)
def test_secondary(self):
self.assertReadsFrom("secondary", read_preference=ReadPreference.SECONDARY)
def test_secondary_preferred(self):
self.assertReadsFrom("secondary", read_preference=ReadPreference.SECONDARY_PREFERRED)
def test_nearest(self):
# With high localThresholdMS, expect to read from any
# member
c = rs_client(read_preference=ReadPreference.NEAREST, localThresholdMS=10000) # 10 seconds
data_members = {self.client.primary} | self.client.secondaries
# This is a probabilistic test; track which members we've read from so
# far, and keep reading until we've used all the members or give up.
# Chance of using only 2 of 3 members 10k times if there's no bug =
# 3 * (2/3)**10000, very low.
used: set = set()
i = 0
while data_members.difference(used) and i < 10000:
address = self.read_from_which_host(c)
used.add(address)
i += 1
not_used = data_members.difference(used)
latencies = ", ".join(
"%s: %dms" % (server.description.address, server.description.round_trip_time)
for server in c._get_topology().select_servers(readable_server_selector)
)
self.assertFalse(
not_used,
"Expected to use primary and all secondaries for mode NEAREST,"
" but didn't use %s\nlatencies: %s" % (not_used, latencies),
)
class ReadPrefTester(MongoClient):
def __init__(self, *args, **kwargs):
self.has_read_from = set()
client_options = client_context.client_options
client_options.update(kwargs)
super(ReadPrefTester, self).__init__(*args, **client_options)
@contextlib.contextmanager
def _socket_for_reads(self, read_preference, session):
context = super(ReadPrefTester, self)._socket_for_reads(read_preference, session)
with context as (sock_info, read_preference):
self.record_a_read(sock_info.address)
yield sock_info, read_preference
@contextlib.contextmanager
def _socket_from_server(self, read_preference, server, session):
context = super(ReadPrefTester, self)._socket_from_server(read_preference, server, session)
with context as (sock_info, read_preference):
self.record_a_read(sock_info.address)
yield sock_info, read_preference
def record_a_read(self, address):
server = self._get_topology().select_server_by_address(address, 0)
self.has_read_from.add(server)
_PREF_MAP = [
(Primary, SERVER_TYPE.RSPrimary),
(PrimaryPreferred, SERVER_TYPE.RSPrimary),
(Secondary, SERVER_TYPE.RSSecondary),
(SecondaryPreferred, SERVER_TYPE.RSSecondary),
(Nearest, "any"),
]
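# Illustrative sketch (not collected by the test runner): each read preference
# class paired with a server type in _PREF_MAP exposes a `document` property
# describing the mode sent to the server. The expected values below mirror
# TestMongosAndReadPreference.test_read_preference_document further down.
def _pref_map_document_sketch():
    assert Primary().document == {"mode": "primary"}
    assert PrimaryPreferred().document == {"mode": "primaryPreferred"}
    assert Secondary().document == {"mode": "secondary"}
    assert SecondaryPreferred().document == {"mode": "secondaryPreferred"}
    assert Nearest().document == {"mode": "nearest"}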
class TestCommandAndReadPreference(IntegrationTest):
c: ReadPrefTester
client_version: Version
@classmethod
@client_context.require_secondaries_count(1)
def setUpClass(cls):
super(TestCommandAndReadPreference, cls).setUpClass()
cls.c = ReadPrefTester(
client_context.pair,
# Ignore round trip times, to test ReadPreference modes only.
localThresholdMS=1000 * 1000,
)
cls.client_version = Version.from_client(cls.c)
# mapReduce fails if the collection does not exist.
coll = cls.c.pymongo_test.get_collection(
"test", write_concern=WriteConcern(w=client_context.w)
)
coll.insert_one({})
@classmethod
def tearDownClass(cls):
cls.c.drop_database("pymongo_test")
cls.c.close()
def executed_on_which_server(self, client, fn, *args, **kwargs):
"""Execute fn(*args, **kwargs) and return the Server instance used."""
client.has_read_from.clear()
fn(*args, **kwargs)
self.assertEqual(1, len(client.has_read_from))
return one(client.has_read_from)
def assertExecutedOn(self, server_type, client, fn, *args, **kwargs):
server = self.executed_on_which_server(client, fn, *args, **kwargs)
self.assertEqual(
SERVER_TYPE._fields[server_type], SERVER_TYPE._fields[server.description.server_type]
)
def _test_fn(self, server_type, fn):
for _ in range(10):
if server_type == "any":
used = set()
for _ in range(1000):
server = self.executed_on_which_server(self.c, fn)
used.add(server.description.address)
if len(used) == len(self.c.secondaries) + 1:
# Success
break
assert self.c.primary is not None
unused = self.c.secondaries.union(set([self.c.primary])).difference(used)
if unused:
self.fail("Some members not used for NEAREST: %s" % (unused))
else:
self.assertExecutedOn(server_type, self.c, fn)
def _test_primary_helper(self, func):
# Helpers that ignore read preference.
self._test_fn(SERVER_TYPE.RSPrimary, func)
def _test_coll_helper(self, secondary_ok, coll, meth, *args, **kwargs):
for mode, server_type in _PREF_MAP:
new_coll = coll.with_options(read_preference=mode())
func = lambda: getattr(new_coll, meth)(*args, **kwargs)
if secondary_ok:
self._test_fn(server_type, func)
else:
self._test_fn(SERVER_TYPE.RSPrimary, func)
def test_command(self):
# Test that the generic command helper obeys the read preference
# passed to it.
for mode, server_type in _PREF_MAP:
func = lambda: self.c.pymongo_test.command("dbStats", read_preference=mode())
self._test_fn(server_type, func)
def test_create_collection(self):
# create_collection runs listCollections on the primary to check if
# the collection already exists.
self._test_primary_helper(
lambda: self.c.pymongo_test.create_collection(
"some_collection%s" % random.randint(0, sys.maxsize)
)
)
def test_count_documents(self):
self._test_coll_helper(True, self.c.pymongo_test.test, "count_documents", {})
def test_estimated_document_count(self):
self._test_coll_helper(True, self.c.pymongo_test.test, "estimated_document_count")
def test_distinct(self):
self._test_coll_helper(True, self.c.pymongo_test.test, "distinct", "a")
def test_aggregate(self):
self._test_coll_helper(
True, self.c.pymongo_test.test, "aggregate", [{"$project": {"_id": 1}}]
)
def test_aggregate_write(self):
# 5.0 servers support $out on secondaries.
secondary_ok = client_context.version.at_least(5, 0)
self._test_coll_helper(
secondary_ok,
self.c.pymongo_test.test,
"aggregate",
[{"$project": {"_id": 1}}, {"$out": "agg_write_test"}],
)
class TestMovingAverage(unittest.TestCase):
def test_moving_average(self):
avg = MovingAverage()
self.assertIsNone(avg.get())
avg.add_sample(10)
self.assertAlmostEqual(10, avg.get()) # type: ignore
avg.add_sample(20)
self.assertAlmostEqual(12, avg.get()) # type: ignore
avg.add_sample(30)
self.assertAlmostEqual(15.6, avg.get()) # type: ignore
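# Worked values for the assertions above, assuming MovingAverage keeps an
# exponentially weighted average with weight 0.2 for each new sample (an
# inference from the asserted numbers, not from the MovingAverage source):
#   after 10:  avg = 10
#   after 20:  0.8 * 10 + 0.2 * 20 = 12
#   after 30:  0.8 * 12 + 0.2 * 30 = 15.6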
class TestMongosAndReadPreference(IntegrationTest):
def test_read_preference_document(self):
pref = Primary()
self.assertEqual(pref.document, {"mode": "primary"})
pref = PrimaryPreferred()
self.assertEqual(pref.document, {"mode": "primaryPreferred"})
pref = PrimaryPreferred(tag_sets=[{"dc": "sf"}])
self.assertEqual(pref.document, {"mode": "primaryPreferred", "tags": [{"dc": "sf"}]})
pref = PrimaryPreferred(tag_sets=[{"dc": "sf"}], max_staleness=30)
self.assertEqual(
pref.document,
{"mode": "primaryPreferred", "tags": [{"dc": "sf"}], "maxStalenessSeconds": 30},
)
pref = Secondary()
self.assertEqual(pref.document, {"mode": "secondary"})
pref = Secondary(tag_sets=[{"dc": "sf"}])
self.assertEqual(pref.document, {"mode": "secondary", "tags": [{"dc": "sf"}]})
pref = Secondary(tag_sets=[{"dc": "sf"}], max_staleness=30)
self.assertEqual(
pref.document, {"mode": "secondary", "tags": [{"dc": "sf"}], "maxStalenessSeconds": 30}
)
pref = SecondaryPreferred()
self.assertEqual(pref.document, {"mode": "secondaryPreferred"})
pref = SecondaryPreferred(tag_sets=[{"dc": "sf"}])
self.assertEqual(pref.document, {"mode": "secondaryPreferred", "tags": [{"dc": "sf"}]})
pref = SecondaryPreferred(tag_sets=[{"dc": "sf"}], max_staleness=30)
self.assertEqual(
pref.document,
{"mode": "secondaryPreferred", "tags": [{"dc": "sf"}], "maxStalenessSeconds": 30},
)
pref = Nearest()
self.assertEqual(pref.document, {"mode": "nearest"})
pref = Nearest(tag_sets=[{"dc": "sf"}])
self.assertEqual(pref.document, {"mode": "nearest", "tags": [{"dc": "sf"}]})
pref = Nearest(tag_sets=[{"dc": "sf"}], max_staleness=30)
self.assertEqual(
pref.document, {"mode": "nearest", "tags": [{"dc": "sf"}], "maxStalenessSeconds": 30}
)
with self.assertRaises(TypeError):
# Float is prohibited.
Nearest(max_staleness=1.5) # type: ignore
with self.assertRaises(ValueError):
Nearest(max_staleness=0)
with self.assertRaises(ValueError):
Nearest(max_staleness=-2)
def test_read_preference_document_hedge(self):
cases = {
"primaryPreferred": PrimaryPreferred,
"secondary": Secondary,
"secondaryPreferred": SecondaryPreferred,
"nearest": Nearest,
}
for mode, cls in cases.items():
with self.assertRaises(TypeError):
cls(hedge=[]) # type: ignore
pref = cls(hedge={})
self.assertEqual(pref.document, {"mode": mode})
out = _maybe_add_read_preference({}, pref)
if cls == SecondaryPreferred:
# SecondaryPreferred without hedge doesn't add $readPreference.
self.assertEqual(out, {})
else:
self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)]))
hedge = {"enabled": True}
pref = cls(hedge=hedge)
self.assertEqual(pref.document, {"mode": mode, "hedge": hedge})
out = _maybe_add_read_preference({}, pref)
self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)]))
hedge = {"enabled": False}
pref = cls(hedge=hedge)
self.assertEqual(pref.document, {"mode": mode, "hedge": hedge})
out = _maybe_add_read_preference({}, pref)
self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)]))
hedge = {"enabled": False, "extra": "option"}
pref = cls(hedge=hedge)
self.assertEqual(pref.document, {"mode": mode, "hedge": hedge})
out = _maybe_add_read_preference({}, pref)
self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)]))
def test_send_hedge(self):
cases = {
"primaryPreferred": PrimaryPreferred,
"secondaryPreferred": SecondaryPreferred,
"nearest": Nearest,
}
if client_context.supports_secondary_read_pref:
cases["secondary"] = Secondary
listener = OvertCommandListener()
client = rs_client(event_listeners=[listener])
self.addCleanup(client.close)
client.admin.command("ping")
for mode, cls in cases.items():
pref = cls(hedge={"enabled": True})
coll = client.test.get_collection("test", read_preference=pref)
listener.reset()
coll.find_one()
started = listener.results["started"]
self.assertEqual(len(started), 1, started)
cmd = started[0].command
if client_context.is_rs or client_context.is_mongos:
self.assertIn("$readPreference", cmd)
self.assertEqual(cmd["$readPreference"], pref.document)
else:
self.assertNotIn("$readPreference", cmd)
def test_maybe_add_read_preference(self):
# Primary doesn't add $readPreference
out = _maybe_add_read_preference({}, Primary())
self.assertEqual(out, {})
pref = PrimaryPreferred()
out = _maybe_add_read_preference({}, pref)
self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)]))
pref = PrimaryPreferred(tag_sets=[{"dc": "nyc"}])
out = _maybe_add_read_preference({}, pref)
self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)]))
pref = Secondary()
out = _maybe_add_read_preference({}, pref)
self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)]))
pref = Secondary(tag_sets=[{"dc": "nyc"}])
out = _maybe_add_read_preference({}, pref)
self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)]))
# SecondaryPreferred without tag_sets or max_staleness doesn't add
# $readPreference
pref = SecondaryPreferred()
out = _maybe_add_read_preference({}, pref)
self.assertEqual(out, {})
pref = SecondaryPreferred(tag_sets=[{"dc": "nyc"}])
out = _maybe_add_read_preference({}, pref)
self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)]))
pref = SecondaryPreferred(max_staleness=120)
out = _maybe_add_read_preference({}, pref)
self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)]))
pref = Nearest()
out = _maybe_add_read_preference({}, pref)
self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)]))
pref = Nearest(tag_sets=[{"dc": "nyc"}])
out = _maybe_add_read_preference({}, pref)
self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)]))
criteria = SON([("$query", {}), ("$orderby", SON([("_id", 1)]))])
pref = Nearest()
out = _maybe_add_read_preference(criteria, pref)
self.assertEqual(
out,
SON(
[
("$query", {}),
("$orderby", SON([("_id", 1)])),
("$readPreference", pref.document),
]
),
)
pref = Nearest(tag_sets=[{"dc": "nyc"}])
out = _maybe_add_read_preference(criteria, pref)
self.assertEqual(
out,
SON(
[
("$query", {}),
("$orderby", SON([("_id", 1)])),
("$readPreference", pref.document),
]
),
)
@client_context.require_mongos
def test_mongos(self):
res = client_context.client.config.shards.find_one()
assert res is not None
shard = res["host"]
num_members = shard.count(",") + 1
if num_members == 1:
raise SkipTest("Need a replica set shard to test.")
coll = client_context.client.pymongo_test.get_collection(
"test", write_concern=WriteConcern(w=num_members)
)
coll.drop()
res = coll.insert_many([{} for _ in range(5)])
first_id = res.inserted_ids[0]
last_id = res.inserted_ids[-1]
# Note - this isn't a perfect test since there's no way to
# tell what shard member a query ran on.
for pref in (Primary(), PrimaryPreferred(), Secondary(), SecondaryPreferred(), Nearest()):
qcoll = coll.with_options(read_preference=pref)
results = list(qcoll.find().sort([("_id", 1)]))
self.assertEqual(first_id, results[0]["_id"])
self.assertEqual(last_id, results[-1]["_id"])
results = list(qcoll.find().sort([("_id", -1)]))
self.assertEqual(first_id, results[-1]["_id"])
self.assertEqual(last_id, results[0]["_id"])
@client_context.require_mongos
def test_mongos_max_staleness(self):
# Sanity check that we're sending maxStalenessSeconds
coll = client_context.client.pymongo_test.get_collection(
"test", read_preference=SecondaryPreferred(max_staleness=120)
)
# No error
coll.find_one()
coll = client_context.client.pymongo_test.get_collection(
"test", read_preference=SecondaryPreferred(max_staleness=10)
)
try:
coll.find_one()
except OperationFailure as exc:
self.assertEqual(160, exc.code)
else:
self.fail("mongos accepted invalid staleness")
coll = single_client(
readPreference="secondaryPreferred", maxStalenessSeconds=120
).pymongo_test.test
# No error
coll.find_one()
coll = single_client(
readPreference="secondaryPreferred", maxStalenessSeconds=10
).pymongo_test.test
try:
coll.find_one()
except OperationFailure as exc:
self.assertEqual(160, exc.code)
else:
self.fail("mongos accepted invalid staleness")
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/env python2.7
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading, subprocess, re, os, sys, signal, socket
from time import sleep, time
from contextlib import closing
import traceback, thread
from datetime import datetime
from collections import namedtuple
from pprint import pprint
from itertools import groupby
# Probe intervals, in seconds.
# Warning: a value too short may get wrong results due to lack of data when system load goes high.
# and must be float!
PROBE_INTERVAL=float(5)
#FIXME: use log helper later
#log_lock = threading.Lock()
def log(*s):
if len(s)==1: s=s[0]
else: s= " ".join([str(x) for x in s])
# with log_lock:
# with open("/home/zhihui/monitor_proc.log", 'a') as f:
log_str = str(thread.get_ident())+":"+str(s) +'\n'
# f.write( log_str )
sys.stderr.write(log_str)
entered=False
def sig_term_handler(signo, stack):
global entered
global log_path
global report_path
global workload_title
global bench_log_path
global na
if not entered:
entered=True # FIXME: Not atomic
else: return
na.stop()
generate_report(workload_title, log_path, bench_log_path, report_path)
sys.exit(0)
def samedir(fn):
"""
return abspath of fn in the same directory where this python file stores
"""
return os.path.abspath(os.path.join(os.path.dirname(__file__), fn))
class PatchedNameTuple(object):
def __sub__(self, other):
assert isinstance(other, self.__class__)
assert self[0] == other[0]
cls = self.__class__
return cls(self[0], *[a-b for a, b in zip(self[1:], other[1:])])
def __div__(self, other):
return self.__class__(self[0], *[a/other for a in self[1:]])
def _add(self, other, override_title=None):
if other == None: return self
assert isinstance(other, self.__class__)
cls = self.__class__
title = self[0] if not override_title else override_title
return cls(title, *[a+b for a, b in zip(self[1:], other[1:])])
def ident(size, s):
return "\n".join((" "*size + x for x in s.split("\n")))
class RemoteProc(threading.Thread):
SEP="----SEP----"
template_debug=r"""exec('
import time, os, sys, socket, traceback
socket.setdefaulttimeout(1)
def log(*x, **kw):
with open("/home/zhihui/probe.log", kw.get("mode","a")) as f:
f.write(repr(x)+chr(10))
try:
log("create socket", mode="w")
s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
log("bind socket")
s.bind(("0.0.0.0",0))
log("listen socket")
s.listen(5)
log("bind socket to:", s.getsockname())
while True:
log("accepting")
try:
print s.getsockname()[1]
s2,peer=s.accept()
break
except socket.timeout:
log("accept timeout, retry")
log("accepted, peer:",peer)
except Exception as e:
import traceback
log(traceback.format_exc())
{func_template}
while True:
s2.send(("{SEP}+%s" % time.time())+chr(10))
{call_template}
s2.send("{SEP}#end"+chr(10))
time.sleep({interval})
')"""
template=r"""exec('
import time, os, sys, socket, traceback
s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("0.0.0.0",0))
s.listen(5)
print s.getsockname()[1]
s2,peer=s.accept()
{func_template}
while True:
s2.send(("{SEP}+%s" % time.time())+chr(10))
{call_template}
s2.send("{SEP}#end"+chr(10))
time.sleep({interval})
')"""
def __init__(self, host, interval=1):
self.host = host
self.cmds = []
self.interval = interval
self.monitor_ins = {}
self.local_aggr_container={}
self._running=True
super(RemoteProc, self).__init__()
def register(self, monitor_ins, cmds):
assert isinstance(monitor_ins, BaseMonitor)
self.monitor_ins[len(self.cmds)] = monitor_ins # monitor command seq id => monitor instance
self.cmds.append(cmds)
def run(self):
func_template = "\n".join(["def func_{id}():\n{func}"\
.format(id=id,
func=ident(2,
func+'\ns2.send("{SEP}={id}"+chr(10))'\
.format(SEP=self.SEP, id=id))) \
for id, func in enumerate(self.cmds)])
call_template="\n".join([" func_{id}()"\
.format(id=id) for id in range(len(self.cmds))]
)
script = self.template.format(func_template=func_template,
call_template=call_template,
interval = self.interval,
SEP = self.SEP)
s = script.replace('"', r'\"').replace("\n", r"\n")
container=[]
# log("ssh client to:", self.host)
with self.ssh_client(self.host, "python -u -c \"{script}\"".format(script=s)) as f:
# log("ssh client %s connected" % self.host)
try:
port_line = f.readline()
# log("host:", self.host, "got port,", port_line)
port = int(port_line.rstrip())
s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(0.5)
for i in range(30): # try to connect 30 times maximum
try:
# log("try to connect:", self.host, port)
s.connect((self.host, port))
# log("connectted to:", self.host, port)
break
except socket.timeout:
# log("connecting to:", self.host, port, "timedout")
pass
                else: # not connected after 30 tries
# log("cann't connectted to:", self.host, port)
s.shutdown(socket.SHUT_RDWR)
self.ssh_close()
return
s.settimeout(None)
except Exception as e:
log(traceback.format_exc())
with closing(s.makefile()) as f2:
while self._running:
try:
l = f2.readline()
except KeyboardInterrupt:
break
if not l: break
if l.startswith(self.SEP):
tail = l.lstrip(self.SEP)
if tail[0]=='+': # timestamp
remote_timestamp = float(tail[1:])
cur_timestamp = time()
elif tail.startswith('#end'): # end sign
# log("na push, timestamp:", cur_timestamp)
self.na_push(cur_timestamp)
else:
id = int(tail[1:])
if self.monitor_ins[id]:
self.monitor_ins[id].feed(container, cur_timestamp)
container = []
else:
container.append(l.rstrip())
s.shutdown(socket.SHUT_RDWR)
self.ssh_close()
def stop(self):
self._running=False
def aggregate(self, timestamp, data):
if not self.local_aggr_container:
self.local_aggr_container['timestamp']=timestamp
assert timestamp == self.local_aggr_container['timestamp']
assert type(data) is dict
self.local_aggr_container.update(data)
self.local_aggr_container['timestamp'] = timestamp
def na_register(self, na):
assert isinstance(na, NodeAggregator)
self.node_aggr_parent = na
def na_push(self, timestamp):
if self.local_aggr_container:
assert self.local_aggr_container.get('timestamp', -1) == timestamp
self.node_aggr_parent.commit_aggregate(self.host, self.local_aggr_container)
self.local_aggr_container={}
class BaseMonitor(object):
IGNORE_KEYS=[]
def __init__(self, rproc):
self.rproc = rproc
self._last = None
def feed(self, container, timestamp): # override to parse pulled data files
raise NotImplementedError()
def ssh_client(self, host, shell): # override for opening ssh client
raise NotImplementedError()
def ssh_close(self): # override for clear up ssh client
raise NotImplementedError()
def commit(self, timestamp, header, stat):
if self._last is None: self._last = stat
else:
stat_delta = dict([(header+'/'+k, stat[k] - self._last[k]) \
for k in set(self._last.keys()).union(set(stat.keys()))\
if k in stat and k in self._last and k not in self.IGNORE_KEYS
])
self._last = stat
# if header.startswith("net"):
# print stat_delta
stat_delta[header+'/total'] = reduce_patched(lambda a,b: a._add(b, 'total'), stat_delta.values())
self.rproc.aggregate(timestamp, stat_delta)
class BashSSHClientMixin(object):
ssh_lock = threading.Lock()
def ssh_client(self, host, shell):
with open(os.devnull, 'rb', 0) as DEVNULL:
with BashSSHClientMixin.ssh_lock:
self.proc = subprocess.Popen(["ssh", host, shell], bufsize=1,
stdin=DEVNULL, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return self.proc.stdout
def ssh_close(self):
assert self.proc
self.proc.terminate()
self.proc.wait()
return self.proc.returncode
_CPU=namedtuple("CPU", ['label', 'user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq'])
class CPU(_CPU, PatchedNameTuple):
def percentage(self):
total = sum(self[1:])
return CPU(self[0], *[x*100.0 / total for x in self[1:]]) if total>0 else self
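# Hedged illustration of CPU.percentage() (not executed anywhere): each jiffy
# field is rescaled to its share of the total, e.g.
#   CPU("total", 10, 0, 10, 70, 10, 0, 0).percentage()
#   -> CPU("total", 10.0, 0.0, 10.0, 70.0, 10.0, 0.0, 0.0)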
class CPUMonitor(BaseMonitor):
def __init__(self, rproc):
super(CPUMonitor, self).__init__(rproc)
rproc.register(self, """with open("/proc/stat") as f:
s2.send("".join([x for x in f.readlines() if x.startswith("cpu")]))
""")
def feed(self, container, timestamp):
"parse /proc/stat"
self.commit(timestamp, dict([self._parse_stat(line) for line in container]))
def _parse_stat(self, line):
"parse one line of /proc/stat"
assert line.strip(), "BUG! empty line in /proc/stat"
fields = line.split()
if fields[0]=='cpu':
fields[0]='total'
return (fields[0], CPU(fields[0], *[int(x) for x in fields[1:8]]))
def commit(self, timestamp, cpu_stat):
if self._last is None:
self._last = cpu_stat
else:
cpu_usage = dict([("cpu/"+k, (cpu_stat[k] - self._last[k]).percentage()) for k in self._last])
self._last = cpu_stat
self.rproc.aggregate(timestamp, cpu_usage)
_Network=namedtuple("Network", ['label', "recv_bytes", "recv_packets", "recv_errs", "recv_drop",
"send_bytes", "send_packets", "send_errs", "send_drop"])
class Network(_Network, PatchedNameTuple): pass
class NetworkMonitor(BaseMonitor):
IGNORE_KEYS=["lo"]
def __init__(self, rproc):
rproc.register(self, """with open("/proc/net/dev") as f:
s2.send("".join([x for x in f.readlines()]))
""")
self._filter = re.compile('^\s*(.+):\s*(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+\d+\s+\d+\s+\d+\s+\d+\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+).*$')
super(NetworkMonitor, self).__init__(rproc)
def feed(self, container, timestamp):
"parse /proc/net/dev"
self.commit(timestamp, "net", dict(filter(lambda x:x, [self._parse_net_dev(line) for line in container])))
def _parse_net_dev(self, line):
matched = self._filter.match(line)
if matched:
obj = Network(matched.groups()[0], *[int(x) for x in matched.groups()[1:]])
if not (obj.recv_bytes==0 and obj.send_bytes==0):
return (obj[0], obj)
_Disk=namedtuple("Disk", ["label", "io_read", "bytes_read", "time_spent_read", "io_write", "bytes_write", "time_spent_write"])
class Disk(_Disk, PatchedNameTuple): pass
class DiskMonitor(BaseMonitor):
def __init__(self, rproc):
super(DiskMonitor, self).__init__(rproc)
rproc.register(self, """with open("/proc/diskstats") as f:
blocks = os.listdir("/sys/block")
s2.send("".join([x for x in f.readlines() if x.split()[2] in blocks and not x.split()[2].startswith("loop") and x.split()[3]!="0"]))
""")
def feed(self, container, timestamp):
"parse /proc/diskstats"
self.commit(timestamp, "disk", dict([self._parse_disk_stat(line) for line in container]))
def _parse_disk_stat(self, line):
fields = line.split()[2:]
obj = Disk(fields[0],
io_read=int(fields[1]), bytes_read=int(fields[3])*512, time_spent_read=int(fields[4])/1000.0,
io_write=int(fields[5]), bytes_write=int(fields[7])*512, time_spent_write=int(fields[8])/1000.0)
return (obj[0], obj)
_Memory=namedtuple("Memory", ["label", "total", "used", "buffer_cache", "free", "map"])
class Memory(_Memory, PatchedNameTuple): pass
class MemoryMonitor(BaseMonitor):
def __init__(self, rproc):
super(MemoryMonitor, self).__init__(rproc)
rproc.register(self, """with open("/proc/meminfo") as f:
mem = dict([(a, b.split()[0].strip()) for a, b in [x.split(":") for x in f.readlines()]])
s2.send(":".join([mem[field] for field in ["MemTotal", "Buffers", "Cached", "MemFree", "Mapped"]])+chr(10))
""")
def feed(self, memory_status, timestamp):
"parse /proc/meminfo"
total, buffers, cached, free, mapped= [int(x) for x in memory_status[0].split(":")]
self.rproc.aggregate(timestamp, {"memory/total":Memory(label="total", total=total,
used=total - free - buffers-cached,
buffer_cache=buffers + cached,
free=free, map=mapped)})
_Proc=namedtuple("Proc", ["label", "load5", "load10", "load15", "running", "procs"])
class Proc(_Proc, PatchedNameTuple): pass
class ProcMonitor(BaseMonitor):
def __init__(self, rproc):
super(ProcMonitor, self).__init__(rproc)
rproc.register(self, """with open("/proc/loadavg") as f:
s2.send(f.read())
""")
def feed(self, load_status, timestamp):
"parse /proc/meminfo"
load5, load10, load15, running_procs= load_status[0].split()[:4]
running, procs = running_procs.split('/')
self.rproc.aggregate(timestamp, {"proc":Proc(label="total", load5=float(load5), load10=float(load10),
load15=float(load15), running=int(running), procs=int(procs))})
class NodeAggregator(object):
def __init__(self, log_name):
self.node_pool = {}
self.log_name = log_name
self.log_lock = threading.Lock()
try:
os.unlink(self.log_name)
except OSError:
pass
def append(self, node):
assert isinstance(node, RemoteProc)
self.node_pool[node.host] = node
node.na_register(self)
def commit_aggregate(self, node, datas):
datas['hostname'] = node
with self.log_lock:
with file(self.log_name, "a") as f:
f.write(repr(datas) + "\n")
def run(self):
for v in self.node_pool.values():
v.start()
def stop(self):
for v in self.node_pool.values():
v.stop()
for v in self.node_pool.values():
v.join()
def round_to_base(v, b):
"""
>>> round_to_base(0.1, 0.3)
0.0
>>> round_to_base(0.3, 0.3)
0.3
>>> round_to_base(0.0, 0.3)
0.0
>>> round_to_base(0.5, 0.3)
0.3
>>> round_to_base(0.51, 0.3)
0.3
"""
for i in range(10):
base = int(b * 10**i)
if abs(base - b * 10**i) < 0.001: break
assert base>0
return float(int(v * 10**i) / base * base) / (10**i)
def filter_dict_with_prefix(d, prefix, sort=True):
keys = sorted(d.keys()) if sort else d.keys()
if prefix[0]=='!':
return dict([(x, d[x]) for x in keys if not x.startswith(prefix[1:])])
else:
return dict([(x, d[x]) for x in keys if x.startswith(prefix)])
def reduce_patched(func, data):
if len(data)==1:
return data[0]
elif len(data)==0:
return data
else:
return reduce(func, data)
def filter_dict_with_prefixes(d, *prefixes):
if len(prefixes)==1:
return filter_dict_with_prefix(d, prefixes[0])
else:
return reduce_patched(lambda a,b: filter_dict_with_prefix(filter_dict_with_prefix(d, a),b),
prefixes)
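# Hedged illustration of the prefix filters above (not executed anywhere):
#   filter_dict_with_prefix({"cpu/0": 1, "cpu/total": 2, "net/eth0": 3}, "cpu")
#       -> {"cpu/0": 1, "cpu/total": 2}
#   filter_dict_with_prefixes({"cpu/0": 1, "cpu/total": 2}, "cpu", "!cpu/total")
#       -> {"cpu/0": 1}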
def test():
p = BashSSHClientMixin()
script=r"""exec('
import time, os, sys
while 1:
with open("/proc/stat") as f: print f.read(),
print "---hello---"
time.sleep(1)
')"""
s = script.replace('"', r'\"').replace("\n", r"\n")
with p.ssh_client("localhost", "python -u -c \"{s}\"".format(s=s)) as f:
while 1:
l = f.readline()
print l.rstrip()
if not l: break
p.ssh_close()
def test2():
class P(RemoteProc, BashSSHClientMixin): pass
p = P("localhost", 0.3)
CPUMonitor(p)
NetworkMonitor(p)
DiskMonitor(p)
MemoryMonitor(p)
p.run()
def start_monitor(log_filename, nodes):
class P(RemoteProc, BashSSHClientMixin):
def __init__(self, *args):
RemoteProc.__init__(self, *args)
CPUMonitor(self)
NetworkMonitor(self)
DiskMonitor(self)
MemoryMonitor(self)
ProcMonitor(self)
global na
na = NodeAggregator(log_filename)
nodes = sorted(list(set(nodes)))
for node in nodes:
na.append(P(node, PROBE_INTERVAL))
na.run()
def parse_bench_log(benchlog_fn):
events=["x,event"]
_spark_stage_submit = re.compile("^(\d{2}\/\d{2}\/\d{2} \d{2}:\d{2}:\d{2}) INFO [a-zA-Z0-9_\.]*DAGScheduler: Submitting (Stage \d+) \((.*)\).+$") # submit spark stage
_spark_stage_finish = re.compile("^(\d{2}\/\d{2}\/\d{2} \d{2}:\d{2}:\d{2}) INFO [a-zA-Z0-9_\.]*DAGScheduler: (Stage \d+) \((.*)\) finished.+$") # spark stage finish
_thrill_stage_start = re.compile("^.*[0-9]*,\"START \((.*)\) stage\":\"([a-zA-Z]*)\".*time:\":\"(\d{2}\/\d{2}\/\d{2} \d{2}:\d{2}:\d{2})\".*$")
_thrill_stage_finish = re.compile("^.*[0-9]*,\"FINISH \((.*)\) stage\":\"([a-zA-Z]*)\".*time:\":\"(\d{2}\/\d{2}\/\d{2} \d{2}:\d{2}:\d{2})\".*$")
_hadoop_run_job = re.compile("^(\d{2}\/\d{2}\/\d{4} \d{2}:\d{2}:\d{2}) INFO mapred.*\.Job.*: Running job: job_([\d_]+)$") # hadoop run job
_hadoop_map_reduce_progress = re.compile("^(\d{2}\/\d{2}\/\d{2} \d{2}:\d{2}:\d{2}) INFO mapred.*\.Job.*:\s+map (\d{1,2})% reduce (\d{1,2})%$") # hadoop reduce progress
_hadoop_job_complete_mr1 = re.compile("^(\d{2}\/\d{2}\/\d{2} \d{2}:\d{2}:\d{2}) INFO mapred.JobClient: Job complete: job_([\d_]+)$")
_hadoop_job_complete_mr2 = re.compile("^(\d{2}\/\d{2}\/\d{2} \d{2}:\d{2}:\d{2}) INFO mapreduce.Job: Job job_([\d_]+) completed successfully$")
"""
# MR1 sample
14/06/24 11:18:39 INFO mapred.JobClient: Running job: job_201406241116_0001
14/06/24 11:18:40 INFO mapred.JobClient: map 0% reduce 0%
...
13/11/21 14:38:55 INFO mapred.JobClient: Job complete: job_201311150128_0050
# MR2 sample
15/04/10 17:20:01 INFO mapreduce.Job: Running job: job_1427781540447_0448
15/04/10 17:20:07 INFO mapreduce.Job: Job job_1427781540447_0448 running in uber mode : false
15/04/10 17:20:07 INFO mapreduce.Job: map 0% reduce 0%
...
15/04/10 17:20:25 INFO mapreduce.Job: Job job_1427781540447_0448 completed successfully
"""
flag={}
with open(benchlog_fn) as f:
while True:
line = f.readline().rstrip()
if not line: break
for rule in [_spark_stage_submit, _spark_stage_finish, _hadoop_run_job, _hadoop_map_reduce_progress, _hadoop_job_complete_mr1, _hadoop_job_complete_mr2, _thrill_stage_start, _thrill_stage_finish]:
matched = rule.match(line)
if matched:
result = matched.groups()
if rule is _spark_stage_submit:
                        timestamp = datetime.strptime(result[0], r"%y/%m/%d %H:%M:%S").strftime("%s")+"000" # convert to milliseconds for js
events.append("{t},Start {v1} ({v2})".format(t=timestamp, v1=result[1], v2=result[2]))
elif rule is _spark_stage_finish:
                        timestamp = datetime.strptime(result[0], r"%y/%m/%d %H:%M:%S").strftime("%s")+"000" # convert to milliseconds for js
events.append("{t},Finish {v1} ({v2})".format(t=timestamp, v1=result[1], v2=result[2]))
                    elif rule is _thrill_stage_start:
                        timestamp = datetime.strptime(result[2], r"%m/%d/%y %H:%M:%S").strftime("%s")+"000" # convert to milliseconds for js
elem = "{t},Start {v1} ({v2})".format(t=timestamp, v1=result[1], v2=result[0])
if not elem in events:
events.append(elem)
elif rule is _thrill_stage_finish:
                        timestamp = datetime.strptime(result[2], r"%m/%d/%y %H:%M:%S").strftime("%s")+"000" # convert to milliseconds for js
elem = "{t},Finish {v1} ({v2})".format(t=timestamp, v1=result[1], v2=result[0])
if not elem in events:
events.append(elem)
                    elif rule is _hadoop_run_job:
                        timestamp = datetime.strptime(result[0], r"%m/%d/%Y %H:%M:%S").strftime("%s")+"000" # format assumed to match the \d{2}/\d{2}/\d{4} pattern above
                        events.append("{t},Start Job {v1}".format(t=timestamp, v1=result[1]))
flag={}
                    elif rule is _hadoop_map_reduce_progress:
                        timestamp = datetime.strptime(result[0], r"%y/%m/%d %H:%M:%S").strftime("%s")+"000" # convert to milliseconds for js; format per the MR samples above
                        map_progress,reduce_progress = int(result[1]), int(result[2])
op={'map':False, 'reduce':False}
if map_progress == 100:
if not "map" in flag:
op['map'] = True
flag['map'] = True
elif reduce_progress>0:
if not 'reduce' in flag:
op['reduce'] = True
flag['reduce'] = True
if op['map'] and op['reduce']:
events.append("{t},Map finish and Reduce start".format(t=timestamp))
elif op['map']:
events.append("{t},Map finish".format(t=timestamp))
elif op['reduce']:
events.append("{t},Reduce start".format(t=timestamp))
                    elif rule is _hadoop_job_complete_mr1 or rule is _hadoop_job_complete_mr2:
                        timestamp = datetime.strptime(result[0], r"%y/%m/%d %H:%M:%S").strftime("%s")+"000" # convert to milliseconds for js; format per the MR samples above
                        events.append("{t},Finish Job {v1}".format(t=timestamp, v1=result[1]))
else:
assert 0, "should never reach here"
# limit maximum string length of events
for i in range(len(events)):
event_time, event_str = re.split(',', events[i], 1)
if len(event_str) > 45:
event_str = event_str[:21]+ '...' + event_str[-21:]
events[i]="%s,%s" % (event_time, event_str)
# merge events occurred at sametime:
i = 1
while i < len(events)-1:
cur = events[i].split(',')[0]
next = events[i+1].split(',')[0]
if abs(int(cur)/1000 - int(next)/1000) < 1:
events[i] = events[i] + "<br>" + re.split(',', events[i+1], 1)[1]
del events[i+1]
continue
i += 1
return events
def generate_report(workload_title, log_fn, benchlog_fn, report_fn):
    c = -1
with open(log_fn) as f:
datas=[eval(x) for x in f.readlines()]
all_hosts = sorted(list(set([x['hostname'] for x in datas])))
data_slices = groupby(datas, lambda x:round_to_base(x['timestamp'], PROBE_INTERVAL)) # round to time interval and groupby
# Generating CSVs
cpu_heatmap = ["x,y,value,hostname,coreid"]
cpu_overall = ["x,idle,user,system,iowait,others"]
network_heatmap = ["x,y,value,hostname,adapterid"]
network_overall = ["x,recv_bytes,send_bytes,|recv_packets,send_packets,errors"]
diskio_heatmap = ["x,y,value,hostname,diskid"]
diskio_overall = ["x,read_bytes,write_bytes,|read_io,write_io"]
memory_heatmap = ["x,y,value,hostname"]
memory_overall = ["x,free,buffer_cache,used"]
procload_heatmap = ["x,y,value,hostname"]
procload_overall = ["x,load5,load10,load15,|running,procs"]
events = parse_bench_log(benchlog_fn)
cpu_count={}
network_count={}
diskio_count={}
memory_count={}
proc_count={}
for t, sub_data in data_slices:
classed_by_host = dict([(x['hostname'], x) for x in sub_data])
# total cpus, plot user/sys/iowait/other
data_by_all_hosts = [classed_by_host.get(h, {}) for h in all_hosts]
# all cpu cores, total cluster
summed1 = [x['cpu/total'] for x in data_by_all_hosts if x.has_key('cpu/total')]
if summed1:
summed = reduce_patched(lambda a,b: a._add(b), summed1) / len(summed1)
for x in data_by_all_hosts:
cpu = x.get('cpu/total', None)
if not cpu: continue
# user, system, io, idle, others
# print t, x['hostname'], cpu.user, cpu.system, cpu.iowait, cpu.idle, cpu.nice+cpu.irq+cpu.softirq
# print t, summed
cpu_overall.append("{time},{idle},{user},{system},{iowait},{others}" \
.format(time = int(t*1000), user = summed.user, system = summed.system,
iowait = summed.iowait, idle = summed.idle,
others = summed.nice + summed.irq + summed.softirq))
# all cpu cores, plot heatmap according to cpus/time/usage(100%-idle)
for idx, x in enumerate(data_by_all_hosts):
for idy, y in enumerate(filter_dict_with_prefixes(x, "cpu", "!cpu/total").values()):
try:
pos = cpu_count[(idx, idy, x['hostname'])]
except:
pos = len(cpu_count)
cpu_count[(idx, idy, x['hostname'])] = pos
# print t, pos, 100-y.idle, x['hostname'], y.label
cpu_heatmap.append("{time},{pos},{value},{host},{cpuid}" \
.format(time = int(t*1000), pos = pos, value = 100-y.idle,
host = x['hostname'], cpuid = y.label))
# all disk of each node, total cluster
summed1=[x['disk/total'] for x in data_by_all_hosts if x.has_key('disk/total')]
if summed1:
summed = reduce_patched(lambda a,b: a._add(b), summed1)
for x in data_by_all_hosts:
disk = x.get('disk/total', None)
if not disk: continue
# io-read, io-write, bytes-read, bytes-write
# print t, x['hostname'], disk.io_read, disk.io_write, disk.bytes_read, disk.bytes_write
# print t, summed
diskio_overall.append("{time},{bytes_read},{bytes_write},{io_read},{io_write}" \
.format(time = int(t*1000),
bytes_read = summed.bytes_read / PROBE_INTERVAL,
bytes_write = summed.bytes_write / PROBE_INTERVAL,
io_read = summed.io_read / PROBE_INTERVAL,
io_write = summed.io_write / PROBE_INTERVAL))
# all disks, plot heatmap according to disks/bytes_read+bytes_write
for idx, x in enumerate(data_by_all_hosts):
for idy, y in enumerate(filter_dict_with_prefixes(x, "disk", "!disk/total").values()):
try:
pos = diskio_count[(idx, idy, x['hostname'])]
except:
pos = len(diskio_count)
diskio_count[(idx, idy, x['hostname'])] = pos
# print t, pos, 100-y.idle, x['hostname'], y.label
diskio_heatmap.append("{time},{pos},{value},{host},{diskid}" \
.format(time = int(t*1000),
pos = pos,
value = (y.bytes_read + y.bytes_write) / PROBE_INTERVAL,
host = x['hostname'],
diskid = y.label))
# memory of each node, total cluster
summed1 = [x['memory/total'] for x in data_by_all_hosts if x.has_key('memory/total')]
if summed1:
summed = reduce_patched(lambda a,b: a._add(b), summed1)
for x in data_by_all_hosts:
mem = x.get("memory/total", None)
if not mem: continue
# mem-total, mem-used, mem-buffer&cache, mem-free, KB
# print t, x['hostname'], mem.total, mem.used, mem.buffer_cache, mem.free
#print t, summed
memory_overall.append("{time},{free},{buffer_cache},{used}" \
.format(time = int(t*1000),
free = summed.free,
used = summed.used,
buffer_cache = summed.buffer_cache))
# all memory, plot heatmap according to memory/total - free
for idx, x in enumerate(data_by_all_hosts):
for idy, y in enumerate(filter_dict_with_prefixes(x, "memory/total").values()):
try:
pos = memory_count[(idx, idy, x['hostname'])]
except:
pos = len(memory_count)
memory_count[(idx, idy, x['hostname'])] = pos
# print t, pos, 100-y.idle, x['hostname'], y.label
memory_heatmap.append("{time},{pos},{value},{host}" \
.format(time = int(t*1000),
pos = pos,
value = (y.total - y.free)*1000,
host = x['hostname']))
# proc of each node, total cluster
summed1 = [x['proc'] for x in data_by_all_hosts if x.has_key('proc')]
if summed1:
summed = reduce_patched(lambda a,b: a._add(b), summed1)
for x in data_by_all_hosts:
procs = x.get("proc", None)
if not procs: continue
procload_overall.append("{time},{load5},{load10},{load15},{running},{procs}"\
.format(time = int(t*1000),
load5 = summed.load5,load10=summed.load10,
load15 = summed.load15,running=summed.running,
procs = summed.procs))
# all nodes' proc, plot heatmap according to proc/proc.procs
for idx, x in enumerate(data_by_all_hosts):
for idy, y in enumerate(filter_dict_with_prefixes(x, "proc").values()):
try:
pos = proc_count[(idx, idy, x['hostname'])]
except:
pos = len(proc_count)
proc_count[(idx, idy, x['hostname'])] = pos
# print t, pos, 100-y.idle, x['hostname'], y.label
procload_heatmap.append("{time},{pos},{value},{host}" \
.format(time = int(t*1000), pos = pos, value = y.procs,
host = x['hostname']))
# all network interface, total cluster
summed1 = [x['net/total'] for x in data_by_all_hosts if x.has_key('net/total')]
if summed1:
summed = reduce_patched(lambda a,b: a._add(b), summed1)
for x in data_by_all_hosts:
net = x.get("net/total", None)
if not net: continue
# recv-byte, send-byte, recv-packet, send-packet, errors
# print t, x['hostname'], net.recv_bytes, net.send_bytes, net.recv_packets, net.send_packets, net.recv_errs+net.send_errs+net.recv_drop+net.send_drop
# print t, summed
network_overall.append("{time},{recv_bytes},{send_bytes},{recv_packets},{send_packets},{errors}" \
.format(time = int(t*1000),
recv_bytes = summed.recv_bytes / PROBE_INTERVAL,
send_bytes = summed.send_bytes / PROBE_INTERVAL,
recv_packets = summed.recv_packets / PROBE_INTERVAL,
send_packets = summed.send_packets / PROBE_INTERVAL,
errors = (summed.recv_errs + summed.send_errs + \
summed.recv_drop + summed.send_drop) / PROBE_INTERVAL)
)
# all network adapters, plot heatmap according to net/recv_bytes + send_bytes
for idx, x in enumerate(data_by_all_hosts):
for idy, y in enumerate(filter_dict_with_prefixes(x, "net", "!net/total").values()):
try:
pos = network_count[(idx, idy, x['hostname'])]
except:
pos = len(network_count)
network_count[(idx, idy, x['hostname'])] = pos
network_heatmap.append("{time},{pos},{value},{host},{networkid}" \
.format(time = int(t*1000),
pos = pos*2,
value = y.recv_bytes / PROBE_INTERVAL,
host = x['hostname'],
networkid = y.label+".recv"))
network_heatmap.append("{time},{pos},{value},{host},{networkid}" \
.format(time = int(t*1000),
pos = pos*2+1,
value = y.send_bytes / PROBE_INTERVAL,
host = x['hostname'],
networkid = y.label+".send"))
with open(samedir("chart-template.html")) as f:
template = f.read()
variables = locals()
def my_replace(match):
match = match.group()[1:-1]
if match.endswith('heatmap') or match.endswith('overall'):
return "\n".join(variables[match])
elif match =='events':
return "\n".join(events)
elif match == 'probe_interval':
return str(PROBE_INTERVAL * 1000)
elif match == 'workload_name':
return workload_title
else:
return '{%s}' % match
with open(report_fn, 'w') as f:
f.write(re.sub(r'{\w+}', my_replace, template))
def show_usage():
log("""Usage:
monitor.py <workload_title> <parent_pid> <log_path.log> <benchlog_fn.log> <report_path.html> <monitor_node_name1> ... <monitor_node_nameN>
""")
if __name__=="__main__":
if len(sys.argv)<6:
log(sys.argv)
show_usage()
sys.exit(1)
# log(sys.argv)
global log_path
global report_path
global workload_title
global bench_log_path
global na
workload_title = sys.argv[1]
parent_pid = sys.argv[2]
log_path = sys.argv[3]
bench_log_path = sys.argv[4]
report_path = sys.argv[5]
nodes_to_monitor = sys.argv[6:]
pid=os.fork()
if pid: #parent
print pid
else: #child
os.close(0)
os.close(1)
os.close(2)
# log("child process start")
signal.signal(signal.SIGTERM, sig_term_handler)
start_monitor(log_path, nodes_to_monitor)
while os.path.exists("/proc/%s" % parent_pid):
sleep(1)
# parent lost, stop!
signal.signal(signal.SIGTERM, signal.SIG_IGN)
na.stop()
generate_report(workload_title, log_path, bench_log_path, report_path)
|
|
from __future__ import unicode_literals
import mimetypes
import os
import random
import sys
import time
from email import (charset as Charset, encoders as Encoders,
message_from_string, generator)
from email.message import Message
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.message import MIMEMessage
from email.header import Header
from email.utils import formatdate, getaddresses, formataddr, parseaddr
from django.conf import settings
from django.core.mail.utils import DNS_NAME
from django.utils.encoding import force_text
from django.utils import six
# Don't BASE64-encode UTF-8 messages so that we avoid unwanted attention from
# some spam filters.
utf8_charset = Charset.Charset('utf-8')
utf8_charset.body_encoding = None # Python defaults to BASE64
# Default MIME type to use on attachments (if it is not explicitly given
# and cannot be guessed).
DEFAULT_ATTACHMENT_MIME_TYPE = 'application/octet-stream'
class BadHeaderError(ValueError):
pass
# Copied from Python standard library, with the following modifications:
# * Used cached hostname for performance.
# * Added try/except to support lack of getpid() in Jython (#5496).
def make_msgid(idstring=None):
"""Returns a string suitable for RFC 2822 compliant Message-ID, e.g:
    <20020201195627.33539.96671@nightshade.la.mastaler.com>
Optional idstring if given is a string used to strengthen the
uniqueness of the message id.
"""
timeval = time.time()
utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(timeval))
try:
pid = os.getpid()
except AttributeError:
# No getpid() in Jython, for example.
pid = 1
randint = random.randrange(100000)
if idstring is None:
idstring = ''
else:
idstring = '.' + idstring
idhost = DNS_NAME
msgid = '<%s.%s.%s%s@%s>' % (utcdate, pid, randint, idstring, idhost)
return msgid
# Header names that contain structured address data (RFC #5322)
ADDRESS_HEADERS = {
'from',
'sender',
'reply-to',
'to',
'cc',
'bcc',
'resent-from',
'resent-sender',
'resent-to',
'resent-cc',
'resent-bcc',
}
def forbid_multi_line_headers(name, val, encoding):
"""Forbids multi-line headers, to prevent header injection."""
encoding = encoding or settings.DEFAULT_CHARSET
val = force_text(val)
if '\n' in val or '\r' in val:
raise BadHeaderError("Header values can't contain newlines (got %r for header %r)" % (val, name))
try:
val.encode('ascii')
except UnicodeEncodeError:
if name.lower() in ADDRESS_HEADERS:
val = ', '.join(sanitize_address(addr, encoding)
for addr in getaddresses((val,)))
else:
val = Header(val, encoding).encode()
else:
if name.lower() == 'subject':
val = Header(val).encode()
return str(name), val
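def _forbid_multi_line_headers_sketch():
    # Hedged sketch (never called from this module): a newline in a header
    # value raises BadHeaderError to block header injection; a non-ASCII
    # Subject is encoded as an RFC 2047 word via email.header.Header.
    try:
        forbid_multi_line_headers('To', 'a@example.com\nBcc: b@example.com', 'utf-8')
    except BadHeaderError:
        pass  # expected: newlines are rejected
    name, val = forbid_multi_line_headers('Subject', 'caf\u00e9', 'utf-8')
    return name, val  # ('Subject', '=?utf-8?...?=')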
def sanitize_address(addr, encoding):
if isinstance(addr, six.string_types):
addr = parseaddr(force_text(addr))
nm, addr = addr
# This try-except clause is needed on Python 3 < 3.2.4
# http://bugs.python.org/issue14291
try:
nm = Header(nm, encoding).encode()
except UnicodeEncodeError:
nm = Header(nm, 'utf-8').encode()
try:
addr.encode('ascii')
except UnicodeEncodeError: # IDN
if '@' in addr:
localpart, domain = addr.split('@', 1)
localpart = str(Header(localpart, encoding))
domain = domain.encode('idna').decode('ascii')
addr = '@'.join([localpart, domain])
else:
addr = Header(addr, encoding).encode()
return formataddr((nm, addr))
class MIMEMixin():
def as_string(self, unixfrom=False):
"""Return the entire formatted message as a string.
Optional `unixfrom' when True, means include the Unix From_ envelope
header.
This overrides the default as_string() implementation to not mangle
lines that begin with 'From '. See bug #13433 for details.
"""
fp = six.StringIO()
g = generator.Generator(fp, mangle_from_=False)
g.flatten(self, unixfrom=unixfrom)
return fp.getvalue()
if six.PY2:
as_bytes = as_string
else:
def as_bytes(self, unixfrom=False):
"""Return the entire formatted message as bytes.
Optional `unixfrom' when True, means include the Unix From_ envelope
header.
This overrides the default as_bytes() implementation to not mangle
lines that begin with 'From '. See bug #13433 for details.
"""
fp = six.BytesIO()
g = generator.BytesGenerator(fp, mangle_from_=False)
g.flatten(self, unixfrom=unixfrom)
return fp.getvalue()
class SafeMIMEMessage(MIMEMixin, MIMEMessage):
def __setitem__(self, name, val):
# message/rfc822 attachments must be ASCII
name, val = forbid_multi_line_headers(name, val, 'ascii')
MIMEMessage.__setitem__(self, name, val)
class SafeMIMEText(MIMEMixin, MIMEText):
def __init__(self, text, subtype, charset):
self.encoding = charset
if charset == 'utf-8':
# Unfortunately, Python < 3.5 doesn't support setting a Charset instance
# as MIMEText init parameter (http://bugs.python.org/issue16324).
# We do it manually and trigger re-encoding of the payload.
MIMEText.__init__(self, text, subtype, None)
del self['Content-Transfer-Encoding']
# Workaround for versions without http://bugs.python.org/issue19063
if (3, 2) < sys.version_info < (3, 3, 4):
payload = text.encode(utf8_charset.output_charset)
self._payload = payload.decode('ascii', 'surrogateescape')
self.set_charset(utf8_charset)
else:
self.set_payload(text, utf8_charset)
self.replace_header('Content-Type', 'text/%s; charset="%s"' % (subtype, charset))
else:
MIMEText.__init__(self, text, subtype, charset)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEText.__setitem__(self, name, val)
class SafeMIMEMultipart(MIMEMixin, MIMEMultipart):
def __init__(self, _subtype='mixed', boundary=None, _subparts=None, encoding=None, **_params):
self.encoding = encoding
MIMEMultipart.__init__(self, _subtype, boundary, _subparts, **_params)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEMultipart.__setitem__(self, name, val)
class EmailMessage(object):
"""
A container for email information.
"""
content_subtype = 'plain'
mixed_subtype = 'mixed'
encoding = None # None => use settings default
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None, cc=None):
"""
Initialize a single email message (which can be sent to multiple
recipients).
All strings used to create the message can be unicode strings
(or UTF-8 bytestrings). The SafeMIMEText class will handle any
necessary encoding conversions.
"""
if to:
assert not isinstance(to, six.string_types), '"to" argument must be a list or tuple'
self.to = list(to)
else:
self.to = []
if cc:
assert not isinstance(cc, six.string_types), '"cc" argument must be a list or tuple'
self.cc = list(cc)
else:
self.cc = []
if bcc:
assert not isinstance(bcc, six.string_types), '"bcc" argument must be a list or tuple'
self.bcc = list(bcc)
else:
self.bcc = []
self.from_email = from_email or settings.DEFAULT_FROM_EMAIL
self.subject = subject
self.body = body
self.attachments = attachments or []
self.extra_headers = headers or {}
self.connection = connection
def get_connection(self, fail_silently=False):
from django.core.mail import get_connection
if not self.connection:
self.connection = get_connection(fail_silently=fail_silently)
return self.connection
def message(self):
encoding = self.encoding or settings.DEFAULT_CHARSET
msg = SafeMIMEText(self.body, self.content_subtype, encoding)
msg = self._create_message(msg)
msg['Subject'] = self.subject
msg['From'] = self.extra_headers.get('From', self.from_email)
msg['To'] = self.extra_headers.get('To', ', '.join(self.to))
if self.cc:
msg['Cc'] = ', '.join(self.cc)
# Email header names are case-insensitive (RFC 2045), so we have to
# accommodate that when doing comparisons.
header_names = [key.lower() for key in self.extra_headers]
if 'date' not in header_names:
msg['Date'] = formatdate()
if 'message-id' not in header_names:
msg['Message-ID'] = make_msgid()
for name, value in self.extra_headers.items():
if name.lower() in ('from', 'to'): # From and To are already handled
continue
msg[name] = value
return msg
def recipients(self):
"""
Returns a list of all recipients of the email (includes direct
addressees as well as Cc and Bcc entries).
"""
return self.to + self.cc + self.bcc
def send(self, fail_silently=False):
"""Sends the email message."""
if not self.recipients():
# Don't bother creating the network connection if there's nobody to
# send to.
return 0
return self.get_connection(fail_silently).send_messages([self])
def attach(self, filename=None, content=None, mimetype=None):
"""
Attaches a file with the given filename and content. The filename can
be omitted and the mimetype is guessed, if not provided.
If the first parameter is a MIMEBase subclass it is inserted directly
into the resulting message attachments.
"""
if isinstance(filename, MIMEBase):
assert content is None
assert mimetype is None
self.attachments.append(filename)
else:
assert content is not None
self.attachments.append((filename, content, mimetype))
def attach_file(self, path, mimetype=None):
"""Attaches a file from the filesystem."""
filename = os.path.basename(path)
with open(path, 'rb') as f:
content = f.read()
self.attach(filename, content, mimetype)
def _create_message(self, msg):
return self._create_attachments(msg)
def _create_attachments(self, msg):
if self.attachments:
encoding = self.encoding or settings.DEFAULT_CHARSET
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.mixed_subtype, encoding=encoding)
if self.body:
msg.attach(body_msg)
for attachment in self.attachments:
if isinstance(attachment, MIMEBase):
msg.attach(attachment)
else:
msg.attach(self._create_attachment(*attachment))
return msg
def _create_mime_attachment(self, content, mimetype):
"""
Converts the content, mimetype pair into a MIME attachment object.
If the mimetype is message/rfc822, content may be an
email.Message or EmailMessage object, as well as a str.
"""
basetype, subtype = mimetype.split('/', 1)
if basetype == 'text':
encoding = self.encoding or settings.DEFAULT_CHARSET
attachment = SafeMIMEText(content, subtype, encoding)
elif basetype == 'message' and subtype == 'rfc822':
# Bug #18967: per RFC2046 s5.2.1, message/rfc822 attachments
# must not be base64 encoded.
if isinstance(content, EmailMessage):
# convert content into an email.Message first
content = content.message()
elif not isinstance(content, Message):
# For compatibility with existing code, parse the message
# into an email.Message object if it is not one already.
content = message_from_string(content)
attachment = SafeMIMEMessage(content, subtype)
else:
# Encode non-text attachments with base64.
attachment = MIMEBase(basetype, subtype)
attachment.set_payload(content)
Encoders.encode_base64(attachment)
return attachment
def _create_attachment(self, filename, content, mimetype=None):
"""
Converts the filename, content, mimetype triple into a MIME attachment
object.
"""
if mimetype is None:
mimetype, _ = mimetypes.guess_type(filename)
if mimetype is None:
mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
attachment = self._create_mime_attachment(content, mimetype)
if filename:
try:
filename.encode('ascii')
except UnicodeEncodeError:
if six.PY2:
filename = filename.encode('utf-8')
filename = ('utf-8', '', filename)
attachment.add_header('Content-Disposition', 'attachment',
filename=filename)
return attachment
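def _email_message_sketch():
    # Hedged usage sketch (never called from this module): exercises the
    # EmailMessage API defined above. Assumes Django settings are configured
    # with an email backend; the addresses are placeholders.
    msg = EmailMessage(
        subject='Report',
        body='Plain-text body',
        from_email='sender@example.com',
        to=['recipient@example.com'],
        headers={'Reply-To': 'replies@example.com'},
    )
    msg.attach('report.txt', 'file contents', 'text/plain')
    return msg.send()  # number of messages handed to the backend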
class EmailMultiAlternatives(EmailMessage):
"""
A version of EmailMessage that makes it easy to send multipart/alternative
messages. For example, including text and HTML versions of the text is
made easier.
"""
alternative_subtype = 'alternative'
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None, alternatives=None,
cc=None):
"""
Initialize a single email message (which can be sent to multiple
recipients).
All strings used to create the message can be unicode strings (or UTF-8
bytestrings). The SafeMIMEText class will handle any necessary encoding
conversions.
"""
super(EmailMultiAlternatives, self).__init__(
subject, body, from_email, to, bcc, connection, attachments, headers, cc
)
self.alternatives = alternatives or []
def attach_alternative(self, content, mimetype):
"""Attach an alternative content representation."""
assert content is not None
assert mimetype is not None
self.alternatives.append((content, mimetype))
def _create_message(self, msg):
return self._create_attachments(self._create_alternatives(msg))
def _create_alternatives(self, msg):
encoding = self.encoding or settings.DEFAULT_CHARSET
if self.alternatives:
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.alternative_subtype, encoding=encoding)
if self.body:
msg.attach(body_msg)
for alternative in self.alternatives:
msg.attach(self._create_mime_attachment(*alternative))
return msg
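# Illustrative usage sketch (not part of the original module): assuming Django
# settings provide an email backend, an EmailMultiAlternatives can carry a
# plain-text body, an HTML alternative and a simple attachment. The addresses
# and file content below are made up.
def _example_send_multipart_email():
    message = EmailMultiAlternatives(
        subject='Weekly report',
        body='Plain-text fallback body.',
        from_email='noreply@example.com',
        to=['reader@example.com'],
    )
    message.attach_alternative('<h1>Weekly report</h1>', 'text/html')
    message.attach('report.csv', 'col1,col2\n1,2\n', 'text/csv')
    # send() returns the number of messages delivered (0 or 1 here).
    return message.send(fail_silently=True)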
|
|
""" Contains the Mode and ModeTimers parent classes"""
# modes.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# Documentation and more info at http://missionpinball.com/mpf
import logging
from mpf.system.timing import Timing, Timer
from mpf.system.tasks import DelayManager
from mpf.system.config import Config
from mpf.system.mode_controller import RemoteMethod
# todo
# override player var
# override event strings
class Mode(object):
"""Parent class for in-game mode code."""
def __init__(self, machine, config, name, path):
self.machine = machine
self.config = config
self.name = name.lower()
self.path = path
self.log = logging.getLogger('Mode.' + name)
self.delay = DelayManager()
self.priority = 0
self._active = False
self._mode_start_wait_queue = None
self.stop_methods = list()
self.timers = dict()
self.start_callback = None
self.stop_callback = None
self.event_handlers = set()
self.switch_handlers = list()
self.mode_start_kwargs = dict()
self.mode_stop_kwargs = dict()
self.mode_devices = set()
self.player = None
'''Reference to the current player object.'''
self._validate_mode_config()
self.configure_mode_settings(config.get('mode', dict()))
self.auto_stop_on_ball_end = self.config['mode']['stop_on_ball_end']
'''Controls whether this mode is stopped when the ball ends,
regardless of its stop_events settings.
'''
self.restart_on_next_ball = self.config['mode']['restart_on_next_ball']
'''Controls whether this mode will restart on the next ball. This only
works if the mode was running when the ball ended. It's tracked per-
player in the '_restart_modes_on_next_ball' untracked player variable.
'''
for asset_manager in self.machine.asset_managers.values():
config_data = self.config.get(asset_manager.config_section, dict())
self.config[asset_manager.config_section] = (
asset_manager.register_assets(config=config_data,
mode_path=self.path))
# Call registered remote loader methods
for item in self.machine.mode_controller.loader_methods:
if (item.config_section and
item.config_section in self.config and
self.config[item.config_section]):
item.method(config=self.config[item.config_section],
mode_path=self.path,
**item.kwargs)
elif not item.config_section:
item.method(config=self.config, mode_path=self.path,
**item.kwargs)
self.mode_init()
def __repr__(self):
return '<Mode.{}>'.format(self.name)
@property
def active(self):
return self._active
@active.setter
def active(self, active):
if self._active != active:
self._active = active
self.machine.mode_controller._active_change(self, self._active)
def configure_mode_settings(self, config):
"""Processes this mode's configuration settings from a config
dictionary.
"""
self.config['mode'] = self.machine.config_processor.process_config2(
config_spec='mode', source=config, section_name='mode')
for event in self.config['mode']['start_events']:
self.machine.events.add_handler(event=event, handler=self.start,
priority=self.config['mode']['priority'] +
self.config['mode']['start_priority'])
def _validate_mode_config(self):
for section in self.machine.config['mpf']['mode_config_sections']:
this_section = self.config.get(section, None)
if this_section:
if type(this_section) is dict:
for device, settings in this_section.iteritems():
self.config[section][device] = (
self.machine.config_processor.process_config2(
section, settings))
else:
self.config[section] = (
self.machine.config_processor.process_config2(section,
this_section))
def start(self, priority=None, callback=None, **kwargs):
"""Starts this mode.
Args:
priority: Integer value of what you want this mode to run at. If you
don't specify one, it will use the "Mode: priority" setting from
this mode's configuration file.
**kwargs: Catch-all since this mode might start from events with
who-knows-what keyword arguments.
Warning: You can safely call this method, but do not override it in your
mode code. If you want to write your own mode code by subclassing Mode,
put whatever code you want to run when this mode starts in the
mode_start method which will be called automatically.
"""
self.log.debug("Received request to start")
if self._active:
self.log.debug("Mode is already active. Aborting start")
return
if self.config['mode']['use_wait_queue'] and 'queue' in kwargs:
self.log.debug("Registering a mode start wait queue")
self._mode_start_wait_queue = kwargs['queue']
self._mode_start_wait_queue.wait()
if type(priority) is int:
self.priority = priority
else:
self.priority = self.config['mode']['priority']
self.start_event_kwargs = kwargs
self.log.info('Mode Starting. Priority: %s', self.priority)
self._create_mode_devices()
self.log.debug("Registering mode_stop handlers")
# register mode stop events
if 'stop_events' in self.config['mode']:
for event in self.config['mode']['stop_events']:
# stop priority is +1 so that if two modes of the same priority
# start and stop on the same event, the stopping mode stops before
# the starting mode starts
self.add_mode_event_handler(event=event, handler=self.stop,
priority=self.priority + 1 +
self.config['mode']['stop_priority'])
self.start_callback = callback
if 'timers' in self.config:
self._setup_timers()
self.log.debug("Calling mode_start handlers")
for item in self.machine.mode_controller.start_methods:
if item.config_section in self.config or not item.config_section:
self.stop_methods.append(
item.method(config=self.config.get(item.config_section,
self.config),
priority=self.priority,
mode=self,
**item.kwargs))
self._setup_device_control_events()
self.machine.events.post_queue(event='mode_' + self.name + '_starting',
callback=self._started)
def _started(self):
# Called after the mode_<name>_starting queue event has finished.
self.log.debug('Mode Started. Priority: %s', self.priority)
self.active = True
self._start_timers()
self.machine.events.post('mode_' + self.name + '_started',
callback=self._mode_started_callback)
def _mode_started_callback(self, **kwargs):
# Called after the mode_<name>_started queue event has finished.
self.mode_start(**self.start_event_kwargs)
self.start_event_kwargs = dict()
if self.start_callback:
self.start_callback()
self.log.debug('Mode Start process complete.')
def stop(self, callback=None, **kwargs):
"""Stops this mode.
Args:
**kwargs: Catch-all since this mode might be stopped by events with
who-knows-what keyword arguments.
Warning: You can safely call this method, but do not override it in your
mode code. If you want to write your own mode code by subclassing Mode,
put whatever code you want to run when this mode stops in the
mode_stop method which will be called automatically.
"""
if not self._active:
return
self.mode_stop_kwargs = kwargs
self.log.debug('Mode Stopping.')
self._remove_mode_switch_handlers()
self.stop_callback = callback
self._kill_timers()
self.delay.clear()
# self.machine.events.remove_handler(self.stop)
# todo is this ok here? Or should we only remove ones that we know this
# mode added?
self.machine.events.post_queue(event='mode_' + self.name + '_stopping',
callback=self._stopped)
def _stopped(self):
self.log.debug('Mode Stopped.')
self.priority = 0
self.active = False
for item in self.stop_methods:
try:
item[0](item[1])
except TypeError:
try:
item()
except TypeError:
pass
self.stop_methods = list()
self.machine.events.post('mode_' + self.name + '_stopped',
callback=self._mode_stopped_callback)
if self._mode_start_wait_queue:
self.log.debug("Clearing wait queue")
self._mode_start_wait_queue.clear()
self._mode_start_wait_queue = None
def _mode_stopped_callback(self, **kwargs):
self._remove_mode_event_handlers()
self._remove_mode_devices()
self.mode_stop(**self.mode_stop_kwargs)
self.mode_stop_kwargs = dict()
if self.stop_callback:
self.stop_callback()
def _create_mode_devices(self):
# Creates new devices that are specified in a mode config that haven't
# been created in the machine-wide config
self.log.debug("Scanning config for mode-based devices")
for collection_name, device_class in (
self.machine.device_manager.device_classes.iteritems()):
if device_class.config_section in self.config:
for device, settings in (
self.config[device_class.config_section].iteritems()):
collection = getattr(self.machine, collection_name)
if device not in collection: # no existing device, create now
self.log.debug("Creating mode-based device: %s", device)
# TODO this config is already validated, so add something
# so it doesn't validate it again?
self.machine.device_manager.create_devices(
collection.name, {device: settings}, validate=False)
# change device from str to object
device = collection[device]
# Track that this device was added via this mode so we
# can remove it when the mode ends.
self.mode_devices.add(device)
# This lets the device know it was created by a mode
# instead of machine-wide, as some devices want to do
# certain things here. We also pass the player object in
# case this device wants to do something with that too.
device.device_added_to_mode(mode=self,
player=self.player)
def _remove_mode_devices(self):
for device in self.mode_devices:
device.remove()
self.mode_devices = set()
def _setup_device_control_events(self):
# registers mode handlers for control events for all devices specified
# in this mode's config (not just newly-created devices)
self.log.debug("Scanning mode-based config for device control_events")
device_list = set()
for event, method, delay, device in (
self.machine.device_manager.get_device_control_events(
self.config)):
try:
event, priority = event.split('|')
except ValueError:
priority = 0
self.add_mode_event_handler(
event=event,
handler=self._control_event_handler,
priority=self.priority + 2 + int(priority),
callback=method,
ms_delay=delay)
device_list.add(device)
for device in device_list:
device.control_events_in_mode(self)
def _control_event_handler(self, callback, ms_delay=0, **kwargs):
self.log.debug("_control_event_handler: callback: %s,", callback)
if ms_delay:
self.delay.add(name=callback, ms=ms_delay, callback=callback,
mode=self)
else:
callback(mode=self)
def add_mode_event_handler(self, event, handler, priority=1, **kwargs):
"""Registers an event handler which is automatically removed when this
mode stops.
This method is similar to the Event Manager's add_handler() method,
except this method automatically unregisters the handlers when the mode
ends.
Args:
event: String name of the event you're adding a handler for. Since
events are text strings, they don't have to be pre-defined.
handler: The method that will be called when the event is fired.
priority: An arbitrary integer value that defines what order the
handlers will be called in. The default is 1, so if you have a
handler that you want to be called first, add it here with a
priority of 2. (Or 3 or 10 or 100000.) The absolute values don't
matter, only the relative order: handlers are called from highest to
lowest priority (i.e. priority 100 is called before priority 1).
**kwargs: Any additional keyword/argument pairs entered here
will be attached to the handler and called whenever that handler
is called. Note these are in addition to kwargs that could be
passed as part of the event post. If there's a conflict, the
event-level ones will win.
Returns:
A GUID reference to the handler which you can use to later remove
the handler via ``remove_handler_by_key``. You normally don't need to
remove the handler yourself, since the whole point of this method is
that handlers are removed automatically when the mode stops. It's also
fine to add a handler via this method and then remove it manually
before the mode stops.
"""
key = self.machine.events.add_handler(event, handler, priority,
mode=self, **kwargs)
self.event_handlers.add(key)
return key
def _remove_mode_event_handlers(self):
for key in self.event_handlers:
self.machine.events.remove_handler_by_key(key)
self.event_handlers = set()
def _remove_mode_switch_handlers(self):
for handler in self.switch_handlers:
self.machine.switch_controller.remove_switch_handler(
switch_name=handler['switch_name'],
callback=handler['callback'],
state=handler['state'],
ms=handler['ms'])
self.switch_handlers = list()
def _setup_timers(self):
# config is localized
for timer, settings in self.config['timers'].iteritems():
self.timers[timer] = ModeTimer(machine=self.machine, mode=self,
name=timer, config=settings)
return self._kill_timers
def _start_timers(self):
for timer in self.timers.values():
if timer.running:
timer.start()
def _kill_timers(self):
for timer in self.timers.values():
timer.kill()
self.timers = dict()
def mode_init(self):
"""User-overrideable method which will be called when this mode
initializes as part of the MPF boot process.
"""
pass
def mode_start(self, **kwargs):
"""User-overrideable method which will be called whenever this mode
starts (i.e. whenever it becomes active).
"""
pass
def mode_stop(self, **kwargs):
"""User-overrideable method which will be called whenever this mode
stops (i.e. whenever it becomes inactive).
"""
pass
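# Illustrative sketch (not part of the original file): game code would normally
# subclass Mode and override the mode_* hooks rather than start()/stop(). The
# event names below are made up for the example.
class ExampleBonusMode(Mode):
    """Posts a bonus event each time a (hypothetical) target switch event fires."""
    def mode_start(self, **kwargs):
        # Handlers registered this way are removed automatically on mode stop.
        self.add_mode_event_handler('sw_bonus_target', self._award_bonus)
    def _award_bonus(self, **kwargs):
        # Real code would typically update a player variable here instead.
        self.machine.events.post('example_bonus_awarded', points=1000)
    def mode_stop(self, **kwargs):
        self.log.debug("Example bonus mode stopped")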
class ModeTimer(object):
"""Parent class for a mode timer.
Args:
machine: The main MPF MachineController object.
mode: The parent mode object that this timer belongs to.
name: The string name of this timer.
config: A Python dictionary which contains the configuration settings
for this timer.
"""
def __init__(self, machine, mode, name, config):
self.machine = machine
self.mode = mode
self.name = name
self.config = config
self.tick_var = self.mode.name + '_' + self.name + '_tick'
self.mode.player[self.tick_var] = 0
self.running = False
self.start_value = 0
self.restart_on_complete = False
self._ticks = 0
self.end_value = None
self.ticks_remaining = 0
self.max_value = None
self.direction = 'up'
self.tick_secs = 1
self.timer = None
self.bcp = False
self.event_keys = set()
self.delay = DelayManager()
self.log = None
self.debug = False
if 'start_value' in self.config:
self.start_value = self.config['start_value']
else:
self.start_value = 0
if 'start_running' in self.config and self.config['start_running']:
self.running = True
if 'end_value' in self.config:
self.end_value = self.config['end_value']
if 'control_events' in self.config and self.config['control_events']:
if type(self.config['control_events']) is dict:
self.config['control_events'] = [self.config['control_events']]
else:
self.config['control_events'] = list()
if ('direction' in self.config and
self.config['direction'].lower() == 'down'):
self.direction = 'down'
if not self.end_value:
self.end_value = 0 # need it to be 0 not None
if 'tick_interval' in self.config:
self.tick_secs = Timing.string_to_secs(self.config['tick_interval'])
if 'max_value' in self.config:
self.max_value = self.config['max_value']
if ('restart_on_complete' in self.config and
self.config['restart_on_complete']):
self.restart_on_complete = True
if 'bcp' in self.config and self.config['bcp']:
self.bcp = True
if 'debug' in self.config and self.config['debug']:
self.debug = True
# Create the logger only when debugging is enabled; the debug output
# below is guarded by `if self.debug:` / `if self.log:` checks.
self.log = logging.getLogger('ModeTimer.' + self.name)
self.log.debug("Enabling Debug Logging")
self.mode.player[self.tick_var] = self.start_value
if self.log:
self.log.debug("----------- Initial Values -----------")
self.log.debug("running: %s", self.running)
self.log.debug("start_value: %s", self.start_value)
self.log.debug("restart_on_complete: %s", self.restart_on_complete)
self.log.debug("_ticks: %s", self._ticks)
self.log.debug("end_value: %s", self.end_value)
self.log.debug("ticks_remaining: %s", self.ticks_remaining)
self.log.debug("max_value: %s", self.max_value)
self.log.debug("direction: %s", self.direction)
self.log.debug("tick_secs: %s", self.tick_secs)
self.log.debug("--------------------------------------")
self._setup_control_events(self.config['control_events'])
def _setup_control_events(self, event_list):
if self.debug:
self.log.debug("Setting up control events")
for entry in event_list:
# Reset kwargs for each entry so a value from a previous control event
# doesn't leak into handlers that take no extra arguments.
kwargs = None
if entry['action'] == 'add':
handler = self.add_time
kwargs = {'timer_value': entry['value']}
elif entry['action'] == 'subtract':
handler = self.subtract_time
kwargs = {'timer_value': entry['value']}
elif entry['action'] == 'jump':
handler = self.set_current_time
kwargs = {'timer_value': entry['value']}
elif entry['action'] == 'start':
handler = self.start
elif entry['action'] == 'stop':
handler = self.stop
elif entry['action'] == 'reset':
handler = self.reset
elif entry['action'] == 'restart':
handler = self.restart
elif entry['action'] == 'pause':
handler = self.pause
kwargs = {'timer_value': entry['value']}
elif entry['action'] == 'set_tick_interval':
handler = self.set_tick_interval
kwargs = {'timer_value': entry['value']}
elif entry['action'] == 'change_tick_interval':
handler = self.change_tick_interval
kwargs = {'change': entry['value']}
if kwargs:
self.event_keys.add(self.machine.events.add_handler(
entry['event'], handler, **kwargs))
else:
self.event_keys.add(self.machine.events.add_handler(
entry['event'], handler))
def _remove_control_events(self):
if self.debug:
self.log.debug("Removing control events")
for key in self.event_keys:
self.machine.events.remove_handler_by_key(key)
def reset(self, **kwargs):
"""Resets this timer based to the starting value that's already been
configured. Does not start or stop the timer.
Args:
**kwargs: Not used in this method. Only exists since this method is
often registered as an event handler which may contain
additional keyword arguments.
"""
if self.debug:
self.log.debug("Resetting timer. New value: %s", self.start_value)
self.set_current_time(self.start_value)
def start(self, **kwargs):
"""Starts this timer based on the starting value that's already been
configured. Use set_current_time() if you want to set the starting time
value.
Args:
**kwargs: Not used in this method. Only exists since this method is
often registered as an event handler which may contain
additional keyword arguments.
"""
if self.debug:
self.log.debug("Starting Timer.")
if self._check_for_done():
return
self.running = True
self.delay.remove('pause')
self._create_system_timer()
self.machine.events.post('timer_' + self.name + '_started',
ticks=self.mode.player[self.tick_var],
ticks_remaining=self.ticks_remaining)
if self.bcp:
self.machine.bcp.send('timer', name=self.name, action='started',
ticks=self.mode.player[self.tick_var],
ticks_remaining=self.ticks_remaining)
def restart(self, **kwargs):
"""Restarts the timer by resetting it and then starting it. Essentially
this is just a reset() then a start()
Args:
**kwargs: Not used in this method. Only exists since this method is
often registered as an event handler which may contain
additional keyword arguments.
"""
self.reset()
self.start()
def stop(self, **kwargs):
"""Stops the timer and posts the 'timer_<name>_stopped' event.
Args:
**kwargs: Not used in this method. Only exists since this method is
often registered as an event handler which may contain
additional keyword arguments.
"""
if self.debug:
self.log.debug("Stopping Timer")
self.delay.remove('pause')
self.running = False
self._remove_system_timer()
self.machine.events.post('timer_' + self.name + '_stopped',
ticks=self.mode.player[self.tick_var],
ticks_remaining=self.ticks_remaining)
if self.bcp:
self.machine.bcp.send('timer', name=self.name, action='stopped',
ticks=self.mode.player[self.tick_var],
ticks_remaining=self.ticks_remaining)
def pause(self, timer_value=0, **kwargs):
"""Pauses the timer and posts the 'timer_<name>_paused' event
Args:
timer_value: How many seconds you want to pause the timer for. Note
that this pause time is real-world seconds and does not take
into consideration this timer's tick interval.
**kwargs: Not used in this method. Only exists since this method is
often registered as an event handler which may contain
additional keyword arguments.
"""
if self.debug:
self.log.debug("Pausing Timer for %s secs", timer_value)
self.running = False
pause_secs = timer_value
self._remove_system_timer()
self.machine.events.post('timer_' + self.name + '_paused',
ticks=self.mode.player[self.tick_var],
ticks_remaining=self.ticks_remaining)
if self.bcp:
self.machine.bcp.send('timer', name=self.name, action='paused',
ticks=self.mode.player[self.tick_var],
ticks_remaining=self.ticks_remaining)
if pause_secs > 0:
# The pause value documented above is in real-world seconds, while
# DelayManager delays are specified in milliseconds.
self.delay.add(name='pause', ms=pause_secs * 1000, callback=self.start)
def timer_complete(self):
"""Automatically called when this timer completes. Posts the
'timer_<name>_complete' event. Can be manually called to mark this timer
as complete.
"""
if self.debug:
self.log.debug("Timer Complete")
self.stop()
if self.bcp: # must be before the event post in case it stops the mode
self.machine.bcp.send('timer', name=self.name, action='complete',
ticks=self.mode.player[self.tick_var],
ticks_remaining=self.ticks_remaining)
self.machine.events.post('timer_' + self.name + '_complete',
ticks=self.mode.player[self.tick_var],
ticks_remaining=self.ticks_remaining)
if self.restart_on_complete:
if self.debug:
self.log.debug("Restart on complete: True")
self.reset()
self.start()
def _timer_tick(self):
# Automatically called by the system timer each tick
if self.debug:
self.log.debug("Timer Tick")
if not self.running:
if self.debug:
self.log.debug("Timer is not running. Will remove.")
self._remove_system_timer()
return
if self.direction == 'down':
self.mode.player[self.tick_var] -= 1
else:
self.mode.player[self.tick_var] += 1
if not self._check_for_done():
self.machine.events.post('timer_' + self.name + '_tick',
ticks=self.mode.player[self.tick_var],
ticks_remaining=self.ticks_remaining)
if self.debug:
self.log.debug("Ticks: %s, Remaining: %s",
self.mode.player[self.tick_var],
self.ticks_remaining)
if self.bcp:
self.machine.bcp.send('timer', name=self.name, action='tick',
ticks=self.mode.player[self.tick_var],
ticks_remaining=self.ticks_remaining)
def add_time(self, timer_value, **kwargs):
"""Adds ticks to this timer.
Args:
timer_value: The number of ticks you want to add to this timer's
current value.
**kwargs: Not used in this method. Only exists since this method is
often registered as an event handler which may contain
additional keyword arguments.
"""
# Remember the previous value so the posted ticks_added reflects the
# amount actually added (the new value may be capped by max_value).
previous_value = self.mode.player[self.tick_var]
new_value = previous_value + timer_value
if self.max_value and new_value > self.max_value:
new_value = self.max_value
self.mode.player[self.tick_var] = new_value
ticks_added = new_value - previous_value
self.machine.events.post('timer_' + self.name + '_time_added',
ticks=self.mode.player[self.tick_var],
ticks_added=ticks_added,
ticks_remaining=self.ticks_remaining)
if self.bcp:
self.machine.bcp.send('timer', name=self.name, action='time_added',
ticks=self.mode.player[self.tick_var],
ticks_added=ticks_added,
ticks_remaining=self.ticks_remaining)
self._check_for_done()
def subtract_time(self, timer_value, **kwargs):
"""Subracts ticks from this timer.
Args:
timer_value: The numebr of ticks you want to subtract from this
timer's current value.
**kwargs: Not used in this method. Only exists since this method is
often registered as an event handler which may contain
additional keyword arguments.
"""
ticks_subtracted = timer_value
self.mode.player[self.tick_var] -= ticks_subtracted
self.machine.events.post('timer_' + self.name + '_time_subtracted',
ticks=self.mode.player[self.tick_var],
ticks_subtracted=ticks_subtracted,
ticks_remaining=self.ticks_remaining)
if self.bcp:
self.machine.bcp.send('timer', name=self.name,
action='time_subtracted',
ticks=self.mode.player[self.tick_var],
ticks_subtracted=ticks_subtracted,
ticks_remaining=self.ticks_remaining)
self._check_for_done()
def _check_for_done(self):
# Checks to see if this timer is done. Automatically called anytime the
# timer's value changes.
if self.debug:
self.log.debug("Checking to see if timer is done. Ticks: %s, End "
"Value: %s, Direction: %s",
self.mode.player[self.tick_var], self.end_value,
self.direction)
if (self.direction == 'up' and self.end_value is not None and
self.mode.player[self.tick_var] >= self.end_value):
self.timer_complete()
return True
elif (self.direction == 'down' and
self.mode.player[self.tick_var] <= self.end_value):
self.timer_complete()
return True
if self.end_value is not None:
self.ticks_remaining = abs(self.end_value -
self.mode.player[self.tick_var])
if self.debug:
self.log.debug("Timer is not done")
return False
def _create_system_timer(self):
# Creates the system timer which drives this mode timer's tick method.
self._remove_system_timer()
self.timer = Timer(callback=self._timer_tick, frequency=self.tick_secs)
self.machine.timing.add(self.timer)
def _remove_system_timer(self):
# Removes the system timer associated with this mode timer.
if self.timer:
self.machine.timing.remove(self.timer)
self.timer = None
def change_tick_interval(self, change=0.0, **kwargs):
"""Changes the interval for each "tick" of this timer.
Args:
change: Float or int multiplier applied to this timer's current tick
interval. Note the current interval is multiplied by this value, so
use a value between 0 and 1 to shorten the tick interval. To set an
absolute value, use the set_tick_interval() method.
**kwargs: Not used in this method. Only exists since this method is
often registered as an event handler which may contain
additional keyword arguments.
"""
self.tick_secs *= change
self._create_system_timer()
def set_tick_interval(self, timer_value, **kwargs):
"""Sets the number of seconds between ticks for this timer. This is an
absolute setting. To apply a change to the current value, use the
change_tick_interval() method.
Args:
timer_value: The new number of seconds between each tick of this
timer. This value should always be positive.
**kwargs: Not used in this method. Only exists since this method is
often registered as an event handler which may contain
additional keyword arguments.
"""
self.tick_secs = abs(timer_value)
self._create_system_timer()
def set_current_time(self, timer_value, **kwargs):
"""Sets the current amount of time of this timer. This value is
expressed in "ticks" since the interval per tick can be something other
than 1 second).
Args:
timer_value: Integer of the current value you want this timer to be.
**kwargs: Not used in this method. Only exists since this method is
often registered as an event handler which may contain
additional keyword arguments.
"""
self.mode.player[self.tick_var] = int(timer_value)
if self.max_value and self.mode.player[self.tick_var] > self.max_value:
self.mode.player[self.tick_var] = self.max_value
def kill(self):
"""Stops this timer and also removes all the control events.
"""
self.stop()
self._remove_control_events()
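# Illustrative settings sketch (not part of the original file): these are the
# keys ModeTimer.__init__ actually reads from its per-timer config dict (as
# passed in by Mode._setup_timers). The event names are made up, and the '1s'
# interval string assumes a format Timing.string_to_secs() accepts.
EXAMPLE_TIMER_SETTINGS = {
    'start_value': 30,
    'end_value': 0,
    'direction': 'down',              # count down toward end_value
    'tick_interval': '1s',            # parsed via Timing.string_to_secs()
    'max_value': 60,
    'start_running': False,
    'restart_on_complete': False,
    'control_events': [
        {'event': 'timer_hurry_up', 'action': 'subtract', 'value': 5},
        {'event': 'ball_started', 'action': 'start'},
    ],
}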
# The MIT License (MIT)
# Copyright (c) 2013-2015 Brian Madden and Gabe Knuth
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
|
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_serialization import jsonutils
import webob
from cinder import context
from cinder import exception
from cinder import test
from cinder.tests.unit.api import fakes
def app():
# no auth, just let environ['cinder.context'] pass through
api = fakes.router.APIRouter()
mapper = fakes.urlmap.URLMap()
mapper['/v2'] = api
return mapper
def db_service_get_by_host_and_topic(context, host, topic):
"""Replacement for db.service_get_by_host_and_topic.
We stub the db.service_get_by_host_and_topic method to return something
for a specific host, and raise an exception for anything else. We don't
use the returned data (the code under test just uses the call to check
for the existence of a host, so the content returned doesn't matter).
"""
if host == 'host_ok':
return {}
raise exception.ServiceNotFound(service_id=host)
# Some of the tests check that volume types are correctly validated during a
# volume manage operation. This data structure represents an existing volume
# type.
fake_vt = {'id': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'name': 'good_fakevt'}
def vt_get_volume_type_by_name(context, name):
"""Replacement for cinder.volume.volume_types.get_volume_type_by_name.
Overrides cinder.volume.volume_types.get_volume_type_by_name to return
the volume type based on inspection of our fake structure, rather than
going to the Cinder DB.
"""
if name == fake_vt['name']:
return fake_vt
raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
def vt_get_volume_type(context, vt_id):
"""Replacement for cinder.volume.volume_types.get_volume_type.
Overrides cinder.volume.volume_types.get_volume_type to return the
volume type based on inspection of our fake structure, rather than going
to the Cinder DB.
"""
if vt_id == fake_vt['id']:
return fake_vt
raise exception.VolumeTypeNotFound(volume_type_id=vt_id)
def api_manage(*args, **kwargs):
"""Replacement for cinder.volume.api.API.manage_existing.
Overrides cinder.volume.api.API.manage_existing to return some fake volume
data structure, rather than initiating a real volume managing.
Note that we don't try to replicate any passed-in information (e.g. name,
volume type) in the returned structure.
"""
vol = {
'status': 'creating',
'display_name': 'fake_name',
'availability_zone': 'nova',
'tenant_id': 'fake',
'created_at': 'DONTCARE',
'id': 'ffffffff-0000-ffff-0000-ffffffffffff',
'volume_type': None,
'snapshot_id': None,
'user_id': 'fake',
'launched_at': 'DONTCARE',
'size': 0,
'attach_status': 'detached',
'volume_type_id': None}
return vol
@mock.patch('cinder.db.service_get_by_host_and_topic',
db_service_get_by_host_and_topic)
@mock.patch('cinder.volume.volume_types.get_volume_type_by_name',
vt_get_volume_type_by_name)
@mock.patch('cinder.volume.volume_types.get_volume_type',
vt_get_volume_type)
class VolumeManageTest(test.TestCase):
"""Test cases for cinder/api/contrib/volume_manage.py
The API extension adds a POST /os-volume-manage API that is passed a cinder
host name, and a driver-specific reference parameter. If everything
is passed correctly, then the cinder.volume.api.API.manage_existing method
is invoked to manage an existing storage object on the host.
In this set of test cases, we are ensuring that the code correctly parses
the request structure and raises the correct exceptions when things are not
right, and calls down into cinder.volume.api.API.manage_existing with the
correct arguments.
"""
def setUp(self):
super(VolumeManageTest, self).setUp()
def _get_resp(self, body):
"""Helper to execute an os-volume-manage API call."""
req = webob.Request.blank('/v2/fake/os-volume-manage')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.environ['cinder.context'] = context.RequestContext('admin',
'fake',
True)
req.body = jsonutils.dumps(body)
res = req.get_response(app())
return res
@mock.patch('cinder.volume.api.API.manage_existing', wraps=api_manage)
def test_manage_volume_ok(self, mock_api_manage):
"""Test successful manage volume execution.
Tests for correct operation when valid arguments are passed in the
request body. We ensure that cinder.volume.api.API.manage_existing got
called with the correct arguments, and that we return the correct HTTP
code to the caller.
"""
body = {'volume': {'host': 'host_ok',
'ref': 'fake_ref'}}
res = self._get_resp(body)
self.assertEqual(202, res.status_int, res)
# Check that the manage API was called with the correct arguments.
self.assertEqual(1, mock_api_manage.call_count)
args = mock_api_manage.call_args[0]
self.assertEqual(body['volume']['host'], args[1])
self.assertEqual(body['volume']['ref'], args[2])
def test_manage_volume_missing_host(self):
"""Test correct failure when host is not specified."""
body = {'volume': {'ref': 'fake_ref'}}
res = self._get_resp(body)
self.assertEqual(400, res.status_int)
def test_manage_volume_missing_ref(self):
"""Test correct failure when the ref is not specified."""
body = {'volume': {'host': 'host_ok'}}
res = self._get_resp(body)
self.assertEqual(400, res.status_int)
@mock.patch('cinder.volume.api.API.manage_existing', api_manage)
def test_manage_volume_volume_type_by_uuid(self):
"""Tests for correct operation when a volume type is specified by ID.
We wrap cinder.volume.api.API.manage_existing so that managing is not
actually attempted.
"""
body = {'volume': {'host': 'host_ok',
'ref': 'fake_ref',
'volume_type':
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'}}
res = self._get_resp(body)
self.assertEqual(202, res.status_int, res)
@mock.patch('cinder.volume.api.API.manage_existing', api_manage)
def test_manage_volume_volume_type_by_name(self):
"""Tests for correct operation when a volume type is specified by name.
We wrap cinder.volume.api.API.manage_existing so that managing is not
actually attempted.
"""
body = {'volume': {'host': 'host_ok',
'ref': 'fake_ref',
'volume_type': 'good_fakevt'}}
res = self._get_resp(body)
self.assertEqual(202, res.status_int, res)
def test_manage_volume_bad_volume_type_by_uuid(self):
"""Test failure on nonexistent volume type specified by ID."""
body = {'volume': {'host': 'host_ok',
'ref': 'fake_ref',
'volume_type':
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'}}
res = self._get_resp(body)
self.assertEqual(404, res.status_int, res)
def test_manage_volume_bad_volume_type_by_name(self):
"""Test failure on nonexistent volume type specified by name."""
body = {'volume': {'host': 'host_ok',
'ref': 'fake_ref',
'volume_type': 'bad_fakevt'}}
res = self._get_resp(body)
self.assertEqual(404, res.status_int, res)
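# Illustrative request-body sketch (not part of the original tests): the shape
# of the os-volume-manage POST body exercised above. 'host' and 'ref' are
# required; 'volume_type' is optional and may be a type name or a UUID.
EXAMPLE_MANAGE_BODY = {
    'volume': {
        'host': 'host_ok',
        'ref': 'fake_ref',
        'volume_type': 'good_fakevt',
    },
}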
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2007-2015, GoodData(R) Corporation. All rights reserved
import ast
import datetime
from flask_restful import abort
import json
import os
import re
import socket
def generate_unique_file():
return datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S-%f')
TMP_DIR = (os.path.dirname(os.path.realpath(__file__)) + '/.tmp')
PROCESSES = [0]  # index 0 is a placeholder; real processes are appended starting at /processes/1
HOSTNAME = socket.gethostname()
def rest_api_response(k, **kwargs):
if not os.path.exists(TMP_DIR):
os.makedirs(TMP_DIR)
plugin_list = ['Hostname', 'Uptime', 'Uname']
url = 'http://%s:8086' % HOSTNAME
result = {
url + '/': about_response,
url + '/processes': dict(),
url + '/plugins': plugins_response,
}
if k in result.keys():
if k == url + '/processes' and kwargs.get('data'):
raw_data = kwargs.get('data')
# Accept both bytes and str payloads before parsing the literal.
if isinstance(raw_data, bytes):
raw_data = raw_data.decode('utf-8')
data = ast.literal_eval(raw_data)
if any(x in plugin_list for x in data['process']['plugins']):
location = len(PROCESSES)
process = {
"href": "processes/%s" % location,
"plugins": [x for x in data['process']['plugins']]
}
PROCESSES.append(process)
return_value = {
'asyncTask': {
'link': {
'poll': '/processes/%s' % location
}
}
}
else:
return abort(500)
elif k == url + '/processes' and not kwargs.get('data'):
return_value = PROCESSES if len(PROCESSES) > 1 else dict()
else:
return_value = result[k] # Return static result
elif re.search('%s/processes/[0-9]+' % url, k):
if k == '%s/processes/0' % url:
return abort(404)
proc_number = int(re.search('%s/processes/([0-9]+)' % url, k).group(1))
return_value = {
"plugins": {
"items": [force_plugin_run_response[x]
for x in PROCESSES[proc_number]['plugins']]
}
}
else:
return abort(404)  # Return 404 if k doesn't match a predefined pattern
fp = '%s/%s.tmp' % (TMP_DIR, generate_unique_file())
with open(fp, 'w') as tmp_file:
tmp_file.write(json.dumps(return_value))
return open(fp)
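# Illustrative usage sketch (not part of the original fixtures):
# rest_api_response() writes the canned JSON body to a temp file and returns an
# open handle to it, so callers read and parse the content themselves.
def _example_read_about():
    response_file = rest_api_response('http://%s:8086/' % HOSTNAME)
    try:
        return json.loads(response_file.read())
    finally:
        response_file.close()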
about_response = {
'about': {
'host': HOSTNAME,
'links': [
{
'href': '/plugins',
'methods': 'GET',
'rel': 'plugins',
'title': 'Show details about all plugins'
},
{
'href': '/processes',
'methods': 'GET, POST',
'rel': 'processes',
'title': 'Force plugin run'
}
],
'title': 'Smoker daemon API'
}
}
links = {
'processes': {
'href': '/processes',
'methods': 'GET, POST',
'rel': 'processes',
'title': 'Force plugin run'
},
'plugins': {
'href': '/plugins',
'methods': 'GET',
'rel': 'plugins',
'title': 'Show details about all plugins'
}
}
force_plugin_run_response = {
'Uname': {
'plugin': {
'name': 'Uname',
'links': {
'self': '/plugins/Uname'
},
'nextRun': '2016-05-31T11:11:06.126684',
'forcedResult': {
'status': 'WARN',
'lastRun': '2016-05-31T13:11:33.684257',
'forced': True,
'messages': {
'info': [],
'warn': [
'Skipped because of maintenance in progress'
],
'error': []
},
'componentResults': None,
'action': None
},
'lastResult': {
'status': 'WARN',
'lastRun': '2016-05-31T13:11:33.684257',
'forced': True,
'messages': {
'info': [],
'warn': [
'Skipped because of maintenance in progress'
],
'error': []
},
'componentResults': None,
'action': None
},
'parameters': {
'Category': 'system',
'Parser': None,
'uid': 'default',
'Interval': 1,
'Module': 'smoker.server.plugins.uname',
'MaintenanceLock': '/tmp/smokerd.lock',
'gid': 'default',
'Command': None,
'Timeout': 30,
'Action': None,
'Template': None,
'History': 10
}
}
},
'Uptime': {
'plugin': {
'name': 'Uptime',
'links': {
'self': '/plugins/Uptime'
},
'nextRun': '2016-05-31T11:11:06.126856',
'forcedResult': {
'status': 'OK',
'lastRun': '2016-05-31T13:12:10.554044',
'forced': True,
'messages': {
'info': [
'13:12:10 up 7 days, 2:08, 1 user,'
' load average: 0.74, 0.44, 0.36'
],
'warn': [],
'error': []
},
'componentResults': None,
'action': None
},
'lastResult': {
'status': 'OK',
'lastRun': '2016-05-31T13:12:10.554044',
'forced': True,
'messages': {
'info': [
'13:12:10 up 7 days, 2:08, 1 user,'
' load average: 0.74, 0.44, 0.36'
],
'warn': [],
'error': []
},
'componentResults': None,
'action': None
},
'parameters': {
'Category': 'monitoring',
'Parser': None,
'uid': 'default',
'Interval': 1,
'Module': None,
'MaintenanceLock': None,
'gid': 'default',
'Command': 'uptime',
'Timeout': 30,
'Action': None,
'Template': None,
'History': 10
}
}
},
'Hostname': {
'plugin': {
'name': 'Hostname',
'links': {
'self': '/plugins/Hostname'
},
'nextRun': '2016-05-31T11:11:06.127003',
'forcedResult': {
'status': 'ERROR',
'lastRun': '2016-05-31T13:12:46.880849',
'forced': True,
'messages': {
'info': [],
'warn': [],
'error': [
'/bin/sh: 1: InvalidCommand: not found'
]
},
'componentResults': None,
'action': None
},
'lastResult': {
'status': 'ERROR',
'lastRun': '2016-05-31T13:12:46.880849',
'forced': True,
'messages': {
'info': [],
'warn': [],
'error': [
'/bin/sh: 1: InvalidCommand: not found'
]
},
'componentResults': None,
'action': None
},
'parameters': {
'Category': 'system',
'Parser': None,
'uid': 'default',
'Interval': 1,
'Module': None,
'MaintenanceLock': None,
'gid': 'default',
'Command': 'InvalidCommand',
'Timeout': 30,
'Action': None,
'Template': None,
'History': 10
}
}
}
}
plugins_response = {
'plugins': {
'items': [
{
'plugin': {
'lastResult': {
'status': 'WARN',
'lastRun': '2016-05-31T15:32:53.187552',
'forced': True,
'messages': {
'info': [],
'warn': [
'Skipped because of maintenance in progress'
],
'error': []
},
'componentResults': None,
'action': None
},
'links': {
'self': '/plugins/Uname'
},
'name': 'Uname',
'parameters': {
'Category': 'system',
'Parser': None,
'uid': 'default',
'Interval': 1,
'Module': 'smoker.server.plugins.uname',
'MaintenanceLock': '/tmp/smokerd.lock',
'gid': 'default',
'Command': None,
'Timeout': 30,
'Action': None,
'Template': None,
'History': 10
},
'nextRun': '2016-05-31T15:31:35.191518'
}
},
{
'plugin': {
'lastResult': {
'status': 'OK',
'lastRun': '2016-05-31T15:32:53.194612',
'forced': True,
'messages': {
'info': [
'15:32:53 up 7 days, 4:29, 1 user,'
' load average: 1.07, 1.32, 1.28'
],
'warn': [],
'error': []
},
'componentResults': None,
'action': None
},
'links': {
'self': '/plugins/Uptime'
},
'name': 'Uptime',
'parameters': {
'Category': 'monitoring',
'Parser': None,
'uid': 'default',
'Interval': 1,
'Module': None,
'MaintenanceLock': None,
'gid': 'default',
'Command': 'uptime',
'Timeout': 30,
'Action': None,
'Template': None,
'History': 10
},
'nextRun': '2016-05-31T15:31:35.191695'
}
},
{
'plugin': {
'lastResult': {
'status': 'ERROR',
'lastRun': '2016-05-31T15:32:53.189058',
'forced': True,
'messages': {
'info': [],
'warn': [],
'error': [
'/bin/sh: 1: InvalidCommand: not found'
]
},
'componentResults': None,
'action': None
},
'links': {
'self': '/plugins/Hostname'
},
'name': 'Hostname',
'parameters': {
'Category': 'system',
'Parser': None,
'uid': 'default',
'Interval': 1,
'Module': None,
'MaintenanceLock': None,
'gid': 'default',
'Command': 'InvalidCommand',
'Timeout': 30,
'Action': None,
'Template': None,
'History': 10
},
'nextRun': '2016-05-31T15:31:35.191841'
}
}
]
}
}
tap_result_all_plugins = [
'1..1',
'not ok 1 - %s' % HOSTNAME,
' 1..3',
' not ok 1 - Hostname',
' ---',
' - error:',
' - /bin/sh: 1: InvalidCommand: not found',
' ...',
' ok 2 - Uname',
' ---',
' - warn:',
' - Skipped because of maintenance in progress',
' ...',
' ok 3 - Uptime',
' ---',
' - info:',
' - 15:32:53 up 7 days, 4:29, 1 user, load average: 1.07, 1.32, 1.28',
' ...'
]
tap_result_uptime_hostname = [
'1..1',
'not ok 1 - %s' % HOSTNAME,
' 1..2',
' not ok 1 - Hostname',
' ---',
' - error:',
' - /bin/sh: 1: InvalidCommand: not found',
' ...',
' ok 2 - Uptime',
' ---',
' - info:',
' - 15:32:53 up 7 days, 4:29, 1 user, load average: 1.07, 1.32, 1.28',
' ...',
]
tap_result_uptime_uname = [
'1..1',
'ok 1 - %s' % HOSTNAME,
' 1..2',
' ok 1 - Uname',
' ---',
' - warn:',
' - Skipped because of maintenance in progress',
' ...',
' ok 2 - Uptime',
' ---',
' - info:',
' - 15:32:53 up 7 days, 4:29, 1 user, load average: 1.07, 1.32, 1.28',
' ...'
]
tap_result_hostname_uname = [
'1..1',
'not ok 1 - %s' % HOSTNAME,
' 1..2',
' not ok 1 - Hostname',
' ---',
' - error:',
' - /bin/sh: 1: InvalidCommand: not found',
' ...',
' ok 2 - Uname',
' ---',
' - warn:',
' - Skipped because of maintenance in progress',
' ...',
]
xml_result_all_plugins = [
'',
' <testsuites name="All">',
' <testsuite hostname="%s" name="node %s" timestamp="2016-05-31 15:32:53">' % (HOSTNAME, HOSTNAME),
' <testcase classname="%s.Uname" name="Uname">' % HOSTNAME,
' <system-out message="Skipped because of maintenance in progress"></system-out></testcase>',
' <testcase classname="%s.Uptime" name="Uptime"></testcase>' % HOSTNAME,
' <testcase classname="%s.Hostname" name="Hostname">' % HOSTNAME,
' <error message="/bin/sh: 1: InvalidCommand: not found"></error></testcase></testsuite></testsuites>'
]
xml_result_uptime_uname = [
'',
' <testsuites name="All">',
' <testsuite hostname="%s" name="node %s" timestamp="2016-05-31 15:32:53">' % (HOSTNAME, HOSTNAME),
' <testcase classname="%s.Uname" name="Uname">' % HOSTNAME,
' <system-out message="Skipped because of maintenance in progress"></system-out></testcase>',
' <testcase classname="%s.Uptime" name="Uptime"></testcase></testsuite></testsuites>' % HOSTNAME
]
xml_result_uptime_hostname = [
'',
' <testsuites name="All">',
' <testsuite hostname="%s" name="node %s" timestamp="2016-05-31 15:32:53">' % (HOSTNAME, HOSTNAME),
' <testcase classname="%s.Uptime" name="Uptime"></testcase>' % HOSTNAME,
' <testcase classname="%s.Hostname" name="Hostname">' % HOSTNAME,
' <error message="/bin/sh: 1: InvalidCommand: not found"></error></testcase></testsuite></testsuites>'
]
xml_result_hostname_uname = [
'',
' <testsuites name="All">',
' <testsuite hostname="%s" name="node %s" timestamp="2016-05-31 15:32:53">' % (HOSTNAME, HOSTNAME),
' <testcase classname="%s.Uname" name="Uname">' % HOSTNAME,
' <system-out message="Skipped because of maintenance in progress"></system-out></testcase>',
' <testcase classname="%s.Hostname" name="Hostname">' % HOSTNAME,
' <error message="/bin/sh: 1: InvalidCommand: not found"></error></testcase></testsuite></testsuites>'
]
|
|
import base64
import fcntl
import getpass
import logging
import os
import re
import socket
import subprocess
import uuid
import paramiko as paramiko
from Crypto.Cipher import AES
from PyQt4.QtCore import QThread
cipher = AES.new(uuid.uuid4().hex, AES.MODE_ECB)
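# Regexes below parse ufw firewall status output: the To/Action/From columns,
# single ports, port ranges and comma-separated port lists with a protocol,
# IPv4/IPv6 addresses, the "(v6)" suffix, "on <interface>" qualifiers, and the
# Status/Logging/Default header lines.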
REGEX_UFW_ANYWHERE = re.compile(r'^(?P<any>Anywhere)')
REGEX_UFW_IP4 = re.compile(r'^(?P<ip4>([0-9]{1,3}\.){3}[0-9]{1,3}(/\d+)?)')
REGEX_UFW_IP6 = re.compile(r'^(?P<ip6>(.*?:){2,}[0-9a-zA-Z%.]+(/\d+)?)')
REGEX_UFW_PORT_RANGE = re.compile(r'^(?P<port_range>[0-9]{1,5}:[0-9]{1,5})/(?P<proto>tcp|udp)')
REGEX_UFW_PORT_LIST = re.compile(r'^(?P<port_list>[0-9]{1,5}(,[0-9]{1,5})*)/(?P<proto>tcp|udp)')
REGEX_UFW_PORT = re.compile(r'^(?P<port>[0-9]{1,5})')
REGEX_UFW_V6 = re.compile(r'^(?P<v6>\(v6\))?')
REGEX_UFW_INTERFACE = re.compile(r'^(on (?P<interface>\w+))?')
REGEX_UFW_SPLIT = re.compile(r'(?<!\son)\s+')
REGEX_UFW_WHOLE_LINE = re.compile(r'^(?P<to>.*)\s*(?P<action>(ALLOW|DENY|REJECT|LIMIT)\s(IN|OUT|FWD)?)\s*(?P<from>.*)')
REGEX_UFW_STATUS = re.compile(r'^Status:\s*(.*)')
REGEX_UFW_LOGGING = re.compile(r'^Logging:\s*(.*)')
REGEX_UFW_DEFAULT = re.compile(r'^Default:\s*(.*)')
def encode(msg_text):
return base64.b64encode(cipher.encrypt(msg_text))
def decode(msg_text):
return cipher.decrypt(base64.b64decode(msg_text)).strip().decode("utf-8")
def auto_str(cls):
def __str__(self):
return '%s(%s)' % (
type(self).__name__,
', '.join('%s=%s' % item for item in vars(self).items())
)
cls.__str__ = __str__
return cls
def use_sshpass(key_parameter, password):
if not key_parameter:
return ["sshpass", "-p", "{0}".format(password)]
else:
return []
def validate_ip_address(ip):
try:
socket.inet_aton(ip)
return True
except (socket.error, TypeError):
return False
# usage @static_vars(counter=0)
def static_vars(**kwargs):
def decorate(func):
for k in kwargs:
setattr(func, k, kwargs[k])
return func
return decorate
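# Illustrative usage sketch (not part of the original module): static_vars()
# simply attaches attributes to the decorated function, so they persist
# across calls.
@static_vars(counter=0)
def _example_counter():
    _example_counter.counter += 1
    return _example_counter.counter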
class ValidateConnectionThread(QThread):
"""
This class is used to validate connection to the machine that is selected for the operation
"""
def __init__(self, parent, selected_connection):
super().__init__(parent)
self.result = None
self.selected_connection = selected_connection
self.command = ''
def run(self):
if self.selected_connection.ip in ('127.0.0.1', '::1'):
self.result = True
else:
logging.debug('Validating connection for {0}'.format(self.selected_connection.ip))
self.command = [
"nmap -oG - -sP -PA22 {0} | awk '/Status: Up/{{print $0}}'".format(self.selected_connection.ip)]
self.result = subprocess.Popen(
self.command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL).stdout.read().decode('utf-8').strip()
@auto_str
class Connection:
def __init__(self):
super(Connection, self).__init__()
self.ip = '127.0.0.1'
self.username = getpass.getuser()
self.password = ''
self.sudo_password = ''
self.store_password = False
self.use_key_file = False
def get_title(self):
return self.username + '@' + self.ip
def is_local_ip(ip):
return ip in ('127.0.0.1', '::1')
def get_full_command(command, sudo_password, use_sudo_=True):
if sudo_password and use_sudo_:
base_command = "echo {0} | sudo -S ".format(sudo_password)
base_command = base_command + command.strip()
else:
base_command = command.strip()
return [base_command]
def get_table_model_data_in_array(table):
"""
:param QtGui.QTableView table:
:return:
"""
model = table.model().sourceModel()
data = []
for row in range(model.rowCount()):
data.append([])
for column in range(model.columnCount()):
index = model.index(row, column)
data[row].append(str(model.data(index)))
return data
def run_remote_command(command, ip, username, password, use_key_file, process_=None,
blocking_=False, expect_results_=1):
"""
:param expect_results_: Number of non-empty output lines to collect when blocking_ is True.
:param blocking_: Differentiates between reading continuous output in a loop, which needs a
non-blocking read, and reading an immediate, full response (a full response
needs something written to stdin first or the read will block).
:param command:
:param ip:
:param username:
:param password:
:param use_key_file:
:param process_:
:return:
"""
if process_:
output = ''
if is_local_ip(ip):
command.append('\n')
process_.stdin.write(' '.join(command).encode())
process_.stdin.flush()
if blocking_:
i = 0
while i < expect_results_: # need to write command to always return expected results
o = process_.stdout.readline()
if o:
output += o.decode('utf-8')
i += 1
else:
o = process_.stdout.read()
if o:
output = o.decode('utf-8')
else:
stdin, stdout, stderr = process_.exec_command(' '.join(command).encode())
o = stdout.read()
if o:
output = o.decode('utf-8')
parsed = list((x.strip() for x in output.split('\n')))
return parsed
else:
if is_local_ip(ip):
proc = subprocess.Popen(command, shell=False if len(command) > 1 else True, stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL, stdin=subprocess.PIPE, bufsize=1)
if not blocking_:
fd = proc.stdout.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
return proc
else:
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.WarningPolicy)
client.connect(ip, port=22, username=username, password=password)
return client
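# Illustrative usage sketch (not part of the original module): running a
# made-up command on the local machine via run_remote_command(). For a local
# IP the helper returns a subprocess.Popen object, so we read its stdout
# directly; blocking_=True keeps stdout in blocking mode.
def _example_run_local_uptime():
    conn = Connection()  # defaults to 127.0.0.1 and the current user
    cmd = get_full_command('uptime', conn.sudo_password, use_sudo_=False)
    proc = run_remote_command(cmd, conn.ip, conn.username, conn.password,
                              conn.use_key_file, blocking_=True)
    return proc.stdout.read().decode('utf-8').strip()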
|
|
# Copyright 2014 Google Inc. All Rights Reserved.
"""List printer for Cloud Platform resources."""
from googlecloudsdk.core.util import attrpath
from googlecloudsdk.core.console import console_io
def PrintResourceList(collection, items):
"""Print a list of cloud resources.
Args:
collection: str, The name of the collection to which the items belong.
items: iterable, A list or otherwise iterable object that generates the
rows of the list.
"""
console_io.PrintExtendedList(items, COLLECTION_COLUMNS[collection])
def _Select(path, transform=None):
"""Get a column fetcher for the given attr path and transform.
Args:
path: str, The attr path that keys into the resource.
transform: func(str)->str, A func that takes something found by the path
and maps it to some other string.
Returns:
func(obj)->str, A func that takes an object and returns the value
for a particular column.
"""
getter = attrpath.Selector(path)
if transform is None:
return getter
def GetAndTransform(obj):
return transform(getter(obj))
return GetAndTransform
def _NameOnly(value):
"""Get only the last token from a longer path, usually the name.
Intended to be a selector transform for URLs.
Args:
value: str, The value whose last token will be returned.
Returns:
str, The name from value.
"""
if value:
return value.split('/')[-1]
return value
def _CommaList(default=None):
def Transform(items):
if not items:
return default
return ', '.join(items)
return Transform
def _DiskSize(value):
"""Returns a human readable string representation of the disk size.
Args:
value: str, Disk size represented as number of bytes.
Returns:
A human readable string representation of the disk size.
"""
size = float(value)
the_unit = 'TB'
for unit in ['bytes', 'KB', 'MB', 'GB']:
if size < 1024.0:
the_unit = unit
break
size = float(size) / 1024.0
if size == int(size):
return '%d %s' % (size, the_unit)
else:
return '%3.1f %s' % (size, the_unit)
def _ScreenResolution(model):
"""Build a human readable string representation of a screen resolution.
Args:
model: a Test_v1.AndroidModel message (from ApiTools)
Returns:
Returns a human readable string representation of a screen resolution.
"""
return '{y} x {x}'.format(y=model.screenY, x=model.screenX)
# Guidelines for choosing your resource columns:
# - Column headers are ANGRY_SNAKE_CASE, just like user input in usage. This
# casing has a side effect in that column headers will never be confused for
# fields in the API responses, which are camelCase.
# - Fields that are URL parameters (that is, they disambiguate your
# resource) go first, starting at the end of the URL. So, often the first
# column will be NAME. PROJECT is an exception: no PROJECT column unless the
# resource being listed can be in a different project than the one found in
# the project property.
# - If your resource has a STATUS column or something similar, put it last.
# - Aim for an 80-char-wide table, but if someone has a 70-char NAME it's not
# your fault.
def _Default(default):
def Transform(item):
return default if item is None else item
return Transform
def _SelectTime(path):
return _Select(path, transform=lambda x: x and x.isoformat())
def _FormatOperationErrors(errs):
if not errs:
return None
else:
return '\n'.join(['[%s: %s]' % (e.code, e.message) for e in errs])
def _FormatResourceErrors(errs):
if not errs:
return None
else:
return '\n'.join(['[%s]' % e for e in errs])
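# Illustrative sketch (not part of the original module): a column definition is
# just a tuple of (HEADER, fetcher) pairs; each fetcher maps a resource message
# to a cell value. The field names here are made up to show how _Select() and
# the transforms compose.
_EXAMPLE_COLUMNS = (
    ('NAME', _Select('name')),
    ('ZONE', _Select('zone', _NameOnly)),
    ('SIZE', _Select('sizeBytes', _DiskSize)),
    ('TAGS', _Select('tags.items', _CommaList('-'))),
)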
COLLECTION_COLUMNS = {
# APHELEIA
'apheleia.projects.regions.functions': (
('NAME', _Select('name')),
('TRIGGERS NUMBER', _Select('triggers',
transform=lambda x: len(x) if x else 0)),
),
# APPENGINE
'app.module_versions': (
('MODULE', _Select('module')),
('VERSION', _Select('version')),
('IS_DEFAULT', _Select('is_default',
transform=lambda x: '*' if x else '-')),
),
# AUTOSCALER
'autoscaler.instances': (
('NAME', _Select('name')),
('DESCRIPTION', _Select('description')),
('STATE', _Select('state')),
('STATE_DETAILS', _Select('state_details')),
),
# BIGQUERY
'bigquery.datasets': (
('DATASET_ID', _Select('datasetReference.datasetId')),
),
'bigquery.jobs.describe': (
('JOB_TYPE', _Select('job_type')),
('STATE', _Select('state')),
('START_TIME', _Select('start_time')),
('DURATION', _Select('duration')),
('BYTES_PROCESSED', _Select('bytes_processed')),
),
'bigquery.jobs.list': (
('JOB_ID', _Select('job_id')),
('JOB_TYPE', _Select('job_type')),
('STATE', _Select('state')),
('START_TIME', _Select('start_time')),
('DURATION', _Select('duration')),
),
'bigquery.jobs.wait': (
('JOB_TYPE', _Select('job_type')),
('STATE', _Select('state')),
('START_TIME', _Select('start_time')),
('DURATION', _Select('duration')),
('BYTES_PROCESSED', _Select('bytes_processed')),
),
'bigquery.projects': (
('PROJECT_ID', _Select('projectReference.projectId')),
('FRIENDLY_NAME', _Select('friendlyName')),
),
'bigquery.tables.list': (
('ID', _Select('id')),
('TABLE_OR_VIEW', _Select('type')),
),
# COMPUTE
'compute.instances': (
('NAME', _Select('name')),
('ZONE', _Select('zone', _NameOnly)),
('MACHINE_TYPE', _Select('machineType', _NameOnly)),
('INTERNAL_IP', _Select('networkInterfaces[0].networkIP')),
('EXTERNAL_IP', _Select('networkInterfaces[0].accessConfigs[0].natIP')),
('STATUS', _Select('status')),
),
# CONTAINER V1BETA1
# TODO(user): remove this once v1 is fully rolled out
'containerv1beta1.projects.zones.clusters': (
('NAME', _Select('name')),
('ZONE', _Select('zone')),
('CLUSTER_API_VERSION', _Select('clusterApiVersion')),
('MASTER_IP', _Select('endpoint')),
('MACHINE_TYPE', _Select(
'nodeConfig', transform=
lambda x: '%s, %s' % (x.machineType, _NameOnly(x.sourceImage)))),
('NODES', _Select('numNodes')),
('STATUS', _Select('status')),
),
'containerv1beta1.projects.zones.operations': (
('NAME', _Select('name')),
('TYPE', _Select('operationType')),
('ZONE', _Select('zone')),
('TARGET', _Select('target')),
('ERROR_MESSAGE', _Select('errorMessage')),
('STATUS', _Select('status')),
),
# CONTAINER
'container.projects.zones.clusters': (
('NAME', _Select('name')),
('ZONE', _Select('zone')),
('MASTER_VERSION', _Select('currentMasterVersion')),
('MASTER_IP', _Select('endpoint')),
('MACHINE_TYPE', _Select(
'nodeConfig', transform=lambda x: '%s' % (x.machineType))),
('STATUS', _Select('status')),
),
'container.projects.zones.operations': (
('NAME', _Select('name')),
('TYPE', _Select('operationType')),
('ZONE', _Select('zone')),
('TARGET', _Select('targetLink', _NameOnly)),
('STATUS_MESSAGE', _Select('statusMessage')),
('STATUS', _Select('status')),
),
# DATAFLOW
'dataflow.jobs': (
('ID', _Select('job_id')),
('NAME', _Select('job_name')),
('TYPE', _Select('job_type')),
('CREATION_TIME', _Select('creation_time')),
('STATUS', _Select('status')),
),
# DNS
'dns.changes': (
('ID', _Select('id')),
('START_TIME', _Select('startTime')),
('STATUS', _Select('status')),
),
'dns.managedZones': (
('NAME', _Select('name')),
('DNS_NAME', _Select('dnsName')),
('DESCRIPTION', _Select('description')),
),
'dns.resourceRecordSets': (
('NAME', _Select('name')),
('TYPE', _Select('type')),
('TTL', _Select('ttl')),
('DATA', _Select('rrdatas', _CommaList(''))),
),
# DEPLOYMENTMANAGER V2BETA2
'deploymentmanagerv2beta2.deployments': (
('NAME', _Select('name')),
('STATE', _Select('state')),
('INTENT', _Select('intent')),
('ID', _Select('id')),
('DESCRIPTION', _Select('description')),
('MANIFEST', _Select('manifest', transform=
lambda x: x.split('/')[-1] if x else None)),
('ERRORS', _Select('update.errors', transform=_FormatResourceErrors)),
),
'deploymentmanagerv2beta2.operations': (
('NAME', _Select('name')),
('TYPE', _Select('operationType')),
('STATUS', _Select('status')),
('TARGET', _Select('targetLink', transform=
lambda x: x.split('/')[-1] if x else None)),
('ERRORS', _Select('error.errors', transform=_FormatOperationErrors)),
),
'deploymentmanagerv2beta2.resources': (
('NAME', _Select('name')),
('TYPE', _Select('type')),
('ID', _Select('id')),
('UPDATE_STATE', _Select('update.state', transform=
lambda x: 'COMPLETED' if x is None else x)),
('ERRORS', _Select('update.errors', transform=_FormatResourceErrors)),
),
# GENOMICS
'genomics.datasets': (
('ID', _Select('id')),
('NAME', _Select('name')),
),
# SQL
'sql.backupRuns': (
('DUE_TIME', _SelectTime('dueTime')),
('ERROR', _Select('error.code')),
('STATUS', _Select('status')),
),
'sql.flags': (
('NAME', _Select('name')),
('TYPE', _Select('type')),
('ALLOWED_VALUES', _Select('allowedStringValues', _CommaList(''))),
),
'sql.instances': (
('NAME', _Select('instance')),
('REGION', _Select('region')),
('TIER', _Select('settings.tier')),
('ADDRESS', _Select('ipAddresses[0].ipAddress')),
('STATUS', _Select('state')),
),
'sql.operations': (
('OPERATION', _Select('operation')),
('TYPE', _Select('operationType')),
('START', _SelectTime('startTime')),
('END', _SelectTime('endTime')),
('ERROR', _Select('error[0].code')),
('STATUS', _Select('state')),
),
'sql.sslCerts': (
('NAME', _Select('commonName')),
('SHA1_FINGERPRINT', _Select('sha1Fingerprint')),
('EXPIRATION', _Select('expirationTime')),
),
'sql.tiers': (
('TIER', _Select('tier')),
('AVAILABLE_REGIONS', _Select('region', _CommaList(''))),
('RAM', _Select('RAM', _DiskSize)),
('DISK', _Select('DiskQuota', _DiskSize)),
),
# projects
'cloudresourcemanager.projects': (
('PROJECT_ID', _Select('projectId')),
('NAME', _Select('name')),
('PROJECT_NUMBER', _Select('projectNumber')),
),
# source
'source.jobs.list': (
('REPO_NAME', _Select('name', _Default('default'))),
('PROJECT_ID ', _Select('projectId')),
('VCS', _Select('vcs')),
('STATE', _Select('state')),
('CREATE_TIME', _Select('createTime')),
),
# Cloud Updater
'replicapoolupdater.rollingUpdates': (
('ID', _Select('id')),
('GROUP_NAME', _Select('instanceGroupManager', _NameOnly)),
('TEMPLATE_NAME', _Select('instanceTemplate', _NameOnly)),
('STATUS', _Select('status')),
('STATUS_MESSAGE', _Select('statusMessage')),
),
'replicapoolupdater.rollingUpdates.instanceUpdates': (
('INSTANCE_NAME', _Select('instance', _NameOnly)),
('STATUS', _Select('status')),
),
# TEST
'test.android.devices': (
('DEVICE_ID', _Select('id')),
('MAKE', _Select('manufacturer')),
('MODEL', _Select('name')),
('FORM', _Select('form')),
('SCREEN_RES', _ScreenResolution),
('OS_VERSION_IDS', _Select('supportedVersionIds', _CommaList('none'))),
('TAGS', _Select('tags', _CommaList('')))
),
'test.run.outcomes': (
('OUTCOME', _Select('outcome')),
('STEP', _Select('step_name')),
('TEST_AXIS_VALUE', _Select('axis_value')),
),
# Cloud Logging
'logging.logs': (
('NAME', _Select('name')),
),
'logging.sinks': (
('NAME', _Select('name')),
('DESTINATION', _Select('destination')),
),
'logging.typedSinks': (
('NAME', _Select('name')),
('DESTINATION', _Select('destination')),
('TYPE', _Select('type')),
),
'logging.metrics': (
('NAME', _Select('name')),
('DESCRIPTION', _Select('description')),
('FILTER', _Select('filter')),
),
# Service Management (Inception)
'servicemanagement-v1.services': (
('NAME', _Select('serviceName')),
('TITLE', _Select('serviceConfig.title')),
),
}
|
|
# Copyright (c) 2013-2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from barbican.common import exception
from barbican.model import models
from barbican.plugin.interface import secret_store
from barbican.tests import utils
class WhenCreatingNewSecret(utils.BaseTestCase):
def setUp(self):
super(WhenCreatingNewSecret, self).setUp()
self.parsed_secret = {'name': 'name',
'secret_type': secret_store.SecretType.OPAQUE,
'algorithm': 'algorithm',
'bit_length': 512,
'mode': 'mode',
'plain_text': 'not-encrypted',
'creator_id': 'creator12345'}
self.parsed_order = {'secret': self.parsed_secret}
def test_new_secret_is_created_from_dict(self):
date_time = datetime.datetime.now().isoformat()
self.parsed_secret['expiration'] = date_time
secret = models.Secret(self.parsed_secret)
self.assertEqual(secret.name, self.parsed_secret['name'])
self.assertEqual(secret.secret_type, self.parsed_secret['secret_type'])
self.assertEqual(secret.algorithm, self.parsed_secret['algorithm'])
self.assertEqual(secret.bit_length, self.parsed_secret['bit_length'])
self.assertEqual(secret.mode, self.parsed_secret['mode'])
self.assertIsInstance(secret.expiration, datetime.datetime)
self.assertEqual(secret.creator_id, self.parsed_secret['creator_id'])
self.assertEqual(secret.created_at, secret.updated_at)
fields = secret.to_dict_fields()
self.assertEqual(self.parsed_secret['secret_type'],
fields['secret_type'])
self.assertEqual(self.parsed_secret['algorithm'], fields['algorithm'])
self.assertEqual(self.parsed_secret['creator_id'],
fields['creator_id'])
def test_new_secret_is_created_with_default_secret_type(self):
secret_spec = dict(self.parsed_secret)
date_time = datetime.datetime.now().isoformat()
secret_spec['expiration'] = date_time
del secret_spec['secret_type']
secret = models.Secret(secret_spec)
self.assertEqual(secret.secret_type, self.parsed_secret['secret_type'])
class WhenCreatingNewOrder(utils.BaseTestCase):
def setUp(self):
super(WhenCreatingNewOrder, self).setUp()
self.parsed_order = {
'type': 'certificate',
'meta': {
'email': '[email protected]'
},
'sub_status': 'Pending',
'sub_status_message': 'Waiting for instructions...',
'creator_id': 'creator12345'
}
def test_new_order_is_created(self):
order = models.Order(self.parsed_order)
self.assertEqual(order.type, self.parsed_order['type'])
self.assertEqual(order.meta, self.parsed_order['meta'])
self.assertEqual(order.sub_status, self.parsed_order['sub_status'])
self.assertEqual(order.creator_id, self.parsed_order['creator_id'])
self.assertEqual(
order.sub_status_message,
self.parsed_order['sub_status_message']
)
fields = order.to_dict_fields()
self.assertEqual(self.parsed_order['sub_status'], fields['sub_status'])
self.assertEqual(self.parsed_order['type'], fields['type'])
self.assertEqual(self.parsed_order['creator_id'],
fields['creator_id'])
class WhenCreatingNewContainer(utils.BaseTestCase):
def setUp(self):
super(WhenCreatingNewContainer, self).setUp()
self.parsed_container = {'name': 'name',
'type': 'generic',
'secret_refs': [
{'name': 'test secret 1',
'secret_ref': '123'},
{'name': 'test secret 2',
'secret_ref': '123'},
{'name': 'test secret 3',
'secret_ref': '123'}
],
'creator_id': 'creator123456'}
def test_new_container_is_created_from_dict(self):
container = models.Container(self.parsed_container)
self.assertEqual(container.name, self.parsed_container['name'])
self.assertEqual(container.type, self.parsed_container['type'])
self.assertEqual(container.creator_id,
self.parsed_container['creator_id'])
self.assertEqual(len(container.container_secrets),
len(self.parsed_container['secret_refs']))
self.assertEqual(container.container_secrets[0].name,
self.parsed_container['secret_refs'][0]['name'])
self.assertEqual(container.container_secrets[0].secret_id,
self.parsed_container['secret_refs'][0]['secret_ref'])
self.assertEqual(container.container_secrets[1].name,
self.parsed_container['secret_refs'][1]['name'])
self.assertEqual(container.container_secrets[1].secret_id,
self.parsed_container['secret_refs'][1]['secret_ref'])
self.assertEqual(container.container_secrets[2].name,
self.parsed_container['secret_refs'][2]['name'])
self.assertEqual(container.container_secrets[2].secret_id,
self.parsed_container['secret_refs'][2]['secret_ref'])
fields = container.to_dict_fields()
self.assertEqual(self.parsed_container['name'], fields['name'])
self.assertEqual(self.parsed_container['type'], fields['type'])
self.assertEqual(self.parsed_container['creator_id'],
fields['creator_id'])
def test_new_certificate_container_is_created_from_dict(self):
self.parsed_container['type'] = 'certificate'
container = models.Container(self.parsed_container)
self.assertEqual(container.name, self.parsed_container['name'])
self.assertEqual(container.type, self.parsed_container['type'])
self.assertEqual(container.creator_id,
self.parsed_container['creator_id'])
self.assertEqual(len(container.container_secrets),
len(self.parsed_container['secret_refs']))
self.assertEqual(container.container_secrets[0].name,
self.parsed_container['secret_refs'][0]['name'])
self.assertEqual(container.container_secrets[0].secret_id,
self.parsed_container['secret_refs'][0]['secret_ref'])
self.assertEqual(container.container_secrets[1].name,
self.parsed_container['secret_refs'][1]['name'])
self.assertEqual(container.container_secrets[1].secret_id,
self.parsed_container['secret_refs'][1]['secret_ref'])
self.assertEqual(container.container_secrets[2].name,
self.parsed_container['secret_refs'][2]['name'])
self.assertEqual(container.container_secrets[2].secret_id,
self.parsed_container['secret_refs'][2]['secret_ref'])
def test_parse_secret_ref_uri(self):
self.parsed_container['secret_refs'][0]['secret_ref'] = (
'http://localhost:9110/123/secrets/123456')
container = models.Container(self.parsed_container)
self.assertEqual(container.container_secrets[0].secret_id, '123456')
self.parsed_container['secret_refs'][0]['secret_ref'] = (
'http://localhost:9110/123/secrets/123456/')
container = models.Container(self.parsed_container)
self.assertEqual(container.container_secrets[0].secret_id, '123456')
class WhenCreatingNewConsumer(utils.BaseTestCase):
def setUp(self):
super(WhenCreatingNewConsumer, self).setUp()
self.parsed_consumer = {'name': 'name',
'URL': 'URL'}
self.container_id = '12345container'
def test_new_consumer_is_created_from_dict(self):
consumer = models.ContainerConsumerMetadatum(self.container_id,
self.parsed_consumer)
self.assertEqual(consumer.name, self.parsed_consumer['name'])
self.assertEqual(consumer.URL, self.parsed_consumer['URL'])
self.assertEqual(consumer.status, models.States.ACTIVE)
def test_new_consumer_has_correct_hash(self):
consumer_one = models.ContainerConsumerMetadatum(self.container_id,
self.parsed_consumer)
consumer_two = models.ContainerConsumerMetadatum(self.container_id,
self.parsed_consumer)
different_container = '67890container'
consumer_three = models.ContainerConsumerMetadatum(
different_container, self.parsed_consumer)
self.assertEqual(consumer_one.data_hash, consumer_two.data_hash)
self.assertNotEqual(consumer_one.data_hash, consumer_three.data_hash)
class WhenProcessingJsonBlob(utils.BaseTestCase):
def setUp(self):
super(WhenProcessingJsonBlob, self).setUp()
self.json_blob = models.JsonBlob()
def test_process_bind_param_w_dict(self):
res = self.json_blob.process_bind_param({'test': True}, None)
self.assertEqual(res, '{"test": true}')
def test_process_result_value_w_json_str(self):
res = self.json_blob.process_result_value('{"test": true}', None)
self.assertTrue(res.get('test'))
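# Illustrative note (assumption, not part of the original tests): the two cases
# above exercise models.JsonBlob from each direction, and the pair is expected
# to round-trip for any JSON-serializable value, e.g.:
#
#   blob = models.JsonBlob()
#   blob.process_result_value(blob.process_bind_param({'a': 1}, None), None)
#   # -> {'a': 1}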
class WhenCreatingOrderRetryTask(utils.BaseTestCase):
def test_create_new_order_task(self):
order = models.Order({
'type': 'certificate',
'meta': {
'email': '[email protected]'
},
'sub_status': 'Pending',
'sub_status_message': 'Waiting for instructions...'
})
at = datetime.datetime.utcnow()
order_retry_task = models.OrderRetryTask()
order_retry_task.order_id = order.id
order_retry_task.retry_task = "foobar"
order_retry_task.retry_at = at
order_retry_task.retry_args = ["one", "two"]
order_retry_task.retry_kwargs = {"three": "four"}
self.assertEqual(order_retry_task.order_id, order.id)
self.assertEqual(order_retry_task.retry_task, "foobar")
self.assertEqual(order_retry_task.retry_at, at)
self.assertEqual(
["one", "two"],
order_retry_task.retry_args,
)
self.assertEqual(
{"three": "four"},
order_retry_task.retry_kwargs,
)
class WhenCreatingNewCertificateAuthority(utils.BaseTestCase):
def setUp(self):
super(WhenCreatingNewCertificateAuthority, self).setUp()
expiration = (datetime.datetime.utcnow() +
datetime.timedelta(minutes=10))
self.parsed_ca = {'plugin_name': 'dogtag_plugin',
'plugin_ca_id': 'ca_master',
'expiration': expiration.isoformat(),
'name': 'Dogtag CA',
'description': 'Master CA for Dogtag plugin',
'ca_signing_certificate': 'XXXXX',
'intermediates': 'YYYYY'}
def test_new_ca_is_created_from_dict(self):
ca = models.CertificateAuthority(self.parsed_ca)
self.assertEqual(self.parsed_ca['plugin_name'], ca.plugin_name)
self.assertEqual(self.parsed_ca['plugin_ca_id'], ca.plugin_ca_id)
self.assertEqual(self.parsed_ca['name'], ca.ca_meta['name'].value)
self.assertEqual(self.parsed_ca['description'],
ca.ca_meta['description'].value)
self.assertEqual(self.parsed_ca['ca_signing_certificate'],
ca.ca_meta['ca_signing_certificate'].value)
self.assertEqual(self.parsed_ca['intermediates'],
ca.ca_meta['intermediates'].value)
self.assertIsInstance(ca.expiration, datetime.datetime)
self.assertEqual(ca.created_at, ca.updated_at)
class WhenCreatingNewProjectCertificateAuthority(utils.BaseTestCase):
def setUp(self):
super(WhenCreatingNewProjectCertificateAuthority, self).setUp()
expiration = (datetime.datetime.utcnow() +
datetime.timedelta(minutes=10))
self.parsed_ca = {'plugin_name': 'dogtag_plugin',
'plugin_ca_id': 'ca_master',
'expiration': expiration.isoformat(),
'name': 'Dogtag CA',
'description': 'Master CA for Dogtag plugin',
'ca_signing_certificate': 'XXXXX',
'intermediates': 'YYYYY'}
def test_create_new_project_ca(self):
ca = models.CertificateAuthority(self.parsed_ca)
ca.id = '67890'
project = models.Project()
project.id = '12345'
project_ca = models.ProjectCertificateAuthority(project.id, ca.id)
self.assertEqual(ca.id, project_ca.ca_id)
self.assertEqual(project.id, project_ca.project_id)
class WhenCreatingNewPreferredCertificateAuthority(utils.BaseTestCase):
def setUp(self):
super(WhenCreatingNewPreferredCertificateAuthority, self).setUp()
expiration = (datetime.datetime.utcnow() +
datetime.timedelta(minutes=10))
self.parsed_ca = {'plugin_name': 'dogtag_plugin',
'plugin_ca_id': 'ca_master',
'expiration': expiration.isoformat(),
'name': 'Dogtag CA',
'description': 'Master CA for Dogtag plugin',
'ca_signing_certificate': 'XXXXX',
'intermediates': 'YYYYY'}
def test_create_new_preferred_ca(self):
ca = models.CertificateAuthority(self.parsed_ca)
ca.id = '67890'
project = models.Project()
project.id = '12345'
preferred_ca = models.PreferredCertificateAuthority(project.id, ca.id)
self.assertEqual(ca.id, preferred_ca.ca_id)
self.assertEqual(project.id, preferred_ca.project_id)
class WhenCreatingNewSecretACL(utils.BaseTestCase):
def setUp(self):
super(WhenCreatingNewSecretACL, self).setUp()
self.secret_id = 'secret123456'
self.user_ids = ['user12345', 'user67890']
self.operation = 'read'
self.project_access = True
def test_new_secretacl_for_given_all_input(self):
acl = models.SecretACL(self.secret_id, self.operation,
self.project_access, self.user_ids)
self.assertEqual(self.secret_id, acl.secret_id)
self.assertEqual(self.operation, acl.operation)
self.assertEqual(self.project_access, acl.project_access)
self.assertTrue(all(acl_user.user_id in self.user_ids for acl_user
in acl.acl_users))
def test_new_secretacl_check_to_dict_fields(self):
acl = models.SecretACL(self.secret_id, self.operation,
self.project_access, self.user_ids)
self.assertEqual(self.secret_id, acl.to_dict_fields()['secret_id'])
self.assertEqual(self.operation, acl.to_dict_fields()['operation'])
self.assertEqual(self.project_access,
acl.to_dict_fields()['project_access'])
self.assertTrue(all(user_id in self.user_ids for user_id in
acl.to_dict_fields()['users']))
self.assertEqual(None, acl.to_dict_fields()['acl_id'])
def test_new_secretacl_for_bare_minimum_input(self):
acl = models.SecretACL(self.secret_id, self.operation,
None, None)
self.assertEqual(acl.secret_id, self.secret_id)
self.assertEqual(0, len(acl.acl_users))
self.assertEqual(self.operation, acl.operation)
self.assertEqual(None, acl.project_access)
def test_new_secretacl_with_duplicate_userids_input(self):
user_ids = list(self.user_ids)
user_ids = user_ids * 2 # duplicate ids
acl = models.SecretACL(self.secret_id, self.operation,
None, user_ids=user_ids)
self.assertEqual(self.secret_id, acl.secret_id)
self.assertEqual(self.operation, acl.operation)
self.assertEqual(None, acl.project_access)
self.assertEqual(2, len(acl.acl_users))
def test_should_throw_exception_missing_secret_id(self):
self.assertRaises(exception.MissingArgumentError,
models.SecretACL, None, 'read',
['user246'], None)
def test_should_throw_exception_missing_operation(self):
self.assertRaises(exception.MissingArgumentError,
models.SecretACL, self.secret_id, None,
None, ['user246'])
def test_new_secretacl_expect_user_ids_as_list(self):
acl = models.SecretACL(self.secret_id, self.operation,
None, {'aUser': '12345'})
self.assertEqual(0, len(acl.acl_users))
class WhenCreatingNewContainerACL(utils.BaseTestCase):
def setUp(self):
super(WhenCreatingNewContainerACL, self).setUp()
self.container_id = 'container123456'
self.user_ids = ['user12345', 'user67890']
self.operation = 'read'
self.project_access = True
def test_new_containeracl_for_given_all_input(self):
acl = models.ContainerACL(self.container_id, self.operation,
self.project_access, self.user_ids)
self.assertEqual(acl.container_id, self.container_id)
self.assertEqual(acl.operation, self.operation)
self.assertEqual(acl.project_access, self.project_access)
self.assertTrue(all(acl_user.user_id in self.user_ids for acl_user
in acl.acl_users))
def test_new_containeracl_check_to_dict_fields(self):
acl = models.ContainerACL(self.container_id, self.operation,
self.project_access, self.user_ids)
self.assertEqual(self.container_id,
acl.to_dict_fields()['container_id'])
self.assertEqual(self.operation, acl.to_dict_fields()['operation'])
self.assertEqual(self.project_access,
acl.to_dict_fields()['project_access'])
self.assertTrue(all(user_id in self.user_ids for user_id
in acl.to_dict_fields()['users']))
self.assertEqual(None, acl.to_dict_fields()['acl_id'])
def test_new_containeracl_for_bare_minimum_input(self):
acl = models.ContainerACL(self.container_id, self.operation,
None, None)
self.assertEqual(self.container_id, acl.container_id)
self.assertEqual(0, len(acl.acl_users))
self.assertEqual(self.operation, acl.operation)
self.assertEqual(None, acl.project_access)
def test_new_containeracl_with_duplicate_userids_input(self):
user_ids = list(self.user_ids)
user_ids = user_ids * 2 # duplicate ids
acl = models.ContainerACL(self.container_id, self.operation,
True, user_ids=user_ids)
self.assertEqual(self.container_id, acl.container_id)
self.assertEqual(self.operation, acl.operation)
self.assertEqual(True, acl.project_access)
self.assertEqual(2, len(acl.acl_users))
def test_should_throw_exception_missing_container_id(self):
self.assertRaises(exception.MissingArgumentError,
models.ContainerACL, None, 'read',
None, ['user246'])
def test_should_throw_exception_missing_operation(self):
self.assertRaises(exception.MissingArgumentError,
models.ContainerACL, self.container_id, None,
None, ['user246'])
def test_new_containeracl_expect_user_ids_as_list(self):
acl = models.ContainerACL(self.container_id, self.operation,
None, {'aUser': '12345'})
self.assertEqual(0, len(acl.acl_users))
class WhenCreatingNewSecretACLUser(utils.BaseTestCase):
def setUp(self):
super(WhenCreatingNewSecretACLUser, self).setUp()
self.secret_acl_id = 'secret_acl_123456'
self.user_ids = ['user12345', 'user67890']
def test_new_secretacl_user_for_given_all_input(self):
acl_user = models.SecretACLUser(self.secret_acl_id, self.user_ids[0])
self.assertEqual(self.secret_acl_id, acl_user.acl_id)
self.assertEqual(self.user_ids[0], acl_user.user_id)
self.assertEqual(models.States.ACTIVE, acl_user.status)
def test_new_secretacl_user_check_to_dict_fields(self):
acl_user = models.SecretACLUser(self.secret_acl_id, self.user_ids[1])
self.assertEqual(self.secret_acl_id,
acl_user.to_dict_fields()['acl_id'])
self.assertEqual(self.user_ids[1],
acl_user.to_dict_fields()['user_id'])
self.assertEqual(models.States.ACTIVE,
acl_user.to_dict_fields()['status'])
def test_should_throw_exception_missing_user_id(self):
self.assertRaises(exception.MissingArgumentError,
models.SecretACLUser, self.secret_acl_id,
None)
class WhenCreatingNewContainerACLUser(utils.BaseTestCase):
def setUp(self):
super(WhenCreatingNewContainerACLUser, self).setUp()
self.container_acl_id = 'container_acl_123456'
self.user_ids = ['user12345', 'user67890']
def test_new_secretacl_user_for_given_all_input(self):
acl_user = models.ContainerACLUser(self.container_acl_id,
self.user_ids[0])
self.assertEqual(self.container_acl_id, acl_user.acl_id)
self.assertEqual(self.user_ids[0], acl_user.user_id)
self.assertEqual(models.States.ACTIVE, acl_user.status)
def test_new_secretacl_user_check_to_dict_fields(self):
acl_user = models.ContainerACLUser(self.container_acl_id,
self.user_ids[1])
self.assertEqual(self.container_acl_id,
acl_user.to_dict_fields()['acl_id'])
self.assertEqual(self.user_ids[1],
acl_user.to_dict_fields()['user_id'])
self.assertEqual(models.States.ACTIVE,
acl_user.to_dict_fields()['status'])
def test_should_throw_exception_missing_user_id(self):
self.assertRaises(exception.MissingArgumentError,
models.ContainerACLUser, self.container_acl_id,
None)
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The system for scheduling tasks and executing them in order.
Deals with dependencies, priorities, resources, etc.
The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them.
See :doc:`/central_scheduler` for more info.
"""
import collections
import inspect
import json
from luigi.batch_notifier import BatchNotifier
try:
import cPickle as pickle
except ImportError:
import pickle
import functools
import hashlib
import itertools
import logging
import os
import re
import time
from luigi import six
from luigi import configuration
from luigi import notifications
from luigi import parameter
from luigi import task_history as history
from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN, \
BATCH_RUNNING
from luigi.task import Config
logger = logging.getLogger(__name__)
UPSTREAM_RUNNING = 'UPSTREAM_RUNNING'
UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT'
UPSTREAM_FAILED = 'UPSTREAM_FAILED'
UPSTREAM_DISABLED = 'UPSTREAM_DISABLED'
UPSTREAM_SEVERITY_ORDER = (
'',
UPSTREAM_RUNNING,
UPSTREAM_MISSING_INPUT,
UPSTREAM_FAILED,
UPSTREAM_DISABLED,
)
UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index
STATUS_TO_UPSTREAM_MAP = {
FAILED: UPSTREAM_FAILED,
RUNNING: UPSTREAM_RUNNING,
BATCH_RUNNING: UPSTREAM_RUNNING,
PENDING: UPSTREAM_MISSING_INPUT,
DISABLED: UPSTREAM_DISABLED,
}
WORKER_STATE_DISABLED = 'disabled'
WORKER_STATE_ACTIVE = 'active'
TASK_FAMILY_RE = re.compile(r'([^(_]+)[(_]')
RPC_METHODS = {}
_retry_policy_fields = [
"retry_count",
"disable_hard_timeout",
"disable_window",
]
RetryPolicy = collections.namedtuple("RetryPolicy", _retry_policy_fields)
def _get_empty_retry_policy():
return RetryPolicy(*[None] * len(_retry_policy_fields))
def rpc_method(**request_args):
def _rpc_method(fn):
# If request args are passed, return this function again for use as
# the decorator function with the request args attached.
fn_args = inspect.getargspec(fn)
assert not fn_args.varargs
assert fn_args.args[0] == 'self'
all_args = fn_args.args[1:]
defaults = dict(zip(reversed(all_args), reversed(fn_args.defaults or ())))
required_args = frozenset(arg for arg in all_args if arg not in defaults)
fn_name = fn.__name__
@functools.wraps(fn)
def rpc_func(self, *args, **kwargs):
actual_args = defaults.copy()
actual_args.update(dict(zip(all_args, args)))
actual_args.update(kwargs)
if not all(arg in actual_args for arg in required_args):
raise TypeError('{} takes {} arguments ({} given)'.format(
fn_name, len(all_args), len(actual_args)))
return self._request('/api/{}'.format(fn_name), actual_args, **request_args)
RPC_METHODS[fn_name] = rpc_func
return fn
return _rpc_method
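# Illustrative sketch (assumption, not part of the module): decorating a
# Scheduler method with @rpc_method() leaves the server-side implementation
# unchanged while registering a same-named client proxy in RPC_METHODS; the
# proxy validates the call signature and forwards it through the owner's
# _request() to '/api/<method name>'.
#
#   class MyScheduler(object):            # hypothetical minimal example
#       @rpc_method()
#       def ping(self, worker=None):
#           return {'rpc_messages': []}
#
#   # RPC_METHODS['ping'] is now a function expecting an object with a
#   # _request(url, args, **request_args) method, e.g. a RemoteScheduler.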
class scheduler(Config):
    # TODO(erikbern): the config_path is needed for backwards compatibility. We
    # should drop the compatibility at some point
retry_delay = parameter.FloatParameter(default=900.0)
remove_delay = parameter.FloatParameter(default=600.0)
worker_disconnect_delay = parameter.FloatParameter(default=60.0)
state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle')
batch_emails = parameter.BoolParameter(default=False, description="Send e-mails in batches rather than immediately")
# Jobs are disabled if we see more than retry_count failures in disable_window seconds.
# These disables last for disable_persist seconds.
disable_window = parameter.IntParameter(default=3600,
config_path=dict(section='scheduler', name='disable-window-seconds'))
retry_count = parameter.IntParameter(default=999999999,
config_path=dict(section='scheduler', name='disable_failures'))
disable_hard_timeout = parameter.IntParameter(default=999999999,
config_path=dict(section='scheduler', name='disable-hard-timeout'))
disable_persist = parameter.IntParameter(default=86400,
config_path=dict(section='scheduler', name='disable-persist-seconds'))
max_shown_tasks = parameter.IntParameter(default=100000)
max_graph_nodes = parameter.IntParameter(default=100000)
record_task_history = parameter.BoolParameter(default=False)
prune_on_get_work = parameter.BoolParameter(default=False)
def _get_retry_policy(self):
return RetryPolicy(self.retry_count, self.disable_hard_timeout, self.disable_window)
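# Illustrative sketch (assumption, not part of the module): the scheduler
# config collapses its failure-handling knobs into a RetryPolicy namedtuple,
# which per-task overrides can later partially replace (see
# Scheduler._generate_retry_policy).
#
#   cfg = scheduler(retry_count=5, disable_hard_timeout=3600, disable_window=600)
#   cfg._get_retry_policy()
#   # -> RetryPolicy(retry_count=5, disable_hard_timeout=3600, disable_window=600)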
class Failures(object):
"""
This class tracks the number of failures in a given time window.
Failures added are marked with the current timestamp, and this class counts
the number of failures in a sliding time window ending at the present.
"""
def __init__(self, window):
"""
Initialize with the given window.
:param window: how long to track failures for, as a float (number of seconds).
"""
self.window = window
self.failures = collections.deque()
self.first_failure_time = None
def add_failure(self):
"""
Add a failure event with the current timestamp.
"""
failure_time = time.time()
if not self.first_failure_time:
self.first_failure_time = failure_time
self.failures.append(failure_time)
def num_failures(self):
"""
Return the number of failures in the window.
"""
min_time = time.time() - self.window
while self.failures and self.failures[0] < min_time:
self.failures.popleft()
return len(self.failures)
def clear(self):
"""
Clear the failure queue.
"""
self.failures.clear()
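# Illustrative sketch (assumption, not part of the module): only failures whose
# timestamp falls inside the trailing window are counted, so old failures age
# out automatically.
#
#   f = Failures(window=60)     # 60-second sliding window
#   f.add_failure()
#   f.add_failure()
#   f.num_failures()            # -> 2 while both timestamps are recent
#   # ... more than 60 seconds later ...
#   f.num_failures()            # -> 0, both failures have aged out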
def _get_default(x, default):
if x is not None:
return x
else:
return default
class OrderedSet(collections.MutableSet):
"""
Standard Python OrderedSet recipe found at http://code.activestate.com/recipes/576694/
Modified to include a peek function to get the last element
"""
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev, next = self.map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def peek(self, last=True):
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
return key
def pop(self, last=True):
key = self.peek(last)
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
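# Illustrative sketch (assumption, not part of the module): OrderedSet keeps
# insertion order and adds peek()/pop() from either end, which the scheduler
# uses e.g. in count_pending to check whether this worker was the last one to
# register interest in a task (task.workers.peek(last=True)).
#
#   s = OrderedSet(['a', 'b', 'c'])
#   s.peek(last=True)    # -> 'c' (most recently added)
#   s.peek(last=False)   # -> 'a' (oldest)
#   s.pop()              # -> 'c' and removes it from the set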
class Task(object):
def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None,
params=None, tracking_url=None, status_message=None, retry_policy='notoptional'):
self.id = task_id
        self.stakeholders = set() # worker ids that are somehow related to this task (i.e. don't prune while any of these workers are still active)
        self.workers = OrderedSet() # worker ids that can perform the task - task is 'BROKEN' if none of these workers are active
if deps is None:
self.deps = set()
else:
self.deps = set(deps)
self.status = status # PENDING, RUNNING, FAILED or DONE
self.time = time.time() # Timestamp when task was first added
self.updated = self.time
self.retry = None
self.remove = None
self.worker_running = None # the worker id that is currently running the task or None
self.time_running = None # Timestamp when picked up by worker
self.expl = None
self.priority = priority
self.resources = _get_default(resources, {})
self.family = family
self.module = module
self.params = _get_default(params, {})
self.retry_policy = retry_policy
self.failures = Failures(self.retry_policy.disable_window)
self.tracking_url = tracking_url
self.status_message = status_message
self.scheduler_disable_time = None
self.runnable = False
self.batchable = False
self.batch_id = None
def __repr__(self):
return "Task(%r)" % vars(self)
# TODO(2017-08-10) replace this function with direct calls to batchable
# this only exists for backward compatibility
def is_batchable(self):
try:
return self.batchable
except AttributeError:
return False
def add_failure(self):
self.failures.add_failure()
def has_excessive_failures(self):
if self.failures.first_failure_time is not None:
if (time.time() >= self.failures.first_failure_time + self.retry_policy.disable_hard_timeout):
return True
logger.debug('%s task num failures is %s and limit is %s', self.id, self.failures.num_failures(), self.retry_policy.retry_count)
if self.failures.num_failures() >= self.retry_policy.retry_count:
logger.debug('%s task num failures limit(%s) is exceeded', self.id, self.retry_policy.retry_count)
return True
return False
@property
def pretty_id(self):
param_str = ', '.join('{}={}'.format(key, value) for key, value in sorted(self.params.items()))
return '{}({})'.format(self.family, param_str)
class Worker(object):
"""
Structure for tracking worker activity and keeping their references.
"""
def __init__(self, worker_id, last_active=None):
self.id = worker_id
self.reference = None # reference to the worker in the real world. (Currently a dict containing just the host)
self.last_active = last_active or time.time() # seconds since epoch
self.last_get_work = None
self.started = time.time() # seconds since epoch
self.tasks = set() # task objects
self.info = {}
self.disabled = False
self.rpc_messages = []
def add_info(self, info):
self.info.update(info)
def update(self, worker_reference, get_work=False):
if worker_reference:
self.reference = worker_reference
self.last_active = time.time()
if get_work:
self.last_get_work = time.time()
def prune(self, config):
# Delete workers that haven't said anything for a while (probably killed)
if self.last_active + config.worker_disconnect_delay < time.time():
return True
def get_tasks(self, state, *statuses):
num_self_tasks = len(self.tasks)
num_state_tasks = sum(len(state._status_tasks[status]) for status in statuses)
if num_self_tasks < num_state_tasks:
return six.moves.filter(lambda task: task.status in statuses, self.tasks)
else:
return six.moves.filter(lambda task: self.id in task.workers, state.get_active_tasks_by_status(*statuses))
def is_trivial_worker(self, state):
"""
        Return True if this worker is not an assistant and all of its
        pending tasks have no resource requirements.
        We have to pass the state parameter for optimization reasons.
"""
if self.assistant:
return False
return all(not task.resources for task in self.get_tasks(state, PENDING))
@property
def assistant(self):
return self.info.get('assistant', False)
@property
def enabled(self):
return not self.disabled
@property
def state(self):
if self.enabled:
return WORKER_STATE_ACTIVE
else:
return WORKER_STATE_DISABLED
def add_rpc_message(self, name, **kwargs):
# the message has the format {'name': <function_name>, 'kwargs': <function_kwargs>}
self.rpc_messages.append({'name': name, 'kwargs': kwargs})
def fetch_rpc_messages(self):
messages = self.rpc_messages[:]
del self.rpc_messages[:]
return messages
def __str__(self):
return self.id
class SimpleTaskState(object):
"""
    Keep track of the current state and handle persistence.
    The point of this class is to enable other ways to keep state, e.g. by using a database.
These will be implemented by creating an abstract base class that this and other classes
inherit from.
"""
def __init__(self, state_path):
self._state_path = state_path
self._tasks = {} # map from id to a Task object
self._status_tasks = collections.defaultdict(dict)
self._active_workers = {} # map from id to a Worker object
self._task_batchers = {}
def get_state(self):
return self._tasks, self._active_workers, self._task_batchers
def set_state(self, state):
self._tasks, self._active_workers = state[:2]
if len(state) >= 3:
self._task_batchers = state[2]
def dump(self):
try:
with open(self._state_path, 'wb') as fobj:
pickle.dump(self.get_state(), fobj)
except IOError:
logger.warning("Failed saving scheduler state", exc_info=1)
else:
logger.info("Saved state in %s", self._state_path)
    # Prone to crashes when old state is unpickled with updated code. TODO: some kind of version control?
def load(self):
if os.path.exists(self._state_path):
logger.info("Attempting to load state from %s", self._state_path)
try:
with open(self._state_path, 'rb') as fobj:
state = pickle.load(fobj)
except BaseException:
logger.exception("Error when loading state. Starting from empty state.")
return
self.set_state(state)
self._status_tasks = collections.defaultdict(dict)
for task in six.itervalues(self._tasks):
self._status_tasks[task.status][task.id] = task
else:
logger.info("No prior state file exists at %s. Starting with empty state", self._state_path)
def get_active_tasks(self):
return six.itervalues(self._tasks)
def get_active_tasks_by_status(self, *statuses):
return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status]) for status in statuses)
def get_batch_running_tasks(self, batch_id):
assert batch_id is not None
return [
task for task in self.get_active_tasks_by_status(BATCH_RUNNING)
if task.batch_id == batch_id
]
def set_batcher(self, worker_id, family, batcher_args, max_batch_size):
self._task_batchers.setdefault(worker_id, {})
self._task_batchers[worker_id][family] = (batcher_args, max_batch_size)
def get_batcher(self, worker_id, family):
return self._task_batchers.get(worker_id, {}).get(family, (None, 1))
def num_pending_tasks(self):
"""
Return how many tasks are PENDING + RUNNING. O(1).
"""
return len(self._status_tasks[PENDING]) + len(self._status_tasks[RUNNING])
def get_task(self, task_id, default=None, setdefault=None):
if setdefault:
task = self._tasks.setdefault(task_id, setdefault)
self._status_tasks[task.status][task.id] = task
return task
else:
return self._tasks.get(task_id, default)
def has_task(self, task_id):
return task_id in self._tasks
def re_enable(self, task, config=None):
task.scheduler_disable_time = None
task.failures.clear()
if config:
self.set_status(task, FAILED, config)
task.failures.clear()
def set_batch_running(self, task, batch_id, worker_id):
self.set_status(task, BATCH_RUNNING)
task.batch_id = batch_id
task.worker_running = worker_id
task.resources_running = task.resources
task.time_running = time.time()
def set_status(self, task, new_status, config=None):
if new_status == FAILED:
assert config is not None
if new_status == DISABLED and task.status in (RUNNING, BATCH_RUNNING):
return
remove_on_failure = task.batch_id is not None and not task.batchable
if task.status == DISABLED:
if new_status == DONE:
self.re_enable(task)
# don't allow workers to override a scheduler disable
elif task.scheduler_disable_time is not None and new_status != DISABLED:
return
if task.status == RUNNING and task.batch_id is not None and new_status != RUNNING:
for batch_task in self.get_batch_running_tasks(task.batch_id):
self.set_status(batch_task, new_status, config)
batch_task.batch_id = None
task.batch_id = None
if new_status == FAILED and task.status != DISABLED:
task.add_failure()
if task.has_excessive_failures():
task.scheduler_disable_time = time.time()
new_status = DISABLED
if not config.batch_emails:
notifications.send_error_email(
'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id),
'{task} failed {failures} times in the last {window} seconds, so it is being '
'disabled for {persist} seconds'.format(
failures=task.retry_policy.retry_count,
task=task.id,
window=config.disable_window,
persist=config.disable_persist,
))
elif new_status == DISABLED:
task.scheduler_disable_time = None
if new_status != task.status:
self._status_tasks[task.status].pop(task.id)
self._status_tasks[new_status][task.id] = task
task.status = new_status
task.updated = time.time()
if new_status == FAILED:
task.retry = time.time() + config.retry_delay
if remove_on_failure:
task.remove = time.time()
def fail_dead_worker_task(self, task, config, assistants):
        # If a running worker disconnects, tag all its jobs as FAILED and subject them to the same retry logic
if task.status in (BATCH_RUNNING, RUNNING) and task.worker_running and task.worker_running not in task.stakeholders | assistants:
logger.info("Task %r is marked as running by disconnected worker %r -> marking as "
"FAILED with retry delay of %rs", task.id, task.worker_running,
config.retry_delay)
task.worker_running = None
self.set_status(task, FAILED, config)
task.retry = time.time() + config.retry_delay
def update_status(self, task, config):
# Mark tasks with no remaining active stakeholders for deletion
if (not task.stakeholders) and (task.remove is None) and (task.status != RUNNING):
# We don't check for the RUNNING case, because that is already handled
# by the fail_dead_worker_task function.
logger.debug("Task %r has no stakeholders anymore -> might remove "
"task in %s seconds", task.id, config.remove_delay)
task.remove = time.time() + config.remove_delay
# Re-enable task after the disable time expires
if task.status == DISABLED and task.scheduler_disable_time is not None:
if time.time() - task.scheduler_disable_time > config.disable_persist:
self.re_enable(task, config)
# Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0
if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time():
self.set_status(task, PENDING, config)
def may_prune(self, task):
return task.remove and time.time() >= task.remove
def inactivate_tasks(self, delete_tasks):
# The terminology is a bit confusing: we used to "delete" tasks when they became inactive,
# but with a pluggable state storage, you might very well want to keep some history of
# older tasks as well. That's why we call it "inactivate" (as in the verb)
for task in delete_tasks:
task_obj = self._tasks.pop(task)
self._status_tasks[task_obj.status].pop(task)
def get_active_workers(self, last_active_lt=None, last_get_work_gt=None):
for worker in six.itervalues(self._active_workers):
if last_active_lt is not None and worker.last_active >= last_active_lt:
continue
last_get_work = worker.last_get_work
if last_get_work_gt is not None and (
last_get_work is None or last_get_work <= last_get_work_gt):
continue
yield worker
def get_assistants(self, last_active_lt=None):
return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt))
def get_worker_ids(self):
return self._active_workers.keys() # only used for unit tests
def get_worker(self, worker_id):
return self._active_workers.setdefault(worker_id, Worker(worker_id))
def inactivate_workers(self, delete_workers):
# Mark workers as inactive
for worker in delete_workers:
self._active_workers.pop(worker)
self._remove_workers_from_tasks(delete_workers)
def _remove_workers_from_tasks(self, workers, remove_stakeholders=True):
for task in self.get_active_tasks():
if remove_stakeholders:
task.stakeholders.difference_update(workers)
task.workers -= workers
def disable_workers(self, worker_ids):
self._remove_workers_from_tasks(worker_ids, remove_stakeholders=False)
for worker_id in worker_ids:
self.get_worker(worker_id).disabled = True
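# Illustrative sketch (assumption, not part of the module): persistence is a
# plain pickle round-trip of (tasks, workers, task batchers); dump() writes
# get_state() to state_path and load() restores it, then rebuilds the
# per-status index.
#
#   state = SimpleTaskState('/tmp/luigi-state.pickle')   # hypothetical path
#   state.get_worker('worker-1')                         # creates the worker
#   state.dump()                                         # pickle to disk
#   restored = SimpleTaskState('/tmp/luigi-state.pickle')
#   restored.load()                                      # _tasks/_active_workers restored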
class Scheduler(object):
"""
Async scheduler that can handle multiple workers, etc.
Can be run locally or on a server (using RemoteScheduler + server.Server).
"""
def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs):
"""
Keyword Arguments:
        :param config: an object of class "scheduler" or None (in which case the global instance will be used)
:param resources: a dict of str->int constraints
:param task_history_impl: ignore config and use this object as the task history
"""
self._config = config or scheduler(**kwargs)
self._state = SimpleTaskState(self._config.state_path)
if task_history_impl:
self._task_history = task_history_impl
elif self._config.record_task_history:
from luigi import db_task_history # Needs sqlalchemy, thus imported here
self._task_history = db_task_history.DbTaskHistory()
else:
self._task_history = history.NopHistory()
self._resources = resources or configuration.get_config().getintdict('resources') # TODO: Can we make this a Parameter?
self._make_task = functools.partial(Task, retry_policy=self._config._get_retry_policy())
self._worker_requests = {}
self._paused = False
if self._config.batch_emails:
self._email_batcher = BatchNotifier()
def load(self):
self._state.load()
def dump(self):
self._state.dump()
if self._config.batch_emails:
self._email_batcher.send_email()
@rpc_method()
def prune(self):
logger.info("Starting pruning of task graph")
self._prune_workers()
self._prune_tasks()
self._prune_emails()
logger.info("Done pruning task graph")
def _prune_workers(self):
remove_workers = []
for worker in self._state.get_active_workers():
if worker.prune(self._config):
logger.debug("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay)
remove_workers.append(worker.id)
self._state.inactivate_workers(remove_workers)
def _prune_tasks(self):
assistant_ids = set(w.id for w in self._state.get_assistants())
remove_tasks = []
for task in self._state.get_active_tasks():
self._state.fail_dead_worker_task(task, self._config, assistant_ids)
self._state.update_status(task, self._config)
if self._state.may_prune(task):
logger.info("Removing task %r", task.id)
remove_tasks.append(task.id)
self._state.inactivate_tasks(remove_tasks)
def _prune_emails(self):
if self._config.batch_emails:
self._email_batcher.update()
def _update_worker(self, worker_id, worker_reference=None, get_work=False):
        # Keep track of when the worker was last active.
# For convenience also return the worker object.
worker = self._state.get_worker(worker_id)
worker.update(worker_reference, get_work=get_work)
return worker
def _update_priority(self, task, prio, worker):
"""
Update priority of the given task.
Priority can only be increased.
If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled.
"""
task.priority = prio = max(prio, task.priority)
for dep in task.deps or []:
t = self._state.get_task(dep)
if t is not None and prio > t.priority:
self._update_priority(t, prio, worker)
@rpc_method()
def add_task_batcher(self, worker, task_family, batched_args, max_batch_size=float('inf')):
self._state.set_batcher(worker, task_family, batched_args, max_batch_size)
@rpc_method()
def forgive_failures(self, task_id=None):
status = PENDING
task = self._state.get_task(task_id)
if task is None:
return {"task_id": task_id, "status": None}
# we forgive only failures
if task.status == FAILED:
# forgive but do not forget
self._update_task_history(task, status)
self._state.set_status(task, status, self._config)
return {"task_id": task_id, "status": task.status}
@rpc_method()
def add_task(self, task_id=None, status=PENDING, runnable=True,
deps=None, new_deps=None, expl=None, resources=None,
priority=0, family='', module=None, params=None,
assistant=False, tracking_url=None, worker=None, batchable=None,
batch_id=None, retry_policy_dict={}, owners=None, **kwargs):
"""
* add task identified by task_id if it doesn't exist
* if deps is not None, update dependency list
* update status of task
* add additional workers/stakeholders
* update priority when needed
"""
assert worker is not None
worker_id = worker
worker = self._update_worker(worker_id)
retry_policy = self._generate_retry_policy(retry_policy_dict)
if worker.enabled:
_default_task = self._make_task(
task_id=task_id, status=PENDING, deps=deps, resources=resources,
priority=priority, family=family, module=module, params=params,
)
else:
_default_task = None
task = self._state.get_task(task_id, setdefault=_default_task)
if task is None or (task.status != RUNNING and not worker.enabled):
return
# for setting priority, we'll sometimes create tasks with unset family and params
if not task.family:
task.family = family
if not getattr(task, 'module', None):
task.module = module
if not task.params:
task.params = _get_default(params, {})
if batch_id is not None:
task.batch_id = batch_id
if status == RUNNING and not task.worker_running:
task.worker_running = worker_id
if batch_id:
task.resources_running = self._state.get_batch_running_tasks(batch_id)[0].resources_running
task.time_running = time.time()
if tracking_url is not None or task.status != RUNNING:
task.tracking_url = tracking_url
if task.batch_id is not None:
for batch_task in self._state.get_batch_running_tasks(task.batch_id):
batch_task.tracking_url = tracking_url
if batchable is not None:
task.batchable = batchable
if task.remove is not None:
task.remove = None # unmark task for removal so it isn't removed after being added
if expl is not None:
task.expl = expl
if task.batch_id is not None:
for batch_task in self._state.get_batch_running_tasks(task.batch_id):
batch_task.expl = expl
if not (task.status in (RUNNING, BATCH_RUNNING) and status == PENDING) or new_deps:
            # don't allow re-scheduling of a task while it is running; it must either fail or succeed first
if status == PENDING or status != task.status:
                # Update the DB only if there was an actual change, to prevent noise.
                # We also check for status == PENDING b/c that's the default value
                # (so checking for status != task.status would lie)
self._update_task_history(task, status)
self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config)
if status == FAILED and self._config.batch_emails:
batched_params, _ = self._state.get_batcher(worker_id, family)
if batched_params:
unbatched_params = {
param: value
for param, value in six.iteritems(task.params)
if param not in batched_params
}
else:
unbatched_params = task.params
try:
expl_raw = json.loads(expl)
except ValueError:
expl_raw = expl
self._email_batcher.add_failure(
task.pretty_id, task.family, unbatched_params, expl_raw, owners)
if task.status == DISABLED:
self._email_batcher.add_disable(
task.pretty_id, task.family, unbatched_params, owners)
if deps is not None:
task.deps = set(deps)
if new_deps is not None:
task.deps.update(new_deps)
if resources is not None:
task.resources = resources
if worker.enabled and not assistant:
task.stakeholders.add(worker_id)
# Task dependencies might not exist yet. Let's create dummy tasks for them for now.
# Otherwise the task dependencies might end up being pruned if scheduling takes a long time
for dep in task.deps or []:
t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority))
t.stakeholders.add(worker_id)
self._update_priority(task, priority, worker_id)
# Because some tasks (non-dynamic dependencies) are `_make_task`ed
# before we know their retry_policy, we always set it here
task.retry_policy = retry_policy
if runnable and status != FAILED and worker.enabled:
task.workers.add(worker_id)
self._state.get_worker(worker_id).tasks.add(task)
task.runnable = runnable
@rpc_method()
def announce_scheduling_failure(self, task_name, family, params, expl, owners, **kwargs):
if not self._config.batch_emails:
return
worker_id = kwargs['worker']
batched_params, _ = self._state.get_batcher(worker_id, family)
if batched_params:
unbatched_params = {
param: value
for param, value in six.iteritems(params)
if param not in batched_params
}
else:
unbatched_params = params
self._email_batcher.add_scheduling_fail(task_name, family, unbatched_params, expl, owners)
@rpc_method()
def add_worker(self, worker, info, **kwargs):
self._state.get_worker(worker).add_info(info)
@rpc_method()
def disable_worker(self, worker):
self._state.disable_workers({worker})
@rpc_method()
def set_worker_processes(self, worker, n):
self._state.get_worker(worker).add_rpc_message('set_worker_processes', n=n)
@rpc_method()
def is_paused(self):
return {'paused': self._paused}
@rpc_method()
def pause(self):
self._paused = True
@rpc_method()
def unpause(self):
self._paused = False
@rpc_method()
def update_resources(self, **resources):
if self._resources is None:
self._resources = {}
self._resources.update(resources)
@rpc_method()
def update_resource(self, resource, amount):
if not isinstance(amount, int) or amount < 0:
return False
self._resources[resource] = amount
return True
def _generate_retry_policy(self, task_retry_policy_dict):
retry_policy_dict = self._config._get_retry_policy()._asdict()
retry_policy_dict.update({k: v for k, v in six.iteritems(task_retry_policy_dict) if v is not None})
return RetryPolicy(**retry_policy_dict)
def _has_resources(self, needed_resources, used_resources):
if needed_resources is None:
return True
available_resources = self._resources or {}
for resource, amount in six.iteritems(needed_resources):
if amount + used_resources[resource] > available_resources.get(resource, 1):
return False
return True
def _used_resources(self):
used_resources = collections.defaultdict(int)
if self._resources is not None:
for task in self._state.get_active_tasks_by_status(RUNNING):
if getattr(task, 'resources_running', task.resources):
for resource, amount in six.iteritems(getattr(task, 'resources_running', task.resources)):
used_resources[resource] += amount
return used_resources
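    # Illustrative sketch (assumption, not part of the module): a task is only
    # handed out if its declared resources fit both the currently used amounts
    # and the "greedy" amounts accumulated while walking higher-ranked tasks,
    # so many low-priority tasks cannot starve a higher-priority one that
    # shares the same resource.
    #
    #   self._resources = {'db': 2}
    #   used = {'db': 1}                          # one slot already in use
    #   self._has_resources({'db': 1}, used)      # -> True  (1 + 1 <= 2)
    #   self._has_resources({'db': 2}, used)      # -> False (2 + 1 >  2)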
def _rank(self, task):
"""
        Return the rank of a task for scheduling: higher priority first,
        with older (earlier-added) tasks breaking ties.
"""
return task.priority, -task.time
def _schedulable(self, task):
if task.status != PENDING:
return False
for dep in task.deps:
dep_task = self._state.get_task(dep, default=None)
if dep_task is None or dep_task.status != DONE:
return False
return True
def _reset_orphaned_batch_running_tasks(self, worker_id):
running_batch_ids = {
task.batch_id
for task in self._state.get_active_tasks_by_status(RUNNING)
if task.worker_running == worker_id
}
orphaned_tasks = [
task for task in self._state.get_active_tasks_by_status(BATCH_RUNNING)
if task.worker_running == worker_id and task.batch_id not in running_batch_ids
]
for task in orphaned_tasks:
self._state.set_status(task, PENDING)
@rpc_method()
def count_pending(self, worker):
worker_id, worker = worker, self._state.get_worker(worker)
num_pending, num_unique_pending, num_pending_last_scheduled = 0, 0, 0
running_tasks = []
upstream_status_table = {}
for task in worker.get_tasks(self._state, RUNNING):
if self._upstream_status(task.id, upstream_status_table) == UPSTREAM_DISABLED:
continue
            # Return a list of currently running tasks to the client;
            # it makes troubleshooting easier
other_worker = self._state.get_worker(task.worker_running)
if other_worker is not None:
more_info = {'task_id': task.id, 'worker': str(other_worker)}
more_info.update(other_worker.info)
running_tasks.append(more_info)
for task in worker.get_tasks(self._state, PENDING, FAILED):
if self._upstream_status(task.id, upstream_status_table) == UPSTREAM_DISABLED:
continue
num_pending += 1
num_unique_pending += int(len(task.workers) == 1)
num_pending_last_scheduled += int(task.workers.peek(last=True) == worker_id)
return {
'n_pending_tasks': num_pending,
'n_unique_pending': num_unique_pending,
'n_pending_last_scheduled': num_pending_last_scheduled,
'worker_state': worker.state,
'running_tasks': running_tasks,
}
@rpc_method(allow_null=False)
def get_work(self, host=None, assistant=False, current_tasks=None, worker=None, **kwargs):
# TODO: remove any expired nodes
        # Algo: iterate over all nodes, find the highest priority node with no dependencies and
        # available resources.
# Resource checking looks both at currently available resources and at which resources would
# be available if all running tasks died and we rescheduled all workers greedily. We do both
# checks in order to prevent a worker with many low-priority tasks from starving other
# workers with higher priority tasks that share the same resources.
# TODO: remove tasks that can't be done, figure out if the worker has absolutely
# nothing it can wait for
if self._config.prune_on_get_work:
self.prune()
assert worker is not None
worker_id = worker
worker = self._update_worker(
worker_id,
worker_reference={'host': host},
get_work=True)
if not worker.enabled:
reply = {'n_pending_tasks': 0,
'running_tasks': [],
'task_id': None,
'n_unique_pending': 0,
'worker_state': worker.state,
}
return reply
if assistant:
self.add_worker(worker_id, [('assistant', assistant)])
batched_params, unbatched_params, batched_tasks, max_batch_size = None, None, [], 1
best_task = None
if current_tasks is not None:
ct_set = set(current_tasks)
for task in sorted(self._state.get_active_tasks_by_status(RUNNING), key=self._rank):
if task.worker_running == worker_id and task.id not in ct_set:
best_task = task
if current_tasks is not None:
# batch running tasks that weren't claimed since the last get_work go back in the pool
self._reset_orphaned_batch_running_tasks(worker_id)
greedy_resources = collections.defaultdict(int)
worker = self._state.get_worker(worker_id)
if self._paused:
relevant_tasks = []
elif worker.is_trivial_worker(self._state):
relevant_tasks = worker.get_tasks(self._state, PENDING, RUNNING)
used_resources = collections.defaultdict(int)
greedy_workers = dict() # If there's no resources, then they can grab any task
else:
relevant_tasks = self._state.get_active_tasks_by_status(PENDING, RUNNING)
used_resources = self._used_resources()
activity_limit = time.time() - self._config.worker_disconnect_delay
active_workers = self._state.get_active_workers(last_get_work_gt=activity_limit)
greedy_workers = dict((worker.id, worker.info.get('workers', 1))
for worker in active_workers)
tasks = list(relevant_tasks)
tasks.sort(key=self._rank, reverse=True)
for task in tasks:
if (best_task and batched_params and task.family == best_task.family and
len(batched_tasks) < max_batch_size and task.is_batchable() and all(
task.params.get(name) == value for name, value in unbatched_params.items()) and
task.resources == best_task.resources and self._schedulable(task)):
for name, params in batched_params.items():
params.append(task.params.get(name))
batched_tasks.append(task)
if best_task:
continue
if task.status == RUNNING and (task.worker_running in greedy_workers):
greedy_workers[task.worker_running] -= 1
for resource, amount in six.iteritems((getattr(task, 'resources_running', task.resources) or {})):
greedy_resources[resource] += amount
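            # A task only becomes the candidate if it is schedulable and its resources still fit
            # after the greedy reservations accumulated above.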
if self._schedulable(task) and self._has_resources(task.resources, greedy_resources):
in_workers = (assistant and task.runnable) or worker_id in task.workers
if in_workers and self._has_resources(task.resources, used_resources):
best_task = task
batch_param_names, max_batch_size = self._state.get_batcher(
worker_id, task.family)
if batch_param_names and task.is_batchable():
try:
batched_params = {
name: [task.params[name]] for name in batch_param_names
}
unbatched_params = {
name: value for name, value in task.params.items()
if name not in batched_params
}
batched_tasks.append(task)
except KeyError:
batched_params, unbatched_params = None, None
else:
workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers
for task_worker in workers:
if greedy_workers.get(task_worker, 0) > 0:
# use up a worker
greedy_workers[task_worker] -= 1
# keep track of the resources used in greedy scheduling
for resource, amount in six.iteritems((task.resources or {})):
greedy_resources[resource] += amount
break
reply = self.count_pending(worker_id)
if len(batched_tasks) > 1:
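            # Give every member of the batch a shared, deterministic id derived from the member task ids.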
batch_string = '|'.join(task.id for task in batched_tasks)
batch_id = hashlib.md5(batch_string.encode('utf-8')).hexdigest()
for task in batched_tasks:
self._state.set_batch_running(task, batch_id, worker_id)
combined_params = best_task.params.copy()
combined_params.update(batched_params)
reply['task_id'] = None
reply['task_family'] = best_task.family
reply['task_module'] = getattr(best_task, 'module', None)
reply['task_params'] = combined_params
reply['batch_id'] = batch_id
reply['batch_task_ids'] = [task.id for task in batched_tasks]
elif best_task:
self._state.set_status(best_task, RUNNING, self._config)
best_task.worker_running = worker_id
best_task.resources_running = best_task.resources
best_task.time_running = time.time()
self._update_task_history(best_task, RUNNING, host=host)
reply['task_id'] = best_task.id
reply['task_family'] = best_task.family
reply['task_module'] = getattr(best_task, 'module', None)
reply['task_params'] = best_task.params
else:
reply['task_id'] = None
return reply
@rpc_method(attempts=1)
def ping(self, **kwargs):
worker_id = kwargs['worker']
worker = self._update_worker(worker_id)
return {"rpc_messages": worker.fetch_rpc_messages()}
def _upstream_status(self, task_id, upstream_status_table):
if task_id in upstream_status_table:
return upstream_status_table[task_id]
elif self._state.has_task(task_id):
task_stack = [task_id]
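            # Iterative depth-first walk over the dependency graph: a task's upstream status is the
            # most severe status (per UPSTREAM_SEVERITY_KEY) among its not-yet-done dependencies.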
while task_stack:
dep_id = task_stack.pop()
dep = self._state.get_task(dep_id)
if dep:
if dep.status == DONE:
continue
if dep_id not in upstream_status_table:
if dep.status == PENDING and dep.deps:
task_stack += [dep_id] + list(dep.deps)
upstream_status_table[dep_id] = '' # will be updated postorder
else:
dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '')
upstream_status_table[dep_id] = dep_status
elif upstream_status_table[dep_id] == '' and dep.deps:
# This is the postorder update step when we set the
# status based on the previously calculated child elements
status = max((upstream_status_table.get(a_task_id, '')
for a_task_id in dep.deps),
key=UPSTREAM_SEVERITY_KEY)
upstream_status_table[dep_id] = status
return upstream_status_table[dep_id]
def _serialize_task(self, task_id, include_deps=True, deps=None):
task = self._state.get_task(task_id)
ret = {
'display_name': task.pretty_id,
'status': task.status,
'workers': list(task.workers),
'worker_running': task.worker_running,
'time_running': getattr(task, "time_running", None),
'start_time': task.time,
'last_updated': getattr(task, "updated", task.time),
'params': task.params,
'name': task.family,
'priority': task.priority,
'resources': task.resources,
'tracking_url': getattr(task, "tracking_url", None),
'status_message': getattr(task, "status_message", None)
}
if task.status == DISABLED:
ret['re_enable_able'] = task.scheduler_disable_time is not None
if include_deps:
ret['deps'] = list(task.deps if deps is None else deps)
return ret
@rpc_method()
def graph(self, **kwargs):
self.prune()
serialized = {}
seen = set()
for task in self._state.get_active_tasks():
serialized.update(self._traverse_graph(task.id, seen))
return serialized
def _filter_done(self, task_ids):
for task_id in task_ids:
task = self._state.get_task(task_id)
if task is None or task.status != DONE:
yield task_id
def _traverse_graph(self, root_task_id, seen=None, dep_func=None, include_done=True):
""" Returns the dependency graph rooted at task_id
This does a breadth-first traversal to find the nodes closest to the
root before hitting the scheduler.max_graph_nodes limit.
:param root_task_id: the id of the graph's root
:return: A map of task id to serialized node
"""
if seen is None:
seen = set()
elif root_task_id in seen:
return {}
if dep_func is None:
def dep_func(t):
return t.deps
seen.add(root_task_id)
serialized = {}
queue = collections.deque([root_task_id])
while queue:
task_id = queue.popleft()
task = self._state.get_task(task_id)
if task is None or not task.family:
logger.debug('Missing task for id [%s]', task_id)
# NOTE : If a dependency is missing from self._state there is no way to deduce the
# task family and parameters.
family_match = TASK_FAMILY_RE.match(task_id)
family = family_match.group(1) if family_match else UNKNOWN
params = {'task_id': task_id}
serialized[task_id] = {
'deps': [],
'status': UNKNOWN,
'workers': [],
'start_time': UNKNOWN,
'params': params,
'name': family,
'display_name': task_id,
'priority': 0,
}
else:
deps = dep_func(task)
if not include_done:
deps = list(self._filter_done(deps))
serialized[task_id] = self._serialize_task(task_id, deps=deps)
for dep in sorted(deps):
if dep not in seen:
seen.add(dep)
queue.append(dep)
if task_id != root_task_id:
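                    # Non-root nodes drop display_name, presumably to keep the serialized graph small.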
del serialized[task_id]['display_name']
if len(serialized) >= self._config.max_graph_nodes:
break
return serialized
@rpc_method()
def dep_graph(self, task_id, include_done=True, **kwargs):
self.prune()
if not self._state.has_task(task_id):
return {}
return self._traverse_graph(task_id, include_done=include_done)
@rpc_method()
def inverse_dep_graph(self, task_id, include_done=True, **kwargs):
self.prune()
if not self._state.has_task(task_id):
return {}
inverse_graph = collections.defaultdict(set)
for task in self._state.get_active_tasks():
for dep in task.deps:
inverse_graph[dep].add(task.id)
return self._traverse_graph(
task_id, dep_func=lambda t: inverse_graph[t.id], include_done=include_done)
@rpc_method()
def task_list(self, status='', upstream_status='', limit=True, search=None, max_shown_tasks=None,
**kwargs):
"""
Query for a subset of tasks by status.
"""
self.prune()
result = {}
upstream_status_table = {} # used to memoize upstream status
if search is None:
def filter_func(_):
return True
else:
terms = search.split()
def filter_func(t):
return all(term in t.pretty_id for term in terms)
tasks = self._state.get_active_tasks_by_status(status) if status else self._state.get_active_tasks()
for task in filter(filter_func, tasks):
if task.status != PENDING or not upstream_status or upstream_status == self._upstream_status(task.id, upstream_status_table):
serialized = self._serialize_task(task.id, include_deps=False)
result[task.id] = serialized
if limit and len(result) > (max_shown_tasks or self._config.max_shown_tasks):
return {'num_tasks': len(result)}
return result
def _first_task_display_name(self, worker):
task_id = worker.info.get('first_task', '')
if self._state.has_task(task_id):
return self._state.get_task(task_id).pretty_id
else:
return task_id
@rpc_method()
def worker_list(self, include_running=True, **kwargs):
self.prune()
workers = [
dict(
name=worker.id,
last_active=worker.last_active,
started=worker.started,
state=worker.state,
first_task_display_name=self._first_task_display_name(worker),
num_unread_rpc_messages=len(worker.rpc_messages),
**worker.info
) for worker in self._state.get_active_workers()]
workers.sort(key=lambda worker: worker['started'], reverse=True)
if include_running:
running = collections.defaultdict(dict)
for task in self._state.get_active_tasks_by_status(RUNNING):
if task.worker_running:
running[task.worker_running][task.id] = self._serialize_task(task.id, include_deps=False)
num_pending = collections.defaultdict(int)
num_uniques = collections.defaultdict(int)
for task in self._state.get_active_tasks_by_status(PENDING):
for worker in task.workers:
num_pending[worker] += 1
if len(task.workers) == 1:
num_uniques[list(task.workers)[0]] += 1
for worker in workers:
tasks = running[worker['name']]
worker['num_running'] = len(tasks)
worker['num_pending'] = num_pending[worker['name']]
worker['num_uniques'] = num_uniques[worker['name']]
worker['running'] = tasks
return workers
@rpc_method()
def resource_list(self):
"""
        Resource usage info and the tasks currently consuming each resource.
"""
self.prune()
resources = [
dict(
name=resource,
num_total=r_dict['total'],
num_used=r_dict['used']
) for resource, r_dict in six.iteritems(self.resources())]
if self._resources is not None:
consumers = collections.defaultdict(dict)
for task in self._state.get_active_tasks_by_status(RUNNING):
if task.status == RUNNING and task.resources:
for resource, amount in six.iteritems(task.resources):
consumers[resource][task.id] = self._serialize_task(task.id, include_deps=False)
for resource in resources:
tasks = consumers[resource['name']]
resource['num_consumer'] = len(tasks)
resource['running'] = tasks
return resources
def resources(self):
        ''' get total resources and the amount of each currently in use '''
used_resources = self._used_resources()
ret = collections.defaultdict(dict)
for resource, total in six.iteritems(self._resources):
ret[resource]['total'] = total
if resource in used_resources:
ret[resource]['used'] = used_resources[resource]
else:
ret[resource]['used'] = 0
return ret
@rpc_method()
def task_search(self, task_str, **kwargs):
"""
Query for a subset of tasks by task_id.
        :param task_str: substring that must appear in a task's id
        :return: matching tasks, serialized and grouped by status
"""
self.prune()
result = collections.defaultdict(dict)
for task in self._state.get_active_tasks():
if task.id.find(task_str) != -1:
serialized = self._serialize_task(task.id, include_deps=False)
result[task.status][task.id] = serialized
return result
@rpc_method()
def re_enable_task(self, task_id):
serialized = {}
task = self._state.get_task(task_id)
if task and task.status == DISABLED and task.scheduler_disable_time:
self._state.re_enable(task, self._config)
serialized = self._serialize_task(task_id)
return serialized
@rpc_method()
def fetch_error(self, task_id, **kwargs):
if self._state.has_task(task_id):
task = self._state.get_task(task_id)
return {"taskId": task_id, "error": task.expl, 'displayName': task.pretty_id}
else:
return {"taskId": task_id, "error": ""}
@rpc_method()
def set_task_status_message(self, task_id, status_message):
if self._state.has_task(task_id):
task = self._state.get_task(task_id)
task.status_message = status_message
if task.status == RUNNING and task.batch_id is not None:
for batch_task in self._state.get_batch_running_tasks(task.batch_id):
batch_task.status_message = status_message
@rpc_method()
def get_task_status_message(self, task_id):
if self._state.has_task(task_id):
task = self._state.get_task(task_id)
return {"taskId": task_id, "statusMessage": task.status_message}
else:
return {"taskId": task_id, "statusMessage": ""}
def _update_task_history(self, task, status, host=None):
try:
if status == DONE or status == FAILED:
successful = (status == DONE)
self._task_history.task_finished(task, successful)
elif status == PENDING:
self._task_history.task_scheduled(task)
elif status == RUNNING:
self._task_history.task_started(task, host)
except BaseException:
logger.warning("Error saving Task history", exc_info=True)
@property
def task_history(self):
# Used by server.py to expose the calls
return self._task_history
|
|
# -*- coding: utf-8 -*-
import datetime
import os.path
import zipfile
from django.core import mail
from django.core.files import temp
from django.core.files.base import File as DjangoFile
from django.urls import reverse
from unittest import mock
from pyquery import PyQuery as pq
from olympia import amo
from olympia.accounts.views import API_TOKEN_COOKIE
from olympia.activity.models import ActivityLog
from olympia.activity.utils import ACTIVITY_MAIL_GROUP
from olympia.addons.models import Addon, AddonReviewerFlags
from olympia.amo.templatetags.jinja_helpers import absolutify
from olympia.amo.tests import (
TestCase,
formset,
initial,
reverse_ns,
version_factory,
user_factory,
)
from olympia.applications.models import AppVersion
from olympia.constants.promoted import RECOMMENDED
from olympia.files.models import File
from olympia.users.models import Group, UserProfile
from olympia.versions.models import ApplicationsVersions, Version
class TestVersion(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestVersion, self).setUp()
self.client.login(email='[email protected]')
self.user = UserProfile.objects.get(email='[email protected]')
self.addon = self.get_addon()
self.version = Version.objects.get(id=81551)
self.url = self.addon.get_dev_url('versions')
self.disable_url = self.addon.get_dev_url('disable')
self.enable_url = self.addon.get_dev_url('enable')
self.delete_url = reverse('devhub.versions.delete', args=['a3615'])
self.delete_data = {'addon_id': self.addon.pk, 'version_id': self.version.pk}
def get_addon(self):
return Addon.objects.get(id=3615)
def get_doc(self):
response = self.client.get(self.url)
assert response.status_code == 200
return pq(response.content)
def test_version_status_public(self):
doc = self.get_doc()
assert doc('.addon-status')
self.addon.update(status=amo.STATUS_DISABLED, disabled_by_user=True)
doc = self.get_doc()
assert doc('.addon-status .status-admin-disabled')
assert doc('.addon-status .status-admin-disabled').text() == (
'Disabled by Mozilla'
)
self.addon.update(disabled_by_user=False)
doc = self.get_doc()
assert doc('.addon-status .status-admin-disabled').text() == (
'Disabled by Mozilla'
)
self.addon.update(status=amo.STATUS_APPROVED, disabled_by_user=True)
doc = self.get_doc()
assert doc('.addon-status .status-disabled').text() == ('Invisible')
def test_label_open_marked_safe(self):
doc = self.get_doc()
assert '<strong>Visible:</strong>' in doc.html()
self.addon.update(status=amo.STATUS_APPROVED, disabled_by_user=True)
doc = self.get_doc()
assert '<strong>Invisible:</strong>' in doc.html()
def test_upload_link_label_in_edit_nav(self):
url = reverse('devhub.versions.edit', args=(self.addon.slug, self.version.pk))
response = self.client.get(url)
link = pq(response.content)('.addon-status>.addon-upload>strong>a')
assert link.text() == 'Upload New Version'
assert link.attr('href') == (
reverse('devhub.submit.version', args=[self.addon.slug])
)
# Don't show for STATUS_DISABLED addons.
self.addon.update(status=amo.STATUS_DISABLED)
response = self.client.get(url)
assert not pq(response.content)('.addon-status>.addon-upload>strong>a')
def test_delete_message(self):
"""Make sure we warn our users of the pain they will feel."""
response = self.client.get(self.url)
doc = pq(response.content)
assert doc('#modal-delete p').eq(0).text() == (
'Deleting your add-on will permanently delete all versions and '
'files you have submitted for this add-on, listed or not. '
'The add-on ID will continue to be linked to your account, so '
"others won't be able to submit versions using the same ID."
)
def test_delete_message_if_bits_are_messy(self):
"""Make sure we warn krupas of the pain they will feel."""
self.addon.status = amo.STATUS_NOMINATED
self.addon.save()
response = self.client.get(self.url)
doc = pq(response.content)
assert doc('#modal-delete p').eq(0).text() == (
'Deleting your add-on will permanently delete all versions and '
'files you have submitted for this add-on, listed or not. '
'The add-on ID will continue to be linked to your account, so '
"others won't be able to submit versions using the same ID."
)
def test_delete_message_incomplete(self):
"""
        If an add-on is incomplete (status STATUS_NULL), the developer shouldn't be
        bothered with a deny list threat if they hit delete.
"""
# Need to hard delete the version or add-on will be soft-deleted.
self.addon.current_version.delete(hard=True)
self.addon.reload()
assert self.addon.status == amo.STATUS_NULL
response = self.client.get(self.url)
doc = pq(response.content)
# Normally 2 paragraphs, one is the warning which we should take out.
assert doc('#modal-delete p.warning').length == 0
def test_delete_version(self):
self.client.post(self.delete_url, self.delete_data)
assert not Version.objects.filter(pk=81551).exists()
assert ActivityLog.objects.filter(action=amo.LOG.DELETE_VERSION.id).count() == 1
def test_version_delete_version_deleted(self):
self.version.delete()
response = self.client.post(self.delete_url, self.delete_data)
assert response.status_code == 404
def test_cant_delete_version(self):
self.client.logout()
response = self.client.post(self.delete_url, self.delete_data)
assert response.status_code == 302
assert Version.objects.filter(pk=81551).exists()
def test_version_delete_status_null(self):
response = self.client.post(self.delete_url, self.delete_data)
assert response.status_code == 302
assert self.addon.versions.count() == 0
assert Addon.objects.get(id=3615).status == amo.STATUS_NULL
def test_disable_version(self):
self.delete_data['disable_version'] = ''
self.client.post(self.delete_url, self.delete_data)
assert Version.objects.get(pk=81551).is_user_disabled
assert ActivityLog.objects.filter(action=amo.LOG.DELETE_VERSION.id).count() == 0
assert (
ActivityLog.objects.filter(action=amo.LOG.DISABLE_VERSION.id).count() == 1
)
def test_cant_disable_or_delete_current_version_recommended(self):
# If the add-on is recommended you can't disable or delete the current
# version.
self.make_addon_promoted(self.addon, RECOMMENDED, approve_version=True)
assert self.version == self.addon.current_version
self.client.post(self.delete_url, self.delete_data)
assert Version.objects.filter(pk=81551).exists()
assert not Version.objects.get(pk=81551).is_user_disabled
assert ActivityLog.objects.filter(action=amo.LOG.DELETE_VERSION.id).count() == 0
assert (
ActivityLog.objects.filter(action=amo.LOG.DISABLE_VERSION.id).count() == 0
)
self.delete_data['disable_version'] = ''
self.client.post(self.delete_url, self.delete_data)
assert Version.objects.filter(pk=81551).exists()
assert not Version.objects.get(pk=81551).is_user_disabled
assert ActivityLog.objects.filter(action=amo.LOG.DELETE_VERSION.id).count() == 0
assert (
ActivityLog.objects.filter(action=amo.LOG.DISABLE_VERSION.id).count() == 0
)
def test_can_disable_or_delete_current_ver_if_previous_recommended(self):
# If the add-on is recommended you *can* disable or delete the current
# version if the previous version is approved for recommendation too.
self.make_addon_promoted(self.addon, RECOMMENDED, approve_version=True)
previous_version = self.version
self.version = version_factory(addon=self.addon, promotion_approved=True)
self.addon.reload()
assert self.version == self.addon.current_version
assert previous_version != self.version
self.delete_data['version_id'] = self.version.id
self.delete_data['disable_version'] = ''
self.client.post(self.delete_url, self.delete_data)
assert Version.objects.filter(pk=self.version.id).exists()
assert Version.objects.get(pk=self.version.id).is_user_disabled
assert ActivityLog.objects.filter(action=amo.LOG.DELETE_VERSION.id).count() == 0
assert (
ActivityLog.objects.filter(action=amo.LOG.DISABLE_VERSION.id).count() == 1
)
del self.delete_data['disable_version']
self.client.post(self.delete_url, self.delete_data)
assert not Version.objects.filter(pk=self.version.id).exists()
assert ActivityLog.objects.filter(action=amo.LOG.DELETE_VERSION.id).count() == 1
assert (
ActivityLog.objects.filter(action=amo.LOG.DISABLE_VERSION.id).count() == 1
)
self.addon.reload()
assert self.addon.current_version == previous_version
# It's still recommended.
assert self.addon.promoted_group() == RECOMMENDED
def test_can_still_disable_or_delete_old_version_recommended(self):
# If the add-on is recommended, you can still disable or delete older
# versions than the current one.
self.make_addon_promoted(self.addon, RECOMMENDED, approve_version=True)
version_factory(addon=self.addon, promotion_approved=True)
self.addon.reload()
assert self.version != self.addon.current_version
self.delete_data['disable_version'] = ''
self.client.post(self.delete_url, self.delete_data)
assert Version.objects.filter(pk=81551).exists()
assert Version.objects.get(pk=81551).is_user_disabled
assert ActivityLog.objects.filter(action=amo.LOG.DELETE_VERSION.id).count() == 0
assert (
ActivityLog.objects.filter(action=amo.LOG.DISABLE_VERSION.id).count() == 1
)
del self.delete_data['disable_version']
self.client.post(self.delete_url, self.delete_data)
assert not Version.objects.filter(pk=81551).exists()
assert ActivityLog.objects.filter(action=amo.LOG.DELETE_VERSION.id).count() == 1
assert (
ActivityLog.objects.filter(action=amo.LOG.DISABLE_VERSION.id).count() == 1
)
def test_can_still_disable_or_delete_current_version_unapproved(self):
# If the add-on is in recommended group but hasn't got approval yet,
# then deleting the current version is fine.
self.make_addon_promoted(self.addon, RECOMMENDED)
assert self.version == self.addon.current_version
self.delete_data['disable_version'] = ''
self.client.post(self.delete_url, self.delete_data)
assert Version.objects.filter(pk=81551).exists()
assert Version.objects.get(pk=81551).is_user_disabled
assert ActivityLog.objects.filter(action=amo.LOG.DELETE_VERSION.id).count() == 0
assert (
ActivityLog.objects.filter(action=amo.LOG.DISABLE_VERSION.id).count() == 1
)
del self.delete_data['disable_version']
self.client.post(self.delete_url, self.delete_data)
assert not Version.objects.filter(pk=81551).exists()
assert ActivityLog.objects.filter(action=amo.LOG.DELETE_VERSION.id).count() == 1
assert (
ActivityLog.objects.filter(action=amo.LOG.DISABLE_VERSION.id).count() == 1
)
def test_reenable_version(self):
Version.objects.get(pk=81551).all_files[0].update(
status=amo.STATUS_DISABLED, original_status=amo.STATUS_APPROVED
)
self.reenable_url = reverse('devhub.versions.reenable', args=['a3615'])
response = self.client.post(self.reenable_url, self.delete_data, follow=True)
assert response.status_code == 200
assert not Version.objects.get(pk=81551).is_user_disabled
assert ActivityLog.objects.filter(action=amo.LOG.ENABLE_VERSION.id).count() == 1
def test_reenable_deleted_version(self):
Version.objects.get(pk=81551).delete()
self.delete_url = reverse('devhub.versions.reenable', args=['a3615'])
response = self.client.post(self.delete_url, self.delete_data)
assert response.status_code == 404
assert ActivityLog.objects.filter(action=amo.LOG.ENABLE_VERSION.id).count() == 0
def _extra_version_and_file(self, status):
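        # Helper: create and save a second version for the add-on with a single file in the given status.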
version = Version.objects.get(id=81551)
version_two = Version(
addon=self.addon, license=version.license, version='1.2.3'
)
version_two.save()
file_two = File(status=status, version=version_two)
file_two.save()
return version_two, file_two
def test_version_delete_status(self):
self._extra_version_and_file(amo.STATUS_APPROVED)
response = self.client.post(self.delete_url, self.delete_data)
assert response.status_code == 302
assert self.addon.versions.count() == 1
assert Addon.objects.get(id=3615).status == amo.STATUS_APPROVED
def test_version_delete_status_unreviewed(self):
self._extra_version_and_file(amo.STATUS_AWAITING_REVIEW)
response = self.client.post(self.delete_url, self.delete_data)
assert response.status_code == 302
assert self.addon.versions.count() == 1
assert Addon.objects.get(id=3615).status == amo.STATUS_NOMINATED
@mock.patch('olympia.files.models.File.hide_disabled_file')
def test_user_can_disable_addon(self, hide_mock):
version = self.addon.current_version
self.addon.update(status=amo.STATUS_APPROVED, disabled_by_user=False)
response = self.client.post(self.disable_url)
assert response.status_code == 302
addon = Addon.objects.get(id=3615)
version.reload()
assert addon.disabled_by_user
assert addon.status == amo.STATUS_APPROVED
assert hide_mock.called
# Check we didn't change the status of the files.
assert version.files.all()[0].status == amo.STATUS_APPROVED
entry = ActivityLog.objects.get()
assert entry.action == amo.LOG.USER_DISABLE.id
msg = entry.to_string()
assert str(self.addon.name) in msg, 'Unexpected: %r' % msg
@mock.patch('olympia.files.models.File.hide_disabled_file')
def test_user_can_disable_addon_pending_version(self, hide_mock):
self.addon.update(status=amo.STATUS_APPROVED, disabled_by_user=False)
(new_version, _) = self._extra_version_and_file(amo.STATUS_AWAITING_REVIEW)
assert (
self.addon.find_latest_version(channel=amo.RELEASE_CHANNEL_LISTED)
== new_version
)
response = self.client.post(self.disable_url)
assert response.status_code == 302
addon = Addon.objects.get(id=3615)
assert addon.disabled_by_user
assert addon.status == amo.STATUS_APPROVED
assert hide_mock.called
# Check we disabled the file pending review.
assert new_version.all_files[0].status == amo.STATUS_DISABLED
# latest version should be reset when the file/version was disabled.
assert (
self.addon.find_latest_version(channel=amo.RELEASE_CHANNEL_LISTED)
!= new_version
)
entry = ActivityLog.objects.latest('pk')
assert entry.action == amo.LOG.USER_DISABLE.id
msg = entry.to_string()
assert str(self.addon.name) in msg, 'Unexpected: %r' % msg
@mock.patch('olympia.files.models.File.hide_disabled_file')
def test_disabling_addon_awaiting_review_disables_version(self, hide_mock):
self.addon.update(status=amo.STATUS_AWAITING_REVIEW, disabled_by_user=False)
self.version.all_files[0].update(status=amo.STATUS_AWAITING_REVIEW)
res = self.client.post(self.disable_url)
assert res.status_code == 302
addon = Addon.objects.get(id=3615)
assert addon.disabled_by_user
assert addon.status == amo.STATUS_NULL
assert hide_mock.called
# Check we disabled the file pending review.
self.version = Version.objects.get(id=self.version.id)
assert self.version.all_files[0].status == amo.STATUS_DISABLED
def test_user_get(self):
assert self.client.get(self.enable_url).status_code == 405
def test_user_can_enable_addon(self):
self.addon.update(status=amo.STATUS_APPROVED, disabled_by_user=True)
response = self.client.post(self.enable_url)
self.assert3xx(response, self.url, 302)
addon = self.get_addon()
assert not addon.disabled_by_user
assert addon.status == amo.STATUS_APPROVED
entry = ActivityLog.objects.get()
assert entry.action == amo.LOG.USER_ENABLE.id
msg = entry.to_string()
assert str(self.addon.name) in msg, 'Unexpected: %r' % msg
def test_unprivileged_user_cant_disable_addon(self):
self.addon.update(disabled_by_user=False)
self.client.logout()
response = self.client.post(self.disable_url)
assert response.status_code == 302
assert not Addon.objects.get(id=3615).disabled_by_user
def test_non_owner_cant_disable_addon(self):
self.addon.update(disabled_by_user=False)
self.client.logout()
assert self.client.login(email='[email protected]')
response = self.client.post(self.disable_url)
assert response.status_code == 403
assert not Addon.objects.get(id=3615).disabled_by_user
def test_non_owner_cant_enable_addon(self):
self.addon.update(disabled_by_user=False)
self.client.logout()
assert self.client.login(email='[email protected]')
response = self.client.get(self.enable_url)
assert response.status_code == 403
assert not Addon.objects.get(id=3615).disabled_by_user
def test_non_owner_cant_change_status(self):
"""A non-owner can't use the radio buttons."""
self.addon.update(disabled_by_user=False)
self.client.logout()
assert self.client.login(email='[email protected]')
response = self.client.get(self.url)
assert response.status_code == 403
def test_published_addon_radio(self):
"""Published (listed) addon is selected: can hide or publish."""
self.addon.update(disabled_by_user=False)
response = self.client.get(self.url)
doc = pq(response.content)
assert doc('.enable-addon').attr('checked') == 'checked'
enable_url = self.addon.get_dev_url('enable')
assert doc('.enable-addon').attr('data-url') == enable_url
assert not doc('.enable-addon').attr('disabled')
assert doc('#modal-disable')
assert not doc('.disable-addon').attr('checked')
assert not doc('.disable-addon').attr('disabled')
def test_hidden_addon_radio(self):
"""Hidden (disabled) addon is selected: can hide or publish."""
self.addon.update(disabled_by_user=True)
response = self.client.get(self.url)
doc = pq(response.content)
assert not doc('.enable-addon').attr('checked')
assert not doc('.enable-addon').attr('disabled')
assert doc('.disable-addon').attr('checked') == 'checked'
assert not doc('.disable-addon').attr('disabled')
assert not doc('#modal-disable')
def test_status_disabled_addon_radio(self):
"""Disabled by Mozilla addon: hidden selected, can't change status."""
self.addon.update(status=amo.STATUS_DISABLED, disabled_by_user=False)
response = self.client.get(self.url)
doc = pq(response.content)
assert not doc('.enable-addon').attr('checked')
assert doc('.enable-addon').attr('disabled') == 'disabled'
assert doc('.disable-addon').attr('checked') == 'checked'
assert doc('.disable-addon').attr('disabled') == 'disabled'
def test_no_listed_versions_already_enabled(self):
self.addon.versions.all().delete()
response = self.client.get(self.url)
doc = pq(response.content)
assert not doc('.enable-addon')
assert not doc('.disable-addon')
def test_no_listed_versions_already_disabled(self):
# If somehow the add-on has no listed versions but is invisible, we
# allow them to switch back to visible so that they can submit listed
# versions.
self.addon.versions.all().delete()
self.addon.update(disabled_by_user=True)
response = self.client.get(self.url)
doc = pq(response.content)
assert doc('.enable-addon')
assert doc('.disable-addon')
assert not doc('.enable-addon').attr('checked')
assert not doc('.enable-addon').attr('disabled')
assert doc('.disable-addon').attr('checked') == 'checked'
assert not doc('.disable-addon').attr('disabled')
def test_cancel_get(self):
cancel_url = reverse('devhub.addons.cancel', args=['a3615'])
assert self.client.get(cancel_url).status_code == 405
def test_cancel_wrong_status(self):
cancel_url = reverse('devhub.addons.cancel', args=['a3615'])
for status in Addon.STATUS_CHOICES:
if status in (amo.STATUS_NOMINATED, amo.STATUS_DELETED):
continue
self.addon.update(status=status)
self.client.post(cancel_url)
assert Addon.objects.get(id=3615).status == status
def test_cancel(self):
cancel_url = reverse('devhub.addons.cancel', args=['a3615'])
self.addon.update(status=amo.STATUS_NOMINATED)
self.client.post(cancel_url)
assert Addon.objects.get(id=3615).status == amo.STATUS_NULL
def test_not_cancel(self):
self.client.logout()
cancel_url = reverse('devhub.addons.cancel', args=['a3615'])
assert self.addon.status == amo.STATUS_APPROVED
response = self.client.post(cancel_url)
assert response.status_code == 302
assert Addon.objects.get(id=3615).status == amo.STATUS_APPROVED
def test_cancel_button(self):
for status in Addon.STATUS_CHOICES:
if status != amo.STATUS_NOMINATED:
continue
self.addon.update(status=status)
response = self.client.get(self.url)
doc = pq(response.content)
assert doc('#cancel-review')
assert doc('#modal-cancel')
def test_not_cancel_button(self):
for status in Addon.STATUS_CHOICES:
if status == amo.STATUS_NOMINATED:
continue
self.addon.update(status=status)
response = self.client.get(self.url)
doc = pq(response.content)
assert not doc('#cancel-review'), status
assert not doc('#modal-cancel'), status
def test_incomplete_request_review(self):
self.addon.update(status=amo.STATUS_NULL)
doc = pq(self.client.get(self.url).content)
buttons = doc('.version-status-actions form button').text()
assert buttons == 'Request Review'
def test_in_submission_can_request_review(self):
self.addon.update(status=amo.STATUS_NULL)
latest_version = self.addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED
)
for file_ in latest_version.files.all():
file_.update(status=amo.STATUS_DISABLED)
version_factory(addon=self.addon, file_kw={'status': amo.STATUS_DISABLED})
doc = pq(self.client.get(self.url).content)
buttons = doc('.version-status-actions form button')
# We should only show the links for one of the disabled versions.
assert buttons.length == 1
assert buttons.text() == 'Request Review'
def test_reviewed_cannot_request_review(self):
self.addon.update(status=amo.STATUS_NULL)
latest_version = self.addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED
)
for file_ in latest_version.files.all():
file_.update(reviewed=datetime.datetime.now(), status=amo.STATUS_DISABLED)
version_factory(
addon=self.addon,
file_kw={
'reviewed': datetime.datetime.now(),
'status': amo.STATUS_DISABLED,
},
)
doc = pq(self.client.get(self.url).content)
buttons = doc('.version-status-actions form button')
# We should only show the links for one of the disabled versions.
assert buttons.length == 0
def test_version_history(self):
self.client.cookies[API_TOKEN_COOKIE] = 'magicbeans'
v1 = self.version
v2, _ = self._extra_version_and_file(amo.STATUS_AWAITING_REVIEW)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
show_links = doc('.review-history-show')
assert show_links.length == 3
assert show_links[0].attrib['data-div'] == '#%s-review-history' % v1.id
assert not show_links[1].attrib.get('data-div')
assert show_links[2].attrib['data-div'] == '#%s-review-history' % v2.id
# All 3 links will have a 'data-version' attribute.
assert show_links[0].attrib['data-version'] == str(v1.id)
# But the 2nd link will point to the latest version in the channel.
assert show_links[1].attrib['data-version'] == str(v2.id)
assert show_links[2].attrib['data-version'] == str(v2.id)
# Test review history
review_history_td = doc('#%s-review-history' % v1.id)[0]
assert review_history_td.attrib['data-token'] == 'magicbeans'
api_url = absolutify(
reverse_ns(
'version-reviewnotes-list', args=[self.addon.id, self.version.id]
)
)
assert review_history_td.attrib['data-api-url'] == api_url
assert doc('.review-history-hide').length == 2
pending_activity_count = doc('.review-history-pending-count')
# No counter, because we don't have any pending activity to show.
assert pending_activity_count.length == 0
# Reply box div is there (only one)
assert doc('.dev-review-reply-form').length == 1
review_form = doc('.dev-review-reply-form')[0]
        assert review_form.attrib['action'] == api_url
        assert review_form.attrib['data-token'] == 'magicbeans'
        assert review_form.attrib['data-history'] == '#%s-review-history' % v2.id
def test_version_history_mixed_channels(self):
v1 = self.version
v2, _ = self._extra_version_and_file(amo.STATUS_AWAITING_REVIEW)
v2.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
# Should be 2 reply boxes, one for each channel
assert doc('.dev-review-reply-form').length == 2
        assert doc('.dev-review-reply-form')[0].attrib['data-history'] == (
            '#%s-review-history' % v1.id
        )
        assert doc('.dev-review-reply-form')[1].attrib['data-history'] == (
            '#%s-review-history' % v2.id
        )
def test_pending_activity_count(self):
v2, _ = self._extra_version_and_file(amo.STATUS_AWAITING_REVIEW)
# Add some activity log messages
ActivityLog.create(amo.LOG.REVIEWER_REPLY_VERSION, v2.addon, v2, user=self.user)
ActivityLog.create(amo.LOG.REVIEWER_REPLY_VERSION, v2.addon, v2, user=self.user)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
        # Two versions, but three review-history-show links because one of them is the reply link.
assert doc('.review-history-show').length == 3
# Two versions, but only one counter, for the latest/deleted version
pending_activity_count = doc('.review-history-pending-count')
assert pending_activity_count.length == 1
# There are two activity logs pending
assert pending_activity_count.text() == '2'
def test_channel_tag(self):
self.addon.current_version.update(created=self.days_ago(1))
v2, _ = self._extra_version_and_file(amo.STATUS_DISABLED)
self.addon.versions.update(channel=amo.RELEASE_CHANNEL_LISTED)
self.addon.update_version()
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert doc('td.file-status').length == 2
        # No tag shown because all versions are listed
assert doc('span.distribution-tag-listed').length == 0
assert doc('span.distribution-tag-unlisted').length == 0
# Make all the versions unlisted.
self.addon.versions.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
self.addon.update_version()
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert doc('td.file-status').length == 2
        # No tag shown because all versions are unlisted
assert doc('span.distribution-tag-listed').length == 0
assert doc('span.distribution-tag-unlisted').length == 0
# Make one of the versions listed.
v2.update(channel=amo.RELEASE_CHANNEL_LISTED)
v2.all_files[0].update(status=amo.STATUS_AWAITING_REVIEW)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
file_status_tds = doc('td.file-status')
assert file_status_tds.length == 2
        # Channel tags are shown because there are both listed and unlisted versions.
assert file_status_tds('span.distribution-tag-listed').length == 1
assert file_status_tds('span.distribution-tag-unlisted').length == 1
# Extra tags in the headers too
assert doc('h3 span.distribution-tag-listed').length == 2
class TestVersionEditBase(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super().setUp()
self.user = UserProfile.objects.get(email='[email protected]')
self.client.login(email=self.user.email)
self.addon = self.get_addon()
self.version = self.get_version()
self.url = reverse('devhub.versions.edit', args=['a3615', self.version.id])
self.v1, _created = AppVersion.objects.get_or_create(
application=amo.FIREFOX.id, version='1.0'
)
self.v5, _created = AppVersion.objects.get_or_create(
application=amo.FIREFOX.id, version='5.0'
)
def get_addon(self):
return Addon.objects.get(id=3615)
def get_version(self):
return self.get_addon().current_version
def formset(self, *args, **kw):
return formset(*args, **kw)
class TestVersionEditDetails(TestVersionEditBase):
def setUp(self):
super().setUp()
ctx = self.client.get(self.url).context
compat = initial(ctx['compat_form'].forms[0])
self.initial = formset(compat)
def formset(self, *args, **kw):
return super().formset(*args, **{**self.initial, **kw})
def test_edit_notes(self):
data = self.formset(release_notes='xx', approval_notes='yy')
response = self.client.post(self.url, data)
assert response.status_code == 302
version = self.get_version()
assert str(version.release_notes) == 'xx'
assert str(version.approval_notes) == 'yy'
def test_version_number_redirect(self):
url = self.url.replace(str(self.version.id), self.version.version)
response = self.client.get(url, follow=True)
self.assert3xx(response, self.url)
def test_version_deleted(self):
self.version.delete()
response = self.client.get(self.url)
assert response.status_code == 404
data = self.formset(release_notes='xx', approval_notes='yy')
response = self.client.post(self.url, data)
assert response.status_code == 404
def test_cant_upload(self):
response = self.client.get(self.url)
doc = pq(response.content)
assert not doc('a.add-file')
self.version.files.all().delete()
response = self.client.get(self.url)
doc = pq(response.content)
assert not doc('a.add-file')
def test_add(self):
response = self.client.get(self.url)
doc = pq(response.content)
assert response.context['compat_form'].extra_forms
assert doc('p.add-app')[0].attrib['class'] == 'add-app'
def test_add_not(self):
        for app_id in [18, 52, 59, 60, 61]:
            av = AppVersion(application=app_id, version='1')
            av.save()
            ApplicationsVersions(
                application=app_id, min=av, max=av, version=self.version
            ).save()
response = self.client.get(self.url)
doc = pq(response.content)
assert not response.context['compat_form'].extra_forms
assert doc('p.add-app')[0].attrib['class'] == 'add-app hide'
def test_existing_source_link(self):
with temp.NamedTemporaryFile(
suffix='.zip', dir=temp.gettempdir()
) as source_file:
with zipfile.ZipFile(source_file, 'w') as zip_file:
zip_file.writestr('foo', 'a' * (2 ** 21))
source_file.seek(0)
self.version.source.save(
os.path.basename(source_file.name), DjangoFile(source_file)
)
self.version.save()
response = self.client.get(self.url)
doc = pq(response.content)
link = doc('.current-source-link')
assert link
assert link.text() == 'View current'
assert link[0].attrib['href'] == reverse(
'downloads.source', args=(self.version.pk,)
)
def test_should_accept_zip_source_file(self):
with temp.NamedTemporaryFile(
suffix='.zip', dir=temp.gettempdir()
) as source_file:
with zipfile.ZipFile(source_file, 'w') as zip_file:
zip_file.writestr('foo', 'a' * (2 ** 21))
source_file.seek(0)
data = self.formset(source=source_file)
response = self.client.post(self.url, data)
assert response.status_code == 302
version = Version.objects.get(pk=self.version.pk)
assert version.source
assert version.addon.needs_admin_code_review
# Check that the corresponding automatic activity log has been created.
assert ActivityLog.objects.filter(
action=amo.LOG.SOURCE_CODE_UPLOADED.id
).exists()
log = ActivityLog.objects.get(action=amo.LOG.SOURCE_CODE_UPLOADED.id)
assert log.user == self.user
assert log.details is None
assert log.arguments == [self.addon, self.version]
def test_email_is_sent_to_relevant_people_for_source_code_upload(self):
# Have a reviewer review a version.
reviewer = user_factory()
self.grant_permission(reviewer, 'Addons:Review')
ActivityLog.create(
amo.LOG.REJECT_VERSION_DELAYED, self.addon, self.version, user=reviewer
)
# Add an extra developer to the add-on
extra_author = user_factory()
self.addon.authors.add(extra_author)
# Add someone in group meant to receive a copy of all activity emails.
group, _ = Group.objects.get_or_create(name=ACTIVITY_MAIL_GROUP)
staff_user = user_factory()
staff_user.groups.add(group)
# Have the developer upload source file for the version reviewed.
self.test_should_accept_zip_source_file()
# Check that an email has been sent to relevant people.
assert len(mail.outbox) == 3
for message in mail.outbox:
assert message.subject == ('Mozilla Add-ons: Delicious Bookmarks 2.1.072')
assert 'Source code uploaded' in message.body
# Check each message was sent separately to who we are meant to notify.
assert mail.outbox[0].to != mail.outbox[1].to != mail.outbox[2].to
assert set(mail.outbox[0].to + mail.outbox[1].to + mail.outbox[2].to) == set(
[reviewer.email, extra_author.email, staff_user.email]
)
def test_should_not_accept_exe_source_file(self):
with temp.NamedTemporaryFile(
suffix='.exe', dir=temp.gettempdir()
) as source_file:
with zipfile.ZipFile(source_file, 'w') as zip_file:
zip_file.writestr('foo', 'a' * (2 ** 21))
source_file.seek(0)
data = self.formset(source=source_file)
response = self.client.post(self.url, data)
assert response.status_code == 200
assert not Version.objects.get(pk=self.version.pk).source
def test_dont_reset_needs_admin_code_review_flag_if_no_new_source(self):
tdir = temp.gettempdir()
tmp_file = temp.NamedTemporaryFile
with tmp_file(suffix='.zip', dir=tdir) as source_file:
with zipfile.ZipFile(source_file, 'w') as zip_file:
zip_file.writestr('foo', 'a' * (2 ** 21))
source_file.seek(0)
data = self.formset(source=source_file)
response = self.client.post(self.url, data)
assert response.status_code == 302
version = Version.objects.get(pk=self.version.pk)
assert version.source
assert version.addon.needs_admin_code_review
        # Unset the "admin review" flag, and re-save the version. It shouldn't
# reset the flag, as the source hasn't changed.
AddonReviewerFlags.objects.get(addon=version.addon).update(
needs_admin_code_review=False
)
data = self.formset(name='some other name')
response = self.client.post(self.url, data)
assert response.status_code == 302
version = Version.objects.get(pk=self.version.pk)
assert version.source
assert not version.addon.needs_admin_code_review
class TestVersionEditStaticTheme(TestVersionEditBase):
def setUp(self):
super(TestVersionEditStaticTheme, self).setUp()
self.addon.update(type=amo.ADDON_STATICTHEME)
def test_no_compat(self):
response = self.client.get(self.url)
doc = pq(response.content)
assert not doc('#id_form-TOTAL_FORMS')
def test_no_upload(self):
response = self.client.get(self.url)
doc = pq(response.content)
assert not doc('a.add-file')
class TestVersionEditCompat(TestVersionEditBase):
def setUp(self):
super(TestVersionEditCompat, self).setUp()
self.android_32pre, _created = AppVersion.objects.get_or_create(
application=amo.ANDROID.id, version='3.2a1pre'
)
self.android_30, _created = AppVersion.objects.get_or_create(
application=amo.ANDROID.id, version='3.0'
)
def get_form(self, url=None):
if not url:
url = self.url
av = self.version.apps.get()
assert av.min.version == '2.0'
assert av.max.version == '4.0'
form = self.client.get(url).context['compat_form'].initial_forms[0]
return initial(form)
def formset(self, *args, **kw):
defaults = formset(prefix='files')
defaults.update(kw)
return super(TestVersionEditCompat, self).formset(*args, **defaults)
def test_add_appversion(self):
form = self.client.get(self.url).context['compat_form'].initial_forms[0]
data = self.formset(
initial(form),
{
'application': amo.ANDROID.id,
'min': self.android_30.id,
'max': self.android_32pre.id,
},
initial_count=1,
)
response = self.client.post(self.url, data)
assert response.status_code == 302
apps = [app.id for app in self.get_version().compatible_apps.keys()]
assert sorted(apps) == sorted([amo.FIREFOX.id, amo.ANDROID.id])
assert list(ActivityLog.objects.all().values_list('action')) == (
[(amo.LOG.MAX_APPVERSION_UPDATED.id,)]
)
def test_update_appversion(self):
data = self.get_form()
data.update(min=self.v1.id, max=self.v5.id)
response = self.client.post(self.url, self.formset(data, initial_count=1))
assert response.status_code == 302
av = self.version.apps.get()
assert av.min.version == '1.0'
assert av.max.version == '5.0'
assert list(ActivityLog.objects.all().values_list('action')) == (
[(amo.LOG.MAX_APPVERSION_UPDATED.id,)]
)
def test_ajax_update_appversion(self):
url = reverse('devhub.ajax.compat.update', args=['a3615', self.version.id])
data = self.get_form(url)
data.update(min=self.v1.id, max=self.v5.id)
response = self.client.post(url, self.formset(data, initial_count=1))
assert response.status_code == 200
av = self.version.apps.get()
assert av.min.version == '1.0'
assert av.max.version == '5.0'
assert list(ActivityLog.objects.all().values_list('action')) == (
[(amo.LOG.MAX_APPVERSION_UPDATED.id,)]
)
def test_ajax_update_on_deleted_version(self):
url = reverse('devhub.ajax.compat.update', args=['a3615', self.version.id])
data = self.get_form(url)
data.update(min=self.v1.id, max=self.v5.id)
self.version.delete()
response = self.client.post(url, self.formset(data, initial_count=1))
assert response.status_code == 404
def test_delete_appversion(self):
# Add android compat so we can delete firefox.
self.test_add_appversion()
form = self.client.get(self.url).context['compat_form']
data = list(map(initial, form.initial_forms))
data[0]['DELETE'] = True
response = self.client.post(self.url, self.formset(*data, initial_count=2))
assert response.status_code == 302
apps = [app.id for app in self.get_version().compatible_apps.keys()]
assert apps == [amo.ANDROID.id]
assert list(ActivityLog.objects.all().values_list('action')) == (
[(amo.LOG.MAX_APPVERSION_UPDATED.id,)]
)
def test_unique_apps(self):
form = self.client.get(self.url).context['compat_form'].initial_forms[0]
dupe = initial(form)
del dupe['id']
data = self.formset(initial(form), dupe, initial_count=1)
response = self.client.post(self.url, data)
assert response.status_code == 200
# Because of how formsets work, the second form is expected to be a
        # Thunderbird version range. We got an error, so we're good.
def test_require_appversion(self):
old_av = self.version.apps.get()
form = self.client.get(self.url).context['compat_form'].initial_forms[0]
data = initial(form)
data['DELETE'] = True
response = self.client.post(self.url, self.formset(data, initial_count=1))
assert response.status_code == 200
compat_formset = response.context['compat_form']
assert compat_formset.non_form_errors() == (
['Need at least one compatible application.']
)
assert self.version.apps.get() == old_av
        # Make sure the user can re-submit from the page showing the
# validation error: we should display all previously present compat
# forms, with the DELETE bit off.
assert compat_formset.data == compat_formset.forms[0].data
assert compat_formset.forms[0]['DELETE'].value() is False
def test_proper_min_max(self):
form = self.client.get(self.url).context['compat_form'].initial_forms[0]
data = initial(form)
data['min'], data['max'] = data['max'], data['min']
response = self.client.post(self.url, self.formset(data, initial_count=1))
assert response.status_code == 200
assert response.context['compat_form'].forms[0].non_field_errors() == (
['Invalid version range.']
)
def test_same_min_max(self):
form = self.client.get(self.url).context['compat_form'].initial_forms[0]
data = initial(form)
data['max'] = data['min']
response = self.client.post(self.url, self.formset(data, initial_count=1))
assert response.status_code == 302
av = self.version.apps.all()[0]
assert av.min == av.max
def test_statictheme_no_compat_edit(self):
"""static themes don't allow users to overwrite compat data."""
addon = self.get_addon()
addon.update(type=amo.ADDON_STATICTHEME)
|
|
'''tzinfo timezone information for US/Mountain.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Mountain(DstTzInfo):
'''US/Mountain timezone definition. See datetime.tzinfo for details'''
zone = 'US/Mountain'
_utc_transition_times = [
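        # UTC instants at which the offset changes; the i-th entry pairs with _transition_info[i].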
d(1,1,1,0,0,0),
d(1918,3,31,9,0,0),
d(1918,10,27,8,0,0),
d(1919,3,30,9,0,0),
d(1919,10,26,8,0,0),
d(1920,3,28,9,0,0),
d(1920,10,31,8,0,0),
d(1921,3,27,9,0,0),
d(1921,5,22,8,0,0),
d(1942,2,9,9,0,0),
d(1945,8,14,23,0,0),
d(1945,9,30,8,0,0),
d(1965,4,25,9,0,0),
d(1965,10,31,8,0,0),
d(1966,4,24,9,0,0),
d(1966,10,30,8,0,0),
d(1967,4,30,9,0,0),
d(1967,10,29,8,0,0),
d(1968,4,28,9,0,0),
d(1968,10,27,8,0,0),
d(1969,4,27,9,0,0),
d(1969,10,26,8,0,0),
d(1970,4,26,9,0,0),
d(1970,10,25,8,0,0),
d(1971,4,25,9,0,0),
d(1971,10,31,8,0,0),
d(1972,4,30,9,0,0),
d(1972,10,29,8,0,0),
d(1973,4,29,9,0,0),
d(1973,10,28,8,0,0),
d(1974,1,6,9,0,0),
d(1974,10,27,8,0,0),
d(1975,2,23,9,0,0),
d(1975,10,26,8,0,0),
d(1976,4,25,9,0,0),
d(1976,10,31,8,0,0),
d(1977,4,24,9,0,0),
d(1977,10,30,8,0,0),
d(1978,4,30,9,0,0),
d(1978,10,29,8,0,0),
d(1979,4,29,9,0,0),
d(1979,10,28,8,0,0),
d(1980,4,27,9,0,0),
d(1980,10,26,8,0,0),
d(1981,4,26,9,0,0),
d(1981,10,25,8,0,0),
d(1982,4,25,9,0,0),
d(1982,10,31,8,0,0),
d(1983,4,24,9,0,0),
d(1983,10,30,8,0,0),
d(1984,4,29,9,0,0),
d(1984,10,28,8,0,0),
d(1985,4,28,9,0,0),
d(1985,10,27,8,0,0),
d(1986,4,27,9,0,0),
d(1986,10,26,8,0,0),
d(1987,4,5,9,0,0),
d(1987,10,25,8,0,0),
d(1988,4,3,9,0,0),
d(1988,10,30,8,0,0),
d(1989,4,2,9,0,0),
d(1989,10,29,8,0,0),
d(1990,4,1,9,0,0),
d(1990,10,28,8,0,0),
d(1991,4,7,9,0,0),
d(1991,10,27,8,0,0),
d(1992,4,5,9,0,0),
d(1992,10,25,8,0,0),
d(1993,4,4,9,0,0),
d(1993,10,31,8,0,0),
d(1994,4,3,9,0,0),
d(1994,10,30,8,0,0),
d(1995,4,2,9,0,0),
d(1995,10,29,8,0,0),
d(1996,4,7,9,0,0),
d(1996,10,27,8,0,0),
d(1997,4,6,9,0,0),
d(1997,10,26,8,0,0),
d(1998,4,5,9,0,0),
d(1998,10,25,8,0,0),
d(1999,4,4,9,0,0),
d(1999,10,31,8,0,0),
d(2000,4,2,9,0,0),
d(2000,10,29,8,0,0),
d(2001,4,1,9,0,0),
d(2001,10,28,8,0,0),
d(2002,4,7,9,0,0),
d(2002,10,27,8,0,0),
d(2003,4,6,9,0,0),
d(2003,10,26,8,0,0),
d(2004,4,4,9,0,0),
d(2004,10,31,8,0,0),
d(2005,4,3,9,0,0),
d(2005,10,30,8,0,0),
d(2006,4,2,9,0,0),
d(2006,10,29,8,0,0),
d(2007,3,11,9,0,0),
d(2007,11,4,8,0,0),
d(2008,3,9,9,0,0),
d(2008,11,2,8,0,0),
d(2009,3,8,9,0,0),
d(2009,11,1,8,0,0),
d(2010,3,14,9,0,0),
d(2010,11,7,8,0,0),
d(2011,3,13,9,0,0),
d(2011,11,6,8,0,0),
d(2012,3,11,9,0,0),
d(2012,11,4,8,0,0),
d(2013,3,10,9,0,0),
d(2013,11,3,8,0,0),
d(2014,3,9,9,0,0),
d(2014,11,2,8,0,0),
d(2015,3,8,9,0,0),
d(2015,11,1,8,0,0),
d(2016,3,13,9,0,0),
d(2016,11,6,8,0,0),
d(2017,3,12,9,0,0),
d(2017,11,5,8,0,0),
d(2018,3,11,9,0,0),
d(2018,11,4,8,0,0),
d(2019,3,10,9,0,0),
d(2019,11,3,8,0,0),
d(2020,3,8,9,0,0),
d(2020,11,1,8,0,0),
d(2021,3,14,9,0,0),
d(2021,11,7,8,0,0),
d(2022,3,13,9,0,0),
d(2022,11,6,8,0,0),
d(2023,3,12,9,0,0),
d(2023,11,5,8,0,0),
d(2024,3,10,9,0,0),
d(2024,11,3,8,0,0),
d(2025,3,9,9,0,0),
d(2025,11,2,8,0,0),
d(2026,3,8,9,0,0),
d(2026,11,1,8,0,0),
d(2027,3,14,9,0,0),
d(2027,11,7,8,0,0),
d(2028,3,12,9,0,0),
d(2028,11,5,8,0,0),
d(2029,3,11,9,0,0),
d(2029,11,4,8,0,0),
d(2030,3,10,9,0,0),
d(2030,11,3,8,0,0),
d(2031,3,9,9,0,0),
d(2031,11,2,8,0,0),
d(2032,3,14,9,0,0),
d(2032,11,7,8,0,0),
d(2033,3,13,9,0,0),
d(2033,11,6,8,0,0),
d(2034,3,12,9,0,0),
d(2034,11,5,8,0,0),
d(2035,3,11,9,0,0),
d(2035,11,4,8,0,0),
d(2036,3,9,9,0,0),
d(2036,11,2,8,0,0),
d(2037,3,8,9,0,0),
d(2037,11,1,8,0,0),
]
_transition_info = [
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MWT'),
i(-21600,3600,'MPT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
]
Mountain = Mountain()
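# A minimal usage sketch, assuming this module follows the usual pytz
# DstTzInfo conventions (localize() attaches the zone to a naive datetime and
# picks the matching MST/MDT entry from the transition tables above):
# from datetime import datetime
# Mountain.localize(datetime(2025, 7, 1, 12, 0))  # summer -> MDT, UTC-06:00
# Mountain.localize(datetime(2025, 1, 1, 12, 0))  # winter -> MST, UTC-07:00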
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Receives data from a TensorFlow debugger. Writes event summaries.
This listener server writes debugging-related events into a logdir directory,
from which a TensorBoard instance can read.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import json
from six.moves import queue
import tensorflow as tf
from tensorflow.python import debug as tf_debug
from tensorflow.core.debug import debug_service_pb2
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import grpc_debug_server
from tensorboard.plugins.debugger import comm_channel as comm_channel_lib
from tensorboard.plugins.debugger import debug_graphs_helper
from tensorboard.plugins.debugger import tensor_helper
from tensorboard.plugins.debugger import tensor_store as tensor_store_lib
RunKey = collections.namedtuple(
'RunKey', ['input_names', 'output_names', 'target_nodes'])
def _extract_device_name_from_event(event):
"""Extract device name from a tf.Event proto carrying tensor value."""
plugin_data_content = json.loads(
tf.compat.as_str(event.summary.value[0].metadata.plugin_data.content))
return plugin_data_content['device']
def _comm_metadata(run_key, timestamp):
return {
'type': 'meta',
'timestamp': timestamp,
'data': {
'run_key': run_key,
}
}
UNINITIALIZED_TAG = 'Uninitialized'
UNSUPPORTED_TAG = 'Unsupported'
NA_TAG = 'N/A'
STRING_ELEMENT_MAX_LEN = 40
def _comm_tensor_data(device_name,
node_name,
maybe_base_expanded_node_name,
output_slot,
debug_op,
tensor_value,
wall_time):
"""Create a dict() as the outgoing data in the tensor data comm route.
Note: The tensor data in the comm route does not include the value of the
tensor in its entirety in general. Only if a tensor satisfies the following
conditions will its entire value be included in the return value of this
method:
1. Has a numeric data type (e.g., float32, int32) and has fewer than 5
elements.
2. Is a string tensor and has fewer than 5 elements. Each string element is
up to 40 bytes.
Args:
device_name: Name of the device that the tensor is on.
node_name: (Original) name of the node that produces the tensor.
maybe_base_expanded_node_name: Possibly base-expanded node name.
output_slot: Output slot number.
debug_op: Name of the debug op.
tensor_value: Value of the tensor, as a numpy.ndarray.
wall_time: Wall timestamp for the tensor.
Returns:
A dict representing the tensor data.
"""
output_slot = int(output_slot)
tf.logging.info(
'Recording tensor value: %s, %d, %s', node_name, output_slot, debug_op)
tensor_values = None
if isinstance(tensor_value, debug_data.InconvertibleTensorProto):
if not tensor_value.initialized:
tensor_dtype = UNINITIALIZED_TAG
tensor_shape = UNINITIALIZED_TAG
else:
tensor_dtype = UNSUPPORTED_TAG
tensor_shape = UNSUPPORTED_TAG
tensor_values = NA_TAG
else:
tensor_dtype = tensor_helper.translate_dtype(tensor_value.dtype)
tensor_shape = tensor_value.shape
# The /comm endpoint should respond with tensor values only if the tensor is
# small enough. Otherwise, the detailed values should be queried through a
# dedicated tensor_data that supports slicing.
if tensor_helper.numel(tensor_shape) < 5:
_, _, tensor_values = tensor_helper.array_view(tensor_value)
if tensor_dtype == 'string' and tensor_value is not None:
tensor_values = tensor_helper.process_buffers_for_display(
tensor_values, limit=STRING_ELEMENT_MAX_LEN)
return {
'type': 'tensor',
'timestamp': wall_time,
'data': {
'device_name': device_name,
'node_name': node_name,
'maybe_base_expanded_node_name': maybe_base_expanded_node_name,
'output_slot': output_slot,
'debug_op': debug_op,
'dtype': tensor_dtype,
'shape': tensor_shape,
'values': tensor_values,
},
}
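# Illustrative sketch (values are hypothetical) of the dict produced for a
# small float32 tensor, following the size rules documented above:
# {'type': 'tensor',
#  'timestamp': 1536000000.0,
#  'data': {'device_name': '/job:localhost/replica:0/task:0/device:CPU:0',
#           'node_name': 'dense/MatMul',
#           'maybe_base_expanded_node_name': 'dense/MatMul',
#           'output_slot': 0, 'debug_op': 'DebugIdentity',
#           'dtype': 'float32', 'shape': (2,), 'values': [1.0, 2.0]}}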
class RunStates(object):
"""A class that keeps track of state of debugged Session.run() calls."""
def __init__(self, breakpoints_func=None):
"""Constructor of RunStates.
Args:
breakpoints_func: A callable with the signature:
def breakpoints_func():
which returns all the currently activated breakpoints.
"""
# Maps from run key to debug_graphs_helper.DebugGraphWrapper instance.
self._run_key_to_original_graphs = dict()
self._run_key_to_debug_graphs = dict()
if breakpoints_func:
assert callable(breakpoints_func)
self._breakpoints_func = breakpoints_func
def add_graph(self, run_key, device_name, graph_def, debug=False):
"""Add a GraphDef.
Args:
run_key: A key for the run, containing information about the feeds,
fetches, and targets.
device_name: The name of the device that the `GraphDef` is for.
graph_def: An instance of the `GraphDef` proto.
debug: Whether `graph_def` consists of the debug ops.
"""
graph_dict = (self._run_key_to_debug_graphs if debug else
self._run_key_to_original_graphs)
if run_key not in graph_dict:
graph_dict[run_key] = dict() # Mapping device_name to GraphDef.
graph_dict[run_key][tf.compat.as_str(device_name)] = (
debug_graphs_helper.DebugGraphWrapper(graph_def))
def get_graphs(self, run_key, debug=False):
"""Get the runtime GraphDef protos associated with a run key.
Args:
run_key: A Session.run key.
debug: Whether the debugger-decorated graph is to be retrieved.
Returns:
A `dict` mapping device name to `GraphDef` protos.
"""
graph_dict = (self._run_key_to_debug_graphs if debug else
self._run_key_to_original_graphs)
graph_wrappers = graph_dict.get(run_key, {})
graph_defs = dict()
for device_name, wrapper in graph_wrappers.items():
graph_defs[device_name] = wrapper.graph_def
return graph_defs
def get_graph(self, run_key, device_name, debug=False):
"""Get the runtime GraphDef proto associated with a run key and a device.
Args:
run_key: A Session.run key.
device_name: Name of the device in question.
debug: Whether the debugger-decorated graph is to be retrieved.
Returns:
A `GraphDef` proto.
"""
return self.get_graphs(run_key, debug=debug).get(device_name, None)
def get_breakpoints(self):
"""Obtain all the currently activated breakpoints."""
return self._breakpoints_func()
def get_gated_grpc_tensors(self, run_key, device_name):
return self._run_key_to_debug_graphs[
run_key][device_name].get_gated_grpc_tensors()
def get_maybe_base_expanded_node_name(self, node_name, run_key, device_name):
"""Obtain possibly base-expanded node name.
Base-expansion is the transformation applied to a node name that also happens
to be the name scope of other nodes in the same graph. For example, if a
graph contains two nodes called 'a/b' and 'a/b/read', the name of the first
node will be base-expanded to 'a/b/(b)'.
This method uses caching to avoid unnecessary recomputation.
Args:
node_name: Name of the node.
run_key: The run key to which the node belongs.
device_name: Name of the device on which the node's graph resides.
Returns:
The possibly base-expanded node name.
Raises:
ValueError: If `run_key` and/or `device_name` do not exist in the record.
"""
device_name = tf.compat.as_str(device_name)
if run_key not in self._run_key_to_original_graphs:
raise ValueError('Unknown run_key: %s' % run_key)
if device_name not in self._run_key_to_original_graphs[run_key]:
raise ValueError(
'Unknown device for run key "%s": %s' % (run_key, device_name))
return self._run_key_to_original_graphs[
run_key][device_name].maybe_base_expanded_node_name(node_name)
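# A minimal RunStates usage sketch (run key, device name and GraphDefs below
# are hypothetical placeholders; breakpoints_func simply reports no active
# breakpoints):
# run_states = RunStates(breakpoints_func=lambda: [])
# run_states.add_graph(run_key, '/device:CPU:0', original_graph_def)
# run_states.add_graph(run_key, '/device:CPU:0', debug_graph_def, debug=True)
# run_states.get_graph(run_key, '/device:CPU:0')               # original GraphDef
# run_states.get_graph(run_key, '/device:CPU:0', debug=True)   # debug GraphDef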
class InteractiveDebuggerDataStreamHandler(
grpc_debug_server.EventListenerBaseStreamHandler):
"""Implementation of stream handler for debugger data.
Each instance of this class is created by an InteractiveDebuggerDataServer
upon a gRPC stream established between the debugged Session::Run() invocation
in TensorFlow core runtime and the InteractiveDebuggerDataServer instance.
Each instance of this class does the following:
1) receives a core metadata Event proto during its constructor call.
2) receives GraphDef Event proto(s) through its on_graph_def method.
3) receives tensor value Event proto(s) through its on_value_event method.
"""
def __init__(
self, incoming_channel, outgoing_channel, run_states, tensor_store):
"""Constructor of InteractiveDebuggerDataStreamHandler.
Args:
incoming_channel: An instance of FIFO queue, which manages incoming data,
e.g., ACK signals from the client side unblock breakpoints.
outgoing_channel: An instance of `CommChannel`, which manages outgoing
data, i.e., data regarding the starting of Session.runs and hitting of
tensor breakpoints.
run_states: An instance of `RunStates`, which keeps track of the states
(graphs and breakpoints) of debugged Session.run() calls.
tensor_store: An instance of `TensorStore`, which stores Tensor values
from debugged Session.run() calls.
"""
super(InteractiveDebuggerDataStreamHandler, self).__init__()
self._incoming_channel = incoming_channel
self._outgoing_channel = outgoing_channel
self._run_states = run_states
self._tensor_store = tensor_store
self._run_key = None
self._graph_defs = dict() # A dict mapping device name to GraphDef.
self._graph_defs_arrive_first = True
def on_core_metadata_event(self, event):
"""Implementation of the core metadata-carrying Event proto callback.
Args:
event: An Event proto that contains core metadata about the debugged
Session::Run() in its log_message.message field, as a JSON string.
See the doc string of debug_data.DebugDumpDir.core_metadata for details.
"""
core_metadata = json.loads(event.log_message.message)
input_names = ','.join(core_metadata['input_names'])
output_names = ','.join(core_metadata['output_names'])
target_nodes = ','.join(core_metadata['target_nodes'])
self._run_key = RunKey(input_names, output_names, target_nodes)
if not self._graph_defs:
self._graph_defs_arrive_first = False
else:
for device_name in self._graph_defs:
self._add_graph_def(device_name, self._graph_defs[device_name])
self._outgoing_channel.put(_comm_metadata(self._run_key, event.wall_time))
# Wait for acknowledgement from client. Blocks until an item is received.
tf.logging.info('on_core_metadata_event() waiting for client ack (meta)...')
self._incoming_channel.get()
tf.logging.info('on_core_metadata_event() client ack received (meta).')
# TODO(cais): If eager mode, this should return something to yield.
def _add_graph_def(self, device_name, graph_def):
self._run_states.add_graph(
self._run_key, device_name,
tf_debug.reconstruct_non_debug_graph_def(graph_def))
self._run_states.add_graph(
self._run_key, device_name, graph_def, debug=True)
def on_graph_def(self, graph_def, device_name, wall_time):
"""Implementation of the GraphDef-carrying Event proto callback.
Args:
graph_def: A GraphDef proto. N.B.: The GraphDef is from
the core runtime of a debugged Session::Run() call, after graph
partition. Therefore it may differ from the GraphDef available to
the general TensorBoard. For example, the GraphDef in general
TensorBoard may get partitioned for multiple devices (CPUs and GPUs),
each of which will generate a GraphDef event proto sent to this
method.
device_name: Name of the device on which the graph was created.
wall_time: An epoch timestamp (in microseconds) for the graph.
"""
# For now, we do nothing with the graph def. However, we must define this
# method to satisfy the handler's interface. Furthermore, we may use the
# graph in the future (for instance to provide a graph if there is no graph
# provided otherwise).
del wall_time
self._graph_defs[device_name] = graph_def
if not self._graph_defs_arrive_first:
self._add_graph_def(device_name, graph_def)
self._incoming_channel.get()
def on_value_event(self, event):
"""Records the summary values based on an updated message from the debugger.
Logs an error message if writing the event to disk fails.
Args:
event: The Event proto to be processed.
"""
if not event.summary.value:
tf.logging.info('The summary of the event lacks a value.')
return None
# The node name property in the event proto is actually a watch key, which
# is a concatenation of several pieces of data.
watch_key = event.summary.value[0].node_name
tensor_value = debug_data.load_tensor_from_event(event)
device_name = _extract_device_name_from_event(event)
node_name, output_slot, debug_op = (
event.summary.value[0].node_name.split(':'))
maybe_base_expanded_node_name = (
self._run_states.get_maybe_base_expanded_node_name(node_name,
self._run_key,
device_name))
self._tensor_store.add(watch_key, tensor_value)
self._outgoing_channel.put(_comm_tensor_data(
device_name, node_name, maybe_base_expanded_node_name, output_slot,
debug_op, tensor_value, event.wall_time))
tf.logging.info('on_value_event(): waiting for client ack (tensors)...')
self._incoming_channel.get()
tf.logging.info('on_value_event(): client ack received (tensor).')
# Determine if the particular debug watch key is in the current list of
# breakpoints. If it is, send an EventReply() to unblock the debug op.
if self._is_debug_node_in_breakpoints(event.summary.value[0].node_name):
tf.logging.info('Sending empty EventReply for breakpoint: %s',
event.summary.value[0].node_name)
# TODO(cais): Support receiving and sending tensor value from front-end.
return debug_service_pb2.EventReply()
return None
def _is_debug_node_in_breakpoints(self, debug_node_key):
node_name, output_slot, debug_op = debug_node_key.split(':')
output_slot = int(output_slot)
return (node_name, output_slot,
debug_op) in self._run_states.get_breakpoints()
# TODO(cais): Consider moving to a separate Python module.
class SourceManager(object):
"""Manages source files and tracebacks involved in the debugged TF program.
"""
def __init__(self):
# A dict mapping file path to file content as a list of strings.
self._source_file_content = dict()
# A dict mapping file path to host name.
self._source_file_host = dict()
# A dict mapping file path to last modified timestamp.
self._source_file_last_modified = dict()
# A dict mapping file path to size in bytes.
self._source_file_bytes = dict()
# Keeps track of the traceback of the latest graph version.
self._graph_traceback = None
self._graph_version = -1
def add_debugged_source_file(self, debugged_source_file):
"""Add a DebuggedSourceFile proto."""
# TODO(cais): Should the key include a host name, for certain distributed
# cases?
key = debugged_source_file.file_path
self._source_file_host[key] = debugged_source_file.host
self._source_file_last_modified[key] = debugged_source_file.last_modified
self._source_file_bytes[key] = debugged_source_file.bytes
self._source_file_content[key] = debugged_source_file.lines
def add_graph_traceback(self, graph_version, graph_traceback):
if graph_version > self._graph_version:
self._graph_traceback = graph_traceback
self._graph_version = graph_version
def get_paths(self):
"""Get the paths to all available source files."""
return list(self._source_file_content.keys())
def get_content(self, file_path):
"""Get the content of a source file.
# TODO(cais): Maybe support getting a range of lines by line number.
Args:
file_path: Path to the source file.
"""
return self._source_file_content[file_path]
def get_op_traceback(self, op_name):
"""Get the traceback of an op in the latest version of the TF graph.
Args:
op_name: Name of the op.
Returns:
Creation traceback of the op, in the form of a list of 2-tuples:
(file_path, lineno)
Raises:
ValueError: If the op with the given name cannot be found in the latest
version of the graph that this SourceManager instance has received, or
if this SourceManager instance has not received any graph traceback yet.
"""
if not self._graph_traceback:
raise ValueError('No graph traceback has been received yet.')
for op_log_entry in self._graph_traceback.log_entries:
if op_log_entry.name == op_name:
return self._code_def_to_traceback_list(op_log_entry.code_def)
raise ValueError(
'No op named "%s" can be found in the graph of the latest version '
' (%d).' % (op_name, self._graph_version))
def get_file_tracebacks(self, file_path):
"""Get the lists of ops created at lines of a specified source file.
Args:
file_path: Path to the source file.
Returns:
A dict mapping line number to a list of 2-tuples,
`(op_name, stack_position)`
`op_name` is the name of the op whose creation traceback
includes the line.
`stack_position` is the position of the line in the op's creation
traceback, represented as a 0-based integer.
Raises:
ValueError: If `file_path` does not point to a source file that has been
received by this instance of `SourceManager`.
"""
if file_path not in self._source_file_content:
raise ValueError(
'Source file of path "%s" has not been received by this instance of '
'SourceManager.' % file_path)
lineno_to_op_names_and_stack_position = dict()
for op_log_entry in self._graph_traceback.log_entries:
for stack_pos, trace in enumerate(op_log_entry.code_def.traces):
if self._graph_traceback.id_to_string[trace.file_id] == file_path:
if trace.lineno not in lineno_to_op_names_and_stack_position:
lineno_to_op_names_and_stack_position[trace.lineno] = []
lineno_to_op_names_and_stack_position[trace.lineno].append(
(op_log_entry.name, stack_pos))
return lineno_to_op_names_and_stack_position
def _code_def_to_traceback_list(self, code_def):
return [
(self._graph_traceback.id_to_string[trace.file_id], trace.lineno)
for trace in code_def.traces]
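# Sketch of the shapes returned by SourceManager (op name, file paths and
# line numbers below are hypothetical):
# get_op_traceback('dense/MatMul') -> [('/home/user/model.py', 42),
#                                      ('/home/user/train.py', 118)]
# get_file_tracebacks('/home/user/model.py') -> {42: [('dense/MatMul', 0)]}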
class InteractiveDebuggerDataServer(
grpc_debug_server.EventListenerBaseServicer):
"""A service that receives and writes debugger data such as health pills.
"""
def __init__(self, receive_port):
"""Receives health pills from a debugger and writes them to disk.
Args:
receive_port: The port at which to receive health pills from the
TensorFlow debugger.
"""
super(InteractiveDebuggerDataServer, self).__init__(
receive_port, InteractiveDebuggerDataStreamHandler)
self._incoming_channel = queue.Queue()
self._outgoing_channel = comm_channel_lib.CommChannel()
self._run_states = RunStates(breakpoints_func=lambda: self.breakpoints)
self._tensor_store = tensor_store_lib.TensorStore()
self._source_manager = SourceManager()
curried_handler_constructor = functools.partial(
InteractiveDebuggerDataStreamHandler,
self._incoming_channel, self._outgoing_channel, self._run_states,
self._tensor_store)
grpc_debug_server.EventListenerBaseServicer.__init__(
self, receive_port, curried_handler_constructor)
def SendTracebacks(self, request, context):
self._source_manager.add_graph_traceback(request.graph_version,
request.graph_traceback)
return debug_service_pb2.EventReply()
def SendSourceFiles(self, request, context):
# TODO(cais): Handle case in which the size of the request is greater than
# the 4-MB gRPC limit.
for source_file in request.source_files:
self._source_manager.add_debugged_source_file(source_file)
return debug_service_pb2.EventReply()
def get_graphs(self, run_key, debug=False):
return self._run_states.get_graphs(run_key, debug=debug)
def get_graph(self, run_key, device_name, debug=False):
return self._run_states.get_graph(run_key, device_name, debug=debug)
def get_gated_grpc_tensors(self, run_key, device_name):
return self._run_states.get_gated_grpc_tensors(run_key, device_name)
def get_outgoing_message(self, pos):
msg, _ = self._outgoing_channel.get(pos)
return msg
def put_incoming_message(self, message):
return self._incoming_channel.put(message)
def query_tensor_store(self,
watch_key,
time_indices=None,
slicing=None,
mapping=None):
"""Query tensor store for a given debugged tensor value.
Args:
watch_key: The watch key of the debugged tensor being sought. Format:
<node_name>:<output_slot>:<debug_op>
E.g., Dense_1/MatMul:0:DebugIdentity.
time_indices: Optional time indices string. By default, the latest time
index ('-1') is returned.
slicing: Optional slicing string.
mapping: Optional mapping string, e.g., 'image/png'.
Returns:
If `mapping` is `None`, the possibly sliced values as a nested `list` of
values. If `mapping` is not `None`, the format of the return value depends
on the mapping.
"""
return self._tensor_store.query(watch_key,
time_indices=time_indices,
slicing=slicing,
mapping=mapping)
def query_source_file_paths(self):
"""Query the source files involved in the current debugged TF program.
Returns:
A `list` of file paths. The files that belong to the TensorFlow Python
library itself are *not* included.
"""
return self._source_manager.get_paths()
def query_source_file_content(self, file_path):
"""Query the content of a given source file.
# TODO(cais): Allow query only a range of the source lines.
Returns:
The source lines as a list of `str`.
"""
return list(self._source_manager.get_content(file_path))
def query_op_traceback(self, op_name):
"""Query the tracebacks of ops in a TensorFlow graph.
Returns:
TODO(cais):
"""
return self._source_manager.get_op_traceback(op_name)
def query_file_tracebacks(self, file_path):
"""Query the lists of ops created at lines of a given source file.
Args:
file_path: Path to the source file to get the tracebacks for.
Returns:
A `dict` mapping line number in the specified source file to a list of
2-tuples:
`(op_name, stack_position)`.
`op_name` is the name of the op whose creation traceback
includes the line.
`stack_position` is the position of the line in the op's creation
traceback, represented as a 0-based integer.
"""
return self._source_manager.get_file_tracebacks(file_path)
def dispose(self):
"""Disposes of this object. Call only after this is done being used."""
self._tensor_store.dispose()
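# A minimal stand-up sketch (the port number is arbitrary; run_server() and
# stop_server() are assumed to be inherited from
# grpc_debug_server.EventListenerBaseServicer):
# server = InteractiveDebuggerDataServer(6064)
# server.run_server()    # blocks, receiving debugger data over gRPC
# ...                    # later, from another thread:
# server.stop_server()
# server.dispose()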
|
|
# Copyright (c) 2008-2009 Aryeh Leib Taurog, http://www.aryehleib.com
# All rights reserved.
#
# Modified from original contribution by Aryeh Leib Taurog, which was
# released under the New BSD license.
import unittest
from django.contrib.gis.geos.mutable_list import ListMixin
class UserListA(ListMixin):
_mytype = tuple
def __init__(self, i_list, *args, **kwargs):
self._list = self._mytype(i_list)
super(UserListA, self).__init__(*args, **kwargs)
def __len__(self): return len(self._list)
def __str__(self): return str(self._list)
def __repr__(self): return repr(self._list)
def _set_list(self, length, items):
# this would work:
# self._list = self._mytype(items)
# but then we wouldn't be testing length parameter
itemList = ['x'] * length
for i, v in enumerate(items):
itemList[i] = v
self._list = self._mytype(itemList)
def _get_single_external(self, index):
return self._list[index]
class UserListB(UserListA):
_mytype = list
def _set_single(self, index, value):
self._list[index] = value
def nextRange(length):
nextRange.start += 100
return range(nextRange.start, nextRange.start + length)
nextRange.start = 0
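# nextRange() hands out a fresh, non-overlapping block of values on each call
# (nextRange(3) -> range(100, 103), then nextRange(2) -> range(200, 202)),
# so consecutive slice assignments in the tests below are easy to tell apart.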
class ListMixinTest(unittest.TestCase):
"""
Tests base class ListMixin by comparing a list clone which is
a ListMixin subclass with a real Python list.
"""
limit = 3
listType = UserListA
def lists_of_len(self, length=None):
if length is None: length = self.limit
pl = range(length)
return pl, self.listType(pl)
def limits_plus(self, b):
return range(-self.limit - b, self.limit + b)
def step_range(self):
return range(-1 - self.limit, 0) + range(1, 1 + self.limit)
def test01_getslice(self):
'Slice retrieval'
pl, ul = self.lists_of_len()
for i in self.limits_plus(1):
self.assertEqual(pl[i:], ul[i:], 'slice [%d:]' % (i))
self.assertEqual(pl[:i], ul[:i], 'slice [:%d]' % (i))
for j in self.limits_plus(1):
self.assertEqual(pl[i:j], ul[i:j], 'slice [%d:%d]' % (i,j))
for k in self.step_range():
self.assertEqual(pl[i:j:k], ul[i:j:k], 'slice [%d:%d:%d]' % (i,j,k))
for k in self.step_range():
self.assertEqual(pl[i::k], ul[i::k], 'slice [%d::%d]' % (i,k))
self.assertEqual(pl[:i:k], ul[:i:k], 'slice [:%d:%d]' % (i,k))
for k in self.step_range():
self.assertEqual(pl[::k], ul[::k], 'slice [::%d]' % (k))
def test02_setslice(self):
'Slice assignment'
def setfcn(x,i,j,k,L): x[i:j:k] = range(L)
pl, ul = self.lists_of_len()
for slen in range(self.limit + 1):
ssl = nextRange(slen)
ul[:] = ssl
pl[:] = ssl
self.assertEqual(pl, ul[:], 'set slice [:]')
for i in self.limits_plus(1):
ssl = nextRange(slen)
ul[i:] = ssl
pl[i:] = ssl
self.assertEqual(pl, ul[:], 'set slice [%d:]' % (i))
ssl = nextRange(slen)
ul[:i] = ssl
pl[:i] = ssl
self.assertEqual(pl, ul[:], 'set slice [:%d]' % (i))
for j in self.limits_plus(1):
ssl = nextRange(slen)
ul[i:j] = ssl
pl[i:j] = ssl
self.assertEqual(pl, ul[:], 'set slice [%d:%d]' % (i, j))
for k in self.step_range():
ssl = nextRange( len(ul[i:j:k]) )
ul[i:j:k] = ssl
pl[i:j:k] = ssl
self.assertEqual(pl, ul[:], 'set slice [%d:%d:%d]' % (i, j, k))
sliceLen = len(ul[i:j:k])
self.assertRaises(ValueError, setfcn, ul, i, j, k, sliceLen + 1)
if sliceLen > 2:
self.assertRaises(ValueError, setfcn, ul, i, j, k, sliceLen - 1)
for k in self.step_range():
ssl = nextRange( len(ul[i::k]) )
ul[i::k] = ssl
pl[i::k] = ssl
self.assertEqual(pl, ul[:], 'set slice [%d::%d]' % (i, k))
ssl = nextRange( len(ul[:i:k]) )
ul[:i:k] = ssl
pl[:i:k] = ssl
self.assertEqual(pl, ul[:], 'set slice [:%d:%d]' % (i, k))
for k in self.step_range():
ssl = nextRange(len(ul[::k]))
ul[::k] = ssl
pl[::k] = ssl
self.assertEqual(pl, ul[:], 'set slice [::%d]' % (k))
def test03_delslice(self):
'Delete slice'
for Len in range(self.limit):
pl, ul = self.lists_of_len(Len)
del pl[:]
del ul[:]
self.assertEqual(pl[:], ul[:], 'del slice [:]')
for i in range(-Len - 1, Len + 1):
pl, ul = self.lists_of_len(Len)
del pl[i:]
del ul[i:]
self.assertEqual(pl[:], ul[:], 'del slice [%d:]' % (i))
pl, ul = self.lists_of_len(Len)
del pl[:i]
del ul[:i]
self.assertEqual(pl[:], ul[:], 'del slice [:%d]' % (i))
for j in range(-Len - 1, Len + 1):
pl, ul = self.lists_of_len(Len)
del pl[i:j]
del ul[i:j]
self.assertEqual(pl[:], ul[:], 'del slice [%d:%d]' % (i,j))
for k in range(-Len - 1,0) + range(1,Len):
pl, ul = self.lists_of_len(Len)
del pl[i:j:k]
del ul[i:j:k]
self.assertEqual(pl[:], ul[:], 'del slice [%d:%d:%d]' % (i,j,k))
for k in range(-Len - 1,0) + range(1,Len):
pl, ul = self.lists_of_len(Len)
del pl[:i:k]
del ul[:i:k]
self.assertEqual(pl[:], ul[:], 'del slice [:%d:%d]' % (i,k))
pl, ul = self.lists_of_len(Len)
del pl[i::k]
del ul[i::k]
self.assertEqual(pl[:], ul[:], 'del slice [%d::%d]' % (i,k))
for k in range(-Len - 1,0) + range(1,Len):
pl, ul = self.lists_of_len(Len)
del pl[::k]
del ul[::k]
self.assertEqual(pl[:], ul[:], 'del slice [::%d]' % (k))
def test04_get_set_del_single(self):
'Get/set/delete single item'
pl, ul = self.lists_of_len()
for i in self.limits_plus(0):
self.assertEqual(pl[i], ul[i], 'get single item [%d]' % i)
for i in self.limits_plus(0):
pl, ul = self.lists_of_len()
pl[i] = 100
ul[i] = 100
self.assertEqual(pl[:], ul[:], 'set single item [%d]' % i)
for i in self.limits_plus(0):
pl, ul = self.lists_of_len()
del pl[i]
del ul[i]
self.assertEqual(pl[:], ul[:], 'del single item [%d]' % i)
def test05_out_of_range_exceptions(self):
'Out of range exceptions'
def setfcn(x, i): x[i] = 20
def getfcn(x, i): return x[i]
def delfcn(x, i): del x[i]
pl, ul = self.lists_of_len()
for i in (-1 - self.limit, self.limit):
self.assertRaises(IndexError, setfcn, ul, i) # 'set index %d' % i)
self.assertRaises(IndexError, getfcn, ul, i) # 'get index %d' % i)
self.assertRaises(IndexError, delfcn, ul, i) # 'del index %d' % i)
def test06_list_methods(self):
'List methods'
pl, ul = self.lists_of_len()
pl.append(40)
ul.append(40)
self.assertEqual(pl[:], ul[:], 'append')
pl.extend(range(50,55))
ul.extend(range(50,55))
self.assertEqual(pl[:], ul[:], 'extend')
pl.reverse()
ul.reverse()
self.assertEqual(pl[:], ul[:], 'reverse')
for i in self.limits_plus(1):
pl, ul = self.lists_of_len()
pl.insert(i,50)
ul.insert(i,50)
self.assertEqual(pl[:], ul[:], 'insert at %d' % i)
for i in self.limits_plus(0):
pl, ul = self.lists_of_len()
self.assertEqual(pl.pop(i), ul.pop(i), 'popped value at %d' % i)
self.assertEqual(pl[:], ul[:], 'after pop at %d' % i)
pl, ul = self.lists_of_len()
self.assertEqual(pl.pop(), ul.pop(), 'popped value')
self.assertEqual(pl[:], ul[:], 'after pop')
pl, ul = self.lists_of_len()
def popfcn(x, i): x.pop(i)
self.assertRaises(IndexError, popfcn, ul, self.limit)
self.assertRaises(IndexError, popfcn, ul, -1 - self.limit)
pl, ul = self.lists_of_len()
for val in range(self.limit):
self.assertEqual(pl.index(val), ul.index(val), 'index of %d' % val)
for val in self.limits_plus(2):
self.assertEqual(pl.count(val), ul.count(val), 'count %d' % val)
for val in range(self.limit):
pl, ul = self.lists_of_len()
pl.remove(val)
ul.remove(val)
self.assertEqual(pl[:], ul[:], 'after remove val %d' % val)
def indexfcn(x, v): return x.index(v)
def removefcn(x, v): return x.remove(v)
self.assertRaises(ValueError, indexfcn, ul, 40)
self.assertRaises(ValueError, removefcn, ul, 40)
def test07_allowed_types(self):
'Type-restricted list'
pl, ul = self.lists_of_len()
ul._allowed = (int, long)
ul[1] = 50
ul[:2] = [60, 70, 80]
def setfcn(x, i, v): x[i] = v
self.assertRaises(TypeError, setfcn, ul, 2, 'hello')
self.assertRaises(TypeError, setfcn, ul, slice(0,3,2), ('hello','goodbye'))
def test08_min_length(self):
'Length limits'
pl, ul = self.lists_of_len()
ul._minlength = 1
def delfcn(x,i): del x[:i]
def setfcn(x,i): x[:i] = []
for i in range(self.limit - ul._minlength + 1, self.limit + 1):
self.assertRaises(ValueError, delfcn, ul, i)
self.assertRaises(ValueError, setfcn, ul, i)
del ul[:ul._minlength]
ul._maxlength = 4
for i in range(0, ul._maxlength - len(ul)):
ul.append(i)
self.assertRaises(ValueError, ul.append, 10)
def test09_iterable_check(self):
'Error on assigning non-iterable to slice'
pl, ul = self.lists_of_len(self.limit + 1)
def setfcn(x, i, v): x[i] = v
self.assertRaises(TypeError, setfcn, ul, slice(0,3,2), 2)
def test10_checkindex(self):
'Index check'
pl, ul = self.lists_of_len()
for i in self.limits_plus(0):
if i < 0:
self.assertEqual(ul._checkindex(i), i + self.limit, '_checkindex(neg index)')
else:
self.assertEqual(ul._checkindex(i), i, '_checkindex(pos index)')
for i in (-self.limit - 1, self.limit):
self.assertRaises(IndexError, ul._checkindex, i)
ul._IndexError = TypeError
self.assertRaises(TypeError, ul._checkindex, -self.limit - 1)
def test_11_sorting(self):
'Sorting'
pl, ul = self.lists_of_len()
pl.insert(0, pl.pop())
ul.insert(0, ul.pop())
pl.sort()
ul.sort()
self.assertEqual(pl[:], ul[:], 'sort')
mid = pl[len(pl) / 2]
pl.sort(key=lambda x: (mid-x)**2)
ul.sort(key=lambda x: (mid-x)**2)
self.assertEqual(pl[:], ul[:], 'sort w/ key')
pl.insert(0, pl.pop())
ul.insert(0, ul.pop())
pl.sort(reverse=True)
ul.sort(reverse=True)
self.assertEqual(pl[:], ul[:], 'sort w/ reverse')
mid = pl[len(pl) / 2]
pl.sort(key=lambda x: (mid-x)**2)
ul.sort(key=lambda x: (mid-x)**2)
self.assertEqual(pl[:], ul[:], 'sort w/ key')
def test_12_arithmetic(self):
'Arithmetic'
pl, ul = self.lists_of_len()
al = range(10,14)
self.assertEqual(list(pl + al), list(ul + al), 'add')
self.assertEqual(type(ul), type(ul + al), 'type of add result')
self.assertEqual(list(al + pl), list(al + ul), 'radd')
self.assertEqual(type(al), type(al + ul), 'type of radd result')
objid = id(ul)
pl += al
ul += al
self.assertEqual(pl[:], ul[:], 'in-place add')
self.assertEqual(objid, id(ul), 'in-place add id')
for n in (-1,0,1,3):
pl, ul = self.lists_of_len()
self.assertEqual(list(pl * n), list(ul * n), 'mul by %d' % n)
self.assertEqual(type(ul), type(ul * n), 'type of mul by %d result' % n)
self.assertEqual(list(n * pl), list(n * ul), 'rmul by %d' % n)
self.assertEqual(type(ul), type(n * ul), 'type of rmul by %d result' % n)
objid = id(ul)
pl *= n
ul *= n
self.assertEqual(pl[:], ul[:], 'in-place mul by %d' % n)
self.assertEqual(objid, id(ul), 'in-place mul by %d id' % n)
pl, ul = self.lists_of_len()
self.assertEqual(pl, ul, 'cmp for equal')
self.assert_(pl >= ul, 'cmp for gte self')
self.assert_(pl <= ul, 'cmp for lte self')
self.assert_(ul >= pl, 'cmp for self gte')
self.assert_(ul <= pl, 'cmp for self lte')
self.assert_(pl + [5] > ul, 'cmp')
self.assert_(pl + [5] >= ul, 'cmp')
self.assert_(pl < ul + [2], 'cmp')
self.assert_(pl <= ul + [2], 'cmp')
self.assert_(ul + [5] > pl, 'cmp')
self.assert_(ul + [5] >= pl, 'cmp')
self.assert_(ul < pl + [2], 'cmp')
self.assert_(ul <= pl + [2], 'cmp')
pl[1] = 20
self.assert_(pl > ul, 'cmp for gt self')
self.assert_(ul < pl, 'cmp for self lt')
pl[1] = -20
self.assert_(pl < ul, 'cmp for lt self')
self.assert_(pl < ul, 'cmp for lt self')
class ListMixinTestSingle(ListMixinTest):
listType = UserListB
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(ListMixinTest))
s.addTest(unittest.makeSuite(ListMixinTestSingle))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
if __name__ == '__main__':
run()
|
|
#!/usr/bin/env python3
#******************************************************************************
# (C) 2019, Stefan Korner, Austria *
# *
# The Space Python Library is free software; you can redistribute it and/or *
# modify it under the terms of the MIT License as published by the *
# Massachusetts Institute of Technology. *
# *
# The Space Python Library is distributed in the hope that it will be useful, *
# but WITHOUT ANY WARRANTY; without even the implied warranty of *
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the MIT License *
# for more details. *
#******************************************************************************
# CCSDS Stack - Unit Tests *
#******************************************************************************
import unittest
import testData
import CCSDS.ASSEMBLER, CCSDS.FRAME, CCSDS.PACKET, CCSDS.PACKETIZER
import UTIL.SYS
####################
# global variables #
####################
# Assembler and Packetizer are singletons
s_assembler = None
s_packetizer = None
# the TM frames and TM packets passed through so far
s_tmBinFrames = []
s_tmBinPackets = []
###########
# classes #
###########
# =============================================================================
class Assembler(CCSDS.ASSEMBLER.Assembler):
"""Subclass of CCSDS.ASSEMBLER.Assembler"""
def __init__(self):
"""Initialise attributes only"""
CCSDS.ASSEMBLER.Assembler.__init__(self)
# ---------------------------------------------------------------------------
def notifyTMframeCallback(self, tmFrameDu):
"""notifies when the next TM frame is assembled"""
# overloaded from CCSDS.ASSEMBLER.Assembler
global s_packetizer, s_tmBinFrames
binFrame = tmFrameDu.getBuffer()
s_tmBinFrames.append(binFrame)
s_packetizer.pushTMframe(binFrame)
# =============================================================================
class Packetizer(CCSDS.PACKETIZER.Packetizer):
"""Subclass of CCSDS.PACKETIZER.Packetizer"""
def __init__(self):
"""Initialise attributes only"""
frameVCID = int(UTIL.SYS.s_configuration.TM_TRANSFER_FRAME_VCID)
CCSDS.PACKETIZER.Packetizer.__init__(self, frameVCID)
# ---------------------------------------------------------------------------
def notifyTMpacketCallback(self, binPacket):
"""notifies when the next TM packet is assembled"""
# overloaded from CCSDS.PACKETIZER.Packetizer
global s_tmBinPackets
s_tmBinPackets.append(binPacket)
#############
# functions #
#############
def initConfiguration():
"""initialise the system configuration"""
UTIL.SYS.s_configuration.setDefaults([
["SPACECRAFT_ID", "758"],
["TM_VIRTUAL_CHANNEL_ID", "0"],
["TM_TRANSFER_FRAME_SIZE", "1115"],
["TM_TRANSFER_FRAME_VCID", "0"],
["TM_TRANSFER_FRAME_HAS_SEC_HDR", "0"],
["TM_TRANSFER_FRAME_HAS_N_PKTS", "0"]])
#############
# test case #
#############
class TestCCSDStm(unittest.TestCase):
# ---------------------------------------------------------------------------
@classmethod
def setUpClass(cls):
"""setup the environment"""
global s_assembler, s_packetizer
# initialise the system configuration
initConfiguration()
s_assembler = Assembler()
s_packetizer = Packetizer()
# ---------------------------------------------------------------------------
def test_idleFrame(self):
"""pass an idle frame through Assembler and Packetizer"""
global s_assembler, s_packetizer, s_tmBinFrames, s_tmBinPackets
s_assembler.multiPacketMode = False
s_assembler.reset()
s_packetizer.reset()
s_tmBinFrames = []
s_tmBinPackets = []
s_assembler.flushTMframeOrIdleFrame()
self.assertEqual(len(s_tmBinFrames), 1)
binFrame = s_tmBinFrames[0]
self.assertEqual(len(binFrame), s_assembler.frameDefaults.transferFrameSize)
tmFrame = CCSDS.FRAME.TMframe(binFrame)
firstHeaderPointer = tmFrame.firstHeaderPointer
self.assertEqual(firstHeaderPointer, CCSDS.FRAME.IDLE_FRAME_PATTERN)
self.assertEqual(len(s_tmBinPackets), 0)
# ---------------------------------------------------------------------------
def test_singlePacket_1(self):
"""pass a single packet through Assembler and Packetizer"""
global s_assembler, s_packetizer, s_tmBinFrames, s_tmBinPackets
s_assembler.multiPacketMode = False
s_assembler.reset()
s_packetizer.reset()
s_tmBinFrames = []
s_tmBinPackets = []
tmPacket = CCSDS.PACKET.TMpacket(testData.TM_PACKET_01)
s_assembler.pushTMpacket(tmPacket.getBuffer())
self.assertEqual(len(s_tmBinFrames), 1)
self.assertEqual(len(s_tmBinPackets), 2)
s_assembler.flushTMframe()
self.assertEqual(len(s_tmBinFrames), 1)
self.assertEqual(len(s_tmBinPackets), 2)
binFrame = s_tmBinFrames[0]
self.assertEqual(len(binFrame), s_assembler.frameDefaults.transferFrameSize)
receivedTmPacket = CCSDS.PACKET.TMpacket(s_tmBinPackets[0])
self.assertEqual(receivedTmPacket, tmPacket)
receivedTmPacket = CCSDS.PACKET.TMpacket(s_tmBinPackets[1])
self.assertEqual(receivedTmPacket.applicationProcessId, CCSDS.PACKET.IDLE_PKT_APID)
# ---------------------------------------------------------------------------
def test_doublePacket_1(self):
"""pass 2 packets through Assembler and Packetizer"""
global s_assembler, s_packetizer, s_tmBinFrames, s_tmBinPackets
s_assembler.multiPacketMode = False
s_assembler.reset()
s_packetizer.reset()
s_tmBinFrames = []
s_tmBinPackets = []
tm1Packet = CCSDS.PACKET.TMpacket(testData.TM_PACKET_01)
s_assembler.pushTMpacket(tm1Packet.getBuffer())
tm2Packet = CCSDS.PACKET.TMpacket(testData.TM_PACKET_02)
s_assembler.pushTMpacket(tm2Packet.getBuffer())
self.assertEqual(len(s_tmBinFrames), 2)
self.assertEqual(len(s_tmBinPackets), 4)
s_assembler.flushTMframe()
self.assertEqual(len(s_tmBinFrames), 2)
self.assertEqual(len(s_tmBinPackets), 4)
binFrame = s_tmBinFrames[0]
self.assertEqual(len(binFrame), s_assembler.frameDefaults.transferFrameSize)
binFrame = s_tmBinFrames[1]
self.assertEqual(len(binFrame), s_assembler.frameDefaults.transferFrameSize)
receivedTmPacket = CCSDS.PACKET.TMpacket(s_tmBinPackets[0])
self.assertEqual(receivedTmPacket, tm1Packet)
receivedTmPacket = CCSDS.PACKET.TMpacket(s_tmBinPackets[1])
self.assertEqual(receivedTmPacket.applicationProcessId, CCSDS.PACKET.IDLE_PKT_APID)
receivedTmPacket = CCSDS.PACKET.TMpacket(s_tmBinPackets[2])
self.assertEqual(receivedTmPacket, tm2Packet)
receivedTmPacket = CCSDS.PACKET.TMpacket(s_tmBinPackets[3])
self.assertEqual(receivedTmPacket.applicationProcessId, CCSDS.PACKET.IDLE_PKT_APID)
# ---------------------------------------------------------------------------
def test_singlePacket_n(self):
"""pass a single packet through Assembler and Packetizer"""
global s_assembler, s_packetizer, s_tmBinFrames, s_tmBinPackets
s_assembler.multiPacketMode = True
s_assembler.reset()
s_packetizer.reset()
s_tmBinFrames = []
s_tmBinPackets = []
tmPacket = CCSDS.PACKET.TMpacket(testData.TM_PACKET_01)
s_assembler.pushTMpacket(tmPacket.getBuffer())
self.assertEqual(len(s_tmBinFrames), 0)
self.assertEqual(len(s_tmBinPackets), 0)
s_assembler.flushTMframe()
self.assertEqual(len(s_tmBinFrames), 1)
self.assertEqual(len(s_tmBinPackets), 2)
binFrame = s_tmBinFrames[0]
self.assertEqual(len(binFrame), s_assembler.frameDefaults.transferFrameSize)
receivedTmPacket = CCSDS.PACKET.TMpacket(s_tmBinPackets[0])
self.assertEqual(receivedTmPacket, tmPacket)
receivedTmPacket = CCSDS.PACKET.TMpacket(s_tmBinPackets[1])
self.assertEqual(receivedTmPacket.applicationProcessId, CCSDS.PACKET.IDLE_PKT_APID)
# ---------------------------------------------------------------------------
def test_doublePacket_n(self):
"""pass 2 packets through Assembler and Packetizer"""
global s_assembler, s_packetizer, s_tmBinFrames, s_tmBinPackets
s_assembler.multiPacketMode = True
s_assembler.reset()
s_packetizer.reset()
s_tmBinFrames = []
s_tmBinPackets = []
tm1Packet = CCSDS.PACKET.TMpacket(testData.TM_PACKET_01)
s_assembler.pushTMpacket(tm1Packet.getBuffer())
tm2Packet = CCSDS.PACKET.TMpacket(testData.TM_PACKET_02)
s_assembler.pushTMpacket(tm2Packet.getBuffer())
self.assertEqual(len(s_tmBinFrames), 0)
self.assertEqual(len(s_tmBinPackets), 0)
s_assembler.flushTMframe()
self.assertEqual(len(s_tmBinFrames), 1)
self.assertEqual(len(s_tmBinPackets), 3)
binFrame = s_tmBinFrames[0]
self.assertEqual(len(binFrame), s_assembler.frameDefaults.transferFrameSize)
receivedTmPacket = CCSDS.PACKET.TMpacket(s_tmBinPackets[0])
self.assertEqual(receivedTmPacket, tm1Packet)
receivedTmPacket = CCSDS.PACKET.TMpacket(s_tmBinPackets[1])
self.assertEqual(receivedTmPacket, tm2Packet)
receivedTmPacket = CCSDS.PACKET.TMpacket(s_tmBinPackets[2])
self.assertEqual(receivedTmPacket.applicationProcessId, CCSDS.PACKET.IDLE_PKT_APID)
# ---------------------------------------------------------------------------
def test_spilloverPacket(self):
"""pass 5 packets to force a spillover packet"""
global s_assembler, s_packetizer, s_tmBinFrames, s_tmBinPackets
s_assembler.multiPacketMode = True
s_assembler.reset()
s_packetizer.reset()
s_tmBinFrames = []
s_tmBinPackets = []
tm1Packet = CCSDS.PACKET.TMpacket(testData.TM_PACKET_01)
s_assembler.pushTMpacket(tm1Packet.getBuffer())
tm2Packet = CCSDS.PACKET.TMpacket(testData.TM_PACKET_02)
s_assembler.pushTMpacket(tm2Packet.getBuffer())
tm3Packet = CCSDS.PACKET.TMpacket(testData.TM_PACKET_03)
s_assembler.pushTMpacket(tm3Packet.getBuffer())
tm4Packet = CCSDS.PACKET.TMpacket(testData.TM_PACKET_02)
s_assembler.pushTMpacket(tm4Packet.getBuffer())
tm5Packet = CCSDS.PACKET.TMpacket(testData.TM_PACKET_01)
s_assembler.pushTMpacket(tm5Packet.getBuffer())
self.assertEqual(len(s_tmBinFrames), 0)
self.assertEqual(len(s_tmBinPackets), 0)
tm6Packet = CCSDS.PACKET.TMpacket(testData.TM_PACKET_02)
s_assembler.pushTMpacket(tm6Packet.getBuffer())
self.assertEqual(len(s_tmBinFrames), 1)
self.assertEqual(len(s_tmBinPackets), 5)
s_assembler.flushTMframe()
self.assertEqual(len(s_tmBinFrames), 2)
self.assertEqual(len(s_tmBinPackets), 7)
binFrame = s_tmBinFrames[0]
self.assertEqual(len(binFrame), s_assembler.frameDefaults.transferFrameSize)
binFrame = s_tmBinFrames[1]
self.assertEqual(len(binFrame), s_assembler.frameDefaults.transferFrameSize)
receivedTmPacket = CCSDS.PACKET.TMpacket(s_tmBinPackets[0])
self.assertEqual(receivedTmPacket, tm1Packet)
receivedTmPacket = CCSDS.PACKET.TMpacket(s_tmBinPackets[1])
self.assertEqual(receivedTmPacket, tm2Packet)
receivedTmPacket = CCSDS.PACKET.TMpacket(s_tmBinPackets[2])
self.assertEqual(receivedTmPacket, tm3Packet)
receivedTmPacket = CCSDS.PACKET.TMpacket(s_tmBinPackets[3])
self.assertEqual(receivedTmPacket, tm4Packet)
receivedTmPacket = CCSDS.PACKET.TMpacket(s_tmBinPackets[4])
self.assertEqual(receivedTmPacket, tm5Packet)
receivedTmPacket = CCSDS.PACKET.TMpacket(s_tmBinPackets[5])
self.assertEqual(receivedTmPacket, tm6Packet)
receivedTmPacket = CCSDS.PACKET.TMpacket(s_tmBinPackets[6])
self.assertEqual(receivedTmPacket.applicationProcessId, CCSDS.PACKET.IDLE_PKT_APID)
# ---------------------------------------------------------------------------
def test_spillover2Frames(self):
"""pass 5 packets to force a spillover packet"""
global s_assembler, s_packetizer, s_tmBinFrames, s_tmBinPackets
s_assembler.multiPacketMode = True
s_assembler.reset()
s_packetizer.reset()
s_tmBinFrames = []
s_tmBinPackets = []
tm1Packet = CCSDS.PACKET.TMpacket(testData.TM_PACKET_02)
s_assembler.pushTMpacket(tm1Packet.getBuffer())
self.assertEqual(len(s_tmBinFrames), 0)
self.assertEqual(len(s_tmBinPackets), 0)
tm2Packet = CCSDS.PACKET.TMpacket(testData.TM_PACKET_04)
s_assembler.pushTMpacket(tm2Packet.getBuffer())
self.assertEqual(len(s_tmBinFrames), 2)
self.assertEqual(len(s_tmBinPackets), 1)
s_assembler.flushTMframe()
self.assertEqual(len(s_tmBinFrames), 3)
self.assertEqual(len(s_tmBinPackets), 3)
binFrame = s_tmBinFrames[0]
self.assertEqual(len(binFrame), s_assembler.frameDefaults.transferFrameSize)
binFrame = s_tmBinFrames[1]
self.assertEqual(len(binFrame), s_assembler.frameDefaults.transferFrameSize)
binFrame = s_tmBinFrames[2]
self.assertEqual(len(binFrame), s_assembler.frameDefaults.transferFrameSize)
receivedTmPacket = CCSDS.PACKET.TMpacket(s_tmBinPackets[0])
self.assertEqual(receivedTmPacket, tm1Packet)
receivedTmPacket = CCSDS.PACKET.TMpacket(s_tmBinPackets[1])
self.assertEqual(receivedTmPacket, tm2Packet)
receivedTmPacket = CCSDS.PACKET.TMpacket(s_tmBinPackets[2])
self.assertEqual(receivedTmPacket.applicationProcessId, CCSDS.PACKET.IDLE_PKT_APID)
########
# main #
########
if __name__ == "__main__":
unittest.main()
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility library for reading user information from an id_token.
This is an experimental library that can temporarily be used to extract
a user from an id_token. The functionality provided by this library
will be provided elsewhere in the future.
"""
import base64
import json
import logging
import os
import re
import time
import urllib
from google.appengine.api import memcache
from google.appengine.api import oauth
from google.appengine.api import urlfetch
from google.appengine.api import users
try:
# PyCrypto may not be installed for the import_aeta_test or in dev's
# individual Python installations. It is available on AppEngine in prod.
# Disable "Import not at top of file" warning.
# pylint: disable=g-import-not-at-top
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
# pylint: enable=g-import-not-at-top
_CRYPTO_LOADED = True
except ImportError:
_CRYPTO_LOADED = False
__all__ = ['get_current_user',
'InvalidGetUserCall',
'SKIP_CLIENT_ID_CHECK']
SKIP_CLIENT_ID_CHECK = ['*'] # This needs to be a list, for comparisons.
_CLOCK_SKEW_SECS = 300 # 5 minutes in seconds
_MAX_TOKEN_LIFETIME_SECS = 86400 # 1 day in seconds
_DEFAULT_CERT_URI = ('https://www.googleapis.com/service_accounts/v1/metadata/'
'raw/[email protected]')
_ENV_USE_OAUTH_SCOPE = 'ENDPOINTS_USE_OAUTH_SCOPE'
_ENV_AUTH_EMAIL = 'ENDPOINTS_AUTH_EMAIL'
_ENV_AUTH_DOMAIN = 'ENDPOINTS_AUTH_DOMAIN'
_EMAIL_SCOPE = 'https://www.googleapis.com/auth/userinfo.email'
_TOKENINFO_URL = 'https://www.googleapis.com/oauth2/v1/tokeninfo'
_MAX_AGE_REGEX = re.compile(r'\s*max-age\s*=\s*(\d+)\s*')
_CERT_NAMESPACE = '__verify_jwt'
_ISSUERS = ('accounts.google.com', 'https://accounts.google.com')
class _AppIdentityError(Exception):
pass
class InvalidGetUserCall(Exception):
"""Called get_current_user when the environment was not set up for it."""
# pylint: disable=g-bad-name
def get_current_user():
"""Get user information from the id_token or oauth token in the request.
This should only be called from within an Endpoints request handler,
decorated with an @endpoints.method decorator. The decorator should include
the https://www.googleapis.com/auth/userinfo.email scope.
If the current request uses an id_token, this validates and parses the token
against the info in the current request handler and returns the user.
Or, for an Oauth token, this call validates the token against the tokeninfo
endpoint and oauth.get_current_user with the scopes provided in the method's
decorator.
Returns:
None if there is no token or it's invalid. If the token was valid, this
returns a User. Only the user's email field is guaranteed to be set.
Other fields may be empty.
Raises:
InvalidGetUserCall: if the environment variables necessary to determine the
endpoints user are not set. These are typically set when processing a
request using an Endpoints handler. If they are not set, it likely
indicates that this function was called from outside an Endpoints request
handler.
"""
if not _is_auth_info_available():
raise InvalidGetUserCall('No valid endpoints user in environment.')
if _ENV_USE_OAUTH_SCOPE in os.environ:
# We can get more information from the oauth.get_current_user function,
# as long as we know what scope to use. Since that scope has been
# cached, we can just return this:
return oauth.get_current_user(os.environ[_ENV_USE_OAUTH_SCOPE])
if (_ENV_AUTH_EMAIL in os.environ and
_ENV_AUTH_DOMAIN in os.environ):
if not os.environ[_ENV_AUTH_EMAIL]:
# Either there was no id token or we were unable to validate it,
# so there's no user.
return None
return users.User(os.environ[_ENV_AUTH_EMAIL],
os.environ[_ENV_AUTH_DOMAIN] or None)
# Shouldn't hit this, because all the _is_auth_info_available cases were
# checked, but just in case.
return None
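# Typical usage inside an Endpoints request handler (sketch; the handler body
# and the exception raised are illustrative, not prescribed by this module):
# user = endpoints.get_current_user()
# if user is None:
#     raise endpoints.UnauthorizedException('Authentication required.')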
# pylint: disable=g-bad-name
def _is_auth_info_available():
"""Check if user auth info has been set in environment variables."""
return ((_ENV_AUTH_EMAIL in os.environ and
_ENV_AUTH_DOMAIN in os.environ) or
_ENV_USE_OAUTH_SCOPE in os.environ)
def _maybe_set_current_user_vars(method, api_info=None, request=None):
"""Get user information from the id_token or oauth token in the request.
Used internally by Endpoints to set up environment variables for user
authentication.
Args:
method: The class method that's handling this request. This method
should be annotated with @endpoints.method.
api_info: An api_config._ApiInfo instance. Optional. If None, will attempt
to parse api_info from the implicit instance of the method.
request: The current request, or None.
"""
if _is_auth_info_available():
return
# By default, there's no user.
os.environ[_ENV_AUTH_EMAIL] = ''
os.environ[_ENV_AUTH_DOMAIN] = ''
# Choose settings on the method, if specified. Otherwise, choose settings
# from the API. Specifically check for None, so that methods can override
# with empty lists.
try:
api_info = api_info or method.im_self.api_info
except AttributeError:
# The most common case for this is someone passing an unbound method
# to this function, which most likely only happens in our unit tests.
# We could propagate the exception, but this results in some really
# difficult to debug behavior. Better to log a warning and pretend
# there are no API-level settings.
logging.warning('AttributeError when accessing %s.im_self. An unbound '
'method was probably passed as an endpoints handler.',
method.__name__)
scopes = method.method_info.scopes
audiences = method.method_info.audiences
allowed_client_ids = method.method_info.allowed_client_ids
else:
scopes = (method.method_info.scopes
if method.method_info.scopes is not None
else api_info.scopes)
audiences = (method.method_info.audiences
if method.method_info.audiences is not None
else api_info.audiences)
allowed_client_ids = (method.method_info.allowed_client_ids
if method.method_info.allowed_client_ids is not None
else api_info.allowed_client_ids)
if not scopes and not audiences and not allowed_client_ids:
# The user hasn't provided any information to allow us to parse either
# an id_token or an Oauth token. They appear not to be interested in
# auth.
return
token = _get_token(request)
if not token:
return None
# When every item in the acceptable scopes list is
# "https://www.googleapis.com/auth/userinfo.email", and there is a non-empty
# allowed_client_ids list, the API code will first attempt OAuth 2/OpenID
# Connect ID token processing for any incoming bearer token.
if ((scopes == [_EMAIL_SCOPE] or scopes == (_EMAIL_SCOPE,)) and
allowed_client_ids):
logging.debug('Checking for id_token.')
time_now = long(time.time())
user = _get_id_token_user(token, audiences, allowed_client_ids, time_now,
memcache)
if user:
os.environ[_ENV_AUTH_EMAIL] = user.email()
os.environ[_ENV_AUTH_DOMAIN] = user.auth_domain()
return
# Check if the user is interested in an oauth token.
if scopes:
logging.debug('Checking for oauth token.')
if _is_local_dev():
_set_bearer_user_vars_local(token, allowed_client_ids, scopes)
else:
_set_bearer_user_vars(allowed_client_ids, scopes)
def _get_token(request):
"""Get the auth token for this request.
Auth token may be specified in either the Authorization header or
as a query param (either access_token or bearer_token). We'll check in
this order:
1. Authorization header.
2. bearer_token query param.
3. access_token query param.
Args:
request: The current request, or None.
Returns:
The token in the request or None.
"""
# Check if the token is in the Authorization header.
auth_header = os.environ.get('HTTP_AUTHORIZATION')
if auth_header:
allowed_auth_schemes = ('OAuth', 'Bearer')
for auth_scheme in allowed_auth_schemes:
if auth_header.startswith(auth_scheme):
return auth_header[len(auth_scheme) + 1:]
# If an auth header was specified, even if it's an invalid one, we won't
# look for the token anywhere else.
return None
# Check if the token is in the query string.
if request:
for key in ('bearer_token', 'access_token'):
token, _ = request.get_unrecognized_field_info(key)
if token:
return token
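# Worked example (hypothetical token value): with
# os.environ['HTTP_AUTHORIZATION'] == 'Bearer ya29.abc123', the loop above
# matches the 'Bearer' scheme and returns 'ya29.abc123', i.e. everything
# after the scheme name plus the single separating space.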
def _get_id_token_user(token, audiences, allowed_client_ids, time_now, cache):
"""Get a User for the given id token, if the token is valid.
Args:
token: The id_token to check.
audiences: List of audiences that are acceptable.
allowed_client_ids: List of client IDs that are acceptable.
time_now: The current time as a long (eg. long(time.time())).
cache: Cache to use (eg. the memcache module).
Returns:
A User if the token is valid, None otherwise.
"""
# Verify that the token is valid before we try to extract anything from it.
# This verifies the signature and some of the basic info in the token.
try:
parsed_token = _verify_signed_jwt_with_certs(token, time_now, cache)
except Exception, e: # pylint: disable=broad-except
logging.debug('id_token verification failed: %s', e)
return None
if _verify_parsed_token(parsed_token, audiences, allowed_client_ids):
email = parsed_token['email']
# The token might have an id, but it's a Gaia ID that's been
# obfuscated with the Focus key, rather than the AppEngine (igoogle)
# key. If the developer ever put this email into the user DB
# and retrieved the ID from that, it'd be different from the ID we'd
# return here, so it's safer to not return the ID.
# Instead, we'll only return the email.
return users.User(email)
# pylint: disable=unused-argument
def _set_oauth_user_vars(token_info, audiences, allowed_client_ids, scopes,
local_dev):
logging.warning('_set_oauth_user_vars is deprecated and will be removed '
'soon.')
return _set_bearer_user_vars(allowed_client_ids, scopes)
# pylint: enable=unused-argument
def _set_bearer_user_vars(allowed_client_ids, scopes):
"""Validate the oauth bearer token and set endpoints auth user variables.
If the bearer token is valid, this sets ENDPOINTS_USE_OAUTH_SCOPE. This
provides enough information that our endpoints.get_current_user() function
can get the user.
Args:
allowed_client_ids: List of client IDs that are acceptable.
scopes: List of acceptable scopes.
"""
for scope in scopes:
try:
client_id = oauth.get_client_id(scope)
except oauth.Error:
# This scope failed. Try the next.
continue
# The client ID must be in allowed_client_ids. If allowed_client_ids is
# empty, don't allow any client ID. If allowed_client_ids is set to
# SKIP_CLIENT_ID_CHECK, all client IDs will be allowed.
if (list(allowed_client_ids) != SKIP_CLIENT_ID_CHECK and
client_id not in allowed_client_ids):
logging.warning('Client ID is not allowed: %s', client_id)
return
os.environ[_ENV_USE_OAUTH_SCOPE] = scope
logging.debug('Returning user from matched oauth_user.')
return
logging.debug('Oauth framework user didn\'t match oauth token user.')
return None
def _set_bearer_user_vars_local(token, allowed_client_ids, scopes):
"""Validate the oauth bearer token on the dev server.
Since the functions in the oauth module return only example results in local
development, this hits the tokeninfo endpoint and attempts to validate the
token. If it's valid, we'll set _ENV_AUTH_EMAIL and _ENV_AUTH_DOMAIN so we
can get the user from the token.
Args:
token: String with the oauth token to validate.
allowed_client_ids: List of client IDs that are acceptable.
scopes: List of acceptable scopes.
"""
# Get token info from the tokeninfo endpoint.
result = urlfetch.fetch(
'%s?%s' % (_TOKENINFO_URL, urllib.urlencode({'access_token': token})))
if result.status_code != 200:
try:
error_description = json.loads(result.content)['error_description']
except (ValueError, KeyError):
error_description = ''
logging.error('Token info endpoint returned status %s: %s',
result.status_code, error_description)
return
token_info = json.loads(result.content)
# Validate email.
if 'email' not in token_info:
logging.warning('Oauth token doesn\'t include an email address.')
return
if not token_info.get('verified_email'):
logging.warning('Oauth token email isn\'t verified.')
return
# Validate client ID.
client_id = token_info.get('issued_to')
if (list(allowed_client_ids) != SKIP_CLIENT_ID_CHECK and
client_id not in allowed_client_ids):
logging.warning('Client ID is not allowed: %s', client_id)
return
# Verify at least one of the scopes matches.
token_scopes = token_info.get('scope', '').split(' ')
if not any(scope in scopes for scope in token_scopes):
logging.warning('Oauth token scopes don\'t match any acceptable scopes.')
return
os.environ[_ENV_AUTH_EMAIL] = token_info['email']
os.environ[_ENV_AUTH_DOMAIN] = ''
logging.debug('Local dev returning user from token.')
return
def _is_local_dev():
return os.environ.get('SERVER_SOFTWARE', '').startswith('Development')
def _verify_parsed_token(parsed_token, audiences, allowed_client_ids):
"""Verify a parsed user ID token.
Args:
parsed_token: The parsed token information.
audiences: The allowed audiences.
allowed_client_ids: The allowed client IDs.
Returns:
True if the token is verified, False otherwise.
"""
# Verify the issuer.
if parsed_token.get('iss') not in _ISSUERS:
logging.warning('Issuer was not valid: %s', parsed_token.get('iss'))
return False
# Check audiences.
aud = parsed_token.get('aud')
if not aud:
logging.warning('No aud field in token')
return False
# Special handling if aud == cid. This occurs with iOS and browsers.
# As long as audience == client_id and cid is allowed, we need to accept
# the audience for compatibility.
cid = parsed_token.get('azp')
if aud != cid and aud not in audiences:
logging.warning('Audience not allowed: %s', aud)
return False
# Check allowed client IDs.
if list(allowed_client_ids) == SKIP_CLIENT_ID_CHECK:
logging.warning('Client ID check can\'t be skipped for ID tokens. '
'Id_token cannot be verified.')
return False
elif not cid or cid not in allowed_client_ids:
logging.warning('Client ID is not allowed: %s', cid)
return False
if 'email' not in parsed_token:
return False
return True
def _urlsafe_b64decode(b64string):
# Guard against unicode strings, which base64 can't handle.
b64string = b64string.encode('ascii')
padded = b64string + '=' * ((4 - len(b64string)) % 4)
return base64.urlsafe_b64decode(padded)
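# Illustrative sketch (added commentary, not part of the original module): the
# padding math rounds the input up to a multiple of 4 characters, e.g.
#   _urlsafe_b64decode(u'aGVsbG8') -> 'hello'
# because 'aGVsbG8' (7 chars) gets a single '=' appended before decoding.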
def _get_cert_expiration_time(headers):
"""Get the expiration time for a cert, given the response headers.
Get expiration time from the headers in the result. If we can't get
a time from the headers, this returns 0, indicating that the cert
shouldn't be cached.
Args:
headers: A dict containing the response headers from the request to get
certs.
Returns:
An integer with the number of seconds the cert should be cached. This
value is guaranteed to be >= 0.
"""
# Check the max age of the cert.
cache_control = headers.get('Cache-Control', '')
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 indicates only
# a comma-separated header is valid, so it should be fine to split this on
# commas.
for entry in cache_control.split(','):
match = _MAX_AGE_REGEX.match(entry)
if match:
cache_time_seconds = int(match.group(1))
break
else:
return 0
# Subtract the cert's age.
age = headers.get('Age')
if age is not None:
try:
age = int(age)
except ValueError:
age = 0
cache_time_seconds -= age
return max(0, cache_time_seconds)
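# Illustrative sketch (added commentary, not part of the original module): with
# response headers such as
#   {'Cache-Control': 'public, max-age=3600', 'Age': '600'}
# the max-age entry yields 3600, the Age header subtracts 600, and the certs
# would be cached for 3000 seconds; with no max-age entry the function returns
# 0 and the certs are not cached at all.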
def _get_cached_certs(cert_uri, cache):
"""Get certs from cache if present; otherwise, gets from URI and caches them.
Args:
cert_uri: URI from which to retrieve certs if cache is stale or empty.
cache: Cache of pre-fetched certs.
Returns:
The retrieved certs.
"""
certs = cache.get(cert_uri, namespace=_CERT_NAMESPACE)
if certs is None:
logging.debug('Cert cache miss')
try:
result = urlfetch.fetch(cert_uri)
except AssertionError:
# This happens in unit tests. Act as if we couldn't get any certs.
return None
if result.status_code == 200:
certs = json.loads(result.content)
expiration_time_seconds = _get_cert_expiration_time(result.headers)
if expiration_time_seconds:
cache.set(cert_uri, certs, time=expiration_time_seconds,
namespace=_CERT_NAMESPACE)
else:
logging.error(
'Certs not available, HTTP request returned %d', result.status_code)
return certs
def _b64_to_long(b):
b = b.encode('ascii')
b += '=' * ((4 - len(b)) % 4)
b = base64.b64decode(b)
return long(b.encode('hex'), 16)
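# Illustrative sketch (added commentary, not part of the original module): the
# certs document stores the RSA modulus and exponent as standard (non-urlsafe)
# base64, so for example _b64_to_long('AQAB') == 65537L, the usual public
# exponent.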
def _verify_signed_jwt_with_certs(
jwt, time_now, cache,
cert_uri=_DEFAULT_CERT_URI):
"""Verify a JWT against public certs.
See http://self-issued.info/docs/draft-jones-json-web-token.html.
The PyCrypto library included with Google App Engine is severely limited and
so you have to use it very carefully to verify JWT signatures. The first
issue is that the library can't read X.509 files, so we make a call to a
special URI that has the public cert in modulus/exponent form in JSON.
The second issue is that the RSA.verify method doesn't work, at least for
how the JWT tokens are signed, so we have to manually verify the signature
of the JWT, which means hashing the signed part of the JWT and comparing
that to the signature that's been encrypted with the public key.
Args:
jwt: string, A JWT.
time_now: The current time, as a long (eg. long(time.time())).
cache: Cache to use (eg. the memcache module).
cert_uri: string, URI to get cert modulus and exponent in JSON format.
Returns:
dict, The deserialized JSON payload in the JWT.
Raises:
_AppIdentityError: if any checks are failed.
"""
segments = jwt.split('.')
if len(segments) != 3:
# Note that anywhere we print the jwt or its json body, we need to use
# %r instead of %s, so that non-printable characters are escaped safely.
raise _AppIdentityError('Token is not an id_token (Wrong number of '
'segments)')
signed = '%s.%s' % (segments[0], segments[1])
signature = _urlsafe_b64decode(segments[2])
# pycrypto only deals in integers, so we have to convert the string of bytes
# into a long.
lsignature = long(signature.encode('hex'), 16)
# Verify expected header.
header_body = _urlsafe_b64decode(segments[0])
try:
header = json.loads(header_body)
except:
raise _AppIdentityError("Can't parse header")
if header.get('alg') != 'RS256':
raise _AppIdentityError('Unexpected encryption algorithm: %r' %
header.get('alg'))
# Parse token.
json_body = _urlsafe_b64decode(segments[1])
try:
parsed = json.loads(json_body)
except:
raise _AppIdentityError("Can't parse token body")
certs = _get_cached_certs(cert_uri, cache)
if certs is None:
raise _AppIdentityError(
'Unable to retrieve certs needed to verify the signed JWT')
# Verify that we were able to load the Crypto libraries, before we try
# to use them.
if not _CRYPTO_LOADED:
raise _AppIdentityError('Unable to load pycrypto library. Can\'t verify '
'id_token signature. See http://www.pycrypto.org '
'for more information on pycrypto.')
# SHA256 hash of the already 'signed' segment from the JWT. Since it is a
# SHA256 hash, it will always have length 64.
local_hash = SHA256.new(signed).hexdigest()
# Check signature.
verified = False
for keyvalue in certs['keyvalues']:
try:
modulus = _b64_to_long(keyvalue['modulus'])
exponent = _b64_to_long(keyvalue['exponent'])
key = RSA.construct((modulus, exponent))
# Encrypt, and convert to a hex string.
hexsig = '%064x' % key.encrypt(lsignature, '')[0]
# Make sure we keep only the last 64 hex chars.
hexsig = hexsig[-64:]
# Check the signature on 'signed' by encrypting 'signature' with the
# public key and confirming the result matches the SHA256 hash of
# 'signed'.
verified = (hexsig == local_hash)
if verified:
break
except Exception, e: # pylint: disable=broad-except
# Log the exception for debugging purpose.
logging.debug(
'Signature verification error: %s; continuing with the next cert.', e)
continue
if not verified:
raise _AppIdentityError('Invalid token signature')
# Check creation timestamp.
iat = parsed.get('iat')
if iat is None:
raise _AppIdentityError('No iat field in token')
earliest = iat - _CLOCK_SKEW_SECS
# Check expiration timestamp.
exp = parsed.get('exp')
if exp is None:
raise _AppIdentityError('No exp field in token')
if exp >= time_now + _MAX_TOKEN_LIFETIME_SECS:
raise _AppIdentityError('exp field too far in future')
latest = exp + _CLOCK_SKEW_SECS
if time_now < earliest:
raise _AppIdentityError('Token used too early, %d < %d' %
(time_now, earliest))
if time_now > latest:
raise _AppIdentityError('Token used too late, %d > %d' %
(time_now, latest))
return parsed
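# Illustrative call site (a sketch, not part of the original module): the
# verifier expects the raw JWT string, the current epoch seconds and a
# memcache-like cache, e.g.
#   parsed = _verify_signed_jwt_with_certs(jwt, long(time.time()), memcache)
#   email = parsed.get('email')
# and raises _AppIdentityError for any malformed, badly signed or expired
# token.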
|
|
# Copyright 2013 Cloudbase Solutions SRL
# Copyright 2013 Pedro Navarro Perez
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for Windows Hyper-V virtual switch neutron driver
"""
import mock
from os_win import exceptions
from os_win import utilsfactory
from oslo_config import cfg
from hyperv.neutron import constants
from hyperv.neutron import exception
from hyperv.neutron import hyperv_neutron_agent
from hyperv.tests import base
CONF = cfg.CONF
class TestHyperVNeutronAgent(base.BaseTestCase):
_FAKE_PORT_ID = 'fake_port_id'
def setUp(self):
super(TestHyperVNeutronAgent, self).setUp()
utilsfactory_patcher = mock.patch.object(utilsfactory, '_get_class')
utilsfactory_patcher.start()
self.addCleanup(utilsfactory_patcher.stop)
self.agent = hyperv_neutron_agent.HyperVNeutronAgentMixin()
self.agent.plugin_rpc = mock.Mock()
self.agent._metricsutils = mock.MagicMock()
self.agent._utils = mock.MagicMock()
self.agent.sec_groups_agent = mock.MagicMock()
self.agent.context = mock.Mock()
self.agent.client = mock.MagicMock()
self.agent.connection = mock.MagicMock()
self.agent.agent_id = mock.Mock()
self.agent.notifier = mock.Mock()
self.agent._utils = mock.MagicMock()
self.agent._nvgre_ops = mock.MagicMock()
def test_load_physical_network_mappings(self):
test_mappings = ['fakenetwork1:fake_vswitch',
'fakenetwork2:fake_vswitch_2', '*:fake_vswitch_3',
'bad_mapping']
expected = [('fakenetwork1$', 'fake_vswitch'),
('fakenetwork2$', 'fake_vswitch_2'),
('.*$', 'fake_vswitch_3')]
self.agent._load_physical_network_mappings(test_mappings)
self.assertEqual(expected,
list(self.agent._physical_network_mappings.items()))
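# Note (added commentary, not from the original tests): each mapping has the
# form '<physical network>:<vswitch>'; the expected list above shows how the
# agent anchors each pattern with '$' and turns the '*' wildcard into the
# regex '.*$', while entries without a colon ('bad_mapping') are dropped.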
def test_get_vswitch_for_physical_network_with_default_switch(self):
test_mappings = ['fakenetwork:fake_vswitch',
'fakenetwork2$:fake_vswitch_2',
'fakenetwork*:fake_vswitch_3']
self.agent._load_physical_network_mappings(test_mappings)
physnet = self.agent._get_vswitch_for_physical_network('fakenetwork')
self.assertEqual('fake_vswitch', physnet)
physnet = self.agent._get_vswitch_for_physical_network('fakenetwork2$')
self.assertEqual('fake_vswitch_2', physnet)
physnet = self.agent._get_vswitch_for_physical_network('fakenetwork3')
self.assertEqual('fake_vswitch_3', physnet)
physnet = self.agent._get_vswitch_for_physical_network('fakenetwork35')
self.assertEqual('fake_vswitch_3', physnet)
physnet = self.agent._get_vswitch_for_physical_network('fake_network1')
self.assertEqual('fake_network1', physnet)
def test_get_vswitch_for_physical_network_without_default_switch(self):
test_mappings = ['fakenetwork:fake_vswitch',
'fakenetwork2:fake_vswitch_2']
self.agent._load_physical_network_mappings(test_mappings)
physnet = self.agent._get_vswitch_for_physical_network('fakenetwork')
self.assertEqual('fake_vswitch', physnet)
physnet = self.agent._get_vswitch_for_physical_network('fakenetwork2')
self.assertEqual('fake_vswitch_2', physnet)
def test_get_vswitch_for_physical_network_none(self):
test_mappings = ['fakenetwork:fake_vswitch',
'fakenetwork2:fake_vswitch_2']
self.agent._load_physical_network_mappings(test_mappings)
physnet = self.agent._get_vswitch_for_physical_network(None)
self.assertEqual('', physnet)
test_mappings = ['fakenetwork:fake_vswitch',
'fakenetwork2:fake_vswitch_2', '*:fake_vswitch_3']
self.agent._load_physical_network_mappings(test_mappings)
physnet = self.agent._get_vswitch_for_physical_network(None)
self.assertEqual('fake_vswitch_3', physnet)
@mock.patch.object(hyperv_neutron_agent.nvgre_ops, 'HyperVNvgreOps')
def test_init_nvgre_disabled(self, mock_hyperv_nvgre_ops):
self.agent.init_nvgre()
self.assertFalse(mock_hyperv_nvgre_ops.called)
self.assertFalse(self.agent._nvgre_enabled)
@mock.patch.object(hyperv_neutron_agent.nvgre_ops, 'HyperVNvgreOps')
def test_init_nvgre_no_tunnel_ip(self, mock_hyperv_nvgre_ops):
self.config(enable_support=True, group='NVGRE')
self.assertRaises(exception.NetworkingHyperVException,
self.agent.init_nvgre)
@mock.patch.object(hyperv_neutron_agent.nvgre_ops, 'HyperVNvgreOps')
def test_init_nvgre_enabled(self, mock_hyperv_nvgre_ops):
self.config(enable_support=True, group='NVGRE')
self.config(provider_tunnel_ip=mock.sentinel.tunneling_ip,
group='NVGRE')
self.agent.init_nvgre()
mock_hyperv_nvgre_ops.assert_called_once_with(
list(self.agent._physical_network_mappings.values()))
self.assertTrue(self.agent._nvgre_enabled)
self.agent._nvgre_ops.init_notifier.assert_called_once_with(
self.agent.context, self.agent.client)
def test_get_network_vswitch_map_by_port_id(self):
net_uuid = 'net-uuid'
self.agent._network_vswitch_map = {
net_uuid: {'ports': [self._FAKE_PORT_ID]}
}
network, port_map = self.agent._get_network_vswitch_map_by_port_id(
self._FAKE_PORT_ID)
self.assertEqual(net_uuid, network)
self.assertEqual({'ports': [self._FAKE_PORT_ID]}, port_map)
def test_get_network_vswitch_map_by_port_id_not_found(self):
net_uuid = 'net-uuid'
self.agent._network_vswitch_map = {net_uuid: {'ports': []}}
network, port_map = self.agent._get_network_vswitch_map_by_port_id(
self._FAKE_PORT_ID)
self.assertIsNone(network)
self.assertIsNone(port_map)
@mock.patch.object(hyperv_neutron_agent.HyperVNeutronAgentMixin,
'_reclaim_local_network')
def test_network_delete(self, mock_reclaim_local_network):
self.agent._network_vswitch_map[mock.sentinel.net_id] = (
mock.sentinel.vswitch)
self.agent.network_delete(mock.sentinel.context, mock.sentinel.net_id)
mock_reclaim_local_network.assert_called_once_with(
mock.sentinel.net_id)
@mock.patch.object(hyperv_neutron_agent.HyperVNeutronAgentMixin,
'_reclaim_local_network')
def test_network_delete_not_defined(self, mock_reclaim_local_network):
self.agent.network_delete(mock.sentinel.context, mock.sentinel.net_id)
self.assertFalse(mock_reclaim_local_network.called)
@mock.patch.object(hyperv_neutron_agent.HyperVNeutronAgentMixin,
'_treat_vif_port')
def test_port_update_not_found(self, mock_treat_vif_port):
self.agent._utils.vnic_port_exists.return_value = False
port = {'id': mock.sentinel.port_id}
self.agent.port_update(self.agent.context, port)
self.assertFalse(mock_treat_vif_port.called)
@mock.patch.object(hyperv_neutron_agent.HyperVNeutronAgentMixin,
'_treat_vif_port')
def test_port_update(self, mock_treat_vif_port):
self.agent._utils.vnic_port_exists.return_value = True
port = {'id': mock.sentinel.port_id,
'network_id': mock.sentinel.network_id,
'admin_state_up': mock.sentinel.admin_state_up}
self.agent.port_update(self.agent.context, port,
mock.sentinel.network_type,
mock.sentinel.segmentation_id,
mock.sentinel.physical_network)
mock_treat_vif_port.assert_called_once_with(
mock.sentinel.port_id, mock.sentinel.network_id,
mock.sentinel.network_type, mock.sentinel.physical_network,
mock.sentinel.segmentation_id, mock.sentinel.admin_state_up)
def test_tunnel_update(self):
self.agent.tunnel_update(mock.sentinel.context,
tunnel_ip=mock.sentinel.tunnel_ip,
tunnel_type=mock.sentinel.tunnel_type)
self.agent._nvgre_ops.tunnel_update.assert_called_once_with(
mock.sentinel.context, mock.sentinel.tunnel_ip,
mock.sentinel.tunnel_type)
def test_tunnel_update_provider_ip(self):
self.agent.tunnel_update(mock.sentinel.context,
tunnel_ip=CONF.NVGRE.provider_tunnel_ip)
self.assertFalse(self.agent._nvgre_ops.tunnel_update.called)
def test_lookup_update(self):
kwargs = {'lookup_ip': mock.sentinel.lookup_ip,
'lookup_details': mock.sentinel.lookup_details}
self.agent.lookup_update(mock.sentinel.context, **kwargs)
self.agent._nvgre_ops.lookup_update.assert_called_once_with(kwargs)
def test_get_vswitch_name_local(self):
self.agent._local_network_vswitch = 'test_local_switch'
ret = self.agent._get_vswitch_name(constants.TYPE_LOCAL,
mock.sentinel.FAKE_PHYSICAL_NETWORK)
self.assertEqual('test_local_switch', ret)
@mock.patch.object(hyperv_neutron_agent.HyperVNeutronAgentMixin,
"_get_vswitch_for_physical_network")
def test_get_vswitch_name_vlan(self, mock_get_vswitch_for_phys_net):
ret = self.agent._get_vswitch_name(constants.TYPE_VLAN,
mock.sentinel.FAKE_PHYSICAL_NETWORK)
self.assertEqual(mock_get_vswitch_for_phys_net.return_value, ret)
mock_get_vswitch_for_phys_net.assert_called_once_with(
mock.sentinel.FAKE_PHYSICAL_NETWORK)
@mock.patch.object(hyperv_neutron_agent.HyperVNeutronAgentMixin,
"_get_vswitch_name")
def test_provision_network_exception(self, mock_get_vswitch_name):
self.assertRaises(exception.NetworkingHyperVException,
self.agent._provision_network,
mock.sentinel.FAKE_PORT_ID,
mock.sentinel.FAKE_NET_UUID,
mock.sentinel.FAKE_NETWORK_TYPE,
mock.sentinel.FAKE_PHYSICAL_NETWORK,
mock.sentinel.FAKE_SEGMENTATION_ID)
mock_get_vswitch_name.assert_called_once_with(
mock.sentinel.FAKE_NETWORK_TYPE,
mock.sentinel.FAKE_PHYSICAL_NETWORK)
@mock.patch.object(hyperv_neutron_agent.HyperVNeutronAgentMixin,
"_get_vswitch_name")
def test_provision_network_vlan(self, mock_get_vswitch_name):
self.agent._provision_network(mock.sentinel.FAKE_PORT_ID,
mock.sentinel.FAKE_NET_UUID,
constants.TYPE_VLAN,
mock.sentinel.FAKE_PHYSICAL_NETWORK,
mock.sentinel.FAKE_SEGMENTATION_ID)
mock_get_vswitch_name.assert_called_once_with(
constants.TYPE_VLAN,
mock.sentinel.FAKE_PHYSICAL_NETWORK)
@mock.patch.object(hyperv_neutron_agent.HyperVNeutronAgentMixin,
"_get_vswitch_name")
def test_provision_network_nvgre(self, mock_get_vswitch_name):
self.agent._nvgre_enabled = True
vswitch_name = mock_get_vswitch_name.return_value
self.agent._provision_network(mock.sentinel.FAKE_PORT_ID,
mock.sentinel.FAKE_NET_UUID,
constants.TYPE_NVGRE,
mock.sentinel.FAKE_PHYSICAL_NETWORK,
mock.sentinel.FAKE_SEGMENTATION_ID)
mock_get_vswitch_name.assert_called_once_with(
constants.TYPE_NVGRE,
mock.sentinel.FAKE_PHYSICAL_NETWORK)
self.agent._nvgre_ops.bind_nvgre_network.assert_called_once_with(
mock.sentinel.FAKE_SEGMENTATION_ID,
mock.sentinel.FAKE_NET_UUID,
vswitch_name)
@mock.patch.object(hyperv_neutron_agent.HyperVNeutronAgentMixin,
"_get_vswitch_name")
def test_provision_network_flat(self, mock_get_vswitch_name):
self.agent._provision_network(mock.sentinel.FAKE_PORT_ID,
mock.sentinel.FAKE_NET_UUID,
constants.TYPE_FLAT,
mock.sentinel.FAKE_PHYSICAL_NETWORK,
mock.sentinel.FAKE_SEGMENTATION_ID)
mock_get_vswitch_name.assert_called_once_with(
constants.TYPE_FLAT,
mock.sentinel.FAKE_PHYSICAL_NETWORK)
@mock.patch.object(hyperv_neutron_agent.HyperVNeutronAgentMixin,
"_get_vswitch_name")
def test_provision_network_local(self, mock_get_vswitch_name):
self.agent._provision_network(mock.sentinel.FAKE_PORT_ID,
mock.sentinel.FAKE_NET_UUID,
constants.TYPE_LOCAL,
mock.sentinel.FAKE_PHYSICAL_NETWORK,
mock.sentinel.FAKE_SEGMENTATION_ID)
mock_get_vswitch_name.assert_called_once_with(
constants.TYPE_LOCAL,
mock.sentinel.FAKE_PHYSICAL_NETWORK)
def test_reclaim_local_network(self):
self.agent._network_vswitch_map[mock.sentinel.net_id] = (
mock.sentinel.vswitch)
self.agent._reclaim_local_network(mock.sentinel.net_id)
self.assertNotIn(mock.sentinel.net_id, self.agent._network_vswitch_map)
def test_port_bound_enable_metrics(self):
self.agent.enable_metrics_collection = True
self._test_port_bound(True)
def test_port_bound_no_metrics(self):
self.agent.enable_metrics_collection = False
self._test_port_bound(False)
def _test_port_bound(self, enable_metrics):
port = mock.MagicMock()
net_uuid = 'my-net-uuid'
self.agent._port_bound(port, net_uuid, 'vlan', None, None)
self.assertEqual(enable_metrics,
self.agent._utils.add_metrics_collection_acls.called)
@mock.patch.object(hyperv_neutron_agent.HyperVNeutronAgentMixin,
'_provision_network')
def test_port_bound_nvgre(self, mock_provision_network):
self.agent._nvgre_enabled = True
network_type = constants.TYPE_NVGRE
net_uuid = 'my-net-uuid'
fake_map = {'vswitch_name': mock.sentinel.vswitch_name,
'ports': []}
def fake_prov_network(*args, **kwargs):
self.agent._network_vswitch_map[net_uuid] = fake_map
mock_provision_network.side_effect = fake_prov_network
self.agent._port_bound(mock.sentinel.port_id, net_uuid, network_type,
mock.sentinel.physical_network,
mock.sentinel.segmentation_id)
self.assertIn(mock.sentinel.port_id, fake_map['ports'])
mock_provision_network.assert_called_once_with(
mock.sentinel.port_id, net_uuid, network_type,
mock.sentinel.physical_network, mock.sentinel.segmentation_id)
self.agent._utils.connect_vnic_to_vswitch.assert_called_once_with(
mock.sentinel.vswitch_name, mock.sentinel.port_id)
self.agent._nvgre_ops.bind_nvgre_port.assert_called_once_with(
mock.sentinel.segmentation_id, mock.sentinel.vswitch_name,
mock.sentinel.port_id)
@mock.patch.object(hyperv_neutron_agent.HyperVNeutronAgentMixin,
'_get_network_vswitch_map_by_port_id')
def _check_port_unbound(self, mock_get_vswitch_map_by_port_id, ports=None,
net_uuid=None):
map = {
'network_type': 'vlan',
'vswitch_name': 'fake-vswitch',
'ports': ports,
'vlan_id': 1}
network_vswitch_map = (net_uuid, map)
mock_get_vswitch_map_by_port_id.return_value = network_vswitch_map
with mock.patch.object(
self.agent._utils,
'remove_switch_port') as mock_remove_switch_port:
self.agent._port_unbound(self._FAKE_PORT_ID, vnic_deleted=False)
if net_uuid:
mock_remove_switch_port.assert_called_once_with(
self._FAKE_PORT_ID, False)
else:
self.assertFalse(mock_remove_switch_port.called)
@mock.patch.object(hyperv_neutron_agent.HyperVNeutronAgentMixin,
'_reclaim_local_network')
def test_port_unbound(self, mock_reclaim_local_network):
net_uuid = 'my-net-uuid'
self._check_port_unbound(ports=[self._FAKE_PORT_ID],
net_uuid=net_uuid)
mock_reclaim_local_network.assert_called_once_with(net_uuid)
def test_port_unbound_port_not_found(self):
self._check_port_unbound()
def test_port_enable_control_metrics_ok(self):
self.agent.enable_metrics_collection = True
self.agent._port_metric_retries[self._FAKE_PORT_ID] = (
self.agent._metrics_max_retries)
self.agent._utils.is_metrics_collection_allowed.return_value = True
self.agent._port_enable_control_metrics()
enable_port_metrics_collection = (
self.agent._metricsutils.enable_port_metrics_collection)
enable_port_metrics_collection.assert_called_with(self._FAKE_PORT_ID)
self.assertNotIn(self._FAKE_PORT_ID, self.agent._port_metric_retries)
def test_port_enable_control_metrics_maxed(self):
self.agent.enable_metrics_collection = True
self.agent._metrics_max_retries = 3
self.agent._port_metric_retries[self._FAKE_PORT_ID] = 3
self.agent._utils.is_metrics_collection_allowed.return_value = False
for i in range(4):
self.assertIn(self._FAKE_PORT_ID,
self.agent._port_metric_retries)
self.agent._port_enable_control_metrics()
self.assertNotIn(self._FAKE_PORT_ID, self.agent._port_metric_retries)
def test_port_enable_control_metrics_no_vnic(self):
self.agent.enable_metrics_collection = True
self.agent._port_metric_retries[self._FAKE_PORT_ID] = 3
self.agent._utils.is_metrics_collection_allowed.side_effect = (
exceptions.NotFound(resource=self._FAKE_PORT_ID))
self.agent._port_enable_control_metrics()
self.assertNotIn(self._FAKE_PORT_ID, self.agent._port_metric_retries)
@mock.patch.object(hyperv_neutron_agent.HyperVNeutronAgentMixin,
'_port_unbound')
def test_vif_port_state_down(self, mock_port_unbound):
self.agent._treat_vif_port(
mock.sentinel.port_id, mock.sentinel.network_id,
mock.sentinel.network_type, mock.sentinel.physical_network,
mock.sentinel.segmentation_id, False)
mock_port_unbound.assert_called_once_with(mock.sentinel.port_id)
sg_agent = self.agent.sec_groups_agent
sg_agent.remove_devices_filter.assert_called_once_with(
[mock.sentinel.port_id])
@mock.patch.object(hyperv_neutron_agent.HyperVNeutronAgentMixin,
'_port_bound')
def _check_treat_vif_port_state_up(self, mock_port_bound):
self.agent._treat_vif_port(
mock.sentinel.port_id, mock.sentinel.network_id,
mock.sentinel.network_type, mock.sentinel.physical_network,
mock.sentinel.segmentation_id, True)
mock_port_bound.assert_called_once_with(
mock.sentinel.port_id, mock.sentinel.network_id,
mock.sentinel.network_type, mock.sentinel.physical_network,
mock.sentinel.segmentation_id)
def test_treat_vif_port_sg_enabled(self):
self.agent.enable_security_groups = True
self._check_treat_vif_port_state_up()
sg_agent = self.agent.sec_groups_agent
sg_agent.refresh_firewall.assert_called_once_with(
[mock.sentinel.port_id])
def test_treat_vif_port_sg_disabled(self):
self.agent.enable_security_groups = False
self._check_treat_vif_port_state_up()
self.agent._utils.remove_all_security_rules.assert_called_once_with(
mock.sentinel.port_id)
@mock.patch.object(hyperv_neutron_agent.HyperVNeutronAgentMixin,
'_treat_vif_port')
@mock.patch.object(hyperv_neutron_agent.HyperVNeutronAgentMixin,
'_update_port_status_cache')
def test_process_added_port(self, mock_update_port_cache,
mock_treat_vif_port):
self.agent._added_ports = set()
details = self._get_fake_port_details()
self.agent._process_added_port(details)
mock_treat_vif_port.assert_called_once_with(
mock.sentinel.port_id, mock.sentinel.network_id,
mock.sentinel.network_type, mock.sentinel.physical_network,
mock.sentinel.segmentation_id, mock.sentinel.admin_state_up)
mock_update_port_cache.assert_called_once_with(mock.sentinel.device,
device_bound=True)
self.assertNotIn(mock.sentinel.device, self.agent._added_ports)
@mock.patch.object(hyperv_neutron_agent.HyperVNeutronAgentMixin,
'_treat_vif_port')
def test_process_added_port_failed(self, mock_treat_vif_port):
mock_treat_vif_port.side_effect = exception.NetworkingHyperVException
self.agent._added_ports = set()
details = self._get_fake_port_details()
self.agent._process_added_port(details)
self.assertIn(mock.sentinel.device, self.agent._added_ports)
def _get_fake_port_details(self):
return {'device': mock.sentinel.device,
'port_id': mock.sentinel.port_id,
'network_id': mock.sentinel.network_id,
'network_type': mock.sentinel.network_type,
'physical_network': mock.sentinel.physical_network,
'segmentation_id': mock.sentinel.segmentation_id,
'admin_state_up': mock.sentinel.admin_state_up}
def test_treat_devices_added_returns_true_for_missing_device(self):
self.agent._added_ports = set([mock.sentinel.port_id])
attrs = {'get_devices_details_list.side_effect': Exception()}
self.agent.plugin_rpc.configure_mock(**attrs)
self.agent._treat_devices_added()
self.assertIn(mock.sentinel.port_id, self.agent._added_ports)
@mock.patch.object(hyperv_neutron_agent.eventlet, 'spawn_n')
def test_treat_devices_added_updates_known_port(self, mock_spawn):
self.agent._added_ports = set([mock.sentinel.device])
details = self._get_fake_port_details()
attrs = {'get_devices_details_list.return_value': [details]}
self.agent.plugin_rpc.configure_mock(**attrs)
self.agent._treat_devices_added()
mock_spawn.assert_called_once_with(
self.agent._process_added_port, details)
self.assertNotIn(mock.sentinel.device, self.agent._added_ports)
def test_treat_devices_added_missing_port_id(self):
self.agent._added_ports = set([mock.sentinel.port_id])
details = {'device': mock.sentinel.port_id}
attrs = {'get_devices_details_list.return_value': [details]}
self.agent.plugin_rpc.configure_mock(**attrs)
self.agent._treat_devices_added()
self.assertNotIn(mock.sentinel.port_id, self.agent._added_ports)
@mock.patch('eventlet.spawn_n')
def test_treat_devices_removed(self, mock_spawn):
mock_removed_ports = [mock.sentinel.port0, mock.sentinel.port1]
self.agent._removed_ports = set(mock_removed_ports)
self.agent._treat_devices_removed()
mock_spawn.assert_has_calls(
[mock.call(self.agent._process_removed_port, port)
for port in mock_removed_ports],
any_order=True)
@mock.patch.object(hyperv_neutron_agent.HyperVNeutronAgentMixin,
'_port_unbound')
@mock.patch.object(hyperv_neutron_agent.HyperVNeutronAgentMixin,
'_update_port_status_cache')
def test_process_removed_port_exception(self, mock_update_port_cache,
mock_port_unbound):
self.agent._removed_ports = set([mock.sentinel.port_id])
raised_exc = exception.NetworkingHyperVException
mock_port_unbound.side_effect = raised_exc
self.assertRaises(raised_exc,
self.agent._process_removed_port,
mock.sentinel.port_id)
mock_update_port_cache.assert_called_once_with(
mock.sentinel.port_id, device_bound=False)
self.assertIn(mock.sentinel.port_id, self.agent._removed_ports)
@mock.patch.object(hyperv_neutron_agent.HyperVNeutronAgentMixin,
'_port_unbound')
@mock.patch.object(hyperv_neutron_agent.HyperVNeutronAgentMixin,
'_update_port_status_cache')
def test_process_removed_port(self, mock_update_port_cache,
mock_port_unbound):
self.agent._removed_ports = set([mock.sentinel.port_id])
self.agent._process_removed_port(mock.sentinel.port_id)
mock_update_port_cache.assert_called_once_with(
mock.sentinel.port_id, device_bound=False)
mock_port_unbound.assert_called_once_with(mock.sentinel.port_id,
vnic_deleted=True)
self.agent.sec_groups_agent.remove_devices_filter(
[mock.sentinel.port_id])
self.assertNotIn(mock.sentinel.port_id, self.agent._removed_ports)
def test_process_added_port_event(self):
self.agent._added_ports = set()
self.agent._process_added_port_event(mock.sentinel.port_id)
self.assertIn(mock.sentinel.port_id, self.agent._added_ports)
def test_process_removed_port_event(self):
self.agent._removed_ports = set([])
self.agent._process_removed_port_event(mock.sentinel.port_id)
self.assertIn(mock.sentinel.port_id, self.agent._removed_ports)
@mock.patch.object(hyperv_neutron_agent.eventlet, 'spawn_n')
def test_create_event_listeners(self, mock_spawn):
self.agent._create_event_listeners()
self.agent._utils.get_vnic_event_listener.assert_has_calls([
mock.call(self.agent._utils.EVENT_TYPE_CREATE),
mock.call(self.agent._utils.EVENT_TYPE_DELETE)])
target = self.agent._utils.get_vnic_event_listener.return_value
calls = [mock.call(target,
self.agent._process_added_port_event),
mock.call(target,
self.agent._process_removed_port_event)]
mock_spawn.assert_has_calls(calls, any_order=True)
def test_update_port_status_cache_device_bound(self):
self.agent._bound_ports = set()
self.agent._unbound_ports = set([mock.sentinel.device])
self.agent._update_port_status_cache(mock.sentinel.device,
device_bound=True)
self.assertIn(mock.sentinel.device, self.agent._bound_ports)
self.assertNotIn(mock.sentinel.device, self.agent._unbound_ports)
def test_update_port_status_cache_device_unbound(self):
self.agent._bound_ports = set([mock.sentinel.device])
self.agent._unbound_ports = set()
self.agent._update_port_status_cache(mock.sentinel.device,
device_bound=False)
self.assertIn(mock.sentinel.device, self.agent._unbound_ports)
self.assertNotIn(mock.sentinel.device, self.agent._bound_ports)
def test_notify_plugin_no_updates(self):
self.agent._bound_ports = set()
self.agent._unbound_ports = set()
self.agent._notify_plugin_on_port_updates()
self.assertFalse(self.agent.plugin_rpc.update_device_list.called)
def test_notify_plugin_on_port_updates(self):
bound_ports = [mock.sentinel.bound_port]
unbound_ports = [mock.sentinel.unbound_port]
new_bound_ports = set([mock.sentinel.new_bound_port])
new_unbound_ports = set([mock.sentinel.new_unbound_port])
self.agent._bound_ports = set(bound_ports)
self.agent._unbound_ports = set(unbound_ports)
def plugin_rpc_side_effect(*args, **kwargs):
# We test the scenario in which we're getting some new port
# updates during the plugin rpc call.
self.agent._bound_ports.update(new_bound_ports)
self.agent._unbound_ports.update(new_unbound_ports)
mock_update_device_list = self.agent.plugin_rpc.update_device_list
mock_update_device_list.side_effect = plugin_rpc_side_effect
self.agent._notify_plugin_on_port_updates()
mock_update_device_list.assert_called_once_with(
self.agent.context, bound_ports, unbound_ports,
self.agent.agent_id, self.agent._host)
self.assertEqual(new_bound_ports, self.agent._bound_ports)
self.assertEqual(new_unbound_ports, self.agent._unbound_ports)
@mock.patch('eventlet.spawn_n')
@mock.patch('time.sleep')
@mock.patch.object(hyperv_neutron_agent.HyperVNeutronAgentMixin,
'_port_enable_control_metrics')
@mock.patch.object(hyperv_neutron_agent.HyperVNeutronAgentMixin,
'_treat_devices_added')
@mock.patch.object(hyperv_neutron_agent.HyperVNeutronAgentMixin,
'_create_event_listeners')
def test_daemon_loop(self, mock_create_listeners, mock_treat_dev_added,
mock_port_enable_metrics, mock_sleep, mock_spawn):
self.agent._nvgre_enabled = True
mock_port_enable_metrics.side_effect = KeyError
mock_sleep.side_effect = KeyboardInterrupt
self.assertRaises(KeyboardInterrupt, self.agent.daemon_loop)
self.assertEqual(self.agent._utils.get_vnic_ids.return_value,
self.agent._added_ports)
self.assertEqual(set(), self.agent._removed_ports)
self.assertEqual(set(), self.agent._bound_ports)
self.assertEqual(set(), self.agent._unbound_ports)
mock_create_listeners.assert_called_once_with()
mock_spawn.assert_called_once_with(
self.agent._notify_plugin_on_port_updates)
mock_treat_dev_added.assert_called_once_with()
self.agent._nvgre_ops.refresh_nvgre_records.assert_called_once_with()
mock_port_enable_metrics.assert_called_with()
self.agent._utils.update_cache.assert_called_once_with()
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Module defining the Django auth backend class for the Keystone API. """
import datetime
import logging
import pytz
from django.conf import settings
from django.utils.module_loading import import_string
from django.utils.translation import ugettext_lazy as _
from openstack_auth import exceptions
from openstack_auth import user as auth_user
from openstack_auth import utils
LOG = logging.getLogger(__name__)
KEYSTONE_CLIENT_ATTR = "_keystoneclient"
class KeystoneBackend(object):
"""Django authentication backend for use with ``django.contrib.auth``."""
def __init__(self):
self._auth_plugins = None
@property
def auth_plugins(self):
if self._auth_plugins is None:
plugins = getattr(
settings,
'AUTHENTICATION_PLUGINS',
['openstack_auth.plugin.password.PasswordPlugin',
'openstack_auth.plugin.token.TokenPlugin'])
self._auth_plugins = [import_string(p)() for p in plugins]
return self._auth_plugins
def check_auth_expiry(self, auth_ref, margin=None):
if not utils.is_token_valid(auth_ref, margin):
msg = _("The authentication token issued by the Identity service "
"has expired.")
LOG.warning("The authentication token issued by the Identity "
"service appears to have expired before it was "
"issued. This may indicate a problem with either your "
"server or client configuration.")
raise exceptions.KeystoneAuthException(msg)
return True
def get_user(self, user_id):
"""Returns the current user from the session data.
If authenticated, this returns the user object based on the user ID
and session data.
.. note::
This required monkey-patching the ``contrib.auth`` middleware
to make the ``request`` object available to the auth backend class.
"""
if (hasattr(self, 'request') and
user_id == self.request.session["user_id"]):
token = self.request.session['token']
endpoint = self.request.session['region_endpoint']
services_region = self.request.session['services_region']
user = auth_user.create_user_from_token(self.request, token,
endpoint, services_region)
return user
else:
return None
def authenticate(self, auth_url=None, **kwargs):
"""Authenticates a user via the Keystone Identity API."""
LOG.debug('Beginning user authentication')
if not auth_url:
auth_url = settings.OPENSTACK_KEYSTONE_URL
auth_url, url_fixed = utils.fix_auth_url_version_prefix(auth_url)
if url_fixed:
LOG.warning("The OPENSTACK_KEYSTONE_URL setting points to a v2.0 "
"Keystone endpoint, but v3 is specified as the API "
"version to use by Horizon. Using v3 endpoint for "
"authentication.")
for plugin in self.auth_plugins:
unscoped_auth = plugin.get_plugin(auth_url=auth_url, **kwargs)
if unscoped_auth:
break
else:
msg = _('No authentication backend could be determined to '
'handle the provided credentials.')
LOG.warning('No authentication backend could be determined to '
'handle the provided credentials. This is likely a '
'configuration error that should be addressed.')
raise exceptions.KeystoneAuthException(msg)
# the recent project id a user might have set in a cookie
recent_project = None
request = kwargs.get('request')
if request:
# Grab recent_project found in the cookie, try to scope
# to the last project used.
recent_project = request.COOKIES.get('recent_project')
unscoped_auth_ref = plugin.get_access_info(unscoped_auth)
# Check expiry for our unscoped auth ref.
self.check_auth_expiry(unscoped_auth_ref)
domain_name = kwargs.get('user_domain_name', None)
domain_auth, domain_auth_ref = plugin.get_domain_scoped_auth(
unscoped_auth, unscoped_auth_ref, domain_name)
scoped_auth, scoped_auth_ref = plugin.get_project_scoped_auth(
unscoped_auth, unscoped_auth_ref, recent_project=recent_project)
# Abort if there are no projects for this user and a valid domain
# token has not been obtained
#
# The valid use cases for a user login are:
# Keystone v2: user must have a role on a project and be able
# to obtain a project scoped token
# Keystone v3: 1) user can obtain a domain scoped token (user
# has a role on the domain they authenticated to),
# only, no roles on a project
# 2) user can obtain a domain scoped token and has
# a role on a project in the domain they
# authenticated to (and can obtain a project scoped
# token)
# 3) user cannot obtain a domain scoped token, but can
# obtain a project scoped token
if not scoped_auth_ref and domain_auth_ref:
# if the user can't obtain a project scoped token, set the scoped
# token to be the domain token, if valid
scoped_auth = domain_auth
scoped_auth_ref = domain_auth_ref
elif not scoped_auth_ref and not domain_auth_ref:
msg = _('You are not authorized for any projects.')
if utils.get_keystone_version() >= 3:
msg = _('You are not authorized for any projects or domains.')
raise exceptions.KeystoneAuthException(msg)
# Check expiry for our new scoped token.
self.check_auth_expiry(scoped_auth_ref)
# We want to try to use the same region we just logged into
# which may or may not be the default depending upon the order
# keystone uses
region_name = None
id_endpoints = scoped_auth_ref.service_catalog.\
get_endpoints(service_type='identity')
for id_endpoint in [cat for cat in id_endpoints['identity']]:
if auth_url in id_endpoint.values():
region_name = id_endpoint['region']
break
interface = getattr(settings, 'OPENSTACK_ENDPOINT_TYPE', 'public')
endpoint, url_fixed = utils.fix_auth_url_version_prefix(
scoped_auth_ref.service_catalog.url_for(
service_type='identity',
interface=interface,
region_name=region_name))
if url_fixed:
LOG.warning("The Keystone URL in service catalog points to a v2.0 "
"Keystone endpoint, but v3 is specified as the API "
"version to use by Horizon. Using v3 endpoint for "
"authentication.")
# If we made it here we succeeded. Create our User!
unscoped_token = unscoped_auth_ref.auth_token
user = auth_user.create_user_from_token(
request,
auth_user.Token(scoped_auth_ref, unscoped_token=unscoped_token),
endpoint,
services_region=region_name)
if request is not None:
# if no k2k providers exist then the function returns quickly
utils.store_initial_k2k_session(auth_url, request, scoped_auth_ref,
unscoped_auth_ref)
request.session['unscoped_token'] = unscoped_token
if domain_auth_ref:
# Check the Django session engine: cookie-backed sessions cannot
# hold the domain scoped token (it would overflow the cookie), so
# skip storing it and log an error instead.
if utils.using_cookie_backed_sessions():
LOG.error('Using signed cookies as SESSION_ENGINE with '
'OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT is '
'enabled. This disables the ability to '
'perform identity operations due to cookie size '
'constraints.')
else:
request.session['domain_token'] = domain_auth_ref
request.user = user
timeout = getattr(settings, "SESSION_TIMEOUT", 3600)
token_life = user.token.expires - datetime.datetime.now(pytz.utc)
session_time = min(timeout, int(token_life.total_seconds()))
request.session.set_expiry(session_time)
keystone_client_class = utils.get_keystone_client().Client
session = utils.get_session()
scoped_client = keystone_client_class(session=session,
auth=scoped_auth)
# Support client caching to save on auth calls.
setattr(request, KEYSTONE_CLIENT_ATTR, scoped_client)
LOG.debug('Authentication completed.')
return user
def get_group_permissions(self, user, obj=None):
"""Returns an empty set since Keystone doesn't support "groups"."""
# Keystone V3 added "groups". The Auth token response includes the
# roles from the user's Group assignment. It should be fine just
# returning an empty set here.
return set()
def get_all_permissions(self, user, obj=None):
"""Returns a set of permission strings that the user has.
The permissions available to the user are derived from the user's
Keystone "roles".
The permissions are returned as ``"openstack.{{ role.name }}"``.
"""
if user.is_anonymous or obj is not None:
return set()
# TODO(gabrielhurley): Integrate policy-driven RBAC
# when supported by Keystone.
role_perms = {utils.get_role_permission(role['name'])
for role in user.roles}
services = []
for service in user.service_catalog:
try:
service_type = service['type']
except KeyError:
continue
service_regions = [utils.get_endpoint_region(endpoint) for endpoint
in service.get('endpoints', [])]
if user.services_region in service_regions:
services.append(service_type.lower())
service_perms = {"openstack.services.%s" % service
for service in services}
return role_perms | service_perms
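# Illustrative sketch (added commentary, not from the original backend): for a
# user whose token carries the 'admin' role and whose services region exposes
# a 'compute' endpoint, the union computed above would contain the role
# permission for 'admin' (as produced by utils.get_role_permission) together
# with 'openstack.services.compute'.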
def has_perm(self, user, perm, obj=None):
"""Returns True if the given user has the specified permission."""
if not user.is_active:
return False
return perm in self.get_all_permissions(user, obj)
def has_module_perms(self, user, app_label):
"""Returns True if user has any permissions in the given app_label.
Currently this matches for the app_label ``"openstack"``.
"""
if not user.is_active:
return False
for perm in self.get_all_permissions(user):
if perm[:perm.index('.')] == app_label:
return True
return False
|
|
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import SkipTest
from troveclient.compat import exceptions
from trove.common.utils import generate_uuid
from trove.common.utils import poll_until
from trove.tests.scenario.helpers.test_helper import DataType
from trove.tests.scenario.runners.test_runners import TestRunner
class BackupRunner(TestRunner):
def __init__(self):
self.TIMEOUT_BACKUP_CREATE = 60 * 30
self.TIMEOUT_BACKUP_DELETE = 120
super(BackupRunner, self).__init__(sleep_time=20,
timeout=self.TIMEOUT_BACKUP_CREATE)
self.BACKUP_NAME = 'backup_test'
self.BACKUP_DESC = 'test description'
self.backup_host = None
self.backup_info = None
self.backup_count_prior_to_create = 0
self.backup_count_for_ds_prior_to_create = 0
self.backup_count_for_instance_prior_to_create = 0
self.incremental_backup_info = None
self.restore_instance_id = 0
self.restore_host = None
def run_backup_create_instance_invalid(
self, expected_exception=exceptions.BadRequest,
expected_http_code=400):
invalid_inst_id = 'invalid-inst-id'
self.assert_raises(
expected_exception, expected_http_code,
self.auth_client.backups.create,
self.BACKUP_NAME, invalid_inst_id, self.BACKUP_DESC)
def run_backup_create_instance_not_found(
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
self.assert_raises(
expected_exception, expected_http_code,
self.auth_client.backups.create,
self.BACKUP_NAME, generate_uuid(), self.BACKUP_DESC)
def run_add_data_for_backup(self):
self.backup_host = self.get_instance_host()
self.assert_add_data_for_backup(self.backup_host)
def assert_add_data_for_backup(self, host):
"""In order for this to work, the corresponding datastore
'helper' class should implement the 'add_large_data' method.
"""
self.test_helper.add_data(DataType.large, host)
def run_verify_data_for_backup(self):
self.assert_verify_backup_data(self.backup_host)
def assert_verify_backup_data(self, host):
"""In order for this to work, the corresponding datastore
'helper' class should implement the 'verify_large_data' method.
"""
self.test_helper.verify_data(DataType.large, host)
def run_backup_create(self):
self.assert_backup_create()
def assert_backup_create(self):
# Necessary to test that the count increases.
self.backup_count_prior_to_create = len(
self.auth_client.backups.list())
self.backup_count_for_ds_prior_to_create = len(
self.auth_client.backups.list(
datastore=self.instance_info.dbaas_datastore))
self.backup_count_for_instance_prior_to_create = len(
self.auth_client.instances.backups(self.instance_info.id))
result = self.auth_client.backups.create(
self.BACKUP_NAME, self.instance_info.id, self.BACKUP_DESC)
self.backup_info = result
self.assert_equal(self.BACKUP_NAME, result.name,
'Unexpected backup name')
self.assert_equal(self.BACKUP_DESC, result.description,
'Unexpected backup description')
self.assert_equal(self.instance_info.id, result.instance_id,
'Unexpected instance ID for backup')
self.assert_equal('NEW', result.status,
'Unexpected status for backup')
instance = self.auth_client.instances.get(
self.instance_info.id)
datastore_version = self.auth_client.datastore_versions.get(
self.instance_info.dbaas_datastore,
self.instance_info.dbaas_datastore_version)
self.assert_equal('BACKUP', instance.status,
'Unexpected instance status')
self.assert_equal(self.instance_info.dbaas_datastore,
result.datastore['type'],
'Unexpected datastore')
self.assert_equal(self.instance_info.dbaas_datastore_version,
result.datastore['version'],
'Unexpected datastore version')
self.assert_equal(datastore_version.id, result.datastore['version_id'],
'Unexpected datastore version id')
def run_restore_instance_from_not_completed_backup(
self, expected_exception=exceptions.Conflict,
expected_http_code=409):
self.assert_raises(
expected_exception, expected_http_code,
self._restore_from_backup, self.backup_info.id)
def run_instance_action_right_after_backup_create(
self, expected_exception=exceptions.UnprocessableEntity,
expected_http_code=422):
self.assert_raises(expected_exception, expected_http_code,
self.auth_client.instances.resize_instance,
self.instance_info.id, 1)
def run_backup_create_another_backup_running(
self, expected_exception=exceptions.UnprocessableEntity,
expected_http_code=422):
self.assert_raises(expected_exception, expected_http_code,
self.auth_client.backups.create,
'backup_test2', self.instance_info.id,
'test description2')
def run_backup_delete_while_backup_running(
self, expected_exception=exceptions.UnprocessableEntity,
expected_http_code=422):
result = self.auth_client.backups.list()
backup = result[0]
self.assert_raises(expected_exception, expected_http_code,
self.auth_client.backups.delete, backup.id)
def run_backup_create_completed(self):
self._verify_backup(self.backup_info.id)
def _verify_backup(self, backup_id):
def _result_is_active():
backup = self.auth_client.backups.get(backup_id)
if backup.status == 'COMPLETED':
return True
else:
self.assert_not_equal('FAILED', backup.status,
'Backup status should not be FAILED')
return False
poll_until(_result_is_active, time_out=self.TIMEOUT_BACKUP_CREATE)
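# Note (added commentary, not from the original runner): poll_until keeps
# calling _result_is_active until it returns True or TIMEOUT_BACKUP_CREATE
# elapses, so a backup that never reaches 'COMPLETED' fails the test on
# timeout instead of hanging indefinitely, while a 'FAILED' status aborts
# early via the assert_not_equal check above.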
def run_backup_list(self):
backup_list = self.auth_client.backups.list()
self.assert_backup_list(
backup_list, self.backup_count_prior_to_create + 1)
def assert_backup_list(self, backup_list, expected_count):
self.assert_equal(expected_count, len(backup_list),
'Unexpected number of backups found')
if expected_count:
backup = backup_list[0]
self.assert_equal(self.BACKUP_NAME, backup.name,
'Unexpected backup name')
self.assert_equal(self.BACKUP_DESC, backup.description,
'Unexpected backup description')
self.assert_not_equal(0.0, backup.size, 'Unexpected backup size')
self.assert_equal(self.instance_info.id, backup.instance_id,
'Unexpected instance id')
self.assert_equal('COMPLETED', backup.status,
'Unexpected backup status')
def run_backup_list_filter_datastore(self):
backup_list = self.auth_client.backups.list(
datastore=self.instance_info.dbaas_datastore)
self.assert_backup_list(
backup_list, self.backup_count_for_ds_prior_to_create + 1)
def run_backup_list_filter_different_datastore(self):
backup_list = self.auth_client.backups.list(
datastore='Test_Datastore_1')
self.assert_backup_list(backup_list, 0)
def run_backup_list_filter_datastore_not_found(
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
self.assert_raises(
expected_exception, expected_http_code,
self.auth_client.backups.list,
datastore='NOT_FOUND')
def run_backup_list_for_instance(self):
backup_list = self.auth_client.instances.backups(
self.instance_info.id)
self.assert_backup_list(
backup_list, self.backup_count_for_instance_prior_to_create + 1)
def run_backup_get(self):
backup = self.auth_client.backups.get(self.backup_info.id)
self.assert_backup_list([backup], 1)
self.assert_equal(self.instance_info.dbaas_datastore,
backup.datastore['type'],
'Unexpected datastore type')
self.assert_equal(self.instance_info.dbaas_datastore_version,
backup.datastore['version'],
'Unexpected datastore version')
datastore_version = self.auth_client.datastore_versions.get(
self.instance_info.dbaas_datastore,
self.instance_info.dbaas_datastore_version)
self.assert_equal(datastore_version.id, backup.datastore['version_id'])
def run_backup_get_unauthorized_user(
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
self.assert_raises(
expected_exception, None,
self.unauth_client.backups.get, self.backup_info.id)
# we're using a different client, so we'll check the return code
# on it explicitly, instead of depending on 'assert_raises'
self.assert_client_code(expected_http_code=expected_http_code,
client=self.unauth_client)
def run_restore_from_backup(self):
self.assert_restore_from_backup(self.backup_info.id)
def assert_restore_from_backup(self, backup_ref):
result = self._restore_from_backup(backup_ref)
# TODO(peterstac) - This should probably return code 202
self.assert_client_code(200)
self.assert_equal('BUILD', result.status,
'Unexpected instance status')
self.restore_instance_id = result.id
def _restore_from_backup(self, backup_ref):
restore_point = {'backupRef': backup_ref}
result = self.auth_client.instances.create(
self.instance_info.name + '_restore',
self.instance_info.dbaas_flavor_href,
self.instance_info.volume,
nics=self.instance_info.nics,
restorePoint=restore_point,
datastore=self.instance_info.dbaas_datastore,
datastore_version=self.instance_info.dbaas_datastore_version)
return result
def run_restore_from_backup_completed(
self, expected_states=['BUILD', 'ACTIVE'],
# TODO(peterstac) - This should probably return code 202
expected_http_code=200):
self.assert_restore_from_backup_completed(
self.restore_instance_id, expected_states, expected_http_code)
self.restore_host = self.get_instance_host(self.restore_instance_id)
def assert_restore_from_backup_completed(
self, instance_id, expected_states, expected_http_code):
self.assert_instance_action(instance_id, expected_states,
expected_http_code)
def run_verify_data_in_restored_instance(self):
self.assert_verify_backup_data(self.restore_host)
def run_delete_restored_instance(
self, expected_states=['SHUTDOWN'],
expected_http_code=202):
self.assert_delete_restored_instance(
self.restore_instance_id, expected_states, expected_http_code)
def assert_delete_restored_instance(
self, instance_id, expected_states, expected_http_code):
self.auth_client.instances.delete(instance_id)
self.assert_instance_action(instance_id, expected_states,
expected_http_code)
self.assert_all_gone(instance_id, expected_states[-1])
def run_delete_unknown_backup(
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
self.assert_raises(
expected_exception, expected_http_code,
self.auth_client.backups.delete,
'unknown_backup')
def run_delete_backup_unauthorized_user(
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
self.assert_raises(
expected_exception, None,
self.unauth_client.backups.delete, self.backup_info.id)
# we're using a different client, so we'll check the return code
# on it explicitly, instead of depending on 'assert_raises'
self.assert_client_code(expected_http_code=expected_http_code,
client=self.unauth_client)
def run_delete_backup(self, expected_http_code=202):
self.assert_delete_backup(self.backup_info.id, expected_http_code)
def assert_delete_backup(
self, backup_id, expected_http_code):
self.auth_client.backups.delete(backup_id)
self.assert_client_code(expected_http_code)
self._wait_until_backup_is_gone(backup_id)
def _wait_until_backup_is_gone(self, backup_id):
def _backup_is_gone():
try:
self.auth_client.backups.get(backup_id)
return False
except exceptions.NotFound:
return True
poll_until(_backup_is_gone,
time_out=self.TIMEOUT_BACKUP_DELETE)
def run_check_for_incremental_backup(
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
if self.incremental_backup_info is None:
raise SkipTest("Incremental Backup not created")
self.assert_raises(
expected_exception, expected_http_code,
self.auth_client.backups.get,
self.incremental_backup_info.id)
|
|
# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from sahara.i18n import _
from sahara.tests.unit import base as b
from sahara.tests.unit.plugins.cdh import utils as ctu
from sahara.utils import files
CONFIGURATION_SCHEMA = {
'node_configs': {
'yarn.scheduler.minimum-allocation-mb': (
'RESOURCEMANAGER', 'yarn_scheduler_minimum_allocation_mb'),
'mapreduce.reduce.memory.mb': (
'YARN_GATEWAY', 'mapreduce_reduce_memory_mb'),
'mapreduce.map.memory.mb': (
'YARN_GATEWAY', 'mapreduce_map_memory_mb',),
'yarn.scheduler.maximum-allocation-mb': (
'RESOURCEMANAGER', 'yarn_scheduler_maximum_allocation_mb'),
'yarn.app.mapreduce.am.command-opts': (
'YARN_GATEWAY', 'yarn_app_mapreduce_am_command_opts'),
'yarn.nodemanager.resource.memory-mb': (
'NODEMANAGER', 'yarn_nodemanager_resource_memory_mb'),
'mapreduce.task.io.sort.mb': (
'YARN_GATEWAY', 'io_sort_mb'),
'mapreduce.map.java.opts': (
'YARN_GATEWAY', 'mapreduce_map_java_opts'),
'mapreduce.reduce.java.opts': (
'YARN_GATEWAY', 'mapreduce_reduce_java_opts'),
'yarn.app.mapreduce.am.resource.mb': (
'YARN_GATEWAY', 'yarn_app_mapreduce_am_resource_mb')
},
'cluster_configs': {
'dfs.replication': ('HDFS', 'dfs_replication')
}
}
def get_concrete_cluster():
cluster = ctu.get_fake_cluster()
# add configs to cluster
configs = {"SQOOP": {}, "HUE": {}, "general": {}, "KMS": {}, "HIVE": {},
"SOLR": {}, "FLUME": {}, "HDFS": {"dfs_replication": 1},
"KS_INDEXER": {}, "SPARK_ON_YARN": {}, "SENTRY": {}, "YARN": {},
"ZOOKEEPER": {}, "OOZIE": {}, "HBASE": {}, "IMPALA": {}}
# cluster is immutable, a workaround
dict.__setitem__(cluster, "cluster_config", configs)
# add fake remotes to instances
instances = [i for ng in cluster.node_groups for i in ng.instances]
for i in instances:
object.__setattr__(i, 'remote', mock.MagicMock())
# add cluster_id to each node group
for ng in cluster.node_groups:
dict.__setitem__(ng, "cluster_id", ng.cluster.id)
# add extra config
dict.__setitem__(cluster, "extra", {})
return cluster
def get_fake_worker_instances():
ng = get_concrete_cluster().node_groups[2]
return ng.instances
class TestPluginUtils(b.SaharaTestCase):
def setUp(self):
super(TestPluginUtils, self).setUp()
self.plug_utils = None
@mock.patch('sahara.config.CONF.disable_event_log')
@mock.patch('sahara.plugins.cdh.plugin_utils.'
'CDHPluginAutoConfigsProvider')
def test_recommend_configs(self, provider, log_cfg):
fake_plugin_utils = mock.Mock()
fake_cluster = mock.Mock()
self.plug_utils.recommend_configs(
fake_cluster, fake_plugin_utils, False)
self.assertEqual([mock.call(CONFIGURATION_SCHEMA,
fake_plugin_utils,
fake_cluster,
False)],
provider.call_args_list)
@mock.patch('sahara.config.CONF.disable_event_log')
@mock.patch('sahara.plugins.cdh.commands.install_packages')
def test_install_packages(self, install_packages, log_cfg):
packages = mock.Mock()
instances = get_fake_worker_instances()
self.plug_utils.install_packages(instances, packages)
calls = [mock.call(i.remote().__enter__(), packages)
for i in instances]
install_packages.assert_has_calls(calls, any_order=False)
@mock.patch('sahara.config.CONF.disable_event_log')
@mock.patch('sahara.plugins.cdh.commands.start_agent')
@mock.patch('sahara.plugins.cdh.commands.configure_agent')
def test_start_cloudera_agents(self, configure_agent,
start_agent, log_cfg):
instances = get_fake_worker_instances()
self.plug_utils.start_cloudera_agents(instances)
cfg_calls = [mock.call(i.remote().__enter__(), 'manager_inst')
for i in instances]
start_calls = [mock.call(i.remote().__enter__()) for i in instances]
configure_agent.assert_has_calls(cfg_calls, any_order=False)
start_agent.assert_has_calls(start_calls, any_order=False)
@mock.patch('sahara.config.CONF.disable_event_log')
def test_put_hive_hdfs_xml(self, log_cfg):
cluster = get_concrete_cluster()
hive_server = cluster.node_groups[1].instances[0]
self.plug_utils.put_hive_hdfs_xml(cluster)
with hive_server.remote() as r:
calls = [mock.call('sudo su - -c "hadoop fs -mkdir -p'
' /user/hdfs/conf" hdfs'),
mock.call('sudo su - -c "hadoop fs -put'
' /etc/hive/conf/hive-site.xml'
' /user/hdfs/conf/hive-site.xml" hdfs')]
r.execute_command.assert_has_calls(calls, any_order=False)
@mock.patch('sahara.config.CONF.disable_event_log')
def test_configure_swift(self, log_cfg):
cluster = get_concrete_cluster()
cluster.cluster_config['general']['Enable Swift'] = True
instances = [i for ng in cluster.node_groups for i in ng.instances]
self.plug_utils.configure_swift(cluster)
for i in instances:
with i.remote() as r:
cmd = r'ls /usr/lib/hadoop-mapreduce/hadoop-openstack.jar'
# use any_call because the execute_command has a call:
# call().__getitem__().__ne__(0) during the method
r.execute_command.assert_any_call(cmd,
raise_when_error=False)
cmd = (r'sudo curl %s'
r' -o /usr/lib/hadoop-mapreduce/hadoop-openstack.jar')
cmd = cmd % self.plug_utils.c_helper.get_swift_lib_url(cluster)
r.execute_command.assert_any_call(cmd)
@mock.patch('sahara.config.CONF.disable_event_log')
@mock.patch('uuid.uuid4')
@mock.patch('sahara.conductor.API.cluster_update')
@mock.patch('sahara.conductor.API.cluster_get')
@mock.patch('castellan.key_manager.API')
def test_configure_hive(self, keymanager, cluster_get,
cluster_update, uuid4, log_cfg):
cluster = get_concrete_cluster()
manager = cluster.node_groups[0].instances[0]
cluster_get.return_value = cluster
db_password = 'a8f2939f-ff9f-4659-a333-abc012ee9b2d'
uuid4.return_value = db_password
create_db_script = files.get_file_text(
'plugins/cdh/{version}/resources/create_hive_db.sql'
.format(version=self.version))
create_db_script = create_db_script % db_password
self.plug_utils.configure_hive(cluster)
with manager.remote() as r:
cmd_exe_sql = ('PGPASSWORD=$(sudo head -1'
' /var/lib/cloudera-scm-server-db/data/'
'generated_password.txt) psql'
' -U cloudera-scm -h localhost -p 7432 -d scm -f'
' script_to_exec.sql')
cmd_clean = 'rm script_to_exec.sql'
self.assertEqual(create_db_script, r.write_file_to.call_args[0][1])
r.execute_command.assert_has_calls([mock.call(cmd_exe_sql),
mock.call(cmd_clean)])
@mock.patch('sahara.config.CONF.disable_event_log')
@mock.patch('sahara.plugins.cdh.commands.is_ubuntu_os')
@mock.patch('sahara.plugins.cdh.commands.is_centos_os')
@mock.patch('sahara.plugins.cdh.commands.update_repository')
@mock.patch('sahara.plugins.cdh.commands.add_apt_key')
@mock.patch('sahara.plugins.cdh.commands.write_ubuntu_repository')
@mock.patch('sahara.plugins.cdh.commands.write_centos_repository')
def test_configure_os(self, write_centos_repository,
write_ubuntu_repository, add_apt_key,
update_repository, is_centos_os,
is_ubuntu_os, log_cfg):
cluster = get_concrete_cluster()
ubuntu_instance = cluster.node_groups[2].instances[0]
centos_instance = cluster.node_groups[2].instances[1]
instances = [ubuntu_instance, centos_instance]
is_ubuntu_os.side_effect = \
lambda r: r is ubuntu_instance.remote().__enter__()
is_centos_os.side_effect = \
lambda r: r is centos_instance.remote().__enter__()
self.plug_utils.configure_os(instances)
with ubuntu_instance.remote() as r:
write_ubuntu_repository.assert_has_calls(
[mock.call(r, self.plug_utils.c_helper.CDH5_UBUNTU_REPO,
'cdh'),
mock.call(r, self.plug_utils.c_helper.CM5_UBUNTU_REPO,
'cm')],
any_order=False)
add_apt_key.assert_has_calls(
[mock.call(r,
self.plug_utils.c_helper.
DEFAULT_CDH5_UBUNTU_REPO_KEY_URL),
mock.call(r,
self.plug_utils.c_helper.
DEFAULT_CM5_UBUNTU_REPO_KEY_URL)],
any_order=False)
update_repository.assert_any_call(r)
with centos_instance.remote() as r:
write_centos_repository.assert_has_calls(
[mock.call(r, self.plug_utils.c_helper.CDH5_CENTOS_REPO,
'cdh'),
mock.call(r, self.plug_utils.c_helper.CM5_CENTOS_REPO,
'cm')],
any_order=False)
update_repository.assert_any_call(r)
@mock.patch('sahara.config.CONF.disable_event_log')
def test_install_extjs(self, log_cfg):
cluster = get_concrete_cluster()
oozie_server = cluster.node_groups[1].instances[0]
self.plug_utils.install_extjs(cluster)
with oozie_server.remote() as r:
calls = [mock.call('ls /var/lib/oozie/ext-2.2',
raise_when_error=False),
mock.call("curl -L -o '/var/lib/oozie/extjs.zip'"
" http://tarballs.openstack.org/sahara/dist/"
"common-artifacts/ext-2.2.zip",
run_as_root=True),
mock.call('unzip /var/lib/oozie/extjs.zip'
' -d /var/lib/oozie',
run_as_root=True)]
r.execute_command.assert_has_calls(calls, any_order=True)
@mock.patch('sahara.config.CONF.disable_event_log')
@mock.patch('sahara.utils.poll_utils.plugin_option_poll')
@mock.patch('sahara.plugins.cdh.commands.start_cloudera_db')
@mock.patch('sahara.plugins.cdh.commands.start_manager')
def test_start_cloudera_manager(self, start_manager, start_cloudera_db,
plugin_option_poll, log_cfg):
cluster = get_concrete_cluster()
manager = cluster.node_groups[0].instances[0]
self.plug_utils.start_cloudera_manager(cluster)
with manager.remote() as r:
start_manager.assert_called_once_with(r)
start_cloudera_db.assert_called_once_with(r)
call = [cluster,
self.plug_utils._check_cloudera_manager_started,
self.plug_utils.c_helper.AWAIT_MANAGER_STARTING_TIMEOUT,
_("Await starting Cloudera Manager"),
2, {'manager': manager}]
plugin_option_poll.assert_called_once_with(*call)
class TestPluginUtilsHigherThanV5(TestPluginUtils):
@mock.patch('sahara.config.CONF.disable_event_log')
@mock.patch('uuid.uuid4')
@mock.patch('sahara.conductor.API.cluster_update')
@mock.patch('sahara.conductor.API.cluster_get')
@mock.patch('castellan.key_manager.API')
def test_configure_sentry(self, keymanager, cluster_get,
cluster_update, uuid4, cfg_log):
cluster = get_concrete_cluster()
manager = cluster.node_groups[0].instances[0]
cluster_get.return_value = cluster
db_password = 'a8f2939f-ff9f-4659-a333-abc012ee9b2d'
uuid4.return_value = db_password
create_db_script = files.get_file_text(
'plugins/cdh/{version}/resources/create_sentry_db.sql'
.format(version=self.version))
create_db_script = create_db_script % db_password
self.plug_utils.configure_sentry(cluster)
with manager.remote() as r:
cmd_exe_sql = ('PGPASSWORD=$(sudo head -1'
' /var/lib/cloudera-scm-server-db/data/'
'generated_password.txt) psql'
' -U cloudera-scm -h localhost -p 7432 -d scm -f'
' script_to_exec.sql')
cmd_clean = 'rm script_to_exec.sql'
self.assertEqual(create_db_script, r.write_file_to.call_args[0][1])
r.execute_command.assert_has_calls([mock.call(cmd_exe_sql),
mock.call(cmd_clean)])
|
|
import json
import queue
import logging
import traceback
import multiprocessing
from heapq import nsmallest
from collections import Counter
from collections import OrderedDict
from collections import defaultdict
from itertools import chain
from itertools import zip_longest
from itertools import cycle
from operator import itemgetter
from . import score
from . import storage
from . import featurizers
first = itemgetter(0)
logger = logging.getLogger(__name__)
cat = chain.from_iterable
class defaults:
n = 600
r = int(1e5)
k = None
limit = 5
class Index(object):
def __init__(self, backend):
self.backend = backend
self.rowcount = self.backend.get_rowcount()
self.featurizer = featurizers.all[self.backend.featurizer_name]
def _search(self, query, r, n, k):
toks = self.featurizer(query)
toks = self.backend.find_least_frequent_tokens(toks, r, k)
r_map = Counter()
for i, tok in enumerate(toks, 1):
rng = self.backend.get_token(tok)
r_map.update(rng)
top_ids = map(first, r_map.most_common(n))
return list(top_ids)
def _scored_records(self, record_ids, orig_query,
extract_func=score.features, score_func=score.hit):
orig_features = extract_func(orig_query)
for rownum, r in zip(record_ids, self.backend.get_records(record_ids)):
s = score_func(orig_features, extract_func(r.fields))
yield s, rownum, r
def search(self, query, limit=defaults.limit, r=defaults.r, n=defaults.n,
k=None, extract_func=score.features, score_func=score.hit):
record_ids = self._search(query, r, n, k)
scores_records = self._scored_records(
record_ids, query, extract_func, score_func)
return [
{"fields": rec.fields, "pk": rec.pk, "score": s,
"data": rec.data, "rownum": rownum}
for s, rownum, rec in nsmallest(limit, scores_records, key=first)
]
def _save_records(self, records, idxs=[]):
completed = []
for rec, i in zip_longest(records, idxs):
try:
idx = (self.backend.save_record(rec) if i is None
else self.backend.update_record(rec, i))
except:
for idx in completed:
self.backend.delete_record(idx)
raise
completed.append(idx)
self.backend.increment_rowcount(len(completed))
return completed
def _update_tokens(self, tokmap, freq_update):
for tok in tokmap.keys():
idxs = tokmap[tok]
self.backend.update_token(tok, idxs)
freq_update[tok] = len(idxs)
def _update_tokens_and_freqs(self, tokmap):
freq_update = {}
try:
self._update_tokens(tokmap, freq_update)
self.backend.update_freqs(freq_update.items())
except:
for tok in freq_update:
self.backend.drop_records_from_token(tok, tokmap[tok])
raise
def add(self, records, idxs=[]):
idxs = list(self._save_records(records, idxs))
tokmap = defaultdict(list)
for idx, rec in zip(idxs, records):
for tok in self.featurizer(rec.fields):
tokmap[tok].append(idx)
self._update_tokens_and_freqs(tokmap)
return idxs
def close(self):
return self.backend.close()
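# A minimal usage sketch (not part of the original module), mirroring what
# CLI.hook below does; the backend URL is a hypothetical placeholder that
# storage.parse_url is assumed to accept.
#
#     backend = storage.parse_url("leveldb:///tmp/example-index")
#     index = Index(backend)
#     for hit in index.search(["some", "query", "terms"], limit=3):
#         print(hit["score"], hit["pk"], hit["fields"])
#     index.close()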
class ParallelIndexWorker(multiprocessing.Process):
def __init__(self, backend_name, work_q, result_q):
super().__init__()
self.work_q = work_q
self.result_q = result_q
self.counters = dict()
self.be_cls = storage.backends[backend_name]
self.methods = dict(count_tokens=self._count_tokens,
score_records=self._score_records)
def _count_tokens(self, query_key, n_total_toks, blob, n):
r_map = self.counters.get(query_key, None)
if not r_map:
r_map = self.counters[query_key] = {'cntr': Counter(),
'n_toks': 0}
if blob:
r_map['cntr'].update(self.be_cls._get_token(blob))
r_map['n_toks'] += 1
if r_map['n_toks'] >= n_total_toks:
cnt = self.counters.pop(query_key)['cntr']
return list(map(first, cnt.most_common(n)))
else:
return None
def _scores(self, blobs, orig_features,
extract_func=score.features, score_func=score.hit):
for rownum, blob in blobs:
r = self.be_cls._get_record(blob)
s = score_func(orig_features, extract_func(r.fields))
yield s, rownum, r
def _score_records(self, orig_features, limit, blobs,
extract_func=score.features, score_func=score.hit):
scores_records = self._scores(blobs, orig_features,
extract_func, score_func)
return nsmallest(limit, scores_records, key=first)
def run(self):
while True:
try:
logger.debug("Getting work")
query_id, meth, args = self.work_q.get()
except IOError as e:
logger.debug("Received IOError (%s) errno %s from work_q",
e.message, e.errno)
break
except EOFError:
logger.debug("Received EOFError from work_q")
break
if meth == 'stop':
logger.debug("Received sentinel, stopping")
break
try:
logger.debug("Running method %s", meth)
ret = self.methods[meth](*args)
except Exception as e:
ret = e
traceback.print_exc()
if ret is not None:
logger.debug("Finished, putting result on q")
self.result_q.put_nowait((query_id, meth, ret))
logger.debug("Result put on result_q. Back to get more work.")
else:
logger.debug("Method returned None. Back to get more work.")
class ParallelIndex(Index):
def __init__(self, backend_url, n_workers):
parsed = storage.urlparse(backend_url)
self.backend_name = parsed.scheme
self.n_workers = n_workers
self.backend = storage.backends[parsed.scheme].from_urlparsed(parsed)
self.worker_rot8 = cycle(range(n_workers))
self.featurizer = featurizers.all[self.backend.featurizer_name]
self.started = False
def _startup_workers(self):
logger.debug("Starting %s workers", self.n_workers)
self.work_qs = [multiprocessing.Queue() for _ in range(self.n_workers)]
self.result_q = multiprocessing.Queue()
self.workers = [
ParallelIndexWorker(self.backend_name, work_q, self.result_q)
for work_q in self.work_qs
]
for worker in self.workers:
worker.start()
return True
def _search(self, query_id, query, r, n, k):
which_worker = next(self.worker_rot8)
toks = self.featurizer(query)
toks = self.backend.find_least_frequent_tokens(toks, r, k)
if not toks:
self.work_qs[which_worker].put(
(query_id, 'count_tokens', [query_id, len(toks), None, n])
)
for i, tok in enumerate(toks, 1):
blob = self.backend._load_token_blob(tok)
self.work_qs[which_worker].put(
(query_id, 'count_tokens', [query_id, len(toks), blob, n])
)
return which_worker
def _scored_records(self, query_id, record_ids, query, limit,
extract_func=score.features, score_func=score.hit):
which_worker = next(self.worker_rot8)
orig_features = extract_func(query)
blobs = [(i, self.backend._load_record_blob(i)) for i in record_ids]
self.work_qs[which_worker].put(
(query_id, 'score_records',
[orig_features, limit, blobs, extract_func, score_func])
)
return which_worker
@staticmethod
def _format_resultset(scores_recs):
return [{"fields": rec.fields, "pk": rec.pk, "score": s,
"data": rec.data, "rownum": rownum}
for s, rownum, rec in scores_recs]
def search(self, query, limit=defaults.limit, r=defaults.r,
n=defaults.n, k=defaults.k,
extract_func=score.features, score_func=score.hit):
self.started = self._startup_workers()
try:
self._search(0, query, r, n, k)
_, _, record_ids = self.result_q.get()
self._scored_records(0, record_ids, query, limit,
extract_func, score_func)
_, _, scores_recs = self.result_q.get()
return self._format_resultset(scores_recs)
finally:
self.close(close_backend=False)
def _fill_work_queues(self, r, n, k):
n_filled = 0
while len(self.in_progress) < len(self.workers)*3 and self.to_do:
query_id, query = self.to_do.popitem(last=False)
self._search(query_id, query, r, n, k)
self.in_progress[query_id] = query
n_filled += 1
logger.debug("Added %i tasks to work queues", n_filled)
def _searchmany(self, queries, limit, r, n, k, extract_func, score_func):
self.to_do = OrderedDict(enumerate(queries))
self.in_progress = {}
send_later = {} # query_id : search results
n_sent = 0
while any((self.in_progress, self.to_do, send_later)):
self._fill_work_queues(r, n, k)
try:
query_id, meth, ret = self.result_q.get()
except queue.Empty:
logger.debug("Result q empty.")
continue
if isinstance(ret, Exception):
logger.warning("Hit exception while processing query %i: %s",
query_id, ret)
send_later[query_id] = ret
del self.in_progress[query_id]
continue
if meth == 'count_tokens':
logger.debug('count_tokens completed for query %s', query_id)
query = queries[query_id]
self._scored_records(query_id, ret, query, limit,
extract_func, score_func)
self.in_progress[query_id] = query
elif meth == 'score_records':
logger.debug('score_records completed for query %s', query_id)
send_later[query_id] = ret
del self.in_progress[query_id]
while n_sent in send_later:
logger.debug('Sending resultset %s', n_sent)
yield self._format_resultset(send_later.pop(n_sent))
logger.info("Completed query %i", n_sent)
logger.debug((self.in_progress, self.to_do, send_later))
logger.debug(
"Any left to do? %s",
any((self.in_progress, self.to_do, send_later)))
n_sent += 1
def searchmany(self, queries, limit=defaults.limit, r=defaults.r,
n=defaults.n, k=defaults.k,
extract_func=score.features, score_func=score.hit):
self.started = self._startup_workers()
try:
for result in self._searchmany(queries, limit, r, n, k,
extract_func, score_func):
yield result
finally:
self.close(close_backend=False)
def close(self, timeout=None, close_backend=True):
if close_backend is True:
self.backend.close()
logger.debug("Shutting down workers")
for i, work_q in enumerate(self.work_qs):
work_q.put((0, 'stop', []), timeout or 0)
for i, worker in enumerate(self.workers):
logger.debug("Joining worker %i", i)
worker.join()
logger.debug("Shutdown complete")
class CLI:
name = "query"
arguments = [
(["term"], {
"type": str,
"nargs": "+"
}),
storage.backend_arg,
(["-r", "--seeds"], {
"type": int,
"help": "The number of record votes to tally.",
"default": defaults.r}),
(["-n", "--search-space"], {
"type": int,
"help": ("The number of seed records to search through "
"for best matches"),
"default": defaults.n}),
(["-l", "--limit"], {
"type": int,
"help": "The number of search results to return",
"default": defaults.limit
})
]
@staticmethod
def hook(parser, args):
backend = storage.parse_url(args.backend)
index = Index(backend)
results = index.search(
args.term,
limit=args.limit, k=args.seeds, n=args.search_space
)
print(json.dumps(results, indent=2))
|
|
# Distributed under the MIT software license, see the accompanying
# file LICENSE or https://www.opensource.org/licenses/MIT.
import os
import re
from collections import defaultdict
from pathlib import Path
import json
from help_data import display_name, capitalize, uncapitalize
from annotations import Annotations
class Tag:
def __init__(self, doc, name):
self.doc = doc
self.name = name
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
self.doc.out += "{%% end%s %%}\n" % self.name
class Page:
def __init__(self):
self.out = ""
def tag(self, name, arg=None):
self.out += "{%% %s " % name
if arg:
self.out += arg + " "
self.out += "%}\n"
return Tag(self, name)
def text(self, text):
self.out += text + "\n"
def nl(self):
self.text("")
class RendererMarkdown:
def __init__(self, output_dir):
self.output_dir = Path(output_dir)
self.annotations = Annotations("annotations-bitcoin-0.18.json")
def add_version_note(self, page):
if "added" in self.annotation:
page.text("*Added in Bitcoin Core %s*\n" %
self.annotation["added"])
def add_wallet_note(self, page):
if "wallet" in self.annotation:
if self.annotation["wallet"]:
page.text("*Requires wallet support.*\n")
def add_see_also_command(self, page, command):
name = display_name(command)
lower_name = uncapitalize(name)
page.text("* [%s][rpc %s]: {{summary_%s}}" %
(name, command, lower_name))
def add_see_also_glossary(self, page, text, link):
page.text("* [%s][/en/glossary/%s]" % (text, link))
def add_see_also_message(self, page, message, text):
page.text("* [`%s` message][%s message]: %s" %
(message, message, text))
def add_see_also(self, page):
if "see_also" in self.annotation:
page.text("*See also*\n")
see_also = self.annotation["see_also"]
if "commands" in see_also:
for command in see_also["commands"]:
self.add_see_also_command(page, command)
if "glossary" in see_also:
for glossary in see_also["glossary"]:
self.add_see_also_glossary(page, glossary[1], glossary[0])
if "messages" in see_also:
for message in see_also["messages"]:
self.add_see_also_message(page, message[0], message[1])
page.nl()
def arg_summary(self, arg):
return arg["name"]
def arg_n(self, arg):
return arg["name"]
def arg_t(self, arg):
t = arg["type"].split(", ")[0]
if t == "numeric":
t = "number (int)"
if "args" in self.annotation:
args = self.annotation["args"]
if arg["name"] in args:
arg_annotation = args[arg["name"]]
if "type" in arg_annotation:
t += " (%s)" % arg_annotation["type"]
return t
def arg_p(self, arg):
arg_line = arg["type"].split(", ")
if len(arg_line) == 1:
return "Required"
else:
p = arg_line[1]
if p == "required":
return "Required<br>(exactly 1)"
elif p == "optional":
if len(arg_line) == 3:
return "Optional<br>" + capitalize(arg_line[2])
else:
return "Optional"
else:
return p
def arg_d(self, arg):
d = arg["description"]
if "args" in self.annotation:
args = self.annotation["args"]
if arg["name"] in args:
arg_annotation = args[arg["name"]]
if "description" in arg_annotation:
d += ". " + arg_annotation["description"]
return d
def result_t(self, result):
t = result["type"]
if t == "numeric":
t = "number (int)"
elif t == "string":
t += " (hex)"
return t
def result_null(self):
return '''*Result---`null` on success*
{% itemplate ntpd1 %}
- n: "`result`"
t: "null"
p: "Required<br>(exactly 1)"
d: "JSON `null` when the command was successfull or a JSON with an error field on error."
{% enditemplate %}
'''
def yaml_escape(self, text):
return text.replace('"', '\\"')
def guarded_code_block(self, block):
return "{% endautocrossref %}\n\n" + self.code_block(block) + "\n{% autocrossref %}\n"
def code_block(self, block):
min_indentation = 999
split_block = block.splitlines()
for line in split_block:
indentation = len(line) - len(line.lstrip(" "))
if indentation < min_indentation:
min_indentation = indentation
indented_block = ""
for line in split_block:
if min_indentation <= 4:
indented_block += " " * (4 - min_indentation) + line
else:
indented_block += line[min_indentation - 4:]
indented_block += "\n"
if not indented_block.endswith("\n"):
indented_block += "\n"
return indented_block
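# Illustration only (not part of the original class): code_block() re-indents a
# literal block so its least-indented line ends up at four spaces, which the
# downstream Markdown renderer treats as a code block. For example, an input of
#
#     "  {\n    \"key\": 1\n  }"
#
# (minimum indentation 2) is returned as
#
#     "    {\n      \"key\": 1\n    }\n"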
def add_license_header(self, page):
with page.tag("comment"):
page.text("This file is licensed under the MIT License (MIT) available on\n"
"http://opensource.org/licenses/MIT.")
def split_description(self, full_description):
if "summary" in self.annotation:
summary = self.annotation["summary"]
description = full_description
elif full_description:
if "." in full_description:
summary = uncapitalize(full_description.partition(".")[0]) + "."
description = full_description[len(summary) + 1:].lstrip()
else:
summary = uncapitalize(full_description.rstrip()) + "."
description = ""
summary = " ".join(summary.splitlines())
else:
summary = "does %s." % display_name(self.command)
description = None
return summary, description
def process_command_help(self, help_data):
self.help_data = help_data
# print(help_data)
self.command = help_data["command"].split(" ")[0]
self.annotation = self.annotations.annotation(self.command)
page = Page()
self.add_license_header(page)
name = display_name(self.command)
lower_name = name[0].lower() + name[1:]
page.tag(
"assign", 'filename="_data/devdocs/en/bitcoin-core/rpcs/rpcs/%s.md"' % self.command)
title = "\n##### %s" % name
if self.command == "ping":
title += " {#ping-rpc}"
suffix = "-rpc"
else:
suffix = ""
page.text(title)
page.tag("include", "helpers/subhead-links.md")
page.nl()
summary, description = self.split_description(help_data["description"])
page.tag("assign", 'summary_%s%s="%s"' % (lower_name, suffix, summary))
page.nl()
with page.tag("autocrossref"):
page.nl()
self.add_version_note(page)
self.add_wallet_note(page)
page.text("The `%s` RPC {{summary_%s%s}}\n" %
(self.command, lower_name, suffix))
if description:
quoted = False
for line in description.splitlines():
if line.startswith(" "):
if not quoted:
page.text("{% endautocrossref %}")
page.nl()
quoted = True
elif quoted:
page.nl()
page.text("{% autocrossref %}")
quoted = False
page.text(line)
if quoted:
page.nl()
page.text("{% autocrossref %}")
page.nl()
if "arguments" in help_data:
if not help_data["arguments"]:
page.text("*Parameters: none*\n")
else:
count = 1
for arg in help_data["arguments"]:
page.text("*Parameter #%s---%s*\n" %
(count, self.arg_summary(arg)))
with page.tag("itemplate", "ntpd1"):
page.text('- n: "%s"' % self.arg_n(arg))
page.text(' t: "%s"' % self.arg_t(arg))
page.text(' p: "%s"' %
self.yaml_escape(self.arg_p(arg)))
page.text(' d: "%s"' %
self.yaml_escape(self.arg_d(arg)))
page.nl()
page.nl()
if "literal_description" in arg:
page.text(self.guarded_code_block(
arg["literal_description"]))
count += 1
if help_data["results"] == [{'title_extension': ''}] or help_data["results"] == []:
page.text(self.result_null())
else:
for result in help_data["results"]:
result_header = "*Result"
if "title_extension" in result and result["title_extension"]:
result_header += "---" + \
result["title_extension"].lstrip()
result_header += "*\n"
page.text(result_header)
if result["format"] == "literal":
page.text(self.guarded_code_block(result["text"]))
else:
with page.tag("itemplate", "ntpd1"):
page.text('- n: "%s"' % "`result`")
page.text(' t: "%s"' % self.result_t(result))
page.text(' p: "Required<br>(exactly 1)"')
page.text(' d: "%s"' %
self.yaml_escape(result["description"]))
page.nl()
page.nl()
if help_data["examples"]:
page.text("*Example*\n")
for example in help_data["examples"]:
if example.startswith("> "):
if not example.startswith("> curl"):
with page.tag("highlight", "bash"):
page.text(example[2:].rstrip())
else:
if (not example.startswith("As json rpc") and
not example.startswith("As a json rpc") and
not example.startswith("As a JSON-RPC")):
page.text(example)
page.nl()
page.nl()
self.add_see_also(page)
return page.out
def render_cmd_page(self, command, help_data):
command_file = command + ".md"
with open(self.output_dir / "rpcs" / command_file, "w") as file:
file.write(self.process_command_help(help_data))
def add_version_helper_assignment(self, page, type, version, bold=False):
a = type.upper() + version.replace(".", "_") + "='*"
if bold:
a += "*"
a += '<abbr title="' + type + ' in Bitcoin Core v' + version + '">'
a += type + ' in ' + version + '</abbr>*'
if bold:
a += "*"
a += "'"
page.tag("assign", a)
def add_version_helpers(self, page, version, date, new=False, updated=True, bold=False):
page.text("<!-- Bitcoin Core %s %s -->" % (version, date))
if new:
self.add_version_helper_assignment(page, "New", version, bold=bold)
if updated:
self.add_version_helper_assignment(
page, "Updated", version, bold=bold)
page.nl()
def render_version_info(self, page):
with page.tag("comment"):
page.text("""Styling notes: use highly-visible style for upcoming changes (not yet
released) and changes made in the last 6 months. Use less-visible
style for changes made up to two years ago. Don't point out
changes made more than two years ago.
Use v0.n.n in abbreviation title to prevent autocrossrefing.""")
page.nl()
page.text("<!-- Deprecated -->")
page.tag("assign", "DEPRECATED='**<abbr title=\"Deprecated; will be removed in a future version of Bitcoin Core\">Deprecated</abbr>**'")
self.add_version_helpers(page, "0.14.1", "April 2017", bold=True)
self.add_version_helpers(
page, "0.14.0", "March 2017", new=True, bold=True)
self.add_version_helpers(page, "0.13.1", "September 2016")
self.add_version_helpers(page, "0.13.0", "August 2016", new=True)
self.add_version_helpers(page, "0.12.1", "April 2016")
self.add_version_helpers(page, "0.12.0", "February 2016", new=True)
self.add_version_helpers(
page, "0.11.0", "July 2015", new=True, updated=False)
def render_overview_page(self, all_commands, render_version_info=True):
with open(self.output_dir / "quick-reference.md", "w") as file:
page = Page()
self.add_license_header(page)
page.tag(
"assign", 'filename="_data/devdocs/en/bitcoin-core/rpcs/quick-reference.md"')
page.nl()
page.text("#### Quick Reference {#rpc-quick-reference}")
page.tag("include", "helpers/subhead-links.md")
page.nl()
if render_version_info:
self.render_version_info(page)
page.text("""<!-- the summaries used below are defined in the files for the
particular RPC and aggregated into this helper file by the makefile
function manual-update-summaries-file. For example, to edit the
summary for GetBestBlockHash, edit
_includes/rpc/getbestblockhash.md and run `make manual-update-summaries`. -->""")
page.tag("include", "helpers/summaries.md")
page.nl()
for category in all_commands:
page.text("#### " + category + " RPCs")
page.text("{:.no_toc}")
page.text("<!-- no subhead-links here -->\n")
with page.tag("autocrossref"):
page.nl()
if category == "Wallet":
page.text("""**Note:** the wallet RPCs are only available if Bitcoin Core was built
with [wallet support][]{:#term-wallet-support}{:.term}, which is the
default.
""")
for command in all_commands[category]:
cmd = command.split(" ")[0]
item = "* [" + display_name(cmd) + "]"
item += "[rpc " + cmd + "]: "
item += "{{summary_" + uncapitalize(display_name(cmd))
if cmd == "ping":
item += "-rpc"
item += "}}"
if render_version_info:
annotation = self.annotations.annotation(cmd)
if "added" in annotation:
item += " {{NEW%s}}" % annotation["added"].replace(
".", "_")
if "added" in annotation and "changed" in annotation:
item += ","
if "changed" in annotation:
item += " {{UPDATED%s}}" % annotation["changed"].replace(
".", "_")
if "deprecated" in annotation:
item += " {{DEPRECATED}}"
page.text(item)
page.nl()
page.nl()
file.write(page.out)
|
|
# Copyright 2014-2017 Insight Software Consortium.
# Copyright 2004-2009 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
import os
from . import class_declaration
from . import type_traits
from . import enumeration
from . import calldef_members
from . import calldef_types
from . import scopedef
from . import cpptypes
from .. import utils
def is_union(declaration):
"""
Returns True if declaration represents a C++ union
Args:
declaration (declaration_t): the declaration to be checked.
Returns:
bool: True if declaration represents a C++ union
"""
if not is_class(declaration):
return False
decl = class_traits.get_declaration(declaration)
return decl.class_type == class_declaration.CLASS_TYPES.UNION
def is_struct(declaration):
"""
Returns True if declaration represents a C++ struct
Args:
declaration (declaration_t): the declaration to be checked.
Returns:
bool: True if declaration represents a C++ struct
"""
if not is_class(declaration):
return False
decl = class_traits.get_declaration(declaration)
return decl.class_type == class_declaration.CLASS_TYPES.STRUCT
class declaration_xxx_traits(object):
"""this class implements the functionality needed for convenient work with
declaration classes
Implemented functionality:
- find out whether a declaration is a desired one
- get reference to the declaration
"""
def __init__(self, declaration_class):
self.declaration_class = declaration_class
@staticmethod
def __apply_sequence(type_):
return \
type_traits.remove_declarated(
type_traits.remove_elaborated(
type_traits.remove_cv(
type_traits.remove_alias(
type_traits.remove_pointer(type_)))))
def is_my_case(self, type_):
"""returns True, if type represents the desired declaration,
False otherwise"""
return (
isinstance(self.__apply_sequence(type_), self.declaration_class)
)
def get_declaration(self, type_):
"""returns reference to the declaration
Precondition: self.is_my_case( type ) == True
"""
return self.__apply_sequence(type_)
enum_traits = declaration_xxx_traits(enumeration.enumeration_t)
"""implements functionality, needed for convenient work with C++ enums"""
is_enum = enum_traits.is_my_case
"""returns True, if type represents C++ enumeration declaration,
False otherwise"""
enum_declaration = enum_traits.get_declaration
"""returns reference to enum declaration"""
class_traits = declaration_xxx_traits(class_declaration.class_t)
"""implements functionality, needed for convenient work with C++ classes"""
is_class = class_traits.is_my_case
"""returns True, if type represents C++ class definition, False otherwise"""
class_declaration_traits = declaration_xxx_traits(
class_declaration.class_declaration_t)
"""implements functionality, needed for convenient work with C++ class
declarations"""
is_class_declaration = class_declaration_traits.is_my_case
"""returns True, if type represents C++ class declaration, False otherwise"""
def find_trivial_constructor(type_):
"""
Returns reference to trivial constructor.
Args:
type_ (declarations.class_t): the class to be searched.
Returns:
declarations.constructor_t: the trivial constructor
"""
assert isinstance(type_, class_declaration.class_t)
trivial = type_.constructors(
lambda x: is_trivial_constructor(x),
recursive=False,
allow_empty=True)
if trivial:
return trivial[0]
return None
def find_copy_constructor(type_):
"""
Returns reference to copy constructor.
Args:
type_ (declarations.class_t): the class to be searched.
Returns:
declarations.constructor_t: the copy constructor
"""
copy_ = type_.constructors(
lambda x: is_copy_constructor(x),
recursive=False,
allow_empty=True)
if copy_:
return copy_[0]
return None
def find_noncopyable_vars(class_type, already_visited_cls_vars=None):
"""
Returns list of all `noncopyable` variables.
If an already_visited_cls_vars list is provided as argument, the returned
list will not contain these variables. This list will be extended with
whatever variables pointing to classes have been found.
Args:
class_type (declarations.class_t): the class to be searched.
already_visited_cls_vars (list): optional list of vars that should not
be checked a second time, to prevent infinite recursions.
Returns:
list: list of all `noncopyable` variables.
"""
assert isinstance(class_type, class_declaration.class_t)
logger = utils.loggers.cxx_parser
mvars = class_type.variables(
lambda v: not v.type_qualifiers.has_static,
recursive=False,
allow_empty=True)
noncopyable_vars = []
if already_visited_cls_vars is None:
already_visited_cls_vars = []
message = (
"__contains_noncopyable_mem_var - %s - TRUE - " +
"contains const member variable")
for mvar in mvars:
var_type = type_traits.remove_reference(mvar.decl_type)
if type_traits.is_const(var_type):
no_const = type_traits.remove_const(var_type)
if type_traits.is_fundamental(no_const) or is_enum(no_const):
logger.debug(
(message + "- fundamental or enum"),
var_type.decl_string)
noncopyable_vars.append(mvar)
if is_class(no_const):
logger.debug((message + " - class"), var_type.decl_string)
noncopyable_vars.append(mvar)
if type_traits.is_array(no_const):
logger.debug((message + " - array"), var_type.decl_string)
noncopyable_vars.append(mvar)
if type_traits.is_pointer(var_type):
continue
if class_traits.is_my_case(var_type):
cls = class_traits.get_declaration(var_type)
# Exclude classes that have already been visited.
if cls in already_visited_cls_vars:
continue
already_visited_cls_vars.append(cls)
if is_noncopyable(cls, already_visited_cls_vars):
logger.debug(
(message + " - class that is not copyable"),
var_type.decl_string)
noncopyable_vars.append(mvar)
logger.debug((
"__contains_noncopyable_mem_var - %s - FALSE - doesn't " +
"contain noncopyable members"), class_type.decl_string)
return noncopyable_vars
def has_trivial_constructor(class_):
"""if class has public trivial constructor, this function will return
reference to it, None otherwise"""
class_ = class_traits.get_declaration(class_)
trivial = find_trivial_constructor(class_)
if trivial and trivial.access_type == 'public':
return trivial
def has_copy_constructor(class_):
"""if class has public copy constructor, this function will return
reference to it, None otherwise"""
class_ = class_traits.get_declaration(class_)
copy_constructor = find_copy_constructor(class_)
if copy_constructor and copy_constructor.access_type == 'public':
return copy_constructor
def has_destructor(class_):
"""if class has destructor, this function will return reference to it,
None otherwise"""
class_ = class_traits.get_declaration(class_)
destructor = class_.decls(
decl_type=calldef_members.destructor_t,
recursive=False,
allow_empty=True)
if destructor:
return destructor[0]
def has_public_constructor(class_):
"""if class has any public constructor, this function will return list of
them, otherwise None"""
class_ = class_traits.get_declaration(class_)
decls = class_.constructors(
lambda c: not is_copy_constructor(c) and c.access_type == 'public',
recursive=False,
allow_empty=True)
if decls:
return decls
def has_public_assign(class_):
"""returns True, if class has public assign operator, False otherwise"""
class_ = class_traits.get_declaration(class_)
decls = class_.member_operators(
lambda o: o.symbol == '=' and o.access_type == 'public',
recursive=False,
allow_empty=True)
return bool(decls)
def has_public_destructor(decl_type):
"""returns True, if class has public destructor, False otherwise"""
d = has_destructor(decl_type)
return d and d.access_type == 'public'
def has_vtable(decl_type):
"""True, if class has virtual table, False otherwise"""
assert isinstance(decl_type, class_declaration.class_t)
return bool(
decl_type.calldefs(
lambda f: isinstance(f, calldef_members.member_function_t) and
f.virtuality != calldef_types.VIRTUALITY_TYPES.NOT_VIRTUAL,
recursive=False,
allow_empty=True))
def is_base_and_derived(based, derived):
"""returns True, if there is "base and derived" relationship between
classes, False otherwise"""
assert isinstance(based, class_declaration.class_t)
assert isinstance(derived, (class_declaration.class_t, tuple))
if isinstance(derived, class_declaration.class_t):
all_derived = ([derived])
else: # tuple
all_derived = derived
for derived_cls in all_derived:
for base_desc in derived_cls.recursive_bases:
if base_desc.related_class == based:
return True
return False
def has_any_non_copyconstructor(decl_type):
"""if class has any public constructor, which is not copy constructor,
this function will return list of them, otherwise None"""
class_ = class_traits.get_declaration(decl_type)
decls = class_.constructors(
lambda c: not is_copy_constructor(c) and c.access_type == 'public',
recursive=False,
allow_empty=True)
if decls:
return decls
class __is_convertible_t(object):
"""implementation details"""
def __init__(self, source, target):
self.__source = self.__normalize(source)
self.__target = self.__normalize(target)
@staticmethod
def __find_class_by_class_declaration(class_decl):
found = scopedef.find_declaration(
class_decl.parent.declarations,
name=class_decl.name,
decl_type=class_declaration.class_t)
return found
def __normalize(self, type_):
type_ = type_traits.remove_alias(type_)
bt_of_type = type_traits.base_type(type_)
if isinstance(bt_of_type, cpptypes.declarated_t) \
and isinstance(bt_of_type.declaration,
class_declaration.class_declaration_t):
type_ = type_.clone()
bt_of_type = type_traits.base_type(type_)
bt_of_type.declaration = self.__find_class_by_class_declaration(
bt_of_type.declaration)
return type_
@staticmethod
def __test_trivial(src, target):
if not (src and target):
return False
if type_traits.is_same(src, target):
return True # X => X
if type_traits.is_const(target) and type_traits.is_same(
src, target.base):
return True # X => const X
if type_traits.is_reference(target) and type_traits.is_same(
src, target.base):
return True # X => X&
if type_traits.is_reference(target) and type_traits.is_const(
target.base) and type_traits.is_same(src, target.base.base):
return True # X => const X&
if type_traits.is_same(target, cpptypes.pointer_t(cpptypes.void_t())):
if type_traits.is_integral(src) or is_enum(src):
return False
return True # X => void*
if type_traits.is_pointer(src) and \
type_traits.is_pointer(target) and \
type_traits.is_const(target.base) and \
type_traits.is_same(src.base, target.base.base):
return True # X* => const X*
if type_traits.is_reference(src) and \
type_traits.is_reference(target) and \
type_traits.is_const(target.base) and \
type_traits.is_same(src.base, target.base.base):
return True # X& => const X&
if not type_traits.is_const(src) and \
type_traits.is_array(src) and \
type_traits.is_pointer(target) and \
type_traits.is_same(type_traits.base_type(src), target.base):
return True # X[2] => X*
if type_traits.is_array(src) and \
type_traits.is_pointer(target) and \
type_traits.is_const(target.base) and \
type_traits.is_same(
type_traits.base_type(src), target.base.base):
return True
@staticmethod
def __test_pointer_to_func_or_mv__to__func_or_mv(source, target):
if type_traits.is_pointer(source) \
and type_traits.is_reference(target) \
and isinstance(target.base,
(cpptypes.free_function_type_t,
cpptypes.member_function_type_t,
cpptypes.member_variable_type_t)) \
and type_traits.is_same(source.base, target.base):
return True
if type_traits.is_pointer(source) \
and isinstance(target,
(cpptypes.free_function_type_t,
cpptypes.member_function_type_t,
cpptypes.member_variable_type_t)) \
and type_traits.is_same(source.base, target):
return True
if type_traits.is_pointer(target) \
and type_traits.is_reference(source) \
and isinstance(source.base,
(cpptypes.free_function_type_t,
cpptypes.member_function_type_t,
cpptypes.member_variable_type_t)) \
and type_traits.is_same(source.base, target.base):
return True
if type_traits.is_pointer(target) \
and isinstance(source,
(cpptypes.free_function_type_t,
cpptypes.member_function_type_t,
cpptypes.member_variable_type_t)) \
and type_traits.is_same(target.base, source):
return True
@staticmethod
def __test_const_x_ref__to__x(source, target):
if not type_traits.is_reference(source) \
or not type_traits.is_const(source.base) \
or not type_traits.is_same(source.base.base, target):
return False
if type_traits.is_fundamental(target):
return True
if is_enum(target):
return True
if isinstance(target, cpptypes.declarated_t):
assert isinstance(target.declaration, class_declaration.class_t)
if has_copy_constructor(target.declaration):
return True # we have copy constructor
return False
@staticmethod
def __test_const_ref_x__to__y(source, target):
if not type_traits.is_reference(source) or not \
type_traits.is_const(source.base):
return False
if type_traits.is_fundamental(source.base.base) and \
type_traits.is_fundamental(target):
return True
if is_convertible(source.base.base, cpptypes.int_t()) and \
is_enum(target):
return True
if isinstance(target, cpptypes.declarated_t):
assert isinstance(target.declaration, class_declaration.class_t)
if has_copy_constructor(target.declaration):
return True # we have copy constructor
return False
@staticmethod
def __test_ref_x__to__x(source, target):
if not type_traits.is_reference(source) or not \
type_traits.is_same(source.base, target):
return False
if type_traits.is_fundamental(target):
return True
if is_enum(target):
return True
if isinstance(target, cpptypes.declarated_t):
assert isinstance(target.declaration, class_declaration.class_t)
if has_copy_constructor(target.declaration):
return True # we have copy constructor
return False
@staticmethod
def __test_ref_x__to__y(source, target):
if not type_traits.is_reference(source):
return False
if type_traits.is_fundamental(source.base) and \
type_traits.is_fundamental(target):
return True
if is_convertible(source.base, cpptypes.int_t()) and is_enum(target):
return True
if isinstance(target, cpptypes.declarated_t):
assert isinstance(target.declaration, class_declaration.class_t)
if has_copy_constructor(target.declaration):
return True # we have copy constructor
return False
@staticmethod
def __test_fundamental__to__fundamental(source, target):
if not type_traits.is_fundamental(
type_traits.base_type(source)) or not \
type_traits.is_fundamental(
type_traits.base_type(target)):
return False
if type_traits.is_void(type_traits.base_type(source)) or \
type_traits.is_void(type_traits.base_type(target)):
return False
if type_traits.is_fundamental(source) and \
type_traits.is_fundamental(target):
return True
if not type_traits.is_pointer(source) and \
type_traits.is_fundamental(target):
return True
if not type_traits.is_pointer(source) and \
type_traits.is_const(target) and \
type_traits.is_fundamental(target.base):
return True
if type_traits.is_fundamental(source) \
and type_traits.is_reference(target) \
and type_traits.is_const(target.base) \
and type_traits.is_fundamental(target.base.base):
return True # X => const Y&
return False
@staticmethod
def _is_both_declarated(x, y):
return (
isinstance(x, cpptypes.declarated_t) and
isinstance(y, cpptypes.declarated_t))
def __test_derived_to_based(self, source, target):
derived = type_traits.base_type(source)
base = type_traits.base_type(target)
if not (
isinstance(derived, cpptypes.declarated_t) and
isinstance(derived.declaration, class_declaration.class_t)):
return False
if not (isinstance(base, cpptypes.declarated_t) and
isinstance(base.declaration, class_declaration.class_t)):
return False
base = base.declaration
derived = derived.declaration
if not is_base_and_derived(base, derived):
return False
for b in derived.recursive_bases:
if ((b.related_class is base) and b.access_type !=
class_declaration.ACCESS_TYPES.PRIVATE):
break
else:
return False
base = target
derived = source
# d => b
if self._is_both_declarated(base, derived):
return True
# d* => b*
if type_traits.is_pointer(derived) and \
type_traits.is_pointer(base) and \
self._is_both_declarated(base.base, derived.base):
return True
# const d* => const b*
if type_traits.is_pointer(derived) and \
type_traits.is_pointer(base) and \
type_traits.is_const(derived.base) and \
type_traits.is_const(base.base) \
and self._is_both_declarated(base.base.base, derived.base.base):
return True
# d* => const b*
if type_traits.is_pointer(derived) and type_traits.is_pointer(base) \
and type_traits.is_const(derived.base)\
and self._is_both_declarated(base.base.base, derived.base):
return True
# d& => b&
if type_traits.is_reference(derived) and \
type_traits.is_reference(base) and \
self._is_both_declarated(base.base, derived.base):
return True
# const d& => const b&
if type_traits.is_reference(derived) and \
type_traits.is_reference(base) and \
type_traits.is_const(derived.base) and \
type_traits.is_const(base.base) \
and self._is_both_declarated(base.base.base, derived.base.base):
return True
# d& => const b&
if type_traits.is_reference(derived) and \
type_traits.is_reference(base) and \
type_traits.is_const(derived.base) \
and self._is_both_declarated(base.base.base, derived.base):
return True
return False
def is_convertible(self):
source = self.__source
target = self.__target
if self.__test_trivial(source, target):
return True
if type_traits.is_array(source) or type_traits.is_array(target):
return False
if self.__test_const_x_ref__to__x(source, target):
return True
if self.__test_const_ref_x__to__y(source, target):
return True
if self.__test_ref_x__to__x(source, target):
return True
if self.__test_ref_x__to__y(source, target):
return True
if self.__test_fundamental__to__fundamental(source, target):
return True
if self.__test_pointer_to_func_or_mv__to__func_or_mv(source, target):
return True
if self.__test_derived_to_based(source, target):
return True
if isinstance(source, cpptypes.declarated_t):
if isinstance(source.declaration, enumeration.enumeration_t) \
and type_traits.is_fundamental(target) \
and not type_traits.is_void(target):
return True # enum could be converted to any integral type
if isinstance(source.declaration, class_declaration.class_t):
source_inst = source.declaration
# class instance could be convertible to something else if it
# has operator
casting_operators = scopedef.find_all_declarations(
source_inst.declarations,
decl_type=calldef_members.casting_operator_t,
recursive=False)
if casting_operators:
for operator in casting_operators:
if is_convertible(operator.return_type, target):
return True
# maybe the target is a class too, so in this case we should check whether
# it has a constructor from the source
if isinstance(target, cpptypes.declarated_t) and \
isinstance(target.declaration, class_declaration.class_t):
constructors = scopedef.find_all_declarations(
target.declaration.declarations,
decl_type=calldef_members.constructor_t,
recursive=False)
if constructors:
for constructor in constructors:
if len(constructor.arguments) != 1:
continue
# TODO: add test to check explicitness
if is_convertible(source,
constructor.arguments[0].decl_type):
return True
return False
def is_convertible(source, target):
"""returns True, if source could be converted to target, otherwise False"""
return __is_convertible_t(source, target).is_convertible()
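# A minimal sketch (not part of the original module) of exercising
# is_convertible with the cpptypes primitives imported above; both checks fall
# under the fundamental-to-fundamental rule in __is_convertible_t.
#
#     assert is_convertible(cpptypes.int_t(), cpptypes.double_t())
#     assert is_convertible(cpptypes.float_t(), cpptypes.int_t())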
def __is_noncopyable_single(class_, already_visited_cls_vars=None):
"""
Implementation detail.
Checks if the class is non copyable, without considering the base classes.
Args:
class_ (declarations.class_t): the class to be checked
already_visited_cls_vars (list): optional list of vars that should not
be checked a second time, to prevent infinite recursions.
Returns:
bool: if the class is non copyable
"""
# It is not enough to check base classes; we should also check
# member variables.
logger = utils.loggers.cxx_parser
if has_copy_constructor(class_) \
and has_public_constructor(class_) \
and has_public_assign(class_) \
and has_public_destructor(class_):
msg = os.linesep.join([
"__is_noncopyable_single - %s - COPYABLE:" % class_.decl_string,
" trivial copy constructor: yes",
" public constructor: yes",
" public assign: yes",
" public destructor: yes"])
logger.debug(msg)
return False
if already_visited_cls_vars is None:
already_visited_cls_vars = []
if find_noncopyable_vars(class_, already_visited_cls_vars):
logger.debug(
("__is_noncopyable_single(TRUE) - %s - contains noncopyable " +
"members"), class_.decl_string)
return True
logger.debug((
"__is_noncopyable_single(FALSE) - %s - COPYABLE, because is " +
"doesn't contains noncopyable members"), class_.decl_string)
return False
def is_noncopyable(class_, already_visited_cls_vars=None):
"""
Checks if class is non copyable
Args:
class_ (declarations.class_t): the class to be checked
already_visited_cls_vars (list): optional list of vars that should not
be checked a second time, to prevent infinite recursions.
In general you can ignore this argument, it is mainly used during
recursive calls of is_noncopyable() done by pygccxml.
Returns:
bool: if the class is non copyable
"""
logger = utils.loggers.cxx_parser
class_decl = class_traits.get_declaration(class_)
true_header = "is_noncopyable(TRUE) - %s - " % class_.decl_string
if is_union(class_):
return False
if class_decl.is_abstract:
logger.debug(true_header + "abstract client")
return True
# if the class has a public, user-defined copy constructor, then the class
# is copyable
copy_ = find_copy_constructor(class_decl)
if copy_ and copy_.access_type == 'public' and not copy_.is_artificial:
return False
if already_visited_cls_vars is None:
already_visited_cls_vars = []
for base_desc in class_decl.recursive_bases:
assert isinstance(base_desc, class_declaration.hierarchy_info_t)
if base_desc.related_class.decl_string in \
('::boost::noncopyable', '::boost::noncopyable_::noncopyable'):
logger.debug(true_header + "derives from boost::noncopyable")
return True
if not has_copy_constructor(base_desc.related_class):
base_copy_ = find_copy_constructor(base_desc.related_class)
if base_copy_ and base_copy_.access_type == 'private':
logger.debug(
true_header +
"there is private copy constructor")
return True
elif __is_noncopyable_single(
base_desc.related_class, already_visited_cls_vars):
logger.debug(
true_header +
"__is_noncopyable_single returned True")
return True
if __is_noncopyable_single(
base_desc.related_class, already_visited_cls_vars):
logger.debug(
true_header +
"__is_noncopyable_single returned True")
return True
if not has_copy_constructor(class_decl):
logger.debug(true_header + "does not have trivial copy constructor")
return True
elif not has_public_constructor(class_decl):
logger.debug(true_header + "does not have a public constructor")
return True
elif has_destructor(class_decl) and not has_public_destructor(class_decl):
logger.debug(true_header + "has private destructor")
return True
return __is_noncopyable_single(class_decl, already_visited_cls_vars)
def is_unary_operator(oper):
"""returns True, if operator is unary operator, otherwise False"""
# definition:
# member in class
# ret-type operator symbol()
# ret-type operator [++ --](int)
# globally
# ret-type operator symbol( arg )
# ret-type operator [++ --](X&, int)
symbols = ['!', '&', '~', '*', '+', '++', '-', '--']
if not isinstance(oper, calldef_members.operator_t):
return False
if oper.symbol not in symbols:
return False
if isinstance(oper, calldef_members.member_operator_t):
if len(oper.arguments) == 0:
return True
elif oper.symbol in ['++', '--'] and \
isinstance(oper.arguments[0].decl_type, cpptypes.int_t):
return True
return False
if len(oper.arguments) == 1:
return True
elif oper.symbol in ['++', '--'] \
and len(oper.arguments) == 2 \
and isinstance(oper.arguments[1].decl_type, cpptypes.int_t):
# maybe an additional check is needed for whether the first argument is
# a reference or not?
return True
return False
def is_binary_operator(oper):
"""returns True, if operator is binary operator, otherwise False"""
# definition:
# member in class
# ret-type operator symbol(arg)
# globally
# ret-type operator symbol( arg1, arg2 )
symbols = [
',', '()', '[]', '!=', '%', '%=', '&', '&&', '&=', '*', '*=', '+',
'+=', '-', '-=', '->', '->*', '/', '/=', '<', '<<', '<<=', '<=', '=',
'==', '>', '>=', '>>', '>>=', '^', '^=', '|', '|=', '||']
if not isinstance(oper, calldef_members.operator_t):
return False
if oper.symbol not in symbols:
return False
if isinstance(oper, calldef_members.member_operator_t):
if len(oper.arguments) == 1:
return True
return False
if len(oper.arguments) == 2:
return True
return False
def is_copy_constructor(constructor):
"""
Check if the declaration is a copy constructor,
Args:
constructor (declarations.constructor_t): the constructor
to be checked.
Returns:
bool: True if this is a copy constructor, False otherwise.
"""
assert isinstance(constructor, calldef_members.constructor_t)
args = constructor.arguments
parent = constructor.parent
# A copy constructor has only one argument
if len(args) != 1:
return False
# We have only one argument, get it
arg = args[0]
if not isinstance(arg.decl_type, cpptypes.compound_t):
# An argument of type declarated_t (a typedef) could be passed to
# the constructor; and it could be a reference.
# But in C++ you can NOT write:
# "typedef class MyClass { MyClass(const MyClass & arg) {} }"
# If the argument is a typedef, this is not a copy constructor.
# See the hierarchy of declarated_t and compound_t. They both
# inherit from type_t but are not related so we can discriminate
# between them.
return False
# The argument needs to be passed by reference in a copy constructor
if not type_traits.is_reference(arg.decl_type):
return False
# The argument needs to be const for a copy constructor
if not type_traits.is_const(arg.decl_type.base):
return False
un_aliased = type_traits.remove_alias(arg.decl_type.base)
# un_aliased now refers to const_t instance
if not isinstance(un_aliased.base, cpptypes.declarated_t):
# We are looking for a declaration
# If "class MyClass { MyClass(const int & arg) {} }" is used,
# this is not copy constructor, so we return False here.
# -> un_aliased.base == cpptypes.int_t (!= cpptypes.declarated_t)
return False
# Final check: compare the parent (the class declaration for example)
# with the declaration of the type passed as argument.
return id(un_aliased.base.declaration) == id(parent)
def is_trivial_constructor(constructor):
"""
Check if the declaration is a trivial constructor.
Args:
constructor (declarations.constructor_t): the constructor
to be checked.
Returns:
        bool: True if this is a trivial constructor, False otherwise.
"""
assert isinstance(constructor, calldef_members.constructor_t)
return not bool(constructor.arguments)
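# Illustrative sketch (not part of the original module): the predicates above
# can be combined to classify the constructors of a class. `ctors` is assumed
# to be an iterable of calldef_members.constructor_t instances, for example
# collected from a pygccxml declarations tree.
def _classify_constructors(ctors):
    """Group constructor declarations by the checks defined in this module."""
    return {
        "copy": [c for c in ctors if is_copy_constructor(c)],
        "trivial": [c for c in ctors if is_trivial_constructor(c)],
        "other": [c for c in ctors
                  if not is_copy_constructor(c)
                  and not is_trivial_constructor(c)],
    }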
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.security.privateca_v1beta1.types import resources
from google.cloud.security.privateca_v1beta1.types import service
from google.longrunning import operations_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-private-ca",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class CertificateAuthorityServiceTransport(abc.ABC):
"""Abstract transport class for CertificateAuthorityService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
DEFAULT_HOST: str = "privateca.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.create_certificate: gapic_v1.method.wrap_method(
self.create_certificate, default_timeout=None, client_info=client_info,
),
self.get_certificate: gapic_v1.method.wrap_method(
self.get_certificate, default_timeout=None, client_info=client_info,
),
self.list_certificates: gapic_v1.method.wrap_method(
self.list_certificates, default_timeout=None, client_info=client_info,
),
self.revoke_certificate: gapic_v1.method.wrap_method(
self.revoke_certificate, default_timeout=None, client_info=client_info,
),
self.update_certificate: gapic_v1.method.wrap_method(
self.update_certificate, default_timeout=None, client_info=client_info,
),
self.activate_certificate_authority: gapic_v1.method.wrap_method(
self.activate_certificate_authority,
default_timeout=None,
client_info=client_info,
),
self.create_certificate_authority: gapic_v1.method.wrap_method(
self.create_certificate_authority,
default_timeout=None,
client_info=client_info,
),
self.disable_certificate_authority: gapic_v1.method.wrap_method(
self.disable_certificate_authority,
default_timeout=None,
client_info=client_info,
),
self.enable_certificate_authority: gapic_v1.method.wrap_method(
self.enable_certificate_authority,
default_timeout=None,
client_info=client_info,
),
self.fetch_certificate_authority_csr: gapic_v1.method.wrap_method(
self.fetch_certificate_authority_csr,
default_timeout=None,
client_info=client_info,
),
self.get_certificate_authority: gapic_v1.method.wrap_method(
self.get_certificate_authority,
default_timeout=None,
client_info=client_info,
),
self.list_certificate_authorities: gapic_v1.method.wrap_method(
self.list_certificate_authorities,
default_timeout=None,
client_info=client_info,
),
self.restore_certificate_authority: gapic_v1.method.wrap_method(
self.restore_certificate_authority,
default_timeout=None,
client_info=client_info,
),
self.schedule_delete_certificate_authority: gapic_v1.method.wrap_method(
self.schedule_delete_certificate_authority,
default_timeout=None,
client_info=client_info,
),
self.update_certificate_authority: gapic_v1.method.wrap_method(
self.update_certificate_authority,
default_timeout=None,
client_info=client_info,
),
self.get_certificate_revocation_list: gapic_v1.method.wrap_method(
self.get_certificate_revocation_list,
default_timeout=None,
client_info=client_info,
),
self.list_certificate_revocation_lists: gapic_v1.method.wrap_method(
self.list_certificate_revocation_lists,
default_timeout=None,
client_info=client_info,
),
self.update_certificate_revocation_list: gapic_v1.method.wrap_method(
self.update_certificate_revocation_list,
default_timeout=None,
client_info=client_info,
),
self.get_reusable_config: gapic_v1.method.wrap_method(
self.get_reusable_config, default_timeout=None, client_info=client_info,
),
self.list_reusable_configs: gapic_v1.method.wrap_method(
self.list_reusable_configs,
default_timeout=None,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def operations_client(self):
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def create_certificate(
self,
) -> Callable[
[service.CreateCertificateRequest],
Union[resources.Certificate, Awaitable[resources.Certificate]],
]:
raise NotImplementedError()
@property
def get_certificate(
self,
) -> Callable[
[service.GetCertificateRequest],
Union[resources.Certificate, Awaitable[resources.Certificate]],
]:
raise NotImplementedError()
@property
def list_certificates(
self,
) -> Callable[
[service.ListCertificatesRequest],
Union[
service.ListCertificatesResponse,
Awaitable[service.ListCertificatesResponse],
],
]:
raise NotImplementedError()
@property
def revoke_certificate(
self,
) -> Callable[
[service.RevokeCertificateRequest],
Union[resources.Certificate, Awaitable[resources.Certificate]],
]:
raise NotImplementedError()
@property
def update_certificate(
self,
) -> Callable[
[service.UpdateCertificateRequest],
Union[resources.Certificate, Awaitable[resources.Certificate]],
]:
raise NotImplementedError()
@property
def activate_certificate_authority(
self,
) -> Callable[
[service.ActivateCertificateAuthorityRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def create_certificate_authority(
self,
) -> Callable[
[service.CreateCertificateAuthorityRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def disable_certificate_authority(
self,
) -> Callable[
[service.DisableCertificateAuthorityRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def enable_certificate_authority(
self,
) -> Callable[
[service.EnableCertificateAuthorityRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def fetch_certificate_authority_csr(
self,
) -> Callable[
[service.FetchCertificateAuthorityCsrRequest],
Union[
service.FetchCertificateAuthorityCsrResponse,
Awaitable[service.FetchCertificateAuthorityCsrResponse],
],
]:
raise NotImplementedError()
@property
def get_certificate_authority(
self,
) -> Callable[
[service.GetCertificateAuthorityRequest],
Union[
resources.CertificateAuthority, Awaitable[resources.CertificateAuthority]
],
]:
raise NotImplementedError()
@property
def list_certificate_authorities(
self,
) -> Callable[
[service.ListCertificateAuthoritiesRequest],
Union[
service.ListCertificateAuthoritiesResponse,
Awaitable[service.ListCertificateAuthoritiesResponse],
],
]:
raise NotImplementedError()
@property
def restore_certificate_authority(
self,
) -> Callable[
[service.RestoreCertificateAuthorityRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def schedule_delete_certificate_authority(
self,
) -> Callable[
[service.ScheduleDeleteCertificateAuthorityRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def update_certificate_authority(
self,
) -> Callable[
[service.UpdateCertificateAuthorityRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def get_certificate_revocation_list(
self,
) -> Callable[
[service.GetCertificateRevocationListRequest],
Union[
resources.CertificateRevocationList,
Awaitable[resources.CertificateRevocationList],
],
]:
raise NotImplementedError()
@property
def list_certificate_revocation_lists(
self,
) -> Callable[
[service.ListCertificateRevocationListsRequest],
Union[
service.ListCertificateRevocationListsResponse,
Awaitable[service.ListCertificateRevocationListsResponse],
],
]:
raise NotImplementedError()
@property
def update_certificate_revocation_list(
self,
) -> Callable[
[service.UpdateCertificateRevocationListRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def get_reusable_config(
self,
) -> Callable[
[service.GetReusableConfigRequest],
Union[resources.ReusableConfig, Awaitable[resources.ReusableConfig]],
]:
raise NotImplementedError()
@property
def list_reusable_configs(
self,
) -> Callable[
[service.ListReusableConfigsRequest],
Union[
service.ListReusableConfigsResponse,
Awaitable[service.ListReusableConfigsResponse],
],
]:
raise NotImplementedError()
__all__ = ("CertificateAuthorityServiceTransport",)
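# A minimal sketch (not part of the generated client) of how a concrete
# transport satisfies this abstract interface: `close` and the per-RPC
# properties are overridden so that `_prep_wrapped_messages` can wrap real
# callables. `_stub_get_certificate` is hypothetical; a real transport (e.g.
# the gRPC one) would return the bound RPC method here instead.
class _SketchTransport(CertificateAuthorityServiceTransport):
    def close(self):
        pass  # nothing to release in this sketch
    @property
    def get_certificate(self):
        def _stub_get_certificate(request, **kwargs):
            # A real implementation would send `request` over the wire; this
            # stub only illustrates the expected shape:
            # Callable[[GetCertificateRequest], Certificate].
            return resources.Certificate()
        return _stub_get_certificate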
|
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import time
import re
import collections
import math
from PySide import QtCore, QtGui
from . import _callinfo, _time
COLOR_TEXT = QtGui.QColor(0, 0, 0)
COLOR_DIVIDER = QtGui.QColor(150, 150, 150)
COLOR_TIMESTAMP = QtGui.QColor(0, 0, 0)
COLOR_BACKGROUND = QtGui.QColor(255, 255, 255)
COLOR_CURSOR = QtGui.QColor(129, 190, 247)
COLOR_QSO_BACKGROUND = QtGui.QColor(246, 227, 206)
COLOR_CALL = QtGui.QColor(150, 0, 0)
class Section:
def __init__(self, kind, content, data = None):
self.kind = kind
self.content = content
self.data = data
class NotedLine:
def __init__(self, timestamp, sections):
self.timestamp = timestamp
self.sections = sections
def __str__(self):
return "{}: {}".format(
_time.z(self.timestamp),
"".join([s.content for s in self.sections]))
class NotedQsos:
class Qso(collections.namedtuple("QSO", "start end")):
def __len__(self):
return self.end - self.start + 1
def __contains__(self, line):
return self.start <= line and line <= self.end
def __init__(self):
self.qsos = []
def __getitem__(self, index):
return self.qsos[index]
def __len__(self):
return len(self.qsos)
def __iter__(self):
        return iter(self.qsos)
def insert_qso(self, line):
index = self._find_insertion_index(line)
if index < 0: return None
new_qso = self.Qso(line, line)
self.qsos.insert(index, new_qso)
return new_qso
def _find_insertion_index(self, line):
return self._bisect_qsos(
line, 0, len(self.qsos), lambda i, q: -1, lambda s: s)
def is_in_qso(self, line):
        return self._find_qso(line)[1] is not None
def _find_qso(self, line):
return self._bisect_qsos(
line, 0, len(self.qsos), lambda i, q: (i, q), lambda s: (s, None))
def _bisect_qsos(self, line, start, end, found, not_found):
if start >= end: return not_found(start)
pivot = (start + end) // 2
qso = self.qsos[pivot]
if line in qso:
return found(pivot, qso)
elif qso.start > line:
return self._bisect_qsos(line, start, pivot, found, not_found)
else:
return self._bisect_qsos(line, pivot + 1, end, found, not_found)
def remove_qso(self, line):
qso = self._find_qso(line)[1]
self.qsos.remove(qso)
def get_qso(self, line):
return self._find_qso(line)[1]
def get_qsos(self, start_line, end_line):
start_index, start_qso = self._find_qso(start_line)
if not start_qso:
start_index, start_qso = self._find_qso_after(start_line)
if not start_qso:
return []
end_index, end_qso = self._find_qso(end_line)
if not end_qso:
end_index, end_qso = self._find_qso_before(end_line)
if not end_qso:
return []
return self.qsos[start_index:end_index + 1]
def move_qso_start(self, line):
index, qso = self._find_qso(line)
if not qso:
index, qso = self._find_qso_after(line)
if not qso:
new_qso = self.Qso(line, line)
self.qsos.insert(index, new_qso)
else:
new_qso = self.Qso(line, qso.end)
self.qsos[index] = new_qso
return new_qso
def _find_qso_after(self, line):
index = self._find_insertion_index(line)
if index == len(self.qsos): return (index, None)
return (index, self.qsos[index])
def move_qso_end(self, line):
index, qso = self._find_qso(line)
if not qso:
index, qso = self._find_qso_before(line)
if not qso:
new_qso = self.Qso(line, line)
self.qsos.insert(index, new_qso)
else:
new_qso = self.Qso(qso.start, line)
self.qsos[index] = new_qso
return new_qso
def _find_qso_before(self, line):
index = self._find_insertion_index(line)
if index == 0: return (index, None)
return (index - 1, self.qsos[index - 1])
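# Illustrative sketch (not used by the widget): typical NotedQsos bookkeeping.
# A QSO is a closed range of line indices; inserting at a line creates a
# one-line QSO, moving its end grows it, and get_qsos() returns the ranges
# overlapping a window of lines.
def _noted_qsos_usage_sketch():
    qsos = NotedQsos()
    qsos.insert_qso(5)              # QSO covering line 5 only
    qsos.move_qso_end(7)            # grow it to cover lines 5..7
    assert qsos.is_in_qso(6)
    assert [len(q) for q in qsos.get_qsos(0, 10)] == [3]
    return qsos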
class Notepad(QtCore.QObject):
line_added = QtCore.Signal(object)
cursor_moved = QtCore.Signal(int)
qso_changed = QtCore.Signal(object)
call_added = QtCore.Signal(object)
def __init__(self, parent = None):
QtCore.QObject.__init__(self, parent)
self.lines = []
self.qsos = NotedQsos()
self.cursor = -1
def __len__(self):
return len(self.lines)
def add_line(self, raw_text):
if len(raw_text.strip()) == 0: return
sections = []
next_char = 0
for match in _callinfo.Call.find_all(raw_text, lambda m: m):
match_start = match.start()
match_end = match.end()
if match_start > next_char:
sections.append(
Section("text", raw_text[next_char:match_start]))
call = _callinfo.Call(raw_text[match_start:match_end])
sections.append(Section("call", str(call), call))
self.call_added.emit(call)
next_char = match.end(0)
if next_char < len(raw_text):
sections.append(Section("text", raw_text[next_char:]))
line = NotedLine(time.time(), sections)
self.lines.append(line)
self.line_added.emit(line)
def move_cursor_up(self):
last_cursor = self.cursor
if self.cursor == -1:
self.cursor = len(self) - 1
else:
self.cursor = max(0, self.cursor - 1)
if last_cursor != self.cursor:
self.cursor_moved.emit(self.cursor)
def move_cursor_down(self):
last_cursor = self.cursor
if self.cursor == len(self) - 1 or self.cursor == -1:
self.cursor = -1
else:
self.cursor += 1
if last_cursor != self.cursor:
self.cursor_moved.emit(self.cursor)
def move_cursor_to(self, line_index):
last_cursor = self.cursor
if line_index < 0:
self.cursor = -1
else:
self.cursor = min(line_index, len(self) - 1)
if last_cursor != self.cursor:
self.cursor_moved.emit(self.cursor)
def is_cursor_line(self, line):
if self.cursor == -1: return False
return line == self.lines[self.cursor]
def insert_qso(self):
if self.cursor == -1: return
qso = self.qsos.insert_qso(self.cursor)
self.qso_changed.emit(qso)
def remove_qso(self):
if self.cursor == -1: return
self.qsos.remove_qso(self.cursor)
self.qso_changed.emit(None)
def move_qso_start(self):
if self.cursor == -1: return
qso = self.qsos.move_qso_start(self.cursor)
self.qso_changed.emit(qso)
def move_qso_end(self):
if self.cursor == -1: return
qso = self.qsos.move_qso_end(self.cursor)
self.qso_changed.emit(qso)
def get_qso(self):
if self.cursor == -1: return None
return self.qsos.get_qso(self.cursor)
def get_qsos(self, start_line, end_line):
return self.qsos.get_qsos(start_line, end_line)
def is_line_in_qso(self, line_index):
return self.qsos.is_in_qso(line_index)
class _NotepadPainter:
def __init__(self, painter, widget):
self.painter = painter
self.widget = widget
self.size = widget.size()
self.line_height = (painter.fontMetrics().boundingRect("Hg").height()
+ 2)
self.visible_lines = math.floor(
(self.size.height() - 2) / self.line_height)
self.timestamp_column_right = self.text_width("MMMMZ")
self.divider_line_x = self.timestamp_column_right + 2
self.content_column_left = self.timestamp_column_right + 4
self.clip_visible_lines_rect = QtCore.QRect(
0, 1, self.size.width(), self.visible_lines * self.line_height)
self.clip_all_rect = QtCore.QRect(
0, 0, self.size.width(), self.size.height())
def text_width(self, text):
return self.painter.fontMetrics().width(text)
def content_line_rect(self, line_index):
return QtCore.QRect(
self.content_column_left, self.line_top(line_index),
self.size.width() - self.content_column_left - 1, self.line_height)
def timestamp_line_rect(self, line_index):
return QtCore.QRect(
0, self.line_top(line_index),
self.timestamp_column_right, self.line_height)
def line_top(self, line_index):
return self.line_height * line_index + 1
def text_rect(self, line_rect):
return QtCore.QRect(
line_rect.x() + 2, line_rect.y() + 1,
line_rect.width() - 4, line_rect.height() - 2)
def clip_visible_lines(self):
self.painter.setClipRect(self.clip_visible_lines_rect)
def clip_all(self):
self.painter.setClipRect(self.clip_all_rect)
def draw_background(self):
self.painter.fillRect(
QtCore.QRect(0, 0, self.size.width(), self.size.height()),
COLOR_BACKGROUND)
def draw_divider(self):
self.painter.setPen(COLOR_DIVIDER)
self.painter.drawLine(
self.divider_line_x, 0, self.divider_line_x, self.size.height())
def draw_timestamp(self, line_index, text, divider_above):
line_rect = self.timestamp_line_rect(line_index)
text_rect = self.text_rect(line_rect)
if divider_above:
self.painter.setPen(COLOR_DIVIDER)
self.painter.drawLine(
line_rect.x(), line_rect.y(),
self.divider_line_x - line_rect.x(), line_rect.y())
self.painter.setPen(COLOR_TIMESTAMP)
self.painter.drawText(text_rect, QtCore.Qt.AlignRight, text)
def draw_content(self, line_index, sections, in_qso):
line_rect = self.content_line_rect(line_index)
text_rect = self.text_rect(line_rect)
if in_qso:
self.painter.fillRect(line_rect, COLOR_QSO_BACKGROUND)
text_colors = {"text": COLOR_TEXT, "call": COLOR_CALL}
x = text_rect.x()
y = text_rect.y()
for section in sections:
section_rect = self.painter.fontMetrics().boundingRect(
section.content)
section_rect.moveTo(x, y)
section_rect.setWidth(self.text_width(section.content))
if section.kind in text_colors:
text_color = text_colors[section.kind]
else:
text_color = COLOR_TEXT
self.painter.setPen(text_color)
self.painter.drawText(
section_rect, QtCore.Qt.AlignLeft, section.content)
x += section_rect.width()
def draw_qso_frame(self, start_index, end_index):
start_line_rect = self.content_line_rect(start_index)
end_line_rect = self.content_line_rect(end_index)
rect = QtCore.QRect(
start_line_rect.x(), start_line_rect.y(),
start_line_rect.width(),
end_line_rect.y() + end_line_rect.height()
- start_line_rect.y() - 1)
pen = QtGui.QPen(COLOR_DIVIDER)
pen.setWidth(2)
self.painter.setPen(pen)
self.clip_visible_lines()
self.painter.drawRect(rect)
self.clip_all()
def draw_cursor(self, line_index):
line_rect = self.content_line_rect(line_index)
pen = QtGui.QPen(COLOR_CURSOR)
pen.setWidth(2)
self.painter.setPen(pen)
self.painter.drawRect(line_rect)
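# Illustrative sketch (Qt-independent) of the line geometry used by
# _NotepadPainter: line i occupies the vertical band starting at
# line_height * i + 1, and the number of fully visible lines is
# floor((widget_height - 2) / line_height).
def _visible_lines_sketch(widget_height, line_height):
    return math.floor((widget_height - 2) / line_height)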
class _PlainNotepadWidget(QtGui.QWidget):
update_visible_lines = QtCore.Signal(int)
line_clicked = QtCore.Signal(int)
def __init__(self, notepad, parent = None):
QtGui.QWidget.__init__(self, parent)
self.notepad = notepad
self.visible_lines = 0
self.line_height = 0
self.stick_to_bottom = True
self.bottom_line = -1
def scroll_to_bottom(self):
self.stick_to_bottom = True
self.bottom_line = -1
self.repaint()
def scroll_to_line(self, line):
self.stick_to_bottom = False
self.bottom_line = line
self.repaint()
def paintEvent(self, event):
painter = QtGui.QPainter()
painter.begin(self)
self.draw_widget(painter)
painter.end()
def draw_widget(self, painter):
notepad_painter = _NotepadPainter(painter, self)
notepad_painter.draw_background()
notepad_painter.draw_divider()
last_timestamp = ""
if self.stick_to_bottom:
top_line = len(self.notepad) - notepad_painter.visible_lines
bottom_line = len(self.notepad) - 1
else:
top_line = self.bottom_line - notepad_painter.visible_lines + 1
bottom_line = self.bottom_line
lines = self.notepad.lines[max(0, top_line):bottom_line + 1]
line_index = max(0, -top_line)
for line in lines:
timestamp = _time.z(line.timestamp)
if timestamp != last_timestamp:
notepad_painter.draw_timestamp(
line_index, timestamp, last_timestamp != "")
last_timestamp = timestamp
line_in_qso = self.notepad.is_line_in_qso(line_index + top_line)
notepad_painter.draw_content(
line_index, line.sections, line_in_qso)
line_index += 1
for qso in self.notepad.get_qsos(max(0, top_line), bottom_line):
notepad_painter.draw_qso_frame(
qso.start - top_line, qso.end - top_line)
if (self.notepad.cursor >= max(0, top_line)
and self.notepad.cursor <= bottom_line):
notepad_painter.draw_cursor(self.notepad.cursor - top_line)
self.line_height = notepad_painter.line_height
last_visible_lines = self.visible_lines
self.visible_lines = notepad_painter.visible_lines
if self.visible_lines != last_visible_lines:
self.update_visible_lines.emit(self.visible_lines)
def mousePressEvent(self, e):
if self.line_height <= 0:
e.ignore()
return
bottom_line = (len(self.notepad) - 1
if self.stick_to_bottom
else self.bottom_line)
tail = 1 #self.size().height() % self.line_height
        line_on_page = (e.y() - tail) // self.line_height
line_index = bottom_line - self.visible_lines + line_on_page + 1
if line_index < 0:
e.ignore()
return
self.line_clicked.emit(line_index)
class NotepadWidget(QtGui.QFrame):
def __init__(self, notepad, parent = None):
QtGui.QFrame.__init__(self, parent)
self.notepad = notepad
self.notepad.line_added.connect(self._line_added)
self.notepad.cursor_moved.connect(self._cursor_moved)
self.notepad.qso_changed.connect(self._qso_changed)
self.plain_widget = _PlainNotepadWidget(self.notepad, self)
self.plain_widget.update_visible_lines.connect(
self._update_visible_lines)
self.plain_widget.line_clicked.connect(self._line_clicked)
self.scroll_bar = QtGui.QScrollBar(
QtCore.Qt.Orientation.Vertical, self)
self.scroll_bar.setMinimum(0)
self.scroll_bar.setMaximum(len(notepad))
self.scroll_bar.setPageStep(0)
self.scroll_bar.valueChanged[int].connect(self._scrolled)
hbox = QtGui.QHBoxLayout(self)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.setSpacing(0)
hbox.addWidget(self.plain_widget)
hbox.addWidget(self.scroll_bar)
self.setLayout(hbox)
self.setFrameStyle(QtGui.QFrame.WinPanel | QtGui.QFrame.Sunken)
self.setLineWidth(1)
def _line_added(self, line):
self._update_scroll_bar(
len(self.notepad), self.scroll_bar.pageStep(), True)
self.plain_widget.scroll_to_bottom()
@QtCore.Slot(int)
def _update_visible_lines(self, visible_lines):
self._update_scroll_bar(len(self.notepad), visible_lines, False)
def _update_scroll_bar(self, lines, visible_lines, stick_to_bottom):
was_at_bottom = self.at_bottom()
self.scroll_bar.setPageStep(visible_lines)
self.scroll_bar.setMaximum(max(0, lines - visible_lines))
if was_at_bottom or stick_to_bottom:
self.scroll_bar.setValue(self.scroll_bar.maximum())
def at_bottom(self):
return self.scroll_bar.value() == self.scroll_bar.maximum()
def wheelEvent(self, e):
self.scroll_bar.wheelEvent(e)
@QtCore.Slot(int)
def _scrolled(self, value):
if self.at_bottom():
self.plain_widget.scroll_to_bottom()
else:
bottom_line = value + self.scroll_bar.pageStep() - 1
self.plain_widget.scroll_to_line(bottom_line)
@QtCore.Slot(int)
def _cursor_moved(self, value):
if value == -1:
self.scroll_bar.setValue(self.scroll_bar.maximum())
self.plain_widget.scroll_to_bottom()
elif value < self.scroll_bar.value():
self.scroll_bar.setValue(value)
elif value >= self.scroll_bar.value() + self.scroll_bar.pageStep():
self.scroll_bar.setValue(value - self.scroll_bar.pageStep() + 1)
else:
self.plain_widget.repaint()
@QtCore.Slot(object)
def _qso_changed(self, qso):
self.plain_widget.repaint()
@QtCore.Slot(int)
def _line_clicked(self, line_index):
self.notepad.move_cursor_to(line_index)
class NotepadWindow(QtGui.QWidget):
def __init__(self, notepad, parent = None):
QtGui.QWidget.__init__(self, parent)
self.notepad = notepad
self.notepad_widget = NotepadWidget(notepad)
self.line = QtGui.QLineEdit()
self.line.returnPressed.connect(self.add_line_to_notepad)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.notepad_widget)
vbox.addWidget(self.line)
self.setLayout(vbox)
self.setWindowTitle("Notepad")
def add_line_to_notepad(self):
self.notepad.add_line(self.line.text())
self.line.setText("")
def keyPressEvent(self, e):
if e.matches(QtGui.QKeySequence.MoveToNextLine):
self.notepad.move_cursor_down()
elif e.matches(QtGui.QKeySequence.MoveToPreviousLine):
self.notepad.move_cursor_up()
elif e.matches(QtGui.QKeySequence.SelectNextLine):
self.notepad.move_qso_end()
elif e.matches(QtGui.QKeySequence.SelectPreviousLine):
self.notepad.move_qso_start()
else:
e.ignore()
@QtCore.Slot(object)
def print_added_calls(call):
print("call added: " + str(call))
def main(args):
app = QtGui.QApplication(args)
notepad = Notepad()
notepad.call_added.connect(print_added_calls)
win = NotepadWindow(notepad)
win.resize(640, 480)
win.show()
result = app.exec_()
sys.exit(result)
|
|
# basic library
import os
import shutil
import math
import time
import menpo.io as mio
import menpo3d.io as m3io
import numpy as np
import h5py
import pandas as pd
from menpo.shape import ColouredTriMesh, PointCloud
from menpo.transform import Homogeneous
from menpo3d.rasterize import rasterize_mesh
from pathlib import Path
from functools import partial
from itwmm.visualize import lambertian_shading
# deepmachine
import keras
import tensorflow as tf
import deepmachine as dm
from deepmachine.utils.machine import multi_gpu_model, enqueue_generator
# flag definitions
tf.app.flags.DEFINE_string('meta_path', '/vol/atlas/homes/yz4009/databases/mesh/meta', '''path to meta files''')
from deepmachine.flags import FLAGS
def main():
dm.K.clear_session()
dm.K.set_learning_phase(1) #set learning phase
# hyperparameters
BATCH_SIZE = FLAGS.batch_size
LR = FLAGS.lr
LOGDIR = FLAGS.logdir if 'model_' in FLAGS.logdir else "{}/model_{}".format(
FLAGS.logdir, int(time.time()))
N_VERTICES = 28431
EMBEDING = 128
CAMERA_PARAM = 12
INPUT_SHAPE = 112
FILTERS = [16, 32, 32, 64]
    # global constants
n_gpu = len(FLAGS.gpu.split(','))
face_mean_crop = m3io.import_mesh(FLAGS.meta_path + '/face_mean_mesh_crop.obj')
trilist = face_mean_crop.trilist
graph_laplacians, downsampling_matrices, upsamling_matrices, adj_matrices = mio.import_pickle(
FLAGS.meta_path + '/mein3dcrop_LDUA.pkl', encoding='latin1')
def build_data():
class H5Mesh(dm.utils.Sequence):
def __init__(self, fp, dataset, batch_size=BATCH_SIZE):
self.train_mesh = h5py.File(fp, 'r')[dataset]
self.batch_size = batch_size
self.size = self.train_mesh.len()
self.indexes = list(range(self.size))
np.random.shuffle(self.indexes)
super().__init__()
def __len__(self):
return self.size // self.batch_size
def __getitem__(self, idx):
indexes = self.indexes[idx * self.batch_size: (idx + 1) * self.batch_size]
batch_sample_mesh = np.array([
self.train_mesh[i] for i in indexes
])
return [batch_sample_mesh], [batch_sample_mesh]
def on_epoch_end(self, *args, **kwargs):
np.random.shuffle(self.indexes)
return super().on_epoch_end()
class ImageSequence(dm.utils.Sequence):
def __init__(self, dirpath, batch_size=BATCH_SIZE):
self.detection = pd.read_csv('/homes/yz4009/db/face/loose_landmark_test.csv')
self.size = self.detection.shape[0]
self.image_path = Path(dirpath)
                self.batch_size = batch_size
self.indexes = list(range(self.size))
np.random.shuffle(self.indexes)
def on_epoch_end(self, *args, **kwargs):
np.random.shuffle(self.indexes)
def __len__(self):
return self.size // self.batch_size
def _preprocess(self, idx):
name, *lms5pt = self.detection.loc[idx]
lms5pt = PointCloud(np.array(lms5pt).reshape([-1,2])[:,::-1])
img = mio.import_image((self.image_path/name).with_suffix('.jpg'))
cimg, _, _ = dm.utils.crop_image_bounding_box(img, lms5pt.bounding_box(), [112, 112], base=186)
return cimg.pixels_with_channels_at_back() * 2 - 1
def __getitem__(self, idx):
image_indexes = self.indexes[
idx * self.batch_size: (idx + 1) * self.batch_size]
batch_img = [self._preprocess(i) for i in image_indexes]
return [np.array(batch_img)], [np.array(batch_img)]
return H5Mesh('/homes/yz4009/wd/gitdev/coma/data/mein3dcrop.h5', 'colour_mesh', batch_size=BATCH_SIZE), ImageSequence(FLAGS.dataset_path)
def build_model(inputs_channels=6, n_gpu=n_gpu):
# define components
## image encoder
def build_img_encoder():
input_img = dm.layers.Input(shape=[INPUT_SHAPE, INPUT_SHAPE, 3], name='input_img')
img_embedding = dm.networks.Encoder2D(
input_img, EMBEDING + CAMERA_PARAM, depth=4, nf=64)
mesh_rec_embeding = dm.layers.Lambda(lambda x: x[..., :EMBEDING])(img_embedding)
cam_rec_embeding = dm.layers.Lambda(lambda x: dm.K.tanh(x[..., EMBEDING:]) * 3)(img_embedding)
return dm.Model(input_img, [mesh_rec_embeding, cam_rec_embeding], name='image_encoder')
## mesh encoder
def build_mesh_encoder():
input_mesh = dm.layers.Input(shape=[N_VERTICES, inputs_channels], name='input_mesh')
mesh_embedding = dm.networks.MeshEncoder(
input_mesh, EMBEDING, graph_laplacians, downsampling_matrices, filter_list=FILTERS)
return dm.Model(input_mesh, mesh_embedding, name='mesh_encoder')
## common decoder
def build_decoder():
input_embeding = dm.layers.Input(shape=[EMBEDING], name='input_embeding')
output_mesh = dm.networks.MeshDecoder(
input_embeding,
inputs_channels,
graph_laplacians,
adj_matrices,
upsamling_matrices,
polynomial_order=6,
filter_list=FILTERS)
return dm.Model(input_embeding, output_mesh, name='decoder')
## renderer
def build_renderer(mesh_vertices, vertex_color, cam_parameter):
# mesh_vertices = dm.layers.Input(shape=[N_VERTICES, 3], name='mesh_vertices')
mesh_vertices.set_shape([BATCH_SIZE, N_VERTICES, 3])
# vertex_color = dm.layers.Input(shape=[N_VERTICES, 3], name='vertex_color')
vertex_color.set_shape([BATCH_SIZE, N_VERTICES, 3])
# cam_parameter = dm.layers.Input(shape=[CAMERA_PARAM], name='cam_parameter')
cam_parameter.set_shape([BATCH_SIZE, CAMERA_PARAM])
# Build vertices and normals
mesh_normals = tf.nn.l2_normalize(mesh_vertices, axis=2)
# rendering output
mesh_triangles = tf.constant(trilist, dtype=tf.int32)
# camera position:
eye = cam_parameter[...,:3]
center = cam_parameter[...,3:6]
world_up = cam_parameter[...,6:9]
light_positions = cam_parameter[:,None,9:12]
ambient_colors = tf.ones([BATCH_SIZE, 3], dtype=tf.float32) * 0.1
light_intensities = tf.ones([BATCH_SIZE, 1, 3], dtype=tf.float32)
render_mesh = dm.layers.Renderer(
# image size
image_width=INPUT_SHAPE,
image_height=INPUT_SHAPE,
# mesh definition
triangles=mesh_triangles,
normals=mesh_normals,
# colour definition
diffuse_colors=vertex_color,
ambient_color=ambient_colors,
# camera definition
camera_position=eye,
camera_lookat=center,
camera_up=world_up,
# light definition
light_positions=light_positions,
light_intensities=light_intensities,
)(mesh_vertices)
render_mesh = dm.layers.Lambda(lambda x: x[..., :3])(render_mesh)
return render_mesh
# Mesh AE stream
## define inputs
input_mesh_stream = dm.layers.Input(shape=[N_VERTICES, 6], name='input_mesh_stream')
## define components
mesh_encoder_model = build_mesh_encoder()
decoder_model = build_decoder()
## define connections
output_mesh = decoder_model(mesh_encoder_model(input_mesh_stream))
mesh_ae_model = dm.DeepMachine(
inputs=input_mesh_stream,
outputs=output_mesh,
name='MeshStream'
)
## multi gpu support
if n_gpu > 1:
mesh_ae_model = multi_gpu_model(mesh_ae_model, gpus=n_gpu)
## compile mesh stream
mesh_ae_model.compile(
optimizer=dm.optimizers.Adam(lr=LR),
loss=['mae']
)
## set trainable
mesh_ae_model.trainable = False
decoder_model.trainable = False
mesh_encoder_model.trainable = False
# Render Stream
## define inputs
input_image_stream = dm.layers.Input(shape=[INPUT_SHAPE, INPUT_SHAPE, 3], name='input_image_stream')
## define components
img_encoder_model = build_img_encoder()
## define connections
rec_mesh_emb, rec_cam_emb = img_encoder_model(input_image_stream)
mesh_with_colour = decoder_model(rec_mesh_emb)
mesh_vert = dm.layers.Lambda(lambda x: x[..., :3])(mesh_with_colour)
mesh_vert.set_shape([BATCH_SIZE, N_VERTICES, 3])
mesh_colour = dm.layers.Lambda(lambda x: x[..., 3:])(mesh_with_colour)
mesh_colour.set_shape([BATCH_SIZE, N_VERTICES, 3])
rec_render = build_renderer(
mesh_vert,
mesh_colour,
rec_cam_emb
)
render_model = dm.DeepMachine(
inputs=input_image_stream,
outputs=[rec_render, mesh_with_colour],
name='ImageStream'
)
## multi gpu support
if n_gpu > 1:
render_model = multi_gpu_model(render_model, gpus=n_gpu)
## compile render stream
render_model.compile(
optimizer=dm.optimizers.Adam(lr=LR),
loss=['mae', dm.losses.dummy]
)
return render_model, mesh_ae_model, img_encoder_model
def train_op(models, data, i_epoch, i_batch, epoch_end, training_history=None, **kwargs):
sess = dm.K.get_session()
image_stream, mesh_stream, img_encoder = models
[train_mesh, train_image], _ = dm.engine.training.generator_adapter(data)
# ----------------------
# Train Mesh Stream
# ----------------------
loss_mesh = mesh_stream.train_on_batch([train_mesh], [train_mesh])
# ------------------
# Train Render Stream
# ------------------
loss_img = image_stream.train_on_batch([train_image], [train_image, train_mesh])
logs = dm.utils.Summary(
{
"losses/loss_mesh": loss_mesh,
"losses/loss_img": loss_img[0],
"learning_rate/mesh": mesh_stream.optimizer.lr.eval(sess),
"learning_rate/img": image_stream.optimizer.lr.eval(sess),
}
)
if epoch_end:
ae_mesh = mesh_stream.predict(train_mesh)
rec_imgs, rec_mesh = image_stream.predict(train_image)
_, cam_params = img_encoder.predict(train_image)
logs.update_images({
'image/input': train_image,
'image/render': rec_imgs,
'image/mesh': dm.utils.mesh.render_meshes(rec_mesh[:4], trilist, res=INPUT_SHAPE),
'mesh/input': dm.utils.mesh.render_meshes(train_mesh[:4], trilist, res=INPUT_SHAPE),
'mesh/ae': dm.utils.mesh.render_meshes(ae_mesh[:4], trilist, res=INPUT_SHAPE),
})
logs.update_scalars(
{'cam_params/{}'.format(idx_p): p for idx_p, p in enumerate(cam_params[0])}
)
return logs
# prepare data
train_generator = dm.data.generator.MergeGenerators(*build_data())
train_queue = enqueue_generator(
train_generator, workers=FLAGS.no_thread)
# prepare model
image_stream, mesh_stream, img_encoder = build_model()
mesh_lr_decay = dm.callbacks.LearningRateScheduler(
schedule=lambda epoch: LR * FLAGS.lr_decay ** epoch)
mesh_lr_decay.set_model(mesh_stream)
image_lr_decay = dm.callbacks.LearningRateScheduler(
schedule=lambda epoch: LR * FLAGS.lr_decay ** epoch)
image_lr_decay.set_model(image_stream)
# training
history = dm.engine.training.train_monitor(
[image_stream, mesh_stream, img_encoder],
train_queue,
train_op,
epochs=200, step_per_epoch=len(train_generator),
callbacks=[
train_generator,
mesh_lr_decay, image_lr_decay
],
verbose=FLAGS.verbose,
logdir=LOGDIR,
)
if __name__ == '__main__':
main()
|
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Customize Form is a Single DocType that acts as a friendlier front end to
Property Setters, providing a better UI from the user's perspective.
"""
import frappe, json
from frappe import _
from frappe.model.document import Document
from frappe.core.doctype.doctype.doctype import validate_fields_for_doctype
class CustomizeForm(Document):
doctype_properties = {
'search_fields': 'Data',
'sort_field': 'Data',
'sort_order': 'Data',
'default_print_format': 'Data',
'read_only_onload': 'Check',
'allow_attach': 'Check',
'allow_copy': 'Check',
'max_attachments': 'Int'
}
docfield_properties = {
'idx': 'Int',
'label': 'Data',
'fieldtype': 'Select',
'options': 'Text',
'permlevel': 'Int',
'width': 'Data',
'print_width': 'Data',
'reqd': 'Check',
'ignore_user_permissions': 'Check',
'in_filter': 'Check',
'in_list_view': 'Check',
'hidden': 'Check',
'print_hide': 'Check',
'report_hide': 'Check',
'allow_on_submit': 'Check',
'depends_on': 'Data',
'description': 'Text',
'default': 'Text'
}
allowed_fieldtype_change = (('Currency', 'Float', 'Percent'), ('Small Text', 'Data'),
('Text', 'Text Editor', 'Code'), ('Data', 'Select'), ('Text', 'Small Text'))
def on_update(self):
frappe.db.sql("delete from tabSingles where doctype='Customize Form'")
frappe.db.sql("delete from `tabCustomize Form Field`")
def fetch_to_customize(self):
self.clear_existing_doc()
if not self.doc_type:
return
meta = frappe.get_meta(self.doc_type)
# doctype properties
for property in self.doctype_properties:
self.set(property, meta.get(property))
for d in meta.get("fields"):
new_d = {"fieldname": d.fieldname, "is_custom_field": d.get("is_custom_field"), "name": d.name}
for property in self.docfield_properties:
new_d[property] = d.get(property)
self.append("customize_form_fields", new_d)
# NOTE doc is sent to clientside by run_method
def clear_existing_doc(self):
doc_type = self.doc_type
for fieldname in self.meta.get_valid_columns():
self.set(fieldname, None)
for df in self.meta.get_table_fields():
self.set(df.fieldname, [])
self.doc_type = doc_type
self.name = "Customize Form"
def save_customization(self):
if not self.doc_type:
return
self.set_property_setters()
self.update_custom_fields()
self.set_idx_property_setter()
validate_fields_for_doctype(self.doc_type)
frappe.msgprint(_("{0} updated").format(_(self.doc_type)))
frappe.clear_cache(doctype=self.doc_type)
self.fetch_to_customize()
def set_property_setters(self):
meta = frappe.get_meta(self.doc_type)
# doctype property setters
for property in self.doctype_properties:
if self.get(property) != meta.get(property):
self.make_property_setter(property=property, value=self.get(property),
property_type=self.doctype_properties[property])
for df in self.get("customize_form_fields"):
if df.get("__islocal"):
continue
meta_df = meta.get("fields", {"fieldname": df.fieldname})
if not meta_df or meta_df[0].get("is_custom_field"):
continue
for property in self.docfield_properties:
if df.get(property) != meta_df[0].get(property):
if property == "fieldtype":
self.validate_fieldtype_change(df, meta_df[0].get(property), df.get(property))
self.make_property_setter(property=property, value=df.get(property),
property_type=self.docfield_properties[property], fieldname=df.fieldname)
def update_custom_fields(self):
for df in self.get("customize_form_fields"):
if df.get("__islocal"):
self.add_custom_field(df)
else:
self.update_in_custom_field(df)
self.delete_custom_fields()
def add_custom_field(self, df):
d = frappe.new_doc("Custom Field")
d.dt = self.doc_type
for property in self.docfield_properties:
d.set(property, df.get(property))
d.insert()
df.fieldname = d.fieldname
def update_in_custom_field(self, df):
meta = frappe.get_meta(self.doc_type)
meta_df = meta.get("fields", {"fieldname": df.fieldname})
if not (meta_df and meta_df[0].get("is_custom_field")):
return
custom_field = frappe.get_doc("Custom Field", meta_df[0].name)
changed = False
for property in self.docfield_properties:
if df.get(property) != custom_field.get(property):
if property == "fieldtype":
self.validate_fieldtype_change(df, meta_df[0].get(property), df.get(property))
custom_field.set(property, df.get(property))
changed = True
if changed:
custom_field.save()
def delete_custom_fields(self):
meta = frappe.get_meta(self.doc_type)
fields_to_remove = (set([df.fieldname for df in meta.get("fields")])
- set(df.fieldname for df in self.get("customize_form_fields")))
for fieldname in fields_to_remove:
df = meta.get("fields", {"fieldname": fieldname})[0]
if df.get("is_custom_field"):
frappe.delete_doc("Custom Field", df.name)
def set_idx_property_setter(self):
meta = frappe.get_meta(self.doc_type)
field_order_has_changed = [df.fieldname for df in meta.get("fields")] != \
[d.fieldname for d in self.get("customize_form_fields")]
if field_order_has_changed:
_idx = []
for df in sorted(self.get("customize_form_fields"), key=lambda x: x.idx):
_idx.append(df.fieldname)
self.make_property_setter(property="_idx", value=json.dumps(_idx), property_type="Text")
def make_property_setter(self, property, value, property_type, fieldname=None):
self.delete_existing_property_setter(property, fieldname)
property_value = self.get_existing_property_value(property, fieldname)
if property_value==value:
return
# create a new property setter
        # ignore validation because it will be done at the end
frappe.make_property_setter({
"doctype": self.doc_type,
"doctype_or_field": "DocField" if fieldname else "DocType",
"fieldname": fieldname,
"property": property,
"value": value,
"property_type": property_type
}, ignore_validate=True)
def delete_existing_property_setter(self, property, fieldname=None):
# first delete existing property setter
existing_property_setter = frappe.db.get_value("Property Setter", {"doc_type": self.doc_type,
"property": property, "field_name['']": fieldname or ''})
if existing_property_setter:
frappe.delete_doc("Property Setter", existing_property_setter)
def get_existing_property_value(self, property_name, fieldname=None):
# check if there is any need to make property setter!
if fieldname:
property_value = frappe.db.get_value("DocField", {"parent": self.doc_type,
"fieldname": fieldname}, property_name)
else:
try:
property_value = frappe.db.get_value("DocType", self.doc_type, property_name)
            except Exception as e:
if e.args[0]==1054:
property_value = None
else:
raise
return property_value
def validate_fieldtype_change(self, df, old_value, new_value):
allowed = False
for allowed_changes in self.allowed_fieldtype_change:
if (old_value in allowed_changes and new_value in allowed_changes):
allowed = True
if not allowed:
frappe.throw(_("Fieldtype cannot be changed from {0} to {1} in row {2}").format(old_value, new_value, df.idx))
def reset_to_defaults(self):
if not self.doc_type:
return
frappe.db.sql("""delete from `tabProperty Setter` where doc_type=%s
and ifnull(field_name, '')!='naming_series'""", self.doc_type)
frappe.clear_cache(doctype=self.doc_type)
self.fetch_to_customize()
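# Illustrative sketch (not part of the app): how the allowed_fieldtype_change
# tuples are interpreted by validate_fieldtype_change -- a change is permitted
# only when the old and the new fieldtype appear together in the same tuple.
def _fieldtype_change_allowed(old_value, new_value, allowed=CustomizeForm.allowed_fieldtype_change):
    return any(old_value in group and new_value in group for group in allowed)
# e.g. _fieldtype_change_allowed("Currency", "Float") -> True
#      _fieldtype_change_allowed("Currency", "Data")  -> False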
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Main entry point into the assignment service."""
import abc
import six
from keystone import clean
from keystone.common import cache
from keystone.common import dependency
from keystone.common import driver_hints
from keystone.common import manager
from keystone import config
from keystone import exception
from keystone import notifications
from keystone.openstack.common.gettextutils import _
from keystone.openstack.common import log
CONF = config.CONF
LOG = log.getLogger(__name__)
SHOULD_CACHE = cache.should_cache_fn('assignment')
# NOTE(blk-u): The config option is not available at import time.
EXPIRATION_TIME = lambda: CONF.assignment.cache_time
def calc_default_domain():
return {'description':
(u'Owns users and tenants (i.e. projects)'
' available on Identity API v2.'),
'enabled': True,
'id': CONF.identity.default_domain_id,
'name': u'Default'}
@dependency.provider('assignment_api')
@dependency.optional('revoke_api')
@dependency.requires('credential_api', 'identity_api', 'token_api')
class Manager(manager.Manager):
"""Default pivot point for the Assignment backend.
See :mod:`keystone.common.manager.Manager` for more details on how this
dynamically calls the backend.
assignment.Manager() and identity.Manager() have a circular dependency.
The late import works around this. The if block prevents creation of the
api object by both managers.
"""
_PROJECT = 'project'
def __init__(self):
assignment_driver = CONF.assignment.driver
if assignment_driver is None:
identity_driver = dependency.REGISTRY['identity_api'].driver
assignment_driver = identity_driver.default_assignment_driver()
super(Manager, self).__init__(assignment_driver)
@notifications.created(_PROJECT)
def create_project(self, tenant_id, tenant):
tenant = tenant.copy()
tenant.setdefault('enabled', True)
tenant['enabled'] = clean.project_enabled(tenant['enabled'])
tenant.setdefault('description', '')
ret = self.driver.create_project(tenant_id, tenant)
if SHOULD_CACHE(ret):
self.get_project.set(ret, self, tenant_id)
self.get_project_by_name.set(ret, self, ret['name'],
ret['domain_id'])
return ret
@notifications.disabled(_PROJECT, public=False)
def _disable_project(self, tenant_id):
return self.token_api.delete_tokens_for_users(
self.list_user_ids_for_project(tenant_id),
project_id=tenant_id)
@notifications.updated(_PROJECT)
def update_project(self, tenant_id, tenant):
tenant = tenant.copy()
if 'enabled' in tenant:
tenant['enabled'] = clean.project_enabled(tenant['enabled'])
if not tenant.get('enabled', True):
self._disable_project(tenant_id)
ret = self.driver.update_project(tenant_id, tenant)
self.get_project.invalidate(self, tenant_id)
self.get_project_by_name.invalidate(self, ret['name'],
ret['domain_id'])
return ret
@notifications.deleted(_PROJECT)
def delete_project(self, tenant_id):
project = self.driver.get_project(tenant_id)
user_ids = self.list_user_ids_for_project(tenant_id)
self.token_api.delete_tokens_for_users(user_ids, project_id=tenant_id)
ret = self.driver.delete_project(tenant_id)
self.get_project.invalidate(self, tenant_id)
self.get_project_by_name.invalidate(self, project['name'],
project['domain_id'])
self.credential_api.delete_credentials_for_project(tenant_id)
return ret
def get_roles_for_user_and_project(self, user_id, tenant_id):
"""Get the roles associated with a user within given project.
This includes roles directly assigned to the user on the
project, as well as those by virtue of group membership. If
the OS-INHERIT extension is enabled, then this will also
include roles inherited from the domain.
:returns: a list of role ids.
:raises: keystone.exception.UserNotFound,
keystone.exception.ProjectNotFound
"""
def _get_group_project_roles(user_id, project_ref):
role_list = []
group_refs = self.identity_api.list_groups_for_user(user_id)
for x in group_refs:
try:
metadata_ref = self._get_metadata(
group_id=x['id'], tenant_id=project_ref['id'])
role_list += self._roles_from_role_dicts(
metadata_ref.get('roles', {}), False)
except exception.MetadataNotFound:
# no group grant, skip
pass
if CONF.os_inherit.enabled:
# Now get any inherited group roles for the owning domain
try:
metadata_ref = self._get_metadata(
group_id=x['id'],
domain_id=project_ref['domain_id'])
role_list += self._roles_from_role_dicts(
metadata_ref.get('roles', {}), True)
except (exception.MetadataNotFound,
exception.NotImplemented):
pass
return role_list
def _get_user_project_roles(user_id, project_ref):
role_list = []
try:
metadata_ref = self._get_metadata(user_id=user_id,
tenant_id=project_ref['id'])
role_list = self._roles_from_role_dicts(
metadata_ref.get('roles', {}), False)
except exception.MetadataNotFound:
pass
if CONF.os_inherit.enabled:
# Now get any inherited roles for the owning domain
try:
metadata_ref = self._get_metadata(
user_id=user_id, domain_id=project_ref['domain_id'])
role_list += self._roles_from_role_dicts(
metadata_ref.get('roles', {}), True)
except (exception.MetadataNotFound, exception.NotImplemented):
pass
return role_list
project_ref = self.get_project(tenant_id)
user_role_list = _get_user_project_roles(user_id, project_ref)
group_role_list = _get_group_project_roles(user_id, project_ref)
# Use set() to process the list to remove any duplicates
return list(set(user_role_list + group_role_list))
def get_roles_for_user_and_domain(self, user_id, domain_id):
"""Get the roles associated with a user within given domain.
:returns: a list of role ids.
:raises: keystone.exception.UserNotFound,
keystone.exception.DomainNotFound
"""
def _get_group_domain_roles(user_id, domain_id):
role_list = []
group_refs = self.identity_api.list_groups_for_user(user_id)
for x in group_refs:
try:
metadata_ref = self._get_metadata(group_id=x['id'],
domain_id=domain_id)
role_list += self._roles_from_role_dicts(
metadata_ref.get('roles', {}), False)
except (exception.MetadataNotFound, exception.NotImplemented):
# MetadataNotFound implies no group grant, so skip.
# Ignore NotImplemented since not all backends support
# domains.
pass
return role_list
def _get_user_domain_roles(user_id, domain_id):
metadata_ref = {}
try:
metadata_ref = self._get_metadata(user_id=user_id,
domain_id=domain_id)
except (exception.MetadataNotFound, exception.NotImplemented):
# MetadataNotFound implies no user grants.
# Ignore NotImplemented since not all backends support
# domains
pass
return self._roles_from_role_dicts(
metadata_ref.get('roles', {}), False)
self.get_domain(domain_id)
user_role_list = _get_user_domain_roles(user_id, domain_id)
group_role_list = _get_group_domain_roles(user_id, domain_id)
# Use set() to process the list to remove any duplicates
return list(set(user_role_list + group_role_list))
def add_user_to_project(self, tenant_id, user_id):
"""Add user to a tenant by creating a default role relationship.
:raises: keystone.exception.ProjectNotFound,
keystone.exception.UserNotFound
"""
try:
self.driver.add_role_to_user_and_project(
user_id,
tenant_id,
config.CONF.member_role_id)
except exception.RoleNotFound:
LOG.info(_("Creating the default role %s "
"because it does not exist."),
config.CONF.member_role_id)
role = {'id': CONF.member_role_id,
'name': CONF.member_role_name}
self.driver.create_role(config.CONF.member_role_id, role)
#now that default role exists, the add should succeed
self.driver.add_role_to_user_and_project(
user_id,
tenant_id,
config.CONF.member_role_id)
def remove_user_from_project(self, tenant_id, user_id):
"""Remove user from a tenant
:raises: keystone.exception.ProjectNotFound,
keystone.exception.UserNotFound
"""
roles = self.get_roles_for_user_and_project(user_id, tenant_id)
if not roles:
raise exception.NotFound(tenant_id)
for role_id in roles:
try:
self.driver.remove_role_from_user_and_project(user_id,
tenant_id,
role_id)
if self.revoke_api:
self.revoke_api.revoke_by_grant(role_id, user_id=user_id,
project_id=tenant_id)
except exception.RoleNotFound:
LOG.debug(_("Removing role %s failed because it does not "
"exist."),
role_id)
# TODO(henry-nash): We might want to consider list limiting this at some
# point in the future.
def list_projects_for_user(self, user_id, hints=None):
# NOTE(henry-nash): In order to get a complete list of user projects,
# the driver will need to look at group assignments. To avoid cross
# calling between the assignment and identity driver we get the group
# list here and pass it in. The rest of the detailed logic of listing
# projects for a user is pushed down into the driver to enable
# optimization with the various backend technologies (SQL, LDAP etc.).
group_ids = [x['id'] for
x in self.identity_api.list_groups_for_user(user_id)]
return self.driver.list_projects_for_user(
user_id, group_ids, hints or driver_hints.Hints())
@cache.on_arguments(should_cache_fn=SHOULD_CACHE,
expiration_time=EXPIRATION_TIME)
def get_domain(self, domain_id):
return self.driver.get_domain(domain_id)
@cache.on_arguments(should_cache_fn=SHOULD_CACHE,
expiration_time=EXPIRATION_TIME)
def get_domain_by_name(self, domain_name):
return self.driver.get_domain_by_name(domain_name)
@notifications.created('domain')
def create_domain(self, domain_id, domain):
ret = self.driver.create_domain(domain_id, domain)
if SHOULD_CACHE(ret):
self.get_domain.set(ret, self, domain_id)
self.get_domain_by_name.set(ret, self, ret['name'])
return ret
@manager.response_truncated
def list_domains(self, hints=None):
return self.driver.list_domains(hints or driver_hints.Hints())
@notifications.disabled('domain', public=False)
def _disable_domain(self, domain_id):
self.token_api.delete_tokens_for_domain(domain_id)
@notifications.updated('domain')
def update_domain(self, domain_id, domain):
ret = self.driver.update_domain(domain_id, domain)
# disable owned users & projects when the API user specifically set
# enabled=False
if not domain.get('enabled', True):
self._disable_domain(domain_id)
self.get_domain.invalidate(self, domain_id)
self.get_domain_by_name.invalidate(self, ret['name'])
return ret
@notifications.deleted('domain')
def delete_domain(self, domain_id):
# explicitly forbid deleting the default domain (this should be a
# carefully orchestrated manual process involving configuration
# changes, etc)
if domain_id == CONF.identity.default_domain_id:
raise exception.ForbiddenAction(action=_('delete the default '
'domain'))
domain = self.driver.get_domain(domain_id)
# To help avoid inadvertent deletes, we insist that the domain
# has been previously disabled. This also prevents a user deleting
# their own domain since, once it is disabled, they won't be able
# to get a valid token to issue this delete.
if domain['enabled']:
raise exception.ForbiddenAction(
action=_('cannot delete a domain that is enabled, '
'please disable it first.'))
self._delete_domain_contents(domain_id)
self.driver.delete_domain(domain_id)
self.get_domain.invalidate(self, domain_id)
self.get_domain_by_name.invalidate(self, domain['name'])
def _delete_domain_contents(self, domain_id):
"""Delete the contents of a domain.
Before we delete a domain, we need to remove all the entities
that are owned by it, i.e. Users, Groups & Projects. To do this we
call the respective delete functions for these entities, which are
themselves responsible for deleting any credentials and role grants
associated with them as well as revoking any relevant tokens.
The order we delete entities is also important since some types
of backend may need to maintain referential integrity
throughout, and many of the entities have relationship with each
other. The following deletion order is therefore used:
Projects: Reference user and groups for grants
Groups: Reference users for membership and domains for grants
Users: Reference domains for grants
"""
user_refs = self.identity_api.list_users()
proj_refs = self.list_projects()
group_refs = self.identity_api.list_groups()
# First delete the projects themselves
for project in proj_refs:
if project['domain_id'] == domain_id:
try:
self.delete_project(project['id'])
except exception.ProjectNotFound:
LOG.debug(_('Project %(projectid)s not found when '
'deleting domain contents for %(domainid)s, '
'continuing with cleanup.'),
{'projectid': project['id'],
'domainid': domain_id})
for group in group_refs:
# Cleanup any existing groups.
if group['domain_id'] == domain_id:
try:
self.identity_api.delete_group(group['id'],
domain_scope=domain_id)
except exception.GroupNotFound:
LOG.debug(_('Group %(groupid)s not found when deleting '
'domain contents for %(domainid)s, continuing '
'with cleanup.'),
{'groupid': group['id'], 'domainid': domain_id})
# And finally, delete the users themselves
for user in user_refs:
if user['domain_id'] == domain_id:
try:
self.identity_api.delete_user(user['id'],
domain_scope=domain_id)
except exception.UserNotFound:
LOG.debug(_('User %(userid)s not found when '
'deleting domain contents for %(domainid)s, '
'continuing with cleanup.'),
{'userid': user['id'],
'domainid': domain_id})
# add sid part
@notifications.created('sidinfo')
def create_sidinfo(self, sidinfo):
sidinfo.setdefault('flag', True)
ret = self.driver.create_sidinfo(sidinfo)
return ret
def get_sidinfo(self, sid_id):
return self.driver.get_sidinfo(sid_id)
@notifications.deleted('sidinfo')
def delete_sidinfo(self, sid_id):
self.driver.delete_sidinfo(sid_id)
# end of sid part
@manager.response_truncated
def list_projects(self, hints=None):
return self.driver.list_projects(hints or driver_hints.Hints())
# NOTE(henry-nash): list_projects_in_domain is actually an internal method
# and not exposed via the API. Therefore there is no need to support
# driver hints for it.
def list_projects_in_domain(self, domain_id):
return self.driver.list_projects_in_domain(domain_id)
def list_user_projects(self, user_id, hints=None):
return self.driver.list_user_projects(
user_id, hints or driver_hints.Hints())
@cache.on_arguments(should_cache_fn=SHOULD_CACHE,
expiration_time=EXPIRATION_TIME)
def get_project(self, project_id):
return self.driver.get_project(project_id)
@cache.on_arguments(should_cache_fn=SHOULD_CACHE,
expiration_time=EXPIRATION_TIME)
def get_project_by_name(self, tenant_name, domain_id):
return self.driver.get_project_by_name(tenant_name, domain_id)
@cache.on_arguments(should_cache_fn=SHOULD_CACHE,
expiration_time=EXPIRATION_TIME)
def get_role(self, role_id):
return self.driver.get_role(role_id)
@notifications.created('role')
def create_role(self, role_id, role):
ret = self.driver.create_role(role_id, role)
if SHOULD_CACHE(ret):
self.get_role.set(ret, self, role_id)
return ret
@manager.response_truncated
def list_roles(self, hints=None):
return self.driver.list_roles(hints or driver_hints.Hints())
@notifications.updated('role')
def update_role(self, role_id, role):
ret = self.driver.update_role(role_id, role)
self.get_role.invalidate(self, role_id)
return ret
@notifications.deleted('role')
def delete_role(self, role_id):
try:
self._delete_tokens_for_role(role_id)
except exception.NotImplemented:
# FIXME(morganfainberg): Not all backends (ldap) implement
# `list_role_assignments_for_role` which would have previously
            # caused a NotImplemented error to be raised when called through
            # the controller. Now the error or proper action will always come from
# the `delete_role` method logic. Work needs to be done to make
# the behavior between drivers consistent (capable of revoking
# tokens for the same circumstances). This is related to the bug
# https://bugs.launchpad.net/keystone/+bug/1221805
pass
self.driver.delete_role(role_id)
self.get_role.invalidate(self, role_id)
def list_role_assignments_for_role(self, role_id=None):
# NOTE(henry-nash): Currently the efficiency of the key driver
# implementation (SQL) of list_role_assignments is severely hampered by
# the existence of the multiple grant tables - hence there is little
# advantage in pushing the logic of this method down into the driver.
# Once the single assignment table is implemented, then this situation
# will be different, and this method should have its own driver
# implementation.
return [r for r in self.driver.list_role_assignments()
if r['role_id'] == role_id]
def remove_role_from_user_and_project(self, user_id, tenant_id, role_id):
self.driver.remove_role_from_user_and_project(user_id, tenant_id,
role_id)
if CONF.token.revoke_by_id:
self.token_api.delete_tokens_for_user(user_id)
if self.revoke_api:
self.revoke_api.revoke_by_grant(role_id, user_id=user_id,
project_id=tenant_id)
def delete_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False):
user_ids = []
if group_id is None:
if self.revoke_api:
self.revoke_api.revoke_by_grant(user_id=user_id,
role_id=role_id,
domain_id=domain_id,
project_id=project_id)
else:
try:
# NOTE(morganfainberg): The user ids are the important part
# for invalidating tokens below, so extract them here.
for user in self.identity_api.list_users_in_group(group_id,
domain_id):
if user['id'] != user_id:
user_ids.append(user['id'])
if self.revoke_api:
self.revoke_api.revoke_by_grant(
user_id=user['id'], role_id=role_id,
domain_id=domain_id, project_id=project_id)
except exception.GroupNotFound:
LOG.debug(_('Group %s not found, no tokens to invalidate.'),
group_id)
self.driver.delete_grant(role_id, user_id, group_id, domain_id,
project_id, inherited_to_projects)
if user_id is not None:
user_ids.append(user_id)
self.token_api.delete_tokens_for_users(user_ids)
def _delete_tokens_for_role(self, role_id):
assignments = self.list_role_assignments_for_role(role_id=role_id)
# Iterate over the assignments for this role and build the list of
# user or user+project IDs for the tokens we need to delete
user_ids = set()
user_and_project_ids = list()
for assignment in assignments:
# If we have a project assignment, then record both the user and
# project IDs so we can target the right token to delete. If it is
# a domain assignment, we might as well kill all the tokens for
# the user, since in the vast majority of cases all the tokens
# for a user will be within one domain anyway, so not worth
# trying to delete tokens for each project in the domain.
if 'user_id' in assignment:
if 'project_id' in assignment:
user_and_project_ids.append(
(assignment['user_id'], assignment['project_id']))
elif 'domain_id' in assignment:
user_ids.add(assignment['user_id'])
elif 'group_id' in assignment:
# Add in any users for this group, being tolerant of any
# cross-driver database integrity errors.
try:
users = self.identity_api.list_users_in_group(
assignment['group_id'])
except exception.GroupNotFound:
# Ignore it, but log a debug message
if 'project_id' in assignment:
target = _('Project (%s)') % assignment['project_id']
elif 'domain_id' in assignment:
target = _('Domain (%s)') % assignment['domain_id']
else:
target = _('Unknown Target')
msg = _('Group (%(group)s), referenced in assignment '
'for %(target)s, not found - ignoring.')
LOG.debug(msg, {'group': assignment['group_id'],
'target': target})
continue
if 'project_id' in assignment:
for user in users:
user_and_project_ids.append(
(user['id'], assignment['project_id']))
elif 'domain_id' in assignment:
for user in users:
user_ids.add(user['id'])
# Now process the built up lists. Before issuing calls to delete any
# tokens, let's try and minimize the number of calls by pruning out
# any user+project deletions where a general token deletion for that
# same user is also planned.
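        # For example (hypothetical IDs): if user 'u1' holds both a
        # domain-level assignment and a project assignment on 'p1', the
        # (u1, p1) pair is pruned here because delete_tokens_for_users()
        # below already covers all of u1's tokens.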
user_and_project_ids_to_action = []
for user_and_project_id in user_and_project_ids:
if user_and_project_id[0] not in user_ids:
user_and_project_ids_to_action.append(user_and_project_id)
self.token_api.delete_tokens_for_users(user_ids)
for user_id, project_id in user_and_project_ids_to_action:
self.token_api.delete_tokens_for_user(user_id, project_id)
@six.add_metaclass(abc.ABCMeta)
class Driver(object):
def _role_to_dict(self, role_id, inherited):
role_dict = {'id': role_id}
if inherited:
role_dict['inherited_to'] = 'projects'
return role_dict
def _roles_from_role_dicts(self, dict_list, inherited):
role_list = []
for d in dict_list:
if ((not d.get('inherited_to') and not inherited) or
(d.get('inherited_to') == 'projects' and inherited)):
role_list.append(d['id'])
return role_list
def _add_role_to_role_dicts(self, role_id, inherited, dict_list,
allow_existing=True):
# There is a difference in error semantics when trying to
# assign a role that already exists between the coded v2 and v3
# API calls. v2 will error if the assignment already exists,
# while v3 is silent. Setting the 'allow_existing' parameter
# appropriately lets this call be used for both.
role_set = set([frozenset(r.items()) for r in dict_list])
key = frozenset(self._role_to_dict(role_id, inherited).items())
if not allow_existing and key in role_set:
raise KeyError
role_set.add(key)
return [dict(r) for r in role_set]
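    # Illustrative example (hypothetical data): starting from
    # dict_list = [{'id': 'role-a'}], _add_role_to_role_dicts('role-b', True,
    # dict_list) returns both {'id': 'role-a'} and
    # {'id': 'role-b', 'inherited_to': 'projects'} (in unspecified order,
    # since a set is used internally); repeating the call with
    # allow_existing=False raises KeyError, which is how the v2 "already
    # assigned" error surfaces.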
def _remove_role_from_role_dicts(self, role_id, inherited, dict_list):
role_set = set([frozenset(r.items()) for r in dict_list])
role_set.remove(frozenset(self._role_to_dict(role_id,
inherited).items()))
return [dict(r) for r in role_set]
def _get_list_limit(self):
return CONF.assignment.list_limit or CONF.list_limit
@abc.abstractmethod
def get_project_by_name(self, tenant_name, domain_id):
"""Get a tenant by name.
:returns: tenant_ref
:raises: keystone.exception.ProjectNotFound
"""
raise exception.NotImplemented()
@abc.abstractmethod
def list_user_ids_for_project(self, tenant_id):
"""Lists all user IDs with a role assignment in the specified project.
:returns: a list of user_ids or an empty set.
:raises: keystone.exception.ProjectNotFound
"""
raise exception.NotImplemented()
@abc.abstractmethod
def add_role_to_user_and_project(self, user_id, tenant_id, role_id):
"""Add a role to a user within given tenant.
:raises: keystone.exception.UserNotFound,
keystone.exception.ProjectNotFound,
keystone.exception.RoleNotFound
"""
raise exception.NotImplemented()
@abc.abstractmethod
def remove_role_from_user_and_project(self, user_id, tenant_id, role_id):
"""Remove a role from a user within given tenant.
:raises: keystone.exception.UserNotFound,
keystone.exception.ProjectNotFound,
keystone.exception.RoleNotFound
"""
raise exception.NotImplemented()
# assignment/grant crud
@abc.abstractmethod
def create_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False):
"""Creates a new assignment/grant.
If the assignment is to a domain, then optionally it may be
specified as inherited to owned projects (this requires
the OS-INHERIT extension to be enabled).
:raises: keystone.exception.DomainNotFound,
keystone.exception.ProjectNotFound,
keystone.exception.RoleNotFound
"""
raise exception.NotImplemented()
@abc.abstractmethod
def list_grants(self, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False):
"""Lists assignments/grants.
:raises: keystone.exception.UserNotFound,
keystone.exception.GroupNotFound,
keystone.exception.ProjectNotFound,
keystone.exception.DomainNotFound,
keystone.exception.RoleNotFound
"""
raise exception.NotImplemented()
@abc.abstractmethod
def get_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False):
"""Lists assignments/grants.
:raises: keystone.exception.UserNotFound,
keystone.exception.GroupNotFound,
keystone.exception.ProjectNotFound,
keystone.exception.DomainNotFound,
keystone.exception.RoleNotFound
"""
raise exception.NotImplemented()
@abc.abstractmethod
def delete_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False):
"""Deletes assignments/grants.
:raises: keystone.exception.ProjectNotFound,
keystone.exception.DomainNotFound,
keystone.exception.RoleNotFound
"""
raise exception.NotImplemented()
@abc.abstractmethod
def list_role_assignments(self):
raise exception.NotImplemented()
# domain crud
@abc.abstractmethod
def create_domain(self, domain_id, domain):
"""Creates a new domain.
:raises: keystone.exception.Conflict
"""
raise exception.NotImplemented()
@abc.abstractmethod
def list_domains(self, hints):
"""List domains in the system.
:param hints: filter hints which the driver should
implement if at all possible.
:returns: a list of domain_refs or an empty list.
"""
raise exception.NotImplemented()
@abc.abstractmethod
def get_domain(self, domain_id):
"""Get a domain by ID.
:returns: domain_ref
:raises: keystone.exception.DomainNotFound
"""
raise exception.NotImplemented()
@abc.abstractmethod
def get_domain_by_name(self, domain_name):
"""Get a domain by name.
:returns: domain_ref
:raises: keystone.exception.DomainNotFound
"""
raise exception.NotImplemented()
@abc.abstractmethod
def update_domain(self, domain_id, domain):
"""Updates an existing domain.
:raises: keystone.exception.DomainNotFound,
keystone.exception.Conflict
"""
raise exception.NotImplemented()
@abc.abstractmethod
def delete_domain(self, domain_id):
"""Deletes an existing domain.
:raises: keystone.exception.DomainNotFound
"""
raise exception.NotImplemented()
# project crud
@abc.abstractmethod
def create_project(self, project_id, project):
"""Creates a new project.
:raises: keystone.exception.Conflict
"""
raise exception.NotImplemented()
@abc.abstractmethod
def list_projects(self, hints):
"""List projects in the system.
:param hints: filter hints which the driver should
implement if at all possible.
:returns: a list of project_refs or an empty list.
"""
raise exception.NotImplemented()
@abc.abstractmethod
def list_projects_in_domain(self, domain_id):
"""List projects in the domain.
:param domain_id: the driver MUST only return projects
within this domain.
:returns: a list of project_refs or an empty list.
"""
raise exception.NotImplemented()
@abc.abstractmethod
def list_projects_for_user(self, user_id, group_ids, hints):
"""List all projects associated with a given user.
:param user_id: the user in question
:param group_ids: the groups this user is a member of. This list is
built in the Manager, so that the driver itself
does not have to call across to identity.
:param hints: filter hints which the driver should
implement if at all possible.
:returns: a list of project_refs or an empty list.
"""
raise exception.NotImplemented()
@abc.abstractmethod
def get_roles_for_groups(self, group_ids, project_id=None, domain_id=None):
"""List all the roles assigned to groups on either domain or
project.
If the project_id is not None, this value will be used, no matter what
was specified in the domain_id.
:param group_ids: iterable with group ids
:param project_id: id of the project
:param domain_id: id of the domain
:raises: AttributeError: In case both project_id and domain_id are set
to None
:returns: a list of Role entities matching groups and
project_id or domain_id
"""
raise exception.NotImplemented()
@abc.abstractmethod
def list_projects_for_groups(self, group_ids):
"""List projects accessible to specified groups.
:param group_ids: List of group ids.
:returns: List of projects accessible to specified groups.
"""
raise exception.NotImplemented()
@abc.abstractmethod
def list_domains_for_groups(self, group_ids):
"""List domains accessible to specified groups.
:param group_ids: List of group ids.
:returns: List of domains accessible to specified groups.
"""
raise exception.NotImplemented()
@abc.abstractmethod
def get_project(self, project_id):
"""Get a project by ID.
:returns: project_ref
:raises: keystone.exception.ProjectNotFound
"""
raise exception.NotImplemented()
@abc.abstractmethod
def update_project(self, project_id, project):
"""Updates an existing project.
:raises: keystone.exception.ProjectNotFound,
keystone.exception.Conflict
"""
raise exception.NotImplemented()
@abc.abstractmethod
def delete_project(self, project_id):
"""Deletes an existing project.
:raises: keystone.exception.ProjectNotFound
"""
raise exception.NotImplemented()
# role crud
@abc.abstractmethod
def create_role(self, role_id, role):
"""Creates a new role.
:raises: keystone.exception.Conflict
"""
raise exception.NotImplemented()
@abc.abstractmethod
def list_roles(self, hints):
"""List roles in the system.
:param hints: filter hints which the driver should
implement if at all possible.
:returns: a list of role_refs or an empty list.
"""
raise exception.NotImplemented()
@abc.abstractmethod
def get_role(self, role_id):
"""Get a role by ID.
:returns: role_ref
:raises: keystone.exception.RoleNotFound
"""
raise exception.NotImplemented()
@abc.abstractmethod
def update_role(self, role_id, role):
"""Updates an existing role.
:raises: keystone.exception.RoleNotFound,
keystone.exception.Conflict
"""
raise exception.NotImplemented()
@abc.abstractmethod
def delete_role(self, role_id):
"""Deletes an existing role.
:raises: keystone.exception.RoleNotFound
"""
raise exception.NotImplemented()
#TODO(ayoung): determine what else these two functions raise
@abc.abstractmethod
def delete_user(self, user_id):
"""Deletes all assignments for a user.
:raises: keystone.exception.RoleNotFound
"""
raise exception.NotImplemented()
@abc.abstractmethod
def delete_group(self, group_id):
"""Deletes all assignments for a group.
:raises: keystone.exception.RoleNotFound
"""
raise exception.NotImplemented()
#domain management functions for backends that only allow a single domain.
#currently, this is only LDAP, but might be used by PAM or other backends
#as well. This is used by both identity and assignment drivers.
def _set_default_domain(self, ref):
"""If the domain ID has not been set, set it to the default."""
if isinstance(ref, dict):
if 'domain_id' not in ref:
ref = ref.copy()
ref['domain_id'] = CONF.identity.default_domain_id
return ref
elif isinstance(ref, list):
return [self._set_default_domain(x) for x in ref]
else:
raise ValueError(_('Expected dict or list: %s') % type(ref))
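    # For example (hypothetical ref): {'id': 'u1', 'name': 'bob'} comes back
    # as a copy with 'domain_id' set to CONF.identity.default_domain_id, a ref
    # that already carries a domain_id is returned unchanged, and a list of
    # refs is mapped element-wise.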
def _validate_default_domain(self, ref):
"""Validate that either the default domain or nothing is specified.
Also removes the domain from the ref so that LDAP doesn't have to
persist the attribute.
"""
ref = ref.copy()
domain_id = ref.pop('domain_id', CONF.identity.default_domain_id)
self._validate_default_domain_id(domain_id)
return ref
def _validate_default_domain_id(self, domain_id):
"""Validate that the domain ID specified belongs to the default domain.
"""
if domain_id != CONF.identity.default_domain_id:
raise exception.DomainNotFound(domain_id=domain_id)
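    # For example (hypothetical ref): _validate_default_domain({'name': 'x',
    # 'domain_id': CONF.identity.default_domain_id}) returns {'name': 'x'},
    # while any other domain_id raises DomainNotFound.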
|
|
from datetime import datetime
from docker.models.containers import Container
from pytest import mark
from deck_chores.main import parse_iso_timestamp
from deck_chores.parsers import (
parse_flags,
parse_labels,
CronTrigger,
DateTrigger,
IntervalTrigger,
JobConfigValidator,
)
@mark.parametrize(
"sample",
(
"2021-05-05T16:42:18.488227566+00:00",
"2021-05-17T20:07:58.54095Z",
"2021-05-19T19:07:31.118260683Z",
),
)
def test_from_iso_timestamp(sample):
    """Test concrete manifestations of timestamps that Docker daemons
    produced in the wild."""
assert isinstance(parse_iso_timestamp(sample), datetime)
def test_parse_labels(cfg, mocker):
labels = {
'project_id': 'test_project',
'service_id': 'ham_machine',
'deck-chores.backup.interval': 'daily',
'deck-chores.backup.command': '/usr/local/bin/backup.sh',
'deck-chores.backup.user': 'www-data',
'deck-chores.backup.workdir': '/backups',
'deck-chores.pull-data.date': '1945-05-08 00:01:00',
'deck-chores.pull-data.command': '/usr/local/bin/pull.sh',
'deck-chores.pull-data.env.BASE_URL': 'https://foo.org/records/',
'deck-chores.pull-data.env.TIMEOUT': '120',
'deck-chores.gen-thumbs.cron': '*/10 * * * *',
'deck-chores.gen-thumbs.command': 'python /scripts/gen_thumbs.py',
'deck-chores.gen-thumbs.jitter': '600',
'deck-chores.gen-thumbs.max': '3',
}
container = mocker.MagicMock(Container)
container.labels = labels
container.image.labels = {}
cfg.client.containers.get.return_value = container
expected_jobs = {
'backup': {
'trigger': (IntervalTrigger, (0, 1, 0, 0, 0)),
'name': 'backup',
'command': '/usr/local/bin/backup.sh',
'user': 'www-data',
'max': 1,
'environment': {},
'workdir': '/backups',
},
'pull-data': {
'trigger': (DateTrigger, ('1945-05-08 00:01:00',)),
'name': 'pull-data',
'command': '/usr/local/bin/pull.sh',
'user': '',
'max': 1,
'environment': {'BASE_URL': 'https://foo.org/records/', 'TIMEOUT': '120'},
},
'gen-thumbs': {
'trigger': (CronTrigger, ('*', '*', '*', '*/10', '*', '*', '*', '*')),
'name': 'gen-thumbs',
'command': 'python /scripts/gen_thumbs.py',
'user': '',
'max': 3,
'environment': {},
'jitter': 600,
},
}
_, _, job_definitions = parse_labels('test_parse_labels')
assert len(job_definitions) == len(expected_jobs)
for name, job_config in job_definitions.items():
job_config.pop('service_id')
assert job_config.pop('timezone') == 'UTC'
assert job_config == expected_jobs[name]
def test_parse_labels_with_time_units(cfg, mocker):
labels = {
'project_id': 'test_project',
'service_id': 'time_machine',
'deck-chores.backup.interval': '2 weeks',
'deck-chores.backup.jitter': '0.5 day',
'deck-chores.backup.command': '/usr/local/bin/backup.sh',
'deck-chores.backup.user': 'www-data',
'deck-chores.backup.workdir': '/backups',
'deck-chores.pull-data.interval': '42 secs 1 day',
'deck-chores.pull-data.command': '/usr/local/bin/pull.sh',
'deck-chores.pull-data.env.BASE_URL': 'https://foo.org/records/',
'deck-chores.pull-data.env.TIMEOUT': '120',
'deck-chores.gen-thumbs.interval': '3 xongs',
'deck-chores.gen-thumbs.command': 'python /scripts/gen_thumbs.py',
'deck-chores.gen-thumbs.jitter': '600',
'deck-chores.gen-thumbs.max': '3',
}
container = mocker.MagicMock(Container)
container.labels = labels
container.image.labels = {}
cfg.client.containers.get.return_value = container
expected_jobs = {
'backup': {
'trigger': (IntervalTrigger, (2, 0, 0, 0, 0)),
'name': 'backup',
'command': '/usr/local/bin/backup.sh',
'user': 'www-data',
'max': 1,
'environment': {},
'workdir': '/backups',
'jitter': 0.5 * 24 * 60 * 60,
},
'pull-data': {
'trigger': (IntervalTrigger, (0, 1, 0, 0, 42)),
'name': 'pull-data',
'command': '/usr/local/bin/pull.sh',
'user': '',
'max': 1,
'environment': {'BASE_URL': 'https://foo.org/records/', 'TIMEOUT': '120'},
},
}
    _, _, job_definitions = parse_labels('test_parse_labels_with_time_units')
assert len(job_definitions) == len(expected_jobs), job_definitions
for name, job_config in job_definitions.items():
job_config.pop('service_id')
assert job_config.pop('timezone') == 'UTC'
assert job_config == expected_jobs[name]
def test_parse_labels_with_user_option(cfg, mocker):
labels = {
'deck-chores.options.user': 'c_options_user',
'deck-chores.job.command': 'a_command',
'deck-chores.job.interval': 'hourly',
}
image_labels = {'deck-chores.options.user': 'l_options_user'}
container = mocker.MagicMock(Container)
container.labels = labels
container.image.labels = image_labels
cfg.client.containers.get.return_value = container
expected_jobs = {
'job': {
'trigger': (IntervalTrigger, (0, 0, 1, 0, 0)),
'name': 'job',
'command': 'a_command',
'user': 'c_options_user',
'max': 1,
'timezone': 'UTC',
'environment': {},
}
}
_, _, job_definitions = parse_labels('test_parse_labels_with_user_option')
assert job_definitions == expected_jobs, job_definitions
def test_parse_labels_with_user_option_from_image(cfg, mocker):
labels = {
'deck-chores.job.command': 'a_command',
'deck-chores.job.interval': 'hourly',
}
image_labels = {'deck-chores.options.user': 'l_options_user'}
container = mocker.MagicMock(Container)
container.labels = labels
container.image.labels = image_labels
cfg.client.containers.get.return_value = container
expected_jobs = {
'job': {
'trigger': (IntervalTrigger, (0, 0, 1, 0, 0)),
'name': 'job',
'command': 'a_command',
'user': 'l_options_user',
'max': 1,
'timezone': 'UTC',
'environment': {},
}
}
_, _, job_definitions = parse_labels(
'test_parse_labels_with_user_option_from_image'
)
assert job_definitions == expected_jobs, job_definitions
def test_interval_trigger():
validator = JobConfigValidator({'trigger': {'coerce': 'interval'}})
result = validator.validated({'trigger': '15'})['trigger']
assert result == (IntervalTrigger, (0, 0, 0, 0, 15))
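# NOTE: judging from the cases above, the IntervalTrigger argument tuple
# appears to be ordered as (weeks, days, hours, minutes, seconds), e.g.
# 'daily' maps to (0, 1, 0, 0, 0) and a bare '15' to 15 seconds.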
@mark.parametrize(
'default,value,result',
(
(('image', 'service'), '', 'image,service'),
(('image', 'service'), 'noservice', 'image'),
(('image', 'service'), 'noimage', 'service'),
(('service',), 'image', 'image,service'),
),
)
def test_flags(cfg, mocker, default, value, result):
cfg.default_flags = default
container = mocker.MagicMock(Container)
container.labels = {'deck-chores.options.flags': value}
cfg.client.containers.get.return_value = container
assert parse_flags(value) == result
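# From the parametrized cases: flag values are merged with the configured
# defaults, a 'no<flag>' token appears to drop that flag, and the surviving
# flags are returned as a comma-separated string.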
|
|
from __future__ import absolute_import
import logging
import os
from pip._vendor import pkg_resources
from pip._vendor import requests
from pip.download import (url_to_path, unpack_url)
from pip.exceptions import (InstallationError, BestVersionAlreadyInstalled,
DistributionNotFound, PreviousBuildDirError)
from pip.locations import (PIP_DELETE_MARKER_FILENAME, build_prefix)
from pip.req.req_install import InstallRequirement
from pip.utils import (display_path, rmtree, dist_in_usersite,
_make_build_dir, normalize_path)
from pip.utils.logging import indent_log
from pip.vcs import vcs
from pip.wheel import wheel_ext
logger = logging.getLogger(__name__)
class Requirements(object):
def __init__(self):
self._keys = []
self._dict = {}
def keys(self):
return self._keys
def values(self):
return [self._dict[key] for key in self._keys]
def __contains__(self, item):
return item in self._keys
def __setitem__(self, key, value):
if key not in self._keys:
self._keys.append(key)
self._dict[key] = value
def __getitem__(self, key):
return self._dict[key]
def __repr__(self):
values = ['%s: %s' % (repr(k), repr(self[k])) for k in self.keys()]
return 'Requirements({%s})' % ', '.join(values)
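    # NOTE: this is a minimal insertion-ordered mapping (a pre-OrderedDict
    # stand-in); keys()/values() preserve the order requirements were added.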
class RequirementSet(object):
def __init__(self, build_dir, src_dir, download_dir, upgrade=False,
ignore_installed=False, as_egg=False, target_dir=None,
ignore_dependencies=False, force_reinstall=False,
use_user_site=False, session=None, pycompile=True,
isolated=False, wheel_download_dir=None):
if session is None:
raise TypeError(
"RequirementSet() missing 1 required keyword argument: "
"'session'"
)
self.build_dir = build_dir
self.src_dir = src_dir
self.download_dir = download_dir
self.upgrade = upgrade
self.ignore_installed = ignore_installed
self.force_reinstall = force_reinstall
self.requirements = Requirements()
# Mapping of alias: real_name
self.requirement_aliases = {}
self.unnamed_requirements = []
self.ignore_dependencies = ignore_dependencies
self.successfully_downloaded = []
self.successfully_installed = []
self.reqs_to_cleanup = []
self.as_egg = as_egg
self.use_user_site = use_user_site
self.target_dir = target_dir # set from --target option
self.session = session
self.pycompile = pycompile
self.isolated = isolated
if wheel_download_dir:
wheel_download_dir = normalize_path(wheel_download_dir)
self.wheel_download_dir = wheel_download_dir
def __str__(self):
reqs = [req for req in self.requirements.values()
if not req.comes_from]
reqs.sort(key=lambda req: req.name.lower())
return ' '.join([str(req.req) for req in reqs])
def add_requirement(self, install_req):
if not install_req.match_markers():
logger.debug("Ignore %s: markers %r don't match",
install_req.name, install_req.markers)
return
name = install_req.name
install_req.as_egg = self.as_egg
install_req.use_user_site = self.use_user_site
install_req.target_dir = self.target_dir
install_req.pycompile = self.pycompile
if not name:
# url or path requirement w/o an egg fragment
self.unnamed_requirements.append(install_req)
else:
if self.has_requirement(name):
raise InstallationError(
'Double requirement given: %s (already in %s, name=%r)'
% (install_req, self.get_requirement(name), name))
self.requirements[name] = install_req
# FIXME: what about other normalizations? E.g., _ vs. -?
if name.lower() != name:
self.requirement_aliases[name.lower()] = name
def has_requirement(self, project_name):
for name in project_name, project_name.lower():
if name in self.requirements or name in self.requirement_aliases:
return True
return False
@property
def has_requirements(self):
return list(self.requirements.values()) or self.unnamed_requirements
@property
def is_download(self):
if self.download_dir:
self.download_dir = os.path.expanduser(self.download_dir)
if os.path.exists(self.download_dir):
return True
else:
logger.critical('Could not find download directory')
raise InstallationError(
"Could not find or access download directory '%s'"
% display_path(self.download_dir))
return False
def get_requirement(self, project_name):
for name in project_name, project_name.lower():
if name in self.requirements:
return self.requirements[name]
if name in self.requirement_aliases:
return self.requirements[self.requirement_aliases[name]]
raise KeyError("No project with the name %r" % project_name)
def uninstall(self, auto_confirm=False):
for req in self.requirements.values():
req.uninstall(auto_confirm=auto_confirm)
req.commit_uninstall()
def locate_files(self):
# FIXME: duplicates code from prepare_files; relevant code should
# probably be factored out into a separate method
unnamed = list(self.unnamed_requirements)
reqs = list(self.requirements.values())
while reqs or unnamed:
if unnamed:
req_to_install = unnamed.pop(0)
else:
req_to_install = reqs.pop(0)
install_needed = True
if not self.ignore_installed and not req_to_install.editable:
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
if self.upgrade:
# don't uninstall conflict if user install and
# conflict is not user install
if not (self.use_user_site
and not dist_in_usersite(
req_to_install.satisfied_by
)):
req_to_install.conflicts_with = \
req_to_install.satisfied_by
req_to_install.satisfied_by = None
else:
install_needed = False
if req_to_install.satisfied_by:
logger.info(
'Requirement already satisfied (use --upgrade to '
'upgrade): %s',
req_to_install,
)
if req_to_install.editable:
if req_to_install.source_dir is None:
req_to_install.source_dir = req_to_install.build_location(
self.src_dir
)
elif install_needed:
req_to_install.source_dir = req_to_install.build_location(
self.build_dir,
)
if (req_to_install.source_dir is not None
and not os.path.isdir(req_to_install.source_dir)):
raise InstallationError(
'Could not install requirement %s because source folder %s'
' does not exist (perhaps --no-download was used without '
'first running an equivalent install with --no-install?)' %
(req_to_install, req_to_install.source_dir)
)
def prepare_files(self, finder):
"""
Prepare process. Create temp directories, download and/or unpack files.
"""
from pip.index import Link
unnamed = list(self.unnamed_requirements)
reqs = list(self.requirements.values())
while reqs or unnamed:
if unnamed:
req_to_install = unnamed.pop(0)
else:
req_to_install = reqs.pop(0)
install = True
best_installed = False
not_found = None
# ############################################# #
# # Search for archive to fulfill requirement # #
# ############################################# #
if not self.ignore_installed and not req_to_install.editable:
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
if self.upgrade:
if not self.force_reinstall and not req_to_install.url:
try:
url = finder.find_requirement(
req_to_install, self.upgrade)
except BestVersionAlreadyInstalled:
best_installed = True
install = False
except DistributionNotFound as exc:
not_found = exc
else:
# Avoid the need to call find_requirement again
req_to_install.url = url.url
if not best_installed:
# don't uninstall conflict if user install and
# conflict is not user install
if not (self.use_user_site
and not dist_in_usersite(
req_to_install.satisfied_by
)):
req_to_install.conflicts_with = \
req_to_install.satisfied_by
req_to_install.satisfied_by = None
else:
install = False
if req_to_install.satisfied_by:
if best_installed:
logger.info(
'Requirement already up-to-date: %s',
req_to_install,
)
else:
logger.info(
'Requirement already satisfied (use --upgrade to '
'upgrade): %s',
req_to_install,
)
if req_to_install.editable:
logger.info('Obtaining %s', req_to_install)
elif install:
if (req_to_install.url
and req_to_install.url.lower().startswith('file:')):
path = url_to_path(req_to_install.url)
logger.info('Processing %s', display_path(path))
else:
logger.info('Collecting %s', req_to_install)
with indent_log():
# ################################ #
# # vcs update or unpack archive # #
# ################################ #
is_wheel = False
if req_to_install.editable:
if req_to_install.source_dir is None:
location = req_to_install.build_location(self.src_dir)
req_to_install.source_dir = location
else:
location = req_to_install.source_dir
if not os.path.exists(self.build_dir):
_make_build_dir(self.build_dir)
req_to_install.update_editable(not self.is_download)
if self.is_download:
req_to_install.run_egg_info()
req_to_install.archive(self.download_dir)
else:
req_to_install.run_egg_info()
elif install:
# @@ if filesystem packages are not marked
# editable in a req, a non deterministic error
# occurs when the script attempts to unpack the
# build directory
# NB: This call can result in the creation of a temporary
# build directory
location = req_to_install.build_location(
self.build_dir,
)
unpack = True
url = None
                    # If a checkout exists, it's unwise to keep going. Version
                    # inconsistencies are logged later, but do not fail the
                    # installation.
if os.path.exists(os.path.join(location, 'setup.py')):
raise PreviousBuildDirError(
"pip can't proceed with requirements '%s' due to a"
" pre-existing build directory (%s). This is "
"likely due to a previous installation that failed"
". pip is being responsible and not assuming it "
"can delete this. Please delete it and try again."
% (req_to_install, location)
)
else:
# FIXME: this won't upgrade when there's an existing
# package unpacked in `location`
if req_to_install.url is None:
if not_found:
raise not_found
url = finder.find_requirement(
req_to_install,
upgrade=self.upgrade,
)
else:
# FIXME: should req_to_install.url already be a
# link?
url = Link(req_to_install.url)
assert url
if url:
try:
if (
url.filename.endswith(wheel_ext)
and self.wheel_download_dir
):
# when doing 'pip wheel`
download_dir = self.wheel_download_dir
do_download = True
else:
download_dir = self.download_dir
do_download = self.is_download
unpack_url(
url, location, download_dir,
do_download, session=self.session,
)
except requests.HTTPError as exc:
logger.critical(
'Could not install requirement %s because '
'of error %s',
req_to_install,
exc,
)
raise InstallationError(
'Could not install requirement %s because '
'of HTTP error %s for URL %s' %
(req_to_install, exc, url)
)
else:
unpack = False
if unpack:
is_wheel = url and url.filename.endswith(wheel_ext)
if self.is_download:
req_to_install.source_dir = location
if not is_wheel:
# FIXME:https://github.com/pypa/pip/issues/1112
req_to_install.run_egg_info()
if url and url.scheme in vcs.all_schemes:
req_to_install.archive(self.download_dir)
elif is_wheel:
req_to_install.source_dir = location
req_to_install.url = url.url
else:
req_to_install.source_dir = location
req_to_install.run_egg_info()
req_to_install.assert_source_matches_version()
# req_to_install.req is only avail after unpack for URL
# pkgs repeat check_if_exists to uninstall-on-upgrade
# (#14)
if not self.ignore_installed:
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
if self.upgrade or self.ignore_installed:
# don't uninstall conflict if user install and
# conflict is not user install
if not (self.use_user_site
and not dist_in_usersite(
req_to_install.satisfied_by)):
req_to_install.conflicts_with = \
req_to_install.satisfied_by
req_to_install.satisfied_by = None
else:
logger.info(
'Requirement already satisfied (use '
'--upgrade to upgrade): %s',
req_to_install,
)
install = False
# ###################### #
# # parse dependencies # #
# ###################### #
if (req_to_install.extras):
logger.debug(
"Installing extra requirements: %r",
','.join(req_to_install.extras),
)
if is_wheel:
dist = list(
pkg_resources.find_distributions(location)
)[0]
else: # sdists
if req_to_install.satisfied_by:
dist = req_to_install.satisfied_by
else:
dist = req_to_install.get_dist()
# FIXME: shouldn't be globally added:
if dist.has_metadata('dependency_links.txt'):
finder.add_dependency_links(
dist.get_metadata_lines('dependency_links.txt')
)
if not self.ignore_dependencies:
for subreq in dist.requires(
req_to_install.extras):
if self.has_requirement(
subreq.project_name):
# FIXME: check for conflict
continue
subreq = InstallRequirement(
str(subreq),
req_to_install,
isolated=self.isolated,
)
reqs.append(subreq)
self.add_requirement(subreq)
if not self.has_requirement(req_to_install.name):
# 'unnamed' requirements will get added here
self.add_requirement(req_to_install)
# cleanup tmp src
if (self.is_download or
req_to_install._temp_build_dir is not None):
self.reqs_to_cleanup.append(req_to_install)
if install:
self.successfully_downloaded.append(req_to_install)
def cleanup_files(self):
"""Clean up files, remove builds."""
logger.debug('Cleaning up...')
with indent_log():
for req in self.reqs_to_cleanup:
req.remove_temporary_source()
if self._pip_has_created_build_dir():
logger.debug('Removing temporary dir %s...', self.build_dir)
rmtree(self.build_dir)
def _pip_has_created_build_dir(self):
return (
self.build_dir == build_prefix
and os.path.exists(
os.path.join(self.build_dir, PIP_DELETE_MARKER_FILENAME)
)
)
def install(self, install_options, global_options=(), *args, **kwargs):
"""
Install everything in this set (after having downloaded and unpacked
the packages)
"""
to_install = [r for r in self.requirements.values()[::-1]
if not r.satisfied_by]
# DISTRIBUTE TO SETUPTOOLS UPGRADE HACK (1 of 3 parts)
# move the distribute-0.7.X wrapper to the end because it does not
# install a setuptools package. by moving it to the end, we ensure it's
# setuptools dependency is handled first, which will provide the
# setuptools package
# TODO: take this out later
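        # e.g. a (hypothetical) to_install of [distribute-0.7.3, requests]
        # becomes [requests, distribute-0.7.3], so distribute's setuptools
        # dependency is handled first.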
distribute_req = pkg_resources.Requirement.parse("distribute>=0.7")
for req in to_install:
if (req.name == 'distribute'
and req.installed_version is not None
and req.installed_version in distribute_req):
to_install.remove(req)
to_install.append(req)
if to_install:
logger.info(
'Installing collected packages: %s',
', '.join([req.name for req in to_install]),
)
with indent_log():
for requirement in to_install:
# DISTRIBUTE TO SETUPTOOLS UPGRADE HACK (1 of 3 parts)
# when upgrading from distribute-0.6.X to the new merged
# setuptools in py2, we need to force setuptools to uninstall
# distribute. In py3, which is always using distribute, this
# conversion is already happening in distribute's
# pkg_resources. It's ok *not* to check if setuptools>=0.7
                    # because if someone were actually trying to upgrade from
                    # distribute to setuptools 0.6.X, then all this could do is
                    # actually help, although that upgrade path was certainly never
# "supported"
# TODO: remove this later
if requirement.name == 'setuptools':
try:
# only uninstall distribute<0.7. For >=0.7, setuptools
# will also be present, and that's what we need to
# uninstall
distribute_requirement = \
pkg_resources.Requirement.parse("distribute<0.7")
existing_distribute = \
pkg_resources.get_distribution("distribute")
if existing_distribute in distribute_requirement:
requirement.conflicts_with = existing_distribute
except pkg_resources.DistributionNotFound:
# distribute wasn't installed, so nothing to do
pass
if requirement.conflicts_with:
logger.info(
'Found existing installation: %s',
requirement.conflicts_with,
)
with indent_log():
requirement.uninstall(auto_confirm=True)
try:
requirement.install(
install_options,
global_options,
*args,
**kwargs
)
except:
# if install did not succeed, rollback previous uninstall
if (requirement.conflicts_with
and not requirement.install_succeeded):
requirement.rollback_uninstall()
raise
else:
if (requirement.conflicts_with
and requirement.install_succeeded):
requirement.commit_uninstall()
requirement.remove_temporary_source()
self.successfully_installed = to_install
|
|
from __future__ import absolute_import
import logging
from pip._vendor import pkg_resources
from pip.basecommand import Command
from pip.exceptions import DistributionNotFound
from pip.index import FormatControl, fmt_ctl_formats, PackageFinder, Search
from pip.req import InstallRequirement
from pip.utils import get_installed_distributions, dist_is_editable
from pip.wheel import WheelCache
from pip.cmdoptions import make_option_group, index_group
logger = logging.getLogger(__name__)
class ListCommand(Command):
"""
List installed packages, including editables.
Packages are listed in a case-insensitive sorted order.
"""
name = 'list'
usage = """
%prog [options]"""
summary = 'List installed packages.'
def __init__(self, *args, **kw):
super(ListCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'-o', '--outdated',
action='store_true',
default=False,
help='List outdated packages (excluding editables)')
cmd_opts.add_option(
'-u', '--uptodate',
action='store_true',
default=False,
help='List uptodate packages (excluding editables)')
cmd_opts.add_option(
'-e', '--editable',
action='store_true',
default=False,
help='List editable projects.')
cmd_opts.add_option(
'-l', '--local',
action='store_true',
default=False,
help=('If in a virtualenv that has global access, do not list '
'globally-installed packages.'),
)
self.cmd_opts.add_option(
'--user',
dest='user',
action='store_true',
default=False,
help='Only output packages installed in user-site.')
cmd_opts.add_option(
'--pre',
action='store_true',
default=False,
help=("Include pre-release and development versions. By default, "
"pip only finds stable versions."),
)
index_opts = make_option_group(index_group, self.parser)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def _build_package_finder(self, options, index_urls, session):
"""
Create a package finder appropriate to this list command.
"""
return PackageFinder(
find_links=options.find_links,
index_urls=index_urls,
allow_external=options.allow_external,
allow_unverified=options.allow_unverified,
allow_all_external=options.allow_all_external,
allow_all_prereleases=options.pre,
trusted_hosts=options.trusted_hosts,
process_dependency_links=options.process_dependency_links,
session=session,
)
def run(self, options, args):
if options.outdated:
self.run_outdated(options)
elif options.uptodate:
self.run_uptodate(options)
elif options.editable:
self.run_editables(options)
else:
self.run_listing(options)
def run_outdated(self, options):
for dist, version, typ in self.find_packages_latest_versions(options):
if version > dist.parsed_version:
logger.info(
'%s (Current: %s Latest: %s [%s])',
dist.project_name, dist.version, version, typ,
)
def find_packages_latest_versions(self, options):
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.info('Ignoring indexes: %s', ','.join(index_urls))
index_urls = []
dependency_links = []
for dist in get_installed_distributions(local_only=options.local,
user_only=options.user):
if dist.has_metadata('dependency_links.txt'):
dependency_links.extend(
dist.get_metadata_lines('dependency_links.txt'),
)
with self._build_session(options) as session:
finder = self._build_package_finder(options, index_urls, session)
finder.add_dependency_links(dependency_links)
installed_packages = get_installed_distributions(
local_only=options.local,
user_only=options.user,
include_editables=False,
)
format_control = FormatControl(set(), set())
wheel_cache = WheelCache(options.cache_dir, format_control)
for dist in installed_packages:
req = InstallRequirement.from_line(
dist.key, None, isolated=options.isolated_mode,
wheel_cache=wheel_cache
)
typ = 'unknown'
try:
link = finder.find_requirement(req, True)
# If link is None, means installed version is most
# up-to-date
if link is None:
continue
except DistributionNotFound:
continue
else:
canonical_name = pkg_resources.safe_name(req.name).lower()
formats = fmt_ctl_formats(format_control, canonical_name)
search = Search(
req.name,
canonical_name,
formats)
remote_version = finder._link_package_versions(
link, search).version
if link.is_wheel:
typ = 'wheel'
else:
typ = 'sdist'
yield dist, remote_version, typ
def run_listing(self, options):
installed_packages = get_installed_distributions(
local_only=options.local,
user_only=options.user,
)
self.output_package_listing(installed_packages)
def run_editables(self, options):
installed_packages = get_installed_distributions(
local_only=options.local,
user_only=options.user,
editables_only=True,
)
self.output_package_listing(installed_packages)
def output_package_listing(self, installed_packages):
installed_packages = sorted(
installed_packages,
key=lambda dist: dist.project_name.lower(),
)
for dist in installed_packages:
if dist_is_editable(dist):
line = '%s (%s, %s)' % (
dist.project_name,
dist.version,
dist.location,
)
else:
line = '%s (%s)' % (dist.project_name, dist.version)
logger.info(line)
def run_uptodate(self, options):
uptodate = []
for dist, version, typ in self.find_packages_latest_versions(options):
if dist.parsed_version == version:
uptodate.append(dist)
self.output_package_listing(uptodate)
|
|
# Copyright 2021 The Jax Influence Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for function_factory."""
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
from jax_influence import function_factory
from jax_influence import selection
from jax_influence import test_utils
import numpy as np
def _per_example_loss_fn(params, batch, batch_stats=None):
"""Returns loss on each example."""
out = batch['x'] @ params['A']
out = out + batch['y'] @ params['B']
out = out + params['c']
# Batch stats is used as a multiplicative constant.
if batch_stats is not None:
out = out * batch_stats
return out**2 / 2
def _total_loss(params, batch, batch_stats=None):
"""Returns loss on a batch."""
# Batch stats is used as normalization
if batch_stats is not None:
den = batch_stats
else:
den = 1
return jnp.sum(_per_example_loss_fn(params, batch)) / den
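# Note the asymmetry between the two helpers: _per_example_loss_fn scales its
# output by batch_stats before squaring, whereas _total_loss only uses
# batch_stats to normalize the summed loss; the expected values in the
# parameterized test cases below depend on this.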
class FunctionFactoryTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.params = {
'A': jnp.array([1.0, 2.0, -3.0]),
'B': jnp.array([4.0, 6.0, -3.0, -2.0, -1.0]),
'c': jnp.array([1.3])
}
self.tangents = {
'A': jnp.array([-0.2, 2.4, 3.22]),
'B': jnp.array([2.0, 3.0, 1.2, -2.5, -1.44]),
'c': jnp.array([4.22])
}
def get_random_batch(self, seed):
seed = jax.random.PRNGKey(seed)
seed1, seed2 = jax.random.split(seed)
x = jax.random.uniform(seed1, (3,))
y = jax.random.uniform(seed2, (5,))
return {'x': x, 'y': y}
def concat_batches(self, batch0, batch1):
"""Concatenates two batches."""
return jax.tree_multimap(lambda x, y: jnp.concatenate([x[None], y[None]]),
batch0, batch1)
def get_batch(self):
"""Gets a deterministic batch for testing HVP and JVP."""
batch0 = {
'x': jnp.array([1.2, .3, .4]),
'y': jnp.array([.4, -.5, -2.0, -3.0, -4.2])
}
batch1 = {
'x': jnp.array([3.0, 2.3, 4.4]),
'y': jnp.array([2.4, -1.5, 2.55, 3.44, -1.0])
}
return self.concat_batches(batch0, batch1)
@parameterized.named_parameters(
dict(testcase_name='seed_0', seed=0),
dict(testcase_name='seed_17', seed=17),
)
def test_restrict_subset_params(self, seed):
batch = self.get_random_batch(seed)
fval = _per_example_loss_fn(self.params, batch)
fun_rest = function_factory.restrict_subset_params(
_per_example_loss_fn, self.params, select_fn=lambda x: 'B' in x)
fvalrest = fun_rest({'B': self.params['B']}, batch)
test_utils.check_close(fval, fvalrest)
@parameterized.named_parameters(
dict(
testcase_name='no_sel_no_batch_stats',
select_fn=None,
batch_stats=None,
expected={
'A':
np.array([78.24721, 49.799202, 92.40961]),
'B':
np.array(
[53.393604, -37.480003, 16.873405, 17.799526, -88.53919]),
'c':
np.array([35.944],)
}),
dict(
testcase_name='no_sel_batch_stats',
select_fn=None,
batch_stats=5,
expected={
'A':
np.array([15.649442, 9.959841, 18.481922]),
'B':
np.array(
[10.67872, -7.496001, 3.374681, 3.5599053, -17.707838]),
'c':
np.array([7.1888])
}),
dict(
testcase_name='sel_no_batch_stats',
select_fn=lambda x: 'B' in x,
batch_stats=None,
expected={
'B':
np.array(
[-4.9407997, 0.4759996, -30.585999, -44.416, -40.0816])
}),
dict(
testcase_name='sel_batch_stats',
select_fn=lambda x: 'A' in x,
batch_stats=3,
expected={
'A': np.array([19.795202, 14.810934, 28.231468]),
}))
def test_create_hvp_on_sample(self, select_fn, batch_stats, expected):
hvp_on_sample = function_factory.create_hvp_on_sample(
_total_loss,
batch_stats=batch_stats,
params_select_fn=select_fn,
params_to_bind=self.params)
batch = self.get_batch()
if select_fn is not None:
sel_params, _ = selection.split_params(self.params, select_fn)
sel_tangents, _ = selection.split_params(self.tangents, select_fn)
else:
sel_params = self.params
sel_tangents = self.tangents
result = hvp_on_sample(sel_params, sel_tangents, batch)
test_utils.check_close(result, expected, atol=1e-5, rtol=1e-5)
@parameterized.named_parameters(
dict(
testcase_name='no_sel_no_batch_stats',
select_fn=None,
batch_stats=None,
expected=np.array([274.4812, -336.12286])),
dict(
testcase_name='no_sel_batch_stats',
select_fn=None,
batch_stats=5,
expected=np.array([6862.0293, -8403.071])),
dict(
testcase_name='sel_no_batch_stats',
select_fn=lambda x: 'B' in x,
batch_stats=None,
expected=np.array([174.48161, 65.47399])),
dict(
testcase_name='sel_batch_stats',
select_fn=lambda x: 'A' in x,
batch_stats=3,
expected=np.array([265.73044, -2959.9763]),
))
def test_create_jvp_on_each_example(self, select_fn, batch_stats, expected):
jvp_on_each_example = function_factory.create_jvp_on_each_example(
_per_example_loss_fn,
batch_stats=batch_stats,
params_select_fn=select_fn,
params_to_bind=self.params)
batch = self.get_batch()
if select_fn is not None:
sel_params, _ = selection.split_params(self.params, select_fn)
sel_tangents, _ = selection.split_params(self.tangents, select_fn)
else:
sel_params = self.params
sel_tangents = self.tangents
result = jvp_on_each_example(sel_params, sel_tangents, batch)
test_utils.check_close(result, expected, atol=1e-5, rtol=1e-5)
@parameterized.named_parameters(
dict(testcase_name='seed_0', seed=0),
dict(testcase_name='seed_49', seed=49),
)
def test_bind_params(self, seed):
batch = self.get_random_batch(seed)
fval = _per_example_loss_fn(self.params, batch)
fun_bound = function_factory.bind_params(_per_example_loss_fn, self.params)
fvalbound = fun_bound(batch)
test_utils.check_close(fval, fvalbound)
@parameterized.named_parameters(
dict(testcase_name='no_avg', do_average=False),
dict(testcase_name='avg', do_average=True),
)
def test_create_accumulator(self, do_average):
batch = self.get_batch()
expected = _per_example_loss_fn(self.params, batch)
expected = jnp.sum(expected)
if do_average:
expected = expected / 2
accum_fn = function_factory.create_accumulator(
_per_example_loss_fn, num_micro_batches=2, do_average=do_average)
result = accum_fn(params=self.params, batch=batch)
test_utils.check_close(result, expected)
@parameterized.named_parameters(
dict(testcase_name='pmap', handle_p_mapping=True),
dict(testcase_name='nopmap', handle_p_mapping=False),
)
@mock.patch('jax.local_device_count')
def test_create_hvp_estimator(self, jax_mock, handle_p_mapping):
# Enforce using only one device when resizing batches.
jax_mock.return_value = 1
iter1 = test_utils.InfiniteIterator(0, self.get_random_batch)
iter2 = test_utils.InfiniteIterator(0, self.get_random_batch)
hvp_on_sample = function_factory.create_hvp_on_sample(_total_loss)
batch1 = next(iter1)
hvp_on_sample_maybe_pmapped = hvp_on_sample
if handle_p_mapping:
hvp_on_sample_maybe_pmapped = jax.pmap(hvp_on_sample, 'batch')
hvp_estimator = function_factory.create_hvp_estimator(
hvp_on_sample_maybe_pmapped,
handle_p_mapping=handle_p_mapping,
params=self.params,
data_iterator=iter2)
result = hvp_estimator(self.tangents)
expected = hvp_on_sample(
primals=self.params, tangents=self.tangents, batch=batch1)
test_utils.check_close(result, expected)
if __name__ == '__main__':
absltest.main()
|
|
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# UVEServer
#
# Operational State Server for UVEs
#
import gevent
import json
import copy
import xmltodict
import redis
import datetime
import sys
from opserver_util import OpServerUtils
import re
from gevent.coros import BoundedSemaphore
from pysandesh.util import UTCTimestampUsec
from pysandesh.connection_info import ConnectionState
from sandesh.viz.constants import UVE_MAP
from pysandesh.gen_py.process_info.ttypes import ConnectionType,\
ConnectionStatus
import traceback
class UVEServer(object):
def __init__(self, redis_uve_server, logger, redis_password=None, uvedbcache=None):
self._local_redis_uve = redis_uve_server
self._redis_uve_map = {}
self._logger = logger
self._redis = None
self._uvedbcache = uvedbcache
self._redis_password = redis_password
self._uve_reverse_map = {}
for h,m in UVE_MAP.iteritems():
self._uve_reverse_map[m] = h
#end __init__
def redis_instances(self):
return set(self._redis_uve_map.keys())
def update_redis_uve_list(self, redis_uve_list):
newlist = set(redis_uve_list)
# if some redis instances are gone, remove them from our map
for test_elem in self._redis_uve_map.keys():
if test_elem not in newlist:
r_ip = test_elem[0]
r_port = test_elem[1]
del self._redis_uve_map[test_elem]
ConnectionState.delete(ConnectionType.REDIS_UVE,\
r_ip+":"+str(r_port))
# new redis instances need to be inserted into the map
for test_elem in newlist:
if test_elem not in self._redis_uve_map:
r_ip = test_elem[0]
r_port = test_elem[1]
self._redis_uve_map[test_elem] = None
ConnectionState.update(ConnectionType.REDIS_UVE,\
r_ip+":"+str(r_port), ConnectionStatus.INIT)
# Exercise redis connections to update health
if len(newlist):
self.get_uve_list("ObjectCollectorInfo")
# end update_redis_uve_list
def fill_redis_uve_info(self, redis_uve_info):
redis_uve_info.ip = self._local_redis_uve[0]
redis_uve_info.port = self._local_redis_uve[1]
try:
self._redis.ping()
except redis.exceptions.ConnectionError:
redis_uve_info.status = 'DisConnected'
else:
redis_uve_info.status = 'Connected'
#end fill_redis_uve_info
@staticmethod
def merge_previous(state, key, typ, attr, prevdict):
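        # Merge a 'previous' aggregate into the current state: counter items
        # have their '#text' values summed, append-type lists are extended and
        # their '@size' updated, then ParallelAggregator.consolidate_list()
        # reshapes the result (see _is_agg_item/_is_agg_list below).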
print "%s New val is %s" % (attr, prevdict)
nstate = copy.deepcopy(state)
if UVEServer._is_agg_item(prevdict):
count = int(state[key][typ][attr]['previous']['#text'])
count += int(prevdict['#text'])
nstate[key][typ][attr]['previous']['#text'] = str(count)
if UVEServer._is_agg_list(prevdict):
sname = ParallelAggregator.get_list_name(
state[key][typ][attr]['previous'])
count = len(prevdict['list'][sname]) + \
len(state[key][typ][attr]['previous']['list'][sname])
nstate[key][typ][attr]['previous']['list'][sname].extend(
prevdict['list'][sname])
nstate[key][typ][attr]['previous']['list']['@size'] = \
str(count)
tstate = {}
tstate[typ] = {}
tstate[typ][attr] = copy.deepcopy(
nstate[key][typ][attr]['previous'])
nstate[key][typ][attr]['previous'] =\
ParallelAggregator.consolidate_list(tstate, typ, attr)
print "%s Merged val is %s"\
% (attr, nstate[key][typ][attr]['previous'])
return nstate
def run(self):
ConnectionState.update(conn_type = ConnectionType.REDIS_UVE,
name = 'LOCAL', status = ConnectionStatus.INIT)
while True:
if self._redis:
redish = self._redis
else:
redish = redis.StrictRedis(self._local_redis_uve[0],
self._local_redis_uve[1],
password=self._redis_password,
db=1)
try:
if not self._redis:
value = ""
redish.ping()
else:
k, value = redish.brpop("DELETED")
self._logger.debug("%s del received for " % value)
# value is of the format:
# DEL:<key>:<src>:<node-type>:<module>:<instance-id>:<message-type>:<seqno>
redish.delete(value)
except:
if self._redis:
                    # Send redis connection down msg. Could be because of authentication.
ConnectionState.update(conn_type = ConnectionType.REDIS_UVE,
name = 'LOCAL', status = ConnectionStatus.DOWN)
self._redis = None
gevent.sleep(5)
else:
self._logger.debug("Deleted %s" % value)
if not self._redis:
self._redis = redish
ConnectionState.update(conn_type = ConnectionType.REDIS_UVE,
name = 'LOCAL', status = ConnectionStatus.UP)
@staticmethod
def _is_agg_item(attr):
if attr['@type'] in ['i8', 'i16', 'i32', 'i64', 'byte',
'u8', 'u16', 'u32', 'u64']:
if '@aggtype' in attr:
if attr['@aggtype'] == "counter":
return True
return False
@staticmethod
def _is_agg_list(attr):
if attr['@type'] in ['list']:
if '@aggtype' in attr:
if attr['@aggtype'] == "append":
return True
return False
def get_part(self, part, r_inst):
# Get UVE and Type contents of given partition on given
# collector/redis instance.
uves = {}
try:
r_ip = r_inst[0]
r_port = r_inst[1]
redish = self._redis_inst_get(r_inst)
gen_uves = {}
for elems in redish.smembers("PART2KEY:" + str(part)):
info = elems.split(":", 5)
gen = info[0] + ":" + info[1] + ":" + info[2] + ":" + info[3]
typ = info[4]
key = info[5]
if not gen in gen_uves:
gen_uves[gen] = {}
if not key in gen_uves[gen]:
gen_uves[gen][key] = {}
gen_uves[gen][key][typ] = {}
except Exception as e:
self._logger.error("get_part failed %s for : %s:%d tb %s" \
% (str(e), r_ip, r_port, traceback.format_exc()))
self._redis_inst_down(r_inst)
else:
self._redis_inst_up(r_inst, redish)
return r_ip + ":" + str(r_port) , gen_uves
def _redis_inst_get(self, r_inst):
r_ip = r_inst[0]
r_port = r_inst[1]
if not self._redis_uve_map[r_inst]:
return redis.StrictRedis(
host=r_ip, port=r_port,
password=self._redis_password, db=1)
else:
return self._redis_uve_map[r_inst]
def _redis_inst_up(self, r_inst, redish):
if not self._redis_uve_map[r_inst]:
r_ip = r_inst[0]
r_port = r_inst[1]
self._redis_uve_map[r_inst] = redish
ConnectionState.update(ConnectionType.REDIS_UVE,
r_ip + ":" + str(r_port), ConnectionStatus.UP)
def _redis_inst_down(self, r_inst):
if self._redis_uve_map[r_inst]:
r_ip = r_inst[0]
r_port = r_inst[1]
self._redis_uve_map[r_inst] = None
ConnectionState.update(ConnectionType.REDIS_UVE,
r_ip + ":" + str(r_port), ConnectionStatus.DOWN)
def get_uve(self, key, flat, filters=None, is_alarm=False, base_url=None):
filters = filters or {}
sfilter = filters.get('sfilt')
mfilter = filters.get('mfilt')
tfilter = filters.get('cfilt')
ackfilter = filters.get('ackfilt')
if flat and not sfilter and not mfilter and self._uvedbcache:
return self._uvedbcache.get_uve(key, filters, is_alarm)
state = {}
state[key] = {}
rsp = {}
failures = False
for r_inst in self._redis_uve_map.keys():
try:
redish = self._redis_inst_get(r_inst)
qmap = {}
ppe = redish.pipeline()
ppe.smembers("ALARM_ORIGINS:" + key)
if not is_alarm:
ppe.smembers("ORIGINS:" + key)
pperes = ppe.execute()
origins = set()
for origset in pperes:
for smt in origset:
tt = smt.rsplit(":",1)[1]
sm = smt.rsplit(":",1)[0]
source = sm.split(":", 1)[0]
mdule = sm.split(":", 1)[1]
if tfilter is not None:
if tt not in tfilter:
continue
if sfilter is not None:
if sfilter != source:
continue
if mfilter is not None:
if mfilter != mdule:
continue
origins.add(smt)
ppeval = redish.pipeline()
for origs in origins:
ppeval.hgetall("VALUES:" + key + ":" + origs)
odictlist = ppeval.execute()
idx = 0
for origs in origins:
odict = odictlist[idx]
idx = idx + 1
info = origs.rsplit(":", 1)
dsource = info[0]
typ = info[1]
afilter_list = set()
if tfilter is not None:
afilter_list = tfilter[typ]
for attr, value in odict.iteritems():
if len(afilter_list):
if attr not in afilter_list:
continue
if typ not in state[key]:
state[key][typ] = {}
if value[0] == '<':
snhdict = xmltodict.parse(value)
if snhdict[attr]['@type'] == 'list':
sname = ParallelAggregator.get_list_name(
snhdict[attr])
if snhdict[attr]['list']['@size'] == '0':
continue
elif snhdict[attr]['list']['@size'] == '1':
if not isinstance(
snhdict[attr]['list'][sname], list):
snhdict[attr]['list'][sname] = [
snhdict[attr]['list'][sname]]
if typ == 'UVEAlarms' and attr == 'alarms' and \
ackfilter is not None:
alarms = []
for alarm in snhdict[attr]['list'][sname]:
ack_attr = alarm.get('ack')
if ack_attr:
ack = ack_attr['#text']
else:
ack = 'false'
if ack == ackfilter:
alarms.append(alarm)
if not len(alarms):
continue
snhdict[attr]['list'][sname] = alarms
snhdict[attr]['list']['@size'] = \
str(len(alarms))
else:
continue
# print "Attr %s Value %s" % (attr, snhdict)
if attr not in state[key][typ]:
state[key][typ][attr] = {}
if dsource in state[key][typ][attr]:
print "Found Dup %s:%s:%s:%s:%s = %s" % \
(key, typ, attr, source, mdule, state[
key][typ][attr][dsource])
state[key][typ][attr][dsource] = snhdict[attr]
pa = ParallelAggregator(state, self._uve_reverse_map)
rsp = pa.aggregate(key, flat, base_url)
except Exception as e:
self._logger.error("redis-uve failed %s for : %s tb %s" \
% (str(e), str(r_inst), traceback.format_exc()))
self._redis_inst_down(r_inst)
failures = True
else:
self._redis_inst_up(r_inst, redish)
self._logger.debug("Computed %s as %s" % (key,str(rsp)))
return failures, rsp
# end get_uve
def get_uve_regex(self, key):
regex = ''
if key[0] != '*':
regex += '^'
regex += key.replace('*', '.*?')
if key[-1] != '*':
regex += '$'
return re.compile(regex)
# end get_uve_regex
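# Illustrative sketch (not in the original source) of the wildcard-to-regex
# conversion above: a leading or trailing '*' suppresses the '^' or '$' anchor,
# and every '*' becomes a non-greedy '.*?'.
#   get_uve_regex('vn*')      -> re.compile('^vn.*?')     matches 'vn01', 'vn-blue'
#   get_uve_regex('*:vn01')   -> re.compile('.*?:vn01$')  matches 'default-domain:vn01'
#   get_uve_regex('vrouter1') -> re.compile('^vrouter1$') matches only 'vrouter1'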
def multi_uve_get(self, table, flat, filters=None, is_alarm=False, base_url=None):
# get_uve_list cannot handle attribute names very efficiently,
# so we don't pass them here
uve_list = self.get_uve_list(table, filters, False, is_alarm)
for uve_name in uve_list:
_,uve_val = self.get_uve(
table + ':' + uve_name, flat, filters, is_alarm, base_url)
if uve_val == {}:
continue
else:
uve = {'name': uve_name, 'value': uve_val}
yield uve
# end multi_uve_get
def get_uve_list(self, table, filters=None, parse_afilter=False,
is_alarm=False):
filters = filters or {}
uve_list = set()
kfilter = filters.get('kfilt')
if kfilter is not None:
patterns = set()
for filt in kfilter:
patterns.add(self.get_uve_regex(filt))
for r_inst in self._redis_uve_map.keys():
try:
redish = self._redis_inst_get(r_inst)
# For UVE queries, we want to read both the UVE and Alarm tables
entries = redish.smembers('ALARM_TABLE:' + table)
if not is_alarm:
entries = entries.union(redish.smembers('TABLE:' + table))
for entry in entries:
info = (entry.split(':', 1)[1]).rsplit(':', 5)
uve_key = info[0]
if kfilter is not None:
kfilter_match = False
for pattern in patterns:
if pattern.match(uve_key):
kfilter_match = True
break
if not kfilter_match:
continue
src = info[1]
sfilter = filters.get('sfilt')
if sfilter is not None:
if sfilter != src:
continue
module = info[2]+':'+info[3]+':'+info[4]
mfilter = filters.get('mfilt')
if mfilter is not None:
if mfilter != module:
continue
typ = info[5]
tfilter = filters.get('cfilt')
if tfilter is not None:
if typ not in tfilter:
continue
if parse_afilter:
if tfilter is not None and len(tfilter[typ]):
valkey = "VALUES:" + table + ":" + uve_key + \
":" + src + ":" + module + ":" + typ
for afilter in tfilter[typ]:
attrval = redish.hget(valkey, afilter)
if attrval is not None:
break
if attrval is None:
continue
uve_list.add(uve_key)
except Exception as e:
self._logger.error("get_uve_list failed %s for : %s tb %s" \
% (str(e), str(r_inst), traceback.format_exc()))
self._redis_inst_down(r_inst)
else:
self._redis_inst_up(r_inst, redish)
return uve_list
# end get_uve_list
# end UVEServer
class ParallelAggregator:
def __init__(self, state, rev_map = {}):
self._state = state
self._rev_map = rev_map
def _default_agg(self, oattr):
itemset = set()
result = []
for source in oattr.keys():
elem = oattr[source]
hdelem = json.dumps(elem)
if hdelem not in itemset:
itemset.add(hdelem)
result.append([elem, source])
else:
for items in result:
if elem in items:
items.append(source)
return result
def _is_sum(self, oattr):
akey = oattr.keys()[0]
if '@aggtype' not in oattr[akey]:
return False
if oattr[akey]['@aggtype'] in ["sum"]:
return True
if oattr[akey]['@type'] in ['i8', 'i16', 'i32', 'i64',
'byte', 'u8', 'u16', 'u32', 'u64']:
if oattr[akey]['@aggtype'] in ["counter"]:
return True
return False
def _is_union(self, oattr):
akey = oattr.keys()[0]
if not oattr[akey]['@type'] in ["list"]:
return False
if '@aggtype' not in oattr[akey]:
return False
if oattr[akey]['@aggtype'] in ["union"]:
return True
else:
return False
def _is_append(self, oattr):
akey = oattr.keys()[0]
if not oattr[akey]['@type'] in ["list"]:
return False
if '@aggtype' not in oattr[akey]:
return False
if oattr[akey]['@aggtype'] in ["append"]:
return True
else:
return False
@staticmethod
def get_list_name(attr):
sname = ""
for sattr in attr['list'].keys():
if sattr[0] not in ['@']:
sname = sattr
return sname
@staticmethod
def _get_list_key(elem):
skey = ""
for sattr in elem.keys():
if '@aggtype' in elem[sattr]:
if elem[sattr]['@aggtype'] in ["listkey"]:
skey = sattr
return skey
def _sum_agg(self, oattr):
akey = oattr.keys()[0]
result = copy.deepcopy(oattr[akey])
count = 0
for source in oattr.keys():
count += int(oattr[source]['#text'])
result['#text'] = str(count)
return result
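# Worked example (illustrative only) of _sum_agg: a counter reported by two
# sources is added up into a single value.
#   oattr = {'srcA': {'@type': 'u64', '@aggtype': 'counter', '#text': '3'},
#            'srcB': {'@type': 'u64', '@aggtype': 'counter', '#text': '4'}}
#   _sum_agg(oattr) returns a copy of one entry with '#text' set to '7'.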
def _union_agg(self, oattr):
akey = oattr.keys()[0]
result = copy.deepcopy(oattr[akey])
itemset = set()
sname = ParallelAggregator.get_list_name(oattr[akey])
result['list'][sname] = []
siz = 0
for source in oattr.keys():
if isinstance(oattr[source]['list'][sname], basestring):
oattr[source]['list'][sname] = [oattr[source]['list'][sname]]
for elem in oattr[source]['list'][sname]:
hdelem = json.dumps(elem)
if hdelem not in itemset:
itemset.add(hdelem)
result['list'][sname].append(elem)
siz += 1
result['list']['@size'] = str(siz)
return result
def _append_agg(self, oattr):
akey = oattr.keys()[0]
result = copy.deepcopy(oattr[akey])
sname = ParallelAggregator.get_list_name(oattr[akey])
result['list'][sname] = []
siz = 0
for source in oattr.keys():
if not isinstance(oattr[source]['list'][sname], list):
oattr[source]['list'][sname] = [oattr[source]['list'][sname]]
for elem in oattr[source]['list'][sname]:
result['list'][sname].append(elem)
siz += 1
result['list']['@size'] = str(siz)
return result
@staticmethod
def _list_agg_attrs(item):
for ctrs in item.keys():
if '@aggtype'in item[ctrs]:
if item[ctrs]['@aggtype'] in ["listkey"]:
continue
if item[ctrs]['@type'] in ['i8', 'i16', 'i32', 'i64',
'byte', 'u8', 'u16', 'u32', 'u64']:
yield ctrs
@staticmethod
def consolidate_list(result, typ, objattr):
applist = ParallelAggregator.get_list_name(
result[typ][objattr])
appkey = ParallelAggregator._get_list_key(
result[typ][objattr]['list'][applist][0])
# There is no listkey ; no consolidation is possible
if len(appkey) == 0:
return result
# If the list's underlying struct has a listkey present,
# we need to further aggregate entries that have the
# same listkey
mod_result = copy.deepcopy(result[typ][objattr])
mod_result['list'][applist] = []
res_size = 0
mod_result['list']['@size'] = int(res_size)
# Add up stats
for items in result[typ][objattr]['list'][applist]:
matched = False
for res_items in mod_result['list'][applist]:
if items[appkey]['#text'] in [res_items[appkey]['#text']]:
for ctrs in ParallelAggregator._list_agg_attrs(items):
res_items[ctrs]['#text'] += int(items[ctrs]['#text'])
matched = True
if not matched:
newitem = copy.deepcopy(items)
for ctrs in ParallelAggregator._list_agg_attrs(items):
newitem[ctrs]['#text'] = int(items[ctrs]['#text'])
mod_result['list'][applist].append(newitem)
res_size += 1
# Convert results back into strings
for res_items in mod_result['list'][applist]:
for ctrs in ParallelAggregator._list_agg_attrs(res_items):
res_items[ctrs]['#text'] = str(res_items[ctrs]['#text'])
mod_result['list']['@size'] = str(res_size)
return mod_result
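# Illustrative example (not from the original source) of listkey consolidation:
# two list entries sharing the same listkey value are folded into one, and
# their integer counters are summed.
#   entry 1: {'if_name': {'#text': 'eth0', '@aggtype': 'listkey', '@type': 'string'},
#             'in_pkts': {'#text': '10', '@type': 'u64'}}
#   entry 2: same 'if_name' of 'eth0', 'in_pkts' of '5'
#   consolidated: one entry with 'in_pkts' '#text' == '15', and '@size' == '1'.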
def aggregate(self, key, flat, base_url = None):
'''
This function does parallel aggregation of this UVE's state.
It aggregates across all sources and return the global state of the UVE
'''
result = {}
try:
for typ in self._state[key].keys():
result[typ] = {}
for objattr in self._state[key][typ].keys():
if self._is_sum(self._state[key][typ][objattr]):
sum_res = self._sum_agg(self._state[key][typ][objattr])
if flat:
result[typ][objattr] = \
OpServerUtils.uve_attr_flatten(sum_res)
else:
result[typ][objattr] = sum_res
elif self._is_union(self._state[key][typ][objattr]):
union_res = self._union_agg(
self._state[key][typ][objattr])
conv_res = None
if union_res.has_key('@ulink') and base_url and \
union_res['list']['@type'] == 'string':
uterms = union_res['@ulink'].split(":",1)
# This is the linked UVE's table name
m_table = uterms[0]
if self._rev_map.has_key(m_table):
h_table = self._rev_map[m_table]
conv_res = []
sname = ParallelAggregator.get_list_name(union_res)
for el in union_res['list'][sname]:
lobj = {}
lobj['name'] = el
lobj['href'] = base_url + '/analytics/uves/' + \
h_table + '/' + el
if len(uterms) == 2:
lobj['href'] = lobj['href'] + '?cfilt=' + uterms[1]
else:
lobj['href'] = lobj['href'] + '?flat'
conv_res.append(lobj)
if flat:
if not conv_res:
result[typ][objattr] = \
OpServerUtils.uve_attr_flatten(union_res)
else:
result[typ][objattr] = conv_res
else:
result[typ][objattr] = union_res
elif self._is_append(self._state[key][typ][objattr]):
result[typ][objattr] = self._append_agg(
self._state[key][typ][objattr])
append_res = ParallelAggregator.consolidate_list(
result, typ, objattr)
if flat:
result[typ][objattr] =\
OpServerUtils.uve_attr_flatten(append_res)
else:
result[typ][objattr] = append_res
else:
default_res = self._default_agg(
self._state[key][typ][objattr])
if flat:
if (len(default_res) == 1):
result[typ][objattr] =\
OpServerUtils.uve_attr_flatten(
default_res[0][0])
else:
nres = []
for idx in range(len(default_res)):
nres.append(default_res[idx])
nres[idx][0] =\
OpServerUtils.uve_attr_flatten(
default_res[idx][0])
result[typ][objattr] = nres
else:
result[typ][objattr] = default_res
except KeyError:
pass
return result
if __name__ == '__main__':
uveserver = UVEServer(None, 0, None, None)
gevent.spawn(uveserver.run)
_, uve_state = uveserver.get_uve("abc-corp:vn02", False)
print json.dumps(uve_state, indent=4, sort_keys=True)
|
|
import os
import re
from datetime import datetime
from flask import current_app as app, render_template, request, redirect, abort, jsonify, url_for, session, Blueprint, Response, send_file
from jinja2.exceptions import TemplateNotFound
from passlib.hash import bcrypt_sha256
from sqlalchemy import union_all
from CTFd.utils import authed, is_setup, validate_url, get_config, set_config, sha512, cache, ctftime, view_after_ctf, ctf_started, \
is_admin
from CTFd.models import db, Students, Solves, Awards, Files, Pages, Teams, Challenges, Sections
views = Blueprint('views', __name__)
@views.before_request
def redirect_setup():
if request.path.startswith("/static"):
return
if not is_setup() and request.path != "/setup":
return redirect(url_for('views.setup'))
@views.route('/setup', methods=['GET', 'POST'])
def setup():
# with app.app_context():
# admin = Teams.query.filter_by(admin=True).first()
if not is_setup():
if not session.get('nonce'):
session['nonce'] = sha512(os.urandom(10))
if request.method == 'POST':
ctf_name = request.form['ctf_name']
ctf_name = set_config('ctf_name', ctf_name)
# CSS
css = set_config('css', '')
# Admin user
name = request.form['name']
email = request.form['email']
password = request.form['password']
section = Sections(0, 123)
db.session.add(section)
db.session.commit()
team = Teams("admin", section.sectionNumber)
db.session.add(team)
db.session.commit()
admin = Students(name, email, password, team.id, section.sectionNumber)
admin.admin = True
admin.banned = True
# Index page
page = Pages('index', """<div class="container main-container">
<img class="logo" src="{0}/static/original/img/logo.png" />
<h3 class="text-center">
Welcome to a cool CTF framework written by <a href="https://github.com/ColdHeat">Kevin Chung</a> of <a href="https://github.com/isislab">@isislab</a>
<br>
Modified for educational use by <a href="https://github.com/camgeehr">Cameron Geehr</a>, <a href="https://github.com/jaboyles">Jacob Boyles</a>, and <a href="https://github.com/bgoulds">Brian Gouldsberry</a>
</h3>
</div>""".format(request.script_root))
# max attempts per challenge
max_tries = set_config("max_tries", 0)
# Start time
start = set_config('start', None)
end = set_config('end', None)
# Challenges cannot be viewed by unregistered users
view_challenges_unregistered = set_config('view_challenges_unregistered', None)
# Allow/Disallow registration
prevent_registration = set_config('prevent_registration', None)
# Verify emails
verify_emails = set_config('verify_emails', None)
mail_server = set_config('mail_server', None)
mail_port = set_config('mail_port', None)
mail_tls = set_config('mail_tls', None)
mail_ssl = set_config('mail_ssl', None)
mail_username = set_config('mail_username', None)
mail_password = set_config('mail_password', None)
setup = set_config('setup', True)
db.session.add(page)
db.session.add(admin)
db.session.commit()
db.session.close()
app.setup = False
with app.app_context():
cache.clear()
return redirect(url_for('views.static_html'))
return render_template('setup.html', nonce=session.get('nonce'), setup=True)
return redirect(url_for('views.static_html'))
# Custom CSS handler
@views.route('/static/user.css')
def custom_css():
return Response(get_config("css"), mimetype='text/css')
# Static HTML files
@views.route("/", defaults={'template': 'index'})
@views.route("/<template>")
def static_html(template):
try:
return render_template('%s.html' % template)
except TemplateNotFound:
page = Pages.query.filter_by(route=template).first_or_404()
return render_template('page.html', content=page.html)
@views.route('/students', defaults={'page': '1'})
@views.route('/students/<int:page>')
def students(page):
page = abs(int(page))
results_per_page = 50
page_start = results_per_page * (page - 1)
page_end = results_per_page * (page - 1) + results_per_page
if get_config('verify_emails'):
count = Students.query.filter_by(verified=True, banned=False).count()
students = Students.query.filter_by(verified=True, banned=False).slice(page_start, page_end).all()
else:
count = Students.query.filter_by(banned=False).count()
students = Students.query.filter_by(banned=False).slice(page_start, page_end).all()
pages = int(count / results_per_page) + (count % results_per_page > 0)
return render_template('students.html', students=students, student_pages=pages, curr_page=page)
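# Page-count arithmetic used above, shown for a hypothetical 101 visible
# students with 50 results per page:
#   int(101 / 50) + (101 % 50 > 0) == 2 + True == 3 pages
# The boolean term adds one page whenever there is a partial final page.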
@views.route('/student/<int:studentid>', methods=['GET', 'POST'])
def student(studentid):
if get_config('view_scoreboard_if_authed') and not authed():
return redirect(url_for('auth.login', next=request.path))
if not is_admin() and session['id'] != studentid:
return render_template('errors/403.html')
user = Students.query.filter_by(id=studentid).first_or_404()
solves = Solves.query.filter_by(studentid=studentid)
awards = Awards.query.filter_by(studentid=studentid).all()
score = user.score()
place = user.place()
db.session.close()
if request.method == 'GET':
return render_template('student.html', solves=solves, awards=awards, student=user, score=score, place=place)
elif request.method == 'POST':
json = {'solves': []}
for x in solves:
json['solves'].append({'id': x.id, 'chal': x.chalid, 'student': x.studentid})
return jsonify(json)
@views.route('/getStudent/<int:studentid>', methods=['GET'])
def getStudent(studentid):
student = Students.query.filter_by(id=studentid).first()
json_data = {
'id' : student.id,
'name' : student.name,
'email' : student.email,
'teamid' : student.teamid,
'password' : student.password,
'bracket' : student.bracket,
'banned' : student.banned,
'verified' : student.verified,
'admin' : student.admin,
'joined' : student.joined,
'sectionid' : student.sectionid
}
db.session.close()
return jsonify(json_data)
@views.route('/profile', methods=['POST', 'GET'])
def profile():
if authed():
if request.method == "POST":
errors = []
name = request.form.get('name')
email = request.form.get('email')
user = Students.query.filter_by(id=session['id']).first()
if not get_config('prevent_name_change'):
names = Students.query.filter_by(name=name).first()
name_len = len(request.form['name']) == 0
emails = Students.query.filter_by(email=email).first()
valid_email = re.match(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", email)
if ('password' in request.form.keys() and not len(request.form['password']) == 0) and \
(not bcrypt_sha256.verify(request.form.get('confirm').strip(), user.password)):
errors.append("Your old password doesn't match what we have.")
if not valid_email:
errors.append("That email doesn't look right")
if not get_config('prevent_name_change') and names and name != session['username']:
errors.append('That student name is already taken')
if emails and emails.id != session['id']:
errors.append('That email has already been used')
if not get_config('prevent_name_change') and name_len:
errors.append('Pick a longer student name')
if len(errors) > 0:
return render_template('profile.html', name=name, email=email, errors=errors)
else:
student = Students.query.filter_by(id=session['id']).first()
if not get_config('prevent_name_change'):
student.name = name
if student.email != email.lower():
student.email = email.lower()
if get_config('verify_emails'):
student.verified = False
session['username'] = student.name
if 'password' in request.form.keys() and not len(request.form['password']) == 0:
student.password = bcrypt_sha256.encrypt(request.form.get('password'))
db.session.commit()
db.session.close()
return redirect(url_for('views.profile'))
else:
user = Students.query.filter_by(id=session['id']).first()
name = user.name
email = user.email
prevent_name_change = get_config('prevent_name_change')
confirm_email = get_config('verify_emails') and not user.verified
return render_template('profile.html', name=name, email=email, prevent_name_change=prevent_name_change,
confirm_email=confirm_email)
else:
return redirect(url_for('auth.login'))
@views.route('/files', defaults={'path': ''})
@views.route('/files/<path:path>')
def file_handler(path):
f = Files.query.filter_by(location=path).first_or_404()
if f.chal:
if not is_admin():
if not ctftime():
if view_after_ctf() and ctf_started():
pass
else:
abort(403)
return send_file(os.path.join(app.root_path, 'uploads', f.location))
@views.route('/teams', defaults={'page': '1'})
@views.route('/teams/<int:page>')
def teams(page):
if get_config('view_scoreboard_if_authed') or not authed():
return redirect(url_for('auth.login', next=request.path))
studentid = session['id']
page = abs(int(page))
results_per_page = 50
page_start = results_per_page * (page - 1)
page_end = results_per_page * (page - 1) + results_per_page
student = Students.query.filter_by(id=studentid).first()
count = Teams.query.filter_by().count()
teams = Teams.query.filter_by(sectionNumber=student.sectionid).slice(page_start, page_end).all()
pages = int(count / results_per_page) + (count % results_per_page > 0)
return render_template('teams.html', teams=teams, team_pages=pages, curr_page=page)
@views.route('/team/<int:teamid>')
def team(teamid):
if get_config('view_scoreboard_if_authed') and not authed():
return redirect(url_for('auth.login', next=request.path))
team = Teams.query.filter_by(id=teamid).first()
student = Students.query.filter_by(id = session['id']).first()
if student.sectionid != team.sectionNumber:
return render_template('errors/403.html')
students = Students.query.filter_by(teamid=teamid)
# get solves data by team id
# get awards data by team id
challenges = Challenges.query.all()
db.session.close()
if request.method == 'GET':
return render_template('team.html', team=team, students=students, challenges=challenges)
elif request.method == 'POST':
return None # return solves data by team id
@views.route('/team/<int:teamid>/challenges')
def teamChallenges(teamid):
team = Teams.query.filter_by(id=teamid).first()
challenges = team.challenges()
return render_template('tChallenges.html', team=team, challenges=challenges)
@views.route('/team/<int:teamid>/solves')
def teamSolves(teamid):
team = Teams.query.filter_by(id=teamid).first()
solves = team.solves()
return render_template('tSolves.html', team=team, solves=solves)
|
|
# -*- coding: utf-8 -*-
"""
twython.endpoints
~~~~~~~~~~~~~~~~~
This module provides a mixin for a :class:`Twython <Twython>` instance.
Parameters that need to be embedded in the API url just need to be passed
as a keyword argument.
e.g. Twython.retweet(id=12345)
Official documentation for Twitter API endpoints can be found at:
https://developer.twitter.com/en/docs/api-reference-index
"""
import json
import os
import warnings
from io import BytesIO
from time import sleep
#try:
#from StringIO import StringIO
#except ImportError:
#from io import StringIO
from .advisory import TwythonDeprecationWarning
class EndpointsMixin(object):
# Timelines
def get_mentions_timeline(self, **params):
"""Returns the 20 most recent mentions (tweets containing a users's
@screen_name) for the authenticating user.
Docs:
https://developer.twitter.com/en/docs/tweets/timelines/api-reference/get-statuses-mentions_timeline
"""
return self.get('statuses/mentions_timeline', params=params)
get_mentions_timeline.iter_mode = 'id'
def get_user_timeline(self, **params):
"""Returns a collection of the most recent Tweets posted by the user
indicated by the ``screen_name`` or ``user_id`` parameters.
Docs:
https://developer.twitter.com/en/docs/tweets/timelines/api-reference/get-statuses-user_timeline
"""
return self.get('statuses/user_timeline', params=params)
get_user_timeline.iter_mode = 'id'
def get_home_timeline(self, **params):
"""Returns a collection of the most recent Tweets and retweets
posted by the authenticating user and the users they follow.
Docs:
https://developer.twitter.com/en/docs/tweets/timelines/api-reference/get-statuses-home_timeline
"""
return self.get('statuses/home_timeline', params=params)
get_home_timeline.iter_mode = 'id'
def retweeted_of_me(self, **params):
"""Returns the most recent tweets authored by the authenticating user
that have been retweeted by others.
Docs:
https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/get-statuses-retweets_of_me
"""
return self.get('statuses/retweets_of_me', params=params)
retweeted_of_me.iter_mode = 'id'
# Tweets
def get_retweets(self, **params):
"""Returns up to 100 of the first retweets of a given tweet.
Docs:
https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/get-statuses-retweets-id
"""
return self.get('statuses/retweets/%s' % params.get('id'),
params=params)
def show_status(self, **params):
"""Returns a single Tweet, specified by the ``id`` parameter
Docs:
https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/get-statuses-show-id
"""
return self.get('statuses/show/%s' % params.get('id'), params=params)
def lookup_status(self, **params):
"""Returns fully-hydrated tweet objects for up to 100 tweets per
request, as specified by comma-separated values passed to the ``id``
parameter.
Docs:
https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/get-statuses-lookup
"""
return self.post('statuses/lookup', params=params)
def destroy_status(self, **params):
"""Destroys the status specified by the required ``id`` parameter
Docs:
https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/post-statuses-destroy-id
"""
return self.post('statuses/destroy/%s' % params.get('id'))
def update_status(self, **params):
"""Updates the authenticating user's current status, also known as tweeting
Docs:
https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/post-statuses-update
"""
return self.post('statuses/update', params=params)
def retweet(self, **params):
"""Retweets a tweet specified by the ``id`` parameter
Docs:
https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/post-statuses-retweet-id
"""
return self.post('statuses/retweet/%s' % params.get('id'))
def update_status_with_media(self, **params): # pragma: no cover
"""Updates the authenticating user's current status and attaches media
for upload. In other words, it creates a Tweet with a picture attached.
Docs:
https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/post-statuses-update_with_media
"""
warnings.warn(
'This method is deprecated. You should use Twython.upload_media instead.',
TwythonDeprecationWarning,
stacklevel=2
)
return self.post('statuses/update_with_media', params=params)
def upload_media(self, **params):
"""Uploads media file to Twitter servers. The file will be available to be attached
to a status for 60 minutes. To attach to an update, pass a list of returned media ids
to the :meth:`update_status` method using the ``media_ids`` param.
Docs:
https://developer.twitter.com/en/docs/media/upload-media/api-reference/post-media-upload
"""
# https://developer.twitter.com/en/docs/media/upload-media/api-reference/get-media-upload-status
if params and params.get('command', '') == 'STATUS':
return self.get('https://upload.twitter.com/1.1/media/upload.json', params=params)
return self.post('https://upload.twitter.com/1.1/media/upload.json', params=params)
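# Typical flow (illustrative sketch; the file path is hypothetical): upload the
# media first, then pass the returned media_id to update_status via media_ids.
#   photo = open('/path/to/image.jpg', 'rb')
#   resp = twitter.upload_media(media=photo)
#   twitter.update_status(status='Caption', media_ids=[resp['media_id']])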
def create_metadata(self, **params):
""" Adds metadata to a media element, such as image descriptions for visually impaired.
Docs:
https://developer.twitter.com/en/docs/media/upload-media/api-reference/post-media-metadata-create
"""
params = json.dumps(params)
return self.post("https://upload.twitter.com/1.1/media/metadata/create.json", params=params)
def upload_video(self, media, media_type, media_category=None, size=None, check_progress=False):
"""Uploads video file to Twitter servers in chunks. The file will be available to be attached
to a status for 60 minutes. To attach to an update, pass a list of returned media ids
to the :meth:`update_status` method using the ``media_ids`` param.
Upload happens in 3 stages:
- INIT call with the size of the media to be uploaded (in bytes). If this is more than 15MB, Twitter will return an error.
- APPEND calls, each with a media chunk. This returns a 204 (No Content) when the chunk is received.
- FINALIZE call to complete the media upload. This returns the media_id to be used with a status update.
The Twitter media upload API expects each chunk to be no more than 5MB; we send chunks of 1MB each.
Docs:
https://developer.twitter.com/en/docs/media/upload-media/uploading-media/chunked-media-upload
"""
upload_url = 'https://upload.twitter.com/1.1/media/upload.json'
if not size:
media.seek(0, os.SEEK_END)
size = media.tell()
media.seek(0)
# Stage 1: INIT call
params = {
'command': 'INIT',
'media_type': media_type,
'total_bytes': size,
'media_category': media_category
}
response_init = self.post(upload_url, params=params)
media_id = response_init['media_id']
# Stage 2: APPEND calls with 1mb chunks
segment_index = 0
while True:
data = media.read(1*1024*1024)
if not data:
break
media_chunk = BytesIO()
media_chunk.write(data)
media_chunk.seek(0)
params = {
'command': 'APPEND',
'media_id': media_id,
'segment_index': segment_index,
'media': media_chunk,
}
self.post(upload_url, params=params)
segment_index += 1
# Stage 3: FINALIZE call to complete upload
params = {
'command': 'FINALIZE',
'media_id': media_id
}
response = self.post(upload_url, params=params)
# Only get the status if explicitly asked to
# Default to False
if check_progress:
# Stage 4: STATUS call if still processing
params = {
'command': 'STATUS',
'media_id': media_id
}
# Handle the case where media_category is not set but check_progress=True:
# the response then has no processing_info, so .get('state') raises AttributeError
try:
processing_state = response.get('processing_info').get('state')
except AttributeError:
return response
if processing_state:
while processing_state in ('pending', 'in_progress'):
# get the secs to wait
check_after_secs = response.get('processing_info').get('check_after_secs')
if check_after_secs:
sleep(check_after_secs)
response = self.get(upload_url, params=params)
# get new state after waiting
processing_state = response.get('processing_info').get('state')
return response
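# Chunked-upload sketch matching the 3-stage flow documented above
# (illustrative only; the file path and media_category are example values):
#   video = open('/path/to/clip.mp4', 'rb')
#   resp = twitter.upload_video(media=video, media_type='video/mp4',
#                               media_category='tweet_video', check_progress=True)
#   twitter.update_status(status='My clip', media_ids=[resp['media_id']])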
def get_oembed_tweet(self, **params):
"""Returns information allowing the creation of an embedded
representation of a Tweet on third party sites.
Docs:
https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/get-statuses-oembed
"""
return self.get('statuses/oembed', params=params)
def get_retweeters_ids(self, **params):
"""Returns a collection of up to 100 user IDs belonging to users who
have retweeted the tweet specified by the ``id`` parameter.
Docs:
https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/get-statuses-retweeters-ids
"""
return self.get('statuses/retweeters/ids', params=params)
get_retweeters_ids.iter_mode = 'cursor'
get_retweeters_ids.iter_key = 'ids'
# Search
def search(self, **params):
"""Returns a collection of relevant Tweets matching a specified query.
Docs:
https://developer.twitter.com/en/docs/tweets/search/api-reference/get-search-tweets
"""
return self.get('search/tweets', params=params)
search.iter_mode = 'id'
search.iter_key = 'statuses'
search.iter_metadata = 'search_metadata'
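# The iter_mode/iter_key/iter_metadata attributes set above drive Twython's
# generator-based pagination. A hedged sketch, assuming the Twython.cursor
# helper defined elsewhere in the library:
#   results = twitter.cursor(twitter.search, q='python', count=100)
#   for tweet in results:
#       print(tweet['id_str'])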
# Direct Messages
def get_direct_messages(self, **params):
"""Returns the 20 most recent direct messages sent to the authenticating user.
Docs:
https://developer.twitter.com/en/docs/direct-messages/sending-and-receiving/api-reference/get-messages
"""
return self.get('direct_messages', params=params)
get_direct_messages.iter_mode = 'id'
def get_sent_messages(self, **params):
"""Returns the 20 most recent direct messages sent by the authenticating user.
Docs:
https://developer.twitter.com/en/docs/direct-messages/sending-and-receiving/api-reference/get-sent-message
"""
return self.get('direct_messages/sent', params=params)
get_sent_messages.iter_mode = 'id'
def get_direct_message(self, **params):
"""Returns a single direct message, specified by an ``id`` parameter.
Docs:
https://developer.twitter.com/en/docs/direct-messages/sending-and-receiving/api-reference/get-message
"""
return self.get('direct_messages/show', params=params)
def destroy_direct_message(self, **params):
"""Destroys the direct message specified in the required ``id`` parameter
Docs:
https://developer.twitter.com/en/docs/direct-messages/sending-and-receiving/api-reference/delete-message
"""
return self.post('direct_messages/destroy', params=params)
def send_direct_message(self, **params):
"""Sends a new direct message to the specified user from the
authenticating user.
Docs:
https://developer.twitter.com/en/docs/direct-messages/sending-and-receiving/api-reference/new-message
"""
return self.post('direct_messages/new', params=params)
# Friends & Followers
def get_user_ids_of_blocked_retweets(self, **params):
"""Returns a collection of user_ids that the currently authenticated
user does not want to receive retweets from.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/get-friendships-no_retweets-ids
"""
return self.get('friendships/no_retweets/ids', params=params)
def get_friends_ids(self, **params):
"""Returns a cursored collection of user IDs for every user the
specified user is following (otherwise known as their "friends").
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/get-friends-ids
"""
return self.get('friends/ids', params=params)
get_friends_ids.iter_mode = 'cursor'
get_friends_ids.iter_key = 'ids'
def get_followers_ids(self, **params):
"""Returns a cursored collection of user IDs for every user
following the specified user.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/get-followers-ids
"""
return self.get('followers/ids', params=params)
get_followers_ids.iter_mode = 'cursor'
get_followers_ids.iter_key = 'ids'
def lookup_friendships(self, **params):
"""Returns the relationships of the authenticating user to the
comma-separated list of up to 100 ``screen_names`` or ``user_ids`` provided.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/get-friendships-lookup
"""
return self.get('friendships/lookup', params=params)
def get_incoming_friendship_ids(self, **params):
"""Returns a collection of numeric IDs for every user who has a
pending request to follow the authenticating user.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/get-friendships-incoming
"""
return self.get('friendships/incoming', params=params)
get_incoming_friendship_ids.iter_mode = 'cursor'
get_incoming_friendship_ids.iter_key = 'ids'
def get_outgoing_friendship_ids(self, **params):
"""Returns a collection of numeric IDs for every protected user for
whom the authenticating user has a pending follow request.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/get-friendships-outgoing
"""
return self.get('friendships/outgoing', params=params)
get_outgoing_friendship_ids.iter_mode = 'cursor'
get_outgoing_friendship_ids.iter_key = 'ids'
def create_friendship(self, **params):
"""Allows the authenticating users to follow the user specified
in the ``id`` parameter.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/post-friendships-create
"""
return self.post('friendships/create', params=params)
def destroy_friendship(self, **params):
"""Allows the authenticating user to unfollow the user specified
in the ``id`` parameter.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/post-friendships-destroy
"""
return self.post('friendships/destroy', params=params)
def update_friendship(self, **params):
"""Allows one to enable or disable retweets and device notifications
from the specified user.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/post-friendships-update
"""
return self.post('friendships/update', params=params)
def show_friendship(self, **params):
"""Returns detailed information about the relationship between two
arbitrary users.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/get-friendships-show
"""
return self.get('friendships/show', params=params)
def get_friends_list(self, **params):
"""Returns a cursored collection of user objects for every user the
specified user is following (otherwise known as their "friends").
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/get-friends-list
"""
return self.get('friends/list', params=params)
get_friends_list.iter_mode = 'cursor'
get_friends_list.iter_key = 'users'
def get_followers_list(self, **params):
"""Returns a cursored collection of user objects for users
following the specified user.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/get-followers-list
"""
return self.get('followers/list', params=params)
get_followers_list.iter_mode = 'cursor'
get_followers_list.iter_key = 'users'
# Users
def get_account_settings(self, **params):
"""Returns settings (including current trend, geo and sleep time
information) for the authenticating user.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/manage-account-settings/api-reference/get-account-settings
"""
return self.get('account/settings', params=params)
def verify_credentials(self, **params):
"""Returns an HTTP 200 OK response code and a representation of the
requesting user if authentication was successful; returns a 401 status
code and an error message if not.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/manage-account-settings/api-reference/get-account-verify_credentials
"""
return self.get('account/verify_credentials', params=params)
def update_account_settings(self, **params):
"""Updates the authenticating user's settings.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/manage-account-settings/api-reference/post-account-settings
"""
return self.post('account/settings', params=params)
def update_delivery_service(self, **params):
"""Sets which device Twitter delivers updates to for the authenticating user.
Docs:
https://dev.twitter.com/docs/api/1.1/post/account/update_delivery_device
"""
return self.post('account/update_delivery_device', params=params)
def update_profile(self, **params):
"""Sets values that users are able to set under the "Account" tab of their
settings page.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/manage-account-settings/api-reference/post-account-update_profile
"""
return self.post('account/update_profile', params=params)
def update_profile_banner_image(self, **params): # pragma: no cover
"""Updates the authenticating user's profile background image.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/manage-account-settings/api-reference/post-account-update_profile_background_image
"""
return self.post('account/update_profile_banner', params=params)
def update_profile_colors(self, **params): # pragma: no cover
"""Sets one or more hex values that control the color scheme of the
authenticating user's profile page on twitter.com.
This method is deprecated, replaced by the ``profile_link_color``
parameter to :meth:`update_profile`.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/manage-account-settings/api-reference/post-account-update_profile
"""
warnings.warn(
'This method is deprecated. You should use the'
' profile_link_color parameter in Twython.update_profile instead.',
TwythonDeprecationWarning,
stacklevel=2
)
return self.post('account/update_profile_colors', params=params)
def update_profile_image(self, **params): # pragma: no cover
"""Updates the authenticating user's profile image.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/manage-account-settings/api-reference/post-account-update_profile_image
"""
return self.post('account/update_profile_image', params=params)
def list_blocks(self, **params):
"""Returns a collection of user objects that the authenticating user
is blocking.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/mute-block-report-users/api-reference/get-blocks-list
"""
return self.get('blocks/list', params=params)
list_blocks.iter_mode = 'cursor'
list_blocks.iter_key = 'users'
def list_block_ids(self, **params):
"""Returns an array of numeric user ids the authenticating user is blocking.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/mute-block-report-users/api-reference/get-blocks-ids
"""
return self.get('blocks/ids', params=params)
list_block_ids.iter_mode = 'cursor'
list_block_ids.iter_key = 'ids'
def create_block(self, **params):
"""Blocks the specified user from following the authenticating user.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/mute-block-report-users/api-reference/post-blocks-create
"""
return self.post('blocks/create', params=params)
def destroy_block(self, **params):
"""Un-blocks the user specified in the ``id`` parameter for the
authenticating user.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/mute-block-report-users/api-reference/post-blocks-destroy
"""
return self.post('blocks/destroy', params=params)
def lookup_user(self, **params):
"""Returns fully-hydrated user objects for up to 100 users per request,
as specified by comma-separated values passed to the ``user_id`` and/or
``screen_name`` parameters.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/get-users-lookup
"""
return self.get('users/lookup', params=params)
def show_user(self, **params):
"""Returns a variety of information about the user specified by the
required ``user_id`` or ``screen_name`` parameter.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/get-users-show
"""
return self.get('users/show', params=params)
def search_users(self, **params):
"""Provides a simple, relevance-based search interface to public user
accounts on Twitter.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/get-users-search
"""
return self.get('users/search', params=params)
def get_contributees(self, **params):
"""Returns a collection of users that the specified user can "contribute" to.
Docs: https://dev.twitter.com/docs/api/1.1/get/users/contributees
"""
return self.get('users/contributees', params=params)
def get_contributors(self, **params):
"""Returns a collection of users who can contribute to the specified account.
Docs: https://dev.twitter.com/docs/api/1.1/get/users/contributors
"""
return self.get('users/contributors', params=params)
def remove_profile_banner(self, **params):
"""Removes the uploaded profile banner for the authenticating user.
Returns HTTP 200 upon success.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/manage-account-settings/api-reference/post-account-remove_profile_banner
"""
return self.post('account/remove_profile_banner', params=params)
def update_profile_background_image(self, **params):
"""Uploads a profile banner on behalf of the authenticating user.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/manage-account-settings/api-reference/post-account-update_profile_banner
"""
return self.post('account/update_profile_background_image',
params=params)
def get_profile_banner_sizes(self, **params):
"""Returns a map of the available size variations of the specified
user's profile banner.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/manage-account-settings/api-reference/get-users-profile_banner
"""
return self.get('users/profile_banner', params=params)
def list_mutes(self, **params):
"""Returns a collection of user objects that the authenticating user
is muting.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/mute-block-report-users/api-reference/get-mutes-users-list
"""
return self.get('mutes/users/list', params=params)
list_mutes.iter_mode = 'cursor'
list_mutes.iter_key = 'users'
def list_mute_ids(self, **params):
"""Returns an array of numeric user ids the authenticating user
is muting.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/mute-block-report-users/api-reference/get-mutes-users-ids
"""
return self.get('mutes/users/ids', params=params)
list_mute_ids.iter_mode = 'cursor'
list_mute_ids.iter_key = 'ids'
def create_mute(self, **params):
"""Mutes the specified user, preventing their tweets appearing
in the authenticating user's timeline.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/mute-block-report-users/api-reference/post-mutes-users-create
"""
return self.post('mutes/users/create', params=params)
def destroy_mute(self, **params):
"""Un-mutes the user specified in the user or ``id`` parameter for
the authenticating user.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/mute-block-report-users/api-reference/post-mutes-users-destroy
"""
return self.post('mutes/users/destroy', params=params)
# Suggested Users
def get_user_suggestions_by_slug(self, **params):
"""Access the users in a given category of the Twitter suggested user list.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/get-users-suggestions-slug
"""
return self.get('users/suggestions/%s' % params.get('slug'),
params=params)
def get_user_suggestions(self, **params):
"""Access to Twitter's suggested user list.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/get-users-suggestions
"""
return self.get('users/suggestions', params=params)
def get_user_suggestions_statuses_by_slug(self, **params):
"""Access the users in a given category of the Twitter suggested user
list and return their most recent status if they are not a protected
user.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/get-users-suggestions-slug-members
"""
return self.get('users/suggestions/%s/members' % params.get('slug'),
params=params)
# Favorites
def get_favorites(self, **params):
"""Returns the 20 most recent Tweets favorited by the authenticating
or specified user.
Docs:
https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/get-favorites-list
"""
return self.get('favorites/list', params=params)
get_favorites.iter_mode = 'id'
def destroy_favorite(self, **params):
"""Un-favorites the status specified in the ``id`` parameter as the
authenticating user.
Docs:
https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/post-favorites-destroy
"""
return self.post('favorites/destroy', params=params)
def create_favorite(self, **params):
"""Favorites the status specified in the ``id`` parameter as the
authenticating user.
Docs:
https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/post-favorites-create
"""
return self.post('favorites/create', params=params)
# Lists
def show_lists(self, **params):
"""Returns all lists the authenticating or specified user subscribes to,
including their own.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/create-manage-lists/api-reference/get-lists-list
"""
return self.get('lists/list', params=params)
def get_list_statuses(self, **params):
"""Returns a timeline of tweets authored by members of the specified list.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/create-manage-lists/api-reference/get-lists-statuses
"""
return self.get('lists/statuses', params=params)
get_list_statuses.iter_mode = 'id'
def delete_list_member(self, **params):
"""Removes the specified member from the list.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/create-manage-lists/api-reference/post-lists-members-destroy
"""
return self.post('lists/members/destroy', params=params)
def get_list_memberships(self, **params):
"""Returns the lists the specified user has been added to.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/create-manage-lists/api-reference/get-lists-memberships
"""
return self.get('lists/memberships', params=params)
get_list_memberships.iter_mode = 'cursor'
get_list_memberships.iter_key = 'lists'
def get_list_subscribers(self, **params):
"""Returns the subscribers of the specified list.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/create-manage-lists/api-reference/get-lists-subscribers
"""
return self.get('lists/subscribers', params=params)
get_list_subscribers.iter_mode = 'cursor'
get_list_subscribers.iter_key = 'users'
def subscribe_to_list(self, **params):
"""Subscribes the authenticated user to the specified list.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/create-manage-lists/api-reference/post-lists-subscribers-create
"""
return self.post('lists/subscribers/create', params=params)
def is_list_subscriber(self, **params):
"""Check if the specified user is a subscriber of the specified list.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/create-manage-lists/api-reference/get-lists-subscribers-show
"""
return self.get('lists/subscribers/show', params=params)
def unsubscribe_from_list(self, **params):
"""Unsubscribes the authenticated user from the specified list.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/create-manage-lists/api-reference/post-lists-subscribers-destroy
"""
return self.post('lists/subscribers/destroy', params=params)
def create_list_members(self, **params):
"""Adds multiple members to a list, by specifying a comma-separated
list of member ids or screen names.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/create-manage-lists/api-reference/post-lists-members-create_all
"""
return self.post('lists/members/create_all', params=params)
def is_list_member(self, **params):
"""Check if the specified user is a member of the specified list.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/create-manage-lists/api-reference/get-lists-members-show
"""
return self.get('lists/members/show', params=params)
def get_list_members(self, **params):
"""Returns the members of the specified list.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/create-manage-lists/api-reference/get-lists-members
"""
return self.get('lists/members', params=params)
get_list_members.iter_mode = 'cursor'
get_list_members.iter_key = 'users'
def add_list_member(self, **params):
"""Add a member to a list.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/create-manage-lists/api-reference/post-lists-members-create
"""
return self.post('lists/members/create', params=params)
def delete_list(self, **params):
"""Deletes the specified list.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/create-manage-lists/api-reference/post-lists-destroy
"""
return self.post('lists/destroy', params=params)
def update_list(self, **params):
"""Updates the specified list.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/create-manage-lists/api-reference/post-lists-update
"""
return self.post('lists/update', params=params)
def create_list(self, **params):
"""Creates a new list for the authenticated user.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/create-manage-lists/api-reference/post-lists-create
"""
return self.post('lists/create', params=params)
def get_specific_list(self, **params):
"""Returns the specified list.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/create-manage-lists/api-reference/get-lists-show
"""
return self.get('lists/show', params=params)
def get_list_subscriptions(self, **params):
"""Obtain a collection of the lists the specified user is subscribed to.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/create-manage-lists/api-reference/get-lists-subscriptions
"""
return self.get('lists/subscriptions', params=params)
get_list_subscriptions.iter_mode = 'cursor'
get_list_subscriptions.iter_key = 'lists'
def delete_list_members(self, **params):
"""Removes multiple members from a list, by specifying a
comma-separated list of member ids or screen names.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/create-manage-lists/api-reference/post-lists-members-destroy_all
"""
return self.post('lists/members/destroy_all', params=params)
def show_owned_lists(self, **params):
"""Returns the lists owned by the specified Twitter user.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/create-manage-lists/api-reference/get-lists-ownerships
"""
return self.get('lists/ownerships', params=params)
show_owned_lists.iter_mode = 'cursor'
show_owned_lists.iter_key = 'lists'
# Saved Searches
def get_saved_searches(self, **params):
"""Returns the authenticated user's saved search queries.
Docs:
https://developer.twitter.com/en/docs/tweets/search/api-reference/get-saved_searches-list
"""
return self.get('saved_searches/list', params=params)
def show_saved_search(self, **params):
"""Retrieve the information for the saved search represented by the given ``id``.
Docs:
https://developer.twitter.com/en/docs/tweets/search/api-reference/get-saved_searches-show-id
"""
return self.get('saved_searches/show/%s' % params.get('id'),
params=params)
def create_saved_search(self, **params):
"""Create a new saved search for the authenticated user.
Docs:
https://developer.twitter.com/en/docs/tweets/search/api-reference/post-saved_searches-create
"""
return self.post('saved_searches/create', params=params)
def destroy_saved_search(self, **params):
"""Destroys a saved search for the authenticating user.
Docs:
https://developer.twitter.com/en/docs/tweets/search/api-reference/post-saved_searches-destroy-id
"""
return self.post('saved_searches/destroy/%s' % params.get('id'),
params=params)
# Places & Geo
def get_geo_info(self, **params):
"""Returns all the information about a known place.
Docs:
https://developer.twitter.com/en/docs/geo/place-information/api-reference/get-geo-id-place_id
"""
return self.get('geo/id/%s' % params.get('place_id'), params=params)
def reverse_geocode(self, **params):
"""Given a latitude and a longitude, searches for up to 20 places
that can be used as a place_id when updating a status.
Docs:
https://developer.twitter.com/en/docs/geo/places-near-location/api-reference/get-geo-reverse_geocode
"""
return self.get('geo/reverse_geocode', params=params)
def search_geo(self, **params):
"""Search for places that can be attached to a statuses/update.
Docs:
https://developer.twitter.com/en/docs/geo/places-near-location/api-reference/get-geo-search
"""
return self.get('geo/search', params=params)
def get_similar_places(self, **params):
"""Locates places near the given coordinates which are similar in name.
Docs: https://dev.twitter.com/docs/api/1.1/get/geo/similar_places
"""
return self.get('geo/similar_places', params=params)
def create_place(self, **params): # pragma: no cover
"""Creates a new place object at the given latitude and longitude.
Docs: https://dev.twitter.com/docs/api/1.1/post/geo/place
"""
return self.post('geo/place', params=params)
# Trends
def get_place_trends(self, **params):
"""Returns the top 10 trending topics for a specific WOEID, if
trending information is available for it.
Docs:
https://developer.twitter.com/en/docs/trends/trends-for-location/api-reference/get-trends-place
"""
return self.get('trends/place', params=params)
def get_available_trends(self, **params):
"""Returns the locations that Twitter has trending topic information for.
Docs:
https://developer.twitter.com/en/docs/trends/locations-with-trending-topics/api-reference/get-trends-available
"""
return self.get('trends/available', params=params)
def get_closest_trends(self, **params):
"""Returns the locations that Twitter has trending topic information
for, closest to a specified location.
Docs:
https://developer.twitter.com/en/docs/trends/locations-with-trending-topics/api-reference/get-trends-closest
"""
return self.get('trends/closest', params=params)
# Spam Reporting
def report_spam(self, **params): # pragma: no cover
"""Report the specified user as a spam account to Twitter.
Docs:
https://developer.twitter.com/en/docs/accounts-and-users/mute-block-report-users/api-reference/post-users-report_spam
"""
return self.post('users/report_spam', params=params)
# OAuth
def invalidate_token(self, **params): # pragma: no cover
"""Allows a registered application to revoke an issued OAuth 2 Bearer
Token by presenting its client credentials.
Docs:
https://developer.twitter.com/en/docs/basics/authentication/api-reference/invalidate_token
"""
return self.post('oauth2/invalidate_token', params=params)
# Help
def get_twitter_configuration(self, **params):
"""Returns the current configuration used by Twitter
Docs:
https://developer.twitter.com/en/docs/developer-utilities/configuration/api-reference/get-help-configuration
"""
return self.get('help/configuration', params=params)
def get_supported_languages(self, **params):
"""Returns the list of languages supported by Twitter along with
their ISO 639-1 code.
Docs:
https://developer.twitter.com/en/docs/developer-utilities/supported-languages/api-reference/get-help-languages
"""
return self.get('help/languages', params=params)
def get_privacy_policy(self, **params):
"""Returns Twitter's Privacy Policy
Docs:
https://developer.twitter.com/en/docs/developer-utilities/privacy-policy/api-reference/get-help-privacy
"""
return self.get('help/privacy', params=params)
def get_tos(self, **params):
"""Return the Twitter Terms of Service
Docs:
https://developer.twitter.com/en/docs/developer-utilities/terms-of-service/api-reference/get-help-tos
"""
return self.get('help/tos', params=params)
def get_application_rate_limit_status(self, **params):
"""Returns the current rate limits for methods belonging to the
specified resource families.
Docs:
https://developer.twitter.com/en/docs/developer-utilities/rate-limit-status/api-reference/get-application-rate_limit_status
"""
return self.get('application/rate_limit_status', params=params)
# from https://developer.twitter.com/en/docs/ads/general/guides/response-codes
TWITTER_HTTP_STATUS_CODE = {
200: ('OK', 'Success!'),
304: ('Not Modified', 'There was no new data to return.'),
    400: ('Bad Request', 'The request was invalid. An accompanying \
error message will explain why. This is the status code that \
will be returned during rate limiting.'),
401: ('Unauthorized', 'Authentication credentials were missing \
or incorrect.'),
403: ('Forbidden', 'The request is understood, but it has been \
refused. An accompanying error message will explain why. \
This code is used when requests are being denied due to \
update limits.'),
404: ('Not Found', 'The URI requested is invalid or the resource \
requested, such as a user, does not exist.'),
406: ('Not Acceptable', 'Returned by the Search API when an \
invalid format is specified in the request.'),
410: ('Gone', 'This resource is gone. Used to indicate that an \
API endpoint has been turned off.'),
422: ('Unprocessable Entity', 'Returned when an image uploaded to \
POST account/update_profile_banner is unable to be processed.'),
429: ('Too Many Requests', 'Returned in API v1.1 when a request cannot \
be served due to the application\'s rate limit having been \
exhausted for the resource.'),
500: ('Internal Server Error', 'Something is broken. Please post to the \
group so the Twitter team can investigate.'),
502: ('Bad Gateway', 'Twitter is down or being upgraded.'),
503: ('Service Unavailable', 'The Twitter servers are up, but overloaded \
with requests. Try again later.'),
504: ('Gateway Timeout', 'The Twitter servers are up, but the request \
couldn\'t be serviced due to some failure within our stack. Try \
again later.'),
}
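
# A small helper sketch (added for illustration; not part of the original
# client). It shows how the status table above can be turned into a readable
# error string, e.g. when logging failed API calls.
def describe_status(status_code):
    """Return a human-readable description for a Twitter HTTP status code."""
    title, explanation = TWITTER_HTTP_STATUS_CODE.get(
        status_code, ('Unknown', 'No description is available for this code.'))
    return '%d %s: %s' % (status_code, title, explanation)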
|
|
import collections
import collections.abc

# ``collections.Iterable`` was removed from the top-level module in Python
# 3.10; restore the alias so the isinstance checks below keep working.
if not hasattr(collections, "Iterable"):
    collections.Iterable = collections.abc.Iterable
__version__ = "0.1.0"
class StataVarVals():
"""A class for intermediate values when calculating with data
variables within a Dta object or in Stata. Variables themselves
are referenced through an instance of StataVariable.
This class is meant for internal use.
Example
-------
A user can create or replace a data variable called "target" with
src.target_ = src.input1_ - 2 * src.input2_
Where `src` is either an instance of Dta or, in Stata, obtained from
`st_mirror`. When `input1_` and `input2_` attributes are looked up,
instances of StataVariable are returned (assuming variables "input1"
and "input2" exist). The calculation on the right hand side results
in an instance of StataVarVals holding the calculated values. The
assignment statement replaces values if the variable exists, or
adds a new variable to the dataset.
"""
def __init__(self, values):
self.values = values
def __setitem__(self, index, value):
self.values[index] = value
def __getitem__(self, index):
return self.values[index]
def __abs__(self):
return StataVarVals([abs(v) for v in self.values])
def __add__(self, other):
if isinstance(other, collections.Iterable):
return StataVarVals([v + o for (v,o) in zip(self.values, other)])
return StataVarVals([v + other for v in self.values])
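    # Note (added comment): __bool__, __float__ and __int__ below return a new
    # StataVarVals rather than a bool/float/int, so Python's built-in bool(),
    # float() and int() will reject them; they appear to be intended as
    # elementwise conversions to be called explicitly.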
def __bool__(self):
return StataVarVals([bool(v) for v in self.values])
def __eq__(self, other):
if isinstance(other, collections.Iterable):
values = self.values
return StataVarVals(
[values[i] == other[i] for i in range(len(self))]
)
return StataVarVals([v == other for v in self.values])
def __float__(self):
return StataVarVals([float(v) for v in self.values])
def __floordiv__(self, other):
if isinstance(other, collections.Iterable):
return StataVarVals([v // o for (v,o) in zip(self.values, other)])
return StataVarVals([v // other for v in self.values])
def __ge__(self, other):
if isinstance(other, collections.Iterable):
return StataVarVals([v >= o for (v,o) in zip(self.values, other)])
return StataVarVals([v >= other for v in self.values])
def __gt__(self, other):
if isinstance(other, collections.Iterable):
return StataVarVals([v > o for (v,o) in zip(self.values, other)])
return StataVarVals([v > other for v in self.values])
def __iadd__(self, other):
if isinstance(other, collections.Iterable):
self.values = [v + o for (v,o) in zip(self.values, other)]
else:
self.values = [v + other for v in self.values]
return self
def __ifloordiv__(self, other):
if isinstance(other, collections.Iterable):
self.values = [v // o for (v,o) in zip(self.values, other)]
else:
self.values = [v // other for v in self.values]
return self
def __imod__(self, other):
if isinstance(other, collections.Iterable):
self.values = [v % o for (v,o) in zip(self.values, other)]
else:
self.values = [v % other for v in self.values]
return self
def __imul__(self, other):
if isinstance(other, collections.Iterable):
self.values = [v * o for (v,o) in zip(self.values, other)]
else:
self.values = [v * other for v in self.values]
return self
def __int__(self):
return StataVarVals([int(v) for v in self.values])
def __ipow__(self, other):
if isinstance(other, collections.Iterable):
self.values = [v ** o for (v,o) in zip(self.values, other)]
else:
self.values = [v ** other for v in self.values]
return self
def __isub__(self, other):
if isinstance(other, collections.Iterable):
self.values = [v - o for (v,o) in zip(self.values, other)]
else:
self.values = [v - other for v in self.values]
return self
def __iter__(self):
values = self.values
for v in values:
yield v
def __itruediv__(self, other):
if isinstance(other, collections.Iterable):
self.values = [v / o for (v,o) in zip(self.values, other)]
else:
self.values = [v / other for v in self.values]
return self
def __le__(self, other):
if isinstance(other, collections.Iterable):
return StataVarVals([v <= o for (v,o) in zip(self.values, other)])
return StataVarVals([v <= other for v in self.values])
def __len__(self):
return len(self.values)
def __lt__(self, other):
if isinstance(other, collections.Iterable):
return StataVarVals([v < o for (v,o) in zip(self.values, other)])
return StataVarVals([v < other for v in self.values])
def __mod__(self, other):
if isinstance(other, collections.Iterable):
return StataVarVals([v % o for (v,o) in zip(self.values, other)])
return StataVarVals([v % other for v in self.values])
def __mul__(self, other):
if isinstance(other, collections.Iterable):
return StataVarVals([v * o for (v,o) in zip(self.values, other)])
return StataVarVals([v * other for v in self.values])
def __ne__(self, other):
if isinstance(other, collections.Iterable):
return StataVarVals([v != o for (v,o) in zip(self.values, other)])
return StataVarVals([v != other for v in self.values])
def __neg__(self):
return StataVarVals([-v for v in self.values])
def __pos__(self):
return self
def __pow__(self, other):
if isinstance(other, collections.Iterable):
return StataVarVals([v ** o for (v,o) in zip(self.values, other)])
return StataVarVals([v ** other for v in self.values])
def __radd__(self, other):
if isinstance(other, collections.Iterable):
return StataVarVals([o + v for (v,o) in zip(self.values, other)])
return StataVarVals([other + v for v in self.values])
def __rfloordiv__(self, other):
if isinstance(other, collections.Iterable):
return StataVarVals([o // v for (v,o) in zip(self.values, other)])
return StataVarVals([other // v for v in self.values])
def __rmod__(self, other):
if isinstance(other, collections.Iterable):
return StataVarVals([o % v for (v,o) in zip(self.values, other)])
return StataVarVals([other % v for v in self.values])
def __rmul__(self, other):
if isinstance(other, collections.Iterable):
return StataVarVals([o * v for (v,o) in zip(self.values, other)])
return StataVarVals([other * v for v in self.values])
def __round__(self, n=None):
return StataVarVals([round(v, n) for v in self.values])
def __rpow__(self, other):
if isinstance(other, collections.Iterable):
return StataVarVals([o ** v for (v,o) in zip(self.values, other)])
return StataVarVals([other ** v for v in self.values])
def __rsub__(self, other):
if isinstance(other, collections.Iterable):
return StataVarVals([o - v for (v,o) in zip(self.values, other)])
return StataVarVals([other - v for v in self.values])
def __rtruediv__(self, other):
if isinstance(other, collections.Iterable):
return StataVarVals([o / v for (v,o) in zip(self.values, other)])
return StataVarVals([other / v for v in self.values])
def __sub__(self, other):
if isinstance(other, collections.Iterable):
return StataVarVals([v - o for (v,o) in zip(self.values, other)])
return StataVarVals([v - other for v in self.values])
def __truediv__(self, other):
if isinstance(other, collections.Iterable):
return StataVarVals([v / o for (v,o) in zip(self.values, other)])
return StataVarVals([v / other for v in self.values])
class StataVariable(StataVarVals):
"""A class for referencing a data variable within a Dta object or in
Stata. Class instances are created when the user accesses a variable
with "src.varname_", where "src" is either a Dta instance or, in
Stata, obtained from `st_mirror`, and "varname" is the full variable
name or an unambiguous abbreviation. Any property ending with a
single underscore is assumed to be a reference to a variable.
This class is meant for internal use.
"""
def __init__(self, source, name):
self.source = source
self.name = name
@property
def values(self):
src = self.source
get = src.get
c = src.index(self.name)
return [get(r,c) for r in range(len(src))]
def __iter__(self):
src = self.source
get = src.get
c = src.index(self.name)
for r in range(len(src)):
yield get(r, c)
def __setattr__(self, name, value):
if name == "values":
src = self.source
c = src.index(self.name)
src[:, c] = value
else:
self.__dict__[name] = value
def __setitem__(self, index, value):
src = self.source
src[index, src.index(self.name)] = value
def __getitem__(self, index):
src = self.source
get = src.get
c = src.index(self.name)
if isinstance(index, int):
return get(index, c)
if isinstance(index, slice):
start, stop, step = index.indices(len(src))
index = range(start, stop, step)
elif not isinstance(index, collections.Iterable):
raise TypeError("index must be slice, iterable, or int")
return [get(i, c) for i in index]
def __len__(self):
return len(self.source)
def __str__(self):
return "variable {} of {}".format(self.name, self.source)
|
|
#!python
# -*- coding: utf-8 -*-
"""
Core Haystack Session client object interface. This file defines an abstract
interface for Project Haystack clients and is responsible for opening and
maintaining a session with the server.
"""
import logging
import hszinc
import weakref
from threading import Lock
from six import string_types
from .http import sync
from .ops import grid as grid_ops
from .ops import entity as entity_ops
from .ops import his as his_ops
from .ops import feature as feature_ops
from .entity.models.haystack import HaystackTaggingModel
class HaystackSession(object):
"""
The Haystack Session handler is responsible for presenting an API for
querying and controlling a Project Haystack server.
HaystackSession itself is the base class, which is then implemented by way
of HaystackOperation subclasses which are instantiated by the session
object before being started and returned.
These operations by default are specified by class member references
to the classes concerned.
Methods for Haystack operations return an 'Operation' object, which
    may be used in either of two ways:
- as a synchronous result placeholder by calling its `wait` method
followed by inspection of its `result` attribute.
- as an asynchronous call manager by connecting a "slot" (`callable`
that takes keyword arguments) to the `done_sig` signal.
The base class takes some arguments that control the default behaviour of
the object.
"""
# Operation references
_GET_GRID_OPERATION = grid_ops.GetGridOperation
_POST_GRID_OPERATION = grid_ops.PostGridOperation
_GET_ENTITY_OPERATION = entity_ops.GetEntityOperation
_FIND_ENTITY_OPERATION = entity_ops.FindEntityOperation
_HIS_READ_SERIES_OPERATION = his_ops.HisReadSeriesOperation
_HIS_READ_FRAME_OPERATION = his_ops.HisReadFrameOperation
_HIS_WRITE_SERIES_OPERATION = his_ops.HisWriteSeriesOperation
_HIS_WRITE_FRAME_OPERATION = his_ops.HisWriteFrameOperation
_HAS_FEATURES_OPERATION = feature_ops.HasFeaturesOperation
def __init__(
self,
uri,
api_dir,
grid_format=hszinc.MODE_ZINC,
http_client=sync.SyncHttpClient,
http_args=None,
tagging_model=HaystackTaggingModel,
log=None,
pint=False,
cache_expiry=3600.0,
):
"""
Initialise a base Project Haystack session handler.
:param uri: Base URI for the Haystack installation.
:param api_dir: Subdirectory relative to URI where API calls are made.
:param grid_format: What format to use for grids in GET/POST requests?
:param http_client: HTTP client class to use.
:param http_args: Optional HTTP client arguments to configure.
:param tagging_model: Entity Tagging model in use.
:param log: Logging object for reporting messages.
        :param pint: Configure hszinc to use basic quantity or Pint Quantity
:param cache_expiry: Number of seconds before cached data expires.
See : https://pint.readthedocs.io/ for details about pint
"""
if log is None:
log = logging.getLogger("pyhaystack.client.%s" % self.__class__.__name__)
self._log = log
if http_args is None:
http_args = {}
# Configure hszinc to use pint or not for Quantity definition
self.config_pint(pint)
if grid_format not in (hszinc.MODE_ZINC, hszinc.MODE_JSON):
raise ValueError("Unrecognised grid format %s" % grid_format)
self._grid_format = grid_format
# Create the HTTP client object
if bool(http_args.pop("debug", None)) and ("log" not in http_args):
http_args["log"] = log.getChild("http_client")
self._client = http_client(uri=uri, **http_args)
self._api_dir = api_dir
# Current in-progress authentication operation, if any.
self._auth_op = None
# Entity references, stored as weakrefs
self._entities = weakref.WeakValueDictionary()
# Tagging model in use
self._tagging_model = tagging_model(self)
# Grid cache
self._grid_lk = Lock()
self._grid_expiry = cache_expiry
self._grid_cache = {} # 'op' -> (op, expiry, grid)
# Public methods/properties
def authenticate(self, callback=None):
"""
Authenticate with the Project Haystack server. If an authentication
attempt is in progress, we return it, otherwise we instantiate a new
one.
"""
if self._auth_op is not None:
auth_op = self._auth_op()
else:
auth_op = None
new = auth_op is None
if new:
auth_op = self._AUTH_OPERATION(self)
auth_op.done_sig.connect(self._on_authenticate_done)
if callback is not None:
if auth_op.is_done:
# Already done
return callback(auth_op)
else:
auth_op.done_sig.connect(callback)
if new:
auth_op.go()
self._auth_op = weakref.ref(auth_op)
return auth_op
def about(self, cache=True, callback=None):
"""
Retrieve the version information of this Project Haystack server.
"""
return self._on_about(cache=cache, callback=callback)
def ops(self, cache=True, callback=None):
"""
Retrieve the operations supported by this Project Haystack server.
"""
return self._on_ops(cache=cache, callback=callback)
def formats(self, cache=True, callback=None):
"""
Retrieve the grid formats supported by this Project Haystack server.
"""
return self._on_formats(cache=cache, callback=callback)
def read(self, ids=None, filter_expr=None, limit=None, callback=None):
"""
Retrieve information on entities matching the given criteria.
Either ids or filter_expr may be given. ids may be given as a
list or as a single ID string/reference.
filter_expr is given as a string. pyhaystack.util.filterbuilder
        may be useful for generating these programmatically.
:param id: ID of a single entity to retrieve
:param ids: IDs of many entities to retrieve as a list
:param filter_expr: A filter expression that describes the entities
of interest.
:param limit: A limit on the number of entities to return.
"""
return self._on_read(
ids=ids, filter_expr=filter_expr, limit=limit, callback=callback
)
def nav(self, nav_id=None, callback=None):
"""
        The nav op is used to navigate a project for learning and discovery. This
operation allows servers to expose the database in a human-friendly
tree (or graph) that can be explored.
"""
return self._on_nav(nav_id=nav_id, callback=callback)
def watch_sub(
self, points, watch_id=None, watch_dis=None, lease=None, callback=None
):
"""
This creates a new watch with debug string watch_dis, identifier
watch_id (string) and a lease time of lease (integer) seconds. points
is a list of strings, Entity objects or hszinc.Ref objects.
"""
return self._on_watch_sub(
points=points,
watch_id=watch_id,
watch_dis=watch_dis,
lease=lease,
callback=callback,
)
def watch_unsub(self, watch, points=None, callback=None):
"""
watch is either the value of watch_id given when creating a watch, or
an instance of a Watch object.
If points is not None, it is a list of strings, Entity objects or
hszinc.Ref objects which will be removed from the Watch object.
Otherwise, it closes the Watch object.
"""
return self._on_watch_unsub(watch=watch, points=points, callback=callback)
def watch_poll(self, watch, refresh=False, callback=None):
"""
watch is either the value of watch_id given when creating a watch, or
an instance of a Watch object.
If refresh is True, then all points on the watch will be updated, not
just those that have changed since the last poll.
"""
return self._on_watch_poll(watch=watch, refresh=refresh, callback=callback)
def point_write(
self, point, level=None, val=None, who=None, duration=None, callback=None
):
"""
point is either the ID of the writeable point entity, or an instance of
the writeable point entity to retrieve the write status of or write a
value to.
        If level is None, the other parameters must be None too and the
        write status of the point is retrieved. Otherwise, a write is
        performed to the nominated point.
"""
who = who or self._username
return self._on_point_write(
point=point,
level=level,
val=val,
who=who,
duration=duration,
callback=callback,
)
def his_read(self, point, rng, callback=None):
"""
point is either the ID of the historical point entity, or an instance
        of the historical point entity to read historical data from. rng is
either a string describing a time range (e.g. "today", "yesterday"), a
datetime.date object (providing all samples on the nominated day), a
datetime.datetime (providing all samples since the nominated time) or a
slice of datetime.dates or datetime.datetimes.
"""
return self._on_his_read(point=point, rng=rng, callback=callback)
def his_write(self, point, timestamp_records, callback=None):
"""
point is either the ID of the writeable historical point entity, or an
instance of the writeable historical point entity to write historical
data to. timestamp_records should be a dict mapping timestamps
(datetime.datetime) to the values to be written at those times, or a
Pandas Series object.
"""
return self._on_his_write(
point=point, timestamp_records=timestamp_records, callback=callback
)
def invoke_action(self, entity, action, callback=None, **kwargs):
"""
entity is either the ID of the entity, or an instance of the entity to
invoke the named action on. Keyword arguments give any additional
parameters required for the user action.
"""
return self._on_invoke_action(
entity=entity, action=action, callback=callback, action_args=kwargs
)
def get_entity(self, ids, refresh=False, single=None, callback=None):
"""
Retrieve instances of entities, possibly refreshing them.
:param ids: A single entity ID, or a list of entity IDs.
:param refresh: Do we refresh the tags on those entities?
:param single: Are we expecting a single entity? Defaults to
True if `ids` is not a list.
:param callback: Asynchronous result callback.
"""
if isinstance(ids, string_types) or isinstance(ids, hszinc.Ref):
# Make sure we always pass a list.
ids = [ids]
if single is None:
single = True
elif single is None:
single = False
op = self._GET_ENTITY_OPERATION(self, ids, refresh, single)
if callback is not None:
op.done_sig.connect(callback)
op.go()
return op
def find_entity(self, filter_expr, limit=None, single=False, callback=None):
"""
Retrieve instances of entities that match a filter expression.
:param filter_expr: The filter expression to search for.
:param limit: Optional limit to number of entities retrieved.
        :param single: Are we expecting a single entity? Defaults to False.
:param callback: Asynchronous result callback.
"""
op = self._FIND_ENTITY_OPERATION(self, filter_expr, limit, single)
if callback is not None:
op.done_sig.connect(callback)
op.go()
return op
def his_read_series(self, point, rng, tz=None, series_format=None, callback=None):
"""
Read the historical data of the given point and return it as a series.
:param point: Haystack 'point' entity to read the data from
:param rng: Historical read range for the 'point'
:param tz: Optional timezone to translate timestamps to
:param series_format: Optional desired format for the series
"""
if series_format is None:
if his_ops.HAVE_PANDAS:
series_format = self._HIS_READ_SERIES_OPERATION.FORMAT_SERIES
else:
series_format = self._HIS_READ_SERIES_OPERATION.FORMAT_LIST
op = self._HIS_READ_SERIES_OPERATION(self, point, rng, tz, series_format)
if callback is not None:
op.done_sig.connect(callback)
op.go()
return op
def his_write_series(self, point, series, tz=None, callback=None):
"""
Write the historical data of the given point.
:param point: Haystack 'point' entity to read the data from
:param series: Historical series data to write
:param tz: Optional timezone to translate timestamps to
"""
op = self._HIS_WRITE_SERIES_OPERATION(self, point, series, tz)
if callback is not None:
op.done_sig.connect(callback)
op.go()
return op
def his_read_frame(self, columns, rng, tz=None, frame_format=None, callback=None):
"""
Read the historical data of multiple given points and return
them as a data frame.
:param columns: A list of Haystack 'point' instances or a dict mapping
the column label to the Haystack 'point' instance.
:param rng: Historical read range for the 'point'
:param tz: Optional timezone to translate timestamps to
:param frame_format: Optional desired format for the data frame
"""
if frame_format is None:
if his_ops.HAVE_PANDAS:
frame_format = self._HIS_READ_FRAME_OPERATION.FORMAT_FRAME
else:
frame_format = self._HIS_READ_FRAME_OPERATION.FORMAT_LIST
op = self._HIS_READ_FRAME_OPERATION(self, columns, rng, tz, frame_format)
if callback is not None:
op.done_sig.connect(callback)
op.go()
return op
def his_write_frame(self, frame, columns=None, tz=None, callback=None):
"""
Write the historical data of multiple given points.
:param frame: Data frame to write to. Columns either list explicit
entity IDs or column aliases which are mapped in the
columns parameter.
:param columns: If frame does not list explicit IDs, this should be a
dict mapping the column names to either entity IDs or
entity instances.
:param tz: Reference timestamp to use for writing, default is UTC.
"""
op = self._HIS_WRITE_FRAME_OPERATION(self, columns, frame, tz)
if callback is not None:
op.done_sig.connect(callback)
op.go()
return op
@property
def site(self):
"""
        This helper will return the first site found on the server.
        This covers the typical case of having one site per server.
"""
sites = self.find_entity("site").result
return sites[list(sites.keys())[0]]
@property
def sites(self):
"""
This helper will return all sites found on the server.
"""
sites = self.find_entity("site").result
return sites
# Extension feature support.
FEATURE_HISREAD_MULTI = "hisRead/multi" # Multi-point hisRead
FEATURE_HISWRITE_MULTI = "hisWrite/multi" # Multi-point hisWrite
FEATURE_ID_UUID = "id_uuid"
def has_features(self, features, cache=True, callback=None):
"""
Determine if a given feature is supported. This is a helper function
for determining if the server implements a given feature. The feature
is given as a string in the form of "base_feature/extension".
Result is a dict of features and the states (boolean).
:param features: Features to check for.
"""
op = self._HAS_FEATURES_OPERATION(self, features, cache=cache)
if callback is not None:
op.done_sig.connect(callback)
op.go()
return op
# Protected methods/properties
def _on_about(self, cache, callback, **kwargs):
return self._get_grid("about", callback, cache=cache, **kwargs)
def _on_ops(self, cache, callback, **kwargs):
return self._get_grid("ops", callback, cache=cache, **kwargs)
def _on_formats(self, cache, callback, **kwargs):
return self._get_grid("formats", callback, cache=cache, **kwargs)
def _on_read(self, ids, filter_expr, limit, callback, **kwargs):
if isinstance(ids, string_types) or isinstance(ids, hszinc.Ref):
# Make sure we always pass a list.
ids = [ids]
if bool(ids):
if filter_expr is not None:
raise ValueError("Either specify ids or filter_expr, not both")
ids = [self._obj_to_ref(r) for r in ids]
if len(ids) == 1:
# Reading a single entity
return self._get_grid("read", callback, args={"id": ids[0]}, **kwargs)
else:
# Reading several entities
grid = hszinc.Grid()
grid.column["id"] = {}
grid.extend([{"id": r} for r in ids])
return self._post_grid("read", grid, callback, **kwargs)
else:
args = {"filter": filter_expr}
if limit is not None:
args["limit"] = int(limit)
return self._get_grid("read", callback, args=args, **kwargs)
def _on_nav(self, nav_id, callback, **kwargs):
return self._get_grid("nav", callback, args={"nav_id": nav_id}, **kwargs)
def _on_watch_sub(self, points, watch_id, watch_dis, lease, callback, **kwargs):
grid = hszinc.Grid()
grid.column["id"] = {}
grid.extend([{"id": self._obj_to_ref(p)} for p in points])
if watch_id is not None:
grid.metadata["watchId"] = watch_id
if watch_dis is not None:
grid.metadata["watchDis"] = watch_dis
if lease is not None:
grid.metadata["lease"] = lease
return self._post_grid("watchSub", grid, callback, **kwargs)
def _on_watch_unsub(self, watch, points, callback, **kwargs):
grid = hszinc.Grid()
grid.column["id"] = {}
if not isinstance(watch, string_types):
watch = watch.id
grid.metadata["watchId"] = watch
if points is not None:
grid.extend([{"id": self._obj_to_ref(p)} for p in points])
else:
grid.metadata["close"] = hszinc.MARKER
return self._post_grid("watchSub", grid, callback, **kwargs)
def _on_watch_poll(self, watch, refresh, callback, **kwargs):
grid = hszinc.Grid()
grid.column["empty"] = {}
if not isinstance(watch, string_types):
watch = watch.id
grid.metadata["watchId"] = watch
return self._post_grid("watchPoll", grid, callback, **kwargs)
def _on_point_write(self, point, level, val, who, duration, callback, **kwargs):
args = {"id": self._obj_to_ref(point)}
if level is None:
if (val is not None) or (who is not None) or (duration is not None):
raise ValueError(
"If level is None, val, who and duration must " "be None too."
)
else:
args.update({"level": level, "val": val})
if who is not None:
args["who"] = who
if duration is not None:
args["duration"] = duration
return self._get_grid("pointWrite", callback, args=args, **kwargs)
        # The POST-based variant below does not work with nhaystack, so the
        # older GET-based call above is kept.
# return self._post_grid("pointWrite", grid_ops.dict_to_grid(args), callback, expect_format=hszinc.MODE_ZINC, args=args, **kwargs)
def _on_his_read(self, point, rng, callback, **kwargs):
if isinstance(rng, slice):
str_rng = ",".join([hszinc.dump_scalar(p) for p in (rng.start, rng.stop)])
elif not isinstance(rng, string_types):
str_rng = hszinc.dump_scalar(rng)
else:
# Better be valid!
# str_rng = rng
str_rng = hszinc.dump_scalar(rng, mode=hszinc.MODE_ZINC)
return self._get_grid(
"hisRead",
callback,
args={"id": self._obj_to_ref(point), "range": str_rng},
**kwargs
)
def _on_his_write(
self, point, timestamp_records, callback, post_format=hszinc.MODE_ZINC, **kwargs
):
grid = hszinc.Grid()
grid.metadata["id"] = self._obj_to_ref(point)
grid.column["ts"] = {}
grid.column["val"] = {}
if hasattr(timestamp_records, "to_dict"):
timestamp_records = timestamp_records.to_dict()
timestamp_records = list(timestamp_records.items())
timestamp_records.sort(key=lambda rec: rec[0])
for (ts, val) in timestamp_records:
grid.append({"ts": ts, "val": val})
return self._post_grid(
"hisWrite", grid, callback, post_format=post_format, **kwargs
)
def _on_invoke_action(
self,
entity,
action,
callback,
action_args,
post_format=hszinc.MODE_ZINC,
**kwargs
):
grid = hszinc.Grid()
grid.metadata["id"] = self._obj_to_ref(entity)
grid.metadata["action"] = action
for arg in action_args.keys():
grid.column[arg] = {}
grid.append(action_args)
return self._post_grid(
"invokeAction", grid, callback, post_format=post_format, **kwargs
)
def _get(self, uri, callback, api=True, **kwargs):
"""
Perform a raw HTTP GET operation. This is a convenience wrapper around
the HTTP client class that allows pre/post processing of the request by
the session instance.
"""
if api:
uri = "%s/%s" % (self._api_dir, uri)
return self._client.get(uri, callback, **kwargs)
def _get_grid(self, uri, callback, expect_format=None, cache=False, **kwargs):
"""
Perform a HTTP GET of a grid.
"""
if expect_format is None:
expect_format = self._grid_format
op = self._GET_GRID_OPERATION(
self, uri, expect_format=expect_format, cache=cache, **kwargs
)
if callback is not None:
op.done_sig.connect(callback)
op.go()
return op
def _post(
self,
uri,
callback,
body=None,
body_type=None,
body_size=None,
headers=None,
api=True,
**kwargs
):
"""
Perform a raw HTTP POST operation. This is a convenience wrapper around
the HTTP client class that allows pre/post processing of the request by
the session instance.
"""
if api:
uri = "%s/%s" % (self._api_dir, uri)
return self._client.post(
uri=uri,
callback=callback,
body=body,
body_type=body_type,
body_size=body_size,
headers=headers,
**kwargs
)
def _post_grid(
self, uri, grid, callback, post_format=None, expect_format=None, **kwargs
):
"""
Perform a HTTP POST of a grid.
"""
if expect_format is None:
expect_format = self._grid_format
if post_format is None:
post_format = self._grid_format
op = self._POST_GRID_OPERATION(
self,
uri,
grid,
expect_format=expect_format,
post_format=post_format,
**kwargs
)
if callback is not None:
op.done_sig.connect(callback)
op.go()
return op
def _obj_to_ref(self, obj):
"""
Convert an arbitrary object referring to an entity to an entity
reference.
"""
if isinstance(obj, hszinc.Ref):
return obj
if isinstance(obj, string_types):
return hszinc.Ref(obj)
if hasattr(obj, "id"):
return obj.id
raise NotImplementedError(
"Don't know how to get the ID from a %s" % obj.__class__.__name__
)
# Private methods/properties
def _on_authenticate_done(self, operation, **kwargs):
"""
Process the result of an authentication operation. This needs to be
implemented in the subclass and should, at minimum, set a flag in the
subclass to indicate the authentication state and clear the _auth_op
attribute on the base class.
"""
raise NotImplementedError("To be implemented in %s" % self.__class__.__name__)
def config_pint(self, value=False):
if value:
self._use_pint = True
else:
self._use_pint = False
hszinc.use_pint(self._use_pint)
def logout(self):
raise NotImplementedError("Must be defined depending on each implementation")
def __enter__(self):
"""Entering context manager
usage:
with WhateverSession(uri, username, password, **kwargs) as session:
# do whatever with session
"""
return self
def __exit__(self, _type, value, traceback):
"""On exit, call the logout procedure defined in the class"""
self.logout()
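
# Hedged usage sketch (added for illustration; ``MyHaystackSession`` is a
# placeholder for one of the concrete HaystackSession implementations and is
# not defined in this module). It shows the two calling styles described in
# the class docstring:
#
#     session = MyHaystackSession(uri='http://server', api_dir='api')
#
#     # synchronous style: block on the operation, then read its result
#     op = session.about()
#     op.wait()
#     print(op.result)
#
#     # asynchronous style: connect a slot to the done signal
#     def on_done(operation, **kwargs):
#         print(operation.result)
#     session.ops(callback=on_done)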
|
|
'''
"network": [
{
"index": 0,
"ipv4_gateway": "",
"name": "",
"veth_pair": "",
"mtu": "",
"ipv6_gateway": "",
"flags": "up",
"ipv4": "",
"ipv6": "",
"hwaddr": "00:16:3e:1e:89:6b",
"link": "lxcbr0",
"script_up": "",
"script_down": "",
"type": "veth"
}
'''
import jsonlib
import json
import cpyutils.iputils
import random
class NetworkDefinition(jsonlib.Serializable):
_JSON_FIELDS_required = [ 'name', 'link', 'type' ]
_JSON_FIELDS_other = [ 'gateway' ]
@classmethod
def from_json(cls, json_str):
o = jsonlib.Serializable.from_json(cls(None, None, None), json_str)
if o is None:
raise Exception("could not create object from json '%s'" % json_str)
return o
def __init__(self, name, link, _type):
self._name = name
self.link = link
self.type = _type
self.gateway = None
self._last_lease = None
self._leases = []
def _get_lease(self, lease):
if self._check_hwaddr_in_leases(lease.hwaddr): return False
if self._check_ipv4_in_leases(lease.ipv4): return False
self._leases.append(lease)
return True
def _check_hwaddr_in_leases(self, hwaddr):
for lease in self._leases:
if lease.hwaddr == hwaddr: return True
return False
def _check_ipv4_in_leases(self, ipv4):
for lease in self._leases:
if lease.ipv4 == ipv4: return True
return False
def get_lease(self):
return None
def release_lease(self, lease):
for e_lease in self._leases:
if (lease.ipv4 == e_lease.ipv4) and (lease.hwaddr == e_lease.hwaddr):
self._leases.remove(e_lease)
return True
return False
class NetworkDefinition_MAC_Prefix(NetworkDefinition):
_JSON_FIELDS_default = { 'hwaddrprefix': '40:00:00' }
@staticmethod
def gen_hex_mac_prefix(original_mac):
mac = (original_mac.upper()).strip()
parts = mac.split(':')
if len(parts) > 6:
return None
if len(parts) > 1:
# let's think that it is a : separated mac
for p in parts:
if len(p) != 2:
return None
mac = ''.join(parts)
for c in mac:
if c not in '0123456789ABCDEF':
return None
return mac
@classmethod
def from_json(cls, json_str, obj = None):
if obj is None: obj = cls(None, None, None)
o = jsonlib.Serializable.from_json(obj, json_str)
mac_prefix = cls.gen_hex_mac_prefix(o.hwaddrprefix)
if mac_prefix is None: raise Exception("Bad MAC mask format %s" % o.hwaddrprefix)
o._mac_prefix = int(mac_prefix, 16)
o._mac_tail = 0
o._mac_tail_bits = (12 - len(mac_prefix)) * 4
for i in range(0, 12 - len(mac_prefix)):
o._mac_prefix = (o._mac_prefix << 4)
o._mac_tail = (o._mac_tail << 4) | 0xf
return o
def _gen_mac(self):
new_mac = ("%x" % (self._mac_prefix | (random.getrandbits(self._mac_tail_bits) & self._mac_tail))).lower()
mac_str = ':'.join([new_mac[i:i+2] for i in range(0, len(new_mac), 2)])
return mac_str
def _gen_hw(self):
max_attempts = 10
mac = self._gen_mac()
while max_attempts > 0 and self._check_hwaddr_in_leases(mac):
mac = self._gen_mac()
max_attempts = max_attempts - 1
if max_attempts == 0:
return None
return mac
def get_lease(self):
mac = self._gen_hw()
if mac is None: return None
lease = NetworkConfiguration(self, hwaddr = mac)
if not self._get_lease(lease): return None
return lease
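
# Example (added note): with the default hwaddrprefix '40:00:00',
# gen_hex_mac_prefix() yields '400000', _mac_tail_bits becomes 24, and
# _gen_mac() therefore produces addresses of the form '40:00:00:xx:xx:xx'
# where the low 24 bits are random.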
def _iphex_to_str(iphex):
ip = []
while iphex > 0:
v = iphex & 0xff
ip.append(str(v))
iphex = iphex >> 8
return '.'.join(ip[::-1])
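
# Example (added note): _iphex_to_str(0xc0a80101) returns '192.168.1.1';
# note the helper returns an empty string for an input of 0.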
class NetworkDefinition_IP_Range(NetworkDefinition_MAC_Prefix):
_JSON_FIELDS_default = { 'hwaddrprefix': '40:00:00', 'ipv4mask': '192.168.1.1/24' }
@classmethod
def from_json(cls, json_str):
b = NetworkDefinition_MAC_Prefix.from_json(json_str, NetworkDefinition_IP_Range(None, None, None))
if b is None:
return None
o = jsonlib.Serializable.from_json(b, json_str)
o._ipv4, o._mask = cpyutils.iputils.str_to_ipmask(o.ipv4mask)
return o
def get_lease(self):
mac = self._gen_hw()
if mac is None: return None
        v = 1
        ipv4 = self._ipv4 & self._mask
        max_range = 0xffffffff - self._mask
        newip = _iphex_to_str(ipv4 | (v & max_range))
        while (v < max_range) and self._check_ipv4_in_leases(newip):
            # advance to the next host address before recomputing the candidate
            v = v + 1
            newip = _iphex_to_str(ipv4 | (v & max_range))
lease = NetworkConfiguration(self)
lease.ipv4 = newip
lease.hwaddr = mac
if not self._get_lease(lease): return None
return lease
class NetworkDefinition_Pair(NetworkDefinition):
_JSON_FIELDS_required = [ 'name', 'link', 'type' ]
_JSON_FIELDS_default = { 'iphw': [ { 'ipv4': '192.168.1.1', 'hwaddr': '40:00:00:00:00:01' } ] }
@classmethod
def from_json(cls, json_str):
o = jsonlib.Serializable.from_json(cls(None, None, None), json_str)
if o is not None:
for lease in o.iphw:
if not cpyutils.iputils.check_ip(lease['ipv4']): raise Exception("bad ip format: %s" % lease['ipv4'])
if not cpyutils.iputils.check_mac(lease['hwaddr']): raise Exception("bad hw address format: %s" % lease['hwaddr'])
else:
raise Exception("could not create object from json '%s'" % json_str)
return o
def get_lease(self):
lease = NetworkConfiguration(self)
for lease_info in self.iphw:
lease.ipv4, lease.hwaddr = (lease_info['ipv4'], lease_info['hwaddr'])
if self._get_lease(lease):
return lease
return None
class NetworkConfiguration(jsonlib.Serializable):
_JSON_FIELDS_required = [ 'link', 'hwaddr', 'type' ]
_JSON_FIELDS_default = { 'ipv4': None }
@classmethod
def from_json(cls, json_str):
o = jsonlib.Serializable.from_json(cls(None), json_str)
if o is not None:
if not cpyutils.iputils.check_mac(o.hwaddr): raise Exception("mac format is not valid")
return o
def __init__(self, network_definition, ipv4 = None, hwaddr = None):
self._network_definition = network_definition
self.link = network_definition.link
self.hwaddr = hwaddr
self.type = network_definition.type
self.ipv4 = ipv4
self.gateway = network_definition.gateway
def lxc_config(self):
config = []
config.append("lxc.network.type = %s" % self.type)
config.append("lxc.network.link = %s" % self.link)
if self.hwaddr is not None: config.append("lxc.network.hwaddr = %s" % self.hwaddr)
if self.ipv4 is not None: config.append("lxc.network.ipv4 = %s" % self.ipv4)
if self.gateway is not None: config.append("lxc.network.ipv4.gateway = %s" % self.gateway)
config.append("lxc.network.flags = up")
return "\n".join(config)
if __name__ == "__main__":
n = json.dumps( {
'name': 'public_dhcp',
'link': 'br0',
'type': 'veth',
'gateway': '10.0.0.1',
'iphw': [
{ 'ipv4': '10.0.0.1', 'hwaddr': '60:00:00:00:00:01' },
{ 'ipv4': '10.0.0.2', 'hwaddr': '60:00:00:00:00:02' }
]
}
, indent = 4)
m = NetworkDefinition_MAC_Prefix.from_json(n)
#print m.get_lease()
#print m.get_lease()
#print m.get_lease()
#print m.get_lease()
#print m.get_lease()
p = NetworkDefinition_Pair.from_json(n)
l1 = p.get_lease()
l2 = p.get_lease()
l3 = p.get_lease()
    print(l1, l2, l3)
i = NetworkDefinition_IP_Range.from_json(n)
    print(i.get_lease().lxc_config())
    print(i.get_lease().lxc_config())
    print(i.get_lease().lxc_config())
    print(i.get_lease().lxc_config())
'''
d = NetworkDefinition.from_json(
'{\
"name": "basic", \
"link": "br0", \
"type": "veth", \
"hwaddr": "40:00:00:00:00:01"\
}')
print d
'''
# print o
# print json.dumps(o.serialize(), indent=4)
|
|
'''Radiotap'''
import dpkt
# Ref: http://www.radiotap.org
# Fields Ref: http://www.radiotap.org/defined-fields/all
# Present flags
_TSFT_MASK = 0x1000000
_FLAGS_MASK = 0x2000000
_RATE_MASK = 0x4000000
_CHANNEL_MASK = 0x8000000
_FHSS_MASK = 0x10000000
_ANT_SIG_MASK = 0x20000000
_ANT_NOISE_MASK = 0x40000000
_LOCK_QUAL_MASK = 0x80000000
_TX_ATTN_MASK = 0x10000
_DB_TX_ATTN_MASK = 0x20000
_DBM_TX_POWER_MASK = 0x40000
_ANTENNA_MASK = 0x80000
_DB_ANT_SIG_MASK = 0x100000
_DB_ANT_NOISE_MASK = 0x200000
_RX_FLAGS_MASK = 0x400000
_CHANNELPLUS_MASK = 0x200
_EXT_MASK = 0x1
_TSFT_SHIFT = 24
_FLAGS_SHIFT = 25
_RATE_SHIFT = 26
_CHANNEL_SHIFT = 27
_FHSS_SHIFT = 28
_ANT_SIG_SHIFT = 29
_ANT_NOISE_SHIFT = 30
_LOCK_QUAL_SHIFT = 31
_TX_ATTN_SHIFT = 16
_DB_TX_ATTN_SHIFT = 17
_DBM_TX_POWER_SHIFT = 18
_ANTENNA_SHIFT = 19
_DB_ANT_SIG_SHIFT = 20
_DB_ANT_NOISE_SHIFT = 21
_RX_FLAGS_SHIFT = 22
_CHANNELPLUS_SHIFT = 10
_EXT_SHIFT = 0
# Flags elements
_FLAGS_SIZE = 2
_CFP_FLAG_SHIFT = 0
_PREAMBLE_SHIFT = 1
_WEP_SHIFT = 2
_FRAG_SHIFT = 3
_FCS_SHIFT = 4
_DATA_PAD_SHIFT = 5
_BAD_FCS_SHIFT = 6
_SHORT_GI_SHIFT = 7
# Channel type
_CHAN_TYPE_SIZE = 4
_CHANNEL_TYPE_SHIFT = 4
_CCK_SHIFT = 5
_OFDM_SHIFT = 6
_TWO_GHZ_SHIFT = 7
_FIVE_GHZ_SHIFT = 8
_PASSIVE_SHIFT = 9
_DYN_CCK_OFDM_SHIFT = 10
_GFSK_SHIFT = 11
_GSM_SHIFT = 12
_STATIC_TURBO_SHIFT = 13
_HALF_RATE_SHIFT = 14
_QUARTER_RATE_SHIFT = 15
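
# Worked example (added note): the present_flags value 0x6e480000 used in the
# unit test below has bits 19, 22, 25, 26, 27, 29 and 30 set, i.e. antenna,
# rx_flags, flags, rate, channel, ant_sig and ant_noise are present while
# tsft (bit 24) is not:
#   (0x6e480000 & _FLAGS_MASK) >> _FLAGS_SHIFT == 1
#   (0x6e480000 & _TSFT_MASK) >> _TSFT_SHIFT == 0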
class Radiotap(dpkt.Packet):
__hdr__ = (
('version', 'B', 0),
('pad', 'B', 0),
('length', 'H', 0),
('present_flags', 'I', 0)
)
def _get_tsft_present(self): return (self.present_flags & _TSFT_MASK) >> _TSFT_SHIFT
def _set_tsft_present(self, val): self.present_flags = self.present_flags | (val << _TSFT_SHIFT)
def _get_flags_present(self): return (self.present_flags & _FLAGS_MASK) >> _FLAGS_SHIFT
def _set_flags_present(self, val): self.present_flags = self.present_flags | (val << _FLAGS_SHIFT)
def _get_rate_present(self): return (self.present_flags & _RATE_MASK) >> _RATE_SHIFT
def _set_rate_present(self, val): self.present_flags = self.present_flags | (val << _RATE_SHIFT)
def _get_channel_present(self): return (self.present_flags & _CHANNEL_MASK) >> _CHANNEL_SHIFT
def _set_channel_present(self, val): self.present_flags = self.present_flags | (val << _CHANNEL_SHIFT)
def _get_fhss_present(self): return (self.present_flags & _FHSS_MASK) >> _FHSS_SHIFT
def _set_fhss_present(self, val): self.present_flags = self.present_flags | (val << _FHSS_SHIFT)
def _get_ant_sig_present(self): return (self.present_flags & _ANT_SIG_MASK) >> _ANT_SIG_SHIFT
def _set_ant_sig_present(self, val): self.present_flags = self.present_flags | (val << _ANT_SIG_SHIFT)
def _get_ant_noise_present(self): return (self.present_flags & _ANT_NOISE_MASK) >> _ANT_NOISE_SHIFT
def _set_ant_noise_present(self, val): self.present_flags = self.present_flags | (val << _ANT_NOISE_SHIFT)
def _get_lock_qual_present(self): return (self.present_flags & _LOCK_QUAL_MASK) >> _LOCK_QUAL_SHIFT
def _set_lock_qual_present(self, val): self.present_flags = self.present_flags | (val << _LOCK_QUAL_SHIFT)
def _get_tx_attn_present(self): return (self.present_flags & _TX_ATTN_MASK) >> _TX_ATTN_SHIFT
def _set_tx_attn_present(self, val): self.present_flags = self.present_flags | (val << _TX_ATTN_SHIFT)
def _get_db_tx_attn_present(self): return (self.present_flags & _DB_TX_ATTN_MASK) >> _DB_TX_ATTN_SHIFT
def _set_db_tx_attn_present(self, val): self.present_flags = self.present_flags | (val << _DB_TX_ATTN_SHIFT)
def _get_dbm_power_present(self): return (self.present_flags & _DBM_TX_POWER_MASK) >> _DBM_TX_POWER_SHIFT
def _set_dbm_power_present(self, val): self.present_flags = self.present_flags | (val << _DBM_TX_POWER_SHIFT)
def _get_ant_present(self): return (self.present_flags & _ANTENNA_MASK) >> _ANTENNA_SHIFT
def _set_ant_present(self, val): self.present_flags = self.present_flags | (val << _ANTENNA_SHIFT)
def _get_db_ant_sig_present(self): return (self.present_flags & _DB_ANT_SIG_MASK) >> _DB_ANT_SIG_SHIFT
def _set_db_ant_sig_present(self, val): self.present_flags = self.present_flags | (val << _DB_ANT_SIG_SHIFT)
def _get_db_ant_noise_present(self): return (self.present_flags & _DB_ANT_NOISE_MASK) >> _DB_ANT_NOISE_SHIFT
def _set_db_ant_noise_present(self, val): self.present_flags = self.present_flags | (val << _DB_ANT_NOISE_SHIFT)
def _get_rx_flags_present(self): return (self.present_flags & _RX_FLAGS_MASK) >> _RX_FLAGS_SHIFT
def _set_rx_flags_present(self, val): self.present_flags = self.present_flags | (val << _RX_FLAGS_SHIFT)
def _get_chanplus_present(self): return (self.present_flags & _CHANNELPLUS_MASK) >> _CHANNELPLUS_SHIFT
def _set_chanplus_present(self, val): self.present_flags = self.present_flags | (val << _CHANNELPLUS_SHIFT)
def _get_ext_present(self): return (self.present_flags & _EXT_MASK) >> _EXT_SHIFT
def _set_ext_present(self, val): self.present_flags = self.present_flags | (val << _EXT_SHIFT)
tsft_present = property(_get_tsft_present, _set_tsft_present)
flags_present = property(_get_flags_present, _set_flags_present)
rate_present = property(_get_rate_present, _set_rate_present)
channel_present = property(_get_channel_present, _set_channel_present)
fhss_present = property(_get_fhss_present, _set_fhss_present)
ant_sig_present = property(_get_ant_sig_present, _set_ant_sig_present)
ant_noise_present = property(_get_ant_noise_present, _set_ant_noise_present)
lock_qual_present = property(_get_lock_qual_present, _set_lock_qual_present)
tx_attn_present = property(_get_tx_attn_present, _set_tx_attn_present)
db_tx_attn_present = property(_get_db_tx_attn_present, _set_db_tx_attn_present)
dbm_tx_power_present = property(_get_dbm_power_present, _set_dbm_power_present)
ant_present = property(_get_ant_present, _set_ant_present)
db_ant_sig_present = property(_get_db_ant_sig_present, _set_db_ant_sig_present)
db_ant_noise_present = property(_get_db_ant_noise_present, _set_db_ant_noise_present)
rx_flags_present = property(_get_rx_flags_present, _set_rx_flags_present)
chanplus_present = property(_get_chanplus_present, _set_chanplus_present)
ext_present = property(_get_ext_present, _set_ext_present)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = buf[self.length:]
self.fields = []
buf = buf[self.__hdr_len__:]
# decode each field into self.<name> (eg. self.tsft) as well as append it self.fields list
field_decoder = [
('tsft', self.tsft_present, self.TSFT),
('flags', self.flags_present, self.Flags),
('rate', self.rate_present, self.Rate),
('channel', self.channel_present, self.Channel),
('fhss', self.fhss_present, self.FHSS),
('ant_sig', self.ant_sig_present, self.AntennaSignal),
('ant_noise', self.ant_noise_present, self.AntennaNoise),
('lock_qual', self.lock_qual_present, self.LockQuality),
('tx_attn', self.tx_attn_present, self.TxAttenuation),
('db_tx_attn', self.db_tx_attn_present, self.DbTxAttenuation),
('dbm_tx_power', self.dbm_tx_power_present, self.DbmTxPower),
('ant', self.ant_present, self.Antenna),
('db_ant_sig', self.db_ant_sig_present, self.DbAntennaSignal),
('db_ant_noise', self.db_ant_noise_present, self.DbAntennaNoise),
('rx_flags', self.rx_flags_present, self.RxFlags)
]
for name, present_bit, parser in field_decoder:
if present_bit:
field = parser(buf)
field.data = ''
setattr(self, name, field)
self.fields.append(field)
buf = buf[len(field):]
class Antenna(dpkt.Packet):
__hdr__ = (
('index', 'B', 0),
)
class AntennaNoise(dpkt.Packet):
__hdr__ = (
('db', 'B', 0),
)
class AntennaSignal(dpkt.Packet):
__hdr__ = (
('db', 'B', 0),
)
class Channel(dpkt.Packet):
__hdr__ = (
('freq', 'H', 0),
('flags', 'H', 0),
)
class FHSS(dpkt.Packet):
__hdr__ = (
('set', 'B', 0),
('pattern', 'B', 0),
)
class Flags(dpkt.Packet):
__hdr__ = (
('val', 'B', 0),
)
class LockQuality(dpkt.Packet):
__hdr__ = (
('val', 'H', 0),
)
class RxFlags(dpkt.Packet):
__hdr__ = (
('val', 'H', 0),
)
class Rate(dpkt.Packet):
__hdr__ = (
('val', 'B', 0),
)
class TSFT(dpkt.Packet):
__hdr__ = (
('usecs', 'Q', 0),
)
class TxAttenuation(dpkt.Packet):
__hdr__ = (
('val', 'H', 0),
)
class DbTxAttenuation(dpkt.Packet):
__hdr__ = (
('db', 'H', 0),
)
class DbAntennaNoise(dpkt.Packet):
__hdr__ = (
('db', 'B', 0),
)
class DbAntennaSignal(dpkt.Packet):
__hdr__ = (
('db', 'B', 0),
)
class DbmTxPower(dpkt.Packet):
__hdr__ = (
('dbm', 'B', 0),
)
if __name__ == '__main__':
import unittest
class RadiotapTestCase(unittest.TestCase):
def test_Radiotap(self):
            s = b'\x00\x00\x00\x18\x6e\x48\x00\x00\x00\x02\x6c\x09\xa0\x00\xa8\x81\x02\x00\x00\x00\x00\x00\x00\x00'
            rad = Radiotap(s)
            self.assertTrue(rad.version == 0)
            self.assertTrue(rad.present_flags == 0x6e480000)
            self.assertTrue(rad.tsft_present == 0)
            self.assertTrue(rad.flags_present == 1)
            self.assertTrue(rad.rate_present == 1)
            self.assertTrue(rad.channel_present == 1)
            self.assertTrue(rad.fhss_present == 0)
            self.assertTrue(rad.ant_sig_present == 1)
            self.assertTrue(rad.ant_noise_present == 1)
            self.assertTrue(rad.lock_qual_present == 0)
            self.assertTrue(rad.db_tx_attn_present == 0)
            self.assertTrue(rad.dbm_tx_power_present == 0)
            self.assertTrue(rad.ant_present == 1)
            self.assertTrue(rad.db_ant_sig_present == 0)
            self.assertTrue(rad.db_ant_noise_present == 0)
            self.assertTrue(rad.rx_flags_present == 1)
            self.assertTrue(rad.channel.freq == 0x6c09)
            self.assertTrue(rad.channel.flags == 0xa000)
            self.assertTrue(len(rad.fields) == 7)
unittest.main()
|
|
import json
import subprocess
import hashlib
import os.path
import shlex
import gevent
from django.db import models
from django_mysql.models import JSONField, Model
from django.core.mail import send_mail
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.base_user import AbstractBaseUser
from django.contrib.auth.base_user import BaseUserManager
from django.conf import settings
# contains helper methods for the User model
class UserManager(BaseUserManager):
use_in_migrations = True
def _create_user(self, email, password, **extra_fields):
if not email:
raise ValueError('Please provide an email address')
email = self.normalize_email(email)
user = self.model(email=email, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, email, password=None, **extra_fields):
extra_fields.setdefault('is_superuser', False)
return self._create_user(email, password, **extra_fields)
def create_superuser(self, email, password, **extra_fields):
extra_fields.setdefault('is_superuser', True)
if extra_fields.get('is_superuser') is not True:
raise ValueError('The field is_superuser should be set to True.')
return self._create_user(email, password, **extra_fields)
class User(AbstractBaseUser, PermissionsMixin):
class Meta:
db_table = "users"
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255, unique=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
is_active = models.BooleanField(default=True)
full_name = models.CharField(max_length=255, null=True)
objects = UserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
def get_full_name(self):
if self.full_name is not None:
return self.full_name
else:
return self.name
def get_short_name(self):
return self.name
def email_user(self, subject, message, from_email=None, **kwargs):
send_mail(subject, message, from_email, [self.email], **kwargs)
class PasswordReset(Model):
class Meta:
db_table = "password_resets"
email = models.CharField(max_length=255)
token = models.CharField(max_length=255)
created_at = models.DateTimeField(auto_now_add=True)
class Chart(Model):
class Meta:
db_table = "charts"
id = models.AutoField(primary_key=True)
config = JSONField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
last_edited_by = models.ForeignKey(User, to_field='name', on_delete=models.DO_NOTHING, blank=True, null=True,
db_column='last_edited_by')
last_edited_at = models.DateTimeField()
starred = models.BooleanField(default=False)
published_at = models.DateTimeField(null=True)
published_by = models.ForeignKey(User, to_field='name', on_delete=models.DO_NOTHING, blank=True, null=True, db_column="published_by", related_name="published_charts")
@classmethod
def bake(cls, user, slug):
email = shlex.quote(user.email)
name = shlex.quote(user.get_full_name())
slug = shlex.quote(slug)
cmd = f"node {settings.BASE_DIR}/dist/src/bakeCharts.js {email} {name} {slug} >> /tmp/{settings.DB_NAME}-static.log 2>&1"
print(cmd)
subprocess.Popen(cmd, shell=True)
@classmethod
def owid_commit(cls):
"""
:return: Will return latest commit revision for the repo
"""
        git_commit = subprocess.check_output(['git', 'rev-parse', 'HEAD'], shell=False)
        return git_commit.decode('utf-8').strip()
def show_type(self):
type = "Unknown"
config = self.config
if config['type'] == "LineChart":
type = "Line Chart"
elif config['type'] == "ScatterPlot":
type = "Scatter Plot"
elif config['type'] == "StackedArea":
type = "Stacked Area"
elif config['type'] == "MultiBar":
type = "Multi Bar"
elif config['type'] == "HorizontalMultiBar":
type = "Horizontal Multi Bar"
elif config['type'] == "DiscreteBar":
type = "Discrete Bar"
elif config['type'] == "SlopeChart":
type = "Slope Chart"
if config.get("tab") == "map":
if config.get("hasChartTab"):
return "Map + " + type
else:
return "Map"
else:
if config.get("hasMapTab"):
return type + " + Map"
else:
return type
class DatasetCategory(Model):
class Meta:
db_table = "dataset_categories"
name = models.CharField(max_length=255, unique=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
fetcher_autocreated = models.BooleanField(default=False)
class DatasetSubcategory(Model):
class Meta:
db_table = "dataset_subcategories"
unique_together = (('name', 'categoryId'),)
name = models.CharField(max_length=255)
categoryId = models.ForeignKey(DatasetCategory, blank=True, null=True, on_delete=models.DO_NOTHING,
db_column='categoryId')
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Dataset(Model):
class Meta:
db_table = "datasets"
unique_together = (('name', 'namespace'),)
name = models.CharField(max_length=255)
description = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
categoryId = models.ForeignKey(DatasetCategory, blank=True, null=True, on_delete=models.DO_NOTHING,
db_column='categoryId')
subcategoryId = models.ForeignKey(DatasetSubcategory, blank=True, null=True, on_delete=models.DO_NOTHING,
db_column='subcategoryId')
namespace = models.CharField(max_length=255, default='owid')
class Source(Model):
class Meta:
db_table = 'sources'
unique_together = (('name', 'datasetId'),)
name = models.CharField(max_length=255)
description = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
datasetId = models.IntegerField(db_column='datasetId', blank=True, null=True)
class VariableType(Model):
class Meta:
db_table = 'variable_types'
name = models.CharField(max_length=255)
isSortable = models.BooleanField(db_column='isSortable', default=False)
class Variable(Model):
class Meta:
db_table = 'variables'
unique_together = (('code', 'datasetId'), ('name', 'datasetId'),)
name = models.CharField(max_length=1000)
unit = models.CharField(max_length=255)
short_unit = models.CharField(max_length=255, null=True)
display = JSONField()
description = models.TextField(blank=True, null=True)
datasetId = models.ForeignKey(Dataset, on_delete=models.CASCADE, db_column='datasetId')
sourceId = models.ForeignKey(Source, on_delete=models.DO_NOTHING, db_column='sourceId')
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
variableTypeId = models.ForeignKey(VariableType, on_delete=models.DO_NOTHING, db_column='variableTypeId')
uploaded_by = models.ForeignKey(User, to_field='name', on_delete=models.DO_NOTHING, db_column='uploaded_by',
blank=True, null=True)
uploaded_at = models.DateTimeField(auto_now_add=True)
code = models.CharField(max_length=255, blank=True, null=True)
coverage = models.CharField(max_length=255)
timespan = models.CharField(max_length=255)
class ChartDimension(Model):
class Meta:
db_table = "chart_dimensions"
chartId = models.ForeignKey(Chart, on_delete=models.CASCADE, db_column='chartId')
variableId = models.ForeignKey(Variable, models.DO_NOTHING, db_column='variableId')
order = models.IntegerField()
property = models.CharField(max_length=255)
class ChartSlugRedirect(Model):
class Meta:
db_table = 'chart_slug_redirects'
slug = models.CharField(unique=True, max_length=255)
chart_id = models.IntegerField()
class Entity(Model):
class Meta:
db_table = "entities"
code = models.CharField(max_length=255, blank=True, null=True, unique=True)
name = models.CharField(max_length=255, unique=True)
validated = models.BooleanField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
displayName = models.CharField(db_column='displayName', max_length=255)
class DataValue(Model):
class Meta:
db_table = "data_values"
unique_together = (('entityId', 'variableId', 'year'),)
value = models.CharField(max_length=255)
entityId = models.ForeignKey(Entity, on_delete=models.DO_NOTHING, db_column='entityId')
variableId = models.ForeignKey(Variable, on_delete=models.CASCADE, db_column='variableId')
year = models.IntegerField()
class License(Model):
class Meta:
db_table = 'licenses'
name = models.CharField(max_length=255)
description = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Logo(Model):
class Meta:
db_table = 'logos'
name = models.CharField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
svg = models.TextField()
class Setting(Model):
class Meta:
db_table = 'settings'
meta_name = models.CharField(max_length=255)
meta_value = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class UserInvitation(Model):
class Meta:
db_table = 'user_invitations'
code = models.CharField(max_length=255)
email = models.CharField(max_length=255)
user_id = models.ForeignKey(User, on_delete=models.DO_NOTHING, db_column='user_id')
status = models.CharField(max_length=10, choices=(('pending', 'pending'), ('successful', 'successful'),
('canceled', 'canceled'), ('expired', 'expired')))
valid_till = models.DateTimeField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest.mock import patch
import mock
import requests
from airflow.exceptions import AirflowException, AirflowSensorTimeout
from airflow.models import TaskInstance
from airflow.models.dag import DAG
from airflow.providers.http.operators.http import SimpleHttpOperator
from airflow.providers.http.sensors.http import HttpSensor
from airflow.utils.timezone import datetime
DEFAULT_DATE = datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
TEST_DAG_ID = 'unit_test_dag'
class TestHttpSensor(unittest.TestCase):
def setUp(self):
args = {
'owner': 'airflow',
'start_date': DEFAULT_DATE
}
self.dag = DAG(TEST_DAG_ID, default_args=args)
@patch("airflow.providers.http.hooks.http.requests.Session.send")
def test_poke_exception(self, mock_session_send):
"""
An exception raised in the poke function should not be ignored.
"""
response = requests.Response()
response.status_code = 200
mock_session_send.return_value = response
def resp_check(_):
raise AirflowException('AirflowException raised here!')
task = HttpSensor(
task_id='http_sensor_poke_exception',
http_conn_id='http_default',
endpoint='',
request_params={},
response_check=resp_check,
timeout=5,
poke_interval=1)
with self.assertRaisesRegex(AirflowException, 'AirflowException raised here!'):
task.execute(context={})
@patch("airflow.providers.http.hooks.http.requests.Session.send")
def test_head_method(self, mock_session_send):
def resp_check(_):
return True
task = HttpSensor(
dag=self.dag,
task_id='http_sensor_head_method',
http_conn_id='http_default',
endpoint='',
request_params={},
method='HEAD',
response_check=resp_check,
timeout=5,
poke_interval=1)
task.execute(context={})
args, kwargs = mock_session_send.call_args
received_request = args[0]
prep_request = requests.Request(
'HEAD',
'https://www.httpbin.org',
{}).prepare()
self.assertEqual(prep_request.url, received_request.url)
self.assertEqual(prep_request.method, received_request.method)
@patch("airflow.providers.http.hooks.http.requests.Session.send")
def test_poke_context(self, mock_session_send):
response = requests.Response()
response.status_code = 200
mock_session_send.return_value = response
def resp_check(_, execution_date):
if execution_date == DEFAULT_DATE:
return True
raise AirflowException('AirflowException raised here!')
task = HttpSensor(
task_id='http_sensor_poke_exception',
http_conn_id='http_default',
endpoint='',
request_params={},
response_check=resp_check,
timeout=5,
poke_interval=1,
dag=self.dag)
task_instance = TaskInstance(task=task, execution_date=DEFAULT_DATE)
task.execute(task_instance.get_template_context())
@patch("airflow.providers.http.hooks.http.requests.Session.send")
def test_logging_head_error_request(
self,
mock_session_send
):
def resp_check(_):
return True
response = requests.Response()
response.status_code = 404
response.reason = 'Not Found'
response._content = b'This endpoint doesnt exist'
mock_session_send.return_value = response
task = HttpSensor(
dag=self.dag,
task_id='http_sensor_head_method',
http_conn_id='http_default',
endpoint='',
request_params={},
method='HEAD',
response_check=resp_check,
timeout=5,
poke_interval=1
)
with mock.patch.object(task.hook.log, 'error') as mock_errors:
with self.assertRaises(AirflowSensorTimeout):
task.execute(None)
self.assertTrue(mock_errors.called)
calls = [
mock.call('HTTP error: %s', 'Not Found'),
mock.call('This endpoint doesnt exist'),
mock.call('HTTP error: %s', 'Not Found'),
mock.call('This endpoint doesnt exist'),
mock.call('HTTP error: %s', 'Not Found'),
mock.call('This endpoint doesnt exist'),
mock.call('HTTP error: %s', 'Not Found'),
mock.call('This endpoint doesnt exist'),
mock.call('HTTP error: %s', 'Not Found'),
mock.call('This endpoint doesnt exist'),
mock.call('HTTP error: %s', 'Not Found'),
mock.call('This endpoint doesnt exist'),
]
mock_errors.assert_has_calls(calls)
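# The FakeSession below stands in for requests.Session in the operator and sensor
# tests, so no real HTTP traffic is generated: send() always returns a canned 200
# response with body 'apache/airflow', and prepare_request() appends '/<date>' to
# that body when a 'date' request parameter is present.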
class FakeSession:
def __init__(self):
self.response = requests.Response()
self.response.status_code = 200
self.response._content = 'apache/airflow'.encode('ascii', 'ignore')
def send(self, *args, **kwargs):
return self.response
def prepare_request(self, request):
if 'date' in request.params:
self.response._content += (
'/' + request.params['date']
).encode('ascii', 'ignore')
return self.response
class TestHttpOpSensor(unittest.TestCase):
def setUp(self):
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE_ISO}
dag = DAG(TEST_DAG_ID, default_args=args)
self.dag = dag
@mock.patch('requests.Session', FakeSession)
def test_get(self):
op = SimpleHttpOperator(
task_id='get_op',
method='GET',
endpoint='/search',
data={"client": "ubuntu", "q": "airflow"},
headers={},
dag=self.dag)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
@mock.patch('requests.Session', FakeSession)
def test_get_response_check(self):
op = SimpleHttpOperator(
task_id='get_op',
method='GET',
endpoint='/search',
data={"client": "ubuntu", "q": "airflow"},
response_check=lambda response: ("apache/airflow" in response.text),
headers={},
dag=self.dag)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
@mock.patch('requests.Session', FakeSession)
def test_sensor(self):
sensor = HttpSensor(
task_id='http_sensor_check',
http_conn_id='http_default',
endpoint='/search',
request_params={"client": "ubuntu", "q": "airflow", 'date': '{{ds}}'},
headers={},
response_check=lambda response: (
"apache/airflow/" + DEFAULT_DATE.strftime('%Y-%m-%d')
in response.text),
poke_interval=5,
timeout=15,
dag=self.dag)
sensor.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
|
|
"""
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Olivier Grisel
# Vlad Niculae
# License: BSD
from itertools import product
import numbers
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils.fixes import in1d
from ..utils import array2d, check_random_state
from ..base import BaseEstimator
__all__ = ['PatchExtractor',
'extract_patches_2d',
'grid_to_graph',
'img_to_graph',
'reconstruct_from_patches_2d']
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
===========
n_x: integer
The size of the grid in the x direction.
n_y: integer
The size of the grid in the y direction.
n_z: integer, optional
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
def _compute_gradient_3d(edges, img):
n_x, n_y, n_z = img.shape
gradient = np.abs(img[edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z] -
img[edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z])
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(in1d(edges[0], inds),
in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
return_as=sparse.coo_matrix, dtype=None):
"""Auxiliary function for img_to_graph and grid_to_graph
"""
edges = _make_edges_3d(n_x, n_y, n_z)
if dtype is None:
if img is None:
dtype = np.int
else:
dtype = img.dtype
if img is not None:
img = np.atleast_3d(img)
weights = _compute_gradient_3d(edges, img)
if mask is not None:
edges, weights = _mask_edges_weights(mask, edges, weights)
diag = img.squeeze()[mask]
else:
diag = img.ravel()
n_voxels = diag.size
else:
if mask is not None:
mask = mask.astype(np.bool)
edges = _mask_edges_weights(mask, edges)
n_voxels = np.sum(mask)
else:
n_voxels = n_x * n_y * n_z
weights = np.ones(edges.shape[1], dtype=dtype)
diag = np.ones(n_voxels, dtype=dtype)
diag_idx = np.arange(n_voxels)
i_idx = np.hstack((edges[0], edges[1]))
j_idx = np.hstack((edges[1], edges[0]))
graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
(np.hstack((i_idx, diag_idx)),
np.hstack((j_idx, diag_idx)))),
(n_voxels, n_voxels),
dtype=dtype)
if return_as is np.ndarray:
return graph.todense()
return return_as(graph)
def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections
Edges are weighted with the gradient values.
Parameters
===========
img: ndarray, 2D or 3D
2D or 3D image
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as: np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype: None or dtype, optional
The data of the returned sparse matrix. By default it is the
dtype of img
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
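# Illustrative usage (not part of the original module): each pixel becomes a
# vertex, and axis-aligned neighbouring pixels are linked by gradient-weighted
# edges, so a 2x2 image yields a 4x4 adjacency matrix.
#
#     >>> img = np.arange(4).reshape((2, 2))
#     >>> img_to_graph(img).shape
#     (4, 4)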
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix,
dtype=np.int):
"""Graph of the pixel-to-pixel connections
Edges exist if 2 voxels are connected.
Parameters
===========
n_x: int
Dimension in x axis
n_y: int
Dimension in y axis
n_z: int, optional, default 1
Dimension in z axis
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as: np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype: dtype, optional, default int
The data of the returned sparse matrix. By default it is int
"""
return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
dtype=dtype)
###############################################################################
# From an image to a set of small image patches
def extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Parameters
----------
arr: ndarray
n-dimensional array of which patches are to be extracted
patch_shape: integer or tuple of length arr.ndim
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step: integer or tuple of length arr.ndim
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches: strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = [slice(None, None, st) for st in extraction_step]
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) /
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
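# Illustrative usage (not part of the original module): extracting 2x2 patches
# from a 3x3 array gives a strided (2, 2, 2, 2) view; reshaping the first two
# dimensions copies the data into a list of 4 patches.
#
#     >>> a = np.arange(9).reshape((3, 3))
#     >>> extract_patches(a, patch_shape=2).shape
#     (2, 2, 2, 2)
#     >>> extract_patches(a, patch_shape=2).reshape(-1, 2, 2).shape
#     (4, 2, 2)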
def extract_patches_2d(image, patch_size, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches
The resulting patches are allocated in a dedicated array.
Parameters
----------
image: array, shape = (image_height, image_width) or
(image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
the channel: a RGB image would have `n_channels=3`.
patch_size: tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches: integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
random_state: int or RandomState
Pseudo number generator state used for random sampling to use if
`max_patches` is not None.
Returns
-------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.feature_extraction import image
>>> one_image = np.arange(16).reshape((4, 4))
>>> one_image
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print patches.shape
(9, 2, 2)
>>> patches[0]
array([[0, 1],
[4, 5]])
>>> patches[1]
array([[1, 2],
[5, 6]])
>>> patches[8]
array([[10, 11],
[14, 15]])
"""
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
image = array2d(image)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
extracted_patches = extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=1)
if max_patches:
if (isinstance(max_patches, (numbers.Integral))
and max_patches < all_patches):
n_patches = max_patches
elif (isinstance(max_patches, (numbers.Real))
and 0 < max_patches < 1):
n_patches = int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
rng = check_random_state(random_state)
i_s = rng.randint(n_h, size=n_patches)
j_s = rng.randint(n_w, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
n_patches = all_patches
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
def reconstruct_from_patches_2d(patches, image_size):
"""Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Parameters
----------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size: tuple of ints (image_height, image_width) or
(image_height, image_width, n_channels)
the size of the image that will be reconstructed
Returns
-------
image: array, shape = image_size
the reconstructed image
"""
i_h, i_w = image_size[:2]
p_h, p_w = patches.shape[1:3]
img = np.zeros(image_size)
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
for p, (i, j) in zip(patches, product(xrange(n_h), xrange(n_w))):
img[i:i + p_h, j:j + p_w] += p
for i in xrange(i_h):
for j in xrange(i_w):
# divide by the amount of overlap
# XXX: is this the most efficient way? memory-wise yes, cpu wise?
img[i, j] /= float(min(i + 1, p_h, i_h - i) *
min(j + 1, p_w, i_w - j))
return img
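# Illustrative usage (not part of the original module): patches taken from a
# single image average back to that image exactly, because every overlapping
# contribution at a given pixel equals the original pixel value.
#
#     >>> one_image = np.arange(16).reshape((4, 4)).astype(np.float64)
#     >>> patches = extract_patches_2d(one_image, (2, 2))
#     >>> np.allclose(reconstruct_from_patches_2d(patches, (4, 4)), one_image)
#     True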
class PatchExtractor(BaseEstimator):
"""Extracts patches from a collection of images
Parameters
----------
patch_size: tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches: integer or float, optional default is None
The maximum number of patches per image to extract. If max_patches is a
float in (0, 1), it is taken to mean a proportion of the total number
of patches.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
"""
def __init__(self, patch_size=None, max_patches=None, random_state=None):
self.patch_size = patch_size
self.max_patches = max_patches
self.random_state = random_state
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
def transform(self, X):
"""Transforms the image samples in X into a matrix of patch data.
Parameters
----------
X : array, shape = (n_samples, image_height, image_width) or
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
the last dimension specifies the channel: a RGB image would have
`n_channels=3`.
Returns
-------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
self.random_state = check_random_state(self.random_state)
n_images, i_h, i_w = X.shape[:3]
X = np.reshape(X, (n_images, i_h, i_w, -1))
n_channels = X.shape[-1]
if self.patch_size is None:
patch_size = i_h / 10, i_w / 10
else:
patch_size = self.patch_size
if self.max_patches:
n_patches = self.max_patches
else:
p_h, p_w = patch_size
n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches_shape = (n_images * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
patches = np.empty(patches_shape)
for ii, image in enumerate(X):
patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(
image, patch_size, self.max_patches, self.random_state)
return patches
|
|
#!/usr/bin/env python
## Copyright 2012, En7788.com, Inc. All rights reserved.
##
## FormUI is an easy-to-use GUI framework for Python, based on wxPython.
## FormUI is a free software: you can redistribute it and/or modify it under
## the terms of version 3 of the GNU Lesser General Public License as
## published by the Free Software Foundation.
##
## You should have received a copy of the GNU Lesser General Public License
## along with FormUI. If not, see <http://www.gnu.org/licenses/>.
#from CommonCtrl import *
import wx.lib.filebrowsebutton
import wx.lib.scrolledpanel as scrolled  # used by PanelCtrl below; may already be provided by the star imports
import Queue
from FormControl import *
from WorkThread import *
from Builder import *
EVT_RESULT_ID = wx.NewId()
def EVT_RESULT(win, func):
"""Define Result Event."""
win.Connect(-1, -1, EVT_RESULT_ID, func)
##Result event, used to post events to the window
class ResultEvent(wx.PyEvent):
def __init__(self, data):
wx.PyEvent.__init__(self)
self.SetEventType(EVT_RESULT_ID)
self.data = data
##WindowControl: separates control handling from the frame
class WindowControl():
def __init__(self, handlerMap, window):
self.window = window
self.handlerMap = handlerMap
self.eventId2IdMap = {} #windowID: item_Id
self.id2EventIdMap = {}
self.id2CtrlMap = {}
self.id2ItemMap = {}
self.valueItems = []
self.eventTypeNameMap = {}
self.acceleratorTable = []
#self.window.SetAcceleratorTable(accelTbl)
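# For example (illustrative): an accelerator string such as 'Ctrl+S' is lower-cased,
# split on '+', and appended to the accelerator table as (wx.ACCEL_CTRL, ord('s'), ctrlId).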
def __registAccelerator(self, accelerator, ctrlId):
accelerator = accelerator.lower()
accelerator = accelerator.split('+')
ctrlKey = wx.ACCEL_NORMAL
normalKey = ''
for item in accelerator:
if item == 'ctrl':
if ctrlKey == wx.ACCEL_NORMAL:
ctrlKey = wx.ACCEL_CTRL
else:
ctrlKey = ctrlKey | wx.ACCEL_CTRL
elif item == 'alt':
if ctrlKey == wx.ACCEL_NORMAL:
ctrlKey = wx.ACCEL_ALT
else:
ctrlKey = ctrlKey | wx.ACCEL_ALT
elif item == 'shift':
if ctrlKey == wx.ACCEL_NORMAL:
ctrlKey = wx.ACCEL_SHIFT
else:
ctrlKey = ctrlKey | wx.ACCEL_SHIFT
else:
normalKey = item
self.acceleratorTable.append((ctrlKey, int(ord(normalKey)), ctrlId))
def registItem(self, id, eventId, item, control):
if item is not None:
if id not in self.id2ItemMap.keys():
self.id2ItemMap[id] = []
self.id2ItemMap[id].append(item)
#if item['type'] != 'static' and item['type'] != 'static_line' and item['type'] != 'button':
self.valueItems.append(item)
if id not in self.id2CtrlMap.keys():
self.id2CtrlMap[id] = []
self.id2CtrlMap[id].append(control)
if item != None:
accelerator = FormControlUtil.getAccelerator(item)
if accelerator is not None:
self.__registAccelerator(accelerator, eventId)
if eventId is not None:
self.eventId2IdMap[eventId] = id
self.id2EventIdMap[id] = eventId
def registItemHandler(self, ctrl, eventId,eventType,eventTypeName=''):
self.eventTypeNameMap[eventType.typeId] = eventTypeName
ctrl.Bind(eventType, self.OnItemEvent, id = eventId)
def updateResult(self, valueList):
global gControlTypeRegister
for item in self.valueItems:
value = item['control'].GetValue()
if value is not None:
valueList[item['id']] = value
def setItemValue(self,itemId, value):
if itemId in self.id2CtrlMap.keys():
ctrlList =self.id2CtrlMap[itemId]
for ctrl in ctrlList:
ctrl.SetValue(value)
def enableCtrl(self,itemId, bEnable):
if itemId in self.id2CtrlMap.keys():
ctrlList =self.id2CtrlMap[itemId]
for ctrl in ctrlList:
ctrl.Enable(bEnable)
def updateLayout(self, ctrl):
if hasattr(ctrl, 'Layout'):
ctrl.Layout()
if isinstance(ctrl, PanelCtrl) or isinstance(ctrl,wx.Frame):
return
if hasattr(ctrl, 'parent'):
self.updateLayout(ctrl.parent)
def showCtrl(self,itemId, bShow):
if itemId in self.id2CtrlMap.keys():
ctrlList =self.id2CtrlMap[itemId]
for ctrl in ctrlList:
ctrl.Show(bShow)
self.updateLayout(ctrl)
#self.window.Layout()
def handlerItemMessage(self,itemId, messageId, messagePara):
if itemId not in self.id2ItemMap.keys():
return
itemList = self.id2ItemMap[itemId]
for item in itemList:
self.window.handlerReturn = item['control'].onMessage(messageId, messagePara)
def highlightItem(self,itemId):
if not itemId in self.id2CtrlMap.keys():
return
ctrlList = self.id2CtrlMap[itemId]
for ctrl in ctrlList:
if isinstance(ctrl, PanelCtrl):
ctrl.noteboolCtrl.SetSelection(ctrl.panelIndex)
break
elif isinstance(ctrl, LineCtrl):
panel = ctrl.parent
if isinstance(panel, PanelCtrl):
panel.noteboolCtrl.SetSelection(panel.panelIndex)
ctrl.highLight()
break
elif isinstance(ctrl, wx.Notebook):
pass
else:
line = ctrl.parent
if hasattr(line, 'parent'):
panel = line.parent
if isinstance(panel, PanelCtrl) and panel.panelIndex != None:
panel.noteboolCtrl.SetSelection(panel.panelIndex)
ctrl.SetFocus()
def makeReturnPara(self, eventId, eventType,handler):
para = HandlerPara(eventId, eventType,handler)
self.updateResult(para.valueList)
return para
def OnItemEvent(self, event):
if self.window.windowState != self.window.WINDOW_STATE_WORK:
return
if event.Id in self.eventId2IdMap.keys():
id = self.eventId2IdMap[event.Id]
if id != None and id != '':
self.window.CallFormHandler(id,self.eventTypeNameMap[event.EventType])
# TODO: Notebook, Panel and Line could be refactored using the Composite pattern
##The base class for controls
class CtrlBase():
def __init__(self,windowControl,form):
self.windowControl = windowControl
self.form = form
pass
def createLine(self, line):
lineSizer = LineCtrl(self, self.windowControl)
self.windowSizer.AddSpacer(Builder.DEFAULT_LINE_HEIGHT_SPACE)
lineSizer.initCtrls(self, line)
flag = wx.EXPAND | wx.ALL
if line.align == 'center':
flag = flag | wx.ALIGN_CENTER
elif line.align == 'right':
flag = flag | wx.ALIGN_RIGHT
elif line.align == 'left':
flag = flag | wx.ALIGN_LEFT
proportion = 0
if line.expand:
proportion = 1
self.windowSizer.AddWindow(lineSizer, proportion, border=Builder.DEFAULT_LINE_WIDTH_EDGE, flag=flag)
# if line.lineId != "":
# self.idLineMap[line.lineId] = lineSizer
if line.visible == False:
lineSizer.Show(False)
def createPanel(self,panel):
eventId = wx.NewId()
panelControl = PanelCtrl(id=eventId, name=panel.panelId,
parent=self,
pos=wx.Point(0, 0),
size=wx.Size(self.form.windowWidth, panel.height),
lines=panel.getLines(),form=self.form,
style=wx.TAB_TRAVERSAL, windowControl=self.windowControl
)
self.windowControl.registItem(panel.panelId, eventId, None, panelControl)
proportion = 1
if panel.height >= 0:
proportion = 0
self.windowSizer.AddWindow(panelControl, proportion, border=0, flag=wx.ALL | wx.EXPAND)
if panel.visible is False:
panelControl.Show(False)
if panel.enable is False:
panelControl.Enable(False)
panelControl.panelIndex = None
panelControl.id = panel.panelId
return panelControl
def createNotebook(self, notebook):
eventId = wx.NewId()
notebookCtrl = wx.Notebook(id=eventId, name=notebook.id,
parent=self,
pos=wx.Point(0, 0), size=wx.Size(self.form.windowWidth, notebook.height),
style=0)
self.windowControl.registItem(notebook.id, eventId, None, notebookCtrl)
proportion = 1
if notebook.height >= 0:
proportion = 0
self.windowSizer.AddWindow(notebookCtrl, proportion, border=0, flag=wx.ALL | wx.EXPAND)
panels = notebook.getPanels()
panelIndex = 0
for panel in panels:
eventId = wx.NewId()
panelControl = PanelCtrl(id=eventId, name=panel.panelId,
parent=notebookCtrl,
pos=wx.Point(0, 0),
size=wx.Size(self.form.windowWidth, notebook.height),
lines=panel.getLines(),form=self.form,
style=wx.TAB_TRAVERSAL, windowControl=self.windowControl
)
self.windowControl.registItem(panel.panelId, eventId, None, panelControl)
#self.idPanelMap[panel.panelId] = panelControl
if panel.visible is False:
panelControl.Show(False)
if panel.enable is False:
panelControl.Enable(False)
notebookCtrl.AddPage(panelControl, panel.panelName)
panelControl.panelIndex = panelIndex
panelControl.noteboolCtrl = notebookCtrl
panelIndex += 1
panelControl.id = panel.panelId
#self.panelList.append(panelControl)
def showWindow(self,lines):
edge = 5
self.lines = lines
self.windowSizer = wx.BoxSizer(orient=wx.VERTICAL)
self.viewHeight = edge
self.lineNum = 0
for line in self.lines:
if isinstance(line, Line):
self.createLine(line)
if isinstance(line, Panel):
self.createPanel(line)
if isinstance(line, Notebook):
self.createNotebook(line)
self.SetSizer(self.windowSizer,True)
self.SetAutoLayout(1)
self.Layout()
if isinstance(self, PanelCtrl):
self.SetupScrolling()
def showMenu(self):
menubarInfo = self.form.menubar
if menubarInfo is None:
return
self.menuBar = wx.MenuBar()
self.menuIdCtrlmap = {}
self.menuCtrlIdMap = {}
for submenuInfo in menubarInfo.subMenus:
menu, type = self.createMenu(None, submenuInfo)
self.menuBar.Append(menu, submenuInfo.title)
self.menuBar.Show()
self.SetMenuBar(self.menuBar)
for submenuInfo in menubarInfo.subMenus:
self.updateMenuEnable(submenuInfo)
def createMenu(self, currentMenu, menuInfo):
eventId = wx.NewId()
if len(menuInfo.subMenus) > 0 or currentMenu == None:
menu = wx.Menu()
for submenuInfo in menuInfo.subMenus:
submenu,type=self.createMenu(menu, submenuInfo)
if type == 'menu':
menu.AppendMenu(eventId,submenuInfo.title,submenu)
self.windowControl.registItem(menuInfo.id, eventId, None, None)
else:
menu.AppendItem(submenu)
return menu,'menu'
else:
menuInfo.title = menuInfo.title.replace('\\t', '\t')
menuItem = wx.MenuItem(currentMenu, eventId, menuInfo.title, menuInfo.hint)
self.windowControl.registItem(menuInfo.id, eventId, None, menuItem)
self.windowControl.registItemHandler(self, eventId, wx.EVT_MENU, 'evt_menu')
return menuItem,'item'
def updateMenuEnable(self, menuInfo):
if len(menuInfo.subMenus) > 0:
if menuInfo.enable == False and menuInfo.id != '':
if menuInfo.id in self.windowControl.id2EventIdMap.keys():
self.menuBar.Enable(self.windowControl.id2EventIdMap[menuInfo.id], False)
for subMenuInfo in menuInfo.subMenus:
self.updateMenuEnable(subMenuInfo)
else:
if menuInfo.enable == False:
if menuInfo.id in self.windowControl.id2EventIdMap.keys():
self.menuBar.Enable(self.windowControl.id2EventIdMap[menuInfo.id], False)
##Line control
##Controls must be placed inside a LineCtrl
class LineCtrl(wx.BoxSizer):
def __init__(self,parent, windowControl):
wx.BoxSizer.__init__(self,wx.HORIZONTAL)
self.windowControl = windowControl
self.parent = parent
def createMultiFolderFileControl(self, item,value,itemWidth, itemHeight,bAddFile,bAddFolder):
item['control'] = MultiFolderFile(parent=self.parent,
pos=wx.Point(0, 0),
size=wx.Size(itemWidth, itemHeight),
mask=BuilderUtil.getItemValue(item, 'mask', '*'),
bAddFile=bAddFile, bAddFolder=bAddFolder)
if value != "":
item['control'].SetValue(value)
#self.valueItems.append(item)
def getAlign(self,item):
if 'align' in item.keys():
alignText = BuilderUtil.getItemValue(item,'align','left')
if alignText == 'center':
return wx.ALIGN_CENTER
elif alignText == 'right':
return wx.ALIGN_RIGHT
elif alignText == 'left' :
return wx.ALIGN_LEFT
return 0
def createItem(self, lineSizer, item):
global gControlTypeRegister
if item['type'] in gControlTypeRegister.keys():
item['control'] = gControlTypeRegister[item['type']](item, self.parent,self.windowControl)
else:
return
if 'value' in item.keys() and item['value'] != '':
item['control'].SetValue(item['value'])
if 'visible' in item.keys():
if item['visible'] == 'false':
item['control'].Show(False)
else:
item['control'].Show(True)
if 'enable' in item.keys():
if BuilderUtil.getItemValue(item, 'enable', 'true') == 'true':
item['control'].Enable(True)
else:
item['control'].Enable(False)
#if 'id' in item.keys():
# self.idItemMap[item['id']] = item
item['parent'] = self
proportion = 1
if 'width' in item.keys() and int(item['width']) > 0:
proportion = 0
self.Add(item['control'], proportion, wx.EXPAND)
if 'id' in item.keys():
self.windowControl.registItem(item['id'], item['event_id'], item, item['control'])
return True
def initCtrls(self, parent, line):
self.parent = parent
for item in line.items:
self.createItem(self, item)
def Enable(self, bEnable):
EnableSizer(self, bEnable)
def Show(self, bShow):
self.ShowItems(bShow)
def highLight(self):
children = self.GetChildren()
for child in children:
widget = child.GetWindow()
if isinstance(widget, wx.BoxSizer):
continue
elif not isinstance(widget,wx.StaticText) and not isinstance(widget,StaticLine):
widget.SetFocus()
##Panel
class PanelCtrl(scrolled.ScrolledPanel,CtrlBase):
def __init__(self, parent, id, pos, size, style, name,lines,form,windowControl):
#scrolled.ScrolledPanel.__init__(self, parent, -1)
self.form = form
CtrlBase.__init__(self, windowControl,form)
scrolled.ScrolledPanel.__init__(self, style=style, name=name,
parent=parent, pos=pos, id=id,
size=size)
self.parent = self
self.showWindow(lines)
#Form Ctrl
class FormCtrl(CtrlBase):
def __init__(self,form, windowControl):
CtrlBase.__init__(self, windowControl, form)
self.menuBar = None
self.showMenu()
self.showWindow(form.lines)
def DestroyForm(self):
self.windowSizer.DeleteWindows()
if self.menuBar != None:
self.SetMenuBar(None)
self.Layout()
##HandlerPara
##Menus and controls can set a handler, which is called on click and other events.
##A HandlerPara is passed to the handler, from which the handler can read control values.
class HandlerPara():
def __init__(self, eventId,eventType,handler):
self.valueList = {}
self.eventId = eventId
self.eventType = eventType
self.handler = handler
def getValue(self,id):
if id in self.valueList.keys():
return self.valueList[id]
return None
def getEventId(self):
return self.eventId
def getEventType(self):
return self.eventType
##WindowHandler
##Menus and controls can set a handler, which is called on click and other events.
##A WindowHandler is passed to the handler, through which the handler can control the window.
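##The methods below pack their arguments into a dict and post it to the window as a
##ResultEvent via wx.PostEvent, so the actual GUI work runs on the main event loop;
##calls that need an answer (sendMessage, messageBox, confirmMessageBox) block on
##handlerFinishQueue until the window reports the handler result.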
class WindowHandler():
def __init__(self, window):
self.window = window
self.windowClosed = False
self.returnOk = False
def __setWaitHandler(self,para):
para['syncTask'] = True
self.window.handlerReturn = None
def __waitHandlerFinish(self):
try:
task = self.window.handlerFinishQueue.get(block=True)
except Queue.Empty:
return
self.window.handlerFinishQueue.task_done()
return self.window.handlerReturn
def closeWindow(self, returnOk = True):
self.window.windowState = self.window.WINDOW_STATE_CLOSED
self.returnOk = returnOk
para = {}
para['event'] = EVENT_WORKTHREAD_CLOSE
wx.PostEvent(self.window, ResultEvent(para))
def showWindow(self, bShow):
para = {}
para['bShow'] = bShow
para['event'] = EVENT_WORKTHREAD_SHOW
wx.PostEvent(self.window, ResultEvent(para))
def enableCtrl(self, itemId, bEnable):
para = {}
para['itemId'] = itemId
para['bEnable'] = bEnable
para['event'] = EVENT_WORKTHREAD_ENABLE_ITEM
wx.PostEvent(self.window, ResultEvent(para))
def showCtrl(self, itemId, bShow):
para = {}
para['itemId'] = itemId
para['bShow'] = bShow
para['event'] = EVENT_WORKTHREAD_SHOW_ITEM
wx.PostEvent(self.window, ResultEvent(para))
def setValue(self,itemId, value):
para = {}
para['itemId'] = itemId
para['value'] = value
para['event'] = EVENT_WORKTHREAD_ITEM_SET_VALUE
wx.PostEvent(self.window, ResultEvent(para))
def update(self, builder, updateWindow=True):
para = {}
para['builder'] = builder
para['updateWindow'] = updateWindow
para['event'] = EVENT_WORKTHREAD_UPDATE
wx.PostEvent(self.window, ResultEvent(para))
def showForm(self,builder, bModule=False):
para = {}
para['builder'] = builder
waitQueue = Queue.Queue(1)
para['event'] = EVENT_WORKTHREAD_SHOWFORM
workThread = SubFormThread(waitQueue = waitQueue)
para['workThread'] = workThread
wx.PostEvent(self.window, ResultEvent(para))
if bModule:
try:
task = waitQueue.get(block=True)
except Queue.Empty:
pass
waitQueue.task_done()
return workThread.returnState, workThread.valueList
def sendMessage(self,itemId,messageId,messagePara):
para = {}
para['itemId'] = itemId
para['event'] = EVENT_WORKTHREAD_MESSAGE
para['messageId'] = messageId
para['messagePara'] = messagePara
self.__setWaitHandler(para)
wx.PostEvent(self.window, ResultEvent(para))
ret = self.__waitHandlerFinish()
return ret
def messageBox(self,message, caption):
para = {}
para['message'] = message
para['caption'] = caption
self.__setWaitHandler(para)
para['event'] = EVENT_WORKTHREAD_MESSAGEBOX
wx.PostEvent(self.window, ResultEvent(para))
self.__waitHandlerFinish()
def confirmMessageBox(self,message, caption, bWithCancelButton=False):
para = {}
para['message'] = message
para['caption'] = caption
para['bWithCancelButton'] = bWithCancelButton
self.__setWaitHandler(para)
para['event'] = EVENT_WORKTHREAD_CONFIRM_MESSAGEBOX
wx.PostEvent(self.window, ResultEvent(para))
ret = self.__waitHandlerFinish()
if ret == wx.ID_YES:
return 'yes'
elif ret == wx.ID_NO:
return 'no'
else:
return 'cancel'
def highlightItem(self,itemId):
#self.window.highlightItem(itemId)
para = {}
para['itemId'] = itemId
para['event'] = EVENT_WORKTHREAD_HIGHLIGHT_ITEM
wx.PostEvent(self.window, ResultEvent(para))
def getBuilder(self):
return self.window.builder
|
|
try: paraview.simple
except: from paraview.simple import *
def RequestDataDescription(datadescription):
"Callback to populate the request for current timestep"
if datadescription.GetForceOutput() == True:
for i in range(datadescription.GetNumberOfInputDescriptions()):
datadescription.GetInputDescription(i).AllFieldsOn()
datadescription.GetInputDescription(i).GenerateMeshOn()
return
timestep = datadescription.GetTimeStep()
input_name = 'input'
if (timestep % 1 == 0) :
datadescription.GetInputDescriptionByName(input_name).AllFieldsOn()
datadescription.GetInputDescriptionByName(input_name).GenerateMeshOn()
else:
datadescription.GetInputDescriptionByName(input_name).AllFieldsOff()
datadescription.GetInputDescriptionByName(input_name).GenerateMeshOff()
def DoCoProcessing(datadescription):
"Callback to do co-processing for current timestep"
cp_writers = []
cp_views = []
timestep = datadescription.GetTimeStep()
a2DRenderView1 = CreateView( Create2DRenderView, "image_%t.png", 1, 0, 1, cp_views )
# mvm: PV bug workaround, client doesn't export viewsize
# mvm: set to Lock View Size Custom settings
a2DRenderView1.ViewSize = [640,480]
a2DRenderView1.LightSpecularColor = [1.0, 1.0, 1.0]
a2DRenderView1.KeyLightAzimuth = 10.0
a2DRenderView1.UseTexturedBackground = 0
a2DRenderView1.UseLight = 0
a2DRenderView1.CameraPosition = [316.21445673906658, 513.7761642761908, 2038.7425740279175]
a2DRenderView1.FillLightKFRatio = 3.0
a2DRenderView1.Background2 = [0.0, 0.0, 0.16500000000000001]
a2DRenderView1.FillLightAzimuth = -10.0
a2DRenderView1.LODResolution = 50.0
a2DRenderView1.BackgroundTexture = []
a2DRenderView1.InteractionMode = '3D'
a2DRenderView1.StencilCapable = 1
a2DRenderView1.LightIntensity = 1.0
a2DRenderView1.CameraFocalPoint = [316.21445673906658, 513.7761642761908, -756.14540211504584]
a2DRenderView1.ImageReductionFactor = 2
a2DRenderView1.CameraViewAngle = 30.0
a2DRenderView1.CameraParallelScale = 723.37023715383816
a2DRenderView1.EyeAngle = 2.0
a2DRenderView1.HeadLightKHRatio = 3.0
a2DRenderView1.StereoRender = 0
a2DRenderView1.KeyLightIntensity = 0.75
a2DRenderView1.BackLightAzimuth = 110.0
a2DRenderView1.AxesVisibility = 0
a2DRenderView1.OrientationAxesInteractivity = 0
a2DRenderView1.UseInteractiveRenderingForSceenshots = 0
a2DRenderView1.UseOffscreenRendering = 0
a2DRenderView1.Background = [0.18431372549019609, 0.18431372549019609, 0.18431372549019609]
a2DRenderView1.UseOffscreenRenderingForScreenshots = 0
a2DRenderView1.NonInteractiveRenderDelay = 2
a2DRenderView1.CenterOfRotation = [0.0, 0.0, 0.0]
a2DRenderView1.CameraParallelProjection = 0
a2DRenderView1.CompressorConfig = 'vtkSquirtCompressor 0 3'
a2DRenderView1.HeadLightWarmth = 0.5
a2DRenderView1.MaximumNumberOfPeels = 4
a2DRenderView1.LightDiffuseColor = [1.0, 1.0, 1.0]
a2DRenderView1.StereoType = 'Red-Blue'
a2DRenderView1.DepthPeeling = 1
a2DRenderView1.BackLightKBRatio = 3.5
a2DRenderView1.StereoCapableWindow = 1
a2DRenderView1.CameraViewUp = [0.0, 1.0, 0.0]
a2DRenderView1.LightType = 'HeadLight'
a2DRenderView1.LightAmbientColor = [1.0, 1.0, 1.0]
a2DRenderView1.RemoteRenderThreshold = 3.0
a2DRenderView1.CacheKey = 0.0
a2DRenderView1.UseCache = 0
a2DRenderView1.KeyLightElevation = 50.0
a2DRenderView1.CenterAxesVisibility = 1
a2DRenderView1.MaintainLuminance = 0
a2DRenderView1.StillRenderImageReductionFactor = 1
a2DRenderView1.BackLightWarmth = 0.5
a2DRenderView1.FillLightElevation = -75.0
a2DRenderView1.MultiSamples = 0
a2DRenderView1.FillLightWarmth = 0.40000000000000002
a2DRenderView1.AlphaBitPlanes = 1
a2DRenderView1.LightSwitch = 1
a2DRenderView1.OrientationAxesVisibility = 1
a2DRenderView1.CameraClippingRange = [2018.3551482876383, 2069.3237126383365]
a2DRenderView1.BackLightElevation = 0.0
a2DRenderView1.ViewTime = 0.0
a2DRenderView1.OrientationAxesOutlineColor = [1.0, 1.0, 1.0]
a2DRenderView1.LODThreshold = 5.0
a2DRenderView1.CollectGeometryThreshold = 100.0
a2DRenderView1.UseGradientBackground = 0
a2DRenderView1.KeyLightWarmth = 0.59999999999999998
a2DRenderView1.OrientationAxesLabelColor = [1.0, 1.0, 1.0]
ns2dcn = CreateProducer( datadescription, "input" )
a1_omeg_PiecewiseFunction = CreatePiecewiseFunction( Points=[0.0, 0.0, 0.5, 0.0, 1.0, 1.0, 0.5, 0.0] )
a1_omeg_PVLookupTable = GetLookupTableForArray( "omeg", 1, Discretize=1, RGBPoints=[-13166394.436894085, 0.0, 1.0, 1.0, -1316639.4436894078, 0.0, 0.0, 0.99215686274509807, 0.0, 0.0, 0.0, 0.52156862745098043, 1316639.4436894096, 0.98039215686274506, 0.0, 0.011764705882352941, 13166394.436894085, 1.0, 1.0, 0.0], UseLogScale=0, VectorComponent=0, NanColor=[1.0, 1.0, 0.0], NumberOfTableValues=256, ColorSpace='RGB', VectorMode='Magnitude', HSVWrap=0, ScalarRangeInitialized=1.0, LockScalarRange=0 )
# mvm: PV bug, Enabled=1 means 'active gui component' which draws a box around it
# mvm: set to 0
ScalarBarWidgetRepresentation1 = CreateScalarBar( Title='omeg', Position2=[0.12999999999999995, 0.96555323590814335], TitleOpacity=1.0, TitleShadow=0, AutomaticLabelFormat=0, TitleFontSize=14, TitleColor=[1.0, 1.0, 1.0], AspectRatio=20.0, NumberOfLabels=5, ComponentTitle='', Resizable=1, TitleFontFamily='Arial', Visibility=1, LabelFontSize=12, LabelFontFamily='Arial', TitleItalic=0, Selectable=0, LabelItalic=0, Enabled=0, LabelColor=[1.0, 1.0, 1.0], Position=[0.076572769953051609, 0.029227557411273364], LabelBold=0, UseNonCompositedRenderer=1, LabelOpacity=1.0, TitleBold=0, LabelFormat='%-#6.3g', Orientation='Vertical', LabelShadow=0, LookupTable=a1_omeg_PVLookupTable, Repositionable=1 )
GetRenderView().Representations.append(ScalarBarWidgetRepresentation1)
DataRepresentation1 = Show()
DataRepresentation1.Opacity = 1.0
DataRepresentation1.Origin = [0.0, 0.0, 0.0]
DataRepresentation1.Scale = [1.0, 1.0, 1.0]
DataRepresentation1.UseXYPlane = 0
DataRepresentation1.ColorAttributeType = 'POINT_DATA'
DataRepresentation1.Position = [0.0, 0.0, 0.0]
DataRepresentation1.ColorArrayName = 'omeg'
DataRepresentation1.Visibility = 1
DataRepresentation1.Slice = 0
DataRepresentation1.LookupTable = a1_omeg_PVLookupTable
DataRepresentation1.MapScalars = 1
DataRepresentation1.SliceMode = 'XY Plane'
DataRepresentation1.Pickable = 1
DataRepresentation1.Orientation = [0.0, 0.0, 0.0]
for writer in cp_writers:
if timestep % writer.cpFrequency == 0 or datadescription.GetForceOutput() == True:
writer.FileName = writer.cpFileName.replace("%t", str(timestep))
writer.UpdatePipeline()
if False : # rescale data range
import math
for view in cp_views:
if timestep % view.cpFrequency == 0 or datadescription.GetForceOutput() == True:
reps = view.Representations
for rep in reps:
if hasattr(rep, 'Visibility') and rep.Visibility == 1 and hasattr(rep, 'MapScalars') and rep.MapScalars != '':
input = rep.Input
input.UpdatePipeline() #make sure range is up-to-date
lut = rep.LookupTable
if lut == None:
continue
if rep.ColorAttributeType == 'POINT_DATA':
datainformation = input.GetPointDataInformation()
elif rep.ColorAttributeType == 'CELL_DATA':
datainformation = input.GetCellDataInformation()
else:
print 'something strange with color attribute type', rep.ColorAttributeType
if lut.VectorMode != 'Magnitude' or datainformation.GetArray(rep.ColorArrayName).GetNumberOfComponents() == 1:
datarange = datainformation.GetArray(rep.ColorArrayName).GetRange(lut.VectorComponent)
else:
datarange = [0,0]
for i in range(datainformation.GetArray(rep.ColorArrayName).GetNumberOfComponents()):
for j in range(2):
datarange[j] += datainformation.GetArray(rep.ColorArrayName).GetRange(i)[j]*datainformation.GetArray(rep.ColorArrayName).GetRange(i)[j]
datarange[0] = math.sqrt(datarange[0])
datarange[1] = math.sqrt(datarange[1])
rgbpoints = lut.RGBPoints.GetData()
numpts = len(rgbpoints)/4
minvalue = min(datarange[0], rgbpoints[0])
maxvalue = max(datarange[1], rgbpoints[(numpts-1)*4])
if minvalue != rgbpoints[0] or maxvalue != rgbpoints[(numpts-1)*4]:
# rescale all of the points
oldrange = rgbpoints[(numpts-1)*4] - rgbpoints[0]
newrange = maxvalue - minvalue
newrgbpoints = list(rgbpoints)
for v in range(numpts):
newrgbpoints[v*4] = minvalue+(rgbpoints[v*4] - rgbpoints[0])*newrange/oldrange
lut.RGBPoints.SetData(newrgbpoints)
for view in cp_views:
if timestep % view.cpFrequency == 0 or datadescription.GetForceOutput() == True:
fname = view.cpFileName
fname = fname.replace("%t", str(timestep))
if view.cpFitToScreen != 0:
if view.IsA("vtkSMRenderViewProxy") == True:
view.ResetCamera()
elif view.IsA("vtkSMContextViewProxy") == True:
view.ResetDisplay()
else:
print ' do not know what to do with a ', view.GetClassName()
WriteImage(fname, view, Magnification=view.cpMagnification)
# explicitly delete the proxies -- we do it this way to avoid problems with prototypes
tobedeleted = GetNextProxyToDelete()
while tobedeleted != None:
Delete(tobedeleted)
tobedeleted = GetNextProxyToDelete()
def GetNextProxyToDelete():
proxyiterator = servermanager.ProxyIterator()
for proxy in proxyiterator:
group = proxyiterator.GetGroup()
if group.find("prototypes") != -1:
continue
if group != 'timekeeper' and group.find("pq_helper_proxies") == -1 :
return proxy
return None
def CreateProducer(datadescription, gridname):
"Creates a producer proxy for the grid"
if not datadescription.GetInputDescriptionByName(gridname):
raise RuntimeError, "Simulation input name '%s' does not exist" % gridname
grid = datadescription.GetInputDescriptionByName(gridname).GetGrid()
producer = PVTrivialProducer()
producer.GetClientSideObject().SetOutput(grid)
if grid.IsA("vtkImageData") == True or grid.IsA("vtkStructuredGrid") == True or grid.IsA("vtkRectilinearGrid") == True:
extent = datadescription.GetInputDescriptionByName(gridname).GetWholeExtent()
producer.WholeExtent= [ extent[0], extent[1], extent[2], extent[3], extent[4], extent[5] ]
producer.UpdatePipeline()
return producer
def CreateWriter(proxy_ctor, filename, freq, cp_writers):
writer = proxy_ctor()
writer.FileName = filename
writer.add_attribute("cpFrequency", freq)
writer.add_attribute("cpFileName", filename)
cp_writers.append(writer)
return writer
def CreateView(proxy_ctor, filename, freq, fittoscreen, magnification, cp_views):
view = proxy_ctor()
view.add_attribute("cpFileName", filename)
view.add_attribute("cpFrequency", freq)
view.add_attribute("cpFileName", filename)
view.add_attribute("cpFitToScreen", fittoscreen)
view.add_attribute("cpMagnification", magnification)
cp_views.append(view)
return view
|
|
"""
Apple Quicktime Movie (file extension ".mov") parser.
Documents:
- Parsing and Writing QuickTime Files in Java (by Chris Adamson, 02/19/2003)
http://www.onjava.com/pub/a/onjava/2003/02/19/qt_file_format.html
- QuickTime File Format (official technical reference)
http://developer.apple.com/documentation/QuickTime/QTFF/qtff.pdf
- Apple QuickTime:
http://wiki.multimedia.cx/index.php?title=Apple_QuickTime
- File type (ftyp):
http://www.ftyps.com/
Author: Victor Stinner
Creation: 2 august 2006
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (ParserError, FieldSet, MissingField,
UInt8, Int16, UInt16, UInt32, TimestampMac32,
String, PascalString8, CString,
RawBytes, PaddingBytes)
from lib.hachoir_core.endian import BIG_ENDIAN
from lib.hachoir_core.text_handler import textHandler, hexadecimal
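# Several QuickTime header fields (the transformation matrix, playback speed, ...)
# are stored as 32-bit fixed-point values: a signed 16-bit integer part followed by
# a 16-bit fractional part. QTFloat32 reads both halves and exposes them as a float.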
class QTFloat32(FieldSet):
static_size = 32
def createFields(self):
yield Int16(self, "int_part")
yield UInt16(self, "float_part")
def createValue(self):
return self["int_part"].value + float(self["float_part"].value) / 65535
def createDescription(self):
return str(self.value)
class AtomList(FieldSet):
def createFields(self):
while not self.eof:
yield Atom(self, "atom[]")
class TrackHeader(FieldSet):
def createFields(self):
yield textHandler(UInt8(self, "version"), hexadecimal)
# TODO: sum of :
# TrackEnabled = 1;
# TrackInMovie = 2;
# TrackInPreview = 4;
# TrackInPoster = 8
yield RawBytes(self, "flags", 3)
yield TimestampMac32(self, "creation_date")
yield TimestampMac32(self, "lastmod_date")
yield UInt32(self, "track_id")
yield PaddingBytes(self, "reserved[]", 8)
yield UInt32(self, "duration")
yield PaddingBytes(self, "reserved[]", 8)
yield Int16(self, "video_layer", "Middle is 0, negative in front")
yield PaddingBytes(self, "other", 2)
yield QTFloat32(self, "geom_a", "Width scale")
yield QTFloat32(self, "geom_b", "Width rotate")
yield QTFloat32(self, "geom_u", "Width angle")
yield QTFloat32(self, "geom_c", "Height rotate")
yield QTFloat32(self, "geom_d", "Height scale")
yield QTFloat32(self, "geom_v", "Height angle")
yield QTFloat32(self, "geom_x", "Position X")
yield QTFloat32(self, "geom_y", "Position Y")
yield QTFloat32(self, "geom_w", "Divider scale")
yield QTFloat32(self, "frame_size_width")
yield QTFloat32(self, "frame_size_height")
class HDLR(FieldSet):
def createFields(self):
yield textHandler(UInt8(self, "version"), hexadecimal)
yield RawBytes(self, "flags", 3)
yield String(self, "subtype", 8)
yield String(self, "manufacturer", 4)
yield UInt32(self, "res_flags")
yield UInt32(self, "res_flags_mask")
if self.root.is_mpeg4:
yield CString(self, "name")
else:
yield PascalString8(self, "name")
class MediaHeader(FieldSet):
def createFields(self):
yield textHandler(UInt8(self, "version"), hexadecimal)
yield RawBytes(self, "flags", 3)
yield TimestampMac32(self, "creation_date")
yield TimestampMac32(self, "lastmod_date")
yield UInt32(self, "time_scale")
yield UInt32(self, "duration")
yield UInt16(self, "mac_lang")
yield Int16(self, "quality")
class ELST(FieldSet):
def createFields(self):
yield textHandler(UInt8(self, "version"), hexadecimal)
yield RawBytes(self, "flags", 3)
yield UInt32(self, "nb_edits")
yield UInt32(self, "length")
yield UInt32(self, "start")
yield QTFloat32(self, "playback_speed")
class Load(FieldSet):
def createFields(self):
yield UInt32(self, "start")
yield UInt32(self, "length")
yield UInt32(self, "flags") # PreloadAlways = 1 or TrackEnabledPreload = 2
yield UInt32(self, "hints") # KeepInBuffer = 0x00000004; HighQuality = 0x00000100; SingleFieldVideo = 0x00100000
class MovieHeader(FieldSet):
def createFields(self):
yield textHandler(UInt8(self, "version"), hexadecimal)
yield RawBytes(self, "flags", 3)
yield TimestampMac32(self, "creation_date")
yield TimestampMac32(self, "lastmod_date")
yield UInt32(self, "time_scale")
yield UInt32(self, "duration")
yield QTFloat32(self, "play_speed")
yield UInt16(self, "volume")
yield PaddingBytes(self, "reserved[]", 10)
yield QTFloat32(self, "geom_a", "Width scale")
yield QTFloat32(self, "geom_b", "Width rotate")
yield QTFloat32(self, "geom_u", "Width angle")
yield QTFloat32(self, "geom_c", "Height rotate")
yield QTFloat32(self, "geom_d", "Height scale")
yield QTFloat32(self, "geom_v", "Height angle")
yield QTFloat32(self, "geom_x", "Position X")
yield QTFloat32(self, "geom_y", "Position Y")
yield QTFloat32(self, "geom_w", "Divider scale")
yield UInt32(self, "preview_start")
yield UInt32(self, "preview_length")
yield UInt32(self, "still_poster")
yield UInt32(self, "sel_start")
yield UInt32(self, "sel_length")
yield UInt32(self, "current_time")
yield UInt32(self, "next_track")
class FileType(FieldSet):
def createFields(self):
yield String(self, "brand", 4, "Major brand")
yield UInt32(self, "version", "Version")
while not self.eof:
yield String(self, "compat_brand[]", 4, "Compatible brand")
class Atom(FieldSet):
tag_info = {
# TODO: Use a dictionary of dictionaries, like the Matroska parser does
# "elst" is a child of "edts", but not of "moov" for example
"moov": (AtomList, "movie", "Movie"),
"trak": (AtomList, "track", "Track"),
"mdia": (AtomList, "media", "Media"),
"edts": (AtomList, "edts", ""),
"minf": (AtomList, "minf", ""),
"stbl": (AtomList, "stbl", ""),
"dinf": (AtomList, "dinf", ""),
"elst": (ELST, "edts", ""),
"tkhd": (TrackHeader, "track_hdr", "Track header"),
"hdlr": (HDLR, "hdlr", ""),
"mdhd": (MediaHeader, "media_hdr", "Media header"),
"load": (Load, "load", ""),
"mvhd": (MovieHeader, "movie_hdr", "Movie header"),
"ftyp": (FileType, "file_type", "File type"),
}
tag_handler = [ item[0] for item in tag_info.values() ]
tag_desc = [ item[2] for item in tag_info.values() ]
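# Atom layout: a 32-bit big-endian size followed by a 4-character tag. A size of 1
# announces a 64-bit extended size (not handled here), a size of 0 means the atom
# runs to the end of its container, and otherwise the payload is size - 8 bytes.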
def createFields(self):
yield UInt32(self, "size")
yield String(self, "tag", 4)
size = self["size"].value
if size == 1:
raise ParserError("Extended size is not supported!")
#yield UInt64(self, "size64")
size = self["size64"].value
elif size == 0:
#size = (self.root.size - self.root.current_size - self.current_size) / 8
if self._size is None:
size = (self.parent.size - self.current_size) / 8 - 8
else:
size = (self.size - self.current_size) / 8
else:
size = size - 8
if 0 < size:
tag = self["tag"].value
if tag in self.tag_info:
handler, name, desc = self.tag_info[tag]
yield handler(self, name, desc, size=size*8)
else:
yield RawBytes(self, "data", size)
def createDescription(self):
return "Atom: %s" % self["tag"].value
class MovFile(Parser):
PARSER_TAGS = {
"id": "mov",
"category": "video",
"file_ext": ("mov", "qt", "mp4", "m4v", "m4a", "m4p", "m4b"),
"mime": (u"video/quicktime", u'video/mp4'),
"min_size": 8*8,
"magic": (("moov", 4*8),),
"description": "Apple QuickTime movie"
}
BRANDS = {
# File type brand => MIME type
'mp41': u'video/mp4',
'mp42': u'video/mp4',
}
endian = BIG_ENDIAN
def __init__(self, *args, **kw):
Parser.__init__(self, *args, **kw)
self.is_mpeg4 = False
def validate(self):
# TODO: Write better code, erk!
size = self.stream.readBits(0, 32, self.endian)
if size < 8:
return "Invalid first atom size"
tag = self.stream.readBytes(4*8, 4)
return tag in ("ftyp", "moov", "free")
def createFields(self):
while not self.eof:
yield Atom(self, "atom[]")
def createMimeType(self):
first = self[0]
try:
# Read brands in the file type
if first['tag'].value != "ftyp":
return None
file_type = first["file_type"]
brand = file_type["brand"].value
if brand in self.BRANDS:
return self.BRANDS[brand]
for field in file_type.array("compat_brand"):
brand = field.value
if brand in self.BRANDS:
return self.BRANDS[brand]
except MissingField:
pass
return None
|
|
"""
A generic comment-moderation system which allows configuration of
moderation options on a per-model basis.
To use, do two things:
1. Create or import a subclass of ``CommentModerator`` defining the
options you want.
2. Import ``moderator`` from this module and register one or more
models, passing the models and the ``CommentModerator`` options
class you want to use.
Example
-------
First, we define a simple model class which might represent entries in
a weblog::
from django.db import models
class Entry(models.Model):
title = models.CharField(maxlength=250)
body = models.TextField()
pub_date = models.DateField()
enable_comments = models.BooleanField()
Then we create a ``CommentModerator`` subclass specifying some
moderation options::
from comment_utils.moderation import CommentModerator, moderator
class EntryModerator(CommentModerator):
akismet = True
email_notification = True
enable_field = 'enable_comments'
And finally register it for moderation::
moderator.register(Entry, EntryModerator)
This sample class would apply several moderation steps to each new
comment submitted on an Entry:
* If the entry's ``enable_comments`` field is set to ``False``, the
comment will be rejected (immediately deleted).
* If the comment is allowed to post, it will be submitted to an
Akismet spam check (requires the Python Akismet module and an
Akismet API key); if Akismet thinks the comment is spam, its
``is_public`` field will be set to ``False``.
* If the comment is successfully posted, an email notification of the
comment will be sent to site staff.
For a full list of built-in moderation options and other
configurability, see the documentation for the ``CommentModerator``
class.
Several example subclasses of ``CommentModerator`` are provided in
this module as well, both to provide common moderation options and to
demonstrate some of the ways subclasses can customize moderation
behavior.
"""
import datetime
from django.conf import settings
from django.core.mail import send_mail
from django.db.models import signals
from django.db.models.base import ModelBase
from django.template import Context, loader
from django.contrib import comments
from django.contrib.sites.models import Site
class AlreadyModerated(Exception):
"""
Raised when a model which is already registered for moderation is
attempting to be registered again.
"""
pass
class NotModerated(Exception):
"""
Raised when a model which is not registered for moderation is
attempting to be unregistered.
"""
pass
class CommentModerator(object):
"""
Encapsulates comment-moderation options for a given model.
This class is not designed to be used directly, since it doesn't
enable any of the available moderation options. Instead, subclass
it and override attributes to enable different options::
``akismet``
If ``True``, comments will be submitted to an Akismet spam
check and, if Akismet thinks they're spam, will have their
``is_public`` field set to ``False`` before saving. If
this is enabled, you will need to have the Python Akismet
module installed, and you will need to add the setting
``AKISMET_API_KEY`` to your Django settings file; the
value of this setting should be a valid Akismet API
key. Default value is ``False``.
``auto_close_field``
If this is set to the name of a ``DateField`` or
``DateTimeField`` on the model for which comments are
being moderated, new comments for objects of that model
will be disallowed (immediately deleted) when a certain
number of days have passed after the date specified in
that field. Must be used in conjunction with
``close_after``, which specifies the number of days past
which comments should be disallowed. Default value is
``None``.
``auto_moderate_field``
Like ``auto_close_field``, but instead of outright
deleting new comments when the requisite number of days
have elapsed, it will simply set the ``is_public`` field
of new comments to ``False`` before saving them. Must be
used in conjunction with ``moderate_after``, which
specifies the number of days past which comments should be
moderated. Default value is ``None``.
``close_after``
If ``auto_close_field`` is used, this must specify the
number of days past the value of the field specified by
``auto_close_field`` after which new comments for an
object should be disallowed. Default value is ``None``.
``email_notification``
If ``True``, any new comment on an object of this model
which survives moderation will generate an email to site
staff. Default value is ``False``.
``enable_field``
If this is set to the name of a ``BooleanField`` on the
model for which comments are being moderated, new comments
on objects of that model will be disallowed (immediately
deleted) whenever the value of that field is ``False`` on
the object the comment would be attached to. Default value
is ``None``.
``moderate_after``
        If ``auto_moderate_field`` is used, this must specify the number
of days past the value of the field specified by
``auto_moderate_field`` after which new comments for an
object should be marked non-public. Default value is
``None``.
Most common moderation needs can be covered by changing these
attributes, but further customization can be obtained by
subclassing and overriding the following methods. Each method will
be called with two arguments: ``comment``, which is the comment
being submitted, and ``content_object``, which is the object the
comment will be attached to::
``allow``
Should return ``True`` if the comment should be allowed to
post on the content object, and ``False`` otherwise (in
which case the comment will be immediately deleted).
``email``
If email notification of the new comment should be sent to
site staff or moderators, this method is responsible for
sending the email.
``moderate``
Should return ``True`` if the comment should be moderated
(in which case its ``is_public`` field will be set to
``False`` before saving), and ``False`` otherwise (in
which case the ``is_public`` field will not be changed).
Subclasses which want to introspect the model for which comments
are being moderated can do so through the attribute ``_model``,
which will be the model class.
"""
akismet = False
auto_close_field = None
auto_moderate_field = None
close_after = None
email_notification = False
enable_field = None
moderate_after = None
moderate_field = None
def __init__(self, model):
self._model = model
def _get_delta(self, now, then):
"""
Internal helper which will return a ``datetime.timedelta``
representing the time between ``now`` and ``then``. Assumes
``now`` is a ``datetime.date`` or ``datetime.datetime`` later
than ``then``.
If ``now`` and ``then`` are not of the same type due to one of
them being a ``datetime.date`` and the other being a
``datetime.datetime``, both will be coerced to
``datetime.date`` before calculating the delta.
"""
if now.__class__ is not then.__class__:
now = datetime.date(now.year, now.month, now.day)
then = datetime.date(then.year, then.month, then.day)
if now < then:
raise ValueError("Cannot determine moderation rules because date field is set to a value in the future")
return now - then
def allow(self, comment, content_object):
"""
Determine whether a given comment is allowed to be posted on
a given object.
        Return ``True`` if the comment should be allowed, ``False``
        otherwise.
"""
if self.enable_field:
if not getattr(content_object, self.enable_field):
return False
if self.auto_close_field and self.close_after:
if self._get_delta(datetime.datetime.now(), getattr(content_object, self.auto_close_field)).days >= self.close_after:
return False
return True
def moderate(self, comment, content_object):
"""
Determine whether a given comment on a given object should be
allowed to show up immediately, or should be marked non-public
and await approval.
Return ``True`` if the comment should be moderated (marked
non-public), ``False`` otherwise.
"""
if self.auto_moderate_field and self.moderate_after:
if self._get_delta(datetime.datetime.now(), getattr(content_object, self.auto_moderate_field)).days >= self.moderate_after:
return True
if self.akismet:
from akismet import Akismet
from django.utils.encoding import smart_str
akismet_api = Akismet(key=settings.AKISMET_API_KEY,
blog_url='http://%s/' % Site.objects.get_current().domain)
if akismet_api.verify_key():
akismet_data = { 'comment_type': 'comment',
'referrer': '',
'user_ip': comment.ip_address,
'user_agent': '' }
if akismet_api.comment_check(smart_str(comment.comment), data=akismet_data, build_data=True):
return True
return False
def comments_open(self, obj):
"""
Return ``True`` if new comments are being accepted for
``obj``, ``False`` otherwise.
The algorithm for determining this is as follows:
1. If ``enable_field`` is set and the relevant field on
``obj`` contains a false value, comments are not open.
2. If ``close_after`` is set and the relevant date field on
``obj`` is far enough in the past, comments are not open.
3. If neither of the above checks determined that comments are
not open, comments are open.
"""
if self.enable_field:
if not getattr(obj, self.enable_field):
return False
if self.auto_close_field and self.close_after:
if self._get_delta(datetime.datetime.now(), getattr(obj, self.auto_close_field)).days >= self.close_after:
return False
return True
def comments_moderated(self, obj):
"""
Return ``True`` if new comments for ``obj`` are being
automatically sent to moderation, ``False`` otherwise.
The algorithm for determining this is as follows:
1. If ``moderate_field`` is set and the relevant field on
``obj`` contains a true value, comments are moderated.
2. If ``moderate_after`` is set and the relevant date field on
``obj`` is far enough in the past, comments are moderated.
3. If neither of the above checks decided that comments are
moderated, comments are not moderated.
"""
if self.moderate_field:
if getattr(obj, self.moderate_field):
return True
if self.auto_moderate_field and self.moderate_after:
if self._get_delta(datetime.datetime.now(), getattr(obj, self.auto_moderate_field)).days >= self.moderate_after:
return True
return False
def email(self, comment, content_object):
"""
Send email notification of a new comment to site staff when email
notifications have been requested.
"""
if not self.email_notification:
return
recipient_list = [manager_tuple[1] for manager_tuple in settings.MANAGERS]
t = loader.get_template('comment_utils/comment_notification_email.txt')
c = Context({ 'comment': comment,
'content_object': content_object,
'site': Site.objects.get_current(),
})
subject = '[%s] Comment: "%s"' % (Site.objects.get_current().name,
content_object)
message = t.render(c)
send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, recipient_list, fail_silently=True)
class AkismetModerator(CommentModerator):
"""
Subclass of ``CommentModerator`` which applies Akismet spam
filtering to all new comments for its model.
"""
akismet = True
class AlwaysModerate(CommentModerator):
"""
Subclass of ``CommentModerator`` which forces all new comments for
its model into moderation (marks all comments non-public to begin
with).
"""
def moderate(self, comment, content_object):
"""
Always return ``True``, no matter what comment or content
object is supplied, so that new comments always get marked
non-public to start with.
"""
return True
def comments_moderated(self, obj):
"""
Always return ``True``, no matter what object is supplied,
because new comments always get moderated.
"""
return True
class NoComments(CommentModerator):
"""
Subclass of ``CommentModerator`` which forbids all new comments
for its model (deletes all comments posted to objects of that
model).
"""
def allow(self, comment, content_object):
"""
Always return ``False`` because new comments are never allowed
for this model.
"""
return False
def comments_open(self, obj):
"""
Always return ``False``, because new comments are never
allowed for this model.
"""
return False
class ModerateFirstTimers(CommentModerator):
"""
Subclass of ``CommentModerator`` which automatically moderates all
comments from anyone who has not previously had a comment
approved, while allowing all other comments to skip moderation.
"""
def moderate(self, comment, content_object):
"""
For each new comment, checks to see if the person submitting
it has any previously-approved comments; if not, the comment
will be moderated.
"""
        person_kwargs = { 'user_name__exact': comment.user_name }
        comment_class = comments.get_model()
        approved_comments = comment_class.objects.filter(is_public__exact=True, **person_kwargs)
if approved_comments.count() == 0:
return True
return False
class Moderator(object):
"""
Handles moderation of a set of models.
An instance of this class will maintain a list of one or more
models registered for comment moderation, and their associated
moderation classes, and apply moderation to all incoming comments.
To register a model, obtain an instance of ``CommentModerator``
(this module exports one as ``moderator``), and call its
``register`` method, passing the model class and a moderation
class (which should be a subclass of ``CommentModerator``). Note
that both of these should be the actual classes, not instances of
the classes.
To cease moderation for a model, call the ``unregister`` method,
passing the model class.
For convenience, both ``register`` and ``unregister`` can also
accept a list of model classes in place of a single model; this
allows easier registration of multiple models with the same
``CommentModerator`` class.
The actual moderation is applied in two phases: one prior to
saving a new comment, and the other immediately after saving. The
pre-save moderation may mark a comment as non-public or mark it to
be removed; the post-save moderation may delete a comment which
was disallowed (there is currently no way to prevent the comment
being saved once before removal) and, if the comment is still
around, will send any notification emails the comment generated.
"""
def __init__(self):
self._registry = {}
self.connect()
def connect(self):
"""
Hook up the moderation methods to pre- and post-save signals
from the comment models.
"""
signals.pre_save.connect(self.pre_save_moderation, sender=comments.get_model())
signals.post_save.connect(self.post_save_moderation, sender=comments.get_model())
def register(self, model_or_iterable, moderation_class):
"""
Register a model or a list of models for comment moderation,
using a particular moderation class.
Raise ``AlreadyModerated`` if any of the models are already
registered.
"""
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model in self._registry:
raise AlreadyModerated("The model '%s' is already being moderated" % model._meta.module_name)
self._registry[model] = moderation_class(model)
def unregister(self, model_or_iterable):
"""
Remove a model or a list of models from the list of models
whose comments will be moderated.
Raise ``NotModerated`` if any of the models are not currently
registered for moderation.
"""
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model not in self._registry:
raise NotModerated("The model '%s' is not currently being moderated" % model._meta.module_name)
del self._registry[model]
def pre_save_moderation(self, sender, instance, **kwargs):
"""
Apply any necessary pre-save moderation steps to new
comments.
"""
model = instance.content_type.model_class()
if instance.id or (model not in self._registry):
return
content_object = instance.content_object
moderation_class = self._registry[model]
if not moderation_class.allow(instance, content_object): # Comment will get deleted in post-save hook.
instance.moderation_disallowed = True
return
if moderation_class.moderate(instance, content_object):
instance.is_public = False
def post_save_moderation(self, sender, instance, **kwargs):
"""
Apply any necessary post-save moderation steps to new
comments.
"""
model = instance.content_type.model_class()
if model not in self._registry:
return
if hasattr(instance, 'moderation_disallowed'):
instance.delete()
return
self._registry[model].email(instance, instance.content_object)
def comments_open(self, obj):
"""
Return ``True`` if new comments are being accepted for
``obj``, ``False`` otherwise.
If no moderation rules have been registered for the model of
which ``obj`` is an instance, comments are assumed to be open
for that object.
"""
model = obj.__class__
if model not in self._registry:
return True
return self._registry[model].comments_open(obj)
def comments_moderated(self, obj):
"""
Return ``True`` if new comments for ``obj`` are being
automatically sent to moderation, ``False`` otherwise.
If no moderation rules have been registered for the model of
which ``obj`` is an instance, comments for that object are
assumed not to be moderated.
"""
model = obj.__class__
if model not in self._registry:
return False
return self._registry[model].comments_moderated(obj)
# Import this instance in your own code to use in registering
# your models for moderation.
moderator = Moderator()
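
# Hypothetical usage sketch: ``register`` also accepts a list of models, which is
# convenient when several models share one policy (here the built-in
# AkismetModerator). The application and model names below are illustrative only.
#
#     from comment_utils.moderation import AkismetModerator, moderator
#     from myapp.models import Entry, Link, Photo
#
#     moderator.register([Entry, Link, Photo], AkismetModerator)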
|
|
#!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.util
import glob
import os
import sys
from setuptools import setup
from setuptools.command.install import install
from shutil import copyfile, copytree, rmtree
try:
exec(open('pyspark/version.py').read())
except IOError:
print("Failed to load PySpark version file for packaging. You must be in Spark's python dir.",
file=sys.stderr)
sys.exit(-1)
try:
spec = importlib.util.spec_from_file_location("install", "pyspark/install.py")
install_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(install_module)
except IOError:
print("Failed to load the installing module (pyspark/install.py) which had to be "
"packaged together.",
file=sys.stderr)
sys.exit(-1)
VERSION = __version__ # noqa
# A temporary path so we can access above the Python project root and fetch scripts and jars we need
TEMP_PATH = "deps"
SPARK_HOME = os.path.abspath("../")
# Provide guidance about how to use setup.py
incorrect_invocation_message = """
If you are installing pyspark from spark source, you must first build Spark and
run sdist.
To build Spark with maven you can run:
./build/mvn -DskipTests clean package
Building the source dist is done in the Python directory:
cd python
python setup.py sdist
pip install dist/*.tar.gz"""
# Figure out where the jars we need to package with PySpark are located.
JARS_PATH = glob.glob(os.path.join(SPARK_HOME, "assembly/target/scala-*/jars/"))
if len(JARS_PATH) == 1:
JARS_PATH = JARS_PATH[0]
elif (os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1):
# Release mode puts the jars in a jars directory
JARS_PATH = os.path.join(SPARK_HOME, "jars")
elif len(JARS_PATH) > 1:
print("Assembly jars exist for multiple scalas ({0}), please cleanup assembly/target".format(
JARS_PATH), file=sys.stderr)
sys.exit(-1)
elif len(JARS_PATH) == 0 and not os.path.exists(TEMP_PATH):
print(incorrect_invocation_message, file=sys.stderr)
sys.exit(-1)
EXAMPLES_PATH = os.path.join(SPARK_HOME, "examples/src/main/python")
SCRIPTS_PATH = os.path.join(SPARK_HOME, "bin")
USER_SCRIPTS_PATH = os.path.join(SPARK_HOME, "sbin")
DATA_PATH = os.path.join(SPARK_HOME, "data")
LICENSES_PATH = os.path.join(SPARK_HOME, "licenses")
SCRIPTS_TARGET = os.path.join(TEMP_PATH, "bin")
USER_SCRIPTS_TARGET = os.path.join(TEMP_PATH, "sbin")
JARS_TARGET = os.path.join(TEMP_PATH, "jars")
EXAMPLES_TARGET = os.path.join(TEMP_PATH, "examples")
DATA_TARGET = os.path.join(TEMP_PATH, "data")
LICENSES_TARGET = os.path.join(TEMP_PATH, "licenses")
# Check whether we are under the Spark path, in which case we need to build the symlink farm.
# This is important because we only want to build the symlink farm while under Spark; otherwise we
# want to use the existing symlink farm. And if the symlink farm already exists while we are under
# Spark (e.g. a partially built sdist) we should error and have the user sort it out.
in_spark = (os.path.isfile("../core/src/main/scala/org/apache/spark/SparkContext.scala") or
(os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1))
def _supports_symlinks():
"""Check if the system supports symlinks (e.g. *nix) or not."""
return getattr(os, "symlink", None) is not None
if (in_spark):
# Construct links for setup
try:
os.mkdir(TEMP_PATH)
except BaseException:
print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH),
file=sys.stderr)
sys.exit(-1)
# If you are changing the versions here, please also change ./python/pyspark/sql/pandas/utils.py
# For Arrow, you should also check ./pom.xml and ensure there are no breaking changes in the
# binary format protocol with the Java version, see ARROW_HOME/format/* for specifications.
# Also don't forget to update python/docs/source/getting_started/install.rst.
_minimum_pandas_version = "1.0.5"
_minimum_pyarrow_version = "1.0.0"
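
# For reference, these minimum versions feed the extras_require entries declared in
# the setup() call below; a hypothetical install that pulls in the SQL dependencies
# would look like:
#   pip install "pyspark[sql]"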
class InstallCommand(install):
# TODO(SPARK-32837) leverage pip's custom options
def run(self):
install.run(self)
# Make sure the destination is always clean.
spark_dist = os.path.join(self.install_lib, "pyspark", "spark-distribution")
rmtree(spark_dist, ignore_errors=True)
if ("PYSPARK_HADOOP_VERSION" in os.environ) or ("PYSPARK_HIVE_VERSION" in os.environ):
            # Note that the PYSPARK_VERSION environment variable exists only for testing purposes.
            # The PYSPARK_HIVE_VERSION environment variable is also internal for now, in case
            # we support another version of Hive in the future.
spark_version, hadoop_version, hive_version = install_module.checked_versions(
os.environ.get("PYSPARK_VERSION", VERSION).lower(),
os.environ.get("PYSPARK_HADOOP_VERSION", install_module.DEFAULT_HADOOP).lower(),
os.environ.get("PYSPARK_HIVE_VERSION", install_module.DEFAULT_HIVE).lower())
if ("PYSPARK_VERSION" not in os.environ and
((install_module.DEFAULT_HADOOP, install_module.DEFAULT_HIVE) ==
(hadoop_version, hive_version))):
# Do not download and install if they are same as default.
return
install_module.install_spark(
dest=spark_dist,
spark_version=spark_version,
hadoop_version=hadoop_version,
hive_version=hive_version)
try:
# We copy the shell script to be under pyspark/python/pyspark so that the launcher scripts
# find it where expected. The rest of the files aren't copied because they are accessed
# using Python imports instead which will be resolved correctly.
try:
os.makedirs("pyspark/python/pyspark")
except OSError:
# Don't worry if the directory already exists.
pass
copyfile("pyspark/shell.py", "pyspark/python/pyspark/shell.py")
if (in_spark):
# Construct the symlink farm - this is necessary since we can't refer to the path above the
# package root and we need to copy the jars and scripts which are up above the python root.
if _supports_symlinks():
os.symlink(JARS_PATH, JARS_TARGET)
os.symlink(SCRIPTS_PATH, SCRIPTS_TARGET)
os.symlink(USER_SCRIPTS_PATH, USER_SCRIPTS_TARGET)
os.symlink(EXAMPLES_PATH, EXAMPLES_TARGET)
os.symlink(DATA_PATH, DATA_TARGET)
os.symlink(LICENSES_PATH, LICENSES_TARGET)
else:
# For windows fall back to the slower copytree
copytree(JARS_PATH, JARS_TARGET)
copytree(SCRIPTS_PATH, SCRIPTS_TARGET)
copytree(USER_SCRIPTS_PATH, USER_SCRIPTS_TARGET)
copytree(EXAMPLES_PATH, EXAMPLES_TARGET)
copytree(DATA_PATH, DATA_TARGET)
copytree(LICENSES_PATH, LICENSES_TARGET)
else:
# If we are not inside of SPARK_HOME verify we have the required symlink farm
if not os.path.exists(JARS_TARGET):
print("To build packaging must be in the python directory under the SPARK_HOME.",
file=sys.stderr)
if not os.path.isdir(SCRIPTS_TARGET):
print(incorrect_invocation_message, file=sys.stderr)
sys.exit(-1)
    # The scripts directive requires a list of each script path and does not take wildcards.
script_names = os.listdir(SCRIPTS_TARGET)
scripts = list(map(lambda script: os.path.join(SCRIPTS_TARGET, script), script_names))
# We add find_spark_home.py to the bin directory we install so that pip installed PySpark
# will search for SPARK_HOME with Python.
scripts.append("pyspark/find_spark_home.py")
with open('README.md') as f:
long_description = f.read()
setup(
name='pyspark',
version=VERSION,
description='Apache Spark Python API',
long_description=long_description,
long_description_content_type="text/markdown",
author='Spark Developers',
author_email='[email protected]',
url='https://github.com/apache/spark/tree/master/python',
packages=['pyspark',
'pyspark.cloudpickle',
'pyspark.mllib',
'pyspark.mllib.linalg',
'pyspark.mllib.stat',
'pyspark.ml',
'pyspark.ml.linalg',
'pyspark.ml.param',
'pyspark.sql',
'pyspark.sql.avro',
'pyspark.sql.pandas',
'pyspark.streaming',
'pyspark.bin',
'pyspark.sbin',
'pyspark.jars',
'pyspark.pandas',
'pyspark.pandas.data_type_ops',
'pyspark.pandas.indexes',
'pyspark.pandas.missing',
'pyspark.pandas.plot',
'pyspark.pandas.spark',
'pyspark.pandas.typedef',
'pyspark.pandas.usage_logging',
'pyspark.python.pyspark',
'pyspark.python.lib',
'pyspark.data',
'pyspark.licenses',
'pyspark.resource',
'pyspark.examples.src.main.python'],
include_package_data=True,
package_dir={
'pyspark.jars': 'deps/jars',
'pyspark.bin': 'deps/bin',
'pyspark.sbin': 'deps/sbin',
'pyspark.python.lib': 'lib',
'pyspark.data': 'deps/data',
'pyspark.licenses': 'deps/licenses',
'pyspark.examples.src.main.python': 'deps/examples',
},
package_data={
'pyspark.jars': ['*.jar'],
'pyspark.bin': ['*'],
'pyspark.sbin': ['spark-config.sh', 'spark-daemon.sh',
'start-history-server.sh',
'stop-history-server.sh', ],
'pyspark.python.lib': ['*.zip'],
'pyspark.data': ['*.txt', '*.data'],
'pyspark.licenses': ['*.txt'],
'pyspark.examples.src.main.python': ['*.py', '*/*.py']},
scripts=scripts,
license='http://www.apache.org/licenses/LICENSE-2.0',
# Don't forget to update python/docs/source/getting_started/install.rst
# if you're updating the versions or dependencies.
install_requires=['py4j==0.10.9.3'],
extras_require={
'ml': ['numpy>=1.15'],
'mllib': ['numpy>=1.15'],
'sql': [
'pandas>=%s' % _minimum_pandas_version,
'pyarrow>=%s' % _minimum_pyarrow_version,
],
'pandas_on_spark': [
'pandas>=%s' % _minimum_pandas_version,
'pyarrow>=%s' % _minimum_pyarrow_version,
'numpy>=1.15',
],
},
python_requires='>=3.7',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Typing :: Typed'],
cmdclass={
'install': InstallCommand,
},
)
finally:
    # We only clean up the symlink farm if we were in Spark; otherwise we are installing
    # rather than packaging.
if (in_spark):
        # Clean up either the symlinks or the copied trees, depending on what was created above.
if _supports_symlinks():
os.remove(os.path.join(TEMP_PATH, "jars"))
os.remove(os.path.join(TEMP_PATH, "bin"))
os.remove(os.path.join(TEMP_PATH, "sbin"))
os.remove(os.path.join(TEMP_PATH, "examples"))
os.remove(os.path.join(TEMP_PATH, "data"))
os.remove(os.path.join(TEMP_PATH, "licenses"))
else:
rmtree(os.path.join(TEMP_PATH, "jars"))
rmtree(os.path.join(TEMP_PATH, "bin"))
rmtree(os.path.join(TEMP_PATH, "sbin"))
rmtree(os.path.join(TEMP_PATH, "examples"))
rmtree(os.path.join(TEMP_PATH, "data"))
rmtree(os.path.join(TEMP_PATH, "licenses"))
os.rmdir(TEMP_PATH)
|
|
from objc import selector
from objc import python_method
from Foundation import NSObject
from AppKit import NSAlert, NSSavePanel, NSOpenPanel, NSAlertStyleCritical, NSAlertStyleInformational, NSAlertStyleWarning, NSAlertFirstButtonReturn, NSAlertSecondButtonReturn, NSAlertThirdButtonReturn, NSOKButton, NSURL, NSImage
__all__ = ["message", "askYesNoCancel", "askYesNo", "getFile", "getFolder", "getFileOrFolder", "putFile"]
alertStyleMap = {
None : NSAlertStyleInformational,
"informational" : NSAlertStyleInformational,
"critical" : NSAlertStyleCritical,
# "warning" : NSAlertStyleWarning, # no difference with
# backwards compatible keys
NSAlertStyleInformational : NSAlertStyleInformational,
NSAlertStyleCritical : NSAlertStyleCritical,
NSAlertStyleWarning : NSAlertStyleWarning
}
class BasePanel(NSObject):
def initWithWindow_resultCallback_(cls, parentWindow=None, resultCallback=None):
self = cls.init()
self.retain()
self._parentWindow = parentWindow
self._resultCallback = resultCallback
return self
def windowWillClose_(self, notification):
self.autorelease()
class BaseMessageDialog(BasePanel):
def initWithWindow_resultCallback_(cls, parentWindow=None, resultCallback=None):
self = super(BaseMessageDialog, cls).initWithWindow_resultCallback_(parentWindow, resultCallback)
self.messageText = ""
self.informativeText = ""
self.alertStyle = NSAlertStyleInformational
self.buttonTitlesValues = []
self.accessoryView = None
self.icon = None
self.showsHelpCallback = None
return self
def initWithMessageText_informativeText_alertStyle_buttonTitlesValues_window_resultCallback_(self,
messageText="", informativeText="", alertStyle=NSAlertStyleInformational, buttonTitlesValues=[], parentWindow=None, resultCallback=None):
# make it backwards compatible
import warnings
warnings.warn(
"'BaseMessageDiaglog.alloc().initWithMessageText_informativeText_alertStyle_buttonTitlesValues_window_resultCallback_' has been deprecated and will be removed."
"Please update your code.",
DeprecationWarning
)
self = self.initWithWindow_resultCallback_(parentWindow, resultCallback)
self.messageText = messageText
self.informativeText = informativeText
self.alertStyle = alertStyle
self.buttonTitlesValues = _mapButtonTitles(buttonTitlesValues)
self.run()
return self
def run(self):
self.alert = NSAlert.alloc().init()
self.alert.setDelegate_(self)
self.alert.setMessageText_(self.messageText)
self.alert.setInformativeText_(self.informativeText)
self.alert.setAlertStyle_(self.alertStyle)
if self.accessoryView:
self.alert.setAccessoryView_(self.accessoryView)
if self.icon:
self.alert.setIcon_(self.icon)
if self.showsHelpCallback:
self.alert.setShowsHelp_(True)
for buttonTitle in self.buttonTitlesValues:
self.alert.addButtonWithTitle_(buttonTitle["title"])
self._value = None
if self._parentWindow is None:
code = self.alert.runModal()
self._translateValue(code)
if self._resultCallback is not None:
self._resultCallback(self._value)
else:
self.alert.beginSheetModalForWindow_completionHandler_(self._parentWindow, self.completionHandler_)
def completionHandler_(self, returnCode):
self.alert.window().close()
self._translateValue(returnCode)
if self._resultCallback is not None:
self._resultCallback(self._value)
@python_method
def _translateValue(self, code):
if code == NSAlertFirstButtonReturn:
value = 1
elif code == NSAlertSecondButtonReturn:
value = 2
elif code == NSAlertThirdButtonReturn:
value = 3
else:
value = code - NSAlertThirdButtonReturn + 3
if self.buttonTitlesValues:
result = self.buttonTitlesValues[value - 1]
if "callback" in result:
result["callback"]()
self._value = result.get("returnCode")
# delegate method
def alertShowHelp_(self, sender):
self.showsHelpCallback()
class BasePutGetPanel(BasePanel):
def completionHandler_(self, returnCode):
self.panel.close()
if returnCode:
self._result = self.panel.filenames()
if self._resultCallback is not None:
self._resultCallback(self._result)
class PutFilePanel(BasePutGetPanel):
def initWithWindow_resultCallback_(self, parentWindow=None, resultCallback=None):
self = super(PutFilePanel, self).initWithWindow_resultCallback_(parentWindow, resultCallback)
self.messageText = None
self.title = None
self.fileTypes = None
self.directory = None
self.fileName = None
self.canCreateDirectories = True
self.accessoryView = None
self._result = None
return self
def run(self):
self.panel = NSSavePanel.alloc().init()
if self.messageText:
self.panel.setMessage_(self.messageText)
if self.title:
self.panel.setTitle_(self.title)
if self.fileName:
self.panel.setNameFieldStringValue_(self.fileName)
if self.directory:
self.panel.setDirectoryURL_(NSURL.fileURLWithPath_(self.directory))
if self.fileTypes:
self.panel.setAllowedFileTypes_(self.fileTypes)
self.panel.setCanCreateDirectories_(self.canCreateDirectories)
self.panel.setCanSelectHiddenExtension_(True)
self.panel.setAccessoryView_(self.accessoryView)
if self._parentWindow is not None:
self.panel.beginSheetModalForWindow_completionHandler_(self._parentWindow, self.completionHandler_)
else:
isOK = self.panel.runModalForDirectory_file_(self.directory, self.fileName)
if isOK == NSOKButton:
self._result = self.panel.filename()
def completionHandler_(self, returnCode):
self.panel.close()
if returnCode:
self._result = self.panel.filename()
if self._resultCallback is not None:
self._resultCallback(self._result)
class GetFileOrFolderPanel(BasePutGetPanel):
def initWithWindow_resultCallback_(self, parentWindow=None, resultCallback=None):
self = super(GetFileOrFolderPanel, self).initWithWindow_resultCallback_(parentWindow, resultCallback)
self.messageText = None
self.title = None
self.directory = None
self.fileName = None
self.fileTypes = None
self.allowsMultipleSelection = False
self.canChooseDirectories = True
self.canChooseFiles = True
self.resolvesAliases = True
self.accessoryView = None
self._result = None
return self
def run(self):
self.panel = NSOpenPanel.alloc().init()
if self.messageText:
self.panel.setMessage_(self.messageText)
if self.title:
self.panel.setTitle_(self.title)
if self.fileName:
self.panel.setNameFieldLabel_(self.fileName)
if self.directory:
self.panel.setDirectoryURL_(NSURL.fileURLWithPath_(self.directory))
if self.fileTypes:
self.panel.setAllowedFileTypes_(self.fileTypes)
self.panel.setCanChooseDirectories_(self.canChooseDirectories)
self.panel.setCanChooseFiles_(self.canChooseFiles)
self.panel.setAllowsMultipleSelection_(self.allowsMultipleSelection)
self.panel.setResolvesAliases_(self.resolvesAliases)
self.panel.setAccessoryView_(self.accessoryView)
if self._parentWindow is not None:
self.panel.beginSheetModalForWindow_completionHandler_(self._parentWindow, self.completionHandler_)
else:
isOK = self.panel.runModalForDirectory_file_types_(self.directory, self.fileName, self.fileTypes)
if isOK == NSOKButton:
self._result = self.panel.filenames()
def _unwrapWindow(window):
from vanilla.vanillaWindows import Window
if window is None:
return window
if isinstance(window, Window):
window = window.getNSWindow()
return window
def _unwrapView(view):
from vanilla.vanillaBase import VanillaBaseObject
if view is None:
return view
if isinstance(view, VanillaBaseObject):
l, t, w, h = view.getPosSize()
view = view._getContentView()
view.setFrame_(((0, 0), (w, h)))
return view
def _mapButtonTitles(titles):
"""
Convert key
"""
buttonTitles = []
for buttonTitle in titles:
if isinstance(buttonTitle, tuple):
title, returnCode = buttonTitle
buttonTitle = dict(title=title, returnCode=returnCode)
buttonTitles.append(buttonTitle)
return buttonTitles
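# For example, _mapButtonTitles([("OK", 1), dict(title="Cancel", returnCode=0)])
# returns [{"title": "OK", "returnCode": 1}, {"title": "Cancel", "returnCode": 0}].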
def message(messageText="", informativeText="", alertStyle="informational", parentWindow=None, resultCallback=None, icon=None, accessoryView=None, showsHelpCallback=None):
assert icon is None or isinstance(icon, NSImage)
parentWindow = _unwrapWindow(parentWindow)
accessoryView = _unwrapView(accessoryView)
alert = BaseMessageDialog.alloc().initWithWindow_resultCallback_(parentWindow, resultCallback)
alert.messageText = messageText
alert.informativeText = informativeText
alert.alertStyle = alertStyleMap[alertStyle]
alert.accessoryView = accessoryView
alert.icon = icon
alert.showsHelpCallback = showsHelpCallback
alert.run()
if resultCallback is None:
return 1
def ask(messageText="", informativeText="", alertStyle="informational", buttonTitles=[],
parentWindow=None, resultCallback=None, icon=None, accessoryView=None, showsHelpCallback=None):
assert buttonTitles, "Button titles are required"
assert icon is None or isinstance(icon, NSImage)
parentWindow = _unwrapWindow(parentWindow)
accessoryView = _unwrapView(accessoryView)
buttonTitles = _mapButtonTitles(buttonTitles)
alert = BaseMessageDialog.alloc().initWithWindow_resultCallback_(parentWindow, resultCallback)
alert.messageText = messageText
alert.informativeText = informativeText
alert.alertStyle = alertStyleMap[alertStyle]
alert.buttonTitlesValues = buttonTitles
alert.accessoryView = accessoryView
alert.icon = icon
alert.showsHelpCallback = showsHelpCallback
alert.run()
if resultCallback is None:
return alert._value
def askYesNoCancel(messageText="", informativeText="", alertStyle="informational",
parentWindow=None, resultCallback=None, icon=None, accessoryView=None, showsHelpCallback=None):
return ask(
messageText=messageText,
informativeText=informativeText,
alertStyle=alertStyle,
buttonTitles=[
dict(title="Cancel", returnCode=-1),
dict(title="Yes", returnCode=1),
dict(title="No", returnCode=0)
],
parentWindow=parentWindow,
resultCallback=resultCallback,
icon=icon,
accessoryView=accessoryView,
showsHelpCallback=showsHelpCallback
)
def askYesNo(messageText="", informativeText="", alertStyle="informational",
parentWindow=None, resultCallback=None, icon=None, accessoryView=None, showsHelpCallback=None):
return ask(
messageText=messageText,
informativeText=informativeText,
alertStyle=alertStyle,
buttonTitles=[
dict(title="Yes", returnCode=1),
dict(title="No", returnCode=0)
],
parentWindow=parentWindow,
resultCallback=resultCallback,
icon=icon,
accessoryView=accessoryView,
showsHelpCallback=showsHelpCallback
)
def getFile(messageText=None, title=None, directory=None, fileName=None,
allowsMultipleSelection=False, fileTypes=None, parentWindow=None, resultCallback=None, accessoryView=None):
parentWindow = _unwrapWindow(parentWindow)
accessoryView = _unwrapView(accessoryView)
basePanel = GetFileOrFolderPanel.alloc().initWithWindow_resultCallback_(parentWindow, resultCallback)
basePanel.messageText = messageText
basePanel.title = title
basePanel.directory = directory
basePanel.fileName = fileName
basePanel.fileTypes = fileTypes
basePanel.allowsMultipleSelection = allowsMultipleSelection
basePanel.canChooseDirectories = False
basePanel.canChooseFiles = True
basePanel.accessoryView = accessoryView
basePanel.run()
if resultCallback is None:
return basePanel._result
def getFolder(messageText=None, title=None, directory=None, allowsMultipleSelection=False,
parentWindow=None, resultCallback=None, accessoryView=None):
parentWindow = _unwrapWindow(parentWindow)
accessoryView = _unwrapView(accessoryView)
basePanel = GetFileOrFolderPanel.alloc().initWithWindow_resultCallback_(parentWindow, resultCallback)
basePanel.messageText = messageText
basePanel.title = title
basePanel.directory = directory
basePanel.allowsMultipleSelection = allowsMultipleSelection
basePanel.canChooseDirectories = True
basePanel.canChooseFiles = False
basePanel.accessoryView = accessoryView
basePanel.run()
if resultCallback is None:
return basePanel._result
def getFileOrFolder(messageText=None, title=None, directory=None, fileName=None,
allowsMultipleSelection=False, fileTypes=None, parentWindow=None, resultCallback=None, accessoryView=None):
parentWindow = _unwrapWindow(parentWindow)
accessoryView = _unwrapView(accessoryView)
basePanel = GetFileOrFolderPanel.alloc().initWithWindow_resultCallback_(parentWindow, resultCallback)
basePanel.messageText = messageText
basePanel.title = title
basePanel.directory = directory
basePanel.fileName = fileName
basePanel.fileTypes = fileTypes
basePanel.allowsMultipleSelection = allowsMultipleSelection
basePanel.canChooseDirectories = True
basePanel.canChooseFiles = True
basePanel.accessoryView = accessoryView
basePanel.run()
if resultCallback is None:
return basePanel._result
def putFile(messageText=None, title=None, directory=None, fileName=None, canCreateDirectories=True,
fileTypes=None, parentWindow=None, resultCallback=None, accessoryView=None):
parentWindow = _unwrapWindow(parentWindow)
accessoryView = _unwrapView(accessoryView)
basePanel = PutFilePanel.alloc().initWithWindow_resultCallback_(parentWindow, resultCallback)
basePanel.messageText = messageText
basePanel.title = title
basePanel.directory = directory
basePanel.fileName = fileName
basePanel.fileTypes = fileTypes
basePanel.canCreateDirectories = canCreateDirectories
basePanel.accessoryView = accessoryView
basePanel.run()
if resultCallback is None:
return basePanel._result
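
# A minimal usage sketch (hypothetical strings; assumes a running Cocoa/vanilla
# application so the modal dialogs can actually be shown). Per askYesNoCancel
# above, the return value is 1 for "Yes", 0 for "No" and -1 for "Cancel".
if __name__ == "__main__":
    answer = askYesNoCancel(
        messageText="Save changes?",
        informativeText="Unsaved edits will be lost.")
    if answer == 1:
        path = putFile(messageText="Save document as:", fileName="Untitled.txt")
        print(path)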
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for cross_device_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import threading
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nccl_ops
from tensorflow.python.platform import tf_logging as logging
OP_INSTANCE_KEY_START_NUMBER = 100
def aggregate_gradients_using_nccl(replica_grads):
"""Aggregate gradients using nccl allreduce."""
agg_all_g_and_v = []
for single_g_and_v in zip(*replica_grads):
single_grads = [g for g, _ in single_g_and_v]
agg_grads = nccl_ops.all_sum(single_grads)
agg_all_g_and_v.append(
[(g, v) for g, (_, v) in zip(agg_grads, single_g_and_v)])
agg_all_g_and_v = list(zip(*agg_all_g_and_v))
return agg_all_g_and_v
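# Shape sketch (illustrative): with two replicas and variables (v0, v1),
# replica_grads looks like
#   [[(g0_r0, v0), (g1_r0, v1)],   # replica 0
#    [(g0_r1, v0), (g1_r1, v1)]]   # replica 1
# and the result keeps the same nesting, with each gradient replaced by the
# nccl all-reduced sum across replicas.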
def aggregate_gradients_using_hierarchical_copy(avail_devices, replica_grads):
"""Aggregate gradients using hierarchical copies.
Args:
avail_devices: available GPU devices.
replica_grads: List of lists of (gradient, variable) tuples. The outer list
is over replicas. The inner list is over individual gradients.
Returns:
The list of (aggregated_gradient, variable), where the gradient has been
summed across all replicas and the variable is chosen from the first
replica.
"""
# This only works for DGX-1 type of machine topology
# Device peer to peer matrix
# DMA: 0 1 2 3 4 5 6 7
# 0: Y Y Y Y Y N N N
# 1: Y Y Y Y N Y N N
# 2: Y Y Y Y N N Y N
# 3: Y Y Y Y N N N Y
# 4: Y N N N Y Y Y Y
# 5: N Y N N Y Y Y Y
# 6: N N Y N Y Y Y Y
# 7: N N N Y Y Y Y Y
agg_grads = []
num_devices = len(avail_devices)
# In the special case of DGX-1 machine topology, the two groups have equal
# size.
group_size = num_devices // 2
for i, single_grads in enumerate(zip(*replica_grads)):
group_0_main_device = i % num_devices
group_1_main_device = (group_0_main_device + group_size) % num_devices
if group_0_main_device < group_size:
group_0_begin = 0
group_1_begin = group_size
else:
group_0_begin = group_size
group_1_begin = 0
# Aggregate the first group.
group_0_device_grads = single_grads[group_0_begin:
group_0_begin + group_size]
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads, _ = aggregate_single_gradient_using_copy(
group_0_device_grads, False, False)
# Aggregate the second group.
group_1_device_grads = single_grads[group_1_begin:
group_1_begin + group_size]
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads, _ = aggregate_single_gradient_using_copy(
group_1_device_grads, False, False)
# Aggregate between the groups.
with ops.device(avail_devices[group_0_main_device]):
(agg_total_grads, _), _ = aggregate_single_gradient_using_copy(
[group_0_agg_grads, group_1_agg_grads], False, False)
# Broadcast the result back into the root of each group.
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads_bcast = array_ops.identity(agg_total_grads)
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads_bcast = array_ops.identity(agg_total_grads)
agg_grads_bcast = []
for j in range(len(single_grads)):
with ops.device(avail_devices[j]):
# Broadcast the result back to each member in the group from the root.
if (group_0_main_device < group_size) == (j < group_size):
src_device_grad = group_0_agg_grads_bcast
else:
src_device_grad = group_1_agg_grads_bcast
agg_grads_bcast.append(array_ops.identity(src_device_grad))
agg_grads.append(
[(g, v) for g, (_, v) in zip(agg_grads_bcast, single_grads)])
agg_grads = list(zip(*agg_grads))
return agg_grads
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
check_inf_nan):
"""Calculate the average gradient for a shared variable across all replicas.
Note that this function provides a synchronization point across all replicas.
Args:
grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
(gradient, variable) pair within the outer list represents the gradient
of the variable calculated for a single replica, and the number of pairs
equals the number of replicas.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all replicas. The variable is chosen
from the first replica. The has_nan_or_inf indicates the grads has nan or
inf.
"""
grads = [g for g, _ in grad_and_vars]
grad = math_ops.add_n(grads)
if use_mean and len(grads) > 1:
grad = array_ops.multiply(grad, 1.0 / len(grads))
v = grad_and_vars[0][1]
if check_inf_nan:
    has_nan_or_inf = math_ops.logical_not(
        math_ops.reduce_all(math_ops.is_finite(grads)))
return (grad, v), has_nan_or_inf
else:
return (grad, v), None
# TODO(yuefengz): use random key starts to avoid reusing keys?
class CollectiveKeys(object):
"""Class that manages collective keys.
We need to manage three different keys for collective:
*Group key*: an integer key to identify the set of cooperative devices.
  Collective ops that work on the same set of devices must use the same group
  key.
  *Instance key*: an integer key to identify the corresponding set of tensors on
  different devices in a device group that need to be all-reduced.
  *Graph key*: an integer key that is unique per graph. This is used to support
multiple graphs per client session. It must be non-zero and set in the
`config` argument of each call to `session.run`.
This class is thread safe.
"""
def __init__(self,
group_key_start=1,
op_instance_key_start=OP_INSTANCE_KEY_START_NUMBER,
variable_instance_key_start=1000000):
"""Initializes the object.
Args:
group_key_start: the starting integer of group key.
op_instance_key_start: the starting integer of instance key for ops.
variable_instance_key_start: the starting integer of instance key for
variables.
"""
self._group_key = group_key_start
self._group_key_table = {}
assert op_instance_key_start != variable_instance_key_start
self._op_instance_key = op_instance_key_start
self._variable_instance_key = variable_instance_key_start
self._lock = threading.Lock()
def get_group_key(self, devices):
"""Returns a group key for the set of devices.
Args:
devices: list of strings naming devices in a collective group.
Returns:
int key uniquely identifying the set of device names.
"""
parsed = [pydev.DeviceSpec.from_string(d) for d in devices]
# In the between-graph replicated training, different workers need to get
# the same device key. So we remove the task_type and task_id from the
# devices.
# TODO(yuefengz): in the in-graph replicated training, we need to include
# task_type and task_id.
names = sorted(['%s:%d' % (d.device_type, d.device_index) for d in parsed])
key_id = ','.join(names)
with self._lock:
if key_id not in self._group_key_table:
new_key = self._group_key
self._group_key += 1
self._group_key_table[key_id] = new_key
return self._group_key_table[key_id]
def get_op_instance_key(self):
"""Returns a new instance key for use in defining a collective op."""
with self._lock:
v = self._op_instance_key
self._op_instance_key += 1
return v
def get_variable_instance_key(self):
"""Returns a new instance key for use in creating a Variable."""
with self._lock:
v = self._variable_instance_key
self._variable_instance_key += 1
return v
def __deepcopy__(self, memo):
# distribute_coordinator deep-copies the strategy object, so
# CollectiveKeys needs to support deep copy as well.
copied = CollectiveKeys()
copied._group_key = self._group_key
copied._group_key_table = copy.deepcopy(self._group_key_table, memo)
copied._op_instance_key = self._op_instance_key
copied._variable_instance_key = self._variable_instance_key
return copied
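# A small illustrative sketch (device strings are placeholders): the same device
# set always maps to the same group key, while each op instance key request
# returns a fresh value starting from OP_INSTANCE_KEY_START_NUMBER.
#
#   keys = CollectiveKeys()
#   group_key = keys.get_group_key(['/job:worker/task:0/device:GPU:0',
#                                   '/job:worker/task:1/device:GPU:0'])
#   keys.get_op_instance_key()  # -> 100
#   keys.get_op_instance_key()  # -> 101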
def build_collective_reduce(input_tensors,
devices,
group_size,
collective_keys,
reduction_op='Add',
unary_op='Id',
communication_hint='AUTO',
control_inputs=None,
executors=None,
timeout=None):
"""Build a subgraph that does one full all-reduce, using the collective Op.
If called in eager mode, it's required to supply a list of async executors for
each input Tensor.
Args:
input_tensors: tensors within a single worker graph that are to be reduced
together; must be one per device.
devices: a list of device strings to run the collective on.
group_size: total number of devices globally that will be doing this same
reduction. The reduction will actually include the corresponding tensors
at all these workers.
collective_keys: a CollectiveKeys object.
reduction_op: string naming the reduction op.
unary_op: string naming the unary final op.
communication_hint: string providing hint to runtime for choosing collective
implementation.
control_inputs: if not None, add control edges between control_inputs and
(index-wise) corresponding collective_reduce tensors
executors: a list of async executor. Required for eager execution.
timeout: a float or None. The timeout in seconds.
Returns:
An array of final tensors, one per device, computed by the full reduction.
Raises:
ValueError: There must be at least two tensors over all the workers.
"""
if context.executing_eagerly():
if (not executors or len(executors) != len(input_tensors) or
not all(e.is_async() for e in executors)):
raise ValueError(
'collectives requires async executors for each device in eager mode')
if len(input_tensors) != len(devices):
raise ValueError('collective requires one input tensor for each device, '
'len(input_tensors) = %d, len(devices) = %d' %
(len(input_tensors), len(devices)))
if group_size < 2:
return input_tensors
group_key = collective_keys.get_group_key(devices)
instance_key = collective_keys.get_op_instance_key()
subdiv_offsets = [0] # TODO(tucker): maybe support non-default subdiv spec
out_tensors = []
for idx, input_tensor in enumerate(input_tensors):
if context.executing_eagerly():
executor_scope = context.executor_scope(executors[idx])
else:
executor_scope = ops.NullContextmanager()
with executor_scope, \
ops.device(devices[idx]), \
ops.control_dependencies(
_control_input(devices, control_inputs, idx)):
out_tensor = collective_ops.all_reduce(
input_tensor,
group_size,
group_key,
instance_key,
reduction_op,
unary_op,
subdiv_offsets,
communication_hint,
timeout=timeout)
out_tensors.append(out_tensor)
return out_tensors
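# Illustrative call (graph mode, one worker with two local GPUs; tensor and device
# names are placeholders): each device contributes one tensor and receives the
# full all-reduced result.
#
#   reduced = build_collective_reduce(
#       input_tensors=[t_gpu0, t_gpu1],
#       devices=['/device:GPU:0', '/device:GPU:1'],
#       group_size=2,
#       collective_keys=CollectiveKeys())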
def build_collective_gather(input_tensors,
devices,
group_size,
collective_keys,
communication_hint='AUTO',
control_inputs=None,
timeout=None):
"""Build a subgraph that does one full all-gather, using the collective Op.
This method must be called in graph mode or inside a tf.function.
Args:
input_tensors: tensors within a single worker graph that are to be gathered
together; must be one per device.
devices: a list of device strings to run the collective on.
group_size: total number of devices globally that will be doing this same
gathering. The gathering will actually include the corresponding tensors
at all these workers.
collective_keys: a CollectiveKeys object.
communication_hint: string providing hint to runtime for choosing collective
implementation.
control_inputs: if not None, add control edges between control_inputs and
(index-wise) corresponding collective_gather tensors
timeout: a float or None. The timeout in seconds.
Returns:
An array of final tensors, one per device, computed by the full gather.
"""
assert not context.executing_eagerly(), (
'build_collective_gather can only be called in graph mode or inside '
'tf.function')
if len(input_tensors) != len(devices):
raise ValueError(
'collective requires one input tensor for each device, %d != %d' %
(len(input_tensors), len(devices)))
if group_size < 2:
return input_tensors
group_key = collective_keys.get_group_key(devices)
instance_key = collective_keys.get_op_instance_key()
out_tensors = []
for idx, input_tensor in enumerate(input_tensors):
with ops.device(devices[idx]):
with ops.control_dependencies(
_control_input(devices, control_inputs, idx)):
out_tensor = collective_ops.all_gather(
input_tensor,
group_size,
group_key,
instance_key,
communication_hint,
timeout=timeout)
out_tensors.append(out_tensor)
return out_tensors
def build_collective_gather_indexed_slices(input_slices_list,
devices,
group_size,
collective_keys,
communication_hint='AUTO',
control_inputs=None,
timeout=None):
"""Build a subgraph that all-gathers IndexedSlices using the collective Op.
This method must be called in graph mode or inside a tf.function.
Args:
input_slices_list: a list of IndexedSlices within a single worker graph that
are to be gathered together; must be one per device.
devices: a list of device strings to run the collective on.
group_size: total number of devices globally that will be doing this same
gathering. The gathering will actually include the corresponding tensors
at all these workers.
collective_keys: a CollectiveKeys object.
communication_hint: string providing hint to runtime for choosing collective
implementation.
control_inputs: if not None, add control edges between control_inputs and
(index-wise) corresponding collective_reduce tensors
timeout: a float or None. The timeout in seconds.
Returns:
An array of final IndexedSlices, one per device, computed by the full
gather.
Raises:
ValueError: if control_inputs is not None and doesn't match the length and
devices of inputs.
"""
assert not context.executing_eagerly(), (
'build_collective_gather_indexed_slices can only be called in graph mode'
' or inside tf.function')
if len(input_slices_list) != len(devices):
raise ValueError(
'collective requires one input IndexedSlice for each device, %d != %d' %
(len(input_slices_list), len(devices)))
if group_size < 2:
return input_slices_list
group_key = collective_keys.get_group_key(devices)
gather_length_key = collective_keys.get_op_instance_key()
gather_indices_key = collective_keys.get_op_instance_key()
gather_values_key = collective_keys.get_op_instance_key()
reduce_densified_key = collective_keys.get_op_instance_key()
# Current CollectiveAllGather implementations require input IndexedSlices to
# have consistent length across the board, we handle the reduction of
# IndexedSlices as follows:
# 1. Gather the lengths of IndexedSlices from all participants.
# 2. If they have consistent length, apply all_gather.
# 3. Otherwise convert IndexedSlices to dense tensors and apply
# all_reduce.
out_slices_list = []
for idx, input_slices in enumerate(input_slices_list):
    # pylint: disable=cell-var-from-loop
with ops.device(devices[idx]):
def all_gather():
"""Use all_gather to aggregate `IndexedSlices`."""
all_values = collective_ops.all_gather(
input_slices.values,
group_size,
group_key,
gather_values_key,
communication_hint,
timeout=timeout)
# Add control dependency to order the all-gather.
control = [all_values] if communication_hint == 'NCCL' else []
with ops.control_dependencies(control):
all_indices = collective_ops.all_gather(
input_slices.indices,
group_size,
group_key,
gather_indices_key,
communication_hint,
timeout=timeout)
return ops.IndexedSlices(
values=all_values,
indices=all_indices,
dense_shape=input_slices.dense_shape)
def densify_and_all_reduce():
"""Use all_reduce to aggregate `IndexedSlices`."""
densified = ops.convert_to_tensor(input_slices)
reduced = collective_ops.all_reduce(
densified,
group_size,
group_key,
reduce_densified_key,
'Add',
'Id', [0],
communication_hint,
timeout=timeout)
# We have to convert dense grad to IndexedSlice because all_reduce()
# and all_gather() must have the same return type as required by
# control_flow_ops.cond.
return ops.IndexedSlices(
values=reduced,
indices=math_ops.range(array_ops.shape(reduced)[0]),
dense_shape=input_slices.dense_shape)
length = array_ops.shape(input_slices.indices)
with ops.control_dependencies(
_control_input(input_slices, control_inputs, idx)):
all_lengths = collective_ops.all_gather(
length,
group_size,
group_key,
gather_length_key,
communication_hint,
timeout=timeout)
out_slices = control_flow_ops.cond(
math_ops.equal(
math_ops.reduce_max(all_lengths),
math_ops.reduce_min(all_lengths)), all_gather,
densify_and_all_reduce)
out_slices_list.append(out_slices)
# pylint: enable=cell-var-from-loop
return out_slices_list
def aggregate_tensors_or_indexed_slices(values, accumulation_fn=math_ops.add_n):
"""Aggregate tensors using `accumulation_fn` and IndexedSlices via concat."""
if any(isinstance(v, ops.IndexedSlices) for v in values):
return backprop.aggregate_indexed_slices_gradients(values)
else:
return accumulation_fn(values)
def divide_by_n_tensors_or_indexed_slices(value, n):
if isinstance(value, ops.IndexedSlices):
value = backprop.flatten_nested_indexed_slices(value)
return ops.IndexedSlices(
value.values / n, value.indices, value.dense_shape)
else:
return value / n
def copy_tensor_or_indexed_slices_to_device(value, device):
with ops.device(device):
if isinstance(value, ops.IndexedSlices):
copied_values = array_ops.identity(value.values)
copied_indices = array_ops.identity(value.indices)
copied_shape = array_ops.identity(value.dense_shape)
result = ops.IndexedSlices(copied_values, copied_indices, copied_shape)
else:
result = array_ops.identity(value)
return result
def is_indexed_slices(value):
if isinstance(value, ops.IndexedSlices):
return True
assert isinstance(value, value_lib.DistributedValues)
return all(isinstance(v, ops.IndexedSlices) for v in value.values)
def split_by_sparsity(values):
"""Split values into dense and sparse values.
Args:
values: a list of tensors or `PerReplica`s.
Returns:
Four lists:
a list of dense values, a list of their indices in `values` and
a list of sparse values, a list of their indices in `values`.
"""
dense_values = []
dense_indices = []
sparse_values = []
sparse_indices = []
for i, v in enumerate(values):
if is_indexed_slices(v):
sparse_values.append(v)
sparse_indices.append(i)
else:
dense_values.append(v)
dense_indices.append(i)
return dense_values, dense_indices, sparse_values, sparse_indices
def stitch_values(values_and_indices_list):
"""Stitch values together according to their indices.
Args:
values_and_indices_list: a list of tuples of values and indices indicating
the values and positions in the returned list.
Returns:
a stitched list of values.
"""
length = 0
for values_and_indices in values_and_indices_list:
length += len(values_and_indices[0])
result = [None] * length
for values_and_indices in values_and_indices_list:
if values_and_indices and values_and_indices[0]:
for v, i in zip(*values_and_indices):
assert result[i] is None
result[i] = v
return result
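# Illustrative (hedged) round trip: given values = [v0, s1, v2] where s1 is
# sparse (IndexedSlices) and v0, v2 are dense, split_by_sparsity(values)
# returns ([v0, v2], [0, 2], [s1], [1]); after each group is reduced
# separately, stitch_values([(reduced_dense, [0, 2]), (reduced_sparse, [1])])
# restores the original ordering.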
def per_replica_num_elements(per_replica):
"""Returns the static number of elements of one replica.
Args:
per_replica: A PerReplica of Tensor or IndexedSlices.
Returns:
Number of elements. None if some replica has a different or unknown shape.
"""
values = per_replica._values # pylint: disable=protected-access
s0 = values[0].shape
for v in values:
assert not isinstance(v, ops.IndexedSlices)
if v.shape != s0:
return None
return s0.num_elements()
def pack_by_size(per_replica_list, bytes_per_pack):
"""Packs `per_replica_list` into chunks of `bytes_per_pack`.
The method preserves the original order of `per_replica_list`. The packing is
best effort, each pack could have more or less bytes than `bytes_per_pack`.
  It only packs values with known shape. Note that the usage is different from
`cross_device_ops._pack_tensors`, this function is intended to work with the
ScopeAllocator style batching used in `CollectiveAllReduce`.
Args:
per_replica_list: A list of PerReplica.
bytes_per_pack: Bytes per pack.
Returns:
A list of packs of PerReplica. All values are packed into one pack if
`bytes_per_pack` is zero or any of the value has unknown shape.
"""
if bytes_per_pack == 0:
return [per_replica_list]
packs = []
last_pack_size = 0
for value in per_replica_list:
num_elements = per_replica_num_elements(value)
if num_elements is None:
# Can't pack values with unknown shape.
logging.warning(
'not packing values due to the unknown or inconsistent shape of %s',
value)
return [per_replica_list]
size = num_elements * value._primary.dtype.size # pylint: disable=protected-access
# Try to keep each pack as close to bytes_per_pack as possible, while each
    # pack is at least bytes_per_pack large, i.e. we err on the side of having
# few but large packs.
if not packs or last_pack_size > bytes_per_pack:
packs.append([])
last_pack_size = 0
packs[-1].append(value)
last_pack_size += size
return packs
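# Worked example (illustrative, not part of the original module): with
# bytes_per_pack equal to 4 MB and three values of 3 MB each, the loop above
# produces [[v0, v1], [v2]] -- v1 joins the first pack because that pack has
# not yet exceeded the 4 MB threshold, and v2 then starts a new pack because
# the first pack has grown to 6 MB.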
def _control_input(devices, control_inputs, idx):
"""Returns the `idx`-th item in control_inputs to be used in ops.control_dependencies.
This is a helper function for building collective ops.
Args:
devices: a list of device strings the collective run on.
control_inputs: a list or None.
    idx: the index into `devices` and `control_inputs`.
Returns:
A one item list of the `idx`-th element of `control_inputs`, or an empty
list if `control_inputs` is None.
"""
if control_inputs is None:
return []
if len(control_inputs) != len(devices):
raise ValueError(
'control_inputs must match the length of the devices, %s != %s' %
(len(control_inputs), len(devices)))
return [control_inputs[idx]]
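# --- Illustrative sketch (not part of the original module) -------------------
# A minimal, hedged example of how `aggregate_tensors_or_indexed_slices` and
# `divide_by_n_tensors_or_indexed_slices` above compose to average per-replica
# gradients. It assumes an installed TensorFlow with eager execution enabled;
# `_demo_average_gradients` is a hypothetical helper that exists only for
# illustration and is never called by this module.
def _demo_average_gradients():
  import tensorflow as tf  # local import keeps the sketch self-contained
  # Dense gradients are summed with add_n, then divided element-wise.
  dense_grads = [tf.constant([2.0, 4.0]), tf.constant([6.0, 8.0])]
  dense_avg = divide_by_n_tensors_or_indexed_slices(
      aggregate_tensors_or_indexed_slices(dense_grads), len(dense_grads))
  # dense_avg is now [4.0, 6.0].
  # Sparse gradients (IndexedSlices) are concatenated rather than added, and
  # only their values are scaled on division.
  sparse_grads = [
      tf.IndexedSlices(values=tf.constant([[1.0]]), indices=tf.constant([0]),
                       dense_shape=tf.constant([2, 1])),
      tf.IndexedSlices(values=tf.constant([[3.0]]), indices=tf.constant([1]),
                       dense_shape=tf.constant([2, 1])),
  ]
  sparse_avg = divide_by_n_tensors_or_indexed_slices(
      aggregate_tensors_or_indexed_slices(sparse_grads), len(sparse_grads))
  return dense_avg, sparse_avg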
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0401,W0614
from telemetry.page.actions.all_page_actions import *
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class KeyMobileSitesPage(page_module.Page):
def __init__(self, url, page_set, name=''):
super(KeyMobileSitesPage, self).__init__(url=url, page_set=page_set,
name=name)
self.credentials_path = 'data/credentials.json'
self.user_agent_type = 'mobile'
self.archive_data_file = 'data/key_mobile_sites.json'
def RunSmoothness(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollPage()
interaction.End()
def RunRepaint(self, action_runner):
action_runner.RepaintContinuously(seconds=5)
class Page1(KeyMobileSitesPage):
""" Why: Top news site """
def __init__(self, page_set):
super(Page1, self).__init__(
url='http://nytimes.com/',
page_set=page_set)
self.fastpath = True
class Page2(KeyMobileSitesPage):
""" Why: Typical mobile business site """
def __init__(self, page_set):
super(Page2, self).__init__(
url=('http://iphone.capitolvolkswagen.com/index.htm'
'#new-inventory_p_2Fsb-new_p_2Ehtm_p_3Freset_p_3DInventoryListing'),
page_set=page_set)
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.WaitForElement(text='Next 35')
action_runner.WaitForJavaScriptCondition(
'document.body.scrollHeight > 2560')
class Page3(KeyMobileSitesPage):
""" Why: Image-heavy site """
def __init__(self, page_set):
super(Page3, self).__init__(
url='http://cuteoverload.com',
page_set=page_set)
self.fastpath = True
class Page4(KeyMobileSitesPage):
""" Why: Top tech blog """
def __init__(self, page_set):
super(Page4, self).__init__(
# pylint: disable=C0301
url='http://www.theverge.com/2012/10/28/3568746/amazon-7-inch-fire-hd-ipad-mini-ad-ballsy',
page_set=page_set)
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.WaitForJavaScriptCondition(
'window.Chorus !== undefined &&'
'window.Chorus.Comments !== undefined &&'
'window.Chorus.Comments.Json !== undefined &&'
'(window.Chorus.Comments.loaded ||'
' window.Chorus.Comments.Json.load_comments())')
class Page5(KeyMobileSitesPage):
""" Why: Top news site """
def __init__(self, page_set):
super(Page5, self).__init__(
# pylint: disable=C0301
url='http://www.cnn.com/2012/10/03/politics/michelle-obama-debate/index.html',
page_set=page_set)
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.Wait(8)
class Page6(KeyMobileSitesPage):
""" Why: Social; top Google property; Public profile; infinite scrolls """
def __init__(self, page_set):
super(Page6, self).__init__(
# pylint: disable=C0301
url='https://plus.google.com/app/basic/110031535020051778989/posts?source=apppromo',
page_set=page_set)
def RunSmoothness(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollPage()
interaction.End()
class Page7(KeyMobileSitesPage):
""" Why: #1 (Alexa global) """
def __init__(self, page_set):
super(Page7, self).__init__(
url='https://facebook.com/barackobama',
page_set=page_set)
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.WaitForJavaScriptCondition(
'document.getElementById("u_0_c") !== null &&'
'document.body.scrollHeight > window.innerHeight')
class Page8(KeyMobileSitesPage):
""" Why: #3 (Alexa global) """
def __init__(self, page_set):
super(Page8, self).__init__(
url='http://m.youtube.com/watch?v=9hBpF_Zj4OA',
page_set=page_set)
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.WaitForJavaScriptCondition(
'document.getElementById("paginatortarget") !== null')
class Page9(KeyMobileSitesPage):
"""
Why: #11 (Alexa global), google property; some blogger layouts have infinite
scroll but more interesting
"""
def __init__(self, page_set):
super(Page9, self).__init__(
url='http://googlewebmastercentral.blogspot.com/',
page_set=page_set,
name='Blogger')
class Page10(KeyMobileSitesPage):
""" Why: #18 (Alexa global), Picked an interesting post """
def __init__(self, page_set):
super(Page10, self).__init__(
# pylint: disable=C0301
url='http://en.blog.wordpress.com/2012/09/04/freshly-pressed-editors-picks-for-august-2012/',
page_set=page_set,
name='Wordpress')
class Page11(KeyMobileSitesPage):
""" Why: #12 (Alexa global),Public profile """
def __init__(self, page_set):
super(Page11, self).__init__(
url='https://www.linkedin.com/in/linustorvalds',
page_set=page_set,
name='LinkedIn')
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.WaitForJavaScriptCondition(
'document.getElementById("profile-view-scroller") !== null')
class Page12(KeyMobileSitesPage):
""" Why: #6 (Alexa) most visited worldwide, picked an interesting page """
def __init__(self, page_set):
super(Page12, self).__init__(
url='http://en.wikipedia.org/wiki/Wikipedia',
page_set=page_set,
name='Wikipedia (1 tab)')
class Page13(KeyMobileSitesPage):
""" Why: #8 (Alexa global), picked an interesting page """
def __init__(self, page_set):
super(Page13, self).__init__(
url='http://twitter.com/katyperry',
page_set=page_set,
name='Twitter')
self.disabled = 'Forbidden (Rate Limit Exceeded)'
class Page14(KeyMobileSitesPage):
""" Why: #37 (Alexa global) """
def __init__(self, page_set):
super(Page14, self).__init__(
url='http://pinterest.com',
page_set=page_set,
name='Pinterest')
class Page15(KeyMobileSitesPage):
""" Why: #1 sports """
def __init__(self, page_set):
super(Page15, self).__init__(
url='http://espn.go.com',
page_set=page_set,
name='ESPN')
self.disabled = 'Fails often; crbug.com/249722'
class Page16(KeyMobileSitesPage):
""" Why: #1 Alexa reference """
def __init__(self, page_set):
super(Page16, self).__init__(
# pylint: disable=C0301
url='http://answers.yahoo.com/question/index?qid=20110117024343AAopj8f',
page_set=page_set)
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.WaitForElement(text='Other Answers (1 - 20 of 149)')
action_runner.ClickElement(text='Other Answers (1 - 20 of 149)')
class Page17(KeyMobileSitesPage):
""" Why: productivity, top google properties """
def __init__(self, page_set):
super(Page17, self).__init__(
url='https://mail.google.com/mail/',
page_set=page_set)
self.credentials = 'google'
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.WaitForJavaScriptCondition(
'document.getElementById("og_user_warning") !== null')
action_runner.WaitForJavaScriptCondition(
'document.getElementById("og_user_warning") === null')
def RunSmoothness(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollElement(element_function=(
'document.getElementById("views").childNodes[1].firstChild'))
interaction.End()
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollElement(element_function=(
'document.getElementById("views").childNodes[1].firstChild'))
interaction.End()
class Page18(KeyMobileSitesPage):
""" Why: crbug.com/169827 """
def __init__(self, page_set):
super(Page18, self).__init__(
url='http://slashdot.org/',
page_set=page_set)
self.fastpath = True
class Page19(KeyMobileSitesPage):
""" Why: #5 Alexa news """
def __init__(self, page_set):
super(Page19, self).__init__(
url='http://www.reddit.com/r/programming/comments/1g96ve',
page_set=page_set)
self.fastpath = True
class Page20(KeyMobileSitesPage):
""" Why: Problematic use of fixed position elements """
def __init__(self, page_set):
super(Page20, self).__init__(
url='http://www.boingboing.net',
page_set=page_set)
self.fastpath = True
class Page21(KeyMobileSitesPage):
""" Why: crbug.com/172906 """
def __init__(self, page_set):
super(Page21, self).__init__(
url='http://groupcloned.com',
page_set=page_set)
    self.disabled = ('Page behaves non-deterministically, replaced with test '
                     'version for now')
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.Wait(5)
action_runner.WaitForJavaScriptCondition('''
document.getElementById("element-19") !== null &&
document.getElementById("element-19").contentDocument
.getElementById("element-22") !== null &&
document.getElementById("element-19").contentDocument
.getElementsByClassName(
"container list-item gc-list-item stretched").length !== 0''')
def RunSmoothness(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollPage(
distance_expr='''
Math.max(0, 1250 + document.getElementById("element-19")
.contentDocument
.getElementById("element-22")
.getBoundingClientRect().top);''',
use_touch=True)
interaction.End()
class Page22(KeyMobileSitesPage):
""" Why: crbug.com/172906 """
def __init__(self, page_set):
super(Page22, self).__init__(
url='http://groupcloned.com/test/list-images-variable/index.html',
page_set=page_set)
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.WaitForJavaScriptCondition(
'document.getElementById("element-5") !== null')
def RunSmoothness(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollPage(
distance_expr='''
Math.max(0, 1250 +
document.getElementById("element-5")
.getBoundingClientRect().top);''',
use_touch=True)
interaction.End()
class Page23(KeyMobileSitesPage):
""" Why: crbug.com/231413 """
def __init__(self, page_set):
super(Page23, self).__init__(
url='http://forecast.io',
page_set=page_set)
self.disabled = u"Doesn't scroll; crbug.com/249736"
class Page24(KeyMobileSitesPage):
""" Why: Google News: accelerated scrolling version """
def __init__(self, page_set):
super(Page24, self).__init__(
url='http://mobile-news.sandbox.google.com/news/pt1',
page_set=page_set)
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.WaitForJavaScriptCondition(
'typeof NEWS_telemetryReady !== "undefined" && '
'NEWS_telemetryReady == true')
class Page25(KeyMobileSitesPage):
"""
Why: Google News: this iOS version is slower than accelerated scrolling
"""
def __init__(self, page_set):
super(Page25, self).__init__(
url='http://mobile-news.sandbox.google.com/news/pt0',
page_set=page_set)
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.WaitForJavaScriptCondition(
'document.getElementById(":h") != null')
action_runner.Wait(1)
def RunSmoothness(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollElement(
element_function='document.getElementById(":5")',
distance_expr='''
Math.max(0, 2500 +
document.getElementById(':h').getBoundingClientRect().top)''',
use_touch=True)
interaction.End()
class Page26(KeyMobileSitesPage):
"""
Why: #1 world commerce website by visits; #3 commerce in the US by time spent
"""
def __init__(self, page_set):
super(Page26, self).__init__(
url='http://www.amazon.com/gp/aw/s/ref=is_box_?k=nicolas+cage',
page_set=page_set)
def RunSmoothness(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollElement(
selector='#search',
distance_expr='document.body.scrollHeight - window.innerHeight')
interaction.End()
class KeyMobileSitesPageSet(page_set_module.PageSet):
""" Key mobile sites """
def __init__(self):
super(KeyMobileSitesPageSet, self).__init__(
credentials_path='data/credentials.json',
user_agent_type='mobile',
archive_data_file='data/key_mobile_sites.json',
bucket=page_set_module.PARTNER_BUCKET)
self.AddPage(Page1(self))
self.AddPage(Page2(self))
self.AddPage(Page3(self))
self.AddPage(Page4(self))
self.AddPage(Page5(self))
self.AddPage(Page6(self))
self.AddPage(Page7(self))
self.AddPage(Page8(self))
self.AddPage(Page9(self))
self.AddPage(Page10(self))
self.AddPage(Page11(self))
self.AddPage(Page12(self))
# self.AddPage(Page13(self))
self.AddPage(Page14(self))
# self.AddPage(Page15(self))
self.AddPage(Page16(self))
self.AddPage(Page17(self))
self.AddPage(Page18(self))
self.AddPage(Page19(self))
self.AddPage(Page20(self))
self.AddPage(Page21(self))
self.AddPage(Page22(self))
# self.AddPage(Page23(self))
self.AddPage(Page24(self))
self.AddPage(Page25(self))
self.AddPage(Page26(self))
urls_list = [
# Why: crbug.com/242544
('http://www.androidpolice.com/2012/10/03/rumor-evidence-mounts-that-an-'
'lg-optimus-g-nexus-is-coming-along-with-a-nexus-phone-certification-'
'program/'),
# Why: crbug.com/149958
'http://gsp.ro',
# Why: Top tech blog
'http://theverge.com',
# Why: Top tech site
'http://digg.com',
# Why: Top Google property; a Google tab is often open
'https://www.google.com/#hl=en&q=barack+obama',
# Why: #1 news worldwide (Alexa global)
'http://news.yahoo.com',
# Why: #2 news worldwide
'http://www.cnn.com',
# Why: #1 commerce website by time spent by users in US
'http://shop.mobileweb.ebay.com/searchresults?kw=viking+helmet',
# Why: #1 Alexa recreation
# pylint: disable=C0301
'http://www.booking.com/searchresults.html?src=searchresults&latitude=65.0500&longitude=25.4667',
# Why: #1 Alexa sports
'http://sports.yahoo.com/',
# Why: Top tech blog
'http://techcrunch.com',
# Why: #6 Alexa sports
'http://mlb.com/',
# Why: #14 Alexa California
'http://www.sfgate.com/',
# Why: Non-latin character set
'http://worldjournal.com/',
# Why: Mobile wiki
'http://www.wowwiki.com/World_of_Warcraft:_Mists_of_Pandaria',
# Why: #15 Alexa news
'http://online.wsj.com/home-page',
# Why: Image-heavy mobile site
'http://www.deviantart.com/',
# Why: Top search engine
('http://www.baidu.com/s?wd=barack+obama&rsv_bp=0&rsv_spt=3&rsv_sug3=9&'
'rsv_sug=0&rsv_sug4=3824&rsv_sug1=3&inputT=4920'),
# Why: Top search engine
'http://www.bing.com/search?q=sloths'
]
for url in urls_list:
self.AddPage(KeyMobileSitesPage(url, self))
|
|
import copy
import importlib
import json
import logging
import os
from trellio.services import TCPService, HTTPService
from ..utils.log_handlers import BufferingSMTPHandler
logger = logging.getLogger(__name__)
GLOBAL_CONFIG = {
"RONIN": False,
"HOST_NAME": "",
"ADMIN_EMAILS": [],
"SERVICE_NAME": "",
"SERVICE_VERSION": "",
"REGISTRY_HOST": "",
"REGISTRY_PORT": "",
"REDIS_HOST": "",
"REDIS_PORT": "",
"HTTP_HOST": "",
"TCP_HOST": "",
"HTTP_PORT": "",
"TCP_PORT": "",
"SIGNALS": {},
"MIDDLEWARES": [],
"APPS": [],
"DATABASE_SETTINGS": {
"database": "",
"user": "",
"password": "",
"host": "",
"port": ""
},
"SMTP_SETTINGS": {}
}
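# Illustrative sketch (not part of trellio): ConfigHandler.set_config() below
# deep-copies GLOBAL_CONFIG and overlays it with the JSON file it is given, so
# a minimal config.json only needs the keys that differ from the defaults.
# All concrete values here are hypothetical:
#
#     {
#         "SERVICE_NAME": "orders",
#         "SERVICE_VERSION": "1.0.0",
#         "HOST_NAME": "orders-1",
#         "HTTP_HOST": "127.0.0.1",
#         "HTTP_PORT": 8000,
#         "TCP_HOST": "127.0.0.1",
#         "TCP_PORT": 8001,
#         "REGISTRY_HOST": "127.0.0.1",
#         "REGISTRY_PORT": 4500,
#         "REDIS_HOST": "127.0.0.1",
#         "REDIS_PORT": 6379,
#         "RONIN": true
#     }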
class InvalidConfigurationError(Exception):
pass
class ConfigHandler:
smtp_host = 'SMTP_HOST'
smtp_user = 'SMTP_USER'
smtp_port = 'SMTP_PORT'
smtp_password = 'SMTP_PASSWORD'
admin_emails = 'ADMIN_EMAILS'
middleware_key = 'MIDDLEWARES'
signal_key = 'SIGNALS'
service_name_key = 'SERVICE_NAME'
host_name_key = 'HOST_NAME'
service_version_key = 'SERVICE_VERSION'
reg_host_key = "REGISTRY_HOST"
reg_port_key = "REGISTRY_PORT"
redis_host_key = "REDIS_HOST"
redis_port_key = "REDIS_PORT"
http_host_key = "HTTP_HOST"
tcp_host_key = "TCP_HOST"
http_port_key = "HTTP_PORT"
tcp_port_key = "TCP_PORT"
database_key = 'DATABASE_SETTINGS'
ronin_key = "RONIN"
smtp_key = 'SMTP_SETTINGS'
apps_key = 'APPS'
# service_path_key = "SERVICE_PATH"
def __init__(self, host_class):
self.settings = None
self.host = host_class
@property
def service_name(self):
return self.settings[self.service_name_key]
def get_tcp_clients(self):
from trellio.services import TCPServiceClient
tcp_clients = self.inheritors(TCPServiceClient)
return tcp_clients
def get_http_clients(self):
from trellio.services import HTTPServiceClient
http_clients = self.inheritors(HTTPServiceClient)
return http_clients
def get_subscribers(self):
from trellio.pubsub import Subscriber
subscriber_classes = self.inheritors(Subscriber)
subscribers = []
for subs in subscriber_classes:
s = subs()
s.pubsub_host = self.settings[self.redis_host_key]
s.pubsub_port = self.settings[self.redis_port_key]
subscribers.append(s)
return subscribers
def configure_host(self, host):
host.configure(
host_name=self.settings[self.host_name_key],
service_name=self.settings[self.service_name_key],
service_version=self.settings[self.service_version_key],
http_host=self.settings[self.http_host_key],
http_port=self.settings[self.http_port_key],
tcp_host=self.settings[self.tcp_host_key],
tcp_port=self.settings[self.tcp_port_key],
registry_host=self.settings[self.reg_host_key],
registry_port=self.settings[self.reg_port_key],
pubsub_host=self.settings[self.redis_host_key],
            pubsub_port=self.settings[self.redis_port_key],
ronin=self.settings[self.ronin_key]
)
def setup_host(self):
host = self.host
self.configure_host(host)
publisher = self.get_publisher()
subscribers = self.get_subscribers()
if publisher:
host.attach_publisher(publisher)
if subscribers:
host.attach_subscribers(subscribers)
http_service = self.get_http_service()
tcp_service = self.get_tcp_service()
tcp_clients = self.get_tcp_clients()
http_clients = self.get_http_clients()
http_views = self.get_http_views()
tcp_views = self.get_tcp_views()
if not http_service:
http_service = HTTPService(host.service_name, host.service_version, host.http_host, host.http_port)
if not tcp_service:
tcp_service = TCPService(host.service_name, host.service_version, host.tcp_host, host.tcp_port)
self.enable_signals()
self.enable_middlewares(http_service=http_service, http_views=http_views)
if http_service:
# self.register_http_views(http_service)
host.attach_service(http_service)
http_service.clients = [i() for i in http_clients + tcp_clients]
# self.register_tcp_views(tcp_service)
host.attach_service(tcp_service)
if http_service:
tcp_service.clients = http_service.clients
if http_views:
host.attach_http_views(http_views)
for view_inst in host.get_tcp_views():
pass
if tcp_views:
host.attach_tcp_views(tcp_views)
_tcp_service = host.get_tcp_service()
_tcp_service.tcp_views = host._tcp_views
host._smtp_handler = self.get_smtp_logging_handler()
def get_database_settings(self):
return self.settings[self.database_key]
def set_config(self, config_path):
settings = None
with open(config_path) as f:
settings = json.load(f)
new_settings = copy.deepcopy(GLOBAL_CONFIG)
new_settings.update(settings)
self.settings = new_settings
parent_dir = os.getcwd().split('/')[-1]
client_path = parent_dir + '.clients'
service_path = parent_dir + '.service'
try:
importlib.import_module(client_path)
except:
logger.warning('No clients found')
service_imported = True
service_exception = None
try:
importlib.import_module(service_path)
except Exception as e:
service_imported = False
service_exception = e.__traceback__
if self.settings.get(self.apps_key):
apps = self.settings[self.apps_key]
for app in apps:
views_path = parent_dir + '.{}.views'.format(app)
try:
importlib.import_module(views_path)
except Exception as e:
print(e.__traceback__.__str__())
else:
if not service_imported:
print(service_exception.__str__())
def get_smtp_logging_handler(self):
if self.settings.get(self.smtp_key):
keys = ["smtp_host", "smtp_port", "smtp_user", "smtp_password"]
setting_keys = self.settings[self.smtp_key].keys()
missing_keys = list(filter(lambda x: x not in setting_keys, keys))
if not missing_keys:
handler = BufferingSMTPHandler(mailhost=self.settings[self.smtp_key]['smtp_host'],
mailport=self.settings[self.smtp_key]['smtp_port'],
fromaddr=self.settings[self.smtp_key]['smtp_user'],
toaddrs=self.settings[self.admin_emails],
subject='Error {} {}:{}'.format(self.settings[self.host_name_key],
self.settings[
self.service_name_key].upper(),
self.settings[self.service_version_key]),
capacity=1,
password=self.settings[self.smtp_key]['smtp_password'])
handler.setLevel(logging.ERROR)
if not self.settings[self.ronin_key]:
return handler
def get_http_service(self):
from trellio.services import HTTPService
http_service = None
if HTTPService.__subclasses__():
service_sub_class = HTTPService.__subclasses__()[0]
http_service = service_sub_class(self.settings[self.service_name_key],
self.settings[self.service_version_key],
self.settings[self.http_host_key],
self.settings[self.http_port_key])
return http_service
def get_tcp_service(self):
from trellio.services import TCPService
tcp_service = None
if TCPService.__subclasses__():
service_sub_class = TCPService.__subclasses__()[0]
tcp_service = service_sub_class(self.settings[self.service_name_key],
self.settings[self.service_version_key],
self.settings[self.tcp_host_key],
self.settings[self.tcp_port_key])
return tcp_service
def get_publisher(self):
from trellio.pubsub import Publisher
publisher = None
if Publisher.__subclasses__():
publisher_sub_class = Publisher.__subclasses__()[0]
publisher = publisher_sub_class(self.settings[self.service_name_key],
self.settings[self.service_version_key],
self.settings[self.redis_host_key],
self.settings[self.redis_port_key])
return publisher
def get_http_views(self):
from trellio.views import HTTPView
return self.inheritors(HTTPView)
def get_tcp_views(self):
from trellio.views import TCPView
return self.inheritors(TCPView)
def import_class_from_path(self, path):
broken = path.split('.')
class_name = broken[-1]
module_name = '.'.join(broken[:-1])
module = importlib.import_module(module_name)
class_value = getattr(module, class_name)
return module, class_value
def enable_middlewares(self, http_service=None, http_views=()):
middlewares = self.settings[self.middleware_key] or []
middle_cls = []
for i in middlewares:
module, class_value = self.import_class_from_path(i)
if not class_value:
raise InvalidConfigurationError
else:
middle_cls.append(class_value())
if http_service:
http_service.middlewares = middle_cls
for view in http_views:
view.middlewares = middle_cls
def enable_signals(self):
'''
        e.g. signal_dict = {signal_path: signal_receiver_path_list, ...}
:return:
'''
signal_dict = self.settings[self.signal_key] or {}
for i in signal_dict.keys():
sig_module, signal_class = self.import_class_from_path(i)
for j in signal_dict[i]:
recv_module, recv_coro = self.import_class_from_path(j)
                signal_class.register(recv_coro)  # registering receiver
@staticmethod
def inheritors(klass):
subclasses = set()
work = [klass]
while work:
parent = work.pop()
for child in parent.__subclasses__():
if child not in subclasses:
subclasses.add(child)
work.append(child)
return list(subclasses)
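# Note (hedged): `inheritors` walks __subclasses__ transitively, which is how
# ConfigHandler auto-discovers user-defined views, clients, services and
# subscribers -- set_config() imports the project's modules first, so those
# subclasses already exist by the time setup_host() runs.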
|
|
"""
===============================
Module provider for ValueDomain
===============================
Value-Domain API
================
Reference: https://www.value-domain.com/vdapi/
- Preparation
1. Get your auth token: https://www.value-domain.com/vdapi/
2. export LEXICON_VALUEDOMAIN_AUTH_TOKEN="<Your Auth Token>"
3. export DOMAIN_NAME=$(curl -s https://api.value-domain.com/v1/domains -H "Authorization: Bearer ${LEXICON_VALUEDOMAIN_AUTH_TOKEN}" | python -c "import sys,json;print(json.load(sys.stdin)['results'][0]['domainname'])")
- Get list of your domain information
curl -s https://api.value-domain.com/v1/domains -H "Authorization: Bearer ${LEXICON_VALUEDOMAIN_AUTH_TOKEN}" | python -m json.tool
- Get domain name
curl -s https://api.value-domain.com/v1/domains -H "Authorization: Bearer ${LEXICON_VALUEDOMAIN_AUTH_TOKEN}" | python -c "import sys,json;print(json.load(sys.stdin)['results'][0]['domainname'])"
- Get name servers
curl -s https://api.value-domain.com/v1/domains/nameserver -H "Authorization: Bearer ${LEXICON_VALUEDOMAIN_AUTH_TOKEN}" | python -m json.tool
curl -s https://api.value-domain.com/v1/domains/${DOMAIN_NAME}/nameserver -H "Authorization: Bearer ${LEXICON_VALUEDOMAIN_AUTH_TOKEN}" | python -m json.tool
- Get DNS records
curl -s https://api.value-domain.com/v1/domains/dns -H "Authorization: Bearer ${LEXICON_VALUEDOMAIN_AUTH_TOKEN}" | python -m json.tool
curl -s https://api.value-domain.com/v1/domains/${DOMAIN_NAME}/dns -H "Authorization: Bearer ${LEXICON_VALUEDOMAIN_AUTH_TOKEN}" | python -m json.tool
- Get API Logs
curl -s https://api.value-domain.com/v1/logs -H "Authorization: Bearer ${LEXICON_VALUEDOMAIN_AUTH_TOKEN}" | python -m json.tool
Value Domain DNS records syntax
===============================
Reference: https://www.value-domain.com/moddnsfree.php
- A record
- a www 123.123.123.3 : A record for www.<YOUR-DOMAIN>
- a * 123.123.123.5 : A record for any sub domain of <YOUR-DOMAIN>
- a @ 123.123.123.5 : A record for <YOUR-DOMAIN> (no sub domain)
- AAAA record
- aaaa ipv6 FF01::101 : AAAA record for ipv6.<YOUR-DOMAIN>
- MX record
- mx mx1.your.domain. 10 : MX record for <YOUR-DOMAIN> (Server:mx1.your.domain, Priority 10)
- mx @ 10 : MX record for <YOUR-DOMAIN> (Server:<YOUR-DOMAIN>, Priority 10)
- NS record
- ns abc ns1.example.com. : NS record for abc.<YOUR-DOMAIN>
- TXT record
- txt abc v=spf1 mx ~all : TXT record for abc.<YOUR-DOMAIN> (v=spf1 mx ~all)
- txt @ v=spf1 mx ~all : TXT record for <YOUR-DOMAIN> (v=spf1 mx ~all)
- SRV record
- srv _smtp._tcp 1 2 25 server1.your.domain : SRV record for _smtp._tcp.<YOUR-DOMAIN> (Priority:1, Weight:2, Port:25)
"""
import hashlib
import json
import logging
import time
from http import cookiejar
from http.client import HTTPResponse
from typing import Any, Callable, Dict, List, NamedTuple, Optional, TypeVar, Union
from urllib import request
from urllib.error import HTTPError
from urllib.request import OpenerDirector
from lexicon.providers.base import Provider as BaseProvider
T = TypeVar("T")
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = ["value-domain.com"]
DEFAULT_TTL = 3600
########################################################################
# Util
########################################################################
def convert_json_to_bytes(x):
return bytes(json.dumps(x), "utf-8")
def is_domain(target: str) -> bool:
return (
len(target) > 1
and len(target.strip(".")) > 0
and target[0] != "."
and target[-1] == "."
)
def is_sub_domain(target: str, domainname: str) -> Union[str, bool]:
idx = target.rfind(domainname)
return (
target[0:idx].strip(".")
if idx >= 0 and is_domain(target) and idx + len(domainname) + 1 == len(target)
else False
)
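# Illustrative behaviour (hedged, for reading convenience only):
#   is_domain("example.com.")                        -> True  (fully qualified)
#   is_domain("example.com")                         -> False (no trailing dot)
#   is_sub_domain("www.example.com.", "example.com") -> "www"
#   is_sub_domain("other.org.", "example.com")       -> False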
########################################################################
# Rest API
########################################################################
class RestApiResponse(NamedTuple):
header: HTTPResponse
data: bytes
RESTAPI_CALLER_TYPE = Callable[[str, str, Optional[T]], RestApiResponse]
def reastapi_add_content_type(
_headers: Optional[Dict[str, str]],
content_type: Optional[str],
content: Optional[bytes],
) -> Dict[str, str]:
"""Add 'Content-Type' header if exists"""
headers = _headers.copy() if _headers is not None else {}
if content_type is not None and content is not None and len(content) != 0:
headers["Content-Type"] = content_type
return headers
def restapi_create_request(
url: str,
method: str,
headers: Optional[Dict[str, str]],
content_type: Optional[str],
content: Optional[bytes],
) -> request.Request:
"""Create Request instance including content if exists"""
return request.Request(
url,
data=content if content is not None and len(content) > 0 else None,
method=method,
headers=reastapi_add_content_type(headers, content_type, content),
)
def restapi_call(
opener: OpenerDirector,
url: str,
method: str,
headers: Optional[Dict[str, str]],
content_type: Optional[str] = None,
content: Optional[bytes] = None,
) -> RestApiResponse:
"""Execute HTTP Request with OpenerDirector"""
with opener.open(
restapi_create_request(url, method, headers, content_type, content)
) as response:
return RestApiResponse(response, response.read())
def restapi_build_opener() -> OpenerDirector:
"""Create OpenerDirector instance with cookie processor"""
return request.build_opener(
request.BaseHandler(), request.HTTPCookieProcessor(cookiejar.CookieJar())
)
def restapi_exception_not_200(response: HTTPResponse):
# HTTPError will be raised if status >= 400
if response.status != 200:
LOGGER.error(f"HTTP Status: {response.status}")
raise Exception(f"HTTP Status: {response.status}")
########################################################################
# Value Domain API
########################################################################
VDAPI_ENDPOINT = "https://api.value-domain.com/v1"
class RecordData(NamedTuple):
rtype: str
name: str
content: str
def __eq__(self, other):
return (
self.rtype.lower() == other.rtype.lower()
and self.name.lower() == other.name.lower()
and self.content == other.content
)
def __str__(self):
return f"{self.rtype.lower()} {self.name.lower()} {self.content}"
def match(
self,
rtype: Optional[str] = None,
name: Optional[str] = None,
content: Optional[str] = None,
) -> bool:
return (
(rtype is None or rtype.lower() == self.rtype.lower())
and (name is None or name.lower() == self.name.lower())
and (content is None or content.lower() == self.content.lower())
)
def id(self):
return hashlib.md5(str(self).encode("utf-8")).hexdigest()
class DomainData(NamedTuple):
records: List[RecordData]
ttl: int
def vdapi_build_caller(
opener: OpenerDirector,
content_type: str,
headers: Optional[Dict[str, str]] = None,
content_decoder=lambda x: x,
) -> RESTAPI_CALLER_TYPE:
def _(
url: str, method: str, content: Optional[T] = None, interval=1
) -> RestApiResponse:
try:
return restapi_call(
opener,
url,
method,
headers,
content_type,
content_decoder(content) if content is not None else None,
)
except HTTPError as http_error:
            if http_error.code == 429:  # Too many requests
time.sleep(interval)
return _(url, method, content, interval * 2)
else:
raise http_error
return _
def vdapi_create_caller(auth_token: str):
return vdapi_build_caller(
restapi_build_opener(),
"application/json",
{
# Value-Domain API rejects the request with default Python User-Agent.
"User-Agent": "curl/7.74.0",
"Cache-Control": "no-cache, no-store",
"Authorization": f"Bearer {auth_token}",
},
convert_json_to_bytes,
)
def vdapi_get_domain_list(caller: RESTAPI_CALLER_TYPE) -> List[str]:
resp: RestApiResponse = caller(f"{VDAPI_ENDPOINT}/domains", "GET", None)
restapi_exception_not_200(resp.header)
return list(
filter(
lambda x: x is not None,
[
domain.get("domainname")
for domain in json.loads(resp.data.decode("utf-8").strip()).get(
"results"
)
],
)
)
def vdapi_get_domain_data(
caller: RESTAPI_CALLER_TYPE, domainname: str
) -> Optional[DomainData]:
resp: RestApiResponse = caller(
f"{VDAPI_ENDPOINT}/domains/{domainname}/dns", "GET", None
)
restapi_exception_not_200(resp.header)
domain_info: dict = json.loads(resp.data.decode("utf-8").strip())
domain_records: Optional[str] = domain_info.get("results", {}).get("records")
return (
DomainData(
[
RecordData(elem[0], elem[1], elem[2])
                for elem in [line.split(" ", 2) for line in domain_records.split("\n")]
if len(elem) == 3
],
int(domain_info.get("results", {}).get("ttl", DEFAULT_TTL)),
)
if domain_records is not None
else None
)
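# Illustrative (hedged): a raw "records" string such as
#     "a www 123.123.123.3\ntxt @ v=spf1 mx ~all"
# is parsed above into
#     [RecordData("a", "www", "123.123.123.3"),
#      RecordData("txt", "@", "v=spf1 mx ~all")]
# because each line is split into at most three fields (type, name, content).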
def vdapi_set_domain_data(
caller: RESTAPI_CALLER_TYPE, domainname: str, data: DomainData
):
resp = caller(
f"{VDAPI_ENDPOINT}/domains/{domainname}/dns",
"PUT",
{
"ns_type": "valuedomain1",
"records": "\n".join([str(record) for record in data.records]),
"ttl": data.ttl,
},
)
restapi_exception_not_200(resp.header)
########################################################################
# Lexicon Provider for Value-Domain
########################################################################
def provider_parser(subparser):
"""Configure provider parser for Value Domain"""
subparser.description = """
Value Domain requires a token to access its API.
You can generate one for your account on the following URL:
https://www.value-domain.com/vdapi/"""
subparser.add_argument("--auth-token", help="specify youyr API token")
class Provider(BaseProvider):
"""Provider class for Value Domain"""
def __init__(self, config):
super(Provider, self).__init__(config)
        self.domain_id: Optional[List[str]] = None
auth_token = self._get_provider_option("auth_token")
        assert auth_token is not None, "No authentication token defined"
self.caller: RESTAPI_CALLER_TYPE = vdapi_create_caller(auth_token)
# Authenticate against provider,
# Make any requests required to get the domain's id for this provider,
# so it can be used in subsequent calls.
# Should throw an error if authentication fails for any reason,
    # or if the domain does not exist.
def _authenticate(self):
self.domain_id = vdapi_get_domain_list(self.caller)
assert len(self.domain_id) > 0, "Failed to get domain names"
if self.domain not in self.domain_id:
raise Exception(f"{self.domain} not managed")
    # Create record. If record already exists with the same content, do nothing.
def _create_record(self, rtype: str, name: str, content: str):
self._assert_initialized()
ttl_option = self._get_lexicon_option("ttl")
domain_data = vdapi_get_domain_data(self.caller, self.domain)
rec = self._create_record_data(rtype, self._relative_name(name), content)
if domain_data is not None and rec not in domain_data.records:
vdapi_set_domain_data(
self.caller,
self.domain,
DomainData(
domain_data.records + [rec],
ttl_option
if ttl_option is not None and ttl_option > 0
else DEFAULT_TTL
if ttl_option is not None
else domain_data.ttl,
),
)
elif domain_data is not None:
pass
else:
vdapi_set_domain_data(
self.caller,
self.domain,
DomainData(
[rec],
ttl_option
if ttl_option is not None and ttl_option > 0
else DEFAULT_TTL,
),
)
return True
# List all records. Return an empty list if no records found
# type, name and content are used to filter records.
# If possible filter during the query, otherwise filter after response is received.
def _list_records(
self,
rtype: Optional[str] = None,
name: Optional[str] = None,
content: Optional[str] = None,
):
self._assert_initialized()
domain_data = vdapi_get_domain_data(self.caller, self.domain)
return (
[
{
"id": record_data.id(),
"ttl": domain_data.ttl,
"type": record_data.rtype.upper(),
"name": self._fqdn_name(record_data.name)
if record_data.rtype.lower() != "txt"
else self._full_name(record_data.name),
"content": record_data.content,
}
for record_data in domain_data.records
if record_data.match(
rtype,
self._relative_name(name) if name is not None else None,
content,
)
]
if domain_data is not None
else []
)
# Update a record. Identifier must be specified.
def _update_record(self, identifier, rtype=None, name=None, content=None):
self._assert_initialized()
ttl_option = self._get_lexicon_option("ttl")
domain_data = vdapi_get_domain_data(self.caller, self.domain)
target = [record for record in domain_data.records if record.id() == identifier]
if len(target) > 0:
vdapi_set_domain_data(
self.caller,
self.domain,
DomainData(
[
record
for record in domain_data.records
if record.id() != identifier
]
+ [
self._create_record_data(
rtype or target[0].rtype,
name or self._relative_name(target[0].name),
content or target[0].content,
)
],
ttl_option
if ttl_option is not None and ttl_option > 0
else DEFAULT_TTL
if ttl_option is not None
else domain_data.ttl,
),
)
return True
# Delete an existing record.
# If record does not exist, do nothing.
# If an identifier is specified, use it, otherwise do a lookup using type, name and content.
def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
self._assert_initialized()
domain_data = vdapi_get_domain_data(self.caller, self.domain)
if domain_data is not None and identifier is not None:
vdapi_set_domain_data(
self.caller,
self.domain,
DomainData(
[
record
for record in domain_data.records
if record.id() != identifier
],
domain_data.ttl,
),
)
elif domain_data is not None:
vdapi_set_domain_data(
self.caller,
self.domain,
DomainData(
[
record
for record in domain_data.records
if not record.match(rtype, self._relative_name(name), content)
],
domain_data.ttl,
),
)
return True
def _request(
self,
action: str = "GET",
url: str = "/",
data: Optional[Dict] = None,
query_params: Optional[Dict] = None,
) -> Any:
pass
# self._assert_initialized()
# return self.caller(url, action, data)
def _assert_initialized(self):
if self.caller is None or self.domain_id is None:
self._authenticate()
assert self.caller is not None, "HTTP caller not defined"
        assert self.domain_id is not None and len(
            self.domain_id
        ) > 0, "Domain name not retrieved"
def _create_record_data(self, rtype: str, name: str, content: str) -> RecordData:
return RecordData(rtype.lower(), self._relative_name(name).lower(), content)
if __name__ == "__main__":
import os
import unittest
DUMMY_TYPE = "a"
DUMMY_NAME = "test"
DUMMY_CONTENT = "1.2.3.4"
DUMMY_CONTENT2 = "2.3.4.5"
class TestValueDomain(unittest.TestCase):
def setUp(self):
self.auth_token = os.environ.get("LEXICON_VALUEDOMAIN_AUTH_TOKEN")
if self.auth_token is None:
raise Exception("LEXICON_VALUEDOMAIN_AUTH_TOKEN not defined")
self.caller = vdapi_create_caller(self.auth_token)
def tearDown(self):
pass
def _create_provide(self, domainname: str):
return Provider(
{
"provider_name": "valuedomain",
"domain": domainname,
"valuedomain": {"auth_token": self.auth_token},
}
)
def test_vdapi_get_domain_list(self):
domain_list = vdapi_get_domain_list(self.caller)
self.assertGreater(len(domain_list), 0)
def test_vdapi_get_record_list(self):
domain_list = vdapi_get_domain_list(self.caller)
for domainname in domain_list:
record = vdapi_get_domain_data(self.caller, domainname)
self.assertIsNotNone(record)
self.assertGreater(len(record.records), 0)
self.assertIsNotNone(record.ttl)
def test_vdapi_set_record_list(self):
domain_list = vdapi_get_domain_list(self.caller)
for domainname in domain_list:
domain_data = vdapi_get_domain_data(self.caller, domainname)
dummy = RecordData(DUMMY_TYPE, DUMMY_NAME, DUMMY_CONTENT)
vdapi_set_domain_data(
self.caller,
domainname,
DomainData(domain_data.records + [dummy], domain_data.ttl),
)
updated_domain_data = vdapi_get_domain_data(self.caller, domainname)
self.assertIn(dummy, updated_domain_data.records)
# cleanup
vdapi_set_domain_data(self.caller, domainname, domain_data)
updated_domain_data = vdapi_get_domain_data(self.caller, domainname)
self.assertNotIn(dummy, updated_domain_data.records)
def test_list_records(self):
domain_list = vdapi_get_domain_list(self.caller)
for domainname in domain_list:
provider = self._create_provide(domainname)
provider._authenticate()
records = provider._list_records()
self.assertGreater(len(records), 0)
records = provider._list_records(rtype="A")
self.assertGreater(len(records), 0)
records = provider._list_records(rtype="B")
self.assertEqual(len(records), 0)
def test_create_records(self):
domain_list = vdapi_get_domain_list(self.caller)
for domainname in domain_list:
provider = self._create_provide(domainname)
provider._authenticate()
provider._create_record(DUMMY_TYPE, DUMMY_NAME, DUMMY_CONTENT)
records = provider._list_records(
rtype=DUMMY_TYPE, name=DUMMY_NAME, content=DUMMY_CONTENT
)
self.assertGreater(len(records), 0)
provider._create_record(
DUMMY_TYPE, f"{DUMMY_NAME}.{domainname}.", DUMMY_CONTENT
)
self.assertEqual(
len(
provider._list_records(
rtype=DUMMY_TYPE,
name=f"DUMMY_NAME.{domainname}.",
content=DUMMY_CONTENT,
)
),
0,
)
# cleanup
provider._delete_record(None, DUMMY_TYPE, DUMMY_NAME, DUMMY_CONTENT)
self.assertEqual(
len(
provider._list_records(
rtype=DUMMY_TYPE, name=DUMMY_NAME, content=DUMMY_CONTENT
)
),
0,
)
def test_delete_records_by_id(self):
domain_list = vdapi_get_domain_list(self.caller)
for domainname in domain_list:
provider = self._create_provide(domainname)
provider._authenticate()
provider._create_record(DUMMY_TYPE, DUMMY_NAME, DUMMY_CONTENT)
recl = provider._list_records(
rtype=DUMMY_TYPE, name=DUMMY_NAME, content=DUMMY_CONTENT
)
self.assertGreater(len(recl), 0)
provider._delete_record(identifier=recl[0].get("id"))
self.assertEqual(
len(
provider._list_records(
rtype=DUMMY_TYPE, name=DUMMY_NAME, content=DUMMY_CONTENT
)
),
0,
)
def test_delete_records_by_data(self):
domain_list = vdapi_get_domain_list(self.caller)
for domainname in domain_list:
provider = self._create_provide(domainname)
provider._authenticate()
provider._create_record(DUMMY_TYPE, DUMMY_NAME, DUMMY_CONTENT)
recl = provider._list_records(
rtype=DUMMY_TYPE, name=DUMMY_NAME, content=DUMMY_CONTENT
)
self.assertGreater(len(recl), 0)
provider._delete_record(None, DUMMY_TYPE, DUMMY_NAME, DUMMY_CONTENT)
self.assertEqual(
len(
provider._list_records(
rtype=DUMMY_TYPE, name=DUMMY_NAME, content=DUMMY_CONTENT
)
),
0,
)
def test_update_record(self):
domain_list = vdapi_get_domain_list(self.caller)
for domainname in domain_list:
provider = self._create_provide(domainname)
provider._authenticate()
provider._create_record(DUMMY_TYPE, DUMMY_NAME, DUMMY_CONTENT)
recl = provider._list_records(
rtype=DUMMY_TYPE, name=DUMMY_NAME, content=DUMMY_CONTENT
)
self.assertGreater(len(recl), 0)
provider._update_record(
identifier=recl[0].get("id"), content=DUMMY_CONTENT2
)
self.assertGreater(
len(
provider._list_records(
rtype=DUMMY_TYPE, name=DUMMY_NAME, content=DUMMY_CONTENT2
)
),
0,
)
# cleanup
provider._delete_record(None, DUMMY_TYPE, DUMMY_NAME, DUMMY_CONTENT2)
self.assertEqual(
len(
provider._list_records(
rtype=DUMMY_TYPE, name=DUMMY_NAME, content=DUMMY_CONTENT
)
),
0,
)
self.assertEqual(
len(
provider._list_records(
rtype=DUMMY_TYPE, name=DUMMY_NAME, content=DUMMY_CONTENT2
)
),
0,
)
unittest.main()
|
|
import sys
import logging
import html2text
from django.utils.safestring import mark_safe
from google_analytics_reporter.tracking import Event
from squeezemail.renderer import renderer
PY3 = sys.version_info > (3, 0)
import re
if PY3:
from urllib.parse import urlparse, urlencode, urlunparse, parse_qsl
else:
from urlparse import urlparse, urlunparse, parse_qsl
from urllib import urlencode
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template import Context, Template
from django.template.loader import render_to_string
from django.utils.functional import cached_property
from django.core.mail import EmailMultiAlternatives
# from django.utils.html import strip_tags
from django.contrib.sites.models import Site
try:
# Django >= 1.9
from django.utils.module_loading import import_module
except ImportError:
from django.utils.importlib import import_module
from content_editor.contents import contents_for_item
from content_editor.renderer import PluginRenderer
from .utils import get_token_for_email
from . import SQUEEZE_CELERY_EMAIL_CHUNK_SIZE, SQUEEZE_DEFAULT_HTTP_PROTOCOL, SQUEEZE_DEFAULT_FROM_EMAIL
from .tasks import send_drip, process_sent
from .models import SendDrip, Subscriber, RichText, Image
from .utils import chunked
logger = logging.getLogger(__name__)
HREF_RE = re.compile(r'href\="((\{\{[^}]+\}\}|[^"><])+)"')
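# HREF_RE captures the value of every href="..." attribute, including Django
# template variables, e.g. href="http://example.com/x" or
# href="{{ unsubscribe_link }}"; replace_urls() below rewrites each match.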
def configured_message_classes():
conf_dict = getattr(settings, 'DRIP_MESSAGE_CLASSES', {})
if 'default' not in conf_dict:
conf_dict['default'] = 'squeezemail.handlers.DripMessage'
return conf_dict
def message_class_for(name):
path = configured_message_classes()[name]
mod_name, klass_name = path.rsplit('.', 1)
mod = import_module(mod_name)
klass = getattr(mod, klass_name)
return klass
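# Illustrative (hedged) settings example: DRIP_MESSAGE_CLASSES maps a message
# class name stored on a Drip to a dotted import path, e.g.
#     DRIP_MESSAGE_CLASSES = {
#         'default': 'squeezemail.handlers.DripMessage',
#         'plain': 'myapp.handlers.PlainDripMessage',  # hypothetical path
#     }
# message_class_for('plain') would then import and return PlainDripMessage.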
class DripMessage(object):
def __init__(self, drip, subscriber):
self.drip = drip
self.subscriber = subscriber
self._context = None
self._subject = None
self._body = None
self._plain = None
self._message = None
self._token = None
@cached_property
def from_email(self):
if self.drip.from_email_name and self.drip.from_email:
from_ = "%s <%s>" % (self.drip.from_email_name, self.drip.from_email)
elif self.drip.from_email and not self.drip.from_email_name:
from_ = self.drip.from_email
else:
from_ = SQUEEZE_DEFAULT_FROM_EMAIL
return from_
@property
def from_email_name(self):
return self.drip.from_email_name
def render_body(self):
# import the custom renderer and do renderer.plugins() instead
contents = contents_for_item(self.drip, plugins=[Image, RichText])
# assert False, contents['body']
body = renderer.render(contents['body']) #TODO: get split test feincms content here
return body
@property
def context(self):
if not self._context:
token = self.get_email_token()
context = Context({
'subscriber': self.subscriber,
'user': self.subscriber.user,
'drip': self.drip,
'token': token,
'tracking_pixel': self.tracking_pixel,
'unsubscribe_link': self.unsubscribe_link
})
context['content'] = mark_safe(self.replace_urls(Template(self.render_body()).render(context)))
self._context = context
return self._context
@cached_property
def subject_model(self):
return self.drip.choose_split_test_subject
@property
def subject(self):
if not self._subject:
self._subject = Template(self.subject_model.text).render(self.context)
return self._subject
@property
def body(self):
if not self._body:
self._body = render_to_string('squeezemail/email/body.html', self.context)
return self._body
@property
def plain(self):
if not self._plain:
h = html2text.HTML2Text()
h.ignore_images = True
self._plain = render_to_string('squeezemail/email/plain.txt', self.context)
return self._plain
@property
def message(self):
if not self._message:
self._message = EmailMultiAlternatives(self.subject, self.plain, self.from_email, [self.subscriber.email])
self._message.attach_alternative(self.body, 'text/html')
return self._message
def replace_urls(self, content):
offset = 0
for match in HREF_RE.finditer(content):
link = match.group(1)
replacelink = self.encode_url(link)
content = ''.join((content[:match.start(1)+offset], replacelink, content[match.end(1)+offset:]))
offset += len(replacelink) - len(match.group(1))
return content
def encode_url(self, raw_url):
"""
Returns a replacement link
Example of how this works:
Here's an ordinary link in your email. There may be many of these in each email.
original_url = http://anydomain.com/?just=athingwedontcareabout&but=letsmakeitinteresting
Turns into:
        new_url = http://YOURDOMAIN.com/squeezemail/link/?sq_subscriber_id=1&sq_drip_id=1&sq_token=123456789&sq_subject_id=1&just=athingwedontcareabout&but=letsmakeitinteresting&sq_target=http://anydomain.com/
        When someone follows the new_url link, it hits our view at /link/, which re-creates the original URL but also passes subscriber_id, drip_id, etc.
        with it in case it's needed, and then redirects to the target URL with the params. This is also where we send some stats to Google Analytics.
"""
site_domain = self.current_domain
parsed_url = urlparse(raw_url)
        if parsed_url.netloc == '':
# stick the scheme and netloc in the url if it's missing. This is so urls aren't just '/sublocation/'
parsed_url = parsed_url._replace(scheme=SQUEEZE_DEFAULT_HTTP_PROTOCOL, netloc=site_domain)
url_params = dict(parse_qsl(parsed_url.query))
target_url = parsed_url._replace(query='')
# where the user will be redirected to after clicking this link
url_params['sq_target'] = urlunparse(target_url)
# add subscriber_id, drip_id, token, subject_id to the params
url_params.update(self.extra_url_params)
parsed_url_list = list(parsed_url)
parsed_url_list[4] = urlencode(url_params)
new_url = urlparse('')._replace(
scheme=SQUEEZE_DEFAULT_HTTP_PROTOCOL,
netloc=site_domain,
path=reverse('squeezemail:link'),
query=parsed_url_list[4]
)
#rebuild new url
new_url_with_extra_params = urlunparse(new_url)
return new_url_with_extra_params
@cached_property
def extra_url_params(self):
# These params will be inserted in every link in the content of the email.
# Useful for tracking clicks and knowing who clicked it on which drip
params = {
'sq_subscriber_id': self.subscriber.id,
'sq_drip_id': self.drip.id,
'sq_token': self.get_email_token(),
'sq_subject_id': self.subject_model.id
}
return params
def get_email_token(self):
if not self._token:
self._token = str(get_token_for_email(self.subscriber.email))
return self._token
@cached_property
def current_domain(self):
return Site.objects.get_current().domain
@cached_property
def tracking_pixel(self):
p = urlparse('')._replace(
scheme=SQUEEZE_DEFAULT_HTTP_PROTOCOL,
netloc=self.current_domain,
path=reverse('squeezemail:tracking_pixel'),
query=urlencode(self.extra_url_params)
)
return mark_safe(urlunparse(p))
@cached_property
def unsubscribe_link(self):
url_params = {'sq_email': self.subscriber.email}
url_params.update(self.extra_url_params)
l = urlparse('')._replace(
scheme=SQUEEZE_DEFAULT_HTTP_PROTOCOL,
netloc=self.current_domain,
path=reverse('squeezemail:unsubscribe'),
query=urlencode(url_params)
)
return mark_safe(urlunparse(l))
class HandleDrip(object):
"""
A base object for defining a Drip.
You can extend this manually and set it as your default drip
handler class by setting SQUEEZE_DRIP_HANDLER in your settings.
(e.g. SQUEEZE_DRIP_HANDLER = 'myapp.handlers.MyHandleDrip')
"""
def __init__(self, *args, **kwargs):
self.drip_model = kwargs.get('drip_model')
self._queryset = kwargs.get('queryset')
self.step = kwargs.get('step', None)
def get_queryset(self):
if not self._queryset:
self._queryset = self.queryset()
return self._queryset
def queryset(self):
"""
If there was no queryset passed in, our queryset is all active subscribers with our custom
queryset rules applied to it (if the drip has any).
"""
base_qs = Subscriber.objects.filter(is_active=True)
qs = self.drip_model.apply_queryset_rules(base_qs).distinct()
return qs
def apply_queryset_rules(self):
return
def step_run(self):
next_step = self.step.get_next_step()
self.prune()
count = self.send(next_step=next_step)
return count
def campaign_run(self):
return
def broadcast_run(self):
self.create_unsent_drips()
results = self.create_tasks_for_unsent_drips()
return results
def prune(self):
"""
Do an exclude for all Users who have a SendDrip already.
"""
target_subscriber_ids = self.get_queryset().values_list('id', flat=True)
exclude_subscriber_ids = SendDrip.objects.filter(
drip_id=self.drip_model.id,
subscriber_id__in=target_subscriber_ids
).values_list('subscriber_id', flat=True)
self._queryset = self.get_queryset().exclude(id__in=exclude_subscriber_ids)
return self._queryset
def send(self, next_step=None):
"""
Send the message to each subscriber on the queryset.
Create SendDrip for each subscriber that gets a message.
Returns count of created SendDrips.
"""
MessageClass = message_class_for(self.drip_model.message_class)
count = 0
for subscriber in self.get_queryset():
message_instance = MessageClass(self.drip_model, subscriber)
try:
# Make sure they haven't received this drip just before sending.
SendDrip.objects.get(drip_id=self.drip_model.id, subscriber_id=subscriber.id, sent=True)
continue
except SendDrip.DoesNotExist:
result = message_instance.message.send()
if result:
SendDrip.objects.create(drip=self.drip_model, subscriber=subscriber, sent=True)
if next_step:
subscriber.move_to_step(next_step.id)
# send a 'sent' event to google analytics
process_sent.delay(
user_id=subscriber.user_id,
subject=message_instance.subject,
drip_id=self.drip_model.id,
drip_name=self.drip_model.name,
source='step',
split='main'
)
count += 1
except Exception as e:
logging.error("Failed to send drip %s to subscriber %s: %s" % (str(self.drip_model.id), str(subscriber), e))
return count
def create_unsent_drips(self):
"""
        Create an unsent SendDrip object for every subscriber_id in the queryset.
Used for huge sendouts like broadcasts.
"""
drip_id = self.drip_model.id
subscriber_id_list = self.get_queryset().values_list('id', flat=True)
for subscriber_id in subscriber_id_list:
try:
sentdrip = SendDrip.objects.create(drip_id=drip_id, subscriber_id=subscriber_id, sent=False)
except Exception as e:
logger.warning("Failed to create SendDrip for subscriber_id %i & drip_id %i. (%r)", subscriber_id, drip_id, e)
return
def create_tasks_for_unsent_drips(self, **kwargs):
"""
Grab all of the SendDrips that haven't been sent yet, and queue up some celery tasks for them.
"""
result_tasks = []
kwargs['drip_id'] = self.drip_model.id
# Get a fresh list of all user IDs that haven't received this drip yet.
subscriber_id_list = SendDrip.objects.filter(drip_id=self.drip_model.id, sent=False).values_list('subscriber_id', flat=True)
chunk_size = SQUEEZE_CELERY_EMAIL_CHUNK_SIZE
for chunk in chunked(subscriber_id_list, chunk_size):
result_tasks.append(send_drip.delay(chunk, **kwargs))
logging.info('drips queued')
return result_tasks
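# --- Usage sketch (illustrative only, not part of the original module) ---
# Assuming the constructor kwargs read in __init__ above, a drip handler could be
# driven roughly like this (the class name and the Drip/step instances are
# assumptions taken from the surrounding code, not confirmed names):
#
#     handler = HandleDrip(drip_model=drip)        # drip: a Drip model instance
#     task_results = handler.broadcast_run()       # creates unsent SendDrips, queues celery chunks
#
#     step_handler = HandleDrip(drip_model=drip, step=step)
#     sent_count = step_handler.step_run()         # prunes already-sent subscribers, then sends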
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
# Wizard for running the mccSearch program
'''
import os
import networkx as nx
# mccSearch modules
from mccSearch import *
def main():
CEGraph = nx.DiGraph()
prunedGraph = nx.DiGraph()
MCCList = []
MCSList = []
MCSMCCNodesList = []
allMCSsList = []
allCETRMMList = []
DIRS = {}
# DIRS={
# mainDirStr= "/directory/to/where/to/store/outputs"
# TRMMdirName = "/directory/to/the/TRMM/netCDF/files"
# CEoriDirName = "/directory/to/the/MERG/netCDF/files"
# }
preprocessing = ''
rawMERG = ''
print("Running MCCSearch ..... \n")
# This is where data created will be stored
DIRS['mainDirStr'] = input("> Please enter working directory: \n")
# preprocessing = raw_input ("> Do you need to preprocess the MERG files? [y/n]: \n")
# while preprocessing.lower() != 'n':
# if preprocessing.lower() == 'y':
# #get location for raw files
# rawMERG = raw_input("> Please enter the directory to the RAW MERG (.Z) files: \n")
# #run preprocessing
# mccSearch.preprocessingMERG(rawMERG)
# continue
# elif preprocessing.lower() == 'n' :
# pass
# else:
# print("Error! Invalid choice.")
# preprocessing = raw_input ("> Do you need to preprocess the MERG files? [y/n]: \n")
# get the location of the MERG and TRMM data
DIRS['CEoriDirName'] = input(
"> Please enter the directory to the MERG netCDF files: \n")
try:
if not os.path.exists(DIRS['CEoriDirName']):
print("Error! MERG invalid path!")
DIRS['CEoriDirName'] = input(
"> Please enter the directory to the MERG netCDF files: \n")
except BaseException:
print("...")
DIRS['TRMMdirName'] = input(
"> Please enter the location to the raw TRMM netCDF files: \n")
try:
if not os.path.exists(DIRS['TRMMdirName']):
print("Error: TRMM invalid path!")
DIRS['TRMMdirName'] = input(
"> Please enter the location to the raw TRMM netCDF files: \n")
except BaseException:
pass
# get the dates for analysis
startDateTime = input(
"> Please enter the start date and time yyyymmddhr: \n")
# check validity of time
while validDate(startDateTime) == 0:
print("Invalid time entered for startDateTime!")
startDateTime = input(
"> Please enter the start date and time yyyymmddhr: \n")
endDateTime = input("> Please enter the end date and time yyyymmddhr: \n")
while validDate(endDateTime) == 0:
print("Invalid time entered for endDateTime!")
endDateTime = input(
"> Please enter the end date and time yyyymmddhr: \n")
    # check if all the files exist in the MERG and TRMM directories entered
test, _ = mccSearch.checkForFiles(
startDateTime, endDateTime, DIRS['TRMMdirName'], 2)
if not test:
print("Error with files in the original TRMM directory entered. Please check your files before restarting.")
return
test, filelist = mccSearch.checkForFiles(
startDateTime, endDateTime, DIRS['CEoriDirName'], 1)
if not test:
print("Error with files in the original MERG directory entered. Please check your files before restarting.")
return
# create main directory and file structure for storing intel
mccSearch.createMainDirectory(DIRS['mainDirStr'])
TRMMCEdirName = DIRS['mainDirStr'] + '/TRMMnetcdfCEs'
CEdirName = DIRS['mainDirStr'] + '/MERGnetcdfCEs'
# for doing some postprocessing with the clipped datasets instead of
# running the full program, e.g.
postprocessing = input("> Do you wish to postprocess data? [y/n] \n")
while postprocessing.lower() != 'n':
if postprocessing.lower() == 'y':
option = postProcessingplotMenu(DIRS)
return
elif postprocessing.lower() == 'n':
pass
else:
print("\n Invalid option.")
postprocessing = input(
"> Do you wish to postprocess data? [y/n] \n")
# -------------------------------------------------------------------------------------------------
# Getting started. Make it so number one!
print(("-" * 80))
print("\t\t Starting the MCCSearch Analysis.")
print(("-" * 80))
print("\n -------------- Reading MERG and TRMM Data ----------")
mergImgs, timeList = mccSearch.readMergData(DIRS['CEoriDirName'], filelist)
print("\n -------------- findCloudElements ----------")
CEGraph = mccSearch.findCloudElements(
mergImgs, timeList, DIRS['TRMMdirName'])
    # if the TRMMdirName wasn't entered for whatever reason, you can still get the TRMM data this way
# CEGraph = mccSearch.findCloudElements(mergImgs,timeList)
# allCETRMMList=mccSearch.findPrecipRate(DIRS['TRMMdirName'],timeList)
# ----------------------------------------------------------------------------------------------
print("\n -------------- findCloudClusters ----------")
prunedGraph = mccSearch.findCloudClusters(CEGraph)
print("\n -------------- findMCCs ----------")
MCCList, MCSList = mccSearch.findMCC(prunedGraph)
# now ready to perform various calculations/metrics
print(("-" * 80))
print("\n -------------- METRICS ----------")
print(("-" * 80))
    # some calculations/metrics that work
print(("creating the MCC userfile ", mccSearch.createTextFile(MCCList, 1)))
print(("creating the MCS userfile ", mccSearch.createTextFile(MCSList, 2)))
plotMenu(MCCList, MCSList)
# Let's get outta here! Engage!
print(("-" * 80))
#*************************************************************************
def plotMenu(MCCList, MCSList):
'''
Purpose:: The flow of plots for the user to choose
    Input:: MCCList: a list of dictionaries representing a list of nodes in the MCC
            MCSList: a list of dictionaries representing a list of nodes in the MCS
Output:: None
'''
option = displayPlotMenu()
while option != 0:
try:
if option == 1:
print(
"Generating Accumulated Rainfall from TRMM for the entire period ...\n")
mccSearch.plotAccTRMM(MCSList)
if option == 2:
startDateTime = input(
"> Please enter the start date and time yyyy-mm-dd_hr:mm:ss format: \n")
endDateTime = input(
"> Please enter the end date and time yyyy-mm-dd_hr:mm:ss format: \n")
print(("Generating acccumulated rainfall between ",
startDateTime, " and ", endDateTime, " ... \n"))
mccSearch.plotAccuInTimeRange(startDateTime, endDateTime)
if option == 3:
print("Generating area distribution plot ... \n")
mccSearch.displaySize(MCCList)
if option == 4:
print("Generating precipitation and area distribution plot ... \n")
mccSearch.displayPrecip(MCCList)
if option == 5:
try:
print("Generating histogram of precipitation for each time ... \n")
mccSearch.plotPrecipHistograms(MCCList)
except BaseException:
pass
except BaseException:
print("Invalid option. Please try again, enter 0 to exit \n")
option = displayPlotMenu()
return
#*************************************************************************
def displayPlotMenu():
'''
Purpose:: Display the plot Menu Options
Input:: None
Output:: option: an integer representing the choice of the user
'''
print("**************** PLOTS ************** \n")
print("0. Exit \n")
print("1. Accumulated TRMM precipitation \n")
print("2. Accumulated TRMM precipitation between dates \n")
print("3. Area distribution of the system over time \n")
print("4. Precipitation and area distribution of the system \n")
print("5. Histogram distribution of the rainfall in the area \n")
option = int(input("> Please enter your option for plots: \n"))
return option
#*************************************************************************
def displayPostprocessingPlotMenu():
'''
Purpose:: Display the plot Menu Options
Input:: None
Output:: option: an integer representing the choice of the user
'''
print("**************** POST PROCESSING PLOTS ************** \n")
print("0. Exit \n")
print("1. Map plots of the original MERG data \n")
print("2. Map plots of the cloud elements using IR data \n")
print("3. Map plots of the cloud elements rainfall accumulations using TRMM data \n")
#print("4. Accumulated TRMM precipitation \n")
#print("5. Accumulated TRMM precipitation between dates \n")
option = int(input("> Please enter your option for plots: \n"))
return option
#*************************************************************************
def postProcessingplotMenu(DIRS):
'''
Purpose:: The flow of plots for the user to choose
Input:: DIRS a dictionary of directories
# DIRS={
# mainDirStr= "/directory/to/where/to/store/outputs"
# TRMMdirName = "/directory/to/the/TRMM/netCDF/files"
# CEoriDirName = "/directory/to/the/MERG/netCDF/files"
# }
Output:: None
'''
TRMMCEdirName = DIRS['mainDirStr'] + '/TRMMnetcdfCEs'
CEdirName = DIRS['mainDirStr'] + '/MERGnetcdfCEs'
option = displayPostprocessingPlotMenu()
while option != 0:
try:
if option == 1:
print("Generating images from the original MERG dataset ... \n")
mccSearch.postProcessingNetCDF(1, DIRS['CEoriDirName'])
if option == 2:
print(
"Generating images from the cloud elements using MERG IR data ... \n")
mccSearch.postProcessingNetCDF(2, CEdirName)
if option == 3:
print(
"Generating precipitation accumulation images from the cloud elements using TRMM data ... \n")
mccSearch.postProcessingNetCDF(3, TRMMCEdirName)
# if option == 4:
# print("Generating Accumulated TRMM rainfall from cloud elements for each MCS ... \n")
# featureType = int(raw_input("> Please enter type of MCS MCC-1 or MCS-2: \n"))
# if featureType == 1:
# filename = DIRS['mainDirStr']+'/textFiles/MCCPostProcessing.txt'
# try:
# if os.path.isfile(filename):
# #read each line as a list
# mccSearch.plotAccTRMM()
# if option == 5:
# mccSearch.plotAccuInTimeRange()
except BaseException:
print("Invalid option, please try again")
option = displayPostprocessingPlotMenu()
return
#*************************************************************************
def validDate(dataString):
    '''
    Purpose:: Check that a string in yyyymmddhr format represents a valid date and hour
    Input:: dataString: a string in yyyymmddhr format
    Output:: 1 if the date/time is valid, 0 otherwise
    '''
    if len(dataString) != 10:
print("invalid time entered")
return 0
yr = int(dataString[:4])
mm = int(dataString[4:6])
dd = int(dataString[6:8])
hh = int(dataString[-2:])
if mm < 1 or mm > 12:
return 0
elif hh < 0 or hh > 23:
return 0
elif (dd < 0 or dd > 30) and (mm == 4 or mm == 6 or mm == 9 or mm == 11):
return 0
elif (dd < 0 or dd > 31) and (mm == 1 or mm == 3 or mm == 5 or mm == 7 or mm == 8 or mm == 10):
return 0
elif dd > 28 and mm == 2 and (yr % 4) != 0:
return 0
elif (yr % 4) == 0 and mm == 2 and dd > 29:
return 0
elif dd > 31 and mm == 12:
return 0
else:
return 1
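# Illustrative behaviour of validDate (assumed from the parsing above, format yyyymmddhr):
#   validDate("2006091100") -> 1   # 2006-09-11, hour 00
#   validDate("2006133100") -> 0   # month 13 is invalid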
#*************************************************************************
if __name__ == '__main__':
    main()
|
|
from jsonrpc import ServiceProxy
import sys
import string
import getpass
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
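# Example invocation (the script filename below is illustrative only):
#   python wallet_rpc.py getinfo
#   python wallet_rpc.py getbalance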
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:51473")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:51473")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "encryptwallet":
try:
pwd = getpass.getpass(prompt="Enter passphrase: ")
pwd2 = getpass.getpass(prompt="Repeat passphrase: ")
if pwd == pwd2:
access.encryptwallet(pwd)
print "\n---Wallet encrypted. Server stopping, restart to run with encrypted wallet---\n"
else:
print "\n---Passphrases do not match---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
			print access.getwork(data)
except:
			print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = getpass.getpass(prompt="Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = getpass.getpass(prompt="Enter old wallet passphrase: ")
pwd2 = getpass.getpass(prompt="Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
|
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals, absolute_import
import pytest
import atomic_reactor.util
import docker.errors
from atomic_reactor.build import InsideBuilder, BuildResult
from atomic_reactor.source import get_source_instance_for
from atomic_reactor.util import df_parser, DockerfileImages
from tests.constants import (
LOCALHOST_REGISTRY, MOCK, SOURCE,
DOCKERFILE_OK_PATH, DOCKERFILE_MULTISTAGE_PATH,
DOCKERFILE_MULTISTAGE_SCRATCH_PATH, DOCKERFILE_MULTISTAGE_CUSTOM_PATH,
DOCKERFILE_MULTISTAGE_CUSTOM_BAD_PATH
)
from atomic_reactor.constants import CONTAINER_DOCKERPY_BUILD_METHOD
from osbs.utils import ImageName
from tests.util import requires_internet
from flexmock import flexmock
from textwrap import dedent
if MOCK:
from tests.docker_mock import mock_docker
# This stuff is used in tests; you need an internet connection and a registry
# running on port 5000, and it helps if you've pulled fedora:latest beforehand
git_base_repo = "fedora"
git_base_tag = "latest"
git_base_image = ImageName(registry=LOCALHOST_REGISTRY, repo="fedora", tag="latest")
with_all_sources = pytest.mark.parametrize('source_params', [
SOURCE,
{'provider': 'path', 'uri': 'file://' + DOCKERFILE_OK_PATH},
{'provider': 'path', 'uri': 'file://' + DOCKERFILE_MULTISTAGE_PATH},
{'provider': 'path', 'uri': 'file://' + DOCKERFILE_MULTISTAGE_SCRATCH_PATH},
{'provider': 'path', 'uri': 'file://' + DOCKERFILE_MULTISTAGE_CUSTOM_PATH},
])
default_build_method = CONTAINER_DOCKERPY_BUILD_METHOD
@requires_internet
def test_different_custom_base_images(tmpdir):
if MOCK:
mock_docker()
source_params = {'provider': 'path', 'uri': 'file://' + DOCKERFILE_MULTISTAGE_CUSTOM_BAD_PATH,
'tmpdir': str(tmpdir)}
s = get_source_instance_for(source_params)
with pytest.raises(NotImplementedError) as exc:
InsideBuilder(s, '')
message = "multiple different custom base images aren't allowed in Dockerfile"
assert message in str(exc.value)
@requires_internet
@with_all_sources
def test_inspect_built_image(tmpdir, source_params):
provided_image = "test-build:test_tag"
if MOCK:
mock_docker(provided_image_repotags=provided_image)
flexmock(InsideBuilder, ensure_is_built=None)
source_params.update({'tmpdir': str(tmpdir)})
s = get_source_instance_for(source_params)
b = InsideBuilder(s, provided_image)
b.tasker.build_method = default_build_method
built_inspect = b.inspect_built_image()
assert built_inspect is not None
assert built_inspect["Id"] is not None
@requires_internet
@with_all_sources
@pytest.mark.parametrize('insecure', [True, False])
@pytest.mark.parametrize('parents_pulled', [True, False])
def test_parent_image_inspect(insecure, parents_pulled, tmpdir, source_params):
provided_image = "test-build:test_tag"
if MOCK:
mock_docker(provided_image_repotags=provided_image)
source_params.update({'tmpdir': str(tmpdir)})
s = get_source_instance_for(source_params)
b = InsideBuilder(s, provided_image)
b.tasker.build_method = default_build_method
b.parents_pulled = parents_pulled
provided_imagename = ImageName.parse(provided_image)
registry_name = "registry.example.com"
provided_imagename.registry = registry_name
b.pull_registries = {registry_name: {'insecure': insecure, 'dockercfg_path': str(tmpdir)}}
if not parents_pulled:
(flexmock(atomic_reactor.util)
.should_receive('get_inspect_for_image')
.with_args(provided_imagename, provided_imagename.registry, insecure, str(tmpdir))
.and_return({'Id': 123}))
built_inspect = b.parent_image_inspect(provided_imagename)
assert built_inspect is not None
assert built_inspect["Id"] is not None
@requires_internet
@with_all_sources
@pytest.mark.parametrize('parents_pulled', [True, False])
@pytest.mark.parametrize('insecure', [True, False])
@pytest.mark.parametrize('base_exist', [True, False])
def test_base_image_inspect(tmpdir, source_params, parents_pulled, insecure, base_exist):
if MOCK:
mock_docker()
source_params.update({'tmpdir': str(tmpdir)})
s = get_source_instance_for(source_params)
b = InsideBuilder(s, '')
b.tasker.build_method = default_build_method
b.parents_pulled = parents_pulled
if b.dockerfile_images.base_from_scratch:
base_exist = True
registry_name = "registry.example.com"
original_parents = b.dockerfile_images.original_parents
new_parents = []
for parent in original_parents:
if parent == 'scratch':
new_parents.append(parent)
else:
mod_parent = ImageName.parse(parent)
mod_parent.registry = registry_name
new_parents.append(mod_parent.to_str())
b.dockerfile_images = DockerfileImages(new_parents)
b.pull_registries = {registry_name: {'insecure': insecure, 'dockercfg_path': str(tmpdir)}}
if base_exist:
if b.dockerfile_images.base_from_scratch:
built_inspect = b.base_image_inspect
assert built_inspect == {}
else:
if not parents_pulled:
(flexmock(atomic_reactor.util)
.should_receive('get_inspect_for_image')
.with_args(b.dockerfile_images.base_image, b.dockerfile_images.base_image.registry,
insecure, str(tmpdir))
.and_return({'Id': 123}))
built_inspect = b.base_image_inspect
assert built_inspect is not None
assert built_inspect["Id"] is not None
else:
if parents_pulled or b.dockerfile_images.custom_base_image:
response = flexmock(content="not found", status_code=404)
(flexmock(docker.APIClient)
.should_receive('inspect_image')
.and_raise(docker.errors.NotFound, "xyz", response))
with pytest.raises(KeyError):
b.base_image_inspect # pylint: disable=pointless-statement; is a property
else:
(flexmock(atomic_reactor.util)
.should_receive('get_inspect_for_image')
.and_raise(NotImplementedError))
with pytest.raises(NotImplementedError):
b.base_image_inspect # pylint: disable=pointless-statement; is a property
@requires_internet
@with_all_sources
@pytest.mark.parametrize(('image', 'will_raise'), [
(
"buildroot-fedora:latest",
False,
),
(
"non-existing",
True,
),
])
def test_get_base_image_info(tmpdir, source_params, image, will_raise):
if DOCKERFILE_MULTISTAGE_CUSTOM_PATH in source_params['uri']:
return
if MOCK:
mock_docker(provided_image_repotags=image)
source_params.update({'tmpdir': str(tmpdir)})
s = get_source_instance_for(source_params)
b = InsideBuilder(s, image)
b.tasker.build_method = default_build_method
if b.dockerfile_images.base_from_scratch:
will_raise = False
if will_raise:
with pytest.raises(Exception):
b.get_base_image_info()
else:
built_inspect = b.get_base_image_info()
if b.dockerfile_images.base_from_scratch:
assert built_inspect is None
else:
assert built_inspect is not None
assert built_inspect["Id"] is not None
assert built_inspect["RepoTags"] is not None
def test_no_base_image(tmpdir):
if MOCK:
mock_docker()
source = {'provider': 'path', 'uri': 'file://' + DOCKERFILE_OK_PATH, 'tmpdir': str(tmpdir)}
b = InsideBuilder(get_source_instance_for(source), 'built-img')
dfp = df_parser(str(tmpdir))
dfp.content = "# no FROM\nADD spam /eggs"
with pytest.raises(RuntimeError) as exc:
b.set_df_path(str(tmpdir))
assert "no base image specified" in str(exc.value)
def test_copy_from_is_blocked(tmpdir):
"""test when user has specified COPY --from=image (instead of builder)"""
dfp = df_parser(str(tmpdir))
if MOCK:
mock_docker()
source = {'provider': 'path', 'uri': 'file://' + str(tmpdir), 'tmpdir': str(tmpdir)}
dfp.content = dedent("""\
FROM monty AS vikings
FROM python
COPY --from=vikings /spam/eggs /bin/eggs
COPY --from=0 /spam/eggs /bin/eggs
COPY src dest
""")
# init calls set_df_path, which should not raise an error:
InsideBuilder(get_source_instance_for(source), 'built-img')
dfp.content = dedent("""\
FROM monty as vikings
FROM python
# using a stage name we haven't seen should break:
COPY --from=notvikings /spam/eggs /bin/eggs
""")
with pytest.raises(RuntimeError) as exc_info:
InsideBuilder(get_source_instance_for(source), 'built-img') # calls set_df_path at init
assert "FROM notvikings AS source" in str(exc_info.value)
dfp.content = dedent("""\
FROM monty as vikings
# using an index we haven't seen should break:
COPY --from=5 /spam/eggs /bin/eggs
""")
with pytest.raises(RuntimeError) as exc_info:
InsideBuilder(get_source_instance_for(source), 'built-img') # calls set_df_path at init
assert "COPY --from=5" in str(exc_info.value)
@requires_internet
@with_all_sources
@pytest.mark.parametrize('is_built', [
True,
False,
])
def test_ensure_built(tmpdir, source_params, is_built):
if MOCK:
mock_docker()
source_params.update({'tmpdir': str(tmpdir)})
s = get_source_instance_for(source_params)
b = InsideBuilder(s, '')
b.is_built = is_built
if is_built:
assert b.ensure_is_built() is None
with pytest.raises(Exception):
b.ensure_not_built()
else:
assert b.ensure_not_built() is None
with pytest.raises(Exception):
b.ensure_is_built()
@requires_internet
@with_all_sources
@pytest.mark.parametrize(('image', 'will_raise'), [
(
"buildroot-fedora:latest",
False,
),
(
"non-existing",
True,
),
])
def test_get_image_built_info(tmpdir, source_params, image, will_raise):
if MOCK:
mock_docker(provided_image_repotags=image)
source_params.update({'tmpdir': str(tmpdir)})
s = get_source_instance_for(source_params)
b = InsideBuilder(s, image)
b.tasker.build_method = default_build_method
if will_raise:
with pytest.raises(Exception):
b.get_built_image_info()
else:
b.get_built_image_info()
def test_build_result():
with pytest.raises(AssertionError):
BuildResult(fail_reason='it happens', image_id='spam')
with pytest.raises(AssertionError):
BuildResult(fail_reason='', image_id='spam')
with pytest.raises(AssertionError):
BuildResult(fail_reason='it happens', oci_image_path='/somewhere')
with pytest.raises(AssertionError):
BuildResult(image_id='spam', oci_image_path='/somewhere')
with pytest.raises(AssertionError):
BuildResult(image_id='spam', fail_reason='it happens', oci_image_path='/somewhere')
assert BuildResult(fail_reason='it happens').is_failed()
assert not BuildResult(image_id='spam').is_failed()
assert BuildResult(image_id='spam', logs=list('logs')).logs == list('logs')
assert BuildResult(fail_reason='it happens').fail_reason == 'it happens'
assert BuildResult(image_id='spam').image_id == 'spam'
assert BuildResult(image_id='spam', annotations={'ham': 'mah'}).annotations == {'ham': 'mah'}
assert BuildResult(image_id='spam', labels={'ham': 'mah'}).labels == {'ham': 'mah'}
assert BuildResult(oci_image_path='/somewhere').oci_image_path == '/somewhere'
assert BuildResult(image_id='spam').is_image_available()
assert not BuildResult(fail_reason='it happens').is_image_available()
assert not BuildResult.make_remote_image_result().is_image_available()
assert not BuildResult.make_remote_image_result().is_failed()
def test_parent_images_to_str(tmpdir, caplog):
if MOCK:
mock_docker()
source = {'provider': 'path', 'uri': 'file://' + DOCKERFILE_OK_PATH, 'tmpdir': str(tmpdir)}
b = InsideBuilder(get_source_instance_for(source), 'built-img')
b.dockerfile_images = DockerfileImages(['fedora:latest', 'bacon'])
b.dockerfile_images['fedora:latest'] = "spam"
expected_results = {
"fedora:latest": "spam:latest"
}
assert b.parent_images_to_str() == expected_results
assert "None in: base bacon:latest has parent None" in caplog.text
|
|
#!/usr/bin/env python
#
# Execute with sudo python defendARP.py
#
#
import time
import sys
import socket
import os
import re
import time
import logging
import subprocess
from optparse import OptionParser
###############################################################
##################### MAIN DEFENSE LOGIC ######################
def startDefense(ipAddress, my_ip, interface):
'''
ipAddress = IP to defend.
my_ip = IP on the device running the script.
interface = Network interface we are defending.
'''
# Remove given IP Address from local ARP table.
print("\nINITIALIZING...")
print("Removing %s from the ARP table.") % (ipAddress)
os.system("arp -d " + ipAddress)
print("OK.")
    # Ping the IP to establish its correct MAC address.
    # NOTE: The ARP table could still be poisoned if an attacker sends poison packets while we are pinging.
    print("Obtaining MAC address.")
    ping(ipAddress)
    mac = getMAC(ipAddress)
    print("MAC address found: %s" % mac)
# Confirm the physical address of target
valid = False
while valid != True:
print("Is %s the correct MAC address for %s (y/n)?") % (mac,ipAddress)
answer = str(raw_input("> "))
if answer == "N" or answer == "n":
print("If this is not the correct MAC then you have already been poisoned.")
print("You must start this script in a 'safe' state.")
sys.exit()
elif answer == "Y" or answer == "y":
print("OK.\n")
print("Monitoring your ARP table...\n")
goodMac = mac
valid = True
# Set monitor loop
monitor = True
while monitor == True:
mac = getMAC(ipAddress)
# Check to make sure our good MAC address matches the one in the ARP table
if goodMac != mac:
beep()
print("ARP POISONED!")
print("Spoofed IP: %s") % (ipAddress)
attackersIP = ''
attackersIP = getAttackerIP(ipAddress, mac)
print("Attacker is sending your traffic to %s") % (attackersIP)
print("%s's actual Physical Address: %s") % (ipAddress, goodMac)
print("Attacker's Physical Address: %s") % (mac)
print("Attempting to reset the correct Physical Address...")
deleteMAC(ipAddress)
# Re-ping the target to establish correct MAC
ping(ipAddress)
mac = getMAC(ipAddress)
print("ARP Table reset.")
print("\nMonitoring your ARP table...\n")
# Wait for 2 seconds
time.sleep(2)
###############################################################
###################### UTILITY FUNCTIONS ######################
# Grab the IP address on a specific interface
def getMyIp(interface):
    # This is ok because we validate the output (check to make sure it is an IP address). But could be used to exfiltrate info.
p = subprocess.Popen("ifconfig " + interface + " | grep 'inet addr' | awk -F: '{print $2}' | awk '{print $1}'", shell=True, stdout=subprocess.PIPE)
output = p.communicate()[0].rstrip()
try:
socket.inet_aton(output)
return output
except socket.error:
return ''
# play beep sound
def beep():
print("\a")
# Remove a IP/MAC pair from ARP table where IP == $1
def deleteMAC(ipAddress):
p = subprocess.Popen("arp -d " + ipAddress, shell=True, stdout=subprocess.PIPE)
output = p.communicate()[0].rstrip()
# Get duplicate IP from ARP table
def getAttackerIP(goodIP, mac):
command = "arp -a | grep '" + mac + "' | grep -v '(" + goodIP + ")' | awk '{print $2}'"
p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
output = p.communicate()[0].rstrip()
output = output.replace('(', '').replace(')', '')
return output
# Get MAC of IP from ARP Table
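# Example of the `arp -a` line this parses (typical Linux net-tools format, assumed):
#   host.local (192.168.1.10) at aa:bb:cc:dd:ee:ff [ether] on eth0
# The awk '{print $4}' filter then yields "aa:bb:cc:dd:ee:ff".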
def getMAC(ip):
p = subprocess.Popen("arp -a | grep '(" + ip + ")' | awk '{print $4}'", shell=True, stdout=subprocess.PIPE)
output = p.communicate()[0].rstrip()
return output
# Find the name of the interface we are going to use
def getInterface():
    # This is ok because there is no user input. Do NOT trust user input in this function. Use call() instead.
p = subprocess.Popen("ifconfig | grep 'Link encap' | awk '{print $1}' | head -1", shell=True, stdout=subprocess.PIPE)
output = p.communicate()[0].rstrip()
return output
def ping(ip):
# return == 1: OK.
# return == 0: Failed.
p = subprocess.Popen("ping -c 1 " + ip, shell=True, stdout=subprocess.PIPE)
output = p.communicate()[0].rstrip()
if "1 received" in output:
return 1
else:
return 0
# Print the header information
def printHeader():
print("SUMMARY")
print("\tDeletes the specified IP from the ARP table, then pings the IP to")
print("\textablish the correct Physical Address.")
print("\tThe script will then continually monitor the specified IP's entry in")
print("\tthe ARP table. If the IP's ARP table ever changes or is removed, the")
print("\tscript will BEEP and set the Physical Address back to the correct value.")
print("LICENSE")
print("\tCopyright 2013. Apache license 2.0")
print("SYNTAX")
print("\tUse: python defendARP.py -h for help.")
sys.exit()
# Print basic usage info
def printUsageInfo():
print("Usage:")
print("\tpython defendARP.py -a <ip_addr_to_monitor>")
print("\tpython defendARP.py --address=<ip_addr_to_monitor>")
print("Help:")
print("\tpython defendARP.py --help")
sys.exit()
def isUnix():
if os.name == "posix":
return 1
else:
return 0
def printOsRequirements():
print("ERROR:");
print("\tThis script only works on Unix systems.")
print("\tAn equivalent script for Windows can be found at https://github.com/alan-reed/ARP-Defense/blob/master/defendAPR.bat")
sys.exit()
###############################################################
############################ MAIN #############################
def main(argv):
# Check OS (must be unix)
if not isUnix():
printOsRequirements()
# Create option parser
parser = OptionParser()
# Define options
parser.add_option("-a", "--address", dest="ip_addr", help="IP address to monitor.")
parser.add_option("-f", "--interface", dest="interface", help="Interface to defend.")
parser.add_option("-i", "--info", action="store_true", dest="showInfo", help="Show the copyright and about information.")
(options, args) = parser.parse_args()
# Validate arguments
if options.showInfo == True:
printHeader()
if options.ip_addr == None:
printUsageInfo()
    if options.interface == None:
        interface = getInterface()
        my_ip = getMyIp(interface)
    else:
        interface = options.interface
        my_ip = getMyIp(interface)
if options.ip_addr == my_ip:
print("Error: Cannot monitor your own IP Address -- Try using the Default Gateway's IP.\n")
printUsageInfo()
# Make sure the IP address is reachable
res = ping(options.ip_addr)
if res == 0:
print("Address unreachable.");
sys.exit()
# Call main defense logic
startDefense(options.ip_addr, my_ip, interface)
if __name__ == "__main__":
main(sys.argv)
|
|
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from sqlalchemy.orm import joinedload
from neutron.common import constants
from neutron.common import utils
from neutron.db import agents_db
from neutron.db import model_base
from neutron.extensions import dhcpagentscheduler
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
AGENTS_SCHEDULER_OPTS = [
cfg.StrOpt('network_scheduler_driver',
default='neutron.scheduler.'
'dhcp_agent_scheduler.ChanceScheduler',
help=_('Driver to use for scheduling network to DHCP agent')),
cfg.BoolOpt('network_auto_schedule', default=True,
help=_('Allow auto scheduling networks to DHCP agent.')),
cfg.IntOpt('dhcp_agents_per_network', default=1,
help=_('Number of DHCP agents scheduled to host a network.')),
]
cfg.CONF.register_opts(AGENTS_SCHEDULER_OPTS)
class NetworkDhcpAgentBinding(model_base.BASEV2):
"""Represents binding between neutron networks and DHCP agents."""
network_id = sa.Column(sa.String(36),
sa.ForeignKey("networks.id", ondelete='CASCADE'),
primary_key=True)
dhcp_agent = orm.relation(agents_db.Agent)
dhcp_agent_id = sa.Column(sa.String(36),
sa.ForeignKey("agents.id",
ondelete='CASCADE'),
primary_key=True)
class AgentSchedulerDbMixin(agents_db.AgentDbMixin):
"""Common class for agent scheduler mixins."""
# agent notifiers to handle agent update operations;
# should be updated by plugins;
agent_notifiers = {
constants.AGENT_TYPE_DHCP: None,
constants.AGENT_TYPE_L3: None,
constants.AGENT_TYPE_LOADBALANCER: None,
}
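    # For example (illustrative only; the concrete notifier class depends on the
    # plugin), a plugin might register a DHCP notifier like:
    #   self.agent_notifiers[constants.AGENT_TYPE_DHCP] = (
    #       dhcp_rpc_agent_api.DhcpAgentNotifyAPI())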
@staticmethod
def is_eligible_agent(active, agent):
if active is None:
# filtering by activeness is disabled, all agents are eligible
return True
else:
# note(rpodolyaka): original behaviour is saved here: if active
# filter is set, only agents which are 'up'
# (i.e. have a recent heartbeat timestamp)
# are eligible, even if active is False
return not agents_db.AgentDbMixin.is_agent_down(
agent['heartbeat_timestamp'])
def update_agent(self, context, id, agent):
original_agent = self.get_agent(context, id)
result = super(AgentSchedulerDbMixin, self).update_agent(
context, id, agent)
agent_data = agent['agent']
agent_notifier = self.agent_notifiers.get(original_agent['agent_type'])
if (agent_notifier and
'admin_state_up' in agent_data and
original_agent['admin_state_up'] != agent_data['admin_state_up']):
agent_notifier.agent_updated(context,
agent_data['admin_state_up'],
original_agent['host'])
return result
class DhcpAgentSchedulerDbMixin(dhcpagentscheduler
.DhcpAgentSchedulerPluginBase,
AgentSchedulerDbMixin):
"""Mixin class to add DHCP agent scheduler extension to db_plugin_base_v2.
"""
network_scheduler = None
def get_dhcp_agents_hosting_networks(
self, context, network_ids, active=None):
if not network_ids:
return []
query = context.session.query(NetworkDhcpAgentBinding)
query = query.options(joinedload('dhcp_agent'))
if len(network_ids) == 1:
query = query.filter(
NetworkDhcpAgentBinding.network_id == network_ids[0])
        elif network_ids:
            query = query.filter(
                NetworkDhcpAgentBinding.network_id.in_(network_ids))
if active is not None:
query = (query.filter(agents_db.Agent.admin_state_up == active))
return [binding.dhcp_agent
for binding in query
if AgentSchedulerDbMixin.is_eligible_agent(active,
binding.dhcp_agent)]
def add_network_to_dhcp_agent(self, context, id, network_id):
self._get_network(context, network_id)
with context.session.begin(subtransactions=True):
agent_db = self._get_agent(context, id)
if (agent_db['agent_type'] != constants.AGENT_TYPE_DHCP or
not agent_db['admin_state_up']):
raise dhcpagentscheduler.InvalidDHCPAgent(id=id)
dhcp_agents = self.get_dhcp_agents_hosting_networks(
context, [network_id])
for dhcp_agent in dhcp_agents:
if id == dhcp_agent.id:
raise dhcpagentscheduler.NetworkHostedByDHCPAgent(
network_id=network_id, agent_id=id)
binding = NetworkDhcpAgentBinding()
binding.dhcp_agent_id = id
binding.network_id = network_id
context.session.add(binding)
dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP)
if dhcp_notifier:
dhcp_notifier.network_added_to_agent(
context, network_id, agent_db.host)
def remove_network_from_dhcp_agent(self, context, id, network_id):
agent = self._get_agent(context, id)
with context.session.begin(subtransactions=True):
try:
query = context.session.query(NetworkDhcpAgentBinding)
binding = query.filter(
NetworkDhcpAgentBinding.network_id == network_id,
NetworkDhcpAgentBinding.dhcp_agent_id == id).one()
except exc.NoResultFound:
raise dhcpagentscheduler.NetworkNotHostedByDhcpAgent(
network_id=network_id, agent_id=id)
# reserve the port, so the ip is reused on a subsequent add
device_id = utils.get_dhcp_agent_device_id(network_id,
agent['host'])
filters = dict(device_id=[device_id])
ports = self.get_ports(context, filters=filters)
for port in ports:
port['device_id'] = constants.DEVICE_ID_RESERVED_DHCP_PORT
self.update_port(context, port['id'], dict(port=port))
context.session.delete(binding)
dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP)
if dhcp_notifier:
dhcp_notifier.network_removed_from_agent(
context, network_id, agent.host)
def list_networks_on_dhcp_agent(self, context, id):
query = context.session.query(NetworkDhcpAgentBinding.network_id)
query = query.filter(NetworkDhcpAgentBinding.dhcp_agent_id == id)
net_ids = [item[0] for item in query]
if net_ids:
return {'networks':
self.get_networks(context, filters={'id': net_ids})}
else:
return {'networks': []}
def list_active_networks_on_active_dhcp_agent(self, context, host):
agent = self._get_agent_by_type_and_host(
context, constants.AGENT_TYPE_DHCP, host)
if not agent.admin_state_up:
return []
query = context.session.query(NetworkDhcpAgentBinding.network_id)
query = query.filter(NetworkDhcpAgentBinding.dhcp_agent_id == agent.id)
net_ids = [item[0] for item in query]
if net_ids:
return self.get_networks(
context,
filters={'id': net_ids, 'admin_state_up': [True]}
)
else:
return []
def list_dhcp_agents_hosting_network(self, context, network_id):
dhcp_agents = self.get_dhcp_agents_hosting_networks(
context, [network_id])
agent_ids = [dhcp_agent.id for dhcp_agent in dhcp_agents]
if agent_ids:
return {
'agents': self.get_agents(context, filters={'id': agent_ids})}
else:
return {'agents': []}
def schedule_network(self, context, created_network):
if self.network_scheduler:
return self.network_scheduler.schedule(
self, context, created_network)
def auto_schedule_networks(self, context, host):
if self.network_scheduler:
self.network_scheduler.auto_schedule_networks(self, context, host)
|
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Ironic test utilities."""
from oslo_utils import timeutils
from ironic.common import states
from ironic.db import api as db_api
from ironic.drivers import base as drivers_base
from ironic.objects import chassis
from ironic.objects import conductor
from ironic.objects import node
from ironic.objects import port
from ironic.objects import portgroup
from ironic.objects import volume_connector
from ironic.objects import volume_target
def get_test_ipmi_info():
return {
"ipmi_address": "1.2.3.4",
"ipmi_username": "admin",
"ipmi_password": "fake"
}
def get_test_ipmi_bridging_parameters():
return {
"ipmi_bridging": "dual",
"ipmi_local_address": "0x20",
"ipmi_transit_channel": "0",
"ipmi_transit_address": "0x82",
"ipmi_target_channel": "7",
"ipmi_target_address": "0x72"
}
def get_test_ssh_info(auth_type='password', virt_type='virsh'):
result = {
"ssh_address": "1.2.3.4",
"ssh_username": "admin",
"ssh_port": 22,
"ssh_virt_type": virt_type,
}
if 'password' == auth_type:
result['ssh_password'] = 'fake'
elif 'file' == auth_type:
result['ssh_key_filename'] = '/not/real/file'
elif 'key' == auth_type:
result['ssh_key_contents'] = '--BEGIN PRIVATE ...blah'
elif 'file_with_passphrase' == auth_type:
result['ssh_password'] = 'fake'
result['ssh_key_filename'] = '/not/real/file'
elif 'key_with_passphrase' == auth_type:
result['ssh_password'] = 'fake'
result['ssh_key_contents'] = '--BEGIN PRIVATE ...blah'
elif 'too_many' == auth_type:
result['ssh_key_contents'] = '--BEGIN PRIVATE ...blah'
result['ssh_key_filename'] = '/not/real/file'
else:
# No auth details (is invalid)
pass
return result
def get_test_pxe_driver_info():
return {
"deploy_kernel": "glance://deploy_kernel_uuid",
"deploy_ramdisk": "glance://deploy_ramdisk_uuid",
}
def get_test_pxe_driver_internal_info():
return {
"is_whole_disk_image": False,
}
def get_test_pxe_instance_info():
return {
"image_source": "glance://image_uuid",
"root_gb": 100,
}
def get_test_ilo_info():
return {
"ilo_address": "1.2.3.4",
"ilo_username": "admin",
"ilo_password": "fake",
}
def get_test_drac_info():
return {
"drac_address": "1.2.3.4",
"drac_port": 443,
"drac_path": "/wsman",
"drac_protocol": "https",
"drac_username": "admin",
"drac_password": "fake",
}
def get_test_irmc_info():
return {
"irmc_address": "1.2.3.4",
"irmc_username": "admin0",
"irmc_password": "fake0",
"irmc_port": 80,
"irmc_auth_method": "digest",
}
def get_test_agent_instance_info():
return {
'image_source': 'fake-image',
'image_url': 'http://image',
'image_checksum': 'checksum',
'image_disk_format': 'qcow2',
'image_container_format': 'bare',
}
def get_test_agent_driver_info():
return {
'deploy_kernel': 'glance://deploy_kernel_uuid',
'deploy_ramdisk': 'glance://deploy_ramdisk_uuid',
'ipmi_password': 'foo',
}
def get_test_agent_driver_internal_info():
return {
'agent_url': 'http://127.0.0.1/foo',
'is_whole_disk_image': True,
}
def get_test_snmp_info(**kw):
result = {
"snmp_driver": kw.get("snmp_driver", "teltronix"),
"snmp_address": kw.get("snmp_address", "1.2.3.4"),
"snmp_port": kw.get("snmp_port", "161"),
"snmp_outlet": kw.get("snmp_outlet", "1"),
"snmp_version": kw.get("snmp_version", "1")
}
if result["snmp_version"] in ("1", "2c"):
result["snmp_community"] = kw.get("snmp_community", "public")
elif result["snmp_version"] == "3":
result["snmp_security"] = kw.get("snmp_security", "public")
return result
def get_test_node(**kw):
properties = {
"cpu_arch": "x86_64",
"cpus": "8",
"local_gb": "10",
"memory_mb": "4096",
}
# NOTE(deva): API unit tests confirm that sensitive fields in instance_info
# and driver_info will get scrubbed from the API response
# but other fields (eg, 'foo') do not.
fake_instance_info = {
"configdrive": "TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQ=",
"image_url": "http://example.com/test_image_url",
"foo": "bar",
}
fake_driver_info = {
"foo": "bar",
"fake_password": "fakepass",
}
fake_internal_info = {
"private_state": "secret value"
}
result = {
'version': kw.get('version', node.Node.VERSION),
'id': kw.get('id', 123),
'name': kw.get('name', None),
'uuid': kw.get('uuid', '1be26c0b-03f2-4d2e-ae87-c02d7f33c123'),
'chassis_id': kw.get('chassis_id', None),
'conductor_affinity': kw.get('conductor_affinity', None),
'power_state': kw.get('power_state', states.NOSTATE),
'target_power_state': kw.get('target_power_state', states.NOSTATE),
'provision_state': kw.get('provision_state', states.AVAILABLE),
'target_provision_state': kw.get('target_provision_state',
states.NOSTATE),
'provision_updated_at': kw.get('provision_updated_at'),
'last_error': kw.get('last_error'),
'instance_uuid': kw.get('instance_uuid'),
'instance_info': kw.get('instance_info', fake_instance_info),
'driver': kw.get('driver', 'fake'),
'driver_info': kw.get('driver_info', fake_driver_info),
'driver_internal_info': kw.get('driver_internal_info',
fake_internal_info),
'clean_step': kw.get('clean_step'),
'properties': kw.get('properties', properties),
'reservation': kw.get('reservation'),
'maintenance': kw.get('maintenance', False),
'maintenance_reason': kw.get('maintenance_reason'),
'console_enabled': kw.get('console_enabled', False),
'extra': kw.get('extra', {}),
'updated_at': kw.get('updated_at'),
'created_at': kw.get('created_at'),
'inspection_finished_at': kw.get('inspection_finished_at'),
'inspection_started_at': kw.get('inspection_started_at'),
'raid_config': kw.get('raid_config'),
'target_raid_config': kw.get('target_raid_config'),
'tags': kw.get('tags', []),
'resource_class': kw.get('resource_class'),
}
for iface in drivers_base.ALL_INTERFACES:
name = '%s_interface' % iface
result[name] = kw.get(name)
return result
def create_test_node(**kw):
"""Create test node entry in DB and return Node DB object.
Function to be used to create test Node objects in the database.
:param kw: kwargs with overriding values for node's attributes.
:returns: Test Node DB object.
"""
node = get_test_node(**kw)
# Let DB generate ID if it isn't specified explicitly
if 'id' not in kw:
del node['id']
    # Creating a node with tags will raise an exception. If tags are not
    # specified explicitly, just delete the key.
if 'tags' not in kw:
del node['tags']
dbapi = db_api.get_instance()
return dbapi.create_node(node)
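# Illustrative test usage (assumed; the helpers below create related records):
#   node = create_test_node(driver='fake', maintenance=True)
#   port = create_test_port(node_id=node.id)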
def get_test_port(**kw):
return {
'id': kw.get('id', 987),
'version': kw.get('version', port.Port.VERSION),
'uuid': kw.get('uuid', '1be26c0b-03f2-4d2e-ae87-c02d7f33c781'),
'node_id': kw.get('node_id', 123),
'address': kw.get('address', '52:54:00:cf:2d:31'),
'extra': kw.get('extra', {}),
'created_at': kw.get('created_at'),
'updated_at': kw.get('updated_at'),
'local_link_connection': kw.get('local_link_connection',
{'switch_id': '0a:1b:2c:3d:4e:5f',
'port_id': 'Ethernet3/1',
'switch_info': 'switch1'}),
'portgroup_id': kw.get('portgroup_id'),
'pxe_enabled': kw.get('pxe_enabled', True),
'internal_info': kw.get('internal_info', {"bar": "buzz"}),
'physical_network': kw.get('physical_network'),
}
def create_test_port(**kw):
"""Create test port entry in DB and return Port DB object.
Function to be used to create test Port objects in the database.
:param kw: kwargs with overriding values for port's attributes.
:returns: Test Port DB object.
"""
port = get_test_port(**kw)
# Let DB generate ID if it isn't specified explicitly
if 'id' not in kw:
del port['id']
dbapi = db_api.get_instance()
return dbapi.create_port(port)
def get_test_volume_connector(**kw):
return {
'id': kw.get('id', 789),
'version': kw.get('version', volume_connector.VolumeConnector.VERSION),
'uuid': kw.get('uuid', '1be26c0b-03f2-4d2e-ae87-c02d7f33c781'),
'node_id': kw.get('node_id', 123),
'type': kw.get('type', 'iqn'),
'connector_id': kw.get('connector_id',
'iqn.2012-06.com.example:initiator'),
'extra': kw.get('extra', {}),
'created_at': kw.get('created_at'),
'updated_at': kw.get('updated_at'),
}
def create_test_volume_connector(**kw):
"""Create test connector entry in DB and return VolumeConnector DB object.
Function to be used to create test VolumeConnector objects in the database.
:param kw: kwargs with overriding values for connector's attributes.
:returns: Test VolumeConnector DB object.
"""
connector = get_test_volume_connector(**kw)
# Let DB generate ID if it isn't specified explicitly
if 'id' not in kw:
del connector['id']
dbapi = db_api.get_instance()
return dbapi.create_volume_connector(connector)
def get_test_volume_target(**kw):
fake_properties = {"target_iqn": "iqn.foo"}
return {
'id': kw.get('id', 789),
'version': kw.get('version', volume_target.VolumeTarget.VERSION),
'uuid': kw.get('uuid', '1be26c0b-03f2-4d2e-ae87-c02d7f33c781'),
'node_id': kw.get('node_id', 123),
'volume_type': kw.get('volume_type', 'iscsi'),
'properties': kw.get('properties', fake_properties),
'boot_index': kw.get('boot_index', 0),
'volume_id': kw.get('volume_id', '12345678'),
'extra': kw.get('extra', {}),
'created_at': kw.get('created_at'),
'updated_at': kw.get('updated_at'),
}
def create_test_volume_target(**kw):
"""Create test target entry in DB and return VolumeTarget DB object.
Function to be used to create test VolumeTarget objects in the database.
:param kw: kwargs with overriding values for target's attributes.
:returns: Test VolumeTarget DB object.
"""
target = get_test_volume_target(**kw)
# Let DB generate ID if it isn't specified explicitly
if 'id' not in kw:
del target['id']
dbapi = db_api.get_instance()
return dbapi.create_volume_target(target)
def get_test_chassis(**kw):
return {
'id': kw.get('id', 42),
'version': kw.get('version', chassis.Chassis.VERSION),
'uuid': kw.get('uuid', 'e74c40e0-d825-11e2-a28f-0800200c9a66'),
'extra': kw.get('extra', {}),
'description': kw.get('description', 'data-center-1-chassis'),
'created_at': kw.get('created_at'),
'updated_at': kw.get('updated_at'),
}
def create_test_chassis(**kw):
"""Create test chassis entry in DB and return Chassis DB object.
Function to be used to create test Chassis objects in the database.
:param kw: kwargs with overriding values for chassis's attributes.
:returns: Test Chassis DB object.
"""
chassis = get_test_chassis(**kw)
# Let DB generate ID if it isn't specified explicitly
if 'id' not in kw:
del chassis['id']
dbapi = db_api.get_instance()
return dbapi.create_chassis(chassis)
def get_test_conductor(**kw):
return {
'id': kw.get('id', 6),
'version': kw.get('version', conductor.Conductor.VERSION),
'hostname': kw.get('hostname', 'test-conductor-node'),
'drivers': kw.get('drivers', ['fake-driver', 'null-driver']),
'created_at': kw.get('created_at', timeutils.utcnow()),
'updated_at': kw.get('updated_at', timeutils.utcnow()),
}
def get_test_ucs_info():
return {
"ucs_username": "admin",
"ucs_password": "password",
"ucs_service_profile": "org-root/ls-devstack",
"ucs_address": "ucs-b",
}
def get_test_cimc_info():
return {
"cimc_username": "admin",
"cimc_password": "password",
"cimc_address": "1.2.3.4",
}
def get_test_oneview_properties():
return {
"cpu_arch": "x86_64",
"cpus": "8",
"local_gb": "10",
"memory_mb": "4096",
"capabilities": ("server_hardware_type_uri:fake_sht_uri,"
"enclosure_group_uri:fake_eg_uri,"
"server_profile_template_uri:fake_spt_uri"),
}
def get_test_oneview_driver_info():
return {
'server_hardware_uri': 'fake_sh_uri',
}
def get_test_redfish_info():
return {
"redfish_address": "https://example.com",
"redfish_system_id": "/redfish/v1/Systems/FAKESYSTEM",
"redfish_username": "username",
"redfish_password": "password"
}
def get_test_portgroup(**kw):
return {
'id': kw.get('id', 654),
'version': kw.get('version', portgroup.Portgroup.VERSION),
'uuid': kw.get('uuid', '6eb02b44-18a3-4659-8c0b-8d2802581ae4'),
'name': kw.get('name', 'fooname'),
'node_id': kw.get('node_id', 123),
'address': kw.get('address', '52:54:00:cf:2d:31'),
'extra': kw.get('extra', {}),
'created_at': kw.get('created_at'),
'updated_at': kw.get('updated_at'),
'internal_info': kw.get('internal_info', {"bar": "buzz"}),
'standalone_ports_supported': kw.get('standalone_ports_supported',
True),
'mode': kw.get('mode'),
'properties': kw.get('properties', {}),
}
def create_test_portgroup(**kw):
"""Create test portgroup entry in DB and return Portgroup DB object.
Function to be used to create test Portgroup objects in the database.
:param kw: kwargs with overriding values for port's attributes.
:returns: Test Portgroup DB object.
"""
portgroup = get_test_portgroup(**kw)
# Let DB generate ID if it isn't specified explicitly
if 'id' not in kw:
del portgroup['id']
dbapi = db_api.get_instance()
return dbapi.create_portgroup(portgroup)
def get_test_node_tag(**kw):
return {
# TODO(rloo): Replace None below with the object NodeTag VERSION,
# after this lands: https://review.openstack.org/#/c/233357
'version': kw.get('version', None),
"tag": kw.get("tag", "tag1"),
"node_id": kw.get("node_id", "123"),
'created_at': kw.get('created_at'),
'updated_at': kw.get('updated_at'),
}
def create_test_node_tag(**kw):
"""Create test node tag entry in DB and return NodeTag DB object.
Function to be used to create test NodeTag objects in the database.
:param kw: kwargs with overriding values for tag's attributes.
:returns: Test NodeTag DB object.
"""
tag = get_test_node_tag(**kw)
dbapi = db_api.get_instance()
return dbapi.add_node_tag(tag['node_id'], tag['tag'])
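# A hedged usage sketch (illustrative only; this helper is not called by any
# test in this module): the get_test_* helpers return plain dicts whose fields
# can be overridden via kwargs, and the create_test_* helpers persist those
# dicts through the DB API.
def _example_test_fixtures():
    chassis = create_test_chassis(description='rack-42-chassis')
    conductor = get_test_conductor(hostname='cond-host-1')
    return chassis, conductor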
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import copy
import datetime
import json
import os
import six
from bson.objectid import ObjectId
from .model_base import Model, ValidationException, GirderException
from girder import events
from girder import logger
from girder.constants import AccessType
from girder.utility import acl_mixin
from girder.utility.progress import setResponseTimeLimit
class Item(acl_mixin.AccessControlMixin, Model):
"""
Items are leaves in the data hierarchy. They can contain 0 or more
files within them, and can also contain arbitrary metadata.
"""
def initialize(self):
self.name = 'item'
self.ensureIndices(('folderId', 'name', 'lowerName',
([('folderId', 1), ('name', 1)], {})))
self.ensureTextIndex({
'name': 10,
'description': 1
})
self.resourceColl = 'folder'
self.resourceParent = 'folderId'
self.exposeFields(level=AccessType.READ, fields=(
'_id', 'size', 'updated', 'description', 'created', 'meta',
'creatorId', 'folderId', 'name', 'baseParentType', 'baseParentId'))
def filter(self, item, user=None):
"""Preserved override for kwarg backwards compatibility."""
return Model.filter(self, doc=item, user=user)
def _validateString(self, value):
"""
Make sure a value is a string and is stripped of whitespace.
:param value: the value to coerce into a string if it isn't already.
:return stringValue: the string version of the value.
"""
if value is None:
value = ''
if not isinstance(value, six.string_types):
value = str(value)
return value.strip()
def validate(self, doc):
doc['name'] = self._validateString(doc.get('name', ''))
doc['description'] = self._validateString(doc.get('description', ''))
if not doc['name']:
raise ValidationException('Item name must not be empty.', 'name')
# Ensure unique name among sibling items and folders. If the desired
# name collides with an existing item or folder, we will append (n)
# onto the end of the name, incrementing n until the name is unique.
name = doc['name']
n = 0
while True:
q = {
'name': name,
'folderId': doc['folderId']
}
if '_id' in doc:
q['_id'] = {'$ne': doc['_id']}
dupItem = self.findOne(q, fields=['_id'])
q = {
'parentId': doc['folderId'],
'name': name,
'parentCollection': 'folder'
}
dupFolder = self.model('folder').findOne(q, fields=['_id'])
if dupItem is None and dupFolder is None:
doc['name'] = name
break
else:
n += 1
name = '%s (%d)' % (doc['name'], n)
doc['lowerName'] = doc['name'].lower()
return doc
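    # Illustrative behaviour of the loop above (not part of the original
    # model): if a sibling item or folder named 'report.txt' already exists in
    # the same folder, the new item is saved as 'report.txt (1)', then
    # 'report.txt (2)', and so on, until no collision remains.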
def load(self, id, level=AccessType.ADMIN, user=None, objectId=True,
force=False, fields=None, exc=False):
"""
Calls AccessControlMixin.load while doing some auto-correction.
Takes the same parameters as
:py:func:`girder.models.model_base.AccessControlledMixin.load`.
"""
doc = super(Item, self).load(id, level, user, objectId, force, fields,
exc)
if doc is not None and 'baseParentType' not in doc:
pathFromRoot = self.parentsToRoot(doc, user=user, force=True)
baseParent = pathFromRoot[0]
doc['baseParentId'] = baseParent['object']['_id']
doc['baseParentType'] = baseParent['type']
self.save(doc, triggerEvents=False)
if doc is not None and 'lowerName' not in doc:
self.save(doc, triggerEvents=False)
return doc
def move(self, item, folder):
"""
Move the given item from its current folder into another folder.
:param item: The item to move.
:type item: dict
:param folder: The folder to move the item into.
:type folder: dict.
"""
self.propagateSizeChange(item, -item['size'])
item['folderId'] = folder['_id']
item['baseParentType'] = folder['baseParentType']
item['baseParentId'] = folder['baseParentId']
self.propagateSizeChange(item, item['size'])
return self.save(item)
def propagateSizeChange(self, item, inc):
self.model('folder').increment(query={
'_id': item['folderId']
}, field='size', amount=inc, multi=False)
self.model(item['baseParentType']).increment(query={
'_id': item['baseParentId']
}, field='size', amount=inc, multi=False)
def recalculateSize(self, item):
"""
Recalculate the item size based on the files that are in it. If this
is different than the recorded size, propagate the changes.
:param item: The item to recalculate the size of.
:returns: the recalculated size in bytes
"""
size = 0
for file in self.childFiles(item):
# We could add a recalculateSize to the file model, in which case
# this would be:
# size += self.model('file').recalculateSize(file)
size += file.get('size', 0)
delta = size-item.get('size', 0)
if delta:
logger.info('Item %s was wrong size: was %d, is %d' % (
item['_id'], item['size'], size))
item['size'] = size
self.update({'_id': item['_id']}, update={'$set': {'size': size}})
self.propagateSizeChange(item, delta)
return size
def childFiles(self, item, limit=0, offset=0, sort=None, **kwargs):
"""
Returns child files of the item. Passes any kwargs to the find
function.
:param item: The parent item.
:param limit: Result limit.
:param offset: Result offset.
:param sort: The sort structure to pass to pymongo.
"""
q = {
'itemId': item['_id']
}
return self.model('file').find(
q, limit=limit, offset=offset, sort=sort, **kwargs)
def remove(self, item, **kwargs):
"""
Delete an item, and all references to it in the database.
:param item: The item document to delete.
:type item: dict
"""
# Delete all files in this item
files = self.model('file').find({
'itemId': item['_id']
})
for file in files:
fileKwargs = kwargs.copy()
fileKwargs.pop('updateItemSize', None)
self.model('file').remove(file, updateItemSize=False, **fileKwargs)
# Delete pending uploads into this item
uploads = self.model('upload').find({
'parentId': item['_id'],
'parentType': 'item'
})
for upload in uploads:
self.model('upload').remove(upload, **kwargs)
# Delete the item itself
Model.remove(self, item)
def textSearch(self, query, user=None, filters=None, limit=0, offset=0,
sort=None, fields=None):
"""
Custom override of Model.textSearch to filter items by permissions
of the parent folder.
"""
if not filters:
filters = {}
# get the non-filtered search result from Model.textSearch
cursor = Model.textSearch(self, query=query, sort=sort,
filters=filters)
return self.filterResultsByPermission(
cursor=cursor, user=user, level=AccessType.READ, limit=limit,
offset=offset)
def createItem(self, name, creator, folder, description='',
reuseExisting=False):
"""
Create a new item. The creator will be given admin access to it.
:param name: The name of the item.
:type name: str
:param description: Description for the item.
:type description: str
:param folder: The parent folder of the item.
:param creator: User document representing the creator of the item.
:type creator: dict
:param reuseExisting: If an item with the given name already exists
under the given folder, return that item rather than creating a
new one.
:type reuseExisting: bool
:returns: The item document that was created.
"""
if reuseExisting:
existing = self.findOne({
'folderId': folder['_id'],
'name': name
})
if existing:
return existing
now = datetime.datetime.utcnow()
if not type(creator) is dict or '_id' not in creator:
# Internal error -- this shouldn't be called without a user.
raise GirderException('Creator must be a user.',
'girder.models.item.creator-not-user')
if 'baseParentType' not in folder:
pathFromRoot = self.parentsToRoot({'folderId': folder['_id']},
creator, force=True)
folder['baseParentType'] = pathFromRoot[0]['type']
folder['baseParentId'] = pathFromRoot[0]['object']['_id']
return self.save({
'name': self._validateString(name),
'description': self._validateString(description),
'folderId': ObjectId(folder['_id']),
'creatorId': creator['_id'],
'baseParentType': folder['baseParentType'],
'baseParentId': folder['baseParentId'],
'created': now,
'updated': now,
'size': 0
})
def updateItem(self, item):
"""
Updates an item.
:param item: The item document to update
:type item: dict
:returns: The item document that was edited.
"""
item['updated'] = datetime.datetime.utcnow()
# Validate and save the item
return self.save(item)
def setMetadata(self, item, metadata):
"""
Set metadata on an item. A rest exception is thrown in the cases where
the metadata json object is badly formed, or if any of the metadata
keys contains a period ('.').
:param item: The item to set the metadata on.
:type item: dict
:param metadata: A dictionary containing key-value pairs to add to
the items meta field
:type metadata: dict
:returns: the item document
"""
if 'meta' not in item:
item['meta'] = {}
# Add new metadata to existing metadata
item['meta'].update(metadata.items())
# Remove metadata fields that were set to null (use items in py3)
toDelete = [k for k, v in six.iteritems(item['meta']) if v is None]
for key in toDelete:
del item['meta'][key]
item['updated'] = datetime.datetime.utcnow()
# Validate and save the item
return self.save(item)
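    # A hedged usage sketch (the variable names are illustrative): keys mapped
    # to None are removed from the item's metadata, while all other keys are
    # merged into the existing 'meta' dict.
    #
    #     item = itemModel.setMetadata(item, {'color': 'blue', 'size': None})
    #     # 'color' is added or updated; 'size' is deleted if it was present.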
def parentsToRoot(self, item, user=None, force=False):
"""
Get the path to traverse to a root of the hierarchy.
:param item: The item whose root to find
:type item: dict
:returns: an ordered list of dictionaries from root to the current item
"""
curFolder = self.model('folder').load(
item['folderId'], user=user, level=AccessType.READ, force=force)
folderIdsToRoot = self.model('folder').parentsToRoot(
curFolder, user=user, level=AccessType.READ, force=force)
if force:
folderIdsToRoot.append({'type': 'folder', 'object': curFolder})
else:
filteredFolder = self.model('folder').filter(curFolder, user)
folderIdsToRoot.append({'type': 'folder', 'object': filteredFolder})
return folderIdsToRoot
def copyItem(self, srcItem, creator, name=None, folder=None,
description=None):
"""
Copy an item, including duplicating files and metadata.
:param srcItem: the item to copy.
:type srcItem: dict
:param creator: the user who will own the copied item.
:param name: The name of the new item. None to copy the original name.
:type name: str
:param folder: The parent folder of the new item. None to store in the
same folder as the original item.
:param description: Description for the new item. None to copy the
original description.
:type description: str
:returns: the new item.
"""
if name is None:
name = srcItem['name']
if folder is None:
folder = self.model('folder').load(srcItem['folderId'], force=True)
if description is None:
description = srcItem['description']
newItem = self.createItem(
folder=folder, name=name, creator=creator, description=description)
# copy metadata and other extension values
filteredItem = self.filter(newItem, creator)
updated = False
for key in srcItem:
if key not in filteredItem and key not in newItem:
newItem[key] = copy.deepcopy(srcItem[key])
updated = True
if updated:
self.save(newItem, triggerEvents=False)
# Give listeners a chance to change things
events.trigger('model.item.copy.prepare', (srcItem, newItem))
# copy files
for file in self.childFiles(item=srcItem):
self.model('file').copyFile(file, creator=creator, item=newItem)
events.trigger('model.item.copy.after', newItem)
return self.filter(newItem, creator)
def fileList(self, doc, user=None, path='', includeMetadata=False,
subpath=True):
"""
Generate a list of files within this item.
:param doc: the item to list.
:param user: a user used to validate data that is returned. This isn't
used, but is present to be consistent across all model
implementations of fileList.
:param path: a path prefix to add to the results.
:param includeMetadata: if True and there is any metadata, include a
result which is the json string of the
metadata. This is given a name of
metadata[-(number).json that is distinct from
any file within the item.
:param subpath: if True and the item has more than one file, metadata,
or the sole file is not named the same as the item,
then the returned paths include the item name.
"""
if subpath:
files = [file for file in self.childFiles(item=doc, limit=2)]
if (len(files) != 1 or files[0]['name'] != doc['name'] or
(includeMetadata and len(doc.get('meta', {})))):
path = os.path.join(path, doc['name'])
metadataFile = "girder-item-metadata.json"
for file in self.childFiles(item=doc):
if file['name'] == metadataFile:
metadataFile = None
yield (os.path.join(path, file['name']),
self.model('file').download(file, headers=False))
if includeMetadata and metadataFile and len(doc.get('meta', {})):
def stream():
yield json.dumps(doc['meta'], default=str)
yield (os.path.join(path, metadataFile), stream)
def checkConsistency(self, stage, progress=None):
"""
Check all of the items and make sure they are valid. This operates in
stages, since some actions should be done before other models that rely
on items and some need to be done after. The stages are:
* count - count how many items need to be checked.
* remove - remove lost items
* verify - verify and fix existing items
:param stage: which stage of the check to run. See above.
:param progress: an optional progress context to update.
:returns: numItems: number of items to check or processed,
numChanged: number of items changed.
"""
if stage == 'count':
numItems = self.find(limit=1).count()
return numItems, 0
elif stage == 'remove':
# Check that all items are in existing folders. Any that are not
# can be deleted. Perhaps we should put them in a lost+found
# instead
folderIds = self.model('folder').collection.distinct('_id')
lostItems = self.find({
'$or': [{'folderId': {'$nin': folderIds}},
{'folderId': {'$exists': False}}]})
numItems = itemsLeft = lostItems.count()
if numItems:
if progress is not None:
progress.update(message='Removing orphaned items')
for item in lostItems:
setResponseTimeLimit()
self.collection.remove({'_id': item['_id']})
if progress is not None:
itemsLeft -= 1
progress.update(increment=1, message='Removing '
'orphaned items (%d left)' % itemsLeft)
return numItems, numItems
elif stage == 'verify':
# Check items sizes
items = self.find()
numItems = itemsLeft = items.count()
itemsCorrected = 0
if progress is not None:
progress.update(message='Checking items')
for item in items:
itemCorrected = False
setResponseTimeLimit()
oldSize = item.get('size', 0)
newSize = self.recalculateSize(item)
if newSize != oldSize:
itemCorrected = True
newBaseParent = self.parentsToRoot(item, force=True)[0]
if item['baseParentType'] != newBaseParent['type'] or \
item['baseParentId'] != newBaseParent['object']['_id']:
self.update(
{'_id': item['_id']}, update={'$set': {
'baseParentType': newBaseParent['type'],
'baseParentId': newBaseParent['object']['_id']
}})
itemCorrected = True
if itemCorrected:
itemsCorrected += 1
if progress is not None:
itemsLeft -= 1
progress.update(increment=1, message='Checking items (%d '
'left)' % itemsLeft)
return numItems, itemsCorrected
|
|
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run a group of subprocesses and then finish."""
from __future__ import print_function
import logging
import multiprocessing
import os
import platform
import re
import signal
import subprocess
import sys
import tempfile
import time
import errno
# cpu cost measurement
measure_cpu_costs = False
_DEFAULT_MAX_JOBS = 16 * multiprocessing.cpu_count()
_MAX_RESULT_SIZE = 8192
# NOTE: If you change this, please make sure to test reviewing the
# github PR with http://reviewable.io, which is known to add UTF-8
# characters to the PR description, which leak into the environment here
# and cause failures.
def strip_non_ascii_chars(s):
return ''.join(c for c in s if ord(c) < 128)
def sanitized_environment(env):
sanitized = {}
for key, value in env.items():
sanitized[strip_non_ascii_chars(key)] = strip_non_ascii_chars(value)
return sanitized
def platform_string():
if platform.system() == 'Windows':
return 'windows'
elif platform.system()[:7] == 'MSYS_NT':
return 'windows'
elif platform.system() == 'Darwin':
return 'mac'
elif platform.system() == 'Linux':
return 'linux'
else:
return 'posix'
# setup a signal handler so that signal.pause registers 'something'
# when a child finishes
# not using futures and threading to avoid a dependency on subprocess32
if platform_string() == 'windows':
pass
else:
def alarm_handler(unused_signum, unused_frame):
pass
signal.signal(signal.SIGCHLD, lambda unused_signum, unused_frame: None)
signal.signal(signal.SIGALRM, alarm_handler)
_SUCCESS = object()
_FAILURE = object()
_RUNNING = object()
_KILLED = object()
_COLORS = {
'red': [31, 0],
'green': [32, 0],
'yellow': [33, 0],
'lightgray': [37, 0],
'gray': [30, 1],
'purple': [35, 0],
'cyan': [36, 0]
}
_BEGINNING_OF_LINE = '\x1b[0G'
_CLEAR_LINE = '\x1b[2K'
_TAG_COLOR = {
'FAILED': 'red',
'FLAKE': 'purple',
'TIMEOUT_FLAKE': 'purple',
'WARNING': 'yellow',
'TIMEOUT': 'red',
'PASSED': 'green',
'START': 'gray',
'WAITING': 'yellow',
'SUCCESS': 'green',
'IDLE': 'gray',
'SKIPPED': 'cyan'
}
_FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.INFO, format=_FORMAT)
def eintr_be_gone(fn):
"""Run fn until it doesn't stop because of EINTR"""
while True:
try:
return fn()
        except IOError as e:
if e.errno != errno.EINTR:
raise
def message(tag, msg, explanatory_text=None, do_newline=False):
if message.old_tag == tag and message.old_msg == msg and not explanatory_text:
return
message.old_tag = tag
message.old_msg = msg
while True:
try:
if platform_string() == 'windows' or not sys.stdout.isatty():
if explanatory_text:
logging.info(explanatory_text)
logging.info('%s: %s', tag, msg)
else:
sys.stdout.write('%s%s%s\x1b[%d;%dm%s\x1b[0m: %s%s' % (
_BEGINNING_OF_LINE, _CLEAR_LINE, '\n%s' % explanatory_text
if explanatory_text is not None else '',
_COLORS[_TAG_COLOR[tag]][1], _COLORS[_TAG_COLOR[tag]][0],
tag, msg, '\n'
if do_newline or explanatory_text is not None else ''))
sys.stdout.flush()
return
        except IOError as e:
if e.errno != errno.EINTR:
raise
message.old_tag = ''
message.old_msg = ''
def which(filename):
if '/' in filename:
return filename
for path in os.environ['PATH'].split(os.pathsep):
if os.path.exists(os.path.join(path, filename)):
return os.path.join(path, filename)
raise Exception('%s not found' % filename)
class JobSpec(object):
"""Specifies what to run for a job."""
def __init__(self,
cmdline,
shortname=None,
environ=None,
cwd=None,
shell=False,
timeout_seconds=5 * 60,
flake_retries=0,
timeout_retries=0,
kill_handler=None,
cpu_cost=1.0,
verbose_success=False):
"""
Arguments:
cmdline: a list of arguments to pass as the command line
environ: a dictionary of environment variables to set in the child process
kill_handler: a handler that will be called whenever job.kill() is invoked
cpu_cost: number of cores per second this job needs
"""
if environ is None:
environ = {}
self.cmdline = cmdline
self.environ = environ
self.shortname = cmdline[0] if shortname is None else shortname
self.cwd = cwd
self.shell = shell
self.timeout_seconds = timeout_seconds
self.flake_retries = flake_retries
self.timeout_retries = timeout_retries
self.kill_handler = kill_handler
self.cpu_cost = cpu_cost
self.verbose_success = verbose_success
def identity(self):
return '%r %r' % (self.cmdline, self.environ)
def __hash__(self):
return hash(self.identity())
def __cmp__(self, other):
return self.identity() == other.identity()
def __repr__(self):
return 'JobSpec(shortname=%s, cmdline=%s)' % (self.shortname,
self.cmdline)
def __str__(self):
return '%s: %s %s' % (self.shortname, ' '.join(
'%s=%s' % kv
for kv in self.environ.items()), ' '.join(self.cmdline))
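# A minimal sketch (the command line and names are illustrative and this helper
# is never called here): a JobSpec bundles a command line with the scheduling
# metadata that Jobset uses to limit concurrency and handle retries.
def _example_job_spec():
    return JobSpec(cmdline=['python', 'tools/run_one_test.py'],
                   shortname='one_test',
                   environ={'CONFIG': 'opt'},
                   timeout_seconds=300,
                   cpu_cost=1.0)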
class JobResult(object):
def __init__(self):
self.state = 'UNKNOWN'
self.returncode = -1
self.elapsed_time = 0
self.num_failures = 0
self.retries = 0
self.message = ''
self.cpu_estimated = 1
self.cpu_measured = 1
def read_from_start(f):
f.seek(0)
return f.read()
class Job(object):
"""Manages one job."""
def __init__(self,
spec,
newline_on_success,
travis,
add_env,
quiet_success=False):
self._spec = spec
self._newline_on_success = newline_on_success
self._travis = travis
self._add_env = add_env.copy()
self._retries = 0
self._timeout_retries = 0
self._suppress_failure_message = False
self._quiet_success = quiet_success
if not self._quiet_success:
message('START', spec.shortname, do_newline=self._travis)
self.result = JobResult()
self.start()
def GetSpec(self):
return self._spec
def start(self):
self._tempfile = tempfile.TemporaryFile()
env = dict(os.environ)
env.update(self._spec.environ)
env.update(self._add_env)
env = sanitized_environment(env)
self._start = time.time()
cmdline = self._spec.cmdline
# The Unix time command is finicky when used with MSBuild, so we don't use it
# with jobs that run MSBuild.
global measure_cpu_costs
        if measure_cpu_costs and 'vsprojects\\build' not in cmdline[0]:
cmdline = ['time', '-p'] + cmdline
else:
measure_cpu_costs = False
try_start = lambda: subprocess.Popen(args=cmdline,
stderr=subprocess.STDOUT,
stdout=self._tempfile,
cwd=self._spec.cwd,
shell=self._spec.shell,
env=env)
delay = 0.3
for i in range(0, 4):
try:
self._process = try_start()
break
except OSError:
message('WARNING', 'Failed to start %s, retrying in %f seconds'
% (self._spec.shortname, delay))
time.sleep(delay)
delay *= 2
else:
self._process = try_start()
self._state = _RUNNING
def state(self):
"""Poll current state of the job. Prints messages at completion."""
def stdout(self=self):
stdout = read_from_start(self._tempfile)
self.result.message = stdout[-_MAX_RESULT_SIZE:]
return stdout
if self._state == _RUNNING and self._process.poll() is not None:
elapsed = time.time() - self._start
self.result.elapsed_time = elapsed
if self._process.returncode != 0:
if self._retries < self._spec.flake_retries:
message(
'FLAKE',
'%s [ret=%d, pid=%d]' %
(self._spec.shortname, self._process.returncode,
self._process.pid),
stdout(),
do_newline=True)
self._retries += 1
self.result.num_failures += 1
self.result.retries = self._timeout_retries + self._retries
# NOTE: job is restarted regardless of jobset's max_time setting
self.start()
else:
self._state = _FAILURE
if not self._suppress_failure_message:
message(
'FAILED',
'%s [ret=%d, pid=%d, time=%.1fsec]' %
(self._spec.shortname, self._process.returncode,
self._process.pid, elapsed),
stdout(),
do_newline=True)
self.result.state = 'FAILED'
self.result.num_failures += 1
self.result.returncode = self._process.returncode
else:
self._state = _SUCCESS
measurement = ''
if measure_cpu_costs:
m = re.search(
r'real\s+([0-9.]+)\nuser\s+([0-9.]+)\nsys\s+([0-9.]+)',
stdout())
real = float(m.group(1))
user = float(m.group(2))
sys = float(m.group(3))
if real > 0.5:
cores = (user + sys) / real
self.result.cpu_measured = float('%.01f' % cores)
self.result.cpu_estimated = float('%.01f' %
self._spec.cpu_cost)
measurement = '; cpu_cost=%.01f; estimated=%.01f' % (
self.result.cpu_measured, self.result.cpu_estimated)
if not self._quiet_success:
message(
'PASSED',
'%s [time=%.1fsec, retries=%d:%d%s]' %
(self._spec.shortname, elapsed, self._retries,
self._timeout_retries, measurement),
stdout() if self._spec.verbose_success else None,
do_newline=self._newline_on_success or self._travis)
self.result.state = 'PASSED'
elif (self._state == _RUNNING and
self._spec.timeout_seconds is not None and
time.time() - self._start > self._spec.timeout_seconds):
elapsed = time.time() - self._start
self.result.elapsed_time = elapsed
if self._timeout_retries < self._spec.timeout_retries:
message(
'TIMEOUT_FLAKE',
'%s [pid=%d]' % (self._spec.shortname, self._process.pid),
stdout(),
do_newline=True)
self._timeout_retries += 1
self.result.num_failures += 1
self.result.retries = self._timeout_retries + self._retries
if self._spec.kill_handler:
self._spec.kill_handler(self)
self._process.terminate()
# NOTE: job is restarted regardless of jobset's max_time setting
self.start()
else:
message(
'TIMEOUT',
'%s [pid=%d, time=%.1fsec]' %
(self._spec.shortname, self._process.pid, elapsed),
stdout(),
do_newline=True)
self.kill()
self.result.state = 'TIMEOUT'
self.result.num_failures += 1
return self._state
def kill(self):
if self._state == _RUNNING:
self._state = _KILLED
if self._spec.kill_handler:
self._spec.kill_handler(self)
self._process.terminate()
def suppress_failure_message(self):
self._suppress_failure_message = True
class Jobset(object):
"""Manages one run of jobs."""
def __init__(self, check_cancelled, maxjobs, maxjobs_cpu_agnostic,
newline_on_success, travis, stop_on_failure, add_env,
quiet_success, max_time):
self._running = set()
self._check_cancelled = check_cancelled
self._cancelled = False
self._failures = 0
self._completed = 0
self._maxjobs = maxjobs
self._maxjobs_cpu_agnostic = maxjobs_cpu_agnostic
self._newline_on_success = newline_on_success
self._travis = travis
self._stop_on_failure = stop_on_failure
self._add_env = add_env
self._quiet_success = quiet_success
self._max_time = max_time
self.resultset = {}
self._remaining = None
self._start_time = time.time()
def set_remaining(self, remaining):
self._remaining = remaining
def get_num_failures(self):
return self._failures
def cpu_cost(self):
c = 0
for job in self._running:
c += job._spec.cpu_cost
return c
def start(self, spec):
"""Start a job. Return True on success, False on failure."""
while True:
if self._max_time > 0 and time.time(
) - self._start_time > self._max_time:
skipped_job_result = JobResult()
skipped_job_result.state = 'SKIPPED'
message('SKIPPED', spec.shortname, do_newline=True)
self.resultset[spec.shortname] = [skipped_job_result]
return True
if self.cancelled(): return False
current_cpu_cost = self.cpu_cost()
if current_cpu_cost == 0: break
if current_cpu_cost + spec.cpu_cost <= self._maxjobs:
if len(self._running) < self._maxjobs_cpu_agnostic:
break
self.reap(spec.shortname, spec.cpu_cost)
if self.cancelled(): return False
job = Job(spec, self._newline_on_success, self._travis, self._add_env,
self._quiet_success)
self._running.add(job)
if job.GetSpec().shortname not in self.resultset:
self.resultset[job.GetSpec().shortname] = []
return True
def reap(self, waiting_for=None, waiting_for_cost=None):
"""Collect the dead jobs."""
while self._running:
dead = set()
for job in self._running:
st = eintr_be_gone(lambda: job.state())
if st == _RUNNING: continue
if st == _FAILURE or st == _KILLED:
self._failures += 1
if self._stop_on_failure:
self._cancelled = True
for job in self._running:
job.kill()
dead.add(job)
break
for job in dead:
self._completed += 1
if not self._quiet_success or job.result.state != 'PASSED':
self.resultset[job.GetSpec().shortname].append(job.result)
self._running.remove(job)
if dead: return
if not self._travis and platform_string() != 'windows':
rstr = '' if self._remaining is None else '%d queued, ' % self._remaining
if self._remaining is not None and self._completed > 0:
now = time.time()
sofar = now - self._start_time
remaining = sofar / self._completed * (
self._remaining + len(self._running))
rstr = 'ETA %.1f sec; %s' % (remaining, rstr)
if waiting_for is not None:
wstr = ' next: %s @ %.2f cpu' % (waiting_for,
waiting_for_cost)
else:
wstr = ''
message(
'WAITING',
'%s%d jobs running, %d complete, %d failed (load %.2f)%s' %
(rstr, len(self._running), self._completed, self._failures,
self.cpu_cost(), wstr))
if platform_string() == 'windows':
time.sleep(0.1)
else:
signal.alarm(10)
signal.pause()
def cancelled(self):
"""Poll for cancellation."""
if self._cancelled: return True
if not self._check_cancelled(): return False
for job in self._running:
job.kill()
self._cancelled = True
return True
def finish(self):
while self._running:
if self.cancelled(): pass # poll cancellation
self.reap()
if platform_string() != 'windows':
signal.alarm(0)
return not self.cancelled() and self._failures == 0
def _never_cancelled():
return False
def tag_remaining(xs):
staging = []
for x in xs:
staging.append(x)
if len(staging) > 5000:
yield (staging.pop(0), None)
n = len(staging)
for i, x in enumerate(staging):
yield (x, n - i - 1)
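# Illustrative behaviour of tag_remaining (not executed here): for a short
# input everything is buffered first, so each job is yielded together with the
# number of jobs that still follow it, e.g.
#
#     list(tag_remaining(['a', 'b', 'c']))  ->  [('a', 2), ('b', 1), ('c', 0)]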
def run(cmdlines,
check_cancelled=_never_cancelled,
maxjobs=None,
maxjobs_cpu_agnostic=None,
newline_on_success=False,
travis=False,
infinite_runs=False,
stop_on_failure=False,
add_env={},
skip_jobs=False,
quiet_success=False,
max_time=-1):
if skip_jobs:
resultset = {}
skipped_job_result = JobResult()
skipped_job_result.state = 'SKIPPED'
for job in cmdlines:
message('SKIPPED', job.shortname, do_newline=True)
resultset[job.shortname] = [skipped_job_result]
return 0, resultset
js = Jobset(check_cancelled, maxjobs if maxjobs is not None else
_DEFAULT_MAX_JOBS, maxjobs_cpu_agnostic
if maxjobs_cpu_agnostic is not None else _DEFAULT_MAX_JOBS,
newline_on_success, travis, stop_on_failure, add_env,
quiet_success, max_time)
for cmdline, remaining in tag_remaining(cmdlines):
if not js.start(cmdline):
break
if remaining is not None:
js.set_remaining(remaining)
js.finish()
return js.get_num_failures(), js.resultset
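# A hedged end-to-end sketch (the command lines are hypothetical and this
# helper is not invoked anywhere): build the specs, hand them to run(), then
# inspect the failure count and the per-job results.
def _example_run_usage():
    specs = [JobSpec(['./tools/check_%d.sh' % i], shortname='check_%d' % i)
             for i in range(3)]
    num_failures, resultset = run(specs, maxjobs=2, newline_on_success=True)
    for name, results in resultset.items():
        print(name, [r.state for r in results])
    return num_failures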
|
|
# Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hmac
import unittest
from hashlib import sha1
from StringIO import StringIO
from time import time
from swift.common.swob import Request, Response
from swift.common.middleware import tempauth, formpost
from swift.common.utils import split_path
class FakeApp(object):
def __init__(self, status_headers_body_iter=None,
check_no_query_string=True):
self.status_headers_body_iter = status_headers_body_iter
if not self.status_headers_body_iter:
self.status_headers_body_iter = iter([('404 Not Found', {
'x-test-header-one-a': 'value1',
'x-test-header-two-a': 'value2',
'x-test-header-two-b': 'value3'}, '')])
self.requests = []
self.check_no_query_string = check_no_query_string
def __call__(self, env, start_response):
try:
if self.check_no_query_string and env.get('QUERY_STRING'):
raise Exception('Query string %s should have been discarded!' %
env['QUERY_STRING'])
body = ''
while True:
chunk = env['wsgi.input'].read()
if not chunk:
break
body += chunk
env['wsgi.input'] = StringIO(body)
self.requests.append(Request.blank('', environ=env))
if env.get('swift.authorize_override') and \
env.get('REMOTE_USER') != '.wsgi.pre_authed':
raise Exception(
'Invalid REMOTE_USER %r with swift.authorize_override' % (
env.get('REMOTE_USER'),))
if 'swift.authorize' in env:
resp = env['swift.authorize'](self.requests[-1])
if resp:
return resp(env, start_response)
status, headers, body = next(self.status_headers_body_iter)
return Response(status=status, headers=headers,
body=body)(env, start_response)
except EOFError:
start_response('499 Client Disconnect',
[('Content-Type', 'text/plain')])
return ['Client Disconnect\n']
class TestCappedFileLikeObject(unittest.TestCase):
def test_whole(self):
self.assertEquals(
formpost._CappedFileLikeObject(StringIO('abc'), 10).read(), 'abc')
def test_exceeded(self):
exc = None
try:
formpost._CappedFileLikeObject(StringIO('abc'), 2).read()
except EOFError as err:
exc = err
self.assertEquals(str(exc), 'max_file_size exceeded')
def test_whole_readline(self):
fp = formpost._CappedFileLikeObject(StringIO('abc\ndef'), 10)
self.assertEquals(fp.readline(), 'abc\n')
self.assertEquals(fp.readline(), 'def')
self.assertEquals(fp.readline(), '')
def test_exceeded_readline(self):
fp = formpost._CappedFileLikeObject(StringIO('abc\ndef'), 5)
self.assertEquals(fp.readline(), 'abc\n')
exc = None
try:
self.assertEquals(fp.readline(), 'def')
except EOFError as err:
exc = err
self.assertEquals(str(exc), 'max_file_size exceeded')
def test_read_sized(self):
fp = formpost._CappedFileLikeObject(StringIO('abcdefg'), 10)
self.assertEquals(fp.read(2), 'ab')
self.assertEquals(fp.read(2), 'cd')
self.assertEquals(fp.read(2), 'ef')
self.assertEquals(fp.read(2), 'g')
self.assertEquals(fp.read(2), '')
class TestFormPost(unittest.TestCase):
def setUp(self):
self.app = FakeApp()
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
def _make_request(self, path, tempurl_keys=(), **kwargs):
req = Request.blank(path, **kwargs)
# Fake out the caching layer so that get_account_info() finds its
# data. Include something that isn't tempurl keys to prove we skip it.
meta = {'user-job-title': 'Personal Trainer',
'user-real-name': 'Jim Shortz'}
for idx, key in enumerate(tempurl_keys):
meta_name = 'temp-url-key' + (("-%d" % (idx + 1) if idx else ""))
if key:
meta[meta_name] = key
_junk, account, _junk, _junk = split_path(path, 2, 4)
req.environ['swift.account/' + account] = self._fake_cache_env(
account, tempurl_keys)
return req
def _fake_cache_env(self, account, tempurl_keys=()):
# Fake out the caching layer so that get_account_info() finds its
# data. Include something that isn't tempurl keys to prove we skip it.
meta = {'user-job-title': 'Personal Trainer',
'user-real-name': 'Jim Shortz'}
for idx, key in enumerate(tempurl_keys):
meta_name = 'temp-url-key' + ("-%d" % (idx + 1) if idx else "")
if key:
meta[meta_name] = key
return {'status': 204,
'container_count': '0',
'total_object_count': '0',
'bytes': '0',
'meta': meta}
def _make_sig_env_body(self, path, redirect, max_file_size, max_file_count,
expires, key, user_agent=True):
sig = hmac.new(
key,
'%s\n%s\n%s\n%s\n%s' % (
path, redirect, max_file_size, max_file_count, expires),
sha1).hexdigest()
body = [
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="redirect"',
'',
redirect,
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="max_file_size"',
'',
str(max_file_size),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="max_file_count"',
'',
str(max_file_count),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="expires"',
'',
str(expires),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="signature"',
'',
sig,
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="file1"; '
'filename="testfile1.txt"',
'Content-Type: text/plain',
'',
'Test File\nOne\n',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="file2"; '
'filename="testfile2.txt"',
'Content-Type: text/plain',
'',
'Test\nFile\nTwo\n',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="file3"; filename=""',
'Content-Type: application/octet-stream',
'',
'',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR--',
'',
]
wsgi_errors = StringIO()
env = {
'CONTENT_TYPE': 'multipart/form-data; '
'boundary=----WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'HTTP_ACCEPT_ENCODING': 'gzip, deflate',
'HTTP_ACCEPT_LANGUAGE': 'en-us',
'HTTP_ACCEPT': 'text/html,application/xhtml+xml,application/xml;'
'q=0.9,*/*;q=0.8',
'HTTP_CONNECTION': 'keep-alive',
'HTTP_HOST': 'ubuntu:8080',
'HTTP_ORIGIN': 'file://',
'HTTP_USER_AGENT': 'Mozilla/5.0 (Macintosh; Intel Mac OS X '
'10_7_2) AppleWebKit/534.52.7 (KHTML, like Gecko) '
'Version/5.1.2 Safari/534.52.7',
'PATH_INFO': path,
'REMOTE_ADDR': '172.16.83.1',
'REQUEST_METHOD': 'POST',
'SCRIPT_NAME': '',
'SERVER_NAME': '172.16.83.128',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'wsgi.errors': wsgi_errors,
'wsgi.multiprocess': False,
'wsgi.multithread': True,
'wsgi.run_once': False,
'wsgi.url_scheme': 'http',
'wsgi.version': (1, 0),
}
if user_agent is False:
del env['HTTP_USER_AGENT']
return sig, env, body
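    # A minimal sketch of the signature scheme exercised by these tests (the
    # values are illustrative): a client HMAC-SHA1s the newline-joined path,
    # redirect, max_file_size, max_file_count and expires fields with the
    # account's temp-url key, exactly as _make_sig_env_body does above.
    #
    #     sig = hmac.new(
    #         'abc',
    #         '/v1/AUTH_test/container\nhttp://brim.net\n1024\n10\n%d' % expires,
    #         sha1).hexdigest()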
def test_passthrough(self):
for method in ('HEAD', 'GET', 'PUT', 'POST', 'DELETE'):
resp = self._make_request(
'/v1/a/c/o',
environ={'REQUEST_METHOD': method}).get_response(self.formpost)
self.assertEquals(resp.status_int, 401)
self.assertTrue('FormPost' not in resp.body)
def test_auth_scheme(self):
# FormPost rejects
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() - 10), key)
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '401 Unauthorized')
authenticate_v = None
for h, v in headers:
if h.lower() == 'www-authenticate':
authenticate_v = v
self.assertTrue('FormPost: Form Expired' in body)
self.assertEquals('Swift realm="unknown"', authenticate_v)
def test_safari(self):
key = 'abc'
path = '/v1/AUTH_test/container'
redirect = 'http://brim.net'
max_file_size = 1024
max_file_count = 10
expires = int(time() + 86400)
sig = hmac.new(
key,
'%s\n%s\n%s\n%s\n%s' % (
path, redirect, max_file_size, max_file_count, expires),
sha1).hexdigest()
wsgi_input = StringIO('\r\n'.join([
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="redirect"',
'',
redirect,
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="max_file_size"',
'',
str(max_file_size),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="max_file_count"',
'',
str(max_file_count),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="expires"',
'',
str(expires),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="signature"',
'',
sig,
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="file1"; '
'filename="testfile1.txt"',
'Content-Type: text/plain',
'',
'Test File\nOne\n',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="file2"; '
'filename="testfile2.txt"',
'Content-Type: text/plain',
'',
'Test\nFile\nTwo\n',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="file3"; filename=""',
'Content-Type: application/octet-stream',
'',
'',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR--',
'',
]))
wsgi_errors = StringIO()
env = {
'CONTENT_TYPE': 'multipart/form-data; '
'boundary=----WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'HTTP_ACCEPT_ENCODING': 'gzip, deflate',
'HTTP_ACCEPT_LANGUAGE': 'en-us',
'HTTP_ACCEPT': 'text/html,application/xhtml+xml,application/xml;'
'q=0.9,*/*;q=0.8',
'HTTP_CONNECTION': 'keep-alive',
'HTTP_HOST': 'ubuntu:8080',
'HTTP_ORIGIN': 'file://',
'HTTP_USER_AGENT': 'Mozilla/5.0 (Macintosh; Intel Mac OS X '
'10_7_2) AppleWebKit/534.52.7 (KHTML, like Gecko) '
'Version/5.1.2 Safari/534.52.7',
'PATH_INFO': path,
'REMOTE_ADDR': '172.16.83.1',
'REQUEST_METHOD': 'POST',
'SCRIPT_NAME': '',
'SERVER_NAME': '172.16.83.128',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'swift.account/AUTH_test': self._fake_cache_env(
'AUTH_test', [key]),
'swift.container/AUTH_test/container': {'meta': {}},
'wsgi.errors': wsgi_errors,
'wsgi.input': wsgi_input,
'wsgi.multiprocess': False,
'wsgi.multithread': True,
'wsgi.run_once': False,
'wsgi.url_scheme': 'http',
'wsgi.version': (1, 0),
}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '303 See Other')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location, 'http://brim.net?status=201&message=')
self.assertEquals(exc_info, None)
self.assertTrue('http://brim.net?status=201&message=' in body)
self.assertEquals(len(self.app.requests), 2)
self.assertEquals(self.app.requests[0].body, 'Test File\nOne\n')
self.assertEquals(self.app.requests[1].body, 'Test\nFile\nTwo\n')
def test_firefox(self):
key = 'abc'
path = '/v1/AUTH_test/container'
redirect = 'http://brim.net'
max_file_size = 1024
max_file_count = 10
expires = int(time() + 86400)
sig = hmac.new(
key,
'%s\n%s\n%s\n%s\n%s' % (
path, redirect, max_file_size, max_file_count, expires),
sha1).hexdigest()
wsgi_input = StringIO('\r\n'.join([
'-----------------------------168072824752491622650073',
'Content-Disposition: form-data; name="redirect"',
'',
redirect,
'-----------------------------168072824752491622650073',
'Content-Disposition: form-data; name="max_file_size"',
'',
str(max_file_size),
'-----------------------------168072824752491622650073',
'Content-Disposition: form-data; name="max_file_count"',
'',
str(max_file_count),
'-----------------------------168072824752491622650073',
'Content-Disposition: form-data; name="expires"',
'',
str(expires),
'-----------------------------168072824752491622650073',
'Content-Disposition: form-data; name="signature"',
'',
sig,
'-----------------------------168072824752491622650073',
'Content-Disposition: form-data; name="file1"; '
'filename="testfile1.txt"',
'Content-Type: text/plain',
'',
'Test File\nOne\n',
'-----------------------------168072824752491622650073',
'Content-Disposition: form-data; name="file2"; '
'filename="testfile2.txt"',
'Content-Type: text/plain',
'',
'Test\nFile\nTwo\n',
'-----------------------------168072824752491622650073',
'Content-Disposition: form-data; name="file3"; filename=""',
'Content-Type: application/octet-stream',
'',
'',
'-----------------------------168072824752491622650073--',
''
]))
wsgi_errors = StringIO()
env = {
'CONTENT_TYPE': 'multipart/form-data; '
'boundary=---------------------------168072824752491622650073',
'HTTP_ACCEPT_CHARSET': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'HTTP_ACCEPT_ENCODING': 'gzip, deflate',
'HTTP_ACCEPT_LANGUAGE': 'en-us,en;q=0.5',
'HTTP_ACCEPT': 'text/html,application/xhtml+xml,application/xml;'
'q=0.9,*/*;q=0.8',
'HTTP_CONNECTION': 'keep-alive',
'HTTP_HOST': 'ubuntu:8080',
'HTTP_USER_AGENT': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; '
'rv:8.0.1) Gecko/20100101 Firefox/8.0.1',
'PATH_INFO': '/v1/AUTH_test/container',
'REMOTE_ADDR': '172.16.83.1',
'REQUEST_METHOD': 'POST',
'SCRIPT_NAME': '',
'SERVER_NAME': '172.16.83.128',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'swift.account/AUTH_test': self._fake_cache_env(
'AUTH_test', [key]),
'swift.container/AUTH_test/container': {'meta': {}},
'wsgi.errors': wsgi_errors,
'wsgi.input': wsgi_input,
'wsgi.multiprocess': False,
'wsgi.multithread': True,
'wsgi.run_once': False,
'wsgi.url_scheme': 'http',
'wsgi.version': (1, 0),
}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '303 See Other')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location, 'http://brim.net?status=201&message=')
self.assertEquals(exc_info, None)
self.assertTrue('http://brim.net?status=201&message=' in body)
self.assertEquals(len(self.app.requests), 2)
self.assertEquals(self.app.requests[0].body, 'Test File\nOne\n')
self.assertEquals(self.app.requests[1].body, 'Test\nFile\nTwo\n')
def test_chrome(self):
key = 'abc'
path = '/v1/AUTH_test/container'
redirect = 'http://brim.net'
max_file_size = 1024
max_file_count = 10
expires = int(time() + 86400)
sig = hmac.new(
key,
'%s\n%s\n%s\n%s\n%s' % (
path, redirect, max_file_size, max_file_count, expires),
sha1).hexdigest()
wsgi_input = StringIO('\r\n'.join([
'------WebKitFormBoundaryq3CFxUjfsDMu8XsA',
'Content-Disposition: form-data; name="redirect"',
'',
redirect,
'------WebKitFormBoundaryq3CFxUjfsDMu8XsA',
'Content-Disposition: form-data; name="max_file_size"',
'',
str(max_file_size),
'------WebKitFormBoundaryq3CFxUjfsDMu8XsA',
'Content-Disposition: form-data; name="max_file_count"',
'',
str(max_file_count),
'------WebKitFormBoundaryq3CFxUjfsDMu8XsA',
'Content-Disposition: form-data; name="expires"',
'',
str(expires),
'------WebKitFormBoundaryq3CFxUjfsDMu8XsA',
'Content-Disposition: form-data; name="signature"',
'',
sig,
'------WebKitFormBoundaryq3CFxUjfsDMu8XsA',
'Content-Disposition: form-data; name="file1"; '
'filename="testfile1.txt"',
'Content-Type: text/plain',
'',
'Test File\nOne\n',
'------WebKitFormBoundaryq3CFxUjfsDMu8XsA',
'Content-Disposition: form-data; name="file2"; '
'filename="testfile2.txt"',
'Content-Type: text/plain',
'',
'Test\nFile\nTwo\n',
'------WebKitFormBoundaryq3CFxUjfsDMu8XsA',
'Content-Disposition: form-data; name="file3"; filename=""',
'Content-Type: application/octet-stream',
'',
'',
'------WebKitFormBoundaryq3CFxUjfsDMu8XsA--',
''
]))
wsgi_errors = StringIO()
env = {
'CONTENT_TYPE': 'multipart/form-data; '
'boundary=----WebKitFormBoundaryq3CFxUjfsDMu8XsA',
'HTTP_ACCEPT_CHARSET': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'HTTP_ACCEPT_ENCODING': 'gzip,deflate,sdch',
'HTTP_ACCEPT_LANGUAGE': 'en-US,en;q=0.8',
'HTTP_ACCEPT': 'text/html,application/xhtml+xml,application/xml;'
'q=0.9,*/*;q=0.8',
'HTTP_CACHE_CONTROL': 'max-age=0',
'HTTP_CONNECTION': 'keep-alive',
'HTTP_HOST': 'ubuntu:8080',
'HTTP_ORIGIN': 'null',
'HTTP_USER_AGENT': 'Mozilla/5.0 (Macintosh; Intel Mac OS X '
'10_7_2) AppleWebKit/535.7 (KHTML, like Gecko) '
'Chrome/16.0.912.63 Safari/535.7',
'PATH_INFO': '/v1/AUTH_test/container',
'REMOTE_ADDR': '172.16.83.1',
'REQUEST_METHOD': 'POST',
'SCRIPT_NAME': '',
'SERVER_NAME': '172.16.83.128',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'swift.account/AUTH_test': self._fake_cache_env(
'AUTH_test', [key]),
'swift.container/AUTH_test/container': {'meta': {}},
'wsgi.errors': wsgi_errors,
'wsgi.input': wsgi_input,
'wsgi.multiprocess': False,
'wsgi.multithread': True,
'wsgi.run_once': False,
'wsgi.url_scheme': 'http',
'wsgi.version': (1, 0),
}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '303 See Other')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location, 'http://brim.net?status=201&message=')
self.assertEquals(exc_info, None)
self.assertTrue('http://brim.net?status=201&message=' in body)
self.assertEquals(len(self.app.requests), 2)
self.assertEquals(self.app.requests[0].body, 'Test File\nOne\n')
self.assertEquals(self.app.requests[1].body, 'Test\nFile\nTwo\n')
def test_explorer(self):
key = 'abc'
path = '/v1/AUTH_test/container'
redirect = 'http://brim.net'
max_file_size = 1024
max_file_count = 10
expires = int(time() + 86400)
sig = hmac.new(
key,
'%s\n%s\n%s\n%s\n%s' % (
path, redirect, max_file_size, max_file_count, expires),
sha1).hexdigest()
wsgi_input = StringIO('\r\n'.join([
'-----------------------------7db20d93017c',
'Content-Disposition: form-data; name="redirect"',
'',
redirect,
'-----------------------------7db20d93017c',
'Content-Disposition: form-data; name="max_file_size"',
'',
str(max_file_size),
'-----------------------------7db20d93017c',
'Content-Disposition: form-data; name="max_file_count"',
'',
str(max_file_count),
'-----------------------------7db20d93017c',
'Content-Disposition: form-data; name="expires"',
'',
str(expires),
'-----------------------------7db20d93017c',
'Content-Disposition: form-data; name="signature"',
'',
sig,
'-----------------------------7db20d93017c',
'Content-Disposition: form-data; name="file1"; '
'filename="C:\\testfile1.txt"',
'Content-Type: text/plain',
'',
'Test File\nOne\n',
'-----------------------------7db20d93017c',
'Content-Disposition: form-data; name="file2"; '
'filename="C:\\testfile2.txt"',
'Content-Type: text/plain',
'',
'Test\nFile\nTwo\n',
'-----------------------------7db20d93017c',
'Content-Disposition: form-data; name="file3"; filename=""',
'Content-Type: application/octet-stream',
'',
'',
'-----------------------------7db20d93017c--',
''
]))
wsgi_errors = StringIO()
env = {
'CONTENT_TYPE': 'multipart/form-data; '
'boundary=---------------------------7db20d93017c',
'HTTP_ACCEPT_ENCODING': 'gzip, deflate',
'HTTP_ACCEPT_LANGUAGE': 'en-US',
'HTTP_ACCEPT': 'text/html, application/xhtml+xml, */*',
'HTTP_CACHE_CONTROL': 'no-cache',
'HTTP_CONNECTION': 'Keep-Alive',
'HTTP_HOST': '172.16.83.128:8080',
'HTTP_USER_AGENT': 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT '
'6.1; WOW64; Trident/5.0)',
'PATH_INFO': '/v1/AUTH_test/container',
'REMOTE_ADDR': '172.16.83.129',
'REQUEST_METHOD': 'POST',
'SCRIPT_NAME': '',
'SERVER_NAME': '172.16.83.128',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'swift.account/AUTH_test': self._fake_cache_env(
'AUTH_test', [key]),
'swift.container/AUTH_test/container': {'meta': {}},
'wsgi.errors': wsgi_errors,
'wsgi.input': wsgi_input,
'wsgi.multiprocess': False,
'wsgi.multithread': True,
'wsgi.run_once': False,
'wsgi.url_scheme': 'http',
'wsgi.version': (1, 0),
}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '303 See Other')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location, 'http://brim.net?status=201&message=')
self.assertEquals(exc_info, None)
self.assertTrue('http://brim.net?status=201&message=' in body)
self.assertEquals(len(self.app.requests), 2)
self.assertEquals(self.app.requests[0].body, 'Test File\nOne\n')
self.assertEquals(self.app.requests[1].body, 'Test\nFile\nTwo\n')
def test_messed_up_start(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', 'http://brim.net', 5, 10,
int(time() + 86400), key)
env['wsgi.input'] = StringIO('XX' + '\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
env['swift.container/AUTH_test/container'] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
def log_assert_int_status(env, response_status_int):
self.assertTrue(isinstance(response_status_int, int))
self.formpost._log_request = log_assert_int_status
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '400 Bad Request')
self.assertEquals(exc_info, None)
self.assertTrue('FormPost: invalid starting boundary' in body)
self.assertEquals(len(self.app.requests), 0)
def test_max_file_size_exceeded(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', 'http://brim.net', 5, 10,
int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
env['swift.container/AUTH_test/container'] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '400 Bad Request')
self.assertEquals(exc_info, None)
self.assertTrue('FormPost: max_file_size exceeded' in body)
self.assertEquals(len(self.app.requests), 0)
def test_max_file_count_exceeded(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', 'http://brim.net', 1024, 1,
int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
env['swift.container/AUTH_test/container'] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '303 See Other')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(
location,
'http://brim.net?status=400&message=max%20file%20count%20exceeded')
self.assertEquals(exc_info, None)
self.assertTrue(
'http://brim.net?status=400&message=max%20file%20count%20exceeded'
in body)
self.assertEquals(len(self.app.requests), 1)
self.assertEquals(self.app.requests[0].body, 'Test File\nOne\n')
def test_subrequest_does_not_pass_query(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
        env['QUERY_STRING'] = 'this=should&not=get&passed'
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
env['swift.container/AUTH_test/container'] = {'meta': {}}
self.app = FakeApp(
iter([('201 Created', {}, ''),
('201 Created', {}, '')]),
check_no_query_string=True)
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
# Make sure we 201 Created, which means we made the final subrequest
# (and FakeApp verifies that no QUERY_STRING got passed).
self.assertEquals(status, '201 Created')
self.assertEquals(exc_info, None)
self.assertTrue('201 Created' in body)
self.assertEquals(len(self.app.requests), 2)
def test_subrequest_fails(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', 'http://brim.net', 1024, 10,
int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
env['swift.container/AUTH_test/container'] = {'meta': {}}
self.app = FakeApp(iter([('404 Not Found', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '303 See Other')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location, 'http://brim.net?status=404&message=')
self.assertEquals(exc_info, None)
self.assertTrue('http://brim.net?status=404&message=' in body)
self.assertEquals(len(self.app.requests), 1)
def test_truncated_attr_value(self):
key = 'abc'
redirect = 'a' * formpost.MAX_VALUE_LENGTH
max_file_size = 1024
max_file_count = 10
expires = int(time() + 86400)
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', redirect, max_file_size, max_file_count,
expires, key)
# Tack on an extra char to redirect, but shouldn't matter since it
# should get truncated off on read.
redirect += 'b'
env['wsgi.input'] = StringIO('\r\n'.join([
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="redirect"',
'',
redirect,
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="max_file_size"',
'',
str(max_file_size),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="max_file_count"',
'',
str(max_file_count),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="expires"',
'',
str(expires),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="signature"',
'',
sig,
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="file1"; '
'filename="testfile1.txt"',
'Content-Type: text/plain',
'',
'Test File\nOne\n',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="file2"; '
'filename="testfile2.txt"',
'Content-Type: text/plain',
'',
'Test\nFile\nTwo\n',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="file3"; filename=""',
'Content-Type: application/octet-stream',
'',
'',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR--',
'',
]))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
env['swift.container/AUTH_test/container'] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '303 See Other')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(
location,
('a' * formpost.MAX_VALUE_LENGTH) + '?status=201&message=')
self.assertEquals(exc_info, None)
self.assertTrue(
('a' * formpost.MAX_VALUE_LENGTH) + '?status=201&message=' in body)
self.assertEquals(len(self.app.requests), 2)
self.assertEquals(self.app.requests[0].body, 'Test File\nOne\n')
self.assertEquals(self.app.requests[1].body, 'Test\nFile\nTwo\n')
def test_no_file_to_process(self):
key = 'abc'
redirect = 'http://brim.net'
max_file_size = 1024
max_file_count = 10
expires = int(time() + 86400)
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', redirect, max_file_size, max_file_count,
expires, key)
env['wsgi.input'] = StringIO('\r\n'.join([
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="redirect"',
'',
redirect,
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="max_file_size"',
'',
str(max_file_size),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="max_file_count"',
'',
str(max_file_count),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="expires"',
'',
str(expires),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="signature"',
'',
sig,
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR--',
'',
]))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
env['swift.container/AUTH_test/container'] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '303 See Other')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(
location,
'http://brim.net?status=400&message=no%20files%20to%20process')
self.assertEquals(exc_info, None)
self.assertTrue(
'http://brim.net?status=400&message=no%20files%20to%20process'
in body)
self.assertEquals(len(self.app.requests), 0)
def test_formpost_without_useragent(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', 'http://redirect', 1024, 10,
int(time() + 86400), key, user_agent=False)
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
env['swift.container/AUTH_test/container'] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
def start_response(s, h, e=None):
pass
body = ''.join(self.formpost(env, start_response))
self.assertTrue('User-Agent' in self.app.requests[0].headers)
self.assertEquals(self.app.requests[0].headers['User-Agent'],
'FormPost')
def test_formpost_with_origin(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', 'http://redirect', 1024, 10,
int(time() + 86400), key, user_agent=False)
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
env['swift.container/AUTH_test/container'] = {'meta': {}}
env['HTTP_ORIGIN'] = 'http://localhost:5000'
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created',
{'Access-Control-Allow-Origin':
'http://localhost:5000'}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
headers = {}
def start_response(s, h, e=None):
for k, v in h:
headers[k] = v
body = ''.join(self.formpost(env, start_response))
self.assertEquals(headers['Access-Control-Allow-Origin'],
'http://localhost:5000')
def test_formpost_with_multiple_keys(self):
key = 'ernie'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', 'http://redirect', 1024, 10,
int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(body))
# Stick it in X-Account-Meta-Temp-URL-Key-2 and make sure we get it
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', ['bert', key])
env['swift.container/AUTH_test/container'] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
body = ''.join(self.formpost(env, start_response))
self.assertEqual('303 See Other', status[0])
self.assertEqual(
'http://redirect?status=201&message=',
dict(headers[0]).get('Location'))
def test_formpost_with_multiple_container_keys(self):
first_key = 'ernie'
second_key = 'bert'
keys = [first_key, second_key]
meta = {}
for idx, key in enumerate(keys):
meta_name = 'temp-url-key' + ("-%d" % (idx + 1) if idx else "")
if key:
meta[meta_name] = key
for key in keys:
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', 'http://redirect', 1024, 10,
int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env('AUTH_test')
# Stick it in X-Container-Meta-Temp-URL-Key-2 and ensure we get it
env['swift.container/AUTH_test/container'] = {'meta': meta}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
body = ''.join(self.formpost(env, start_response))
self.assertEqual('303 See Other', status[0])
self.assertEqual(
'http://redirect?status=201&message=',
dict(headers[0]).get('Location'))
def test_redirect(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', 'http://redirect', 1024, 10,
int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
env['swift.container/AUTH_test/container'] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '303 See Other')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location, 'http://redirect?status=201&message=')
self.assertEquals(exc_info, None)
self.assertTrue(location in body)
self.assertEquals(len(self.app.requests), 2)
self.assertEquals(self.app.requests[0].body, 'Test File\nOne\n')
self.assertEquals(self.app.requests[1].body, 'Test\nFile\nTwo\n')
def test_redirect_with_query(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', 'http://redirect?one=two', 1024, 10,
int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
env['swift.container/AUTH_test/container'] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '303 See Other')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location,
'http://redirect?one=two&status=201&message=')
self.assertEquals(exc_info, None)
self.assertTrue(location in body)
self.assertEquals(len(self.app.requests), 2)
self.assertEquals(self.app.requests[0].body, 'Test File\nOne\n')
self.assertEquals(self.app.requests[1].body, 'Test\nFile\nTwo\n')
def test_no_redirect(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
env['swift.container/AUTH_test/container'] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '201 Created')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location, None)
self.assertEquals(exc_info, None)
self.assertTrue('201 Created' in body)
self.assertEquals(len(self.app.requests), 2)
self.assertEquals(self.app.requests[0].body, 'Test File\nOne\n')
self.assertEquals(self.app.requests[1].body, 'Test\nFile\nTwo\n')
def test_no_redirect_expired(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() - 10), key)
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '401 Unauthorized')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location, None)
self.assertEquals(exc_info, None)
self.assertTrue('FormPost: Form Expired' in body)
def test_no_redirect_invalid_sig(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(body))
# Change key to invalidate sig
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key + ' is bogus now'])
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '401 Unauthorized')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location, None)
self.assertEquals(exc_info, None)
self.assertTrue('FormPost: Invalid Signature' in body)
def test_no_redirect_with_error(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
env['wsgi.input'] = StringIO('XX' + '\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '400 Bad Request')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location, None)
self.assertEquals(exc_info, None)
self.assertTrue('FormPost: invalid starting boundary' in body)
def test_no_v1(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v2/AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '401 Unauthorized')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location, None)
self.assertEquals(exc_info, None)
self.assertTrue('FormPost: Invalid Signature' in body)
def test_empty_v1(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'//AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '401 Unauthorized')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location, None)
self.assertEquals(exc_info, None)
self.assertTrue('FormPost: Invalid Signature' in body)
def test_empty_account(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1//container', '', 1024, 10, int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '401 Unauthorized')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location, None)
self.assertEquals(exc_info, None)
self.assertTrue('FormPost: Invalid Signature' in body)
def test_wrong_account(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_tst/container', '', 1024, 10, int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
self.app = FakeApp(iter([
('200 Ok', {'x-account-meta-temp-url-key': 'def'}, ''),
('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '401 Unauthorized')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location, None)
self.assertEquals(exc_info, None)
self.assertTrue('FormPost: Invalid Signature' in body)
def test_no_container(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test', '', 1024, 10, int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '401 Unauthorized')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location, None)
self.assertEquals(exc_info, None)
self.assertTrue('FormPost: Invalid Signature' in body)
def test_completely_non_int_expires(self):
key = 'abc'
expires = int(time() + 86400)
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, expires, key)
for i, v in enumerate(body):
if v == str(expires):
body[i] = 'badvalue'
break
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '400 Bad Request')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location, None)
self.assertEquals(exc_info, None)
self.assertTrue('FormPost: expired not an integer' in body)
def test_x_delete_at(self):
delete_at = int(time() + 100)
x_delete_body_part = [
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="x_delete_at"',
'',
str(delete_at),
]
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(x_delete_body_part + body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
env['swift.container/AUTH_test/container'] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '201 Created')
self.assertTrue('201 Created' in body)
self.assertEquals(len(self.app.requests), 2)
self.assertTrue("X-Delete-At" in self.app.requests[0].headers)
self.assertTrue("X-Delete-At" in self.app.requests[1].headers)
self.assertEquals(delete_at,
self.app.requests[0].headers["X-Delete-At"])
self.assertEquals(delete_at,
self.app.requests[1].headers["X-Delete-At"])
def test_x_delete_at_not_int(self):
delete_at = "2014-07-16"
x_delete_body_part = [
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="x_delete_at"',
'',
str(delete_at),
]
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(x_delete_body_part + body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '400 Bad Request')
self.assertTrue('FormPost: x_delete_at not an integer' in body)
def test_x_delete_after(self):
delete_after = 100
x_delete_body_part = [
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="x_delete_after"',
'',
str(delete_after),
]
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(x_delete_body_part + body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
env['swift.container/AUTH_test/container'] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '201 Created')
self.assertTrue('201 Created' in body)
self.assertEquals(len(self.app.requests), 2)
self.assertTrue("X-Delete-After" in self.app.requests[0].headers)
self.assertTrue("X-Delete-After" in self.app.requests[1].headers)
self.assertEqual(delete_after,
self.app.requests[0].headers["X-Delete-After"])
self.assertEqual(delete_after,
self.app.requests[1].headers["X-Delete-After"])
def test_x_delete_after_not_int(self):
delete_after = "2 days"
x_delete_body_part = [
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="x_delete_after"',
'',
str(delete_after),
]
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(x_delete_body_part + body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '400 Bad Request')
self.assertTrue('FormPost: x_delete_after not an integer' in body)
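# Every test above captures the WSGI response through the same closure
# boilerplate: one-element status/headers/exc_info lists mutated by a local
# start_response. A minimal sketch of how that could be factored out; the
# helper name is hypothetical and it is not part of the original suite:
def _call_wsgi_app_sketch(app, env):
    """Call a WSGI app and return (status, headers, exc_info, body)."""
    captured = {}
    def start_response(s, h, e=None):
        captured['status'] = s
        captured['headers'] = h
        captured['exc_info'] = e
    body = ''.join(app(env, start_response))
    return (captured.get('status'), captured.get('headers'),
            captured.get('exc_info'), body)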
if __name__ == '__main__':
unittest.main()
|
|
'''
Common functions used throughout this cookbook's codebase.
'''
import ast
import collections
import logging
import os
import sys
from datetime import datetime, timedelta
from time import sleep
import boto
from boto.ec2.autoscale import Tag
from boto.exception import BotoServerError
verbose = False # keeping the linter happy
dry_run = False
def dry_run_necessaries(d, v):
global verbose
global dry_run
if d:
print(
"This is a dry run. Actions will not be executed and output is verbose.")
verbose = True
dry_run = True
elif v:
verbose = True
return verbose, dry_run
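# Hedged usage note (the option names below are hypothetical, not defined in
# this module): callers are expected to pass their own parsed CLI flags
# through, e.g. `verbose, dry_run = dry_run_necessaries(opts.dry_run,
# opts.verbose)`, so that the module-level verbose/dry_run globals drive
# print_verbose() and the dry-run guard in create_tag().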
def set_up_logging():
logger = logging.getLogger(__name__)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.setLevel(logging.INFO)
return logger
def print_verbose(filename, log_lvl, *args):
if verbose:
log_lvl_map = {"info": 20, "warn": 30, "err": 40, "crit": 50}
for arg in args:
logger.log(log_lvl_map[log_lvl], filename + " - " + str(arg))
def handle_exception(exception):
exc_traceback = sys.exc_info()[2]
print_verbose(os.path.basename(__file__), 'err', "Exception caught on line %s of %s: %s" %
(exc_traceback.tb_lineno, exc_traceback.tb_frame.f_code.co_filename, str(exception)))
def get_launch_config(as_group):
try:
return as_group.connection.get_all_launch_configurations(names=[as_group.launch_config_name])[0]
except BotoServerError as e:
throttle_response(e)
return get_launch_config(as_group)
def get_image(as_group):
try:
launch_config = get_launch_config(as_group)
ec2_conn = boto.ec2.connect_to_region(as_group.connection.region.name)
image = ec2_conn.get_image(launch_config.image_id)
return image
except Exception as e:
handle_exception(e)
sys.exit(1)
def throttle_response(e):
if e.error_code == 'Throttling':
print_verbose(
os.path.basename(__file__), 'warn', 'Pausing for AWS throttling...')
sleep(1)
else:
handle_exception(e)
sys.exit(1)
def update_tags(as_conn, health_tags):
try:
as_conn.create_or_update_tags(health_tags)
except BotoServerError as e:
throttle_response(e)
update_tags(as_conn, health_tags)
def create_tag(as_group, key, value):
try:
tag = Tag(key=key, value=value, resource_id=as_group.name)
print_verbose(
os.path.basename(__file__), 'info', "Creating tag for %s." % key)
if dry_run:
return True
return as_group.connection.create_or_update_tags([tag])
# this often indicates tag limit has been exceeded
except BotoServerError as e:
throttle_response(e)
return create_tag(as_group, key, value)
def get_tag_dict_value(as_group, tag_key):
try:
tag_list = [t for t in as_group.tags if t.key == tag_key]
if len(tag_list) > 0:
return ast.literal_eval(tag_list[0].value)
else:
return False
except Exception as e:
handle_exception(e)
return False # this value needs to be tested each time
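# Hedged note (the example value is hypothetical, not taken from the
# cookbook): get_tag_dict_value() assumes the tag value is a Python-literal
# string that ast.literal_eval() can turn back into a dict, e.g. an
# 'ssr_config' tag of "{'enabled': True, 'original_bid': '0.05'}", which is
# what lets get_bid() below read ['original_bid'] and get_ssr_groups() check
# ['enabled'].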
def get_bid(as_group):
try:
config = get_launch_config(as_group)
if config.spot_price:
return config.spot_price
else:
return get_tag_dict_value(as_group, 'ssr_config')['original_bid']
except BotoServerError as e:
throttle_response(e)
return get_bid(as_group)
except Exception as e:
handle_exception(e)
sys.exit(1)
def update_az_health_list_tag(as_group, health_dict):
try:
health_values = get_tag_dict_value(as_group, 'AZ_status')
for k, v in health_dict.items():
health_values[k]['health'].pop()
health_values[k]['health'].insert(0, v)
print_verbose(os.path.basename(__file__), 'info', health_values)
tag = Tag(
key='AZ_status', value=health_values, resource_id=as_group.name)
return tag
except Exception as e:
handle_exception(e)
sys.exit(1)
def mark_asg_az_disabled(as_group, zone):
try:
health_values = get_tag_dict_value(as_group, 'AZ_status')
health_values[zone]['use'] = False
print_verbose(os.path.basename(__file__), 'info', health_values)
tag = Tag(
key='AZ_status', value=health_values, resource_id=as_group.name)
return tag
except Exception as e:
handle_exception(e)
sys.exit(1)
def get_ssr_groups(as_conn):
try:
return [g for g in as_conn.get_all_groups() if
[t for t in g.tags if t.key == 'ssr_config' and get_tag_dict_value(g, 'ssr_config') and get_tag_dict_value(g, 'ssr_config')['enabled']]]
except BotoServerError as e:
throttle_response(e)
return get_ssr_groups(as_conn)
def get_current_spot_prices(as_group):
try:
ec2_conn = boto.ec2.connect_to_region(as_group.connection.region.name)
start_time = datetime.now() - timedelta(minutes=5)
start_time = start_time.isoformat()
end_time = datetime.now().isoformat()
image = get_image(as_group)
if image.platform == 'windows':
os_type = 'Windows'
elif 'SUSE Linux Enterprise Server' in image.description:
os_type = 'SUSE Linux'
else:
os_type = 'Linux/UNIX'
if as_group.vpc_zone_identifier:
os_type += ' (Amazon VPC)'
prices = ec2_conn.get_spot_price_history(
product_description=os_type,
end_time=end_time,
start_time=start_time,
instance_type=get_launch_config(as_group).instance_type
)
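        # get_spot_price_history can return more than one quote per AZ inside
        # the 5-minute window; the loop below sorts those duplicates by
        # timestamp and drops the oldest one, so with the usual pair of quotes
        # only the most recent price per zone is kept.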
for AZ in [x for x, y in collections.Counter([p.availability_zone for p in prices]).items() if y > 1]:
old_duplicates = [p for p in prices if p.availability_zone == AZ]
old_duplicates.sort(key=lambda x: x.timestamp)
for dup in old_duplicates[:1]:
prices.remove(dup)
return prices
except Exception as e:
handle_exception(e)
sys.exit(1)
def get_potential_azs(as_group):
try:
ec2_conn = boto.ec2.connect_to_region(as_group.connection.region.name)
all_zones = ec2_conn.get_all_zones()
prices = get_current_spot_prices(as_group)
return [z.name for z in all_zones if z.name in list(set([p.availability_zone for p in prices])) and z.state == 'available']
except Exception as e:
handle_exception(e)
sys.exit(1)
def main():
pass
if __name__ == "__main__":
sys.exit(main())
else:
    # `global` is redundant at module scope; plain assignment already binds
    # the module-level logger used by print_verbose().
    logger = set_up_logging()
|
|
"""Representation of Z-Wave binary sensors."""
from __future__ import annotations
from dataclasses import dataclass
import logging
from zwave_js_server.client import Client as ZwaveClient
from zwave_js_server.const import CommandClass
from zwave_js_server.const.command_class.lock import DOOR_STATUS_PROPERTY
from zwave_js_server.const.command_class.notification import (
CC_SPECIFIC_NOTIFICATION_TYPE,
)
from homeassistant.components.binary_sensor import (
DOMAIN as BINARY_SENSOR_DOMAIN,
BinarySensorDeviceClass,
BinarySensorEntity,
BinarySensorEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DATA_CLIENT, DOMAIN
from .discovery import ZwaveDiscoveryInfo
from .entity import ZWaveBaseEntity
LOGGER = logging.getLogger(__name__)
NOTIFICATION_SMOKE_ALARM = "1"
NOTIFICATION_CARBON_MONOOXIDE = "2"
NOTIFICATION_CARBON_DIOXIDE = "3"
NOTIFICATION_HEAT = "4"
NOTIFICATION_WATER = "5"
NOTIFICATION_ACCESS_CONTROL = "6"
NOTIFICATION_HOME_SECURITY = "7"
NOTIFICATION_POWER_MANAGEMENT = "8"
NOTIFICATION_SYSTEM = "9"
NOTIFICATION_EMERGENCY = "10"
NOTIFICATION_CLOCK = "11"
NOTIFICATION_APPLIANCE = "12"
NOTIFICATION_HOME_HEALTH = "13"
NOTIFICATION_SIREN = "14"
NOTIFICATION_WATER_VALVE = "15"
NOTIFICATION_WEATHER = "16"
NOTIFICATION_IRRIGATION = "17"
NOTIFICATION_GAS = "18"
@dataclass
class NotificationZWaveJSEntityDescription(BinarySensorEntityDescription):
"""Represent a Z-Wave JS binary sensor entity description."""
off_state: str = "0"
states: tuple[str, ...] | None = None
@dataclass
class PropertyZWaveJSMixin:
"""Represent the mixin for property sensor descriptions."""
on_states: tuple[str, ...]
@dataclass
class PropertyZWaveJSEntityDescription(
BinarySensorEntityDescription, PropertyZWaveJSMixin
):
"""Represent the entity description for property name sensors."""
# Mappings for Notification sensors
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/config/config/notifications.json
NOTIFICATION_SENSOR_MAPPINGS: tuple[NotificationZWaveJSEntityDescription, ...] = (
NotificationZWaveJSEntityDescription(
# NotificationType 1: Smoke Alarm - State Id's 1 and 2 - Smoke detected
key=NOTIFICATION_SMOKE_ALARM,
states=("1", "2"),
device_class=BinarySensorDeviceClass.SMOKE,
),
NotificationZWaveJSEntityDescription(
# NotificationType 1: Smoke Alarm - All other State Id's
key=NOTIFICATION_SMOKE_ALARM,
device_class=BinarySensorDeviceClass.PROBLEM,
),
NotificationZWaveJSEntityDescription(
# NotificationType 2: Carbon Monoxide - State Id's 1 and 2
key=NOTIFICATION_CARBON_MONOOXIDE,
states=("1", "2"),
device_class=BinarySensorDeviceClass.GAS,
),
NotificationZWaveJSEntityDescription(
# NotificationType 2: Carbon Monoxide - All other State Id's
key=NOTIFICATION_CARBON_MONOOXIDE,
device_class=BinarySensorDeviceClass.PROBLEM,
),
NotificationZWaveJSEntityDescription(
# NotificationType 3: Carbon Dioxide - State Id's 1 and 2
key=NOTIFICATION_CARBON_DIOXIDE,
states=("1", "2"),
device_class=BinarySensorDeviceClass.GAS,
),
NotificationZWaveJSEntityDescription(
# NotificationType 3: Carbon Dioxide - All other State Id's
key=NOTIFICATION_CARBON_DIOXIDE,
device_class=BinarySensorDeviceClass.PROBLEM,
),
NotificationZWaveJSEntityDescription(
# NotificationType 4: Heat - State Id's 1, 2, 5, 6 (heat/underheat)
key=NOTIFICATION_HEAT,
states=("1", "2", "5", "6"),
device_class=BinarySensorDeviceClass.HEAT,
),
NotificationZWaveJSEntityDescription(
# NotificationType 4: Heat - All other State Id's
key=NOTIFICATION_HEAT,
device_class=BinarySensorDeviceClass.PROBLEM,
),
NotificationZWaveJSEntityDescription(
# NotificationType 5: Water - State Id's 1, 2, 3, 4
key=NOTIFICATION_WATER,
states=("1", "2", "3", "4"),
device_class=BinarySensorDeviceClass.MOISTURE,
),
NotificationZWaveJSEntityDescription(
# NotificationType 5: Water - All other State Id's
key=NOTIFICATION_WATER,
device_class=BinarySensorDeviceClass.PROBLEM,
),
NotificationZWaveJSEntityDescription(
# NotificationType 6: Access Control - State Id's 1, 2, 3, 4 (Lock)
key=NOTIFICATION_ACCESS_CONTROL,
states=("1", "2", "3", "4"),
device_class=BinarySensorDeviceClass.LOCK,
),
NotificationZWaveJSEntityDescription(
# NotificationType 6: Access Control - State Id's 11 (Lock jammed)
key=NOTIFICATION_ACCESS_CONTROL,
states=("11",),
device_class=BinarySensorDeviceClass.PROBLEM,
entity_category=EntityCategory.DIAGNOSTIC,
),
NotificationZWaveJSEntityDescription(
# NotificationType 6: Access Control - State Id 22 (door/window open)
key=NOTIFICATION_ACCESS_CONTROL,
off_state="23",
states=("22", "23"),
device_class=BinarySensorDeviceClass.DOOR,
),
NotificationZWaveJSEntityDescription(
# NotificationType 7: Home Security - State Id's 1, 2 (intrusion)
key=NOTIFICATION_HOME_SECURITY,
states=("1", "2"),
device_class=BinarySensorDeviceClass.SAFETY,
),
NotificationZWaveJSEntityDescription(
# NotificationType 7: Home Security - State Id's 3, 4, 9 (tampering)
key=NOTIFICATION_HOME_SECURITY,
states=("3", "4", "9"),
device_class=BinarySensorDeviceClass.TAMPER,
entity_category=EntityCategory.DIAGNOSTIC,
),
NotificationZWaveJSEntityDescription(
# NotificationType 7: Home Security - State Id's 5, 6 (glass breakage)
key=NOTIFICATION_HOME_SECURITY,
states=("5", "6"),
device_class=BinarySensorDeviceClass.SAFETY,
),
NotificationZWaveJSEntityDescription(
# NotificationType 7: Home Security - State Id's 7, 8 (motion)
key=NOTIFICATION_HOME_SECURITY,
states=("7", "8"),
device_class=BinarySensorDeviceClass.MOTION,
),
NotificationZWaveJSEntityDescription(
# NotificationType 8: Power Management -
# State Id's 2, 3 (Mains status)
key=NOTIFICATION_POWER_MANAGEMENT,
off_state="2",
states=("2", "3"),
device_class=BinarySensorDeviceClass.PLUG,
entity_category=EntityCategory.DIAGNOSTIC,
),
NotificationZWaveJSEntityDescription(
# NotificationType 8: Power Management -
# State Id's 6, 7, 8, 9 (power status)
key=NOTIFICATION_POWER_MANAGEMENT,
states=("6", "7", "8", "9"),
device_class=BinarySensorDeviceClass.SAFETY,
entity_category=EntityCategory.DIAGNOSTIC,
),
NotificationZWaveJSEntityDescription(
# NotificationType 8: Power Management -
# State Id's 10, 11, 17 (Battery maintenance status)
key=NOTIFICATION_POWER_MANAGEMENT,
states=("10", "11", "17"),
device_class=BinarySensorDeviceClass.BATTERY,
entity_category=EntityCategory.DIAGNOSTIC,
),
NotificationZWaveJSEntityDescription(
# NotificationType 9: System - State Id's 1, 2, 3, 4, 6, 7
key=NOTIFICATION_SYSTEM,
states=("1", "2", "3", "4", "6", "7"),
device_class=BinarySensorDeviceClass.PROBLEM,
entity_category=EntityCategory.DIAGNOSTIC,
),
NotificationZWaveJSEntityDescription(
# NotificationType 10: Emergency - State Id's 1, 2, 3
key=NOTIFICATION_EMERGENCY,
states=("1", "2", "3"),
device_class=BinarySensorDeviceClass.PROBLEM,
),
NotificationZWaveJSEntityDescription(
# NotificationType 14: Siren
key=NOTIFICATION_SIREN,
states=("1",),
device_class=BinarySensorDeviceClass.SOUND,
),
NotificationZWaveJSEntityDescription(
# NotificationType 18: Gas
key=NOTIFICATION_GAS,
states=("1", "2", "3", "4"),
device_class=BinarySensorDeviceClass.GAS,
),
NotificationZWaveJSEntityDescription(
# NotificationType 18: Gas
key=NOTIFICATION_GAS,
states=("6",),
device_class=BinarySensorDeviceClass.PROBLEM,
),
)
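# Note on ordering: async_add_binary_sensor() below picks the first entry in
# this tuple whose key matches the notification type and whose states (if
# any) contain the reported state key, so the specific state lists must stay
# ahead of each type's catch-all PROBLEM entry.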
# Mappings for property sensors
PROPERTY_SENSOR_MAPPINGS: dict[str, PropertyZWaveJSEntityDescription] = {
DOOR_STATUS_PROPERTY: PropertyZWaveJSEntityDescription(
key=DOOR_STATUS_PROPERTY,
on_states=("open",),
device_class=BinarySensorDeviceClass.DOOR,
),
}
# Mappings for boolean sensors
BOOLEAN_SENSOR_MAPPINGS: dict[str, BinarySensorEntityDescription] = {
CommandClass.BATTERY: BinarySensorEntityDescription(
key=str(CommandClass.BATTERY),
device_class=BinarySensorDeviceClass.BATTERY,
entity_category=EntityCategory.DIAGNOSTIC,
),
}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up Z-Wave binary sensor from config entry."""
client: ZwaveClient = hass.data[DOMAIN][config_entry.entry_id][DATA_CLIENT]
@callback
def async_add_binary_sensor(info: ZwaveDiscoveryInfo) -> None:
"""Add Z-Wave Binary Sensor."""
entities: list[BinarySensorEntity] = []
if info.platform_hint == "notification":
# Get all sensors from Notification CC states
for state_key in info.primary_value.metadata.states:
# ignore idle key (0)
if state_key == "0":
continue
notification_description: NotificationZWaveJSEntityDescription | None = (
None
)
for description in NOTIFICATION_SENSOR_MAPPINGS:
if (
int(description.key)
== info.primary_value.metadata.cc_specific[
CC_SPECIFIC_NOTIFICATION_TYPE
]
) and (not description.states or state_key in description.states):
notification_description = description
break
if (
notification_description
and notification_description.off_state == state_key
):
continue
entities.append(
ZWaveNotificationBinarySensor(
config_entry, client, info, state_key, notification_description
)
)
elif info.platform_hint == "property" and (
property_description := PROPERTY_SENSOR_MAPPINGS.get(
info.primary_value.property_name
)
):
entities.append(
ZWavePropertyBinarySensor(
config_entry, client, info, property_description
)
)
else:
# boolean sensor
entities.append(ZWaveBooleanBinarySensor(config_entry, client, info))
async_add_entities(entities)
config_entry.async_on_unload(
async_dispatcher_connect(
hass,
f"{DOMAIN}_{config_entry.entry_id}_add_{BINARY_SENSOR_DOMAIN}",
async_add_binary_sensor,
)
)
class ZWaveBooleanBinarySensor(ZWaveBaseEntity, BinarySensorEntity):
"""Representation of a Z-Wave binary_sensor."""
def __init__(
self,
config_entry: ConfigEntry,
client: ZwaveClient,
info: ZwaveDiscoveryInfo,
) -> None:
"""Initialize a ZWaveBooleanBinarySensor entity."""
super().__init__(config_entry, client, info)
# Entity class attributes
self._attr_name = self.generate_name(include_value_name=True)
if description := BOOLEAN_SENSOR_MAPPINGS.get(
self.info.primary_value.command_class
):
self.entity_description = description
@property
def is_on(self) -> bool | None:
"""Return if the sensor is on or off."""
if self.info.primary_value.value is None:
return None
return bool(self.info.primary_value.value)
class ZWaveNotificationBinarySensor(ZWaveBaseEntity, BinarySensorEntity):
"""Representation of a Z-Wave binary_sensor from Notification CommandClass."""
def __init__(
self,
config_entry: ConfigEntry,
client: ZwaveClient,
info: ZwaveDiscoveryInfo,
state_key: str,
description: NotificationZWaveJSEntityDescription | None = None,
) -> None:
"""Initialize a ZWaveNotificationBinarySensor entity."""
super().__init__(config_entry, client, info)
self.state_key = state_key
if description:
self.entity_description = description
# Entity class attributes
self._attr_name = self.generate_name(
include_value_name=True,
alternate_value_name=self.info.primary_value.property_name,
additional_info=[self.info.primary_value.metadata.states[self.state_key]],
)
self._attr_unique_id = f"{self._attr_unique_id}.{self.state_key}"
@property
def is_on(self) -> bool | None:
"""Return if the sensor is on or off."""
if self.info.primary_value.value is None:
return None
return int(self.info.primary_value.value) == int(self.state_key)
class ZWavePropertyBinarySensor(ZWaveBaseEntity, BinarySensorEntity):
"""Representation of a Z-Wave binary_sensor from a property."""
entity_description: PropertyZWaveJSEntityDescription
def __init__(
self,
config_entry: ConfigEntry,
client: ZwaveClient,
info: ZwaveDiscoveryInfo,
description: PropertyZWaveJSEntityDescription,
) -> None:
"""Initialize a ZWavePropertyBinarySensor entity."""
super().__init__(config_entry, client, info)
self.entity_description = description
self._attr_name = self.generate_name(include_value_name=True)
@property
def is_on(self) -> bool | None:
"""Return if the sensor is on or off."""
if self.info.primary_value.value is None:
return None
return self.info.primary_value.value in self.entity_description.on_states
|
|
""" python-pcre
Copyright (c) 2012-2015, Arkadiusz Wahlig
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the <organization> nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import _pcre
__version__ = '0.6'
class Pattern(_pcre.Pattern):
def search(self, string, pos=-1, endpos=-1, flags=0):
try:
return Match(self, string, pos, endpos, flags)
except NoMatch:
pass
def match(self, string, pos=-1, endpos=-1, flags=0):
try:
return Match(self, string, pos, endpos, flags | ANCHORED)
except NoMatch:
pass
def split(self, string, maxsplit=0, flags=0):
output = []
pos = n = 0
for match in self.finditer(string, flags=flags):
start, end = match.span()
if start != end:
output.append(string[pos:start])
output.extend(match.groups())
pos = end
n += 1
if 0 < maxsplit <= n:
break
output.append(string[pos:])
return output
def findall(self, string, pos=-1, endpos=-1, flags=0):
matches = self.finditer(string, pos, endpos, flags)
if self.groups == 0:
return [m.group() for m in matches]
if self.groups == 1:
return [m.groups('')[0] for m in matches]
return [m.groups('') for m in matches]
def finditer(self, string, pos=-1, endpos=-1, flags=0):
try:
while 1:
match = Match(self, string, pos, endpos, flags)
yield match
start, pos = match.span()
if pos == start:
pos += 1
except NoMatch:
pass
def sub(self, repl, string, count=0, flags=0):
return self.subn(repl, string, count, flags)[0]
def subn(self, repl, string, count=0, flags=0):
if not hasattr(repl, '__call__'):
repl = lambda match, tmpl=repl: match.expand(tmpl)
output = []
pos = n = 0
for match in self.finditer(string, flags=flags):
start, end = match.span()
if not pos == start == end or pos == 0:
output.extend((string[pos:start], repl(match)))
pos = end
n += 1
if 0 < count <= n:
break
output.append(string[pos:])
return (string[:0].join(output), n)
def __reduce__(self):
if self.pattern is None:
return (Pattern, (None, 0, self.dumps()))
return (Pattern, (self.pattern, self.flags))
def __repr__(self):
if self.pattern is None:
return '{0}.loads({1})'.format(__name__, repr(self.dumps()))
flags = self.flags
if flags:
v = []
for name in _FLAGS:
value = getattr(_pcre, name)
if flags & value:
v.append('{0}.{1}'.format(__name__, name))
flags &= ~value
if flags:
v.append(hex(flags))
return '{0}.compile({1}, {2})'.format(__name__, repr(self.pattern), '|'.join(v))
return '{0}.compile({1})'.format(__name__, repr(self.pattern))
class Match(_pcre.Match):
def expand(self, template):
return template.format(self.group(), *self.groups(''), **self.groupdict(''))
def __repr__(self):
cls = self.__class__
return '<{0}.{1} object; span={2}, match={3}>'.format(cls.__module__,
cls.__name__, repr(self.span()), repr(self.group()))
class REMatch(Match):
def expand(self, template):
groups = (self.group(),) + self.groups()
groupdict = self.groupdict()
def repl(match):
esc, index, group, badgroup = match.groups()
if esc:
return ('\\' + esc).decode('string-escape')
if badgroup:
raise PCREError(100, 'invalid group name')
try:
if index or group.isdigit():
result = groups[int(index or group)]
else:
result = groupdict[group]
except IndexError:
raise PCREError(15, 'invalid group reference')
except KeyError:
raise IndexError('unknown group name')
if result is None:
raise PCREError(101, 'unmatched group')
return result
return _REGEX_RE_TEMPLATE.sub(repl, template)
def compile(pattern, flags=0):
if isinstance(pattern, _pcre.Pattern):
if flags != 0:
raise ValueError('cannot process flags argument with a compiled pattern')
return pattern
return Pattern(pattern, flags)
def match(pattern, string, flags=0):
return compile(pattern, flags).match(string)
def search(pattern, string, flags=0):
return compile(pattern, flags).search(string)
def split(pattern, string, maxsplit=0, flags=0):
return compile(pattern, flags).split(string, maxsplit)
def findall(pattern, string, flags=0):
return compile(pattern, flags).findall(string)
def finditer(pattern, string, flags=0):
return compile(pattern, flags).finditer(string)
def sub(pattern, repl, string, count=0, flags=0):
return compile(pattern, flags).sub(repl, string, count)
def subn(pattern, repl, string, count=0, flags=0):
return compile(pattern, flags).subn(repl, string, count)
def loads(data):
# Loads a pattern serialized with Pattern.dumps().
return Pattern(None, loads=data)
def escape(pattern):
# Escapes a regular expression.
s = list(pattern)
alnum = _ALNUM
for i, c in enumerate(pattern):
if c not in alnum:
s[i] = '\\000' if c == '\000' else ('\\' + c)
return pattern[:0].join(s)
def escape_template(template):
# Escapes "{" and "}" characters in the template.
return template.replace('{', '{{').replace('}', '}}')
def convert_re_template(template):
# Converts re template r"\1\g<id>" to "{1}{id}" format.
def repl(match):
esc, index, group, badgroup = match.groups()
if esc:
return ('\\' + esc).decode('string-escape')
if badgroup:
raise PCREError(100, 'invalid group name')
return '{%s}' % (index or group)
return _REGEX_RE_TEMPLATE.sub(repl, escape_template(template))
def enable_re_template_mode():
# Makes calls to sub() take re templates instead of str.format() templates.
global Match
Match = REMatch
_ALNUM = frozenset('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
error = PCREError = _pcre.PCREError
NoMatch = _pcre.NoMatch
MAXREPEAT = 65536
# Provides PCRE build-time configuration.
config = type('config', (), _pcre.get_config())
# Pattern and/or match flags
_FLAGS = ('IGNORECASE', 'MULTILINE', 'DOTALL', 'UNICODE', 'VERBOSE',
'ANCHORED', 'NOTBOL', 'NOTEOL', 'NOTEMPTY', 'NOTEMPTY_ATSTART',
'UTF8', 'NO_UTF8_CHECK')
# Copy flags from _pcre module
ns = globals()
for name in _FLAGS:
ns[name] = getattr(_pcre, name)
del ns, name
# Short versions
I = IGNORECASE
M = MULTILINE
S = DOTALL
U = UNICODE
X = VERBOSE
# Study flags
STUDY_JIT = _pcre.STUDY_JIT
# Used to parse re templates.
_REGEX_RE_TEMPLATE = compile(r'\\(?:([\\abfnrtv]|0[0-7]{0,2}|[0-7]{3})|'
r'(\d{1,2})|g<(\d+|[^\d\W]\w*)>|(g[^>]*))')
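# A minimal usage sketch (illustrative only; assumes the _pcre extension is
# built and this module is importable as `pcre`; the pattern and strings are
# made up):
#
#   import pcre
#   p = pcre.compile(r'(\w+)-(\d+)')
#   m = p.match('item-42')
#   if m is not None:
#       print(m.group(1), m.group(2))   # 'item' '42'
#   # sub()/expand() use str.format() style templates by default ...
#   print(p.sub('{2}:{1}', 'item-42'))  # '42:item'
#   # ... unless enable_re_template_mode() switches them to re-style r'\2:\1'.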
|
|
import menusystem
import unittest
import os
import sys
import time
from subprocess import check_output
from subprocess import CalledProcessError
import ClydeLog
import ClydeUnitTest
import TestRig
RIG_PORT = ['/dev/ttyACM0', '/dev/ttyACM1']
testid = 0
# Handler functions
def run_test_session(names, board):
print("")
input = get_input("Do you want to run the first test? (Y/n) ")
next = input == 'y' or input == 'Y' or input == ''
print("")
while next:
run_tests(names, board)
input = get_input("Do you want to run the next test? (Y/n) ")
next = input == 'y' or input == 'Y' or input == ''
print("")
def run_main_test_session(first_names, second_names, board):
setMainPins()
#loadSerial()
print("Found test rig: %s" % 'MAIN');
#print("Last serial: %s" % lastSerial);
input = get_input("Do you want to run the first test? (Y/n) ")
next = input == 'y' or input == 'Y' or input == ''
print("")
while next:
run_main_tests(first_names, second_names, board)
input = get_input("Do you want to run the next test? (Y/n) ")
next = input == 'y' or input == 'Y' or input == ''
print("")
def get_input(prompt):
while True:
s = raw_input(prompt)
if set(s).issubset("ynYNqQ") or s == "":
return s
print("Please answer 'y' for yes, or 'n' for no.")
def run_tests(names, board):
global testid
names = names.split();
if (TestRig.rig.board is not None):
TestRig.rig.reset();
allTests = unittest.TestSuite()
for name in names:
module = __import__(name)
tests = unittest.defaultTestLoader.loadTestsFromModule(module)
allTests.addTests(tests)
testid += 1
runner = ClydeUnitTest.ClydeTestRunner(testid)
result = runner.run(board, allTests)
if (TestRig.rig.board is not None):
TestRig.rig.reset()
TestRig.rig.led('GREEN' if result.wasSuccessful() else 'RED', 'ON')
def run_main_tests(first_names, second_names, board):
global testid
#first set of tests
first_names = first_names.split();
allTests = unittest.TestSuite()
for name in first_names:
module = __import__(name)
tests = unittest.defaultTestLoader.loadTestsFromModule(module)
allTests.addTests(tests)
testid += 1
runner = ClydeUnitTest.ClydeTestRunner(testid)
result = runner.run(board, allTests)
if not result.wasSuccessful():
if TestRig.rig.board is not None:
TestRig.rig.reset()
TestRig.rig.led('RED', 'ON')
return
start = int(round(time.time() * 1000))
found = False
timeout = 15000.0
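    # Poll lsusb until the board enumerates on USB as 1d50:609f, giving up
    # after the 15 s timeout; either way the script then reconnects the rig
    # and runs the second set of tests.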
while not found and (int(round(time.time() * 1000)) - start < timeout):
sys.stdout.write("\rLoading [ %.3fs ]" % (int(int(round(time.time() * 1000)) - start) / 1000.0))
sys.stdout.flush()
output = check_output(["lsusb"])
if "1d50:609f" in output:
found = True
time.sleep(1)
sys.stdout.write("\n\n")
time.sleep(2)
TestRig.rig.connect(RIG_PORT)
#second set of tests
second_names = second_names.split();
if (TestRig.rig.board is not None):
TestRig.rig.reset();
allTests = unittest.TestSuite()
for name in second_names:
module = __import__(name)
tests = unittest.defaultTestLoader.loadTestsFromModule(module)
allTests.addTests(tests)
runner = ClydeUnitTest.ClydeTestRunner(testid)
result = runner.run(board, allTests)
if (TestRig.rig.board is not None):
TestRig.rig.reset()
TestRig.rig.led('GREEN' if result.wasSuccessful() else 'RED', 'ON')
def done(value):
if (TestRig.rig is not None):
TestRig.rig.disconnect()
TestRig.rig = None
return False
def setMainPins():
# List of pins that are connected to analog sensors on the board
measurementPins = [
        TestRig.MeasurementPin('EYE_SIG', 0, 1, 0),  # e.g. a scale of 5/1023 and offset 0 here would map the raw 0-1023 reading to 0-5V
TestRig.MeasurementPin('MODULE1_ANALOG', 1, 1, 0),
TestRig.MeasurementPin('MODULE2_ANALOG', 2, 1, 0),
]
# List of pins that control a relay on the test rig
relayPins = [
]
# List of pins that are connected directly to an I/O pin on the DUT,
# that should be used to do an n*n short test
    # For nodes with reverse protection diodes (eg, VCC and GND), specify
# 'suppressHigh' to prevent them from being pulled higher than any other
# nets, and 'suppressLow' to prevent them from being pulled lower than any
# other nets.
shortTestPins = [
TestRig.ArduinoPin('WHITE_LIGHT', 11),
TestRig.ArduinoPin('RED_RGB', 5),
TestRig.ArduinoPin('GREEN_RGB', 6),
TestRig.ArduinoPin('BLUE_RGB', 9),
TestRig.ArduinoPin('MODULE1_DIGITAL', 7),
TestRig.ArduinoPin('MODULE2_DIGITAL', 8),
]
return TestRig.rig.setPins(measurementPins, relayPins, shortTestPins)
def setEyePins():
# List of pins that are connected to analog sensors on the board
measurementPins = [
        TestRig.MeasurementPin('DUT_SIG', 0, 1, 0),  # e.g. a scale of 5/1023 and offset 0 here would map the raw 0-1023 reading to 0-5V
]
# List of pins that control a relay on the test rig
relayPins = [
TestRig.ArduinoPin('HOST_VCC_LIMIT', 5),
TestRig.ArduinoPin('HOST_VCC', 6),
TestRig.ArduinoPin('HOST_GND', 8),
TestRig.ArduinoPin('DUT_SIG', 7),
]
# List of pins that are connected directly to an I/O pin on the DUT,
# that should be used to do an n*n short test
    # For nodes with reverse protection diodes (eg, VCC and GND), specify
# 'suppressHigh' to prevent them from being pulled higher than any other
# nets, and 'suppressLow' to prevent them from being pulled lower than any
# other nets.
shortTestPins = [
]
return TestRig.rig.setPins(measurementPins, relayPins, shortTestPins)
def setModulePins():
# List of pins that are connected to analog sensors on the board
measurementPins = [
        TestRig.MeasurementPin('DUT_ANALOG', 0, 1, 0),  # e.g. a scale of 5/1023 and offset 0 here would map the raw 0-1023 reading to 0-5V
]
# List of pins that control a relay on the test rig
relayPins = [
TestRig.ArduinoPin('HOST_VCC_5V_LIMIT', 5),
TestRig.ArduinoPin('HOST_VCC_5V', 6),
TestRig.ArduinoPin('HOST_VCC_3V3_LIMIT', 13), #changing to analog 1 / digital 15 (A1)
TestRig.ArduinoPin('HOST_VCC_3V3', 4),
TestRig.ArduinoPin('HOST_GND', 7),
]
# List of pins that are connected directly to an I/O pin on the DUT,
# that should be used to do an n*n short test
# For nodes with reverse protection diodes (eg, VCC and GND), specify
# 'suppressHigh' to prevent them from being pulled higher than any other
# nets, and 'suppressLow' to prevent them from being pulled lower than any
# other nets.
shortTestPins = [
TestRig.ArduinoPin('DUT_DIGITAL', 11),
]
return TestRig.rig.setPins(measurementPins, relayPins, shortTestPins)
if __name__ == '__main__':
# Check if we are testing the main board
if len(sys.argv) > 1 and sys.argv[1] == 'MAIN':
run_main_test_session('TestMainICSP', 'TestMainFunctions', 'MAIN')
else:
# Connect test rig
TestRig.rig.connect(RIG_PORT)
if TestRig.rig.board == 'EYE':
setEyePins()
run_test_session('TestEyePowerOn TestEyeIRPair', TestRig.rig.board)
elif TestRig.rig.board == 'TOUCH':
setModulePins()
run_test_session('TestTouchyPowerOn TestTouchyFunctions', TestRig.rig.board)
elif TestRig.rig.board == 'AFRAID':
setModulePins()
run_test_session('TestAfraidPowerOn TestAfraidFunctions', TestRig.rig.board)
elif TestRig.rig.board == 'MAIN':
run_test_session('', TestRig.rig.board)
else:
# Create main Clyde test menu
menu_choices = []
menu_choices.append(menusystem.Choice(selector=1, value=1, handler=None, description='PCB000001-A: Test Main controller board'))
menu_choices.append(menusystem.Choice(selector=2, value='TestEyePowerOn TestEyeIRPair', handler=test_eye, description='PCB000002-A: Test Eye IR board'))
menu_choices.append(menusystem.Choice(selector=3, value='TestTouchyPowerOn TestTouchyFunctions', handler=test_touchy, description='PCB000003-A: Test Touchy-Feely module board'))
menu_choices.append(menusystem.Choice(selector=4, value='TestAfraidPowerOn TestAfraidFunctions', handler=test_afraid, description='PCB000004-A: Test Afraid of the Dark module board'))
menu_choices.append(menusystem.Choice(selector='q', value=0, handler=done, description='Quit'))
menu = menusystem.Menu(title='Clyde Tests', choice_list=menu_choices, prompt='Press 1-4 to select the type of PCB under test. Press \'q\' to quit. ')
menu.waitForInput()
|
|
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2001-2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS rdatasets (an rdataset is a set of rdatas of a given type and class)"""
import random
from io import StringIO
import struct
import dns.exception
import dns.rdatatype
import dns.rdataclass
import dns.rdata
import dns.set
from ._compat import string_types
# define SimpleSet here for backwards compatibility
SimpleSet = dns.set.Set
class DifferingCovers(dns.exception.DNSException):
"""An attempt was made to add a DNS SIG/RRSIG whose covered type
is not the same as that of the other rdatas in the rdataset."""
class IncompatibleTypes(dns.exception.DNSException):
"""An attempt was made to add DNS RR data of an incompatible type."""
class Rdataset(dns.set.Set):
"""A DNS rdataset."""
__slots__ = ['rdclass', 'rdtype', 'covers', 'ttl']
def __init__(self, rdclass, rdtype, covers=dns.rdatatype.NONE, ttl=0):
"""Create a new rdataset of the specified class and type.
*rdclass*, an ``int``, the rdataclass.
*rdtype*, an ``int``, the rdatatype.
*covers*, an ``int``, the covered rdatatype.
*ttl*, an ``int``, the TTL.
"""
super(Rdataset, self).__init__()
self.rdclass = rdclass
self.rdtype = rdtype
self.covers = covers
self.ttl = ttl
def _clone(self):
obj = super(Rdataset, self)._clone()
obj.rdclass = self.rdclass
obj.rdtype = self.rdtype
obj.covers = self.covers
obj.ttl = self.ttl
return obj
def update_ttl(self, ttl):
"""Perform TTL minimization.
Set the TTL of the rdataset to be the lesser of the set's current
TTL or the specified TTL. If the set contains no rdatas, set the TTL
to the specified TTL.
*ttl*, an ``int``.
"""
if len(self) == 0:
self.ttl = ttl
elif ttl < self.ttl:
self.ttl = ttl
def add(self, rd, ttl=None):
"""Add the specified rdata to the rdataset.
If the optional *ttl* parameter is supplied, then
``self.update_ttl(ttl)`` will be called prior to adding the rdata.
*rd*, a ``dns.rdata.Rdata``, the rdata
*ttl*, an ``int``, the TTL.
Raises ``dns.rdataset.IncompatibleTypes`` if the type and class
do not match the type and class of the rdataset.
Raises ``dns.rdataset.DifferingCovers`` if the type is a signature
type and the covered type does not match that of the rdataset.
"""
#
# If we're adding a signature, do some special handling to
# check that the signature covers the same type as the
# other rdatas in this rdataset. If this is the first rdata
# in the set, initialize the covers field.
#
if self.rdclass != rd.rdclass or self.rdtype != rd.rdtype:
raise IncompatibleTypes
if ttl is not None:
self.update_ttl(ttl)
if self.rdtype == dns.rdatatype.RRSIG or \
self.rdtype == dns.rdatatype.SIG:
covers = rd.covers()
if len(self) == 0 and self.covers == dns.rdatatype.NONE:
self.covers = covers
elif self.covers != covers:
raise DifferingCovers
if dns.rdatatype.is_singleton(rd.rdtype) and len(self) > 0:
self.clear()
super(Rdataset, self).add(rd)
def union_update(self, other):
self.update_ttl(other.ttl)
super(Rdataset, self).union_update(other)
def intersection_update(self, other):
self.update_ttl(other.ttl)
super(Rdataset, self).intersection_update(other)
def update(self, other):
"""Add all rdatas in other to self.
*other*, a ``dns.rdataset.Rdataset``, the rdataset from which
to update.
"""
self.update_ttl(other.ttl)
super(Rdataset, self).update(other)
def __repr__(self):
if self.covers == 0:
ctext = ''
else:
ctext = '(' + dns.rdatatype.to_text(self.covers) + ')'
return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
dns.rdatatype.to_text(self.rdtype) + ctext + ' rdataset>'
def __str__(self):
return self.to_text()
def __eq__(self, other):
if not isinstance(other, Rdataset):
return False
if self.rdclass != other.rdclass or \
self.rdtype != other.rdtype or \
self.covers != other.covers:
return False
return super(Rdataset, self).__eq__(other)
def __ne__(self, other):
return not self.__eq__(other)
def to_text(self, name=None, origin=None, relativize=True,
override_rdclass=None, **kw):
"""Convert the rdataset into DNS master file format.
See ``dns.name.Name.choose_relativity`` for more information
on how *origin* and *relativize* determine the way names
are emitted.
Any additional keyword arguments are passed on to the rdata
``to_text()`` method.
*name*, a ``dns.name.Name``. If name is not ``None``, emit RRs with
*name* as the owner name.
*origin*, a ``dns.name.Name`` or ``None``, the origin for relative
names.
*relativize*, a ``bool``. If ``True``, names will be relativized
to *origin*.
"""
if name is not None:
name = name.choose_relativity(origin, relativize)
ntext = str(name)
pad = ' '
else:
ntext = ''
pad = ''
s = StringIO()
if override_rdclass is not None:
rdclass = override_rdclass
else:
rdclass = self.rdclass
if len(self) == 0:
#
# Empty rdatasets are used for the question section, and in
# some dynamic updates, so we don't need to print out the TTL
# (which is meaningless anyway).
#
s.write(u'{}{}{} {}\n'.format(ntext, pad,
dns.rdataclass.to_text(rdclass),
dns.rdatatype.to_text(self.rdtype)))
else:
for rd in self:
s.write(u'%s%s%d %s %s %s\n' %
(ntext, pad, self.ttl, dns.rdataclass.to_text(rdclass),
dns.rdatatype.to_text(self.rdtype),
rd.to_text(origin=origin, relativize=relativize,
**kw)))
#
# We strip off the final \n for the caller's convenience in printing
#
return s.getvalue()[:-1]
def to_wire(self, name, file, compress=None, origin=None,
override_rdclass=None, want_shuffle=True):
"""Convert the rdataset to wire format.
*name*, a ``dns.name.Name`` is the owner name to use.
*file* is the file where the name is emitted (typically a
BytesIO file).
*compress*, a ``dict``, is the compression table to use. If
``None`` (the default), names will not be compressed.
*origin* is a ``dns.name.Name`` or ``None``. If the name is
relative and origin is not ``None``, then *origin* will be appended
to it.
*override_rdclass*, an ``int``, is used as the class instead of the
class of the rdataset. This is useful when rendering rdatasets
associated with dynamic updates.
*want_shuffle*, a ``bool``. If ``True``, then the order of the
Rdatas within the Rdataset will be shuffled before rendering.
Returns an ``int``, the number of records emitted.
"""
if override_rdclass is not None:
rdclass = override_rdclass
want_shuffle = False
else:
rdclass = self.rdclass
file.seek(0, 2)
if len(self) == 0:
name.to_wire(file, compress, origin)
stuff = struct.pack("!HHIH", self.rdtype, rdclass, 0, 0)
file.write(stuff)
return 1
else:
if want_shuffle:
l = list(self)
random.shuffle(l)
else:
l = self
for rd in l:
name.to_wire(file, compress, origin)
stuff = struct.pack("!HHIH", self.rdtype, rdclass,
self.ttl, 0)
file.write(stuff)
start = file.tell()
rd.to_wire(file, compress, origin)
end = file.tell()
assert end - start < 65536
file.seek(start - 2)
stuff = struct.pack("!H", end - start)
file.write(stuff)
file.seek(0, 2)
return len(self)
def match(self, rdclass, rdtype, covers):
"""Returns ``True`` if this rdataset matches the specified class,
type, and covers.
"""
if self.rdclass == rdclass and \
self.rdtype == rdtype and \
self.covers == covers:
return True
return False
def from_text_list(rdclass, rdtype, ttl, text_rdatas):
"""Create an rdataset with the specified class, type, and TTL, and with
the specified list of rdatas in text format.
Returns a ``dns.rdataset.Rdataset`` object.
"""
if isinstance(rdclass, string_types):
rdclass = dns.rdataclass.from_text(rdclass)
if isinstance(rdtype, string_types):
rdtype = dns.rdatatype.from_text(rdtype)
r = Rdataset(rdclass, rdtype)
r.update_ttl(ttl)
for t in text_rdatas:
rd = dns.rdata.from_text(r.rdclass, r.rdtype, t)
r.add(rd)
return r
def from_text(rdclass, rdtype, ttl, *text_rdatas):
"""Create an rdataset with the specified class, type, and TTL, and with
the specified rdatas in text format.
Returns a ``dns.rdataset.Rdataset`` object.
"""
return from_text_list(rdclass, rdtype, ttl, text_rdatas)
def from_rdata_list(ttl, rdatas):
"""Create an rdataset with the specified TTL, and with
the specified list of rdata objects.
Returns a ``dns.rdataset.Rdataset`` object.
"""
if len(rdatas) == 0:
raise ValueError("rdata list must not be empty")
r = None
for rd in rdatas:
if r is None:
r = Rdataset(rd.rdclass, rd.rdtype)
r.update_ttl(ttl)
r.add(rd)
return r
def from_rdata(ttl, *rdatas):
"""Create an rdataset with the specified TTL, and with
the specified rdata objects.
Returns a ``dns.rdataset.Rdataset`` object.
"""
return from_rdata_list(ttl, rdatas)
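# Editor's note (illustrative sketch, not part of dnspython): a quick demo of
# the module-level factories and of TTL minimization. The _rdataset_example
# name is hypothetical and nothing in the library calls it.
def _rdataset_example():
    rrset = from_text('IN', 'A', 300, '10.0.0.1', '10.0.0.2')
    # add() with an explicit ttl calls update_ttl() first, which keeps the
    # minimum TTL seen, so the set's TTL drops from 300 to 60 here.
    rrset.add(dns.rdata.from_text(rrset.rdclass, rrset.rdtype, '10.0.0.3'), ttl=60)
    assert rrset.ttl == 60
    return rrset.to_text()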
|
|
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy.ext.orderinglist import ordering_list
from sqlalchemy.orm import clear_mappers
from sqlalchemy.orm import relationship
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from sqlalchemy.testing.util import picklers
metadata = None
def step_numbering(step):
"""order in whole steps"""
def f(index, collection):
return step * index
return f
def fibonacci_numbering(order_col):
"""
almost fibonacci- skip the first 2 steps
e.g. 1, 2, 3, 5, 8, ... instead of 0, 1, 1, 2, 3, ...
otherwise ordering of the elements at '1' is undefined... ;)
"""
def f(index, collection):
if index == 0:
return 1
elif index == 1:
return 2
else:
return getattr(collection[index - 1], order_col) + getattr(
collection[index - 2], order_col
)
return f
def alpha_ordering(index, collection):
"""
0 -> A, 1 -> B, ... 25 -> Z, 26 -> AA, 27 -> AB, ...
"""
s = ""
while index > 25:
d = index // 26  # floor division: index arithmetic must stay integral for chr() below
s += chr((d % 26) + 64)
index -= d * 26
s += chr(index + 65)
return s
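# Editor's note (a minimal sketch, not part of the SQLAlchemy test suite): how
# a custom ordering_func such as step_numbering() plugs into ordering_list().
# The factory returns a list subclass that writes each item's position on
# append; _ordering_func_demo and the local Item class are hypothetical.
def _ordering_func_demo():
    class Item:
        position = None
    stepped = ordering_list("position", ordering_func=step_numbering(10))()
    for _ in range(3):
        stepped.append(Item())
    return [item.position for item in stepped]  # expected: [0, 10, 20]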
class OrderingListTest(fixtures.MappedTest):
def setup_test(self):
global metadata, slides_table, bullets_table, Slide, Bullet
slides_table, bullets_table = None, None
Slide, Bullet = None, None
metadata = MetaData()
def _setup(self, test_collection_class):
"""Build a relationship situation using the given
test_collection_class factory"""
global metadata, slides_table, bullets_table, Slide, Bullet
slides_table = Table(
"test_Slides",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(128)),
)
bullets_table = Table(
"test_Bullets",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("slide_id", Integer, ForeignKey("test_Slides.id")),
Column("position", Integer),
Column("text", String(128)),
)
class Slide:
def __init__(self, name):
self.name = name
def __repr__(self):
return '<Slide "%s">' % self.name
class Bullet:
def __init__(self, text):
self.text = text
def __repr__(self):
return '<Bullet "%s" pos %s>' % (self.text, self.position)
clear_mappers()
self.mapper_registry.map_imperatively(
Slide,
slides_table,
properties={
"bullets": relationship(
Bullet,
lazy="joined",
collection_class=test_collection_class,
backref="slide",
order_by=[bullets_table.c.position],
)
},
)
self.mapper_registry.map_imperatively(Bullet, bullets_table)
metadata.create_all(testing.db)
def teardown_test(self):
metadata.drop_all(testing.db)
def test_append_no_reorder(self):
self._setup(
ordering_list("position", count_from=1, reorder_on_append=False)
)
s1 = Slide("Slide #1")
self.assert_(not s1.bullets)
self.assert_(len(s1.bullets) == 0)
s1.bullets.append(Bullet("s1/b1"))
self.assert_(s1.bullets)
self.assert_(len(s1.bullets) == 1)
self.assert_(s1.bullets[0].position == 1)
s1.bullets.append(Bullet("s1/b2"))
self.assert_(len(s1.bullets) == 2)
self.assert_(s1.bullets[0].position == 1)
self.assert_(s1.bullets[1].position == 2)
bul = Bullet("s1/b100")
bul.position = 100
s1.bullets.append(bul)
self.assert_(s1.bullets[0].position == 1)
self.assert_(s1.bullets[1].position == 2)
self.assert_(s1.bullets[2].position == 100)
s1.bullets.append(Bullet("s1/b4"))
self.assert_(s1.bullets[0].position == 1)
self.assert_(s1.bullets[1].position == 2)
self.assert_(s1.bullets[2].position == 100)
self.assert_(s1.bullets[3].position == 4)
s1.bullets._reorder()
self.assert_(s1.bullets[0].position == 1)
self.assert_(s1.bullets[1].position == 2)
self.assert_(s1.bullets[2].position == 3)
self.assert_(s1.bullets[3].position == 4)
session = fixture_session()
session.add(s1)
session.flush()
id_ = s1.id
session.expunge_all()
del s1
srt = session.get(Slide, id_)
self.assert_(srt.bullets)
self.assert_(len(srt.bullets) == 4)
titles = ["s1/b1", "s1/b2", "s1/b100", "s1/b4"]
found = [b.text for b in srt.bullets]
self.assert_(titles == found)
def test_append_reorder(self):
self._setup(
ordering_list("position", count_from=1, reorder_on_append=True)
)
s1 = Slide("Slide #1")
self.assert_(not s1.bullets)
self.assert_(len(s1.bullets) == 0)
s1.bullets.append(Bullet("s1/b1"))
self.assert_(s1.bullets)
self.assert_(len(s1.bullets) == 1)
self.assert_(s1.bullets[0].position == 1)
s1.bullets.append(Bullet("s1/b2"))
self.assert_(len(s1.bullets) == 2)
self.assert_(s1.bullets[0].position == 1)
self.assert_(s1.bullets[1].position == 2)
bul = Bullet("s1/b100")
bul.position = 100
s1.bullets.append(bul)
self.assert_(s1.bullets[0].position == 1)
self.assert_(s1.bullets[1].position == 2)
self.assert_(s1.bullets[2].position == 3)
s1.bullets.append(Bullet("s1/b4"))
self.assert_(s1.bullets[0].position == 1)
self.assert_(s1.bullets[1].position == 2)
self.assert_(s1.bullets[2].position == 3)
self.assert_(s1.bullets[3].position == 4)
s1.bullets._reorder()
self.assert_(s1.bullets[0].position == 1)
self.assert_(s1.bullets[1].position == 2)
self.assert_(s1.bullets[2].position == 3)
self.assert_(s1.bullets[3].position == 4)
s1.bullets._raw_append(Bullet("raw"))
self.assert_(s1.bullets[4].position is None)
s1.bullets._reorder()
self.assert_(s1.bullets[4].position == 5)
session = fixture_session()
session.add(s1)
session.flush()
id_ = s1.id
session.expunge_all()
del s1
srt = session.get(Slide, id_)
self.assert_(srt.bullets)
self.assert_(len(srt.bullets) == 5)
titles = ["s1/b1", "s1/b2", "s1/b100", "s1/b4", "raw"]
found = [b.text for b in srt.bullets]
eq_(titles, found)
srt.bullets._raw_append(Bullet("raw2"))
srt.bullets[-1].position = 6
session.flush()
session.expunge_all()
srt = session.get(Slide, id_)
titles = ["s1/b1", "s1/b2", "s1/b100", "s1/b4", "raw", "raw2"]
found = [b.text for b in srt.bullets]
eq_(titles, found)
def test_insert(self):
self._setup(ordering_list("position"))
s1 = Slide("Slide #1")
s1.bullets.append(Bullet("1"))
s1.bullets.append(Bullet("2"))
s1.bullets.append(Bullet("3"))
s1.bullets.append(Bullet("4"))
self.assert_(s1.bullets[0].position == 0)
self.assert_(s1.bullets[1].position == 1)
self.assert_(s1.bullets[2].position == 2)
self.assert_(s1.bullets[3].position == 3)
s1.bullets.insert(2, Bullet("insert_at_2"))
self.assert_(s1.bullets[0].position == 0)
self.assert_(s1.bullets[1].position == 1)
self.assert_(s1.bullets[2].position == 2)
self.assert_(s1.bullets[3].position == 3)
self.assert_(s1.bullets[4].position == 4)
self.assert_(s1.bullets[1].text == "2")
self.assert_(s1.bullets[2].text == "insert_at_2")
self.assert_(s1.bullets[3].text == "3")
s1.bullets.insert(999, Bullet("999"))
self.assert_(len(s1.bullets) == 6)
self.assert_(s1.bullets[5].position == 5)
session = fixture_session()
session.add(s1)
session.flush()
id_ = s1.id
session.expunge_all()
del s1
srt = session.get(Slide, id_)
self.assert_(srt.bullets)
self.assert_(len(srt.bullets) == 6)
texts = ["1", "2", "insert_at_2", "3", "4", "999"]
found = [b.text for b in srt.bullets]
self.assert_(texts == found)
def test_slice(self):
self._setup(ordering_list("position"))
b = [
Bullet("1"),
Bullet("2"),
Bullet("3"),
Bullet("4"),
Bullet("5"),
Bullet("6"),
]
s1 = Slide("Slide #1")
# 1, 2, 3
s1.bullets[0:3] = b[0:3]
for i in 0, 1, 2:
self.assert_(s1.bullets[i].position == i)
self.assert_(s1.bullets[i] == b[i])
# 1, 4, 5, 6, 3
s1.bullets[1:2] = b[3:6]
for li, bi in (0, 0), (1, 3), (2, 4), (3, 5), (4, 2):
self.assert_(s1.bullets[li].position == li)
self.assert_(s1.bullets[li] == b[bi])
# 1, 6, 3
del s1.bullets[1:3]
for li, bi in (0, 0), (1, 5), (2, 2):
self.assert_(s1.bullets[li].position == li)
self.assert_(s1.bullets[li] == b[bi])
session = fixture_session()
session.add(s1)
session.flush()
id_ = s1.id
session.expunge_all()
del s1
srt = session.get(Slide, id_)
self.assert_(srt.bullets)
self.assert_(len(srt.bullets) == 3)
texts = ["1", "6", "3"]
for i, text in enumerate(texts):
self.assert_(srt.bullets[i].position == i)
self.assert_(srt.bullets[i].text == text)
def test_replace(self):
self._setup(ordering_list("position"))
s1 = Slide("Slide #1")
s1.bullets = [Bullet("1"), Bullet("2"), Bullet("3")]
self.assert_(len(s1.bullets) == 3)
self.assert_(s1.bullets[2].position == 2)
session = fixture_session()
session.add(s1)
session.flush()
new_bullet = Bullet("new 2")
self.assert_(new_bullet.position is None)
# mark existing bullet as db-deleted before replacement.
# session.delete(s1.bullets[1])
s1.bullets[1] = new_bullet
self.assert_(new_bullet.position == 1)
self.assert_(len(s1.bullets) == 3)
id_ = s1.id
session.flush()
session.expunge_all()
srt = session.get(Slide, id_)
self.assert_(srt.bullets)
self.assert_(len(srt.bullets) == 3)
self.assert_(srt.bullets[1].text == "new 2")
self.assert_(srt.bullets[2].text == "3")
def test_replace_two(self):
"""test #3191"""
self._setup(ordering_list("position", reorder_on_append=True))
s1 = Slide("Slide #1")
b1, b2, b3, b4 = Bullet("1"), Bullet("2"), Bullet("3"), Bullet("4")
s1.bullets = [b1, b2, b3]
eq_([b.position for b in s1.bullets], [0, 1, 2])
s1.bullets = [b4, b2, b1]
eq_([b.position for b in s1.bullets], [0, 1, 2])
def test_funky_ordering(self):
class Pos:
def __init__(self):
self.position = None
step_factory = ordering_list(
"position", ordering_func=step_numbering(2)
)
stepped = step_factory()
stepped.append(Pos())
stepped.append(Pos())
stepped.append(Pos())
stepped.append(Pos())
for li, pos in (0, 0), (1, 2), (2, 4), (3, 6):
self.assert_(stepped[li].position == pos)
fib_factory = ordering_list(
"position", ordering_func=fibonacci_numbering("position")
)
fibbed = fib_factory()
fibbed.append(Pos())
fibbed.append(Pos())
fibbed.append(Pos())
fibbed.append(Pos())
fibbed.append(Pos())
for li, pos in (0, 1), (1, 2), (2, 3), (3, 5), (4, 8):
self.assert_(fibbed[li].position == pos)
fibbed.insert(2, Pos())
fibbed.insert(4, Pos())
fibbed.insert(6, Pos())
for li, pos in (
(0, 1),
(1, 2),
(2, 3),
(3, 5),
(4, 8),
(5, 13),
(6, 21),
(7, 34),
):
self.assert_(fibbed[li].position == pos)
alpha_factory = ordering_list("position", ordering_func=alpha_ordering)
alpha = alpha_factory()
alpha.append(Pos())
alpha.append(Pos())
alpha.append(Pos())
alpha.insert(1, Pos())
for li, pos in (0, "A"), (1, "B"), (2, "C"), (3, "D"):
self.assert_(alpha[li].position == pos)
def test_picklability(self):
from sqlalchemy.ext.orderinglist import OrderingList
olist = OrderingList("order", reorder_on_append=True)
olist.append(DummyItem())
for loads, dumps in picklers():
pck = dumps(olist)
copy = loads(pck)
self.assert_(copy == olist)
self.assert_(copy.__dict__ == olist.__dict__)
class DummyItem:
def __init__(self, order=None):
self.order = order
def __eq__(self, other):
return self.order == other.order
def __ne__(self, other):
return not (self == other)
|
|
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign.
OpenAPI spec version: v2.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class DisplayApplianceRecipient(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, cfr_part11=None, company=None, custom_field2=None, digital_signature_base64=None, digital_signatures_pending=None, email=None, first_name=None, full_name=None, initials_base64=None, in_person_email=None, is_notary=None, is_notary_transaction=None, job_title=None, last_name=None, notary_seal_base64=None, phone_number=None, recipient_complete_count=None, recipient_guid_id=None, recipient_id=None, recipient_status=None, recipient_type=None, require_signer_certificate=None, row_state=None, signature_base64=None, signature_image_id=None, signed=None, signer_apply_tabs=None, signer_attachment_base64=None, user_id=None, user_name=None):
"""
DisplayApplianceRecipient - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'cfr_part11': 'bool',
'company': 'str',
'custom_field2': 'str',
'digital_signature_base64': 'str',
'digital_signatures_pending': 'str',
'email': 'str',
'first_name': 'str',
'full_name': 'str',
'initials_base64': 'str',
'in_person_email': 'str',
'is_notary': 'bool',
'is_notary_transaction': 'bool',
'job_title': 'str',
'last_name': 'str',
'notary_seal_base64': 'str',
'phone_number': 'str',
'recipient_complete_count': 'int',
'recipient_guid_id': 'str',
'recipient_id': 'str',
'recipient_status': 'str',
'recipient_type': 'str',
'require_signer_certificate': 'str',
'row_state': 'str',
'signature_base64': 'str',
'signature_image_id': 'str',
'signed': 'bool',
'signer_apply_tabs': 'bool',
'signer_attachment_base64': 'str',
'user_id': 'str',
'user_name': 'str'
}
self.attribute_map = {
'cfr_part11': 'cfrPart11',
'company': 'company',
'custom_field2': 'customField2',
'digital_signature_base64': 'digitalSignatureBase64',
'digital_signatures_pending': 'digitalSignaturesPending',
'email': 'email',
'first_name': 'firstName',
'full_name': 'fullName',
'initials_base64': 'initialsBase64',
'in_person_email': 'inPersonEmail',
'is_notary': 'isNotary',
'is_notary_transaction': 'isNotaryTransaction',
'job_title': 'jobTitle',
'last_name': 'lastName',
'notary_seal_base64': 'notarySealBase64',
'phone_number': 'phoneNumber',
'recipient_complete_count': 'recipientCompleteCount',
'recipient_guid_id': 'recipientGuidId',
'recipient_id': 'recipientId',
'recipient_status': 'recipientStatus',
'recipient_type': 'recipientType',
'require_signer_certificate': 'requireSignerCertificate',
'row_state': 'rowState',
'signature_base64': 'signatureBase64',
'signature_image_id': 'signatureImageId',
'signed': 'signed',
'signer_apply_tabs': 'signerApplyTabs',
'signer_attachment_base64': 'signerAttachmentBase64',
'user_id': 'userId',
'user_name': 'userName'
}
self._cfr_part11 = cfr_part11
self._company = company
self._custom_field2 = custom_field2
self._digital_signature_base64 = digital_signature_base64
self._digital_signatures_pending = digital_signatures_pending
self._email = email
self._first_name = first_name
self._full_name = full_name
self._initials_base64 = initials_base64
self._in_person_email = in_person_email
self._is_notary = is_notary
self._is_notary_transaction = is_notary_transaction
self._job_title = job_title
self._last_name = last_name
self._notary_seal_base64 = notary_seal_base64
self._phone_number = phone_number
self._recipient_complete_count = recipient_complete_count
self._recipient_guid_id = recipient_guid_id
self._recipient_id = recipient_id
self._recipient_status = recipient_status
self._recipient_type = recipient_type
self._require_signer_certificate = require_signer_certificate
self._row_state = row_state
self._signature_base64 = signature_base64
self._signature_image_id = signature_image_id
self._signed = signed
self._signer_apply_tabs = signer_apply_tabs
self._signer_attachment_base64 = signer_attachment_base64
self._user_id = user_id
self._user_name = user_name
@property
def cfr_part11(self):
"""
Gets the cfr_part11 of this DisplayApplianceRecipient.
:return: The cfr_part11 of this DisplayApplianceRecipient.
:rtype: bool
"""
return self._cfr_part11
@cfr_part11.setter
def cfr_part11(self, cfr_part11):
"""
Sets the cfr_part11 of this DisplayApplianceRecipient.
:param cfr_part11: The cfr_part11 of this DisplayApplianceRecipient.
:type: bool
"""
self._cfr_part11 = cfr_part11
@property
def company(self):
"""
Gets the company of this DisplayApplianceRecipient.
:return: The company of this DisplayApplianceRecipient.
:rtype: str
"""
return self._company
@company.setter
def company(self, company):
"""
Sets the company of this DisplayApplianceRecipient.
:param company: The company of this DisplayApplianceRecipient.
:type: str
"""
self._company = company
@property
def custom_field2(self):
"""
Gets the custom_field2 of this DisplayApplianceRecipient.
:return: The custom_field2 of this DisplayApplianceRecipient.
:rtype: str
"""
return self._custom_field2
@custom_field2.setter
def custom_field2(self, custom_field2):
"""
Sets the custom_field2 of this DisplayApplianceRecipient.
:param custom_field2: The custom_field2 of this DisplayApplianceRecipient.
:type: str
"""
self._custom_field2 = custom_field2
@property
def digital_signature_base64(self):
"""
Gets the digital_signature_base64 of this DisplayApplianceRecipient.
:return: The digital_signature_base64 of this DisplayApplianceRecipient.
:rtype: str
"""
return self._digital_signature_base64
@digital_signature_base64.setter
def digital_signature_base64(self, digital_signature_base64):
"""
Sets the digital_signature_base64 of this DisplayApplianceRecipient.
:param digital_signature_base64: The digital_signature_base64 of this DisplayApplianceRecipient.
:type: str
"""
self._digital_signature_base64 = digital_signature_base64
@property
def digital_signatures_pending(self):
"""
Gets the digital_signatures_pending of this DisplayApplianceRecipient.
:return: The digital_signatures_pending of this DisplayApplianceRecipient.
:rtype: str
"""
return self._digital_signatures_pending
@digital_signatures_pending.setter
def digital_signatures_pending(self, digital_signatures_pending):
"""
Sets the digital_signatures_pending of this DisplayApplianceRecipient.
:param digital_signatures_pending: The digital_signatures_pending of this DisplayApplianceRecipient.
:type: str
"""
self._digital_signatures_pending = digital_signatures_pending
@property
def email(self):
"""
Gets the email of this DisplayApplianceRecipient.
:return: The email of this DisplayApplianceRecipient.
:rtype: str
"""
return self._email
@email.setter
def email(self, email):
"""
Sets the email of this DisplayApplianceRecipient.
:param email: The email of this DisplayApplianceRecipient.
:type: str
"""
self._email = email
@property
def first_name(self):
"""
Gets the first_name of this DisplayApplianceRecipient.
The user's first name. Maximum Length: 50 characters.
:return: The first_name of this DisplayApplianceRecipient.
:rtype: str
"""
return self._first_name
@first_name.setter
def first_name(self, first_name):
"""
Sets the first_name of this DisplayApplianceRecipient.
The user's first name. Maximum Length: 50 characters.
:param first_name: The first_name of this DisplayApplianceRecipient.
:type: str
"""
self._first_name = first_name
@property
def full_name(self):
"""
Gets the full_name of this DisplayApplianceRecipient.
:return: The full_name of this DisplayApplianceRecipient.
:rtype: str
"""
return self._full_name
@full_name.setter
def full_name(self, full_name):
"""
Sets the full_name of this DisplayApplianceRecipient.
:param full_name: The full_name of this DisplayApplianceRecipient.
:type: str
"""
self._full_name = full_name
@property
def initials_base64(self):
"""
Gets the initials_base64 of this DisplayApplianceRecipient.
:return: The initials_base64 of this DisplayApplianceRecipient.
:rtype: str
"""
return self._initials_base64
@initials_base64.setter
def initials_base64(self, initials_base64):
"""
Sets the initials_base64 of this DisplayApplianceRecipient.
:param initials_base64: The initials_base64 of this DisplayApplianceRecipient.
:type: str
"""
self._initials_base64 = initials_base64
@property
def in_person_email(self):
"""
Gets the in_person_email of this DisplayApplianceRecipient.
:return: The in_person_email of this DisplayApplianceRecipient.
:rtype: str
"""
return self._in_person_email
@in_person_email.setter
def in_person_email(self, in_person_email):
"""
Sets the in_person_email of this DisplayApplianceRecipient.
:param in_person_email: The in_person_email of this DisplayApplianceRecipient.
:type: str
"""
self._in_person_email = in_person_email
@property
def is_notary(self):
"""
Gets the is_notary of this DisplayApplianceRecipient.
:return: The is_notary of this DisplayApplianceRecipient.
:rtype: bool
"""
return self._is_notary
@is_notary.setter
def is_notary(self, is_notary):
"""
Sets the is_notary of this DisplayApplianceRecipient.
:param is_notary: The is_notary of this DisplayApplianceRecipient.
:type: bool
"""
self._is_notary = is_notary
@property
def is_notary_transaction(self):
"""
Gets the is_notary_transaction of this DisplayApplianceRecipient.
:return: The is_notary_transaction of this DisplayApplianceRecipient.
:rtype: bool
"""
return self._is_notary_transaction
@is_notary_transaction.setter
def is_notary_transaction(self, is_notary_transaction):
"""
Sets the is_notary_transaction of this DisplayApplianceRecipient.
:param is_notary_transaction: The is_notary_transaction of this DisplayApplianceRecipient.
:type: bool
"""
self._is_notary_transaction = is_notary_transaction
@property
def job_title(self):
"""
Gets the job_title of this DisplayApplianceRecipient.
:return: The job_title of this DisplayApplianceRecipient.
:rtype: str
"""
return self._job_title
@job_title.setter
def job_title(self, job_title):
"""
Sets the job_title of this DisplayApplianceRecipient.
:param job_title: The job_title of this DisplayApplianceRecipient.
:type: str
"""
self._job_title = job_title
@property
def last_name(self):
"""
Gets the last_name of this DisplayApplianceRecipient.
:return: The last_name of this DisplayApplianceRecipient.
:rtype: str
"""
return self._last_name
@last_name.setter
def last_name(self, last_name):
"""
Sets the last_name of this DisplayApplianceRecipient.
:param last_name: The last_name of this DisplayApplianceRecipient.
:type: str
"""
self._last_name = last_name
@property
def notary_seal_base64(self):
"""
Gets the notary_seal_base64 of this DisplayApplianceRecipient.
:return: The notary_seal_base64 of this DisplayApplianceRecipient.
:rtype: str
"""
return self._notary_seal_base64
@notary_seal_base64.setter
def notary_seal_base64(self, notary_seal_base64):
"""
Sets the notary_seal_base64 of this DisplayApplianceRecipient.
:param notary_seal_base64: The notary_seal_base64 of this DisplayApplianceRecipient.
:type: str
"""
self._notary_seal_base64 = notary_seal_base64
@property
def phone_number(self):
"""
Gets the phone_number of this DisplayApplianceRecipient.
:return: The phone_number of this DisplayApplianceRecipient.
:rtype: str
"""
return self._phone_number
@phone_number.setter
def phone_number(self, phone_number):
"""
Sets the phone_number of this DisplayApplianceRecipient.
:param phone_number: The phone_number of this DisplayApplianceRecipient.
:type: str
"""
self._phone_number = phone_number
@property
def recipient_complete_count(self):
"""
Gets the recipient_complete_count of this DisplayApplianceRecipient.
:return: The recipient_complete_count of this DisplayApplianceRecipient.
:rtype: int
"""
return self._recipient_complete_count
@recipient_complete_count.setter
def recipient_complete_count(self, recipient_complete_count):
"""
Sets the recipient_complete_count of this DisplayApplianceRecipient.
:param recipient_complete_count: The recipient_complete_count of this DisplayApplianceRecipient.
:type: int
"""
self._recipient_complete_count = recipient_complete_count
@property
def recipient_guid_id(self):
"""
Gets the recipient_guid_id of this DisplayApplianceRecipient.
:return: The recipient_guid_id of this DisplayApplianceRecipient.
:rtype: str
"""
return self._recipient_guid_id
@recipient_guid_id.setter
def recipient_guid_id(self, recipient_guid_id):
"""
Sets the recipient_guid_id of this DisplayApplianceRecipient.
:param recipient_guid_id: The recipient_guid_id of this DisplayApplianceRecipient.
:type: str
"""
self._recipient_guid_id = recipient_guid_id
@property
def recipient_id(self):
"""
Gets the recipient_id of this DisplayApplianceRecipient.
Unique for the recipient. It is used by the tab element to indicate which recipient is to sign the Document.
:return: The recipient_id of this DisplayApplianceRecipient.
:rtype: str
"""
return self._recipient_id
@recipient_id.setter
def recipient_id(self, recipient_id):
"""
Sets the recipient_id of this DisplayApplianceRecipient.
Unique for the recipient. It is used by the tab element to indicate which recipient is to sign the Document.
:param recipient_id: The recipient_id of this DisplayApplianceRecipient.
:type: str
"""
self._recipient_id = recipient_id
@property
def recipient_status(self):
"""
Gets the recipient_status of this DisplayApplianceRecipient.
:return: The recipient_status of this DisplayApplianceRecipient.
:rtype: str
"""
return self._recipient_status
@recipient_status.setter
def recipient_status(self, recipient_status):
"""
Sets the recipient_status of this DisplayApplianceRecipient.
:param recipient_status: The recipient_status of this DisplayApplianceRecipient.
:type: str
"""
self._recipient_status = recipient_status
@property
def recipient_type(self):
"""
Gets the recipient_type of this DisplayApplianceRecipient.
:return: The recipient_type of this DisplayApplianceRecipient.
:rtype: str
"""
return self._recipient_type
@recipient_type.setter
def recipient_type(self, recipient_type):
"""
Sets the recipient_type of this DisplayApplianceRecipient.
:param recipient_type: The recipient_type of this DisplayApplianceRecipient.
:type: str
"""
self._recipient_type = recipient_type
@property
def require_signer_certificate(self):
"""
Gets the require_signer_certificate of this DisplayApplianceRecipient.
:return: The require_signer_certificate of this DisplayApplianceRecipient.
:rtype: str
"""
return self._require_signer_certificate
@require_signer_certificate.setter
def require_signer_certificate(self, require_signer_certificate):
"""
Sets the require_signer_certificate of this DisplayApplianceRecipient.
:param require_signer_certificate: The require_signer_certificate of this DisplayApplianceRecipient.
:type: str
"""
self._require_signer_certificate = require_signer_certificate
@property
def row_state(self):
"""
Gets the row_state of this DisplayApplianceRecipient.
:return: The row_state of this DisplayApplianceRecipient.
:rtype: str
"""
return self._row_state
@row_state.setter
def row_state(self, row_state):
"""
Sets the row_state of this DisplayApplianceRecipient.
:param row_state: The row_state of this DisplayApplianceRecipient.
:type: str
"""
self._row_state = row_state
@property
def signature_base64(self):
"""
Gets the signature_base64 of this DisplayApplianceRecipient.
:return: The signature_base64 of this DisplayApplianceRecipient.
:rtype: str
"""
return self._signature_base64
@signature_base64.setter
def signature_base64(self, signature_base64):
"""
Sets the signature_base64 of this DisplayApplianceRecipient.
:param signature_base64: The signature_base64 of this DisplayApplianceRecipient.
:type: str
"""
self._signature_base64 = signature_base64
@property
def signature_image_id(self):
"""
Gets the signature_image_id of this DisplayApplianceRecipient.
:return: The signature_image_id of this DisplayApplianceRecipient.
:rtype: str
"""
return self._signature_image_id
@signature_image_id.setter
def signature_image_id(self, signature_image_id):
"""
Sets the signature_image_id of this DisplayApplianceRecipient.
:param signature_image_id: The signature_image_id of this DisplayApplianceRecipient.
:type: str
"""
self._signature_image_id = signature_image_id
@property
def signed(self):
"""
Gets the signed of this DisplayApplianceRecipient.
:return: The signed of this DisplayApplianceRecipient.
:rtype: bool
"""
return self._signed
@signed.setter
def signed(self, signed):
"""
Sets the signed of this DisplayApplianceRecipient.
:param signed: The signed of this DisplayApplianceRecipient.
:type: bool
"""
self._signed = signed
@property
def signer_apply_tabs(self):
"""
Gets the signer_apply_tabs of this DisplayApplianceRecipient.
:return: The signer_apply_tabs of this DisplayApplianceRecipient.
:rtype: bool
"""
return self._signer_apply_tabs
@signer_apply_tabs.setter
def signer_apply_tabs(self, signer_apply_tabs):
"""
Sets the signer_apply_tabs of this DisplayApplianceRecipient.
:param signer_apply_tabs: The signer_apply_tabs of this DisplayApplianceRecipient.
:type: bool
"""
self._signer_apply_tabs = signer_apply_tabs
@property
def signer_attachment_base64(self):
"""
Gets the signer_attachment_base64 of this DisplayApplianceRecipient.
:return: The signer_attachment_base64 of this DisplayApplianceRecipient.
:rtype: str
"""
return self._signer_attachment_base64
@signer_attachment_base64.setter
def signer_attachment_base64(self, signer_attachment_base64):
"""
Sets the signer_attachment_base64 of this DisplayApplianceRecipient.
:param signer_attachment_base64: The signer_attachment_base64 of this DisplayApplianceRecipient.
:type: str
"""
self._signer_attachment_base64 = signer_attachment_base64
@property
def user_id(self):
"""
Gets the user_id of this DisplayApplianceRecipient.
:return: The user_id of this DisplayApplianceRecipient.
:rtype: str
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""
Sets the user_id of this DisplayApplianceRecipient.
:param user_id: The user_id of this DisplayApplianceRecipient.
:type: str
"""
self._user_id = user_id
@property
def user_name(self):
"""
Gets the user_name of this DisplayApplianceRecipient.
:return: The user_name of this DisplayApplianceRecipient.
:rtype: str
"""
return self._user_name
@user_name.setter
def user_name(self, user_name):
"""
Sets the user_name of this DisplayApplianceRecipient.
:param user_name: The user_name of this DisplayApplianceRecipient.
:type: str
"""
self._user_name = user_name
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
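# Editor's note (illustrative sketch, not part of the generated SDK): building
# a recipient and serializing it with the to_dict()/to_str() helpers defined
# above. The _recipient_example name and the field values are hypothetical.
def _recipient_example():
    recipient = DisplayApplianceRecipient(
        first_name='Jane',
        last_name='Doe',
        full_name='Jane Doe',
        recipient_id='1',
        recipient_type='signer',
        signed=False,
    )
    return recipient.to_str()  # pretty-printed dict of every swagger attribute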
|
|
import pytest
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as splin
from numpy.testing import assert_allclose
try:
import sparse
except ImportError:
sparse = None
pytestmark = pytest.mark.skipif(sparse is None,
reason="pydata/sparse not installed")
msg = "pydata/sparse (0.8) does not implement necessary operations"
sparse_params = [pytest.param("COO"),
pytest.param("DOK", marks=[pytest.mark.xfail(reason=msg)])]
@pytest.fixture(params=sparse_params)
def sparse_cls(request):
return getattr(sparse, request.param)
@pytest.fixture
def matrices(sparse_cls):
np.random.seed(1234)
A_dense = np.random.rand(9, 9)
A_dense = A_dense @ A_dense.T
A_sparse = sparse_cls(A_dense)
b = np.random.rand(9)
return A_dense, A_sparse, b
def test_isolve_gmres(matrices):
# Several of the iterative solvers use the same
# isolve.utils.make_system wrapper code, so test just one of them.
A_dense, A_sparse, b = matrices
x, info = splin.gmres(A_sparse, b, atol=1e-15)
assert info == 0
assert isinstance(x, np.ndarray)
assert_allclose(A_sparse @ x, b)
def test_lsmr(matrices):
A_dense, A_sparse, b = matrices
res0 = splin.lsmr(A_dense, b)
res = splin.lsmr(A_sparse, b)
assert_allclose(res[0], res0[0], atol=1e-5)
def test_lsqr(matrices):
A_dense, A_sparse, b = matrices
res0 = splin.lsqr(A_dense, b)
res = splin.lsqr(A_sparse, b)
assert_allclose(res[0], res0[0], atol=1e-5)
def test_eigs(matrices):
A_dense, A_sparse, v0 = matrices
M_dense = np.diag(v0**2)
M_sparse = A_sparse.__class__(M_dense)
w_dense, v_dense = splin.eigs(A_dense, k=3, v0=v0)
w, v = splin.eigs(A_sparse, k=3, v0=v0)
assert_allclose(w, w_dense)
assert_allclose(v, v_dense)
for M in [M_sparse, M_dense]:
w_dense, v_dense = splin.eigs(A_dense, M=M_dense, k=3, v0=v0)
w, v = splin.eigs(A_sparse, M=M, k=3, v0=v0)
assert_allclose(w, w_dense)
assert_allclose(v, v_dense)
w_dense, v_dense = splin.eigsh(A_dense, M=M_dense, k=3, v0=v0)
w, v = splin.eigsh(A_sparse, M=M, k=3, v0=v0)
assert_allclose(w, w_dense)
assert_allclose(v, v_dense)
def test_svds(matrices):
A_dense, A_sparse, v0 = matrices
u0, s0, vt0 = splin.svds(A_dense, k=2, v0=v0)
u, s, vt = splin.svds(A_sparse, k=2, v0=v0)
assert_allclose(s, s0)
assert_allclose(u, u0)
assert_allclose(vt, vt0)
def test_lobpcg(matrices):
A_dense, A_sparse, x = matrices
X = x[:,None]
w_dense, v_dense = splin.lobpcg(A_dense, X)
w, v = splin.lobpcg(A_sparse, X)
assert_allclose(w, w_dense)
assert_allclose(v, v_dense)
def test_spsolve(matrices):
A_dense, A_sparse, b = matrices
b2 = np.random.rand(len(b), 3)
x0 = splin.spsolve(sp.csc_matrix(A_dense), b)
x = splin.spsolve(A_sparse, b)
assert isinstance(x, np.ndarray)
assert_allclose(x, x0)
x0 = splin.spsolve(sp.csc_matrix(A_dense), b)
x = splin.spsolve(A_sparse, b, use_umfpack=True)
assert isinstance(x, np.ndarray)
assert_allclose(x, x0)
x0 = splin.spsolve(sp.csc_matrix(A_dense), b2)
x = splin.spsolve(A_sparse, b2)
assert isinstance(x, np.ndarray)
assert_allclose(x, x0)
x0 = splin.spsolve(sp.csc_matrix(A_dense),
sp.csc_matrix(A_dense))
x = splin.spsolve(A_sparse, A_sparse)
assert isinstance(x, type(A_sparse))
assert_allclose(x.todense(), x0.todense())
def test_splu(matrices):
A_dense, A_sparse, b = matrices
n = len(b)
sparse_cls = type(A_sparse)
lu = splin.splu(A_sparse)
assert isinstance(lu.L, sparse_cls)
assert isinstance(lu.U, sparse_cls)
Pr = sparse_cls(sp.csc_matrix((np.ones(n), (lu.perm_r, np.arange(n)))))
Pc = sparse_cls(sp.csc_matrix((np.ones(n), (np.arange(n), lu.perm_c))))
A2 = Pr.T @ lu.L @ lu.U @ Pc.T
assert_allclose(A2.todense(), A_sparse.todense())
z = lu.solve(A_sparse.todense())
assert_allclose(z, np.eye(n), atol=1e-10)
def test_spilu(matrices):
A_dense, A_sparse, b = matrices
sparse_cls = type(A_sparse)
lu = splin.spilu(A_sparse)
assert isinstance(lu.L, sparse_cls)
assert isinstance(lu.U, sparse_cls)
z = lu.solve(A_sparse.todense())
assert_allclose(z, np.eye(len(b)), atol=1e-3)
def test_spsolve_triangular(matrices):
A_dense, A_sparse, b = matrices
A_sparse = sparse.tril(A_sparse)
x = splin.spsolve_triangular(A_sparse, b)
assert_allclose(A_sparse @ x, b)
def test_onenormest(matrices):
A_dense, A_sparse, b = matrices
est0 = splin.onenormest(A_dense)
est = splin.onenormest(A_sparse)
assert_allclose(est, est0)
def test_inv(matrices):
A_dense, A_sparse, b = matrices
x0 = splin.inv(sp.csc_matrix(A_dense))
x = splin.inv(A_sparse)
assert_allclose(x.todense(), x0.todense())
def test_expm(matrices):
A_dense, A_sparse, b = matrices
x0 = splin.expm(sp.csc_matrix(A_dense))
x = splin.expm(A_sparse)
assert_allclose(x.todense(), x0.todense())
def test_expm_multiply(matrices):
A_dense, A_sparse, b = matrices
x0 = splin.expm_multiply(A_dense, b)
x = splin.expm_multiply(A_sparse, b)
assert_allclose(x, x0)
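# Editor's note (standalone sketch, assuming pydata/sparse is installed): the
# interoperability pattern exercised above, reduced to one call: build a COO
# array from a dense symmetric matrix and hand it straight to a
# scipy.sparse.linalg solver. _interop_example is a hypothetical helper and is
# not collected by pytest.
def _interop_example():
    rng = np.random.default_rng(1234)
    A = rng.random((6, 6))
    A = A @ A.T  # symmetric positive definite, as in the fixture above
    b = rng.random(6)
    x, info = splin.gmres(sparse.COO(A), b, atol=1e-12)
    assert info == 0  # converged within the default tolerance
    return np.linalg.norm(A @ x - b)  # residual norm, small when info == 0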
|