Dataset schema (column, type, observed range):

- body: string, length 26 to 98.2k
- body_hash: int64, -9,222,864,604,528,158,000 to 9,221,803,474B
- docstring: string, length 1 to 16.8k
- path: string, length 5 to 230
- name: string, length 1 to 96
- repository_name: string, length 7 to 89
- lang: string class, 1 distinct value
- body_without_docstring: string, length 20 to 98.2k
def test_spam(self, name, staking_utxo_list, fRandomHeight=False, randomRange=0, randomRange2=0, fDoubleSpend=False, fMustPass=False, fZPoS=False, spending_utxo_list=[]): ' General method to create, send and test the spam blocks\n :param name: (string) chain branch (usually either "Main" or "Forked")\n staking_utxo_list: (string list) utxos to use for staking\n fRandomHeight: (bool) send blocks at random height\n randomRange: (int) if fRandomHeight=True, height is >= current-randomRange\n randomRange2: (int) if fRandomHeight=True, height is < current-randomRange2\n fDoubleSpend: (bool) if true, stake input is double spent in block.vtx\n fMustPass: (bool) if true, the blocks must be stored on disk\n fZPoS: (bool) stake the block with zerocoin\n spending_utxo_list: (string list) utxos to use for spending\n :return: err_msgs: (string list) reports error messages from the test\n or an empty list if test is successful\n ' err_msgs = [] self.log_data_dir_size() block_count = self.node.getblockcount() pastBlockHash = self.node.getblockhash(block_count) randomCount = block_count self.log.info(('Current height: %d' % block_count)) for i in range(0, self.NUM_BLOCKS): if (i != 0): self.log.info(('Sent %d blocks out of %d' % (i, self.NUM_BLOCKS))) if fRandomHeight: randomCount = randint((block_count - randomRange), (block_count - randomRange2)) pastBlockHash = self.node.getblockhash(randomCount) current_block_n = (randomCount + 1) stakingPrevOuts = self.get_prevouts(staking_utxo_list, randomCount, zpos=fZPoS) spendingPrevOuts = self.get_prevouts(spending_utxo_list, randomCount) block = self.create_spam_block(pastBlockHash, stakingPrevOuts, current_block_n, fStakeDoubleSpent=fDoubleSpend, fZPoS=fZPoS, spendingPrevOuts=spendingPrevOuts) block_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(block.nTime)) block_size = (len(block.serialize()) / 1000) self.log.info('Sending block %d [%s...] - nTime: %s - Size (kb): %.2f', current_block_n, block.hash[:7], block_time, block_size) var = self.node.submitblock(bytes_to_hex_str(block.serialize())) time.sleep(1) if (((not fMustPass) and (var not in [None, 'bad-txns-invalid-ztdps'])) or (fMustPass and (var != 'inconclusive'))): self.log.error(('submitblock [fMustPass=%s] result: %s' % (str(fMustPass), str(var)))) err_msgs.append(('submitblock %d: %s' % (current_block_n, str(var)))) msg = msg_block(block) try: self.test_nodes[0].handle_connect() self.test_nodes[0].send_message(msg) time.sleep(2) block_ret = self.node.getblock(block.hash) if ((not fMustPass) and (block_ret is not None)): self.log.error(('Error, block stored in %s chain' % name)) err_msgs.append(('getblock %d: result not None' % current_block_n)) if fMustPass: if (block_ret is None): self.log.error(('Error, block NOT stored in %s chain' % name)) err_msgs.append(('getblock %d: result is None' % current_block_n)) else: self.log.info('Good. Block IS stored on disk.') except JSONRPCException as e: exc_msg = str(e) if (exc_msg == "Can't read block from disk (-32603)"): if fMustPass: self.log.warning('Bad! Block was NOT stored to disk.') err_msgs.append(exc_msg) else: self.log.info('Good. Block was not stored on disk.') else: self.log.warning(exc_msg) err_msgs.append(exc_msg) except Exception as e: exc_msg = str(e) self.log.error(exc_msg) err_msgs.append(exc_msg) self.log.info(('Sent all %s blocks.' % str(self.NUM_BLOCKS))) self.log_data_dir_size() return err_msgs
-3,516,039,327,091,069,400
General method to create, send and test the spam blocks :param name: (string) chain branch (usually either "Main" or "Forked") staking_utxo_list: (string list) utxos to use for staking fRandomHeight: (bool) send blocks at random height randomRange: (int) if fRandomHeight=True, height is >= current-randomRange randomRange2: (int) if fRandomHeight=True, height is < current-randomRange2 fDoubleSpend: (bool) if true, stake input is double spent in block.vtx fMustPass: (bool) if true, the blocks must be stored on disk fZPoS: (bool) stake the block with zerocoin spending_utxo_list: (string list) utxos to use for spending :return: err_msgs: (string list) reports error messages from the test or an empty list if test is successful
test/functional/fake_stake/base_test.py
test_spam
tdpsdevextreme/TradePlusCoin
python
def test_spam(self, name, staking_utxo_list, fRandomHeight=False, randomRange=0, randomRange2=0, fDoubleSpend=False, fMustPass=False, fZPoS=False, spending_utxo_list=[]): ' General method to create, send and test the spam blocks\n :param name: (string) chain branch (usually either "Main" or "Forked")\n staking_utxo_list: (string list) utxos to use for staking\n fRandomHeight: (bool) send blocks at random height\n randomRange: (int) if fRandomHeight=True, height is >= current-randomRange\n randomRange2: (int) if fRandomHeight=True, height is < current-randomRange2\n fDoubleSpend: (bool) if true, stake input is double spent in block.vtx\n fMustPass: (bool) if true, the blocks must be stored on disk\n fZPoS: (bool) stake the block with zerocoin\n spending_utxo_list: (string list) utxos to use for spending\n :return: err_msgs: (string list) reports error messages from the test\n or an empty list if test is successful\n ' err_msgs = [] self.log_data_dir_size() block_count = self.node.getblockcount() pastBlockHash = self.node.getblockhash(block_count) randomCount = block_count self.log.info(('Current height: %d' % block_count)) for i in range(0, self.NUM_BLOCKS): if (i != 0): self.log.info(('Sent %d blocks out of %d' % (i, self.NUM_BLOCKS))) if fRandomHeight: randomCount = randint((block_count - randomRange), (block_count - randomRange2)) pastBlockHash = self.node.getblockhash(randomCount) current_block_n = (randomCount + 1) stakingPrevOuts = self.get_prevouts(staking_utxo_list, randomCount, zpos=fZPoS) spendingPrevOuts = self.get_prevouts(spending_utxo_list, randomCount) block = self.create_spam_block(pastBlockHash, stakingPrevOuts, current_block_n, fStakeDoubleSpent=fDoubleSpend, fZPoS=fZPoS, spendingPrevOuts=spendingPrevOuts) block_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(block.nTime)) block_size = (len(block.serialize()) / 1000) self.log.info('Sending block %d [%s...] - nTime: %s - Size (kb): %.2f', current_block_n, block.hash[:7], block_time, block_size) var = self.node.submitblock(bytes_to_hex_str(block.serialize())) time.sleep(1) if (((not fMustPass) and (var not in [None, 'bad-txns-invalid-ztdps'])) or (fMustPass and (var != 'inconclusive'))): self.log.error(('submitblock [fMustPass=%s] result: %s' % (str(fMustPass), str(var)))) err_msgs.append(('submitblock %d: %s' % (current_block_n, str(var)))) msg = msg_block(block) try: self.test_nodes[0].handle_connect() self.test_nodes[0].send_message(msg) time.sleep(2) block_ret = self.node.getblock(block.hash) if ((not fMustPass) and (block_ret is not None)): self.log.error(('Error, block stored in %s chain' % name)) err_msgs.append(('getblock %d: result not None' % current_block_n)) if fMustPass: if (block_ret is None): self.log.error(('Error, block NOT stored in %s chain' % name)) err_msgs.append(('getblock %d: result is None' % current_block_n)) else: self.log.info('Good. Block IS stored on disk.') except JSONRPCException as e: exc_msg = str(e) if (exc_msg == "Can't read block from disk (-32603)"): if fMustPass: self.log.warning('Bad! Block was NOT stored to disk.') err_msgs.append(exc_msg) else: self.log.info('Good. Block was not stored on disk.') else: self.log.warning(exc_msg) err_msgs.append(exc_msg) except Exception as e: exc_msg = str(e) self.log.error(exc_msg) err_msgs.append(exc_msg) self.log.info(('Sent all %s blocks.' % str(self.NUM_BLOCKS))) self.log_data_dir_size() return err_msgs
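The acceptance check inside test_spam above encodes the expected submitblock outcomes: a block that must pass should come back 'inconclusive', while a block that must fail should be rejected as None or 'bad-txns-invalid-ztdps'. A minimal sketch of just that predicate, using the same literals, so the branch logic can be exercised on its own (the helper name is hypothetical, not part of the test suite):

def submitblock_unexpected(result, must_pass):
    # Mirrors the error condition logged by test_spam above.
    if must_pass:
        return result != 'inconclusive'
    return result not in (None, 'bad-txns-invalid-ztdps')

assert not submitblock_unexpected('inconclusive', True)   # must-pass block accepted
assert not submitblock_unexpected(None, False)            # must-fail block rejected
assert submitblock_unexpected('bad-blk-length', False)    # unexpected rejection reason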
def crossmul(**kwds): 'A factory for Crossmul' from .Crossmul import Crossmul return Crossmul(**kwds)
-7,984,824,203,532,728,000
A factory for Crossmul
python/packages/isce3/signal/__init__.py
crossmul
piyushrpt/isce3
python
def crossmul(**kwds): from .Crossmul import Crossmul return Crossmul(**kwds)
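crossmul above is a thin factory: it defers importing the concrete class until call time and forwards all keyword arguments. A self-contained sketch of the same pattern, with a hypothetical Widget class standing in for isce3's Crossmul:

class Widget:
    def __init__(self, **kwds):
        self.options = kwds

def widget(**kwds):
    'A factory for Widget.'
    # The real factory imports Crossmul inside the function body, so the
    # import cost is only paid when the factory is actually invoked.
    return Widget(**kwds)

w = widget(rows=4, cols=8)
assert w.options == {'rows': 4, 'cols': 8}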
@pytest.mark.parametrize(('acquire', 'use', 'release', 'final_result', 'log'), [(_acquire_success, _use_success, _ReleaseSuccess, IOSuccess('use success'), [('acquire success', Success('use success'))]), (_acquire_success, _use_success, _ReleaseFailure, IOFailure('release failure'), []), (_acquire_success, _use_failure, _ReleaseSuccess, IOFailure('use failure'), [('acquire success', Failure('use failure'))]), (_acquire_success, _use_failure, _ReleaseFailure, IOFailure('release failure'), []), (_acquire_failure, _use_success, _ReleaseSuccess, IOFailure('acquire failure'), []), (_acquire_failure, _use_failure, _ReleaseSuccess, IOFailure('acquire failure'), []), (_acquire_failure, _use_success, _ReleaseFailure, IOFailure('acquire failure'), []), (_acquire_failure, _use_failure, _ReleaseFailure, IOFailure('acquire failure'), [])]) def test_all_success(acquire, use, release, final_result, log): 'Ensures that managed works as intended.' pipeline_logs: List[Tuple[(str, Result[(str, str)])]] = [] pipeline_result = managed(use, release(pipeline_logs))(acquire) assert (pipeline_result == final_result) assert (pipeline_logs == log)
5,979,508,904,576,942,000
Ensures that managed works as intended.
tests/test_pipeline/test_managed/test_managed_ioresult.py
test_all_success
CucumisSativus/returns
python
@pytest.mark.parametrize(('acquire', 'use', 'release', 'final_result', 'log'), [(_acquire_success, _use_success, _ReleaseSuccess, IOSuccess('use success'), [('acquire success', Success('use success'))]), (_acquire_success, _use_success, _ReleaseFailure, IOFailure('release failure'), []), (_acquire_success, _use_failure, _ReleaseSuccess, IOFailure('use failure'), [('acquire success', Failure('use failure'))]), (_acquire_success, _use_failure, _ReleaseFailure, IOFailure('release failure'), []), (_acquire_failure, _use_success, _ReleaseSuccess, IOFailure('acquire failure'), []), (_acquire_failure, _use_failure, _ReleaseSuccess, IOFailure('acquire failure'), []), (_acquire_failure, _use_success, _ReleaseFailure, IOFailure('acquire failure'), []), (_acquire_failure, _use_failure, _ReleaseFailure, IOFailure('acquire failure'), [])]) def test_all_success(acquire, use, release, final_result, log): pipeline_logs: List[Tuple[(str, Result[(str, str)])]] = [] pipeline_result = managed(use, release(pipeline_logs))(acquire) assert (pipeline_result == final_result) assert (pipeline_logs == log)
def test_full_typing(): 'This test is here to be a case for typing.' logs: List[Tuple[(str, Result[(str, str)])]] = [] pipeline_result = managed(_use_success, _ReleaseSuccess(logs))(_acquire_success) assert (pipeline_result == IOSuccess('use success')) assert (logs == [('acquire success', Success('use success'))])
2,958,457,838,396,522,000
This test is here to be a case for typing.
tests/test_pipeline/test_managed/test_managed_ioresult.py
test_full_typing
CucumisSativus/returns
python
def test_full_typing(): logs: List[Tuple[(str, Result[(str, str)])]] = [] pipeline_result = managed(_use_success, _ReleaseSuccess(logs))(_acquire_success) assert (pipeline_result == IOSuccess('use success')) assert (logs == [('acquire success', Success('use success'))])
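Both returns tests above call helpers (_acquire_success, _use_success, _ReleaseSuccess, ...) defined earlier in the test module. A self-contained, hedged sketch of the same managed pipeline, with plausible stand-in helpers rather than the module's actual definitions (assumes the returns.pipeline.managed API, where release receives the acquired value and the Result of the use step):

from returns.io import IOResult, IOSuccess
from returns.pipeline import managed
from returns.result import Result

def _use(inner: str) -> IOResult[str, str]:
    return IOSuccess('use success')

def _release(inner: str, use_result: Result[str, str]) -> IOResult[None, str]:
    # Release sees both the acquired value and the outcome of the use step.
    return IOSuccess(None)

assert managed(_use, _release)(IOSuccess('acquire success')) == IOSuccess('use success')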
def getDivisionFailure(*args, **kwargs): "\n Make a C{Failure} of a divide-by-zero error.\n\n @param args: Any C{*args} are passed to Failure's constructor.\n @param kwargs: Any C{**kwargs} are passed to Failure's constructor.\n " try: (1 / 0) except: f = failure.Failure(*args, **kwargs) return f
-8,652,234,052,711,301,000
Make a C{Failure} of a divide-by-zero error. @param args: Any C{*args} are passed to Failure's constructor. @param kwargs: Any C{**kwargs} are passed to Failure's constructor.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
getDivisionFailure
XZH950926/meizitu
python
def getDivisionFailure(*args, **kwargs): "\n Make a C{Failure} of a divide-by-zero error.\n\n @param args: Any C{*args} are passed to Failure's constructor.\n @param kwargs: Any C{**kwargs} are passed to Failure's constructor.\n " try: (1 / 0) except: f = failure.Failure(*args, **kwargs) return f
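getDivisionFailure relies on twisted.python.failure.Failure snapshotting whatever exception is active in the enclosing except block. A short usage sketch, assuming only that public API:

from twisted.python import failure

def get_division_failure():
    try:
        1 / 0
    except ZeroDivisionError:
        return failure.Failure()   # captures the active exception state

f = get_division_failure()
assert f.type is ZeroDivisionError             # the wrapped exception class
assert isinstance(f.value, ZeroDivisionError)  # the wrapped exception instance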
def test_failAndTrap(self): '\n Trapping a L{Failure}.\n ' try: raise NotImplementedError('test') except: f = failure.Failure() error = f.trap(SystemExit, RuntimeError) self.assertEqual(error, RuntimeError) self.assertEqual(f.type, NotImplementedError)
-6,052,471,848,568,529,000
Trapping a L{Failure}.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_failAndTrap
XZH950926/meizitu
python
def test_failAndTrap(self): '\n \n ' try: raise NotImplementedError('test') except: f = failure.Failure() error = f.trap(SystemExit, RuntimeError) self.assertEqual(error, RuntimeError) self.assertEqual(f.type, NotImplementedError)
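The trap assertion above works because NotImplementedError subclasses RuntimeError: trap returns the first expected type the wrapped exception matches, while the failure's own type is left untouched. A compact sketch of that behaviour:

from twisted.python import failure

try:
    raise NotImplementedError('test')
except NotImplementedError:
    f = failure.Failure()

assert f.trap(SystemExit, RuntimeError) is RuntimeError  # matched base class
assert f.type is NotImplementedError                     # original type preserved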
def test_trapRaisesWrappedException(self): '\n If the wrapped C{Exception} is not a subclass of one of the\n expected types, L{failure.Failure.trap} raises the wrapped\n C{Exception}.\n ' if (not _PY3): raise SkipTest('\n Only expected behaviour on Python 3.\n @see U{http://twisted.readthedocs.io/en/latest/core/howto/python3.html#twisted-python-failure}\n ') exception = ValueError() try: raise exception except: f = failure.Failure() untrapped = self.assertRaises(ValueError, f.trap, OverflowError) self.assertIs(exception, untrapped)
-1,991,415,557,886,170,600
If the wrapped C{Exception} is not a subclass of one of the expected types, L{failure.Failure.trap} raises the wrapped C{Exception}.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_trapRaisesWrappedException
XZH950926/meizitu
python
def test_trapRaisesWrappedException(self): '\n If the wrapped C{Exception} is not a subclass of one of the\n expected types, L{failure.Failure.trap} raises the wrapped\n C{Exception}.\n ' if (not _PY3): raise SkipTest('\n Only expected behaviour on Python 3.\n @see U{http://twisted.readthedocs.io/en/latest/core/howto/python3.html#twisted-python-failure}\n ') exception = ValueError() try: raise exception except: f = failure.Failure() untrapped = self.assertRaises(ValueError, f.trap, OverflowError) self.assertIs(exception, untrapped)
def test_trapRaisesSelf(self): '\n If the wrapped C{Exception} is not a subclass of one of the\n expected types, L{failure.Failure.trap} raises itself.\n ' if _PY3: raise SkipTest('\n Only expected behaviour on Python 2.\n @see U{http://twisted.readthedocs.io/en/latest/core/howto/python3.html#twisted-python-failure}\n ') exception = ValueError() try: raise exception except: f = failure.Failure() untrapped = self.assertRaises(failure.Failure, f.trap, OverflowError) self.assertIs(f, untrapped)
6,533,851,203,345,425,000
If the wrapped C{Exception} is not a subclass of one of the expected types, L{failure.Failure.trap} raises itself.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_trapRaisesSelf
XZH950926/meizitu
python
def test_trapRaisesSelf(self): '\n If the wrapped C{Exception} is not a subclass of one of the\n expected types, L{failure.Failure.trap} raises itself.\n ' if _PY3: raise SkipTest('\n Only expected behaviour on Python 2.\n @see U{http://twisted.readthedocs.io/en/latest/core/howto/python3.html#twisted-python-failure}\n ') exception = ValueError() try: raise exception except: f = failure.Failure() untrapped = self.assertRaises(failure.Failure, f.trap, OverflowError) self.assertIs(f, untrapped)
def test_failureValueFromFailure(self): '\n A L{failure.Failure} constructed from another\n L{failure.Failure} instance, has its C{value} property set to\n the value of that L{failure.Failure} instance.\n ' exception = ValueError() f1 = failure.Failure(exception) f2 = failure.Failure(f1) self.assertIs(f2.value, exception)
-3,928,023,598,779,263,500
A L{failure.Failure} constructed from another L{failure.Failure} instance has its C{value} property set to the value of that L{failure.Failure} instance.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_failureValueFromFailure
XZH950926/meizitu
python
def test_failureValueFromFailure(self): '\n A L{failure.Failure} constructed from another\n L{failure.Failure} instance, has its C{value} property set to\n the value of that L{failure.Failure} instance.\n ' exception = ValueError() f1 = failure.Failure(exception) f2 = failure.Failure(f1) self.assertIs(f2.value, exception)
def test_failureValueFromFoundFailure(self): '\n A L{failure.Failure} constructed without a C{exc_value}\n argument, will search for an "original" C{Failure}, and if\n found, its value will be used as the value for the new\n C{Failure}.\n ' exception = ValueError() f1 = failure.Failure(exception) try: f1.trap(OverflowError) except: f2 = failure.Failure() self.assertIs(f2.value, exception)
-6,646,359,321,119,548,000
A L{failure.Failure} constructed without an C{exc_value} argument will search for an "original" C{Failure}, and if found, its value will be used as the value for the new C{Failure}.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_failureValueFromFoundFailure
XZH950926/meizitu
python
def test_failureValueFromFoundFailure(self): '\n A L{failure.Failure} constructed without a C{exc_value}\n argument, will search for an "original" C{Failure}, and if\n found, its value will be used as the value for the new\n C{Failure}.\n ' exception = ValueError() f1 = failure.Failure(exception) try: f1.trap(OverflowError) except: f2 = failure.Failure() self.assertIs(f2.value, exception)
def assertStartsWith(self, s, prefix): '\n Assert that C{s} starts with a particular C{prefix}.\n\n @param s: The input string.\n @type s: C{str}\n @param prefix: The string that C{s} should start with.\n @type prefix: C{str}\n ' self.assertTrue(s.startswith(prefix), ('%r is not the start of %r' % (prefix, s)))
-6,953,724,664,444,620,000
Assert that C{s} starts with a particular C{prefix}. @param s: The input string. @type s: C{str} @param prefix: The string that C{s} should start with. @type prefix: C{str}
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
assertStartsWith
XZH950926/meizitu
python
def assertStartsWith(self, s, prefix): '\n Assert that C{s} starts with a particular C{prefix}.\n\n @param s: The input string.\n @type s: C{str}\n @param prefix: The string that C{s} should start with.\n @type prefix: C{str}\n ' self.assertTrue(s.startswith(prefix), ('%r is not the start of %r' % (prefix, s)))
def assertEndsWith(self, s, suffix): '\n        Assert that C{s} ends with a particular C{suffix}.\n\n        @param s: The input string.\n        @type s: C{str}\n        @param suffix: The string that C{s} should end with.\n        @type suffix: C{str}\n        ' self.assertTrue(s.endswith(suffix), ('%r is not the end of %r' % (suffix, s)))
-6,076,212,063,322,656,000
Assert that C{s} ends with a particular C{suffix}. @param s: The input string. @type s: C{str} @param suffix: The string that C{s} should end with. @type suffix: C{str}
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
assertEndsWith
XZH950926/meizitu
python
def assertEndsWith(self, s, suffix): '\n        Assert that C{s} ends with a particular C{suffix}.\n\n        @param s: The input string.\n        @type s: C{str}\n        @param suffix: The string that C{s} should end with.\n        @type suffix: C{str}\n        ' self.assertTrue(s.endswith(suffix), ('%r is not the end of %r' % (suffix, s)))
def assertTracebackFormat(self, tb, prefix, suffix): '\n Assert that the C{tb} traceback contains a particular C{prefix} and\n C{suffix}.\n\n @param tb: The traceback string.\n @type tb: C{str}\n @param prefix: The string that C{tb} should start with.\n @type prefix: C{str}\n @param suffix: The string that C{tb} should end with.\n @type suffix: C{str}\n ' self.assertStartsWith(tb, prefix) self.assertEndsWith(tb, suffix)
3,913,969,195,792,636,000
Assert that the C{tb} traceback contains a particular C{prefix} and C{suffix}. @param tb: The traceback string. @type tb: C{str} @param prefix: The string that C{tb} should start with. @type prefix: C{str} @param suffix: The string that C{tb} should end with. @type suffix: C{str}
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
assertTracebackFormat
XZH950926/meizitu
python
def assertTracebackFormat(self, tb, prefix, suffix): '\n Assert that the C{tb} traceback contains a particular C{prefix} and\n C{suffix}.\n\n @param tb: The traceback string.\n @type tb: C{str}\n @param prefix: The string that C{tb} should start with.\n @type prefix: C{str}\n @param suffix: The string that C{tb} should end with.\n @type suffix: C{str}\n ' self.assertStartsWith(tb, prefix) self.assertEndsWith(tb, suffix)
def assertDetailedTraceback(self, captureVars=False, cleanFailure=False): "\n Assert that L{printDetailedTraceback} produces and prints a detailed\n traceback.\n\n The detailed traceback consists of a header::\n\n *--- Failure #20 ---\n\n The body contains the stacktrace::\n\n /twisted/trial/_synctest.py:1180: _run(...)\n /twisted/python/util.py:1076: runWithWarningsSuppressed(...)\n --- <exception caught here> ---\n /twisted/test/test_failure.py:39: getDivisionFailure(...)\n\n If C{captureVars} is enabled the body also includes a list of\n globals and locals::\n\n [ Locals ]\n exampleLocalVar : 'xyz'\n ...\n ( Globals )\n ...\n\n Or when C{captureVars} is disabled::\n\n [Capture of Locals and Globals disabled (use captureVars=True)]\n\n When C{cleanFailure} is enabled references to other objects are removed\n and replaced with strings.\n\n And finally the footer with the L{Failure}'s value::\n\n exceptions.ZeroDivisionError: float division\n *--- End of Failure #20 ---\n\n @param captureVars: Enables L{Failure.captureVars}.\n @type captureVars: C{bool}\n @param cleanFailure: Enables L{Failure.cleanFailure}.\n @type cleanFailure: C{bool}\n " if captureVars: exampleLocalVar = 'xyz' exampleLocalVar f = getDivisionFailure(captureVars=captureVars) out = NativeStringIO() if cleanFailure: f.cleanFailure() f.printDetailedTraceback(out) tb = out.getvalue() start = ('*--- Failure #%d%s---\n' % (f.count, ((f.pickled and ' (pickled) ') or ' '))) end = ('%s: %s\n*--- End of Failure #%s ---\n' % (reflect.qual(f.type), reflect.safe_str(f.value), f.count)) self.assertTracebackFormat(tb, start, end) linesWithVars = [line for line in tb.splitlines() if line.startswith(' ')] if captureVars: self.assertNotEqual([], linesWithVars) if cleanFailure: line = ' exampleLocalVar : "\'xyz\'"' else: line = " exampleLocalVar : 'xyz'" self.assertIn(line, linesWithVars) else: self.assertEqual([], linesWithVars) self.assertIn(' [Capture of Locals and Globals disabled (use captureVars=True)]\n', tb)
-5,696,409,047,367,107,000
Assert that L{printDetailedTraceback} produces and prints a detailed traceback. The detailed traceback consists of a header:: *--- Failure #20 --- The body contains the stacktrace:: /twisted/trial/_synctest.py:1180: _run(...) /twisted/python/util.py:1076: runWithWarningsSuppressed(...) --- <exception caught here> --- /twisted/test/test_failure.py:39: getDivisionFailure(...) If C{captureVars} is enabled the body also includes a list of globals and locals:: [ Locals ] exampleLocalVar : 'xyz' ... ( Globals ) ... Or when C{captureVars} is disabled:: [Capture of Locals and Globals disabled (use captureVars=True)] When C{cleanFailure} is enabled references to other objects are removed and replaced with strings. And finally the footer with the L{Failure}'s value:: exceptions.ZeroDivisionError: float division *--- End of Failure #20 --- @param captureVars: Enables L{Failure.captureVars}. @type captureVars: C{bool} @param cleanFailure: Enables L{Failure.cleanFailure}. @type cleanFailure: C{bool}
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
assertDetailedTraceback
XZH950926/meizitu
python
def assertDetailedTraceback(self, captureVars=False, cleanFailure=False): "\n Assert that L{printDetailedTraceback} produces and prints a detailed\n traceback.\n\n The detailed traceback consists of a header::\n\n *--- Failure #20 ---\n\n The body contains the stacktrace::\n\n /twisted/trial/_synctest.py:1180: _run(...)\n /twisted/python/util.py:1076: runWithWarningsSuppressed(...)\n --- <exception caught here> ---\n /twisted/test/test_failure.py:39: getDivisionFailure(...)\n\n If C{captureVars} is enabled the body also includes a list of\n globals and locals::\n\n [ Locals ]\n exampleLocalVar : 'xyz'\n ...\n ( Globals )\n ...\n\n Or when C{captureVars} is disabled::\n\n [Capture of Locals and Globals disabled (use captureVars=True)]\n\n When C{cleanFailure} is enabled references to other objects are removed\n and replaced with strings.\n\n And finally the footer with the L{Failure}'s value::\n\n exceptions.ZeroDivisionError: float division\n *--- End of Failure #20 ---\n\n @param captureVars: Enables L{Failure.captureVars}.\n @type captureVars: C{bool}\n @param cleanFailure: Enables L{Failure.cleanFailure}.\n @type cleanFailure: C{bool}\n " if captureVars: exampleLocalVar = 'xyz' exampleLocalVar f = getDivisionFailure(captureVars=captureVars) out = NativeStringIO() if cleanFailure: f.cleanFailure() f.printDetailedTraceback(out) tb = out.getvalue() start = ('*--- Failure #%d%s---\n' % (f.count, ((f.pickled and ' (pickled) ') or ' '))) end = ('%s: %s\n*--- End of Failure #%s ---\n' % (reflect.qual(f.type), reflect.safe_str(f.value), f.count)) self.assertTracebackFormat(tb, start, end) linesWithVars = [line for line in tb.splitlines() if line.startswith(' ')] if captureVars: self.assertNotEqual([], linesWithVars) if cleanFailure: line = ' exampleLocalVar : "\'xyz\'"' else: line = " exampleLocalVar : 'xyz'" self.assertIn(line, linesWithVars) else: self.assertEqual([], linesWithVars) self.assertIn(' [Capture of Locals and Globals disabled (use captureVars=True)]\n', tb)
def assertBriefTraceback(self, captureVars=False): "\n Assert that L{printBriefTraceback} produces and prints a brief\n traceback.\n\n The brief traceback consists of a header::\n\n Traceback: <type 'exceptions.ZeroDivisionError'>: float division\n\n The body with the stacktrace::\n\n /twisted/trial/_synctest.py:1180:_run\n /twisted/python/util.py:1076:runWithWarningsSuppressed\n\n And the footer::\n\n --- <exception caught here> ---\n /twisted/test/test_failure.py:39:getDivisionFailure\n\n @param captureVars: Enables L{Failure.captureVars}.\n @type captureVars: C{bool}\n " if captureVars: exampleLocalVar = 'abcde' exampleLocalVar f = getDivisionFailure() out = NativeStringIO() f.printBriefTraceback(out) tb = out.getvalue() stack = '' for (method, filename, lineno, localVars, globalVars) in f.frames: stack += ('%s:%s:%s\n' % (filename, lineno, method)) zde = repr(ZeroDivisionError) self.assertTracebackFormat(tb, ('Traceback: %s: ' % (zde,)), ('%s\n%s' % (failure.EXCEPTION_CAUGHT_HERE, stack))) if captureVars: self.assertIsNone(re.search('exampleLocalVar.*abcde', tb))
4,406,321,726,828,651,500
Assert that L{printBriefTraceback} produces and prints a brief traceback. The brief traceback consists of a header:: Traceback: <type 'exceptions.ZeroDivisionError'>: float division The body with the stacktrace:: /twisted/trial/_synctest.py:1180:_run /twisted/python/util.py:1076:runWithWarningsSuppressed And the footer:: --- <exception caught here> --- /twisted/test/test_failure.py:39:getDivisionFailure @param captureVars: Enables L{Failure.captureVars}. @type captureVars: C{bool}
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
assertBriefTraceback
XZH950926/meizitu
python
def assertBriefTraceback(self, captureVars=False): "\n        Assert that L{printBriefTraceback} produces and prints a brief\n        traceback.\n\n        The brief traceback consists of a header::\n\n          Traceback: <type 'exceptions.ZeroDivisionError'>: float division\n\n        The body with the stacktrace::\n\n          /twisted/trial/_synctest.py:1180:_run\n          /twisted/python/util.py:1076:runWithWarningsSuppressed\n\n        And the footer::\n\n          --- <exception caught here> ---\n          /twisted/test/test_failure.py:39:getDivisionFailure\n\n        @param captureVars: Enables L{Failure.captureVars}.\n        @type captureVars: C{bool}\n        " if captureVars: exampleLocalVar = 'abcde' exampleLocalVar f = getDivisionFailure() out = NativeStringIO() f.printBriefTraceback(out) tb = out.getvalue() stack = '' for (method, filename, lineno, localVars, globalVars) in f.frames: stack += ('%s:%s:%s\n' % (filename, lineno, method)) zde = repr(ZeroDivisionError) self.assertTracebackFormat(tb, ('Traceback: %s: ' % (zde,)), ('%s\n%s' % (failure.EXCEPTION_CAUGHT_HERE, stack))) if captureVars: self.assertIsNone(re.search('exampleLocalVar.*abcde', tb))
def assertDefaultTraceback(self, captureVars=False): '\n Assert that L{printTraceback} produces and prints a default traceback.\n\n The default traceback consists of a header::\n\n Traceback (most recent call last):\n\n The body with traceback::\n\n File "/twisted/trial/_synctest.py", line 1180, in _run\n runWithWarningsSuppressed(suppress, method)\n\n And the footer::\n\n --- <exception caught here> ---\n File "twisted/test/test_failure.py", line 39, in getDivisionFailure\n 1/0\n exceptions.ZeroDivisionError: float division\n\n @param captureVars: Enables L{Failure.captureVars}.\n @type captureVars: C{bool}\n ' if captureVars: exampleLocalVar = 'xyzzy' exampleLocalVar f = getDivisionFailure(captureVars=captureVars) out = NativeStringIO() f.printTraceback(out) tb = out.getvalue() stack = '' for (method, filename, lineno, localVars, globalVars) in f.frames: stack += (' File "%s", line %s, in %s\n' % (filename, lineno, method)) stack += (' %s\n' % (linecache.getline(filename, lineno).strip(),)) self.assertTracebackFormat(tb, 'Traceback (most recent call last):', ('%s\n%s%s: %s\n' % (failure.EXCEPTION_CAUGHT_HERE, stack, reflect.qual(f.type), reflect.safe_str(f.value)))) if captureVars: self.assertIsNone(re.search('exampleLocalVar.*xyzzy', tb))
-281,057,180,494,909,540
Assert that L{printTraceback} produces and prints a default traceback. The default traceback consists of a header:: Traceback (most recent call last): The body with traceback:: File "/twisted/trial/_synctest.py", line 1180, in _run runWithWarningsSuppressed(suppress, method) And the footer:: --- <exception caught here> --- File "twisted/test/test_failure.py", line 39, in getDivisionFailure 1/0 exceptions.ZeroDivisionError: float division @param captureVars: Enables L{Failure.captureVars}. @type captureVars: C{bool}
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
assertDefaultTraceback
XZH950926/meizitu
python
def assertDefaultTraceback(self, captureVars=False): '\n        Assert that L{printTraceback} produces and prints a default traceback.\n\n        The default traceback consists of a header::\n\n          Traceback (most recent call last):\n\n        The body with traceback::\n\n          File "/twisted/trial/_synctest.py", line 1180, in _run\n             runWithWarningsSuppressed(suppress, method)\n\n        And the footer::\n\n          --- <exception caught here> ---\n            File "twisted/test/test_failure.py", line 39, in getDivisionFailure\n              1/0\n            exceptions.ZeroDivisionError: float division\n\n        @param captureVars: Enables L{Failure.captureVars}.\n        @type captureVars: C{bool}\n        ' if captureVars: exampleLocalVar = 'xyzzy' exampleLocalVar f = getDivisionFailure(captureVars=captureVars) out = NativeStringIO() f.printTraceback(out) tb = out.getvalue() stack = '' for (method, filename, lineno, localVars, globalVars) in f.frames: stack += ('  File "%s", line %s, in %s\n' % (filename, lineno, method)) stack += ('    %s\n' % (linecache.getline(filename, lineno).strip(),)) self.assertTracebackFormat(tb, 'Traceback (most recent call last):', ('%s\n%s%s: %s\n' % (failure.EXCEPTION_CAUGHT_HERE, stack, reflect.qual(f.type), reflect.safe_str(f.value)))) if captureVars: self.assertIsNone(re.search('exampleLocalVar.*xyzzy', tb))
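Both assertion helpers above rebuild the expected stack text from Failure.frames, unpacking each entry as a (methodname, filename, lineno, locals, globals) tuple. A tiny sketch of that formatting step with literal frame data in the same shape:

frames = [
    ('_run', '/twisted/trial/_synctest.py', 1180, {}, {}),
    ('runWithWarningsSuppressed', '/twisted/python/util.py', 1076, {}, {}),
]

brief = ''.join('%s:%s:%s\n' % (filename, lineno, method)
                for method, filename, lineno, _locals, _globals in frames)
assert brief.splitlines() == [
    '/twisted/trial/_synctest.py:1180:_run',
    '/twisted/python/util.py:1076:runWithWarningsSuppressed',
]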
def test_printDetailedTraceback(self): "\n L{printDetailedTraceback} returns a detailed traceback including the\n L{Failure}'s count.\n " self.assertDetailedTraceback()
4,912,148,342,093,289,000
L{printDetailedTraceback} returns a detailed traceback including the L{Failure}'s count.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_printDetailedTraceback
XZH950926/meizitu
python
def test_printDetailedTraceback(self): "\n L{printDetailedTraceback} returns a detailed traceback including the\n L{Failure}'s count.\n " self.assertDetailedTraceback()
def test_printBriefTraceback(self): '\n L{printBriefTraceback} returns a brief traceback.\n ' self.assertBriefTraceback()
3,682,398,796,826,470,400
L{printBriefTraceback} returns a brief traceback.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_printBriefTraceback
XZH950926/meizitu
python
def test_printBriefTraceback(self): '\n \n ' self.assertBriefTraceback()
def test_printTraceback(self): '\n L{printTraceback} returns a traceback.\n ' self.assertDefaultTraceback()
4,268,356,974,114,568,000
L{printTraceback} returns a traceback.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_printTraceback
XZH950926/meizitu
python
def test_printTraceback(self): '\n \n ' self.assertDefaultTraceback()
def test_printDetailedTracebackCapturedVars(self): '\n L{printDetailedTraceback} captures the locals and globals for its\n stack frames and adds them to the traceback, when called on a\n L{Failure} constructed with C{captureVars=True}.\n ' self.assertDetailedTraceback(captureVars=True)
-1,798,578,512,415,068,000
L{printDetailedTraceback} captures the locals and globals for its stack frames and adds them to the traceback, when called on a L{Failure} constructed with C{captureVars=True}.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_printDetailedTracebackCapturedVars
XZH950926/meizitu
python
def test_printDetailedTracebackCapturedVars(self): '\n L{printDetailedTraceback} captures the locals and globals for its\n stack frames and adds them to the traceback, when called on a\n L{Failure} constructed with C{captureVars=True}.\n ' self.assertDetailedTraceback(captureVars=True)
def test_printBriefTracebackCapturedVars(self): '\n L{printBriefTraceback} returns a brief traceback when called on a\n L{Failure} constructed with C{captureVars=True}.\n\n Local variables on the stack can not be seen in the resulting\n traceback.\n ' self.assertBriefTraceback(captureVars=True)
7,831,598,147,435,002,000
L{printBriefTraceback} returns a brief traceback when called on a L{Failure} constructed with C{captureVars=True}. Local variables on the stack can not be seen in the resulting traceback.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_printBriefTracebackCapturedVars
XZH950926/meizitu
python
def test_printBriefTracebackCapturedVars(self): '\n L{printBriefTraceback} returns a brief traceback when called on a\n L{Failure} constructed with C{captureVars=True}.\n\n Local variables on the stack can not be seen in the resulting\n traceback.\n ' self.assertBriefTraceback(captureVars=True)
def test_printTracebackCapturedVars(self): '\n L{printTraceback} returns a traceback when called on a L{Failure}\n constructed with C{captureVars=True}.\n\n Local variables on the stack can not be seen in the resulting\n traceback.\n ' self.assertDefaultTraceback(captureVars=True)
-5,146,097,845,496,934,000
L{printTraceback} returns a traceback when called on a L{Failure} constructed with C{captureVars=True}. Local variables on the stack can not be seen in the resulting traceback.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_printTracebackCapturedVars
XZH950926/meizitu
python
def test_printTracebackCapturedVars(self): '\n L{printTraceback} returns a traceback when called on a L{Failure}\n constructed with C{captureVars=True}.\n\n Local variables on the stack can not be seen in the resulting\n traceback.\n ' self.assertDefaultTraceback(captureVars=True)
def test_printDetailedTracebackCapturedVarsCleaned(self): '\n C{printDetailedTraceback} includes information about local variables on\n the stack after C{cleanFailure} has been called.\n ' self.assertDetailedTraceback(captureVars=True, cleanFailure=True)
142,082,274,718,069,710
C{printDetailedTraceback} includes information about local variables on the stack after C{cleanFailure} has been called.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_printDetailedTracebackCapturedVarsCleaned
XZH950926/meizitu
python
def test_printDetailedTracebackCapturedVarsCleaned(self): '\n C{printDetailedTraceback} includes information about local variables on\n the stack after C{cleanFailure} has been called.\n ' self.assertDetailedTraceback(captureVars=True, cleanFailure=True)
def test_invalidFormatFramesDetail(self): '\n L{failure.format_frames} raises a L{ValueError} if the supplied\n C{detail} level is unknown.\n ' self.assertRaises(ValueError, failure.format_frames, None, None, detail='noisia')
264,949,235,067,547,040
L{failure.format_frames} raises a L{ValueError} if the supplied C{detail} level is unknown.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_invalidFormatFramesDetail
XZH950926/meizitu
python
def test_invalidFormatFramesDetail(self): '\n L{failure.format_frames} raises a L{ValueError} if the supplied\n C{detail} level is unknown.\n ' self.assertRaises(ValueError, failure.format_frames, None, None, detail='noisia')
def test_stringExceptionConstruction(self): '\n Constructing a C{Failure} with a string as its exception value raises\n a C{TypeError}, as this is no longer supported as of Python 2.6.\n ' exc = self.assertRaises(TypeError, failure.Failure, 'ono!') self.assertIn('Strings are not supported by Failure', str(exc))
7,303,733,056,566,425,000
Constructing a C{Failure} with a string as its exception value raises a C{TypeError}, as this is no longer supported as of Python 2.6.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_stringExceptionConstruction
XZH950926/meizitu
python
def test_stringExceptionConstruction(self): '\n Constructing a C{Failure} with a string as its exception value raises\n a C{TypeError}, as this is no longer supported as of Python 2.6.\n ' exc = self.assertRaises(TypeError, failure.Failure, 'ono!') self.assertIn('Strings are not supported by Failure', str(exc))
def test_ConstructionFails(self): '\n Creating a Failure with no arguments causes it to try to discover the\n current interpreter exception state. If no such state exists, creating\n the Failure should raise a synchronous exception.\n ' if (sys.version_info < (3, 0)): sys.exc_clear() self.assertRaises(failure.NoCurrentExceptionError, failure.Failure)
5,796,733,913,821,718,000
Creating a Failure with no arguments causes it to try to discover the current interpreter exception state. If no such state exists, creating the Failure should raise a synchronous exception.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_ConstructionFails
XZH950926/meizitu
python
def test_ConstructionFails(self): '\n Creating a Failure with no arguments causes it to try to discover the\n current interpreter exception state. If no such state exists, creating\n the Failure should raise a synchronous exception.\n ' if (sys.version_info < (3, 0)): sys.exc_clear() self.assertRaises(failure.NoCurrentExceptionError, failure.Failure)
def test_getTracebackObject(self): '\n        If the C{Failure} has not been cleaned, then C{getTracebackObject}\n        returns the traceback object that it captured in its constructor.\n        ' f = getDivisionFailure() self.assertEqual(f.getTracebackObject(), f.tb)
2,543,048,896,884,482,000
If the C{Failure} has not been cleaned, then C{getTracebackObject} returns the traceback object that it captured in its constructor.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_getTracebackObject
XZH950926/meizitu
python
def test_getTracebackObject(self): '\n        If the C{Failure} has not been cleaned, then C{getTracebackObject}\n        returns the traceback object that it captured in its constructor.\n        ' f = getDivisionFailure() self.assertEqual(f.getTracebackObject(), f.tb)
def test_getTracebackObjectFromCaptureVars(self): '\n C{captureVars=True} has no effect on the result of\n C{getTracebackObject}.\n ' try: (1 / 0) except ZeroDivisionError: noVarsFailure = failure.Failure() varsFailure = failure.Failure(captureVars=True) self.assertEqual(noVarsFailure.getTracebackObject(), varsFailure.tb)
-2,149,188,538,576,567,600
C{captureVars=True} has no effect on the result of C{getTracebackObject}.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_getTracebackObjectFromCaptureVars
XZH950926/meizitu
python
def test_getTracebackObjectFromCaptureVars(self): '\n C{captureVars=True} has no effect on the result of\n C{getTracebackObject}.\n ' try: (1 / 0) except ZeroDivisionError: noVarsFailure = failure.Failure() varsFailure = failure.Failure(captureVars=True) self.assertEqual(noVarsFailure.getTracebackObject(), varsFailure.tb)
def test_getTracebackObjectFromClean(self): '\n If the Failure has been cleaned, then C{getTracebackObject} returns an\n object that looks the same to L{traceback.extract_tb}.\n ' f = getDivisionFailure() expected = traceback.extract_tb(f.getTracebackObject()) f.cleanFailure() observed = traceback.extract_tb(f.getTracebackObject()) self.assertIsNotNone(expected) self.assertEqual(expected, observed)
-7,771,214,461,307,343,000
If the Failure has been cleaned, then C{getTracebackObject} returns an object that looks the same to L{traceback.extract_tb}.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_getTracebackObjectFromClean
XZH950926/meizitu
python
def test_getTracebackObjectFromClean(self): '\n If the Failure has been cleaned, then C{getTracebackObject} returns an\n object that looks the same to L{traceback.extract_tb}.\n ' f = getDivisionFailure() expected = traceback.extract_tb(f.getTracebackObject()) f.cleanFailure() observed = traceback.extract_tb(f.getTracebackObject()) self.assertIsNotNone(expected) self.assertEqual(expected, observed)
def test_getTracebackObjectFromCaptureVarsAndClean(self): '\n If the Failure was created with captureVars, then C{getTracebackObject}\n returns an object that looks the same to L{traceback.extract_tb}.\n ' f = getDivisionFailure(captureVars=True) expected = traceback.extract_tb(f.getTracebackObject()) f.cleanFailure() observed = traceback.extract_tb(f.getTracebackObject()) self.assertEqual(expected, observed)
-3,117,233,068,195,542,000
If the Failure was created with captureVars, then C{getTracebackObject} returns an object that looks the same to L{traceback.extract_tb}.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_getTracebackObjectFromCaptureVarsAndClean
XZH950926/meizitu
python
def test_getTracebackObjectFromCaptureVarsAndClean(self): '\n If the Failure was created with captureVars, then C{getTracebackObject}\n returns an object that looks the same to L{traceback.extract_tb}.\n ' f = getDivisionFailure(captureVars=True) expected = traceback.extract_tb(f.getTracebackObject()) f.cleanFailure() observed = traceback.extract_tb(f.getTracebackObject()) self.assertEqual(expected, observed)
def test_getTracebackObjectWithoutTraceback(self): '\n L{failure.Failure}s need not be constructed with traceback objects. If\n a C{Failure} has no traceback information at all, C{getTracebackObject}\n just returns None.\n\n None is a good value, because traceback.extract_tb(None) -> [].\n ' f = failure.Failure(Exception('some error')) self.assertIsNone(f.getTracebackObject())
4,546,643,067,208,967,000
L{failure.Failure}s need not be constructed with traceback objects. If a C{Failure} has no traceback information at all, C{getTracebackObject} just returns None. None is a good value, because traceback.extract_tb(None) -> [].
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_getTracebackObjectWithoutTraceback
XZH950926/meizitu
python
def test_getTracebackObjectWithoutTraceback(self): '\n L{failure.Failure}s need not be constructed with traceback objects. If\n a C{Failure} has no traceback information at all, C{getTracebackObject}\n just returns None.\n\n None is a good value, because traceback.extract_tb(None) -> [].\n ' f = failure.Failure(Exception('some error')) self.assertIsNone(f.getTracebackObject())
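The closing remark in that docstring is a standard-library fact and easy to check directly: extract_tb of None yields an empty stack summary, which compares equal to the empty list.

import traceback

assert traceback.extract_tb(None) == []   # no traceback object -> no frames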
def test_tracebackFromExceptionInPython3(self): "\n If a L{failure.Failure} is constructed with an exception but no\n traceback in Python 3, the traceback will be extracted from the\n exception's C{__traceback__} attribute.\n " try: (1 / 0) except: (klass, exception, tb) = sys.exc_info() f = failure.Failure(exception) self.assertIs(f.tb, tb)
-6,128,524,984,914,566,000
If a L{failure.Failure} is constructed with an exception but no traceback in Python 3, the traceback will be extracted from the exception's C{__traceback__} attribute.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_tracebackFromExceptionInPython3
XZH950926/meizitu
python
def test_tracebackFromExceptionInPython3(self): "\n If a L{failure.Failure} is constructed with an exception but no\n traceback in Python 3, the traceback will be extracted from the\n exception's C{__traceback__} attribute.\n " try: (1 / 0) except: (klass, exception, tb) = sys.exc_info() f = failure.Failure(exception) self.assertIs(f.tb, tb)
def test_cleanFailureRemovesTracebackInPython3(self): '\n L{failure.Failure.cleanFailure} sets the C{__traceback__} attribute of\n the exception to L{None} in Python 3.\n ' f = getDivisionFailure() self.assertIsNotNone(f.tb) self.assertIs(f.value.__traceback__, f.tb) f.cleanFailure() self.assertIsNone(f.value.__traceback__)
5,851,350,304,530,237,000
L{failure.Failure.cleanFailure} sets the C{__traceback__} attribute of the exception to L{None} in Python 3.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_cleanFailureRemovesTracebackInPython3
XZH950926/meizitu
python
def test_cleanFailureRemovesTracebackInPython3(self): '\n L{failure.Failure.cleanFailure} sets the C{__traceback__} attribute of\n the exception to L{None} in Python 3.\n ' f = getDivisionFailure() self.assertIsNotNone(f.tb) self.assertIs(f.value.__traceback__, f.tb) f.cleanFailure() self.assertIsNone(f.value.__traceback__)
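The Python 3 behaviour under test rests on the fact that a caught exception instance carries its traceback in __traceback__, and that the attribute is writable; dropping it is part of what cleanFailure does. A standard-library-only sketch:

try:
    1 / 0
except ZeroDivisionError as e:
    exc = e   # rebind: 'e' itself is cleared when the except block exits

assert exc.__traceback__ is not None   # traceback captured on the instance
exc.__traceback__ = None               # the piece of cleanFailure shown above
assert exc.__traceback__ is None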
def test_repr(self): '\n The C{repr} of a L{failure.Failure} shows the type and string\n representation of the underlying exception.\n ' f = getDivisionFailure() typeName = reflect.fullyQualifiedName(ZeroDivisionError) self.assertEqual(repr(f), ('<twisted.python.failure.Failure %s: division by zero>' % (typeName,)))
58,509,812,011,884,360
The C{repr} of a L{failure.Failure} shows the type and string representation of the underlying exception.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_repr
XZH950926/meizitu
python
def test_repr(self): '\n The C{repr} of a L{failure.Failure} shows the type and string\n representation of the underlying exception.\n ' f = getDivisionFailure() typeName = reflect.fullyQualifiedName(ZeroDivisionError) self.assertEqual(repr(f), ('<twisted.python.failure.Failure %s: division by zero>' % (typeName,)))
def _brokenValueTest(self, detail): '\n Construct a L{Failure} with an exception that raises an exception from\n its C{__str__} method and then call C{getTraceback} with the specified\n detail and verify that it returns a string.\n ' x = BrokenStr() f = failure.Failure(x) traceback = f.getTraceback(detail=detail) self.assertIsInstance(traceback, str)
5,467,890,743,580,913,000
Construct a L{Failure} with an exception that raises an exception from its C{__str__} method and then call C{getTraceback} with the specified detail and verify that it returns a string.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
_brokenValueTest
XZH950926/meizitu
python
def _brokenValueTest(self, detail): '\n Construct a L{Failure} with an exception that raises an exception from\n its C{__str__} method and then call C{getTraceback} with the specified\n detail and verify that it returns a string.\n ' x = BrokenStr() f = failure.Failure(x) traceback = f.getTraceback(detail=detail) self.assertIsInstance(traceback, str)
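_brokenValueTest guards the formatting paths against exceptions whose own __str__ raises. The hazard is easy to reproduce, and a fallback in the spirit of twisted's reflect.safe_str sidesteps it; the safe_str below is a hypothetical stand-in, not twisted's implementation:

class BrokenStr(Exception):
    def __str__(self):
        raise RuntimeError('str() is broken on this exception')

def safe_str(obj):
    # Never let formatting itself raise.
    try:
        return str(obj)
    except Exception:
        return '<unprintable %s instance>' % type(obj).__name__

assert safe_str(BrokenStr()) == '<unprintable BrokenStr instance>'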
def test_brokenValueBriefDetail(self): '\n A L{Failure} might wrap an exception with a C{__str__} method which\n raises an exception. In this case, calling C{getTraceback} on the\n failure with the C{"brief"} detail does not raise an exception.\n ' self._brokenValueTest('brief')
-1,727,253,668,860,962,600
A L{Failure} might wrap an exception with a C{__str__} method which raises an exception. In this case, calling C{getTraceback} on the failure with the C{"brief"} detail does not raise an exception.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_brokenValueBriefDetail
XZH950926/meizitu
python
def test_brokenValueBriefDetail(self): '\n A L{Failure} might wrap an exception with a C{__str__} method which\n raises an exception. In this case, calling C{getTraceback} on the\n failure with the C{"brief"} detail does not raise an exception.\n ' self._brokenValueTest('brief')
def test_brokenValueDefaultDetail(self): '\n Like test_brokenValueBriefDetail, but for the C{"default"} detail case.\n ' self._brokenValueTest('default')
-260,785,803,897,932,000
Like test_brokenValueBriefDetail, but for the C{"default"} detail case.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_brokenValueDefaultDetail
XZH950926/meizitu
python
def test_brokenValueDefaultDetail(self): '\n \n ' self._brokenValueTest('default')
def test_brokenValueVerboseDetail(self): '\n        Like test_brokenValueBriefDetail, but for the C{"verbose"} detail case.\n        ' self._brokenValueTest('verbose')
8,684,888,862,353,874,000
Like test_brokenValueBriefDetail, but for the C{"verbose"} detail case.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_brokenValueVerboseDetail
XZH950926/meizitu
python
def test_brokenValueVerboseDetail(self): '\n \n ' self._brokenValueTest('verbose')
def _brokenTypeTest(self, detail): '\n Construct a L{Failure} with an exception type that raises an exception\n from its C{__str__} method and then call C{getTraceback} with the\n specified detail and verify that it returns a string.\n ' f = failure.Failure(BrokenExceptionType()) traceback = f.getTraceback(detail=detail) self.assertIsInstance(traceback, str)
-6,259,474,326,034,104,000
Construct a L{Failure} with an exception type that raises an exception from its C{__str__} method and then call C{getTraceback} with the specified detail and verify that it returns a string.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
_brokenTypeTest
XZH950926/meizitu
python
def _brokenTypeTest(self, detail): '\n Construct a L{Failure} with an exception type that raises an exception\n from its C{__str__} method and then call C{getTraceback} with the\n specified detail and verify that it returns a string.\n ' f = failure.Failure(BrokenExceptionType()) traceback = f.getTraceback(detail=detail) self.assertIsInstance(traceback, str)
def test_brokenTypeBriefDetail(self): '\n A L{Failure} might wrap an exception the type object of which has a\n C{__str__} method which raises an exception. In this case, calling\n C{getTraceback} on the failure with the C{"brief"} detail does not raise\n an exception.\n ' self._brokenTypeTest('brief')
4,389,251,325,825,251,000
A L{Failure} might wrap an exception the type object of which has a C{__str__} method which raises an exception. In this case, calling C{getTraceback} on the failure with the C{"brief"} detail does not raise an exception.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_brokenTypeBriefDetail
XZH950926/meizitu
python
def test_brokenTypeBriefDetail(self): '\n A L{Failure} might wrap an exception the type object of which has a\n C{__str__} method which raises an exception. In this case, calling\n C{getTraceback} on the failure with the C{"brief"} detail does not raise\n an exception.\n ' self._brokenTypeTest('brief')
def test_brokenTypeDefaultDetail(self): '\n Like test_brokenTypeBriefDetail, but for the C{"default"} detail case.\n ' self._brokenTypeTest('default')
-5,164,781,728,596,901,000
Like test_brokenTypeBriefDetail, but for the C{"default"} detail case.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_brokenTypeDefaultDetail
XZH950926/meizitu
python
def test_brokenTypeDefaultDetail(self): '\n \n ' self._brokenTypeTest('default')
def test_brokenTypeVerboseDetail(self): '\n Like test_brokenTypeBriefDetail, but for the C{"verbose"} detail case.\n ' self._brokenTypeTest('verbose')
7,688,043,825,222,599,000
Like test_brokenTypeBriefDetail, but for the C{"verbose"} detail case.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_brokenTypeVerboseDetail
XZH950926/meizitu
python
def test_brokenTypeVerboseDetail(self): '\n \n ' self._brokenTypeTest('verbose')
def test_findNoFailureInExceptionHandler(self): '\n Within an exception handler, _findFailure should return\n L{None} in case no Failure is associated with the current\n exception.\n ' try: (1 / 0) except: self.assertIsNone(failure.Failure._findFailure()) else: self.fail('No exception raised from 1/0!?')
970,034,958,631,364,700
Within an exception handler, _findFailure should return L{None} in case no Failure is associated with the current exception.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_findNoFailureInExceptionHandler
XZH950926/meizitu
python
def test_findNoFailureInExceptionHandler(self): '\n Within an exception handler, _findFailure should return\n L{None} in case no Failure is associated with the current\n exception.\n ' try: (1 / 0) except: self.assertIsNone(failure.Failure._findFailure()) else: self.fail('No exception raised from 1/0!?')
def test_findNoFailure(self): '\n Outside of an exception handler, _findFailure should return None.\n ' if (sys.version_info < (3, 0)): sys.exc_clear() self.assertIsNone(sys.exc_info()[(- 1)]) self.assertIsNone(failure.Failure._findFailure())
-2,677,873,085,622,452,700
Outside of an exception handler, _findFailure should return None.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_findNoFailure
XZH950926/meizitu
python
def test_findNoFailure(self): '\n \n ' if (sys.version_info < (3, 0)): sys.exc_clear() self.assertIsNone(sys.exc_info()[(- 1)]) self.assertIsNone(failure.Failure._findFailure())
def test_findFailure(self): '\n Within an exception handler, it should be possible to find the\n original Failure that caused the current exception (if it was\n caused by raiseException).\n ' f = getDivisionFailure() f.cleanFailure() try: f.raiseException() except: self.assertEqual(failure.Failure._findFailure(), f) else: self.fail('No exception raised from raiseException!?')
8,117,886,591,194,119,000
Within an exception handler, it should be possible to find the original Failure that caused the current exception (if it was caused by raiseException).
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_findFailure
XZH950926/meizitu
python
def test_findFailure(self): '\n Within an exception handler, it should be possible to find the\n original Failure that caused the current exception (if it was\n caused by raiseException).\n ' f = getDivisionFailure() f.cleanFailure() try: f.raiseException() except: self.assertEqual(failure.Failure._findFailure(), f) else: self.fail('No exception raised from raiseException!?')
def test_failureConstructionFindsOriginalFailure(self): '\n When a Failure is constructed in the context of an exception\n handler that is handling an exception raised by\n raiseException, the new Failure should be chained to that\n original Failure.\n ' f = getDivisionFailure() f.cleanFailure() try: f.raiseException() except: newF = failure.Failure() self.assertEqual(f.getTraceback(), newF.getTraceback()) else: self.fail('No exception raised from raiseException!?')
6,428,602,133,564,756,000
When a Failure is constructed in the context of an exception handler that is handling an exception raised by raiseException, the new Failure should be chained to that original Failure.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_failureConstructionFindsOriginalFailure
XZH950926/meizitu
python
def test_failureConstructionFindsOriginalFailure(self): '\n When a Failure is constructed in the context of an exception\n handler that is handling an exception raised by\n raiseException, the new Failure should be chained to that\n original Failure.\n ' f = getDivisionFailure() f.cleanFailure() try: f.raiseException() except: newF = failure.Failure() self.assertEqual(f.getTraceback(), newF.getTraceback()) else: self.fail('No exception raised from raiseException!?')
def test_failureConstructionWithMungedStackSucceeds(self): '\n Pyrex and Cython are known to insert fake stack frames so as to give\n more Python-like tracebacks. These stack frames with empty code objects\n should not break extraction of the exception.\n ' try: raiser.raiseException() except raiser.RaiserException: f = failure.Failure() self.assertTrue(f.check(raiser.RaiserException)) else: self.fail('No exception raised from extension?!')
6,947,278,950,813,051,000
Pyrex and Cython are known to insert fake stack frames so as to give more Python-like tracebacks. These stack frames with empty code objects should not break extraction of the exception.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_failureConstructionWithMungedStackSucceeds
XZH950926/meizitu
python
def test_failureConstructionWithMungedStackSucceeds(self): '\n Pyrex and Cython are known to insert fake stack frames so as to give\n more Python-like tracebacks. These stack frames with empty code objects\n should not break extraction of the exception.\n ' try: raiser.raiseException() except raiser.RaiserException: f = failure.Failure() self.assertTrue(f.check(raiser.RaiserException)) else: self.fail('No exception raised from extension?!')
def test_singleFrame(self): '\n A C{_Traceback} object constructed with a single frame should be able\n to be passed to L{traceback.extract_tb}, and we should get a singleton\n list containing a (filename, lineno, methodname, line) tuple.\n ' tb = failure._Traceback([['method', 'filename.py', 123, {}, {}]]) self.assertEqual(traceback.extract_tb(tb), [_tb('filename.py', 123, 'method', None)])
-1,393,764,764,254,306,000
A C{_Traceback} object constructed with a single frame should be able to be passed to L{traceback.extract_tb}, and we should get a singleton list containing a (filename, lineno, methodname, line) tuple.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_singleFrame
XZH950926/meizitu
python
def test_singleFrame(self): '\n A C{_Traceback} object constructed with a single frame should be able\n to be passed to L{traceback.extract_tb}, and we should get a singleton\n list containing a (filename, lineno, methodname, line) tuple.\n ' tb = failure._Traceback([['method', 'filename.py', 123, {}, {}]]) self.assertEqual(traceback.extract_tb(tb), [_tb('filename.py', 123, 'method', None)])
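As a quick illustration of the fake-traceback contract checked here (a sketch only; _Traceback is private Twisted API, and the frame layout is taken from the test data above):

    import traceback
    from twisted.python import failure

    # Each frame is [name, filename, lineno, locals, globals].
    tb = failure._Traceback([['handler', 'app.py', 42, {}, {}]])
    summary = traceback.extract_tb(tb)
    # One entry: filename 'app.py', line 42, name 'handler', source line None.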
def test_manyFrames(self): '\n A C{_Traceback} object constructed with multiple frames should be able\n to be passed to L{traceback.extract_tb}, and we should get a list\n containing a tuple for each frame.\n ' tb = failure._Traceback([['method1', 'filename.py', 123, {}, {}], ['method2', 'filename.py', 235, {}, {}]]) self.assertEqual(traceback.extract_tb(tb), [_tb('filename.py', 123, 'method1', None), _tb('filename.py', 235, 'method2', None)])
-3,130,121,095,846,734,300
A C{_Traceback} object constructed with multiple frames should be able to be passed to L{traceback.extract_tb}, and we should get a list containing a tuple for each frame.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_manyFrames
XZH950926/meizitu
python
def test_manyFrames(self): '\n A C{_Traceback} object constructed with multiple frames should be able\n to be passed to L{traceback.extract_tb}, and we should get a list\n containing a tuple for each frame.\n ' tb = failure._Traceback([['method1', 'filename.py', 123, {}, {}], ['method2', 'filename.py', 235, {}, {}]]) self.assertEqual(traceback.extract_tb(tb), [_tb('filename.py', 123, 'method1', None), _tb('filename.py', 235, 'method2', None)])
def test_fakeFrameAttributes(self): '\n L{_Frame} instances have the C{f_globals} and C{f_locals} attributes\n bound to C{dict} instance. They also have the C{f_code} attribute\n bound to something like a code object.\n ' frame = failure._Frame('dummyname', 'dummyfilename') self.assertIsInstance(frame.f_globals, dict) self.assertIsInstance(frame.f_locals, dict) self.assertIsInstance(frame.f_code, failure._Code)
-2,995,569,665,752,570,400
L{_Frame} instances have the C{f_globals} and C{f_locals} attributes bound to C{dict} instances. They also have the C{f_code} attribute bound to something like a code object.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_fakeFrameAttributes
XZH950926/meizitu
python
def test_fakeFrameAttributes(self): '\n L{_Frame} instances have the C{f_globals} and C{f_locals} attributes\n bound to C{dict} instance. They also have the C{f_code} attribute\n bound to something like a code object.\n ' frame = failure._Frame('dummyname', 'dummyfilename') self.assertIsInstance(frame.f_globals, dict) self.assertIsInstance(frame.f_locals, dict) self.assertIsInstance(frame.f_code, failure._Code)
def setUp(self): "\n Override pdb.post_mortem so we can make sure it's called.\n " post_mortem = pdb.post_mortem origInit = failure.Failure.__init__ def restore(): pdb.post_mortem = post_mortem failure.Failure.__init__ = origInit self.addCleanup(restore) self.result = [] pdb.post_mortem = self.result.append failure.startDebugMode()
6,436,035,593,134,350,000
Override pdb.post_mortem so we can make sure it's called.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
setUp
XZH950926/meizitu
python
def setUp(self): "\n \n " post_mortem = pdb.post_mortem origInit = failure.Failure.__init__ def restore(): pdb.post_mortem = post_mortem failure.Failure.__init__ = origInit self.addCleanup(restore) self.result = [] pdb.post_mortem = self.result.append failure.startDebugMode()
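The capture-and-restore pattern in this setUp, in isolation (a sketch; startDebugMode() is the real Twisted hook and patches Failure.__init__ globally, hence the restore):

    import pdb
    from twisted.python import failure

    captured = []
    orig_post_mortem = pdb.post_mortem
    orig_init = failure.Failure.__init__
    pdb.post_mortem = captured.append     # record tracebacks instead of debugging
    failure.startDebugMode()              # Failure() now hands its traceback to pdb.post_mortem
    try:
        1 / 0
    except ZeroDivisionError:
        failure.Failure()
    assert len(captured) == 1             # the traceback reached our stub
    pdb.post_mortem = orig_post_mortem    # undo both patches
    failure.Failure.__init__ = orig_init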
def test_regularFailure(self): '\n If startDebugMode() is called, calling Failure() will first call\n pdb.post_mortem with the traceback.\n ' try: (1 / 0) except: (typ, exc, tb) = sys.exc_info() f = failure.Failure() self.assertEqual(self.result, [tb]) self.assertFalse(f.captureVars)
-740,433,790,781,489,700
If startDebugMode() is called, calling Failure() will first call pdb.post_mortem with the traceback.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_regularFailure
XZH950926/meizitu
python
def test_regularFailure(self): '\n If startDebugMode() is called, calling Failure() will first call\n pdb.post_mortem with the traceback.\n ' try: (1 / 0) except: (typ, exc, tb) = sys.exc_info() f = failure.Failure() self.assertEqual(self.result, [tb]) self.assertFalse(f.captureVars)
def test_captureVars(self): '\n If startDebugMode() is called, passing captureVars to Failure() will\n not blow up.\n ' try: (1 / 0) except: (typ, exc, tb) = sys.exc_info() f = failure.Failure(captureVars=True) self.assertEqual(self.result, [tb]) self.assertTrue(f.captureVars)
-213,307,526,298,185,900
If startDebugMode() is called, passing captureVars to Failure() will not blow up.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_captureVars
XZH950926/meizitu
python
def test_captureVars(self): '\n If startDebugMode() is called, passing captureVars to Failure() will\n not blow up.\n ' try: (1 / 0) except: (typ, exc, tb) = sys.exc_info() f = failure.Failure(captureVars=True) self.assertEqual(self.result, [tb]) self.assertTrue(f.captureVars)
def test_throwExceptionIntoGenerator(self): '\n It should be possible to throw the exception that a Failure\n represents into a generator.\n ' stuff = [] def generator(): try: (yield) except: stuff.append(sys.exc_info()) else: self.fail('Yield should have yielded exception.') g = generator() f = getDivisionFailure() next(g) self._throwIntoGenerator(f, g) self.assertEqual(stuff[0][0], ZeroDivisionError) self.assertIsInstance(stuff[0][1], ZeroDivisionError) self.assertEqual(traceback.extract_tb(stuff[0][2])[(- 1)][(- 1)], '1/0')
-7,581,980,284,546,282,000
It should be possible to throw the exception that a Failure represents into a generator.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_throwExceptionIntoGenerator
XZH950926/meizitu
python
def test_throwExceptionIntoGenerator(self): '\n It should be possible to throw the exception that a Failure\n represents into a generator.\n ' stuff = [] def generator(): try: (yield) except: stuff.append(sys.exc_info()) else: self.fail('Yield should have yielded exception.') g = generator() f = getDivisionFailure() next(g) self._throwIntoGenerator(f, g) self.assertEqual(stuff[0][0], ZeroDivisionError) self.assertIsInstance(stuff[0][1], ZeroDivisionError) self.assertEqual(traceback.extract_tb(stuff[0][2])[(- 1)][(- 1)], '1/0')
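These generator tests exercise Failure.throwExceptionIntoGenerator, the public Twisted API wrapped by the _throwIntoGenerator helper; a minimal sketch:

    from twisted.python import failure

    def consumer():
        try:
            yield
        except ZeroDivisionError:
            print('caught inside the generator')

    try:
        1 / 0
    except ZeroDivisionError:
        f = failure.Failure()

    g = consumer()
    next(g)                                # advance to the yield
    try:
        f.throwExceptionIntoGenerator(g)   # raises the wrapped error at the yield
    except StopIteration:
        pass                               # generator handled it and returned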
def test_findFailureInGenerator(self): '\n Within an exception handler, it should be possible to find the\n original Failure that caused the current exception (if it was\n caused by throwExceptionIntoGenerator).\n ' f = getDivisionFailure() f.cleanFailure() foundFailures = [] def generator(): try: (yield) except: foundFailures.append(failure.Failure._findFailure()) else: self.fail('No exception sent to generator') g = generator() next(g) self._throwIntoGenerator(f, g) self.assertEqual(foundFailures, [f])
-5,148,197,409,422,034,000
Within an exception handler, it should be possible to find the original Failure that caused the current exception (if it was caused by throwExceptionIntoGenerator).
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_findFailureInGenerator
XZH950926/meizitu
python
def test_findFailureInGenerator(self): '\n Within an exception handler, it should be possible to find the\n original Failure that caused the current exception (if it was\n caused by throwExceptionIntoGenerator).\n ' f = getDivisionFailure() f.cleanFailure() foundFailures = [] def generator(): try: (yield) except: foundFailures.append(failure.Failure._findFailure()) else: self.fail('No exception sent to generator') g = generator() next(g) self._throwIntoGenerator(f, g) self.assertEqual(foundFailures, [f])
def test_failureConstructionFindsOriginalFailure(self): '\n When a Failure is constructed in the context of an exception\n handler that is handling an exception raised by\n throwExceptionIntoGenerator, the new Failure should be chained to that\n original Failure.\n ' f = getDivisionFailure() f.cleanFailure() newFailures = [] def generator(): try: (yield) except: newFailures.append(failure.Failure()) else: self.fail('No exception sent to generator') g = generator() next(g) self._throwIntoGenerator(f, g) self.assertEqual(len(newFailures), 1) self.assertEqual(newFailures[0].getTraceback(), f.getTraceback())
4,226,784,292,072,206,000
When a Failure is constructed in the context of an exception handler that is handling an exception raised by throwExceptionIntoGenerator, the new Failure should be chained to that original Failure.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_failureConstructionFindsOriginalFailure
XZH950926/meizitu
python
def test_failureConstructionFindsOriginalFailure(self): '\n When a Failure is constructed in the context of an exception\n handler that is handling an exception raised by\n throwExceptionIntoGenerator, the new Failure should be chained to that\n original Failure.\n ' f = getDivisionFailure() f.cleanFailure() newFailures = [] def generator(): try: (yield) except: newFailures.append(failure.Failure()) else: self.fail('No exception sent to generator') g = generator() next(g) self._throwIntoGenerator(f, g) self.assertEqual(len(newFailures), 1) self.assertEqual(newFailures[0].getTraceback(), f.getTraceback())
def test_ambiguousFailureInGenerator(self): '\n When a generator reraises a different exception,\n L{Failure._findFailure} inside the generator should find the reraised\n exception rather than original one.\n ' def generator(): try: try: (yield) except: [][1] except: self.assertIsInstance(failure.Failure().value, IndexError) g = generator() next(g) f = getDivisionFailure() self._throwIntoGenerator(f, g)
-2,900,656,017,749,723,600
When a generator reraises a different exception, L{Failure._findFailure} inside the generator should find the reraised exception rather than the original one.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_ambiguousFailureInGenerator
XZH950926/meizitu
python
def test_ambiguousFailureInGenerator(self): '\n When a generator reraises a different exception,\n L{Failure._findFailure} inside the generator should find the reraised\n exception rather than original one.\n ' def generator(): try: try: (yield) except: [][1] except: self.assertIsInstance(failure.Failure().value, IndexError) g = generator() next(g) f = getDivisionFailure() self._throwIntoGenerator(f, g)
def test_ambiguousFailureFromGenerator(self): '\n When a generator reraises a different exception,\n L{Failure._findFailure} above the generator should find the reraised\n exception rather than original one.\n ' def generator(): try: (yield) except: [][1] g = generator() next(g) f = getDivisionFailure() try: self._throwIntoGenerator(f, g) except: self.assertIsInstance(failure.Failure().value, IndexError)
2,801,333,408,657,701,400
When a generator reraises a different exception, L{Failure._findFailure} above the generator should find the reraised exception rather than the original one.
fang/Twisted-18.4.0/src/twisted/test/test_failure.py
test_ambiguousFailureFromGenerator
XZH950926/meizitu
python
def test_ambiguousFailureFromGenerator(self): '\n When a generator reraises a different exception,\n L{Failure._findFailure} above the generator should find the reraised\n exception rather than original one.\n ' def generator(): try: (yield) except: [][1] g = generator() next(g) f = getDivisionFailure() try: self._throwIntoGenerator(f, g) except: self.assertIsInstance(failure.Failure().value, IndexError)
def __init__(self, ad: AppDaemon): 'Constructor.\n\n Args:\n ad: Reference to the AppDaemon object\n ' self.AD = ad self.logger = ad.logging.get_child('_events')
6,886,154,475,808,460,000
Constructor. Args: ad: Reference to the AppDaemon object
appdaemon/events.py
__init__
DTTerastar/appdaemon
python
def __init__(self, ad: AppDaemon): 'Constructor.\n\n Args:\n ad: Reference to the AppDaemon object\n ' self.AD = ad self.logger = ad.logging.get_child('_events')
async def add_event_callback(self, name, namespace, cb, event, **kwargs): 'Adds a callback for an event which is called internally by apps.\n\n Args:\n name (str): Name of the app.\n namespace (str): Namespace of the event.\n cb: Callback function.\n event (str): Name of the event.\n **kwargs: List of values to filter on, and additional arguments to pass to the callback.\n\n Returns:\n ``None`` or the reference to the callback handle.\n\n ' if (self.AD.threading.validate_pin(name, kwargs) is True): if ('pin' in kwargs): pin_app = kwargs['pin_app'] else: pin_app = self.AD.app_management.objects[name]['pin_app'] if ('pin_thread' in kwargs): pin_thread = kwargs['pin_thread'] pin_app = True else: pin_thread = self.AD.app_management.objects[name]['pin_thread'] async with self.AD.callbacks.callbacks_lock: if (name not in self.AD.callbacks.callbacks): self.AD.callbacks.callbacks[name] = {} handle = uuid.uuid4().hex self.AD.callbacks.callbacks[name][handle] = {'name': name, 'id': self.AD.app_management.objects[name]['id'], 'type': 'event', 'function': cb, 'namespace': namespace, 'event': event, 'pin_app': pin_app, 'pin_thread': pin_thread, 'kwargs': kwargs} if ('timeout' in kwargs): exec_time = ((await self.AD.sched.get_now()) + datetime.timedelta(seconds=int(kwargs['timeout']))) kwargs['__timeout'] = (await self.AD.sched.insert_schedule(name, exec_time, None, False, None, __event_handle=handle)) (await self.AD.state.add_entity('admin', 'event_callback.{}'.format(handle), 'active', {'app': name, 'event_name': event, 'function': cb.__name__, 'pinned': pin_app, 'pinned_thread': pin_thread, 'fired': 0, 'executed': 0, 'kwargs': kwargs})) return handle else: return None
-440,333,686,961,987,840
Adds a callback for an event which is called internally by apps. Args: name (str): Name of the app. namespace (str): Namespace of the event. cb: Callback function. event (str): Name of the event. **kwargs: List of values to filter on, and additional arguments to pass to the callback. Returns: ``None`` or the reference to the callback handle.
appdaemon/events.py
add_event_callback
DTTerastar/appdaemon
python
async def add_event_callback(self, name, namespace, cb, event, **kwargs): 'Adds a callback for an event which is called internally by apps.\n\n Args:\n name (str): Name of the app.\n namespace (str): Namespace of the event.\n cb: Callback function.\n event (str): Name of the event.\n **kwargs: List of values to filter on, and additional arguments to pass to the callback.\n\n Returns:\n ``None`` or the reference to the callback handle.\n\n ' if (self.AD.threading.validate_pin(name, kwargs) is True): if ('pin' in kwargs): pin_app = kwargs['pin_app'] else: pin_app = self.AD.app_management.objects[name]['pin_app'] if ('pin_thread' in kwargs): pin_thread = kwargs['pin_thread'] pin_app = True else: pin_thread = self.AD.app_management.objects[name]['pin_thread'] async with self.AD.callbacks.callbacks_lock: if (name not in self.AD.callbacks.callbacks): self.AD.callbacks.callbacks[name] = {} handle = uuid.uuid4().hex self.AD.callbacks.callbacks[name][handle] = {'name': name, 'id': self.AD.app_management.objects[name]['id'], 'type': 'event', 'function': cb, 'namespace': namespace, 'event': event, 'pin_app': pin_app, 'pin_thread': pin_thread, 'kwargs': kwargs} if ('timeout' in kwargs): exec_time = ((await self.AD.sched.get_now()) + datetime.timedelta(seconds=int(kwargs['timeout']))) kwargs['__timeout'] = (await self.AD.sched.insert_schedule(name, exec_time, None, False, None, __event_handle=handle)) (await self.AD.state.add_entity('admin', 'event_callback.{}'.format(handle), 'active', {'app': name, 'event_name': event, 'function': cb.__name__, 'pinned': pin_app, 'pinned_thread': pin_thread, 'fired': 0, 'executed': 0, 'kwargs': kwargs})) return handle else: return None
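For context, this internal method backs the app-level listen_event() API; a hedged sketch of a hypothetical app registering a filtered one-shot callback (DoorWatcher, the event name, and the entity are made up):

    import appdaemon.plugins.hass.hassapi as hass

    class DoorWatcher(hass.Hass):
        def initialize(self):
            # Extra kwargs act as filters: each key that also appears in the
            # event data must match exactly. oneshot=True removes the callback
            # after one successful dispatch.
            self.handle = self.listen_event(
                self.on_door, 'MY_DOOR_EVENT',
                entity_id='binary_sensor.front_door', oneshot=True)

        def on_door(self, event_name, data, kwargs):
            self.log('door event: {}'.format(data))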
async def cancel_event_callback(self, name, handle): 'Cancels an event callback.\n\n Args:\n name (str): Name of the app or module.\n handle: Previously supplied callback handle for the callback.\n\n Returns:\n ``True`` if the callback was found and cancelled, ``False`` otherwise.\n\n ' executed = False async with self.AD.callbacks.callbacks_lock: if ((name in self.AD.callbacks.callbacks) and (handle in self.AD.callbacks.callbacks[name])): del self.AD.callbacks.callbacks[name][handle] (await self.AD.state.remove_entity('admin', 'event_callback.{}'.format(handle))) executed = True if ((name in self.AD.callbacks.callbacks) and (self.AD.callbacks.callbacks[name] == {})): del self.AD.callbacks.callbacks[name] if (not executed): self.logger.warning("Invalid callback handle '{}' in cancel_event_callback() from app {}".format(handle, name)) return executed
895,694,378,937,561,600
Cancels an event callback. Args: name (str): Name of the app or module. handle: Previously supplied callback handle for the callback. Returns: ``True`` if the callback was found and cancelled, ``False`` otherwise.
appdaemon/events.py
cancel_event_callback
DTTerastar/appdaemon
python
async def cancel_event_callback(self, name, handle): 'Cancels an event callback.\n\n Args:\n name (str): Name of the app or module.\n handle: Previously supplied callback handle for the callback.\n\n Returns:\n ``True`` if the callback was found and cancelled, ``False`` otherwise.\n\n ' executed = False async with self.AD.callbacks.callbacks_lock: if ((name in self.AD.callbacks.callbacks) and (handle in self.AD.callbacks.callbacks[name])): del self.AD.callbacks.callbacks[name][handle] (await self.AD.state.remove_entity('admin', 'event_callback.{}'.format(handle))) executed = True if ((name in self.AD.callbacks.callbacks) and (self.AD.callbacks.callbacks[name] == {})): del self.AD.callbacks.callbacks[name] if (not executed): self.logger.warning("Invalid callback handle '{}' in cancel_event_callback() from app {}".format(handle, name)) return executed
async def info_event_callback(self, name, handle): 'Gets the information of an event callback.\n\n Args:\n name (str): Name of the app or subsystem.\n handle: Previously supplied handle for the callback.\n\n Returns:\n A tuple of (event, kwargs) for the callback, or raises a ``ValueError`` if an invalid handle is provided.\n\n ' async with self.AD.callbacks.callbacks_lock: if ((name in self.AD.callbacks.callbacks) and (handle in self.AD.callbacks.callbacks[name])): callback = self.AD.callbacks.callbacks[name][handle] return (callback['event'], callback['kwargs'].copy()) else: raise ValueError('Invalid handle: {}'.format(handle))
-1,162,147,695,080,955,400
Gets the information of an event callback. Args: name (str): Name of the app or subsystem. handle: Previously supplied handle for the callback. Returns: A tuple of (event, kwargs) for the callback, or raises a ``ValueError`` if an invalid handle is provided.
appdaemon/events.py
info_event_callback
DTTerastar/appdaemon
python
async def info_event_callback(self, name, handle): 'Gets the information of an event callback.\n\n Args:\n name (str): Name of the app or subsystem.\n handle: Previously supplied handle for the callback.\n\n Returns:\n A tuple of (event, kwargs) for the callback, or raises a ``ValueError`` if an invalid handle is provided.\n\n ' async with self.AD.callbacks.callbacks_lock: if ((name in self.AD.callbacks.callbacks) and (handle in self.AD.callbacks.callbacks[name])): callback = self.AD.callbacks.callbacks[name][handle] return (callback['event'], callback['kwargs'].copy()) else: raise ValueError('Invalid handle: {}'.format(handle))
async def fire_event(self, namespace, event, **kwargs): 'Fires an event.\n\n If the namespace does not have a plugin associated with it, the event will be fired locally.\n If a plugin is associated, the firing of the event will be delegated to the plugin, under the\n understanding that when the event is fired, the plugin will notify appdaemon that it occurred,\n usually via the system the plugin is communicating with.\n\n Args:\n namespace (str): Namespace for the event to be fired in.\n event (str): Name of the event.\n **kwargs: Arguments to associate with the event.\n\n Returns:\n None.\n\n ' self.logger.debug('fire_plugin_event() %s %s %s', namespace, event, kwargs) plugin = (await self.AD.plugins.get_plugin_object(namespace)) if hasattr(plugin, 'fire_plugin_event'): (await plugin.fire_plugin_event(event, namespace, **kwargs)) else: (await self.AD.events.process_event(namespace, {'event_type': event, 'data': kwargs}))
1,890,551,796,475,495,000
Fires an event. If the namespace does not have a plugin associated with it, the event will be fired locally. If a plugin is associated, the firing of the event will be delegated to the plugin, under the understanding that when the event is fired, the plugin will notify appdaemon that it occurred, usually via the system the plugin is communicating with. Args: namespace (str): Namespace for the event to be fired in. event (str): Name of the event. **kwargs: Arguments to associate with the event. Returns: None.
appdaemon/events.py
fire_event
DTTerastar/appdaemon
python
async def fire_event(self, namespace, event, **kwargs): 'Fires an event.\n\n If the namespace does not have a plugin associated with it, the event will be fired locally.\n If a plugin is associated, the firing of the event will be delegated to the plugin, under the\n understanding that when the event is fired, the plugin will notify appdaemon that it occurred,\n usually via the system the plugin is communicating with.\n\n Args:\n namespace (str): Namespace for the event to be fired in.\n event (str): Name of the event.\n **kwargs: Arguments to associate with the event.\n\n Returns:\n None.\n\n ' self.logger.debug('fire_plugin_event() %s %s %s', namespace, event, kwargs) plugin = (await self.AD.plugins.get_plugin_object(namespace)) if hasattr(plugin, 'fire_plugin_event'): (await plugin.fire_plugin_event(event, namespace, **kwargs)) else: (await self.AD.events.process_event(namespace, {'event_type': event, 'data': kwargs}))
async def process_event(self, namespace, data): 'Processes an event that has been received either locally or from a plugin.\n\n Args:\n namespace (str): Namespace the event was fired in.\n data: Data associated with the event.\n\n Returns:\n None.\n\n ' try: self.logger.debug('Event type:%s:', data['event_type']) self.logger.debug(data['data']) if ((self.AD.sched is not None) and (self.AD.sched.realtime is False) and (namespace != 'admin')): (await self.AD.sched.kick()) if (data['event_type'] == 'state_changed'): if (('entity_id' in data['data']) and ('new_state' in data['data'])): if (data['data']['new_state'] is None): return entity_id = data['data']['entity_id'] self.AD.state.set_state_simple(namespace, entity_id, data['data']['new_state']) if ((self.AD.apps is True) and (namespace != 'admin')): (await self.AD.state.process_state_callbacks(namespace, data)) else: self.logger.warning("Malformed 'state_changed' event: %s", data['data']) return if (data['event_type'] == '__AD_LOG_EVENT'): if (await self.has_log_callback(data['data']['app_name'])): self.logger.debug('Discarding event for loop avoidance') return (await self.AD.logging.process_log_callbacks(namespace, data)) if (self.AD.apps is True): (await self.process_event_callbacks(namespace, data)) if (self.AD.http is not None): if (data['event_type'] == 'state_changed'): if (data['data']['new_state'] == data['data']['old_state']): return if ('ts' in data['data']): ts = data['data'].pop('ts') mydata = deepcopy(data) data['data']['ts'] = ts else: mydata = deepcopy(data) (await self.AD.http.stream_update(namespace, mydata)) except Exception: self.logger.warning(('-' * 60)) self.logger.warning('Unexpected error during process_event()') self.logger.warning(('-' * 60)) self.logger.warning(traceback.format_exc()) self.logger.warning(('-' * 60))
6,674,242,740,453,871,000
Processes an event that has been received either locally or from a plugin. Args: namespace (str): Namespace the event was fired in. data: Data associated with the event. Returns: None.
appdaemon/events.py
process_event
DTTerastar/appdaemon
python
async def process_event(self, namespace, data): 'Processes an event that has been received either locally or from a plugin.\n\n Args:\n namespace (str): Namespace the event was fired in.\n data: Data associated with the event.\n\n Returns:\n None.\n\n ' try: self.logger.debug('Event type:%s:', data['event_type']) self.logger.debug(data['data']) if ((self.AD.sched is not None) and (self.AD.sched.realtime is False) and (namespace != 'admin')): (await self.AD.sched.kick()) if (data['event_type'] == 'state_changed'): if (('entity_id' in data['data']) and ('new_state' in data['data'])): if (data['data']['new_state'] is None): return entity_id = data['data']['entity_id'] self.AD.state.set_state_simple(namespace, entity_id, data['data']['new_state']) if ((self.AD.apps is True) and (namespace != 'admin')): (await self.AD.state.process_state_callbacks(namespace, data)) else: self.logger.warning("Malformed 'state_changed' event: %s", data['data']) return if (data['event_type'] == '__AD_LOG_EVENT'): if (await self.has_log_callback(data['data']['app_name'])): self.logger.debug('Discarding event for loop avoidance') return (await self.AD.logging.process_log_callbacks(namespace, data)) if (self.AD.apps is True): (await self.process_event_callbacks(namespace, data)) if (self.AD.http is not None): if (data['event_type'] == 'state_changed'): if (data['data']['new_state'] == data['data']['old_state']): return if ('ts' in data['data']): ts = data['data'].pop('ts') mydata = deepcopy(data) data['data']['ts'] = ts else: mydata = deepcopy(data) (await self.AD.http.stream_update(namespace, mydata)) except Exception: self.logger.warning(('-' * 60)) self.logger.warning('Unexpected error during process_event()') self.logger.warning(('-' * 60)) self.logger.warning(traceback.format_exc()) self.logger.warning(('-' * 60))
async def has_log_callback(self, name): 'Returns ``True`` if the app has a log callback, ``False`` otherwise.\n\n Used to prevent callback loops. In the calling logic, if this function returns\n ``True`` the resulting logging event will be suppressed.\n\n Args:\n name (str): Name of the app.\n\n ' has_log_callback = False if (name == 'AppDaemon._stream'): has_log_callback = True else: async with self.AD.callbacks.callbacks_lock: for callback in self.AD.callbacks.callbacks: for _uuid in self.AD.callbacks.callbacks[callback]: cb = self.AD.callbacks.callbacks[callback][_uuid] if ((cb['name'] == name) and (cb['type'] == 'event') and (cb['event'] == '__AD_LOG_EVENT')): has_log_callback = True elif ((cb['name'] == name) and (cb['type'] == 'log')): has_log_callback = True return has_log_callback
1,133,120,197,770,422,000
Returns ``True`` if the app has a log callback, ``False`` otherwise. Used to prevent callback loops. In the calling logic, if this function returns ``True`` the resulting logging event will be suppressed. Args: name (str): Name of the app.
appdaemon/events.py
has_log_callback
DTTerastar/appdaemon
python
async def has_log_callback(self, name): 'Returns ``True`` if the app has a log callback, ``False`` otherwise.\n\n Used to prevent callback loops. In the calling logic, if this function returns\n ``True`` the resulting logging event will be suppressed.\n\n Args:\n name (str): Name of the app.\n\n ' has_log_callback = False if (name == 'AppDaemon._stream'): has_log_callback = True else: async with self.AD.callbacks.callbacks_lock: for callback in self.AD.callbacks.callbacks: for _uuid in self.AD.callbacks.callbacks[callback]: cb = self.AD.callbacks.callbacks[callback][_uuid] if ((cb['name'] == name) and (cb['type'] == 'event') and (cb['event'] == '__AD_LOG_EVENT')): has_log_callback = True elif ((cb['name'] == name) and (cb['type'] == 'log')): has_log_callback = True return has_log_callback
async def process_event_callbacks(self, namespace, data): 'Processes a pure event callback.\n\n Locate any callbacks that may be registered for this event, check for filters and if appropriate,\n dispatch the event for further checking and eventual action.\n\n Args:\n namespace (str): Namespace of the event.\n data: Data associated with the event.\n\n Returns:\n None.\n\n ' self.logger.debug('process_event_callbacks() %s %s', namespace, data) removes = [] async with self.AD.callbacks.callbacks_lock: for name in self.AD.callbacks.callbacks.keys(): for uuid_ in self.AD.callbacks.callbacks[name]: callback = self.AD.callbacks.callbacks[name][uuid_] if ((callback['namespace'] == namespace) or (callback['namespace'] == 'global') or (namespace == 'global')): if (('event' in callback) and (((callback['event'] is None) and (data['event_type'][:2] != '__')) or (data['event_type'] == callback['event']))): _run = True for key in callback['kwargs']: if ((key in data['data']) and (callback['kwargs'][key] != data['data'][key])): _run = False if (data['event_type'] == '__AD_LOG_EVENT'): if (('log' in callback['kwargs']) and (callback['kwargs']['log'] != data['data']['log_type'])): _run = False if _run: if (name in self.AD.app_management.objects): executed = (await self.AD.threading.dispatch_worker(name, {'id': uuid_, 'name': name, 'objectid': self.AD.app_management.objects[name]['id'], 'type': 'event', 'event': data['event_type'], 'function': callback['function'], 'data': data['data'], 'pin_app': callback['pin_app'], 'pin_thread': callback['pin_thread'], 'kwargs': callback['kwargs']})) if (executed is True): remove = callback['kwargs'].get('oneshot', False) if (remove is True): removes.append({'name': name, 'uuid': uuid_}) for remove in removes: (await self.cancel_event_callback(remove['name'], remove['uuid']))
1,583,161,895,843,194,000
Processes a pure event callback. Locate any callbacks that may be registered for this event, check for filters and if appropriate, dispatch the event for further checking and eventual action. Args: namespace (str): Namespace of the event. data: Data associated with the event. Returns: None.
appdaemon/events.py
process_event_callbacks
DTTerastar/appdaemon
python
async def process_event_callbacks(self, namespace, data): 'Processes a pure event callback.\n\n Locate any callbacks that may be registered for this event, check for filters and if appropriate,\n dispatch the event for further checking and eventual action.\n\n Args:\n namespace (str): Namespace of the event.\n data: Data associated with the event.\n\n Returns:\n None.\n\n ' self.logger.debug('process_event_callbacks() %s %s', namespace, data) removes = [] async with self.AD.callbacks.callbacks_lock: for name in self.AD.callbacks.callbacks.keys(): for uuid_ in self.AD.callbacks.callbacks[name]: callback = self.AD.callbacks.callbacks[name][uuid_] if ((callback['namespace'] == namespace) or (callback['namespace'] == 'global') or (namespace == 'global')): if (('event' in callback) and (((callback['event'] is None) and (data['event_type'][:2] != '__')) or (data['event_type'] == callback['event']))): _run = True for key in callback['kwargs']: if ((key in data['data']) and (callback['kwargs'][key] != data['data'][key])): _run = False if (data['event_type'] == '__AD_LOG_EVENT'): if (('log' in callback['kwargs']) and (callback['kwargs']['log'] != data['data']['log_type'])): _run = False if _run: if (name in self.AD.app_management.objects): executed = (await self.AD.threading.dispatch_worker(name, {'id': uuid_, 'name': name, 'objectid': self.AD.app_management.objects[name]['id'], 'type': 'event', 'event': data['event_type'], 'function': callback['function'], 'data': data['data'], 'pin_app': callback['pin_app'], 'pin_thread': callback['pin_thread'], 'kwargs': callback['kwargs']})) if (executed is True): remove = callback['kwargs'].get('oneshot', False) if (remove is True): removes.append({'name': name, 'uuid': uuid_}) for remove in removes: (await self.cancel_event_callback(remove['name'], remove['uuid']))
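The filter rule applied above is worth isolating: a registered kwarg only vetoes dispatch when the same key is present in the event data with a different value. A pure-Python restatement (a sketch, not from the source):

    def matches(cb_kwargs, event_data):
        # Keys absent from the event data (e.g. oneshot, timeout) never filter.
        return all(event_data[k] == v
                   for k, v in cb_kwargs.items() if k in event_data)

    assert matches({'entity_id': 'light.porch'}, {'entity_id': 'light.porch'})
    assert not matches({'entity_id': 'light.porch'}, {'entity_id': 'light.barn'})
    assert matches({'oneshot': True}, {'entity_id': 'light.barn'})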
def source_regex_locations(self): ' Test that restricting source expressions to files & to functions. ' exe = os.path.join(os.getcwd(), 'a.out') target = self.dbg.CreateTarget(exe) self.assertTrue(target, VALID_TARGET) target_files = lldb.SBFileSpecList() target_files.Append(lldb.SBFileSpec('a.c')) func_names = lldb.SBStringList() func_names.AppendString('a_func') source_regex = 'Set . breakpoint here' main_break = target.BreakpointCreateBySourceRegex(source_regex, lldb.SBFileSpecList(), target_files, func_names) num_locations = main_break.GetNumLocations() self.assertTrue((num_locations == 1), ('a.c in a_func should give one breakpoint, got %d.' % num_locations)) loc = main_break.GetLocationAtIndex(0) self.assertTrue(loc.IsValid(), 'Got a valid location.') address = loc.GetAddress() self.assertTrue(address.IsValid(), 'Got a valid address from the location.') a_func_line = line_number('a.c', 'Set A breakpoint here') line_entry = address.GetLineEntry() self.assertTrue(line_entry.IsValid(), 'Got a valid line entry.') self.assertTrue((line_entry.line == a_func_line), 'Our line number matches the one lldbtest found.')
9,138,448,826,118,702,000
Test restricting source expressions to files and to functions.
SymbolExtractorAndRenamer/lldb/packages/Python/lldbsuite/test/functionalities/breakpoint/source_regexp/TestSourceRegexBreakpoints.py
source_regex_locations
Polidea/SiriusObfuscator
python
def source_regex_locations(self): ' ' exe = os.path.join(os.getcwd(), 'a.out') target = self.dbg.CreateTarget(exe) self.assertTrue(target, VALID_TARGET) target_files = lldb.SBFileSpecList() target_files.Append(lldb.SBFileSpec('a.c')) func_names = lldb.SBStringList() func_names.AppendString('a_func') source_regex = 'Set . breakpoint here' main_break = target.BreakpointCreateBySourceRegex(source_regex, lldb.SBFileSpecList(), target_files, func_names) num_locations = main_break.GetNumLocations() self.assertTrue((num_locations == 1), ('a.c in a_func should give one breakpoint, got %d.' % num_locations)) loc = main_break.GetLocationAtIndex(0) self.assertTrue(loc.IsValid(), 'Got a valid location.') address = loc.GetAddress() self.assertTrue(address.IsValid(), 'Got a valid address from the location.') a_func_line = line_number('a.c', 'Set A breakpoint here') line_entry = address.GetLineEntry() self.assertTrue(line_entry.IsValid(), 'Got a valid line entry.') self.assertTrue((line_entry.line == a_func_line), 'Our line number matches the one lldbtest found.')
def source_regex_restrictions(self): ' Test that restricting source expressions to files & to functions. ' exe = os.path.join(os.getcwd(), 'a.out') target = self.dbg.CreateTarget(exe) self.assertTrue(target, VALID_TARGET) target_files = lldb.SBFileSpecList() target_files.Append(lldb.SBFileSpec('main.c')) source_regex = 'Set . breakpoint here' main_break = target.BreakpointCreateBySourceRegex(source_regex, lldb.SBFileSpecList(), target_files, lldb.SBStringList()) num_locations = main_break.GetNumLocations() self.assertTrue((num_locations == 2), ('main.c should have 2 matches, got %d.' % num_locations)) target_files.Append(lldb.SBFileSpec('a.c')) main_break = target.BreakpointCreateBySourceRegex(source_regex, lldb.SBFileSpecList(), target_files, lldb.SBStringList()) num_locations = main_break.GetNumLocations() self.assertTrue((num_locations == 4), ('main.c and a.c should have 4 matches, got %d.' % num_locations)) func_names = lldb.SBStringList() func_names.AppendString('main_func') main_break = target.BreakpointCreateBySourceRegex(source_regex, lldb.SBFileSpecList(), target_files, func_names) num_locations = main_break.GetNumLocations() self.assertTrue((num_locations == 2), ('main_func in main.c and a.c should have 2 matches, got %d.' % num_locations))
8,368,960,995,021,304,000
Test restricting source expressions to files and to functions.
SymbolExtractorAndRenamer/lldb/packages/Python/lldbsuite/test/functionalities/breakpoint/source_regexp/TestSourceRegexBreakpoints.py
source_regex_restrictions
Polidea/SiriusObfuscator
python
def source_regex_restrictions(self): ' ' exe = os.path.join(os.getcwd(), 'a.out') target = self.dbg.CreateTarget(exe) self.assertTrue(target, VALID_TARGET) target_files = lldb.SBFileSpecList() target_files.Append(lldb.SBFileSpec('main.c')) source_regex = 'Set . breakpoint here' main_break = target.BreakpointCreateBySourceRegex(source_regex, lldb.SBFileSpecList(), target_files, lldb.SBStringList()) num_locations = main_break.GetNumLocations() self.assertTrue((num_locations == 2), ('main.c should have 2 matches, got %d.' % num_locations)) target_files.Append(lldb.SBFileSpec('a.c')) main_break = target.BreakpointCreateBySourceRegex(source_regex, lldb.SBFileSpecList(), target_files, lldb.SBStringList()) num_locations = main_break.GetNumLocations() self.assertTrue((num_locations == 4), ('main.c and a.c should have 4 matches, got %d.' % num_locations)) func_names = lldb.SBStringList() func_names.AppendString('main_func') main_break = target.BreakpointCreateBySourceRegex(source_regex, lldb.SBFileSpecList(), target_files, func_names) num_locations = main_break.GetNumLocations() self.assertTrue((num_locations == 2), ('main_func in main.c and a.c should have 2 matches, got %d.' % num_locations))
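Outside the test harness, the same four-argument overload can be driven from lldb's Python bindings; a sketch assuming an a.out built from a.c and main.c as in this test:

    import lldb

    debugger = lldb.SBDebugger.Create()
    target = debugger.CreateTarget('a.out')

    files = lldb.SBFileSpecList()
    files.Append(lldb.SBFileSpec('a.c'))
    funcs = lldb.SBStringList()
    funcs.AppendString('a_func')

    # regex, module filter (empty), source-file filter, function-name filter
    bp = target.BreakpointCreateBySourceRegex('Set . breakpoint here',
                                              lldb.SBFileSpecList(), files, funcs)
    print(bp.GetNumLocations())   # expect 1, as the test above asserts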
def new_connection(self, workerId: str, socket): ' Create a mapping structure to establish a bond between a workerId and a socket descriptor.\n\n Args:\n workerId: UUID string used to identify workers.\n socket: Socket descriptor that will be used to send/receive messages from this client.\n\n Returns:\n Worker: a worker instance with the corresponding workerId\n ' if (workerId not in self.connections): self.connections[workerId] = Worker(workerId, socket) else: worker = self.connections[workerId] if (worker.status == WORKER_PROPERTIES.OFFLINE): worker._socket = socket return self.connections[workerId]
7,135,368,950,734,366,000
Create a mapping structure to establish a bond between a workerId and a socket descriptor. Args: workerId: UUID string used to identify workers. socket: Socket descriptor that will be used to send/receive messages from this client. Returns: Worker: a worker instance with the corresponding workerId
gridnetwork/events/socket_handler.py
new_connection
kuronosec/PyGridNetwork
python
def new_connection(self, workerId: str, socket): ' Create a mapping structure to establish a bond between a workerId and a socket descriptor.\n\n Args:\n workerId: UUID string used to identify workers.\n socket: Socket descriptor that will be used to send/receive messages from this client.\n\n Returns:\n Worker: a worker instance with the corresponding workerId\n ' if (workerId not in self.connections): self.connections[workerId] = Worker(workerId, socket) else: worker = self.connections[workerId] if (worker.status == WORKER_PROPERTIES.OFFLINE): worker._socket = socket return self.connections[workerId]
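A hedged usage sketch of this connection map (SocketHandler as the enclosing class name and ws as a live socket object are assumptions; Worker and WORKER_PROPERTIES come from the module's own imports):

    handler = SocketHandler()                         # hypothetical class name
    w1 = handler.new_connection('worker-uuid-1', ws)  # first connect: new Worker
    w2 = handler.new_connection('worker-uuid-1', ws)  # reconnect: same Worker back
    assert w1 is w2
    # Only a worker whose status is WORKER_PROPERTIES.OFFLINE gets its
    # socket descriptor replaced on reconnection.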
def send_msg(self, workerId: str, message: str): ' Find the socket descriptor mapped by workerId and send it a message.\n\n Args:\n workerId: UUID string used to identify and retrieve a worker.\n message: Message to be sent.\n ' socket = self.connections.get(workerId, None) if socket: socket.send(message)
-830,059,463,909,367,300
Find the socket descriptor mapped by workerId and send it a message. Args: workerId: UUID string used to identify and retrieve a worker. message: Message to be sent.
gridnetwork/events/socket_handler.py
send_msg
kuronosec/PyGridNetwork
python
def send_msg(self, workerId: str, message: str): ' Find the socket descriptor mapped by workerId and send it a message.\n\n Args:\n workerId: UUID string used to identify and retrieve a worker.\n message: Message to be sent.\n ' socket = self.connections.get(workerId, None) if socket: socket.send(message)
def get(self, query): 'Retrieve a worker by its UUID string or its socket descriptor.' if isinstance(query, str): return self.connections.get(query, None) else: return self.__retrieve_worker_by_socket(query)
5,881,453,422,708,374,000
Retrieve a worker by its UUID string or its socket descriptor.
gridnetwork/events/socket_handler.py
get
kuronosec/PyGridNetwork
python
def get(self, query): if isinstance(query, str): return self.connections.get(query, None) else: return self.__retrieve_worker_by_socket(query)
def remove(self, socket) -> str: ' Remove a socket descriptor from mapping structure. It will be used when the socket connection is closed.\n\n Args:\n socket: socket descriptor used to send/receive messages.\n\n Returns:\n workerId: Worker id linked to that connection.\n ' worker = self.__retrieve_worker_by_socket(socket) if worker: self.connections[worker._id]._socket = None self.connections[worker._id].connected_nodes = [] return worker._id
-1,422,404,848,341,396,700
Remove a socket descriptor from the mapping structure. It is called when the socket connection is closed. Args: socket: socket descriptor used to send/receive messages. Returns: workerId: Worker id linked to that connection.
gridnetwork/events/socket_handler.py
remove
kuronosec/PyGridNetwork
python
def remove(self, socket) -> str: ' Remove a socket descriptor from mapping structure. It will be used when the socket connection is closed.\n\n Args:\n socket: socket descriptor used to send/receive messages.\n\n Returns:\n workerId: Worker id linked to that connection.\n ' worker = self.__retrieve_worker_by_socket(socket) if worker: self.connections[worker._id]._socket = None self.connections[worker._id].connected_nodes = [] return worker._id
@property def nodes(self) -> list: 'Return all the connected nodes as a list of tuples of (worker_id, worker)' return list(self.connections.items())
6,549,633,896,146,046,000
Return all the connected nodes as a list of tuples of (worker_id, worker)
gridnetwork/events/socket_handler.py
nodes
kuronosec/PyGridNetwork
python
@property def nodes(self) -> list: return list(self.connections.items())
def __len__(self) -> int: ' Number of connections handled by this server.\n\n Returns:\n length: number of connections handled by this server.\n ' return len(self.connections)
-5,961,706,619,804,543,000
Number of connections handled by this server. Returns: length: number of connections handled by this server.
gridnetwork/events/socket_handler.py
__len__
kuronosec/PyGridNetwork
python
def __len__(self) -> int: ' Number of connections handled by this server.\n\n Returns:\n length: number of connections handled by this server.\n ' return len(self.connections)
def _get_vco(verts_orig, loop): '\n Get vertex original coordinate from loop\n ' for vo in verts_orig: if ((vo['vidx'] == loop.vert.index) and (vo['moved'] is False)): return vo['vco'] return loop.vert.co
-7,784,488,194,725,501,000
Get vertex original coordinate from loop
engine/2.80/scripts/addons/magic_uv/op/texture_lock.py
_get_vco
byteinc/Phasor
python
def _get_vco(verts_orig, loop): '\n \n ' for vo in verts_orig: if ((vo['vidx'] == loop.vert.index) and (vo['moved'] is False)): return vo['vco'] return loop.vert.co
def _get_link_loops(vert): '\n Get loop linked to vertex\n ' link_loops = [] for f in vert.link_faces: adj_loops = [] for loop in f.loops: if (loop.vert == vert): l = loop else: for e in loop.vert.link_edges: if (e.other_vert(loop.vert) == vert): adj_loops.append(loop) if (len(adj_loops) < 2): return None link_loops.append({'l': l, 'l0': adj_loops[0], 'l1': adj_loops[1]}) return link_loops
-2,814,715,461,684,407,000
Get the loops linked to a vertex
engine/2.80/scripts/addons/magic_uv/op/texture_lock.py
_get_link_loops
byteinc/Phasor
python
def _get_link_loops(vert): '\n \n ' link_loops = [] for f in vert.link_faces: adj_loops = [] for loop in f.loops: if (loop.vert == vert): l = loop else: for e in loop.vert.link_edges: if (e.other_vert(loop.vert) == vert): adj_loops.append(loop) if (len(adj_loops) < 2): return None link_loops.append({'l': l, 'l0': adj_loops[0], 'l1': adj_loops[1]}) return link_loops
def _get_ini_geom(link_loop, uv_layer, verts_orig, v_orig): '\n Get initial geometry\n (Get interior angle of face in vertex/UV space)\n ' u = link_loop['l'][uv_layer].uv v0 = _get_vco(verts_orig, link_loop['l0']) u0 = link_loop['l0'][uv_layer].uv v1 = _get_vco(verts_orig, link_loop['l1']) u1 = link_loop['l1'][uv_layer].uv v0v1 = (v1 - v0) v0v = (v_orig['vco'] - v0) v1v = (v_orig['vco'] - v1) theta0 = v0v1.angle(v0v) theta1 = v0v1.angle((- v1v)) if ((theta0 + theta1) > math.pi): theta0 = v0v1.angle((- v0v)) theta1 = v0v1.angle(v1v) u0u1 = (u1 - u0) u0u = (u - u0) u1u = (u - u1) phi0 = u0u1.angle(u0u) phi1 = u0u1.angle((- u1u)) if ((phi0 + phi1) > math.pi): phi0 = u0u1.angle((- u0u)) phi1 = u0u1.angle(u1u) dir0 = (u0u1.cross(u0u) > 0) dir1 = (u0u1.cross(u1u) > 0) return {'theta0': theta0, 'theta1': theta1, 'phi0': phi0, 'phi1': phi1, 'dir0': dir0, 'dir1': dir1}
3,110,401,165,743,732,000
Get initial geometry (Get interior angle of face in vertex/UV space)
engine/2.80/scripts/addons/magic_uv/op/texture_lock.py
_get_ini_geom
byteinc/Phasor
python
def _get_ini_geom(link_loop, uv_layer, verts_orig, v_orig): '\n Get initial geometry\n (Get interior angle of face in vertex/UV space)\n ' u = link_loop['l'][uv_layer].uv v0 = _get_vco(verts_orig, link_loop['l0']) u0 = link_loop['l0'][uv_layer].uv v1 = _get_vco(verts_orig, link_loop['l1']) u1 = link_loop['l1'][uv_layer].uv v0v1 = (v1 - v0) v0v = (v_orig['vco'] - v0) v1v = (v_orig['vco'] - v1) theta0 = v0v1.angle(v0v) theta1 = v0v1.angle((- v1v)) if ((theta0 + theta1) > math.pi): theta0 = v0v1.angle((- v0v)) theta1 = v0v1.angle(v1v) u0u1 = (u1 - u0) u0u = (u - u0) u1u = (u - u1) phi0 = u0u1.angle(u0u) phi1 = u0u1.angle((- u1u)) if ((phi0 + phi1) > math.pi): phi0 = u0u1.angle((- u0u)) phi1 = u0u1.angle(u1u) dir0 = (u0u1.cross(u0u) > 0) dir1 = (u0u1.cross(u1u) > 0) return {'theta0': theta0, 'theta1': theta1, 'phi0': phi0, 'phi1': phi1, 'dir0': dir0, 'dir1': dir1}
def _get_target_uv(link_loop, uv_layer, verts_orig, v, ini_geom): '\n Get target UV coordinate\n ' v0 = _get_vco(verts_orig, link_loop['l0']) lo0 = link_loop['l0'] v1 = _get_vco(verts_orig, link_loop['l1']) lo1 = link_loop['l1'] v0v1 = (v1 - v0) v0v = (v.co - v0) v1v = (v.co - v1) theta0 = v0v1.angle(v0v) theta1 = v0v1.angle((- v1v)) if ((theta0 + theta1) > math.pi): theta0 = v0v1.angle((- v0v)) theta1 = v0v1.angle(v1v) phi0 = ((theta0 * ini_geom['phi0']) / ini_geom['theta0']) phi1 = ((theta1 * ini_geom['phi1']) / ini_geom['theta1']) uv0 = lo0[uv_layer].uv uv1 = lo1[uv_layer].uv (tuv0, tuv1) = _calc_tri_vert(uv0, uv1, phi0, phi1) u0u1 = (uv1 - uv0) u0u = (tuv0 - uv0) u1u = (tuv0 - uv1) dir0 = (u0u1.cross(u0u) > 0) dir1 = (u0u1.cross(u1u) > 0) if ((ini_geom['dir0'] != dir0) or (ini_geom['dir1'] != dir1)): return tuv1 return tuv0
3,503,573,570,658,573,000
Get target UV coordinate
engine/2.80/scripts/addons/magic_uv/op/texture_lock.py
_get_target_uv
byteinc/Phasor
python
def _get_target_uv(link_loop, uv_layer, verts_orig, v, ini_geom): '\n \n ' v0 = _get_vco(verts_orig, link_loop['l0']) lo0 = link_loop['l0'] v1 = _get_vco(verts_orig, link_loop['l1']) lo1 = link_loop['l1'] v0v1 = (v1 - v0) v0v = (v.co - v0) v1v = (v.co - v1) theta0 = v0v1.angle(v0v) theta1 = v0v1.angle((- v1v)) if ((theta0 + theta1) > math.pi): theta0 = v0v1.angle((- v0v)) theta1 = v0v1.angle(v1v) phi0 = ((theta0 * ini_geom['phi0']) / ini_geom['theta0']) phi1 = ((theta1 * ini_geom['phi1']) / ini_geom['theta1']) uv0 = lo0[uv_layer].uv uv1 = lo1[uv_layer].uv (tuv0, tuv1) = _calc_tri_vert(uv0, uv1, phi0, phi1) u0u1 = (uv1 - uv0) u0u = (tuv0 - uv0) u1u = (tuv0 - uv1) dir0 = (u0u1.cross(u0u) > 0) dir1 = (u0u1.cross(u1u) > 0) if ((ini_geom['dir0'] != dir0) or (ini_geom['dir1'] != dir1)): return tuv1 return tuv0
def _calc_tri_vert(v0, v1, angle0, angle1): '\n Calculate rest coordinate from other coordinates and angle of end\n ' angle = ((math.pi - angle0) - angle1) alpha = atan2((v1.y - v0.y), (v1.x - v0.x)) d = ((v1.x - v0.x) / cos(alpha)) a = ((d * sin(angle0)) / sin(angle)) b = ((d * sin(angle1)) / sin(angle)) s = (((a + b) + d) / 2.0) if (fabs(d) < 1e-07): xd = 0 yd = 0 else: r = (((s * (s - a)) * (s - b)) * (s - d)) if (r < 0): xd = 0 yd = 0 else: xd = ((((b * b) - (a * a)) + (d * d)) / (2 * d)) yd = ((2 * sqrt(r)) / d) x1 = (((xd * cos(alpha)) - (yd * sin(alpha))) + v0.x) y1 = (((xd * sin(alpha)) + (yd * cos(alpha))) + v0.y) x2 = (((xd * cos(alpha)) + (yd * sin(alpha))) + v0.x) y2 = (((xd * sin(alpha)) - (yd * cos(alpha))) + v0.y) return (Vector((x1, y1)), Vector((x2, y2)))
240,756,091,218,326,300
Calculate the remaining triangle vertex coordinate from the other two coordinates and the interior angles at each end
engine/2.80/scripts/addons/magic_uv/op/texture_lock.py
_calc_tri_vert
byteinc/Phasor
python
def _calc_tri_vert(v0, v1, angle0, angle1): '\n \n ' angle = ((math.pi - angle0) - angle1) alpha = atan2((v1.y - v0.y), (v1.x - v0.x)) d = ((v1.x - v0.x) / cos(alpha)) a = ((d * sin(angle0)) / sin(angle)) b = ((d * sin(angle1)) / sin(angle)) s = (((a + b) + d) / 2.0) if (fabs(d) < 1e-07): xd = 0 yd = 0 else: r = (((s * (s - a)) * (s - b)) * (s - d)) if (r < 0): xd = 0 yd = 0 else: xd = ((((b * b) - (a * a)) + (d * d)) / (2 * d)) yd = ((2 * sqrt(r)) / d) x1 = (((xd * cos(alpha)) - (yd * sin(alpha))) + v0.x) y1 = (((xd * sin(alpha)) + (yd * cos(alpha))) + v0.y) x2 = (((xd * cos(alpha)) + (yd * sin(alpha))) + v0.x) y2 = (((xd * sin(alpha)) - (yd * cos(alpha))) + v0.y) return (Vector((x1, y1)), Vector((x2, y2)))
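The triangulation above combines the law of sines with Heron's formula. In the function's own variables (a sketch of the derivation, not taken from the source), with $d$ the base $v_0 v_1$, $\theta_0, \theta_1$ the base angles and $\theta$ the apex angle:

    \theta = \pi - \theta_0 - \theta_1, \qquad a = \frac{d\,\sin\theta_0}{\sin\theta}, \qquad b = \frac{d\,\sin\theta_1}{\sin\theta}, \qquad s = \frac{a+b+d}{2}

    x_d = \frac{b^2 - a^2 + d^2}{2d}, \qquad y_d = \frac{2\sqrt{s(s-a)(s-b)(s-d)}}{d}

Here $x_d$ is the foot of the apex altitude measured from $v_0$ and $y_d$ its height; rotating $(x_d, \pm y_d)$ by $\alpha$ (the slope of the base) and translating by $v_0$ gives the two mirror-image candidates the function returns, with the degenerate guards ($|d| \approx 0$ or a negative Heron radicand) collapsing to $(0, 0)$.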
def __update_uv(self, context): '\n Update UV when vertex coordinates are changed\n ' obj = context.active_object bm = bmesh.from_edit_mesh(obj.data) if (common.check_version(2, 73, 0) >= 0): bm.verts.ensure_lookup_table() bm.edges.ensure_lookup_table() bm.faces.ensure_lookup_table() if (not bm.loops.layers.uv): self.report({'WARNING'}, 'Object must have more than one UV map') return uv_layer = bm.loops.layers.uv.verify() verts = [v.index for v in bm.verts if v.select] verts_orig = self.__intr_verts_orig for (vidx, v_orig) in zip(verts, verts_orig): if (vidx != v_orig['vidx']): self.report({'ERROR'}, 'Internal Error') return v = bm.verts[vidx] link_loops = _get_link_loops(v) result = [] for ll in link_loops: ini_geom = _get_ini_geom(ll, uv_layer, verts_orig, v_orig) target_uv = _get_target_uv(ll, uv_layer, verts_orig, v, ini_geom) result.append({'l': ll['l'], 'uv': target_uv}) ave = Vector((0.0, 0.0)) for r in result: ave = (ave + r['uv']) ave = (ave / len(result)) for r in result: r['l'][uv_layer].uv = ave v_orig['moved'] = True bmesh.update_edit_mesh(obj.data) common.redraw_all_areas() self.__intr_verts_orig = [{'vidx': v.index, 'vco': v.co.copy(), 'moved': False} for v in bm.verts if v.select]
9,013,071,885,757,533,000
Update UV when vertex coordinates are changed
engine/2.80/scripts/addons/magic_uv/op/texture_lock.py
__update_uv
byteinc/Phasor
python
def __update_uv(self, context): '\n \n ' obj = context.active_object bm = bmesh.from_edit_mesh(obj.data) if (common.check_version(2, 73, 0) >= 0): bm.verts.ensure_lookup_table() bm.edges.ensure_lookup_table() bm.faces.ensure_lookup_table() if (not bm.loops.layers.uv): self.report({'WARNING'}, 'Object must have more than one UV map') return uv_layer = bm.loops.layers.uv.verify() verts = [v.index for v in bm.verts if v.select] verts_orig = self.__intr_verts_orig for (vidx, v_orig) in zip(verts, verts_orig): if (vidx != v_orig['vidx']): self.report({'ERROR'}, 'Internal Error') return v = bm.verts[vidx] link_loops = _get_link_loops(v) result = [] for ll in link_loops: ini_geom = _get_ini_geom(ll, uv_layer, verts_orig, v_orig) target_uv = _get_target_uv(ll, uv_layer, verts_orig, v, ini_geom) result.append({'l': ll['l'], 'uv': target_uv}) ave = Vector((0.0, 0.0)) for r in result: ave = (ave + r['uv']) ave = (ave / len(result)) for r in result: r['l'][uv_layer].uv = ave v_orig['moved'] = True bmesh.update_edit_mesh(obj.data) common.redraw_all_areas() self.__intr_verts_orig = [{'vidx': v.index, 'vco': v.co.copy(), 'moved': False} for v in bm.verts if v.select]
def create_figure_and_sliders(name, state_dim): '\n Creating a window for the latent space visualization,\n and another one for the sliders to control it.\n\n :param name: name of model (str)\n :param state_dim: (int)\n :return:\n ' cv2.namedWindow(name, cv2.WINDOW_NORMAL) cv2.resizeWindow(name, 500, 500) cv2.namedWindow(('slider for ' + name)) for i in range(state_dim): cv2.createTrackbar(str(i), ('slider for ' + name), 50, 100, (lambda a: None))
-3,995,013,066,744,532,500
Creating a window for the latent space visualization, and another one for the sliders to control it. :param name: name of model (str) :param state_dim: (int) :return:
ae/enjoy_latent.py
create_figure_and_sliders
araffin/aae-train-donkeycar
python
def create_figure_and_sliders(name, state_dim): '\n Creating a window for the latent space visualization,\n and another one for the sliders to control it.\n\n :param name: name of model (str)\n :param state_dim: (int)\n :return:\n ' cv2.namedWindow(name, cv2.WINDOW_NORMAL) cv2.resizeWindow(name, 500, 500) cv2.namedWindow(('slider for ' + name)) for i in range(state_dim): cv2.createTrackbar(str(i), ('slider for ' + name), 50, 100, (lambda a: None))
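A hedged sketch of the read-back loop this setup is typically paired with (the 0..100-to-latent mapping and decode_latent are assumptions, not from the source):

    import cv2
    import numpy as np

    state_dim = 4
    create_figure_and_sliders('vae', state_dim)
    while True:
        # Trackbars range over 0..100; recentre around 0 for a latent vector.
        z = np.array([(cv2.getTrackbarPos(str(i), 'slider for vae') - 50) / 20.0
                      for i in range(state_dim)])
        # img = decode_latent(z)      # hypothetical decoder call
        # cv2.imshow('vae', img)
        if (cv2.waitKey(10) & 0xFF) == 27:   # Esc to quit
            break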
def nms(dets, thresh): 'Apply classic DPM-style greedy NMS.' if (dets.shape[0] == 0): return dets[[], :] scores = dets[:, 0] x1 = dets[:, 1] y1 = dets[:, 2] x2 = dets[:, 3] y2 = dets[:, 4] areas = (((x2 - x1) + 1) * ((y2 - y1) + 1)) order = scores.argsort()[::(- 1)] ndets = dets.shape[0] suppressed = np.zeros(ndets, dtype=int) for _i in range(ndets): i = order[_i] if (suppressed[i] == 1): continue ix1 = x1[i] iy1 = y1[i] ix2 = x2[i] iy2 = y2[i] iarea = areas[i] for _j in range((_i + 1), ndets): j = order[_j] if (suppressed[j] == 1): continue xx1 = max(ix1, x1[j]) yy1 = max(iy1, y1[j]) xx2 = min(ix2, x2[j]) yy2 = min(iy2, y2[j]) w = max(0.0, ((xx2 - xx1) + 1)) h = max(0.0, ((yy2 - yy1) + 1)) inter = (w * h) ovr = (inter / ((iarea + areas[j]) - inter)) if (ovr >= thresh): suppressed[j] = 1 keep = np.where((suppressed == 0))[0] dets = dets[keep, :] return dets
80,342,114,398,799,000
Apply classic DPM-style greedy NMS.
ppdet/modeling/post_process.py
nms
gbstack/PaddleDetection
python
def nms(dets, thresh): if (dets.shape[0] == 0): return dets[[], :] scores = dets[:, 0] x1 = dets[:, 1] y1 = dets[:, 2] x2 = dets[:, 3] y2 = dets[:, 4] areas = (((x2 - x1) + 1) * ((y2 - y1) + 1)) order = scores.argsort()[::(- 1)] ndets = dets.shape[0] suppressed = np.zeros(ndets, dtype=int) for _i in range(ndets): i = order[_i] if (suppressed[i] == 1): continue ix1 = x1[i] iy1 = y1[i] ix2 = x2[i] iy2 = y2[i] iarea = areas[i] for _j in range((_i + 1), ndets): j = order[_j] if (suppressed[j] == 1): continue xx1 = max(ix1, x1[j]) yy1 = max(iy1, y1[j]) xx2 = min(ix2, x2[j]) yy2 = min(iy2, y2[j]) w = max(0.0, ((xx2 - xx1) + 1)) h = max(0.0, ((yy2 - yy1) + 1)) inter = (w * h) ovr = (inter / ((iarea + areas[j]) - inter)) if (ovr >= thresh): suppressed[j] = 1 keep = np.where((suppressed == 0))[0] dets = dets[keep, :] return dets
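A quick toy check of the greedy NMS above (column 0 is the score, columns 1-4 are x1, y1, x2, y2; with the +1 pixel convention the middle box overlaps the first at IoU of roughly 0.81):

    import numpy as np

    dets = np.array([
        [0.9, 10, 10, 50, 50],        # kept: highest score
        [0.8, 12, 12, 48, 48],        # suppressed: IoU with box 0 >= 0.5
        [0.7, 100, 100, 150, 150],    # kept: disjoint from both
    ])
    print(nms(dets, thresh=0.5)[:, 0])   # -> [0.9 0.7]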
def forward(self, head_out, rois, im_shape, scale_factor): '\n Decode the bbox and do NMS if needed. \n\n Args:\n head_out (tuple): bbox_pred and cls_prob of bbox_head output.\n rois (tuple): roi and rois_num of rpn_head output.\n im_shape (Tensor): The shape of the input image.\n scale_factor (Tensor): The scale factor of the input image.\n Returns:\n bbox_pred (Tensor): The output prediction with shape [N, 6], including\n labels, scores and bboxes. The size of bboxes are corresponding\n to the input image, the bboxes may be used in other branch.\n bbox_num (Tensor): The number of prediction boxes of each batch with\n shape [1], and is N.\n ' if (self.nms is not None): (bboxes, score) = self.decode(head_out, rois, im_shape, scale_factor) (bbox_pred, bbox_num, _) = self.nms(bboxes, score, self.num_classes) else: (bbox_pred, bbox_num) = self.decode(head_out, rois, im_shape, scale_factor) return (bbox_pred, bbox_num)
-2,970,551,212,598,684,000
Decode the bbox and do NMS if needed. Args: head_out (tuple): bbox_pred and cls_prob of bbox_head output. rois (tuple): roi and rois_num of rpn_head output. im_shape (Tensor): The shape of the input image. scale_factor (Tensor): The scale factor of the input image. Returns: bbox_pred (Tensor): The output prediction with shape [N, 6], including labels, scores and bboxes. The bbox coordinates correspond to the input image, so the bboxes may be used in other branches. bbox_num (Tensor): The number of prediction boxes of each batch with shape [1], and is N.
ppdet/modeling/post_process.py
forward
gbstack/PaddleDetection
python
def forward(self, head_out, rois, im_shape, scale_factor): '\n Decode the bbox and do NMS if needed. \n\n Args:\n head_out (tuple): bbox_pred and cls_prob of bbox_head output.\n rois (tuple): roi and rois_num of rpn_head output.\n im_shape (Tensor): The shape of the input image.\n scale_factor (Tensor): The scale factor of the input image.\n Returns:\n bbox_pred (Tensor): The output prediction with shape [N, 6], including\n labels, scores and bboxes. The size of bboxes are corresponding\n to the input image, the bboxes may be used in other branch.\n bbox_num (Tensor): The number of prediction boxes of each batch with\n shape [1], and is N.\n ' if (self.nms is not None): (bboxes, score) = self.decode(head_out, rois, im_shape, scale_factor) (bbox_pred, bbox_num, _) = self.nms(bboxes, score, self.num_classes) else: (bbox_pred, bbox_num) = self.decode(head_out, rois, im_shape, scale_factor) return (bbox_pred, bbox_num)
def get_pred(self, bboxes, bbox_num, im_shape, scale_factor): '\n Rescale, clip and filter the bbox from the output of NMS to \n get final prediction. \n \n Notes:\n Currently only support bs = 1.\n\n Args:\n bboxes (Tensor): The output bboxes with shape [N, 6] after decode\n and NMS, including labels, scores and bboxes.\n bbox_num (Tensor): The number of prediction boxes of each batch with\n shape [1], and is N.\n im_shape (Tensor): The shape of the input image.\n scale_factor (Tensor): The scale factor of the input image.\n Returns:\n pred_result (Tensor): The final prediction results with shape [N, 6]\n including labels, scores and bboxes.\n ' if (bboxes.shape[0] == 0): bboxes = self.fake_bboxes bbox_num = self.fake_bbox_num origin_shape = paddle.floor(((im_shape / scale_factor) + 0.5)) origin_shape_list = [] scale_factor_list = [] for i in range(bbox_num.shape[0]): expand_shape = paddle.expand(origin_shape[i:(i + 1), :], [bbox_num[i], 2]) (scale_y, scale_x) = (scale_factor[i][0], scale_factor[i][1]) scale = paddle.concat([scale_x, scale_y, scale_x, scale_y]) expand_scale = paddle.expand(scale, [bbox_num[i], 4]) origin_shape_list.append(expand_shape) scale_factor_list.append(expand_scale) self.origin_shape_list = paddle.concat(origin_shape_list) scale_factor_list = paddle.concat(scale_factor_list) pred_label = bboxes[:, 0:1] pred_score = bboxes[:, 1:2] pred_bbox = bboxes[:, 2:] scaled_bbox = (pred_bbox / scale_factor_list) origin_h = self.origin_shape_list[:, 0] origin_w = self.origin_shape_list[:, 1] zeros = paddle.zeros_like(origin_h) x1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 0], origin_w), zeros) y1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 1], origin_h), zeros) x2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 2], origin_w), zeros) y2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 3], origin_h), zeros) pred_bbox = paddle.stack([x1, y1, x2, y2], axis=(- 1)) keep_mask = nonempty_bbox(pred_bbox, return_mask=True) keep_mask = paddle.unsqueeze(keep_mask, [1]) pred_label = paddle.where(keep_mask, pred_label, (paddle.ones_like(pred_label) * (- 1))) pred_result = paddle.concat([pred_label, pred_score, pred_bbox], axis=1) return pred_result
3,043,091,144,749,047,000
Rescale, clip and filter the bbox from the output of NMS to get the final prediction. Notes: Currently only supports bs = 1. Args: bboxes (Tensor): The output bboxes with shape [N, 6] after decode and NMS, including labels, scores and bboxes. bbox_num (Tensor): The number of prediction boxes of each batch with shape [1], and is N. im_shape (Tensor): The shape of the input image. scale_factor (Tensor): The scale factor of the input image. Returns: pred_result (Tensor): The final prediction results with shape [N, 6] including labels, scores and bboxes.
ppdet/modeling/post_process.py
get_pred
gbstack/PaddleDetection
python
def get_pred(self, bboxes, bbox_num, im_shape, scale_factor): '\n Rescale, clip and filter the bbox from the output of NMS to \n get final prediction. \n \n Notes:\n Currently only support bs = 1.\n\n Args:\n bboxes (Tensor): The output bboxes with shape [N, 6] after decode\n and NMS, including labels, scores and bboxes.\n bbox_num (Tensor): The number of prediction boxes of each batch with\n shape [1], and is N.\n im_shape (Tensor): The shape of the input image.\n scale_factor (Tensor): The scale factor of the input image.\n Returns:\n pred_result (Tensor): The final prediction results with shape [N, 6]\n including labels, scores and bboxes.\n ' if (bboxes.shape[0] == 0): bboxes = self.fake_bboxes bbox_num = self.fake_bbox_num origin_shape = paddle.floor(((im_shape / scale_factor) + 0.5)) origin_shape_list = [] scale_factor_list = [] for i in range(bbox_num.shape[0]): expand_shape = paddle.expand(origin_shape[i:(i + 1), :], [bbox_num[i], 2]) (scale_y, scale_x) = (scale_factor[i][0], scale_factor[i][1]) scale = paddle.concat([scale_x, scale_y, scale_x, scale_y]) expand_scale = paddle.expand(scale, [bbox_num[i], 4]) origin_shape_list.append(expand_shape) scale_factor_list.append(expand_scale) self.origin_shape_list = paddle.concat(origin_shape_list) scale_factor_list = paddle.concat(scale_factor_list) pred_label = bboxes[:, 0:1] pred_score = bboxes[:, 1:2] pred_bbox = bboxes[:, 2:] scaled_bbox = (pred_bbox / scale_factor_list) origin_h = self.origin_shape_list[:, 0] origin_w = self.origin_shape_list[:, 1] zeros = paddle.zeros_like(origin_h) x1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 0], origin_w), zeros) y1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 1], origin_h), zeros) x2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 2], origin_w), zeros) y2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 3], origin_h), zeros) pred_bbox = paddle.stack([x1, y1, x2, y2], axis=(- 1)) keep_mask = nonempty_bbox(pred_bbox, return_mask=True) keep_mask = paddle.unsqueeze(keep_mask, [1]) pred_label = paddle.where(keep_mask, pred_label, (paddle.ones_like(pred_label) * (- 1))) pred_result = paddle.concat([pred_label, pred_score, pred_bbox], axis=1) return pred_result
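The rescale-and-clip step in get_pred can be sketched without Paddle; the numbers below (a 2x scale factor and a 300x400 original image) are hypothetical, and the numpy version mirrors the divide-then-clamp logic rather than the exact tensor API.

import numpy as np

scale_y, scale_x = 2.0, 2.0            # hypothetical scale_factor row
origin_h, origin_w = 400.0, 300.0      # original image size (h, w)
pred_bbox = np.array([[-8.0, 20.0, 700.0, 900.0]])  # x1, y1, x2, y2 in network-input coords

# Divide by [scale_x, scale_y, scale_x, scale_y], then clamp into the image.
scaled = pred_bbox / np.array([scale_x, scale_y, scale_x, scale_y])
clipped = np.stack([
    np.clip(scaled[:, 0], 0, origin_w),
    np.clip(scaled[:, 1], 0, origin_h),
    np.clip(scaled[:, 2], 0, origin_w),
    np.clip(scaled[:, 3], 0, origin_h),
], axis=-1)
print(clipped)  # -> [[  0.  10. 300. 400.]]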
def paste_mask(self, masks, boxes, im_h, im_w): '\n Paste the mask prediction to the original image.\n ' (x0, y0, x1, y1) = paddle.split(boxes, 4, axis=1) masks = paddle.unsqueeze(masks, [0, 1]) img_y = (paddle.arange(0, im_h, dtype='float32') + 0.5) img_x = (paddle.arange(0, im_w, dtype='float32') + 0.5) img_y = ((((img_y - y0) / (y1 - y0)) * 2) - 1) img_x = ((((img_x - x0) / (x1 - x0)) * 2) - 1) img_x = paddle.unsqueeze(img_x, [1]) img_y = paddle.unsqueeze(img_y, [2]) N = boxes.shape[0] gx = paddle.expand(img_x, [N, img_y.shape[1], img_x.shape[2]]) gy = paddle.expand(img_y, [N, img_y.shape[1], img_x.shape[2]]) grid = paddle.stack([gx, gy], axis=3) img_masks = F.grid_sample(masks, grid, align_corners=False) return img_masks[:, 0]
8,498,504,445,264,646,000
Paste the mask prediction to the original image.
ppdet/modeling/post_process.py
paste_mask
gbstack/PaddleDetection
python
def paste_mask(self, masks, boxes, im_h, im_w): '\n \n ' (x0, y0, x1, y1) = paddle.split(boxes, 4, axis=1) masks = paddle.unsqueeze(masks, [0, 1]) img_y = (paddle.arange(0, im_h, dtype='float32') + 0.5) img_x = (paddle.arange(0, im_w, dtype='float32') + 0.5) img_y = ((((img_y - y0) / (y1 - y0)) * 2) - 1) img_x = ((((img_x - x0) / (x1 - x0)) * 2) - 1) img_x = paddle.unsqueeze(img_x, [1]) img_y = paddle.unsqueeze(img_y, [2]) N = boxes.shape[0] gx = paddle.expand(img_x, [N, img_y.shape[1], img_x.shape[2]]) gy = paddle.expand(img_y, [N, img_y.shape[1], img_x.shape[2]]) grid = paddle.stack([gx, gy], axis=3) img_masks = F.grid_sample(masks, grid, align_corners=False) return img_masks[:, 0]
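The coordinate trick in paste_mask is easy to verify in isolation: pixel centers are mapped into the [-1, 1] frame of the box, so F.grid_sample samples the small mask only where the box lies. A one-axis numpy check, with a made-up box spanning x in [10, 30):

import numpy as np

x0, x1 = 10.0, 30.0                                # hypothetical box edges
img_x = np.arange(0, 40, dtype=np.float32) + 0.5   # pixel centers of a 40-px-wide image
gx = (img_x - x0) / (x1 - x0) * 2 - 1              # same formula as in paste_mask
# Pixels inside the box land in [-1, 1]; the rest fall out of range, where
# grid_sample returns zeros, so the mask is pasted only inside the box.
print(gx[10], gx[29])  # -> -0.95 0.95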
def __call__(self, mask_out, bboxes, bbox_num, origin_shape): '\n Decode the mask_out and paste the mask to the origin image.\n\n Args:\n mask_out (Tensor): mask_head output with shape [N, 28, 28].\n bbox_pred (Tensor): The output bboxes with shape [N, 6] after decode\n and NMS, including labels, scores and bboxes.\n bbox_num (Tensor): The number of prediction boxes of each batch with\n shape [1], and is N.\n origin_shape (Tensor): The origin shape of the input image, the tensor\n shape is [N, 2], and each row is [h, w].\n Returns:\n pred_result (Tensor): The final prediction mask results with shape\n [N, h, w] in binary mask style.\n ' num_mask = mask_out.shape[0] origin_shape = paddle.cast(origin_shape, 'int32') pred_result = paddle.zeros([num_mask, origin_shape[0][0], origin_shape[0][1]], dtype='int32') if ((bbox_num == 1) and (bboxes[0][0] == (- 1))): return pred_result pred_result = [] for i in range(bboxes.shape[0]): (im_h, im_w) = (origin_shape[i][0], origin_shape[i][1]) pred_mask = self.paste_mask(mask_out[i], bboxes[i:(i + 1), 2:], im_h, im_w) pred_mask = (pred_mask >= self.binary_thresh) pred_mask = paddle.cast(pred_mask, 'int32') pred_result.append(pred_mask) pred_result = paddle.concat(pred_result) return pred_result
-6,449,566,251,477,395,000
Decode the mask_out and paste the mask to the origin image. Args: mask_out (Tensor): mask_head output with shape [N, 28, 28]. bbox_pred (Tensor): The output bboxes with shape [N, 6] after decode and NMS, including labels, scores and bboxes. bbox_num (Tensor): The number of prediction boxes of each batch with shape [1], and is N. origin_shape (Tensor): The origin shape of the input image, the tensor shape is [N, 2], and each row is [h, w]. Returns: pred_result (Tensor): The final prediction mask results with shape [N, h, w] in binary mask style.
ppdet/modeling/post_process.py
__call__
gbstack/PaddleDetection
python
def __call__(self, mask_out, bboxes, bbox_num, origin_shape): '\n Decode the mask_out and paste the mask to the origin image.\n\n Args:\n mask_out (Tensor): mask_head output with shape [N, 28, 28].\n bbox_pred (Tensor): The output bboxes with shape [N, 6] after decode\n and NMS, including labels, scores and bboxes.\n bbox_num (Tensor): The number of prediction boxes of each batch with\n shape [1], and is N.\n origin_shape (Tensor): The origin shape of the input image, the tensor\n shape is [N, 2], and each row is [h, w].\n Returns:\n pred_result (Tensor): The final prediction mask results with shape\n [N, h, w] in binary mask style.\n ' num_mask = mask_out.shape[0] origin_shape = paddle.cast(origin_shape, 'int32') pred_result = paddle.zeros([num_mask, origin_shape[0][0], origin_shape[0][1]], dtype='int32') if ((bbox_num == 1) and (bboxes[0][0] == (- 1))): return pred_result pred_result = [] for i in range(bboxes.shape[0]): (im_h, im_w) = (origin_shape[i][0], origin_shape[i][1]) pred_mask = self.paste_mask(mask_out[i], bboxes[i:(i + 1), 2:], im_h, im_w) pred_mask = (pred_mask >= self.binary_thresh) pred_mask = paddle.cast(pred_mask, 'int32') pred_result.append(pred_mask) pred_result = paddle.concat(pred_result) return pred_result
def __call__(self, fcos_head_outs, scale_factor): '\n Decode the bbox and do NMS in FCOS.\n ' (locations, cls_logits, bboxes_reg, centerness) = fcos_head_outs (bboxes, score) = self.decode(locations, cls_logits, bboxes_reg, centerness, scale_factor) (bbox_pred, bbox_num, _) = self.nms(bboxes, score) return (bbox_pred, bbox_num)
-9,044,059,880,495,126,000
Decode the bbox and do NMS in FCOS.
ppdet/modeling/post_process.py
__call__
gbstack/PaddleDetection
python
def __call__(self, fcos_head_outs, scale_factor): '\n \n ' (locations, cls_logits, bboxes_reg, centerness) = fcos_head_outs (bboxes, score) = self.decode(locations, cls_logits, bboxes_reg, centerness, scale_factor) (bbox_pred, bbox_num, _) = self.nms(bboxes, score) return (bbox_pred, bbox_num)
def forward(self, pred_scores, pred_bboxes): '\n pred_scores : [N, M] score\n pred_bboxes : [N, 5] xc, yc, w, h, a\n im_shape : [N, 2] im_shape\n scale_factor : [N, 2] scale_factor\n ' pred_ploys0 = rbox2poly(pred_bboxes) pred_ploys = paddle.unsqueeze(pred_ploys0, axis=0) pred_scores0 = paddle.transpose(pred_scores, [1, 0]) pred_scores = paddle.unsqueeze(pred_scores0, axis=0) (pred_cls_score_bbox, bbox_num, _) = self.nms(pred_ploys, pred_scores, self.num_classes) if ((pred_cls_score_bbox.shape[0] <= 0) or (pred_cls_score_bbox.shape[1] <= 1)): pred_cls_score_bbox = self.fake_pred_cls_score_bbox bbox_num = self.fake_bbox_num pred_cls_score_bbox = paddle.reshape(pred_cls_score_bbox, [(- 1), 10]) return (pred_cls_score_bbox, bbox_num)
-2,744,508,617,960,272,400
pred_scores : [N, M] score pred_bboxes : [N, 5] xc, yc, w, h, a im_shape : [N, 2] im_shape scale_factor : [N, 2] scale_factor
ppdet/modeling/post_process.py
forward
gbstack/PaddleDetection
python
def forward(self, pred_scores, pred_bboxes): '\n pred_scores : [N, M] score\n pred_bboxes : [N, 5] xc, yc, w, h, a\n im_shape : [N, 2] im_shape\n scale_factor : [N, 2] scale_factor\n ' pred_ploys0 = rbox2poly(pred_bboxes) pred_ploys = paddle.unsqueeze(pred_ploys0, axis=0) pred_scores0 = paddle.transpose(pred_scores, [1, 0]) pred_scores = paddle.unsqueeze(pred_scores0, axis=0) (pred_cls_score_bbox, bbox_num, _) = self.nms(pred_ploys, pred_scores, self.num_classes) if ((pred_cls_score_bbox.shape[0] <= 0) or (pred_cls_score_bbox.shape[1] <= 1)): pred_cls_score_bbox = self.fake_pred_cls_score_bbox bbox_num = self.fake_bbox_num pred_cls_score_bbox = paddle.reshape(pred_cls_score_bbox, [(- 1), 10]) return (pred_cls_score_bbox, bbox_num)
def get_pred(self, bboxes, bbox_num, im_shape, scale_factor): '\n Rescale, clip and filter the bbox from the output of NMS to\n get final prediction.\n Args:\n bboxes(Tensor): bboxes [N, 10]\n bbox_num(Tensor): bbox_num\n im_shape(Tensor): [1 2]\n scale_factor(Tensor): [1 2]\n Returns:\n bbox_pred(Tensor): The output is the prediction with shape [N, 8]\n including labels, scores and bboxes. The size of\n bboxes are corresponding to the original image.\n ' origin_shape = paddle.floor(((im_shape / scale_factor) + 0.5)) origin_shape_list = [] scale_factor_list = [] for i in range(bbox_num.shape[0]): expand_shape = paddle.expand(origin_shape[i:(i + 1), :], [bbox_num[i], 2]) (scale_y, scale_x) = (scale_factor[i][0], scale_factor[i][1]) scale = paddle.concat([scale_x, scale_y, scale_x, scale_y, scale_x, scale_y, scale_x, scale_y]) expand_scale = paddle.expand(scale, [bbox_num[i], 8]) origin_shape_list.append(expand_shape) scale_factor_list.append(expand_scale) origin_shape_list = paddle.concat(origin_shape_list) scale_factor_list = paddle.concat(scale_factor_list) pred_label_score = bboxes[:, 0:2] pred_bbox = bboxes[:, 2:] pred_bbox = pred_bbox.reshape([(- 1), 8]) scaled_bbox = (pred_bbox / scale_factor_list) origin_h = origin_shape_list[:, 0] origin_w = origin_shape_list[:, 1] bboxes = scaled_bbox zeros = paddle.zeros_like(origin_h) x1 = paddle.maximum(paddle.minimum(bboxes[:, 0], (origin_w - 1)), zeros) y1 = paddle.maximum(paddle.minimum(bboxes[:, 1], (origin_h - 1)), zeros) x2 = paddle.maximum(paddle.minimum(bboxes[:, 2], (origin_w - 1)), zeros) y2 = paddle.maximum(paddle.minimum(bboxes[:, 3], (origin_h - 1)), zeros) x3 = paddle.maximum(paddle.minimum(bboxes[:, 4], (origin_w - 1)), zeros) y3 = paddle.maximum(paddle.minimum(bboxes[:, 5], (origin_h - 1)), zeros) x4 = paddle.maximum(paddle.minimum(bboxes[:, 6], (origin_w - 1)), zeros) y4 = paddle.maximum(paddle.minimum(bboxes[:, 7], (origin_h - 1)), zeros) pred_bbox = paddle.stack([x1, y1, x2, y2, x3, y3, x4, y4], axis=(- 1)) pred_result = paddle.concat([pred_label_score, pred_bbox], axis=1) return pred_result
-2,542,100,866,437,608,400
Rescale, clip and filter the bbox from the output of NMS to get the final prediction. Args: bboxes(Tensor): bboxes [N, 10] bbox_num(Tensor): bbox_num im_shape(Tensor): [1 2] scale_factor(Tensor): [1 2] Returns: bbox_pred(Tensor): The output is the prediction with shape [N, 8] including labels, scores and bboxes. The sizes of the bboxes correspond to the original image.
ppdet/modeling/post_process.py
get_pred
gbstack/PaddleDetection
python
def get_pred(self, bboxes, bbox_num, im_shape, scale_factor): '\n Rescale, clip and filter the bbox from the output of NMS to\n get final prediction.\n Args:\n bboxes(Tensor): bboxes [N, 10]\n bbox_num(Tensor): bbox_num\n im_shape(Tensor): [1 2]\n scale_factor(Tensor): [1 2]\n Returns:\n bbox_pred(Tensor): The output is the prediction with shape [N, 8]\n including labels, scores and bboxes. The size of\n bboxes are corresponding to the original image.\n ' origin_shape = paddle.floor(((im_shape / scale_factor) + 0.5)) origin_shape_list = [] scale_factor_list = [] for i in range(bbox_num.shape[0]): expand_shape = paddle.expand(origin_shape[i:(i + 1), :], [bbox_num[i], 2]) (scale_y, scale_x) = (scale_factor[i][0], scale_factor[i][1]) scale = paddle.concat([scale_x, scale_y, scale_x, scale_y, scale_x, scale_y, scale_x, scale_y]) expand_scale = paddle.expand(scale, [bbox_num[i], 8]) origin_shape_list.append(expand_shape) scale_factor_list.append(expand_scale) origin_shape_list = paddle.concat(origin_shape_list) scale_factor_list = paddle.concat(scale_factor_list) pred_label_score = bboxes[:, 0:2] pred_bbox = bboxes[:, 2:] pred_bbox = pred_bbox.reshape([(- 1), 8]) scaled_bbox = (pred_bbox / scale_factor_list) origin_h = origin_shape_list[:, 0] origin_w = origin_shape_list[:, 1] bboxes = scaled_bbox zeros = paddle.zeros_like(origin_h) x1 = paddle.maximum(paddle.minimum(bboxes[:, 0], (origin_w - 1)), zeros) y1 = paddle.maximum(paddle.minimum(bboxes[:, 1], (origin_h - 1)), zeros) x2 = paddle.maximum(paddle.minimum(bboxes[:, 2], (origin_w - 1)), zeros) y2 = paddle.maximum(paddle.minimum(bboxes[:, 3], (origin_h - 1)), zeros) x3 = paddle.maximum(paddle.minimum(bboxes[:, 4], (origin_w - 1)), zeros) y3 = paddle.maximum(paddle.minimum(bboxes[:, 5], (origin_h - 1)), zeros) x4 = paddle.maximum(paddle.minimum(bboxes[:, 6], (origin_w - 1)), zeros) y4 = paddle.maximum(paddle.minimum(bboxes[:, 7], (origin_h - 1)), zeros) pred_bbox = paddle.stack([x1, y1, x2, y2, x3, y3, x4, y4], axis=(- 1)) pred_result = paddle.concat([pred_label_score, pred_bbox], axis=1) return pred_result
def forward(self, head_out, anchors): "\n Decode the bbox and do NMS for JDE model. \n\n Args:\n head_out (list): Bbox_pred and cls_prob of bbox_head output.\n anchors (list): Anchors of JDE model.\n\n Returns:\n boxes_idx (Tensor): The index of kept bboxes after decode 'JDEBox'. \n bbox_pred (Tensor): The output is the prediction with shape [N, 6]\n including labels, scores and bboxes.\n bbox_num (Tensor): The number of prediction of each batch with shape [N].\n nms_keep_idx (Tensor): The index of kept bboxes after NMS. \n " (boxes_idx, yolo_boxes_scores) = self.decode(head_out, anchors) if (len(boxes_idx) == 0): boxes_idx = self.fake_boxes_idx yolo_boxes_out = self.fake_yolo_boxes_out yolo_scores_out = self.fake_yolo_scores_out else: yolo_boxes = paddle.gather_nd(yolo_boxes_scores, boxes_idx) yolo_boxes_out = paddle.reshape(yolo_boxes[:, :4], shape=[1, len(boxes_idx), 4]) yolo_scores_out = paddle.reshape(yolo_boxes[:, 4:5], shape=[1, 1, len(boxes_idx)]) boxes_idx = boxes_idx[:, 1:] if self.return_idx: (bbox_pred, bbox_num, nms_keep_idx) = self.nms(yolo_boxes_out, yolo_scores_out, self.num_classes) if (bbox_pred.shape[0] == 0): bbox_pred = self.fake_bbox_pred bbox_num = self.fake_bbox_num nms_keep_idx = self.fake_nms_keep_idx return (boxes_idx, bbox_pred, bbox_num, nms_keep_idx) else: (bbox_pred, bbox_num, _) = self.nms(yolo_boxes_out, yolo_scores_out, self.num_classes) if (bbox_pred.shape[0] == 0): bbox_pred = self.fake_bbox_pred bbox_num = self.fake_bbox_num return (_, bbox_pred, bbox_num, _)
1,565,260,054,661,251,800
Decode the bbox and do NMS for the JDE model. Args: head_out (list): Bbox_pred and cls_prob of bbox_head output. anchors (list): Anchors of JDE model. Returns: boxes_idx (Tensor): The index of kept bboxes after decode 'JDEBox'. bbox_pred (Tensor): The output is the prediction with shape [N, 6] including labels, scores and bboxes. bbox_num (Tensor): The number of predictions of each batch with shape [N]. nms_keep_idx (Tensor): The index of kept bboxes after NMS.
ppdet/modeling/post_process.py
forward
gbstack/PaddleDetection
python
def forward(self, head_out, anchors): "\n Decode the bbox and do NMS for JDE model. \n\n Args:\n head_out (list): Bbox_pred and cls_prob of bbox_head output.\n anchors (list): Anchors of JDE model.\n\n Returns:\n boxes_idx (Tensor): The index of kept bboxes after decode 'JDEBox'. \n bbox_pred (Tensor): The output is the prediction with shape [N, 6]\n including labels, scores and bboxes.\n bbox_num (Tensor): The number of prediction of each batch with shape [N].\n nms_keep_idx (Tensor): The index of kept bboxes after NMS. \n " (boxes_idx, yolo_boxes_scores) = self.decode(head_out, anchors) if (len(boxes_idx) == 0): boxes_idx = self.fake_boxes_idx yolo_boxes_out = self.fake_yolo_boxes_out yolo_scores_out = self.fake_yolo_scores_out else: yolo_boxes = paddle.gather_nd(yolo_boxes_scores, boxes_idx) yolo_boxes_out = paddle.reshape(yolo_boxes[:, :4], shape=[1, len(boxes_idx), 4]) yolo_scores_out = paddle.reshape(yolo_boxes[:, 4:5], shape=[1, 1, len(boxes_idx)]) boxes_idx = boxes_idx[:, 1:] if self.return_idx: (bbox_pred, bbox_num, nms_keep_idx) = self.nms(yolo_boxes_out, yolo_scores_out, self.num_classes) if (bbox_pred.shape[0] == 0): bbox_pred = self.fake_bbox_pred bbox_num = self.fake_bbox_num nms_keep_idx = self.fake_nms_keep_idx return (boxes_idx, bbox_pred, bbox_num, nms_keep_idx) else: (bbox_pred, bbox_num, _) = self.nms(yolo_boxes_out, yolo_scores_out, self.num_classes) if (bbox_pred.shape[0] == 0): bbox_pred = self.fake_bbox_pred bbox_num = self.fake_bbox_num return (_, bbox_pred, bbox_num, _)
def __call__(self, head_out, im_shape, scale_factor): '\n Decode the bbox.\n\n Args:\n head_out (tuple): bbox_pred, cls_logit and masks of bbox_head output.\n im_shape (Tensor): The shape of the input image.\n scale_factor (Tensor): The scale factor of the input image.\n Returns:\n bbox_pred (Tensor): The output prediction with shape [N, 6], including\n labels, scores and bboxes. The size of bboxes are corresponding\n to the input image, the bboxes may be used in other branch.\n bbox_num (Tensor): The number of prediction boxes of each batch with\n shape [bs], and is N.\n ' (bboxes, logits, masks) = head_out bbox_pred = bbox_cxcywh_to_xyxy(bboxes) origin_shape = paddle.floor(((im_shape / scale_factor) + 0.5)) (img_h, img_w) = origin_shape.unbind(1) origin_shape = paddle.stack([img_w, img_h, img_w, img_h], axis=(- 1)).unsqueeze(0) bbox_pred *= origin_shape scores = (F.sigmoid(logits) if self.use_focal_loss else F.softmax(logits)[:, :, :(- 1)]) if (not self.use_focal_loss): (scores, labels) = (scores.max((- 1)), scores.argmax((- 1))) if (scores.shape[1] > self.num_top_queries): (scores, index) = paddle.topk(scores, self.num_top_queries, axis=(- 1)) labels = paddle.stack([paddle.gather(l, i) for (l, i) in zip(labels, index)]) bbox_pred = paddle.stack([paddle.gather(b, i) for (b, i) in zip(bbox_pred, index)]) else: (scores, index) = paddle.topk(scores.reshape([logits.shape[0], (- 1)]), self.num_top_queries, axis=(- 1)) labels = (index % logits.shape[2]) index = (index // logits.shape[2]) bbox_pred = paddle.stack([paddle.gather(b, i) for (b, i) in zip(bbox_pred, index)]) bbox_pred = paddle.concat([labels.unsqueeze((- 1)).astype('float32'), scores.unsqueeze((- 1)), bbox_pred], axis=(- 1)) bbox_num = paddle.to_tensor(bbox_pred.shape[1], dtype='int32').tile([bbox_pred.shape[0]]) bbox_pred = bbox_pred.reshape([(- 1), 6]) return (bbox_pred, bbox_num)
-4,586,366,687,494,497,300
Decode the bbox. Args: head_out (tuple): bbox_pred, cls_logit and masks of bbox_head output. im_shape (Tensor): The shape of the input image. scale_factor (Tensor): The scale factor of the input image. Returns: bbox_pred (Tensor): The output prediction with shape [N, 6], including labels, scores and bboxes. The sizes of the bboxes correspond to the input image; the bboxes may be used in other branches. bbox_num (Tensor): The number of prediction boxes of each batch with shape [bs], and is N.
ppdet/modeling/post_process.py
__call__
gbstack/PaddleDetection
python
def __call__(self, head_out, im_shape, scale_factor): '\n Decode the bbox.\n\n Args:\n head_out (tuple): bbox_pred, cls_logit and masks of bbox_head output.\n im_shape (Tensor): The shape of the input image.\n scale_factor (Tensor): The scale factor of the input image.\n Returns:\n bbox_pred (Tensor): The output prediction with shape [N, 6], including\n labels, scores and bboxes. The size of bboxes are corresponding\n to the input image, the bboxes may be used in other branch.\n bbox_num (Tensor): The number of prediction boxes of each batch with\n shape [bs], and is N.\n ' (bboxes, logits, masks) = head_out bbox_pred = bbox_cxcywh_to_xyxy(bboxes) origin_shape = paddle.floor(((im_shape / scale_factor) + 0.5)) (img_h, img_w) = origin_shape.unbind(1) origin_shape = paddle.stack([img_w, img_h, img_w, img_h], axis=(- 1)).unsqueeze(0) bbox_pred *= origin_shape scores = (F.sigmoid(logits) if self.use_focal_loss else F.softmax(logits)[:, :, :(- 1)]) if (not self.use_focal_loss): (scores, labels) = (scores.max((- 1)), scores.argmax((- 1))) if (scores.shape[1] > self.num_top_queries): (scores, index) = paddle.topk(scores, self.num_top_queries, axis=(- 1)) labels = paddle.stack([paddle.gather(l, i) for (l, i) in zip(labels, index)]) bbox_pred = paddle.stack([paddle.gather(b, i) for (b, i) in zip(bbox_pred, index)]) else: (scores, index) = paddle.topk(scores.reshape([logits.shape[0], (- 1)]), self.num_top_queries, axis=(- 1)) labels = (index % logits.shape[2]) index = (index // logits.shape[2]) bbox_pred = paddle.stack([paddle.gather(b, i) for (b, i) in zip(bbox_pred, index)]) bbox_pred = paddle.concat([labels.unsqueeze((- 1)).astype('float32'), scores.unsqueeze((- 1)), bbox_pred], axis=(- 1)) bbox_num = paddle.to_tensor(bbox_pred.shape[1], dtype='int32').tile([bbox_pred.shape[0]]) bbox_pred = bbox_pred.reshape([(- 1), 6]) return (bbox_pred, bbox_num)
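The use_focal_loss branch above flattens the [bs, num_queries, num_classes] scores, takes top-k over the flat axis, and recovers class and query with % and //. A tiny numpy check of that index arithmetic (1 image, 4 queries, 3 classes, all values made up):

import numpy as np

num_classes = 3
scores = np.array([[0.1, 0.9, 0.2,    # query 0
                    0.8, 0.1, 0.1,    # query 1
                    0.3, 0.2, 0.7,    # query 2
                    0.1, 0.1, 0.1]])  # query 3
index = np.argsort(-scores, axis=-1)[:, :2]  # top-2 flat positions
labels = index % num_classes                 # class id within a query
query = index // num_classes                 # which query the score came from
print(index, labels, query)  # -> [[1 3]] [[1 0]] [[0 1]]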
def __call__(self, box_cls, box_pred, scale_factor_wh, img_whwh): '\n Arguments:\n box_cls (Tensor): tensor of shape (batch_size, num_proposals, K).\n The tensor predicts the classification probability for each proposal.\n box_pred (Tensor): tensors of shape (batch_size, num_proposals, 4).\n The tensor predicts 4-vector (x,y,w,h) box\n regression values for every proposal\n scale_factor_wh (Tensor): tensors of shape [batch_size, 2] the scalor of per img\n img_whwh (Tensor): tensors of shape [batch_size, 4]\n Returns:\n bbox_pred (Tensor): tensors of shape [num_boxes, 6] Each row has 6 values:\n [label, confidence, xmin, ymin, xmax, ymax]\n bbox_num (Tensor): tensors of shape [batch_size] the number of RoIs in each image.\n ' assert (len(box_cls) == len(scale_factor_wh) == len(img_whwh)) img_wh = img_whwh[:, :2] scores = F.sigmoid(box_cls) labels = paddle.arange(0, self.num_classes).unsqueeze(0).tile([self.num_proposals, 1]).flatten(start_axis=0, stop_axis=1) classes_all = [] scores_all = [] boxes_all = [] for (i, (scores_per_image, box_pred_per_image)) in enumerate(zip(scores, box_pred)): (scores_per_image, topk_indices) = scores_per_image.flatten(0, 1).topk(self.num_proposals, sorted=False) labels_per_image = paddle.gather(labels, topk_indices, axis=0) box_pred_per_image = box_pred_per_image.reshape([(- 1), 1, 4]).tile([1, self.num_classes, 1]).reshape([(- 1), 4]) box_pred_per_image = paddle.gather(box_pred_per_image, topk_indices, axis=0) classes_all.append(labels_per_image) scores_all.append(scores_per_image) boxes_all.append(box_pred_per_image) bbox_num = paddle.zeros([len(scale_factor_wh)], dtype='int32') boxes_final = [] for i in range(len(scale_factor_wh)): classes = classes_all[i] boxes = boxes_all[i] scores = scores_all[i] boxes[:, 0::2] = (paddle.clip(boxes[:, 0::2], min=0, max=img_wh[i][0]) / scale_factor_wh[i][0]) boxes[:, 1::2] = (paddle.clip(boxes[:, 1::2], min=0, max=img_wh[i][1]) / scale_factor_wh[i][1]) (boxes_w, boxes_h) = ((boxes[:, 2] - boxes[:, 0]).numpy(), (boxes[:, 3] - boxes[:, 1]).numpy()) keep = ((boxes_w > 1.0) & (boxes_h > 1.0)) if (keep.sum() == 0): bboxes = paddle.zeros([1, 6]).astype('float32') else: boxes = paddle.to_tensor(boxes.numpy()[keep]).astype('float32') classes = paddle.to_tensor(classes.numpy()[keep]).astype('float32').unsqueeze((- 1)) scores = paddle.to_tensor(scores.numpy()[keep]).astype('float32').unsqueeze((- 1)) bboxes = paddle.concat([classes, scores, boxes], axis=(- 1)) boxes_final.append(bboxes) bbox_num[i] = bboxes.shape[0] bbox_pred = paddle.concat(boxes_final) return (bbox_pred, bbox_num)
-8,142,696,661,722,245,000
Arguments: box_cls (Tensor): tensor of shape (batch_size, num_proposals, K). The tensor predicts the classification probability for each proposal. box_pred (Tensor): tensors of shape (batch_size, num_proposals, 4). The tensor predicts 4-vector (x,y,w,h) box regression values for every proposal scale_factor_wh (Tensor): tensors of shape [batch_size, 2], the scale factor per image img_whwh (Tensor): tensors of shape [batch_size, 4] Returns: bbox_pred (Tensor): tensors of shape [num_boxes, 6] Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax] bbox_num (Tensor): tensors of shape [batch_size], the number of RoIs in each image.
ppdet/modeling/post_process.py
__call__
gbstack/PaddleDetection
python
def __call__(self, box_cls, box_pred, scale_factor_wh, img_whwh): '\n Arguments:\n box_cls (Tensor): tensor of shape (batch_size, num_proposals, K).\n The tensor predicts the classification probability for each proposal.\n box_pred (Tensor): tensors of shape (batch_size, num_proposals, 4).\n The tensor predicts 4-vector (x,y,w,h) box\n regression values for every proposal\n scale_factor_wh (Tensor): tensors of shape [batch_size, 2] the scalor of per img\n img_whwh (Tensor): tensors of shape [batch_size, 4]\n Returns:\n bbox_pred (Tensor): tensors of shape [num_boxes, 6] Each row has 6 values:\n [label, confidence, xmin, ymin, xmax, ymax]\n bbox_num (Tensor): tensors of shape [batch_size] the number of RoIs in each image.\n ' assert (len(box_cls) == len(scale_factor_wh) == len(img_whwh)) img_wh = img_whwh[:, :2] scores = F.sigmoid(box_cls) labels = paddle.arange(0, self.num_classes).unsqueeze(0).tile([self.num_proposals, 1]).flatten(start_axis=0, stop_axis=1) classes_all = [] scores_all = [] boxes_all = [] for (i, (scores_per_image, box_pred_per_image)) in enumerate(zip(scores, box_pred)): (scores_per_image, topk_indices) = scores_per_image.flatten(0, 1).topk(self.num_proposals, sorted=False) labels_per_image = paddle.gather(labels, topk_indices, axis=0) box_pred_per_image = box_pred_per_image.reshape([(- 1), 1, 4]).tile([1, self.num_classes, 1]).reshape([(- 1), 4]) box_pred_per_image = paddle.gather(box_pred_per_image, topk_indices, axis=0) classes_all.append(labels_per_image) scores_all.append(scores_per_image) boxes_all.append(box_pred_per_image) bbox_num = paddle.zeros([len(scale_factor_wh)], dtype='int32') boxes_final = [] for i in range(len(scale_factor_wh)): classes = classes_all[i] boxes = boxes_all[i] scores = scores_all[i] boxes[:, 0::2] = (paddle.clip(boxes[:, 0::2], min=0, max=img_wh[i][0]) / scale_factor_wh[i][0]) boxes[:, 1::2] = (paddle.clip(boxes[:, 1::2], min=0, max=img_wh[i][1]) / scale_factor_wh[i][1]) (boxes_w, boxes_h) = ((boxes[:, 2] - boxes[:, 0]).numpy(), (boxes[:, 3] - boxes[:, 1]).numpy()) keep = ((boxes_w > 1.0) & (boxes_h > 1.0)) if (keep.sum() == 0): bboxes = paddle.zeros([1, 6]).astype('float32') else: boxes = paddle.to_tensor(boxes.numpy()[keep]).astype('float32') classes = paddle.to_tensor(classes.numpy()[keep]).astype('float32').unsqueeze((- 1)) scores = paddle.to_tensor(scores.numpy()[keep]).astype('float32').unsqueeze((- 1)) bboxes = paddle.concat([classes, scores, boxes], axis=(- 1)) boxes_final.append(bboxes) bbox_num[i] = bboxes.shape[0] bbox_pred = paddle.concat(boxes_final) return (bbox_pred, bbox_num)
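The degenerate-box filter near the end of the function (keep only boxes with width and height above 1) is simple to check in numpy; the box values are invented:

import numpy as np

boxes = np.array([[0.0, 0.0, 5.0, 5.0],    # 5x5, kept
                  [2.0, 2.0, 2.5, 8.0]])   # width 0.5, dropped
w = boxes[:, 2] - boxes[:, 0]
h = boxes[:, 3] - boxes[:, 1]
keep = (w > 1.0) & (h > 1.0)
print(keep)  # -> [ True False]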
def __init__(self, lst, start_x, start_y, height, width, headers=None) -> None: 'Lst is 2-d. The i-th list in lst is the content of tab i+1.\n\n Each string in lst should be no longer than width.\n Scrolling is available only in the vertical direction.\n ' self.start_x = start_x self.start_y = start_y self.height = height self.width = width self.currentpos = 0 self.currenttab = 0 self.lst = lst curses.curs_set(0) newwin = curses.newwin(height, width, start_y, start_x) newwin.border(0) newwin.refresh() self.display()
3,092,889,274,640,904,000
Lst is 2-d. The i-th list in lst is the content of tab i+1. Each string in lst should be no longer than width. Scrolling is available only in the vertical direction.
src/client/ui/widget/displaylist.py
__init__
Tubular-Terriers/code-jam
python
def __init__(self, lst, start_x, start_y, height, width, headers=None) -> None: 'Lst is 2-d. The i-th list in lst is the content of tab i+1.\n\n Each string in lst should be no longer than width.\n Scrolling is available only in the vertical direction.\n ' self.start_x = start_x self.start_y = start_y self.height = height self.width = width self.currentpos = 0 self.currenttab = 0 self.lst = lst curses.curs_set(0) newwin = curses.newwin(height, width, start_y, start_x) newwin.border(0) newwin.refresh() self.display()
def authenticated_view(request): '\n This view can be used to test requests with an authenticated user. Create a\n user with a default username, save it and then use this user to log in.\n Always returns a 200.\n ' user = User(username='Jane Doe') user.save() login(request, user) return HttpResponse(status=200)
-7,649,399,858,114,833,000
This view can be used to test requests with an authenticated user. Create a user with a default username, save it and then use this user to log in. Always returns a 200.
tests/contrib/django/django_app/urls.py
authenticated_view
AlexandreYang/dd-trace-py
python
def authenticated_view(request): '\n This view can be used to test requests with an authenticated user. Create a\n user with a default username, save it and then use this user to log in.\n Always returns a 200.\n ' user = User(username='Jane Doe') user.save() login(request, user) return HttpResponse(status=200)
def gelu(x): 'Gaussian Error Linear Unit.\n This is a smoother version of the RELU.\n Original paper: https://arxiv.org/abs/1606.08415\n Args:\n x: float Tensor to perform activation.\n Returns:\n `x` with the GELU activation applied.\n ' cdf = (0.5 * (1.0 + tf.tanh((np.sqrt((2 / np.pi)) * (x + (0.044715 * tf.pow(x, 3))))))) return (x * cdf)
3,645,099,331,411,212,300
Gaussian Error Linear Unit. This is a smoother version of the RELU. Original paper: https://arxiv.org/abs/1606.08415 Args: x: float Tensor to perform activation. Returns: `x` with the GELU activation applied.
transformers/modeling_tf_openai.py
gelu
richardbaihe/semantic_unwritten
python
def gelu(x): 'Gaussian Error Linear Unit.\n This is a smoother version of the RELU.\n Original paper: https://arxiv.org/abs/1606.08415\n Args:\n x: float Tensor to perform activation.\n Returns:\n `x` with the GELU activation applied.\n ' cdf = (0.5 * (1.0 + tf.tanh((np.sqrt((2 / np.pi)) * (x + (0.044715 * tf.pow(x, 3))))))) return (x * cdf)
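The tanh expression in gelu above is an approximation of the exact GELU x * Phi(x); the numpy transcription below (not the repository's code) shows how close the two forms are:

import numpy as np
from math import erf, sqrt

def gelu_tanh(x):
    # numpy transcription of the tf.tanh approximation above
    return 0.5 * x * (1.0 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * x ** 3)))

def gelu_exact(x):
    # exact definition: x * Phi(x), with Phi the standard normal CDF
    return x * 0.5 * (1.0 + erf(x / sqrt(2.0)))

for v in (-2.0, -0.5, 0.0, 0.5, 2.0):
    print(v, gelu_tanh(v), gelu_exact(v))  # the two agree to roughly 1e-3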
@staticmethod def causal_attention_mask(nd, ns, dtype): "1's in the lower triangle, counting from the lower right corner.\n Same as tf.matrix_band_part(tf.ones([nd, ns]), -1, ns-nd), but doesn't produce garbage on TPUs.\n " i = tf.range(nd)[:, None] j = tf.range(ns) m = (i >= ((j - ns) + nd)) return tf.cast(m, dtype)
458,533,279,018,134,600
1's in the lower triangle, counting from the lower right corner. Same as tf.matrix_band_part(tf.ones([nd, ns]), -1, ns-nd), but doesn't produce garbage on TPUs.
transformers/modeling_tf_openai.py
causal_attention_mask
richardbaihe/semantic_unwritten
python
@staticmethod def causal_attention_mask(nd, ns, dtype): "1's in the lower triangle, counting from the lower right corner.\n Same as tf.matrix_band_part(tf.ones([nd, ns]), -1, ns-nd), but doesn't produce garbage on TPUs.\n " i = tf.range(nd)[:, None] j = tf.range(ns) m = (i >= ((j - ns) + nd)) return tf.cast(m, dtype)
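causal_attention_mask is easiest to read from a small printout; a numpy transcription (assuming only numpy, not TF):

import numpy as np

def causal_mask_np(nd, ns):
    # 1 where query i (of the last nd positions) may attend to key j,
    # i.e. i >= j - ns + nd, exactly as in the TF version above.
    i = np.arange(nd)[:, None]
    j = np.arange(ns)
    return (i >= (j - ns + nd)).astype(np.float32)

print(causal_mask_np(3, 5))
# [[1. 1. 1. 0. 0.]
#  [1. 1. 1. 1. 0.]
#  [1. 1. 1. 1. 1.]]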
def _prune_heads(self, heads_to_prune): ' Prunes heads of the model.\n heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n ' raise NotImplementedError
-86,650,990,666,581,820
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
transformers/modeling_tf_openai.py
_prune_heads
richardbaihe/semantic_unwritten
python
def _prune_heads(self, heads_to_prune): ' Prunes heads of the model.\n heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n ' raise NotImplementedError
def get_pileup_vect(alignments, contig, pos, ref, alt): 'Create feature vector for selected variant\n\n Args:\n alignments(pysam.AlignmentFile) : Handle to alignmentfile\n contig(str) : contig to perform pileup\n pos(int) : zero-based position of variant to pileup\n ref(str) : reference base\n alt(str) : alternative base\n\n Returns\n total(int) : Total number of bases overlapping with selected location\n ref_calls : Total number of bases matching ref\n alt_calls : Total number of bases matching alt\n other_calls : Total number of bases matching neither ref nor alt\n ' total = 0 ref_calls = 0 alt_calls = 0 other_calls = 0 start = pos stop = (pos + 1) for pileupcolumn in pileup_truncated(alignments, contig, start, stop, stepper='all'): for (i, pileupread) in enumerate(pileupcolumn.pileups): if ((not pileupread.is_del) and (not pileupread.is_refskip)): call = pileupread.alignment.query_sequence[pileupread.query_position] if (call == ref): ref_calls += 1 elif (call == alt): alt_calls += 1 else: other_calls += 1 total += 1 return (total, ref_calls, alt_calls, other_calls)
5,540,029,937,697,622,000
Create feature vector for selected variant Args: alignments(pysam.AlignmentFile) : Handle to alignmentfile contig(str) : contig to perform pileup pos(int) : zero-based position of variant to pileup ref(str) : reference base alt(str) : alternative base Returns total(int) : Total number of bases overlapping with selected location ref_calls : Total number of bases matching ref alt_calls : Total number of bases matching alt other_calls : Total number of bases matching neither ref nor alt
singlecellmultiomics/bamProcessing/bamFeatures.py
get_pileup_vect
BuysDB/SingleCellMultiOmics
python
def get_pileup_vect(alignments, contig, pos, ref, alt): 'Create feature vector for selected variant\n\n Args:\n alignments(pysam.AlignmentFile) : Handle to alignmentfile\n contig(str) : contig to perform pileup\n pos(int) : zero-based position of variant to pileup\n ref(str) : reference base\n alt(str) : alternative base\n\n Returns\n total(int) : Total number of bases overlapping with selected location\n ref_calls : Total number of bases matching ref\n alt_calls : Total number of bases matching alt\n other_calls : Total number of bases matching neither ref nor alt\n ' total = 0 ref_calls = 0 alt_calls = 0 other_calls = 0 start = pos stop = (pos + 1) for pileupcolumn in pileup_truncated(alignments, contig, start, stop, stepper='all'): for (i, pileupread) in enumerate(pileupcolumn.pileups): if ((not pileupread.is_del) and (not pileupread.is_refskip)): call = pileupread.alignment.query_sequence[pileupread.query_position] if (call == ref): ref_calls += 1 elif (call == alt): alt_calls += 1 else: other_calls += 1 total += 1 return (total, ref_calls, alt_calls, other_calls)
def get_mapping_q_vect(alignments_handle, contig, pos, radius=150): 'Obtain histogram of mapping qualities, clipped at 60\n\n Args:\n alignments(pysam.AlignmentFile) : Handle to alignmentfile\n contig(str) : contig\n pos(int) : zero-based position of location to check mapping qualities\n radius(int) : radius to check around selected location\n\n Returns:\n mapping_qualities(list) : Histogram with 7 bins (0 to highest mapping quality)\n ' mapping_qualities = ([0] * 7) for read in alignments_handle.fetch(contig, (pos - radius), (pos + radius)): mapping_qualities[(min(60, read.mapping_quality) // 10)] += 1 return mapping_qualities
4,255,158,074,448,860,000
Obtain histogram of mapping qualities, clipped at 60 Args: alignments(pysam.AlignmentFile) : Handle to alignmentfile contig(str) : contig pos(int) : zero-based position of location to check mapping qualities radius(int) : radius to check around selected location Returns: mapping_qualities(list) : Histogram with 7 bins (0 to highest mapping quality)
singlecellmultiomics/bamProcessing/bamFeatures.py
get_mapping_q_vect
BuysDB/SingleCellMultiOmics
python
def get_mapping_q_vect(alignments_handle, contig, pos, radius=150): 'Obtain histogram of mapping qualities, clipped at 60\n\n Args:\n alignments(pysam.AlignmentFile) : Handle to alignmentfile\n contig(str) : contig\n pos(int) : zero-based position of location to check mapping qualities\n radius(int) : radius to check around selected location\n\n Returns:\n mapping_qualities(list) : Histogram with 7 bins (0 to highest mapping quality)\n ' mapping_qualities = ([0] * 7) for read in alignments_handle.fetch(contig, (pos - radius), (pos + radius)): mapping_qualities[(min(60, read.mapping_quality) // 10)] += 1 return mapping_qualities
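The binning in get_mapping_q_vect needs no pysam to demonstrate: with the clipped form used above, quality q lands in bin min(60, q) // 10, so the 7 bins cover 0-9, 10-19, ..., 50-59, and 60+. The qualities below are made up:

mapping_qualities = [0] * 7
for q in (0, 9, 10, 37, 59, 60, 255):      # hypothetical mapping qualities
    mapping_qualities[min(60, q) // 10] += 1
print(mapping_qualities)  # -> [2, 1, 0, 1, 0, 1, 2]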
def __init__(self, default_parameter, trial_run_parameter, lower_is_better, objective, filename_objective, argo_ip, argo_port, k8_namespace, storage_strategy='keep', output_dir=''): "Set init values\n\n Args:\n default_parameter (dict): Parameters that will be submitted with the argo workflow as input flags.\n trial_run_parameter (dict): Parameters that were generated when creating the hyperparameters for sherpa.\n lower_is_better (bool): whether to minimize or maximize the objective\n objective (str): Name of the objective that will be optimized for. Must be a key/name from the metrics that were generated within a trial run.\n filename_objective (str): Filename of the file that contains the objective value which was created within a trial run.\n argo_ip (str): Argo server IP\n argo_port (str): Argo server port\n k8_namespace (str): Name of the kubernetes namespace where the trial container should be executed.\n storage_strategy (str, optional): whether to keep all, delete all, or keep only the files from the best run. Defaults to 'keep'.\n output_dir (str): needed for sherpa api\n " if ('api_exec_token' in os.environ): api_token = ('Bearer ' + os.environ['api_exec_token']) else: logging.error('No Authorization Token detected. Check Kubernetes Secrets and Argo Template!') logging.info('Default Parameter: {}'.format(default_parameter)) self.submit_url = (((((('https://' + argo_ip) + ':') + argo_port) + '/api/v1/workflows/') + k8_namespace) + '/submit') self.status_url = (((((('https://' + argo_ip) + ':') + argo_port) + '/api/v1/workflows/') + k8_namespace) + '/') self.delete_url = self.status_url self.client = Client() self.best_metric = {'job_id': None, 'metric': None} self.headers = {'Authorization': api_token} self.killed_jobs = [] self.output_dir = output_dir self.default_parameter = default_parameter self.trial_run_parameter = trial_run_parameter self.storage_strategy = storage_strategy self.hostname = socket.gethostname() self.trials = {} self.run_name = self.default_parameter['run_name'] self.metrics_filename = filename_objective self.objective = objective self.lower_is_better = lower_is_better self.output_path = self.default_parameter['output_path'] self.decode_status = {'Succeeded': _JobStatus.finished, 'Running': _JobStatus.running, 'Pending': _JobStatus.queued, 'Failed': _JobStatus.failed, 'Stopped': _JobStatus.killed, 'Other': _JobStatus.other}
-1,925,804,667,319,815,000
Set init values Args: default_parameter (dict): Parameters that will be submitted with the argo workflow as input flags. trial_run_parameter (dict): Parameters that were generated when creating the hyperparameters for sherpa. lower_is_better (bool): whether to minimize or maximize the objective objective (str): Name of the objective that will be optimized for. Must be a key/name from the metrics that were generated within a trial run. filename_objective (str): Filename of the file that contains the objective value which was created within a trial run. argo_ip (str): Argo server IP argo_port (str): Argo server port k8_namespace (str): Name of the kubernetes namespace where the trial container should be executed. storage_strategy (str, optional): whether to keep all, delete all, or keep only the files from the best run. Defaults to 'keep'. output_dir (str): needed for sherpa api
argo_scheduler.py
__init__
predictive-quality/ml-pipeline-blocks-hpo-sherpa
python
def __init__(self, default_parameter, trial_run_parameter, lower_is_better, objective, filename_objective, argo_ip, argo_port, k8_namespace, storage_strategy='keep', output_dir=''): "Set init values\n\n Args:\n default_parameter (dict): Parameters that will be submitted with the argo workflow as input flags.\n trial_run_parameter (dict): Parameters that were generated when creating the hyperparameters for sherpa.\n lower_is_better (bool): whether to minimize or maximize the objective\n objective (str): Name of the objective that will be optimized for. Must be a key/name from the metrics that were generated within a trial run.\n filename_objective (str): Filename of the file that contains the objective value which was created within a trial run.\n argo_ip (str): Argo server IP\n argo_port (str): Argo server port\n k8_namespace (str): Name of the kubernetes namespace where the trial container should be executed.\n storage_strategy (str, optional): whether to keep all, delete all, or keep only the files from the best run. Defaults to 'keep'.\n output_dir (str): needed for sherpa api\n " if ('api_exec_token' in os.environ): api_token = ('Bearer ' + os.environ['api_exec_token']) else: logging.error('No Authorization Token detected. Check Kubernetes Secrets and Argo Template!') logging.info('Default Parameter: {}'.format(default_parameter)) self.submit_url = (((((('https://' + argo_ip) + ':') + argo_port) + '/api/v1/workflows/') + k8_namespace) + '/submit') self.status_url = (((((('https://' + argo_ip) + ':') + argo_port) + '/api/v1/workflows/') + k8_namespace) + '/') self.delete_url = self.status_url self.client = Client() self.best_metric = {'job_id': None, 'metric': None} self.headers = {'Authorization': api_token} self.killed_jobs = [] self.output_dir = output_dir self.default_parameter = default_parameter self.trial_run_parameter = trial_run_parameter self.storage_strategy = storage_strategy self.hostname = socket.gethostname() self.trials = {} self.run_name = self.default_parameter['run_name'] self.metrics_filename = filename_objective self.objective = objective self.lower_is_better = lower_is_better self.output_path = self.default_parameter['output_path'] self.decode_status = {'Succeeded': _JobStatus.finished, 'Running': _JobStatus.running, 'Pending': _JobStatus.queued, 'Failed': _JobStatus.failed, 'Stopped': _JobStatus.killed, 'Other': _JobStatus.other}