code
stringlengths 66
870k
| docstring
stringlengths 19
26.7k
| func_name
stringlengths 1
138
| language
stringclasses 1
value | repo
stringlengths 7
68
| path
stringlengths 5
324
| url
stringlengths 46
389
| license
stringclasses 7
values |
---|---|---|---|---|---|---|---|
def bech32_create_checksum(hrp, data):
    """Compute the six 5-bit checksum values for the given HRP and data."""
    # Append six zero placeholders, run the polymod, then flip the low bit
    # as required by the Bech32 specification (BIP 173).
    checksummed = bech32_hrp_expand(hrp) + data + [0, 0, 0, 0, 0, 0]
    polymod = bech32_polymod(checksummed) ^ 1
    return [(polymod >> (5 * (5 - pos))) & 31 for pos in range(6)]
|
Compute the checksum values given HRP and data.
|
bech32_create_checksum
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/BTC/bech32.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/BTC/bech32.py
|
MIT
|
def bech32_encode(hrp, data):
    """Compute a Bech32 string given HRP and data values."""
    payload = data + bech32_create_checksum(hrp, data)
    encoded = ''.join(CHARSET[value] for value in payload)
    # '1' is the fixed separator between the human-readable part and data.
    return hrp + '1' + encoded
|
Compute a Bech32 string given HRP and data values.
|
bech32_encode
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/BTC/bech32.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/BTC/bech32.py
|
MIT
|
def bech32_decode(bech):
    """Validate a Bech32 string, and determine HRP and data.

    Returns (hrp, data) on success, (None, None) on any validation failure.
    """
    # Reject characters outside printable ASCII and mixed-case strings.
    out_of_range = any(not (33 <= ord(ch) <= 126) for ch in bech)
    mixed_case = bech.lower() != bech and bech.upper() != bech
    if out_of_range or mixed_case:
        return (None, None)
    bech = bech.lower()
    # The last '1' separates HRP from data; HRP must be non-empty, the data
    # part must hold at least the 6 checksum chars, total length <= 90.
    sep = bech.rfind('1')
    if sep < 1 or sep + 7 > len(bech) or len(bech) > 90:
        return (None, None)
    if any(ch not in CHARSET for ch in bech[sep + 1:]):
        return (None, None)
    hrp = bech[:sep]
    data = [CHARSET.find(ch) for ch in bech[sep + 1:]]
    if not bech32_verify_checksum(hrp, data):
        return (None, None)
    # Strip the 6 checksum values from the returned data.
    return (hrp, data[:-6])
|
Validate a Bech32 string, and determine HRP and data.
|
bech32_decode
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/BTC/bech32.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/BTC/bech32.py
|
MIT
|
def _get_reverse_table(self):
    """Build the mnemonic -> opcode-info table used by the assembler.

    Returns a dict mapping each mnemonic to a 7-tuple:
    (opcode, mnemonic, immediate_operand_size, pops, pushes, gas, description).
    """
    reverse_table = {}
    for (opcode, (mnemonic, immediate_operand_size,
                  pops, pushes, gas, description)) in _table.items():
        reverse_table[mnemonic] = opcode, mnemonic, immediate_operand_size, \
            pops, pushes, gas, description
    # NOTE(review): `gas` below is whatever value the final _table entry left
    # in the loop variable — presumably a shared placeholder; confirm intent.
    reverse_table['OP_FALSE'] = 0x00, 'OP_FALSE', 0, 0, 1, gas, 'An empty array of bytes is pushed onto the stack.'
    reverse_table['OP_TRUE'] = 0x51, 'OP_TRUE', 0, 0, 1, gas, 'The number 1 is pushed onto the stack.'
    # OP_1..OP_16 push the numbers 1..16 (opcodes 0x51..0x60).
    # BUG FIX: these entries previously omitted the mnemonic element, making
    # them 6-tuples while every other entry in the table is a 7-tuple.
    for number in range(1, 17):
        mnemonic = 'OP_%d' % number
        reverse_table[mnemonic] = (0x50 + number, mnemonic, 0, 0, 1, gas,
                                   'The number %d is pushed onto the stack.' % number)
    return reverse_table
|
Build an internal table used in the assembler.
|
_get_reverse_table
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/BTC/btcscript.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/BTC/btcscript.py
|
MIT
|
def gettxoutproof(self, txids, blockhash=None):
    '''
    TESTED
    http://chainquery.com/bitcoin-api/gettxoutproof
    Returns a hex-encoded proof that "txid" was included in a block.
    NOTE: By default this function only works sometimes. This is when there is an
    unspent output in the utxo for this transaction. To make it always work,
    you need to maintain a transaction index, using the -txindex command line option or
    specify the block in which the transaction is included manually (by blockhash).
    Arguments:
    1. "txids" (string) A json array of txids to filter
    2. "blockhash" (string, optional) If specified, looks for txid in the block with this hash
    Result:
    "data" (string) A string that is a serialized, hex-encoded data for the proof.
    '''
    params = [txids]
    if blockhash is not None:
        params.append(blockhash)
    return self.call('gettxoutproof', params)
|
TESTED
http://chainquery.com/bitcoin-api/gettxoutproof
Returns a hex-encoded proof that "txid" was included in a block.
NOTE: By default this function only works sometimes. This is when there is an
unspent output in the utxo for this transaction. To make it always work,
you need to maintain a transaction index, using the -txindex command line option or
specify the block in which the transaction is included manually (by blockhash).
Arguments:
1. "txids" (string) A json array of txids to filter
[
"txid" (string) A transaction hash
,...
]
2. "blockhash" (string, optional) If specified, looks for txid in the block with this hash
Result:
"data" (string) A string that is a serialized, hex-encoded data for the proof.
|
gettxoutproof
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/BTC/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/BTC/explorer.py
|
MIT
|
def help(self, command=None):
    '''
    TESTED
    http://chainquery.com/bitcoin-api/help
    List all commands, or get help for a specified command.
    Arguments:
    1. "command" (string, optional) The command to get help on
    Result:
    "text" (string) The help text
    '''
    if command is not None:
        return self.call('help', [command])
    return self.call('help')
|
TESTED
http://chainquery.com/bitcoin-api/help
List all commands, or get help for a specified command.
Arguments:
1. "command" (string, optional) The command to get help on
Result:
"text" (string) The help text
|
help
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/BTC/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/BTC/explorer.py
|
MIT
|
def lockunspent(self, unlock, transactions=None):
    '''
    TESTED
    http://chainquery.com/bitcoin-api/lockunspent
    Updates list of temporarily unspendable outputs.
    Temporarily lock (unlock=false) or unlock (unlock=true) specified transaction outputs.
    If no transaction outputs are specified when unlocking then all current locked transaction outputs are unlocked.
    A locked transaction output will not be chosen by automatic coin selection, when spending bitcoins.
    Locks are stored in memory only. Nodes start with zero locked outputs, and the locked output list
    is always cleared (by virtue of process exit) when a node stops or fails.
    Also see the listunspent call
    Arguments:
    1. unlock (boolean, required) Whether to unlock (true) or lock (false) the specified transactions
    2. "transactions" (string, optional) A json array of objects, each holding "txid" (string) and "vout" (numeric)
    Result:
    true|false (boolean) Whether the command was successful or not
    '''
    params = [unlock]
    if transactions is not None:
        params.append(transactions)
    return self.call('lockunspent', params)
|
TESTED
http://chainquery.com/bitcoin-api/lockunspent
Updates list of temporarily unspendable outputs.
Temporarily lock (unlock=false) or unlock (unlock=true) specified transaction outputs.
If no transaction outputs are specified when unlocking then all current locked transaction outputs are unlocked.
A locked transaction output will not be chosen by automatic coin selection, when spending bitcoins.
Locks are stored in memory only. Nodes start with zero locked outputs, and the locked output list
is always cleared (by virtue of process exit) when a node stops or fails.
Also see the listunspent call
Arguments:
1. unlock (boolean, required) Whether to unlock (true) or lock (false) the specified transactions
2. "transactions" (string, optional) A json array of objects. Each object the txid (string) vout (numeric)
[ (json array of json objects)
{
"txid":"id", (string) The transaction id
"vout": n (numeric) The output number
}
,...
]
Result:
true|false (boolean) Whether the command was successful or not
|
lockunspent
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/BTC/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/BTC/explorer.py
|
MIT
|
def sendfrom(self, fromaccount, toaddress, amount, minconf=1, comment=None, comment_to=None):
    '''
    http://chainquery.com/bitcoin-api/sendfrom
    NOT TESTED
    DEPRECATED (use sendtoaddress). Send an amount from an account to a bitcoin address.
    Requires wallet passphrase to be set with walletpassphrase call.
    Arguments:
    1. "fromaccount" (string, required) The name of the account to send funds from. May be the default account using "".
    2. "toaddress" (string, required) The bitcoin address to send funds to.
    3. amount (numeric or string, required) The amount in BTC (transaction fee is added on top).
    4. minconf (numeric, optional, default=1) Only use funds with at least this many confirmations.
    5. "comment" (string, optional) A comment used to store what the transaction is for.
       This is not part of the transaction, just kept in your wallet.
    6. "comment_to" (string, optional) An optional comment to store the name of the person or organization
       to which you're sending the transaction. This is not part of the transaction,
       it is just kept in your wallet.
    Result:
    "txid" (string) The transaction id.
    '''
    # The RPC takes positional arguments.
    # BUG FIX: minconf was previously never sent at all, and a comment_to
    # supplied without a comment landed in the comment's positional slot.
    param = [fromaccount, toaddress, amount, minconf]
    if comment is not None or comment_to is not None:
        # Empty string keeps the comment slot occupied when only
        # comment_to is supplied.
        param.append(comment if comment is not None else "")
    if comment_to is not None:
        param.append(comment_to)
    return self.call('sendfrom', param)
|
http://chainquery.com/bitcoin-api/sendfrom
NOT TESTED
DEPRECATED (use sendtoaddress). Send an amount from an account to a bitcoin address.
Requires wallet passphrase to be set with walletpassphrase call.
Arguments:
1. "fromaccount" (string, required) The name of the account to send funds from. May be the default account using "".
Specifying an account does not influence coin selection, but it does associate the newly created
transaction with the account, so the account's balance computation and transaction history can reflect
the spend.
2. "toaddress" (string, required) The bitcoin address to send funds to.
3. amount (numeric or string, required) The amount in BTC (transaction fee is added on top).
4. minconf (numeric, optional, default=1) Only use funds with at least this many confirmations.
5. "comment" (string, optional) A comment used to store what the transaction is for.
This is not part of the transaction, just kept in your wallet.
6. "comment_to" (string, optional) An optional comment to store the name of the person or organization
to which you're sending the transaction. This is not part of the transaction,
it is just kept in your wallet.
Result:
"txid" (string) The transaction id.
|
sendfrom
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/BTC/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/BTC/explorer.py
|
MIT
|
def sendmany(self, fromaccount, amounts, minconf=1, comment=None, subtractfeefrom=None, replaceable=None, conf_target=None, estimate_mode="UNSET"):
    '''
    http://chainquery.com/bitcoin-api/sendmany
    NOT TESTED
    Send multiple times. Amounts are double-precision floating point numbers.
    Requires wallet passphrase to be set with walletpassphrase call.
    Arguments:
    1. "fromaccount" (string, required) DEPRECATED. The account to send the funds from. Should be "" for the default account
    2. "amounts" (string, required) A json object mapping each bitcoin address to the amount (numeric or string) in BTC
    3. minconf (numeric, optional, default=1) Only use the balance confirmed at least this many times.
    4. "comment" (string, optional) A comment
    5. subtractfeefrom (array, optional) A json array with addresses.
       The fee will be equally deducted from the amount of each selected address.
       If no addresses are specified here, the sender pays the fee.
    6. replaceable (boolean, optional) Allow this transaction to be replaced by a transaction with higher fees via BIP 125
    7. conf_target (numeric, optional) Confirmation target (in blocks)
    8. "estimate_mode" (string, optional, default=UNSET) One of "UNSET", "ECONOMICAL", "CONSERVATIVE"
    Result:
    "txid" (string) The transaction id for the send. Only 1 transaction is created regardless of
    the number of addresses.
    '''
    # The RPC takes positional arguments and estimate_mode is always sent,
    # so every earlier optional slot must stay occupied.
    # BUG FIX: previously any skipped optional shifted all later arguments
    # into the wrong positions.
    # JSON null is sent for replaceable/conf_target so the node applies its
    # own defaults (accepted by modern Bitcoin Core; confirm on old nodes).
    param = [fromaccount,
             amounts,
             minconf,
             comment if comment is not None else "",
             subtractfeefrom if subtractfeefrom is not None else [],
             replaceable,
             conf_target,
             estimate_mode]
    return self.call('sendmany', param)
|
http://chainquery.com/bitcoin-api/sendmany
NOT TESTED
Send multiple times. Amounts are double-precision floating point numbers.
Requires wallet passphrase to be set with walletpassphrase call.
Arguments:
1. "fromaccount" (string, required) DEPRECATED. The account to send the funds from. Should be "" for the default account
2. "amounts" (string, required) A json object with addresses and amounts
{
"address":amount (numeric or string) The bitcoin address is the key, the numeric amount (can be string) in BTC is the value
,...
}
3. minconf (numeric, optional, default=1) Only use the balance confirmed at least this many times.
4. "comment" (string, optional) A comment
5. subtractfeefrom (array, optional) A json array with addresses.
The fee will be equally deducted from the amount of each selected address.
Those recipients will receive less bitcoins than you enter in their corresponding amount field.
If no addresses are specified here, the sender pays the fee.
[
"address" (string) Subtract fee from this address
,...
]
6. replaceable (boolean, optional) Allow this transaction to be replaced by a transaction with higher fees via BIP 125
7. conf_target (numeric, optional) Confirmation target (in blocks)
8. "estimate_mode" (string, optional, default=UNSET) The fee estimate mode, must be one of:
"UNSET"
"ECONOMICAL"
"CONSERVATIVE"
Result:
"txid" (string) The transaction id for the send. Only 1 transaction is created regardless of
the number of addresses.
|
sendmany
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/BTC/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/BTC/explorer.py
|
MIT
|
def sendtoaddress(self, address, amount, comment=None, comment_to=None, subtractfeefromamount=False, replaceable=None, estimate_mode="UNSET", conf_target=None):
    '''
    http://chainquery.com/bitcoin-api/sendtoaddress
    NOT TESTED
    Send an amount to a given address.
    Requires wallet passphrase to be set with walletpassphrase call.
    Arguments:
    1. "address" (string, required) The bitcoin address to send to.
    2. "amount" (numeric or string, required) The amount in BTC to send. eg 0.1
    3. "comment" (string, optional) A comment used to store what the transaction is for.
       This is not part of the transaction, just kept in your wallet.
    4. "comment_to" (string, optional) A comment to store the name of the person or organization
       to which you're sending the transaction. This is not part of the
       transaction, just kept in your wallet.
    5. subtractfeefromamount (boolean, optional, default=false) The fee will be deducted from the amount being sent.
       The recipient will receive less bitcoins than you enter in the amount field.
    6. replaceable (boolean, optional) Allow this transaction to be replaced by a transaction with higher fees via BIP 125
    7. conf_target (numeric, optional) Confirmation target (in blocks). Newly exposed,
       keyword-only in practice (appended after estimate_mode for backward compatibility).
    8. "estimate_mode" (string, optional, default=UNSET) One of "UNSET", "ECONOMICAL", "CONSERVATIVE"
    Result:
    "txid" (string) The transaction id.
    '''
    # The RPC takes positional arguments; estimate_mode is always sent, so
    # every earlier optional slot must stay occupied.
    # BUG FIX: previously estimate_mode was appended directly after
    # replaceable, landing in conf_target's positional slot, and skipped
    # comment/comment_to shifted all later arguments.
    # JSON null is sent for replaceable/conf_target so the node applies its
    # own defaults (accepted by modern Bitcoin Core; confirm on old nodes).
    param = [address,
             amount,
             comment if comment is not None else "",
             comment_to if comment_to is not None else "",
             subtractfeefromamount,
             replaceable,
             conf_target,
             estimate_mode]
    return self.call('sendtoaddress', param)
|
http://chainquery.com/bitcoin-api/sendtoaddress
NOT TESTED
Send an amount to a given address.
Requires wallet passphrase to be set with walletpassphrase call.
Arguments:
1. "address" (string, required) The bitcoin address to send to.
2. "amount" (numeric or string, required) The amount in BTC to send. eg 0.1
3. "comment" (string, optional) A comment used to store what the transaction is for.
This is not part of the transaction, just kept in your wallet.
4. "comment_to" (string, optional) A comment to store the name of the person or organization
to which you're sending the transaction. This is not part of the
transaction, just kept in your wallet.
5. subtractfeefromamount (boolean, optional, default=false) The fee will be deducted from the amount being sent.
The recipient will receive less bitcoins than you enter in the amount field.
6. replaceable (boolean, optional) Allow this transaction to be replaced by a transaction with higher fees via BIP 125
7. conf_target (numeric, optional) Confirmation target (in blocks)
8. "estimate_mode" (string, optional, default=UNSET) The fee estimate mode, must be one of:
"UNSET"
"ECONOMICAL"
"CONSERVATIVE"
Result:
"txid" (string) The transaction id.
|
sendtoaddress
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/BTC/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/BTC/explorer.py
|
MIT
|
def signrawtransaction(self, hexstring, prevtxs=None, privkeys=None, sighashtype="ALL"):
    '''
    http://chainquery.com/bitcoin-api/signrawtransaction
    NOT TESTED
    Sign inputs for raw transaction (serialized, hex-encoded).
    The second optional argument (may be null) is an array of previous transaction outputs that
    this transaction depends on but may not yet be in the block chain.
    The third optional argument (may be null) is an array of base58-encoded private
    keys that, if given, will be the only keys used to sign the transaction.
    Requires wallet passphrase to be set with walletpassphrase call.
    Arguments:
    1. "hexstring" (string, required) The transaction hex string
    2. "prevtxs" (string, optional) A json array of previous dependent transaction outputs,
       each object holding "txid", "vout", "scriptPubKey", "redeemScript" (P2SH/P2WSH), "amount".
    3. "privkeys" (string, optional) A json array of base58-encoded private keys for signing
    4. "sighashtype" (string, optional, default=ALL) One of
       "ALL", "NONE", "SINGLE", "ALL|ANYONECANPAY", "NONE|ANYONECANPAY", "SINGLE|ANYONECANPAY"
    Result:
    {
      "hex" : "value",         (string) The hex-encoded raw transaction with signature(s)
      "complete" : true|false, (boolean) If the transaction has a complete set of signatures
      "errors" : [ ... ]       (json array) Script verification errors (if any)
    }
    '''
    # The RPC takes positional arguments and explicitly accepts null for the
    # optional arrays (see docstring), so always send all four slots.
    # BUG FIX: previously a privkeys/sighashtype given while prevtxs was None
    # shifted into the wrong positional slot.
    param = [hexstring, prevtxs, privkeys, sighashtype]
    return self.call('signrawtransaction', param)
|
http://chainquery.com/bitcoin-api/signrawtransaction
NOT TESTED
Sign inputs for raw transaction (serialized, hex-encoded).
The second optional argument (may be null) is an array of previous transaction outputs that
this transaction depends on but may not yet be in the block chain.
The third optional argument (may be null) is an array of base58-encoded private
keys that, if given, will be the only keys used to sign the transaction.
Requires wallet passphrase to be set with walletpassphrase call.
Arguments:
1. "hexstring" (string, required) The transaction hex string
2. "prevtxs" (string, optional) A json array of previous dependent transaction outputs
[ (json array of json objects, or 'null' if none provided)
{
"txid":"id", (string, required) The transaction id
"vout":n, (numeric, required) The output number
"scriptPubKey": "hex", (string, required) script key
"redeemScript": "hex", (string, required for P2SH or P2WSH) redeem script
"amount": value (numeric, required) The amount spent
}
,...
]
3. "privkeys" (string, optional) A json array of base58-encoded private keys for signing
[ (json array of strings, or 'null' if none provided)
"privatekey" (string) private key in base58-encoding
,...
]
4. "sighashtype" (string, optional, default=ALL) The signature hash type. Must be one of
"ALL"
"NONE"
"SINGLE"
"ALL|ANYONECANPAY"
"NONE|ANYONECANPAY"
"SINGLE|ANYONECANPAY"
Result:
{
"hex" : "value", (string) The hex-encoded raw transaction with signature(s)
"complete" : true|false, (boolean) If the transaction has a complete set of signatures
"errors" : [ (json array of objects) Script verification errors (if there are any)
{
"txid" : "hash", (string) The hash of the referenced, previous transaction
"vout" : n, (numeric) The index of the output to spent and used as input
"scriptSig" : "hex", (string) The hex-encoded signature script
"sequence" : n, (numeric) Script sequence number
"error" : "text" (string) Verification or signing error related to the input
}
,...
]
}
|
signrawtransaction
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/BTC/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/BTC/explorer.py
|
MIT
|
def get_block(self, block_num_or_id):
    '''Fetch information about a single block, by number or id.
    TESTED
    '''
    return self.call('get_block', {'block_num_or_id': block_num_or_id})
|
Get information related to a block.
TESTED
|
get_block
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/EOS/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/EOS/explorer.py
|
MIT
|
def get_account(self, account_name):
    '''Fetch information about a single account.
    TESTED
    '''
    return self.call('get_account', {'account_name': account_name})
|
Get information related to an account.
TESTED
|
get_account
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/EOS/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/EOS/explorer.py
|
MIT
|
def get_table_rows(self, scope, code, table, json=False, lower_bound=None, upper_bound=None, limit=None):
    '''Fetch smart contract data from an account.
    NOT TESTED
    :param scope: account the data belongs to
    :param code: account the contract is deployed on
    :param table: table name to read
    :param json: return rows as json instead of binary
    :param lower_bound: (optional) first key to return; 0 is a valid bound
    :param upper_bound: (optional) last key to return
    :param limit: (optional) maximum number of rows
    '''
    data = {'scope': scope,
            'code': code,
            'table': table,
            'json': json}
    # BUG FIX: use explicit None checks — 0 is a legitimate bound/limit
    # value and was previously dropped by plain truthiness tests.
    if lower_bound is not None:
        data['lower_bound'] = lower_bound
    if upper_bound is not None:
        data['upper_bound'] = upper_bound
    if limit is not None:
        data['limit'] = limit
    return self.call('get_table_rows', data)
|
Fetch smart contract data from an account.
NOT TESTED
|
get_table_rows
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/EOS/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/EOS/explorer.py
|
MIT
|
def abi_json_to_bin(self, code, action, args):
    '''Serialize json to binary hex. The resulting binary hex is usually used for the data field in push_transaction.
    NOT TESTED
    '''
    data = {'code': code,
            'action': action,
            'args': args}
    # BUG FIX: removed a stray debug print() that leaked every request
    # payload to stdout.
    return self.call('abi_json_to_bin', data)
|
Serialize json to binary hex. The resulting binary hex is usually used for the data field in push_transaction.
NOT TESTED
|
abi_json_to_bin
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/EOS/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/EOS/explorer.py
|
MIT
|
def abi_bin_to_json(self, code, action, binargs):
    '''Deserialize binary hex back to json.
    NOT TESTED
    '''
    payload = {'code': code, 'action': action, 'binargs': binargs}
    return self.call('abi_bin_to_json', payload)
|
Serialize back binary hex to json.
NOT TESTED
|
abi_bin_to_json
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/EOS/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/EOS/explorer.py
|
MIT
|
def get_required_keys(self, transaction):
    '''Get required keys to sign a transaction from list of your keys.
    NOT TESTED
    '''
    return self.call('get_required_keys', {'transaction': transaction})
|
Get required keys to sign a transaction from list of your keys.
NOT TESTED
|
get_required_keys
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/EOS/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/EOS/explorer.py
|
MIT
|
def decode_tx(self, transaction_id):
    """Return the node's data for the given transaction.

    Currently a thin wrapper around eth_getTransactionByHash; richer
    decoding is still TODO.
    """
    return self.eth_getTransactionByHash(transaction_id)
|
Return dict with important information about
the given transaction
|
decode_tx
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/ETH/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/ETH/explorer.py
|
MIT
|
def create_contract(self, from_, code, gas, sig=None, args=None):
    """
    Create a contract on the blockchain from compiled EVM code. Returns the
    transaction hash.

    Not implemented yet; the commented sketch below shows the intended flow.
    """
    # Intended implementation (kept for reference):
    # from_ = from_ or self.eth_coinbase()
    # if sig is not None and args is not None:
    #     types = sig[sig.find('(') + 1: sig.find(')')].split(',')
    #     encoded_params = encode_abi(types, args)
    #     code += encoded_params.encode('hex')
    # return self.eth_sendTransaction(from_address=from_, gas=gas, data=code)
    # BUG FIX: previously *returned* a NotImplementedError instance, which
    # callers would treat as a truthy result; raise it instead.
    raise NotImplementedError()
|
Create a contract on the blockchain from compiled EVM code. Returns the
transaction hash.
|
create_contract
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/ETH/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/ETH/explorer.py
|
MIT
|
def call_without_transaction(self, address, sig, args, result_types):
    """
    Call a contract function on the RPC server, without sending a
    transaction (useful for reading data).

    Not implemented yet; the commented sketch below shows the intended flow.
    """
    # Intended implementation (kept for reference):
    # data = self._encode_function(sig, args)
    # data_hex = data.encode('hex')
    # response = self.eth_call(to_address=address, data=data_hex)
    # return decode_abi(result_types, response[2:].decode('hex'))
    # BUG FIX: previously *returned* a NotImplementedError instance, which
    # callers would treat as a truthy result; raise it instead.
    raise NotImplementedError()
|
Call a contract function on the RPC server, without sending a
transaction (useful for reading data)
|
call_without_transaction
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/ETH/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/ETH/explorer.py
|
MIT
|
def call_with_transaction(self, from_, address, sig, args, gas=None, gas_price=None, value=None):
    """
    Call a contract function by sending a transaction (useful for storing
    data).

    Not implemented yet; the commented sketch below shows the intended flow.
    """
    # Intended implementation (kept for reference):
    # gas = gas or DEFAULT_GAS_PER_TX
    # gas_price = gas_price or DEFAULT_GAS_PRICE
    # data = self._encode_function(sig, args)
    # data_hex = data.encode('hex')
    # return self.eth_sendTransaction(from_address=from_, to_address=address, data=data_hex, gas=gas,
    #                                 gas_price=gas_price, value=value)
    # BUG FIX: previously *returned* a NotImplementedError instance, which
    # callers would treat as a truthy result; raise it instead.
    raise NotImplementedError()
|
Call a contract function by sending a transaction (useful for storing
data)
|
call_with_transaction
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/ETH/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/ETH/explorer.py
|
MIT
|
def web3_sha3(self, data):
    """ Return the Keccak-256 hash (not the standardized SHA3-256) of the given data.
    :param data: the data to hash
    :type data: hex string
    :return: The Keccak-256 result of the given string.
    :rtype: hex string
    :Example:
    >>> explorer = EthereumExplorerRPC()
    >>> explorer.web3_sha3('0x' + b'hello world'.hex())
    '0x47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad'
    .. seealso::
        https://github.com/ethereum/wiki/wiki/JSON-RPC#web3_sha3
    .. todo::
        TESTED
    """
    return self.call('web3_sha3', [data])
|
Returns Keccak-256 (not the standardized SHA3-256) of the given data.
:param data: the data to convert into a SHA3 hash
:type data: hex string
:return: The SHA3 result of the given string.
:rtype: hex string
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.web3_sha3('0x' + b'hello world'.hex())
'0x47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad'
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#web3_sha3
.. todo::
TESTED
|
web3_sha3
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/ETH/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/ETH/explorer.py
|
MIT
|
def eth_getBalance(self, address=None, block=BLOCK_TAG_LATEST):
    """ Return the balance of the account of given address.
    :param address: 20 Bytes - address to check for balance.
    :type address: str
    :param block: (optional) integer block number, or the string "latest", "earliest" or "pending"
    :type block: int or str
    :return: integer of the current balance in wei.
    :rtype: int
    :Example:
    >>> explorer = EthereumExplorerRPC()
    >>> explorer.eth_getBalance("0x956b6B7454884b734B29A8115F045a95179ea00C")
    17410594678300000000
    .. seealso::
        https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getbalance
    .. todo::
        TESTED
    """
    # Fall back to the node's coinbase account when no (or an empty)
    # address is given — preserves the original truthiness semantics.
    target = address or self.eth_coinbase()
    tag = validate_block(block)
    balance = hex_to_dec(self.call('eth_getBalance', [target, tag]))
    # Normalise a falsy decode result (e.g. None) to 0 wei.
    return balance if balance else 0
|
Returns the balance of the account of given address.
:param address: 20 Bytes - address to check for balance.
:type address: str
:param block: (optional) integer block number, or the string "latest", "earliest" or "pending"
:type block: int or str
:return: integer of the current balance in wei.
:rtype: int
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.eth_getBalance("0x956b6B7454884b734B29A8115F045a95179ea00C")
17410594678300000000
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getbalance
.. todo::
TESTED
|
eth_getBalance
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/ETH/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/ETH/explorer.py
|
MIT
|
def eth_getStorageAt(self, address=None, position=0, block=BLOCK_TAG_LATEST):
    """ Returns the value from a storage position at a given address.

    :param address: 20 Bytes - address of the storage.
    :type address: str
    :param position: (optional) integer of the position in the storage. default is 0
    :type position: int
    :param block: (optional) integer block number, or the string "latest", "earliest" or "pending"
    :type block: int or str
    :return: the value at this storage position.
    :rtype: str

    :Example:

    >>> explorer = EthereumExplorerRPC()
    >>> explorer.eth_getStorageAt("0x295a70b2de5e3953354a6a8344e616ed314d7251", 0, "latest")
    '0x0000000000000000000000000000000000000000000000000000000000000000'

    .. seealso::
        https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getstorageat
    .. todo::
        TESTED
    """
    block = validate_block(block)
    # position is sent hex-encoded, as required by the JSON-RPC spec
    return self.call('eth_getStorageAt', [address, hex(position), block])
|
Returns the value from a storage position at a given address.
:param address: 20 Bytes - address to check for balance.
:type address: str
:param address: (optionnal) integer of the position in the storage. default is 0
:type address: int
:param block: (optionnal) integer block number, or the string "latest", "earliest" or "pending"
:type block: int or str
:return: the value at this storage position.
:rtype: str
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.eth_getStorageAt("0x295a70b2de5e3953354a6a8344e616ed314d7251", 0, "latest")
'0x0000000000000000000000000000000000000000000000000000000000000000'
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getstorageat
.. todo::
TESTED
|
eth_getStorageAt
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/ETH/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/ETH/explorer.py
|
MIT
|
def eth_getTransactionCount(self, address, block=BLOCK_TAG_LATEST):
    """ Return the number of transactions sent from an address (its nonce).

    :param address: 20 Bytes - address.
    :type address: str
    :param block: (optional) integer block number, or the string "latest", "earliest" or "pending"
    :type block: int or str
    :return: number of transactions sent from this address.
    :rtype: int

    :Example:

    >>> explorer = EthereumExplorerRPC()
    >>> explorer.eth_getTransactionCount("0x956b6B7454884b734B29A8115F045a95179ea00C")
    12891

    .. seealso::
        https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_gettransactioncount
    .. todo::
        TESTED
    """
    raw = self.call('eth_getTransactionCount',
                    [address, validate_block(block)])
    return hex_to_dec(raw)
|
Returns the number of transactions sent from an address.
:param address: 20 Bytes - address.
:type address: str
:param block: (optionnal) integer block number, or the string "latest", "earliest" or "pending"
:type block: int or str
:return: integer of the number of transactions send from this address.
:rtype: int
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.eth_getTransactionCount("0x956b6B7454884b734B29A8115F045a95179ea00C")
12891
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_gettransactioncount
.. todo::
TESTED
|
eth_getTransactionCount
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/ETH/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/ETH/explorer.py
|
MIT
|
def eth_getBlockTransactionCountByNumber(self, block=BLOCK_TAG_LATEST):
    """ Return the number of transactions in the block with the given number.

    :param block: (optional) integer block number, or the string "latest", "earliest" or "pending"
    :type block: int or str
    :return: number of transactions in this block.
    :rtype: int

    :Example:

    >>> explorer = EthereumExplorerRPC()
    >>> explorer.eth_getBlockTransactionCountByNumber(5100196)
    69

    .. seealso::
        https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getblocktransactioncountbynumber
    .. todo::
        TESTED
    """
    raw = self.call('eth_getBlockTransactionCountByNumber',
                    [validate_block(block)])
    return hex_to_dec(raw)
|
Returns the number of transactions in a block matching the given block number.
:param block: (optionnal) integer block number, or the string "latest", "earliest" or "pending"
:type block: int or str
:return: integer of the number of transactions in this block.
:rtype: int
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.eth_getBlockTransactionCountByNumber(5100196)
69
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getblocktransactioncountbynumber
.. todo::
TESTED
|
eth_getBlockTransactionCountByNumber
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/ETH/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/ETH/explorer.py
|
MIT
|
def eth_getUncleCountByBlockNumber(self, block=BLOCK_TAG_LATEST):
    """ Return the number of uncles in the block with the given number.

    :param block: (optional) integer block number, or the string "latest", "earliest" or "pending"
    :type block: int or str
    :return: number of uncles in this block.
    :rtype: int

    :Example:

    >>> explorer = EthereumExplorerRPC()
    >>> explorer.eth_getUncleCountByBlockNumber(5100196)
    0

    .. seealso::
        https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getunclecountbyblocknumber
    .. todo::
        TESTED
    """
    raw = self.call('eth_getUncleCountByBlockNumber',
                    [validate_block(block)])
    return hex_to_dec(raw)
|
Returns the number of uncles in a block from a block matching the given block number.
:param block: (optionnal) integer block number, or the string "latest", "earliest" or "pending"
:type block: int or str
:return: integer of the number of uncles in this block.
:rtype: int
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.eth_getUncleCountByBlockNumber(5100196)
0
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getunclecountbyblocknumber
.. todo::
TESTED
|
eth_getUncleCountByBlockNumber
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/ETH/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/ETH/explorer.py
|
MIT
|
def eth_getCode(self, address, default_block=BLOCK_TAG_LATEST):
    """ Return the EVM bytecode deployed at the given address.

    :param address: 20 Bytes - contract address.
    :type address: str
    :param default_block: (optional) integer block number, or the string "latest", "earliest" or "pending"
    :type default_block: int or str
    :return: the code at the given address.
    :rtype: hex str

    :Example:

    >>> explorer = EthereumExplorerRPC()
    >>> explorer.eth_getCode("0xBB9bc244D798123fDe783fCc1C72d3Bb8C189413")
    '0x6060604052361561020e5760e060020a6000350463013cf08b[...]62f93160ef3e563'

    .. seealso::
        https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getcode
    .. todo::
        TESTED
    """
    return self.call('eth_getCode',
                     [address, validate_block(default_block)])
|
Returns code at a given address.
:param address: 20 Bytes - address.
:type address: str
:param block: (optionnal) integer block number, or the string "latest", "earliest" or "pending"
:type block: int or str
:return: the code from the given address.
:rtype: hex str
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.eth_getCode("0xBB9bc244D798123fDe783fCc1C72d3Bb8C189413")
'0x6060604052361561020e5760e060020a6000350463013cf08b[...]62f93160ef3e563'
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getcode
.. todo::
TESTED
|
eth_getCode
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/ETH/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/ETH/explorer.py
|
MIT
|
def eth_sendTransaction(self, to_address=None, from_address=None, gas=None, gas_price=None, value=None, data=None,
                        nonce=None):
    """ Create and send a new transaction (or contract creation when no
    ``to_address`` is given).

    Only the parameters that are not None are forwarded to the node;
    ``from`` defaults to the coinbase account.

    .. seealso::
        https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_sendtransaction

    NEEDS TESTING
    """
    params = {'from': from_address or self.eth_coinbase()}
    # (json key, raw value, encoder or None for pass-through)
    fields = (('to', to_address, None),
              ('gas', gas, hex),
              ('gasPrice', gas_price, clean_hex),
              ('value', value, clean_hex),
              ('data', data, None),
              ('nonce', nonce, hex))
    for key, raw, encode in fields:
        if raw is not None:
            params[key] = encode(raw) if encode else raw
    return self.call('eth_sendTransaction', [params])
| ERROR: type should be string, got "\n https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_sendtransaction\n\n NEEDS TESTING\n " |
eth_sendTransaction
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/ETH/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/ETH/explorer.py
|
MIT
|
def eth_call(self, to_address, from_address=None, gas=None, gas_price=None, value=None, data=None,
             default_block=BLOCK_TAG_LATEST):
    """ Execute a message call immediately without creating a transaction
    on the blockchain.

    Only the parameters that are not None are forwarded to the node.

    .. seealso::
        https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_call

    NEEDS TESTING
    """
    default_block = validate_block(default_block)
    obj = {'to': to_address}
    # (json key, raw value, encoder or None for pass-through)
    for key, raw, encode in (('from', from_address, None),
                             ('gas', gas, hex),
                             ('gasPrice', gas_price, clean_hex),
                             ('value', value, None),
                             ('data', data, None)):
        if raw is not None:
            obj[key] = encode(raw) if encode else raw
    return self.call('eth_call', [obj, default_block])
| ERROR: type should be string, got "\n https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_call\n\n NEEDS TESTING\n " |
eth_call
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/ETH/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/ETH/explorer.py
|
MIT
|
def eth_estimateGas(self, to_address=None, from_address=None, gas=None, gas_price=None, value=None, data=None,
                    default_block=BLOCK_TAG_LATEST):
    """ Estimate how much gas the given call/transaction would consume,
    without mining it into the blockchain.

    :param to_address: (optional) 20 Bytes - destination address.
    :param from_address: (optional) 20 Bytes - sender address.
    :param gas: (optional) integer gas limit for the simulation.
    :param gas_price: (optional) integer gas price in wei.
    :param value: (optional) integer value sent along with the call.
    :param data: (optional) hex string of call data.
    :param default_block: (optional) integer block number, or the string
        "latest", "earliest" or "pending".
    :return: the amount of gas used.
    :rtype: int

    .. seealso::
        https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_estimategas

    NEEDS TESTING
    """
    # Bug fix: the original checked `isinstance(default_block, basestring)`,
    # which raises NameError on Python 3. Delegate to validate_block() like
    # the sibling methods (it also converts int block numbers to hex).
    default_block = validate_block(default_block)
    obj = {}
    if to_address is not None:
        obj['to'] = to_address
    if from_address is not None:
        obj['from'] = from_address
    if gas is not None:
        obj['gas'] = hex(gas)
    if gas_price is not None:
        obj['gasPrice'] = clean_hex(gas_price)
    if value is not None:
        obj['value'] = value
    if data is not None:
        obj['data'] = data
    return hex_to_dec(self.call('eth_estimateGas', [obj, default_block]))
| ERROR: type should be string, got "\n https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_estimategas\n\n NEEDS TESTING\n " |
eth_estimateGas
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/ETH/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/ETH/explorer.py
|
MIT
|
def eth_getTransactionByBlockNumberAndIndex(self, block=BLOCK_TAG_LATEST, index=0):
    """ Return information about a transaction, looked up by block number
    and transaction index position.

    :param block: (optional) integer block number, or the string "latest", "earliest" or "pending"
    :type block: int or str
    :param index: (optional) integer of the transaction index position.
    :type index: int
    :return: a transaction object, or null when no transaction was found.
    :rtype: dict

    :Example:

    >>> explorer = EthereumExplorerRPC()
    >>> explorer.eth_getTransactionByBlockNumberAndIndex(5100196, 1)
    {'blockHash': '0x98a548cbd0cd385f46c9bf28c16bc36dc6ec27207617e236f527716e617ae91b',
     'blockNumber': '0x4dd2a4',
     'from': '0xb01cb49fe0d6d6e47edf3a072d15dfe73155331c',
     'gas': '0x5208',
     ...
     'value': '0x1bc16d674ec80000'}

    .. seealso::
        :method:`eth_getTransactionByHash`
        https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_gettransactionbyblocknumberandindex
    .. todo::
        TESTED
    """
    return self.call('eth_getTransactionByBlockNumberAndIndex',
                     [validate_block(block), hex(index)])
|
Returns information about a transaction by block number and transaction index position.
:param block: (optionnal) integer block number, or the string "latest", "earliest" or "pending"
:type block: int or str
:param index: (optionnal) integer of the transaction index position.
:type index: int
:return: A transaction object, or null when no transaction was found
:rtype: dict
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.eth_getTransactionByBlockNumberAndIndex(5100196, 1)
{'blockHash': '0x98a548cbd0cd385f46c9bf28c16bc36dc6ec27207617e236f527716e617ae91b',
'blockNumber': '0x4dd2a4',
'from': '0xb01cb49fe0d6d6e47edf3a072d15dfe73155331c',
'gas': '0x5208',
'gasPrice': '0xe33e22200',
'hash': '0xf02ffa405bae96e62a9e36fbd781362ca378ec62353d5e2bd0585868d3deaf61',
'input': '0x',
'nonce': '0x1908f',
'r': '0xcad900a5060ba9bb646a7f6965f98e945d71a19b3e30ff53d03b9797c6153d07',
's': '0x53b11a48758fc383df878a9b5468c83b033f5036b124b16dbb0a5167aee7fc4f',
'to': '0x26cd018553871f2e887986bc24c68a0ce622bb8f',
'transactionIndex': '0x1',
'v': '0x25',
'value': '0x1bc16d674ec80000'}
.. seealso::
:method:`eth_getTransactionByHash`
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_gettransactionbyblocknumberandindex
.. todo::
TESTED
|
eth_getTransactionByBlockNumberAndIndex
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/ETH/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/ETH/explorer.py
|
MIT
|
def eth_getUncleByBlockNumberAndIndex(self, block=BLOCK_TAG_LATEST, index=0):
    """ Return information about an uncle of a block, looked up by block
    number and uncle index position.

    :param block: (optional) integer block number, or the string "latest", "earliest" or "pending"
    :type block: int or str
    :param index: (optional) the uncle's index position.
    :type index: int
    :return: a block object, or null when no block was found.
    :rtype: dict

    .. note::
        An uncle doesn't contain individual transactions.

    .. seealso::
        https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getunclebyblocknumberandindex
    .. todo::
        NOT TESTED
    """
    return self.call('eth_getUncleByBlockNumberAndIndex',
                     [validate_block(block), hex(index)])
|
Returns information about a uncle of a block by number and uncle index position.
:param block: (optionnal) integer block number, or the string "latest", "earliest" or "pending"
:type block: int or str
:param index: (optionnal) the uncle's index position.
:type index: int
:return: A block object, or null when no block was found
:rtype: dict
.. note::
An uncle doesn't contain individual transactions.
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getunclebyblocknumberandindex
.. todo::
NOT TESTED
|
eth_getUncleByBlockNumberAndIndex
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/ETH/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/ETH/explorer.py
|
MIT
|
def eth_newFilter(self, from_block=BLOCK_TAG_LATEST, to_block=BLOCK_TAG_LATEST, address=None, topics=None):
    """ Create a new log filter on the node and return its id.

    .. seealso::
        https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_newfilter

    NEEDS TESTING
    """
    filter_object = {}
    filter_object['fromBlock'] = from_block
    filter_object['toBlock'] = to_block
    filter_object['address'] = address
    filter_object['topics'] = topics
    return self.call('eth_newFilter', [filter_object])
| ERROR: type should be string, got "\n https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_newfilter\n\n NEEDS TESTING\n " |
eth_newFilter
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/ETH/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/ETH/explorer.py
|
MIT
|
def db_putString(self, db_name, key, value):
    """ Store a string in the node's local database (deprecated RPC).

    .. seealso::
        https://github.com/ethereum/wiki/wiki/JSON-RPC#db_putstring

    TESTED
    """
    # this RPC endpoint was deprecated upstream
    warnings.warn('deprecated', DeprecationWarning)
    params = [db_name, key, value]
    return self.call('db_putString', params)
| ERROR: type should be string, got "\n https://github.com/ethereum/wiki/wiki/JSON-RPC#db_putstring\n\n TESTED\n " |
db_putString
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/ETH/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/ETH/explorer.py
|
MIT
|
def db_getString(self, db_name, key):
    """ Read a string from the node's local database (deprecated RPC).

    .. seealso::
        https://github.com/ethereum/wiki/wiki/JSON-RPC#db_getstring

    TESTED
    """
    # this RPC endpoint was deprecated upstream
    warnings.warn('deprecated', DeprecationWarning)
    params = [db_name, key]
    return self.call('db_getString', params)
| ERROR: type should be string, got "\n https://github.com/ethereum/wiki/wiki/JSON-RPC#db_getstring\n\n TESTED\n " |
db_getString
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/ETH/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/ETH/explorer.py
|
MIT
|
def db_putHex(self, db_name, key, value):
    """ Store binary data (hex string) in the node's local database
    (deprecated RPC). A '0x' prefix is added to *value* if missing.

    .. seealso::
        https://github.com/ethereum/wiki/wiki/JSON-RPC#db_puthex

    TESTED
    """
    prefixed = value if value.startswith('0x') else '0x{}'.format(value)
    # this RPC endpoint was deprecated upstream
    warnings.warn('deprecated', DeprecationWarning)
    return self.call('db_putHex', [db_name, key, prefixed])
| ERROR: type should be string, got "\n https://github.com/ethereum/wiki/wiki/JSON-RPC#db_puthex\n\n TESTED\n " |
db_putHex
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/ETH/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/ETH/explorer.py
|
MIT
|
def db_getHex(self, db_name, key):
    """ Read binary data (hex string) from the node's local database
    (deprecated RPC).

    .. seealso::
        https://github.com/ethereum/wiki/wiki/JSON-RPC#db_gethex

    TESTED
    """
    # this RPC endpoint was deprecated upstream
    warnings.warn('deprecated', DeprecationWarning)
    params = [db_name, key]
    return self.call('db_getHex', params)
| ERROR: type should be string, got "\n https://github.com/ethereum/wiki/wiki/JSON-RPC#db_gethex\n\n TESTED\n " |
db_getHex
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/ETH/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/ETH/explorer.py
|
MIT
|
def shh_post(self, topics, payload, priority, ttl, from_=None, to=None):
    """ Send a Whisper message.

    ``priority`` and ``ttl`` are integers and are hex-encoded before
    being forwarded to the node.

    .. seealso::
        https://github.com/ethereum/wiki/wiki/JSON-RPC#shh_post

    NEEDS TESTING
    """
    whisper_object = {}
    whisper_object['from'] = from_
    whisper_object['to'] = to
    whisper_object['topics'] = topics
    whisper_object['payload'] = payload
    whisper_object['priority'] = hex(priority)
    whisper_object['ttl'] = hex(ttl)
    return self.call('shh_post', [whisper_object])
| ERROR: type should be string, got "\n https://github.com/ethereum/wiki/wiki/JSON-RPC#shh_post\n\n NEEDS TESTING\n " |
shh_post
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/ETH/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/ETH/explorer.py
|
MIT
|
def shh_newFilter(self, to, topics):
    """ Create a new Whisper filter on the node and return its id.

    .. seealso::
        https://github.com/ethereum/wiki/wiki/JSON-RPC#shh_newfilter

    NEEDS TESTING
    """
    filter_object = {}
    filter_object['to'] = to
    filter_object['topics'] = topics
    return self.call('shh_newFilter', [filter_object])
| ERROR: type should be string, got "\n https://github.com/ethereum/wiki/wiki/JSON-RPC#shh_newfilter\n\n NEEDS TESTING\n " |
shh_newFilter
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/ETH/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/ETH/explorer.py
|
MIT
|
def trace_filter(self, from_block=None, to_block=None, from_addresses=None, to_addresses=None):
    """ Return traces matching the given filter (Parity trace module).

    Address arguments may be a single address or a list; single values
    are wrapped in a list before being sent.

    .. seealso::
        https://github.com/ethcore/parity/wiki/JSONRPC-trace-module#trace_filter

    TESTED
    """
    params = {}
    if from_block is not None:
        params['fromBlock'] = validate_block(from_block)
    if to_block is not None:
        params['toBlock'] = validate_block(to_block)
    if from_addresses is not None:
        if not isinstance(from_addresses, list):
            from_addresses = [from_addresses]
        params['fromAddress'] = from_addresses
    if to_addresses is not None:
        if not isinstance(to_addresses, list):
            to_addresses = [to_addresses]
        params['toAddress'] = to_addresses
    return self.call('trace_filter', [params])
| ERROR: type should be string, got "\n https://github.com/ethcore/parity/wiki/JSONRPC-trace-module#trace_filter\n\n TESTED\n " |
trace_filter
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/ETH/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/ETH/explorer.py
|
MIT
|
def trace_get(self, tx_hash, positions):
    """ Return the trace at the given position(s) of a transaction
    (Parity trace module). *positions* may be a single index or a list.

    .. seealso::
        https://wiki.parity.io/JSONRPC
        https://github.com/ethcore/parity/wiki/JSONRPC-trace-module#trace_get

    NEEDS TESTING
    """
    position_list = positions if isinstance(positions, list) else [positions]
    return self.call('trace_get', [tx_hash, position_list])
| ERROR: type should be string, got "\n https://wiki.parity.io/JSONRPC\n https://github.com/ethcore/parity/wiki/JSONRPC-trace-module#trace_get\n\n NEEDS TESTING\n " |
trace_get
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/ETH/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/ETH/explorer.py
|
MIT
|
def trace_block(self, block=BLOCK_TAG_LATEST):
    """ Return traces created at the given block (Parity trace module).

    .. seealso::
        https://wiki.parity.io/JSONRPC
        https://github.com/ethcore/parity/wiki/JSONRPC-trace-module#trace_block

    NEEDS TESTING
    """
    return self.call('trace_block', [validate_block(block)])
| ERROR: type should be string, got "\n https://wiki.parity.io/JSONRPC\n https://github.com/ethcore/parity/wiki/JSONRPC-trace-module#trace_block\n\n NEEDS TESTING\n " |
trace_block
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/ETH/explorer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/ETH/explorer.py
|
MIT
|
def ssa_sha3_instruction(self, instr, state):
    '''Symbolic execution of the SHA3 group of opcodes: pop the two
    stack operands, record the op as a fresh SSA assignment on the
    instruction and push the instruction back on the SSA stack.'''
    arg0 = state.ssa_stack.pop()
    arg1 = state.ssa_stack.pop()
    instr.ssa = SSA(new_assignement=self.ssa_counter,
                    method_name=instr.name,
                    args=[arg0, arg1])
    state.ssa_stack.append(instr)
    self.ssa_counter += 1
|
Symbolic execution of SHA3 group of opcode
|
ssa_sha3_instruction
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/ETH/save_ssa.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/ETH/save_ssa.py
|
MIT
|
def clean_hex(d):
    '''
    Convert an integer to its hex-string representation, stripping the
    "L" suffix that Python 2 appended to long integers.

    :param d: value to convert.
    :return: hex string (e.g. '0xff'), or None if *d* is not an integer.
    '''
    try:
        # hex() raises TypeError for non-integers; catch only that
        # (the original bare `except` silently hid unrelated bugs).
        return hex(d).rstrip('L')
    except TypeError:
        return None
|
Convert decimal to hex and remove the "L" suffix that is appended to large
numbers
|
clean_hex
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/ETH/util.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/ETH/util.py
|
MIT
|
def validate_block(block):
    '''
    Validate a block tag/number for the JSON-RPC API.

    String tags must be one of BLOCK_TAGS; integer block numbers are
    converted to hex strings. Raises ValueError for an unknown tag.
    '''
    if isinstance(block, str) and block not in BLOCK_TAGS:
        raise ValueError('invalid block tag')
    return hex(block) if isinstance(block, int) else block
|
Test if the block tag is valid
|
validate_block
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/ETH/util.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/ETH/util.py
|
MIT
|
def _get_reverse_table(self):
"""Build an internal table used in the assembler."""
reverse_table = {}
for (opcode, (mnemonic, immediate_operand_size,
pops, pushes, gas, description)) in self.table.items():
reverse_table[mnemonic] = opcode, mnemonic, immediate_operand_size, \
pops, pushes, gas, description
return reverse_table
|
Build an internal table used in the assembler.
|
_get_reverse_table
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/NEO/avm.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/NEO/avm.py
|
MIT
|
def enum_blocks_edges(instructions):
    """
    Return a list of basicblock after
    statically parsing given instructions

    :param instructions: disassembled instructions in address order.
    :return: (basicblocks, edges) tuple; edges are de-duplicated.
    """
    basicblocks = list()
    edges = list()
    # offsets that are branch targets: these addresses start new blocks
    xrefs = enumerate_xref(instructions)
    # create the first block
    new_block = True
    for inst in instructions:
        if new_block:
            # open a new basic block at the current instruction
            block = BasicBlock(start_offset=inst.offset,
                               start_instr=inst,
                               name='block_%x' % inst.offset)
            new_block = False
        # add current instruction to the basicblock
        block.instructions.append(inst)
        # next instruction in xrefs list => current block ends here
        if (inst.offset_end + 1) in xrefs:
            # absolute JUMP
            if inst.is_branch_unconditional:
                edges.append(Edge(block.name, 'block_%x' % xref_of_instr(inst),
                                  EDGE_UNCONDITIONAL))
            # conditionnal JUMPI / JUMPIF / ...
            elif inst.is_branch_conditional:
                # taken branch to the xref target ...
                edges.append(Edge(block.name, 'block_%x' % xref_of_instr(inst),
                                  EDGE_CONDITIONAL_TRUE))
                # ... and the fall-through to the next instruction
                edges.append(Edge(block.name, 'block_%x' % (inst.offset_end + 1),
                                  EDGE_CONDITIONAL_FALSE))
            # Halt instruction : RETURN, STOP, RET, ... (no outgoing edge)
            elif inst.is_halt:
                pass
            # just falls to the next instruction
            else:
                edges.append(Edge(block.name, 'block_%x' % (inst.offset_end + 1),
                                  EDGE_FALLTHROUGH))
            block.end_offset = inst.offset_end
            block.end_instr = inst
            basicblocks.append(block)
            new_block = True
    # add the last block
    # NOTE(review): if the final instruction already closed its block above,
    # this appends the same block a second time — confirm whether callers
    # rely on / tolerate a duplicated last basic block.
    basicblocks.append(block)
    # edges may contain duplicates (same source/target/type); keep one each
    edges = list(set(edges))
    return (basicblocks, edges)
|
Return a list of basicblock after
statically parsing given instructions
|
enum_blocks_edges
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/NEO/cfg.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/NEO/cfg.py
|
MIT
|
def get_stats(ids, counts=None):
    """
    Count consecutive integer pairs in *ids*.
    Example: [1, 2, 3, 1, 2] -> {(1, 2): 2, (2, 3): 1, (3, 1): 1}
    If *counts* is given, accumulate into that existing dictionary.
    """
    if counts is None:
        counts = {}
    for left, right in zip(ids, ids[1:]):
        key = (left, right)
        counts[key] = counts.get(key, 0) + 1
    return counts
|
Given a list of integers, return a dictionary of counts of consecutive pairs
Example: [1, 2, 3, 1, 2] -> {(1, 2): 2, (2, 3): 1, (3, 1): 1}
Optionally allows to update an existing dictionary of counts
|
get_stats
|
python
|
karpathy/minbpe
|
minbpe/base.py
|
https://github.com/karpathy/minbpe/blob/master/minbpe/base.py
|
MIT
|
def merge(ids, pair, idx):
    """
    Replace every consecutive occurrence of *pair* in the integer list
    *ids* with the single new token *idx*.
    Example: ids=[1, 2, 3, 1, 2], pair=(1, 2), idx=4 -> [4, 3, 4]
    """
    first, second = pair
    merged = []
    i, n = 0, len(ids)
    while i < n:
        # a match needs two elements left and both halves of the pair
        if i + 1 < n and ids[i] == first and ids[i + 1] == second:
            merged.append(idx)
            i += 2
        else:
            merged.append(ids[i])
            i += 1
    return merged
|
In the list of integers (ids), replace all consecutive occurrences
of pair with the new integer token idx
Example: ids=[1, 2, 3, 1, 2], pair=(1, 2), idx=4 -> [4, 3, 4]
|
merge
|
python
|
karpathy/minbpe
|
minbpe/base.py
|
https://github.com/karpathy/minbpe/blob/master/minbpe/base.py
|
MIT
|
def load(self, model_file):
    """Load a tokenizer saved by save(); inverse of save() for the
    .model file only (the .vocab file is for human inspection)."""
    assert model_file.endswith(".model")
    merges = {}
    special_tokens = {}
    # merge ids start right after the 256 raw-byte tokens
    next_idx = 256
    with open(model_file, 'r', encoding="utf-8") as f:
        # header: version line, then the split pattern
        assert f.readline().strip() == "minbpe v1"
        self.pattern = f.readline().strip()
        # special tokens: a count line followed by "<token> <id>" lines
        for _ in range(int(f.readline().strip())):
            token, token_idx = f.readline().strip().split()
            special_tokens[token] = int(token_idx)
        # remaining lines are merge pairs, in training order
        for line in f:
            left, right = map(int, line.split())
            merges[(left, right)] = next_idx
            next_idx += 1
    self.merges = merges
    self.special_tokens = special_tokens
    self.vocab = self._build_vocab()
|
Inverse of save() but only for the model file
|
load
|
python
|
karpathy/minbpe
|
minbpe/base.py
|
https://github.com/karpathy/minbpe/blob/master/minbpe/base.py
|
MIT
|
def __init__(self, pattern=None):
    """
    - pattern: optional string overriding the default GPT-4 split pattern
    - special_tokens: str -> int dictionary of special tokens
      example: {'<|endoftext|>': 100257}
    """
    super().__init__()
    if pattern is None:
        pattern = GPT4_SPLIT_PATTERN
    self.pattern = pattern
    self.compiled_pattern = re.compile(pattern)
    self.special_tokens = {}
    self.inverse_special_tokens = {}
|
- pattern: optional string to override the default (GPT-4 split pattern)
- special_tokens: str -> int dictionary of special tokens
example: {'<|endoftext|>': 100257}
|
__init__
|
python
|
karpathy/minbpe
|
minbpe/regex.py
|
https://github.com/karpathy/minbpe/blob/master/minbpe/regex.py
|
MIT
|
def encode_ordinary(self, text):
    """Encode *text* into token ids, ignoring any special tokens."""
    # split text into regex-defined chunks, encode each chunk's raw
    # UTF-8 bytes separately, and concatenate the resulting ids
    ids = []
    for chunk in re.findall(self.compiled_pattern, text):
        ids.extend(self._encode_chunk(chunk.encode("utf-8")))
    return ids
|
Encoding that ignores any special tokens.
|
encode_ordinary
|
python
|
karpathy/minbpe
|
minbpe/regex.py
|
https://github.com/karpathy/minbpe/blob/master/minbpe/regex.py
|
MIT
|
def encode(self, text, allowed_special="none_raise"):
    """
    Encode *text* into token ids, with special-token handling.

    allowed_special: "all" | "none" | "none_raise" | a set of special tokens.
    With "none_raise" (the default, matching tiktoken) an AssertionError is
    raised if any special token occurs in *text*; any other behavior is
    either annoying, or a major footgun.
    """
    # resolve which special tokens are permitted for this call
    if allowed_special == "all":
        special = self.special_tokens
    elif allowed_special == "none":
        special = {}
    elif allowed_special == "none_raise":
        special = {}
        assert all(token not in text for token in self.special_tokens)
    elif isinstance(allowed_special, set):
        special = {k: v for k, v in self.special_tokens.items() if k in allowed_special}
    else:
        raise ValueError(f"allowed_special={allowed_special} not understood")
    # fast path: nothing special to look out for
    if not special:
        return self.encode_ordinary(text)
    # split on exact special-token matches; the capturing group keeps the
    # special tokens themselves in the split result
    special_pattern = "(" + "|".join(re.escape(k) for k in special) + ")"
    ids = []
    for part in re.split(special_pattern, text):
        if part in special:
            # a special token maps directly to its reserved id
            ids.append(special[part])
        else:
            # ordinary text is encoded with the regular BPE pipeline
            ids.extend(self.encode_ordinary(part))
    return ids
|
Unlike encode_ordinary, this function handles special tokens.
allowed_special: can be "all"|"none"|"none_raise" or a custom set of special tokens
if none_raise, then an error is raised if any special token is encountered in text
this is the default tiktoken behavior right now as well
any other behavior is either annoying, or a major footgun
|
encode
|
python
|
karpathy/minbpe
|
minbpe/regex.py
|
https://github.com/karpathy/minbpe/blob/master/minbpe/regex.py
|
MIT
|
def test_wikipedia_example(tokenizer_factory):
    """
    Follow the Wikipedia BPE example:
    https://en.wikipedia.org/wiki/Byte_pair_encoding

    Running BPE on "aaabdaaabac" for 3 merges gives "XdXac" with
    Z=aa, Y=ab, X=ZY. Since a=97, b=98, c=99, d=100 (ASCII) and new
    tokens start at 256, Z=256, Y=257, X=258, so the expected ids are
    [258, 100, 258, 97, 99]. Also round-trips decode(encode(text)).
    """
    text = "aaabdaaabac"
    tokenizer = tokenizer_factory()
    tokenizer.train(text, 256 + 3)
    assert tokenizer.encode(text) == [258, 100, 258, 97, 99]
    assert tokenizer.decode(tokenizer.encode(text)) == text
|
Quick unit test, following along the Wikipedia example:
https://en.wikipedia.org/wiki/Byte_pair_encoding
According to Wikipedia, running bpe on the input string:
"aaabdaaabac"
for 3 merges will result in string:
"XdXac"
where:
X=ZY
Y=ab
Z=aa
Keep in mind that for us a=97, b=98, c=99, d=100 (ASCII values)
so Z will be 256, Y will be 257, X will be 258.
So we expect the output list of ids to be [258, 100, 258, 97, 99]
|
test_wikipedia_example
|
python
|
karpathy/minbpe
|
tests/test_tokenizer.py
|
https://github.com/karpathy/minbpe/blob/master/tests/test_tokenizer.py
|
MIT
|
def __init__(
    self,
    img_size=224,
    patch_size=16,
    in_chans=3,
    embed_dim=768,
    depth=12,
    num_heads=12,
    mlp_ratio=4.0,
    qkv_bias=True,
    ffn_bias=True,
    proj_bias=True,
    drop_path_rate=0.0,
    drop_path_uniform=False,
    init_values=None,  # for layerscale: None or 0 => no layerscale
    embed_layer=PatchEmbed,
    act_layer=nn.GELU,
    block_fn=Block,
    ffn_layer="mlp",
    block_chunks=1,
    num_register_tokens=0,
    interpolate_antialias=False,
    interpolate_offset=0.1,
):
    """Build a DINOv2-style vision transformer.

    Args:
        img_size (int, tuple): input image size
        patch_size (int, tuple): patch size
        in_chans (int): number of input channels
        embed_dim (int): embedding dimension
        depth (int): depth of transformer
        num_heads (int): number of attention heads
        mlp_ratio (int): ratio of mlp hidden dim to embedding dim
        qkv_bias (bool): enable bias for qkv if True
        proj_bias (bool): enable bias for proj in attn if True
        ffn_bias (bool): enable bias for ffn if True
        drop_path_rate (float): stochastic depth rate
        drop_path_uniform (bool): apply uniform drop rate across blocks
        init_values (float): layer-scale init values (None or 0 disables layerscale)
        embed_layer (nn.Module): patch embedding layer
        act_layer (nn.Module): MLP activation layer
        block_fn (nn.Module): transformer block class
        ffn_layer (str): "mlp", "swiglu", "swiglufused" or "identity"
        block_chunks: (int) split block sequence into block_chunks units for FSDP wrap
        num_register_tokens: (int) number of extra cls tokens (so-called "registers")
        interpolate_antialias: (bool) flag to apply anti-aliasing when interpolating positional embeddings
        interpolate_offset: (float) work-around offset to apply when interpolating positional embeddings
    """
    super().__init__()
    norm_layer = partial(nn.LayerNorm, eps=1e-6)
    self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
    self.num_tokens = 1  # a single [CLS] token prepended to the patch sequence
    self.n_blocks = depth
    self.num_heads = num_heads
    self.patch_size = patch_size
    self.num_register_tokens = num_register_tokens
    self.interpolate_antialias = interpolate_antialias
    self.interpolate_offset = interpolate_offset
    self.patch_embed = embed_layer(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
    num_patches = self.patch_embed.num_patches
    # learnable [CLS] token and positional embeddings (patches + cls)
    self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
    self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))
    assert num_register_tokens >= 0
    # optional register tokens (extra cls-like tokens); None when disabled
    self.register_tokens = (
        nn.Parameter(torch.zeros(1, num_register_tokens, embed_dim)) if num_register_tokens else None
    )
    if drop_path_uniform is True:
        dpr = [drop_path_rate] * depth
    else:
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
    # select the feed-forward implementation by name
    if ffn_layer == "mlp":
        logger.info("using MLP layer as FFN")
        ffn_layer = Mlp
    elif ffn_layer == "swiglufused" or ffn_layer == "swiglu":
        logger.info("using SwiGLU layer as FFN")
        ffn_layer = SwiGLUFFNFused
    elif ffn_layer == "identity":
        logger.info("using Identity layer as FFN")
        def f(*args, **kwargs):
            return nn.Identity()
        ffn_layer = f
    else:
        raise NotImplementedError
    blocks_list = [
        block_fn(
            dim=embed_dim,
            num_heads=num_heads,
            mlp_ratio=mlp_ratio,
            qkv_bias=qkv_bias,
            proj_bias=proj_bias,
            ffn_bias=ffn_bias,
            drop_path=dpr[i],
            norm_layer=norm_layer,
            act_layer=act_layer,
            ffn_layer=ffn_layer,
            init_values=init_values,
        )
        for i in range(depth)
    ]
    if block_chunks > 0:
        self.chunked_blocks = True
        chunked_blocks = []
        chunksize = depth // block_chunks
        for i in range(0, depth, chunksize):
            # this is to keep the block index consistent if we chunk the block list
            chunked_blocks.append([nn.Identity()] * i + blocks_list[i: i + chunksize])
        self.blocks = nn.ModuleList([BlockChunk(p) for p in chunked_blocks])
    else:
        self.chunked_blocks = False
        self.blocks = nn.ModuleList(blocks_list)
    self.norm = norm_layer(embed_dim)
    self.head = nn.Identity()
    # token used to replace masked patch embeddings during masked-image modeling
    self.mask_token = nn.Parameter(torch.zeros(1, embed_dim))
    self.init_weights()
|
Args:
img_size (int, tuple): input image size
patch_size (int, tuple): patch size
in_chans (int): number of input channels
embed_dim (int): embedding dimension
depth (int): depth of transformer
num_heads (int): number of attention heads
mlp_ratio (int): ratio of mlp hidden dim to embedding dim
qkv_bias (bool): enable bias for qkv if True
proj_bias (bool): enable bias for proj in attn if True
ffn_bias (bool): enable bias for ffn if True
drop_path_rate (float): stochastic depth rate
drop_path_uniform (bool): apply uniform drop rate across blocks
weight_init (str): weight init scheme
init_values (float): layer-scale init values
embed_layer (nn.Module): patch embedding layer
act_layer (nn.Module): MLP activation layer
block_fn (nn.Module): transformer block class
ffn_layer (str): "mlp", "swiglu", "swiglufused" or "identity"
block_chunks: (int) split block sequence into block_chunks units for FSDP wrap
num_register_tokens: (int) number of extra cls tokens (so-called "registers")
interpolate_antialias: (str) flag to apply anti-aliasing when interpolating positional embeddings
interpolate_offset: (float) work-around offset to apply when interpolating positional embeddings
|
__init__
|
python
|
ali-vilab/VACE
|
vace/annotators/depth_anything_v2/dinov2.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/depth_anything_v2/dinov2.py
|
Apache-2.0
|
def init_weights_vit_timm(module: nn.Module, name: str = ""):
    """ViT weight initialization, matching the original timm implementation (for reproducibility)."""
    if not isinstance(module, nn.Linear):
        return
    trunc_normal_(module.weight, std=0.02)
    if module.bias is not None:
        nn.init.zeros_(module.bias)
|
ViT weight initialization, original timm impl (for reproducibility)
|
init_weights_vit_timm
|
python
|
ali-vilab/VACE
|
vace/annotators/depth_anything_v2/dinov2.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/depth_anything_v2/dinov2.py
|
Apache-2.0
|
def vit_giant2(patch_size=16, num_register_tokens=0, **kwargs):
    """Build a ViT-giant-like backbone: embed dim 1536 with 24 heads (64 dims per head)."""
    attn_block = partial(Block, attn_class=MemEffAttention)
    return DinoVisionTransformer(
        embed_dim=1536,
        depth=40,
        num_heads=24,
        mlp_ratio=4,
        patch_size=patch_size,
        num_register_tokens=num_register_tokens,
        block_fn=attn_block,
        **kwargs,
    )
|
Close to ViT-giant, with embed-dim 1536 and 24 heads => embed-dim per head 64
|
vit_giant2
|
python
|
ali-vilab/VACE
|
vace/annotators/depth_anything_v2/dinov2.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/depth_anything_v2/dinov2.py
|
Apache-2.0
|
def get_attn_bias_and_cat(x_list, branges=None):
    """
    this will perform the index select, cat the tensors, and provide the attn_bias from cache
    """
    # batch size per input tensor; branges (when given) selects a subset of rows
    batch_sizes = [b.shape[0] for b in branges] if branges is not None else [x.shape[0] for x in x_list]
    all_shapes = tuple((b, x.shape[1]) for b, x in zip(batch_sizes, x_list))
    # build and memoize a block-diagonal attention mask for this (batch, seqlen) combination
    if all_shapes not in attn_bias_cache.keys():
        seqlens = []
        for b, x in zip(batch_sizes, x_list):
            for _ in range(b):
                seqlens.append(x.shape[1])
        attn_bias = fmha.BlockDiagonalMask.from_seqlens(seqlens)
        attn_bias._batch_sizes = batch_sizes  # stashed so the mask can split outputs later
        attn_bias_cache[all_shapes] = attn_bias
    if branges is not None:
        # gather the selected rows from each tensor and pack them into a single batch-1 sequence
        cat_tensors = index_select_cat([x.flatten(1) for x in x_list], branges).view(1, -1, x_list[0].shape[-1])
    else:
        # no selection: flatten each tensor to batch size 1 and concatenate along the sequence dim
        tensors_bs1 = tuple(x.reshape([1, -1, *x.shape[2:]]) for x in x_list)
        cat_tensors = torch.cat(tensors_bs1, dim=1)
    return attn_bias_cache[all_shapes], cat_tensors
|
this will perform the index select, cat the tensors, and provide the attn_bias from cache
|
get_attn_bias_and_cat
|
python
|
ali-vilab/VACE
|
vace/annotators/depth_anything_v2/layers/block.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/depth_anything_v2/layers/block.py
|
Apache-2.0
|
def forward_nested(self, x_list: List[Tensor]) -> List[Tensor]:
    """
    x_list contains a list of tensors to nest together and run
    """
    # nested (packed) forward relies on block-diagonal masks, which require MemEffAttention
    assert isinstance(self.attn, MemEffAttention)
    if self.training and self.sample_drop_ratio > 0.0:
        def attn_residual_func(x: Tensor, attn_bias=None) -> Tensor:
            return self.attn(self.norm1(x), attn_bias=attn_bias)
        def ffn_residual_func(x: Tensor, attn_bias=None) -> Tensor:
            return self.mlp(self.norm2(x))
        # per-sample stochastic depth applied across the nested batch
        x_list = drop_add_residual_stochastic_depth_list(
            x_list,
            residual_func=attn_residual_func,
            sample_drop_ratio=self.sample_drop_ratio,
            scaling_vector=self.ls1.gamma if isinstance(self.ls1, LayerScale) else None,
        )
        # NOTE(review): the condition checks self.ls1 but scales with self.ls2.gamma —
        # presumably ls1 and ls2 are always the same type; confirm upstream.
        x_list = drop_add_residual_stochastic_depth_list(
            x_list,
            residual_func=ffn_residual_func,
            sample_drop_ratio=self.sample_drop_ratio,
            scaling_vector=self.ls2.gamma if isinstance(self.ls1, LayerScale) else None,
        )
        return x_list
    else:
        def attn_residual_func(x: Tensor, attn_bias=None) -> Tensor:
            return self.ls1(self.attn(self.norm1(x), attn_bias=attn_bias))
        def ffn_residual_func(x: Tensor, attn_bias=None) -> Tensor:
            return self.ls2(self.mlp(self.norm2(x)))
        # run all sequences as one packed batch, then split back into per-input tensors
        attn_bias, x = get_attn_bias_and_cat(x_list)
        x = x + attn_residual_func(x, attn_bias=attn_bias)
        x = x + ffn_residual_func(x)
        return attn_bias.split(x)
|
x_list contains a list of tensors to nest together and run
|
forward_nested
|
python
|
ali-vilab/VACE
|
vace/annotators/depth_anything_v2/layers/block.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/depth_anything_v2/layers/block.py
|
Apache-2.0
|
def __init__(self, features, activation, bn):
    """Build a residual conv unit: two 3x3 convs with optional batch norm.

    Args:
        features (int): number of input/output channels for both convs
        activation (nn.Module): activation applied before each conv (see forward)
        bn (bool): if True, add BatchNorm2d after each conv
    """
    super().__init__()
    self.bn = bn
    self.groups = 1
    self.conv1 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups)
    self.conv2 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups)
    # idiomatic truthiness test instead of "== True"
    if self.bn:
        self.bn1 = nn.BatchNorm2d(features)
        self.bn2 = nn.BatchNorm2d(features)
    self.activation = activation
    # quantization-friendly elementwise add for the residual connection
    self.skip_add = nn.quantized.FloatFunctional()
|
Init.
Args:
features (int): number of features
|
__init__
|
python
|
ali-vilab/VACE
|
vace/annotators/depth_anything_v2/util/blocks.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/depth_anything_v2/util/blocks.py
|
Apache-2.0
|
def forward(self, x):
    """Forward pass of the residual conv unit.

    Args:
        x (tensor): input feature map

    Returns:
        tensor: output of the conv stack added back onto x
    """
    out = self.activation(x)
    out = self.conv1(out)
    if self.bn:  # idiomatic truthiness test instead of "== True"
        out = self.bn1(out)
    out = self.activation(out)
    out = self.conv2(out)
    if self.bn:
        out = self.bn2(out)
    if self.groups > 1:
        out = self.conv_merge(out)
    # quantization-friendly residual add
    return self.skip_add.add(out, x)
|
Forward pass.
Args:
x (tensor): input
Returns:
tensor: output
|
forward
|
python
|
ali-vilab/VACE
|
vace/annotators/depth_anything_v2/util/blocks.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/depth_anything_v2/util/blocks.py
|
Apache-2.0
|
def __init__(
    self,
    features,
    activation,
    deconv=False,
    bn=False,
    expand=False,
    align_corners=True,
    size=None
):
    """Build a feature-fusion block of two residual conv units plus an output conv.

    Args:
        features (int): number of input channels
        activation (nn.Module): activation passed to the residual conv units
        deconv (bool): stored flag (not used in this constructor)
        bn (bool): enable batch norm inside the residual conv units
        expand (bool): if True, halve the channel count in the output conv
        align_corners (bool): align_corners setting stored for upsampling
        size (tuple, optional): fixed output size stored for upsampling
    """
    super(FeatureFusionBlock, self).__init__()
    self.deconv = deconv
    self.align_corners = align_corners
    self.groups = 1
    self.expand = expand
    out_features = features
    if self.expand:  # idiomatic truthiness test instead of "== True"
        out_features = features // 2
    self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1)
    self.resConfUnit1 = ResidualConvUnit(features, activation, bn)
    self.resConfUnit2 = ResidualConvUnit(features, activation, bn)
    # quantization-friendly elementwise add for fusing skip connections
    self.skip_add = nn.quantized.FloatFunctional()
    self.size = size
|
Init.
Args:
features (int): number of features
|
__init__
|
python
|
ali-vilab/VACE
|
vace/annotators/depth_anything_v2/util/blocks.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/depth_anything_v2/util/blocks.py
|
Apache-2.0
|
def __init__(
    self,
    width,
    height,
    resize_target=True,
    keep_aspect_ratio=False,
    ensure_multiple_of=1,
    resize_method="lower_bound",
    image_interpolation_method=cv2.INTER_AREA,
):
    """Configure the resize transform.

    Args:
        width (int): desired output width
        height (int): desired output height
        resize_target (bool, optional): when True, resize the full sample
            (image, mask, target); when False, resize the image only.
            Defaults to True.
        keep_aspect_ratio (bool, optional): when True, preserve the input
            aspect ratio; the actual output size then depends on
            ``resize_method``. Defaults to False.
        ensure_multiple_of (int, optional): constrain output width/height to
            multiples of this value. Defaults to 1.
        resize_method (str, optional): one of
            "lower_bound" (output at least as large as the given size),
            "upper_bound" (output at most as large as the given size),
            "minimal" (scale as little as possible; output may be smaller).
            Defaults to "lower_bound".
        image_interpolation_method: OpenCV interpolation flag for the image.
    """
    self.__resize_method = resize_method
    self.__image_interpolation_method = image_interpolation_method
    self.__width = width
    self.__height = height
    self.__resize_target = resize_target
    self.__keep_aspect_ratio = keep_aspect_ratio
    self.__multiple_of = ensure_multiple_of
|
Init.
Args:
width (int): desired output width
height (int): desired output height
resize_target (bool, optional):
True: Resize the full sample (image, mask, target).
False: Resize image only.
Defaults to True.
keep_aspect_ratio (bool, optional):
True: Keep the aspect ratio of the input sample.
Output sample might not have the given width and height, and
resize behaviour depends on the parameter 'resize_method'.
Defaults to False.
ensure_multiple_of (int, optional):
Output width and height is constrained to be multiple of this parameter.
Defaults to 1.
resize_method (str, optional):
"lower_bound": Output will be at least as large as the given size.
"upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.)
"minimal": Scale as least as possible. (Output size might be smaller than given size.)
Defaults to "lower_bound".
|
__init__
|
python
|
ali-vilab/VACE
|
vace/annotators/depth_anything_v2/util/transform.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/depth_anything_v2/util/transform.py
|
Apache-2.0
|
def nms(boxes, scores, nms_thr):
    """Single-class non-maximum suppression in pure NumPy.

    Args:
        boxes: (n, 4) array of (x1, y1, x2, y2) boxes.
        scores: (n,) array of confidences.
        nms_thr: IoU threshold above which overlapping boxes are suppressed.

    Returns:
        list: indices of the boxes to keep, highest score first.
    """
    lx, ty = boxes[:, 0], boxes[:, 1]
    rx, by = boxes[:, 2], boxes[:, 3]
    areas = (rx - lx + 1) * (by - ty + 1)
    remaining = scores.argsort()[::-1]
    keep = []
    while remaining.size > 0:
        best = remaining[0]
        keep.append(best)
        rest = remaining[1:]
        # intersection of the best box with every remaining box
        ix1 = np.maximum(lx[best], lx[rest])
        iy1 = np.maximum(ty[best], ty[rest])
        ix2 = np.minimum(rx[best], rx[rest])
        iy2 = np.minimum(by[best], by[rest])
        iw = np.maximum(0.0, ix2 - ix1 + 1)
        ih = np.maximum(0.0, iy2 - iy1 + 1)
        inter = iw * ih
        iou = inter / (areas[best] + areas[rest] - inter)
        # drop every box that overlaps the kept one too much
        remaining = rest[np.where(iou <= nms_thr)[0]]
    return keep
|
Single class NMS implemented in Numpy.
|
nms
|
python
|
ali-vilab/VACE
|
vace/annotators/dwpose/onnxdet.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/dwpose/onnxdet.py
|
Apache-2.0
|
def multiclass_nms(boxes, scores, nms_thr, score_thr):
    """Class-aware multiclass NMS in pure NumPy.

    Args:
        boxes: (n, 4) array of candidate boxes.
        scores: (n, num_classes) per-class confidences.
        nms_thr: IoU threshold used inside per-class NMS.
        score_thr: minimum confidence for a detection to be considered.

    Returns:
        (m, 6) array of (x1, y1, x2, y2, score, class) rows, or None when
        nothing survives.
    """
    detections = []
    for cls_ind in range(scores.shape[1]):
        cls_scores = scores[:, cls_ind]
        mask = cls_scores > score_thr
        if not mask.sum():
            continue
        kept = nms(boxes[mask], cls_scores[mask], nms_thr)
        if kept:
            labels = np.full((len(kept), 1), cls_ind, dtype=np.float64)
            detections.append(
                np.concatenate([boxes[mask][kept], cls_scores[mask][kept, None], labels], 1)
            )
    return np.concatenate(detections, 0) if detections else None
|
Multiclass NMS implemented in Numpy. Class-aware version.
|
multiclass_nms
|
python
|
ali-vilab/VACE
|
vace/annotators/dwpose/onnxdet.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/dwpose/onnxdet.py
|
Apache-2.0
|
def preprocess(
    img: np.ndarray, out_bbox, input_size: Tuple[int, int] = (192, 256)
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Crop, warp and normalize an image for RTMPose model inference.

    Args:
        img (np.ndarray): input image.
        out_bbox: detected boxes as (x0, y0, x1, y1) rows.
        input_size (tuple): model input size (w, h).

    Returns:
        tuple:
            - list of preprocessed crops,
            - list of bbox centers,
            - list of bbox scales (one entry per box).
    """
    img_hw = img.shape[:2]
    out_img, out_center, out_scale = [], [], []
    # fall back to the whole image when no detection is available
    if len(out_bbox) == 0:
        out_bbox = [[0, 0, img_hw[1], img_hw[0]]]
    # ImageNet-style per-channel normalization constants
    mean = np.array([123.675, 116.28, 103.53])
    std = np.array([58.395, 57.12, 57.375])
    for box in out_bbox:
        bbox = np.array([box[0], box[1], box[2], box[3]])
        # bbox -> (center, scale) with padding, then affine-warp to model input
        center, scale = bbox_xyxy2cs(bbox, padding=1.25)
        crop, scale = top_down_affine(input_size, scale, center, img)
        out_img.append((crop - mean) / std)
        out_center.append(center)
        out_scale.append(scale)
    return out_img, out_center, out_scale
|
Do preprocessing for RTMPose model inference.
Args:
img (np.ndarray): Input image in shape.
input_size (tuple): Input image size in shape (w, h).
Returns:
tuple:
- resized_img (np.ndarray): Preprocessed image.
- center (np.ndarray): Center of image.
- scale (np.ndarray): Scale of image.
|
preprocess
|
python
|
ali-vilab/VACE
|
vace/annotators/dwpose/onnxpose.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/dwpose/onnxpose.py
|
Apache-2.0
|
def inference(sess: ort.InferenceSession, img: np.ndarray) -> np.ndarray:
    """Run the RTMPose ONNX model on each preprocessed crop.

    Args:
        sess (ort.InferenceSession): ONNXRuntime session.
        img (np.ndarray): sequence of preprocessed crops in HWC layout.

    Returns:
        list: raw model outputs, one entry per crop.
    """
    # session metadata is loop-invariant, so resolve it once
    input_name = sess.get_inputs()[0].name
    output_names = [out.name for out in sess.get_outputs()]
    all_out = []
    for crop in img:
        # the model expects a batch of CHW tensors
        feeds = {input_name: [crop.transpose(2, 0, 1)]}
        all_out.append(sess.run(output_names, feeds))
    return all_out
|
Inference RTMPose model.
Args:
sess (ort.InferenceSession): ONNXRuntime session.
img (np.ndarray): Input image in shape.
Returns:
outputs (np.ndarray): Output of RTMPose model.
|
inference
|
python
|
ali-vilab/VACE
|
vace/annotators/dwpose/onnxpose.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/dwpose/onnxpose.py
|
Apache-2.0
|
def postprocess(outputs: List[np.ndarray],
                model_input_size: Tuple[int, int],
                center: Tuple[int, int],
                scale: Tuple[int, int],
                simcc_split_ratio: float = 2.0
                ) -> Tuple[np.ndarray, np.ndarray]:
    """Decode and rescale RTMPose SimCC outputs.

    Args:
        outputs: raw (simcc_x, simcc_y) pairs, one per detected box.
        model_input_size (tuple): model input size (w, h).
        center (tuple): per-box bbox centers (x, y).
        scale (tuple): per-box bbox scales (w, h).
        simcc_split_ratio (float): SimCC split ratio used by the model.

    Returns:
        tuple:
            - keypoints (np.ndarray): rescaled keypoints, stacked over boxes.
            - scores (np.ndarray): model confidences, stacked over boxes.
    """
    all_key, all_score = [], []
    for idx, (simcc_x, simcc_y) in enumerate(outputs):
        # SimCC -> keypoints in model-input coordinates
        keypoints, scores = decode(simcc_x, simcc_y, simcc_split_ratio)
        # map back into original-image coordinates for this box
        keypoints = keypoints / model_input_size * scale[idx] + center[idx] - scale[idx] / 2
        all_key.append(keypoints[0])
        all_score.append(scores[0])
    return np.array(all_key), np.array(all_score)
|
Postprocess for RTMPose model output.
Args:
outputs (np.ndarray): Output of RTMPose model.
model_input_size (tuple): RTMPose model Input image size.
center (tuple): Center of bbox in shape (x, y).
scale (tuple): Scale of bbox in shape (w, h).
simcc_split_ratio (float): Split ratio of simcc.
Returns:
tuple:
- keypoints (np.ndarray): Rescaled keypoints.
- scores (np.ndarray): Model predict scores.
|
postprocess
|
python
|
ali-vilab/VACE
|
vace/annotators/dwpose/onnxpose.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/dwpose/onnxpose.py
|
Apache-2.0
|
def bbox_xyxy2cs(bbox: np.ndarray,
                 padding: float = 1.) -> Tuple[np.ndarray, np.ndarray]:
    """Convert (left, top, right, bottom) boxes to (center, scale).

    Args:
        bbox (ndarray): box(es) in shape (4,) or (n, 4), formatted as
            (left, top, right, bottom).
        padding (float): factor multiplied into the scale. Default: 1.0.

    Returns:
        tuple: A tuple containing center and scale.
            - np.ndarray[float32]: centers (x, y), shape (2,) or (n, 2)
            - np.ndarray[float32]: scales (w, h), shape (2,) or (n, 2)
    """
    # normalize a single (4,) box to a (1, 4) batch
    single = bbox.ndim == 1
    if single:
        bbox = bbox[None, :]
    x1, y1, x2, y2 = np.hsplit(bbox, [1, 2, 3])
    center = 0.5 * np.hstack([x1 + x2, y1 + y2])
    scale = padding * np.hstack([x2 - x1, y2 - y1])
    # squeeze back to (2,) when the input was a single box
    return (center[0], scale[0]) if single else (center, scale)
|
Transform the bbox format from (x,y,w,h) into (center, scale)
Args:
bbox (ndarray): Bounding box(es) in shape (4,) or (n, 4), formatted
as (left, top, right, bottom)
        padding (float): BBox padding factor that will be multiplied to scale.
Default: 1.0
Returns:
tuple: A tuple containing center and scale.
- np.ndarray[float32]: Center (x, y) of the bbox in shape (2,) or
(n, 2)
- np.ndarray[float32]: Scale (w, h) of the bbox in shape (2,) or
(n, 2)
|
bbox_xyxy2cs
|
python
|
ali-vilab/VACE
|
vace/annotators/dwpose/onnxpose.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/dwpose/onnxpose.py
|
Apache-2.0
|
def _fix_aspect_ratio(bbox_scale: np.ndarray,
                      aspect_ratio: float) -> np.ndarray:
    """Grow the (w, h) scale so it matches the requested w/h ratio.

    Args:
        bbox_scale (np.ndarray): the image scale (w, h) in shape (2, )
        aspect_ratio (float): the desired ratio of ``w/h``

    Returns:
        np.ndarray: the reshaped image scale in (2, )
    """
    w, h = np.hsplit(bbox_scale, [1])
    wide = np.hstack([w, w / aspect_ratio])   # box already wider than target: pad height
    tall = np.hstack([h * aspect_ratio, h])   # box taller than target: pad width
    return np.where(w > h * aspect_ratio, wide, tall)
|
Extend the scale to match the given aspect ratio.
Args:
scale (np.ndarray): The image scale (w, h) in shape (2, )
aspect_ratio (float): The ratio of ``w/h``
Returns:
np.ndarray: The reshaped image scale in (2, )
|
_fix_aspect_ratio
|
python
|
ali-vilab/VACE
|
vace/annotators/dwpose/onnxpose.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/dwpose/onnxpose.py
|
Apache-2.0
|
def _rotate_point(pt: np.ndarray, angle_rad: float) -> np.ndarray:
    """Rotate a 2D point anticlockwise about the origin.

    Args:
        pt (np.ndarray): point coordinates (x, y) in shape (2, )
        angle_rad (float): rotation angle in radians

    Returns:
        np.ndarray: rotated point in shape (2, )
    """
    c = np.cos(angle_rad)
    s = np.sin(angle_rad)
    # standard 2D rotation matrix
    rotation = np.array([[c, -s], [s, c]])
    return rotation @ pt
|
Rotate a point by an angle.
Args:
pt (np.ndarray): 2D point coordinates (x, y) in shape (2, )
angle_rad (float): rotation angle in radian
Returns:
np.ndarray: Rotated point in shape (2, )
|
_rotate_point
|
python
|
ali-vilab/VACE
|
vace/annotators/dwpose/onnxpose.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/dwpose/onnxpose.py
|
Apache-2.0
|
def _get_3rd_point(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Return the third point needed to define an affine transform.

    The result is the vector ``a - b`` rotated 90 degrees anticlockwise,
    using ``b`` as the rotation center.

    Args:
        a (np.ndarray): the 1st point (x, y) in shape (2, )
        b (np.ndarray): the 2nd point (x, y) in shape (2, )

    Returns:
        np.ndarray: the 3rd point.
    """
    dx, dy = a - b
    # (dx, dy) rotated by +90 degrees is (-dy, dx)
    return b + np.r_[-dy, dx]
|
To calculate the affine matrix, three pairs of points are required. This
function is used to get the 3rd point, given 2D points a & b.
The 3rd point is defined by rotating vector `a - b` by 90 degrees
anticlockwise, using b as the rotation center.
Args:
a (np.ndarray): The 1st point (x,y) in shape (2, )
b (np.ndarray): The 2nd point (x,y) in shape (2, )
Returns:
np.ndarray: The 3rd point.
|
_get_3rd_point
|
python
|
ali-vilab/VACE
|
vace/annotators/dwpose/onnxpose.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/dwpose/onnxpose.py
|
Apache-2.0
|
def get_warp_matrix(center: np.ndarray,
                    scale: np.ndarray,
                    rot: float,
                    output_size: Tuple[int, int],
                    shift: Tuple[float, float] = (0., 0.),
                    inv: bool = False) -> np.ndarray:
    """Calculate the affine transformation matrix that can warp the bbox area
    in the input image to the output size.

    Args:
        center (np.ndarray[2, ]): Center of the bounding box (x, y).
        scale (np.ndarray[2, ]): Scale of the bounding box
            wrt [width, height].
        rot (float): Rotation angle (degree).
        output_size (np.ndarray[2, ] | list(2,)): Size of the
            destination heatmaps.
        shift (0-100%): Shift translation ratio wrt the width/height.
            Default (0., 0.).
        inv (bool): Option to inverse the affine transform direction.
            (inv=False: src->dst or inv=True: dst->src)

    Returns:
        np.ndarray: A 2x3 transformation matrix
    """
    shift = np.array(shift)
    src_w = scale[0]
    dst_w = output_size[0]
    dst_h = output_size[1]
    # direction vector from the center to the top edge, rotated by `rot`
    rot_rad = np.deg2rad(rot)
    src_dir = _rotate_point(np.array([0., src_w * -0.5]), rot_rad)
    dst_dir = np.array([0., dst_w * -0.5])
    # three reference points of the src rectangle in the original image
    # (center, top-center, and a third point perpendicular to them)
    src = np.zeros((3, 2), dtype=np.float32)
    src[0, :] = center + scale * shift
    src[1, :] = center + src_dir + scale * shift
    src[2, :] = _get_3rd_point(src[0, :], src[1, :])
    # the corresponding three reference points of the dst rectangle
    dst = np.zeros((3, 2), dtype=np.float32)
    dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
    dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir
    dst[2, :] = _get_3rd_point(dst[0, :], dst[1, :])
    # cv2.getAffineTransform solves the 2x3 matrix from the 3 point pairs
    if inv:
        warp_mat = cv2.getAffineTransform(np.float32(dst), np.float32(src))
    else:
        warp_mat = cv2.getAffineTransform(np.float32(src), np.float32(dst))
    return warp_mat
|
Calculate the affine transformation matrix that can warp the bbox area
in the input image to the output size.
Args:
center (np.ndarray[2, ]): Center of the bounding box (x, y).
scale (np.ndarray[2, ]): Scale of the bounding box
wrt [width, height].
rot (float): Rotation angle (degree).
output_size (np.ndarray[2, ] | list(2,)): Size of the
destination heatmaps.
shift (0-100%): Shift translation ratio wrt the width/height.
Default (0., 0.).
inv (bool): Option to inverse the affine transform direction.
(inv=False: src->dst or inv=True: dst->src)
Returns:
np.ndarray: A 2x3 transformation matrix
|
get_warp_matrix
|
python
|
ali-vilab/VACE
|
vace/annotators/dwpose/onnxpose.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/dwpose/onnxpose.py
|
Apache-2.0
|
def top_down_affine(input_size: dict, bbox_scale: dict, bbox_center: dict,
                    img: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """Warp the bbox region of the image into the model input via an affine transform.

    Args:
        input_size (dict): the input size of the model (w, h).
        bbox_scale (dict): the bbox scale of the img.
        bbox_center (dict): the bbox center of the img.
        img (np.ndarray): the original image.

    Returns:
        tuple:
            - np.ndarray[float32]: img patch after the affine transform.
            - np.ndarray[float32]: bbox scale after the aspect-ratio fix.
    """
    w, h = input_size
    # force the bbox to the model's aspect ratio before warping
    bbox_scale = _fix_aspect_ratio(bbox_scale, aspect_ratio=w / h)
    # no rotation is applied in this top-down pipeline
    warp_mat = get_warp_matrix(bbox_center, bbox_scale, 0, output_size=(w, h))
    patch = cv2.warpAffine(img, warp_mat, (int(w), int(h)), flags=cv2.INTER_LINEAR)
    return patch, bbox_scale
|
Get the bbox image as the model input by affine transform.
Args:
input_size (dict): The input size of the model.
bbox_scale (dict): The bbox scale of the img.
bbox_center (dict): The bbox center of the img.
img (np.ndarray): The original image.
Returns:
tuple: A tuple containing center and scale.
- np.ndarray[float32]: img after affine transform.
- np.ndarray[float32]: bbox scale after affine transform.
|
top_down_affine
|
python
|
ali-vilab/VACE
|
vace/annotators/dwpose/onnxpose.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/dwpose/onnxpose.py
|
Apache-2.0
|
def get_simcc_maximum(simcc_x: np.ndarray,
                      simcc_y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """Locate peak responses in SimCC representations.

    Note:
        instance number: N
        num_keypoints: K

    Args:
        simcc_x (np.ndarray): x-axis SimCC in shape (N, K, Wx)
        simcc_y (np.ndarray): y-axis SimCC in shape (N, K, Wy)

    Returns:
        tuple:
            - locs (np.ndarray): peak locations in shape (N, K, 2); entries
              whose confidence is non-positive are set to -1
            - vals (np.ndarray): peak confidences in shape (N, K)
    """
    N, K, _ = simcc_x.shape
    flat_x = simcc_x.reshape(N * K, -1)
    flat_y = simcc_y.reshape(N * K, -1)
    # per-axis argmax gives the (x, y) location of each keypoint
    locs = np.stack(
        (np.argmax(flat_x, axis=1), np.argmax(flat_y, axis=1)),
        axis=-1,
    ).astype(np.float32)
    # confidence is the smaller of the two per-axis peak values
    vals = np.minimum(np.amax(flat_x, axis=1), np.amax(flat_y, axis=1))
    # invalidate keypoints with non-positive confidence
    locs[vals <= 0.] = -1
    return locs.reshape(N, K, 2), vals.reshape(N, K)
|
Get maximum response location and value from simcc representations.
Note:
instance number: N
num_keypoints: K
heatmap height: H
heatmap width: W
Args:
simcc_x (np.ndarray): x-axis SimCC in shape (K, Wx) or (N, K, Wx)
simcc_y (np.ndarray): y-axis SimCC in shape (K, Wy) or (N, K, Wy)
Returns:
tuple:
- locs (np.ndarray): locations of maximum heatmap responses in shape
(K, 2) or (N, K, 2)
- vals (np.ndarray): values of maximum heatmap responses in shape
(K,) or (N, K)
|
get_simcc_maximum
|
python
|
ali-vilab/VACE
|
vace/annotators/dwpose/onnxpose.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/dwpose/onnxpose.py
|
Apache-2.0
|
def decode(simcc_x: np.ndarray, simcc_y: np.ndarray,
           simcc_split_ratio) -> Tuple[np.ndarray, np.ndarray]:
    """Decode SimCC outputs into keypoint coordinates and scores.

    Args:
        simcc_x (np.ndarray[K, Wx]): model-predicted SimCC along x.
        simcc_y (np.ndarray[K, Wy]): model-predicted SimCC along y.
        simcc_split_ratio (int): the split ratio of simcc.

    Returns:
        tuple:
            - np.ndarray[float32]: keypoints in shape (K, 2) or (n, K, 2)
            - np.ndarray[float32]: scores in shape (K,) or (n, K)
    """
    locs, scores = get_simcc_maximum(simcc_x, simcc_y)
    # map SimCC bin indices back to pixel coordinates
    return locs / simcc_split_ratio, scores
|
Modulate simcc distribution with Gaussian.
Args:
simcc_x (np.ndarray[K, Wx]): model predicted simcc in x.
simcc_y (np.ndarray[K, Wy]): model predicted simcc in y.
simcc_split_ratio (int): The split ratio of simcc.
Returns:
tuple: A tuple containing center and scale.
- np.ndarray[float32]: keypoints in shape (K, 2) or (n, K, 2)
- np.ndarray[float32]: scores in shape (K,) or (n, K)
|
decode
|
python
|
ali-vilab/VACE
|
vace/annotators/dwpose/onnxpose.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/dwpose/onnxpose.py
|
Apache-2.0
|
def load(self, path):
    """Load model weights from a checkpoint file.

    Args:
        path (str): checkpoint file path
    """
    state = torch.load(path, map_location=torch.device('cpu'), weights_only=True)
    # full training checkpoints nest the weights under the 'model' key
    if 'optimizer' in state:
        state = state['model']
    self.load_state_dict(state)
|
Load model from file.
Args:
path (str): file path
|
load
|
python
|
ali-vilab/VACE
|
vace/annotators/midas/base_model.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/midas/base_model.py
|
Apache-2.0
|
def __init__(self, scale_factor, mode, align_corners=False):
    """Init.

    Args:
        scale_factor (float): spatial scaling factor
        mode (str): interpolation mode (e.g. "bilinear")
        align_corners (bool): passed through to the interpolation call
    """
    super(Interpolate, self).__init__()
    self.align_corners = align_corners
    self.mode = mode
    self.scale_factor = scale_factor
    # keep a reference to the functional API used in forward()
    self.interp = nn.functional.interpolate
|
Init.
Args:
scale_factor (float): scaling
mode (str): interpolation mode
|
__init__
|
python
|
ali-vilab/VACE
|
vace/annotators/midas/blocks.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/midas/blocks.py
|
Apache-2.0
|
def forward(self, x):
    """Resample the input tensor.

    Args:
        x (tensor): input feature map

    Returns:
        tensor: input rescaled by ``self.scale_factor``
    """
    return self.interp(
        x,
        scale_factor=self.scale_factor,
        mode=self.mode,
        align_corners=self.align_corners,
    )
|
Forward pass.
Args:
x (tensor): input
Returns:
tensor: interpolated data
|
forward
|
python
|
ali-vilab/VACE
|
vace/annotators/midas/blocks.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/midas/blocks.py
|
Apache-2.0
|
def __init__(self, features):
    """Residual block with two 3x3 convolutions.

    Args:
        features (int): number of input/output channels
    """
    super().__init__()
    # Both convolutions preserve the spatial size and channel count.
    conv_args = dict(kernel_size=3, stride=1, padding=1, bias=True)
    self.conv1 = nn.Conv2d(features, features, **conv_args)
    self.conv2 = nn.Conv2d(features, features, **conv_args)
    self.relu = nn.ReLU(inplace=True)
|
Init.
Args:
features (int): number of features
|
__init__
|
python
|
ali-vilab/VACE
|
vace/annotators/midas/blocks.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/midas/blocks.py
|
Apache-2.0
|
def forward(self, x):
    """Apply the residual unit.

    Args:
        x (tensor): input feature map

    Returns:
        tensor: conv path output plus the skip connection
    """
    residual = self.conv1(self.relu(x))
    residual = self.conv2(self.relu(residual))
    return residual + x
|
Forward pass.
Args:
x (tensor): input
Returns:
tensor: output
|
forward
|
python
|
ali-vilab/VACE
|
vace/annotators/midas/blocks.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/midas/blocks.py
|
Apache-2.0
|
def __init__(self, features):
    """Feature fusion block combining two residual conv units.

    Args:
        features (int): number of channels shared by both units
    """
    super(FeatureFusionBlock, self).__init__()

    # One residual unit per fused input branch (see forward in the
    # original module for how they are applied).
    self.resConfUnit1 = ResidualConvUnit(features)
    self.resConfUnit2 = ResidualConvUnit(features)
|
Init.
Args:
features (int): number of features
|
__init__
|
python
|
ali-vilab/VACE
|
vace/annotators/midas/blocks.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/midas/blocks.py
|
Apache-2.0
|
def __init__(self, features, activation, bn):
    """Residual unit with configurable activation and optional batch norm.

    Args:
        features (int): number of input/output channels
        activation: activation module applied before each convolution
        bn (bool): if True, apply BatchNorm2d after each convolution
    """
    super().__init__()
    self.bn = bn
    self.groups = 1
    conv_kwargs = dict(kernel_size=3, stride=1, padding=1, bias=True,
                       groups=self.groups)
    self.conv1 = nn.Conv2d(features, features, **conv_kwargs)
    self.conv2 = nn.Conv2d(features, features, **conv_kwargs)
    if self.bn is True:
        self.bn1 = nn.BatchNorm2d(features)
        self.bn2 = nn.BatchNorm2d(features)
    self.activation = activation
    # Quantization-friendly elementwise add for the skip connection.
    self.skip_add = nn.quantized.FloatFunctional()
|
Init.
Args:
features (int): number of features
|
__init__
|
python
|
ali-vilab/VACE
|
vace/annotators/midas/blocks.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/midas/blocks.py
|
Apache-2.0
|
def forward(self, x):
    """Apply the residual unit.

    Args:
        x (tensor): input feature map

    Returns:
        tensor: conv path output added to the input via ``skip_add``
    """
    out = self.conv1(self.activation(x))
    if self.bn is True:
        out = self.bn1(out)
    out = self.conv2(self.activation(out))
    if self.bn is True:
        out = self.bn2(out)
    if self.groups > 1:
        out = self.conv_merge(out)
    # Quantization-friendly add of the skip connection.
    return self.skip_add.add(out, x)
|
Forward pass.
Args:
x (tensor): input
Returns:
tensor: output
|
forward
|
python
|
ali-vilab/VACE
|
vace/annotators/midas/blocks.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/midas/blocks.py
|
Apache-2.0
|
def __init__(self,
             features,
             activation,
             deconv=False,
             bn=False,
             expand=False,
             align_corners=True):
    """Custom feature fusion block.

    Args:
        features (int): number of input channels
        activation: activation used inside the residual units
        deconv (bool): stored flag, not used in this constructor
        bn (bool): enable batch norm inside the residual units
        expand (bool): if True, halve the channel count at the output conv
        align_corners (bool): flag stored for the upsampling step
    """
    super(FeatureFusionBlock_custom, self).__init__()
    self.deconv = deconv
    self.align_corners = align_corners
    self.groups = 1
    self.expand = expand
    # Optionally reduce channels by half on the way out.
    out_features = features // 2 if self.expand is True else features
    self.out_conv = nn.Conv2d(features, out_features, kernel_size=1,
                              stride=1, padding=0, bias=True, groups=1)
    self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn)
    self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn)
    # Quantization-friendly elementwise add for the skip connection.
    self.skip_add = nn.quantized.FloatFunctional()
|
Init.
Args:
features (int): number of features
|
__init__
|
python
|
ali-vilab/VACE
|
vace/annotators/midas/blocks.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/midas/blocks.py
|
Apache-2.0
|
def __init__(self, path=None, features=256, non_negative=True):
    """Init.

    Args:
        path (str, optional): Path to saved model weights. Defaults to None.
        features (int, optional): Number of decoder features. Defaults to 256.
        non_negative (bool, optional): If True, a final ReLU clamps the
            predicted depth to be non-negative. Defaults to True.
    """
    print('Loading weights: ', path)

    super(MidasNet, self).__init__()

    # NOTE(review): this sets use_pretrained=True exactly when a checkpoint
    # path IS given, which is the opposite of MidasNet_small's convention
    # (`False if path else True`) -- confirm which is intended.
    use_pretrained = False if path is None else True

    self.pretrained, self.scratch = _make_encoder(
        backbone='resnext101_wsl',
        features=features,
        use_pretrained=use_pretrained)

    # Top-down decoder: one fusion block per encoder stage.
    self.scratch.refinenet4 = FeatureFusionBlock(features)
    self.scratch.refinenet3 = FeatureFusionBlock(features)
    self.scratch.refinenet2 = FeatureFusionBlock(features)
    self.scratch.refinenet1 = FeatureFusionBlock(features)

    # Output head: upsample 2x and reduce to a single depth channel.
    self.scratch.output_conv = nn.Sequential(
        nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1),
        Interpolate(scale_factor=2, mode='bilinear'),
        nn.Conv2d(128, 32, kernel_size=3, stride=1, padding=1),
        nn.ReLU(True),
        nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
        nn.ReLU(True) if non_negative else nn.Identity(),
    )

    if path:
        self.load(path)
|
Init.
Args:
path (str, optional): Path to saved model. Defaults to None.
features (int, optional): Number of features. Defaults to 256.
backbone (str, optional): Backbone network for encoder. Defaults to resnet50
|
__init__
|
python
|
ali-vilab/VACE
|
vace/annotators/midas/midas_net.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/midas/midas_net.py
|
Apache-2.0
|
def forward(self, x):
    """Estimate depth from an input image batch.

    Args:
        x (tensor): input image batch

    Returns:
        tensor: depth prediction with the channel dimension squeezed out
    """
    # Backbone feature pyramid.
    layer_1 = self.pretrained.layer1(x)
    layer_2 = self.pretrained.layer2(layer_1)
    layer_3 = self.pretrained.layer3(layer_2)
    layer_4 = self.pretrained.layer4(layer_3)

    # Project every pyramid level to the decoder width.
    rn = [
        self.scratch.layer1_rn(layer_1),
        self.scratch.layer2_rn(layer_2),
        self.scratch.layer3_rn(layer_3),
        self.scratch.layer4_rn(layer_4),
    ]

    # Top-down fusion, coarsest to finest.
    path = self.scratch.refinenet4(rn[3])
    path = self.scratch.refinenet3(path, rn[2])
    path = self.scratch.refinenet2(path, rn[1])
    path = self.scratch.refinenet1(path, rn[0])

    return torch.squeeze(self.scratch.output_conv(path), dim=1)
|
Forward pass.
Args:
x (tensor): input data (image)
Returns:
tensor: depth
|
forward
|
python
|
ali-vilab/VACE
|
vace/annotators/midas/midas_net.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/midas/midas_net.py
|
Apache-2.0
|
def __init__(self,
             path=None,
             features=64,
             backbone='efficientnet_lite3',
             non_negative=True,
             exportable=True,
             channels_last=False,
             align_corners=True,
             blocks={'expand': True}):
    """Init.

    Args:
        path (str, optional): Path to saved model weights. Defaults to None.
        features (int, optional): Base number of decoder features.
            Defaults to 64 (not 256 as the original docstring claimed).
        backbone (str, optional): Backbone network for the encoder.
            Defaults to 'efficientnet_lite3'.
        non_negative (bool, optional): If True, clamp output depth with a
            final ReLU. Defaults to True.
        exportable (bool, optional): Forwarded to the encoder factory.
        channels_last (bool, optional): Stored flag used in forward.
        align_corners (bool, optional): Flag for the fusion blocks.
        blocks (dict, optional): Decoder options; 'expand' doubles the
            channel count per pyramid level.
            NOTE(review): mutable default argument -- safe only because it
            is never mutated here.
    """
    print('Loading weights: ', path)

    super(MidasNet_small, self).__init__()

    # Only use ImageNet-pretrained encoder weights when no checkpoint is given.
    use_pretrained = False if path else True

    self.channels_last = channels_last
    self.blocks = blocks
    self.backbone = backbone

    self.groups = 1

    features1 = features
    features2 = features
    features3 = features
    features4 = features
    self.expand = False
    if 'expand' in self.blocks and self.blocks['expand'] is True:
        # Channel count doubles at every pyramid level.
        self.expand = True
        features1 = features
        features2 = features * 2
        features3 = features * 4
        features4 = features * 8

    self.pretrained, self.scratch = _make_encoder(self.backbone,
                                                  features,
                                                  use_pretrained,
                                                  groups=self.groups,
                                                  expand=self.expand,
                                                  exportable=exportable)

    self.scratch.activation = nn.ReLU(False)

    # Top-down decoder: one fusion block per encoder stage.
    self.scratch.refinenet4 = FeatureFusionBlock_custom(
        features4,
        self.scratch.activation,
        deconv=False,
        bn=False,
        expand=self.expand,
        align_corners=align_corners)
    self.scratch.refinenet3 = FeatureFusionBlock_custom(
        features3,
        self.scratch.activation,
        deconv=False,
        bn=False,
        expand=self.expand,
        align_corners=align_corners)
    self.scratch.refinenet2 = FeatureFusionBlock_custom(
        features2,
        self.scratch.activation,
        deconv=False,
        bn=False,
        expand=self.expand,
        align_corners=align_corners)
    self.scratch.refinenet1 = FeatureFusionBlock_custom(
        features1,
        self.scratch.activation,
        deconv=False,
        bn=False,
        align_corners=align_corners)

    # Output head: upsample 2x and reduce to a single depth channel.
    self.scratch.output_conv = nn.Sequential(
        nn.Conv2d(features,
                  features // 2,
                  kernel_size=3,
                  stride=1,
                  padding=1,
                  groups=self.groups),
        Interpolate(scale_factor=2, mode='bilinear'),
        nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1),
        self.scratch.activation,
        nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
        nn.ReLU(True) if non_negative else nn.Identity(),
        nn.Identity(),
    )

    if path:
        self.load(path)
|
Init.
Args:
path (str, optional): Path to saved model. Defaults to None.
features (int, optional): Number of features. Defaults to 256.
backbone (str, optional): Backbone network for encoder. Defaults to resnet50
|
__init__
|
python
|
ali-vilab/VACE
|
vace/annotators/midas/midas_net_custom.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/midas/midas_net_custom.py
|
Apache-2.0
|
def forward(self, x):
    """Estimate depth from an input image batch.

    Args:
        x (tensor): input image batch

    Returns:
        tensor: depth prediction with the channel dimension squeezed out
    """
    if self.channels_last is True:
        print('self.channels_last = ', self.channels_last)
        # NOTE(review): the returned tensor is discarded, so this call has
        # no effect on `x`; preserved unchanged from the original.
        x.contiguous(memory_format=torch.channels_last)

    # Backbone feature pyramid.
    layer_1 = self.pretrained.layer1(x)
    layer_2 = self.pretrained.layer2(layer_1)
    layer_3 = self.pretrained.layer3(layer_2)
    layer_4 = self.pretrained.layer4(layer_3)

    # Project each pyramid level to the decoder width.
    rn1 = self.scratch.layer1_rn(layer_1)
    rn2 = self.scratch.layer2_rn(layer_2)
    rn3 = self.scratch.layer3_rn(layer_3)
    rn4 = self.scratch.layer4_rn(layer_4)

    # Top-down fusion, coarsest to finest.
    path = self.scratch.refinenet4(rn4)
    path = self.scratch.refinenet3(path, rn3)
    path = self.scratch.refinenet2(path, rn2)
    path = self.scratch.refinenet1(path, rn1)

    return torch.squeeze(self.scratch.output_conv(path), dim=1)
|
Forward pass.
Args:
x (tensor): input data (image)
Returns:
tensor: depth
|
forward
|
python
|
ali-vilab/VACE
|
vace/annotators/midas/midas_net_custom.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/midas/midas_net_custom.py
|
Apache-2.0
|
def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA):
    """Resize the sample to ensure the given minimum size. Keeps aspect ratio.

    Args:
        sample (dict): sample with 'image', 'disparity' and 'mask' entries
        size (tuple): minimum (height, width)
        image_interpolation_method: OpenCV interpolation flag for the image

    Returns:
        tuple: new (height, width)

    NOTE(review): when the sample is already large enough, the *sample dict*
    is returned instead of a size tuple -- the return types are inconsistent;
    confirm callers ignore the return value before relying on it.
    """
    shape = list(sample['disparity'].shape)

    # Already at least as large as requested in both dimensions.
    if shape[0] >= size[0] and shape[1] >= size[1]:
        return sample

    scale = [0, 0]
    scale[0] = size[0] / shape[0]
    scale[1] = size[1] / shape[1]

    # Use the larger factor so both dimensions reach the minimum size.
    scale = max(scale)

    shape[0] = math.ceil(scale * shape[0])
    shape[1] = math.ceil(scale * shape[1])

    # resize -- shape is (H, W) but cv2 expects (W, H), hence shape[::-1]
    sample['image'] = cv2.resize(sample['image'],
                                 tuple(shape[::-1]),
                                 interpolation=image_interpolation_method)

    # Nearest-neighbor for disparity/mask to avoid inventing values.
    sample['disparity'] = cv2.resize(sample['disparity'],
                                     tuple(shape[::-1]),
                                     interpolation=cv2.INTER_NEAREST)
    sample['mask'] = cv2.resize(
        sample['mask'].astype(np.float32),
        tuple(shape[::-1]),
        interpolation=cv2.INTER_NEAREST,
    )
    sample['mask'] = sample['mask'].astype(bool)

    return tuple(shape)
|
Resize the sample to ensure the given size. Keeps aspect ratio.
Args:
sample (dict): sample
size (tuple): image size
Returns:
tuple: new size
|
apply_min_size
|
python
|
ali-vilab/VACE
|
vace/annotators/midas/transforms.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/midas/transforms.py
|
Apache-2.0
|
def __init__(
    self,
    width,
    height,
    resize_target=True,
    keep_aspect_ratio=False,
    ensure_multiple_of=1,
    resize_method='lower_bound',
    image_interpolation_method=cv2.INTER_AREA,
):
    """Init.

    Args:
        width (int): desired output width
        height (int): desired output height
        resize_target (bool, optional):
            True: Resize the full sample (image, mask, target).
            False: Resize image only.
            Defaults to True.
        keep_aspect_ratio (bool, optional):
            True: Keep the aspect ratio of the input sample.
            Output sample might not have the given width and height, and
            resize behaviour depends on the parameter 'resize_method'.
            Defaults to False.
        ensure_multiple_of (int, optional):
            Output width and height is constrained to be a multiple of this
            parameter. Defaults to 1.
        resize_method (str, optional):
            "lower_bound": Output will be at least as large as the given size.
            "upper_bound": Output will be at most as large as the given size.
                (Output size might be smaller than given size.)
            "minimal": Scale as little as possible.
                (Output size might be smaller than given size.)
            Defaults to "lower_bound".
        image_interpolation_method: OpenCV interpolation flag for the image.
    """
    self.__width = width
    self.__height = height

    self.__resize_target = resize_target
    self.__keep_aspect_ratio = keep_aspect_ratio
    self.__multiple_of = ensure_multiple_of
    self.__resize_method = resize_method
    self.__image_interpolation_method = image_interpolation_method
|
Init.
Args:
width (int): desired output width
height (int): desired output height
resize_target (bool, optional):
True: Resize the full sample (image, mask, target).
False: Resize image only.
Defaults to True.
keep_aspect_ratio (bool, optional):
True: Keep the aspect ratio of the input sample.
Output sample might not have the given width and height, and
resize behaviour depends on the parameter 'resize_method'.
Defaults to False.
ensure_multiple_of (int, optional):
Output width and height is constrained to be multiple of this parameter.
Defaults to 1.
resize_method (str, optional):
"lower_bound": Output will be at least as large as the given size.
"upper_bound": Output will be at max as large as the given size. "
"(Output size might be smaller than given size.)"
"minimal": Scale as least as possible. (Output size might be smaller than given size.)
Defaults to "lower_bound".
|
__init__
|
python
|
ali-vilab/VACE
|
vace/annotators/midas/transforms.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/midas/transforms.py
|
Apache-2.0
|
def read_pfm(path):
    """Read a PFM image file.

    Args:
        path (str): path to the .pfm file

    Returns:
        tuple: (data, scale) -- data is an array of shape (H, W, 3) for
            color ('PF') files or (H, W) for grayscale ('Pf') files,
            flipped so the first row is the top of the image.
    """
    with open(path, 'rb') as file:
        # Line 1: magic number ('PF' = color, 'Pf' = grayscale).
        header = file.readline().rstrip()
        if header.decode('ascii') == 'PF':
            color = True
        elif header.decode('ascii') == 'Pf':
            color = False
        else:
            raise Exception('Not a PFM file: ' + path)

        # Line 2: "<width> <height>".
        dim_match = re.match(r'^(\d+)\s(\d+)\s$',
                             file.readline().decode('ascii'))
        if not dim_match:
            raise Exception('Malformed PFM header.')
        width, height = (int(g) for g in dim_match.groups())

        # Line 3: scale; its sign encodes the byte order.
        scale = float(file.readline().decode('ascii').rstrip())
        if scale < 0:
            endian = '<'  # little-endian
            scale = -scale
        else:
            endian = '>'  # big-endian

        data = np.fromfile(file, endian + 'f')
        data = np.reshape(data,
                          (height, width, 3) if color else (height, width))
        # PFM stores rows bottom-to-top; flip to top-to-bottom.
        return np.flipud(data), scale
|
Read pfm file.
Args:
path (str): path to file
Returns:
tuple: (data, scale)
|
read_pfm
|
python
|
ali-vilab/VACE
|
vace/annotators/midas/utils.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/midas/utils.py
|
Apache-2.0
|
def write_pfm(path, image, scale=1):
    """Write a float32 image to a PFM file.

    Args:
        path (str): path to the output file
        image (array): float32 image of shape H x W x 3 (color) or
            H x W / H x W x 1 (grayscale)
        scale (int, optional): scale factor stored in the header; written
            as negative to mark little-endian data. Defaults to 1.

    Raises:
        Exception: if the dtype is not float32 or the shape is unsupported.
    """
    with open(path, 'wb') as file:
        if image.dtype.name != 'float32':
            raise Exception('Image dtype must be float32.')

        # PFM stores rows bottom-to-top.
        image = np.flipud(image)

        if len(image.shape) == 3 and image.shape[2] == 3:  # color image
            color = True
        elif (len(image.shape) == 2
              or len(image.shape) == 3 and image.shape[2] == 1):  # greyscale
            color = False
        else:
            raise Exception(
                'Image must have H x W x 3, H x W x 1 or H x W dimensions.')

        # BUG FIX: the original wrote `'PF\n' if color else 'Pf\n'.encode()`,
        # which encodes only the greyscale branch -- writing a color image
        # passed a str to a binary file and raised TypeError.
        file.write(('PF\n' if color else 'Pf\n').encode())
        file.write(('%d %d\n' % (image.shape[1], image.shape[0])).encode())

        # Negative scale signals little-endian sample data.
        endian = image.dtype.byteorder
        if endian == '<' or endian == '=' and sys.byteorder == 'little':
            scale = -scale
        file.write(('%f\n' % scale).encode())

        image.tofile(file)
|
Write pfm file.
Args:
path (str): path to file
image (array): data
scale (int, optional): Scale. Defaults to 1.
|
write_pfm
|
python
|
ali-vilab/VACE
|
vace/annotators/midas/utils.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/midas/utils.py
|
Apache-2.0
|
def read_image(path):
    """Read image and output RGB image (0-1).

    Args:
        path (str): path to file

    Returns:
        array: RGB image with float values in [0, 1]
    """
    # NOTE(review): cv2.imread returns None for unreadable paths, which
    # would make the ndim access below raise AttributeError -- confirm
    # callers guarantee the path exists.
    img = cv2.imread(path)

    if img.ndim == 2:
        # Grayscale input: replicate to three channels first.
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

    # OpenCV loads BGR; convert to RGB and scale to [0, 1].
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0

    return img
|
Read image and output RGB image (0-1).
Args:
path (str): path to file
Returns:
array: RGB image (0-1)
|
read_image
|
python
|
ali-vilab/VACE
|
vace/annotators/midas/utils.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/midas/utils.py
|
Apache-2.0
|
def resize_image(img):
    """Resize image and make it fit for network.

    Args:
        img (array): HWC image array

    Returns:
        tensor: batched CHW float tensor of shape (1, C, H', W')
    """
    height_orig = img.shape[0]
    width_orig = img.shape[1]

    # Scale so the longer side becomes 384 pixels.
    if width_orig > height_orig:
        scale = width_orig / 384
    else:
        scale = height_orig / 384

    # Round each side up to a multiple of 32 -- presumably the network's
    # stride requirement; TODO confirm against the model architecture.
    height = (np.ceil(height_orig / scale / 32) * 32).astype(int)
    width = (np.ceil(width_orig / scale / 32) * 32).astype(int)

    img_resized = cv2.resize(img, (width, height),
                             interpolation=cv2.INTER_AREA)

    # HWC -> CHW, then add a batch dimension.
    img_resized = (torch.from_numpy(np.transpose(
        img_resized, (2, 0, 1))).contiguous().float())
    img_resized = img_resized.unsqueeze(0)

    return img_resized
|
Resize image and make it fit for network.
Args:
img (array): image
Returns:
tensor: data ready for network
|
resize_image
|
python
|
ali-vilab/VACE
|
vace/annotators/midas/utils.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/midas/utils.py
|
Apache-2.0
|
def resize_depth(depth, width, height):
    """Resize depth map and bring to CPU (numpy).

    Args:
        depth (tensor): depth tensor; the indexing below implies at least
            4 dimensions (batch first)
        width (int): target image width
        height (int): target image height

    Returns:
        array: processed depth as a numpy array of shape (height, width)
    """
    # Drop the batch entry, squeeze singleton dims, move to CPU.
    depth = torch.squeeze(depth[0, :, :, :]).to('cpu')

    # cv2.resize expects (width, height) ordering.
    depth_resized = cv2.resize(depth.numpy(), (width, height),
                               interpolation=cv2.INTER_CUBIC)

    return depth_resized
|
Resize depth map and bring to CPU (numpy).
Args:
depth (tensor): depth
width (int): image width
height (int): image height
Returns:
array: processed depth
|
resize_depth
|
python
|
ali-vilab/VACE
|
vace/annotators/midas/utils.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/midas/utils.py
|
Apache-2.0
|
def write_depth(path, depth, bits=1):
    """Write depth map to pfm and png file.

    Args:
        path (str): filepath without extension
        depth (array): depth map
        bits (int, optional): bytes per png sample (1 -> uint8, 2 -> uint16).
            Defaults to 1.
    """
    write_pfm(path + '.pfm', depth.astype(np.float32))

    depth_min = depth.min()
    depth_max = depth.max()

    max_val = (2**(8 * bits)) - 1

    if depth_max - depth_min > np.finfo('float').eps:
        # Normalize to the full integer range for the png.
        out = max_val * (depth - depth_min) / (depth_max - depth_min)
    else:
        # Constant depth map: write all zeros.
        # BUG FIX: the original used `depth.type`, which is not an ndarray
        # attribute and raised AttributeError; `depth.dtype` is intended.
        out = np.zeros(depth.shape, dtype=depth.dtype)

    if bits == 1:
        cv2.imwrite(path + '.png', out.astype('uint8'))
    elif bits == 2:
        cv2.imwrite(path + '.png', out.astype('uint16'))

    return
|
Write depth map to pfm and png file.
Args:
path (str): filepath without extension
depth (array): depth
|
write_depth
|
python
|
ali-vilab/VACE
|
vace/annotators/midas/utils.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/annotators/midas/utils.py
|
Apache-2.0
|
def forward(
    self,
    hidden_states: torch.Tensor,
    indices_grid: torch.Tensor,
    source_latents: torch.Tensor = None,
    source_mask_latents: torch.Tensor = None,
    encoder_hidden_states: Optional[torch.Tensor] = None,
    timestep: Optional[torch.LongTensor] = None,
    class_labels: Optional[torch.LongTensor] = None,
    cross_attention_kwargs: Dict[str, Any] = None,
    attention_mask: Optional[torch.Tensor] = None,
    encoder_attention_mask: Optional[torch.Tensor] = None,
    skip_layer_mask: Optional[torch.Tensor] = None,
    skip_layer_strategy: Optional[SkipLayerStrategy] = None,
    return_dict: bool = True,
    context_scale: Optional[torch.FloatTensor] = 1.0,
    **kwargs
):
    """
    The [`Transformer2DModel`] forward method.
    Args:
        hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.FloatTensor` of shape `(batch size, channel, height, width)` if continuous):
            Input `hidden_states`.
        indices_grid (`torch.LongTensor` of shape `(batch size, 3, num latent pixels)`):
        encoder_hidden_states ( `torch.FloatTensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
            Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
            self-attention.
        timestep ( `torch.LongTensor`, *optional*):
            Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`.
        class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*):
            Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in
            `AdaLayerZeroNorm`.
        cross_attention_kwargs ( `Dict[str, Any]`, *optional*):
            A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
            `self.processor` in
            [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
        attention_mask ( `torch.Tensor`, *optional*):
            An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
            is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
            negative values to the attention scores corresponding to "discard" tokens.
        encoder_attention_mask ( `torch.Tensor`, *optional*):
            Cross-attention mask applied to `encoder_hidden_states`. Two formats supported:
            * Mask `(batch, sequence_length)` True = keep, False = discard.
            * Bias `(batch, 1, sequence_length)` 0 = keep, -10000 = discard.
            If `ndim == 2`: will be interpreted as a mask, then converted into a bias consistent with the format
            above. This bias will be added to the cross-attention scores.
        skip_layer_mask ( `torch.Tensor`, *optional*):
            A mask of shape `(num_layers, batch)` that indicates which layers to skip. `0` at position
            `layer, batch_idx` indicates that the layer should be skipped for the corresponding batch index.
        skip_layer_strategy ( `SkipLayerStrategy`, *optional*, defaults to `None`):
            Controls which layers are skipped when calculating a perturbed latent for spatiotemporal guidance.
        return_dict (`bool`, *optional*, defaults to `True`):
            Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
            tuple.
    Returns:
        If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
        `tuple` where the first element is the sample tensor.
    """
    # for tpu attention offload 2d token masks are used. No need to transform.
    if not self.use_tpu_flash_attention:
        # ensure attention_mask is a bias, and give it a singleton query_tokens dimension.
        # we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward.
        # we can tell by counting dims; if ndim == 2: it's a mask rather than a bias.
        # expects mask of shape:
        #   [batch, key_tokens]
        # adds singleton query_tokens dimension:
        #   [batch, 1, key_tokens]
        # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
        #   [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
        #   [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
        if attention_mask is not None and attention_mask.ndim == 2:
            # assume that mask is expressed as:
            #   (1 = keep, 0 = discard)
            # convert mask into a bias that can be added to attention scores:
            #   (keep = +0, discard = -10000.0)
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = attention_mask.unsqueeze(1)

        # convert encoder_attention_mask to a bias the same way we do for attention_mask
        if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2:
            encoder_attention_mask = (
                1 - encoder_attention_mask.to(hidden_states.dtype)
            ) * -10000.0
            encoder_attention_mask = encoder_attention_mask.unsqueeze(1)

    # 1. Input
    hidden_states = self.patchify_proj(hidden_states)
    if source_latents is not None:
        source_latents = source_latents.repeat(hidden_states.shape[0], 1, 1)
        if source_mask_latents is not None:
            source_latents = torch.cat([source_latents, source_mask_latents.repeat(hidden_states.shape[0], 1, 1)], dim=-1)
    context_hidden_states = self.patchify_context_proj(source_latents) if source_latents is not None else None

    if self.timestep_scale_multiplier:
        timestep = self.timestep_scale_multiplier * timestep

    if self.positional_embedding_type == "absolute":
        pos_embed_3d = self.get_absolute_pos_embed(indices_grid).to(
            hidden_states.device
        )
        if self.project_to_2d_pos:
            pos_embed = self.to_2d_proj(pos_embed_3d)
        hidden_states = (hidden_states + pos_embed).to(hidden_states.dtype)
        freqs_cis = None
    elif self.positional_embedding_type == "rope":
        freqs_cis = self.precompute_freqs_cis(indices_grid)

    batch_size = hidden_states.shape[0]
    timestep, embedded_timestep = self.adaln_single(
        timestep.flatten(),
        {"resolution": None, "aspect_ratio": None},
        batch_size=batch_size,
        hidden_dtype=hidden_states.dtype,
    )
    # Second dimension is 1 or number of tokens (if timestep_per_token)
    timestep = timestep.view(batch_size, -1, timestep.shape[-1])
    embedded_timestep = embedded_timestep.view(
        batch_size, -1, embedded_timestep.shape[-1]
    )

    if skip_layer_mask is None:
        skip_layer_mask = torch.ones(
            len(self.transformer_blocks), batch_size, device=hidden_states.device
        )

    # 2. Blocks
    if self.caption_projection is not None:
        batch_size = hidden_states.shape[0]
        encoder_hidden_states = self.caption_projection(encoder_hidden_states)
        encoder_hidden_states = encoder_hidden_states.view(
            batch_size, -1, hidden_states.shape[-1]
        )

    # bypass block
    context_hints = []
    for block_idx, block in enumerate(self.transformer_context_blocks):
        if (context_hidden_states is None) or (block_idx not in self.context_num_layers):
            context_hints.append(None)
            continue
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module, return_dict=None):
                def custom_forward(*inputs):
                    if return_dict is not None:
                        return module(*inputs, return_dict=return_dict)
                    else:
                        return module(*inputs)

                return custom_forward

            ckpt_kwargs: Dict[str, Any] = (
                {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
            )
            (hint_context_hidden_states, context_hidden_states) = torch.utils.checkpoint.checkpoint(
                create_custom_forward(block),
                hidden_states,
                context_hidden_states,
                freqs_cis,
                attention_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                timestep,
                cross_attention_kwargs,
                class_labels,
                skip_layer_mask[block_idx],
                skip_layer_strategy,
                **ckpt_kwargs,
            )
        else:
            (hint_context_hidden_states, context_hidden_states) = block(
                hidden_states=hidden_states,
                context_hidden_states=context_hidden_states,
                freqs_cis=freqs_cis,
                attention_mask=attention_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_attention_mask,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
                skip_layer_mask=skip_layer_mask[block_idx],
                skip_layer_strategy=skip_layer_strategy,
            )
        context_hints.append(hint_context_hidden_states)

    # main block
    for block_idx, block in enumerate(self.transformer_blocks):
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module, return_dict=None):
                def custom_forward(*inputs):
                    if return_dict is not None:
                        return module(*inputs, return_dict=return_dict)
                    else:
                        return module(*inputs)

                return custom_forward

            ckpt_kwargs: Dict[str, Any] = (
                {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
            )
            hidden_states = torch.utils.checkpoint.checkpoint(
                create_custom_forward(block),
                hidden_states,
                freqs_cis,
                attention_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                timestep,
                cross_attention_kwargs,
                class_labels,
                skip_layer_mask[block_idx],
                skip_layer_strategy,
                context_hints,
                # BUG FIX: the original had `context_scale` with no trailing
                # comma, so it parsed as `context_scale ** ckpt_kwargs`
                # (exponentiation by a dict) and raised TypeError whenever
                # gradient checkpointing was active. The non-checkpointed
                # branch below passes context_scale correctly.
                context_scale,
                **ckpt_kwargs,
            )
        else:
            hidden_states = block(
                hidden_states=hidden_states,
                freqs_cis=freqs_cis,
                attention_mask=attention_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_attention_mask,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
                skip_layer_mask=skip_layer_mask[block_idx],
                skip_layer_strategy=skip_layer_strategy,
                context_hints=context_hints,
                context_scale=context_scale
            )

    # 3. Output
    scale_shift_values = (
        self.scale_shift_table[None, None] + embedded_timestep[:, :, None]
    )
    shift, scale = scale_shift_values[:, :, 0], scale_shift_values[:, :, 1]
    hidden_states = self.norm_out(hidden_states)
    # Modulation
    hidden_states = hidden_states * (1 + scale) + shift
    hidden_states = self.proj_out(hidden_states)
    if not return_dict:
        return (hidden_states,)

    return Transformer3DModelOutput(sample=hidden_states)
|
The [`Transformer2DModel`] forward method.
Args:
hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.FloatTensor` of shape `(batch size, channel, height, width)` if continuous):
Input `hidden_states`.
indices_grid (`torch.LongTensor` of shape `(batch size, 3, num latent pixels)`):
encoder_hidden_states ( `torch.FloatTensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
self-attention.
timestep ( `torch.LongTensor`, *optional*):
Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`.
class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*):
Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in
`AdaLayerZeroNorm`.
cross_attention_kwargs ( `Dict[str, Any]`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
attention_mask ( `torch.Tensor`, *optional*):
An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
negative values to the attention scores corresponding to "discard" tokens.
encoder_attention_mask ( `torch.Tensor`, *optional*):
Cross-attention mask applied to `encoder_hidden_states`. Two formats supported:
* Mask `(batch, sequence_length)` True = keep, False = discard.
* Bias `(batch, 1, sequence_length)` 0 = keep, -10000 = discard.
If `ndim == 2`: will be interpreted as a mask, then converted into a bias consistent with the format
above. This bias will be added to the cross-attention scores.
skip_layer_mask ( `torch.Tensor`, *optional*):
A mask of shape `(num_layers, batch)` that indicates which layers to skip. `0` at position
`layer, batch_idx` indicates that the layer should be skipped for the corresponding batch index.
skip_layer_strategy ( `SkipLayerStrategy`, *optional*, defaults to `None`):
Controls which layers are skipped when calculating a perturbed latent for spatiotemporal guidance.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
tuple.
Returns:
If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
`tuple` where the first element is the sample tensor.
|
forward
|
python
|
ali-vilab/VACE
|
vace/models/ltx/models/transformers/transformer3d.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/models/ltx/models/transformers/transformer3d.py
|
Apache-2.0
|
def _resize_crop(self, img, oh, ow, normalize=True):
    """
    Resize, center crop, convert to tensor, and normalize.
    """
    src_w, src_h = img.size
    if src_w != ow or src_h != oh:
        # Scale up just enough that both dimensions cover the target size.
        factor = max(ow / src_w, oh / src_h)
        img = img.resize(
            (round(factor * src_w), round(factor * src_h)),
            resample=Image.Resampling.LANCZOS
        )
        assert img.width >= ow and img.height >= oh

        # Crop the central ow x oh region.
        left = (img.width - ow) // 2
        top = (img.height - oh) // 2
        img = img.crop((left, top, left + ow, top + oh))

    if normalize:
        # Map [0, 1] -> [-1, 1] and insert a singleton frame dimension.
        img = TF.to_tensor(img).sub_(0.5).div_(0.5).unsqueeze(1)
    return img
|
Resize, center crop, convert to tensor, and normalize.
|
_resize_crop
|
python
|
ali-vilab/VACE
|
vace/models/utils/preprocessor.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/models/utils/preprocessor.py
|
Apache-2.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.