code
stringlengths 66
870k
| docstring
stringlengths 19
26.7k
| func_name
stringlengths 1
138
| language
stringclasses 1
value | repo
stringlengths 7
68
| path
stringlengths 5
324
| url
stringlengths 46
389
| license
stringclasses 7
values |
---|---|---|---|---|---|---|---|
def test_connect_to_mainnet_by_default(mocker):
    """
    Verify that when mainnet is the configured default network and no
    `--network` option is passed, the runner refuses to connect rather
    than running the tests against mainnet.
    """
    config = mocker.MagicMock()
    config.network = "ethereum:mainnet:node"
    runner = PytestApeRunner(
        config, mocker.MagicMock(), mocker.MagicMock(), mocker.MagicMock(), mocker.MagicMock()
    )
    expected = (
        "Default network is mainnet; unable to run tests on mainnet. "
        "Please specify the network using the `--network` flag or "
        "configure a different default network."
    )
    with pytest.raises(ConfigError, match=expected):
        runner._connect()
|
Tests the condition where mainnet is configured as the default network
and no --network option is passed. It should avoid running the tests
to be safe.
|
test_connect_to_mainnet_by_default
|
python
|
ApeWorX/ape
|
tests/functional/test_test.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/test_test.py
|
Apache-2.0
|
def test_names(self, fixture_map):
    """
    Verify the fixture map contains both the initialized fixtures and the
    properly injected isolation fixtures. Order is EXTREMELY important
    here: it determines the order in which fixtures run, and isolation
    must run before its sister fixtures. Function isolation is expected
    even when no other function-scoped fixtures are used; package
    isolation is absent because no package-scoped fixtures are in play.
    """
    expected = [
        "_session_isolation",
        "foo",
        "_module_isolation",
        "bar",
        "_class_isolation",
        "baz",
        "_function_isolation",
    ]
    assert fixture_map.names == expected
|
Show that we have both the initialized fixtures as well
as the properly injected isolation fixtures. Order is
EXTREMELY important here! It determines the order in which
fixtures run; isolation should run before their sister fixtures.
Function isolation is expected even when not using other function-scoped
fixtures. Package isolation is missing because there are no
package-scoped fixtures being used.
|
test_names
|
python
|
ApeWorX/ape
|
tests/functional/test_test.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/test_test.py
|
Apache-2.0
|
def test_isolate(
    self, isolation_manager, owner, vyper_contract_instance, empty_snapshot_registry
):
    """
    Low-level test simulating how pytest interacts with these yield-based
    isolation fixtures.

    Each isolation generator takes a chain snapshot on its first `next()`
    (setup) and restores that snapshot on its second (teardown), as shown
    by the `myNumber()` values reverting below.
    """
    start_number = vyper_contract_instance.myNumber()
    # One generator per pytest scope, as the plugin would create them.
    session = isolation_manager.isolation(Scope.SESSION)
    module = isolation_manager.isolation(Scope.MODULE)
    function = isolation_manager.isolation(Scope.FUNCTION)
    # Distinct sentinel values so we can tell which scope's state we see.
    expected_session = 10_000_000
    expected_module = 20_000_000
    expected_test = 30_000_000
    # Show we start off clear of snapshots.
    assert all(isolation_manager.snapshots[s].identifier is None for s in Scope), (
        "Setup failed - snapshots not empty"
    )
    # Start session.
    next(session)
    assert isolation_manager.snapshots[Scope.SESSION].identifier is not None
    vyper_contract_instance.setNumber(expected_session, sender=owner)
    # Start module.
    next(module)
    vyper_contract_instance.setNumber(expected_module, sender=owner)
    # Start test.
    next(function)
    vyper_contract_instance.setNumber(expected_test, sender=owner)
    assert vyper_contract_instance.myNumber() == expected_test
    # End test; back to module.
    next(function, None)
    assert vyper_contract_instance.myNumber() == expected_module, "Is not back at module."
    # End module; back to session.
    assert isolation_manager.snapshots[Scope.MODULE].identifier is not None
    next(module, None)
    assert vyper_contract_instance.myNumber() == expected_session, "Is not back at session."
    # Start new module.
    module = isolation_manager.isolation(Scope.MODULE)
    next(module)
    vyper_contract_instance.setNumber(expected_module, sender=owner)
    # Start new test.
    function = isolation_manager.isolation(Scope.FUNCTION)
    next(function)
    vyper_contract_instance.setNumber(expected_test, sender=owner)
    assert vyper_contract_instance.myNumber() == expected_test
    # End test.
    next(function, None)
    assert vyper_contract_instance.myNumber() == expected_module, "(2) Is not back at module."
    # End module.
    assert isolation_manager.snapshots[Scope.MODULE].identifier is not None
    next(module, None)
    # Module snapshot entry is cleared after teardown.
    assert isolation_manager.snapshots[Scope.MODULE].identifier is None
    assert vyper_contract_instance.myNumber() == expected_session, "(2) Is not back at session."
    # End session.
    next(session, None)
    assert vyper_contract_instance.myNumber() == start_number, "(2) Is not back pre-session."
|
Low-level test simulating how pytest interacts with these yield-based
isolation fixtures.
|
test_isolate
|
python
|
ApeWorX/ape
|
tests/functional/test_test.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/test_test.py
|
Apache-2.0
|
def test_parse_rich_tree(vyper_contract_instance):
    """
    When the full selector is used as the method ID, the rendered tree
    label should show only the short method name.
    """
    contract_type = vyper_contract_instance.contract_type
    contract_id = contract_type.name
    method_id = contract_type.methods["setAddress"].selector
    call = CallTreeNode(address=vyper_contract_instance.address, call_type=CallType.CALL)
    data = call.model_dump(by_alias=True, mode="json")
    data["method_id"] = method_id
    data["contract_id"] = contract_id
    label = parse_rich_tree(data).label
    assert label == f"[#ff8c00]{contract_id}[/].[bright_green]setAddress[/]()"
|
Show that when full selector is set as the method ID,
the tree-output only shows the short method name.
|
test_parse_rich_tree
|
python
|
ApeWorX/ape
|
tests/functional/test_trace.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/test_trace.py
|
Apache-2.0
|
def test_transaction_trace_basic_approach_on_failed_call(chain, vyper_contract_instance, not_owner):
    """
    Show we can use the basic trace approach for failed calls as well.
    """
    tx = vyper_contract_instance.setNumber(0, sender=not_owner, raise_on_revert=False)
    trace_data = {
        "call_trace_approach": None,
        "debug_trace_transaction_parameters": {"enableMemory": True},
        "transaction_hash": tx.txn_hash,
        "transaction": tx,
    }
    trace = TransactionTrace.model_validate(trace_data)
    trace.call_trace_approach = TraceApproach.BASIC
    call_tree = trace.get_calltree()
    # Mostly just checking that it did not fail!
    assert call_tree is not None
    assert isinstance(call_tree, CallTreeNode)
|
Show we can use the basic approach for failed calls.
|
test_transaction_trace_basic_approach_on_failed_call
|
python
|
ApeWorX/ape
|
tests/functional/test_trace.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/test_trace.py
|
Apache-2.0
|
def test_call_trace_debug_trace_call_not_supported(owner, vyper_contract_instance):
    """
    When using EthTester, the top-level trace of a call is still visible.
    """
    trace = CallTrace(tx={"to": vyper_contract_instance.address, "from": owner.address})
    assert f"{trace}" == "VyperContract.0x()"
|
When using EthTester, we can still see the top-level trace of a call.
|
test_call_trace_debug_trace_call_not_supported
|
python
|
ApeWorX/ape
|
tests/functional/test_trace.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/test_trace.py
|
Apache-2.0
|
def test_type_1_transactions_using_access_list(ethereum, access_list, key):
    """
    When no type is given and only an accessList is present, the assumed
    type is 1 — an "access-list" transaction.
    """
    txn = ethereum.create_transaction(**{key: access_list})
    assert txn.type == 1
|
If not given type and only given accessList, the assumed type is 1,
an "access-list" transaction.
|
test_type_1_transactions_using_access_list
|
python
|
ApeWorX/ape
|
tests/functional/test_transaction.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/test_transaction.py
|
Apache-2.0
|
def test_type_2_transactions_with_max_fee_and_access_list(ethereum, access_list, key):
    """
    Dynamic-fee transactions also support access lists, so providing
    max_fee together with an access list implies a type 2 transaction.
    """
    max_fee = 1000000000
    txn = ethereum.create_transaction(**{"max_fee": max_fee, key: access_list})
    assert txn.type == 2
    assert txn.max_fee == max_fee
|
Dynamic-fee txns also support access lists, so the presence of max_fee
with access_list implies a type 2 txn.
|
test_type_2_transactions_with_max_fee_and_access_list
|
python
|
ApeWorX/ape
|
tests/functional/test_transaction.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/test_transaction.py
|
Apache-2.0
|
def test_txn_hash_when_access_list_is_raw(ethereum, owner):
    """
    Guards against a condition (never reproduced organically) where a
    transaction's access list contained bytes values and broke
    serialization.
    """
    txn = ethereum.create_transaction(accessList=ACCESS_LIST_HEXBYTES, type=2)
    txn = owner.prepare_transaction(txn)
    txn = owner.sign_transaction(txn)
    # Hack to make access_list raw. I am not sure how a user would get
    # to this state, but somehow they have.
    txn.access_list = ACCESS_LIST_HEXBYTES
    # Ignore the Pydantic warning from access-list being the wrong type.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        hash_hex = to_hex(txn.txn_hash)

    assert hash_hex.startswith("0x")
|
Tests against a condition I was never able to reproduce where
a transaction's access list contained bytes-values and that
causes the serialization to error.
|
test_txn_hash_when_access_list_is_raw
|
python
|
ApeWorX/ape
|
tests/functional/test_transaction.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/test_transaction.py
|
Apache-2.0
|
def test_str_when_data_is_bytes(ethereum):
    """
    Guards against string-encoding errors when a transaction's data is
    raw bytes.
    """
    txn = ethereum.create_transaction(data=HexBytes("0x123"))
    rendered = str(txn)
    assert isinstance(rendered, str)
|
Tests against a condition that would cause transactions to
fail with string-encoding errors.
|
test_str_when_data_is_bytes
|
python
|
ApeWorX/ape
|
tests/functional/test_transaction.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/test_transaction.py
|
Apache-2.0
|
def test_str_when_data_is_long_shows_first_4_bytes(vyper_contract_instance):
    """
    Guards against string-encoding errors with long calldata; the string
    form abbreviates the data to the 4-byte selector plus an ellipsis.
    """
    txn = vyper_contract_instance.setNumber.as_transaction(123)
    rendered = str(txn)
    assert isinstance(rendered, str)
    assert "data: 0x3fb5c1cb..." in rendered
|
Tests against a condition that would cause transactions to
fail with string-encoding errors.
|
test_str_when_data_is_long_shows_first_4_bytes
|
python
|
ApeWorX/ape
|
tests/functional/test_transaction.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/test_transaction.py
|
Apache-2.0
|
def test_override_annotated_fields():
    """
    This test is to prove that a user may use an `int` for a base-class
    when the API field is described as a `HexInt`.
    """
    class MyTransaction(TransactionAPI):
        @property
        def txn_hash(self) -> HexBytes:
            return HexBytes("")

        def serialize_transaction(self) -> bytes:
            return b""

        chain_id: Optional[int] = None  # The base type is `Optional[HexInt]`.

    # NOTE(review): a very large int value — presumably chosen to exercise
    # big-integer handling through the overridden annotation; confirm.
    chain_id = 123123123123123123123123123123
    tx_type = 120
    my_tx = MyTransaction.model_validate({"chain_id": chain_id, "type": tx_type})
    assert my_tx.chain_id == chain_id
    assert my_tx.type == tx_type
|
This test is to prove that a user may use an `int` for a base-class
when the API field is described as a `HexInt`.
|
test_override_annotated_fields
|
python
|
ApeWorX/ape
|
tests/functional/test_transaction.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/test_transaction.py
|
Apache-2.0
|
def test_none(self):
    """
    Passing `None` for a required `HexInt` used to raise an unhelpful
    conversion error; instead, Pydantic's normal validation failure
    should surface.
    """
    class MyModel(BaseModel):
        an_int: HexInt

    with pytest.raises(ValidationError, match=".*Input should be a valid integer.*"):
        _ = MyModel(an_int=None)
|
Was getting unhelpful conversion errors here. We should instead
let Pydantic fail as it normally does in this situation.
|
test_none
|
python
|
ApeWorX/ape
|
tests/functional/test_types.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/test_types.py
|
Apache-2.0
|
def test_separaters(convert, sep):
    """
    Show that separators, such as commas and underscores, are OK
    in currency-string values, e.g. "10,000 ETH" is valid.
    """
    currency_str = f"10{sep}000 ETHER"
    actual = convert(currency_str, int)
    expected = TEN_THOUSAND_ETHER_IN_WEI
    assert actual == expected
|
Show that separates, such as commands and underscores, are OK
in currency-string values, e.g. "10,000 ETH" is valid.
|
test_separaters
|
python
|
ApeWorX/ape
|
tests/functional/conversion/test_ether.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/conversion/test_ether.py
|
Apache-2.0
|
def test_convert_logs_and_passes_errors_from_is_convertible(conversion_manager, ape_caplog):
    """
    When checking if something is convertible, and is_convertible errors
    for whatever reason, log the error and consider it "not convertible".
    More than likely, it isn't by that converter and is a plugin-error.
    """
    error_msg = "Unexpected error!"

    class ProblematicConverter(ConverterAPI):
        # Simulates a plugin whose convertibility check itself raises.
        def is_convertible(self, value: Any) -> bool:
            raise ValueError(error_msg)

        def convert(self, value: Any) -> Any:
            return value

    # Register the broken converter as the only converter for `dict`.
    conversion_manager._converters[dict] = (ProblematicConverter(),)
    expected = f"Issue while checking `ProblematicConverter.is_convertible()`: {error_msg}"
    # The conversion still fails overall (nothing could convert the value)...
    with pytest.raises(ConversionError):
        _ = conversion_manager.convert(123, dict)

    # ...but the checker's error was logged rather than propagated.
    assert expected in ape_caplog.head
|
When checking if something is convertible, and is_convertible errors
for whatever reason, log the error and consider it "not convertible".
More than likely, it isn't by that converter and is a plugin-error.
|
test_convert_logs_and_passes_errors_from_is_convertible
|
python
|
ApeWorX/ape
|
tests/functional/conversion/test_misc.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/conversion/test_misc.py
|
Apache-2.0
|
def contract_with_call_depth_geth(
    owner, geth_provider, get_contract_type, leaf_contract_geth, middle_contract_geth
):
    """
    A contract whose methods call into other local contracts, used by any
    test that needs nested calls, such as call trees or event-name
    clashes.
    """
    container = ContractContainer(get_contract_type("ContractA"))
    return owner.deploy(container, middle_contract_geth, leaf_contract_geth)
|
This contract has methods that make calls to other local contracts
and is used for any testing that requires nested calls, such as
call trees or event-name clashes.
|
contract_with_call_depth_geth
|
python
|
ApeWorX/ape
|
tests/functional/geth/conftest.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/geth/conftest.py
|
Apache-2.0
|
def test_contract_call_show_trace(geth_contract, geth_account):
    """
    `show_trace=True` must not corrupt the returned value.
    Note: The provider uses `debug_traceCall` to get the result instead of
    `eth_call`.
    """
    geth_contract.setNumber(203, sender=geth_account)
    assert geth_contract.myNumber(show_trace=True) == 203
|
Show the `show_trace=True` does not corrupt the value.
Note: The provider uses `debug_traceCall` to get the result instead of
`eth_call`.
|
test_contract_call_show_trace
|
python
|
ApeWorX/ape
|
tests/functional/geth/test_contract.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/geth/test_contract.py
|
Apache-2.0
|
def test_revert_out_of_gas_error(geth_account, geth_second_account, geth_provider):
    """
    Transferring with not quite enough gas should raise an out-of-gas
    error that still carries the transaction.
    """
    with pytest.raises(OutOfGasError) as err:
        geth_account.transfer(geth_second_account, 1, gas_limit=1)

    assert err.value.txn is not None
|
Attempt to transact with not quite enough gas. We should get an error saying
we ran out of gas.
|
test_revert_out_of_gas_error
|
python
|
ApeWorX/ape
|
tests/functional/geth/test_contract.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/geth/test_contract.py
|
Apache-2.0
|
def test_get_proxy_from_explorer(
    mock_explorer,
    create_mock_sepolia,
    safe_proxy_container,
    geth_account,
    vyper_contract_container,
    geth_provider,
    chain,
):
    """
    Simulated when you get a contract from Etherscan for the first time
    but that contract is a proxy. We expect both proxy and target ABIs
    to be cached under the proxy's address.
    """
    target_contract = geth_account.deploy(vyper_contract_container, 10011339315)
    proxy_contract = geth_account.deploy(safe_proxy_container, target_contract.address)
    # Ensure both of these are not cached so we have to rely on our fake explorer.
    del chain.contracts[target_contract.address]
    del chain.contracts[proxy_contract.address]
    # Sanity check.
    with pytest.raises(ContractNotFoundError):
        _ = chain.contracts.instance_at(proxy_contract.address)

    def get_contract_type(address, *args, **kwargs):
        # Mock etherscan backend: knows only our two deployed contracts.
        if address == target_contract.address:
            return target_contract.contract_type
        elif address == proxy_contract.address:
            return proxy_contract.contract_type

        raise ValueError("Fake explorer only knows about proxy and target contracts.")

    with create_mock_sepolia() as network:
        # Set up our network to use our fake explorer.
        mock_explorer.get_contract_type.side_effect = get_contract_type
        network.__dict__["explorer"] = mock_explorer
        # Typical flow: user attempts to get an un-cached contract type from Etherscan.
        # That contract may be a proxy, in which case we should get a type
        # w/ both proxy ABIs and the target ABIs.
        contract_from_explorer = chain.contracts.instance_at(proxy_contract.address)
        # Undo the explorer override so later tests see the real one.
        network.__dict__.pop("explorer", None)
        # Ensure we can call proxy methods!
        assert contract_from_explorer.masterCopy  # No attr error!
        # Ensure we can call target methods!
        assert contract_from_explorer.myNumber  # No attr error!
|
Simulated when you get a contract from Etherscan for the first time
but that contract is a proxy. We expect both proxy and target ABIs
to be cached under the proxy's address.
|
test_get_proxy_from_explorer
|
python
|
ApeWorX/ape
|
tests/functional/geth/test_contracts_cache.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/geth/test_contracts_cache.py
|
Apache-2.0
|
def mock_geth_sepolia(ethereum, geth_provider, geth_contract):
    """
    Fixture that temporarily tricks Ape into thinking the local network
    is Sepolia, so features requiring a live network can be tested.

    Yields:
        The provider's network object, renamed to "sepolia".
    """
    # Ensuring contract exists before hack.
    # This allows the network to be past genesis which is more realistic.
    _ = geth_contract
    geth_provider.network.name = "sepolia"
    try:
        yield geth_provider.network
    finally:
        # Always restore the real network name, even when the consuming
        # test fails; otherwise the "sepolia" rename leaks into later tests.
        geth_provider.network.name = LOCAL_NETWORK_NAME
|
Temporarily tricks Ape into thinking the local network
is Sepolia so we can test features that require a live
network.
|
mock_geth_sepolia
|
python
|
ApeWorX/ape
|
tests/functional/geth/test_network_manager.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/geth/test_network_manager.py
|
Apache-2.0
|
def test_parse_network_choice_evmchains(networks, connection_str):
    """
    Show we can connect (without having a plugin installed) to a network
    that evm-chains knows about.
    """
    with networks.parse_network_choice(connection_str) as moon_provider:
        assert moon_provider.network.name == "moonriver"
        assert moon_provider.network.ecosystem.name == "moonbeam"
        # When including the HTTP URL in the network choice,
        # `provider.network_choice` should also include it.
        # This ensures when relying on `provider.network_choice` for
        # multiple connections that they all use the same HTTP URI.
        if "http" in connection_str:
            expected_choice = f"moonbeam:moonriver:{connection_str}"
        else:
            expected_choice = f"{connection_str}:node"

        assert moon_provider.network_choice == expected_choice
|
Show we can (without having a plugin installed) connect to a network
that evm-chains knows about.
|
test_parse_network_choice_evmchains
|
python
|
ApeWorX/ape
|
tests/functional/geth/test_network_manager.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/geth/test_network_manager.py
|
Apache-2.0
|
def test_uri_non_dev_and_not_configured(mocker, ethereum):
    """
    If the URI was not configured and we are not using a dev network
    (local or -fork), resolving `provider.uri` should fail rather than
    fall back to localhost.
    """
    network = ethereum.sepolia.model_copy(deep=True)
    # NOTE: This may fail if using real network names that evmchains would
    # know about.
    network.name = "gorillanet"
    network.ecosystem.name = "gorillas"
    provider = Node.model_construct(network=network)
    with pytest.raises(ProviderError):
        _ = provider.uri

    # Show that if an evm-chains _does_ exist, it will use that.
    patch = mocker.patch("ape_ethereum.provider.Web3Provider._get_random_rpc")
    # The following URL is made up (please keep example.com).
    expected = "https://gorillas.example.com/v1/rpc"
    # Reuse `expected` so the asserted value and the patched return value
    # cannot drift apart (they were duplicated literals before).
    patch.return_value = expected
    actual = provider.uri
    assert actual == expected
|
If the URI was not configured and we are not using a dev
network (local or -fork), then it should fail, rather than
use local-host.
|
test_uri_non_dev_and_not_configured
|
python
|
ApeWorX/ape
|
tests/functional/geth/test_provider.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/geth/test_provider.py
|
Apache-2.0
|
def test_connect_to_chain_that_started_poa(mock_web3, web3_factory, ethereum):
    """
    Ensure that when connecting to a chain that
    started out as PoA, such as Sepolia, we include
    the right middleware. Note: even if the chain
    is no longer PoA, we still need the middleware
    to fetch blocks during the PoA portion of the chain.
    """
    # Make block fetches raise the error web3 uses for PoA-style extra data.
    mock_web3.eth.get_block.side_effect = ExtraDataLengthError

    def make_request(rpc, arguments):
        # Only `eth_chainId` needs a real-looking response for this test.
        if rpc == "eth_chainId":
            return {"result": ethereum.sepolia.chain_id}

        return None

    mock_web3.provider.make_request.side_effect = make_request
    web3_factory.return_value = mock_web3
    provider = ethereum.sepolia.get_provider("node")
    provider.provider_settings = {"uri": "http://node.example.com"}  # fake
    provider.connect()
    # Verify PoA middleware was added (injected at layer 0).
    assert mock_web3.middleware_onion.inject.call_args[0] == (ExtraDataToPOAMiddleware,)
    assert mock_web3.middleware_onion.inject.call_args[1] == {"layer": 0}
|
Ensure that when connecting to a chain that
started out as PoA, such as Sepolia, we include
the right middleware. Note: even if the chain
is no longer PoA, we still need the middleware
to fetch blocks during the PoA portion of the chain.
|
test_connect_to_chain_that_started_poa
|
python
|
ApeWorX/ape
|
tests/functional/geth/test_provider.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/geth/test_provider.py
|
Apache-2.0
|
def test_connect_using_only_ipc_for_uri_already_connected(project, networks, geth_provider):
    """
    Shows we can remote-connect to an already-running node when it
    exposes its IPC path.
    """
    ipc = f"{geth_provider.ipc_path}"
    with project.temp_config(node={"ethereum": {"local": {"uri": ipc}}}):
        with networks.ethereum.local.use_provider("node") as node:
            assert node.uri == ipc
|
Shows we can remote-connect to a node that is already running when it exposes its IPC path.
|
test_connect_using_only_ipc_for_uri_already_connected
|
python
|
ApeWorX/ape
|
tests/functional/geth/test_provider.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/geth/test_provider.py
|
Apache-2.0
|
def test_get_block_pending(geth_provider, geth_account, geth_second_account, accounts):
    """
    Pending timestamps can be weird.
    This ensures we can check those are various strange states of geth.

    NOTE(review): this test is byte-for-byte identical to
    `test_get_pending_block`; one of the two is likely redundant.
    """
    actual = geth_provider.get_block("latest")
    assert isinstance(actual, Block)
    snap = geth_provider.snapshot()
    # Transact to increase block
    geth_account.transfer(geth_second_account, "1 gwei")
    actual = geth_provider.get_block("latest")
    assert isinstance(actual, Block)
    # Restore state before transaction
    geth_provider.restore(snap)
    actual = geth_provider.get_block("latest")
    assert isinstance(actual, Block)
|
Pending timestamps can be weird.
This ensures we can check those are various strange states of geth.
|
test_get_block_pending
|
python
|
ApeWorX/ape
|
tests/functional/geth/test_provider.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/geth/test_provider.py
|
Apache-2.0
|
def test_get_pending_block(geth_provider, geth_account, geth_second_account, accounts):
    """
    Pending timestamps can be weird.
    This ensures we can check those are various strange states of geth.

    NOTE(review): this test is byte-for-byte identical to
    `test_get_block_pending`; one of the two is likely redundant.
    """
    actual = geth_provider.get_block("latest")
    assert isinstance(actual, Block)
    snap = geth_provider.snapshot()
    # Transact to increase block
    geth_account.transfer(geth_second_account, "1 gwei")
    actual = geth_provider.get_block("latest")
    assert isinstance(actual, Block)
    # Restore state before transaction
    geth_provider.restore(snap)
    actual = geth_provider.get_block("latest")
    assert isinstance(actual, Block)
|
Pending timestamps can be weird.
This ensures we can check those are various strange states of geth.
|
test_get_pending_block
|
python
|
ApeWorX/ape
|
tests/functional/geth/test_provider.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/geth/test_provider.py
|
Apache-2.0
|
def test_send_transaction_exceed_block_gas_limit(chain, geth_provider, geth_contract, geth_account):
    """
    Shows that when a transaction's gas limit exceeds the block gas
    limit, the local geth node automatically retries it with a new gas
    value.
    """
    raw_tx = geth_contract.setNumber.as_transaction(23333322101, sender=geth_account)
    prepped = geth_account.prepare_transaction(raw_tx)
    # Push the gas limit past the block's.
    prepped.gas_limit += 100000
    signed = geth_account.sign_transaction(prepped)
    expected_gas_limit = chain.blocks.head.gas_limit
    geth_provider.send_transaction(signed)
    sent = geth_account.history[-1]
    assert sent.gas_limit == expected_gas_limit
|
Shows that the local geth node will retry the transaction
with a new gas if this happens, automatically.
|
test_send_transaction_exceed_block_gas_limit
|
python
|
ApeWorX/ape
|
tests/functional/geth/test_provider.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/geth/test_provider.py
|
Apache-2.0
|
def test_send_call_base_class_block_id(networks, ethereum, mocker):
    """
    Regression test for a bug in the provider base class where `block_id`
    was not forwarded to `_prepare_call`.
    Note: can't use ape-node as-is, as it overrides `send_call()`.
    """
    provider = mocker.MagicMock()
    provider.network.name = "mainnet"

    def hacked_send_call(*args, **kwargs):
        # Route through the real base-class implementation with the mock
        # standing in as `self`.
        return EthereumNodeProvider.send_call(provider, *args, **kwargs)

    provider.send_call = hacked_send_call
    tx = ethereum.create_transaction()
    block_id = 567
    orig = networks.active_provider
    networks.active_provider = provider
    try:
        _ = provider.send_call(tx, block_id=block_id, skip_trace=True) == HexStr("0x")
    finally:
        # Always restore the real provider, even if the call raises;
        # otherwise the mock leaks into subsequent tests.
        networks.active_provider = orig

    actual = provider._prepare_call.call_args[-1]["block_identifier"]
    assert actual == block_id
|
Testing a case where was a bug in the base class for most providers.
Note: can't use ape-node as-is, as it overrides `send_call()`.
|
test_send_call_base_class_block_id
|
python
|
ApeWorX/ape
|
tests/functional/geth/test_provider.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/geth/test_provider.py
|
Apache-2.0
|
def test_send_call_handles_contract_type_failure(mocker, geth_provider, tx_for_call, mock_web3):
    """
    Fixes an issue where we would get a recursion error during
    handling a CALL failure, which would happen during proxy detection.
    """
    orig_web3 = geth_provider._web3

    def sfx(rpc, arguments, *args, **kwargs):
        # Force `eth_call` (with a truthy first argument) to revert;
        # delegate every other RPC to the real web3 provider.
        if rpc == "eth_call" and arguments[0]:
            raise ContractLogicError()

        return orig_web3.provider.make_request(rpc, arguments, *args, **kwargs)

    # Do this to trigger re-entrancy.
    mock_get = mocker.MagicMock()
    mock_get.side_effect = RecursionError
    orig = geth_provider.chain_manager.contracts.get
    geth_provider.chain_manager.contracts.get = mock_get
    mock_web3.provider.make_request.side_effect = sfx
    geth_provider._web3 = mock_web3
    try:
        # Despite the RecursionError from contract lookup, the failure
        # still surfaces as a VirtualMachineError.
        with pytest.raises(VirtualMachineError):
            geth_provider.send_call(tx_for_call)
    finally:
        # Undo the monkey-patching so other tests see the real objects.
        geth_provider._web3 = orig_web3
        geth_provider.chain_manager.contracts.get = orig
|
Fixes an issue where we would get a recursion error during
handling a CALL failure, which would happen during proxy detection.
|
test_send_call_handles_contract_type_failure
|
python
|
ApeWorX/ape
|
tests/functional/geth/test_provider.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/geth/test_provider.py
|
Apache-2.0
|
def test_send_call_skip_trace(mocker, geth_provider, ethereum, tx_for_call):
    """
    When `skip_trace=True` is passed to `send_call` (as proxy-checking
    does), exception handling for that call should not fetch any traces:
    proxy checks fail consistently and getting their traces is
    unnecessary.
    """
    eth_call_spy = mocker.spy(geth_provider, "_eth_call")
    contracts_get_spy = mocker.spy(geth_provider.chain_manager.contracts, "get")
    geth_provider.send_call(tx_for_call, skip_trace=True)
    assert eth_call_spy.call_args[1]["skip_trace"] is True
    assert contracts_get_spy.call_count == 0
|
When we pass skip_trace=True to `send_call` (as proxy-checking des), we should
also not bother with any traces in exception handling for that call, as proxy-
checks fail consistently and getting their traces is unnecessary.
|
test_send_call_skip_trace
|
python
|
ApeWorX/ape
|
tests/functional/geth/test_provider.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/geth/test_provider.py
|
Apache-2.0
|
def test_disconnect_does_not_delete_unrelated_files_in_given_data_dir(networks):
    """
    One time, I used a data-dir containing other files I didn't want to lose. GethDev
    deleted the entire folder during `.disconnect()`, and it was tragic. Ensure this does
    not happen to anyone else.
    """
    with create_tempdir() as temp_dir:
        keep_file = temp_dir / "dont_delete_me_plz.txt"
        keep_file.write_text("Please don't delete me.")
        geth_dev = GethDev(network=networks.ethereum.local)
        geth_dev._process = GethDevProcess.from_uri(
            "path/to/geth.ipc",
            temp_dir / "geth",
            block_time=1,
            generate_accounts=False,
            initialize_chain=False,
        )
        geth_dev.disconnect()
        # The unrelated file survives; only geth's own data is gone.
        assert keep_file.is_file()
        assert not (temp_dir / "geth" / "genesis.json").is_file()
|
One time, I used a data-dir containing other files I didn't want to lose. GethDev
deleted the entire folder during `.disconnect()`, and it was tragic. Ensure this does
not happen to anyone else.
|
test_disconnect_does_not_delete_unrelated_files_in_given_data_dir
|
python
|
ApeWorX/ape
|
tests/functional/geth/test_provider.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/geth/test_provider.py
|
Apache-2.0
|
def test_is_rpc_ready_false(self, mocker, data_folder):
    """
    Both Geth and Reth nodes raise a plain URLError when the node is not
    running.
    """
    patched = mocker.patch("ape_node.provider.urlopen")
    patched.side_effect = URLError("Unable to connect")
    process = GethDevProcess.from_uri("path/to/geth.ipc", data_folder)
    assert not process.is_rpc_ready
|
Both Geth and Reth nodes raise simple URLError when the node is not running.
|
test_is_rpc_ready_false
|
python
|
ApeWorX/ape
|
tests/functional/geth/test_provider.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/geth/test_provider.py
|
Apache-2.0
|
def test_is_rpc_ready_true_geth(self, mocker, data_folder):
    """
    Geth raises no error when the RPC is ready.
    """
    patched = mocker.patch("ape_node.provider.urlopen")
    patched.return_value = None
    process = GethDevProcess.from_uri("path/to/geth.ipc", data_folder)
    assert process.is_rpc_ready
|
Geth has no error when the RPC is ready.
|
test_is_rpc_ready_true_geth
|
python
|
ApeWorX/ape
|
tests/functional/geth/test_provider.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/geth/test_provider.py
|
Apache-2.0
|
def test_is_rpc_ready_true_reth(self, mocker, data_folder):
"""
Reth raises HTTPError("Method not found") when the RPC is ready.
"""
urlopen_patch = mocker.patch("ape_node.provider.urlopen")
urlopen_patch.side_effect = HTTPError("127.0.0.1", 404, "method not found", 0, 0) # type: ignore
geth_dev = GethDevProcess.from_uri("path/to/geth.ipc", data_folder)
assert geth_dev.is_rpc_ready
|
Reth raises HTTPError("Method not found") when the RPC is ready.
|
test_is_rpc_ready_true_reth
|
python
|
ApeWorX/ape
|
tests/functional/geth/test_provider.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/geth/test_provider.py
|
Apache-2.0
|
def test_command_reth(self, mocker, data_folder, ignore_bin_check):
"""
Showing we get usable kwargs for a reth --dev node.
"""
reth_dev = GethDevProcess.from_uri(
"path/to/reth.ipc", data_folder, executable=["reth", "node"], verify_bin=False
)
actual = reth_dev.command
assert "reth" in actual
assert "node" in actual
assert "--http.port" in actual
assert "--dev" in actual
# Geth only
assert "localhost" not in actual
assert "--maxpeers" not in actual
assert "--password" not in actual
assert "--nodiscover" not in actual
assert "--networkid" not in actual
|
Showing we get usable kwargs for a reth --dev node.
|
test_command_reth
|
python
|
ApeWorX/ape
|
tests/functional/geth/test_provider.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/geth/test_provider.py
|
Apache-2.0
|
def test_await_confirmations_zero_confirmations(mocker, geth_account, geth_contract):
"""
We still need to wait for the nonce to increase when required confirmations is 0.
Otherwise, we sometimes ran into nonce-issues when transacting too fast with
the same account.
"""
tx = geth_contract.setNumber(545921972923759, sender=geth_account, required_confirmations=0)
spy = mocker.spy(tx, "_await_sender_nonce_increment")
tx.await_confirmations()
assert tx.confirmed
assert spy.call_count == 1
|
We still need to wait for the nonce to increase when required confirmations is 0.
Otherwise, we sometimes ran into nonce-issues when transacting too fast with
the same account.
|
test_await_confirmations_zero_confirmations
|
python
|
ApeWorX/ape
|
tests/functional/geth/test_receipt.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/geth/test_receipt.py
|
Apache-2.0
|
def test_return_value_tuple(geth_provider):
"""
Tests against a bug where a trace in a certain state (HH returning a tuple) was
unable to get the correct return_value.
"""
transaction_hash = "0xa4803961e06c673b255ca6af78d00df3c0ebef0b2f23325a1457eaaf20914e8e"
abi = MethodABI(
type="function",
name="newAccountant",
stateMutability="nonpayable",
inputs=[
ABIType(name="feeManager", type="address", components=None, internal_type="address"),
ABIType(name="feeRecipient", type="address", components=None, internal_type="address"),
ABIType(
name="defaultManagement", type="uint16", components=None, internal_type="uint16"
),
ABIType(
name="defaultPerformance", type="uint16", components=None, internal_type="uint16"
),
ABIType(name="defaultRefund", type="uint16", components=None, internal_type="uint16"),
ABIType(name="defaultMaxFee", type="uint16", components=None, internal_type="uint16"),
ABIType(name="defaultMaxGain", type="uint16", components=None, internal_type="uint16"),
ABIType(name="defaultMaxLoss", type="uint16", components=None, internal_type="uint16"),
],
outputs=[
ABIType(name="_newAccountant", type="address", components=None, internal_type="address")
],
)
calltree = {
"call_type": "CALL",
"address": "0x5fbdb2315678afecb367f032d93f642f64180aa3",
"value": 0,
"depth": 0,
"gas_limit": None,
"gas_cost": None,
"calldata": "0x184ac61b000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb9226600000000000000000000000015d34aaf54267db7d7c367839aaf71a00a2c6a65000000000000000000000000000000000000000000000000000000000000006400000000000000000000000000000000000000000000000000000000000003e80000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000027100000000000000000000000000000000000000000000000000000000000000000",
"returndata": "0x000000000000000000000000a16e02e87b7454126e5e10d957a927a7f5b5d2be",
"calls": [
{
"call_type": "CREATE",
"address": "0xa16e02e87b7454126e5e10d957a927a7f5b5d2be",
"value": 0,
"depth": 1,
"gas_limit": None,
"gas_cost": None,
"calldata": "0x",
"returndata": "0x608060405234801561001057600080fd5b50600436106101c45760003560e01c80639b3b6955116100f9578063d0fb020311610097578063e2a85ce411610071578063e2a85ce4146104e3578063e74b981b146104f6578063f94c53c714610509578063fb9321081461051157600080fd5b8063d0fb02031461049d578063d8609c5b146104b0578063de1eb9a3146104c357600080fd5b8063b53d68e5116100d3578063b53d68e51461043b578063b543503e14610444578063c7c504b914610457578063ceb68c231461048a57600080fd5b80639b3b6955146103a85780639e09ed5f146103bb578063a622ee7c1461040857600080fd5b806363453ae11161016657806382e4dd6f1161014057806382e4dd6f1461033f5780638a4adf241461035a578063921f8a8f1461036d578063962941781461039557600080fd5b806363453ae11461027157806367bee7e9146102845780637b5d7b651461029757600080fd5b8063256b5a02116101a2578063256b5a021461022157806346904840146102345780635783fe39146102475780635cece03a1461025e57600080fd5b8063015cf150146101c957806303579dca146101f957806324be66281461020e575b600080fd5b6004546101dc906001600160a01b031681565b6040516001600160a01b0390911681526020015b60405180910390f35b61020c610207366004611ea3565b610524565b005b61020c61021c366004611ec7565b610595565b61020c61022f366004611ea3565b610622565b6002546101dc906001600160a01b031681565b61025060005481565b6040519081526020016101f0565b61020c61026c366004611ee0565b6106e3565b61020c61027f366004611ea3565b610757565b61020c610292366004611ea3565b6107c5565b6102fa6102a5366004611ea3565b60076020526000908152604090205461ffff80821691620100008104821691600160201b8204811691600160301b8104821691600160401b8204811691600160501b810490911690600160601b900460ff1687565b6040805161ffff988916815296881660208801529487169486019490945291851660608501528416608084015290921660a082015290151560c082015260e0016101f0565b61034760c881565b60405161ffff90911681526020016101f0565b6003546101dc906001600160a01b031681565b61038061037b366004611f19565b610886565b604080519283526020830191909152016101f0565b61020c6103a3366004611f4e565b610d90565b61020c6103b6366004611ea3565b610e20565b6005546102fa9061ffff80821691620100008104821691600160201b820481169160016
0301b8104821691600160401b8204811691600160501b810490911690600160601b900460ff1687565b61042b610416366004611ea3565b60066020526000908152604090205460ff1681565b60405190151581526020016101f0565b61034761138881565b61020c610452366004611ea3565b610eb7565b61042b610465366004611ea3565b6001600160a01b0316600090815260076020526040902054600160601b900460ff1690565b61020c610498366004611ea3565b610f09565b6001546101dc906001600160a01b031681565b61020c6104be366004611f8c565b6110b4565b6104d66104d1366004611ea3565b61138f565b6040516101f09190612014565b61020c6104f1366004612074565b6114cf565b61020c610504366004611ea3565b6114ed565b61020c61158c565b61020c61051f366004611f4e565b611631565b6040516370a0823160e01b81523060048201526105929082906001600160a01b038216906370a0823190602401602060405180830381865afa15801561056e573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906103a391906120e8565b50565b61059d61168e565b6127108111156105e75760405162461bcd60e51b815260206004820152601060248201526f686967686572207468616e203130302560801b60448201526064015b60405180910390fd5b60008190556040518181527f18182e268b61d2aada98f23ade23b0ea133d5b0b6712dbfa682dc6da29941c229060200160405180910390a150565b61062a6116d9565b6001600160a01b03811660009081526006602052604090205460ff16156106835760405162461bcd60e51b815260206004820152600d60248201526c185b1c9958591e481859191959609a1b60448201526064016105de565b6001600160a01b03811660008181526006602052604090819020805460ff1916600190811790915590517fce96c4db32686d3f0011df1abea0ab6c5794b848868dcbece79961fef7e8198d916106d891612101565b60405180910390a250565b6106eb61168e565b6001600160a01b03821660009081526006602052604090205460ff166107235760405162461bcd60e51b81526004016105de90612129565b6001600160a01b0391821660009081526008602090815260408083209390941682529190915220805460ff19166001179055565b6040516370a0823160e01b81523060048201526105929082906001600160a01b038216906370a0823190602401602060405180830381865afa1580156107a1573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061051f91906120e8565b610
7cd61168e565b6001600160a01b038116600090815260076020526040902054600160601b900460ff166108315760405162461bcd60e51b8152602060048201526012602482015271139bc818dd5cdd1bdb481999595cc81cd95d60721b60448201526064016105de565b6001600160a01b03811660008181526007602052604080822080546cffffffffffffffffffffffffff19169055517f3e6648a1d6918f893e09d7f2a385f04bdafbf8ad899b255b7f40e02c967b55879190a250565b600080610891611739565b33600090815260076020908152604091829020825160e081018452905461ffff8082168352620100008204811693830193909352600160201b8104831693820193909352600160301b830482166060820152600160401b830482166080820152600160501b830490911660a0820152600160601b90910460ff16151560c0820181905261098957506040805160e08101825260055461ffff808216835262010000820481166020840152600160201b8204811693830193909352600160301b810483166060830152600160401b810483166080830152600160501b810490921660a0820152600160601b90910460ff16151560c08201525b6040516339ebf82360e01b81526001600160a01b038716600482015260009033906339ebf82390602401608060405180830381865afa1580156109d0573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906109f49190612152565b825190915061ffff1615610a59576000816020015142610a1491906121dc565b90506301e18558612710846000015161ffff16838560400151610a3791906121ef565b610a4191906121ef565b610a4b9190612206565b610a559190612206565b9450505b8515610b5f573360009081526008602090815260408083206001600160a01b038b16845290915290205460ff1615610abb573360009081526008602090815260408083206001600160a01b038b1684529091529020805460ff19169055610b2d565b608082015161ffff1615610b2d57612710826080015161ffff168260400151610ae491906121ef565b610aee9190612206565b861115610b2d5760405162461bcd60e51b815260206004820152600d60248201526c3a37b79036bab1b41033b0b4b760991b60448201526064016105de565b612710826020015161ffff1687610b4491906121ef565b610b4e9190612206565b610b589085612228565b9350610d4b565b3360009081526008602090815260408083206001600160a01b038b16845290915290205460ff1615610bbb573360009081526008602090815260408083206001600160a01b038b16845290915290208
05460ff19169055610c31565b6127108260a0015161ffff161015610c31576127108260a0015161ffff168260400151610be891906121ef565b610bf29190612206565b851115610c315760405162461bcd60e51b815260206004820152600d60248201526c746f6f206d756368206c6f737360981b60448201526064016105de565b604082015161ffff1615610d4b576000336001600160a01b03166338d52e0f6040518163ffffffff1660e01b8152600401602060405180830381865afa158015610c7f573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610ca3919061223b565b9050610d36612710846040015161ffff1688610cbf91906121ef565b610cc99190612206565b6040516370a0823160e01b81523060048201526001600160a01b038416906370a0823190602401602060405180830381865afa158015610d0d573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610d3191906120e8565b611768565b93508315610d4957610d49338286611782565b505b606082015161ffff1615610d8657610d83612710836060015161ffff1688610d7391906121ef565b610d7d9190612206565b85611768565b93505b5050935093915050565b610d9861168e565b600054604051639f40a7b360e01b8152600481018390523060248201819052604482015260648101919091526001600160a01b03831690639f40a7b3906084016020604051808303816000875af1158015610df7573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610e1b91906120e8565b505050565b610e2861168e565b6001600160a01b038116610e6d5760405162461bcd60e51b815260206004820152600c60248201526b5a45524f204144445245535360a01b60448201526064016105de565b600480546001600160a01b0319166001600160a01b0383169081179091556040517fa839c45565e8a86de41783841928f4acde049c2b7160f0ea4d4698220c5af61b90600090a250565b610ebf61168e565b600380546001600160a01b0319166001600160a01b0383169081179091556040517fda833a9122ed0b27d5c78c99315bb3758f1b77fb240db484a67fd0f286b263e590600090a250565b610f116116d9565b6001600160a01b03811660009081526006602052604090205460ff16610f655760405162461bcd60e51b81526020600482015260096024820152681b9bdd08185919195960ba1b60448201526064016105de565b6000816001600160a01b03166338d52e0f6040518163ffffffff1660e01b8152600401602060405180830381865afa15801
5610fa5573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610fc9919061223b565b604051636eb1769f60e11b81523060048201526001600160a01b0384811660248301529192509082169063dd62ed3e90604401602060405180830381865afa158015611019573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061103d91906120e8565b15611057576110576001600160a01b038216836000611820565b6001600160a01b03821660008181526006602052604090819020805460ff19169055517fce96c4db32686d3f0011df1abea0ab6c5794b848868dcbece79961fef7e8198d906110a890600290612101565b60405180910390a25050565b6110bc61168e565b6001600160a01b03871660009081526006602052604090205460ff166110f45760405162461bcd60e51b81526004016105de90612129565b60c861ffff871611156111445760405162461bcd60e51b81526020600482015260186024820152771b585b9859d95b595b9d08199959481d1a1c995cda1bdb1960421b60448201526064016105de565b61138861ffff861611156111965760405162461bcd60e51b81526020600482015260196024820152781c195c999bdc9b585b98d948199959481d1a1c995cda1bdb19603a1b60448201526064016105de565b6127108161ffff1611156111d75760405162461bcd60e51b81526020600482015260086024820152670e8dede40d0d2ced60c31b60448201526064016105de565b60006040518060e001604052808861ffff1681526020018761ffff1681526020018661ffff1681526020018561ffff1681526020018461ffff1681526020018361ffff16815260200160011515815250905080600760008a6001600160a01b03166001600160a01b0316815260200190815260200160002060008201518160000160006101000a81548161ffff021916908361ffff16021790555060208201518160000160026101000a81548161ffff021916908361ffff16021790555060408201518160000160046101000a81548161ffff021916908361ffff16021790555060608201518160000160066101000a81548161ffff021916908361ffff16021790555060808201518160000160086101000a81548161ffff021916908361ffff16021790555060a082015181600001600a6101000a81548161ffff021916908361ffff16021790555060c082015181600001600c6101000a81548160ff021916908315150217905550905050876001600160a01b03167fff2b689837652b4795317128d1dd57305f04ec90d567ff4b921424f1a19e8b0a8260405161137d9190612014565b604
05180910390a25050505050505050565b6040805160e081018252600080825260208201819052918101829052606081018290526080810182905260a0810182905260c0810191909152506001600160a01b038116600090815260076020908152604091829020825160e081018452905461ffff8082168352620100008204811693830193909352600160201b8104831693820193909352600160301b830482166060820152600160401b830482166080820152600160501b830490911660a0820152600160601b90910460ff16151560c082018190526114ca57506040805160e08101825260055461ffff808216835262010000820481166020840152600160201b8204811693830193909352600160301b810483166060830152600160401b810483166080830152600160501b810490921660a0820152600160601b90910460ff16151560c08201525b919050565b6114d761168e565b6114e5868686868686611968565b505050505050565b6114f561168e565b6001600160a01b03811661153a5760405162461bcd60e51b815260206004820152600c60248201526b5a45524f204144445245535360a01b60448201526064016105de565b600280546001600160a01b038381166001600160a01b0319831681179093556040519116919082907fb03f92c8c7ac39710f28b146f754650929499d599a66d51423cbd7f3ceb9aee390600090a35050565b6004546001600160a01b031633146115df5760405162461bcd60e51b81526020600482015260166024820152753737ba10333aba3ab932903332b29036b0b730b3b2b960511b60448201526064016105de565b60048054600180546001600160a01b03199081166001600160a01b0384161790915516905560405133907f772ddcfc9a0f3b1401c0f60000a81999005d9d593b71bb67707c5f326eb7c94d90600090a2565b611639611b9d565b600254611653906001600160a01b03848116911683611bf9565b816001600160a01b03167f962bc326c7b063c70721a63687e0e19450155f93c58eca94769746c0cfb02c5d826040516110a891815260200190565b6001546001600160a01b031633146116d75760405162461bcd60e51b815260206004820152600c60248201526b10b332b29036b0b730b3b2b960a11b60448201526064016105de565b565b6001546001600160a01b03163314806116fc57506003546001600160a01b031633145b6116d75760405162461bcd60e51b815260206004820152600e60248201526d10bb30bab63a1036b0b730b3b2b960911b60448201526064016105de565b3360009081526006602052604090205460ff166116d75760405162461bcd60e51b81526004016105de906121295
65b60008183106117775781611779565b825b90505b92915050565b604051636eb1769f60e11b81523060048201526001600160a01b03848116602483015282919084169063dd62ed3e90604401602060405180830381865afa1580156117d1573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906117f591906120e8565b1015610e1b576118106001600160a01b038316846000611820565b610e1b6001600160a01b03831684835b80158061189a5750604051636eb1769f60e11b81523060048201526001600160a01b03838116602483015284169063dd62ed3e90604401602060405180830381865afa158015611874573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061189891906120e8565b155b6119055760405162461bcd60e51b815260206004820152603660248201527f5361666545524332303a20617070726f76652066726f6d206e6f6e2d7a65726f60448201527520746f206e6f6e2d7a65726f20616c6c6f77616e636560501b60648201526084016105de565b6040516001600160a01b038316602482015260448101829052610e1b90849063095ea7b360e01b906064015b60408051601f198184030181529190526020810180516001600160e01b03166001600160e01b031990931692909217909152611c29565b60c861ffff871611156119b85760405162461bcd60e51b81526020600482015260186024820152771b585b9859d95b595b9d08199959481d1a1c995cda1bdb1960421b60448201526064016105de565b61138861ffff86161115611a0a5760405162461bcd60e51b81526020600482015260196024820152781c195c999bdc9b585b98d948199959481d1a1c995cda1bdb19603a1b60448201526064016105de565b6127108161ffff161115611a4b5760405162461bcd60e51b81526020600482015260086024820152670e8dede40d0d2ced60c31b60448201526064016105de565b6040805160e0808201835261ffff89811680845289821660208086018290528a84168688018190528a851660608089018290528b87166080808b018290528c891660a0808d01829052600060c09d8e018190526005805463ffffffff1916909b1762010000909a029990991767ffffffff000000001916600160201b90970267ffff000000000000191696909617600160301b909502949094176bffffffff00000000000000001916600160401b90920261ffff60501b191691909117600160501b9093029290921760ff60601b19811690965589518688168152601087901c8816818601529386901c8716848b0152603086901c8716908401529784901c851697820
19790975260509290921c90921694810194909452918301919091527fbbcfba7e6e61ab9dbbe4ee1512e1e0c0ff1b236ba707ef51c8f45e7af433b89d910160405180910390a1505050505050565b6002546001600160a01b0316331480611bc057506001546001600160a01b031633145b6116d75760405162461bcd60e51b815260206004820152600a602482015269085c9958da5c1a595b9d60b21b60448201526064016105de565b6040516001600160a01b038316602482015260448101829052610e1b90849063a9059cbb60e01b90606401611931565b6000611c7e826040518060400160405280602081526020017f5361666545524332303a206c6f772d6c6576656c2063616c6c206661696c6564815250856001600160a01b0316611cfe9092919063ffffffff16565b9050805160001480611c9f575080806020019051810190611c9f9190612258565b610e1b5760405162461bcd60e51b815260206004820152602a60248201527f5361666545524332303a204552433230206f7065726174696f6e20646964206e6044820152691bdd081cdd58d8d9595960b21b60648201526084016105de565b6060611d0d8484600085611d15565b949350505050565b606082471015611d765760405162461bcd60e51b815260206004820152602660248201527f416464726573733a20696e73756666696369656e742062616c616e636520666f6044820152651c8818d85b1b60d21b60648201526084016105de565b600080866001600160a01b03168587604051611d92919061229e565b60006040518083038185875af1925050503d8060008114611dcf576040519150601f19603f3d011682016040523d82523d6000602084013e611dd4565b606091505b5091509150611de587838387611df0565b979650505050505050565b60608315611e5f578251600003611e58576001600160a01b0385163b611e585760405162461bcd60e51b815260206004820152601d60248201527f416464726573733a2063616c6c20746f206e6f6e2d636f6e747261637400000060448201526064016105de565b5081611d0d565b611d0d8383815115611e745781518083602001fd5b8060405162461bcd60e51b81526004016105de91906122ba565b6001600160a01b038116811461059257600080fd5b600060208284031215611eb557600080fd5b8135611ec081611e8e565b9392505050565b600060208284031215611ed957600080fd5b5035919050565b60008060408385031215611ef357600080fd5b8235611efe81611e8e565b91506020830135611f0e81611e8e565b809150509250929050565b600080600060608486031215611f2e57600080fd5b8335611f3981611e8
e565b95602085013595506040909401359392505050565b60008060408385031215611f6157600080fd5b8235611f6c81611e8e565b946020939093013593505050565b803561ffff811681146114ca57600080fd5b600080600080600080600060e0888a031215611fa757600080fd5b8735611fb281611e8e565b9650611fc060208901611f7a565b9550611fce60408901611f7a565b9450611fdc60608901611f7a565b9350611fea60808901611f7a565b9250611ff860a08901611f7a565b915061200660c08901611f7a565b905092959891949750929550565b600060e08201905061ffff8084511683528060208501511660208401528060408501511660408401528060608501511660608401528060808501511660808401528060a08501511660a08401525060c0830151151560c083015292915050565b60008060008060008060c0878903121561208d57600080fd5b61209687611f7a565b95506120a460208801611f7a565b94506120b260408801611f7a565b93506120c060608801611f7a565b92506120ce60808801611f7a565b91506120dc60a08801611f7a565b90509295509295509295565b6000602082840312156120fa57600080fd5b5051919050565b602081016003831061212357634e487b7160e01b600052602160045260246000fd5b91905290565b6020808252600f908201526e1d985d5b1d081b9bdd081859191959608a1b604082015260600190565b60006080828403121561216457600080fd5b6040516080810181811067ffffffffffffffff8211171561219557634e487b7160e01b600052604160045260246000fd5b8060405250825181526020830151602082015260408301516040820152606083015160608201528091505092915050565b634e487b7160e01b600052601160045260246000fd5b8181038181111561177c5761177c6121c6565b808202811582820484141761177c5761177c6121c6565b60008261222357634e487b7160e01b600052601260045260246000fd5b500490565b8082018082111561177c5761177c6121c6565b60006020828403121561224d57600080fd5b8151611ec081611e8e565b60006020828403121561226a57600080fd5b81518015158114611ec057600080fd5b60005b8381101561229557818101518382015260200161227d565b50506000910152565b600082516122b081846020870161227a565b9190910192915050565b60208152600082518060208401526122d981604085016020870161227a565b601f01601f1916919091016040019291505056fea26469706673582212206c39a04ece4b6c74500d141c33f45c15a39c48bef7ac5111ccc582306f6d41d964736f6c6343000
8120033",
"calls": [],
"selfdestruct": False,
"failed": False,
"events": [
{
"call_type": "EVENT",
"data": "0x000000000000000000000000000000000000000000000000000000000000006400000000000000000000000000000000000000000000000000000000000003e800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000271000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"depth": 2,
"topics": [
"0xbbcfba7e6e61ab9dbbe4ee1512e1e0c0ff1b236ba707ef51c8f45e7af433b89d"
],
}
],
}
],
"selfdestruct": False,
"failed": False,
"events": [
{
"call_type": "EVENT",
"data": "0x",
"depth": 1,
"topics": [
"0x111fcf41d7f010b6acebbb070fcf96056db140c08d3e7cd9cff07789d93b1e4c",
"0x000000000000000000000000a16e02e87b7454126e5e10d957a927a7f5b5d2be",
],
}
],
}
transaction = {
"chainId": 31337,
"to": "0x5FbDB2315678afecb367f032d93F642f64180aa3",
"from": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266",
"gas": 30000000,
"nonce": 1,
"value": 0,
"data": "0x184ac61b000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb9226600000000000000000000000015d34aaf54267db7d7c367839aaf71a00a2c6a65000000000000000000000000000000000000000000000000000000000000006400000000000000000000000000000000000000000000000000000000000003e80000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000027100000000000000000000000000000000000000000000000000000000000000000",
"type": 2,
"maxFeePerGas": 0,
"maxPriorityFeePerGas": 0,
"accessList": [],
"block_number": 2,
"gas_used": 1931665,
"logs": [
{
"address": "0xa16E02E87b7454126E5E10d957A927A7F5B5d2be",
"blockHash": HexBytes(
"0x43c010ce0d9452289205c88a180520e9670bdf6f84d21b8c35d7c815136bba78"
),
"blockNumber": 2,
"data": HexBytes(
"0x000000000000000000000000000000000000000000000000000000000000006400000000000000000000000000000000000000000000000000000000000003e800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000271000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
),
"logIndex": 0,
"removed": False,
"topics": [
HexBytes("0xbbcfba7e6e61ab9dbbe4ee1512e1e0c0ff1b236ba707ef51c8f45e7af433b89d")
],
"transactionHash": HexBytes(
"0xa4803961e06c673b255ca6af78d00df3c0ebef0b2f23325a1457eaaf20914e8e"
),
"transactionIndex": 0,
},
{
"address": "0x5FbDB2315678afecb367f032d93F642f64180aa3",
"blockHash": HexBytes(
"0x43c010ce0d9452289205c88a180520e9670bdf6f84d21b8c35d7c815136bba78"
),
"blockNumber": 2,
"data": HexBytes("0x"),
"logIndex": 1,
"removed": False,
"topics": [
HexBytes("0x111fcf41d7f010b6acebbb070fcf96056db140c08d3e7cd9cff07789d93b1e4c"),
HexBytes("0x000000000000000000000000a16e02e87b7454126e5e10d957a927a7f5b5d2be"),
],
"transactionHash": HexBytes(
"0xa4803961e06c673b255ca6af78d00df3c0ebef0b2f23325a1457eaaf20914e8e"
),
"transactionIndex": 0,
},
],
"status": 1,
"txn_hash": "0xa4803961e06c673b255ca6af78d00df3c0ebef0b2f23325a1457eaaf20914e8e",
"transaction": {
"chainId": 31337,
"to": "0x5FbDB2315678afecb367f032d93F642f64180aa3",
"from": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266",
"gas": 30000000,
"nonce": 1,
"value": 0,
"data": "0x184ac61b000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb9226600000000000000000000000015d34aaf54267db7d7c367839aaf71a00a2c6a65000000000000000000000000000000000000000000000000000000000000006400000000000000000000000000000000000000000000000000000000000003e80000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000027100000000000000000000000000000000000000000000000000000000000000000",
"type": 2,
"max_fee": 0,
"max_priority_fee": 0,
},
"gas_limit": 30000000,
"gas_price": 0,
}
class TraceForTest(TransactionTrace):
@property
def transaction(self) -> dict:
return transaction
def get_calltree(self) -> CallTreeNode:
return CallTreeNode.model_validate(calltree)
@property
def root_method_abi(self) -> Optional[MethodABI]:
return abi
trace = TraceForTest(transaction_hash=transaction_hash)
trace.call_trace_approach = TraceApproach.GETH_STRUCT_LOG_PARSE
actual = trace.return_value
expected = ("0xa16E02E87b7454126E5E10d957A927A7F5B5d2be",)
assert actual == expected
|
Tests against a bug where a trace in a certain state (HH returning a tuple) was
unable to get the correct return_value.
|
test_return_value_tuple
|
python
|
ApeWorX/ape
|
tests/functional/geth/test_trace.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/geth/test_trace.py
|
Apache-2.0
|
def test_decode_data_missing_trailing_zeroes(
collection, topics, log_data_missing_trailing_zeroes, ape_caplog
):
"""
This test is for a time where Alchemy gave us log data when it was missing trailing zeroes.
When using strict=False, it was able to properly decode. In this case, in Ape, we warn
the user and still proceed to decode the log.
"""
actual = ape_caplog.assert_last_log_with_retries(
lambda: collection.decode(topics, log_data_missing_trailing_zeroes),
"However, we are able to get a value using decode(strict=False)",
)
expected = {
"name": "Launchnodes",
"nodeOperatorId": 30,
"rewardAddress": "0x5a8b929edbf3ce44526465dd2087ec7efb59a561",
"stakingLimit": 0,
}
assert actual == expected
|
This test is for a time where Alchemy gave us log data when it was missing trailing zeroes.
When using strict=False, it was able to properly decode. In this case, in Ape, we warn
the user and still proceed to decode the log.
|
test_decode_data_missing_trailing_zeroes
|
python
|
ApeWorX/ape
|
tests/functional/utils/test_abi.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/utils/test_abi.py
|
Apache-2.0
|
def test_get_release_retry(self, mock_release, github_client, mock_session, version):
"""
Ensure after failing to get a release, we re-attempt with
out a v-prefix.
"""
opposite = version.lstrip("v") if version.startswith("v") else f"v{version}"
def side_effect(method, uri, *arg, **kwargs):
_version = uri.split("/")[-1]
if _version == version:
# Force it to try the opposite.
raise ValueError()
return mock_release
mock_session.request.side_effect = side_effect
actual = github_client.get_release(ORG_NAME, REPO_NAME, version)
assert actual["name"] == REPO_NAME
calls = mock_session.request.call_args_list[-2:]
expected_uri = "https://api.github.com/repos/test/path/releases/tags"
assert calls[0][0] == ("GET", f"{expected_uri}/{version}")
assert calls[1][0] == ("GET", f"{expected_uri}/{opposite}")
|
Ensure after failing to get a release, we re-attempt with
out a v-prefix.
|
test_get_release_retry
|
python
|
ApeWorX/ape
|
tests/functional/utils/test_github.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/utils/test_github.py
|
Apache-2.0
|
def test_available_plugins_handles_401(self, mocker, github_client, mock_session, ape_caplog):
"""
When you get a 401 from using a token, Ape's GitHub client should not
only warn the user but retry the request w/o authorization, as it likely
will still work.
"""
mock_session.headers = {"Authorization": "token mytoken"}
response1 = mocker.MagicMock()
response1.json.return_value = [{"name": "ape-myplugin"}]
response2 = mocker.MagicMock()
response2.json.return_value = []
bad_auth_response = mocker.MagicMock()
bad_auth_response.status_code = 401
bad_auth_response.raise_for_status.side_effect = HTTPError(response=bad_auth_response)
def get_org_repos(method, url, **kwargs):
if mock_session.headers.get("Authorization") == "token mytoken":
return bad_auth_response
elif kwargs["params"]["page"] == 1:
return response1
else:
# End.
return response2
mock_session.request.side_effect = get_org_repos
actual = github_client.available_plugins
# Still works, even with bad auth.
assert actual == {"ape_myplugin"}
# Show we got our log message.
expected = (
"Requests are not authorized! GITHUB_ACCESS_TOKEN is likely "
"expired; received 401 when attempted to use it. If you need "
"GitHub authorization, try resetting your token."
)
assert ape_caplog.head == expected
|
When you get a 401 from using a token, Ape's GitHub client should not
only warn the user but retry the request w/o authorization, as it likely
will still work.
|
test_available_plugins_handles_401
|
python
|
ApeWorX/ape
|
tests/functional/utils/test_github.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/utils/test_github.py
|
Apache-2.0
|
def test_path_match_recurse_dir(path):
"""
Testing a specific way of excluding all the files in a directory.
"""
excl = "exclude_dir/**"
assert path_match(path, excl)
|
Testing a specific way of excluding all the files in a directory.
|
test_path_match_recurse_dir
|
python
|
ApeWorX/ape
|
tests/functional/utils/test_os.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/utils/test_os.py
|
Apache-2.0
|
def test_setitem_user_agent_parts_exist(self, headers):
    """
    When a new user-agent value shares a prefix with the existing one,
    setting it must not duplicate the shared part
    (e.g. never "test0/1.0 test0/1.0 test1/2.0").
    """
    original = "test0/1.0"
    extended = "test0/1.0 test1/2.0"
    headers["User-Agent"] = original
    # Re-setting with a superset value should replace, not append.
    headers["User-Agent"] = extended
    assert headers["User-Agent"] == extended
|
Tests the case when user-agents share a sub-set
of each other, that it does not duplicate.
|
test_setitem_user_agent_parts_exist
|
python
|
ApeWorX/ape
|
tests/functional/utils/test_rpc.py
|
https://github.com/ApeWorX/ape/blob/master/tests/functional/utils/test_rpc.py
|
Apache-2.0
|
def pytest_collection_modifyitems(session, config, items):
    """
    Filter out tests marked to be skipped using ``skip_projects``
    and the ``skip_projects_except`` decorators.
    """
    modified_items = []
    for item in items:
        # Parametrized names look like "test_name[param-id]"; split the base
        # name from the parameter portion and strip the bracket characters.
        item_name_parts = item.name.split("[")
        item_name_parts = [p.strip("[]") for p in item_name_parts]
        module_full_name = getattr(item.module, "__name__", None)
        if not module_full_name:
            # NOTE(review): items with no resolvable module are dropped from
            # the run entirely here — confirm that is intended.
            continue
        module_name = module_full_name.split(".")[-1]
        test_name = item_name_parts[0]
        # Handle if a parametrized test is on-top
        # of the project's parametrization.
        project_name = item_name_parts[-1]
        for proj_name in project_skipper:
            # Example: 'test_foo[project-name-fuzz-0]' matches 'project-name'
            if project_name.startswith(proj_name):
                project_name = proj_name
                break
        # Only items with exactly one parameter segment in a module under
        # "integration.cli" are subject to project-based skipping.
        is_cli_integration_test = (
            len(item_name_parts) == 2 and "integration.cli" in module_full_name
        )
        if not is_cli_integration_test or not project_skipper.do_skip(
            project_name, module_name, test_name
        ):
            modified_items.append(item)
    # Mutate the list in place so pytest sees the filtered collection.
    items[:] = modified_items
|
Filter out tests marked to be skipped using ``skip_projects``
and the ``skip_projects_except`` decorators.
|
pytest_collection_modifyitems
|
python
|
ApeWorX/ape
|
tests/integration/cli/conftest.py
|
https://github.com/ApeWorX/ape/blob/master/tests/integration/cli/conftest.py
|
Apache-2.0
|
def project_dir_map():
    """
    Ensure only copying projects once to prevent `TooManyOpenFilesError`.
    """
    class ProjectDirCache:
        # Class-level cache shared by all users of this fixture:
        # project name -> destination directory of the copy.
        project_map: dict[str, Path] = {}
        def load(self, name: str) -> Path:
            """Return the copied project dir for ``name``, copying on first use."""
            base_path = Path(__file__).parent / "projects"
            if name in self.project_map:
                res = self.project_map[name]
                if res.is_dir():
                    # Already copied and still exists!
                    return res
            # Either re-copy or copy for the first time.
            project_source_dir = __projects_directory__ / name
            project_dest_dir = base_path / project_source_dir.name
            project_dest_dir.parent.mkdir(exist_ok=True, parents=True)
            if not project_dest_dir.is_dir():
                copytree(project_source_dir, project_dest_dir)
            self.project_map[name] = project_dest_dir
            return self.project_map[name]
    return ProjectDirCache()
|
Ensure only copying projects once to prevent `TooManyOpenFilesError`.
|
project_dir_map
|
python
|
ApeWorX/ape
|
tests/integration/cli/conftest.py
|
https://github.com/ApeWorX/ape/blob/master/tests/integration/cli/conftest.py
|
Apache-2.0
|
def ape_plugins_runner(config):
    """
    Use subprocess runner so can manipulate site packages and see results.
    """
    class PluginSubprocessRunner(ApeSubprocessRunner):
        def __init__(self):
            # Route all invocations through the `plugins` CLI group.
            super().__init__("plugins", data_folder=config.DATA_FOLDER)
        def invoke_list(self, arguments: Optional[list] = None):
            """Run ``ape plugins list`` and parse its output into a ListResult."""
            arguments = arguments or []
            result = self.invoke("list", *arguments)
            assert result.exit_code == 0, result.output
            return ListResult.parse_output(result.output)
    return PluginSubprocessRunner()
|
Use subprocess runner so can manipulate site packages and see results.
|
ape_plugins_runner
|
python
|
ApeWorX/ape
|
tests/integration/cli/conftest.py
|
https://github.com/ApeWorX/ape/blob/master/tests/integration/cli/conftest.py
|
Apache-2.0
|
def test_import_alias_is_really_long(ape_cli, runner):
    """
    Long aliases (useful for entropy-related use-cases) must be
    supported by `ape accounts import`.
    """
    long_alias = "this is a long alias that i am going to use and you can't stop me"
    user_input = "\n".join([f"0x{PRIVATE_KEY}", PASSWORD, PASSWORD])
    result = runner.invoke(ape_cli, ("accounts", "import", long_alias), input=user_input)
    assert result.exit_code == 0
|
For entropy related use-cases regarding alias, we
must ensure long aliases are supported.
|
test_import_alias_is_really_long
|
python
|
ApeWorX/ape
|
tests/integration/cli/test_accounts.py
|
https://github.com/ApeWorX/ape/blob/master/tests/integration/cli/test_accounts.py
|
Apache-2.0
|
def test_compile_when_sources_change_problematically(ape_cli, runner, integ_project, clean_cache):
    """
    There was a bug where, when sources changed but contained errors, the old
    sources continued to be used and the errors were swallowed.
    """
    source_path = integ_project.contracts_folder / "Interface.json"
    content = source_path.read_text()
    assert "bar" in content, "Test setup failed - unexpected content"
    # First compile succeeds while the source is valid.
    result = runner.invoke(
        ape_cli, ("compile", "--project", f"{integ_project.path}"), catch_exceptions=False
    )
    assert result.exit_code == 0, result.output
    # Change the contents of a file in a problematic way.
    source_path = integ_project.contracts_folder / "Interface.json"
    # Corrupt the JSON so recompilation must fail.
    modified_source_text = source_path.read_text().replace("{", "BRACKET")
    source_path.unlink()
    source_path.touch()
    source_path.write_text(modified_source_text, encoding="utf8")
    # Recompiling must surface the error instead of silently re-using old output.
    result = runner.invoke(
        ape_cli, ("compile", "--project", f"{integ_project.path}"), catch_exceptions=False
    )
    assert result.exit_code != 0, result.output
|
There was a bug when sources changes but had errors, that the old sources continued
to be used and the errors were swallowed.
|
test_compile_when_sources_change_problematically
|
python
|
ApeWorX/ape
|
tests/integration/cli/test_compile.py
|
https://github.com/ApeWorX/ape/blob/master/tests/integration/cli/test_compile.py
|
Apache-2.0
|
def test_compile_when_source_contains_return_characters(
    ape_cli, runner, integ_project, clean_cache
):
    """
    This tests a bugfix where a source file contained return-characters
    and that triggered endless re-compiles because it technically contains extra
    bytes than the ones that show up in the text.
    """
    source_path = integ_project.contracts_folder / "Interface.json"
    # Change the contents of a file to contain the '\r' character.
    modified_source_text = f"{source_path.read_text()}\r"
    source_path.unlink()
    source_path.touch()
    source_path.write_text(modified_source_text, encoding="utf8")
    arguments = ("compile", "--project", f"{integ_project.path}")
    # First run: the file changed, so it must compile (name appears in output).
    result = runner.invoke(ape_cli, arguments, catch_exceptions=False)
    assert result.exit_code == 0, result.output
    assert "contracts/Interface.json" in result.output
    # Verify that the next time, it does not need to recompile (no changes)
    result = runner.invoke(ape_cli, arguments, catch_exceptions=False)
    assert result.exit_code == 0, result.output
    assert "contracts/Interface.json" not in result.output
|
This tests a bugfix where a source file contained return-characters
and that triggered endless re-compiles because it technically contains extra
bytes than the ones that show up in the text.
|
test_compile_when_source_contains_return_characters
|
python
|
ApeWorX/ape
|
tests/integration/cli/test_compile.py
|
https://github.com/ApeWorX/ape/blob/master/tests/integration/cli/test_compile.py
|
Apache-2.0
|
def test_console_natspecs(integ_project, solidity_contract_type, console_runner):
    """
    This test shows that the various natspec integrations with ABI-backed
    types work in ``ape console``.
    """
    contract_code = solidity_contract_type.model_dump_json(by_alias=True)
    # Console session: deploy the contract, then display a method, an event,
    # and an error so each one's natspec gets rendered.
    # flake8: noqa
    cmd_ls = [
        "%load_ext ape_console.plugin",
        f"contract_container = compilers.ethpm.compile_code('{contract_code}')",
        "account = accounts.test_accounts[0]",
        "contract = account.deploy(contract_container, 123)",
        "print('0: method')",
        "contract.setNumber",
        "print('1: event')",
        "contract.NumberChange",
        "print('2: error')",
        "contract.ACustomError",
        "exit",
    ]
    cmd_str = "\n".join(cmd_ls)
    expected_method = """
setNumber(uint256 num)
  @custom:emits Emits a `NumberChange` event with the previous number, the new number, and the previous block hash
  @custom:modifies Sets the `myNumber` state variable
  @custom:require num Must not be equal to 5
  @details Only the owner can call this function. The new number cannot be 5.
  @param num uint256 The new number to be set
""".strip()
    expected_event = """
NumberChange(bytes32 b, uint256 prevNum, string dynData, uint256 indexed newNum, string indexed dynIndexed)
  @details Emitted when number is changed. `newNum` is the new number from the call. Expected every time number changes.
""".strip()
    # flake8: on
    result = console_runner.invoke("--project", f"{integ_project.path}", input=f"{cmd_str}\n")
    # Getting rid of newlines as terminal-breakage never consistent in tests.
    actual = result.output.replace("\n", "")
    assert all(ln in actual for ln in expected_method.splitlines())
    assert all(ln in actual for ln in expected_event.splitlines())
|
This test shows that the various natspec integrations with ABI-backed
types work in ``ape console``.
|
test_console_natspecs
|
python
|
ApeWorX/ape
|
tests/integration/cli/test_console.py
|
https://github.com/ApeWorX/ape/blob/master/tests/integration/cli/test_console.py
|
Apache-2.0
|
def test_try_run_script_missing_cli_decorator(scripts_runner, integ_project):
    """
    A script defining ``cli()`` without a click decorator is not a
    recognized command, so invoking it produces a usage error.
    """
    scripts_runner.project = integ_project
    stderr = scripts_runner.invoke("error_forgot_click")._completed_process.stderr
    assert "Usage: ape run" in stderr
|
Shows that we cannot run a script defining a `cli()` method without
it being a click command. The script is not recognized, so you get
a usage error.
|
test_try_run_script_missing_cli_decorator
|
python
|
ApeWorX/ape
|
tests/integration/cli/test_run.py
|
https://github.com/ApeWorX/ape/blob/master/tests/integration/cli/test_run.py
|
Apache-2.0
|
def test_scripts_module_already_installed(integ_project, scripts_runner, mocker):
    """
    Make sure that if there is for some reason a python module named `scripts`
    installed, it does not interfere with Ape's scripting mechanism.
    """
    scripts_runner.project = integ_project
    # Fake an installed top-level package named ``scripts``.
    mock_scripts = mocker.MagicMock()
    mock_path = mocker.MagicMock()
    mock_path._path = "path/to/scripts"
    mock_scripts.__file__ = None
    mock_scripts.__path__ = mock_path
    sys.modules["scripts"] = mock_scripts
    try:
        result = scripts_runner.invoke()
        assert result.exit_code == 0, result.output
    finally:
        # Always remove the fake module so it cannot leak into other tests,
        # even when the invocation or the assertion above fails.
        del sys.modules["scripts"]
|
Make sure that if there is for some reason a python module named `scripts`
installed, it does not interfere with Ape's scripting mechanism.
|
test_scripts_module_already_installed
|
python
|
ApeWorX/ape
|
tests/integration/cli/test_run.py
|
https://github.com/ApeWorX/ape/blob/master/tests/integration/cli/test_run.py
|
Apache-2.0
|
def test_run_recompiles_if_needed(runner, ape_cli, scripts_runner, integ_project):
    """
    Ensure that when a change is made to a contract,
    when we run a script, it re-compiles the script first.
    """
    scripts_runner.project = integ_project
    # Ensure we begin compiled.
    runner.invoke(ape_cli, ("compile", "--force", "--project", f"{integ_project.path}"))
    # Make a change to the contract.
    contract = integ_project.contracts_folder / "VyperContract.json"
    method_name = integ_project.VyperContract.contract_type.view_methods[0].name
    # Rename a view method so the recompiled ABI is observably different.
    new_method_name = f"f__{method_name}__"
    new_contract_text = contract.read_text().replace(method_name, new_method_name)
    contract.write_text(new_contract_text, encoding="utf8")
    # Run the script. It better recompile first!
    result = scripts_runner.invoke("output_contract_view_methods")
    # The renamed method can only appear in output if recompilation happened.
    assert new_method_name in result.output
|
Ensure that when a change is made to a contract,
when we run a script, it re-compiles the script first.
|
test_run_recompiles_if_needed
|
python
|
ApeWorX/ape
|
tests/integration/cli/test_run.py
|
https://github.com/ApeWorX/ape/blob/master/tests/integration/cli/test_run.py
|
Apache-2.0
|
def test_verbosity(runner, ape_cli):
    """
    Regression test: `ape test -v debug` once failed because of an invalid
    type check from click; the issue only appeared in the `ape test` command.
    """
    # `--fixtures` just prints fixtures, so no tests actually run.
    arguments = ("test", "--verbosity", "DEBUG", "--fixtures")
    result = runner.invoke(ape_cli, arguments, catch_exceptions=False)
    assert result.exit_code == 0, result.output
|
Tests against an issue where `ape test -v debug` would fail because of
an invalid type check from click; only appeared in `ape test` command
for some reason.
|
test_verbosity
|
python
|
ApeWorX/ape
|
tests/integration/cli/test_test.py
|
https://github.com/ApeWorX/ape/blob/master/tests/integration/cli/test_test.py
|
Apache-2.0
|
def test_vvv(runner, ape_cli, integ_project, v_arg):
    """
    Show that pytest's -v/-vv/-vvv flags can be passed through `ape test`
    without interfering with Ape's own option parsing.
    """
    here = integ_project.path
    os.chdir(integ_project.path)
    name = f"test_{v_arg.replace('-', '_')}"
    TEST = f"""
def {name}():
    assert True
""".lstrip()
    # Have to create a new test each time to avoid .pycs issues
    new_test_file = integ_project.tests_folder / f"{name}.py"
    # Explicit encoding keeps the write deterministic across platforms
    # (consistent with the rest of this test suite).
    new_test_file.write_text(TEST, encoding="utf8")
    try:
        # NOTE: v_arg purposely at the end because testing doesn't interfere
        # with click's option parsing "requires value" error.
        result = runner.invoke(
            ape_cli,
            ("test", f"tests/{new_test_file.name}::{name}", v_arg),
            catch_exceptions=False,
        )
    finally:
        new_test_file.unlink(missing_ok=True)
        os.chdir(here)
    assert result.exit_code == 0, result.output
    # Prove `-vvv` worked via the output.
    # It shows PASSED instead of the little green dot.
    assert "PASSED" in result.output
|
Showing you can somehow use pytest's -v flag without
messing up Ape.
|
test_vvv
|
python
|
ApeWorX/ape
|
tests/integration/cli/test_test.py
|
https://github.com/ApeWorX/ape/blob/master/tests/integration/cli/test_test.py
|
Apache-2.0
|
def test_gas_when_estimating(geth_provider, setup_pytester, integ_project, pytester, geth_account):
    """
    Shows that gas reports still work when estimating gas.
    """
    # Enable the terminal gas report via a temporary config override.
    cfg = integ_project.config.model_dump(by_alias=True, mode="json")
    cfg["test"]["gas"] = {"reports": ["terminal"]}
    geth_account.transfer(geth_account, "1 wei")  # Force a clean block.
    with integ_project.temp_config(**cfg):
        passed, failed = setup_pytester(integ_project)
        # "-n 0" runs without xdist workers so output capture is reliable.
        result = pytester.runpytest_subprocess("-n", "0", timeout=120)
        run_gas_test(result, passed, failed)
|
Shows that gas reports still work when estimating gas.
|
test_gas_when_estimating
|
python
|
ApeWorX/ape
|
tests/integration/cli/test_test.py
|
https://github.com/ApeWorX/ape/blob/master/tests/integration/cli/test_test.py
|
Apache-2.0
|
def test_coverage(geth_provider, setup_pytester, integ_project, pytester, geth_account):
    """
    End-to-end check of the --coverage flag. The bulk of coverage support
    lives in compiler plugins (e.g. ape-vyper implements `trace_source()`),
    so deeper coverage tests belong there.
    """
    # Force a clean block before collecting expectations.
    geth_account.transfer(geth_account, "1 wei")
    expected_passed, expected_failed = setup_pytester(integ_project)
    result = pytester.runpytest_subprocess(
        "--coverage", "--show-internal", "--network", "ethereum:local:node", "-n", "0"
    )
    result.assert_outcomes(passed=expected_passed, failed=expected_failed)
|
Ensures the --coverage flag works.
For better coverage tests, see ape-vyper because the Vyper
plugin is what implements the `trace_source()` method which does the bulk
of the coverage work.
|
test_coverage
|
python
|
ApeWorX/ape
|
tests/integration/cli/test_test.py
|
https://github.com/ApeWorX/ape/blob/master/tests/integration/cli/test_test.py
|
Apache-2.0
|
def do_skip(self, project: str, module: str, test: str) -> bool:
    """
    Return ``True`` when the given test was registered (via the
    ``skip_project`` / ``skip_project_except`` decorators) to be
    skipped for ``project``.
    """
    module_map = self.projects.get(project)
    if module_map is None:
        # Not a project-based integration test
        return False
    return test in module_map.get(module, [])
|
Returns ``True`` if a test has been marked to be
skipped for the given project using the ``skip_project`` or
``skip_project_except`` decorators.
|
do_skip
|
python
|
ApeWorX/ape
|
tests/integration/cli/utils.py
|
https://github.com/ApeWorX/ape/blob/master/tests/integration/cli/utils.py
|
Apache-2.0
|
def skip_projects(self, method: Callable, *projects: str):
    """
    Record a 'skip' of ``method`` for each given project. The
    ``skip_project`` decorator calls this on the wrapped test method.
    """
    assert hasattr(method, "__name__") and hasattr(method, "__module__")
    node = NodeId(method)
    for project in projects:
        self._raise_if_not_exists(project, node.node_id)
        # Create the per-module set lazily, then register the test name.
        self.projects[project].setdefault(node.module_name, set()).add(node.name)
|
Call this method to record a 'skip'.
The ``skip_project`` decorator calls this method
on the test method they are wrapped around.
|
skip_projects
|
python
|
ApeWorX/ape
|
tests/integration/cli/utils.py
|
https://github.com/ApeWorX/ape/blob/master/tests/integration/cli/utils.py
|
Apache-2.0
|
def skip_projects_except(self, method: Callable, *projects: str):
    """
    Record a 'skip' for every project NOT listed in ``projects``. The
    ``skip_project_except`` decorator calls this on the wrapped test method.
    """
    assert hasattr(method, "__name__") and hasattr(method, "__module__")
    node = NodeId(method)
    # Verify the projects that *should* run actually exist.
    for included in projects:
        self._raise_if_not_exists(included, node.node_id)
    excluded = [name for name in self.projects if name not in projects]
    self.skip_projects(method, *excluded)
|
Call this method to record 'skip's for each project that is not
in the given list. The ``skip_project_except`` decorator calls
this method on the test method they are wrapped around.
|
skip_projects_except
|
python
|
ApeWorX/ape
|
tests/integration/cli/utils.py
|
https://github.com/ApeWorX/ape/blob/master/tests/integration/cli/utils.py
|
Apache-2.0
|
def skip_projects(*names: str):
    """
    Decorator marking a CLI integration test as skipped
    for each of the given projects.
    """
    def decorator(func):
        project_skipper.skip_projects(func, *names)
        return func
    return decorator
|
Use this decorator to cause a CLI integration test
not to run for the given projects.
|
skip_projects
|
python
|
ApeWorX/ape
|
tests/integration/cli/utils.py
|
https://github.com/ApeWorX/ape/blob/master/tests/integration/cli/utils.py
|
Apache-2.0
|
def skip_projects_except(*names: str):
    """
    Decorator restricting a CLI integration test to run
    only for the given projects.
    """
    def decorator(func):
        project_skipper.skip_projects_except(func, *names)
        return func
    return decorator
|
Use this decorator to cause a CLI integration test
to only run for the given projects.
|
skip_projects_except
|
python
|
ApeWorX/ape
|
tests/integration/cli/utils.py
|
https://github.com/ApeWorX/ape/blob/master/tests/integration/cli/utils.py
|
Apache-2.0
|
def test_extra_account(chain):
    """
    Accounts funded via the config option should hold a nonzero balance.
    """
    funded_address = "0x63c7f11162dBFC374DC6f5C0B3Aa26C618846a85"
    assert chain.provider.get_balance(funded_address) > 0
|
Show we can fund accounts from the config option.
|
test_extra_account
|
python
|
ApeWorX/ape
|
tests/integration/cli/projects/geth/tests/test_using_local_geth.py
|
https://github.com/ApeWorX/ape/blob/master/tests/integration/cli/projects/geth/tests/test_using_local_geth.py
|
Apache-2.0
|
def test_using_contract_with_same_type_and_method_call(accounts, project):
    """
    Deploy a second instance of the contract type used by the ``contract``
    fixture and invoke a method called elsewhere in the suite, proving that
    gas-report results accumulate across all instances of a contract type.
    """
    deployer = accounts[7]
    instance = project.VyperContract.deploy(sender=deployer)
    instance.setNumber(777, sender=deployer)
    assert instance.myNumber() == 777
|
Deploy the same contract from the ``contract`` fixture and call a method
that gets called elsewhere in the test suite. This shows that we amass
results across all instances of contract types when making the gas report.
|
test_using_contract_with_same_type_and_method_call
|
python
|
ApeWorX/ape
|
tests/integration/cli/projects/geth/tests/test_using_local_geth.py
|
https://github.com/ApeWorX/ape/blob/master/tests/integration/cli/projects/geth/tests/test_using_local_geth.py
|
Apache-2.0
|
def test_two_contracts_with_same_symbol(accounts, project):
    """
    Two tokens sharing a symbol must not clash: their contract IDs once
    nearly collided, so this guards against related regressions.
    """
    sender = accounts[-2]
    receiver = accounts[-1]
    first_token = project.TokenA.deploy(sender=sender)
    first_token.transfer(receiver, 5, sender=sender)
    assert first_token.balanceOf(receiver) == 5
    second_token = project.TokenB.deploy(sender=sender)
    second_token.transfer(receiver, 6, sender=sender)
    assert second_token.balanceOf(receiver) == 6
|
Tests against scenario when using 2 tokens with same symbol.
There was almost a bug where the contract IDs clashed.
This is to help prevent future bugs related to this.
|
test_two_contracts_with_same_symbol
|
python
|
ApeWorX/ape
|
tests/integration/cli/projects/geth/tests/test_using_local_geth.py
|
https://github.com/ApeWorX/ape/blob/master/tests/integration/cli/projects/geth/tests/test_using_local_geth.py
|
Apache-2.0
|
def test_call_method_excluded_from_cli_options(accounts, contract):
    """
    Invoke a method that command-line options intentionally exclude, so we
    can assert it never shows up in the gas report.
    """
    assert not contract.fooAndBar(sender=accounts[9]).failed
|
Call a method so that we can intentionally ignore it via command
line options and test that it does not show in the report.
|
test_call_method_excluded_from_cli_options
|
python
|
ApeWorX/ape
|
tests/integration/cli/projects/geth/tests/test_using_local_geth.py
|
https://github.com/ApeWorX/ape/blob/master/tests/integration/cli/projects/geth/tests/test_using_local_geth.py
|
Apache-2.0
|
def test_call_method_excluded_from_config(accounts, contract):
    """
    Invoke a method excluded via ``ape-config.yaml`` so we can assert it
    does not appear in the gas report.
    """
    caller = accounts[-4]
    assert not contract.setAddress(caller.address, sender=caller).failed
|
Call a method excluded in the ``ape-config.yaml`` file
for asserting it does not show in gas report.
|
test_call_method_excluded_from_config
|
python
|
ApeWorX/ape
|
tests/integration/cli/projects/geth/tests/test_using_local_geth.py
|
https://github.com/ApeWorX/ape/blob/master/tests/integration/cli/projects/geth/tests/test_using_local_geth.py
|
Apache-2.0
|
def cli():
    """
    This script tests the scenario when a cli script is missing
    a click-decorator. The script itself is not runnable by Ape,
    but it will cause a warning. Primarily, it is important that
    it does not cause the entire scripts-integration to fail.
    """
    # Intentionally empty: the missing click decorator is the point.
|
This script tests the scenario when a cli script is missing
a click-decorator. The script itself is not runnable by Ape,
but it will cause a warning. Primarily, it is important that
it does not cause the entire scripts-integration to fail.
|
cli
|
python
|
ApeWorX/ape
|
tests/integration/cli/projects/script/scripts/error_forgot_click.py
|
https://github.com/ApeWorX/ape/blob/master/tests/integration/cli/projects/script/scripts/error_forgot_click.py
|
Apache-2.0
|
def test_isolation_with_session_module_and_function(chain, session_one, session_two, function_one):
    """
    Block-height accounting: the session fixtures contribute 6 blocks, the
    (autouse) module fixture 3, the function fixture 1, and setup's transfer
    adds 1 more — 11 in total.
    """
    expected_height = 6 + 3 + 1 + 1  # sessions + module (autouse) + function + setup transfer
    assert chain.blocks.height == expected_height
|
The sessions should be used, so that is 6.
Function is 1 and the module 3.
Also, setup does a transfer - that bumps up another 1.
Expected is 11.
|
test_isolation_with_session_module_and_function
|
python
|
ApeWorX/ape
|
tests/integration/cli/projects/test/tests/test_fixture_isolation.py
|
https://github.com/ApeWorX/ape/blob/master/tests/integration/cli/projects/test/tests/test_fixture_isolation.py
|
Apache-2.0
|
def functional_fixture_using_session(chain, session_one):
    """
    Demonstrate that transactions mined by a function-scoped fixture which
    depends on a session-scoped fixture do not persist on-chain.
    """
    _ = session_one
    chain.mine()
    # 10 blocks built up by session/module fixtures, plus the mine above.
    return 11
|
Showing the transactions in a functional-scoped
fixture that use a session-scoped fixture don't
persist on-chain.
|
functional_fixture_using_session
|
python
|
ApeWorX/ape
|
tests/integration/cli/projects/test/tests/test_fixture_isolation.py
|
https://github.com/ApeWorX/ape/blob/master/tests/integration/cli/projects/test/tests/test_fixture_isolation.py
|
Apache-2.0
|
def test_use_parametrized_transaction_again(chain, parametrized_transaction):
    """
    Re-using the parametrized fixture must not invalidate it.
    """
    base_height = 10  # All session + module
    assert chain.blocks.height == base_height + parametrized_transaction
|
Should not have invalidated parametrized fixture.
|
test_use_parametrized_transaction_again
|
python
|
ApeWorX/ape
|
tests/integration/cli/projects/test/tests/test_fixture_isolation.py
|
https://github.com/ApeWorX/ape/blob/master/tests/integration/cli/projects/test/tests/test_fixture_isolation.py
|
Apache-2.0
|
def test_use_isolate_in_test(chain, parametrized_transaction):
    """
    Explicit ``chain.isolate()`` usage inside a test must not interfere
    with the isolation fixtures Ape injects.
    """
    _ = parametrized_transaction  # Included only to add complexity.
    height_before = chain.blocks.height
    with chain.isolate():
        chain.mine()
        assert chain.blocks.height == height_before + 1
    # Leaving the context restores the original height.
    assert chain.blocks.height == height_before
|
Show the isolation we control doesn't affect
the isolation fixtures.
|
test_use_isolate_in_test
|
python
|
ApeWorX/ape
|
tests/integration/cli/projects/test/tests/test_fixture_isolation.py
|
https://github.com/ApeWorX/ape/blob/master/tests/integration/cli/projects/test/tests/test_fixture_isolation.py
|
Apache-2.0
|
def main():
    """
    Trigger an uncaught contract logic error so traceback output can be tested.
    """
    deployer = ape.accounts.test_accounts[0]
    instance = deployer.deploy(ape.project.ContractA)
    # This call reverts, producing the traceback under test.
    instance.setNumber(5, sender=deployer)
|
Cause an uncaught contract logic error to test traceback output.
|
main
|
python
|
ApeWorX/ape
|
tests/integration/cli/projects/with-contracts/scripts/txerr.py
|
https://github.com/ApeWorX/ape/blob/master/tests/integration/cli/projects/with-contracts/scripts/txerr.py
|
Apache-2.0
|
def batch_parallelize(algos, fn, batch_size):
    """
    Algorithms are coroutines that yield items to be processed in parallel.
    We concurrently run the algorithm on all items in the batch.
    """
    # Prime every coroutine; each (index, item) pair records which algorithm
    # produced the pending work item.
    inputs = []
    for i, algo in enumerate(algos):
        inputs.append((i, next(algo)))
    results = [None] * len(algos)
    while len(inputs) > 0:
        # Run `fn` over all currently-pending items, batch_size at a time.
        ret = list(apply_batched(fn, [x[1] for x in inputs], batch_size))
        assert len(ret) == len(inputs)
        inds = [x[0] for x in inputs]
        inputs = []
        # Feed each result back into its coroutine; it either yields the next
        # work item or finishes (StopIteration carries its return value).
        for i, r in zip(inds, ret):
            try:
                next_input = algos[i].send(r)
                inputs.append((i, next_input))
            except StopIteration as e:
                results[i] = e.value
    return results
|
Algorithms are coroutines that yield items to be processed in parallel.
We concurrently run the algorithm on all items in the batch.
|
batch_parallelize
|
python
|
openai/sparse_autoencoder
|
sparse_autoencoder/explanations.py
|
https://github.com/openai/sparse_autoencoder/blob/master/sparse_autoencoder/explanations.py
|
MIT
|
def triton_sparse_transpose_dense_matmul(
    sparse_indices: torch.Tensor,
    sparse_values: torch.Tensor,
    dense: torch.Tensor,
    N: int,
    BLOCK_SIZE_AK=128,
) -> torch.Tensor:
    """
    calculates sparse.T @ dense (i.e reducing along the collated dimension of sparse)
    dense must be contiguous along dim 0 (in other words, dense.T is contiguous)
    sparse_indices is shape (A, k)
    sparse_values is shape (A, k)
    dense is shape (A, B)
    output is shape (N, B)
    """
    assert sparse_indices.shape == sparse_values.shape
    assert sparse_indices.is_contiguous()
    assert sparse_values.is_contiguous()
    assert dense.is_contiguous()  # contiguous along B
    K = sparse_indices.shape[1]
    A = dense.shape[0]
    B = dense.shape[1]
    assert sparse_indices.shape[0] == A
    # COO-format and sorted
    # Flatten to A*K entries and sort by column index; sorting groups equal
    # output rows together, which lets the kernel accumulate runs locally.
    sorted_indices = sparse_indices.view(-1).sort()
    # Row 0: which input row (0..A-1) each entry came from, gathered in the
    # sorted order. Row 1: the sorted column indices themselves.
    coo_indices = torch.stack(
        [
            torch.arange(A, device=sparse_indices.device).repeat_interleave(K)[
                sorted_indices.indices
            ],
            sorted_indices.values,
        ]
    )  # shape (2, A * K)
    coo_values = sparse_values.view(-1)[sorted_indices.indices]  # shape (A * K,)
    return triton_coo_sparse_dense_matmul(coo_indices, coo_values, dense, N, BLOCK_SIZE_AK)
|
calculates sparse.T @ dense (i.e reducing along the collated dimension of sparse)
dense must be contiguous along dim 0 (in other words, dense.T is contiguous)
sparse_indices is shape (A, k)
sparse_values is shape (A, k)
dense is shape (A, B)
output is shape (N, B)
|
triton_sparse_transpose_dense_matmul
|
python
|
openai/sparse_autoencoder
|
sparse_autoencoder/kernels.py
|
https://github.com/openai/sparse_autoencoder/blob/master/sparse_autoencoder/kernels.py
|
MIT
|
def triton_sparse_transpose_dense_matmul_kernel(
    coo_indices_ptr,
    coo_values_ptr,
    dense_ptr,
    out_ptr,
    stride_da,
    stride_db,
    B,
    N,
    AK,
    BLOCK_SIZE_AK: tl.constexpr,
    BLOCK_SIZE_B: tl.constexpr,
    ):
    """
    coo_indices is shape (2, AK)
    coo_values is shape (AK,)
    dense is shape (A, B), contiguous along B
    out is shape (N, B)

    Each program handles a (BLOCK_SIZE_AK, BLOCK_SIZE_B) tile: one slab of
    COO entries and one slab of output columns. Runs of consecutive equal
    output-row indices ``k`` are accumulated locally and flushed with a
    single atomic add when ``k`` changes.
    """
    pid_ak = tl.program_id(0)
    pid_b = tl.program_id(1)
    coo_offsets = tl.arange(0, BLOCK_SIZE_AK)
    b_offsets = tl.arange(0, BLOCK_SIZE_B)
    # First AK entries of coo_indices: source-row (A) coordinates.
    A_coords = tl.load(
        coo_indices_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets,
        mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
    )
    # Second AK entries: output-row (K) coordinates.
    K_coords = tl.load(
        coo_indices_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets + AK,
        mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
    )
    values = tl.load(
        coo_values_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets,
        mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
    )
    last_k = tl.min(K_coords)
    accum = tl.zeros((BLOCK_SIZE_B,), dtype=tl.float32)
    for ind in range(BLOCK_SIZE_AK):
        if ind + pid_ak * BLOCK_SIZE_AK < AK:
            # workaround to do A_coords[ind]
            a = tl.sum(
                tl.where(
                    tl.arange(0, BLOCK_SIZE_AK) == ind,
                    A_coords,
                    tl.zeros((BLOCK_SIZE_AK,), dtype=tl.int64),
                )
            )
            # workaround to do K_coords[ind]
            k = tl.sum(
                tl.where(
                    tl.arange(0, BLOCK_SIZE_AK) == ind,
                    K_coords,
                    tl.zeros((BLOCK_SIZE_AK,), dtype=tl.int64),
                )
            )
            # workaround to do values[ind]
            v = tl.sum(
                tl.where(
                    tl.arange(0, BLOCK_SIZE_AK) == ind,
                    values,
                    tl.zeros((BLOCK_SIZE_AK,), dtype=tl.float32),
                )
            )
            tl.device_assert(k < N)
            if k != last_k:
                # Output row changed: flush the accumulated partial sums
                # for the previous row with one atomic add.
                tl.atomic_add(
                    out_ptr + last_k * B + BLOCK_SIZE_B * pid_b + b_offsets,
                    accum,
                    mask=BLOCK_SIZE_B * pid_b + b_offsets < B,
                )
                accum *= 0
                last_k = k
            if v != 0:
                accum += v * tl.load(dense_ptr + a * stride_da + b_offsets, mask=b_offsets < B)
    # Flush the final run.
    tl.atomic_add(
        out_ptr + last_k * B + BLOCK_SIZE_B * pid_b + b_offsets,
        accum,
        mask=BLOCK_SIZE_B * pid_b + b_offsets < B,
    )
|
coo_indices is shape (2, AK)
coo_values is shape (AK,)
dense is shape (A, B), contiguous along B
out is shape (N, B)
|
triton_sparse_transpose_dense_matmul_kernel
|
python
|
openai/sparse_autoencoder
|
sparse_autoencoder/kernels.py
|
https://github.com/openai/sparse_autoencoder/blob/master/sparse_autoencoder/kernels.py
|
MIT
|
def triton_sparse_dense_matmul(
    sparse_indices: torch.Tensor,
    sparse_values: torch.Tensor,
    dense: torch.Tensor,
) -> torch.Tensor:
    """
    calculates sparse @ dense (i.e reducing along the uncollated dimension of sparse)
    dense must be contiguous along dim 0 (in other words, dense.T is contiguous)
    sparse_indices is shape (A, k)
    sparse_values is shape (A, k)
    dense is shape (N, B)
    output is shape (A, B)
    """
    N = dense.shape[0]
    assert sparse_indices.shape == sparse_values.shape
    assert sparse_indices.is_contiguous()
    assert sparse_values.is_contiguous()
    assert dense.is_contiguous()  # contiguous along B
    A = sparse_indices.shape[0]
    K = sparse_indices.shape[1]
    B = dense.shape[1]
    out = torch.zeros(A, B, device=dense.device, dtype=sparse_values.dtype)
    # Launch one program per sparse row; block sizes are rounded up to the
    # next power of two as required by tl.arange inside the kernel.
    triton_sparse_dense_matmul_kernel[(A,)](
        sparse_indices,
        sparse_values,
        dense,
        out,
        stride_dn=dense.stride(0),
        stride_db=dense.stride(1),
        A=A,
        B=B,
        N=N,
        K=K,
        BLOCK_SIZE_K=triton.next_power_of_2(K),
        BLOCK_SIZE_B=triton.next_power_of_2(B),
    )
    return out
|
calculates sparse @ dense (i.e reducing along the uncollated dimension of sparse)
dense must be contiguous along dim 0 (in other words, dense.T is contiguous)
sparse_indices is shape (A, k)
sparse_values is shape (A, k)
dense is shape (N, B)
output is shape (A, B)
|
triton_sparse_dense_matmul
|
python
|
openai/sparse_autoencoder
|
sparse_autoencoder/kernels.py
|
https://github.com/openai/sparse_autoencoder/blob/master/sparse_autoencoder/kernels.py
|
MIT
|
def triton_sparse_dense_matmul_kernel(
    sparse_indices_ptr,
    sparse_values_ptr,
    dense_ptr,
    out_ptr,
    stride_dn,
    stride_db,
    A,
    B,
    N,
    K,
    BLOCK_SIZE_K: tl.constexpr,
    BLOCK_SIZE_B: tl.constexpr,
):
    """
    One program instance per output row:
    out[pid, :] = sum_k sparse_values[pid, k] * dense[sparse_indices[pid, k], :]

    sparse_indices is shape (A, K)
    sparse_values is shape (A, K)
    dense is shape (N, B), contiguous along B
    out is shape (A, B)
    """
    pid = tl.program_id(0)
    offsets_k = tl.arange(0, BLOCK_SIZE_K)
    # Load this row's K (index, value) pairs once.
    sparse_indices = tl.load(
        sparse_indices_ptr + pid * K + offsets_k, mask=offsets_k < K
    )  # shape (K,)
    sparse_values = tl.load(
        sparse_values_ptr + pid * K + offsets_k, mask=offsets_k < K
    )  # shape (K,)
    accum = tl.zeros((BLOCK_SIZE_B,), dtype=tl.float32)
    offsets_b = tl.arange(0, BLOCK_SIZE_B)
    for k in range(K):
        # workaround to do sparse_indices[k] (scalar extraction via masked sum)
        i = tl.sum(
            tl.where(
                tl.arange(0, BLOCK_SIZE_K) == k,
                sparse_indices,
                tl.zeros((BLOCK_SIZE_K,), dtype=tl.int64),
            )
        )
        # workaround to do sparse_values[k]
        v = tl.sum(
            tl.where(
                tl.arange(0, BLOCK_SIZE_K) == k,
                sparse_values,
                tl.zeros((BLOCK_SIZE_K,), dtype=tl.float32),
            )
        )
        # Index must address a valid row of dense.
        tl.device_assert(i < N)
        if v != 0:
            # accum += v * dense[i, :]; zero values are skipped entirely.
            accum += v * tl.load(
                dense_ptr + i * stride_dn + offsets_b * stride_db, mask=offsets_b < B
            )
    # Cast the float32 accumulator back to the value dtype on store.
    tl.store(out_ptr + pid * B + offsets_b, accum.to(sparse_values.dtype), mask=offsets_b < B)
|
sparse_indices is shape (A, K)
sparse_values is shape (A, K)
dense is shape (N, B), contiguous along B
out is shape (A, B)
|
triton_sparse_dense_matmul_kernel
|
python
|
openai/sparse_autoencoder
|
sparse_autoencoder/kernels.py
|
https://github.com/openai/sparse_autoencoder/blob/master/sparse_autoencoder/kernels.py
|
MIT
|
def triton_dense_dense_sparseout_matmul(
    dense1: torch.Tensor,
    dense2: torch.Tensor,
    at_indices: torch.Tensor,
) -> torch.Tensor:
    """
    Evaluate ``dense1 @ dense2`` only at the requested output positions,
    equivalent to ``(dense1 @ dense2).gather(1, at_indices)``.

    dense1: shape (A, B)
    dense2: shape (B, N)
    at_indices: shape (A, K)
    returns values of shape (A, K)
    """
    num_rows, inner_dim = dense1.shape
    num_cols = dense2.shape[1]
    assert dense2.shape[0] == inner_dim
    assert at_indices.shape[0] == num_rows
    num_indices = at_indices.shape[1]
    assert at_indices.is_contiguous()
    assert dense1.stride(1) == 1, "dense1 must be contiguous along B"
    assert dense2.stride(0) == 1, "dense2 must be contiguous along B"
    if num_indices > 512:
        # For many indices the full matmul + gather beats the kernel.
        return (dense1 @ dense2).gather(1, at_indices)
    out = torch.zeros(num_rows, num_indices, device=dense1.device, dtype=dense1.dtype)
    # One kernel program per row of dense1.
    triton_dense_dense_sparseout_matmul_kernel[(num_rows,)](
        dense1,
        dense2,
        at_indices,
        out,
        stride_d1a=dense1.stride(0),
        stride_d1b=dense1.stride(1),
        stride_d2b=dense2.stride(0),
        stride_d2n=dense2.stride(1),
        A=num_rows,
        B=inner_dim,
        N=num_cols,
        K=num_indices,
        BLOCK_SIZE_B=triton.next_power_of_2(inner_dim),
        BLOCK_SIZE_N=triton.next_power_of_2(num_cols),
        BLOCK_SIZE_K=triton.next_power_of_2(num_indices),
    )
    return out
|
dense1: shape (A, B)
dense2: shape (B, N)
at_indices: shape (A, K)
out values: shape (A, K)
calculates dense1 @ dense2 only for the indices in at_indices
equivalent to (dense1 @ dense2).gather(1, at_indices)
|
triton_dense_dense_sparseout_matmul
|
python
|
openai/sparse_autoencoder
|
sparse_autoencoder/kernels.py
|
https://github.com/openai/sparse_autoencoder/blob/master/sparse_autoencoder/kernels.py
|
MIT
|
def triton_dense_dense_sparseout_matmul_kernel(
    dense1_ptr,
    dense2_ptr,
    at_indices_ptr,
    out_ptr,
    stride_d1a,
    stride_d1b,
    stride_d2b,
    stride_d2n,
    A,
    B,
    N,
    K,
    BLOCK_SIZE_B: tl.constexpr,
    BLOCK_SIZE_N: tl.constexpr,
    BLOCK_SIZE_K: tl.constexpr,
):
    """
    One program instance per row of dense1:
    out[pid, k] = dot(dense1[pid, :], dense2[:, at_indices[pid, k]])

    dense1: shape (A, B)
    dense2: shape (B, N)
    at_indices: shape (A, K)
    out values: shape (A, K)
    """
    pid = tl.program_id(0)
    offsets_k = tl.arange(0, BLOCK_SIZE_K)
    at_indices = tl.load(at_indices_ptr + pid * K + offsets_k, mask=offsets_k < K)  # shape (K,)
    offsets_b = tl.arange(0, BLOCK_SIZE_B)
    dense1 = tl.load(
        dense1_ptr + pid * stride_d1a + offsets_b * stride_d1b, mask=offsets_b < B
    )  # shape (B,)
    accum = tl.zeros((BLOCK_SIZE_K,), dtype=tl.float32)
    for k in range(K):
        # workaround to do at_indices[k] (scalar extraction via masked sum)
        i = tl.sum(
            tl.where(
                tl.arange(0, BLOCK_SIZE_K) == k,
                at_indices,
                tl.zeros((BLOCK_SIZE_K,), dtype=tl.int64),
            )
        )
        # Index must address a valid column of dense2.
        tl.device_assert(i < N)
        dense2col = tl.load(
            dense2_ptr + offsets_b * stride_d2b + i * stride_d2n, mask=offsets_b < B
        )  # shape (B,)
        # Scatter the dot product into slot k of the accumulator.
        # Fix: the "else" branch must be float32 to match accum (the original
        # used int64 zeros here, inconsistent with the sibling kernels).
        accum += tl.where(
            tl.arange(0, BLOCK_SIZE_K) == k,
            tl.sum(dense1 * dense2col),
            tl.zeros((BLOCK_SIZE_K,), dtype=tl.float32),
        )
    tl.store(out_ptr + pid * K + offsets_k, accum, mask=offsets_k < K)
|
dense1: shape (A, B)
dense2: shape (B, N)
at_indices: shape (A, K)
out values: shape (A, K)
|
triton_dense_dense_sparseout_matmul_kernel
|
python
|
openai/sparse_autoencoder
|
sparse_autoencoder/kernels.py
|
https://github.com/openai/sparse_autoencoder/blob/master/sparse_autoencoder/kernels.py
|
MIT
|
def autoencoder_loss(
    reconstruction: torch.Tensor,
    original_input: torch.Tensor,
    latent_activations: torch.Tensor,
    l1_weight: float,
) -> torch.Tensor:
    """
    Total autoencoder training loss: normalized MSE plus weighted normalized L1.

    :param reconstruction: output of Autoencoder.decode (shape: [batch, n_inputs])
    :param original_input: input of Autoencoder.encode (shape: [batch, n_inputs])
    :param latent_activations: output of Autoencoder.encode (shape: [batch, n_latents])
    :param l1_weight: weight of L1 loss
    :return: loss (shape: [1])
    """
    reconstruction_term = normalized_mean_squared_error(reconstruction, original_input)
    sparsity_term = normalized_L1_loss(latent_activations, original_input)
    return reconstruction_term + sparsity_term * l1_weight
|
:param reconstruction: output of Autoencoder.decode (shape: [batch, n_inputs])
:param original_input: input of Autoencoder.encode (shape: [batch, n_inputs])
:param latent_activations: output of Autoencoder.encode (shape: [batch, n_latents])
:param l1_weight: weight of L1 loss
:return: loss (shape: [1])
|
autoencoder_loss
|
python
|
openai/sparse_autoencoder
|
sparse_autoencoder/loss.py
|
https://github.com/openai/sparse_autoencoder/blob/master/sparse_autoencoder/loss.py
|
MIT
|
def normalized_mean_squared_error(
    reconstruction: torch.Tensor,
    original_input: torch.Tensor,
) -> torch.Tensor:
    """
    Per-example MSE divided by the mean square of that example's input,
    averaged over the batch.

    :param reconstruction: output of Autoencoder.decode (shape: [batch, n_inputs])
    :param original_input: input of Autoencoder.encode (shape: [batch, n_inputs])
    :return: normalized mean squared error (shape: [1])
    """
    per_example_error = ((reconstruction - original_input) ** 2).mean(dim=1)
    per_example_scale = (original_input**2).mean(dim=1)
    return (per_example_error / per_example_scale).mean()
|
:param reconstruction: output of Autoencoder.decode (shape: [batch, n_inputs])
:param original_input: input of Autoencoder.encode (shape: [batch, n_inputs])
:return: normalized mean squared error (shape: [1])
|
normalized_mean_squared_error
|
python
|
openai/sparse_autoencoder
|
sparse_autoencoder/loss.py
|
https://github.com/openai/sparse_autoencoder/blob/master/sparse_autoencoder/loss.py
|
MIT
|
def normalized_L1_loss(
    latent_activations: torch.Tensor,
    original_input: torch.Tensor,
) -> torch.Tensor:
    """
    Per-example L1 norm of the latents divided by the L2 norm of that
    example's input, averaged over the batch.

    :param latent_activations: output of Autoencoder.encode (shape: [batch, n_latents])
    :param original_input: input of Autoencoder.encode (shape: [batch, n_inputs])
    :return: normalized L1 loss (shape: [1])
    """
    latent_l1 = latent_activations.abs().sum(dim=1)
    input_l2 = original_input.norm(dim=1)
    return (latent_l1 / input_l2).mean()
|
:param latent_activations: output of Autoencoder.encode (shape: [batch, n_latents])
:param original_input: input of Autoencoder.encode (shape: [batch, n_inputs])
:return: normalized L1 loss (shape: [1])
|
normalized_L1_loss
|
python
|
openai/sparse_autoencoder
|
sparse_autoencoder/loss.py
|
https://github.com/openai/sparse_autoencoder/blob/master/sparse_autoencoder/loss.py
|
MIT
|
def __init__(
    self, n_latents: int, n_inputs: int, activation: Callable = nn.ReLU(), tied: bool = False,
    normalize: bool = False
) -> None:
    """
    :param n_latents: dimension of the autoencoder latent
    :param n_inputs: dimensionality of the original data (e.g residual stream, number of MLP hidden units)
    :param activation: activation function
    :param tied: whether to tie the encoder and decoder weights
    :param normalize: whether inputs are normalized in preprocess (decode then rescales)
    """
    super().__init__()
    # Bias subtracted from inputs before encoding and added back after decoding.
    self.pre_bias = nn.Parameter(torch.zeros(n_inputs))
    self.encoder: nn.Module = nn.Linear(n_inputs, n_latents, bias=False)
    self.latent_bias = nn.Parameter(torch.zeros(n_latents))
    # NOTE: the default nn.ReLU() instance is shared across Autoencoder
    # instances; ReLU holds no state, so sharing is harmless.
    self.activation = activation
    if tied:
        # TiedTranspose reuses the encoder weights for decoding.
        self.decoder: nn.Linear | TiedTranspose = TiedTranspose(self.encoder)
    else:
        self.decoder = nn.Linear(n_latents, n_inputs, bias=False)
    self.normalize = normalize
    # Declarations so type checkers know the buffers registered below.
    self.stats_last_nonzero: torch.Tensor
    self.latents_activation_frequency: torch.Tensor
    self.latents_mean_square: torch.Tensor
    # Buffers (not parameters): tracked statistics, saved with state_dict.
    # stats_last_nonzero counts steps since each latent last fired (see forward).
    self.register_buffer("stats_last_nonzero", torch.zeros(n_latents, dtype=torch.long))
    self.register_buffer(
        "latents_activation_frequency", torch.ones(n_latents, dtype=torch.float)
    )
    self.register_buffer("latents_mean_square", torch.zeros(n_latents, dtype=torch.float))
|
:param n_latents: dimension of the autoencoder latent
:param n_inputs: dimensionality of the original data (e.g residual stream, number of MLP hidden units)
:param activation: activation function
:param tied: whether to tie the encoder and decoder weights
|
__init__
|
python
|
openai/sparse_autoencoder
|
sparse_autoencoder/model.py
|
https://github.com/openai/sparse_autoencoder/blob/master/sparse_autoencoder/model.py
|
MIT
|
def encode_pre_act(self, x: torch.Tensor, latent_slice: slice = slice(None)) -> torch.Tensor:
    """
    Compute latent pre-activations for *x*.

    :param x: input data (shape: [batch, n_inputs])
    :param latent_slice: restrict computation to this slice of latents,
        e.g. ``slice(0, 10)`` for only the first 10 latents.
    :return: latents before the activation function (shape: [batch, n_latents])
    """
    centered = x - self.pre_bias
    return F.linear(
        centered, self.encoder.weight[latent_slice], self.latent_bias[latent_slice]
    )
|
:param x: input data (shape: [batch, n_inputs])
:param latent_slice: slice of latents to compute
Example: latent_slice = slice(0, 10) to compute only the first 10 latents.
:return: autoencoder latents before activation (shape: [batch, n_latents])
|
encode_pre_act
|
python
|
openai/sparse_autoencoder
|
sparse_autoencoder/model.py
|
https://github.com/openai/sparse_autoencoder/blob/master/sparse_autoencoder/model.py
|
MIT
|
def encode(self, x: torch.Tensor) -> tuple[torch.Tensor, dict[str, Any]]:
    """
    Encode *x* into post-activation latents.

    :param x: input data (shape: [batch, n_inputs])
    :return: (latents of shape [batch, n_latents], preprocessing info dict)
    """
    preprocessed, info = self.preprocess(x)
    latents = self.activation(self.encode_pre_act(preprocessed))
    return latents, info
|
:param x: input data (shape: [batch, n_inputs])
:return: autoencoder latents (shape: [batch, n_latents])
|
encode
|
python
|
openai/sparse_autoencoder
|
sparse_autoencoder/model.py
|
https://github.com/openai/sparse_autoencoder/blob/master/sparse_autoencoder/model.py
|
MIT
|
def decode(self, latents: torch.Tensor, info: dict[str, Any] | None = None) -> torch.Tensor:
    """
    Reconstruct inputs from latents, undoing input normalization if enabled.

    :param latents: autoencoder latents (shape: [batch, n_latents])
    :param info: preprocessing info from encode; required when normalize is on
    :return: reconstructed data (shape: [batch, n_inputs])
    """
    reconstruction = self.decoder(latents) + self.pre_bias
    if not self.normalize:
        return reconstruction
    assert info is not None
    return reconstruction * info["std"] + info["mu"]
|
:param latents: autoencoder latents (shape: [batch, n_latents])
:return: reconstructed data (shape: [batch, n_inputs])
|
decode
|
python
|
openai/sparse_autoencoder
|
sparse_autoencoder/model.py
|
https://github.com/openai/sparse_autoencoder/blob/master/sparse_autoencoder/model.py
|
MIT
|
def forward(self, x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Full autoencoder pass, updating dead-latent statistics as a side effect.

    :param x: input data (shape: [batch, n_inputs])
    :return: autoencoder latents pre activation (shape: [batch, n_latents])
             autoencoder latents (shape: [batch, n_latents])
             reconstructed data (shape: [batch, n_inputs])
    """
    preprocessed, info = self.preprocess(x)
    latents_pre_act = self.encode_pre_act(preprocessed)
    latents = self.activation(latents_pre_act)
    recons = self.decode(latents, info)
    # Dead-latent bookkeeping: zero the counter for any latent that fired
    # somewhere in this batch, then age every counter by one step.
    dead_mask = (latents == 0).all(dim=0).long()
    self.stats_last_nonzero *= dead_mask
    self.stats_last_nonzero += 1
    return latents_pre_act, latents, recons
|
:param x: input data (shape: [batch, n_inputs])
:return: autoencoder latents pre activation (shape: [batch, n_latents])
autoencoder latents (shape: [batch, n_latents])
reconstructed data (shape: [batch, n_inputs])
|
forward
|
python
|
openai/sparse_autoencoder
|
sparse_autoencoder/model.py
|
https://github.com/openai/sparse_autoencoder/blob/master/sparse_autoencoder/model.py
|
MIT
|
def v1(location, layer_index):
    """
    Path to a first-release GPT-2 small autoencoder checkpoint.

    Details:
    - Number of autoencoder latents: 32768
    - Number of training tokens: ~64M
    - Activation function: ReLU
    - L1 regularization strength: 0.01
    - Layer normed inputs: false
    - NeuronRecord files:
      `az://openaipublic/sparse-autoencoder/gpt2-small/{location}/collated_activations/{layer_index}/{latent_index}.json`
    """
    valid_locations = ("mlp_post_act", "resid_delta_mlp")
    assert location in valid_locations
    assert layer_index in range(12)
    return (
        "az://openaipublic/sparse-autoencoder/gpt2-small/"
        f"{location}/autoencoders/{layer_index}.pt"
    )
|
Details:
- Number of autoencoder latents: 32768
- Number of training tokens: ~64M
- Activation function: ReLU
- L1 regularization strength: 0.01
- Layer normed inputs: false
- NeuronRecord files:
`az://openaipublic/sparse-autoencoder/gpt2-small/{location}/collated_activations/{layer_index}/{latent_index}.json`
|
v1
|
python
|
openai/sparse_autoencoder
|
sparse_autoencoder/paths.py
|
https://github.com/openai/sparse_autoencoder/blob/master/sparse_autoencoder/paths.py
|
MIT
|
def v5_32k(location, layer_index):
    """
    Path to a v5 GPT-2 small autoencoder checkpoint with 2**15 latents.

    Details:
    - Number of autoencoder latents: 2**15 = 32768
    - Number of training tokens: TODO
    - Activation function: TopK(32)
    - L1 regularization strength: n/a
    - Layer normed inputs: true
    """
    valid_locations = ("resid_delta_attn", "resid_delta_mlp", "resid_post_attn", "resid_post_mlp")
    assert location in valid_locations
    assert layer_index in range(12)
    # "32k" is really 2**15 latents (and "128k" is 2**17 ~= 131k).
    return (
        "az://openaipublic/sparse-autoencoder/gpt2-small/"
        f"{location}_v5_32k/autoencoders/{layer_index}.pt"
    )
|
Details:
- Number of autoencoder latents: 2**15 = 32768
- Number of training tokens: TODO
- Activation function: TopK(32)
- L1 regularization strength: n/a
- Layer normed inputs: true
|
v5_32k
|
python
|
openai/sparse_autoencoder
|
sparse_autoencoder/paths.py
|
https://github.com/openai/sparse_autoencoder/blob/master/sparse_autoencoder/paths.py
|
MIT
|
def v5_128k(location, layer_index):
    """
    Path to a v5 GPT-2 small autoencoder checkpoint with 2**17 latents.

    Details:
    - Number of autoencoder latents: 2**17 = 131072
    - Number of training tokens: TODO
    - Activation function: TopK(32)
    - L1 regularization strength: n/a
    - Layer normed inputs: true
    """
    valid_locations = ("resid_delta_attn", "resid_delta_mlp", "resid_post_attn", "resid_post_mlp")
    assert location in valid_locations
    assert layer_index in range(12)
    # "128k" is really 2**17 ~= 131k latents (and "32k" is 2**15).
    return (
        "az://openaipublic/sparse-autoencoder/gpt2-small/"
        f"{location}_v5_128k/autoencoders/{layer_index}.pt"
    )
|
Details:
- Number of autoencoder latents: 2**17 = 131072
- Number of training tokens: TODO
- Activation function: TopK(32)
- L1 regularization strength: n/a
- Layer normed inputs: true
|
v5_128k
|
python
|
openai/sparse_autoencoder
|
sparse_autoencoder/paths.py
|
https://github.com/openai/sparse_autoencoder/blob/master/sparse_autoencoder/paths.py
|
MIT
|
def unit_norm_decoder_grad_adjustment_(autoencoder) -> None:
    """
    Project out, in place, the component of the decoder-weight gradient that is
    parallel to each dictionary vector.

    :param autoencoder: model whose ``decoder.weight.grad`` is adjusted;
        assumes the decoder columns are already unit normed.
    """
    assert autoencoder.decoder.weight.grad is not None
    # For each latent n: grad[:, n] -= (w[:, n] . grad[:, n]) * w[:, n],
    # with the per-latent dot products from the einsum and the subtraction
    # fused by triton_add_mul_ via c=-1.
    triton_add_mul_(
        autoencoder.decoder.weight.grad,
        torch.einsum("bn,bn->n", autoencoder.decoder.weight.data, autoencoder.decoder.weight.grad),
        autoencoder.decoder.weight.data,
        c=-1,
    )
|
project out gradient information parallel to the dictionary vectors - assumes that the decoder is already unit normed
|
unit_norm_decoder_grad_adjustment_
|
python
|
openai/sparse_autoencoder
|
sparse_autoencoder/train.py
|
https://github.com/openai/sparse_autoencoder/blob/master/sparse_autoencoder/train.py
|
MIT
|
def batch_tensors(
    it: Iterable[torch.Tensor],
    batch_size: int,
    drop_last=True,
    stream=None,
) -> Iterator[torch.Tensor]:
    """
    Re-batch a stream of tensors into fixed-size batches.

    input is iterable of tensors of shape [batch_old, ...]
    output is iterable of tensors of shape [batch_size, ...]
    batch_old does not need to be divisible by batch_size

    :param drop_last: if True, a final partial batch is discarded.
    :param stream: optional CUDA stream used for the concatenation.
    """
    tensors = []          # pending tensors not yet emitted
    batch_so_far = 0      # total rows currently pending in `tensors`
    for t in it:
        tensors.append(t)
        batch_so_far += t.shape[0]
        # Keep accumulating until at least one full batch is available.
        if sum(t.shape[0] for t in tensors) < batch_size:
            continue
        while batch_so_far >= batch_size:
            if len(tensors) == 1:
                # Avoid a needless copy when only one tensor is pending.
                (concat,) = tensors
            else:
                with torch.cuda.stream(stream):
                    concat = torch.cat(tensors, dim=0)
            # Emit as many full batches as `concat` contains.
            offset = 0
            while offset + batch_size <= concat.shape[0]:
                yield concat[offset : offset + batch_size]
                batch_so_far -= batch_size
                offset += batch_size
            # Carry over the unemitted tail (if any) as the new pending buffer.
            tensors = [concat[offset:]] if offset < concat.shape[0] else []
    if len(tensors) > 0 and not drop_last:
        # Final partial batch, smaller than batch_size.
        yield torch.cat(tensors, dim=0)
|
input is iterable of tensors of shape [batch_old, ...]
output is iterable of tensors of shape [batch_size, ...]
batch_old does not need to be divisible by batch_size
|
batch_tensors
|
python
|
openai/sparse_autoencoder
|
sparse_autoencoder/train.py
|
https://github.com/openai/sparse_autoencoder/blob/master/sparse_autoencoder/train.py
|
MIT
|
def bootstrap_statistic(observed_sample, compute_statistic, num_trials):
    """
    Creates num_trials resamples of the initial sample.
    Returns an array of the provided statistic for those samples.
    * observed_sample: the initial sample, as an array.
    * compute_statistic: a function that takes a sample as
      an array and returns the statistic for that sample.
    * num_trials: the number of bootstrap samples to create.
    """
    # Check that observed_sample is an array!
    if not isinstance(observed_sample, np.ndarray):
        raise ValueError('The first parameter to bootstrap_statistic must be a sample represented as an array, not a value of type ' + str(type(observed_sample).__name__))
    # Accumulate in a Python list instead of repeated np.append, which
    # copied the whole array each trial (accidentally quadratic).
    statistics = []
    for _ in np.arange(0, num_trials):
        # Key: in bootstrapping we must always sample with replacement
        simulated_resample = np.random.choice(observed_sample, len(observed_sample))
        statistics.append(compute_statistic(simulated_resample))
    return np.array(statistics)
|
Creates num_trials resamples of the initial sample.
Returns an array of the provided statistic for those samples.
* observed_sample: the initial sample, as an array.
* compute_statistic: a function that takes a sample as
an array and returns the statistic for that
sample.
* num_trials: the number of bootstrap samples to create.
|
bootstrap_statistic
|
python
|
plasma-umass/ChatDBG
|
samples/python/ds101.py
|
https://github.com/plasma-umass/ChatDBG/blob/master/samples/python/ds101.py
|
Apache-2.0
|
def stop_handler(event):
    """Record the type of the last stop so it can be reported later."""
    global last_error_type
    if not hasattr(event, "stop_signal"):
        # Stops without a signal (e.g., a breakpoint) are not real errors.
        last_error_type = ""
    elif event.stop_signal is not None:
        last_error_type = event.stop_signal
|
Sets last error type so we can report it later.
|
stop_handler
|
python
|
plasma-umass/ChatDBG
|
src/chatdbg/chatdbg_gdb.py
|
https://github.com/plasma-umass/ChatDBG/blob/master/src/chatdbg/chatdbg_gdb.py
|
Apache-2.0
|
def llm_debug(self, command: str):
    """
    {
        "name": "debug",
        "description": "The `debug` function runs a GDB command on the stopped program and gets the response.",
        "parameters": {
            "type": "object",
            "properties": {
                "command": {
                    "type": "string",
                    "description": "The GDB command to run, possibly with arguments."
                }
            },
            "required": [ "command" ]
        }
    }
    """
    # NOTE(review): the docstring above appears to serve as the model-facing
    # tool/function schema -- do not reword it without checking the consumer.
    # Refuse commands that fail the safety allowlist unless unsafe mode is on.
    if not chatdbg_config.unsafe and not command_is_safe(command):
        self._unsafe_cmd = True
        return command, f"Command `{command}` is not allowed."
    # Returns (command, output) so the caller can log both sides.
    return command, self._run_one_command(command)
|
{
"name": "debug",
"description": "The `debug` function runs a GDB command on the stopped program and gets the response.",
"parameters": {
"type": "object",
"properties": {
"command": {
"type": "string",
"description": "The GDB command to run, possibly with arguments."
}
},
"required": [ "command" ]
}
}
|
llm_debug
|
python
|
plasma-umass/ChatDBG
|
src/chatdbg/chatdbg_gdb.py
|
https://github.com/plasma-umass/ChatDBG/blob/master/src/chatdbg/chatdbg_gdb.py
|
Apache-2.0
|
def _is_debug_build(self) -> bool:
"""Returns False if not compiled with debug information."""
target = self._debugger.GetSelectedTarget()
if not target:
return False
for module in target.module_iter():
for cu in module.compile_unit_iter():
for line_entry in cu:
if line_entry.GetLine() > 0:
return True
return False
|
Returns False if not compiled with debug information.
|
_is_debug_build
|
python
|
plasma-umass/ChatDBG
|
src/chatdbg/chatdbg_lldb.py
|
https://github.com/plasma-umass/ChatDBG/blob/master/src/chatdbg/chatdbg_lldb.py
|
Apache-2.0
|
def get_thread(self) -> Optional[lldb.SBThread]:
    """
    Returns a currently stopped thread in the debugged process.
    :return: A currently stopped thread or None if no thread is stopped.
    """
    process = self._get_process()
    if not process:
        return None
    # Return the first thread with a meaningful stop reason.
    for thread in process:
        reason = thread.GetStopReason()
        if reason not in [lldb.eStopReasonNone, lldb.eStopReasonInvalid]:
            return thread
    # NOTE(review): this falls through to the *last* thread even when no
    # thread has a stop reason, contradicting the docstring's "None" promise,
    # and raises NameError if the process has no threads at all. Confirm
    # whether the last-thread fallback is intentional before changing it.
    return thread
|
Returns a currently stopped thread in the debugged process.
:return: A currently stopped thread or None if no thread is stopped.
|
get_thread
|
python
|
plasma-umass/ChatDBG
|
src/chatdbg/chatdbg_lldb.py
|
https://github.com/plasma-umass/ChatDBG/blob/master/src/chatdbg/chatdbg_lldb.py
|
Apache-2.0
|
def _get_process(self) -> Optional[lldb.SBProcess]:
"""
Get the process that the current target owns.
:return: An lldb object representing the process (lldb.SBProcess) that this target owns.
"""
target = self._debugger.GetSelectedTarget()
return target.process if target else None
|
Get the process that the current target owns.
:return: An lldb object representing the process (lldb.SBProcess) that this target owns.
|
_get_process
|
python
|
plasma-umass/ChatDBG
|
src/chatdbg/chatdbg_lldb.py
|
https://github.com/plasma-umass/ChatDBG/blob/master/src/chatdbg/chatdbg_lldb.py
|
Apache-2.0
|
def llm_debug(self, command: str):
    """
    {
        "name": "debug",
        "description": "The `debug` function runs an LLDB command on the stopped program and gets the response.",
        "parameters": {
            "type": "object",
            "properties": {
                "command": {
                    "type": "string",
                    "description": "The LLDB command to run, possibly with arguments."
                }
            },
            "required": [ "command" ]
        }
    }
    """
    # NOTE(review): the docstring above appears to serve as the model-facing
    # tool/function schema -- do not reword it without checking the consumer.
    # Refuse commands that fail the safety allowlist unless unsafe mode is on.
    if not chatdbg_config.unsafe and not command_is_safe(command):
        self._unsafe_cmd = True
        return command, f"Command `{command}` is not allowed."
    # Returns (command, output) so the caller can log both sides.
    return command, self._run_one_command(command)
|
{
"name": "debug",
"description": "The `debug` function runs an LLDB command on the stopped program and gets the response.",
"parameters": {
"type": "object",
"properties": {
"command": {
"type": "string",
"description": "The LLDB command to run, possibly with arguments."
}
},
"required": [ "command" ]
}
}
|
llm_debug
|
python
|
plasma-umass/ChatDBG
|
src/chatdbg/chatdbg_lldb.py
|
https://github.com/plasma-umass/ChatDBG/blob/master/src/chatdbg/chatdbg_lldb.py
|
Apache-2.0
|
def onecmd(self, line: str) -> bool:
    """
    Override to stash the results in our history.

    Wraps ``cmd.Cmd.onecmd``: temporarily swaps ``self.stdout`` for a
    capturing wrapper, runs the command, then records the (command, output)
    pair in the dialog log and the local history (excluding a few
    non-recordable commands).
    """
    if not line:
        # blank -- let super call back to into onecmd
        return super().onecmd(line)
    else:
        hist_file = CaptureOutput(self.stdout)
        self.stdout = hist_file
        try:
            self.was_chat_or_renew = False
            return super().onecmd(line)
        finally:
            # Always restore stdout and harvest captured output, even if the
            # command raised.
            self.stdout = hist_file.getfile()
            output = strip_ansi(hist_file.getvalue())
            if not self.was_chat_or_renew:
                self._log.on_function_call(line, output)
                # NOTE(review): line.split()[0] raises IndexError for a
                # whitespace-only line -- confirm cmd never passes one here.
                if line.split()[0] not in [
                    "hist",
                    "test_prompt",
                    "c",
                    "cont",
                    "continue",
                    "config",
                ]:
                    self._history.append(line, output)
|
Override to stash the results in our history.
|
onecmd
|
python
|
plasma-umass/ChatDBG
|
src/chatdbg/chatdbg_pdb.py
|
https://github.com/plasma-umass/ChatDBG/blob/master/src/chatdbg/chatdbg_pdb.py
|
Apache-2.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.