repo
stringlengths
2
99
file
stringlengths
13
225
code
stringlengths
0
18.3M
file_length
int64
0
18.3M
avg_line_length
float64
0
1.36M
max_line_length
int64
0
4.26M
extension_type
stringclasses
1 value
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/block_processing/__init__.py
0
0
0
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_withholding.py
from eth2spec.test.context import (
    spec_state_test,
    with_altair_and_later,
    with_presets,
)
from eth2spec.test.helpers.constants import (
    MINIMAL,
)
from eth2spec.test.helpers.attestations import (
    state_transition_with_full_block,
)
from eth2spec.test.helpers.block import (
    build_empty_block_for_next_slot,
)
from eth2spec.test.helpers.fork_choice import (
    get_genesis_forkchoice_store_and_block,
    on_tick_and_append_step,
    tick_and_add_block,
    apply_next_epoch_with_attestations,
    find_next_justifying_slot,
)
from eth2spec.test.helpers.state import (
    state_transition_and_sign_block,
    next_epoch,
)

TESTING_PRESETS = [MINIMAL]


@with_altair_and_later
@spec_state_test
@with_presets(TESTING_PRESETS, reason="too slow")
def test_withholding_attack(spec, state):
    """
    Checks that a withheld block carrying justifying attestations does not
    displace the honest head once it is finally revealed.
    """
    test_steps = []
    # Initialization
    store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
    yield 'anchor_state', state
    yield 'anchor_block', anchor_block
    current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
    on_tick_and_append_step(spec, store, current_time, test_steps)
    assert store.time == current_time

    next_epoch(spec, state)
    on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)

    # Fill epoch 1 to 3
    for _ in range(3):
        state, store, _ = yield from apply_next_epoch_with_attestations(
            spec, state, store, True, True, test_steps=test_steps)
    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
    assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3

    # Create the attack block that includes justifying attestations for epoch 4
    # This block is withheld & revealed only in epoch 5
    signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, False)
    assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state)
    assert len(signed_blocks) > 1
    signed_attack_block = signed_blocks[-1]
    # Everything before the final (withheld) block goes into the store now
    for signed_block in signed_blocks[:-1]:
        yield from tick_and_add_block(spec, store, signed_block, test_steps)
        assert spec.get_head(store) == signed_block.message.hash_tree_root()
    assert spec.get_head(store) == signed_blocks[-2].message.hash_tree_root()
    state = store.block_states[spec.get_head(store)].copy()
    assert spec.compute_epoch_at_slot(state.slot) == 4
    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
    assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3

    # Create an honest chain in epoch 5 that includes the justifying attestations from the attack block
    next_epoch(spec, state)
    assert spec.compute_epoch_at_slot(state.slot) == 5
    assert state.current_justified_checkpoint.epoch == 3

    # Create two blocks in the honest chain with full attestations, and add to the store
    for _ in range(2):
        signed_block = state_transition_with_full_block(spec, state, True, False)
        yield from tick_and_add_block(spec, store, signed_block, test_steps)

    # Create final block in the honest chain that includes the justifying attestations from the attack block
    honest_block = build_empty_block_for_next_slot(spec, state)
    honest_block.body.attestations = signed_attack_block.message.body.attestations
    signed_honest_block = state_transition_and_sign_block(spec, state, honest_block)

    # Add the honest block to the store
    yield from tick_and_add_block(spec, store, signed_honest_block, test_steps)
    assert spec.get_head(store) == signed_honest_block.message.hash_tree_root()
    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
    assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3

    # Tick to the next slot so proposer boost is not a factor in choosing the head
    current_time = (honest_block.slot + 1) * spec.config.SECONDS_PER_SLOT + store.genesis_time
    on_tick_and_append_step(spec, store, current_time, test_steps)
    assert spec.get_head(store) == signed_honest_block.message.hash_tree_root()
    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
    assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3

    # Upon revealing the withheld attack block, the honest block should still be the head
    yield from tick_and_add_block(spec, store, signed_attack_block, test_steps)
    assert spec.get_head(store) == signed_honest_block.message.hash_tree_root()
    # As a side effect of the pull-up logic, the attack block is pulled up and store.justified_checkpoint is updated
    assert store.justified_checkpoint.epoch == 4

    # Even after going to the next epoch, the honest block should remain the head
    slot = spec.get_current_slot(store) + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH)
    current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
    on_tick_and_append_step(spec, store, current_time, test_steps)
    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6
    assert spec.get_head(store) == signed_honest_block.message.hash_tree_root()

    yield 'steps', test_steps


@with_altair_and_later
@spec_state_test
@with_presets(TESTING_PRESETS, reason="too slow")
def test_withholding_attack_unviable_honest_chain(spec, state):
    """
    Checks that the withholding attack succeeds for one epoch if the honest
    chain has a voting source beyond two epochs ago.
    """
    test_steps = []
    # Initialization
    store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
    yield 'anchor_state', state
    yield 'anchor_block', anchor_block
    current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
    on_tick_and_append_step(spec, store, current_time, test_steps)
    assert store.time == current_time

    next_epoch(spec, state)
    on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)

    # Fill epoch 1 to 3
    for _ in range(3):
        state, store, _ = yield from apply_next_epoch_with_attestations(
            spec, state, store, True, True, test_steps=test_steps)
    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
    assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3

    next_epoch(spec, state)
    assert spec.compute_epoch_at_slot(state.slot) == 5

    # Create the attack block that includes justifying attestations for epoch 5
    # This block is withheld & revealed only in epoch 6
    signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, False)
    assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state)
    assert len(signed_blocks) > 1
    signed_attack_block = signed_blocks[-1]
    # Everything before the final (withheld) block goes into the store now
    for signed_block in signed_blocks[:-1]:
        yield from tick_and_add_block(spec, store, signed_block, test_steps)
        assert spec.get_head(store) == signed_block.message.hash_tree_root()
    assert spec.get_head(store) == signed_blocks[-2].message.hash_tree_root()
    state = store.block_states[spec.get_head(store)].copy()
    assert spec.compute_epoch_at_slot(state.slot) == 5
    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
    assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3

    # Create an honest chain in epoch 6 that includes the justifying attestations from the attack block
    next_epoch(spec, state)
    assert spec.compute_epoch_at_slot(state.slot) == 6
    assert state.current_justified_checkpoint.epoch == 3

    # Create two blocks in the honest chain with full attestations, and add to the store
    for _ in range(2):
        signed_block = state_transition_with_full_block(spec, state, True, False)
        assert state.current_justified_checkpoint.epoch == 3
        yield from tick_and_add_block(spec, store, signed_block, test_steps)

    # Create final block in the honest chain that includes the justifying attestations from the attack block
    honest_block = build_empty_block_for_next_slot(spec, state)
    honest_block.body.attestations = signed_attack_block.message.body.attestations
    signed_honest_block = state_transition_and_sign_block(spec, state, honest_block)
    honest_block_root = signed_honest_block.message.hash_tree_root()
    assert state.current_justified_checkpoint.epoch == 3

    # Add the honest block to the store
    yield from tick_and_add_block(spec, store, signed_honest_block, test_steps)
    current_epoch = spec.compute_epoch_at_slot(spec.get_current_slot(store))
    assert current_epoch == 6
    # assert store.voting_source[honest_block_root].epoch == 3
    assert spec.get_head(store) == honest_block_root
    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6
    assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3

    # Tick to the next slot so proposer boost is not a factor in choosing the head
    current_time = (honest_block.slot + 1) * spec.config.SECONDS_PER_SLOT + store.genesis_time
    on_tick_and_append_step(spec, store, current_time, test_steps)
    assert spec.get_head(store) == honest_block_root
    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6
    assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3

    # Upon revealing the withheld attack block, it should become the head
    yield from tick_and_add_block(spec, store, signed_attack_block, test_steps)
    # The attack block is pulled up and store.justified_checkpoint is updated
    assert store.justified_checkpoint.epoch == 5
    attack_block_root = signed_attack_block.message.hash_tree_root()
    assert spec.get_head(store) == attack_block_root

    # After going to the next epoch, the honest block should become the head
    slot = spec.get_current_slot(store) + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH)
    current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
    on_tick_and_append_step(spec, store, current_time, test_steps)
    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 7
    # assert store.voting_source[honest_block_root].epoch == 5
    assert spec.get_head(store) == honest_block_root

    yield 'steps', test_steps
10,474
49.849515
116
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py
import random from eth2spec.test.context import ( spec_state_test, with_altair_and_later, with_presets, ) from eth2spec.test.helpers.attestations import get_valid_attestation, next_epoch_with_attestations from eth2spec.test.helpers.block import ( apply_empty_block, build_empty_block_for_next_slot, ) from eth2spec.test.helpers.constants import MINIMAL from eth2spec.test.helpers.fork_choice import ( add_attester_slashing, add_block, get_anchor_root, get_genesis_forkchoice_store_and_block, get_formatted_head_output, on_tick_and_append_step, add_attestation, tick_and_run_on_attestation, tick_and_add_block, output_head_check, apply_next_epoch_with_attestations, ) from eth2spec.test.helpers.forks import ( is_post_altair, ) from eth2spec.test.helpers.state import ( next_slots, next_epoch, state_transition_and_sign_block, ) rng = random.Random(1001) @with_altair_and_later @spec_state_test def test_genesis(spec, state): test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block anchor_root = get_anchor_root(spec, state) assert spec.get_head(store) == anchor_root test_steps.append({ 'checks': { 'genesis_time': int(store.genesis_time), 'head': get_formatted_head_output(spec, store), } }) yield 'steps', test_steps if is_post_altair(spec): yield 'description', 'meta', f"Although it's not phase 0, we may use {spec.fork} spec to start testnets." 
@with_altair_and_later @spec_state_test def test_chain_no_attestations(spec, state): test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block anchor_root = get_anchor_root(spec, state) assert spec.get_head(store) == anchor_root output_head_check(spec, store, test_steps) # On receiving a block of `GENESIS_SLOT + 1` slot block_1 = build_empty_block_for_next_slot(spec, state) signed_block_1 = state_transition_and_sign_block(spec, state, block_1) yield from tick_and_add_block(spec, store, signed_block_1, test_steps) # On receiving a block of next epoch block_2 = build_empty_block_for_next_slot(spec, state) signed_block_2 = state_transition_and_sign_block(spec, state, block_2) yield from tick_and_add_block(spec, store, signed_block_2, test_steps) assert spec.get_head(store) == spec.hash_tree_root(block_2) output_head_check(spec, store, test_steps) yield 'steps', test_steps @with_altair_and_later @spec_state_test def test_split_tie_breaker_no_attestations(spec, state): test_steps = [] genesis_state = state.copy() # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block anchor_root = get_anchor_root(spec, state) assert spec.get_head(store) == anchor_root output_head_check(spec, store, test_steps) # Create block at slot 1 block_1_state = genesis_state.copy() block_1 = build_empty_block_for_next_slot(spec, block_1_state) signed_block_1 = state_transition_and_sign_block(spec, block_1_state, block_1) # Create additional block at slot 1 block_2_state = genesis_state.copy() block_2 = build_empty_block_for_next_slot(spec, block_2_state) block_2.body.graffiti = b'\x42' * 32 signed_block_2 = state_transition_and_sign_block(spec, block_2_state, block_2) # Tick time past slot 1 so proposer score boost does not apply time = store.genesis_time + (block_2.slot + 1) * 
spec.config.SECONDS_PER_SLOT on_tick_and_append_step(spec, store, time, test_steps) yield from add_block(spec, store, signed_block_1, test_steps) yield from add_block(spec, store, signed_block_2, test_steps) highest_root = max(spec.hash_tree_root(block_1), spec.hash_tree_root(block_2)) assert spec.get_head(store) == highest_root output_head_check(spec, store, test_steps) yield 'steps', test_steps @with_altair_and_later @spec_state_test def test_shorter_chain_but_heavier_weight(spec, state): test_steps = [] genesis_state = state.copy() # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block anchor_root = get_anchor_root(spec, state) assert spec.get_head(store) == anchor_root output_head_check(spec, store, test_steps) # build longer tree long_state = genesis_state.copy() for _ in range(3): long_block = build_empty_block_for_next_slot(spec, long_state) signed_long_block = state_transition_and_sign_block(spec, long_state, long_block) yield from tick_and_add_block(spec, store, signed_long_block, test_steps) # build short tree short_state = genesis_state.copy() short_block = build_empty_block_for_next_slot(spec, short_state) short_block.body.graffiti = b'\x42' * 32 signed_short_block = state_transition_and_sign_block(spec, short_state, short_block) yield from tick_and_add_block(spec, store, signed_short_block, test_steps) # Since the long chain has higher proposer_score at slot 1, the latest long block is the head assert spec.get_head(store) == spec.hash_tree_root(long_block) short_attestation = get_valid_attestation(spec, short_state, short_block.slot, signed=True) yield from tick_and_run_on_attestation(spec, store, short_attestation, test_steps) assert spec.get_head(store) == spec.hash_tree_root(short_block) output_head_check(spec, store, test_steps) yield 'steps', test_steps @with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def 
test_filtered_block_tree(spec, state): test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block anchor_root = get_anchor_root(spec, state) assert spec.get_head(store) == anchor_root output_head_check(spec, store, test_steps) # transition state past initial couple of epochs next_epoch(spec, state) next_epoch(spec, state) # fill in attestations for entire epoch, justifying the recent epoch prev_state, signed_blocks, state = next_epoch_with_attestations(spec, state, True, False) assert state.current_justified_checkpoint.epoch > prev_state.current_justified_checkpoint.epoch # tick time forward and add blocks and attestations to store current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) for signed_block in signed_blocks: yield from add_block(spec, store, signed_block, test_steps) assert store.justified_checkpoint == state.current_justified_checkpoint # the last block in the branch should be the head expected_head_root = spec.hash_tree_root(signed_blocks[-1].message) assert spec.get_head(store) == expected_head_root output_head_check(spec, store, test_steps) # # create branch containing the justified block but not containing enough on # chain votes to justify that block # # build a chain without attestations off of previous justified block non_viable_state = store.block_states[store.justified_checkpoint.root].copy() # ensure that next wave of votes are for future epoch next_epoch(spec, non_viable_state) next_epoch(spec, non_viable_state) next_epoch(spec, non_viable_state) assert spec.get_current_epoch(non_viable_state) > store.justified_checkpoint.epoch # create rogue block that will be attested to in this non-viable branch rogue_block = build_empty_block_for_next_slot(spec, non_viable_state) signed_rogue_block = state_transition_and_sign_block(spec, non_viable_state, 
rogue_block) # create an epoch's worth of attestations for the rogue block next_epoch(spec, non_viable_state) attestations = [] for i in range(spec.SLOTS_PER_EPOCH): slot = rogue_block.slot + i for index in range(spec.get_committee_count_per_slot(non_viable_state, spec.compute_epoch_at_slot(slot))): attestation = get_valid_attestation(spec, non_viable_state, slot, index, signed=True) attestations.append(attestation) # tick time forward to be able to include up to the latest attestation current_time = (attestations[-1].data.slot + 1) * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) # include rogue block and associated attestations in the store yield from add_block(spec, store, signed_rogue_block, test_steps) for attestation in attestations: yield from tick_and_run_on_attestation(spec, store, attestation, test_steps) # ensure that get_head still returns the head from the previous branch assert spec.get_head(store) == expected_head_root output_head_check(spec, store, test_steps) yield 'steps', test_steps @with_altair_and_later @spec_state_test def test_proposer_boost_correct_head(spec, state): test_steps = [] genesis_state = state.copy() # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block anchor_root = get_anchor_root(spec, state) assert spec.get_head(store) == anchor_root output_head_check(spec, store, test_steps) # Build block that serves as head ONLY on timely arrival, and ONLY in that slot state_1 = genesis_state.copy() next_slots(spec, state_1, 3) block_1 = build_empty_block_for_next_slot(spec, state_1) signed_block_1 = state_transition_and_sign_block(spec, state_1, block_1) # Build block that serves as current head, and remains the head after block_1.slot state_2 = genesis_state.copy() next_slots(spec, state_2, 2) block_2 = build_empty_block_for_next_slot(spec, state_2) signed_block_2 = 
state_transition_and_sign_block(spec, state_2.copy(), block_2) while spec.hash_tree_root(block_1) >= spec.hash_tree_root(block_2): block_2.body.graffiti = spec.Bytes32(hex(rng.getrandbits(8 * 32))[2:].zfill(64)) signed_block_2 = state_transition_and_sign_block(spec, state_2.copy(), block_2) assert spec.hash_tree_root(block_1) < spec.hash_tree_root(block_2) # Tick to block_1 slot time time = store.genesis_time + block_1.slot * spec.config.SECONDS_PER_SLOT on_tick_and_append_step(spec, store, time, test_steps) # Process block_2 yield from add_block(spec, store, signed_block_2, test_steps) assert store.proposer_boost_root == spec.Root() assert spec.get_head(store) == spec.hash_tree_root(block_2) # Process block_1 on timely arrival # The head should temporarily change to block_1 yield from add_block(spec, store, signed_block_1, test_steps) assert store.proposer_boost_root == spec.hash_tree_root(block_1) assert spec.get_head(store) == spec.hash_tree_root(block_1) # After block_1.slot, the head should revert to block_2 time = store.genesis_time + (block_1.slot + 1) * spec.config.SECONDS_PER_SLOT on_tick_and_append_step(spec, store, time, test_steps) assert store.proposer_boost_root == spec.Root() assert spec.get_head(store) == spec.hash_tree_root(block_2) output_head_check(spec, store, test_steps) yield 'steps', test_steps @with_altair_and_later @spec_state_test def test_discard_equivocations_on_attester_slashing(spec, state): test_steps = [] genesis_state = state.copy() # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block anchor_root = get_anchor_root(spec, state) assert spec.get_head(store) == anchor_root output_head_check(spec, store, test_steps) # Build block that serves as head before discarding equivocations state_1 = genesis_state.copy() next_slots(spec, state_1, 3) block_1 = build_empty_block_for_next_slot(spec, state_1) signed_block_1 = 
state_transition_and_sign_block(spec, state_1, block_1) # Build equivocating attestations to feed to store state_eqv = state_1.copy() block_eqv = apply_empty_block(spec, state_eqv, state_eqv.slot + 1) attestation_eqv = get_valid_attestation(spec, state_eqv, slot=block_eqv.slot, signed=True) next_slots(spec, state_1, 1) attestation = get_valid_attestation(spec, state_1, slot=block_eqv.slot, signed=True) assert spec.is_slashable_attestation_data(attestation.data, attestation_eqv.data) indexed_attestation = spec.get_indexed_attestation(state_1, attestation) indexed_attestation_eqv = spec.get_indexed_attestation(state_eqv, attestation_eqv) attester_slashing = spec.AttesterSlashing(attestation_1=indexed_attestation, attestation_2=indexed_attestation_eqv) # Build block that serves as head after discarding equivocations state_2 = genesis_state.copy() next_slots(spec, state_2, 2) block_2 = build_empty_block_for_next_slot(spec, state_2) signed_block_2 = state_transition_and_sign_block(spec, state_2.copy(), block_2) while spec.hash_tree_root(block_1) >= spec.hash_tree_root(block_2): block_2.body.graffiti = spec.Bytes32(hex(rng.getrandbits(8 * 32))[2:].zfill(64)) signed_block_2 = state_transition_and_sign_block(spec, state_2.copy(), block_2) assert spec.hash_tree_root(block_1) < spec.hash_tree_root(block_2) # Tick to (block_eqv.slot + 2) slot time time = store.genesis_time + (block_eqv.slot + 2) * spec.config.SECONDS_PER_SLOT on_tick_and_append_step(spec, store, time, test_steps) # Process block_2 yield from add_block(spec, store, signed_block_2, test_steps) assert store.proposer_boost_root == spec.Root() assert spec.get_head(store) == spec.hash_tree_root(block_2) # Process block_1 # The head should remain block_2 yield from add_block(spec, store, signed_block_1, test_steps) assert store.proposer_boost_root == spec.Root() assert spec.get_head(store) == spec.hash_tree_root(block_2) # Process attestation # The head should change to block_1 yield from add_attestation(spec, 
store, attestation, test_steps) assert spec.get_head(store) == spec.hash_tree_root(block_1) # Process attester_slashing # The head should revert to block_2 yield from add_attester_slashing(spec, store, attester_slashing, test_steps) assert spec.get_head(store) == spec.hash_tree_root(block_2) output_head_check(spec, store, test_steps) yield 'steps', test_steps @with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_discard_equivocations_slashed_validator_censoring(spec, state): # Check that the store does not count LMD votes from validators that are slashed in the justified state test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 0 assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 0 assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 0 # We will slash all validators voting at the 2nd slot of epoch 0 current_slot = spec.get_current_slot(store) eqv_slot = current_slot + 1 eqv_epoch = spec.compute_epoch_at_slot(eqv_slot) assert eqv_slot % spec.SLOTS_PER_EPOCH == 1 assert eqv_epoch == 0 slashed_validators = [] comm_count = spec.get_committee_count_per_slot(state, eqv_epoch) for comm_index in range(comm_count): comm = spec.get_beacon_committee(state, eqv_slot, comm_index) slashed_validators += comm assert len(slashed_validators) > 0 # Slash those validators in the state for val_index in slashed_validators: state.validators[val_index].slashed = True # Store this state as the anchor state anchor_state = state.copy() # Generate an anchor block with correct state root anchor_block = spec.BeaconBlock(state_root=anchor_state.hash_tree_root()) yield 'anchor_state', anchor_state yield 'anchor_block', anchor_block # Get a new store with the anchor state & anchor block store = spec.get_forkchoice_store(anchor_state, anchor_block) # Now generate the store checks 
current_time = anchor_state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time # Create two competing blocks at eqv_slot next_slots(spec, state, eqv_slot - state.slot - 1) assert state.slot == eqv_slot - 1 state_1 = state.copy() block_1 = build_empty_block_for_next_slot(spec, state_1) signed_block_1 = state_transition_and_sign_block(spec, state_1, block_1) state_2 = state.copy() block_2 = build_empty_block_for_next_slot(spec, state_2) block_2.body.graffiti = block_2.body.graffiti = b'\x42' * 32 signed_block_2 = state_transition_and_sign_block(spec, state_2, block_2) assert block_1.slot == block_2.slot == eqv_slot # Add both blocks to the store yield from tick_and_add_block(spec, store, signed_block_1, test_steps) yield from tick_and_add_block(spec, store, signed_block_2, test_steps) # Find out which block will win in tie breaking if spec.hash_tree_root(block_1) < spec.hash_tree_root(block_2): block_low_root = block_1.hash_tree_root() block_low_root_post_state = state_1 block_high_root = block_2.hash_tree_root() else: block_low_root = block_2.hash_tree_root() block_low_root_post_state = state_2 block_high_root = block_1.hash_tree_root() assert block_low_root < block_high_root # Tick to next slot so proposer boost does not apply current_time = store.genesis_time + (block_1.slot + 1) * spec.config.SECONDS_PER_SLOT on_tick_and_append_step(spec, store, current_time, test_steps) # Check that block with higher root wins assert spec.get_head(store) == block_high_root # Create attestation for block with lower root attestation = get_valid_attestation(spec, block_low_root_post_state, slot=eqv_slot, index=0, signed=True) # Check that all attesting validators were slashed in the anchor state att_comm = spec.get_beacon_committee(block_low_root_post_state, eqv_slot, 0) for i in att_comm: assert anchor_state.validators[i].slashed # Add attestation to the store yield from 
add_attestation(spec, store, attestation, test_steps) # Check that block with higher root still wins assert spec.get_head(store) == block_high_root output_head_check(spec, store, test_steps) yield 'steps', test_steps @with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_voting_source_within_two_epoch(spec, state): """ Check that the store allows for a head block that has: - store.voting_source[block_root].epoch != store.justified_checkpoint.epoch, and - store.unrealized_justifications[block_root].epoch >= store.justified_checkpoint.epoch, and - store.voting_source[block_root].epoch + 2 >= current_epoch, and - store.finalized_checkpoint.root == get_checkpoint_block(store, block_root, store.finalized_checkpoint.epoch) """ test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time next_epoch(spec, state) on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) # Fill epoch 1 to 3 for _ in range(3): state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, True, True, test_steps=test_steps) assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 assert store.finalized_checkpoint.epoch == 2 # Copy the state to use later fork_state = state.copy() # Fill epoch 4 state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, True, True, test_steps=test_steps) assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4 assert 
store.finalized_checkpoint.epoch == 3 # Create a fork from the earlier saved state next_epoch(spec, fork_state) assert spec.compute_epoch_at_slot(fork_state.slot) == 5 _, signed_blocks, fork_state = next_epoch_with_attestations(spec, fork_state, True, True) # Only keep the blocks from epoch 5, so discard the last generated block signed_blocks = signed_blocks[:-1] last_fork_block = signed_blocks[-1].message assert spec.compute_epoch_at_slot(last_fork_block.slot) == 5 # Now add the fork to the store for signed_block in signed_blocks: yield from tick_and_add_block(spec, store, signed_block, test_steps) assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4 assert store.finalized_checkpoint.epoch == 3 # Check that the last block from the fork is the head # LMD votes for the competing branch are overwritten so this fork should win last_fork_block_root = last_fork_block.hash_tree_root() # assert store.voting_source[last_fork_block_root].epoch != store.justified_checkpoint.epoch assert store.unrealized_justifications[last_fork_block_root].epoch >= store.justified_checkpoint.epoch # assert store.voting_source[last_fork_block_root].epoch + 2 >= \ # spec.compute_epoch_at_slot(spec.get_current_slot(store)) assert store.finalized_checkpoint.root == spec.get_checkpoint_block( store, last_fork_block_root, store.finalized_checkpoint.epoch ) assert spec.get_head(store) == last_fork_block_root yield 'steps', test_steps @with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_voting_source_beyond_two_epoch(spec, state): """ Check that the store doesn't allow for a head block that has: - store.voting_source[block_root].epoch != store.justified_checkpoint.epoch, and - store.unrealized_justifications[block_root].epoch >= store.justified_checkpoint.epoch, and - store.voting_source[block_root].epoch + 2 < current_epoch, and - 
store.finalized_checkpoint.root == get_checkpoint_block(store, block_root, store.finalized_checkpoint.epoch) """ test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time next_epoch(spec, state) on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) # Fill epoch 1 to 3 for _ in range(3): state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, True, True, test_steps=test_steps) assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 assert store.finalized_checkpoint.epoch == 2 # Copy the state to use later fork_state = state.copy() # Fill epoch 4 and 5 for _ in range(2): state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, True, True, test_steps=test_steps) assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6 assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 5 assert store.finalized_checkpoint.epoch == 4 # Create a fork from the earlier saved state for _ in range(2): next_epoch(spec, fork_state) assert spec.compute_epoch_at_slot(fork_state.slot) == 6 assert fork_state.current_justified_checkpoint.epoch == 3 _, signed_blocks, fork_state = next_epoch_with_attestations(spec, fork_state, True, True) # Only keep the blocks from epoch 6, so discard the last generated block signed_blocks = signed_blocks[:-1] last_fork_block = signed_blocks[-1].message assert spec.compute_epoch_at_slot(last_fork_block.slot) == 6 # Store the head before adding the fork to the store correct_head = spec.get_head(store) # Now add the fork to 
the store for signed_block in signed_blocks: yield from tick_and_add_block(spec, store, signed_block, test_steps) assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6 assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 5 assert store.finalized_checkpoint.epoch == 4 last_fork_block_root = last_fork_block.hash_tree_root() last_fork_block_state = store.block_states[last_fork_block_root] assert last_fork_block_state.current_justified_checkpoint.epoch == 3 # Check that the head is unchanged # assert store.voting_source[last_fork_block_root].epoch != store.justified_checkpoint.epoch assert store.unrealized_justifications[last_fork_block_root].epoch >= store.justified_checkpoint.epoch # assert store.voting_source[last_fork_block_root].epoch + 2 < \ # spec.compute_epoch_at_slot(spec.get_current_slot(store)) assert store.finalized_checkpoint.root == spec.get_checkpoint_block( store, last_fork_block_root, store.finalized_checkpoint.epoch ) assert spec.get_head(store) == correct_head yield 'steps', test_steps """ Note: We are unable to generate test vectors that check failure of the correct_finalized condition. 
We cannot generate a block that: - has !correct_finalized, and - has correct_justified, and - is a descendant of store.justified_checkpoint.root The block being a descendant of store.justified_checkpoint.root is necessary because filter_block_tree descends the tree starting at store.justified_checkpoint.root @with_altair_and_later @spec_state_test def test_incorrect_finalized(spec, state): # Check that the store doesn't allow for a head block that has: # - store.voting_source[block_root].epoch == store.justified_checkpoint.epoch, and # - store.finalized_checkpoint.epoch != GENESIS_EPOCH, and # - store.finalized_checkpoint.root != get_checkpoint_block(store, block_root, store.finalized_checkpoint.epoch) test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time next_epoch(spec, state) on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) # Fill epoch 1 to 4 for _ in range(4): state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, True, True, test_steps=test_steps) assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4 assert store.finalized_checkpoint.epoch == 3 # Identify the fork block as the last block in epoch 4 fork_block_root = state.latest_block_header.parent_root fork_block = store.blocks[fork_block_root] assert spec.compute_epoch_at_slot(fork_block.slot) == 4 # Copy the state to use later fork_state = store.block_states[fork_block_root].copy() assert spec.compute_epoch_at_slot(fork_state.slot) == 4 assert fork_state.current_justified_checkpoint.epoch == 3 assert 
fork_state.finalized_checkpoint.epoch == 2 # Create a fork from the earlier saved state for _ in range(2): next_epoch(spec, fork_state) assert spec.compute_epoch_at_slot(fork_state.slot) == 6 assert fork_state.current_justified_checkpoint.epoch == 4 assert fork_state.finalized_checkpoint.epoch == 3 # Fill epoch 6 signed_blocks = [] _, signed_blocks_1, fork_state = next_epoch_with_attestations(spec, fork_state, True, False) signed_blocks += signed_blocks_1 assert spec.compute_epoch_at_slot(fork_state.slot) == 7 # Check that epoch 6 is justified in this fork - it will be used as voting source for the tip of this fork assert fork_state.current_justified_checkpoint.epoch == 6 assert fork_state.finalized_checkpoint.epoch == 3 # Create a chain in epoch 7 that has new justification for epoch 7 _, signed_blocks_2, fork_state = next_epoch_with_attestations(spec, fork_state, True, False) # Only keep the blocks from epoch 7, so discard the last generated block signed_blocks_2 = signed_blocks_2[:-1] signed_blocks += signed_blocks_2 last_fork_block = signed_blocks[-1].message assert spec.compute_epoch_at_slot(last_fork_block.slot) == 7 # Now add the fork to the store for signed_block in signed_blocks: yield from tick_and_add_block(spec, store, signed_block, test_steps) assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 7 assert store.justified_checkpoint.epoch == 6 assert store.finalized_checkpoint.epoch == 3 # Fill epoch 5 and 6 in the original chain for _ in range(2): state, store, signed_head_block = yield from apply_next_epoch_with_attestations( spec, state, store, True, False, test_steps=test_steps) assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 7 assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 6 assert store.finalized_checkpoint.epoch == 5 # Store the expected head head_root = signed_head_block.message.hash_tree_root() # Check that the head is unchanged last_fork_block_root = 
last_fork_block.hash_tree_root() assert store.voting_source[last_fork_block_root].epoch == store.justified_checkpoint.epoch assert store.finalized_checkpoint.epoch != spec.GENESIS_EPOCH finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) assert store.finalized_checkpoint.root != spec.get_checkpoint_block( store, block_root, store.finalized_checkpoint.epoch ) assert spec.get_head(store) != last_fork_block_root assert spec.get_head(store) == head_root yield 'steps', test_steps """
31,519
41.767978
119
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/fork_choice/__init__.py
0
0
0
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py
import random from eth_utils import encode_hex from eth2spec.utils.ssz.ssz_impl import hash_tree_root from eth2spec.test.context import ( MINIMAL, spec_state_test, with_altair_and_later, with_presets ) from eth2spec.test.helpers.attestations import ( next_epoch_with_attestations, next_slots_with_attestations, ) from eth2spec.test.helpers.block import ( build_empty_block_for_next_slot, build_empty_block, transition_unsigned_block, sign_block, ) from eth2spec.test.helpers.fork_choice import ( get_genesis_forkchoice_store_and_block, on_tick_and_append_step, add_block, tick_and_add_block, apply_next_epoch_with_attestations, apply_next_slots_with_attestations, is_ready_to_justify, find_next_justifying_slot, ) from eth2spec.test.helpers.state import ( next_epoch, next_slots, state_transition_and_sign_block, ) rng = random.Random(2020) def _drop_random_one_third(_slot, _index, indices): committee_len = len(indices) assert committee_len >= 3 filter_len = committee_len // 3 participant_count = committee_len - filter_len return rng.sample(sorted(indices), participant_count) @with_altair_and_later @spec_state_test def test_basic(spec, state): test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time # On receiving a block of `GENESIS_SLOT + 1` slot block = build_empty_block_for_next_slot(spec, state) signed_block = state_transition_and_sign_block(spec, state, block) yield from tick_and_add_block(spec, store, signed_block, test_steps) assert spec.get_head(store) == signed_block.message.hash_tree_root() # On receiving a block of next epoch store.time = current_time + spec.config.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH block = build_empty_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH) 
signed_block = state_transition_and_sign_block(spec, state, block) yield from tick_and_add_block(spec, store, signed_block, test_steps) assert spec.get_head(store) == signed_block.message.hash_tree_root() yield 'steps', test_steps # TODO: add tests for justified_root and finalized_root @with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_on_block_checkpoints(spec, state): test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time # Run for 1 epoch with full attestations next_epoch(spec, state) on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) state, store, last_signed_block = yield from apply_next_epoch_with_attestations( spec, state, store, True, False, test_steps=test_steps) last_block_root = hash_tree_root(last_signed_block.message) assert spec.get_head(store) == last_block_root # Forward 1 epoch next_epoch(spec, state) on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) # Mock the finalized_checkpoint and build a block on it fin_state = store.block_states[last_block_root].copy() fin_state.finalized_checkpoint = store.block_states[last_block_root].current_justified_checkpoint.copy() block = build_empty_block_for_next_slot(spec, fin_state) signed_block = state_transition_and_sign_block(spec, fin_state.copy(), block) yield from tick_and_add_block(spec, store, signed_block, test_steps) assert spec.get_head(store) == signed_block.message.hash_tree_root() yield 'steps', test_steps @with_altair_and_later @spec_state_test def test_on_block_future_block(spec, state): test_steps = [] # Initialization store, anchor_block = 
get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time # Do NOT tick time to `GENESIS_SLOT + 1` slot # Fail receiving block of `GENESIS_SLOT + 1` slot block = build_empty_block_for_next_slot(spec, state) signed_block = state_transition_and_sign_block(spec, state, block) yield from add_block(spec, store, signed_block, test_steps, valid=False) yield 'steps', test_steps @with_altair_and_later @spec_state_test def test_on_block_bad_parent_root(spec, state): test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time # Fail receiving block of `GENESIS_SLOT + 1` slot block = build_empty_block_for_next_slot(spec, state) transition_unsigned_block(spec, state, block) block.state_root = state.hash_tree_root() block.parent_root = b'\x45' * 32 signed_block = sign_block(spec, state, block) yield from add_block(spec, store, signed_block, test_steps, valid=False) yield 'steps', test_steps @with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_on_block_before_finalized(spec, state): test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time # Fork another_state = state.copy() # Create a finalized chain for _ in range(4): state, store, _ = yield from 
apply_next_epoch_with_attestations( spec, state, store, True, False, test_steps=test_steps) assert store.finalized_checkpoint.epoch == 2 # Fail receiving block of `GENESIS_SLOT + 1` slot block = build_empty_block_for_next_slot(spec, another_state) block.body.graffiti = b'\x12' * 32 signed_block = state_transition_and_sign_block(spec, another_state, block) assert signed_block.message.hash_tree_root() not in store.blocks yield from tick_and_add_block(spec, store, signed_block, test_steps, valid=False) yield 'steps', test_steps @with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_on_block_finalized_skip_slots(spec, state): """ Test case was originally from https://github.com/ethereum/consensus-specs/pull/1579 And then rewrote largely. """ test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time # Fill epoch 0 and the first slot of epoch 1 state, store, _ = yield from apply_next_slots_with_attestations( spec, state, store, spec.SLOTS_PER_EPOCH, True, False, test_steps) # Skip the rest slots of epoch 1 and the first slot of epoch 2 next_slots(spec, state, spec.SLOTS_PER_EPOCH) # The state after the skipped slots target_state = state.copy() # Fill epoch 3 and 4 for _ in range(2): state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, True, True, test_steps=test_steps) # Now we get finalized epoch 2, where `compute_start_slot_at_epoch(2)` is a skipped slot assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 assert store.finalized_checkpoint.root == spec.get_block_root(state, 1) == spec.get_block_root(state, 2) assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch 
== 3 assert store.justified_checkpoint == state.current_justified_checkpoint # Now build a block at later slot than finalized *epoch* # Includes finalized block in chain and the skipped slots block = build_empty_block_for_next_slot(spec, target_state) signed_block = state_transition_and_sign_block(spec, target_state, block) yield from tick_and_add_block(spec, store, signed_block, test_steps) yield 'steps', test_steps @with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_on_block_finalized_skip_slots_not_in_skip_chain(spec, state): """ Test case was originally from https://github.com/ethereum/consensus-specs/pull/1579 And then rewrote largely. """ test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time # Fill epoch 0 and the first slot of epoch 1 state, store, _ = yield from apply_next_slots_with_attestations( spec, state, store, spec.SLOTS_PER_EPOCH, True, False, test_steps) # Skip the rest slots of epoch 1 and the first slot of epoch 2 next_slots(spec, state, spec.SLOTS_PER_EPOCH) # Fill epoch 3 and 4 for _ in range(2): state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, True, True, test_steps=test_steps) # Now we get finalized epoch 2, where `compute_start_slot_at_epoch(2)` is a skipped slot assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 assert store.finalized_checkpoint.root == spec.get_block_root(state, 1) == spec.get_block_root(state, 2) assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 assert store.justified_checkpoint == state.current_justified_checkpoint # Now build a block after the block of the finalized **root** # Includes 
finalized block in chain, but does not include finalized skipped slots another_state = store.block_states[store.finalized_checkpoint.root].copy() assert another_state.slot == spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch - 1) block = build_empty_block_for_next_slot(spec, another_state) signed_block = state_transition_and_sign_block(spec, another_state, block) yield from tick_and_add_block(spec, store, signed_block, test_steps, valid=False) yield 'steps', test_steps """ @with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_new_finalized_slot_is_not_justified_checkpoint_ancestor(spec, state): # J: Justified # F: Finalized # state (forked from genesis): # epoch # [0] <- [1] <- [2] <- [3] <- [4] <- [5] # F J # another_state (forked from epoch 0): # └──── [1] <- [2] <- [3] <- [4] <- [5] # F J test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time # ----- Process state # Goal: make `store.finalized_checkpoint.epoch == 0` and `store.justified_checkpoint.epoch == 3` # Skip epoch 0 next_epoch(spec, state) # Forking another_state another_state = state.copy() # Fill epoch 1 with previous epoch attestations state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, False, True, test_steps=test_steps) # Skip epoch 2 next_epoch(spec, state) # Fill epoch 3 & 4 with previous epoch attestations for _ in range(2): state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, False, True, test_steps=test_steps) assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 0 assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 assert 
store.justified_checkpoint == state.current_justified_checkpoint # Create another chain # Goal: make `another_state.finalized_checkpoint.epoch == 2` and `another_state.justified_checkpoint.epoch == 3` all_blocks = [] # Fill epoch 1 & 2 with previous + current epoch attestations for _ in range(3): _, signed_blocks, another_state = next_epoch_with_attestations(spec, another_state, True, True) all_blocks += signed_blocks assert another_state.finalized_checkpoint.epoch == 2 assert another_state.current_justified_checkpoint.epoch == 3 assert state.finalized_checkpoint != another_state.finalized_checkpoint assert state.current_justified_checkpoint != another_state.current_justified_checkpoint pre_store_justified_checkpoint_root = store.justified_checkpoint.root # Apply blocks of `another_state` to `store` for block in all_blocks: # NOTE: Do not call `on_tick` here yield from add_block(spec, store, block, test_steps) ancestor_at_finalized_slot = spec.get_checkpoint_block( store, pre_store_justified_checkpoint_root, store.finalized_checkpoint.epoch ) assert ancestor_at_finalized_slot != store.finalized_checkpoint.root assert store.finalized_checkpoint == another_state.finalized_checkpoint # NOTE: inconsistent justified/finalized checkpoints in this edge case. # This can only happen when >1/3 validators are slashable, as this testcase requires that # store.justified_checkpoint is higher than store.finalized_checkpoint and on a different branch. # Ignoring this testcase for now. 
assert store.justified_checkpoint != another_state.current_justified_checkpoint yield 'steps', test_steps """ @with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_new_finalized_slot_is_justified_checkpoint_ancestor(spec, state): """ J: Justified F: Finalized state: epoch [0] <- [1] <- [2] <- [3] <- [4] <- [5] F J another_state (forked from state at epoch 3): └──── [4] <- [5] F J """ test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time # Process state next_epoch(spec, state) state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, False, True, test_steps=test_steps) state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, True, False, test_steps=test_steps) next_epoch(spec, state) for _ in range(2): state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, False, True, test_steps=test_steps) assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4 assert store.justified_checkpoint == state.current_justified_checkpoint # Create another chain # Forking from epoch 3 all_blocks = [] slot = spec.compute_start_slot_at_epoch(3) block_root = spec.get_block_root_at_slot(state, slot) another_state = store.block_states[block_root].copy() for _ in range(2): _, signed_blocks, another_state = next_epoch_with_attestations(spec, another_state, True, True) all_blocks += signed_blocks assert another_state.finalized_checkpoint.epoch == 3 assert another_state.current_justified_checkpoint.epoch == 4 pre_store_justified_checkpoint_root = store.justified_checkpoint.root 
for block in all_blocks: yield from tick_and_add_block(spec, store, block, test_steps) ancestor_at_finalized_slot = spec.get_checkpoint_block( store, pre_store_justified_checkpoint_root, store.finalized_checkpoint.epoch ) assert ancestor_at_finalized_slot == store.finalized_checkpoint.root assert store.finalized_checkpoint == another_state.finalized_checkpoint # NOTE: inconsistent justified/finalized checkpoints in this edge case assert store.justified_checkpoint != another_state.current_justified_checkpoint yield 'steps', test_steps @with_altair_and_later @spec_state_test def test_proposer_boost(spec, state): test_steps = [] genesis_state = state.copy() # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block # Build block that serves as head ONLY on timely arrival, and ONLY in that slot state = genesis_state.copy() next_slots(spec, state, 3) block = build_empty_block_for_next_slot(spec, state) signed_block = state_transition_and_sign_block(spec, state, block) # Process block on timely arrival just before end of boost interval time = (store.genesis_time + block.slot * spec.config.SECONDS_PER_SLOT + spec.config.SECONDS_PER_SLOT // spec.INTERVALS_PER_SLOT - 1) on_tick_and_append_step(spec, store, time, test_steps) yield from add_block(spec, store, signed_block, test_steps) assert store.proposer_boost_root == spec.hash_tree_root(block) assert spec.get_weight(store, spec.hash_tree_root(block)) > 0 # Ensure that boost is removed after slot is over time = (store.genesis_time + block.slot * spec.config.SECONDS_PER_SLOT + spec.config.SECONDS_PER_SLOT) on_tick_and_append_step(spec, store, time, test_steps) assert store.proposer_boost_root == spec.Root() assert spec.get_weight(store, spec.hash_tree_root(block)) == 0 next_slots(spec, state, 3) block = build_empty_block_for_next_slot(spec, state) signed_block = state_transition_and_sign_block(spec, state, block) # Process block on 
timely arrival at start of boost interval time = (store.genesis_time + block.slot * spec.config.SECONDS_PER_SLOT) on_tick_and_append_step(spec, store, time, test_steps) yield from add_block(spec, store, signed_block, test_steps) assert store.proposer_boost_root == spec.hash_tree_root(block) assert spec.get_weight(store, spec.hash_tree_root(block)) > 0 # Ensure that boost is removed after slot is over time = (store.genesis_time + block.slot * spec.config.SECONDS_PER_SLOT + spec.config.SECONDS_PER_SLOT) on_tick_and_append_step(spec, store, time, test_steps) assert store.proposer_boost_root == spec.Root() assert spec.get_weight(store, spec.hash_tree_root(block)) == 0 test_steps.append({ 'checks': { 'proposer_boost_root': encode_hex(store.proposer_boost_root), } }) yield 'steps', test_steps @with_altair_and_later @spec_state_test def test_proposer_boost_root_same_slot_untimely_block(spec, state): test_steps = [] genesis_state = state.copy() # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block # Build block that serves as head ONLY on timely arrival, and ONLY in that slot state = genesis_state.copy() next_slots(spec, state, 3) block = build_empty_block_for_next_slot(spec, state) signed_block = state_transition_and_sign_block(spec, state, block) # Process block on untimely arrival in the same slot time = (store.genesis_time + block.slot * spec.config.SECONDS_PER_SLOT + spec.config.SECONDS_PER_SLOT // spec.INTERVALS_PER_SLOT) on_tick_and_append_step(spec, store, time, test_steps) yield from add_block(spec, store, signed_block, test_steps) assert store.proposer_boost_root == spec.Root() test_steps.append({ 'checks': { 'proposer_boost_root': encode_hex(store.proposer_boost_root), } }) yield 'steps', test_steps @with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_justification_withholding(spec, state): test_steps = [] # Initialization 
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time for _ in range(2): next_epoch(spec, state) for _ in range(2): state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, True, True, test_steps=test_steps) assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 assert spec.get_current_epoch(state) == 4 # ------------ # Create attacker's fork that can justify epoch 4 # Do not apply attacker's blocks to store attacker_state = state.copy() attacker_signed_blocks = [] while not is_ready_to_justify(spec, attacker_state): attacker_state, signed_blocks, attacker_state = next_slots_with_attestations( spec, attacker_state, 1, True, False) attacker_signed_blocks += signed_blocks assert attacker_state.finalized_checkpoint.epoch == 2 assert attacker_state.current_justified_checkpoint.epoch == 3 assert spec.get_current_epoch(attacker_state) == 4 # ------------ # The honest fork sees all except the last block from attacker_signed_blocks # Apply honest fork to store honest_signed_blocks = attacker_signed_blocks[:-1] assert len(honest_signed_blocks) > 0 for signed_block in honest_signed_blocks: yield from tick_and_add_block(spec, store, signed_block, test_steps) last_honest_block = honest_signed_blocks[-1].message honest_state = store.block_states[hash_tree_root(last_honest_block)].copy() assert honest_state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 assert honest_state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 assert spec.get_current_epoch(honest_state) == 4 # Create & apply an honest block in epoch 5 that can justify epoch 4 
next_epoch(spec, honest_state) assert spec.get_current_epoch(honest_state) == 5 honest_block = build_empty_block_for_next_slot(spec, honest_state) honest_block.body.attestations = attacker_signed_blocks[-1].message.body.attestations signed_block = state_transition_and_sign_block(spec, honest_state, honest_block) yield from tick_and_add_block(spec, store, signed_block, test_steps) assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 assert spec.get_head(store) == hash_tree_root(honest_block) assert is_ready_to_justify(spec, honest_state) # ------------ # When the attacker's block is received, the honest block is still the head # This relies on the honest block's LMD score increasing due to proposer boost yield from tick_and_add_block(spec, store, attacker_signed_blocks[-1], test_steps) assert store.finalized_checkpoint.epoch == 3 assert store.justified_checkpoint.epoch == 4 assert spec.get_head(store) == hash_tree_root(honest_block) yield 'steps', test_steps @with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_justification_withholding_reverse_order(spec, state): test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time for _ in range(2): next_epoch(spec, state) for _ in range(2): state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, True, True, test_steps=test_steps) assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 assert spec.get_current_epoch(state) == 4 # ------------ # Create 
attacker's fork that can justify epoch 4 attacker_state = state.copy() attacker_signed_blocks = [] while not is_ready_to_justify(spec, attacker_state): attacker_state, signed_blocks, attacker_state = next_slots_with_attestations( spec, attacker_state, 1, True, False) assert len(signed_blocks) == 1 attacker_signed_blocks += signed_blocks yield from tick_and_add_block(spec, store, signed_blocks[0], test_steps) assert attacker_state.finalized_checkpoint.epoch == 2 assert attacker_state.current_justified_checkpoint.epoch == 3 assert spec.get_current_epoch(attacker_state) == 4 attackers_head = hash_tree_root(attacker_signed_blocks[-1].message) assert spec.get_head(store) == attackers_head # ------------ # The honest fork sees all except the last block from attacker_signed_blocks honest_signed_blocks = attacker_signed_blocks[:-1] assert len(honest_signed_blocks) > 0 last_honest_block = honest_signed_blocks[-1].message honest_state = store.block_states[hash_tree_root(last_honest_block)].copy() assert honest_state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 assert honest_state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 assert spec.get_current_epoch(honest_state) == 4 # Create an honest block in epoch 5 that can justify epoch 4 next_epoch(spec, honest_state) assert spec.get_current_epoch(honest_state) == 5 honest_block = build_empty_block_for_next_slot(spec, honest_state) honest_block.body.attestations = attacker_signed_blocks[-1].message.body.attestations signed_block = state_transition_and_sign_block(spec, honest_state, honest_block) assert honest_state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 assert honest_state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 assert is_ready_to_justify(spec, honest_state) # When the honest block is received, the honest block becomes the head # This relies on the honest block's LMD score increasing due to proposer boost yield 
from tick_and_add_block(spec, store, signed_block, test_steps) assert store.finalized_checkpoint.epoch == 3 assert store.justified_checkpoint.epoch == 4 assert spec.get_head(store) == hash_tree_root(honest_block) yield 'steps', test_steps @with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_justification_update_beginning_of_epoch(spec, state): """ Check that the store's justified checkpoint is updated when a block containing better justification is revealed at the first slot of an epoch """ test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time next_epoch(spec, state) on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) # Fill epoch 1 to 3 for _ in range(3): state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, True, True, test_steps=test_steps) assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 # Create a block that has new justification information contained within it, but don't add to store yet another_state = state.copy() _, signed_blocks, another_state = next_epoch_with_attestations(spec, another_state, True, False) assert spec.compute_epoch_at_slot(another_state.slot) == 5 assert another_state.current_justified_checkpoint.epoch == 4 # Tick store to the start of the next epoch slot = spec.get_current_slot(store) + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert 
spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 # Now add the blocks & check that justification update was triggered for signed_block in signed_blocks: yield from tick_and_add_block(spec, store, signed_block, test_steps) assert spec.get_head(store) == signed_block.message.hash_tree_root() assert store.justified_checkpoint.epoch == 4 yield 'steps', test_steps @with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_justification_update_end_of_epoch(spec, state): """ Check that the store's justified checkpoint is updated when a block containing better justification is revealed at the last slot of an epoch """ test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time next_epoch(spec, state) on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) # Fill epoch 1 to 3 for _ in range(3): state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, True, True, test_steps=test_steps) assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 # Create a block that has new justification information contained within it, but don't add to store yet another_state = state.copy() _, signed_blocks, another_state = next_epoch_with_attestations(spec, another_state, True, False) assert spec.compute_epoch_at_slot(another_state.slot) == 5 assert another_state.current_justified_checkpoint.epoch == 4 # Tick store to the last slot of the next epoch slot = spec.get_current_slot(store) + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) slot = slot + spec.SLOTS_PER_EPOCH - 1 
current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 # Now add the blocks & check that justification update was triggered for signed_block in signed_blocks: yield from tick_and_add_block(spec, store, signed_block, test_steps) assert spec.get_head(store) == signed_block.message.hash_tree_root() assert store.justified_checkpoint.epoch == 4 yield 'steps', test_steps @with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_incompatible_justification_update_start_of_epoch(spec, state): """ Check that the store's justified checkpoint is updated when a block containing better justification is revealed at the start slot of an epoch, even when the better justified checkpoint is not a descendant of the store's justified checkpoint """ test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time next_epoch(spec, state) on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) # Fill epoch 1 to 3 for _ in range(3): state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, True, True, test_steps=test_steps) assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 # Copy the state to create a fork later another_state = state.copy() # Fill epoch 4 and 5 for _ in range(2): state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, 
True, True, test_steps=test_steps) assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6 assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 5 assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 4 # Create a block that has new justification information contained within it, but don't add to store yet next_epoch(spec, another_state) signed_blocks = [] _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, False, False) signed_blocks += signed_blocks_temp assert spec.compute_epoch_at_slot(another_state.slot) == 6 assert another_state.current_justified_checkpoint.epoch == 3 assert another_state.finalized_checkpoint.epoch == 2 _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, True, False) signed_blocks += signed_blocks_temp assert spec.compute_epoch_at_slot(another_state.slot) == 7 assert another_state.current_justified_checkpoint.epoch == 6 assert another_state.finalized_checkpoint.epoch == 2 last_block_root = another_state.latest_block_header.parent_root # Tick store to the last slot of the next epoch slot = another_state.slot + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 8 # Now add the blocks & check that justification update was triggered for signed_block in signed_blocks: yield from tick_and_add_block(spec, store, signed_block, test_steps) finalized_checkpoint_block = spec.get_checkpoint_block( store, last_block_root, state.finalized_checkpoint.epoch, ) assert finalized_checkpoint_block == state.finalized_checkpoint.root justified_checkpoint_block = spec.get_checkpoint_block( store, last_block_root, state.current_justified_checkpoint.epoch, ) assert justified_checkpoint_block != 
state.current_justified_checkpoint.root assert store.finalized_checkpoint.epoch == 4 assert store.justified_checkpoint.epoch == 6 yield 'steps', test_steps @with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_incompatible_justification_update_end_of_epoch(spec, state): """ Check that the store's justified checkpoint is updated when a block containing better justification is revealed at the last slot of an epoch, even when the better justified checkpoint is not a descendant of the store's justified checkpoint """ test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time next_epoch(spec, state) on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) # Fill epoch 1 to 3 for _ in range(3): state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, True, True, test_steps=test_steps) assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 # Copy the state to create a fork later another_state = state.copy() # Fill epoch 4 and 5 for _ in range(2): state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, True, True, test_steps=test_steps) assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6 assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 5 assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 4 # Create a block that has new justification information contained within it, but don't add to 
store yet next_epoch(spec, another_state) signed_blocks = [] _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, False, False) signed_blocks += signed_blocks_temp assert spec.compute_epoch_at_slot(another_state.slot) == 6 assert another_state.current_justified_checkpoint.epoch == 3 assert another_state.finalized_checkpoint.epoch == 2 _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, True, False) signed_blocks += signed_blocks_temp assert spec.compute_epoch_at_slot(another_state.slot) == 7 assert another_state.current_justified_checkpoint.epoch == 6 assert another_state.finalized_checkpoint.epoch == 2 last_block_root = another_state.latest_block_header.parent_root # Tick store to the last slot of the next epoch slot = another_state.slot + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) slot = slot + spec.SLOTS_PER_EPOCH - 1 current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 8 # Now add the blocks & check that justification update was triggered for signed_block in signed_blocks: yield from tick_and_add_block(spec, store, signed_block, test_steps) finalized_checkpoint_block = spec.get_checkpoint_block( store, last_block_root, state.finalized_checkpoint.epoch, ) assert finalized_checkpoint_block == state.finalized_checkpoint.root justified_checkpoint_block = spec.get_checkpoint_block( store, last_block_root, state.current_justified_checkpoint.epoch, ) assert justified_checkpoint_block != state.current_justified_checkpoint.root assert store.finalized_checkpoint.epoch == 4 assert store.justified_checkpoint.epoch == 6 yield 'steps', test_steps @with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_justified_update_not_realized_finality(spec, state): """ Check that the store updates its justified 
checkpoint if a higher justified checkpoint is found that is a descendant of the finalized checkpoint, but does not know about the finality """ test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time next_epoch(spec, state) on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) # Fill epoch 1 to 3 for _ in range(3): state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, True, True, test_steps=test_steps) assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 # We'll make the current head block the finalized block finalized_root = spec.get_head(store) finalized_block = store.blocks[finalized_root] assert spec.compute_epoch_at_slot(finalized_block.slot) == 4 assert spec.get_head(store) == finalized_root # Copy the post-state to use later another_state = state.copy() # Create a fork that finalizes our block for _ in range(2): state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, True, True, test_steps=test_steps) assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6 assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 5 assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 4 assert state.finalized_checkpoint.root == store.finalized_checkpoint.root == finalized_root # Create a fork for a better justification that is a descendant of the finalized block, # but does not realize the finality. 
# Do not add these blocks to the store yet next_epoch(spec, another_state) signed_blocks = [] _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, False, False) signed_blocks += signed_blocks_temp assert spec.compute_epoch_at_slot(another_state.slot) == 6 assert another_state.current_justified_checkpoint.epoch == 3 assert another_state.finalized_checkpoint.epoch == 2 _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, True, False) signed_blocks += signed_blocks_temp assert spec.compute_epoch_at_slot(another_state.slot) == 7 assert another_state.current_justified_checkpoint.epoch == 6 # Now add the blocks & check that justification update was triggered for signed_block in signed_blocks: yield from tick_and_add_block(spec, store, signed_block, test_steps) assert store.justified_checkpoint.epoch == 6 assert store.finalized_checkpoint.epoch == 4 last_block = signed_blocks[-1] last_block_root = last_block.message.hash_tree_root() ancestor_at_finalized_slot = spec.get_ancestor(store, last_block_root, finalized_block.slot) assert ancestor_at_finalized_slot == store.finalized_checkpoint.root yield 'steps', test_steps @with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_justified_update_monotonic(spec, state): """ Check that the store does not update it's justified checkpoint with lower justified checkpoints. 
This testcase checks that the store's justified checkpoint remains the same even when we input a block that has: - a higher finalized checkpoint than the store's finalized checkpoint, and - a lower justified checkpoint than the store's justified checkpoint """ test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time next_epoch(spec, state) on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) # Fill epoch 1 to 3 for _ in range(3): state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, True, True, test_steps=test_steps) assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 assert store.finalized_checkpoint.epoch == 2 # We'll eventually make the current head block the finalized block finalized_root = spec.get_head(store) finalized_block = store.blocks[finalized_root] assert spec.compute_epoch_at_slot(finalized_block.slot) == 4 assert spec.get_head(store) == finalized_root # Copy into another variable so we can use `state` later another_state = state.copy() # Create a fork with justification that is a descendant of the finalized block # Do not add these blocks to the store yet next_epoch(spec, another_state) signed_blocks = [] _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, False, False) signed_blocks += signed_blocks_temp assert spec.compute_epoch_at_slot(another_state.slot) == 6 assert another_state.current_justified_checkpoint.epoch == 3 assert another_state.finalized_checkpoint.epoch == 2 _, signed_blocks_temp, another_state = 
next_epoch_with_attestations(spec, another_state, True, False) signed_blocks += signed_blocks_temp assert spec.compute_epoch_at_slot(another_state.slot) == 7 assert another_state.current_justified_checkpoint.epoch == 6 assert another_state.finalized_checkpoint.epoch == 2 # Now add the blocks & check that justification update was triggered for signed_block in signed_blocks: yield from tick_and_add_block(spec, store, signed_block, test_steps) assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 7 assert store.justified_checkpoint.epoch == 6 assert store.finalized_checkpoint.epoch == 2 last_block = signed_blocks[-1] last_block_root = last_block.message.hash_tree_root() ancestor_at_finalized_slot = spec.get_ancestor(store, last_block_root, finalized_block.slot) assert ancestor_at_finalized_slot == finalized_root # Create a fork with lower justification that also finalizes our chosen block for _ in range(2): state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, True, True, test_steps=test_steps) assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 7 assert state.current_justified_checkpoint.epoch == 5 # Check that store's finalized checkpoint is updated assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 4 # Check that store's justified checkpoint is not updated assert store.justified_checkpoint.epoch == 6 yield 'steps', test_steps @with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_justified_update_always_if_better(spec, state): """ Check that the store updates it's justified checkpoint with any higher justified checkpoint. 
This testcase checks that the store's justified checkpoint is updated when we input a block that has: - a lower finalized checkpoint than the store's finalized checkpoint, and - a higher justified checkpoint than the store's justified checkpoint """ test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time next_epoch(spec, state) on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) # Fill epoch 1 to 3 for _ in range(3): state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, True, True, test_steps=test_steps) assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 assert store.finalized_checkpoint.epoch == 2 # We'll eventually make the current head block the finalized block finalized_root = spec.get_head(store) finalized_block = store.blocks[finalized_root] assert spec.compute_epoch_at_slot(finalized_block.slot) == 4 assert spec.get_head(store) == finalized_root # Copy into another variable to use later another_state = state.copy() # Create a fork with lower justification that also finalizes our chosen block for _ in range(2): state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, True, True, test_steps=test_steps) assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6 assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 5 assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 4 # Create a fork with higher justification that is a descendant of the finalized block # Do not add these blocks to the 
store yet next_epoch(spec, another_state) signed_blocks = [] _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, False, False) signed_blocks += signed_blocks_temp assert spec.compute_epoch_at_slot(another_state.slot) == 6 assert another_state.current_justified_checkpoint.epoch == 3 assert another_state.finalized_checkpoint.epoch == 2 _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, True, False) signed_blocks += signed_blocks_temp assert spec.compute_epoch_at_slot(another_state.slot) == 7 assert another_state.current_justified_checkpoint.epoch == 6 assert another_state.finalized_checkpoint.epoch == 2 # Now add the blocks & check that justification update was triggered for signed_block in signed_blocks: yield from tick_and_add_block(spec, store, signed_block, test_steps) assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 7 assert store.justified_checkpoint.epoch == 6 assert store.finalized_checkpoint.epoch == 4 yield 'steps', test_steps @with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_pull_up_past_epoch_block(spec, state): """ Check that the store pulls-up a block from the past epoch to realize it's justification & finalization information """ test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time next_epoch(spec, state) on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) # Fill epoch 1 to 3 for _ in range(3): state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, True, True, test_steps=test_steps) assert 
spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 assert store.finalized_checkpoint.epoch == 2 # Create a chain within epoch 4 that contains a justification for epoch 4 signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, True) assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state) == 4 # Tick store to the next epoch next_epoch(spec, state) current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 assert store.finalized_checkpoint.epoch == 2 # Add the previously created chain to the store and check for updates for signed_block in signed_blocks: yield from tick_and_add_block(spec, store, signed_block, test_steps) assert spec.get_head(store) == signed_block.message.hash_tree_root() assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 assert store.justified_checkpoint.epoch == 4 assert store.finalized_checkpoint.epoch == 3 yield 'steps', test_steps @with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_not_pull_up_current_epoch_block(spec, state): """ Check that the store does not pull-up a block from the current epoch if the previous epoch is not justified """ test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time next_epoch(spec, state) on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, 
test_steps) # Fill epoch 1 to 3 for _ in range(3): state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, True, True, test_steps=test_steps) assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 assert store.finalized_checkpoint.epoch == 2 # Skip to the next epoch next_epoch(spec, state) current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert spec.compute_epoch_at_slot(state.slot) == 5 # Create a chain within epoch 5 that contains a justification for epoch 5 signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, True) assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state) == 5 # Add the previously created chain to the store and check that store does not apply pull-up updates for signed_block in signed_blocks: yield from tick_and_add_block(spec, store, signed_block, test_steps) assert spec.get_head(store) == signed_block.message.hash_tree_root() assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 assert store.justified_checkpoint.epoch == 3 assert store.finalized_checkpoint.epoch == 2 yield 'steps', test_steps @with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_pull_up_on_tick(spec, state): """ Check that the store pulls-up current epoch tips on the on_tick transition to the next epoch """ test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time next_epoch(spec, state) on_tick_and_append_step(spec, store, store.genesis_time + state.slot * 
spec.config.SECONDS_PER_SLOT, test_steps) # Fill epoch 1 to 3 for _ in range(3): state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, True, True, test_steps=test_steps) assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 assert store.finalized_checkpoint.epoch == 2 # Skip to the next epoch next_epoch(spec, state) current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert spec.compute_epoch_at_slot(state.slot) == 5 # Create a chain within epoch 5 that contains a justification for epoch 5 signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, True) assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state) == 5 # Add the previously created chain to the store and check that store does not apply pull-up updates, # since the previous epoch was not justified for signed_block in signed_blocks: yield from tick_and_add_block(spec, store, signed_block, test_steps) assert spec.get_head(store) == signed_block.message.hash_tree_root() assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 assert store.justified_checkpoint.epoch == 3 assert store.finalized_checkpoint.epoch == 2 # Now tick the store to the next epoch and check that pull-up tip updates were applied next_epoch(spec, state) current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert spec.compute_epoch_at_slot(state.slot) == 6 assert store.justified_checkpoint.epoch == 5 # There's no new finality, so no finality updates expected assert store.finalized_checkpoint.epoch == 3 yield 'steps', test_steps
59,113
42.788148
118
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_reorg.py
from eth2spec.test.context import ( spec_state_test, with_altair_and_later, with_presets, ) from eth2spec.test.helpers.constants import ( MINIMAL, ) from eth2spec.test.helpers.attestations import ( state_transition_with_full_block, get_valid_attestation, get_valid_attestation_at_slot, ) from eth2spec.test.helpers.block import ( build_empty_block, build_empty_block_for_next_slot, ) from eth2spec.test.helpers.fork_choice import ( get_genesis_forkchoice_store_and_block, on_tick_and_append_step, add_attestations, tick_and_add_block, apply_next_epoch_with_attestations, find_next_justifying_slot, is_ready_to_justify, ) from eth2spec.test.helpers.state import ( state_transition_and_sign_block, next_epoch, next_slot, transition_to, ) TESTING_PRESETS = [MINIMAL] @with_altair_and_later @spec_state_test @with_presets(TESTING_PRESETS, reason="too slow") def test_simple_attempted_reorg_without_enough_ffg_votes(spec, state): """ [Case 1] { epoch 4 }{ epoch 5 } [c4]<--[a]<--[-]<--[y] ↑____[-]<--[z] At c4, c3 is the latest justified checkpoint (or something earlier) The block y doesn't have enough votes to justify c4. The block z also doesn't have enough votes to justify c4. 
""" test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time next_epoch(spec, state) on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) # Fill epoch 1 to 3 for _ in range(3): state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, True, True, test_steps=test_steps) assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 # create block_a, it needs 2 more full blocks to justify epoch 4 signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, True) assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state) for signed_block in signed_blocks[:-2]: yield from tick_and_add_block(spec, store, signed_block, test_steps) assert spec.get_head(store) == signed_block.message.hash_tree_root() state = store.block_states[spec.get_head(store)].copy() assert state.current_justified_checkpoint.epoch == 3 next_slot(spec, state) state_a = state.copy() # to test the "no withholding" situation, temporarily store the blocks in lists signed_blocks_of_y = [] signed_blocks_of_z = [] # add an empty block on chain y block_y = build_empty_block_for_next_slot(spec, state) signed_block_y = state_transition_and_sign_block(spec, state, block_y) signed_blocks_of_y.append(signed_block_y) # chain y has some on-chain attestations, but not enough to justify c4 signed_block_y = state_transition_with_full_block(spec, state, True, True) assert not is_ready_to_justify(spec, state) signed_blocks_of_y.append(signed_block_y) assert store.justified_checkpoint.epoch == 3 state = state_a.copy() signed_block_z = None # add one block on chain z, which is not enough 
to justify c4 attestation = get_valid_attestation(spec, state, slot=state.slot, signed=True) block_z = build_empty_block_for_next_slot(spec, state) block_z.body.attestations = [attestation] signed_block_z = state_transition_and_sign_block(spec, state, block_z) signed_blocks_of_z.append(signed_block_z) # add an empty block on chain z block_z = build_empty_block_for_next_slot(spec, state) signed_block_z = state_transition_and_sign_block(spec, state, block_z) signed_blocks_of_z.append(signed_block_z) # ensure z couldn't justify c4 assert not is_ready_to_justify(spec, state) # apply blocks to store # (i) slot block_a.slot + 1 signed_block_y = signed_blocks_of_y.pop(0) yield from tick_and_add_block(spec, store, signed_block_y, test_steps) # apply block of chain `z` signed_block_z = signed_blocks_of_z.pop(0) yield from tick_and_add_block(spec, store, signed_block_z, test_steps) # (ii) slot block_a.slot + 2 # apply block of chain `z` signed_block_z = signed_blocks_of_z.pop(0) yield from tick_and_add_block(spec, store, signed_block_z, test_steps) # apply block of chain `y` signed_block_y = signed_blocks_of_y.pop(0) yield from tick_and_add_block(spec, store, signed_block_y, test_steps) # chain `y` remains the winner since it arrives earlier than `z` assert spec.get_head(store) == signed_block_y.message.hash_tree_root() assert len(signed_blocks_of_y) == len(signed_blocks_of_z) == 0 assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 # tick to the prior of the epoch boundary slot = state.slot + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) - 1 current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 # chain `y` reminds the 
winner assert spec.get_head(store) == signed_block_y.message.hash_tree_root() # to next block next_epoch(spec, state) current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 assert spec.get_head(store) == signed_block_y.message.hash_tree_root() assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 yield 'steps', test_steps def _run_delayed_justification(spec, state, attemped_reorg, is_justifying_previous_epoch): """ """ test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time next_epoch(spec, state) on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) # Fill epoch 1 to 2 for _ in range(2): state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, True, True, test_steps=test_steps) if is_justifying_previous_epoch: state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, False, False, test_steps=test_steps) assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 2 else: state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, True, True, test_steps=test_steps) assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 if is_justifying_previous_epoch: # try to find the block that can justify epoch 3 signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, False, True) else: # try to find the block that can justify epoch 4 signed_blocks, justifying_slot = find_next_justifying_slot(spec, 
state, True, True) assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state) for signed_block in signed_blocks: yield from tick_and_add_block(spec, store, signed_block, test_steps) spec.get_head(store) == signed_block.message.hash_tree_root() state = store.block_states[spec.get_head(store)].copy() if is_justifying_previous_epoch: assert state.current_justified_checkpoint.epoch == 2 else: assert state.current_justified_checkpoint.epoch == 3 assert is_ready_to_justify(spec, state) state_b = state.copy() # add chain y if is_justifying_previous_epoch: signed_block_y = state_transition_with_full_block(spec, state, False, True) else: signed_block_y = state_transition_with_full_block(spec, state, True, True) yield from tick_and_add_block(spec, store, signed_block_y, test_steps) assert spec.get_head(store) == signed_block_y.message.hash_tree_root() if is_justifying_previous_epoch: assert store.justified_checkpoint.epoch == 2 else: assert store.justified_checkpoint.epoch == 3 # add attestations of y temp_state = state.copy() next_slot(spec, temp_state) attestations_for_y = list(get_valid_attestation_at_slot(temp_state, spec, signed_block_y.message.slot)) current_time = temp_state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) yield from add_attestations(spec, store, attestations_for_y, test_steps) assert spec.get_head(store) == signed_block_y.message.hash_tree_root() if attemped_reorg: # add chain z state = state_b.copy() slot = state.slot + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) - 1 transition_to(spec, state, slot) block_z = build_empty_block_for_next_slot(spec, state) assert spec.compute_epoch_at_slot(block_z.slot) == 5 signed_block_z = state_transition_and_sign_block(spec, state, block_z) yield from tick_and_add_block(spec, store, signed_block_z, test_steps) else: # next epoch state = state_b.copy() next_epoch(spec, state) current_time = state.slot * 
spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) # no reorg assert spec.get_head(store) == signed_block_y.message.hash_tree_root() if is_justifying_previous_epoch: assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 else: assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4 yield 'steps', test_steps @with_altair_and_later @spec_state_test @with_presets(TESTING_PRESETS, reason="too slow") def test_simple_attempted_reorg_delayed_justification_current_epoch(spec, state): """ [Case 2] { epoch 4 }{ epoch 5 } [c4]<--[b]<--[y] ↑______________[z] At c4, c3 is the latest justified checkpoint (or something earlier) block_b: the block that can justify c4. z: the child of block of x at the first slot of epoch 5. block z can reorg the chain from block y. """ yield from _run_delayed_justification(spec, state, attemped_reorg=True, is_justifying_previous_epoch=False) def _run_include_votes_of_another_empty_chain(spec, state, enough_ffg, is_justifying_previous_epoch): test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time next_epoch(spec, state) on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) # Fill epoch 1 to 2 for _ in range(2): state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, True, True, test_steps=test_steps) if is_justifying_previous_epoch: block_a = build_empty_block_for_next_slot(spec, state) signed_block_a = state_transition_and_sign_block(spec, state, block_a) yield from tick_and_add_block(spec, store, signed_block_a, test_steps) assert 
state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 2 else: # fill one more epoch state, store, _ = yield from apply_next_epoch_with_attestations( spec, state, store, True, True, test_steps=test_steps) signed_block_a = state_transition_with_full_block(spec, state, True, True) yield from tick_and_add_block(spec, store, signed_block_a, test_steps) assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 spec.get_head(store) == signed_block_a.message.hash_tree_root() state = store.block_states[spec.get_head(store)].copy() if is_justifying_previous_epoch: assert state.current_justified_checkpoint.epoch == 2 else: assert state.current_justified_checkpoint.epoch == 3 state_a = state.copy() if is_justifying_previous_epoch: # try to find the block that can justify epoch 3 _, justifying_slot = find_next_justifying_slot(spec, state, False, True) else: # try to find the block that can justify epoch 4 _, justifying_slot = find_next_justifying_slot(spec, state, True, True) last_slot_of_z = justifying_slot if enough_ffg else justifying_slot - 1 last_slot_of_y = justifying_slot if is_justifying_previous_epoch else last_slot_of_z - 1 # to test the "no withholding" situation, temporarily store the blocks in lists signed_blocks_of_y = [] # build an empty chain to the slot prior epoch boundary signed_blocks_of_empty_chain = [] states_of_empty_chain = [] for slot in range(state.slot + 1, last_slot_of_y + 1): block = build_empty_block(spec, state, slot=slot) signed_block = state_transition_and_sign_block(spec, state, block) signed_blocks_of_empty_chain.append(signed_block) states_of_empty_chain.append(state.copy()) signed_blocks_of_y.append(signed_block) signed_block_y = signed_blocks_of_empty_chain[-1] # create 2/3 votes for the empty chain attestations_for_y = [] # target_is_current = not is_justifying_previous_epoch attestations = list(get_valid_attestation_at_slot(state, spec, state_a.slot)) 
attestations_for_y.append(attestations) for state in states_of_empty_chain: attestations = list(get_valid_attestation_at_slot(state, spec, state.slot)) attestations_for_y.append(attestations) state = state_a.copy() signed_block_z = None for slot in range(state_a.slot + 1, last_slot_of_z + 1): # apply chain y, the empty chain if slot <= last_slot_of_y and len(signed_blocks_of_y) > 0: signed_block_y = signed_blocks_of_y.pop(0) assert signed_block_y.message.slot == slot yield from tick_and_add_block(spec, store, signed_block_y, test_steps) # apply chain z, a fork chain that includes these attestations_for_y block = build_empty_block(spec, state, slot=slot) if ( len(attestations_for_y) > 0 and ( (not is_justifying_previous_epoch) or (is_justifying_previous_epoch and attestations_for_y[0][0].data.slot == slot - 5) ) ): block.body.attestations = attestations_for_y.pop(0) signed_block_z = state_transition_and_sign_block(spec, state, block) if signed_block_y != signed_block_z: yield from tick_and_add_block(spec, store, signed_block_z, test_steps) if is_ready_to_justify(spec, state): break assert spec.get_head(store) == signed_block_y.message.hash_tree_root() if is_justifying_previous_epoch: assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 2 else: assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 if enough_ffg: assert is_ready_to_justify(spec, state) else: assert not is_ready_to_justify(spec, state) # to next epoch next_epoch(spec, state) current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 if enough_ffg: # reorg assert spec.get_head(store) == signed_block_z.message.hash_tree_root() if is_justifying_previous_epoch: assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 else: assert 
state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4 else: # no reorg assert spec.get_head(store) == signed_block_y.message.hash_tree_root() assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 yield 'steps', test_steps @with_altair_and_later @spec_state_test @with_presets(TESTING_PRESETS, reason="too slow") def test_include_votes_another_empty_chain_with_enough_ffg_votes_current_epoch(spec, state): """ [Case 3] """ yield from _run_include_votes_of_another_empty_chain( spec, state, enough_ffg=True, is_justifying_previous_epoch=False) @with_altair_and_later @spec_state_test @with_presets(TESTING_PRESETS, reason="too slow") def test_include_votes_another_empty_chain_without_enough_ffg_votes_current_epoch(spec, state): """ [Case 4] """ yield from _run_include_votes_of_another_empty_chain( spec, state, enough_ffg=False, is_justifying_previous_epoch=False) @with_altair_and_later @spec_state_test @with_presets(TESTING_PRESETS, reason="too slow") def test_delayed_justification_current_epoch(spec, state): """ [Case 5] To compare with ``test_simple_attempted_reorg_delayed_justification_current_epoch``, this is the basic case if there is no chain z { epoch 4 }{ epoch 5 } [c4]<--[b]<--[y] At c4, c3 is the latest justified checkpoint. block_b: the block that can justify c4. """ yield from _run_delayed_justification(spec, state, attemped_reorg=False, is_justifying_previous_epoch=False) @with_altair_and_later @spec_state_test @with_presets(TESTING_PRESETS, reason="too slow") def test_delayed_justification_previous_epoch(spec, state): """ [Case 6] Similar to ``test_delayed_justification_current_epoch``, but includes attestations during epoch N to justify checkpoint N-1. 
{ epoch 3 }{ epoch 4 }{ epoch 5 } [c3]<---------------[c4]---[b]<---------------------------------[y] """ yield from _run_delayed_justification(spec, state, attemped_reorg=False, is_justifying_previous_epoch=True) @with_altair_and_later @spec_state_test @with_presets(TESTING_PRESETS, reason="too slow") def test_simple_attempted_reorg_delayed_justification_previous_epoch(spec, state): """ [Case 7] Similar to ``test_simple_attempted_reorg_delayed_justification_current_epoch``, but includes attestations during epoch N to justify checkpoint N-1. { epoch 3 }{ epoch 4 }{ epoch 5 } [c3]<---------------[c4]<--[b]<--[y] ↑______________[z] At c4, c2 is the latest justified checkpoint. block_b: the block that can justify c3. z: the child of block of x at the first slot of epoch 5. block z can reorg the chain from block y. """ yield from _run_delayed_justification(spec, state, attemped_reorg=True, is_justifying_previous_epoch=True) @with_altair_and_later @spec_state_test @with_presets(TESTING_PRESETS, reason="too slow") def test_include_votes_another_empty_chain_with_enough_ffg_votes_previous_epoch(spec, state): """ [Case 8] Similar to ``test_include_votes_another_empty_chain_with_enough_ffg_votes_current_epoch``, but includes attestations during epoch N to justify checkpoint N-1. """ yield from _run_include_votes_of_another_empty_chain( spec, state, enough_ffg=True, is_justifying_previous_epoch=True)
20,423
39.92986
116
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_ex_ante.py
from eth2spec.test.context import ( MAINNET, spec_state_test, with_altair_and_later, with_presets, ) from eth2spec.test.helpers.attestations import ( get_valid_attestation, sign_attestation, ) from eth2spec.test.helpers.block import ( build_empty_block, ) from eth2spec.test.helpers.fork_choice import ( get_genesis_forkchoice_store_and_block, on_tick_and_append_step, add_attestation, add_block, tick_and_add_block, ) from eth2spec.test.helpers.state import ( state_transition_and_sign_block, ) def _apply_base_block_a(spec, state, store, test_steps): # On receiving block A at slot `N` block = build_empty_block(spec, state, slot=state.slot + 1) signed_block_a = state_transition_and_sign_block(spec, state, block) yield from tick_and_add_block(spec, store, signed_block_a, test_steps) assert spec.get_head(store) == signed_block_a.message.hash_tree_root() @with_altair_and_later @spec_state_test def test_ex_ante_vanilla(spec, state): """ With a single adversarial attestation Objects: Block A - slot N Block B (parent A) - slot N+1 Block C (parent A) - slot N+2 Attestation_1 (Block B); size `1` - slot N+1 Steps: Block A received at N — A is head Block C received at N+2 — C is head Block B received at N+2 — C is head Attestation_1 received at N+2 — C is head """ test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time # On receiving block A at slot `N` yield from _apply_base_block_a(spec, state, store, test_steps) state_a = state.copy() # Block B at slot `N + 1`, parent is A state_b = state_a.copy() block = build_empty_block(spec, state_a, slot=state_a.slot + 1) signed_block_b = state_transition_and_sign_block(spec, state_b, block) # Block C at slot `N + 2`, parent is A state_c = state_a.copy() 
block = build_empty_block(spec, state_c, slot=state_a.slot + 2) signed_block_c = state_transition_and_sign_block(spec, state_c, block) # Attestation_1 at slot `N + 1` voting for block B def _filter_participant_set(participants): return [next(iter(participants))] attestation = get_valid_attestation( spec, state_b, slot=state_b.slot, signed=False, filter_participant_set=_filter_participant_set ) attestation.data.beacon_block_root = signed_block_b.message.hash_tree_root() assert len([i for i in attestation.aggregation_bits if i == 1]) == 1 sign_attestation(spec, state_b, attestation) # Block C received at N+2 — C is head time = state_c.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, time, test_steps) yield from add_block(spec, store, signed_block_c, test_steps) assert spec.get_head(store) == signed_block_c.message.hash_tree_root() # Block B received at N+2 — C is head due to proposer score boost yield from add_block(spec, store, signed_block_b, test_steps) assert spec.get_head(store) == signed_block_c.message.hash_tree_root() # Attestation_1 received at N+2 — C is head yield from add_attestation(spec, store, attestation, test_steps) assert spec.get_head(store) == signed_block_c.message.hash_tree_root() yield 'steps', test_steps def _get_greater_than_proposer_boost_score(spec, store, state, proposer_boost_root, root): """ Return the minimum attestation participant count such that attestation_score > proposer_score """ # calculate proposer boost score block = store.blocks[root] proposer_score = 0 if spec.get_ancestor(store, root, block.slot) == proposer_boost_root: num_validators = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state))) avg_balance = spec.get_total_active_balance(state) // num_validators committee_size = num_validators // spec.SLOTS_PER_EPOCH committee_weight = committee_size * avg_balance proposer_score = (committee_weight * spec.config.PROPOSER_SCORE_BOOST) // 100 # calculate minimum 
participant count such that attestation_score > proposer_score base_effective_balance = state.validators[0].effective_balance return proposer_score // base_effective_balance + 1 @with_altair_and_later @with_presets([MAINNET], reason="to create non-duplicate committee") @spec_state_test def test_ex_ante_attestations_is_greater_than_proposer_boost_with_boost(spec, state): """ Adversarial attestations > proposer boost Objects: Block A - slot N Block B (parent A) - slot N+1 Block C (parent A) - slot N+2 Attestation_set_1 (Block B); size `proposer_boost + 1` - slot N+1 Steps: Block A received at N — A is head Block C received at N+2 — C is head Block B received at N+2 — C is head Attestation_1 received at N+2 — B is head """ test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time # On receiving block A at slot `N` yield from _apply_base_block_a(spec, state, store, test_steps) state_a = state.copy() # Block B at slot `N + 1`, parent is A state_b = state_a.copy() block = build_empty_block(spec, state_a, slot=state_a.slot + 1) signed_block_b = state_transition_and_sign_block(spec, state_b, block) # Block C at slot `N + 2`, parent is A state_c = state_a.copy() block = build_empty_block(spec, state_c, slot=state_a.slot + 2) signed_block_c = state_transition_and_sign_block(spec, state_c, block) # Block C received at N+2 — C is head time = state_c.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, time, test_steps) yield from add_block(spec, store, signed_block_c, test_steps) assert spec.get_head(store) == signed_block_c.message.hash_tree_root() # Block B received at N+2 — C is head due to proposer score boost yield from add_block(spec, store, 
signed_block_b, test_steps) assert spec.get_head(store) == signed_block_c.message.hash_tree_root() # Attestation_set_1 at slot `N + 1` voting for block B proposer_boost_root = signed_block_b.message.hash_tree_root() root = signed_block_b.message.hash_tree_root() participant_num = _get_greater_than_proposer_boost_score(spec, store, state, proposer_boost_root, root) def _filter_participant_set(participants): return [index for i, index in enumerate(participants) if i < participant_num] attestation = get_valid_attestation( spec, state_b, slot=state_b.slot, signed=False, filter_participant_set=_filter_participant_set ) attestation.data.beacon_block_root = signed_block_b.message.hash_tree_root() assert len([i for i in attestation.aggregation_bits if i == 1]) == participant_num sign_attestation(spec, state_b, attestation) # Attestation_set_1 received at N+2 — B is head because B's attestation_score > C's proposer_score. # (B's proposer_score = C's attestation_score = 0) yield from add_attestation(spec, store, attestation, test_steps) assert spec.get_head(store) == signed_block_b.message.hash_tree_root() yield 'steps', test_steps @with_altair_and_later @spec_state_test def test_ex_ante_sandwich_without_attestations(spec, state): """ Simple Sandwich test with boost and no attestations. 
Objects: Block A - slot N Block B (parent A) - slot N+1 Block C (parent A) - slot N+2 Block D (parent B) - slot N+3 Steps: Block A received at N — A is head Block C received at N+2 — C is head Block B received at N+2 — C is head (with boost) Block D received at N+3 — D is head (with boost) """ test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time # On receiving block A at slot `N` yield from _apply_base_block_a(spec, state, store, test_steps) state_a = state.copy() # Block B at slot `N + 1`, parent is A state_b = state_a.copy() block = build_empty_block(spec, state_a, slot=state_a.slot + 1) signed_block_b = state_transition_and_sign_block(spec, state_b, block) # Block C at slot `N + 2`, parent is A state_c = state_a.copy() block = build_empty_block(spec, state_c, slot=state_a.slot + 2) signed_block_c = state_transition_and_sign_block(spec, state_c, block) # Block D at slot `N + 3`, parent is B state_d = state_b.copy() block = build_empty_block(spec, state_d, slot=state_a.slot + 3) signed_block_d = state_transition_and_sign_block(spec, state_d, block) # Block C received at N+2 — C is head time = state_c.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, time, test_steps) yield from add_block(spec, store, signed_block_c, test_steps) assert spec.get_head(store) == signed_block_c.message.hash_tree_root() # Block B received at N+2 — C is head, it has proposer score boost yield from add_block(spec, store, signed_block_b, test_steps) assert spec.get_head(store) == signed_block_c.message.hash_tree_root() # Block D received at N+3 - D is head, it has proposer score boost time = state_d.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time 
on_tick_and_append_step(spec, store, time, test_steps) yield from add_block(spec, store, signed_block_d, test_steps) assert spec.get_head(store) == signed_block_d.message.hash_tree_root() yield 'steps', test_steps @with_altair_and_later @spec_state_test def test_ex_ante_sandwich_with_honest_attestation(spec, state): """ Boosting necessary to sandwich attack. Objects: Block A - slot N Block B (parent A) - slot N+1 Block C (parent A) - slot N+2 Block D (parent B) - slot N+3 Attestation_1 (Block C); size 1 - slot N+2 (honest) Steps: Block A received at N — A is head Block C received at N+2 — C is head Block B received at N+2 — C is head Attestation_1 received at N+3 — C is head Block D received at N+3 — D is head """ test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time # On receiving block A at slot `N` yield from _apply_base_block_a(spec, state, store, test_steps) state_a = state.copy() # Block B at slot `N + 1`, parent is A state_b = state_a.copy() block = build_empty_block(spec, state_a, slot=state_a.slot + 1) signed_block_b = state_transition_and_sign_block(spec, state_b, block) # Block C at slot `N + 2`, parent is A state_c = state_a.copy() block = build_empty_block(spec, state_c, slot=state_a.slot + 2) signed_block_c = state_transition_and_sign_block(spec, state_c, block) # Attestation_1 at N+2 voting for block C def _filter_participant_set(participants): return [next(iter(participants))] attestation = get_valid_attestation( spec, state_c, slot=state_c.slot, signed=False, filter_participant_set=_filter_participant_set ) attestation.data.beacon_block_root = signed_block_c.message.hash_tree_root() assert len([i for i in attestation.aggregation_bits if i == 1]) == 1 
sign_attestation(spec, state_c, attestation) # Block D at slot `N + 3`, parent is B state_d = state_b.copy() block = build_empty_block(spec, state_d, slot=state_a.slot + 3) signed_block_d = state_transition_and_sign_block(spec, state_d, block) # Block C received at N+2 — C is head time = state_c.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, time, test_steps) yield from add_block(spec, store, signed_block_c, test_steps) assert spec.get_head(store) == signed_block_c.message.hash_tree_root() # Block B received at N+2 — C is head, it has proposer score boost yield from add_block(spec, store, signed_block_b, test_steps) assert spec.get_head(store) == signed_block_c.message.hash_tree_root() # Attestation_1 received at N+3 — C is head time = state_d.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, time, test_steps) yield from add_attestation(spec, store, attestation, test_steps) assert spec.get_head(store) == signed_block_c.message.hash_tree_root() # Block D received at N+3 - D is head, it has proposer score boost yield from add_block(spec, store, signed_block_d, test_steps) assert spec.get_head(store) == signed_block_d.message.hash_tree_root() yield 'steps', test_steps @with_altair_and_later @with_presets([MAINNET], reason="to create non-duplicate committee") @spec_state_test def test_ex_ante_sandwich_with_boost_not_sufficient(spec, state): """ Boost not sufficient to sandwich attack. 
Objects: Block A - slot N Block B (parent A) - slot N+1 Block C (parent A) - slot N+2 Block D (parent B) - slot N+3 Attestation_set_1 (Block C); size proposer_boost + 1 - slot N+2 Steps: Block A received at N — A is head Block C received at N+2 — C is head Block B received at N+2 — C is head Attestation_set_1 received — C is head Block D received at N+3 — C is head """ test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time # On receiving block A at slot `N` yield from _apply_base_block_a(spec, state, store, test_steps) state_a = state.copy() # Block B at slot `N + 1`, parent is A state_b = state_a.copy() block = build_empty_block(spec, state_a, slot=state_a.slot + 1) signed_block_b = state_transition_and_sign_block(spec, state_b, block) # Block C at slot `N + 2`, parent is A state_c = state_a.copy() block = build_empty_block(spec, state_c, slot=state_a.slot + 2) signed_block_c = state_transition_and_sign_block(spec, state_c, block) # Block D at slot `N + 3`, parent is B state_d = state_b.copy() block = build_empty_block(spec, state_d, slot=state_a.slot + 3) signed_block_d = state_transition_and_sign_block(spec, state_d, block) # Block C received at N+2 — C is head time = state_c.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, time, test_steps) yield from add_block(spec, store, signed_block_c, test_steps) assert spec.get_head(store) == signed_block_c.message.hash_tree_root() # Block B received at N+2 — C is head, it has proposer score boost yield from add_block(spec, store, signed_block_b, test_steps) assert spec.get_head(store) == signed_block_c.message.hash_tree_root() # Attestation_set_1 at N+2 voting for block C proposer_boost_root 
= signed_block_c.message.hash_tree_root() root = signed_block_c.message.hash_tree_root() participant_num = _get_greater_than_proposer_boost_score(spec, store, state, proposer_boost_root, root) def _filter_participant_set(participants): return [index for i, index in enumerate(participants) if i < participant_num] attestation = get_valid_attestation( spec, state_c, slot=state_c.slot, signed=False, filter_participant_set=_filter_participant_set ) attestation.data.beacon_block_root = signed_block_c.message.hash_tree_root() assert len([i for i in attestation.aggregation_bits if i == 1]) == participant_num sign_attestation(spec, state_c, attestation) # Attestation_1 received at N+3 — B is head because B's attestation_score > C's proposer_score. # (B's proposer_score = C's attestation_score = 0) time = state_d.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, time, test_steps) yield from add_attestation(spec, store, attestation, test_steps) assert spec.get_head(store) == signed_block_c.message.hash_tree_root() # Block D received at N+3 - C is head, D's boost not sufficient! yield from add_block(spec, store, signed_block_d, test_steps) assert spec.get_head(store) == signed_block_c.message.hash_tree_root() yield 'steps', test_steps
17,645
40.815166
107
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py
from random import Random from eth2spec.utils import bls from eth2spec.test.helpers.state import ( get_balance, state_transition_and_sign_block, next_slot, next_epoch, next_epoch_via_block, ) from eth2spec.test.helpers.block import ( build_empty_block_for_next_slot, build_empty_block, sign_block, transition_unsigned_block, ) from eth2spec.test.helpers.keys import pubkeys from eth2spec.test.helpers.attester_slashings import ( get_valid_attester_slashing_by_indices, get_valid_attester_slashing, get_indexed_attestation_participants, ) from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing, check_proposer_slashing_effect from eth2spec.test.helpers.attestations import get_valid_attestation from eth2spec.test.helpers.deposits import prepare_state_and_deposit from eth2spec.test.helpers.execution_payload import build_empty_execution_payload from eth2spec.test.helpers.voluntary_exits import prepare_signed_exits from eth2spec.test.helpers.multi_operations import ( run_slash_and_exit, run_test_full_random_operations, ) from eth2spec.test.helpers.sync_committee import ( compute_committee_indices, compute_sync_committee_participant_reward_and_penalty, ) from eth2spec.test.helpers.constants import PHASE0, MINIMAL from eth2spec.test.helpers.forks import is_post_altair, is_post_bellatrix, is_post_capella from eth2spec.test.context import ( spec_test, spec_state_test, dump_skipping_message, with_phases, with_all_phases, single_phase, expect_assertion_error, always_bls, with_presets, with_custom_state, large_validator_set, ) @with_all_phases @spec_state_test def test_invalid_prev_slot_block_transition(spec, state): # Go to clean slot spec.process_slots(state, state.slot + 1) # Make a block for it block = build_empty_block(spec, state, slot=state.slot) proposer_index = spec.get_beacon_proposer_index(state) # Transition to next slot, above block will not be invalid on top of new state. 
spec.process_slots(state, state.slot + 1) yield 'pre', state # State is beyond block slot, but the block can still be realistic when invalid. # Try the transition, and update the state root to where it is halted. Then sign with the supposed proposer. expect_assertion_error(lambda: transition_unsigned_block(spec, state, block)) block.state_root = state.hash_tree_root() signed_block = sign_block(spec, state, block, proposer_index=proposer_index) yield 'blocks', [signed_block] yield 'post', None @with_all_phases @spec_state_test def test_invalid_same_slot_block_transition(spec, state): # Same slot on top of pre-state, but move out of slot 0 first. spec.process_slots(state, state.slot + 1) block = build_empty_block(spec, state, slot=state.slot) yield 'pre', state assert state.slot == block.slot signed_block = state_transition_and_sign_block(spec, state, block, expect_fail=True) yield 'blocks', [signed_block] yield 'post', None @with_all_phases @spec_state_test def test_empty_block_transition(spec, state): pre_slot = state.slot pre_eth1_votes = len(state.eth1_data_votes) pre_mix = spec.get_randao_mix(state, spec.get_current_epoch(state)) yield 'pre', state block = build_empty_block_for_next_slot(spec, state) signed_block = state_transition_and_sign_block(spec, state, block) yield 'blocks', [signed_block] yield 'post', state assert len(state.eth1_data_votes) == pre_eth1_votes + 1 assert spec.get_block_root_at_slot(state, pre_slot) == signed_block.message.parent_root assert spec.get_randao_mix(state, spec.get_current_epoch(state)) != pre_mix @with_all_phases @with_presets([MINIMAL], reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated") @spec_test @with_custom_state(balances_fn=large_validator_set, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) @single_phase def test_empty_block_transition_large_validator_set(spec, state): pre_slot = state.slot pre_eth1_votes = len(state.eth1_data_votes) pre_mix = 
spec.get_randao_mix(state, spec.get_current_epoch(state)) yield 'pre', state block = build_empty_block_for_next_slot(spec, state) signed_block = state_transition_and_sign_block(spec, state, block) yield 'blocks', [signed_block] yield 'post', state assert len(state.eth1_data_votes) == pre_eth1_votes + 1 assert spec.get_block_root_at_slot(state, pre_slot) == signed_block.message.parent_root assert spec.get_randao_mix(state, spec.get_current_epoch(state)) != pre_mix def process_and_sign_block_without_header_validations(spec, state, block): """ Artificially bypass the restrictions in the state transition to transition and sign block WARNING UNSAFE: Only use when generating valid-looking invalid blocks for test vectors """ # Perform single mutation in `process_block_header` state.latest_block_header = spec.BeaconBlockHeader( slot=block.slot, proposer_index=block.proposer_index, parent_root=block.parent_root, state_root=spec.Bytes32(), body_root=block.body.hash_tree_root(), ) if is_post_bellatrix(spec): if spec.is_execution_enabled(state, block.body): spec.process_execution_payload(state, block.body, spec.EXECUTION_ENGINE) # Perform rest of process_block transitions spec.process_randao(state, block.body) spec.process_eth1_data(state, block.body) spec.process_operations(state, block.body) if is_post_altair(spec): spec.process_sync_aggregate(state, block.body.sync_aggregate) # Insert post-state rot block.state_root = state.hash_tree_root() # Sign block return sign_block(spec, state, block) @with_phases([PHASE0]) @spec_state_test def test_invalid_proposal_for_genesis_slot(spec, state): assert state.slot == spec.GENESIS_SLOT yield 'pre', state block = build_empty_block(spec, state, spec.GENESIS_SLOT) block.parent_root = state.latest_block_header.hash_tree_root() # Show that normal path through transition fails failed_state = state.copy() expect_assertion_error( lambda: spec.state_transition(failed_state, spec.SignedBeaconBlock(message=block), validate_result=False) ) # 
Artificially bypass the restriction in the state transition to transition and sign block for test vectors signed_block = process_and_sign_block_without_header_validations(spec, state, block) yield 'blocks', [signed_block] yield 'post', None @with_all_phases @spec_state_test def test_invalid_parent_from_same_slot(spec, state): yield 'pre', state parent_block = build_empty_block_for_next_slot(spec, state) signed_parent_block = state_transition_and_sign_block(spec, state, parent_block) child_block = parent_block.copy() child_block.parent_root = state.latest_block_header.hash_tree_root() if is_post_bellatrix(spec): child_block.body.execution_payload = build_empty_execution_payload(spec, state) # Show that normal path through transition fails failed_state = state.copy() expect_assertion_error( lambda: spec.state_transition(failed_state, spec.SignedBeaconBlock(message=child_block), validate_result=False) ) # Artificially bypass the restriction in the state transition to transition and sign block for test vectors signed_child_block = process_and_sign_block_without_header_validations(spec, state, child_block) yield 'blocks', [signed_parent_block, signed_child_block] yield 'post', None @with_all_phases @spec_state_test def test_invalid_incorrect_state_root(spec, state): yield 'pre', state block = build_empty_block_for_next_slot(spec, state) block.state_root = b"\xaa" * 32 signed_block = sign_block(spec, state, block) expect_assertion_error(lambda: spec.state_transition(state, signed_block)) yield 'blocks', [signed_block] yield 'post', None @with_all_phases @spec_state_test @always_bls def test_invalid_all_zeroed_sig(spec, state): yield 'pre', state block = build_empty_block_for_next_slot(spec, state) invalid_signed_block = spec.SignedBeaconBlock(message=block) expect_assertion_error(lambda: spec.state_transition(state, invalid_signed_block)) yield 'blocks', [invalid_signed_block] yield 'post', None @with_all_phases @spec_state_test @always_bls def 
test_invalid_incorrect_block_sig(spec, state): yield 'pre', state block = build_empty_block_for_next_slot(spec, state) domain = spec.get_domain(state, spec.DOMAIN_BEACON_PROPOSER, spec.compute_epoch_at_slot(block.slot)) signing_root = spec.compute_signing_root(block, domain) invalid_signed_block = spec.SignedBeaconBlock( message=block, signature=bls.Sign(123456, signing_root) ) expect_assertion_error(lambda: spec.state_transition(state, invalid_signed_block)) yield 'blocks', [invalid_signed_block] yield 'post', None @with_all_phases @spec_state_test @always_bls def test_invalid_incorrect_proposer_index_sig_from_expected_proposer(spec, state): yield 'pre', state block = build_empty_block_for_next_slot(spec, state) expect_proposer_index = block.proposer_index # Set invalid proposer index but correct signature wrt expected proposer active_indices = spec.get_active_validator_indices(state, spec.get_current_epoch(state)) active_indices = [i for i in active_indices if i != block.proposer_index] block.proposer_index = active_indices[0] # invalid proposer index invalid_signed_block = sign_block(spec, state, block, expect_proposer_index) expect_assertion_error(lambda: spec.state_transition(state, invalid_signed_block)) yield 'blocks', [invalid_signed_block] yield 'post', None @with_all_phases @spec_state_test @always_bls def test_invalid_incorrect_proposer_index_sig_from_proposer_index(spec, state): yield 'pre', state block = build_empty_block_for_next_slot(spec, state) # Set invalid proposer index but correct signature wrt proposer_index active_indices = spec.get_active_validator_indices(state, spec.get_current_epoch(state)) active_indices = [i for i in active_indices if i != block.proposer_index] block.proposer_index = active_indices[0] # invalid proposer index invalid_signed_block = sign_block(spec, state, block, block.proposer_index) expect_assertion_error(lambda: spec.state_transition(state, invalid_signed_block)) yield 'blocks', [invalid_signed_block] yield 'post', 
None @with_all_phases @spec_state_test def test_skipped_slots(spec, state): pre_slot = state.slot yield 'pre', state block = build_empty_block(spec, state, state.slot + 4) signed_block = state_transition_and_sign_block(spec, state, block) yield 'blocks', [signed_block] yield 'post', state assert state.slot == block.slot assert spec.get_randao_mix(state, spec.get_current_epoch(state)) != spec.Bytes32() for slot in range(pre_slot, state.slot): assert spec.get_block_root_at_slot(state, slot) == block.parent_root @with_all_phases @spec_state_test def test_empty_epoch_transition(spec, state): pre_slot = state.slot yield 'pre', state block = build_empty_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH) signed_block = state_transition_and_sign_block(spec, state, block) yield 'blocks', [signed_block] yield 'post', state assert state.slot == block.slot for slot in range(pre_slot, state.slot): assert spec.get_block_root_at_slot(state, slot) == block.parent_root @with_all_phases @with_presets([MINIMAL], reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated") @spec_test @with_custom_state(balances_fn=large_validator_set, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) @single_phase def test_empty_epoch_transition_large_validator_set(spec, state): pre_slot = state.slot yield 'pre', state block = build_empty_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH) signed_block = state_transition_and_sign_block(spec, state, block) yield 'blocks', [signed_block] yield 'post', state assert state.slot == block.slot for slot in range(pre_slot, state.slot): assert spec.get_block_root_at_slot(state, slot) == block.parent_root @with_all_phases @spec_state_test def test_empty_epoch_transition_not_finalizing(spec, state): if spec.SLOTS_PER_EPOCH > 8: return dump_skipping_message("Skip mainnet config for saving time." " Minimal config suffice to cover the target-of-test.") # copy for later balance lookups. 
pre_balances = list(state.balances) yield 'pre', state spec.process_slots(state, state.slot + (spec.SLOTS_PER_EPOCH * 5)) block = build_empty_block_for_next_slot(spec, state) signed_block = state_transition_and_sign_block(spec, state, block) yield 'blocks', [signed_block] yield 'post', state assert state.slot == block.slot assert state.finalized_checkpoint.epoch < spec.get_current_epoch(state) - 4 for index in range(len(state.validators)): assert state.balances[index] < pre_balances[index] @with_all_phases @spec_state_test def test_proposer_self_slashing(spec, state): yield 'pre', state block = build_empty_block_for_next_slot(spec, state) assert not state.validators[block.proposer_index].slashed proposer_slashing = get_valid_proposer_slashing( spec, state, slashed_index=block.proposer_index, signed_1=True, signed_2=True) block.body.proposer_slashings.append(proposer_slashing) # The header is processed *before* the block body: # the proposer was not slashed before the body, thus the block is valid. signed_block = state_transition_and_sign_block(spec, state, block) # The proposer slashed themselves. assert state.validators[block.proposer_index].slashed yield 'blocks', [signed_block] yield 'post', state @with_all_phases @spec_state_test def test_proposer_slashing(spec, state): # copy for later balance lookups. 
pre_state = state.copy() proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True) slashed_index = proposer_slashing.signed_header_1.message.proposer_index assert not state.validators[slashed_index].slashed yield 'pre', state # # Add to state via block transition # block = build_empty_block_for_next_slot(spec, state) block.body.proposer_slashings.append(proposer_slashing) signed_block = state_transition_and_sign_block(spec, state, block) yield 'blocks', [signed_block] yield 'post', state check_proposer_slashing_effect(spec, pre_state, state, slashed_index, block) @with_all_phases @spec_state_test def test_invalid_duplicate_proposer_slashings_same_block(spec, state): proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True) slashed_index = proposer_slashing.signed_header_1.message.proposer_index assert not state.validators[slashed_index].slashed yield 'pre', state block = build_empty_block_for_next_slot(spec, state) block.body.proposer_slashings = [proposer_slashing, proposer_slashing] signed_block = state_transition_and_sign_block(spec, state, block, expect_fail=True) yield 'blocks', [signed_block] yield 'post', None @with_all_phases @spec_state_test def test_invalid_similar_proposer_slashings_same_block(spec, state): slashed_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] # Same validator, but different slashable offences in the same block proposer_slashing_1 = get_valid_proposer_slashing(spec, state, random_root=b'\xaa' * 32, slashed_index=slashed_index, signed_1=True, signed_2=True) proposer_slashing_2 = get_valid_proposer_slashing(spec, state, random_root=b'\xbb' * 32, slashed_index=slashed_index, signed_1=True, signed_2=True) assert not state.validators[slashed_index].slashed yield 'pre', state block = build_empty_block_for_next_slot(spec, state) block.body.proposer_slashings = [proposer_slashing_1, proposer_slashing_2] signed_block = 
state_transition_and_sign_block(spec, state, block, expect_fail=True) yield 'blocks', [signed_block] yield 'post', None @with_all_phases @spec_state_test def test_multiple_different_proposer_slashings_same_block(spec, state): pre_state = state.copy() num_slashings = 3 proposer_slashings = [] for i in range(num_slashings): slashed_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[i] assert not state.validators[slashed_index].slashed proposer_slashing = get_valid_proposer_slashing(spec, state, slashed_index=slashed_index, signed_1=True, signed_2=True) proposer_slashings.append(proposer_slashing) yield 'pre', state # # Add to state via block transition # block = build_empty_block_for_next_slot(spec, state) block.body.proposer_slashings = proposer_slashings signed_block = state_transition_and_sign_block(spec, state, block) yield 'blocks', [signed_block] yield 'post', state for proposer_slashing in proposer_slashings: slashed_index = proposer_slashing.signed_header_1.message.proposer_index check_proposer_slashing_effect(spec, pre_state, state, slashed_index, block) def check_attester_slashing_effect(spec, pre_state, state, slashed_indices): for slashed_index in slashed_indices: slashed_validator = state.validators[slashed_index] assert slashed_validator.slashed assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH # lost whistleblower reward assert get_balance(state, slashed_index) < get_balance(pre_state, slashed_index) proposer_index = spec.get_beacon_proposer_index(state) # gained whistleblower reward assert get_balance(state, proposer_index) > get_balance(pre_state, proposer_index) @with_all_phases @spec_state_test def test_attester_slashing(spec, state): # copy for later balance lookups. 
pre_state = state.copy() attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True) slashed_indices = get_indexed_attestation_participants(spec, attester_slashing.attestation_1) assert not any(state.validators[i].slashed for i in slashed_indices) yield 'pre', state # # Add to state via block transition # block = build_empty_block_for_next_slot(spec, state) block.body.attester_slashings.append(attester_slashing) signed_block = state_transition_and_sign_block(spec, state, block) yield 'blocks', [signed_block] yield 'post', state check_attester_slashing_effect(spec, pre_state, state, slashed_indices) @with_all_phases @spec_state_test def test_invalid_duplicate_attester_slashing_same_block(spec, state): if spec.MAX_ATTESTER_SLASHINGS < 2: return dump_skipping_message("Skip test if config cannot handle multiple AttesterSlashings per block") attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True) attester_slashings = [attester_slashing, attester_slashing.copy()] slashed_indices = get_indexed_attestation_participants(spec, attester_slashing.attestation_1) assert not any(state.validators[i].slashed for i in slashed_indices) yield 'pre', state # # Add to state via block transition # block = build_empty_block_for_next_slot(spec, state) block.body.attester_slashings = attester_slashings signed_block = state_transition_and_sign_block(spec, state, block, expect_fail=True) yield 'blocks', [signed_block] yield 'post', None # TODO All AttesterSlashing tests should be adopted for SHARDING and later but helper support is not yet there @with_all_phases @spec_state_test def test_multiple_attester_slashings_no_overlap(spec, state): if spec.MAX_ATTESTER_SLASHINGS < 2: return dump_skipping_message("Skip test if config cannot handle multiple AttesterSlashings per block") # copy for later balance lookups. 
pre_state = state.copy() full_indices = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[:8] half_length = len(full_indices) // 2 attester_slashing_1 = get_valid_attester_slashing_by_indices( spec, state, full_indices[:half_length], signed_1=True, signed_2=True, ) attester_slashing_2 = get_valid_attester_slashing_by_indices( spec, state, full_indices[half_length:], signed_1=True, signed_2=True, ) attester_slashings = [attester_slashing_1, attester_slashing_2] assert not any(state.validators[i].slashed for i in full_indices) yield 'pre', state # # Add to state via block transition # block = build_empty_block_for_next_slot(spec, state) block.body.attester_slashings = attester_slashings signed_block = state_transition_and_sign_block(spec, state, block) yield 'blocks', [signed_block] yield 'post', state check_attester_slashing_effect(spec, pre_state, state, full_indices) @with_all_phases @spec_state_test def test_multiple_attester_slashings_partial_overlap(spec, state): if spec.MAX_ATTESTER_SLASHINGS < 2: return dump_skipping_message("Skip test if config cannot handle multiple AttesterSlashings per block") # copy for later balance lookups. 
pre_state = state.copy() full_indices = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[:8] one_third_length = len(full_indices) // 3 attester_slashing_1 = get_valid_attester_slashing_by_indices( spec, state, full_indices[:one_third_length * 2], signed_1=True, signed_2=True, ) attester_slashing_2 = get_valid_attester_slashing_by_indices( spec, state, full_indices[one_third_length:], signed_1=True, signed_2=True, ) attester_slashings = [attester_slashing_1, attester_slashing_2] assert not any(state.validators[i].slashed for i in full_indices) yield 'pre', state # # Add to state via block transition # block = build_empty_block_for_next_slot(spec, state) block.body.attester_slashings = attester_slashings signed_block = state_transition_and_sign_block(spec, state, block) yield 'blocks', [signed_block] yield 'post', state check_attester_slashing_effect(spec, pre_state, state, full_indices) @with_all_phases @spec_state_test def test_proposer_after_inactive_index(spec, state): # disable some low validator index to check after for inactive_index = 10 state.validators[inactive_index].exit_epoch = spec.get_current_epoch(state) # skip forward, get brand new proposers next_epoch_via_block(spec, state) next_epoch_via_block(spec, state) while True: proposer_index = spec.get_beacon_proposer_index(state) if proposer_index > inactive_index: # found a proposer that has a higher index than a disabled validator yield 'pre', state # test if the proposer can be recognized correctly after the inactive validator signed_block = state_transition_and_sign_block(spec, state, build_empty_block_for_next_slot(spec, state)) yield 'blocks', [signed_block] yield 'post', state break next_slot(spec, state) @with_all_phases @spec_state_test def test_high_proposer_index(spec, state): # disable a good amount of validators to make the active count lower, for a faster test current_epoch = spec.get_current_epoch(state) for i in range(len(state.validators) // 3): 
state.validators[i].exit_epoch = current_epoch # skip forward, get brand new proposers state.slot = spec.SLOTS_PER_EPOCH * 2 block = build_empty_block_for_next_slot(spec, state) state_transition_and_sign_block(spec, state, block) active_count = len(spec.get_active_validator_indices(state, current_epoch)) while True: proposer_index = spec.get_beacon_proposer_index(state) if proposer_index >= active_count: # found a proposer that has a higher index than the active validator count yield 'pre', state # test if the proposer can be recognized correctly, even while it has a high index. signed_block = state_transition_and_sign_block(spec, state, build_empty_block_for_next_slot(spec, state)) yield 'blocks', [signed_block] yield 'post', state break next_slot(spec, state) @with_all_phases @spec_state_test def test_invalid_only_increase_deposit_count(spec, state): # Make the state expect a deposit, then don't provide it. state.eth1_data.deposit_count += 1 yield 'pre', state block = build_empty_block_for_next_slot(spec, state) signed_block = state_transition_and_sign_block(spec, state, block, expect_fail=True) yield 'blocks', [signed_block] yield 'post', None @with_all_phases @spec_state_test def test_deposit_in_block(spec, state): initial_registry_len = len(state.validators) initial_balances_len = len(state.balances) validator_index = len(state.validators) amount = spec.MAX_EFFECTIVE_BALANCE deposit = prepare_state_and_deposit(spec, state, validator_index, amount, signed=True) yield 'pre', state block = build_empty_block_for_next_slot(spec, state) block.body.deposits.append(deposit) signed_block = state_transition_and_sign_block(spec, state, block) yield 'blocks', [signed_block] yield 'post', state assert len(state.validators) == initial_registry_len + 1 assert len(state.balances) == initial_balances_len + 1 assert get_balance(state, validator_index) == spec.MAX_EFFECTIVE_BALANCE assert state.validators[validator_index].pubkey == pubkeys[validator_index] @with_all_phases 
@spec_state_test def test_invalid_duplicate_deposit_same_block(spec, state): validator_index = len(state.validators) amount = spec.MAX_EFFECTIVE_BALANCE deposit = prepare_state_and_deposit(spec, state, validator_index, amount, signed=True) yield 'pre', state block = build_empty_block_for_next_slot(spec, state) # The same deposit of the same validator for _ in range(2): block.body.deposits.append(deposit) signed_block = state_transition_and_sign_block(spec, state, block, expect_fail=True) yield 'blocks', [signed_block] yield 'post', None @with_all_phases @spec_state_test def test_deposit_top_up(spec, state): validator_index = 0 amount = spec.MAX_EFFECTIVE_BALANCE // 4 deposit = prepare_state_and_deposit(spec, state, validator_index, amount) initial_registry_len = len(state.validators) initial_balances_len = len(state.balances) validator_pre_balance = get_balance(state, validator_index) pre_state = state.copy() yield 'pre', pre_state block = build_empty_block_for_next_slot(spec, state) block.body.deposits.append(deposit) signed_block = state_transition_and_sign_block(spec, state, block) yield 'blocks', [signed_block] yield 'post', state assert len(state.validators) == initial_registry_len assert len(state.balances) == initial_balances_len # Altair introduces sync committee (sm) reward and penalty sync_committee_reward = sync_committee_penalty = 0 if is_post_altair(spec): committee_indices = compute_committee_indices(state, state.current_sync_committee) committee_bits = block.body.sync_aggregate.sync_committee_bits sync_committee_reward, sync_committee_penalty = compute_sync_committee_participant_reward_and_penalty( spec, pre_state, validator_index, committee_indices, committee_bits, ) assert get_balance(state, validator_index) == ( validator_pre_balance + amount + sync_committee_reward - sync_committee_penalty ) @with_all_phases @spec_state_test def test_attestation(spec, state): next_epoch(spec, state) yield 'pre', state attestation_block = build_empty_block(spec, 
state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) index = 0 # if spec.fork == SHARDING: # TODO add shard data to block to vote on attestation = get_valid_attestation(spec, state, index=index, signed=True) if not is_post_altair(spec): pre_current_attestations_len = len(state.current_epoch_attestations) # Add to state via block transition attestation_block.body.attestations.append(attestation) signed_attestation_block = state_transition_and_sign_block(spec, state, attestation_block) if not is_post_altair(spec): assert len(state.current_epoch_attestations) == pre_current_attestations_len + 1 # Epoch transition should move to previous_epoch_attestations pre_current_attestations_root = spec.hash_tree_root(state.current_epoch_attestations) else: pre_current_epoch_participation_root = spec.hash_tree_root(state.current_epoch_participation) epoch_block = build_empty_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH) signed_epoch_block = state_transition_and_sign_block(spec, state, epoch_block) yield 'blocks', [signed_attestation_block, signed_epoch_block] yield 'post', state if not is_post_altair(spec): assert len(state.current_epoch_attestations) == 0 assert spec.hash_tree_root(state.previous_epoch_attestations) == pre_current_attestations_root else: for index in range(len(state.validators)): assert state.current_epoch_participation[index] == spec.ParticipationFlags(0b0000_0000) assert spec.hash_tree_root(state.previous_epoch_participation) == pre_current_epoch_participation_root @with_all_phases @spec_state_test def test_duplicate_attestation_same_block(spec, state): next_epoch(spec, state) yield 'pre', state attestation_block = build_empty_block(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) index = 0 attestation = get_valid_attestation(spec, state, index=index, signed=True) if not is_post_altair(spec): pre_current_attestations_len = len(state.current_epoch_attestations) # Add to state via block transition for _ in range(2): 
attestation_block.body.attestations.append(attestation) signed_attestation_block = state_transition_and_sign_block(spec, state, attestation_block) if not is_post_altair(spec): assert len(state.current_epoch_attestations) == pre_current_attestations_len + 2 # Epoch transition should move to previous_epoch_attestations pre_current_attestations_root = spec.hash_tree_root(state.current_epoch_attestations) else: pre_current_epoch_participation_root = spec.hash_tree_root(state.current_epoch_participation) epoch_block = build_empty_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH) signed_epoch_block = state_transition_and_sign_block(spec, state, epoch_block) yield 'blocks', [signed_attestation_block, signed_epoch_block] yield 'post', state if not is_post_altair(spec): assert len(state.current_epoch_attestations) == 0 assert spec.hash_tree_root(state.previous_epoch_attestations) == pre_current_attestations_root else: for index in range(len(state.validators)): assert state.current_epoch_participation[index] == spec.ParticipationFlags(0b0000_0000) assert spec.hash_tree_root(state.previous_epoch_participation) == pre_current_epoch_participation_root # After SHARDING is enabled, a committee is computed for SHARD_COMMITTEE_PERIOD slots ago, # exceeding the minimal-config randao mixes memory size. # Applies to all voluntary-exit sanity block tests. # TODO: when integrating SHARDING tests, voluntary-exit tests may need to change. 
@with_all_phases @spec_state_test def test_voluntary_exit(spec, state): validator_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] # move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH signed_exits = prepare_signed_exits(spec, state, [validator_index]) yield 'pre', state # Add to state via block transition initiate_exit_block = build_empty_block_for_next_slot(spec, state) initiate_exit_block.body.voluntary_exits = signed_exits signed_initiate_exit_block = state_transition_and_sign_block(spec, state, initiate_exit_block) assert state.validators[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH # Process within epoch transition exit_block = build_empty_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH) signed_exit_block = state_transition_and_sign_block(spec, state, exit_block) yield 'blocks', [signed_initiate_exit_block, signed_exit_block] yield 'post', state assert state.validators[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH @with_all_phases @spec_state_test def test_invalid_duplicate_validator_exit_same_block(spec, state): validator_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] # move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH # Same index tries to exit twice, but should only be able to do so once. 
signed_exits = prepare_signed_exits(spec, state, [validator_index, validator_index]) yield 'pre', state # Add to state via block transition initiate_exit_block = build_empty_block_for_next_slot(spec, state) initiate_exit_block.body.voluntary_exits = signed_exits signed_initiate_exit_block = state_transition_and_sign_block(spec, state, initiate_exit_block, expect_fail=True) yield 'blocks', [signed_initiate_exit_block] yield 'post', None @with_all_phases @spec_state_test def test_multiple_different_validator_exits_same_block(spec, state): validator_indices = [ spec.get_active_validator_indices(state, spec.get_current_epoch(state))[i] for i in range(3) ] # move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH signed_exits = prepare_signed_exits(spec, state, validator_indices) yield 'pre', state # Add to state via block transition initiate_exit_block = build_empty_block_for_next_slot(spec, state) initiate_exit_block.body.voluntary_exits = signed_exits signed_initiate_exit_block = state_transition_and_sign_block(spec, state, initiate_exit_block) for index in validator_indices: assert state.validators[index].exit_epoch < spec.FAR_FUTURE_EPOCH # Process within epoch transition exit_block = build_empty_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH) signed_exit_block = state_transition_and_sign_block(spec, state, exit_block) yield 'blocks', [signed_initiate_exit_block, signed_exit_block] yield 'post', state for index in validator_indices: assert state.validators[index].exit_epoch < spec.FAR_FUTURE_EPOCH @with_all_phases @spec_state_test def test_slash_and_exit_same_index(spec, state): validator_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] yield from run_slash_and_exit(spec, state, validator_index, validator_index, valid=False) @with_all_phases @spec_state_test def test_slash_and_exit_diff_index(spec, state): slash_index = 
spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] exit_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-2] yield from run_slash_and_exit(spec, state, slash_index, exit_index) @with_all_phases @spec_state_test def test_balance_driven_status_transitions(spec, state): current_epoch = spec.get_current_epoch(state) validator_index = spec.get_active_validator_indices(state, current_epoch)[-1] assert state.validators[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH # set validator balance to below ejection threshold state.validators[validator_index].effective_balance = spec.config.EJECTION_BALANCE yield 'pre', state # trigger epoch transition block = build_empty_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH) signed_block = state_transition_and_sign_block(spec, state, block) yield 'blocks', [signed_block] yield 'post', state assert state.validators[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH # Requires always_bls because historical root period and sync committee period is same length # so this epoch transition also computes new sync committees which requires aggregation @with_all_phases @spec_state_test @always_bls def test_historical_batch(spec, state): state.slot += spec.SLOTS_PER_HISTORICAL_ROOT - (state.slot % spec.SLOTS_PER_HISTORICAL_ROOT) - 1 pre_historical_roots = state.historical_roots.copy() if is_post_capella(spec): pre_historical_summaries = state.historical_summaries.copy() yield 'pre', state block = build_empty_block_for_next_slot(spec, state) signed_block = state_transition_and_sign_block(spec, state, block) yield 'blocks', [signed_block] yield 'post', state assert state.slot == block.slot assert spec.get_current_epoch(state) % (spec.SLOTS_PER_HISTORICAL_ROOT // spec.SLOTS_PER_EPOCH) == 0 # check history update if is_post_capella(spec): # Frozen `historical_roots` assert state.historical_roots == pre_historical_roots assert len(state.historical_summaries) == 
len(pre_historical_summaries) + 1 else: assert len(state.historical_roots) == len(pre_historical_roots) + 1 @with_all_phases @with_presets([MINIMAL], reason="suffices to test eth1 data voting without long voting period") @spec_state_test def test_eth1_data_votes_consensus(spec, state): voting_period_slots = spec.EPOCHS_PER_ETH1_VOTING_PERIOD * spec.SLOTS_PER_EPOCH offset_block = build_empty_block(spec, state, slot=voting_period_slots - 1) state_transition_and_sign_block(spec, state, offset_block) yield 'pre', state a = b'\xaa' * 32 b = b'\xbb' * 32 c = b'\xcc' * 32 blocks = [] for i in range(0, voting_period_slots): block = build_empty_block_for_next_slot(spec, state) # wait for over 50% for A, then start voting B block.body.eth1_data.block_hash = b if i * 2 > voting_period_slots else a signed_block = state_transition_and_sign_block(spec, state, block) blocks.append(signed_block) assert len(state.eth1_data_votes) == voting_period_slots assert state.eth1_data.block_hash == a # transition to next eth1 voting period block = build_empty_block_for_next_slot(spec, state) block.body.eth1_data.block_hash = c signed_block = state_transition_and_sign_block(spec, state, block) blocks.append(signed_block) yield 'blocks', blocks yield 'post', state assert state.eth1_data.block_hash == a assert state.slot % voting_period_slots == 0 assert len(state.eth1_data_votes) == 1 assert state.eth1_data_votes[0].block_hash == c @with_all_phases @with_presets([MINIMAL], reason="suffices to test eth1 data voting without long voting period") @spec_state_test def test_eth1_data_votes_no_consensus(spec, state): voting_period_slots = spec.EPOCHS_PER_ETH1_VOTING_PERIOD * spec.SLOTS_PER_EPOCH pre_eth1_hash = state.eth1_data.block_hash offset_block = build_empty_block(spec, state, slot=voting_period_slots - 1) state_transition_and_sign_block(spec, state, offset_block) yield 'pre', state a = b'\xaa' * 32 b = b'\xbb' * 32 blocks = [] for i in range(0, voting_period_slots): block = 
build_empty_block_for_next_slot(spec, state) # wait for precisely 50% for A, then start voting B for other 50% block.body.eth1_data.block_hash = b if i * 2 >= voting_period_slots else a signed_block = state_transition_and_sign_block(spec, state, block) blocks.append(signed_block) assert len(state.eth1_data_votes) == voting_period_slots assert state.eth1_data.block_hash == pre_eth1_hash yield 'blocks', blocks yield 'post', state @with_all_phases @spec_state_test def test_full_random_operations_0(spec, state): yield from run_test_full_random_operations(spec, state, rng=Random(2020)) @with_all_phases @spec_state_test def test_full_random_operations_1(spec, state): yield from run_test_full_random_operations(spec, state, rng=Random(2021)) @with_all_phases @spec_state_test def test_full_random_operations_2(spec, state): yield from run_test_full_random_operations(spec, state, rng=Random(2022)) @with_all_phases @spec_state_test def test_full_random_operations_3(spec, state): yield from run_test_full_random_operations(spec, state, rng=Random(2023))
41,750
35.368467
119
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/sanity/__init__.py
0
0
0
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/sanity/test_slots.py
from eth2spec.test.helpers.forks import ( is_post_capella, ) from eth2spec.test.helpers.state import get_state_root from eth2spec.test.context import ( spec_state_test, with_all_phases, ) @with_all_phases @spec_state_test def test_slots_1(spec, state): pre_slot = state.slot pre_root = state.hash_tree_root() yield 'pre', state slots = 1 yield 'slots', int(slots) spec.process_slots(state, state.slot + slots) yield 'post', state assert state.slot == pre_slot + 1 assert get_state_root(spec, state, pre_slot) == pre_root @with_all_phases @spec_state_test def test_slots_2(spec, state): yield 'pre', state slots = 2 yield 'slots', int(slots) spec.process_slots(state, state.slot + slots) yield 'post', state @with_all_phases @spec_state_test def test_empty_epoch(spec, state): yield 'pre', state slots = spec.SLOTS_PER_EPOCH yield 'slots', int(slots) spec.process_slots(state, state.slot + slots) yield 'post', state @with_all_phases @spec_state_test def test_double_empty_epoch(spec, state): yield 'pre', state slots = spec.SLOTS_PER_EPOCH * 2 yield 'slots', int(slots) spec.process_slots(state, state.slot + slots) yield 'post', state @with_all_phases @spec_state_test def test_over_epoch_boundary(spec, state): if spec.SLOTS_PER_EPOCH > 1: spec.process_slots(state, state.slot + (spec.SLOTS_PER_EPOCH // 2)) yield 'pre', state slots = spec.SLOTS_PER_EPOCH yield 'slots', int(slots) spec.process_slots(state, state.slot + slots) yield 'post', state @with_all_phases @spec_state_test def test_historical_accumulator(spec, state): pre_historical_roots = state.historical_roots.copy() if is_post_capella(spec): pre_historical_summaries = state.historical_summaries.copy() yield 'pre', state slots = spec.SLOTS_PER_HISTORICAL_ROOT yield 'slots', int(slots) spec.process_slots(state, state.slot + slots) yield 'post', state # check history update if is_post_capella(spec): # Frozen `historical_roots` assert state.historical_roots == pre_historical_roots assert len(state.historical_summaries) == 
len(pre_historical_summaries) + 1 else: assert len(state.historical_roots) == len(pre_historical_roots) + 1
2,338
24.988889
83
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/genesis/test_validity.py
from eth2spec.test.context import ( spec_test, single_phase, with_presets, with_all_phases, ) from eth2spec.test.helpers.constants import MINIMAL from eth2spec.test.helpers.deposits import ( prepare_full_genesis_deposits, ) from eth2spec.test.helpers.forks import ( is_post_altair, ) def get_post_altair_description(spec): return f"Although it's not phase 0, we may use {spec.fork} spec to start testnets." def create_valid_beacon_state(spec): deposit_count = spec.config.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT deposits, _, _ = prepare_full_genesis_deposits( spec, amount=spec.MAX_EFFECTIVE_BALANCE, deposit_count=deposit_count, signed=True, ) eth1_block_hash = b'\x12' * 32 eth1_timestamp = spec.config.MIN_GENESIS_TIME return spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits) def run_is_valid_genesis_state(spec, state, valid=True): """ Run ``is_valid_genesis_state``, yielding: - genesis ('state') - is_valid ('is_valid') """ yield 'genesis', state is_valid = spec.is_valid_genesis_state(state) yield 'is_valid', is_valid assert is_valid == valid @with_all_phases @spec_test @single_phase @with_presets([MINIMAL], reason="too slow") def test_full_genesis_deposits(spec): if is_post_altair(spec): yield 'description', 'meta', get_post_altair_description(spec) state = create_valid_beacon_state(spec) yield from run_is_valid_genesis_state(spec, state) @with_all_phases @spec_test @single_phase @with_presets([MINIMAL], reason="too slow") def test_invalid_invalid_timestamp(spec): if is_post_altair(spec): yield 'description', 'meta', get_post_altair_description(spec) state = create_valid_beacon_state(spec) state.genesis_time = spec.config.MIN_GENESIS_TIME - 1 yield from run_is_valid_genesis_state(spec, state, valid=False) @with_all_phases @spec_test @single_phase @with_presets([MINIMAL], reason="too slow") def test_extra_balance(spec): if is_post_altair(spec): yield 'description', 'meta', get_post_altair_description(spec) state = create_valid_beacon_state(spec) 
state.validators[0].effective_balance = spec.MAX_EFFECTIVE_BALANCE + 1 yield from run_is_valid_genesis_state(spec, state) @with_all_phases @spec_test @single_phase @with_presets([MINIMAL], reason="too slow") def test_one_more_validator(spec): if is_post_altair(spec): yield 'description', 'meta', get_post_altair_description(spec) deposit_count = spec.config.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT + 1 deposits, _, _ = prepare_full_genesis_deposits( spec, amount=spec.MAX_EFFECTIVE_BALANCE, deposit_count=deposit_count, signed=True, ) eth1_block_hash = b'\x12' * 32 eth1_timestamp = spec.config.MIN_GENESIS_TIME state = spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits) yield from run_is_valid_genesis_state(spec, state) @with_all_phases @spec_test @single_phase @with_presets([MINIMAL], reason="too slow") def test_invalid_not_enough_validator_count(spec): if is_post_altair(spec): yield 'description', 'meta', get_post_altair_description(spec) deposit_count = spec.config.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT - 1 deposits, _, _ = prepare_full_genesis_deposits( spec, amount=spec.MAX_EFFECTIVE_BALANCE, deposit_count=deposit_count, signed=True, ) eth1_block_hash = b'\x12' * 32 eth1_timestamp = spec.config.MIN_GENESIS_TIME state = spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits) yield from run_is_valid_genesis_state(spec, state, valid=False)
3,769
27.778626
93
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/genesis/test_initialization.py
from eth2spec.test.context import ( single_phase, spec_test, with_presets, with_all_phases, ) from eth2spec.test.helpers.constants import MINIMAL from eth2spec.test.helpers.deposits import ( prepare_full_genesis_deposits, prepare_random_genesis_deposits, ) from eth2spec.test.helpers.forks import ( is_post_altair, ) def get_post_altair_description(spec): return f"Although it's not phase 0, we may use {spec.fork} spec to start testnets." def eth1_init_data(eth1_block_hash, eth1_timestamp): yield 'eth1', { 'eth1_block_hash': '0x' + eth1_block_hash.hex(), 'eth1_timestamp': int(eth1_timestamp), } @with_all_phases @spec_test @single_phase @with_presets([MINIMAL], reason="too slow") def test_initialize_beacon_state_from_eth1(spec): if is_post_altair(spec): yield 'description', 'meta', get_post_altair_description(spec) deposit_count = spec.config.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT deposits, deposit_root, _ = prepare_full_genesis_deposits( spec, spec.MAX_EFFECTIVE_BALANCE, deposit_count, signed=True, ) eth1_block_hash = b'\x12' * 32 eth1_timestamp = spec.config.MIN_GENESIS_TIME yield from eth1_init_data(eth1_block_hash, eth1_timestamp) yield 'deposits', deposits # initialize beacon_state state = spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits) assert state.genesis_time == eth1_timestamp + spec.config.GENESIS_DELAY assert len(state.validators) == deposit_count assert state.eth1_data.deposit_root == deposit_root assert state.eth1_data.deposit_count == deposit_count assert state.eth1_data.block_hash == eth1_block_hash assert spec.get_total_active_balance(state) == deposit_count * spec.MAX_EFFECTIVE_BALANCE # yield state yield 'state', state @with_all_phases @spec_test @single_phase @with_presets([MINIMAL], reason="too slow") def test_initialize_beacon_state_some_small_balances(spec): if is_post_altair(spec): yield 'description', 'meta', get_post_altair_description(spec) main_deposit_count = spec.config.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT 
main_deposits, _, deposit_data_list = prepare_full_genesis_deposits( spec, spec.MAX_EFFECTIVE_BALANCE, deposit_count=main_deposit_count, signed=True, ) # For deposits above, and for another deposit_count, add a balance of EFFECTIVE_BALANCE_INCREMENT small_deposit_count = main_deposit_count * 2 small_deposits, deposit_root, _ = prepare_full_genesis_deposits( spec, spec.MIN_DEPOSIT_AMOUNT, deposit_count=small_deposit_count, signed=True, deposit_data_list=deposit_data_list, ) deposits = main_deposits + small_deposits eth1_block_hash = b'\x12' * 32 eth1_timestamp = spec.config.MIN_GENESIS_TIME yield from eth1_init_data(eth1_block_hash, eth1_timestamp) yield 'deposits', deposits # initialize beacon_state state = spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits) assert state.genesis_time == eth1_timestamp + spec.config.GENESIS_DELAY assert len(state.validators) == small_deposit_count assert state.eth1_data.deposit_root == deposit_root assert state.eth1_data.deposit_count == len(deposits) assert state.eth1_data.block_hash == eth1_block_hash # only main deposits participate to the active balance assert spec.get_total_active_balance(state) == main_deposit_count * spec.MAX_EFFECTIVE_BALANCE # yield state yield 'state', state @with_all_phases @spec_test @single_phase @with_presets([MINIMAL], reason="too slow") def test_initialize_beacon_state_one_topup_activation(spec): if is_post_altair(spec): yield 'description', 'meta', get_post_altair_description(spec) # Submit all but one deposit as MAX_EFFECTIVE_BALANCE main_deposit_count = spec.config.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT - 1 main_deposits, _, deposit_data_list = prepare_full_genesis_deposits( spec, spec.MAX_EFFECTIVE_BALANCE, deposit_count=main_deposit_count, signed=True, ) # Submit last pubkey deposit as MAX_EFFECTIVE_BALANCE - MIN_DEPOSIT_AMOUNT partial_deposits, _, deposit_data_list = prepare_full_genesis_deposits( spec, spec.MAX_EFFECTIVE_BALANCE - spec.MIN_DEPOSIT_AMOUNT, 
deposit_count=1, min_pubkey_index=main_deposit_count, signed=True, deposit_data_list=deposit_data_list, ) # Top up thelast pubkey deposit as MIN_DEPOSIT_AMOUNT to complete the deposit top_up_deposits, _, _ = prepare_full_genesis_deposits( spec, spec.MIN_DEPOSIT_AMOUNT, deposit_count=1, min_pubkey_index=main_deposit_count, signed=True, deposit_data_list=deposit_data_list, ) deposits = main_deposits + partial_deposits + top_up_deposits eth1_block_hash = b'\x13' * 32 eth1_timestamp = spec.config.MIN_GENESIS_TIME yield from eth1_init_data(eth1_block_hash, eth1_timestamp) yield 'deposits', deposits # initialize beacon_state state = spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits) assert spec.is_valid_genesis_state(state) # yield state yield 'state', state @with_all_phases @spec_test @single_phase @with_presets([MINIMAL], reason="too slow") def test_initialize_beacon_state_random_invalid_genesis(spec): if is_post_altair(spec): yield 'description', 'meta', get_post_altair_description(spec) # Make a bunch of random deposits deposits, _, deposit_data_list = prepare_random_genesis_deposits( spec, deposit_count=20, max_pubkey_index=10, ) eth1_block_hash = b'\x14' * 32 eth1_timestamp = spec.config.MIN_GENESIS_TIME + 1 yield from eth1_init_data(eth1_block_hash, eth1_timestamp) yield 'deposits', deposits # initialize beacon_state state = spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits) assert not spec.is_valid_genesis_state(state) yield 'state', state @with_all_phases @spec_test @single_phase @with_presets([MINIMAL], reason="too slow") def test_initialize_beacon_state_random_valid_genesis(spec): if is_post_altair(spec): yield 'description', 'meta', get_post_altair_description(spec) # Make a bunch of random deposits random_deposits, _, deposit_data_list = prepare_random_genesis_deposits( spec, deposit_count=20, min_pubkey_index=spec.config.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT - 5, 
max_pubkey_index=spec.config.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT + 5, ) # Then make spec.config.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT full deposits full_deposits, _, _ = prepare_full_genesis_deposits( spec, spec.MAX_EFFECTIVE_BALANCE, deposit_count=spec.config.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT, signed=True, deposit_data_list=deposit_data_list ) deposits = random_deposits + full_deposits eth1_block_hash = b'\x15' * 32 eth1_timestamp = spec.config.MIN_GENESIS_TIME + 2 yield from eth1_init_data(eth1_block_hash, eth1_timestamp) yield 'deposits', deposits # initialize beacon_state state = spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits) assert spec.is_valid_genesis_state(state) yield 'state', state
7,451
32.719457
101
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/genesis/__init__.py
0
0
0
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/unittests/test_config_invariants.py
from eth2spec.test.context import ( spec_state_test, with_all_phases, ) from eth2spec.test.helpers.constants import MAX_UINT_64 from eth2spec.test.helpers.forks import ( is_post_altair, is_post_bellatrix, ) def check_bound(value, lower_bound, upper_bound): assert value >= lower_bound assert value <= upper_bound @with_all_phases @spec_state_test def test_validators(spec, state): check_bound(spec.VALIDATOR_REGISTRY_LIMIT, 1, MAX_UINT_64) check_bound(spec.MAX_COMMITTEES_PER_SLOT, 1, MAX_UINT_64) check_bound(spec.TARGET_COMMITTEE_SIZE, 1, MAX_UINT_64) # Note: can be less if you assume stricters bounds on validator set based on total ETH supply maximum_validators_per_committee = ( spec.VALIDATOR_REGISTRY_LIMIT // spec.SLOTS_PER_EPOCH // spec.MAX_COMMITTEES_PER_SLOT ) check_bound(spec.MAX_VALIDATORS_PER_COMMITTEE, 1, maximum_validators_per_committee) check_bound(spec.config.MIN_PER_EPOCH_CHURN_LIMIT, 1, spec.VALIDATOR_REGISTRY_LIMIT) check_bound(spec.config.CHURN_LIMIT_QUOTIENT, 1, spec.VALIDATOR_REGISTRY_LIMIT) check_bound(spec.config.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT, spec.TARGET_COMMITTEE_SIZE, MAX_UINT_64) @with_all_phases @spec_state_test def test_balances(spec, state): assert spec.MAX_EFFECTIVE_BALANCE % spec.EFFECTIVE_BALANCE_INCREMENT == 0 check_bound(spec.MIN_DEPOSIT_AMOUNT, 1, MAX_UINT_64) check_bound(spec.MAX_EFFECTIVE_BALANCE, spec.MIN_DEPOSIT_AMOUNT, MAX_UINT_64) check_bound(spec.MAX_EFFECTIVE_BALANCE, spec.EFFECTIVE_BALANCE_INCREMENT, MAX_UINT_64) @with_all_phases @spec_state_test def test_hysteresis_quotient(spec, state): check_bound(spec.HYSTERESIS_QUOTIENT, 1, MAX_UINT_64) check_bound(spec.HYSTERESIS_DOWNWARD_MULTIPLIER, 1, spec.HYSTERESIS_QUOTIENT) check_bound(spec.HYSTERESIS_UPWARD_MULTIPLIER, spec.HYSTERESIS_QUOTIENT, MAX_UINT_64) @with_all_phases @spec_state_test def test_incentives(spec, state): # Ensure no ETH is minted in slash_validator if is_post_bellatrix(spec): assert spec.MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX <= 
spec.WHISTLEBLOWER_REWARD_QUOTIENT elif is_post_altair(spec): assert spec.MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR <= spec.WHISTLEBLOWER_REWARD_QUOTIENT else: assert spec.MIN_SLASHING_PENALTY_QUOTIENT <= spec.WHISTLEBLOWER_REWARD_QUOTIENT @with_all_phases @spec_state_test def test_time(spec, state): assert spec.SLOTS_PER_EPOCH <= spec.SLOTS_PER_HISTORICAL_ROOT assert spec.MIN_SEED_LOOKAHEAD < spec.MAX_SEED_LOOKAHEAD assert spec.SLOTS_PER_HISTORICAL_ROOT % spec.SLOTS_PER_EPOCH == 0 check_bound(spec.SLOTS_PER_HISTORICAL_ROOT, spec.SLOTS_PER_EPOCH, MAX_UINT_64) check_bound(spec.MIN_ATTESTATION_INCLUSION_DELAY, 1, spec.SLOTS_PER_EPOCH) @with_all_phases @spec_state_test def test_networking(spec, state): assert spec.config.MIN_EPOCHS_FOR_BLOCK_REQUESTS == ( spec.config.MIN_VALIDATOR_WITHDRAWABILITY_DELAY + spec.config.CHURN_LIMIT_QUOTIENT // 2 ) assert spec.config.ATTESTATION_SUBNET_PREFIX_BITS == ( spec.ceillog2(spec.config.ATTESTATION_SUBNET_COUNT) + spec.config.ATTESTATION_SUBNET_EXTRA_BITS ) assert spec.config.SUBNETS_PER_NODE <= spec.config.ATTESTATION_SUBNET_COUNT node_id_length = spec.NodeID(1).type_byte_length() # in bytes assert node_id_length * 8 == spec.NODE_ID_BITS # in bits @with_all_phases @spec_state_test def test_fork_choice(spec, state): assert spec.INTERVALS_PER_SLOT < spec.config.SECONDS_PER_SLOT assert spec.config.PROPOSER_SCORE_BOOST <= 100
3,590
37.202128
104
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/unittests/__init__.py
0
0
0
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/unittests/validator/test_validator_unittest.py
import random from eth2spec.test.context import ( single_phase, spec_state_test, spec_test, always_bls, with_phases, with_all_phases, ) from eth2spec.test.helpers.constants import PHASE0 from eth2spec.test.helpers.attestations import build_attestation_data, get_valid_attestation from eth2spec.test.helpers.block import build_empty_block from eth2spec.test.helpers.deposits import prepare_state_and_deposit from eth2spec.test.helpers.keys import privkeys, pubkeys from eth2spec.test.helpers.state import next_epoch from eth2spec.utils import bls from eth2spec.utils.ssz.ssz_typing import Bitlist def run_get_signature_test(spec, state, obj, domain, get_signature_fn, privkey, pubkey, signing_ssz_object=None): if signing_ssz_object is None: signing_ssz_object = obj signature = get_signature_fn(state, obj, privkey) signing_root = spec.compute_signing_root(signing_ssz_object, domain) assert bls.Verify(pubkey, signing_root, signature) def run_get_committee_assignment(spec, state, epoch, validator_index, valid=True): try: assignment = spec.get_committee_assignment(state, epoch, validator_index) committee, committee_index, slot = assignment assert spec.compute_epoch_at_slot(slot) == epoch assert committee == spec.get_beacon_committee(state, slot, committee_index) assert committee_index < spec.get_committee_count_per_slot(state, epoch) assert validator_index in committee assert valid except AssertionError: assert not valid else: assert valid def run_is_candidate_block(spec, eth1_block, period_start, success=True): assert success == spec.is_candidate_block(eth1_block, period_start) def get_min_new_period_epochs(spec): return ( (spec.config.SECONDS_PER_ETH1_BLOCK * spec.config.ETH1_FOLLOW_DISTANCE * 2) # to seconds // spec.config.SECONDS_PER_SLOT // spec.SLOTS_PER_EPOCH ) def get_mock_aggregate(spec): return spec.Attestation( data=spec.AttestationData( slot=10, ) ) # # Becoming a validator # @with_all_phases @spec_state_test def test_check_if_validator_active(spec, state): 
active_validator_index = len(state.validators) - 1 assert spec.check_if_validator_active(state, active_validator_index) new_validator_index = len(state.validators) amount = spec.MAX_EFFECTIVE_BALANCE deposit = prepare_state_and_deposit(spec, state, new_validator_index, amount, signed=True) spec.process_deposit(state, deposit) assert not spec.check_if_validator_active(state, new_validator_index) # # Validator assignments # @with_all_phases @spec_state_test def test_get_committee_assignment_current_epoch(spec, state): epoch = spec.get_current_epoch(state) validator_index = len(state.validators) - 1 run_get_committee_assignment(spec, state, epoch, validator_index, valid=True) @with_all_phases @spec_state_test def test_get_committee_assignment_next_epoch(spec, state): epoch = spec.get_current_epoch(state) + 1 validator_index = len(state.validators) - 1 run_get_committee_assignment(spec, state, epoch, validator_index, valid=True) @with_all_phases @spec_state_test def test_get_committee_assignment_out_bound_epoch(spec, state): epoch = spec.get_current_epoch(state) + 2 validator_index = len(state.validators) - 1 run_get_committee_assignment(spec, state, epoch, validator_index, valid=False) @with_all_phases @spec_state_test def test_is_proposer(spec, state): proposer_index = spec.get_beacon_proposer_index(state) assert spec.is_proposer(state, proposer_index) proposer_index = proposer_index + 1 % len(state.validators) assert not spec.is_proposer(state, proposer_index) # # Beacon chain responsibilities # # Block proposal @with_all_phases @spec_state_test def test_get_epoch_signature(spec, state): block = spec.BeaconBlock() privkey = privkeys[0] pubkey = pubkeys[0] domain = spec.get_domain(state, spec.DOMAIN_RANDAO, spec.compute_epoch_at_slot(block.slot)) run_get_signature_test( spec=spec, state=state, obj=block, domain=domain, get_signature_fn=spec.get_epoch_signature, privkey=privkey, pubkey=pubkey, signing_ssz_object=spec.compute_epoch_at_slot(block.slot), ) 
@with_all_phases @spec_state_test def test_is_candidate_block(spec, state): distance_duration = spec.config.SECONDS_PER_ETH1_BLOCK * spec.config.ETH1_FOLLOW_DISTANCE period_start = distance_duration * 2 + 1000 run_is_candidate_block( spec, spec.Eth1Block(timestamp=period_start - distance_duration), period_start, success=True, ) run_is_candidate_block( spec, spec.Eth1Block(timestamp=period_start - distance_duration + 1), period_start, success=False, ) run_is_candidate_block( spec, spec.Eth1Block(timestamp=period_start - distance_duration * 2), period_start, success=True, ) run_is_candidate_block( spec, spec.Eth1Block(timestamp=period_start - distance_duration * 2 - 1), period_start, success=False, ) @with_all_phases @spec_state_test def test_get_eth1_vote_default_vote(spec, state): min_new_period_epochs = get_min_new_period_epochs(spec) for _ in range(min_new_period_epochs): next_epoch(spec, state) state.eth1_data_votes = () eth1_chain = [] eth1_data = spec.get_eth1_vote(state, eth1_chain) assert eth1_data == state.eth1_data @with_all_phases @spec_state_test def test_get_eth1_vote_consensus_vote(spec, state): min_new_period_epochs = get_min_new_period_epochs(spec) for _ in range(min_new_period_epochs + 2): next_epoch(spec, state) period_start = spec.voting_period_start_time(state) votes_length = spec.get_current_epoch(state) % spec.EPOCHS_PER_ETH1_VOTING_PERIOD assert votes_length >= 3 # We need to have the majority vote state.eth1_data_votes = () block_1 = spec.Eth1Block( timestamp=period_start - spec.config.SECONDS_PER_ETH1_BLOCK * spec.config.ETH1_FOLLOW_DISTANCE - 1, deposit_count=state.eth1_data.deposit_count, deposit_root=b'\x04' * 32, ) block_2 = spec.Eth1Block( timestamp=period_start - spec.config.SECONDS_PER_ETH1_BLOCK * spec.config.ETH1_FOLLOW_DISTANCE, deposit_count=state.eth1_data.deposit_count + 1, deposit_root=b'\x05' * 32, ) eth1_chain = [block_1, block_2] eth1_data_votes = [] # Only the first vote is for block_1 
eth1_data_votes.append(spec.get_eth1_data(block_1)) # Other votes are for block_2 for _ in range(votes_length - 1): eth1_data_votes.append(spec.get_eth1_data(block_2)) state.eth1_data_votes = eth1_data_votes eth1_data = spec.get_eth1_vote(state, eth1_chain) assert eth1_data.block_hash == block_2.hash_tree_root() @with_all_phases @spec_state_test def test_get_eth1_vote_tie(spec, state): min_new_period_epochs = get_min_new_period_epochs(spec) for _ in range(min_new_period_epochs + 1): next_epoch(spec, state) period_start = spec.voting_period_start_time(state) votes_length = spec.get_current_epoch(state) % spec.EPOCHS_PER_ETH1_VOTING_PERIOD assert votes_length > 0 and votes_length % 2 == 0 state.eth1_data_votes = () block_1 = spec.Eth1Block( timestamp=period_start - spec.config.SECONDS_PER_ETH1_BLOCK * spec.config.ETH1_FOLLOW_DISTANCE - 1, deposit_count=state.eth1_data.deposit_count, deposit_root=b'\x04' * 32, ) block_2 = spec.Eth1Block( timestamp=period_start - spec.config.SECONDS_PER_ETH1_BLOCK * spec.config.ETH1_FOLLOW_DISTANCE, deposit_count=state.eth1_data.deposit_count + 1, deposit_root=b'\x05' * 32, ) eth1_chain = [block_1, block_2] eth1_data_votes = [] # Half votes are for block_1, another half votes are for block_2 for i in range(votes_length): if i % 2 == 0: block = block_1 else: block = block_2 eth1_data_votes.append(spec.get_eth1_data(block)) state.eth1_data_votes = eth1_data_votes eth1_data = spec.get_eth1_vote(state, eth1_chain) # Tiebreak by smallest distance -> eth1_chain[0] assert eth1_data.block_hash == eth1_chain[0].hash_tree_root() @with_all_phases @spec_state_test def test_get_eth1_vote_chain_in_past(spec, state): min_new_period_epochs = get_min_new_period_epochs(spec) for _ in range(min_new_period_epochs + 1): next_epoch(spec, state) period_start = spec.voting_period_start_time(state) votes_length = spec.get_current_epoch(state) % spec.EPOCHS_PER_ETH1_VOTING_PERIOD assert votes_length > 0 and votes_length % 2 == 0 state.eth1_data_votes = () 
block_1 = spec.Eth1Block( timestamp=period_start - spec.config.SECONDS_PER_ETH1_BLOCK * spec.config.ETH1_FOLLOW_DISTANCE, deposit_count=state.eth1_data.deposit_count - 1, # Chain prior to current eth1data deposit_root=b'\x42' * 32, ) eth1_chain = [block_1] eth1_data_votes = [] state.eth1_data_votes = eth1_data_votes eth1_data = spec.get_eth1_vote(state, eth1_chain) # Should be default vote assert eth1_data == state.eth1_data @with_all_phases @spec_state_test def test_compute_new_state_root(spec, state): pre_state = state.copy() post_state = state.copy() block = build_empty_block(spec, state, state.slot + 1) state_root = spec.compute_new_state_root(state, block) assert state_root != pre_state.hash_tree_root() assert state == pre_state # dumb verification spec.process_slots(post_state, block.slot) spec.process_block(post_state, block) assert state_root == post_state.hash_tree_root() @with_all_phases @spec_state_test @always_bls def test_get_block_signature(spec, state): privkey = privkeys[0] pubkey = pubkeys[0] block = build_empty_block(spec, state) domain = spec.get_domain(state, spec.DOMAIN_BEACON_PROPOSER, spec.compute_epoch_at_slot(block.slot)) run_get_signature_test( spec=spec, state=state, obj=block, domain=domain, get_signature_fn=spec.get_block_signature, privkey=privkey, pubkey=pubkey, ) @with_all_phases @spec_state_test def test_compute_fork_digest(spec, state): actual_fork_digest = spec.compute_fork_digest(state.fork.current_version, state.genesis_validators_root) expected_fork_data_root = spec.hash_tree_root( spec.ForkData(current_version=state.fork.current_version, genesis_validators_root=state.genesis_validators_root)) expected_fork_digest = spec.ForkDigest(expected_fork_data_root[:4]) assert actual_fork_digest == expected_fork_digest # Attesting @with_all_phases @spec_state_test @always_bls def test_get_attestation_signature_phase0(spec, state): privkey = privkeys[0] pubkey = pubkeys[0] attestation = get_valid_attestation(spec, state, signed=False) 
domain = spec.get_domain(state, spec.DOMAIN_BEACON_ATTESTER, attestation.data.target.epoch) run_get_signature_test( spec=spec, state=state, obj=attestation.data, domain=domain, get_signature_fn=spec.get_attestation_signature, privkey=privkey, pubkey=pubkey, ) @with_all_phases @spec_state_test def test_compute_subnet_for_attestation(spec, state): for committee_idx in range(spec.MAX_COMMITTEES_PER_SLOT): for slot in range(state.slot, state.slot + spec.SLOTS_PER_EPOCH): committees_per_slot = spec.get_committee_count_per_slot(state, spec.compute_epoch_at_slot(slot)) actual_subnet_id = spec.compute_subnet_for_attestation(committees_per_slot, slot, committee_idx) slots_since_epoch_start = slot % spec.SLOTS_PER_EPOCH committees_since_epoch_start = committees_per_slot * slots_since_epoch_start expected_subnet_id = (committees_since_epoch_start + committee_idx) % spec.config.ATTESTATION_SUBNET_COUNT assert actual_subnet_id == expected_subnet_id # Attestation aggregation @with_all_phases @spec_state_test @always_bls def test_get_slot_signature(spec, state): privkey = privkeys[0] pubkey = pubkeys[0] slot = spec.Slot(10) domain = spec.get_domain(state, spec.DOMAIN_SELECTION_PROOF, spec.compute_epoch_at_slot(slot)) run_get_signature_test( spec=spec, state=state, obj=slot, domain=domain, get_signature_fn=spec.get_slot_signature, privkey=privkey, pubkey=pubkey, ) @with_all_phases @spec_state_test @always_bls def test_is_aggregator(spec, state): # TODO: we can test the probabilistic result against `TARGET_AGGREGATORS_PER_COMMITTEE` # if we have more validators and larger committee size slot = state.slot committee_index = 0 has_aggregator = False beacon_committee = spec.get_beacon_committee(state, slot, committee_index) for validator_index in beacon_committee: privkey = privkeys[validator_index] slot_signature = spec.get_slot_signature(state, slot, privkey) if spec.is_aggregator(state, slot, committee_index, slot_signature): has_aggregator = True break assert has_aggregator 
@with_phases([PHASE0]) @spec_state_test @always_bls def test_get_aggregate_signature(spec, state): attestations = [] attesting_pubkeys = [] slot = state.slot committee_index = 0 attestation_data = build_attestation_data(spec, state, slot=slot, index=committee_index) beacon_committee = spec.get_beacon_committee( state, attestation_data.slot, attestation_data.index, ) committee_size = len(beacon_committee) aggregation_bits = Bitlist[spec.MAX_VALIDATORS_PER_COMMITTEE](*([0] * committee_size)) for i, validator_index in enumerate(beacon_committee): bits = aggregation_bits.copy() bits[i] = True attestations.append( spec.Attestation( data=attestation_data, aggregation_bits=bits, signature=spec.get_attestation_signature(state, attestation_data, privkeys[validator_index]), ) ) attesting_pubkeys.append(state.validators[validator_index].pubkey) assert len(attestations) > 0 signature = spec.get_aggregate_signature(attestations) domain = spec.get_domain(state, spec.DOMAIN_BEACON_ATTESTER, attestation_data.target.epoch) signing_root = spec.compute_signing_root(attestation_data, domain) assert bls.FastAggregateVerify(attesting_pubkeys, signing_root, signature) @with_all_phases @spec_state_test @always_bls def test_get_aggregate_and_proof(spec, state): privkey = privkeys[0] aggregator_index = spec.ValidatorIndex(10) aggregate = get_mock_aggregate(spec) aggregate_and_proof = spec.get_aggregate_and_proof(state, aggregator_index, aggregate, privkey) assert aggregate_and_proof.aggregator_index == aggregator_index assert aggregate_and_proof.aggregate == aggregate assert aggregate_and_proof.selection_proof == spec.get_slot_signature(state, aggregate.data.slot, privkey) @with_all_phases @spec_state_test @always_bls def test_get_aggregate_and_proof_signature(spec, state): privkey = privkeys[0] pubkey = pubkeys[0] aggregate = get_mock_aggregate(spec) aggregate_and_proof = spec.get_aggregate_and_proof(state, spec.ValidatorIndex(1), aggregate, privkey) domain = spec.get_domain(state, 
spec.DOMAIN_AGGREGATE_AND_PROOF, spec.compute_epoch_at_slot(aggregate.data.slot)) run_get_signature_test( spec=spec, state=state, obj=aggregate_and_proof, domain=domain, get_signature_fn=spec.get_aggregate_and_proof_signature, privkey=privkey, pubkey=pubkey, ) def run_compute_subscribed_subnets_arguments(spec, rng=random.Random(1111)): node_id = rng.randint(0, 2**40 - 1) # try VALIDATOR_REGISTRY_LIMIT epoch = rng.randint(0, 2**64 - 1) subnets = spec.compute_subscribed_subnets(node_id, epoch) assert len(subnets) == spec.config.SUBNETS_PER_NODE @with_all_phases @spec_test @single_phase def test_compute_subscribed_subnets_random_1(spec): rng = random.Random(1111) run_compute_subscribed_subnets_arguments(spec, rng) @with_all_phases @spec_test @single_phase def test_compute_subscribed_subnets_random_2(spec): rng = random.Random(2222) run_compute_subscribed_subnets_arguments(spec, rng) @with_all_phases @spec_test @single_phase def test_compute_subscribed_subnets_random_3(spec): rng = random.Random(3333) run_compute_subscribed_subnets_arguments(spec, rng)
16,880
31.715116
118
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/unittests/validator/__init__.py
0
0
0
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_attestation.py
from eth2spec.test.context import with_all_phases, spec_state_test from eth2spec.test.helpers.block import build_empty_block_for_next_slot from eth2spec.test.helpers.attestations import get_valid_attestation, sign_attestation from eth2spec.test.helpers.constants import ALL_PHASES from eth2spec.test.helpers.state import transition_to, state_transition_and_sign_block, next_epoch, next_slot from eth2spec.test.helpers.fork_choice import get_genesis_forkchoice_store def run_on_attestation(spec, state, store, attestation, valid=True): if not valid: try: spec.on_attestation(store, attestation) except AssertionError: return else: assert False indexed_attestation = spec.get_indexed_attestation(state, attestation) spec.on_attestation(store, attestation) sample_index = indexed_attestation.attesting_indices[0] if spec.fork in ALL_PHASES: latest_message = spec.LatestMessage( epoch=attestation.data.target.epoch, root=attestation.data.beacon_block_root, ) # elif spec.fork == SHARDING: TODO: check if vote count for shard blob increased as expected assert ( store.latest_messages[sample_index] == latest_message ) @with_all_phases @spec_state_test def test_on_attestation_current_epoch(spec, state): store = get_genesis_forkchoice_store(spec, state) spec.on_tick(store, store.time + spec.config.SECONDS_PER_SLOT * 2) block = build_empty_block_for_next_slot(spec, state) signed_block = state_transition_and_sign_block(spec, state, block) # store block in store spec.on_block(store, signed_block) attestation = get_valid_attestation(spec, state, slot=block.slot, signed=True) assert attestation.data.target.epoch == spec.GENESIS_EPOCH assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == spec.GENESIS_EPOCH run_on_attestation(spec, state, store, attestation) @with_all_phases @spec_state_test def test_on_attestation_previous_epoch(spec, state): store = get_genesis_forkchoice_store(spec, state) spec.on_tick(store, store.time + spec.config.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH) block 
= build_empty_block_for_next_slot(spec, state) signed_block = state_transition_and_sign_block(spec, state, block) # store block in store spec.on_block(store, signed_block) attestation = get_valid_attestation(spec, state, slot=block.slot, signed=True) assert attestation.data.target.epoch == spec.GENESIS_EPOCH assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == spec.GENESIS_EPOCH + 1 run_on_attestation(spec, state, store, attestation) @with_all_phases @spec_state_test def test_on_attestation_past_epoch(spec, state): store = get_genesis_forkchoice_store(spec, state) # move time forward 2 epochs time = store.time + 2 * spec.config.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH spec.on_tick(store, time) # create and store block from 3 epochs ago block = build_empty_block_for_next_slot(spec, state) signed_block = state_transition_and_sign_block(spec, state, block) spec.on_block(store, signed_block) # create attestation for past block attestation = get_valid_attestation(spec, state, slot=state.slot, signed=True) assert attestation.data.target.epoch == spec.GENESIS_EPOCH assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == spec.GENESIS_EPOCH + 2 run_on_attestation(spec, state, store, attestation, False) @with_all_phases @spec_state_test def test_on_attestation_mismatched_target_and_slot(spec, state): store = get_genesis_forkchoice_store(spec, state) spec.on_tick(store, store.time + spec.config.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH) block = build_empty_block_for_next_slot(spec, state) signed_block = state_transition_and_sign_block(spec, state, block) # store block in store spec.on_block(store, signed_block) attestation = get_valid_attestation(spec, state, slot=block.slot) attestation.data.target.epoch += 1 sign_attestation(spec, state, attestation) assert attestation.data.target.epoch == spec.GENESIS_EPOCH + 1 assert spec.compute_epoch_at_slot(attestation.data.slot) == spec.GENESIS_EPOCH assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) 
== spec.GENESIS_EPOCH + 1 run_on_attestation(spec, state, store, attestation, False) @with_all_phases @spec_state_test def test_on_attestation_inconsistent_target_and_head(spec, state): store = get_genesis_forkchoice_store(spec, state) spec.on_tick(store, store.time + 2 * spec.config.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH) # Create chain 1 as empty chain between genesis and start of 1st epoch target_state_1 = state.copy() next_epoch(spec, target_state_1) # Create chain 2 with different block in chain from chain 1 from chain 1 from chain 1 from chain 1 target_state_2 = state.copy() diff_block = build_empty_block_for_next_slot(spec, target_state_2) signed_diff_block = state_transition_and_sign_block(spec, target_state_2, diff_block) spec.on_block(store, signed_diff_block) next_epoch(spec, target_state_2) next_slot(spec, target_state_2) # Create and store block new head block on target state 1 head_block = build_empty_block_for_next_slot(spec, target_state_1) signed_head_block = state_transition_and_sign_block(spec, target_state_1, head_block) spec.on_block(store, signed_head_block) # Attest to head of chain 1 attestation = get_valid_attestation(spec, target_state_1, slot=head_block.slot, signed=False) epoch = spec.compute_epoch_at_slot(attestation.data.slot) # Set attestation target to be from chain 2 attestation.data.target = spec.Checkpoint(epoch=epoch, root=spec.get_block_root(target_state_2, epoch)) sign_attestation(spec, state, attestation) assert attestation.data.target.epoch == spec.GENESIS_EPOCH + 1 assert spec.compute_epoch_at_slot(attestation.data.slot) == spec.GENESIS_EPOCH + 1 assert spec.get_block_root(target_state_1, epoch) != attestation.data.target.root run_on_attestation(spec, state, store, attestation, False) @with_all_phases @spec_state_test def test_on_attestation_target_block_not_in_store(spec, state): store = get_genesis_forkchoice_store(spec, state) time = store.time + spec.config.SECONDS_PER_SLOT * (spec.SLOTS_PER_EPOCH + 1) 
spec.on_tick(store, time) # move to immediately before next epoch to make block new target next_epoch = spec.get_current_epoch(state) + 1 transition_to(spec, state, spec.compute_start_slot_at_epoch(next_epoch) - 1) target_block = build_empty_block_for_next_slot(spec, state) state_transition_and_sign_block(spec, state, target_block) # do not add target block to store attestation = get_valid_attestation(spec, state, slot=target_block.slot, signed=True) assert attestation.data.target.root == target_block.hash_tree_root() run_on_attestation(spec, state, store, attestation, False) @with_all_phases @spec_state_test def test_on_attestation_target_checkpoint_not_in_store(spec, state): store = get_genesis_forkchoice_store(spec, state) time = store.time + spec.config.SECONDS_PER_SLOT * (spec.SLOTS_PER_EPOCH + 1) spec.on_tick(store, time) # move to immediately before next epoch to make block new target next_epoch = spec.get_current_epoch(state) + 1 transition_to(spec, state, spec.compute_start_slot_at_epoch(next_epoch) - 1) target_block = build_empty_block_for_next_slot(spec, state) signed_target_block = state_transition_and_sign_block(spec, state, target_block) # add target block to store spec.on_block(store, signed_target_block) # target checkpoint state is not yet in store attestation = get_valid_attestation(spec, state, slot=target_block.slot, signed=True) assert attestation.data.target.root == target_block.hash_tree_root() run_on_attestation(spec, state, store, attestation) @with_all_phases @spec_state_test def test_on_attestation_target_checkpoint_not_in_store_diff_slot(spec, state): store = get_genesis_forkchoice_store(spec, state) time = store.time + spec.config.SECONDS_PER_SLOT * (spec.SLOTS_PER_EPOCH + 1) spec.on_tick(store, time) # move to two slots before next epoch to make target block one before an empty slot next_epoch = spec.get_current_epoch(state) + 1 transition_to(spec, state, spec.compute_start_slot_at_epoch(next_epoch) - 2) target_block = 
build_empty_block_for_next_slot(spec, state) signed_target_block = state_transition_and_sign_block(spec, state, target_block) # add target block to store spec.on_block(store, signed_target_block) # target checkpoint state is not yet in store attestation_slot = target_block.slot + 1 transition_to(spec, state, attestation_slot) attestation = get_valid_attestation(spec, state, slot=attestation_slot, signed=True) assert attestation.data.target.root == target_block.hash_tree_root() run_on_attestation(spec, state, store, attestation) @with_all_phases @spec_state_test def test_on_attestation_beacon_block_not_in_store(spec, state): store = get_genesis_forkchoice_store(spec, state) time = store.time + spec.config.SECONDS_PER_SLOT * (spec.SLOTS_PER_EPOCH + 1) spec.on_tick(store, time) # move to immediately before next epoch to make block new target next_epoch = spec.get_current_epoch(state) + 1 transition_to(spec, state, spec.compute_start_slot_at_epoch(next_epoch) - 1) target_block = build_empty_block_for_next_slot(spec, state) signed_target_block = state_transition_and_sign_block(spec, state, target_block) # store target in store spec.on_block(store, signed_target_block) head_block = build_empty_block_for_next_slot(spec, state) state_transition_and_sign_block(spec, state, head_block) # do not add head block to store attestation = get_valid_attestation(spec, state, slot=head_block.slot, signed=True) assert attestation.data.target.root == target_block.hash_tree_root() assert attestation.data.beacon_block_root == head_block.hash_tree_root() run_on_attestation(spec, state, store, attestation, False) @with_all_phases @spec_state_test def test_on_attestation_future_epoch(spec, state): store = get_genesis_forkchoice_store(spec, state) time = store.time + 3 * spec.config.SECONDS_PER_SLOT spec.on_tick(store, time) block = build_empty_block_for_next_slot(spec, state) signed_block = state_transition_and_sign_block(spec, state, block) # store block in store spec.on_block(store, 
signed_block) # move state forward but not store next_epoch(spec, state) attestation = get_valid_attestation(spec, state, slot=state.slot, signed=True) run_on_attestation(spec, state, store, attestation, False) @with_all_phases @spec_state_test def test_on_attestation_future_block(spec, state): store = get_genesis_forkchoice_store(spec, state) time = store.time + spec.config.SECONDS_PER_SLOT * 5 spec.on_tick(store, time) block = build_empty_block_for_next_slot(spec, state) signed_block = state_transition_and_sign_block(spec, state, block) spec.on_block(store, signed_block) # attestation for slot immediately prior to the block being attested to attestation = get_valid_attestation(spec, state, slot=block.slot - 1, signed=False) attestation.data.beacon_block_root = block.hash_tree_root() sign_attestation(spec, state, attestation) run_on_attestation(spec, state, store, attestation, False) @with_all_phases @spec_state_test def test_on_attestation_same_slot(spec, state): store = get_genesis_forkchoice_store(spec, state) time = store.time + spec.config.SECONDS_PER_SLOT spec.on_tick(store, time) block = build_empty_block_for_next_slot(spec, state) signed_block = state_transition_and_sign_block(spec, state, block) spec.on_block(store, signed_block) attestation = get_valid_attestation(spec, state, slot=block.slot, signed=True) run_on_attestation(spec, state, store, attestation, False) @with_all_phases @spec_state_test def test_on_attestation_invalid_attestation(spec, state): store = get_genesis_forkchoice_store(spec, state) time = store.time + 3 * spec.config.SECONDS_PER_SLOT spec.on_tick(store, time) block = build_empty_block_for_next_slot(spec, state) signed_block = state_transition_and_sign_block(spec, state, block) spec.on_block(store, signed_block) attestation = get_valid_attestation(spec, state, slot=block.slot, signed=True) # make invalid by using an invalid committee index attestation.data.index = spec.MAX_COMMITTEES_PER_SLOT * spec.SLOTS_PER_EPOCH 
run_on_attestation(spec, state, store, attestation, False)
12,915
38.021148
109
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_tick.py
from eth2spec.test.context import with_all_phases, spec_state_test from eth2spec.test.helpers.fork_choice import get_genesis_forkchoice_store from eth2spec.test.helpers.block import ( build_empty_block_for_next_slot, ) from eth2spec.test.helpers.state import ( next_epoch, state_transition_and_sign_block, transition_to, ) def run_on_tick(spec, store, time, new_justified_checkpoint=False): previous_justified_checkpoint = store.justified_checkpoint spec.on_tick(store, time) assert store.time == time if new_justified_checkpoint: assert store.justified_checkpoint.epoch > previous_justified_checkpoint.epoch assert store.justified_checkpoint.root != previous_justified_checkpoint.root else: assert store.justified_checkpoint == previous_justified_checkpoint @with_all_phases @spec_state_test def test_basic(spec, state): store = get_genesis_forkchoice_store(spec, state) run_on_tick(spec, store, store.time + 1) """ @with_all_phases @spec_state_test def test_update_justified_single_on_store_finalized_chain(spec, state): store = get_genesis_forkchoice_store(spec, state) # Create a block at epoch 1 next_epoch(spec, state) block = build_empty_block_for_next_slot(spec, state) state_transition_and_sign_block(spec, state, block) store.blocks[block.hash_tree_root()] = block.copy() store.block_states[block.hash_tree_root()] = state.copy() parent_block = block.copy() # To make compute_slots_since_epoch_start(current_slot) == 0, transition to the end of the epoch slot = state.slot + spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH - 1 transition_to(spec, state, slot) # Create a block at the start of epoch 2 block = build_empty_block_for_next_slot(spec, state) # Mock state state.current_justified_checkpoint = spec.Checkpoint( epoch=spec.compute_epoch_at_slot(parent_block.slot), root=parent_block.hash_tree_root(), ) state_transition_and_sign_block(spec, state, block) store.blocks[block.hash_tree_root()] = block store.block_states[block.hash_tree_root()] = state run_on_tick( spec, 
store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, new_justified_checkpoint=True ) """ @with_all_phases @spec_state_test def test_update_justified_single_not_on_store_finalized_chain(spec, state): store = get_genesis_forkchoice_store(spec, state) init_state = state.copy() # Chain grows # Create a block at epoch 1 next_epoch(spec, state) block = build_empty_block_for_next_slot(spec, state) block.body.graffiti = b'\x11' * 32 state_transition_and_sign_block(spec, state, block) store.blocks[block.hash_tree_root()] = block.copy() store.block_states[block.hash_tree_root()] = state.copy() # Mock store.finalized_checkpoint store.finalized_checkpoint = spec.Checkpoint( epoch=spec.compute_epoch_at_slot(block.slot), root=block.hash_tree_root(), ) # Create a block at epoch 1 state = init_state.copy() next_epoch(spec, state) block = build_empty_block_for_next_slot(spec, state) block.body.graffiti = b'\x22' * 32 state_transition_and_sign_block(spec, state, block) store.blocks[block.hash_tree_root()] = block.copy() store.block_states[block.hash_tree_root()] = state.copy() parent_block = block.copy() # To make compute_slots_since_epoch_start(current_slot) == 0, transition to the end of the epoch slot = state.slot + spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH - 1 transition_to(spec, state, slot) # Create a block at the start of epoch 2 block = build_empty_block_for_next_slot(spec, state) # Mock state state.current_justified_checkpoint = spec.Checkpoint( epoch=spec.compute_epoch_at_slot(parent_block.slot), root=parent_block.hash_tree_root(), ) state_transition_and_sign_block(spec, state, block) store.blocks[block.hash_tree_root()] = block.copy() store.block_states[block.hash_tree_root()] = state.copy() run_on_tick( spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, )
4,249
35.016949
100
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_participation_record_updates.py
from eth2spec.test.context import spec_state_test, with_phases from eth2spec.test.helpers.constants import PHASE0 from eth2spec.test.helpers.epoch_processing import ( run_epoch_processing_with ) def run_process_participation_record_updates(spec, state): yield from run_epoch_processing_with(spec, state, 'process_participation_record_updates') @with_phases([PHASE0]) @spec_state_test def test_updated_participation_record(spec, state): state.previous_epoch_attestations = [spec.PendingAttestation(proposer_index=100)] current_epoch_attestations = [spec.PendingAttestation(proposer_index=200)] state.current_epoch_attestations = current_epoch_attestations yield from run_process_participation_record_updates(spec, state) assert state.previous_epoch_attestations == current_epoch_attestations assert state.current_epoch_attestations == []
875
37.086957
93
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_justification_and_finalization.py
from random import Random from eth2spec.test.context import spec_state_test, with_all_phases from eth2spec.test.helpers.epoch_processing import ( run_epoch_processing_with, ) from eth2spec.test.helpers.forks import is_post_altair from eth2spec.test.helpers.state import transition_to, next_epoch_via_block, next_slot from eth2spec.test.helpers.voluntary_exits import get_unslashed_exited_validators def run_process_just_and_fin(spec, state): yield from run_epoch_processing_with(spec, state, 'process_justification_and_finalization') def add_mock_attestations(spec, state, epoch, source, target, sufficient_support=False, messed_up_target=False): # we must be at the end of the epoch assert (state.slot + 1) % spec.SLOTS_PER_EPOCH == 0 previous_epoch = spec.get_previous_epoch(state) current_epoch = spec.get_current_epoch(state) if not is_post_altair(spec): if current_epoch == epoch: attestations = state.current_epoch_attestations elif previous_epoch == epoch: attestations = state.previous_epoch_attestations else: raise Exception(f"cannot include attestations in epoch ${epoch} from epoch ${current_epoch}") else: if current_epoch == epoch: epoch_participation = state.current_epoch_participation elif previous_epoch == epoch: epoch_participation = state.previous_epoch_participation else: raise Exception(f"cannot include attestations in epoch ${epoch} from epoch ${current_epoch}") total_balance = spec.get_total_active_balance(state) remaining_balance = int(total_balance * 2 // 3) # can become negative start_slot = spec.compute_start_slot_at_epoch(epoch) committees_per_slot = spec.get_committee_count_per_slot(state, epoch) for slot in range(start_slot, start_slot + spec.SLOTS_PER_EPOCH): for index in range(committees_per_slot): # Check if we already have had sufficient balance. (and undone if we don't want it). # If so, do not create more attestations. 
(we do not have empty pending attestations normally anyway) if remaining_balance < 0: return committee = spec.get_beacon_committee(state, slot, index) # Create a bitfield filled with the given count per attestation, # exactly on the right-most part of the committee field. aggregation_bits = [0] * len(committee) for v in range(len(committee) * 2 // 3 + 1): if remaining_balance > 0: remaining_balance -= int(state.validators[v].effective_balance) aggregation_bits[v] = 1 else: break # remove 1/5th of attesters so that support is insufficient if not sufficient_support: for i in range(max(len(committee) // 5, 1)): aggregation_bits[i] = 0 # Update state if not is_post_altair(spec): attestations.append(spec.PendingAttestation( aggregation_bits=aggregation_bits, data=spec.AttestationData( slot=slot, beacon_block_root=b'\xff' * 32, # irrelevant to testing source=source, target=target, index=index, ), inclusion_delay=1, )) if messed_up_target: attestations[len(attestations) - 1].data.target.root = b'\x99' * 32 else: for i, index in enumerate(committee): if aggregation_bits[i]: epoch_participation[index] |= spec.ParticipationFlags(2**spec.TIMELY_HEAD_FLAG_INDEX) epoch_participation[index] |= spec.ParticipationFlags(2**spec.TIMELY_SOURCE_FLAG_INDEX) if not messed_up_target: epoch_participation[index] |= spec.ParticipationFlags(2**spec.TIMELY_TARGET_FLAG_INDEX) def get_checkpoints(spec, epoch): c1 = None if epoch < 1 else spec.Checkpoint(epoch=epoch - 1, root=b'\xaa' * 32) c2 = None if epoch < 2 else spec.Checkpoint(epoch=epoch - 2, root=b'\xbb' * 32) c3 = None if epoch < 3 else spec.Checkpoint(epoch=epoch - 3, root=b'\xcc' * 32) c4 = None if epoch < 4 else spec.Checkpoint(epoch=epoch - 4, root=b'\xdd' * 32) c5 = None if epoch < 5 else spec.Checkpoint(epoch=epoch - 5, root=b'\xee' * 32) return c1, c2, c3, c4, c5 def put_checkpoints_in_block_roots(spec, state, checkpoints): for c in checkpoints: state.block_roots[spec.compute_start_slot_at_epoch(c.epoch) % 
spec.SLOTS_PER_HISTORICAL_ROOT] = c.root def finalize_on_234(spec, state, epoch, sufficient_support): assert epoch > 4 transition_to(spec, state, spec.SLOTS_PER_EPOCH * epoch - 1) # skip ahead to just before epoch # 43210 -- epochs ago # 3210x -- justification bitfield indices # 11*0. -- justification bitfield contents, . = this epoch, * is being justified now # checkpoints for the epochs ago: c1, c2, c3, c4, _ = get_checkpoints(spec, epoch) put_checkpoints_in_block_roots(spec, state, [c1, c2, c3, c4]) old_finalized = state.finalized_checkpoint state.previous_justified_checkpoint = c4 state.current_justified_checkpoint = c3 state.justification_bits = spec.Bitvector[spec.JUSTIFICATION_BITS_LENGTH]() state.justification_bits[1:3] = [1, 1] # mock 3rd and 4th latest epochs as justified (indices are pre-shift) # mock the 2nd latest epoch as justifiable, with 4th as source add_mock_attestations(spec, state, epoch=epoch - 2, source=c4, target=c2, sufficient_support=sufficient_support) # process! yield from run_process_just_and_fin(spec, state) assert state.previous_justified_checkpoint == c3 # changed to old current if sufficient_support: assert state.current_justified_checkpoint == c2 # changed to 2nd latest assert state.finalized_checkpoint == c4 # finalized old previous justified epoch else: assert state.current_justified_checkpoint == c3 # still old current assert state.finalized_checkpoint == old_finalized # no new finalized def finalize_on_23(spec, state, epoch, sufficient_support): assert epoch > 3 transition_to(spec, state, spec.SLOTS_PER_EPOCH * epoch - 1) # skip ahead to just before epoch # 43210 -- epochs ago # 210xx -- justification bitfield indices (pre shift) # 3210x -- justification bitfield indices (post shift) # 01*0. -- justification bitfield contents, . 
= this epoch, * is being justified now # checkpoints for the epochs ago: c1, c2, c3, _, _ = get_checkpoints(spec, epoch) put_checkpoints_in_block_roots(spec, state, [c1, c2, c3]) old_finalized = state.finalized_checkpoint state.previous_justified_checkpoint = c3 state.current_justified_checkpoint = c3 state.justification_bits = spec.Bitvector[spec.JUSTIFICATION_BITS_LENGTH]() state.justification_bits[1] = 1 # mock 3rd latest epoch as justified (index is pre-shift) # mock the 2nd latest epoch as justifiable, with 3rd as source add_mock_attestations(spec, state, epoch=epoch - 2, source=c3, target=c2, sufficient_support=sufficient_support) # process! yield from run_process_just_and_fin(spec, state) assert state.previous_justified_checkpoint == c3 # changed to old current if sufficient_support: assert state.current_justified_checkpoint == c2 # changed to 2nd latest assert state.finalized_checkpoint == c3 # finalized old previous justified epoch else: assert state.current_justified_checkpoint == c3 # still old current assert state.finalized_checkpoint == old_finalized # no new finalized def finalize_on_123(spec, state, epoch, sufficient_support): assert epoch > 5 state.slot = (spec.SLOTS_PER_EPOCH * epoch) - 1 # skip ahead to just before epoch # 43210 -- epochs ago # 210xx -- justification bitfield indices (pre shift) # 3210x -- justification bitfield indices (post shift) # 011*. -- justification bitfield contents, . 
= this epoch, * is being justified now # checkpoints for the epochs ago: c1, c2, c3, c4, c5 = get_checkpoints(spec, epoch) put_checkpoints_in_block_roots(spec, state, [c1, c2, c3, c4, c5]) old_finalized = state.finalized_checkpoint state.previous_justified_checkpoint = c5 state.current_justified_checkpoint = c3 state.justification_bits = spec.Bitvector[spec.JUSTIFICATION_BITS_LENGTH]() state.justification_bits[1] = 1 # mock 3rd latest epochs as justified (index is pre-shift) # mock the 2nd latest epoch as justifiable, with 5th as source add_mock_attestations(spec, state, epoch=epoch - 2, source=c5, target=c2, sufficient_support=sufficient_support) # mock the 1st latest epoch as justifiable, with 3rd as source add_mock_attestations(spec, state, epoch=epoch - 1, source=c3, target=c1, sufficient_support=sufficient_support) # process! yield from run_process_just_and_fin(spec, state) assert state.previous_justified_checkpoint == c3 # changed to old current if sufficient_support: assert state.current_justified_checkpoint == c1 # changed to 1st latest assert state.finalized_checkpoint == c3 # finalized old current else: assert state.current_justified_checkpoint == c3 # still old current assert state.finalized_checkpoint == old_finalized # no new finalized def finalize_on_12(spec, state, epoch, sufficient_support, messed_up_target): assert epoch > 2 transition_to(spec, state, spec.SLOTS_PER_EPOCH * epoch - 1) # skip ahead to just before epoch # 43210 -- epochs ago # 210xx -- justification bitfield indices (pre shift) # 3210x -- justification bitfield indices (post shift) # 001*. -- justification bitfield contents, . 
= this epoch, * is being justified now # checkpoints for the epochs ago: c1, c2, _, _, _ = get_checkpoints(spec, epoch) put_checkpoints_in_block_roots(spec, state, [c1, c2]) old_finalized = state.finalized_checkpoint state.previous_justified_checkpoint = c2 state.current_justified_checkpoint = c2 state.justification_bits = spec.Bitvector[spec.JUSTIFICATION_BITS_LENGTH]() state.justification_bits[0] = 1 # mock 2nd latest epoch as justified (this is pre-shift) # mock the 1st latest epoch as justifiable, with 2nd as source add_mock_attestations(spec, state, epoch=epoch - 1, source=c2, target=c1, sufficient_support=sufficient_support, messed_up_target=messed_up_target) # process! yield from run_process_just_and_fin(spec, state) assert state.previous_justified_checkpoint == c2 # changed to old current if sufficient_support and not messed_up_target: assert state.current_justified_checkpoint == c1 # changed to 1st latest assert state.finalized_checkpoint == c2 # finalized previous justified epoch else: assert state.current_justified_checkpoint == c2 # still old current assert state.finalized_checkpoint == old_finalized # no new finalized @with_all_phases @spec_state_test def test_234_ok_support(spec, state): yield from finalize_on_234(spec, state, 5, True) @with_all_phases @spec_state_test def test_234_poor_support(spec, state): yield from finalize_on_234(spec, state, 5, False) @with_all_phases @spec_state_test def test_23_ok_support(spec, state): yield from finalize_on_23(spec, state, 4, True) @with_all_phases @spec_state_test def test_23_poor_support(spec, state): yield from finalize_on_23(spec, state, 4, False) @with_all_phases @spec_state_test def test_123_ok_support(spec, state): yield from finalize_on_123(spec, state, 6, True) @with_all_phases @spec_state_test def test_123_poor_support(spec, state): yield from finalize_on_123(spec, state, 6, False) @with_all_phases @spec_state_test def test_12_ok_support(spec, state): yield from finalize_on_12(spec, state, 3, True, 
False) @with_all_phases @spec_state_test def test_12_ok_support_messed_target(spec, state): yield from finalize_on_12(spec, state, 3, True, True) @with_all_phases @spec_state_test def test_12_poor_support(spec, state): yield from finalize_on_12(spec, state, 3, False, False) @with_all_phases @spec_state_test def test_balance_threshold_with_exited_validators(spec, state): """ This test exercises a very specific failure mode where exited validators are incorrectly included in the total active balance when weighing justification. """ rng = Random(133333) # move past genesis conditions for _ in range(3): next_epoch_via_block(spec, state) # mock attestation helper requires last slot of epoch for _ in range(spec.SLOTS_PER_EPOCH - 1): next_slot(spec, state) # Step 1: Exit ~1/2 vals in current epoch epoch = spec.get_current_epoch(state) for index in spec.get_active_validator_indices(state, epoch): if rng.choice([True, False]): continue validator = state.validators[index] validator.exit_epoch = epoch validator.withdrawable_epoch = epoch + 1 validator.withdrawable_epoch = validator.exit_epoch + spec.config.MIN_VALIDATOR_WITHDRAWABILITY_DELAY exited_validators = get_unslashed_exited_validators(spec, state) assert len(exited_validators) != 0 source = state.current_justified_checkpoint target = spec.Checkpoint( epoch=epoch, root=spec.get_block_root(state, epoch) ) add_mock_attestations( spec, state, epoch, source, target, sufficient_support=False, ) if not is_post_altair(spec): current_attestations = spec.get_matching_target_attestations(state, epoch) total_active_balance = spec.get_total_active_balance(state) current_target_balance = spec.get_attesting_balance(state, current_attestations) # Check we will not justify the current checkpoint does_justify = current_target_balance * 3 >= total_active_balance * 2 assert not does_justify # Ensure we would have justified the current checkpoint w/ the exited validators current_exited_balance = spec.get_total_balance(state, 
exited_validators) does_justify = (current_target_balance + current_exited_balance) * 3 >= total_active_balance * 2 assert does_justify else: current_indices = spec.get_unslashed_participating_indices(state, spec.TIMELY_TARGET_FLAG_INDEX, epoch) total_active_balance = spec.get_total_active_balance(state) current_target_balance = spec.get_total_balance(state, current_indices) # Check we will not justify the current checkpoint does_justify = current_target_balance * 3 >= total_active_balance * 2 assert not does_justify # Ensure we would have justified the current checkpoint w/ the exited validators current_exited_balance = spec.get_total_balance(state, exited_validators) does_justify = (current_target_balance + current_exited_balance) * 3 >= total_active_balance * 2 assert does_justify yield from run_process_just_and_fin(spec, state) assert state.current_justified_checkpoint.epoch != epoch
16,059
41.37467
115
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_randao_mixes_reset.py
from eth2spec.test.context import spec_state_test, with_all_phases from eth2spec.test.helpers.epoch_processing import ( run_epoch_processing_with ) def run_process_randao_mixes_reset(spec, state): yield from run_epoch_processing_with(spec, state, 'process_randao_mixes_reset') @with_all_phases @spec_state_test def test_updated_randao_mixes(spec, state): next_epoch = spec.get_current_epoch(state) + 1 state.randao_mixes[next_epoch % spec.EPOCHS_PER_HISTORICAL_VECTOR] = b'\x56' * 32 yield from run_process_randao_mixes_reset(spec, state) assert state.randao_mixes[next_epoch % spec.EPOCHS_PER_HISTORICAL_VECTOR] == spec.get_randao_mix( state, spec.get_current_epoch(state) )
718
31.681818
101
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_registry_updates.py
from eth2spec.test.helpers.deposits import mock_deposit from eth2spec.test.helpers.state import next_epoch, next_slots from eth2spec.test.helpers.constants import MINIMAL from eth2spec.test.context import ( spec_test, spec_state_test, with_all_phases, single_phase, with_custom_state, with_presets, scaled_churn_balances, ) from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with def run_process_registry_updates(spec, state): yield from run_epoch_processing_with(spec, state, 'process_registry_updates') @with_all_phases @spec_state_test def test_add_to_activation_queue(spec, state): # move past first two irregular epochs wrt finality next_epoch(spec, state) next_epoch(spec, state) index = 0 mock_deposit(spec, state, index) yield from run_process_registry_updates(spec, state) # validator moved into queue assert state.validators[index].activation_eligibility_epoch != spec.FAR_FUTURE_EPOCH assert state.validators[index].activation_epoch == spec.FAR_FUTURE_EPOCH assert not spec.is_active_validator(state.validators[index], spec.get_current_epoch(state)) @with_all_phases @spec_state_test def test_activation_queue_to_activated_if_finalized(spec, state): # move past first two irregular epochs wrt finality next_epoch(spec, state) next_epoch(spec, state) index = 0 mock_deposit(spec, state, index) # mock validator as having been in queue since latest finalized state.finalized_checkpoint.epoch = spec.get_current_epoch(state) - 1 state.validators[index].activation_eligibility_epoch = state.finalized_checkpoint.epoch assert not spec.is_active_validator(state.validators[index], spec.get_current_epoch(state)) yield from run_process_registry_updates(spec, state) # validator activated for future epoch assert state.validators[index].activation_eligibility_epoch != spec.FAR_FUTURE_EPOCH assert state.validators[index].activation_epoch != spec.FAR_FUTURE_EPOCH assert not spec.is_active_validator(state.validators[index], spec.get_current_epoch(state)) assert 
spec.is_active_validator( state.validators[index], spec.compute_activation_exit_epoch(spec.get_current_epoch(state)) ) @with_all_phases @spec_state_test def test_activation_queue_no_activation_no_finality(spec, state): # move past first two irregular epochs wrt finality next_epoch(spec, state) next_epoch(spec, state) index = 0 mock_deposit(spec, state, index) # mock validator as having been in queue only after latest finalized state.finalized_checkpoint.epoch = spec.get_current_epoch(state) - 1 state.validators[index].activation_eligibility_epoch = state.finalized_checkpoint.epoch + 1 assert not spec.is_active_validator(state.validators[index], spec.get_current_epoch(state)) yield from run_process_registry_updates(spec, state) # validator not activated assert state.validators[index].activation_eligibility_epoch != spec.FAR_FUTURE_EPOCH assert state.validators[index].activation_epoch == spec.FAR_FUTURE_EPOCH @with_all_phases @spec_state_test def test_activation_queue_sorting(spec, state): churn_limit = spec.get_validator_churn_limit(state) # try to activate more than the per-epoch churn limit mock_activations = churn_limit * 2 epoch = spec.get_current_epoch(state) for i in range(mock_activations): mock_deposit(spec, state, i) state.validators[i].activation_eligibility_epoch = epoch + 1 # give the last priority over the others state.validators[mock_activations - 1].activation_eligibility_epoch = epoch # move state forward and finalize to allow for activations next_slots(spec, state, spec.SLOTS_PER_EPOCH * 3) state.finalized_checkpoint.epoch = epoch + 1 yield from run_process_registry_updates(spec, state) # the first got in as second assert state.validators[0].activation_epoch != spec.FAR_FUTURE_EPOCH # the prioritized got in as first assert state.validators[mock_activations - 1].activation_epoch != spec.FAR_FUTURE_EPOCH # the second last is at the end of the queue, and did not make the churn, # hence is not assigned an activation_epoch yet. 
assert state.validators[mock_activations - 2].activation_epoch == spec.FAR_FUTURE_EPOCH # the one at churn_limit did not make it, it was out-prioritized assert state.validators[churn_limit].activation_epoch == spec.FAR_FUTURE_EPOCH # but the the one in front of the above did assert state.validators[churn_limit - 1].activation_epoch != spec.FAR_FUTURE_EPOCH def run_test_activation_queue_efficiency(spec, state): churn_limit = spec.get_validator_churn_limit(state) mock_activations = churn_limit * 2 epoch = spec.get_current_epoch(state) for i in range(mock_activations): mock_deposit(spec, state, i) state.validators[i].activation_eligibility_epoch = epoch + 1 # move state forward and finalize to allow for activations next_slots(spec, state, spec.SLOTS_PER_EPOCH * 3) state.finalized_checkpoint.epoch = epoch + 1 # Churn limit could have changed given the active vals removed via `mock_deposit` churn_limit_0 = spec.get_validator_churn_limit(state) # Run first registry update. Do not yield test vectors for _ in run_process_registry_updates(spec, state): pass # Half should churn in first run of registry update for i in range(mock_activations): if i < churn_limit_0: assert state.validators[i].activation_epoch < spec.FAR_FUTURE_EPOCH else: assert state.validators[i].activation_epoch == spec.FAR_FUTURE_EPOCH # Second half should churn in second run of registry update churn_limit_1 = spec.get_validator_churn_limit(state) yield from run_process_registry_updates(spec, state) for i in range(churn_limit_0 + churn_limit_1): assert state.validators[i].activation_epoch < spec.FAR_FUTURE_EPOCH @with_all_phases @spec_state_test def test_activation_queue_efficiency_min(spec, state): assert spec.get_validator_churn_limit(state) == spec.config.MIN_PER_EPOCH_CHURN_LIMIT yield from run_test_activation_queue_efficiency(spec, state) @with_all_phases @with_presets([MINIMAL], reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated") @spec_test 
@with_custom_state(balances_fn=scaled_churn_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) @single_phase def test_activation_queue_efficiency_scaled(spec, state): assert spec.get_validator_churn_limit(state) > spec.config.MIN_PER_EPOCH_CHURN_LIMIT yield from run_test_activation_queue_efficiency(spec, state) @with_all_phases @spec_state_test def test_ejection(spec, state): index = 0 assert spec.is_active_validator(state.validators[index], spec.get_current_epoch(state)) assert state.validators[index].exit_epoch == spec.FAR_FUTURE_EPOCH # Mock an ejection state.validators[index].effective_balance = spec.config.EJECTION_BALANCE yield from run_process_registry_updates(spec, state) assert state.validators[index].exit_epoch != spec.FAR_FUTURE_EPOCH assert spec.is_active_validator(state.validators[index], spec.get_current_epoch(state)) assert not spec.is_active_validator( state.validators[index], spec.compute_activation_exit_epoch(spec.get_current_epoch(state)) ) def run_test_ejection_past_churn_limit(spec, state): churn_limit = spec.get_validator_churn_limit(state) # try to eject more than per-epoch churn limit mock_ejections = churn_limit * 3 for i in range(mock_ejections): state.validators[i].effective_balance = spec.config.EJECTION_BALANCE expected_ejection_epoch = spec.compute_activation_exit_epoch(spec.get_current_epoch(state)) yield from run_process_registry_updates(spec, state) for i in range(mock_ejections): # first third ejected in normal speed if i < mock_ejections // 3: assert state.validators[i].exit_epoch == expected_ejection_epoch # second third gets delayed by 1 epoch elif mock_ejections // 3 <= i < mock_ejections * 2 // 3: assert state.validators[i].exit_epoch == expected_ejection_epoch + 1 # final third gets delayed by 2 epochs else: assert state.validators[i].exit_epoch == expected_ejection_epoch + 2 @with_all_phases @spec_state_test def test_ejection_past_churn_limit_min(spec, state): assert spec.get_validator_churn_limit(state) == 
spec.config.MIN_PER_EPOCH_CHURN_LIMIT yield from run_test_ejection_past_churn_limit(spec, state) @with_all_phases @with_presets([MINIMAL], reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated") @spec_test @with_custom_state(balances_fn=scaled_churn_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) @single_phase def test_ejection_past_churn_limit_scaled(spec, state): assert spec.get_validator_churn_limit(state) > spec.config.MIN_PER_EPOCH_CHURN_LIMIT yield from run_test_ejection_past_churn_limit(spec, state) def run_test_activation_queue_activation_and_ejection(spec, state, num_per_status): # move past first two irregular epochs wrt finality next_epoch(spec, state) next_epoch(spec, state) # ready for entrance into activation queue activation_queue_start_index = 0 activation_queue_indices = list(range(activation_queue_start_index, activation_queue_start_index + num_per_status)) for validator_index in activation_queue_indices: mock_deposit(spec, state, validator_index) # ready for activation state.finalized_checkpoint.epoch = spec.get_current_epoch(state) - 1 activation_start_index = num_per_status activation_indices = list(range(activation_start_index, activation_start_index + num_per_status)) for validator_index in activation_indices: mock_deposit(spec, state, validator_index) state.validators[validator_index].activation_eligibility_epoch = state.finalized_checkpoint.epoch # ready for ejection ejection_start_index = num_per_status * 2 ejection_indices = list(range(ejection_start_index, ejection_start_index + num_per_status)) for validator_index in ejection_indices: state.validators[validator_index].effective_balance = spec.config.EJECTION_BALANCE churn_limit = spec.get_validator_churn_limit(state) yield from run_process_registry_updates(spec, state) # all eligible validators moved into activation queue for validator_index in activation_queue_indices: validator = state.validators[validator_index] assert 
validator.activation_eligibility_epoch != spec.FAR_FUTURE_EPOCH assert validator.activation_epoch == spec.FAR_FUTURE_EPOCH assert not spec.is_active_validator(validator, spec.get_current_epoch(state)) # up to churn limit validators get activated for future epoch from the queue for validator_index in activation_indices[:churn_limit]: validator = state.validators[validator_index] assert validator.activation_eligibility_epoch != spec.FAR_FUTURE_EPOCH assert validator.activation_epoch != spec.FAR_FUTURE_EPOCH assert not spec.is_active_validator(validator, spec.get_current_epoch(state)) assert spec.is_active_validator( validator, spec.compute_activation_exit_epoch(spec.get_current_epoch(state)) ) # any remaining validators do not exit the activation queue for validator_index in activation_indices[churn_limit:]: validator = state.validators[validator_index] assert validator.activation_eligibility_epoch != spec.FAR_FUTURE_EPOCH assert validator.activation_epoch == spec.FAR_FUTURE_EPOCH # all ejection balance validators ejected for a future epoch for i, validator_index in enumerate(ejection_indices): validator = state.validators[validator_index] assert validator.exit_epoch != spec.FAR_FUTURE_EPOCH assert spec.is_active_validator(validator, spec.get_current_epoch(state)) queue_offset = i // churn_limit assert not spec.is_active_validator( validator, spec.compute_activation_exit_epoch(spec.get_current_epoch(state)) + queue_offset ) @with_all_phases @spec_state_test def test_activation_queue_activation_and_ejection__1(spec, state): yield from run_test_activation_queue_activation_and_ejection(spec, state, 1) @with_all_phases @spec_state_test def test_activation_queue_activation_and_ejection__churn_limit(spec, state): churn_limit = spec.get_validator_churn_limit(state) assert churn_limit == spec.config.MIN_PER_EPOCH_CHURN_LIMIT yield from run_test_activation_queue_activation_and_ejection(spec, state, churn_limit) @with_all_phases @spec_state_test def 
test_activation_queue_activation_and_ejection__exceed_churn_limit(spec, state): churn_limit = spec.get_validator_churn_limit(state) assert churn_limit == spec.config.MIN_PER_EPOCH_CHURN_LIMIT yield from run_test_activation_queue_activation_and_ejection(spec, state, churn_limit + 1) @with_all_phases @with_presets([MINIMAL], reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated") @spec_test @with_custom_state(balances_fn=scaled_churn_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) @single_phase def test_activation_queue_activation_and_ejection__scaled_churn_limit(spec, state): churn_limit = spec.get_validator_churn_limit(state) assert churn_limit > spec.config.MIN_PER_EPOCH_CHURN_LIMIT yield from run_test_activation_queue_activation_and_ejection(spec, state, churn_limit) @with_all_phases @with_presets([MINIMAL], reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated") @spec_test @with_custom_state(balances_fn=scaled_churn_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) @single_phase def test_activation_queue_activation_and_ejection__exceed_scaled_churn_limit(spec, state): churn_limit = spec.get_validator_churn_limit(state) assert churn_limit > spec.config.MIN_PER_EPOCH_CHURN_LIMIT yield from run_test_activation_queue_activation_and_ejection(spec, state, churn_limit * 2) @with_all_phases @spec_state_test def test_invalid_large_withdrawable_epoch(spec, state): """ This test forces a validator into a withdrawable epoch that overflows the epoch (uint64) type. To do this we need two validators, one validator that already has an exit epoch and another with a low effective balance. When calculating the withdrawable epoch for the second validator, it will use the greatest exit epoch of all of the validators. 
If the first validator is given an exit epoch between (FAR_FUTURE_EPOCH-MIN_VALIDATOR_WITHDRAWABILITY_DELAY+1) and (FAR_FUTURE_EPOCH-1), it will cause an overflow. """ assert spec.is_active_validator(state.validators[0], spec.get_current_epoch(state)) assert spec.is_active_validator(state.validators[1], spec.get_current_epoch(state)) state.validators[0].exit_epoch = spec.FAR_FUTURE_EPOCH - 1 state.validators[1].effective_balance = spec.config.EJECTION_BALANCE try: yield from run_process_registry_updates(spec, state) except ValueError: yield 'post', None return raise AssertionError('expected ValueError')
15,650
40.959786
119
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_effective_balance_updates.py
from eth2spec.test.context import spec_state_test, with_all_phases from eth2spec.test.helpers.epoch_processing import run_epoch_processing_to @with_all_phases @spec_state_test def test_effective_balance_hysteresis(spec, state): # Prepare state up to the final-updates. # Then overwrite the balances, we only want to focus to be on the hysteresis based changes. run_epoch_processing_to(spec, state, 'process_effective_balance_updates') # Set some edge cases for balances max = spec.MAX_EFFECTIVE_BALANCE min = spec.config.EJECTION_BALANCE inc = spec.EFFECTIVE_BALANCE_INCREMENT div = spec.HYSTERESIS_QUOTIENT hys_inc = inc // div down = spec.HYSTERESIS_DOWNWARD_MULTIPLIER up = spec.HYSTERESIS_UPWARD_MULTIPLIER cases = [ (max, max, max, "as-is"), (max, max - 1, max, "round up"), (max, max + 1, max, "round down"), (max, max - down * hys_inc, max, "lower balance, but not low enough"), (max, max - down * hys_inc - 1, max - inc, "lower balance, step down"), (max, max + (up * hys_inc) + 1, max, "already at max, as is"), (max, max - inc, max - inc, "exactly 1 step lower"), (max, max - inc - 1, max - (2 * inc), "past 1 step lower, double step"), (max, max - inc + 1, max - inc, "close to 1 step lower"), (min, min + (hys_inc * up), min, "bigger balance, but not high enough"), (min, min + (hys_inc * up) + 1, min + inc, "bigger balance, high enough, but small step"), (min, min + (hys_inc * div * 2) - 1, min + inc, "bigger balance, high enough, close to double step"), (min, min + (hys_inc * div * 2), min + (2 * inc), "exact two step balance increment"), (min, min + (hys_inc * div * 2) + 1, min + (2 * inc), "over two steps, round down"), ] current_epoch = spec.get_current_epoch(state) for i, (pre_eff, bal, _, _) in enumerate(cases): assert spec.is_active_validator(state.validators[i], current_epoch) state.validators[i].effective_balance = pre_eff state.balances[i] = bal yield 'pre', state spec.process_effective_balance_updates(state) yield 'post', state for i, (_, _, post_eff, name) in 
enumerate(cases): assert state.validators[i].effective_balance == post_eff, name
2,303
48.021277
109
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_slashings_reset.py
from eth2spec.test.context import spec_state_test, with_all_phases from eth2spec.test.helpers.epoch_processing import ( run_epoch_processing_with ) def run_process_slashings_reset(spec, state): yield from run_epoch_processing_with(spec, state, 'process_slashings_reset') @with_all_phases @spec_state_test def test_flush_slashings(spec, state): next_epoch = spec.get_current_epoch(state) + 1 state.slashings[next_epoch % spec.EPOCHS_PER_SLASHINGS_VECTOR] = 100 assert state.slashings[next_epoch % spec.EPOCHS_PER_SLASHINGS_VECTOR] != 0 yield from run_process_slashings_reset(spec, state) assert state.slashings[next_epoch % spec.EPOCHS_PER_SLASHINGS_VECTOR] == 0
696
32.190476
80
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_historical_roots_update.py
from eth2spec.test.context import ( PHASE0, ALTAIR, BELLATRIX, spec_state_test, with_phases, ) from eth2spec.test.helpers.epoch_processing import ( run_epoch_processing_with ) def run_process_historical_roots_update(spec, state): yield from run_epoch_processing_with(spec, state, 'process_historical_roots_update') @with_phases([PHASE0, ALTAIR, BELLATRIX]) @spec_state_test def test_historical_root_accumulator(spec, state): # skip ahead to near the end of the historical roots period (excl block before epoch processing) state.slot = spec.SLOTS_PER_HISTORICAL_ROOT - 1 history_len = len(state.historical_roots) yield from run_process_historical_roots_update(spec, state) assert len(state.historical_roots) == history_len + 1
772
29.92
100
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_rewards_and_penalties.py
from eth2spec.test.context import ( spec_state_test, spec_test, with_all_phases, single_phase, with_phases, PHASE0, with_custom_state, zero_activation_threshold, misc_balances, low_single_balance, ) from eth2spec.test.helpers.forks import ( is_post_altair, ) from eth2spec.test.helpers.state import ( next_epoch, next_slot, ) from eth2spec.test.helpers.attestations import ( add_attestations_to_state, get_valid_attestation, sign_attestation, prepare_state_with_attestations, ) from eth2spec.test.helpers.rewards import leaking from eth2spec.test.helpers.attester_slashings import get_indexed_attestation_participants from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with from random import Random def run_process_rewards_and_penalties(spec, state): yield from run_epoch_processing_with(spec, state, 'process_rewards_and_penalties') def validate_resulting_balances(spec, pre_state, post_state, attestations): attesting_indices = spec.get_unslashed_attesting_indices(post_state, attestations) current_epoch = spec.get_current_epoch(post_state) for index in range(len(pre_state.validators)): if not spec.is_active_validator(pre_state.validators[index], current_epoch): assert post_state.balances[index] == pre_state.balances[index] elif not is_post_altair(spec): proposer_indices = [a.proposer_index for a in post_state.previous_epoch_attestations] if spec.is_in_inactivity_leak(post_state): # Proposers can still make money during a leak before LIGHTCLIENT_PATCH if index in proposer_indices and index in attesting_indices: assert post_state.balances[index] > pre_state.balances[index] elif index in attesting_indices: # If not proposer but participated optimally, should have exactly neutral balance assert post_state.balances[index] == pre_state.balances[index] else: assert post_state.balances[index] < pre_state.balances[index] else: if index in attesting_indices: assert post_state.balances[index] > pre_state.balances[index] else: assert post_state.balances[index] < 
pre_state.balances[index] else: if spec.is_in_inactivity_leak(post_state): if index in attesting_indices: # If not proposer but participated optimally, should have exactly neutral balance assert post_state.balances[index] == pre_state.balances[index] else: assert post_state.balances[index] < pre_state.balances[index] else: if index in attesting_indices: assert post_state.balances[index] > pre_state.balances[index] else: assert post_state.balances[index] < pre_state.balances[index] @with_all_phases @spec_state_test def test_genesis_epoch_no_attestations_no_penalties(spec, state): pre_state = state.copy() assert spec.compute_epoch_at_slot(state.slot) == spec.GENESIS_EPOCH yield from run_process_rewards_and_penalties(spec, state) for index in range(len(pre_state.validators)): assert state.balances[index] == pre_state.balances[index] @with_all_phases @spec_state_test def test_genesis_epoch_full_attestations_no_rewards(spec, state): attestations = [] for slot in range(spec.SLOTS_PER_EPOCH - 1): # create an attestation for each slot if slot < spec.SLOTS_PER_EPOCH: attestation = get_valid_attestation(spec, state, signed=True) attestations.append(attestation) # fill each created slot in state after inclusion delay if slot >= spec.MIN_ATTESTATION_INCLUSION_DELAY: include_att = attestations[slot - spec.MIN_ATTESTATION_INCLUSION_DELAY] add_attestations_to_state(spec, state, [include_att], state.slot) next_slot(spec, state) # ensure has not cross the epoch boundary assert spec.compute_epoch_at_slot(state.slot) == spec.GENESIS_EPOCH pre_state = state.copy() yield from run_process_rewards_and_penalties(spec, state) for index in range(len(pre_state.validators)): assert state.balances[index] == pre_state.balances[index] @with_phases([PHASE0]) @spec_state_test def test_full_attestations_random_incorrect_fields(spec, state): attestations = prepare_state_with_attestations(spec, state) for i, attestation in enumerate(state.previous_epoch_attestations): if i % 3 == 0: # Mess up some 
head votes attestation.data.beacon_block_root = b'\x56' * 32 if i % 3 == 1: # Message up some target votes attestation.data.target.root = b'\x23' * 32 if i % 3 == 2: # Keep some votes 100% correct pass yield from run_process_rewards_and_penalties(spec, state) attesting_indices = spec.get_unslashed_attesting_indices(state, attestations) assert len(attesting_indices) > 0 # No balance checks, non-trivial base on group rewards # Mainly for consensus tests @with_all_phases @spec_test @with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.MAX_EFFECTIVE_BALANCE // 2) @single_phase def test_full_attestations_misc_balances(spec, state): attestations = prepare_state_with_attestations(spec, state) pre_state = state.copy() yield from run_process_rewards_and_penalties(spec, state) validate_resulting_balances(spec, pre_state, state, attestations) # Check if base rewards are consistent with effective balance. brs = {} attesting_indices = spec.get_unslashed_attesting_indices(state, attestations) for index in attesting_indices: br = spec.get_base_reward(state, index) if br in brs: assert brs[br] == state.validators[index].effective_balance else: brs[br] = state.validators[index].effective_balance @with_all_phases @spec_test @with_custom_state(balances_fn=low_single_balance, threshold_fn=zero_activation_threshold) @single_phase def test_full_attestations_one_validaor_one_gwei(spec, state): attestations = prepare_state_with_attestations(spec, state) yield from run_process_rewards_and_penalties(spec, state) # Few assertions. 
Mainly to check that this extreme case can run without exception attesting_indices = spec.get_unslashed_attesting_indices(state, attestations) assert len(attesting_indices) == 1 @with_all_phases @spec_state_test def test_no_attestations_all_penalties(spec, state): # Move to next epoch to ensure rewards/penalties are processed next_epoch(spec, state) pre_state = state.copy() assert spec.compute_epoch_at_slot(state.slot) == spec.GENESIS_EPOCH + 1 yield from run_process_rewards_and_penalties(spec, state) validate_resulting_balances(spec, pre_state, state, []) def run_with_participation(spec, state, participation_fn): participated = set() def participation_tracker(slot, comm_index, comm): att_participants = participation_fn(slot, comm_index, comm) participated.update(att_participants) return att_participants attestations = prepare_state_with_attestations(spec, state, participation_fn=participation_tracker) pre_state = state.copy() yield from run_process_rewards_and_penalties(spec, state) attesting_indices = spec.get_unslashed_attesting_indices(state, attestations) assert len(attesting_indices) == len(participated) validate_resulting_balances(spec, pre_state, state, attestations) @with_all_phases @spec_state_test def test_almost_empty_attestations(spec, state): rng = Random(1234) def participation_fn(slot, comm_index, comm): return rng.sample(sorted(comm), 1) yield from run_with_participation(spec, state, participation_fn) @with_all_phases @spec_state_test @leaking() def test_almost_empty_attestations_with_leak(spec, state): rng = Random(1234) def participation_fn(slot, comm_index, comm): return rng.sample(sorted(comm), 1) yield from run_with_participation(spec, state, participation_fn) @with_all_phases @spec_state_test def test_random_fill_attestations(spec, state): rng = Random(4567) def participation_fn(slot, comm_index, comm): return rng.sample(sorted(comm), len(comm) // 3) yield from run_with_participation(spec, state, participation_fn) @with_all_phases 
@spec_state_test @leaking() def test_random_fill_attestations_with_leak(spec, state): rng = Random(4567) def participation_fn(slot, comm_index, comm): return rng.sample(sorted(comm), len(comm) // 3) yield from run_with_participation(spec, state, participation_fn) @with_all_phases @spec_state_test def test_almost_full_attestations(spec, state): rng = Random(8901) def participation_fn(slot, comm_index, comm): return rng.sample(sorted(comm), len(comm) - 1) yield from run_with_participation(spec, state, participation_fn) @with_all_phases @spec_state_test @leaking() def test_almost_full_attestations_with_leak(spec, state): rng = Random(8901) def participation_fn(slot, comm_index, comm): return rng.sample(sorted(comm), len(comm) - 1) yield from run_with_participation(spec, state, participation_fn) @with_all_phases @spec_state_test def test_full_attestation_participation(spec, state): yield from run_with_participation(spec, state, lambda slot, comm_index, comm: comm) @with_all_phases @spec_state_test @leaking() def test_full_attestation_participation_with_leak(spec, state): yield from run_with_participation(spec, state, lambda slot, comm_index, comm: comm) @with_all_phases @spec_state_test def test_duplicate_attestation(spec, state): """ Although duplicate attestations can be included on-chain, they should only be rewarded for once. 
This test addresses this issue found at Interop https://github.com/djrtwo/interop-test-cases/tree/master/tests/prysm_16_duplicate_attestation_rewards """ attestation = get_valid_attestation(spec, state, signed=True) indexed_attestation = spec.get_indexed_attestation(state, attestation) participants = get_indexed_attestation_participants(spec, indexed_attestation) assert len(participants) > 0 single_state = state.copy() dup_state = state.copy() inclusion_slot = state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY add_attestations_to_state(spec, single_state, [attestation], inclusion_slot) add_attestations_to_state(spec, dup_state, [attestation, attestation], inclusion_slot) next_epoch(spec, single_state) next_epoch(spec, dup_state) # Run non-duplicate inclusion rewards for comparison. Do not yield test vectors for _ in run_process_rewards_and_penalties(spec, single_state): pass # Output duplicate inclusion to test vectors yield from run_process_rewards_and_penalties(spec, dup_state) for index in participants: assert state.balances[index] < single_state.balances[index] assert single_state.balances[index] == dup_state.balances[index] @with_phases([PHASE0]) @spec_state_test def test_duplicate_participants_different_attestation_1(spec, state): """ Same attesters get two different attestations on chain for the same inclusion delay Earlier attestation (by list order) is correct, later has incorrect head Note: although these are slashable, they can validly be included """ correct_attestation = get_valid_attestation(spec, state, signed=True) incorrect_attestation = correct_attestation.copy() incorrect_attestation.data.beacon_block_root = b'\x42' * 32 sign_attestation(spec, state, incorrect_attestation) indexed_attestation = spec.get_indexed_attestation(state, correct_attestation) participants = get_indexed_attestation_participants(spec, indexed_attestation) assert len(participants) > 0 single_correct_state = state.copy() dup_state = state.copy() inclusion_slot = state.slot + 
spec.MIN_ATTESTATION_INCLUSION_DELAY add_attestations_to_state(spec, single_correct_state, [correct_attestation], inclusion_slot) add_attestations_to_state(spec, dup_state, [correct_attestation, incorrect_attestation], inclusion_slot) next_epoch(spec, single_correct_state) next_epoch(spec, dup_state) # Run non-duplicate inclusion rewards for comparison. Do not yield test vectors for _ in run_process_rewards_and_penalties(spec, single_correct_state): pass # Output duplicate inclusion to test vectors yield from run_process_rewards_and_penalties(spec, dup_state) for index in participants: assert state.balances[index] < single_correct_state.balances[index] assert single_correct_state.balances[index] == dup_state.balances[index] @with_phases([PHASE0]) @spec_state_test def test_duplicate_participants_different_attestation_2(spec, state): """ Same attesters get two different attestations on chain for the same inclusion delay Earlier attestation (by list order) has incorrect head, later is correct Note: although these are slashable, they can validly be included """ correct_attestation = get_valid_attestation(spec, state, signed=True) incorrect_attestation = correct_attestation.copy() incorrect_attestation.data.beacon_block_root = b'\x42' * 32 sign_attestation(spec, state, incorrect_attestation) indexed_attestation = spec.get_indexed_attestation(state, correct_attestation) participants = get_indexed_attestation_participants(spec, indexed_attestation) assert len(participants) > 0 single_correct_state = state.copy() dup_state = state.copy() inclusion_slot = state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY add_attestations_to_state(spec, single_correct_state, [correct_attestation], inclusion_slot) add_attestations_to_state(spec, dup_state, [incorrect_attestation, correct_attestation], inclusion_slot) next_epoch(spec, single_correct_state) next_epoch(spec, dup_state) # Run non-duplicate inclusion rewards for comparison. 
Do not yield test vectors for _ in run_process_rewards_and_penalties(spec, single_correct_state): pass # Output duplicate inclusion to test vectors yield from run_process_rewards_and_penalties(spec, dup_state) for index in participants: assert state.balances[index] < single_correct_state.balances[index] # Inclusion delay does not take into account correctness so equal reward assert single_correct_state.balances[index] == dup_state.balances[index] @with_phases([PHASE0]) @spec_state_test def test_duplicate_participants_different_attestation_3(spec, state): """ Same attesters get two different attestations on chain for *different* inclusion delay Earlier attestation (by list order) has incorrect head, later is correct Note: although these are slashable, they can validly be included """ correct_attestation = get_valid_attestation(spec, state, signed=True) incorrect_attestation = correct_attestation.copy() incorrect_attestation.data.beacon_block_root = b'\x42' * 32 sign_attestation(spec, state, incorrect_attestation) indexed_attestation = spec.get_indexed_attestation(state, correct_attestation) participants = get_indexed_attestation_participants(spec, indexed_attestation) assert len(participants) > 0 single_correct_state = state.copy() dup_state = state.copy() inclusion_slot = state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY add_attestations_to_state(spec, single_correct_state, [correct_attestation], inclusion_slot) add_attestations_to_state(spec, dup_state, [incorrect_attestation], inclusion_slot) add_attestations_to_state(spec, dup_state, [correct_attestation], inclusion_slot + 1) next_epoch(spec, single_correct_state) next_epoch(spec, dup_state) # Run non-duplicate inclusion rewards for comparison. 
Do not yield test vectors for _ in run_process_rewards_and_penalties(spec, single_correct_state): pass # Output duplicate inclusion to test vectors yield from run_process_rewards_and_penalties(spec, dup_state) for index in participants: assert state.balances[index] < single_correct_state.balances[index] # Inclusion delay does not take into account correctness so equal reward assert single_correct_state.balances[index] == dup_state.balances[index] @with_all_phases @spec_state_test # Case when some eligible attestations are slashed. Modifies attesting_balance and consequently rewards/penalties. def test_attestations_some_slashed(spec, state): attestations = prepare_state_with_attestations(spec, state) attesting_indices_before_slashings = list(spec.get_unslashed_attesting_indices(state, attestations)) # Slash maximum amount of validators allowed per epoch. for i in range(spec.config.MIN_PER_EPOCH_CHURN_LIMIT): spec.slash_validator(state, attesting_indices_before_slashings[i]) if not is_post_altair(spec): assert len(state.previous_epoch_attestations) == len(attestations) pre_state = state.copy() yield from run_process_rewards_and_penalties(spec, state) attesting_indices = spec.get_unslashed_attesting_indices(state, attestations) assert len(attesting_indices) > 0 assert len(attesting_indices_before_slashings) - len(attesting_indices) == spec.config.MIN_PER_EPOCH_CHURN_LIMIT validate_resulting_balances(spec, pre_state, state, attestations)
17,792
37.680435
116
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/__init__.py
0
0
0
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_eth1_data_reset.py
from eth2spec.test.context import spec_state_test, with_all_phases from eth2spec.test.helpers.epoch_processing import ( run_epoch_processing_with, ) from eth2spec.test.helpers.state import transition_to def run_process_eth1_data_reset(spec, state): yield from run_epoch_processing_with(spec, state, 'process_eth1_data_reset') @with_all_phases @spec_state_test def test_eth1_vote_no_reset(spec, state): assert spec.EPOCHS_PER_ETH1_VOTING_PERIOD > 1 # skip ahead to the end of the epoch transition_to(spec, state, spec.SLOTS_PER_EPOCH - 1) for i in range(state.slot + 1): # add a vote for each skipped slot. state.eth1_data_votes.append( spec.Eth1Data(deposit_root=b'\xaa' * 32, deposit_count=state.eth1_deposit_index, block_hash=b'\xbb' * 32)) yield from run_process_eth1_data_reset(spec, state) assert len(state.eth1_data_votes) == spec.SLOTS_PER_EPOCH @with_all_phases @spec_state_test def test_eth1_vote_reset(spec, state): # skip ahead to the end of the voting period state.slot = (spec.EPOCHS_PER_ETH1_VOTING_PERIOD * spec.SLOTS_PER_EPOCH) - 1 for i in range(state.slot + 1): # add a vote for each skipped slot. state.eth1_data_votes.append( spec.Eth1Data(deposit_root=b'\xaa' * 32, deposit_count=state.eth1_deposit_index, block_hash=b'\xbb' * 32)) yield from run_process_eth1_data_reset(spec, state) assert len(state.eth1_data_votes) == 0
1,553
34.318182
80
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_slashings.py
from random import Random from eth2spec.test.context import spec_state_test, with_all_phases from eth2spec.test.helpers.epoch_processing import ( run_epoch_processing_with, run_epoch_processing_to ) from eth2spec.test.helpers.forks import is_post_altair, is_post_bellatrix from eth2spec.test.helpers.random import randomize_state from eth2spec.test.helpers.state import has_active_balance_differential from eth2spec.test.helpers.voluntary_exits import get_unslashed_exited_validators from eth2spec.test.helpers.state import next_epoch def run_process_slashings(spec, state): yield from run_epoch_processing_with(spec, state, 'process_slashings') def slash_validators(spec, state, indices, out_epochs): total_slashed_balance = 0 for i, out_epoch in zip(indices, out_epochs): v = state.validators[i] v.slashed = True spec.initiate_validator_exit(state, i) v.withdrawable_epoch = out_epoch total_slashed_balance += v.effective_balance state.slashings[ spec.get_current_epoch(state) % spec.EPOCHS_PER_SLASHINGS_VECTOR ] = total_slashed_balance # verify some slashings happened... assert total_slashed_balance != 0 def get_slashing_multiplier(spec): if is_post_bellatrix(spec): return spec.PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX elif is_post_altair(spec): return spec.PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR else: return spec.PROPORTIONAL_SLASHING_MULTIPLIER def _setup_process_slashings_test(spec, state, not_slashable_set=set()): # Slashed count to ensure that enough validators are slashed to induce maximum penalties slashed_count = min( (len(state.validators) // get_slashing_multiplier(spec)) + 1, # Can't slash more than validator count! 
len(state.validators) ) out_epoch = spec.get_current_epoch(state) + (spec.EPOCHS_PER_SLASHINGS_VECTOR // 2) eligible_indices = set(range(slashed_count)) slashed_indices = eligible_indices.difference(not_slashable_set) slash_validators(spec, state, sorted(slashed_indices), [out_epoch] * slashed_count) total_balance = spec.get_total_active_balance(state) total_penalties = sum(state.slashings) assert total_balance // get_slashing_multiplier(spec) <= total_penalties return slashed_indices @with_all_phases @spec_state_test def test_max_penalties(spec, state): slashed_indices = _setup_process_slashings_test(spec, state) yield from run_process_slashings(spec, state) for i in slashed_indices: assert state.balances[i] == 0 @with_all_phases @spec_state_test def test_low_penalty(spec, state): # Slashed count is one tenth of validator set slashed_count = (len(state.validators) // 10) + 1 out_epoch = spec.get_current_epoch(state) + (spec.EPOCHS_PER_SLASHINGS_VECTOR // 2) slashed_indices = list(range(slashed_count)) slash_validators(spec, state, slashed_indices, [out_epoch] * slashed_count) pre_state = state.copy() yield from run_process_slashings(spec, state) for i in slashed_indices: assert 0 < state.balances[i] < pre_state.balances[i] @with_all_phases @spec_state_test def test_minimal_penalty(spec, state): # # When very few slashings, the resulting slashing penalty gets rounded down # to zero so the result of `process_slashings` is null # # Just the bare minimum for this one validator state.balances[0] = state.validators[0].effective_balance = spec.config.EJECTION_BALANCE # All the other validators get the maximum. 
for i in range(1, len(state.validators)): state.validators[i].effective_balance = state.balances[i] = spec.MAX_EFFECTIVE_BALANCE out_epoch = spec.get_current_epoch(state) + (spec.EPOCHS_PER_SLASHINGS_VECTOR // 2) slash_validators(spec, state, [0], [out_epoch]) total_balance = spec.get_total_active_balance(state) total_penalties = sum(state.slashings) assert total_balance // 3 > total_penalties run_epoch_processing_to(spec, state, 'process_slashings') pre_slash_balances = list(state.balances) yield 'pre', state spec.process_slashings(state) yield 'post', state expected_penalty = ( state.validators[0].effective_balance // spec.EFFECTIVE_BALANCE_INCREMENT * (get_slashing_multiplier(spec) * total_penalties) // total_balance * spec.EFFECTIVE_BALANCE_INCREMENT ) assert expected_penalty == 0 assert state.balances[0] == pre_slash_balances[0] @with_all_phases @spec_state_test def test_scaled_penalties(spec, state): # skip to next epoch next_epoch(spec, state) # Also mock some previous slashings, so that we test to have the delta in the penalties computation. base = spec.config.EJECTION_BALANCE incr = spec.EFFECTIVE_BALANCE_INCREMENT # Just add some random slashings. non-zero slashings are at least the minimal effective balance. state.slashings[0] = base + (incr * 12) state.slashings[4] = base + (incr * 3) state.slashings[5] = base + (incr * 6) state.slashings[spec.EPOCHS_PER_SLASHINGS_VECTOR - 1] = base + (incr * 7) slashed_count = len(state.validators) // (get_slashing_multiplier(spec) + 1) assert slashed_count > 10 # make the balances non-uniform. # Otherwise it would just be a simple balance slashing. Test the per-validator scaled penalties. 
diff = spec.MAX_EFFECTIVE_BALANCE - base increments = diff // incr for i in range(10): state.validators[i].effective_balance = base + (incr * (i % increments)) assert state.validators[i].effective_balance <= spec.MAX_EFFECTIVE_BALANCE # add/remove some, see if balances different than the effective balances are picked up state.balances[i] = state.validators[i].effective_balance + i - 5 total_balance = spec.get_total_active_balance(state) out_epoch = spec.get_current_epoch(state) + (spec.EPOCHS_PER_SLASHINGS_VECTOR // 2) slashed_indices = list(range(slashed_count)) # Process up to the sub-transition, then Hi-jack and get the balances. # We just want to test the slashings. # But we are not interested in the other balance changes during the same epoch transition. run_epoch_processing_to(spec, state, 'process_slashings') pre_slash_balances = list(state.balances) slash_validators(spec, state, slashed_indices, [out_epoch] * slashed_count) yield 'pre', state spec.process_slashings(state) yield 'post', state total_penalties = sum(state.slashings) for i in slashed_indices: v = state.validators[i] expected_penalty = ( v.effective_balance // spec.EFFECTIVE_BALANCE_INCREMENT * (get_slashing_multiplier(spec) * total_penalties) // (total_balance) * spec.EFFECTIVE_BALANCE_INCREMENT ) assert state.balances[i] == pre_slash_balances[i] - expected_penalty @with_all_phases @spec_state_test def test_slashings_with_random_state(spec, state): rng = Random(9998) randomize_state(spec, state, rng) pre_balances = state.balances.copy() target_validators = get_unslashed_exited_validators(spec, state) assert len(target_validators) != 0 assert has_active_balance_differential(spec, state) slashed_indices = _setup_process_slashings_test(spec, state, not_slashable_set=target_validators) # ensure no accidental slashings of protected set... 
current_target_validators = get_unslashed_exited_validators(spec, state) assert len(current_target_validators) != 0 assert current_target_validators == target_validators yield from run_process_slashings(spec, state) for i in slashed_indices: assert state.balances[i] < pre_balances[i]
7,785
35.046296
104
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/rewards/test_leak.py
from eth2spec.test.context import with_all_phases, with_phases, spec_state_test from eth2spec.test.helpers.constants import PHASE0 from eth2spec.test.helpers.rewards import leaking import eth2spec.test.helpers.rewards as rewards_helpers @with_all_phases @spec_state_test @leaking() def test_empty_leak(spec, state): yield from rewards_helpers.run_test_empty(spec, state) @with_all_phases @spec_state_test @leaking() def test_full_leak(spec, state): yield from rewards_helpers.run_test_full_all_correct(spec, state) @with_all_phases @spec_state_test @leaking() def test_half_full_leak(spec, state): yield from rewards_helpers.run_test_half_full(spec, state) @with_all_phases @spec_state_test @leaking() def test_quarter_full_leak(spec, state): yield from rewards_helpers.run_test_partial(spec, state, 0.25) @with_all_phases @spec_state_test @leaking() def test_full_but_partial_participation_leak(spec, state): yield from rewards_helpers.run_test_full_but_partial_participation(spec, state) @with_phases([PHASE0]) @spec_state_test @leaking() def test_one_attestation_one_correct_leak(spec, state): yield from rewards_helpers.run_test_one_attestation_one_correct(spec, state) @with_all_phases @spec_state_test @leaking() def test_with_not_yet_activated_validators_leak(spec, state): yield from rewards_helpers.run_test_with_not_yet_activated_validators(spec, state) @with_all_phases @spec_state_test @leaking() def test_with_exited_validators_leak(spec, state): yield from rewards_helpers.run_test_with_exited_validators(spec, state) @with_all_phases @spec_state_test @leaking() def test_with_slashed_validators_leak(spec, state): yield from rewards_helpers.run_test_with_slashed_validators(spec, state) @with_all_phases @spec_state_test @leaking() def test_some_very_low_effective_balances_that_attested_leak(spec, state): yield from rewards_helpers.run_test_some_very_low_effective_balances_that_attested(spec, state) @with_all_phases @spec_state_test @leaking() def 
test_some_very_low_effective_balances_that_did_not_attest_leak(spec, state): yield from rewards_helpers.run_test_some_very_low_effective_balances_that_did_not_attest(spec, state) # # NOTE: No source incorrect tests # All PendingAttestations in state have source validated # We choose to keep this invariant in these tests to not force clients to test with degenerate states # @with_phases([PHASE0]) @spec_state_test @leaking() def test_full_half_correct_target_incorrect_head_leak(spec, state): yield from rewards_helpers.run_test_full_fraction_incorrect( spec, state, correct_target=True, correct_head=False, fraction_incorrect=0.5, ) @with_phases([PHASE0]) @spec_state_test @leaking() def test_full_correct_target_incorrect_head_leak(spec, state): yield from rewards_helpers.run_test_full_fraction_incorrect( spec, state, correct_target=True, correct_head=False, fraction_incorrect=1.0, ) @with_phases([PHASE0]) @spec_state_test @leaking() def test_full_half_incorrect_target_incorrect_head_leak(spec, state): yield from rewards_helpers.run_test_full_fraction_incorrect( spec, state, correct_target=False, correct_head=False, fraction_incorrect=0.5, ) @with_phases([PHASE0]) @spec_state_test @leaking() def test_full_half_incorrect_target_correct_head_leak(spec, state): yield from rewards_helpers.run_test_full_fraction_incorrect( spec, state, correct_target=False, correct_head=True, fraction_incorrect=0.5, ) @with_all_phases @spec_state_test @leaking() def test_full_random_leak(spec, state): yield from rewards_helpers.run_test_full_random(spec, state) @with_all_phases @spec_state_test @leaking(epochs=7) def test_full_random_seven_epoch_leak(spec, state): yield from rewards_helpers.run_test_full_random(spec, state) @with_all_phases @spec_state_test @leaking(epochs=10) def test_full_random_ten_epoch_leak(spec, state): yield from rewards_helpers.run_test_full_random(spec, state)
4,112
25.031646
105
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/rewards/test_basic.py
"""Basic (non-leaking) rewards/penalties delta tests for phase0 epoch processing.

Each test is a generator that delegates to a shared runner in
``eth2spec.test.helpers.rewards``; the runner yields the test-vector parts.
"""

from eth2spec.test.context import with_all_phases, with_phases, spec_state_test
from eth2spec.test.helpers.constants import PHASE0
import eth2spec.test.helpers.rewards as rewards_helpers


@with_all_phases
@spec_state_test
def test_empty(spec, state):
    yield from rewards_helpers.run_test_empty(spec, state)


@with_all_phases
@spec_state_test
def test_full_all_correct(spec, state):
    yield from rewards_helpers.run_test_full_all_correct(spec, state)


@with_all_phases
@spec_state_test
def test_half_full(spec, state):
    yield from rewards_helpers.run_test_half_full(spec, state)


@with_all_phases
@spec_state_test
def test_quarter_full(spec, state):
    yield from rewards_helpers.run_test_partial(spec, state, 0.25)


@with_all_phases
@spec_state_test
def test_full_but_partial_participation(spec, state):
    yield from rewards_helpers.run_test_full_but_partial_participation(spec, state)


@with_phases([PHASE0])
@spec_state_test
def test_one_attestation_one_correct(spec, state):
    yield from rewards_helpers.run_test_one_attestation_one_correct(spec, state)


@with_all_phases
@spec_state_test
def test_with_not_yet_activated_validators(spec, state):
    yield from rewards_helpers.run_test_with_not_yet_activated_validators(spec, state)


@with_all_phases
@spec_state_test
def test_with_exited_validators(spec, state):
    yield from rewards_helpers.run_test_with_exited_validators(spec, state)


@with_all_phases
@spec_state_test
def test_with_slashed_validators(spec, state):
    yield from rewards_helpers.run_test_with_slashed_validators(spec, state)


@with_all_phases
@spec_state_test
def test_some_very_low_effective_balances_that_attested(spec, state):
    yield from rewards_helpers.run_test_some_very_low_effective_balances_that_attested(spec, state)


@with_all_phases
@spec_state_test
def test_some_very_low_effective_balances_that_did_not_attest(spec, state):
    yield from rewards_helpers.run_test_some_very_low_effective_balances_that_did_not_attest(spec, state)


#
# NOTE: No source incorrect tests
# All PendingAttestations in state have source validated
# We choose to keep this invariant in these tests to not force clients to test with degenerate states
#


@with_phases([PHASE0])
@spec_state_test
def test_full_half_correct_target_incorrect_head(spec, state):
    yield from rewards_helpers.run_test_full_fraction_incorrect(
        spec, state,
        correct_target=True,
        correct_head=False,
        fraction_incorrect=0.5,
    )


@with_phases([PHASE0])
@spec_state_test
def test_full_correct_target_incorrect_head(spec, state):
    yield from rewards_helpers.run_test_full_fraction_incorrect(
        spec, state,
        correct_target=True,
        correct_head=False,
        fraction_incorrect=1.0,
    )


@with_phases([PHASE0])
@spec_state_test
def test_full_half_incorrect_target_incorrect_head(spec, state):
    yield from rewards_helpers.run_test_full_fraction_incorrect(
        spec, state,
        correct_target=False,
        correct_head=False,
        fraction_incorrect=0.5,
    )


@with_phases([PHASE0])
@spec_state_test
def test_full_half_incorrect_target_correct_head(spec, state):
    yield from rewards_helpers.run_test_full_fraction_incorrect(
        spec, state,
        correct_target=False,
        correct_head=True,
        fraction_incorrect=0.5,
    )


@with_phases([PHASE0])
@spec_state_test
def test_full_delay_one_slot(spec, state):
    yield from rewards_helpers.run_test_full_delay_one_slot(spec, state)


@with_phases([PHASE0])
@spec_state_test
def test_full_delay_max_slots(spec, state):
    yield from rewards_helpers.run_test_full_delay_max_slots(spec, state)


@with_phases([PHASE0])
@spec_state_test
def test_full_mixed_delay(spec, state):
    yield from rewards_helpers.run_test_full_mixed_delay(spec, state)


@with_phases([PHASE0])
@spec_state_test
def test_proposer_not_in_attestations(spec, state):
    yield from rewards_helpers.run_test_proposer_not_in_attestations(spec, state)


@with_phases([PHASE0])
@spec_state_test
def test_duplicate_attestations_at_later_slots(spec, state):
    yield from rewards_helpers.run_test_duplicate_attestations_at_later_slots(spec, state)


@with_all_phases
@spec_state_test
def test_all_balances_too_low_for_reward(spec, state):
    yield from rewards_helpers.run_test_all_balances_too_low_for_reward(spec, state)
4,355
26.745223
105
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/rewards/test_random.py
"""Randomized rewards tests.

Every test seeds its own ``random.Random`` so that the generated state (and
therefore the produced test vector) is fully deterministic per test name.
"""

from random import Random

from eth2spec.test.context import (
    with_all_phases,
    spec_test,
    spec_state_test,
    with_custom_state,
    single_phase,
    low_balances,
    misc_balances,
)
import eth2spec.test.helpers.rewards as rewards_helpers
from eth2spec.test.helpers.random import (
    randomize_state,
    patch_state_to_non_leaking,
    randomize_attestation_participation,
)
from eth2spec.test.helpers.state import has_active_balance_differential, next_epoch
from eth2spec.test.helpers.voluntary_exits import get_unslashed_exited_validators


@with_all_phases
@spec_state_test
def test_full_random_0(spec, state):
    yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(1010))


@with_all_phases
@spec_state_test
def test_full_random_1(spec, state):
    yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(2020))


@with_all_phases
@spec_state_test
def test_full_random_2(spec, state):
    yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(3030))


@with_all_phases
@spec_state_test
def test_full_random_3(spec, state):
    yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(4040))


@with_all_phases
@spec_state_test
def test_full_random_4(spec, state):
    """
    Ensure a rewards test with some exited (but not slashed) validators.
    """
    seed_rng = Random(5050)
    randomize_state(spec, state, seed_rng)
    assert spec.is_in_inactivity_leak(state)
    exited_unslashed = get_unslashed_exited_validators(spec, state)
    assert len(exited_unslashed) != 0
    assert has_active_balance_differential(spec, state)
    yield from rewards_helpers.run_deltas(spec, state)


@with_all_phases
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
@spec_test
@single_phase
def test_full_random_low_balances_0(spec, state):
    yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(5050))


@with_all_phases
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
@spec_test
@single_phase
def test_full_random_low_balances_1(spec, state):
    yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(6060))


@with_all_phases
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
@spec_test
@single_phase
def test_full_random_misc_balances(spec, state):
    yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(7070))


@with_all_phases
@spec_state_test
def test_full_random_without_leak_0(spec, state):
    # Randomize into a leaking state, then patch the leak away so the
    # non-leak reward path is exercised with exited validators present.
    seed_rng = Random(1010)
    randomize_state(spec, state, seed_rng)
    assert spec.is_in_inactivity_leak(state)
    patch_state_to_non_leaking(spec, state)
    assert not spec.is_in_inactivity_leak(state)
    exited_unslashed = get_unslashed_exited_validators(spec, state)
    assert len(exited_unslashed) != 0
    assert has_active_balance_differential(spec, state)
    yield from rewards_helpers.run_deltas(spec, state)


@with_all_phases
@spec_state_test
def test_full_random_without_leak_and_current_exit_0(spec, state):
    """
    This test specifically ensures a validator exits in the current epoch
    to ensure rewards are handled properly in this case.
    """
    seed_rng = Random(1011)
    randomize_state(spec, state, seed_rng)
    assert spec.is_in_inactivity_leak(state)
    patch_state_to_non_leaking(spec, state)
    assert not spec.is_in_inactivity_leak(state)
    exiting_validators = get_unslashed_exited_validators(spec, state)
    assert len(exiting_validators) != 0

    # move forward some epochs to process attestations added
    # by ``randomize_state`` before we exit validators in
    # what will be the current epoch
    for _ in range(2):
        next_epoch(spec, state)

    current_epoch = spec.get_current_epoch(state)
    for validator_index in exiting_validators:
        # patch exited validators to exit in the current epoch
        record = state.validators[validator_index]
        record.exit_epoch = current_epoch
        record.withdrawable_epoch = current_epoch + spec.config.MIN_VALIDATOR_WITHDRAWABILITY_DELAY

    # re-randomize attestation participation for the current epoch
    randomize_attestation_participation(spec, state, seed_rng)

    assert has_active_balance_differential(spec, state)
    yield from rewards_helpers.run_deltas(spec, state)
4,355
32.251908
102
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/rewards/__init__.py
0
0
0
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/finality/test_finality.py
"""Finality rule tests: drive epochs of attestations and check which of the
justified/finalized checkpoints advanced after each epoch transition."""

from eth2spec.test.context import spec_state_test, with_all_phases
from eth2spec.test.helpers.state import next_epoch_via_block
from eth2spec.test.helpers.attestations import next_epoch_with_attestations


def check_finality(spec,
                   state,
                   prev_state,
                   current_justified_changed,
                   previous_justified_changed,
                   finalized_changed):
    """Assert, per checkpoint, that it advanced (epoch strictly grew and root
    changed) iff the corresponding ``*_changed`` flag is set; otherwise it must
    be byte-identical to the previous state's checkpoint."""
    comparisons = (
        (state.current_justified_checkpoint, prev_state.current_justified_checkpoint,
         current_justified_changed),
        (state.previous_justified_checkpoint, prev_state.previous_justified_checkpoint,
         previous_justified_changed),
        (state.finalized_checkpoint, prev_state.finalized_checkpoint,
         finalized_changed),
    )
    for new_checkpoint, old_checkpoint, changed in comparisons:
        if changed:
            assert new_checkpoint.epoch > old_checkpoint.epoch
            assert new_checkpoint.root != old_checkpoint.root
        else:
            assert new_checkpoint == old_checkpoint


@with_all_phases
@spec_state_test
def test_finality_no_updates_at_genesis(spec, state):
    assert spec.get_current_epoch(state) == spec.GENESIS_EPOCH

    yield 'pre', state

    blocks = []
    for _ in range(2):
        prev_state, epoch_blocks, state = next_epoch_with_attestations(spec, state, True, False)
        blocks += epoch_blocks
        # justification/finalization is skipped at GENESIS_EPOCH and
        # GENESIS_EPOCH + 1, so nothing may change in either iteration
        check_finality(spec, state, prev_state, False, False, False)

    yield 'blocks', blocks
    yield 'post', state


@with_all_phases
@spec_state_test
def test_finality_rule_4(spec, state):
    # get past first two epochs that finality does not run on
    next_epoch_via_block(spec, state)
    next_epoch_via_block(spec, state)

    yield 'pre', state

    blocks = []
    for epoch in range(2):
        prev_state, epoch_blocks, state = next_epoch_with_attestations(spec, state, True, False)
        blocks += epoch_blocks

        if epoch == 0:
            check_finality(spec, state, prev_state, True, False, False)
        elif epoch == 1:
            # rule 4 of finality
            check_finality(spec, state, prev_state, True, True, True)
            assert state.finalized_checkpoint == prev_state.current_justified_checkpoint

    yield 'blocks', blocks
    yield 'post', state


@with_all_phases
@spec_state_test
def test_finality_rule_1(spec, state):
    # get past first two epochs that finality does not run on
    next_epoch_via_block(spec, state)
    next_epoch_via_block(spec, state)

    yield 'pre', state

    blocks = []
    for epoch in range(3):
        prev_state, epoch_blocks, state = next_epoch_with_attestations(spec, state, False, True)
        blocks += epoch_blocks

        if epoch == 0:
            check_finality(spec, state, prev_state, True, False, False)
        elif epoch == 1:
            check_finality(spec, state, prev_state, True, True, False)
        elif epoch == 2:
            # finalized by rule 1
            check_finality(spec, state, prev_state, True, True, True)
            assert state.finalized_checkpoint == prev_state.previous_justified_checkpoint

    yield 'blocks', blocks
    yield 'post', state


@with_all_phases
@spec_state_test
def test_finality_rule_2(spec, state):
    # get past first two epochs that finality does not run on
    next_epoch_via_block(spec, state)
    next_epoch_via_block(spec, state)

    yield 'pre', state

    blocks = []
    for epoch in range(3):
        if epoch == 0:
            prev_state, epoch_blocks, state = next_epoch_with_attestations(spec, state, True, False)
            check_finality(spec, state, prev_state, True, False, False)
        elif epoch == 1:
            prev_state, epoch_blocks, state = next_epoch_with_attestations(spec, state, False, False)
            check_finality(spec, state, prev_state, False, True, False)
        elif epoch == 2:
            prev_state, epoch_blocks, state = next_epoch_with_attestations(spec, state, False, True)
            # finalized by rule 2
            check_finality(spec, state, prev_state, True, False, True)
            assert state.finalized_checkpoint == prev_state.previous_justified_checkpoint

        blocks += epoch_blocks

    yield 'blocks', blocks
    yield 'post', state


@with_all_phases
@spec_state_test
def test_finality_rule_3(spec, state):
    """
    Test scenario described here
    https://github.com/ethereum/eth2.0-specs/issues/611#issuecomment-463612892
    """
    # get past first two epochs that finality does not run on
    next_epoch_via_block(spec, state)
    next_epoch_via_block(spec, state)

    yield 'pre', state

    blocks = []
    prev_state, epoch_blocks, state = next_epoch_with_attestations(spec, state, True, False)
    blocks += epoch_blocks
    check_finality(spec, state, prev_state, True, False, False)

    # In epoch N, JE is set to N, prev JE is set to N-1
    prev_state, epoch_blocks, state = next_epoch_with_attestations(spec, state, True, False)
    blocks += epoch_blocks
    check_finality(spec, state, prev_state, True, True, True)

    # In epoch N+1, JE is N, prev JE is N-1, and not enough messages get in to do anything
    prev_state, epoch_blocks, state = next_epoch_with_attestations(spec, state, False, False)
    blocks += epoch_blocks
    check_finality(spec, state, prev_state, False, True, False)

    # In epoch N+2, JE is N, prev JE is N, and enough messages from the previous epoch get in to justify N+1.
    # N+1 now becomes the JE. Not enough messages from epoch N+2 itself get in to justify N+2
    prev_state, epoch_blocks, state = next_epoch_with_attestations(spec, state, False, True)
    blocks += epoch_blocks
    # rule 2
    check_finality(spec, state, prev_state, True, False, True)

    # In epoch N+3, LJE is N+1, prev LJE is N, and enough messages get in to justify epochs N+2 and N+3.
    prev_state, epoch_blocks, state = next_epoch_with_attestations(spec, state, True, True)
    blocks += epoch_blocks
    # rule 3
    check_finality(spec, state, prev_state, True, True, True)
    assert state.finalized_checkpoint == prev_state.current_justified_checkpoint

    yield 'blocks', blocks
    yield 'post', state
6,732
36.614525
109
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/phase0/finality/__init__.py
0
0
0
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/custody_game/__init__.py
0
0
0
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_attestation.py
"""Custody-game attestation inclusion tests: an attestation is valid both at
the earliest allowed inclusion slot and one slot later."""

from eth2spec.test.context import (
    with_phases,
    spec_state_test,
    always_bls,
)
from eth2spec.test.helpers.constants import CUSTODY_GAME
from eth2spec.test.helpers.state import transition_to
from eth2spec.test.helpers.attestations import (
    run_attestation_processing,
    get_valid_attestation,
)


@with_phases([CUSTODY_GAME])
@spec_state_test
@always_bls
def test_on_time_success(spec, state):
    # Attestation included at exactly the minimum inclusion delay.
    signed_attestation = get_valid_attestation(spec, state, signed=True)

    inclusion_slot = state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY
    transition_to(spec, state, inclusion_slot)

    yield from run_attestation_processing(spec, state, signed_attestation)


@with_phases([CUSTODY_GAME])
@spec_state_test
@always_bls
def test_late_success(spec, state):
    # Attestation included one slot after the minimum inclusion delay.
    signed_attestation = get_valid_attestation(spec, state, signed=True)

    inclusion_slot = state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY + 1
    transition_to(spec, state, inclusion_slot)

    yield from run_attestation_processing(spec, state, signed_attestation)
948
26.911765
85
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_custody_key_reveal.py
"""Tests for ``process_custody_key_reveal`` in the custody game."""

from eth2spec.test.helpers.constants import CUSTODY_GAME
from eth2spec.test.helpers.custody import get_valid_custody_key_reveal
from eth2spec.test.context import (
    with_phases,
    spec_state_test,
    expect_assertion_error,
    always_bls,
)


def run_custody_key_reveal_processing(spec, state, custody_key_reveal, valid=True):
    """
    Run ``process_custody_key_reveal``, yielding:
      - pre-state ('pre')
      - custody_key_reveal ('custody_key_reveal')
      - post-state ('post').
    If ``valid == False``, run expecting ``AssertionError``
    """
    yield 'pre', state
    yield 'custody_key_reveal', custody_key_reveal

    if not valid:
        expect_assertion_error(lambda: spec.process_custody_key_reveal(state, custody_key_reveal))
        yield 'post', None
        return

    revealer = custody_key_reveal.revealer_index
    secrets_revealed_before = state.validators[revealer].next_custody_secret_to_reveal

    spec.process_custody_key_reveal(state, custody_key_reveal)

    # A successful reveal advances the revealer's secret counter by exactly one.
    secrets_revealed_after = state.validators[revealer].next_custody_secret_to_reveal
    assert secrets_revealed_after == secrets_revealed_before + 1

    yield 'post', state


@with_phases([CUSTODY_GAME])
@spec_state_test
@always_bls
def test_success(spec, state):
    # Advance one full custody period so a reveal is due.
    state.slot += spec.EPOCHS_PER_CUSTODY_PERIOD * spec.SLOTS_PER_EPOCH
    custody_key_reveal = get_valid_custody_key_reveal(spec, state)

    yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal)


@with_phases([CUSTODY_GAME])
@spec_state_test
@always_bls
def test_reveal_too_early(spec, state):
    # No slots have passed, so revealing the first secret must fail.
    custody_key_reveal = get_valid_custody_key_reveal(spec, state)

    yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False)


@with_phases([CUSTODY_GAME])
@spec_state_test
@always_bls
def test_wrong_period(spec, state):
    # A reveal targeting a future period (5) is rejected.
    custody_key_reveal = get_valid_custody_key_reveal(spec, state, period=5)

    yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False)


@with_phases([CUSTODY_GAME])
@spec_state_test
@always_bls
def test_late_reveal(spec, state):
    # Well past the due period (3+ periods and change) — still accepted.
    state.slot += spec.EPOCHS_PER_CUSTODY_PERIOD * spec.SLOTS_PER_EPOCH * 3 + 150
    custody_key_reveal = get_valid_custody_key_reveal(spec, state)

    yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal)


@with_phases([CUSTODY_GAME])
@spec_state_test
@always_bls
def test_double_reveal(spec, state):
    state.slot += spec.EPOCHS_PER_CUSTODY_PERIOD * spec.SLOTS_PER_EPOCH * 2
    custody_key_reveal = get_valid_custody_key_reveal(spec, state)

    # Drain the runner generator once to apply the first (valid) reveal;
    # unpacking into three names consumes all three yields.
    _, _, _ = run_custody_key_reveal_processing(spec, state, custody_key_reveal)

    # The identical second reveal must now fail.
    yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False)
2,833
30.488889
98
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_chunk_challenge.py
from eth2spec.test.helpers.custody import ( get_valid_chunk_challenge, get_valid_custody_chunk_response, get_sample_shard_transition, ) from eth2spec.test.helpers.attestations import ( get_valid_attestation, ) from eth2spec.test.helpers.constants import ( CUSTODY_GAME, MINIMAL, ) from eth2spec.test.helpers.state import transition_to, transition_to_valid_shard_slot from eth2spec.test.context import ( expect_assertion_error, disable_process_reveal_deadlines, spec_state_test, with_phases, with_presets, ) from eth2spec.test.phase0.block_processing.test_process_attestation import run_attestation_processing def run_chunk_challenge_processing(spec, state, custody_chunk_challenge, valid=True): """ Run ``process_chunk_challenge``, yielding: - pre-state ('pre') - CustodyBitChallenge ('custody_chunk_challenge') - post-state ('post'). If ``valid == False``, run expecting ``AssertionError`` """ yield 'pre', state yield 'custody_chunk_challenge', custody_chunk_challenge if not valid: expect_assertion_error(lambda: spec.process_chunk_challenge(state, custody_chunk_challenge)) yield 'post', None return spec.process_chunk_challenge(state, custody_chunk_challenge) assert state.custody_chunk_challenge_records[state.custody_chunk_challenge_index - 1].responder_index == \ custody_chunk_challenge.responder_index assert state.custody_chunk_challenge_records[state.custody_chunk_challenge_index - 1].chunk_index == \ custody_chunk_challenge.chunk_index yield 'post', state def run_custody_chunk_response_processing(spec, state, custody_response, valid=True): """ Run ``process_chunk_challenge_response``, yielding: - pre-state ('pre') - CustodyResponse ('custody_response') - post-state ('post'). 
If ``valid == False``, run expecting ``AssertionError`` """ yield 'pre', state yield 'custody_response', custody_response if not valid: expect_assertion_error(lambda: spec.process_custody_response(state, custody_response)) yield 'post', None return spec.process_chunk_challenge_response(state, custody_response) assert state.custody_chunk_challenge_records[custody_response.challenge_index] == spec.CustodyChunkChallengeRecord() yield 'post', state @with_phases([CUSTODY_GAME]) @spec_state_test @with_presets([MINIMAL], reason="too slow") @disable_process_reveal_deadlines def test_challenge_appended(spec, state): transition_to_valid_shard_slot(spec, state) transition_to(spec, state, state.slot + 1) # Make len(offset_slots) == 1 shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) attestation = get_valid_attestation(spec, state, index=shard, signed=True, shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) _, _, _ = run_attestation_processing(spec, state, attestation) transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_CUSTODY_PERIOD) challenge = get_valid_chunk_challenge(spec, state, attestation, shard_transition) yield from run_chunk_challenge_processing(spec, state, challenge) @with_phases([CUSTODY_GAME]) @spec_state_test @disable_process_reveal_deadlines @with_presets([MINIMAL], reason="too slow") def test_challenge_empty_element_replaced(spec, state): transition_to_valid_shard_slot(spec, state) transition_to(spec, state, state.slot + 1) # Make len(offset_slots) == 1 shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) attestation = get_valid_attestation(spec, state, index=shard, signed=True, shard_transition=shard_transition) transition_to(spec, state, state.slot + 
spec.MIN_ATTESTATION_INCLUSION_DELAY) _, _, _ = run_attestation_processing(spec, state, attestation) transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_CUSTODY_PERIOD) challenge = get_valid_chunk_challenge(spec, state, attestation, shard_transition) state.custody_chunk_challenge_records.append(spec.CustodyChunkChallengeRecord()) yield from run_chunk_challenge_processing(spec, state, challenge) @with_phases([CUSTODY_GAME]) @spec_state_test @disable_process_reveal_deadlines @with_presets([MINIMAL], reason="too slow") def test_duplicate_challenge(spec, state): transition_to_valid_shard_slot(spec, state) transition_to(spec, state, state.slot + 1) # Make len(offset_slots) == 1 shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) attestation = get_valid_attestation(spec, state, index=shard, signed=True, shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) _, _, _ = run_attestation_processing(spec, state, attestation) transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_CUSTODY_PERIOD) challenge = get_valid_chunk_challenge(spec, state, attestation, shard_transition) _, _, _ = run_chunk_challenge_processing(spec, state, challenge) yield from run_chunk_challenge_processing(spec, state, challenge, valid=False) @with_phases([CUSTODY_GAME]) @spec_state_test @disable_process_reveal_deadlines @with_presets([MINIMAL], reason="too slow") def test_second_challenge(spec, state): transition_to_valid_shard_slot(spec, state) transition_to(spec, state, state.slot + 1) # Make len(offset_slots) == 1 shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) attestation = get_valid_attestation(spec, state, index=shard, signed=True, shard_transition=shard_transition) 
transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) _, _, _ = run_attestation_processing(spec, state, attestation) transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_CUSTODY_PERIOD) challenge0 = get_valid_chunk_challenge(spec, state, attestation, shard_transition, chunk_index=0) _, _, _ = run_chunk_challenge_processing(spec, state, challenge0) challenge1 = get_valid_chunk_challenge(spec, state, attestation, shard_transition, chunk_index=1) yield from run_chunk_challenge_processing(spec, state, challenge1) @with_phases([CUSTODY_GAME]) @spec_state_test @disable_process_reveal_deadlines @with_presets([MINIMAL], reason="too slow") def test_multiple_epochs_custody(spec, state): transition_to_valid_shard_slot(spec, state) transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * 3) shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) attestation = get_valid_attestation(spec, state, index=shard, signed=True, shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) _, _, _ = run_attestation_processing(spec, state, attestation) transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * (spec.EPOCHS_PER_CUSTODY_PERIOD - 1)) challenge = get_valid_chunk_challenge(spec, state, attestation, shard_transition) yield from run_chunk_challenge_processing(spec, state, challenge) @with_phases([CUSTODY_GAME]) @spec_state_test @disable_process_reveal_deadlines @with_presets([MINIMAL], reason="too slow") def test_many_epochs_custody(spec, state): transition_to_valid_shard_slot(spec, state) transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * 20) shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) attestation = get_valid_attestation(spec, state, 
index=shard, signed=True, shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) _, _, _ = run_attestation_processing(spec, state, attestation) transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * (spec.EPOCHS_PER_CUSTODY_PERIOD - 1)) challenge = get_valid_chunk_challenge(spec, state, attestation, shard_transition) yield from run_chunk_challenge_processing(spec, state, challenge) @with_phases([CUSTODY_GAME]) @spec_state_test @disable_process_reveal_deadlines @with_presets([MINIMAL], reason="too slow") def test_off_chain_attestation(spec, state): transition_to_valid_shard_slot(spec, state) transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH) shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) attestation = get_valid_attestation(spec, state, index=shard, signed=True, shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * (spec.EPOCHS_PER_CUSTODY_PERIOD - 1)) challenge = get_valid_chunk_challenge(spec, state, attestation, shard_transition) yield from run_chunk_challenge_processing(spec, state, challenge) @with_phases([CUSTODY_GAME]) @spec_state_test @disable_process_reveal_deadlines @with_presets([MINIMAL], reason="too slow") def test_custody_response(spec, state): transition_to_valid_shard_slot(spec, state) transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH) shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) attestation = get_valid_attestation(spec, state, index=shard, signed=True, shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) _, _, _ = run_attestation_processing(spec, state, attestation) transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * 
(spec.EPOCHS_PER_CUSTODY_PERIOD - 1)) challenge = get_valid_chunk_challenge(spec, state, attestation, shard_transition) _, _, _ = run_chunk_challenge_processing(spec, state, challenge) chunk_challenge_index = state.custody_chunk_challenge_index - 1 custody_response = get_valid_custody_chunk_response( spec, state, challenge, chunk_challenge_index, block_length_or_custody_data=2**15 // 3) yield from run_custody_chunk_response_processing(spec, state, custody_response) @with_phases([CUSTODY_GAME]) @spec_state_test @disable_process_reveal_deadlines @with_presets([MINIMAL], reason="too slow") def test_custody_response_chunk_index_2(spec, state): transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH) shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) attestation = get_valid_attestation(spec, state, index=shard, signed=True, shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) _, _, _ = run_attestation_processing(spec, state, attestation) transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * (spec.EPOCHS_PER_CUSTODY_PERIOD - 1)) challenge = get_valid_chunk_challenge(spec, state, attestation, shard_transition, chunk_index=2) _, _, _ = run_chunk_challenge_processing(spec, state, challenge) chunk_challenge_index = state.custody_chunk_challenge_index - 1 custody_response = get_valid_custody_chunk_response( spec, state, challenge, chunk_challenge_index, block_length_or_custody_data=2**15 // 3) yield from run_custody_chunk_response_processing(spec, state, custody_response) @with_phases([CUSTODY_GAME]) @spec_state_test @disable_process_reveal_deadlines @with_presets([MINIMAL], reason="too slow") def test_custody_response_multiple_epochs(spec, state): transition_to_valid_shard_slot(spec, state) transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * 3) shard = 0 offset_slots = 
spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) attestation = get_valid_attestation(spec, state, index=shard, signed=True, shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) _, _, _ = run_attestation_processing(spec, state, attestation) transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * (spec.EPOCHS_PER_CUSTODY_PERIOD - 1)) challenge = get_valid_chunk_challenge(spec, state, attestation, shard_transition) _, _, _ = run_chunk_challenge_processing(spec, state, challenge) chunk_challenge_index = state.custody_chunk_challenge_index - 1 custody_response = get_valid_custody_chunk_response( spec, state, challenge, chunk_challenge_index, block_length_or_custody_data=2**15 // 3) yield from run_custody_chunk_response_processing(spec, state, custody_response) @with_phases([CUSTODY_GAME]) @spec_state_test @disable_process_reveal_deadlines @with_presets([MINIMAL], reason="too slow") def test_custody_response_many_epochs(spec, state): transition_to_valid_shard_slot(spec, state) transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * 20) shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) attestation = get_valid_attestation(spec, state, index=shard, signed=True, shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) _, _, _ = run_attestation_processing(spec, state, attestation) transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * (spec.EPOCHS_PER_CUSTODY_PERIOD - 1)) challenge = get_valid_chunk_challenge(spec, state, attestation, shard_transition) _, _, _ = run_chunk_challenge_processing(spec, state, challenge) chunk_challenge_index = state.custody_chunk_challenge_index - 1 custody_response = get_valid_custody_chunk_response( spec, state, 
challenge, chunk_challenge_index, block_length_or_custody_data=2**15 // 3) yield from run_custody_chunk_response_processing(spec, state, custody_response)
15,146
39.608579
120
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/custody_game/block_processing/__init__.py
0
0
0
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_early_derived_secret_reveal.py
from eth2spec.test.helpers.constants import CUSTODY_GAME from eth2spec.test.helpers.custody import get_valid_early_derived_secret_reveal from eth2spec.test.helpers.state import next_epoch_via_block, get_balance from eth2spec.test.context import ( with_phases, spec_state_test, expect_assertion_error, always_bls, never_bls, ) def run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, valid=True): """ Run ``process_randao_key_reveal``, yielding: - pre-state ('pre') - randao_key_reveal ('randao_key_reveal') - post-state ('post'). If ``valid == False``, run expecting ``AssertionError`` """ yield 'pre', state yield 'randao_key_reveal', randao_key_reveal if not valid: expect_assertion_error(lambda: spec.process_early_derived_secret_reveal(state, randao_key_reveal)) yield 'post', None return pre_slashed_balance = get_balance(state, randao_key_reveal.revealed_index) spec.process_early_derived_secret_reveal(state, randao_key_reveal) slashed_validator = state.validators[randao_key_reveal.revealed_index] if randao_key_reveal.epoch >= spec.get_current_epoch(state) + spec.CUSTODY_PERIOD_TO_RANDAO_PADDING: assert slashed_validator.slashed assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH assert get_balance(state, randao_key_reveal.revealed_index) < pre_slashed_balance yield 'post', state @with_phases([CUSTODY_GAME]) @spec_state_test @always_bls def test_success(spec, state): randao_key_reveal = get_valid_early_derived_secret_reveal(spec, state) yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal) @with_phases([CUSTODY_GAME]) @spec_state_test @never_bls def test_reveal_from_current_epoch(spec, state): randao_key_reveal = get_valid_early_derived_secret_reveal(spec, state, spec.get_current_epoch(state)) yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False) @with_phases([CUSTODY_GAME]) @spec_state_test @never_bls def 
test_reveal_from_past_epoch(spec, state): next_epoch_via_block(spec, state) randao_key_reveal = get_valid_early_derived_secret_reveal(spec, state, spec.get_current_epoch(state) - 1) yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False) @with_phases([CUSTODY_GAME]) @spec_state_test @always_bls def test_reveal_with_custody_padding(spec, state): randao_key_reveal = get_valid_early_derived_secret_reveal( spec, state, spec.get_current_epoch(state) + spec.CUSTODY_PERIOD_TO_RANDAO_PADDING, ) yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, True) @with_phases([CUSTODY_GAME]) @spec_state_test @always_bls def test_reveal_with_custody_padding_minus_one(spec, state): randao_key_reveal = get_valid_early_derived_secret_reveal( spec, state, spec.get_current_epoch(state) + spec.CUSTODY_PERIOD_TO_RANDAO_PADDING - 1, ) yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, True) @with_phases([CUSTODY_GAME]) @spec_state_test @never_bls def test_double_reveal(spec, state): epoch = spec.get_current_epoch(state) + spec.RANDAO_PENALTY_EPOCHS randao_key_reveal1 = get_valid_early_derived_secret_reveal( spec, state, epoch, ) _, _, _ = dict(run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal1)) randao_key_reveal2 = get_valid_early_derived_secret_reveal( spec, state, epoch, ) yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal2, False) @with_phases([CUSTODY_GAME]) @spec_state_test @never_bls def test_revealer_is_slashed(spec, state): randao_key_reveal = get_valid_early_derived_secret_reveal(spec, state, spec.get_current_epoch(state)) state.validators[randao_key_reveal.revealed_index].slashed = True yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False) @with_phases([CUSTODY_GAME]) @spec_state_test @never_bls def test_far_future_epoch(spec, state): randao_key_reveal = 
get_valid_early_derived_secret_reveal( spec, state, spec.get_current_epoch(state) + spec.EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS, ) yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False)
4,595
32.304348
109
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_custody_slashing.py
from eth2spec.test.helpers.custody import ( get_valid_custody_slashing, get_custody_slashable_shard_transition, ) from eth2spec.test.helpers.attestations import ( get_valid_attestation, ) from eth2spec.test.helpers.constants import ( CUSTODY_GAME, MINIMAL, ) from eth2spec.test.helpers.keys import privkeys from eth2spec.utils.ssz.ssz_typing import ByteList from eth2spec.test.helpers.state import get_balance, transition_to from eth2spec.test.context import ( with_phases, spec_state_test, expect_assertion_error, disable_process_reveal_deadlines, with_presets, ) from eth2spec.test.phase0.block_processing.test_process_attestation import run_attestation_processing def run_custody_slashing_processing(spec, state, custody_slashing, valid=True, correct=True): """ Run ``process_bit_challenge``, yielding: - pre-state ('pre') - CustodySlashing ('custody_slashing') - post-state ('post'). If ``valid == False``, run expecting ``AssertionError`` """ yield 'pre', state yield 'custody_slashing', custody_slashing if not valid: expect_assertion_error(lambda: spec.process_custody_slashing(state, custody_slashing)) yield 'post', None return if correct: pre_slashed_balance = get_balance(state, custody_slashing.message.malefactor_index) else: pre_slashed_balance = get_balance(state, custody_slashing.message.whistleblower_index) spec.process_custody_slashing(state, custody_slashing) if correct: slashed_validator = state.validators[custody_slashing.message.malefactor_index] assert get_balance(state, custody_slashing.message.malefactor_index) < pre_slashed_balance else: slashed_validator = state.validators[custody_slashing.message.whistleblower_index] assert get_balance(state, custody_slashing.message.whistleblower_index) < pre_slashed_balance assert slashed_validator.slashed assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH yield 'post', state def run_standard_custody_slashing_test(spec, state, 
shard_lateness=None, shard=None, validator_index=None, block_lengths=None, slashing_message_data=None, correct=True, valid=True): transition_to(spec, state, state.slot + 1) # Make len(offset_slots) == 1 if shard_lateness is None: shard_lateness = spec.SLOTS_PER_EPOCH transition_to(spec, state, state.slot + shard_lateness) if shard is None: shard = 0 if validator_index is None: validator_index = spec.get_beacon_committee(state, state.slot, shard)[0] offset_slots = spec.get_offset_slots(state, shard) if block_lengths is None: block_lengths = [2**15 // 3] * len(offset_slots) custody_secret = spec.get_custody_secret( state, validator_index, privkeys[validator_index], spec.get_current_epoch(state), ) shard_transition, slashable_test_vector = get_custody_slashable_shard_transition( spec, state.slot, block_lengths, custody_secret, slashable=correct, ) attestation = get_valid_attestation(spec, state, index=shard, signed=True, shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) _, _, _ = run_attestation_processing(spec, state, attestation) transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * (spec.EPOCHS_PER_CUSTODY_PERIOD - 1)) slashing = get_valid_custody_slashing(spec, state, attestation, shard_transition, custody_secret, slashable_test_vector) if slashing_message_data is not None: slashing.message.data = slashing_message_data yield from run_custody_slashing_processing(spec, state, slashing, valid=valid, correct=correct) @with_phases([CUSTODY_GAME]) @spec_state_test @disable_process_reveal_deadlines @with_presets([MINIMAL], reason="too slow") def test_custody_slashing(spec, state): yield from run_standard_custody_slashing_test(spec, state) @with_phases([CUSTODY_GAME]) @spec_state_test @disable_process_reveal_deadlines @with_presets([MINIMAL], reason="too slow") def test_incorrect_custody_slashing(spec, state): yield from run_standard_custody_slashing_test(spec, state, correct=False) 
@with_phases([CUSTODY_GAME]) @spec_state_test @disable_process_reveal_deadlines @with_presets([MINIMAL], reason="too slow") def test_multiple_epochs_custody(spec, state): yield from run_standard_custody_slashing_test(spec, state, shard_lateness=spec.SLOTS_PER_EPOCH * 3) @with_phases([CUSTODY_GAME]) @spec_state_test @disable_process_reveal_deadlines @with_presets([MINIMAL], reason="too slow") def test_many_epochs_custody(spec, state): yield from run_standard_custody_slashing_test(spec, state, shard_lateness=spec.SLOTS_PER_EPOCH * 5) @with_phases([CUSTODY_GAME]) @spec_state_test @disable_process_reveal_deadlines @with_presets([MINIMAL], reason="too slow") def test_invalid_custody_slashing(spec, state): yield from run_standard_custody_slashing_test( spec, state, slashing_message_data=ByteList[spec.MAX_SHARD_BLOCK_SIZE](), valid=False, )
5,666
34.41875
104
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/custody_game/sanity/test_blocks.py
from typing import Dict, Sequence from eth2spec.test.context import ( with_phases, spec_state_test, with_presets, ) from eth2spec.test.helpers.attestations import get_valid_attestation from eth2spec.test.helpers.block import build_empty_block from eth2spec.test.helpers.constants import ( CUSTODY_GAME, MINIMAL, ) from eth2spec.test.helpers.custody import ( get_custody_slashable_test_vector, get_valid_chunk_challenge, get_valid_custody_chunk_response, get_valid_custody_key_reveal, get_valid_custody_slashing, get_valid_early_derived_secret_reveal, ) from eth2spec.test.helpers.keys import privkeys from eth2spec.test.helpers.shard_block import ( build_shard_block, get_committee_index_of_shard, get_sample_shard_block_body, get_shard_transitions, ) from eth2spec.test.helpers.state import state_transition_and_sign_block, transition_to_valid_shard_slot, transition_to def run_beacon_block(spec, state, block, valid=True): yield 'pre', state.copy() if not valid: signed_beacon_block = state_transition_and_sign_block(spec, state, block, expect_fail=True) yield 'block', signed_beacon_block yield 'post', None return signed_beacon_block = state_transition_and_sign_block(spec, state, block) yield 'block', signed_beacon_block yield 'post', state # # Beacon block with custody operations # @with_phases([CUSTODY_GAME]) @spec_state_test def test_with_shard_transition_with_custody_challenge_and_response(spec, state): transition_to_valid_shard_slot(spec, state) # build shard block shard = 0 committee_index = get_committee_index_of_shard(spec, state, state.slot, shard) body = get_sample_shard_block_body(spec) shard_block = build_shard_block(spec, state, shard, body=body, slot=state.slot, signed=True) shard_block_dict: Dict[spec.Shard, Sequence[spec.SignedShardBlock]] = {shard: [shard_block]} shard_transitions = get_shard_transitions(spec, state, shard_block_dict) attestation = get_valid_attestation( spec, state, index=committee_index, shard_transition=shard_transitions[shard], signed=True, 
) block = build_empty_block(spec, state, slot=state.slot + 1) block.body.attestations = [attestation] block.body.shard_transitions = shard_transitions # CustodyChunkChallenge operation challenge = get_valid_chunk_challenge(spec, state, attestation, shard_transitions[shard]) block.body.chunk_challenges = [challenge] # CustodyChunkResponse operation chunk_challenge_index = state.custody_chunk_challenge_index custody_response = get_valid_custody_chunk_response( spec, state, challenge, chunk_challenge_index, block_length_or_custody_data=body) block.body.chunk_challenge_responses = [custody_response] yield from run_beacon_block(spec, state, block) @with_phases([CUSTODY_GAME]) @spec_state_test @with_presets([MINIMAL]) def test_custody_key_reveal(spec, state): transition_to_valid_shard_slot(spec, state) transition_to(spec, state, state.slot + spec.EPOCHS_PER_CUSTODY_PERIOD * spec.SLOTS_PER_EPOCH) block = build_empty_block(spec, state, slot=state.slot + 1) custody_key_reveal = get_valid_custody_key_reveal(spec, state) block.body.custody_key_reveals = [custody_key_reveal] yield from run_beacon_block(spec, state, block) @with_phases([CUSTODY_GAME]) @spec_state_test def test_early_derived_secret_reveal(spec, state): transition_to_valid_shard_slot(spec, state) block = build_empty_block(spec, state, slot=state.slot + 1) early_derived_secret_reveal = get_valid_early_derived_secret_reveal(spec, state) block.body.early_derived_secret_reveals = [early_derived_secret_reveal] yield from run_beacon_block(spec, state, block) @with_phases([CUSTODY_GAME]) @spec_state_test def test_custody_slashing(spec, state): transition_to_valid_shard_slot(spec, state) # Build shard block shard = 0 committee_index = get_committee_index_of_shard(spec, state, state.slot, shard) # Create slashable shard block body validator_index = spec.get_beacon_committee(state, state.slot, committee_index)[0] custody_secret = spec.get_custody_secret( state, validator_index, privkeys[validator_index], 
spec.get_current_epoch(state), ) slashable_body = get_custody_slashable_test_vector(spec, custody_secret, length=100, slashable=True) shard_block = build_shard_block(spec, state, shard, body=slashable_body, slot=state.slot, signed=True) shard_block_dict: Dict[spec.Shard, Sequence[spec.SignedShardBlock]] = {shard: [shard_block]} shard_transitions = get_shard_transitions(spec, state, shard_block_dict) attestation = get_valid_attestation( spec, state, index=committee_index, shard_transition=shard_transitions[shard], signed=True, ) block = build_empty_block(spec, state, slot=state.slot + 1) block.body.attestations = [attestation] block.body.shard_transitions = shard_transitions _, _, _ = run_beacon_block(spec, state, block) transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * (spec.EPOCHS_PER_CUSTODY_PERIOD - 1)) block = build_empty_block(spec, state, slot=state.slot + 1) custody_slashing = get_valid_custody_slashing( spec, state, attestation, shard_transitions[shard], custody_secret, slashable_body ) block.body.custody_slashings = [custody_slashing] yield from run_beacon_block(spec, state, block)
5,545
36.221477
118
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/custody_game/sanity/__init__.py
0
0
0
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/custody_game/epoch_processing/test_process_reveal_deadlines.py
from eth2spec.test.helpers.custody import ( get_valid_custody_key_reveal, ) from eth2spec.test.helpers.state import transition_to from eth2spec.test.context import ( with_phases, with_presets, spec_state_test, ) from eth2spec.test.helpers.constants import ( CUSTODY_GAME, MINIMAL, ) from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with from eth2spec.test.custody_game.block_processing.test_process_custody_key_reveal import ( run_custody_key_reveal_processing, ) def run_process_challenge_deadlines(spec, state): yield from run_epoch_processing_with(spec, state, 'process_challenge_deadlines') @with_phases([CUSTODY_GAME]) @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_validator_slashed_after_reveal_deadline(spec, state): assert state.validators[0].slashed == 0 transition_to(spec, state, spec.get_randao_epoch_for_custody_period(0, 0) * spec.SLOTS_PER_EPOCH) # Need to run at least one reveal so that not all validators are slashed (otherwise spec fails to find proposers) custody_key_reveal = get_valid_custody_key_reveal(spec, state, validator_index=1) _, _, _ = run_custody_key_reveal_processing(spec, state, custody_key_reveal) transition_to(spec, state, state.slot + spec.EPOCHS_PER_CUSTODY_PERIOD * spec.SLOTS_PER_EPOCH) state.validators[0].slashed = 0 yield from run_process_challenge_deadlines(spec, state) assert state.validators[0].slashed == 1 @with_phases([CUSTODY_GAME]) @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_validator_not_slashed_after_reveal(spec, state): transition_to(spec, state, spec.EPOCHS_PER_CUSTODY_PERIOD * spec.SLOTS_PER_EPOCH) custody_key_reveal = get_valid_custody_key_reveal(spec, state) _, _, _ = run_custody_key_reveal_processing(spec, state, custody_key_reveal) assert state.validators[0].slashed == 0 transition_to(spec, state, state.slot + spec.EPOCHS_PER_CUSTODY_PERIOD * spec.SLOTS_PER_EPOCH) yield from run_process_challenge_deadlines(spec, state) assert 
state.validators[0].slashed == 0
2,118
34.316667
117
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/custody_game/epoch_processing/test_process_challenge_deadlines.py
from eth2spec.test.helpers.custody import ( get_valid_chunk_challenge, get_sample_shard_transition, ) from eth2spec.test.helpers.attestations import ( get_valid_attestation, ) from eth2spec.test.helpers.state import transition_to, transition_to_valid_shard_slot from eth2spec.test.context import ( spec_state_test, with_phases, with_presets, ) from eth2spec.test.phase0.block_processing.test_process_attestation import run_attestation_processing from eth2spec.test.helpers.constants import ( CUSTODY_GAME, MINIMAL, ) from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with from eth2spec.test.custody_game.block_processing.test_process_chunk_challenge import ( run_chunk_challenge_processing, ) def run_process_challenge_deadlines(spec, state): yield from run_epoch_processing_with(spec, state, 'process_challenge_deadlines') @with_phases([CUSTODY_GAME]) @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_validator_slashed_after_chunk_challenge(spec, state): transition_to_valid_shard_slot(spec, state) transition_to(spec, state, state.slot + 1) # Make len(offset_slots) == 1 shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) attestation = get_valid_attestation(spec, state, index=shard, signed=True, shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) _, _, _ = run_attestation_processing(spec, state, attestation) validator_index = spec.get_beacon_committee( state, attestation.data.slot, attestation.data.index )[0] challenge = get_valid_chunk_challenge(spec, state, attestation, shard_transition) _, _, _ = run_chunk_challenge_processing(spec, state, challenge) assert state.validators[validator_index].slashed == 0 transition_to(spec, state, state.slot + spec.MAX_CHUNK_CHALLENGE_DELAY * spec.SLOTS_PER_EPOCH) state.validators[validator_index].slashed = 0 yield from 
run_process_challenge_deadlines(spec, state) assert state.validators[validator_index].slashed == 1
2,270
33.938462
102
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/custody_game/epoch_processing/__init__.py
0
0
0
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/test/custody_game/epoch_processing/test_process_custody_final_updates.py
from eth2spec.test.helpers.constants import ( CUSTODY_GAME, ) from eth2spec.test.helpers.custody import ( get_valid_chunk_challenge, get_valid_custody_chunk_response, get_valid_custody_key_reveal, get_sample_shard_transition ) from eth2spec.test.helpers.attestations import ( get_valid_attestation, ) from eth2spec.test.helpers.state import next_epoch_via_block, transition_to, transition_to_valid_shard_slot from eth2spec.test.context import ( with_phases, spec_state_test, ) from eth2spec.test.phase0.block_processing.test_process_attestation import run_attestation_processing from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with from eth2spec.test.custody_game.block_processing.test_process_chunk_challenge import ( run_chunk_challenge_processing, run_custody_chunk_response_processing, ) from eth2spec.test.custody_game.block_processing.test_process_custody_key_reveal import ( run_custody_key_reveal_processing, ) def run_process_custody_final_updates(spec, state): yield from run_epoch_processing_with(spec, state, 'process_custody_final_updates') @with_phases([CUSTODY_GAME]) @spec_state_test def test_validator_withdrawal_delay(spec, state): transition_to_valid_shard_slot(spec, state) transition_to(spec, state, state.slot + 1) # Make len(offset_slots) == 1 spec.initiate_validator_exit(state, 0) assert state.validators[0].withdrawable_epoch < spec.FAR_FUTURE_EPOCH yield from run_process_custody_final_updates(spec, state) assert state.validators[0].withdrawable_epoch == spec.FAR_FUTURE_EPOCH @with_phases([CUSTODY_GAME]) @spec_state_test def test_validator_withdrawal_reenable_after_custody_reveal(spec, state): transition_to_valid_shard_slot(spec, state) transition_to(spec, state, state.slot + 1) # Make len(offset_slots) == 1 spec.initiate_validator_exit(state, 0) assert state.validators[0].withdrawable_epoch < spec.FAR_FUTURE_EPOCH next_epoch_via_block(spec, state) assert state.validators[0].withdrawable_epoch == spec.FAR_FUTURE_EPOCH while 
spec.get_current_epoch(state) < state.validators[0].exit_epoch: next_epoch_via_block(spec, state) while (state.validators[0].next_custody_secret_to_reveal <= spec.get_custody_period_for_validator(0, state.validators[0].exit_epoch - 1)): custody_key_reveal = get_valid_custody_key_reveal(spec, state, validator_index=0) _, _, _ = run_custody_key_reveal_processing(spec, state, custody_key_reveal) yield from run_process_custody_final_updates(spec, state) assert state.validators[0].withdrawable_epoch < spec.FAR_FUTURE_EPOCH @with_phases([CUSTODY_GAME]) @spec_state_test def test_validator_withdrawal_suspend_after_chunk_challenge(spec, state): transition_to_valid_shard_slot(spec, state) transition_to(spec, state, state.slot + 1) # Make len(offset_slots) == 1 shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) attestation = get_valid_attestation(spec, state, index=shard, signed=True, shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) _, _, _ = run_attestation_processing(spec, state, attestation) validator_index = spec.get_beacon_committee( state, attestation.data.slot, attestation.data.index )[0] spec.initiate_validator_exit(state, validator_index) assert state.validators[validator_index].withdrawable_epoch < spec.FAR_FUTURE_EPOCH transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH) assert state.validators[validator_index].withdrawable_epoch == spec.FAR_FUTURE_EPOCH while spec.get_current_epoch(state) < state.validators[validator_index].exit_epoch: next_epoch_via_block(spec, state) while (state.validators[validator_index].next_custody_secret_to_reveal <= spec.get_custody_period_for_validator( validator_index, state.validators[validator_index].exit_epoch - 1)): custody_key_reveal = get_valid_custody_key_reveal(spec, state, validator_index=validator_index) _, _, _ = 
run_custody_key_reveal_processing(spec, state, custody_key_reveal) next_epoch_via_block(spec, state) challenge = get_valid_chunk_challenge(spec, state, attestation, shard_transition) _, _, _ = run_chunk_challenge_processing(spec, state, challenge) yield from run_process_custody_final_updates(spec, state) assert state.validators[validator_index].withdrawable_epoch == spec.FAR_FUTURE_EPOCH @with_phases([CUSTODY_GAME]) @spec_state_test def test_validator_withdrawal_resume_after_chunk_challenge_response(spec, state): transition_to_valid_shard_slot(spec, state) transition_to(spec, state, state.slot + 1) # Make len(offset_slots) == 1 shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) attestation = get_valid_attestation(spec, state, index=shard, signed=True, shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) _, _, _ = run_attestation_processing(spec, state, attestation) validator_index = spec.get_beacon_committee( state, attestation.data.slot, attestation.data.index )[0] spec.initiate_validator_exit(state, validator_index) assert state.validators[validator_index].withdrawable_epoch < spec.FAR_FUTURE_EPOCH next_epoch_via_block(spec, state) assert state.validators[validator_index].withdrawable_epoch == spec.FAR_FUTURE_EPOCH while spec.get_current_epoch(state) < state.validators[validator_index].exit_epoch: next_epoch_via_block(spec, state) while (state.validators[validator_index].next_custody_secret_to_reveal <= spec.get_custody_period_for_validator( validator_index, state.validators[validator_index].exit_epoch - 1)): custody_key_reveal = get_valid_custody_key_reveal(spec, state, validator_index=validator_index) _, _, _ = run_custody_key_reveal_processing(spec, state, custody_key_reveal) next_epoch_via_block(spec, state) challenge = get_valid_chunk_challenge(spec, state, attestation, shard_transition) _, 
_, _ = run_chunk_challenge_processing(spec, state, challenge) next_epoch_via_block(spec, state) assert state.validators[validator_index].withdrawable_epoch == spec.FAR_FUTURE_EPOCH chunk_challenge_index = state.custody_chunk_challenge_index - 1 custody_response = get_valid_custody_chunk_response( spec, state, challenge, chunk_challenge_index, block_length_or_custody_data=2**15 // 3) _, _, _ = run_custody_chunk_response_processing(spec, state, custody_response) yield from run_process_custody_final_updates(spec, state) assert state.validators[validator_index].withdrawable_epoch < spec.FAR_FUTURE_EPOCH
7,210
39.511236
107
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/utils/merkle_minimal.py
from eth2spec.utils.hash_function import hash from math import log2 ZERO_BYTES32 = b'\x00' * 32 zerohashes = [ZERO_BYTES32] for layer in range(1, 100): zerohashes.append(hash(zerohashes[layer - 1] + zerohashes[layer - 1])) def calc_merkle_tree_from_leaves(values, layer_count=32): values = list(values) tree = [values[::]] for h in range(layer_count): if len(values) % 2 == 1: values.append(zerohashes[h]) values = [hash(values[i] + values[i + 1]) for i in range(0, len(values), 2)] tree.append(values[::]) return tree def get_merkle_tree(values, pad_to=None): layer_count = (len(values) - 1).bit_length() if pad_to is None else (pad_to - 1).bit_length() if len(values) == 0: return zerohashes[layer_count] return calc_merkle_tree_from_leaves(values, layer_count) def get_merkle_root(values, pad_to=1): if pad_to == 0: return zerohashes[0] layer_count = int(log2(pad_to)) if len(values) == 0: return zerohashes[layer_count] return calc_merkle_tree_from_leaves(values, layer_count)[-1][0] def get_merkle_proof(tree, item_index, tree_len=None): proof = [] for i in range(tree_len if tree_len is not None else len(tree)): subindex = (item_index // 2**i) ^ 1 proof.append(tree[i][subindex] if subindex < len(tree[i]) else zerohashes[i]) return proof def merkleize_chunks(chunks, limit=None): # If no limit is defined, we are just merkleizing chunks (e.g. SSZ container). if limit is None: limit = len(chunks) count = len(chunks) # See if the input is within expected size. # If not, a list-limit is set incorrectly, or a value is unexpectedly large. assert count <= limit if limit == 0: return zerohashes[0] depth = max(count - 1, 0).bit_length() max_depth = (limit - 1).bit_length() tmp = [None for _ in range(max_depth + 1)] def merge(h, i): j = 0 while True: if i & (1 << j) == 0: if i == count and j < depth: h = hash(h + zerohashes[j]) # keep going if we are complementing the void to the next power of 2 else: break else: h = hash(tmp[j] + h) j += 1 tmp[j] = h # merge in leaf by leaf. 
for i in range(count): merge(chunks[i], i) # complement with 0 if empty, or if not the right power of 2 if 1 << depth != count: merge(zerohashes[0], count) # the next power of two may be smaller than the ultimate virtual size, complement with zero-hashes at each depth. for j in range(depth, max_depth): tmp[j + 1] = hash(tmp[j] + zerohashes[j]) return tmp[max_depth]
2,774
29.833333
117
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/utils/kzg.py
# Ref: # - https://github.com/ethereum/research/blob/8f084630528ba33d92b2bc05edf5338dd193c6f1/trusted_setup/trusted_setup.py # - https://github.com/asn-d6/kzgverify import json import os from typing import ( Tuple, Sequence, ) from pathlib import Path from eth_utils import encode_hex from py_ecc.typing import ( Optimized_Point3D, ) from eth2spec.utils import bls from eth2spec.utils.bls import ( BLS_MODULUS, ) PRIMITIVE_ROOT_OF_UNITY = 7 def generate_setup(generator: Optimized_Point3D, secret: int, length: int) -> Tuple[Optimized_Point3D]: """ Generate trusted setup of ``generator`` in ``length``. """ result = [generator] for _ in range(1, length): result.append(bls.multiply(result[-1], secret)) return tuple(result) def fft(vals: Sequence[Optimized_Point3D], modulus: int, domain: int) -> Sequence[Optimized_Point3D]: """ FFT for group elements """ if len(vals) == 1: return vals L = fft(vals[::2], modulus, domain[::2]) R = fft(vals[1::2], modulus, domain[::2]) o = [0] * len(vals) for i, (x, y) in enumerate(zip(L, R)): y_times_root = bls.multiply(y, domain[i]) o[i] = bls.add(x, y_times_root) o[i + len(L)] = bls.add(x, bls.neg(y_times_root)) return o def compute_root_of_unity(length: int) -> int: """ Generate a w such that ``w**length = 1``. """ assert (BLS_MODULUS - 1) % length == 0 return pow(PRIMITIVE_ROOT_OF_UNITY, (BLS_MODULUS - 1) // length, BLS_MODULUS) def compute_roots_of_unity(field_elements_per_blob: int) -> Tuple[int]: """ Compute a list of roots of unity for a given order. The order must divide the BLS multiplicative group order, i.e. 
BLS_MODULUS - 1 """ field_elements_per_blob = int(field_elements_per_blob) # to non-SSZ int assert (BLS_MODULUS - 1) % field_elements_per_blob == 0 root_of_unity = compute_root_of_unity(length=field_elements_per_blob) roots = [] current_root_of_unity = 1 for _ in range(field_elements_per_blob): roots.append(current_root_of_unity) current_root_of_unity = current_root_of_unity * root_of_unity % BLS_MODULUS return tuple(roots) def get_lagrange(setup: Sequence[Optimized_Point3D]) -> Tuple[bytes]: """ Convert a G1 or G2 portion of a setup into the Lagrange basis. """ root_of_unity = compute_root_of_unity(len(setup)) assert pow(root_of_unity, len(setup), BLS_MODULUS) == 1 domain = [pow(root_of_unity, i, BLS_MODULUS) for i in range(len(setup))] # TODO: introduce an IFFT function for simplicity fft_output = fft(setup, BLS_MODULUS, domain) inv_length = pow(len(setup), BLS_MODULUS - 2, BLS_MODULUS) return tuple(bls.G1_to_bytes48(bls.multiply(fft_output[-i], inv_length)) for i in range(len(fft_output))) def dump_kzg_trusted_setup_files(secret: int, g1_length: int, g2_length: int, output_dir: str) -> None: bls.use_fastest() setup_g1 = generate_setup(bls.G1(), secret, g1_length) setup_g2 = generate_setup(bls.G2(), secret, g2_length) setup_g1_lagrange = get_lagrange(setup_g1) roots_of_unity = compute_roots_of_unity(g1_length) serailized_setup_g1 = [encode_hex(bls.G1_to_bytes48(p)) for p in setup_g1] serialized_setup_g2 = [encode_hex(bls.G2_to_bytes96(p)) for p in setup_g2] serialized_setup_g1_lagrange = [encode_hex(x) for x in setup_g1_lagrange] output_dir_path = Path(output_dir) if not os.path.exists(output_dir_path): os.makedirs(output_dir_path) print("Created directory: ", output_dir_path) file_path = output_dir_path / 'testing_trusted_setups.json' with open(file_path, 'w+') as f: json.dump( { "setup_G1": serailized_setup_g1, "setup_G2": serialized_setup_g2, "setup_G1_lagrange": serialized_setup_g1_lagrange, "roots_of_unity": roots_of_unity, }, f) print(f'Generated trusted 
setup file: {file_path}\n')
4,018
32.773109
117
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/utils/test_merkle_proof_util.py
import pytest # Note: these functions are extract from merkle-proofs.md (deprecated), # the tests are temporary to show correctness while the document is still there. def get_power_of_two_ceil(x: int) -> int: if x <= 1: return 1 elif x == 2: return 2 else: return 2 * get_power_of_two_ceil((x + 1) // 2) def get_power_of_two_floor(x: int) -> int: if x <= 1: return 1 if x == 2: return x else: return 2 * get_power_of_two_floor(x // 2) power_of_two_ceil_cases = [ (0, 1), (1, 1), (2, 2), (3, 4), (4, 4), (5, 8), (6, 8), (7, 8), (8, 8), (9, 16), ] power_of_two_floor_cases = [ (0, 1), (1, 1), (2, 2), (3, 2), (4, 4), (5, 4), (6, 4), (7, 4), (8, 8), (9, 8), ] @pytest.mark.parametrize( 'value,expected', power_of_two_ceil_cases, ) def test_get_power_of_two_ceil(value, expected): assert get_power_of_two_ceil(value) == expected @pytest.mark.parametrize( 'value,expected', power_of_two_floor_cases, ) def test_get_power_of_two_floor(value, expected): assert get_power_of_two_floor(value) == expected
1,112
22.1875
84
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/utils/bls.py
from py_ecc.bls import G2ProofOfPossession as py_ecc_bls from py_ecc.bls.g2_primatives import signature_to_G2 as _signature_to_G2 from py_ecc.optimized_bls12_381 import ( # noqa: F401 G1 as py_ecc_G1, G2 as py_ecc_G2, Z1 as py_ecc_Z1, add as py_ecc_add, multiply as py_ecc_mul, neg as py_ecc_neg, pairing as py_ecc_pairing, final_exponentiate as py_ecc_final_exponentiate, FQ12 as py_ecc_GT, ) from py_ecc.bls.g2_primitives import ( # noqa: F401 curve_order as BLS_MODULUS, G1_to_pubkey as py_ecc_G1_to_bytes48, pubkey_to_G1 as py_ecc_bytes48_to_G1, G2_to_signature as py_ecc_G2_to_bytes96, signature_to_G2 as py_ecc_bytes96_to_G2, ) from py_arkworks_bls12381 import ( G1Point as arkworks_G1, G2Point as arkworks_G2, Scalar as arkworks_Scalar, GT as arkworks_GT, ) import milagro_bls_binding as milagro_bls # noqa: F401 for BLS switching option import py_arkworks_bls12381 as arkworks_bls # noqa: F401 for BLS switching option class fastest_bls: G1 = arkworks_G1 G2 = arkworks_G2 Scalar = arkworks_Scalar GT = arkworks_GT _AggregatePKs = milagro_bls._AggregatePKs Sign = milagro_bls.Sign Verify = milagro_bls.Verify Aggregate = milagro_bls.Aggregate AggregateVerify = milagro_bls.AggregateVerify FastAggregateVerify = milagro_bls.FastAggregateVerify SkToPk = milagro_bls.SkToPk # Flag to make BLS active or not. Used for testing, do not ignore BLS in production unless you know what you are doing. bls_active = True # To change bls implementation, default to PyECC for correctness. Milagro is a good faster alternative. 
bls = py_ecc_bls STUB_SIGNATURE = b'\x11' * 96 STUB_PUBKEY = b'\x22' * 48 G2_POINT_AT_INFINITY = b'\xc0' + b'\x00' * 95 STUB_COORDINATES = _signature_to_G2(G2_POINT_AT_INFINITY) def use_milagro(): """ Shortcut to use Milagro as BLS library """ global bls bls = milagro_bls def use_arkworks(): """ Shortcut to use Arkworks as BLS library """ global bls bls = arkworks_bls def use_py_ecc(): """ Shortcut to use Py-ecc as BLS library """ global bls bls = py_ecc_bls def use_fastest(): """ Shortcut to use Milagro for signatures and Arkworks for other BLS operations """ global bls bls = fastest_bls def only_with_bls(alt_return=None): """ Decorator factory to make a function only run when BLS is active. Otherwise return the default. """ def runner(fn): def entry(*args, **kw): if bls_active: return fn(*args, **kw) else: return alt_return return entry return runner @only_with_bls(alt_return=True) def Verify(PK, message, signature): try: if bls == arkworks_bls: # no signature API in arkworks result = py_ecc_bls.Verify(PK, message, signature) else: result = bls.Verify(PK, message, signature) except Exception: result = False finally: return result @only_with_bls(alt_return=True) def AggregateVerify(pubkeys, messages, signature): try: if bls == arkworks_bls: # no signature API in arkworks result = py_ecc_bls.AggregateVerify(list(pubkeys), list(messages), signature) else: result = bls.AggregateVerify(list(pubkeys), list(messages), signature) except Exception: result = False finally: return result @only_with_bls(alt_return=True) def FastAggregateVerify(pubkeys, message, signature): try: if bls == arkworks_bls: # no signature API in arkworks result = py_ecc_bls.FastAggregateVerify(list(pubkeys), message, signature) else: result = bls.FastAggregateVerify(list(pubkeys), message, signature) except Exception: result = False finally: return result @only_with_bls(alt_return=STUB_SIGNATURE) def Aggregate(signatures): if bls == arkworks_bls: # no signature API in arkworks return 
py_ecc_bls.Aggregate(signatures) return bls.Aggregate(signatures) @only_with_bls(alt_return=STUB_SIGNATURE) def Sign(SK, message): if bls == arkworks_bls: # no signature API in arkworks return py_ecc_bls.Sign(SK, message) elif bls == py_ecc_bls: return bls.Sign(SK, message) else: return bls.Sign(SK.to_bytes(32, 'big'), message) @only_with_bls(alt_return=STUB_COORDINATES) def signature_to_G2(signature): return _signature_to_G2(signature) @only_with_bls(alt_return=STUB_PUBKEY) def AggregatePKs(pubkeys): if bls == py_ecc_bls: assert all(bls.KeyValidate(pubkey) for pubkey in pubkeys) elif bls == milagro_bls: # milagro_bls._AggregatePKs checks KeyValidate internally pass if bls == arkworks_bls: # no signature API in arkworks return py_ecc_bls._AggregatePKs(list(pubkeys)) return bls._AggregatePKs(list(pubkeys)) @only_with_bls(alt_return=STUB_SIGNATURE) def SkToPk(SK): if bls == py_ecc_bls or bls == arkworks_bls: # no signature API in arkworks return py_ecc_bls.SkToPk(SK) else: return bls.SkToPk(SK.to_bytes(32, 'big')) def pairing_check(values): if bls == arkworks_bls or bls == fastest_bls: p_q_1, p_q_2 = values g1s = [p_q_1[0], p_q_2[0]] g2s = [p_q_1[1], p_q_2[1]] return arkworks_GT.multi_pairing(g1s, g2s) == arkworks_GT.one() else: p_q_1, p_q_2 = values final_exponentiation = py_ecc_final_exponentiate( py_ecc_pairing(p_q_1[1], p_q_1[0], final_exponentiate=False) * py_ecc_pairing(p_q_2[1], p_q_2[0], final_exponentiate=False) ) return final_exponentiation == py_ecc_GT.one() def add(lhs, rhs): """ Performs point addition of `lhs` and `rhs`. The points can either be in G1 or G2. """ if bls == arkworks_bls or bls == fastest_bls: return lhs + rhs return py_ecc_add(lhs, rhs) def multiply(point, scalar): """ Performs Scalar multiplication between `point` and `scalar`. 
`point` can either be in G1 or G2 """ if bls == arkworks_bls or bls == fastest_bls: int_as_bytes = scalar.to_bytes(32, 'little') scalar = arkworks_Scalar.from_le_bytes(int_as_bytes) return point * scalar return py_ecc_mul(point, scalar) def neg(point): """ Returns the point negation of `point` `point` can either be in G1 or G2 """ if bls == arkworks_bls or bls == fastest_bls: return -point return py_ecc_neg(point) def Z1(): """ Returns the identity point in G1 """ if bls == arkworks_bls or bls == fastest_bls: return arkworks_G1.identity() return py_ecc_Z1 def G1(): """ Returns the chosen generator point in G1 """ if bls == arkworks_bls or bls == fastest_bls: return arkworks_G1() return py_ecc_G1 def G2(): """ Returns the chosen generator point in G2 """ if bls == arkworks_bls or bls == fastest_bls: return arkworks_G2() return py_ecc_G2 def G1_to_bytes48(point): """ Serializes a point in G1. Returns a bytearray of size 48 as we use the compressed format """ if bls == arkworks_bls or bls == fastest_bls: return bytes(point.to_compressed_bytes()) return py_ecc_G1_to_bytes48(point) def G2_to_bytes96(point): """ Serializes a point in G2. Returns a bytearray of size 96 as we use the compressed format """ if bls == arkworks_bls or bls == fastest_bls: return bytes(point.to_compressed_bytes()) return py_ecc_G2_to_bytes96(point) def bytes48_to_G1(bytes48): """ Deserializes a purported compressed serialized point in G1. - No subgroup checks are performed - If the bytearray is not a valid serialization of a point in G1, then this method will raise an exception """ if bls == arkworks_bls or bls == fastest_bls: return arkworks_G1.from_compressed_bytes_unchecked(bytes48) return py_ecc_bytes48_to_G1(bytes48) def bytes96_to_G2(bytes96): """ Deserializes a purported compressed serialized point in G2. 
- No subgroup checks are performed - If the bytearray is not a valid serialization of a point in G2, then this method will raise an exception """ if bls == arkworks_bls or bls == fastest_bls: return arkworks_G2.from_compressed_bytes_unchecked(bytes96) return py_ecc_bytes96_to_G2(bytes96) @only_with_bls(alt_return=True) def KeyValidate(pubkey): return py_ecc_bls.KeyValidate(pubkey)
8,681
26.388013
119
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/utils/__init__.py
0
0
0
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/utils/test_merkle_minimal.py
import pytest from .merkle_minimal import zerohashes, merkleize_chunks, get_merkle_root from .hash_function import hash def h(a: bytes, b: bytes) -> bytes: return hash(a + b) def e(v: int) -> bytes: # prefix with 0xfff... to make it non-zero return b'\xff' * 28 + v.to_bytes(length=4, byteorder='little') def z(i: int) -> bytes: return zerohashes[i] cases = [ # limit 0: always zero hash (0, 0, z(0)), (1, 0, None), # cut-off due to limit (2, 0, None), # cut-off due to limit # limit 1: padded to 1 element if not already. Returned (like identity func) (0, 1, z(0)), (1, 1, e(0)), (2, 1, None), # cut-off due to limit (1, 1, e(0)), (0, 2, h(z(0), z(0))), (1, 2, h(e(0), z(0))), (2, 2, h(e(0), e(1))), (3, 2, None), # cut-off due to limit (16, 2, None), # bigger cut-off due to limit (0, 4, h(h(z(0), z(0)), z(1))), (1, 4, h(h(e(0), z(0)), z(1))), (2, 4, h(h(e(0), e(1)), z(1))), (3, 4, h(h(e(0), e(1)), h(e(2), z(0)))), (4, 4, h(h(e(0), e(1)), h(e(2), e(3)))), (5, 4, None), # cut-off due to limit (0, 8, h(h(h(z(0), z(0)), z(1)), z(2))), (1, 8, h(h(h(e(0), z(0)), z(1)), z(2))), (2, 8, h(h(h(e(0), e(1)), z(1)), z(2))), (3, 8, h(h(h(e(0), e(1)), h(e(2), z(0))), z(2))), (4, 8, h(h(h(e(0), e(1)), h(e(2), e(3))), z(2))), (5, 8, h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), z(0)), z(1)))), (6, 8, h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), e(5)), h(z(0), z(0))))), (7, 8, h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), e(5)), h(e(6), z(0))))), (8, 8, h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), e(5)), h(e(6), e(7))))), (9, 8, None), # cut-off due to limit (0, 16, h(h(h(h(z(0), z(0)), z(1)), z(2)), z(3))), (1, 16, h(h(h(h(e(0), z(0)), z(1)), z(2)), z(3))), (2, 16, h(h(h(h(e(0), e(1)), z(1)), z(2)), z(3))), (3, 16, h(h(h(h(e(0), e(1)), h(e(2), z(0))), z(2)), z(3))), (4, 16, h(h(h(h(e(0), e(1)), h(e(2), e(3))), z(2)), z(3))), (5, 16, h(h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), z(0)), z(1))), z(3))), (6, 16, h(h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), e(5)), h(z(0), z(0)))), z(3))), (7, 
16, h(h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), e(5)), h(e(6), z(0)))), z(3))), (8, 16, h(h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), e(5)), h(e(6), e(7)))), z(3))), (9, 16, h(h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), e(5)), h(e(6), e(7)))), h(h(h(e(8), z(0)), z(1)), z(2)))), ] @pytest.mark.parametrize( 'count,limit,value', cases, ) def test_merkleize_chunks_and_get_merkle_root(count, limit, value): chunks = [e(i) for i in range(count)] if value is None: bad = False try: merkleize_chunks(chunks, limit=limit) bad = True except AssertionError: pass if bad: assert False, "expected merkleization to be invalid" else: assert merkleize_chunks(chunks, limit=limit) == value assert get_merkle_root(chunks, pad_to=limit) == value
3,043
36.580247
117
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/utils/hash_function.py
from hashlib import sha256 from remerkleable.byte_arrays import Bytes32 from typing import Union ZERO_BYTES32 = b'\x00' * 32 def hash(x: Union[bytes, bytearray, memoryview]) -> Bytes32: return Bytes32(sha256(x).digest())
228
21.9
60
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/utils/ssz/ssz_impl.py
from typing import TypeVar from remerkleable.basic import uint from remerkleable.core import View from remerkleable.byte_arrays import Bytes32 def serialize(obj: View) -> bytes: return obj.encode_bytes() def hash_tree_root(obj: View) -> Bytes32: return Bytes32(obj.get_backing().merkle_root()) def uint_to_bytes(n: uint) -> bytes: return serialize(n) V = TypeVar('V', bound=View) # Helper method for typing copies, and avoiding a example_input.copy() method call, instead of copy(example_input) def copy(obj: V) -> V: return obj.copy()
563
20.692308
114
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/utils/ssz/__init__.py
0
0
0
py
consensus-specs
consensus-specs-master/tests/core/pyspec/eth2spec/utils/ssz/ssz_typing.py
# flake8: noqa # Ignore linter: This module makes importing SSZ types easy, and hides away the underlying library from the spec. from remerkleable.complex import Container, Vector, List from remerkleable.union import Union from remerkleable.basic import boolean, bit, uint, byte, uint8, uint16, uint32, uint64, uint128, uint256 from remerkleable.bitfields import Bitvector, Bitlist from remerkleable.byte_arrays import ByteVector, Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96, ByteList from remerkleable.core import BasicView, View, Path Bytes20 = ByteVector[20] # type: ignore
587
44.230769
113
py
consensus-specs
consensus-specs-master/tests/generators/random/main.py
from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators if __name__ == "__main__": phase_0_mods = {key: 'eth2spec.test.phase0.random.test_' + key for key in [ 'random', ]} altair_mods = {key: 'eth2spec.test.altair.random.test_' + key for key in [ 'random', ]} bellatrix_mods = {key: 'eth2spec.test.bellatrix.random.test_' + key for key in [ 'random', ]} capella_mods = {key: 'eth2spec.test.capella.random.test_' + key for key in [ 'random', ]} deneb_mods = {key: 'eth2spec.test.deneb.random.test_' + key for key in [ 'random', ]} all_mods = { PHASE0: phase_0_mods, ALTAIR: altair_mods, BELLATRIX: bellatrix_mods, CAPELLA: capella_mods, DENEB: deneb_mods, } run_state_test_generators(runner_name="random", all_mods=all_mods)
968
30.258065
85
py
consensus-specs
consensus-specs-master/tests/generators/random/generate.py
""" This test format currently uses code generation to assemble the tests as the current test infra does not have a facility to dynamically generate tests that can be seen by ``pytest``. This will likely change in future releases of the testing infra. NOTE: To add additional scenarios, add test cases below in ``_generate_randomized_scenarios``. """ import sys import random import warnings from typing import Callable import itertools from eth2spec.test.utils.randomized_block_tests import ( no_block, no_op_validation, randomize_state, randomize_state_altair, randomize_state_bellatrix, randomize_state_capella, randomize_state_deneb, random_block, random_block_altair_with_cycling_sync_committee_participation, random_block_bellatrix, random_block_capella, random_block_deneb, last_slot_in_epoch, random_slot_in_epoch, penultimate_slot_in_epoch, epoch_transition, slot_transition, transition_with_random_block, transition_to_leaking, transition_without_leak, ) from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB # Ensure this many blocks are present in *each* randomized scenario BLOCK_TRANSITIONS_COUNT = 2 def _normalize_transition(transition): """ Provide "empty" or "no op" sub-transitions to a given transition. """ if isinstance(transition, Callable): transition = transition() if "epochs_to_skip" not in transition: transition["epochs_to_skip"] = 0 if "slots_to_skip" not in transition: transition["slots_to_skip"] = 0 if "block_producer" not in transition: transition["block_producer"] = no_block if "validation" not in transition: transition["validation"] = no_op_validation return transition def _normalize_scenarios(scenarios): """ "Normalize" a "scenario" so that a producer of a test case does not need to provide every expected key/value. 
""" for scenario in scenarios: transitions = scenario["transitions"] for i, transition in enumerate(transitions): transitions[i] = _normalize_transition(transition) def _flatten(t): leak_transition = t[0] result = [leak_transition] for transition_batch in t[1]: for transition in transition_batch: if isinstance(transition, tuple): for subtransition in transition: result.append(subtransition) else: result.append(transition) return result def _generate_randomized_scenarios(block_randomizer): """ Generates a set of randomized testing scenarios. Return a sequence of "scenarios" where each scenario: 1. Provides some setup 2. Provides a sequence of transitions that mutate the state in some way, possibly yielding blocks along the way NOTE: scenarios are "normalized" with empty/no-op elements before returning to the test generation to facilitate brevity when writing scenarios by hand. NOTE: the main block driver builds a block for the **next** slot, so the slot transitions are offset by -1 to target certain boundaries. """ # go forward 0 or 1 epochs epochs_set = ( epoch_transition(n=0), epoch_transition(n=1), ) # within those epochs, go forward to: slots_set = ( # the first slot in an epoch (see note in docstring about offsets...) slot_transition(last_slot_in_epoch), # the second slot in an epoch slot_transition(n=0), # some random number of slots, but not at epoch boundaries slot_transition(random_slot_in_epoch), # the last slot in an epoch (see note in docstring about offsets...) slot_transition(penultimate_slot_in_epoch), ) # and produce a block... 
blocks_set = ( transition_with_random_block(block_randomizer), ) rng = random.Random(1447) all_skips = list(itertools.product(epochs_set, slots_set)) randomized_skips = ( rng.sample(all_skips, len(all_skips)) for _ in range(BLOCK_TRANSITIONS_COUNT) ) # build a set of block transitions from combinations of sub-transitions transitions_generator = ( itertools.product(prefix, blocks_set) for prefix in randomized_skips ) block_transitions = zip(*transitions_generator) # and preface each set of block transitions with the possible leak transitions leak_transitions = ( transition_without_leak, transition_to_leaking, ) scenarios = [ {"transitions": _flatten(t)} for t in itertools.product(leak_transitions, block_transitions) ] _normalize_scenarios(scenarios) return scenarios def _id_from_scenario(test_description): """ Construct a name for the scenario based its data. """ def _to_id_part(prefix, x): suffix = str(x) if isinstance(x, Callable): suffix = x.__name__ return f"{prefix}{suffix}" def _id_from_transition(transition): return ",".join(( _to_id_part("epochs:", transition["epochs_to_skip"]), _to_id_part("slots:", transition["slots_to_skip"]), _to_id_part("with-block:", transition["block_producer"]) )) return "|".join(map(_id_from_transition, test_description["transitions"])) test_imports_template = """\"\"\" This module is generated from the ``random`` test generator. Please do not edit this file manually. See the README for that generator for more information. 
\"\"\" from eth2spec.test.helpers.constants import {phase} from eth2spec.test.context import ( misc_balances_in_default_range_with_many_validators, with_phases, zero_activation_threshold, only_generator, ) from eth2spec.test.context import ( always_bls, spec_test, with_custom_state, single_phase, ) from eth2spec.test.utils.randomized_block_tests import ( run_generated_randomized_test, )""" test_template = """ @only_generator(\"randomized test for broad coverage, not point-to-point CI\") @with_phases([{phase}]) @with_custom_state( balances_fn=misc_balances_in_default_range_with_many_validators, threshold_fn=zero_activation_threshold ) @spec_test @single_phase @always_bls def test_randomized_{index}(spec, state): # scenario as high-level, informal text: {name_as_comment} scenario = {scenario} # noqa: E501 yield from run_generated_randomized_test( spec, state, scenario, )""" def _to_comment(name, indent_level): parts = name.split("|") indentation = " " * indent_level parts = [ indentation + "# " + part for part in parts ] return "\n".join(parts) def run_generate_tests_to_std_out(phase, state_randomizer, block_randomizer): scenarios = _generate_randomized_scenarios(block_randomizer) test_content = {"phase": phase.upper()} test_imports = test_imports_template.format(**test_content) test_file = [test_imports] for index, scenario in enumerate(scenarios): # required for setup phase scenario["state_randomizer"] = state_randomizer.__name__ # need to pass name, rather than function reference... 
transitions = scenario["transitions"] for transition in transitions: for name, value in transition.items(): if isinstance(value, Callable): transition[name] = value.__name__ test_content = test_content.copy() name = _id_from_scenario(scenario) test_content["name_as_comment"] = _to_comment(name, 1) test_content["index"] = index test_content["scenario"] = scenario test_instance = test_template.format(**test_content) test_file.append(test_instance) print("\n\n".join(test_file)) if __name__ == "__main__": did_generate = False if PHASE0 in sys.argv: did_generate = True run_generate_tests_to_std_out( PHASE0, state_randomizer=randomize_state, block_randomizer=random_block, ) if ALTAIR in sys.argv: did_generate = True run_generate_tests_to_std_out( ALTAIR, state_randomizer=randomize_state_altair, block_randomizer=random_block_altair_with_cycling_sync_committee_participation, ) if BELLATRIX in sys.argv: did_generate = True run_generate_tests_to_std_out( BELLATRIX, state_randomizer=randomize_state_bellatrix, block_randomizer=random_block_bellatrix, ) if CAPELLA in sys.argv: did_generate = True run_generate_tests_to_std_out( CAPELLA, state_randomizer=randomize_state_capella, block_randomizer=random_block_capella, ) if DENEB in sys.argv: did_generate = True run_generate_tests_to_std_out( DENEB, state_randomizer=randomize_state_deneb, block_randomizer=random_block_deneb, ) if not did_generate: warnings.warn("no phase given for test generation")
9,175
31.083916
94
py
consensus-specs
consensus-specs-master/tests/generators/ssz_static/main.py
from random import Random from typing import Iterable from inspect import getmembers, isclass from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing from eth2spec.debug import random_value, encode from eth2spec.test.helpers.constants import TESTGEN_FORKS, MINIMAL, MAINNET from eth2spec.test.context import spec_targets from eth2spec.utils.ssz.ssz_typing import Container from eth2spec.utils.ssz.ssz_impl import ( hash_tree_root, serialize, ) MAX_BYTES_LENGTH = 1000 MAX_LIST_LENGTH = 10 def create_test_case(rng: Random, typ, mode: random_value.RandomizationMode, chaos: bool) -> Iterable[gen_typing.TestCasePart]: value = random_value.get_random_ssz_object(rng, typ, MAX_BYTES_LENGTH, MAX_LIST_LENGTH, mode, chaos) yield "value", "data", encode.encode(value) yield "serialized", "ssz", serialize(value) roots_data = { "root": '0x' + hash_tree_root(value).hex() } yield "roots", "data", roots_data def get_spec_ssz_types(spec): return [ (name, value) for (name, value) in getmembers(spec, isclass) if issubclass(value, Container) and value != Container # only the subclasses, not the imported base class ] def ssz_static_cases(fork_name: str, preset_name: str, seed: int, name, ssz_type, mode: random_value.RandomizationMode, chaos: bool, count: int): random_mode_name = mode.to_name() # Reproducible RNG rng = Random(seed) for i in range(count): yield gen_typing.TestCase( fork_name=fork_name, preset_name=preset_name, runner_name='ssz_static', handler_name=name, suite_name=f"ssz_{random_mode_name}{'_chaos' if chaos else ''}", case_name=f"case_{i}", case_fn=lambda: create_test_case(rng, ssz_type, mode, chaos) ) def create_provider(fork_name, preset_name: str, seed: int, mode: random_value.RandomizationMode, chaos: bool, cases_if_random: int) -> gen_typing.TestProvider: def prepare_fn() -> None: return def cases_fn() -> Iterable[gen_typing.TestCase]: count = cases_if_random if chaos or mode.is_changing() else 1 spec = spec_targets[preset_name][fork_name] for (i, (name, 
ssz_type)) in enumerate(get_spec_ssz_types(spec)): yield from ssz_static_cases(fork_name, preset_name, seed * 1000 + i, name, ssz_type, mode, chaos, count) return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn) if __name__ == "__main__": # [(seed, config name, randomization mode, chaos on/off, cases_if_random)] settings = [] seed = 1 for mode in random_value.RandomizationMode: settings.append((seed, MINIMAL, mode, False, 30)) seed += 1 settings.append((seed, MINIMAL, random_value.RandomizationMode.mode_random, True, 30)) seed += 1 settings.append((seed, MAINNET, random_value.RandomizationMode.mode_random, False, 5)) seed += 1 for fork in TESTGEN_FORKS: gen_runner.run_generator("ssz_static", [ create_provider(fork, preset_name, seed, mode, chaos, cases_if_random) for (seed, preset_name, mode, chaos, cases_if_random) in settings ])
3,260
35.640449
116
py
consensus-specs
consensus-specs-master/tests/generators/ssz_static/__init__.py
0
0
0
py
consensus-specs
consensus-specs-master/tests/generators/fork_choice/main.py
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators, combine_mods from eth2spec.test.helpers.constants import ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110 if __name__ == "__main__": # Note: Fork choice tests start from Altair - there are no fork choice test for phase 0 anymore altair_mods = {key: 'eth2spec.test.phase0.fork_choice.test_' + key for key in [ 'get_head', 'on_block', 'ex_ante', 'reorg', 'withholding', ]} # For merge `on_merge_block` test kind added with `pow_block_N.ssz` files with several # PowBlock's which should be resolved by `get_pow_block(hash: Hash32) -> PowBlock` function _new_bellatrix_mods = {key: 'eth2spec.test.bellatrix.fork_choice.test_' + key for key in [ 'on_merge_block', ]} bellatrix_mods = combine_mods(_new_bellatrix_mods, altair_mods) capella_mods = bellatrix_mods # No additional Capella specific fork choice tests deneb_mods = capella_mods # No additional Deneb specific fork choice tests eip6110_mods = deneb_mods # No additional EIP6110 specific fork choice tests all_mods = { ALTAIR: altair_mods, BELLATRIX: bellatrix_mods, CAPELLA: capella_mods, DENEB: deneb_mods, EIP6110: eip6110_mods, } run_state_test_generators(runner_name="fork_choice", all_mods=all_mods)
1,384
39.735294
99
py
consensus-specs
consensus-specs-master/tests/generators/sanity/main.py
from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110 from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators, combine_mods if __name__ == "__main__": phase_0_mods = {key: 'eth2spec.test.phase0.sanity.test_' + key for key in [ 'blocks', 'slots', ]} _new_altair_mods = {key: 'eth2spec.test.altair.sanity.test_' + key for key in [ 'blocks', ]} altair_mods = combine_mods(_new_altair_mods, phase_0_mods) _new_bellatrix_mods = {key: 'eth2spec.test.bellatrix.sanity.test_' + key for key in [ 'blocks', ]} bellatrix_mods = combine_mods(_new_bellatrix_mods, altair_mods) _new_capella_mods = {key: 'eth2spec.test.capella.sanity.test_' + key for key in [ 'blocks', ]} capella_mods = combine_mods(_new_capella_mods, bellatrix_mods) _new_deneb_mods = {key: 'eth2spec.test.deneb.sanity.test_' + key for key in [ 'blocks', ]} deneb_mods = combine_mods(_new_deneb_mods, capella_mods) _new_eip6110_mods = {key: 'eth2spec.test.eip6110.sanity.' + key for key in [ 'blocks', ]} eip6110_mods = combine_mods(_new_eip6110_mods, deneb_mods) all_mods = { PHASE0: phase_0_mods, ALTAIR: altair_mods, BELLATRIX: bellatrix_mods, CAPELLA: capella_mods, DENEB: deneb_mods, EIP6110: eip6110_mods, } run_state_test_generators(runner_name="sanity", all_mods=all_mods)
1,492
31.456522
94
py
consensus-specs
consensus-specs-master/tests/generators/ssz_generic/ssz_bitvector.py
from ssz_test_case import invalid_test_case, valid_test_case from eth2spec.utils.ssz.ssz_typing import Bitvector from eth2spec.utils.ssz.ssz_impl import serialize from random import Random from eth2spec.debug.random_value import RandomizationMode, get_random_ssz_object def bitvector_case_fn(rng: Random, mode: RandomizationMode, size: int, invalid_making_pos: int=None): bits = get_random_ssz_object(rng, Bitvector[size], max_bytes_length=(size + 7) // 8, max_list_length=size, mode=mode, chaos=False) if invalid_making_pos is not None and invalid_making_pos <= size: already_invalid = False for i in range(invalid_making_pos, size): if bits[i]: already_invalid = True if not already_invalid: bits[invalid_making_pos] = True return bits def valid_cases(): rng = Random(1234) for size in [1, 2, 3, 4, 5, 8, 16, 31, 512, 513]: for mode in [RandomizationMode.mode_random, RandomizationMode.mode_zero, RandomizationMode.mode_max]: yield f'bitvec_{size}_{mode.to_name()}', valid_test_case(lambda: bitvector_case_fn(rng, mode, size)) def invalid_cases(): # zero length bitvecors are illegal yield 'bitvec_0', invalid_test_case(lambda: b'') rng = Random(1234) # Create a vector with test_size bits, but make the type typ_size instead, # which is invalid when used with the given type size # (and a bit set just after typ_size bits if necessary to avoid the valid 0 padding-but-same-last-byte case) for (typ_size, test_size) in [(1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (8, 9), (9, 8), (16, 8), (32, 33), (512, 513)]: for mode in [RandomizationMode.mode_random, RandomizationMode.mode_zero, RandomizationMode.mode_max]: yield f'bitvec_{typ_size}_{mode.to_name()}_{test_size}', \ invalid_test_case(lambda: serialize(bitvector_case_fn(rng, mode, test_size, invalid_making_pos=typ_size)))
2,159
49.232558
112
py
consensus-specs
consensus-specs-master/tests/generators/ssz_generic/main.py
from typing import Iterable from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing import ssz_basic_vector import ssz_bitlist import ssz_bitvector import ssz_boolean import ssz_uints import ssz_container from eth2spec.test.helpers.constants import PHASE0 def create_provider(handler_name: str, suite_name: str, case_maker) -> gen_typing.TestProvider: def prepare_fn() -> None: return def cases_fn() -> Iterable[gen_typing.TestCase]: for (case_name, case_fn) in case_maker(): yield gen_typing.TestCase( fork_name=PHASE0, preset_name="general", runner_name='ssz_generic', handler_name=handler_name, suite_name=suite_name, case_name=case_name, case_fn=case_fn ) return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn) if __name__ == "__main__": gen_runner.run_generator("ssz_generic", [ create_provider("basic_vector", "valid", ssz_basic_vector.valid_cases), create_provider("basic_vector", "invalid", ssz_basic_vector.invalid_cases), create_provider("bitlist", "valid", ssz_bitlist.valid_cases), create_provider("bitlist", "invalid", ssz_bitlist.invalid_cases), create_provider("bitvector", "valid", ssz_bitvector.valid_cases), create_provider("bitvector", "invalid", ssz_bitvector.invalid_cases), create_provider("boolean", "valid", ssz_boolean.valid_cases), create_provider("boolean", "invalid", ssz_boolean.invalid_cases), create_provider("uints", "valid", ssz_uints.valid_cases), create_provider("uints", "invalid", ssz_uints.invalid_cases), create_provider("containers", "valid", ssz_container.valid_cases), create_provider("containers", "invalid", ssz_container.invalid_cases), ])
1,886
39.148936
95
py
consensus-specs
consensus-specs-master/tests/generators/ssz_generic/ssz_basic_vector.py
from ssz_test_case import invalid_test_case, valid_test_case from eth2spec.utils.ssz.ssz_typing import boolean, uint8, uint16, uint32, uint64, uint128, uint256, Vector, BasicView from eth2spec.utils.ssz.ssz_impl import serialize from random import Random from typing import Dict, Type from eth2spec.debug.random_value import RandomizationMode, get_random_ssz_object def basic_vector_case_fn(rng: Random, mode: RandomizationMode, elem_type: Type[BasicView], length: int): return get_random_ssz_object(rng, Vector[elem_type, length], max_bytes_length=length * 8, max_list_length=length, mode=mode, chaos=False) BASIC_TYPES: Dict[str, Type[BasicView]] = { 'bool': boolean, 'uint8': uint8, 'uint16': uint16, 'uint32': uint32, 'uint64': uint64, 'uint128': uint128, 'uint256': uint256 } def valid_cases(): rng = Random(1234) for (name, typ) in BASIC_TYPES.items(): random_modes = [RandomizationMode.mode_zero, RandomizationMode.mode_max] if name != 'bool': random_modes.append(RandomizationMode.mode_random) for length in [1, 2, 3, 4, 5, 8, 16, 31, 512, 513]: for mode in random_modes: yield f'vec_{name}_{length}_{mode.to_name()}', \ valid_test_case(lambda: basic_vector_case_fn(rng, mode, typ, length)) def invalid_cases(): # zero length vectors are illegal for (name, typ) in BASIC_TYPES.items(): yield f'vec_{name}_0', invalid_test_case(lambda: b'') rng = Random(1234) for (name, typ) in BASIC_TYPES.items(): random_modes = [RandomizationMode.mode_zero, RandomizationMode.mode_max] if name != 'bool': random_modes.append(RandomizationMode.mode_random) for length in [1, 2, 3, 4, 5, 8, 16, 31, 512, 513]: yield f'vec_{name}_{length}_nil', invalid_test_case(lambda: b'') for mode in random_modes: if length == 1: # empty bytes, no elements. It may seem valid, but empty fixed-size elements are not valid SSZ. 
yield f'vec_{name}_{length}_{mode.to_name()}_one_less', \ invalid_test_case(lambda: b"") else: yield f'vec_{name}_{length}_{mode.to_name()}_one_less', \ invalid_test_case(lambda: serialize(basic_vector_case_fn(rng, mode, typ, length - 1))) yield f'vec_{name}_{length}_{mode.to_name()}_one_more', \ invalid_test_case(lambda: serialize(basic_vector_case_fn(rng, mode, typ, length + 1))) yield f'vec_{name}_{length}_{mode.to_name()}_one_byte_less', \ invalid_test_case(lambda: serialize(basic_vector_case_fn(rng, mode, typ, length))[:-1]) yield f'vec_{name}_{length}_{mode.to_name()}_one_byte_more', \ invalid_test_case(lambda: serialize(basic_vector_case_fn(rng, mode, typ, length)) + serialize(basic_vector_case_fn(rng, mode, uint8, 1)))
3,158
46.863636
117
py
consensus-specs
consensus-specs-master/tests/generators/ssz_generic/ssz_uints.py
from ssz_test_case import invalid_test_case, valid_test_case from eth2spec.utils.ssz.ssz_typing import BasicView, uint8, uint16, uint32, uint64, uint128, uint256 from random import Random from typing import Type from eth2spec.debug.random_value import RandomizationMode, get_random_ssz_object def uint_case_fn(rng: Random, mode: RandomizationMode, typ: Type[BasicView]): return get_random_ssz_object(rng, typ, max_bytes_length=typ.type_byte_length(), max_list_length=1, mode=mode, chaos=False) UINT_TYPES = [uint8, uint16, uint32, uint64, uint128, uint256] def valid_cases(): rng = Random(1234) for uint_type in UINT_TYPES: byte_len = uint_type.type_byte_length() yield f'uint_{byte_len * 8}_last_byte_empty', \ valid_test_case(lambda: uint_type((2 ** ((byte_len - 1) * 8)) - 1)) for variation in range(5): for mode in [RandomizationMode.mode_random, RandomizationMode.mode_zero, RandomizationMode.mode_max]: yield f'uint_{byte_len * 8}_{mode.to_name()}_{variation}', \ valid_test_case(lambda: uint_case_fn(rng, mode, uint_type)) def invalid_cases(): for uint_type in UINT_TYPES: byte_len = uint_type.type_byte_length() yield f'uint_{byte_len * 8}_one_too_high', \ invalid_test_case(lambda: (2 ** (byte_len * 8)).to_bytes(byte_len + 1, 'little')) for uint_type in [uint8, uint16, uint32, uint64, uint128, uint256]: byte_len = uint_type.type_byte_length() yield f'uint_{byte_len * 8}_one_byte_longer', \ invalid_test_case(lambda: (2 ** (byte_len * 8) - 1).to_bytes(byte_len + 1, 'little')) for uint_type in [uint8, uint16, uint32, uint64, uint128, uint256]: byte_len = uint_type.type_byte_length() yield f'uint_{byte_len * 8}_one_byte_shorter', \ invalid_test_case(lambda: (2 ** ((byte_len - 1) * 8) - 1).to_bytes(byte_len - 1, 'little'))
2,045
46.581395
113
py
consensus-specs
consensus-specs-master/tests/generators/ssz_generic/uint_test_cases.py
import random from eth_utils import ( to_tuple, ) import ssz from ssz.sedes import ( UInt, ) from renderers import ( render_test_case, ) random.seed(0) BIT_SIZES = [8, 16, 32, 64, 128, 256] RANDOM_TEST_CASES_PER_BIT_SIZE = 10 RANDOM_TEST_CASES_PER_LENGTH = 3 def get_random_bytes(length): return bytes(random.randint(0, 255) for _ in range(length)) @to_tuple def generate_random_uint_test_cases(): for bit_size in BIT_SIZES: sedes = UInt(bit_size) for _ in range(RANDOM_TEST_CASES_PER_BIT_SIZE): value = random.randrange(0, 2**bit_size) serial = ssz.encode(value, sedes) # note that we need to create the tags in each loop cycle, otherwise ruamel will use # YAML references which makes the resulting file harder to read tags = tuple(["atomic", "uint", "random"]) yield render_test_case( sedes=sedes, valid=True, value=value, serial=serial, tags=tags, ) @to_tuple def generate_uint_wrong_length_test_cases(): for bit_size in BIT_SIZES: sedes = UInt(bit_size) lengths = sorted({ 0, sedes.length // 2, sedes.length - 1, sedes.length + 1, sedes.length * 2, }) for length in lengths: for _ in range(RANDOM_TEST_CASES_PER_LENGTH): tags = tuple(["atomic", "uint", "wrong_length"]) yield render_test_case( sedes=sedes, valid=False, serial=get_random_bytes(length), tags=tags, ) @to_tuple def generate_uint_bounds_test_cases(): common_tags = ("atomic", "uint") for bit_size in BIT_SIZES: sedes = UInt(bit_size) for value, tag in ((0, "uint_lower_bound"), (2 ** bit_size - 1, "uint_upper_bound")): serial = ssz.encode(value, sedes) yield render_test_case( sedes=sedes, valid=True, value=value, serial=serial, tags=common_tags + (tag,), ) @to_tuple def generate_uint_out_of_bounds_test_cases(): common_tags = ("atomic", "uint") for bit_size in BIT_SIZES: sedes = UInt(bit_size) for value, tag in ((-1, "uint_underflow"), (2 ** bit_size, "uint_overflow")): yield render_test_case( sedes=sedes, valid=False, value=value, tags=common_tags + (tag,), )
2,643
25.707071
96
py
consensus-specs
consensus-specs-master/tests/generators/ssz_generic/ssz_test_case.py
from eth2spec.utils.ssz.ssz_impl import serialize, hash_tree_root from eth2spec.debug.encode import encode from eth2spec.utils.ssz.ssz_typing import View from typing import Callable def valid_test_case(value_fn: Callable[[], View]): def case_fn(): value = value_fn() yield "value", "data", encode(value) yield "serialized", "ssz", serialize(value) yield "root", "meta", '0x' + hash_tree_root(value).hex() return case_fn def invalid_test_case(bytez_fn: Callable[[], bytes]): def case_fn(): yield "serialized", "ssz", bytez_fn() return case_fn
602
29.15
65
py
consensus-specs
consensus-specs-master/tests/generators/ssz_generic/__init__.py
0
0
0
py
consensus-specs
consensus-specs-master/tests/generators/ssz_generic/ssz_boolean.py
from ssz_test_case import valid_test_case, invalid_test_case from eth2spec.utils.ssz.ssz_typing import boolean def valid_cases(): yield "true", valid_test_case(lambda: boolean(True)) yield "false", valid_test_case(lambda: boolean(False)) def invalid_cases(): yield "byte_2", invalid_test_case(lambda: b'\x02') yield "byte_rev_nibble", invalid_test_case(lambda: b'\x10') yield "byte_0x80", invalid_test_case(lambda: b'\x80') yield "byte_full", invalid_test_case(lambda: b'\xff')
506
32.8
63
py
consensus-specs
consensus-specs-master/tests/generators/ssz_generic/ssz_bitlist.py
from ssz_test_case import invalid_test_case, valid_test_case from eth2spec.utils.ssz.ssz_typing import Bitlist from eth2spec.utils.ssz.ssz_impl import serialize from random import Random from eth2spec.debug.random_value import RandomizationMode, get_random_ssz_object def bitlist_case_fn(rng: Random, mode: RandomizationMode, limit: int): return get_random_ssz_object(rng, Bitlist[limit], max_bytes_length=(limit // 8) + 1, max_list_length=limit, mode=mode, chaos=False) def valid_cases(): rng = Random(1234) for size in [1, 2, 3, 4, 5, 8, 16, 31, 512, 513]: for variation in range(5): for mode in [RandomizationMode.mode_nil_count, RandomizationMode.mode_max_count, RandomizationMode.mode_random, RandomizationMode.mode_zero, RandomizationMode.mode_max]: yield f'bitlist_{size}_{mode.to_name()}_{variation}', \ valid_test_case(lambda: bitlist_case_fn(rng, mode, size)) def invalid_cases(): yield 'bitlist_no_delimiter_empty', invalid_test_case(lambda: b'') yield 'bitlist_no_delimiter_zero_byte', invalid_test_case(lambda: b'\x00') yield 'bitlist_no_delimiter_zeroes', invalid_test_case(lambda: b'\x00\x00\x00') rng = Random(1234) for (typ_limit, test_limit) in [(1, 2), (1, 8), (1, 9), (2, 3), (3, 4), (4, 5), (5, 6), (8, 9), (32, 64), (32, 33), (512, 513)]: yield f'bitlist_{typ_limit}_but_{test_limit}', \ invalid_test_case(lambda: serialize( bitlist_case_fn(rng, RandomizationMode.mode_max_count, test_limit)))
1,787
46.052632
86
py
consensus-specs
consensus-specs-master/tests/generators/ssz_generic/ssz_container.py
from ssz_test_case import invalid_test_case, valid_test_case from eth2spec.utils.ssz.ssz_typing import View, Container, byte, uint8, uint16, \ uint32, uint64, List, ByteList, Vector, Bitvector, Bitlist from eth2spec.utils.ssz.ssz_impl import serialize from random import Random from typing import Dict, Tuple, Sequence, Callable, Type from eth2spec.debug.random_value import RandomizationMode, get_random_ssz_object class SingleFieldTestStruct(Container): A: byte class SmallTestStruct(Container): A: uint16 B: uint16 class FixedTestStruct(Container): A: uint8 B: uint64 C: uint32 class VarTestStruct(Container): A: uint16 B: List[uint16, 1024] C: uint8 class ComplexTestStruct(Container): A: uint16 B: List[uint16, 128] C: uint8 D: ByteList[256] E: VarTestStruct F: Vector[FixedTestStruct, 4] G: Vector[VarTestStruct, 2] class BitsStruct(Container): A: Bitlist[5] B: Bitvector[2] C: Bitvector[1] D: Bitlist[6] E: Bitvector[8] def container_case_fn(rng: Random, mode: RandomizationMode, typ: Type[View]): return get_random_ssz_object(rng, typ, max_bytes_length=2000, max_list_length=2000, mode=mode, chaos=False) PRESET_CONTAINERS: Dict[str, Tuple[Type[View], Sequence[int]]] = { 'SingleFieldTestStruct': (SingleFieldTestStruct, []), 'SmallTestStruct': (SmallTestStruct, []), 'FixedTestStruct': (FixedTestStruct, []), 'VarTestStruct': (VarTestStruct, [2]), 'ComplexTestStruct': (ComplexTestStruct, [2, 2 + 4 + 1, 2 + 4 + 1 + 4]), 'BitsStruct': (BitsStruct, [0, 4 + 1 + 1, 4 + 1 + 1 + 4]), } def valid_cases(): rng = Random(1234) for (name, (typ, offsets)) in PRESET_CONTAINERS.items(): for mode in [RandomizationMode.mode_zero, RandomizationMode.mode_max]: yield f'{name}_{mode.to_name()}', valid_test_case(lambda: container_case_fn(rng, mode, typ)) random_modes = [RandomizationMode.mode_random, RandomizationMode.mode_zero, RandomizationMode.mode_max] if len(offsets) != 0: random_modes.extend([RandomizationMode.mode_nil_count, RandomizationMode.mode_one_count, RandomizationMode.mode_max_count]) 
for mode in random_modes: for variation in range(10): yield f'{name}_{mode.to_name()}_{variation}', \ valid_test_case(lambda: container_case_fn(rng, mode, typ)) for variation in range(3): yield f'{name}_{mode.to_name()}_chaos_{variation}', \ valid_test_case(lambda: container_case_fn(rng, mode, typ)) def mod_offset(b: bytes, offset_index: int, change: Callable[[int], int]): return b[:offset_index] + \ (change(int.from_bytes(b[offset_index:offset_index + 4], byteorder='little')) & 0xffffffff) \ .to_bytes(length=4, byteorder='little') + \ b[offset_index + 4:] def invalid_cases(): rng = Random(1234) for (name, (typ, offsets)) in PRESET_CONTAINERS.items(): # using mode_max_count, so that the extra byte cannot be picked up as normal list content yield f'{name}_extra_byte', \ invalid_test_case(lambda: serialize( container_case_fn(rng, RandomizationMode.mode_max_count, typ)) + b'\xff') if len(offsets) != 0: # Note: there are many more ways to have invalid offsets, # these are just example to get clients started looking into hardening ssz. 
for mode in [RandomizationMode.mode_random, RandomizationMode.mode_nil_count, RandomizationMode.mode_one_count, RandomizationMode.mode_max_count]: for index, offset_index in enumerate(offsets): yield f'{name}_{mode.to_name()}_offset_{offset_index}_plus_one', \ invalid_test_case(lambda: mod_offset( b=serialize(container_case_fn(rng, mode, typ)), offset_index=offset_index, change=lambda x: x + 1 )) yield f'{name}_{mode.to_name()}_offset_{offset_index}_zeroed', \ invalid_test_case(lambda: mod_offset( b=serialize(container_case_fn(rng, mode, typ)), offset_index=offset_index, change=lambda x: 0 )) if index == 0: yield f'{name}_{mode.to_name()}_offset_{offset_index}_minus_one', \ invalid_test_case(lambda: mod_offset( b=serialize(container_case_fn(rng, mode, typ)), offset_index=offset_index, change=lambda x: x - 1 )) if mode == RandomizationMode.mode_max_count: serialized = serialize(container_case_fn(rng, mode, typ)) serialized = serialized + serialized[:2] yield f'{name}_{mode.to_name()}_last_offset_{offset_index}_overflow', \ invalid_test_case(lambda: serialized) if mode == RandomizationMode.mode_one_count: serialized = serialize(container_case_fn(rng, mode, typ)) serialized = serialized + serialized[:1] yield f'{name}_{mode.to_name()}_last_offset_{offset_index}_wrong_byte_length', \ invalid_test_case(lambda: serialized)
5,836
41.605839
111
py
consensus-specs
consensus-specs-master/tests/generators/sync/main.py
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators from eth2spec.test.helpers.constants import BELLATRIX, CAPELLA, DENEB, EIP6110 if __name__ == "__main__": bellatrix_mods = {key: 'eth2spec.test.bellatrix.sync.test_' + key for key in [ 'optimistic', ]} capella_mods = bellatrix_mods deneb_mods = capella_mods eip6110_mods = deneb_mods all_mods = { BELLATRIX: bellatrix_mods, CAPELLA: capella_mods, DENEB: deneb_mods, EIP6110: eip6110_mods, } run_state_test_generators(runner_name="sync", all_mods=all_mods)
610
28.095238
82
py
consensus-specs
consensus-specs-master/tests/generators/genesis/main.py
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators, combine_mods from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110 if __name__ == "__main__": phase_0_mods = {key: 'eth2spec.test.phase0.genesis.test_' + key for key in [ 'initialization', 'validity', ]} altair_mods = phase_0_mods # we have new unconditional lines in `initialize_beacon_state_from_eth1` and we want to test it _new_bellatrix_mods = {key: 'eth2spec.test.bellatrix.genesis.test_' + key for key in [ 'initialization', ]} bellatrix_mods = combine_mods(_new_bellatrix_mods, altair_mods) capella_mods = bellatrix_mods # No additional Capella specific genesis tests deneb_mods = capella_mods # No additional Deneb specific genesis tests eip6110_mods = deneb_mods # No additional EIP6110 specific genesis tests all_mods = { PHASE0: phase_0_mods, ALTAIR: altair_mods, BELLATRIX: bellatrix_mods, CAPELLA: capella_mods, DENEB: deneb_mods, EIP6110: eip6110_mods, } run_state_test_generators(runner_name="genesis", all_mods=all_mods)
1,190
37.419355
99
py
consensus-specs
consensus-specs-master/tests/generators/epoch_processing/main.py
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators, combine_mods from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110 if __name__ == "__main__": phase_0_mods = {key: 'eth2spec.test.phase0.epoch_processing.test_process_' + key for key in [ 'justification_and_finalization', 'rewards_and_penalties', 'registry_updates', 'slashings', 'eth1_data_reset', 'effective_balance_updates', 'slashings_reset', 'randao_mixes_reset', 'historical_roots_update', 'participation_record_updates', ]} _new_altair_mods = {key: 'eth2spec.test.altair.epoch_processing.test_process_' + key for key in [ 'inactivity_updates', 'participation_flag_updates', 'sync_committee_updates', ]} altair_mods = combine_mods(_new_altair_mods, phase_0_mods) # No epoch-processing changes in Bellatrix and previous testing repeats with new types, # so no additional tests required. bellatrix_mods = altair_mods _new_capella_mods = {key: 'eth2spec.test.capella.epoch_processing.test_process_' + key for key in [ 'historical_summaries_update', ]} capella_mods = combine_mods(_new_capella_mods, bellatrix_mods) deneb_mods = capella_mods eip6110_mods = deneb_mods # TODO Custody Game testgen is disabled for now # custody_game_mods = {**{key: 'eth2spec.test.custody_game.epoch_processing.test_process_' + key for key in [ # 'reveal_deadlines', # 'challenge_deadlines', # 'custody_final_updates', # ]}, **phase_0_mods} # also run the previous phase 0 tests (but against custody game spec) all_mods = { PHASE0: phase_0_mods, ALTAIR: altair_mods, BELLATRIX: bellatrix_mods, CAPELLA: capella_mods, DENEB: deneb_mods, EIP6110: eip6110_mods, } run_state_test_generators(runner_name="epoch_processing", all_mods=all_mods)
2,012
34.946429
113
py
consensus-specs
consensus-specs-master/tests/generators/bls/main.py
""" BLS test vectors generator """ from hashlib import sha256 from typing import Tuple, Iterable, Any, Callable, Dict from eth_utils import ( encode_hex, int_to_big_endian, ) import milagro_bls_binding as milagro_bls from eth2spec.utils import bls from eth2spec.test.helpers.constants import PHASE0, ALTAIR from eth2spec.test.helpers.typing import SpecForkName from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing from eth2spec.altair import spec def to_bytes(i): return i.to_bytes(32, "big") def hash(x): return sha256(x).digest() def int_to_hex(n: int, byte_length: int = None) -> str: byte_value = int_to_big_endian(n) if byte_length: byte_value = byte_value.rjust(byte_length, b'\x00') return encode_hex(byte_value) def hex_to_int(x: str) -> int: return int(x, 16) MESSAGES = [ bytes(b'\x00' * 32), bytes(b'\x56' * 32), bytes(b'\xab' * 32), ] SAMPLE_MESSAGE = b'\x12' * 32 PRIVKEYS = [ # Curve order is 256, so private keys use 32 bytes at most. # Also, not all integers are valid private keys. Therefore, using pre-generated keys. 
hex_to_int('0x00000000000000000000000000000000263dbd792f5b1be47ed85f8938c0f29586af0d3ac7b977f21c278fe1462040e3'), hex_to_int('0x0000000000000000000000000000000047b8192d77bf871b62e87859d653922725724a5c031afeabc60bcef5ff665138'), hex_to_int('0x00000000000000000000000000000000328388aff0d4a5b7dc9205abd374e7e98f3cd9f3418edb4eafda5fb16473d216'), ] PUBKEYS = [bls.SkToPk(privkey) for privkey in PRIVKEYS] ZERO_PUBKEY = b'\x00' * 48 G1_POINT_AT_INFINITY = b'\xc0' + b'\x00' * 47 ZERO_SIGNATURE = b'\x00' * 96 G2_POINT_AT_INFINITY = b'\xc0' + b'\x00' * 95 ZERO_PRIVKEY = 0 ZERO_PRIVKEY_BYTES = b'\x00' * 32 def expect_exception(func, *args): try: func(*args) except Exception: pass else: raise Exception("should have raised exception") def case01_sign(): # Valid cases for privkey in PRIVKEYS: for message in MESSAGES: sig = bls.Sign(privkey, message) assert sig == milagro_bls.Sign(to_bytes(privkey), message) # double-check with milagro identifier = f'{int_to_hex(privkey)}_{encode_hex(message)}' yield f'sign_case_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'privkey': int_to_hex(privkey), 'message': encode_hex(message), }, 'output': encode_hex(sig) } # Edge case: privkey == 0 expect_exception(bls.Sign, ZERO_PRIVKEY, message) expect_exception(milagro_bls.Sign, ZERO_PRIVKEY_BYTES, message) yield 'sign_case_zero_privkey', { 'input': { 'privkey': encode_hex(ZERO_PRIVKEY_BYTES), 'message': encode_hex(message), }, 'output': None } def case02_verify(): for i, privkey in enumerate(PRIVKEYS): for message in MESSAGES: # Valid signature signature = bls.Sign(privkey, message) pubkey = bls.SkToPk(privkey) assert milagro_bls.SkToPk(to_bytes(privkey)) == pubkey assert milagro_bls.Sign(to_bytes(privkey), message) == signature identifier = f'{encode_hex(pubkey)}_{encode_hex(message)}' assert bls.Verify(pubkey, message, signature) assert milagro_bls.Verify(pubkey, message, signature) yield f'verify_valid_case_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'pubkey': 
encode_hex(pubkey), 'message': encode_hex(message), 'signature': encode_hex(signature), }, 'output': True, } # Invalid signatures -- wrong pubkey wrong_pubkey = bls.SkToPk(PRIVKEYS[(i + 1) % len(PRIVKEYS)]) identifier = f'{encode_hex(wrong_pubkey)}_{encode_hex(message)}' assert not bls.Verify(wrong_pubkey, message, signature) assert not milagro_bls.Verify(wrong_pubkey, message, signature) yield f'verify_wrong_pubkey_case_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'pubkey': encode_hex(wrong_pubkey), 'message': encode_hex(message), 'signature': encode_hex(signature), }, 'output': False, } # Invalid signature -- tampered with signature tampered_signature = signature[:-4] + b'\xFF\xFF\xFF\xFF' identifier = f'{encode_hex(pubkey)}_{encode_hex(message)}' assert not bls.Verify(pubkey, message, tampered_signature) assert not milagro_bls.Verify(pubkey, message, tampered_signature) yield f'verify_tampered_signature_case_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'pubkey': encode_hex(pubkey), 'message': encode_hex(message), 'signature': encode_hex(tampered_signature), }, 'output': False, } # Invalid pubkey and signature with the point at infinity assert not bls.Verify(G1_POINT_AT_INFINITY, SAMPLE_MESSAGE, G2_POINT_AT_INFINITY) assert not milagro_bls.Verify(G1_POINT_AT_INFINITY, SAMPLE_MESSAGE, G2_POINT_AT_INFINITY) yield 'verify_infinity_pubkey_and_infinity_signature', { 'input': { 'pubkey': encode_hex(G1_POINT_AT_INFINITY), 'message': encode_hex(SAMPLE_MESSAGE), 'signature': encode_hex(G2_POINT_AT_INFINITY), }, 'output': False, } def case03_aggregate(): for message in MESSAGES: sigs = [bls.Sign(privkey, message) for privkey in PRIVKEYS] aggregate_sig = bls.Aggregate(sigs) assert aggregate_sig == milagro_bls.Aggregate(sigs) yield f'aggregate_{encode_hex(message)}', { 'input': [encode_hex(sig) for sig in sigs], 'output': encode_hex(aggregate_sig), } # Invalid pubkeys -- len(pubkeys) == 0 expect_exception(bls.Aggregate, []) # No signatures 
to aggregate. Follow IETF BLS spec, return `None` to represent INVALID. # https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04#section-2.8 yield 'aggregate_na_signatures', { 'input': [], 'output': None, } # Valid to aggregate G2 point at infinity aggregate_sig = bls.Aggregate([G2_POINT_AT_INFINITY]) assert aggregate_sig == milagro_bls.Aggregate([G2_POINT_AT_INFINITY]) == G2_POINT_AT_INFINITY yield 'aggregate_infinity_signature', { 'input': [encode_hex(G2_POINT_AT_INFINITY)], 'output': encode_hex(aggregate_sig), } def case04_fast_aggregate_verify(): for i, message in enumerate(MESSAGES): privkeys = PRIVKEYS[:i + 1] sigs = [bls.Sign(privkey, message) for privkey in privkeys] aggregate_signature = bls.Aggregate(sigs) pubkeys = [bls.SkToPk(privkey) for privkey in privkeys] pubkeys_serial = [encode_hex(pubkey) for pubkey in pubkeys] # Valid signature identifier = f'{pubkeys_serial}_{encode_hex(message)}' assert bls.FastAggregateVerify(pubkeys, message, aggregate_signature) assert milagro_bls.FastAggregateVerify(pubkeys, message, aggregate_signature) yield f'fast_aggregate_verify_valid_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'pubkeys': pubkeys_serial, 'message': encode_hex(message), 'signature': encode_hex(aggregate_signature), }, 'output': True, } # Invalid signature -- extra pubkey pubkeys_extra = pubkeys + [bls.SkToPk(PRIVKEYS[-1])] pubkeys_extra_serial = [encode_hex(pubkey) for pubkey in pubkeys_extra] identifier = f'{pubkeys_extra_serial}_{encode_hex(message)}' assert not bls.FastAggregateVerify(pubkeys_extra, message, aggregate_signature) assert not milagro_bls.FastAggregateVerify(pubkeys_extra, message, aggregate_signature) yield f'fast_aggregate_verify_extra_pubkey_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'pubkeys': pubkeys_extra_serial, 'message': encode_hex(message), 'signature': encode_hex(aggregate_signature), }, 'output': False, } # Invalid signature -- tampered with signature tampered_signature = 
aggregate_signature[:-4] + b'\xff\xff\xff\xff' identifier = f'{pubkeys_serial}_{encode_hex(message)}' assert not bls.FastAggregateVerify(pubkeys, message, tampered_signature) assert not milagro_bls.FastAggregateVerify(pubkeys, message, tampered_signature) yield f'fast_aggregate_verify_tampered_signature_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'pubkeys': pubkeys_serial, 'message': encode_hex(message), 'signature': encode_hex(tampered_signature), }, 'output': False, } # Invalid pubkeys and signature -- len(pubkeys) == 0 and signature == Z1_SIGNATURE assert not bls.FastAggregateVerify([], message, G2_POINT_AT_INFINITY) assert not milagro_bls.FastAggregateVerify([], message, G2_POINT_AT_INFINITY) yield 'fast_aggregate_verify_na_pubkeys_and_infinity_signature', { 'input': { 'pubkeys': [], 'message': encode_hex(message), 'signature': encode_hex(G2_POINT_AT_INFINITY), }, 'output': False, } # Invalid pubkeys and signature -- len(pubkeys) == 0 and signature == 0x00... assert not bls.FastAggregateVerify([], message, ZERO_SIGNATURE) assert not milagro_bls.FastAggregateVerify([], message, ZERO_SIGNATURE) yield 'fast_aggregate_verify_na_pubkeys_and_zero_signature', { 'input': { 'pubkeys': [], 'message': encode_hex(message), 'signature': encode_hex(ZERO_SIGNATURE), }, 'output': False, } # Invalid pubkeys and signature -- pubkeys contains point at infinity pubkeys = PUBKEYS.copy() pubkeys_with_infinity = pubkeys + [G1_POINT_AT_INFINITY] signatures = [bls.Sign(privkey, SAMPLE_MESSAGE) for privkey in PRIVKEYS] aggregate_signature = bls.Aggregate(signatures) assert not bls.FastAggregateVerify(pubkeys_with_infinity, SAMPLE_MESSAGE, aggregate_signature) assert not milagro_bls.FastAggregateVerify(pubkeys_with_infinity, SAMPLE_MESSAGE, aggregate_signature) yield 'fast_aggregate_verify_infinity_pubkey', { 'input': { 'pubkeys': [encode_hex(pubkey) for pubkey in pubkeys_with_infinity], 'message': encode_hex(SAMPLE_MESSAGE), 'signature': 
encode_hex(aggregate_signature), }, 'output': False, } def case05_aggregate_verify(): pubkeys = [] pubkeys_serial = [] messages = [] messages_serial = [] sigs = [] for privkey, message in zip(PRIVKEYS, MESSAGES): sig = bls.Sign(privkey, message) pubkey = bls.SkToPk(privkey) pubkeys.append(pubkey) pubkeys_serial.append(encode_hex(pubkey)) messages.append(message) messages_serial.append(encode_hex(message)) sigs.append(sig) aggregate_signature = bls.Aggregate(sigs) assert bls.AggregateVerify(pubkeys, messages, aggregate_signature) assert milagro_bls.AggregateVerify(pubkeys, messages, aggregate_signature) yield 'aggregate_verify_valid', { 'input': { 'pubkeys': pubkeys_serial, 'messages': messages_serial, 'signature': encode_hex(aggregate_signature), }, 'output': True, } tampered_signature = aggregate_signature[:4] + b'\xff\xff\xff\xff' assert not bls.AggregateVerify(pubkey, messages, tampered_signature) assert not milagro_bls.AggregateVerify(pubkeys, messages, tampered_signature) yield 'aggregate_verify_tampered_signature', { 'input': { 'pubkeys': pubkeys_serial, 'messages': messages_serial, 'signature': encode_hex(tampered_signature), }, 'output': False, } # Invalid pubkeys and signature -- len(pubkeys) == 0 and signature == Z1_SIGNATURE assert not bls.AggregateVerify([], [], G2_POINT_AT_INFINITY) assert not milagro_bls.AggregateVerify([], [], G2_POINT_AT_INFINITY) yield 'aggregate_verify_na_pubkeys_and_infinity_signature', { 'input': { 'pubkeys': [], 'messages': [], 'signature': encode_hex(G2_POINT_AT_INFINITY), }, 'output': False, } # Invalid pubkeys and signature -- len(pubkeys) == 0 and signature == 0x00... 
assert not bls.AggregateVerify([], [], ZERO_SIGNATURE) assert not milagro_bls.AggregateVerify([], [], ZERO_SIGNATURE) yield 'aggregate_verify_na_pubkeys_and_zero_signature', { 'input': { 'pubkeys': [], 'messages': [], 'signature': encode_hex(ZERO_SIGNATURE), }, 'output': False, } # Invalid pubkeys and signature -- pubkeys contains point at infinity pubkeys_with_infinity = pubkeys + [G1_POINT_AT_INFINITY] messages_with_sample = messages + [SAMPLE_MESSAGE] assert not bls.AggregateVerify(pubkeys_with_infinity, messages_with_sample, aggregate_signature) assert not milagro_bls.AggregateVerify(pubkeys_with_infinity, messages_with_sample, aggregate_signature) yield 'aggregate_verify_infinity_pubkey', { 'input': { 'pubkeys': [encode_hex(pubkey) for pubkey in pubkeys_with_infinity], 'messages': [encode_hex(message) for message in messages_with_sample], 'signature': encode_hex(aggregate_signature), }, 'output': False, } def case06_eth_aggregate_pubkeys(): for pubkey in PUBKEYS: encoded_pubkey = encode_hex(pubkey) aggregate_pubkey = spec.eth_aggregate_pubkeys([pubkey]) # Should be unchanged assert aggregate_pubkey == milagro_bls._AggregatePKs([pubkey]) == pubkey # Valid pubkey yield f'eth_aggregate_pubkeys_valid_{(hash(bytes(encoded_pubkey, "utf-8"))[:8]).hex()}', { 'input': [encode_hex(pubkey)], 'output': encode_hex(aggregate_pubkey), } # Valid pubkeys aggregate_pubkey = spec.eth_aggregate_pubkeys(PUBKEYS) assert aggregate_pubkey == milagro_bls._AggregatePKs(PUBKEYS) yield 'eth_aggregate_pubkeys_valid_pubkeys', { 'input': [encode_hex(pubkey) for pubkey in PUBKEYS], 'output': encode_hex(aggregate_pubkey), } # Invalid pubkeys -- len(pubkeys) == 0 expect_exception(spec.eth_aggregate_pubkeys, []) expect_exception(milagro_bls._AggregatePKs, []) yield 'eth_aggregate_pubkeys_empty_list', { 'input': [], 'output': None, } # Invalid pubkeys -- [ZERO_PUBKEY] expect_exception(spec.eth_aggregate_pubkeys, [ZERO_PUBKEY]) expect_exception(milagro_bls._AggregatePKs, [ZERO_PUBKEY]) yield 
'eth_aggregate_pubkeys_zero_pubkey', { 'input': [encode_hex(ZERO_PUBKEY)], 'output': None, } # Invalid pubkeys -- G1 point at infinity expect_exception(spec.eth_aggregate_pubkeys, [G1_POINT_AT_INFINITY]) expect_exception(milagro_bls._AggregatePKs, [G1_POINT_AT_INFINITY]) yield 'eth_aggregate_pubkeys_infinity_pubkey', { 'input': [encode_hex(G1_POINT_AT_INFINITY)], 'output': None, } # Invalid pubkeys -- b'\x40\x00\x00\x00....\x00' pubkey x40_pubkey = b'\x40' + b'\00' * 47 expect_exception(spec.eth_aggregate_pubkeys, [x40_pubkey]) expect_exception(milagro_bls._AggregatePKs, [x40_pubkey]) yield 'eth_aggregate_pubkeys_x40_pubkey', { 'input': [encode_hex(x40_pubkey)], 'output': None, } def case07_eth_fast_aggregate_verify(): """ Similar to `case04_fast_aggregate_verify` except for the empty case """ for i, message in enumerate(MESSAGES): privkeys = PRIVKEYS[:i + 1] sigs = [bls.Sign(privkey, message) for privkey in privkeys] aggregate_signature = bls.Aggregate(sigs) pubkeys = [bls.SkToPk(privkey) for privkey in privkeys] pubkeys_serial = [encode_hex(pubkey) for pubkey in pubkeys] # Valid signature identifier = f'{pubkeys_serial}_{encode_hex(message)}' assert spec.eth_fast_aggregate_verify(pubkeys, message, aggregate_signature) yield f'eth_fast_aggregate_verify_valid_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'pubkeys': pubkeys_serial, 'message': encode_hex(message), 'signature': encode_hex(aggregate_signature), }, 'output': True, } # Invalid signature -- extra pubkey pubkeys_extra = pubkeys + [bls.SkToPk(PRIVKEYS[-1])] pubkeys_extra_serial = [encode_hex(pubkey) for pubkey in pubkeys_extra] identifier = f'{pubkeys_extra_serial}_{encode_hex(message)}' assert not spec.eth_fast_aggregate_verify(pubkeys_extra, message, aggregate_signature) yield f'eth_fast_aggregate_verify_extra_pubkey_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'pubkeys': pubkeys_extra_serial, 'message': encode_hex(message), 'signature': encode_hex(aggregate_signature), }, 
'output': False, } # Invalid signature -- tampered with signature tampered_signature = aggregate_signature[:-4] + b'\xff\xff\xff\xff' identifier = f'{pubkeys_serial}_{encode_hex(message)}' assert not spec.eth_fast_aggregate_verify(pubkeys, message, tampered_signature) yield f'eth_fast_aggregate_verify_tampered_signature_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'pubkeys': pubkeys_serial, 'message': encode_hex(message), 'signature': encode_hex(tampered_signature), }, 'output': False, } # NOTE: Unlike `FastAggregateVerify`, len(pubkeys) == 0 and signature == G2_POINT_AT_INFINITY is VALID assert spec.eth_fast_aggregate_verify([], message, G2_POINT_AT_INFINITY) yield 'eth_fast_aggregate_verify_na_pubkeys_and_infinity_signature', { 'input': { 'pubkeys': [], 'message': encode_hex(message), 'signature': encode_hex(G2_POINT_AT_INFINITY), }, 'output': True, } # Invalid pubkeys and signature -- len(pubkeys) == 0 and signature == 0x00... assert not spec.eth_fast_aggregate_verify([], message, ZERO_SIGNATURE) yield 'eth_fast_aggregate_verify_na_pubkeys_and_zero_signature', { 'input': { 'pubkeys': [], 'message': encode_hex(message), 'signature': encode_hex(ZERO_SIGNATURE), }, 'output': False, } # Invalid pubkeys and signature -- pubkeys contains point at infinity pubkeys = PUBKEYS.copy() pubkeys_with_infinity = pubkeys + [G1_POINT_AT_INFINITY] signatures = [bls.Sign(privkey, SAMPLE_MESSAGE) for privkey in PRIVKEYS] aggregate_signature = bls.Aggregate(signatures) assert not spec.eth_fast_aggregate_verify(pubkeys_with_infinity, SAMPLE_MESSAGE, aggregate_signature) yield 'eth_fast_aggregate_verify_infinity_pubkey', { 'input': { 'pubkeys': [encode_hex(pubkey) for pubkey in pubkeys_with_infinity], 'message': encode_hex(SAMPLE_MESSAGE), 'signature': encode_hex(aggregate_signature), }, 'output': False, } def create_provider(fork_name: SpecForkName, handler_name: str, test_case_fn: Callable[[], Iterable[Tuple[str, Dict[str, Any]]]]) -> gen_typing.TestProvider: def 
prepare_fn() -> None: # Nothing to load / change in spec. Maybe in future forks. # Put the tests into the general config category, to not require any particular configuration. return def cases_fn() -> Iterable[gen_typing.TestCase]: for data in test_case_fn(): (case_name, case_content) = data yield gen_typing.TestCase( fork_name=fork_name, preset_name='general', runner_name='bls', handler_name=handler_name, suite_name='small', case_name=case_name, case_fn=lambda: [('data', 'data', case_content)] ) return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn) if __name__ == "__main__": bls.use_py_ecc() # Py-ecc is chosen instead of Milagro, since the code is better understood to be correct. gen_runner.run_generator("bls", [ # PHASE0 create_provider(PHASE0, 'sign', case01_sign), create_provider(PHASE0, 'verify', case02_verify), create_provider(PHASE0, 'aggregate', case03_aggregate), create_provider(PHASE0, 'fast_aggregate_verify', case04_fast_aggregate_verify), create_provider(PHASE0, 'aggregate_verify', case05_aggregate_verify), # ALTAIR create_provider(ALTAIR, 'eth_aggregate_pubkeys', case06_eth_aggregate_pubkeys), create_provider(ALTAIR, 'eth_fast_aggregate_verify', case07_eth_fast_aggregate_verify), ])
21,609
38.797422
117
py
consensus-specs
consensus-specs-master/tests/generators/forks/main.py
from typing import Iterable from eth2spec.test.helpers.constants import ( PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, MINIMAL, MAINNET, ) from eth2spec.test.helpers.typing import SpecForkName, PresetBaseName from eth2spec.test.altair.fork import test_altair_fork_basic, test_altair_fork_random from eth2spec.test.bellatrix.fork import test_bellatrix_fork_basic, test_bellatrix_fork_random from eth2spec.test.capella.fork import test_capella_fork_basic, test_capella_fork_random from eth2spec.test.deneb.fork import test_deneb_fork_basic, test_deneb_fork_random from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing from eth2spec.gen_helpers.gen_from_tests.gen import generate_from_tests def create_provider(tests_src, preset_name: PresetBaseName, phase: SpecForkName, fork_name: SpecForkName) -> gen_typing.TestProvider: def prepare_fn() -> None: return def cases_fn() -> Iterable[gen_typing.TestCase]: return generate_from_tests( runner_name='fork', handler_name='fork', src=tests_src, fork_name=fork_name, preset_name=preset_name, phase=phase, ) return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn) def _get_fork_tests_providers(): for preset in [MINIMAL, MAINNET]: yield create_provider(test_altair_fork_basic, preset, PHASE0, ALTAIR) yield create_provider(test_altair_fork_random, preset, PHASE0, ALTAIR) yield create_provider(test_bellatrix_fork_basic, preset, ALTAIR, BELLATRIX) yield create_provider(test_bellatrix_fork_random, preset, ALTAIR, BELLATRIX) yield create_provider(test_capella_fork_basic, preset, BELLATRIX, CAPELLA) yield create_provider(test_capella_fork_random, preset, BELLATRIX, CAPELLA) yield create_provider(test_deneb_fork_basic, preset, CAPELLA, DENEB) yield create_provider(test_deneb_fork_random, preset, CAPELLA, DENEB) if __name__ == "__main__": gen_runner.run_generator("forks", list(_get_fork_tests_providers()))
2,095
41.77551
94
py
consensus-specs
consensus-specs-master/tests/generators/transition/main.py
from typing import Iterable

from eth2spec.test.helpers.constants import (
    MINIMAL, MAINNET,
    ALL_PRE_POST_FORKS,
)
from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing
from eth2spec.gen_helpers.gen_from_tests.gen import (
    generate_from_tests,
)
from eth2spec.test.altair.transition import (
    test_transition as test_altair_transition,
    test_activations_and_exits as test_altair_activations_and_exits,
    test_leaking as test_altair_leaking,
    test_slashing as test_altair_slashing,
    test_operations as test_altair_operations,
)
from eth2spec.test.deneb.transition import (
    test_operations as test_deneb_operations,
)


def create_provider(tests_src, preset_name: str,
                    pre_fork_name: str, post_fork_name: str) -> gen_typing.TestProvider:
    """Build a provider emitting transition cases for one preset and fork pair."""

    def prepare_fn() -> None:
        return

    def cases_fn() -> Iterable[gen_typing.TestCase]:
        # Transition tests are keyed by the post-fork; the pre-fork is the phase.
        return generate_from_tests(
            runner_name='transition',
            handler_name='core',
            src=tests_src,
            fork_name=post_fork_name,
            phase=pre_fork_name,
            preset_name=preset_name,
        )

    return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)


if __name__ == "__main__":
    all_tests = (
        test_altair_transition,
        test_altair_activations_and_exits,
        test_altair_leaking,
        test_altair_slashing,
        test_altair_operations,
        test_deneb_operations,
    )
    # One generator run per (module, fork pair), covering both presets together.
    for transition_test_module in all_tests:
        for pre_fork, post_fork in ALL_PRE_POST_FORKS:
            providers = [
                create_provider(transition_test_module, preset, pre_fork, post_fork)
                for preset in (MINIMAL, MAINNET)
            ]
            gen_runner.run_generator("transition", providers)
1,802
30.631579
117
py
consensus-specs
consensus-specs-master/tests/generators/kzg_4844/main.py
""" KZG 4844 test vectors generator """ from hashlib import sha256 from typing import Tuple, Iterable, Any, Callable, Dict from eth_utils import ( encode_hex, int_to_big_endian, ) from eth2spec.utils import bls from eth2spec.test.helpers.constants import DENEB from eth2spec.test.helpers.typing import SpecForkName from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing from eth2spec.deneb import spec def expect_exception(func, *args): try: func(*args) except Exception: pass else: raise Exception("should have raised exception") def field_element_bytes(x): return int.to_bytes(x % spec.BLS_MODULUS, 32, spec.KZG_ENDIANNESS) def field_element_bytes_unchecked(x): return int.to_bytes(x, 32, spec.KZG_ENDIANNESS) def encode_hex_list(a): return [encode_hex(x) for x in a] def bls_add_one(x): """ Adds "one" (actually bls.G1()) to a compressed group element. Useful to compute definitely incorrect proofs. """ return bls.G1_to_bytes48( bls.add(bls.bytes48_to_G1(x), bls.G1()) ) def evaluate_blob_at(blob, z): return field_element_bytes( spec.evaluate_polynomial_in_evaluation_form(spec.blob_to_polynomial(blob), spec.bytes_to_bls_field(z)) ) BLS_MODULUS_BYTES = spec.BLS_MODULUS.to_bytes(32, spec.KZG_ENDIANNESS) G1 = bls.G1_to_bytes48(bls.G1()) G1_INVALID_TOO_FEW_BYTES = G1[:-1] G1_INVALID_TOO_MANY_BYTES = G1 + b"\x00" G1_INVALID_P1_NOT_IN_G1 = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" + "0123456789abcdef0123456789abcdef0123456789abcdef") G1_INVALID_P1_NOT_ON_CURVE = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" + "0123456789abcdef0123456789abcdef0123456789abcde0") INVALID_G1_POINTS = [G1_INVALID_TOO_FEW_BYTES, G1_INVALID_TOO_MANY_BYTES, G1_INVALID_P1_NOT_IN_G1, G1_INVALID_P1_NOT_ON_CURVE] BLOB_ALL_ZEROS = spec.Blob() BLOB_RANDOM_VALID1 = spec.Blob(b''.join([field_element_bytes(pow(2, n + 256, spec.BLS_MODULUS)) for n in range(4096)])) BLOB_RANDOM_VALID2 = spec.Blob(b''.join([field_element_bytes(pow(3, n + 256, spec.BLS_MODULUS)) for n 
in range(4096)])) BLOB_RANDOM_VALID3 = spec.Blob(b''.join([field_element_bytes(pow(5, n + 256, spec.BLS_MODULUS)) for n in range(4096)])) BLOB_ALL_MODULUS_MINUS_ONE = spec.Blob(b''.join([field_element_bytes(spec.BLS_MODULUS - 1) for n in range(4096)])) BLOB_ALMOST_ZERO = spec.Blob(b''.join([field_element_bytes(1 if n == 3211 else 0) for n in range(4096)])) BLOB_INVALID = spec.Blob(b'\xFF' * 4096 * 32) BLOB_INVALID_CLOSE = spec.Blob(b''.join( [BLS_MODULUS_BYTES if n == 2111 else field_element_bytes(0) for n in range(4096)] )) BLOB_INVALID_LENGTH_PLUS_ONE = BLOB_RANDOM_VALID1 + b"\x00" BLOB_INVALID_LENGTH_MINUS_ONE = BLOB_RANDOM_VALID1[:-1] VALID_BLOBS = [BLOB_ALL_ZEROS, BLOB_RANDOM_VALID1, BLOB_RANDOM_VALID2, BLOB_RANDOM_VALID3, BLOB_ALL_MODULUS_MINUS_ONE, BLOB_ALMOST_ZERO] INVALID_BLOBS = [BLOB_INVALID, BLOB_INVALID_CLOSE, BLOB_INVALID_LENGTH_PLUS_ONE, BLOB_INVALID_LENGTH_MINUS_ONE] FE_VALID1 = field_element_bytes(0) FE_VALID2 = field_element_bytes(1) FE_VALID3 = field_element_bytes(2) FE_VALID4 = field_element_bytes(pow(5, 1235, spec.BLS_MODULUS)) FE_VALID5 = field_element_bytes(spec.BLS_MODULUS - 1) FE_VALID6 = field_element_bytes(spec.ROOTS_OF_UNITY[1]) VALID_FIELD_ELEMENTS = [FE_VALID1, FE_VALID2, FE_VALID3, FE_VALID4, FE_VALID5, FE_VALID6] FE_INVALID_EQUAL_TO_MODULUS = field_element_bytes_unchecked(spec.BLS_MODULUS) FE_INVALID_MODULUS_PLUS_ONE = field_element_bytes_unchecked(spec.BLS_MODULUS + 1) FE_INVALID_UINT256_MAX = field_element_bytes_unchecked(2**256 - 1) FE_INVALID_UINT256_MID = field_element_bytes_unchecked(2**256 - 2**128) FE_INVALID_LENGTH_PLUS_ONE = VALID_FIELD_ELEMENTS[0] + b"\x00" FE_INVALID_LENGTH_MINUS_ONE = VALID_FIELD_ELEMENTS[0][:-1] INVALID_FIELD_ELEMENTS = [FE_INVALID_EQUAL_TO_MODULUS, FE_INVALID_MODULUS_PLUS_ONE, FE_INVALID_UINT256_MAX, FE_INVALID_UINT256_MID, FE_INVALID_LENGTH_PLUS_ONE, FE_INVALID_LENGTH_MINUS_ONE] def hash(x): return sha256(x).digest() def int_to_hex(n: int, byte_length: int = None) -> str: byte_value = 
int_to_big_endian(n) if byte_length: byte_value = byte_value.rjust(byte_length, b'\x00') return encode_hex(byte_value) def case01_blob_to_kzg_commitment(): # Valid cases for blob in VALID_BLOBS: commitment = spec.blob_to_kzg_commitment(blob) identifier = f'{encode_hex(hash(blob))}' yield f'blob_to_kzg_commitment_case_valid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'blob': encode_hex(blob), }, 'output': encode_hex(commitment) } # Edge case: Invalid blobs for blob in INVALID_BLOBS: identifier = f'{encode_hex(hash(blob))}' expect_exception(spec.blob_to_kzg_commitment, blob) yield f'blob_to_kzg_commitment_case_invalid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'blob': encode_hex(blob) }, 'output': None } def case02_compute_kzg_proof(): # Valid cases for blob in VALID_BLOBS: for z in VALID_FIELD_ELEMENTS: proof, y = spec.compute_kzg_proof(blob, z) identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}' yield f'compute_kzg_proof_case_valid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'blob': encode_hex(blob), 'z': encode_hex(z), }, 'output': (encode_hex(proof), encode_hex(y)) } # Edge case: Invalid blobs for blob in INVALID_BLOBS: z = VALID_FIELD_ELEMENTS[0] expect_exception(spec.compute_kzg_proof, blob, z) identifier = f'{encode_hex(hash(blob))}' yield f'compute_kzg_proof_case_invalid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'blob': encode_hex(blob), 'z': encode_hex(z), }, 'output': None } # Edge case: Invalid z for z in INVALID_FIELD_ELEMENTS: blob = VALID_BLOBS[4] expect_exception(spec.compute_kzg_proof, blob, z) identifier = f'{encode_hex(hash(z))}' yield f'compute_kzg_proof_case_invalid_z_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'blob': encode_hex(blob), 'z': encode_hex(z), }, 'output': None } def case03_verify_kzg_proof(): # Valid cases for blob in VALID_BLOBS: for z in VALID_FIELD_ELEMENTS: proof, y = spec.compute_kzg_proof(blob, z) commitment = 
spec.blob_to_kzg_commitment(blob) assert spec.verify_kzg_proof(commitment, z, y, proof) identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}' yield f'verify_kzg_proof_case_correct_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'commitment': encode_hex(commitment), 'z': encode_hex(z), 'y': encode_hex(y), 'proof': encode_hex(proof), }, 'output': True } # Incorrect proofs for blob in VALID_BLOBS: for z in VALID_FIELD_ELEMENTS: proof_orig, y = spec.compute_kzg_proof(blob, z) proof = bls_add_one(proof_orig) commitment = spec.blob_to_kzg_commitment(blob) assert not spec.verify_kzg_proof(commitment, z, y, proof) identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}' yield f'verify_kzg_proof_case_incorrect_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'commitment': encode_hex(commitment), 'z': encode_hex(z), 'y': encode_hex(y), 'proof': encode_hex(proof), }, 'output': False } # Edge case: Invalid commitment for commitment in INVALID_G1_POINTS: blob, z = VALID_BLOBS[2], VALID_FIELD_ELEMENTS[1] proof, y = spec.compute_kzg_proof(blob, z) expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) identifier = f'{encode_hex(commitment)}' yield f'verify_kzg_proof_case_invalid_commitment_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'commitment': encode_hex(commitment), 'z': encode_hex(z), 'y': encode_hex(y), 'proof': encode_hex(proof), }, 'output': None } # Edge case: Invalid z for z in INVALID_FIELD_ELEMENTS: blob, validz = VALID_BLOBS[4], VALID_FIELD_ELEMENTS[1] proof, y = spec.compute_kzg_proof(blob, validz) commitment = spec.blob_to_kzg_commitment(blob) expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) identifier = f'{encode_hex(z)}' yield f'verify_kzg_proof_case_invalid_z_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'commitment': encode_hex(commitment), 'z': encode_hex(z), 'y': encode_hex(y), 'proof': encode_hex(proof), }, 'output': None } # Edge case: Invalid y for y in 
INVALID_FIELD_ELEMENTS: blob, z = VALID_BLOBS[4], VALID_FIELD_ELEMENTS[1] proof, _ = spec.compute_kzg_proof(blob, z) commitment = spec.blob_to_kzg_commitment(blob) expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) identifier = f'{encode_hex(y)}' yield f'verify_kzg_proof_case_invalid_y_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'commitment': encode_hex(commitment), 'z': encode_hex(z), 'y': encode_hex(y), 'proof': encode_hex(proof), }, 'output': None } # Edge case: Invalid proof for proof in INVALID_G1_POINTS: blob, z = VALID_BLOBS[2], VALID_FIELD_ELEMENTS[1] _, y = spec.compute_kzg_proof(blob, z) commitment = spec.blob_to_kzg_commitment(blob) expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) identifier = f'{encode_hex(proof)}' yield f'verify_kzg_proof_case_invalid_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'commitment': encode_hex(commitment), 'z': encode_hex(z), 'y': encode_hex(y), 'proof': encode_hex(proof), }, 'output': None } def case04_compute_blob_kzg_proof(): # Valid cases for blob in VALID_BLOBS: commitment = spec.blob_to_kzg_commitment(blob) proof = spec.compute_blob_kzg_proof(blob, commitment) identifier = f'{encode_hex(hash(blob))}' yield f'compute_blob_kzg_proof_case_valid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'blob': encode_hex(blob), 'commitment': encode_hex(commitment), }, 'output': encode_hex(proof) } # Edge case: Invalid blob for blob in INVALID_BLOBS: commitment = G1 expect_exception(spec.compute_blob_kzg_proof, blob, commitment) identifier = f'{encode_hex(hash(blob))}' yield f'compute_blob_kzg_proof_case_invalid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'blob': encode_hex(blob), 'commitment': encode_hex(commitment), }, 'output': None } # Edge case: Invalid commitment for commitment in INVALID_G1_POINTS: blob = VALID_BLOBS[1] expect_exception(spec.compute_blob_kzg_proof, blob, commitment) identifier = 
f'{encode_hex(hash(commitment))}' yield f'compute_blob_kzg_proof_case_invalid_commitment_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'blob': encode_hex(blob), 'commitment': encode_hex(commitment), }, 'output': None } def case05_verify_blob_kzg_proof(): # Valid cases for blob in VALID_BLOBS: commitment = spec.blob_to_kzg_commitment(blob) proof = spec.compute_blob_kzg_proof(blob, commitment) assert spec.verify_blob_kzg_proof(blob, commitment, proof) identifier = f'{encode_hex(hash(blob))}' yield f'verify_blob_kzg_proof_case_correct_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'blob': encode_hex(blob), 'commitment': encode_hex(commitment), 'proof': encode_hex(proof), }, 'output': True } # Incorrect proofs for blob in VALID_BLOBS: commitment = spec.blob_to_kzg_commitment(blob) proof = bls_add_one(spec.compute_blob_kzg_proof(blob, commitment)) assert not spec.verify_blob_kzg_proof(blob, commitment, proof) identifier = f'{encode_hex(hash(blob))}' yield f'verify_blob_kzg_proof_case_incorrect_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'blob': encode_hex(blob), 'commitment': encode_hex(commitment), 'proof': encode_hex(proof), }, 'output': False } # Edge case: Invalid blob for blob in INVALID_BLOBS: proof = G1 commitment = G1 expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof) identifier = f'{encode_hex(hash(blob))}' yield f'verify_blob_kzg_proof_case_invalid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'blob': encode_hex(blob), 'commitment': encode_hex(commitment), 'proof': encode_hex(proof), }, 'output': None } # Edge case: Invalid commitment for commitment in INVALID_G1_POINTS: blob = VALID_BLOBS[1] proof = G1 expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof) identifier = f'{encode_hex(hash(commitment))}' yield f'verify_blob_kzg_proof_case_invalid_commitment_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'blob': encode_hex(blob), 
'commitment': encode_hex(commitment), 'proof': encode_hex(proof), }, 'output': None } # Edge case: Invalid proof for proof in INVALID_G1_POINTS: blob = VALID_BLOBS[1] commitment = G1 expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof) identifier = f'{encode_hex(hash(proof))}' yield f'verify_blob_kzg_proof_case_invalid_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'blob': encode_hex(blob), 'commitment': encode_hex(commitment), 'proof': encode_hex(proof), }, 'output': None } def case06_verify_blob_kzg_proof_batch(): # Valid cases proofs = [] commitments = [] for blob in VALID_BLOBS: commitments.append(spec.blob_to_kzg_commitment(blob)) proofs.append(spec.compute_blob_kzg_proof(blob, commitments[-1])) for i in range(len(proofs)): assert spec.verify_blob_kzg_proof_batch(VALID_BLOBS[:i], commitments[:i], proofs[:i]) identifier = f'{encode_hex(hash(b"".join(VALID_BLOBS[:i])))}' yield f'verify_blob_kzg_proof_batch_case_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'blobs': encode_hex_list(VALID_BLOBS[:i]), 'commitments': encode_hex_list(commitments[:i]), 'proofs': encode_hex_list(proofs[:i]), }, 'output': True } # Incorrect proof proofs_incorrect = [bls_add_one(proofs[0])] + proofs[1:] assert not spec.verify_blob_kzg_proof_batch(VALID_BLOBS, commitments, proofs_incorrect) yield 'verify_blob_kzg_proof_batch_case_invalid_proof', { 'input': { 'blobs': encode_hex_list(VALID_BLOBS), 'commitments': encode_hex_list(commitments), 'proofs': encode_hex_list(proofs_incorrect), }, 'output': False } # Edge case: Invalid blobs for blob in INVALID_BLOBS: blobs_invalid = VALID_BLOBS[:4] + [blob] + VALID_BLOBS[5:] expect_exception(spec.verify_blob_kzg_proof_batch, blobs_invalid, commitments, proofs) identifier = f'{encode_hex(hash(blob))}' yield f'verify_blob_kzg_proof_batch_case_invalid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'blobs': encode_hex_list(blobs_invalid), 'commitments': 
encode_hex_list(commitments), 'proofs': encode_hex_list(proofs), }, 'output': None } # Edge case: Invalid commitment for commitment in INVALID_G1_POINTS: blobs = VALID_BLOBS commitments_invalid = [commitment] + commitments[1:] expect_exception(spec.verify_blob_kzg_proof_batch, blobs, commitments_invalid, proofs) identifier = f'{encode_hex(hash(commitment))}' yield f'verify_blob_kzg_proof_batch_case_invalid_commitment_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'blobs': encode_hex_list(blobs), 'commitments': encode_hex_list(commitments_invalid), 'proofs': encode_hex_list(proofs), }, 'output': None } # Edge case: Invalid proof for proof in INVALID_G1_POINTS: blobs = VALID_BLOBS proofs_invalid = [proof] + proofs[1:] expect_exception(spec.verify_blob_kzg_proof_batch, blobs, commitments, proofs_invalid) identifier = f'{encode_hex(hash(proof))}' yield f'verify_blob_kzg_proof_batch_case_invalid_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'blobs': encode_hex_list(blobs), 'commitments': encode_hex_list(commitments), 'proofs': encode_hex_list(proofs_invalid), }, 'output': None } # Edge case: Blob length different expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS[:-1], commitments, proofs) yield 'verify_blob_kzg_proof_batch_case_blob_length_different', { 'input': { 'blobs': encode_hex_list(VALID_BLOBS[:-1]), 'commitments': encode_hex_list(commitments), 'proofs': encode_hex_list(proofs), }, 'output': None } # Edge case: Commitment length different expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments[:-1], proofs) yield 'verify_blob_kzg_proof_batch_case_commitment_length_different', { 'input': { 'blobs': encode_hex_list(VALID_BLOBS), 'commitments': encode_hex_list(commitments[:-1]), 'proofs': encode_hex_list(proofs), }, 'output': None } # Edge case: Proof length different expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, proofs[:-1]) yield 
'verify_blob_kzg_proof_batch_case_proof_length_different', { 'input': { 'blobs': encode_hex_list(VALID_BLOBS), 'commitments': encode_hex_list(commitments), 'proofs': encode_hex_list(proofs[:-1]), }, 'output': None } def create_provider(fork_name: SpecForkName, handler_name: str, test_case_fn: Callable[[], Iterable[Tuple[str, Dict[str, Any]]]]) -> gen_typing.TestProvider: def prepare_fn() -> None: # Nothing to load / change in spec. Maybe in future forks. # Put the tests into the general config category, to not require any particular configuration. return def cases_fn() -> Iterable[gen_typing.TestCase]: for data in test_case_fn(): (case_name, case_content) = data yield gen_typing.TestCase( fork_name=fork_name, preset_name='general', runner_name='kzg', handler_name=handler_name, suite_name='small', case_name=case_name, case_fn=lambda: [('data', 'data', case_content)] ) return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn) if __name__ == "__main__": bls.use_arkworks() gen_runner.run_generator("kzg", [ # DENEB create_provider(DENEB, 'blob_to_kzg_commitment', case01_blob_to_kzg_commitment), create_provider(DENEB, 'compute_kzg_proof', case02_compute_kzg_proof), create_provider(DENEB, 'verify_kzg_proof', case03_verify_kzg_proof), create_provider(DENEB, 'compute_blob_kzg_proof', case04_compute_blob_kzg_proof), create_provider(DENEB, 'verify_blob_kzg_proof', case05_verify_blob_kzg_proof), create_provider(DENEB, 'verify_blob_kzg_proof_batch', case06_verify_blob_kzg_proof_batch), ])
21,655
38.446266
119
py
consensus-specs
consensus-specs-master/tests/generators/rewards/main.py
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110

if __name__ == "__main__":
    # Rewards tests are defined once for phase0; each later fork reuses them.
    phase_0_mods = {
        key: f'eth2spec.test.phase0.rewards.test_{key}'
        for key in ('basic', 'leak', 'random')
    }
    # No additional Altair specific rewards tests, yet.
    altair_mods = phase_0_mods
    # No additional Bellatrix specific rewards tests, yet.
    # Note: Block rewards are non-epoch rewards and are tested as part of block processing tests.
    # Transaction fees are part of the execution-layer.
    bellatrix_mods = altair_mods
    capella_mods = bellatrix_mods
    deneb_mods = capella_mods
    eip6110_mods = deneb_mods

    all_mods = {
        PHASE0: phase_0_mods,
        ALTAIR: altair_mods,
        BELLATRIX: bellatrix_mods,
        CAPELLA: capella_mods,
        DENEB: deneb_mods,
        EIP6110: eip6110_mods,
    }

    run_state_test_generators(runner_name="rewards", all_mods=all_mods)
1,049
31.8125
97
py
consensus-specs
consensus-specs-master/tests/generators/finality/main.py
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110

if __name__ == "__main__":
    # Finality tests are defined once for phase0; later forks add nothing new.
    phase_0_mods = {'finality': 'eth2spec.test.phase0.finality.test_finality'}
    altair_mods = phase_0_mods      # No additional Altair specific finality tests
    bellatrix_mods = altair_mods    # No additional Bellatrix specific finality tests
    capella_mods = bellatrix_mods   # No additional Capella specific finality tests
    deneb_mods = capella_mods       # No additional Deneb specific finality tests
    eip6110_mods = deneb_mods       # No additional EIP6110 specific finality tests

    all_mods = {
        PHASE0: phase_0_mods,
        ALTAIR: altair_mods,
        BELLATRIX: bellatrix_mods,
        CAPELLA: capella_mods,
        DENEB: deneb_mods,
        EIP6110: eip6110_mods,
    }

    run_state_test_generators(runner_name="finality", all_mods=all_mods)
964
40.956522
94
py
consensus-specs
consensus-specs-master/tests/generators/shuffling/main.py
from typing import Iterable
import random

from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing
from eth2spec.test.helpers.typing import PresetBaseName
from eth2spec.phase0 import mainnet as spec_mainnet, minimal as spec_minimal
from eth2spec.test.helpers.constants import PHASE0, MINIMAL, MAINNET


def generate_random_bytes(rng=None):
    """Return 32 pseudo-random bytes drawn from ``rng``.

    If ``rng`` is omitted, a fresh ``random.Random(5566)`` is created. The
    previous signature used ``rng=random.Random(5566)`` directly — a mutable
    default evaluated once at import time, so repeated no-arg calls shared
    (and silently advanced) hidden RNG state. All callers in this module pass
    an explicit rng, so their behavior is unchanged.
    """
    if rng is None:
        rng = random.Random(5566)
    return bytes(rng.randint(0, 255) for _ in range(32))


# NOTE: somehow the random.Random generated seeds do not have pickle issue.
rng = random.Random(1234)
# 30 deterministic 32-byte seeds driving the shuffling vectors.
seeds = [generate_random_bytes(rng) for _ in range(30)]


def shuffling_case_fn(spec, seed, count):
    """Yield one 'mapping' data blob: index -> shuffled index for all i < count."""
    yield 'mapping', 'data', {
        'seed': '0x' + seed.hex(),
        'count': count,
        'mapping': [int(spec.compute_shuffled_index(i, count, seed)) for i in range(count)]
    }


def shuffling_case(spec, seed, count):
    """Return (case_name, case_fn) for one seed/count combination."""
    return f'shuffle_0x{seed.hex()}_{count}', lambda: shuffling_case_fn(spec, seed, count)


def shuffling_test_cases(spec):
    # Cover empty, tiny, and larger index counts for each seed.
    for seed in seeds:
        for count in [0, 1, 2, 3, 5, 10, 33, 100, 1000, 9999]:
            yield shuffling_case(spec, seed, count)


def create_provider(preset_name: PresetBaseName) -> gen_typing.TestProvider:
    """Build the shuffling test provider for the given preset."""

    def prepare_fn() -> None:
        return

    def cases_fn() -> Iterable[gen_typing.TestCase]:
        # Select the spec build matching the preset; fail loudly on anything else.
        if preset_name == MAINNET:
            spec = spec_mainnet
        elif preset_name == MINIMAL:
            spec = spec_minimal
        else:
            raise Exception(f"unrecognized preset: {preset_name}")
        for (case_name, case_fn) in shuffling_test_cases(spec):
            yield gen_typing.TestCase(
                fork_name=PHASE0,
                preset_name=preset_name,
                runner_name='shuffling',
                handler_name='core',
                suite_name='shuffle',
                case_name=case_name,
                case_fn=case_fn,
            )

    return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)


if __name__ == "__main__":
    gen_runner.run_generator("shuffling", [
        create_provider(MINIMAL),
        create_provider(MAINNET),
    ])
2,140
30.028986
91
py
consensus-specs
consensus-specs-master/tests/generators/operations/main.py
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators, combine_mods
from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110

if __name__ == "__main__":
    # Phase0 block-processing operation test modules.
    phase_0_mods = {
        key: f'eth2spec.test.phase0.block_processing.test_process_{key}'
        for key in ('attestation', 'attester_slashing', 'block_header',
                    'deposit', 'proposer_slashing', 'voluntary_exit')
    }

    # Altair adds sync-aggregate tests and overrides the deposit tests.
    _new_altair_mods = {
        'sync_aggregate': [
            f'eth2spec.test.altair.block_processing.sync_aggregate.test_process_{key}'
            for key in ('sync_aggregate', 'sync_aggregate_random')
        ],
        'deposit': 'eth2spec.test.altair.block_processing.test_process_deposit',
    }
    altair_mods = combine_mods(_new_altair_mods, phase_0_mods)

    _new_bellatrix_mods = {
        key: f'eth2spec.test.bellatrix.block_processing.test_process_{key}'
        for key in ('deposit', 'execution_payload', 'voluntary_exit')
    }
    bellatrix_mods = combine_mods(_new_bellatrix_mods, altair_mods)

    _new_capella_mods = {
        key: f'eth2spec.test.capella.block_processing.test_process_{key}'
        for key in ('bls_to_execution_change', 'deposit', 'execution_payload', 'withdrawals')
    }
    capella_mods = combine_mods(_new_capella_mods, bellatrix_mods)

    _new_deneb_mods = {
        key: f'eth2spec.test.deneb.block_processing.test_process_{key}'
        for key in ('execution_payload', 'voluntary_exit')
    }
    deneb_mods = combine_mods(_new_deneb_mods, capella_mods)

    _new_eip6110_mods = {
        key: f'eth2spec.test.eip6110.block_processing.test_process_{key}'
        for key in ('deposit_receipt',)
    }
    eip6110_mods = combine_mods(_new_eip6110_mods, deneb_mods)

    # TODO Custody Game testgen is disabled for now
    # _new_custody_game_mods = {key: 'eth2spec.test.custody_game.block_processing.test_process_' + key for key in [
    #     'attestation',
    #     'chunk_challenge',
    #     'custody_key_reveal',
    #     'custody_slashing',
    #     'early_derived_secret_reveal',
    # ]}
    # custody_game_mods = combine_mods(_new_custody_game_mods, phase0_mods)

    all_mods = {
        PHASE0: phase_0_mods,
        ALTAIR: altair_mods,
        BELLATRIX: bellatrix_mods,
        CAPELLA: capella_mods,
        DENEB: deneb_mods,
        EIP6110: eip6110_mods,
    }

    run_state_test_generators(runner_name="operations", all_mods=all_mods)
2,548
34.901408
115
py
consensus-specs
consensus-specs-master/tests/generators/light_client/main.py
from eth2spec.test.helpers.constants import ALTAIR, BELLATRIX, CAPELLA, DENEB
from eth2spec.gen_helpers.gen_from_tests.gen import combine_mods, run_state_test_generators

if __name__ == "__main__":
    # Light-client tests start at Altair.
    altair_mods = {
        key: f'eth2spec.test.altair.light_client.test_{key}'
        for key in ('single_merkle_proof', 'sync', 'update_ranking')
    }
    # Bellatrix adds no light-client tests of its own.
    bellatrix_mods = altair_mods

    # Capella adds new single-merkle-proof tests on top of the Bellatrix set.
    _new_capella_mods = {
        key: f'eth2spec.test.capella.light_client.test_{key}'
        for key in ('single_merkle_proof',)
    }
    capella_mods = combine_mods(_new_capella_mods, bellatrix_mods)

    # Deneb adds no light-client tests of its own.
    deneb_mods = capella_mods

    all_mods = {
        ALTAIR: altair_mods,
        BELLATRIX: bellatrix_mods,
        CAPELLA: capella_mods,
        DENEB: deneb_mods,
    }

    run_state_test_generators(runner_name="light_client", all_mods=all_mods)
849
30.481481
91
py
consensus-specs
consensus-specs-master/tests/generators/light_client/__init__.py
0
0
0
py
consensus-specs
consensus-specs-master/solidity_deposit_contract/web3_tester/setup.py
# NOTE(review): distutils is deprecated and removed in Python 3.12;
# consider migrating to `from setuptools import setup`.
from distutils.core import setup

setup(
    name='deposit_contract_tester',
    packages=['deposit_contract'],
    package_dir={"": "."},
    # Fixed typo: the setup() keyword is `tests_require`, not `tests_requires`
    # (the misspelled keyword was silently ignored as an unknown argument).
    tests_require=[],
    install_requires=[]  # see requirements.txt file
)
217
20.8
52
py
consensus-specs
consensus-specs-master/solidity_deposit_contract/web3_tester/tests/conftest.py
import json
import os

import pytest
import eth_tester
from eth_tester import (
    EthereumTester,
    PyEVMBackend,
)
from web3 import Web3
from web3.providers.eth_tester import EthereumTesterProvider

DIR = os.path.dirname(__file__)


def get_deposit_contract_json():
    """Load and parse the compiled deposit contract artifact (ABI + bytecode).

    Uses a context manager so the file handle is closed deterministically;
    the previous version leaked the handle from a bare ``open().read()``.
    """
    file_path = os.path.join(DIR, '../../deposit_contract.json')
    with open(file_path) as f:
        return json.load(f)


# Constants
MIN_DEPOSIT_AMOUNT = 1000000000  # Gwei
FULL_DEPOSIT_AMOUNT = 32000000000  # Gwei
DEPOSIT_CONTRACT_TREE_DEPTH = 32
TWO_TO_POWER_OF_TREE_DEPTH = 2**DEPOSIT_CONTRACT_TREE_DEPTH


@pytest.fixture
def tester():
    # Fresh in-memory EVM chain per test.
    return EthereumTester(PyEVMBackend())


@pytest.fixture
def a0(tester):
    # First pre-funded test account.
    return tester.get_accounts()[0]


@pytest.fixture
def w3(tester):
    web3 = Web3(EthereumTesterProvider(tester))
    return web3


@pytest.fixture
def registration_contract(w3, tester):
    """Deploy the deposit contract and return a handle bound to its address."""
    contract_bytecode = get_deposit_contract_json()['bytecode']
    contract_abi = get_deposit_contract_json()['abi']
    registration = w3.eth.contract(
        abi=contract_abi,
        bytecode=contract_bytecode)
    tx_hash = registration.constructor().transact()
    # NOTE(review): camelCase web3 API (deprecated in web3.py v6 in favor of
    # `wait_for_transaction_receipt`); kept to match the version pinned here.
    tx_receipt = w3.eth.waitForTransactionReceipt(tx_hash)
    registration_deployed = w3.eth.contract(
        address=tx_receipt.contractAddress,
        abi=contract_abi
    )
    return registration_deployed


@pytest.fixture
def assert_tx_failed(tester):
    """Fixture: run a callable, assert it raises, then roll the chain back."""
    def assert_tx_failed(function_to_test, exception=eth_tester.exceptions.TransactionFailed):
        snapshot_id = tester.take_snapshot()
        with pytest.raises(exception):
            function_to_test()
        tester.revert_to_snapshot(snapshot_id)
    return assert_tx_failed
1,734
23.785714
94
py
consensus-specs
consensus-specs-master/solidity_deposit_contract/web3_tester/tests/test_deposit.py
from random import randint import pytest import eth_utils from eth2spec.phase0.spec import DepositData from eth2spec.utils.ssz.ssz_typing import List from eth2spec.utils.ssz.ssz_impl import hash_tree_root from tests.conftest import ( FULL_DEPOSIT_AMOUNT, MIN_DEPOSIT_AMOUNT, ) SAMPLE_PUBKEY = b'\x11' * 48 SAMPLE_WITHDRAWAL_CREDENTIALS = b'\x22' * 32 SAMPLE_VALID_SIGNATURE = b'\x33' * 96 @pytest.fixture def deposit_input(amount): """ pubkey: bytes[48] withdrawal_credentials: bytes[32] signature: bytes[96] deposit_data_root: bytes[32] """ return ( SAMPLE_PUBKEY, SAMPLE_WITHDRAWAL_CREDENTIALS, SAMPLE_VALID_SIGNATURE, hash_tree_root( DepositData( pubkey=SAMPLE_PUBKEY, withdrawal_credentials=SAMPLE_WITHDRAWAL_CREDENTIALS, amount=amount, signature=SAMPLE_VALID_SIGNATURE, ), ) ) @pytest.mark.parametrize( ('success', 'amount'), [ (True, FULL_DEPOSIT_AMOUNT), (True, MIN_DEPOSIT_AMOUNT), (False, MIN_DEPOSIT_AMOUNT - 1), (True, FULL_DEPOSIT_AMOUNT + 1) ] ) def test_deposit_amount(registration_contract, w3, success, amount, assert_tx_failed, deposit_input): call = registration_contract.functions.deposit(*deposit_input) if success: assert call.transact({"value": amount * eth_utils.denoms.gwei}) else: assert_tx_failed( lambda: call.transact({"value": amount * eth_utils.denoms.gwei}) ) @pytest.mark.parametrize( 'amount', [ (FULL_DEPOSIT_AMOUNT) ] ) @pytest.mark.parametrize( 'invalid_pubkey,invalid_withdrawal_credentials,invalid_signature,success', [ (False, False, False, True), (True, False, False, False), (False, True, False, False), (False, False, True, False), ] ) def test_deposit_inputs(registration_contract, w3, assert_tx_failed, amount, invalid_pubkey, invalid_withdrawal_credentials, invalid_signature, success): pubkey = SAMPLE_PUBKEY[2:] if invalid_pubkey else SAMPLE_PUBKEY withdrawal_credentials = ( SAMPLE_WITHDRAWAL_CREDENTIALS[2:] if invalid_withdrawal_credentials else SAMPLE_WITHDRAWAL_CREDENTIALS ) signature = SAMPLE_VALID_SIGNATURE[2:] if invalid_signature else 
SAMPLE_VALID_SIGNATURE call = registration_contract.functions.deposit( pubkey, withdrawal_credentials, signature, hash_tree_root( DepositData( pubkey=SAMPLE_PUBKEY if invalid_pubkey else pubkey, withdrawal_credentials=( SAMPLE_WITHDRAWAL_CREDENTIALS if invalid_withdrawal_credentials else withdrawal_credentials ), amount=amount, signature=SAMPLE_VALID_SIGNATURE if invalid_signature else signature, ), ) ) if success: assert call.transact({"value": amount * eth_utils.denoms.gwei}) else: assert_tx_failed( lambda: call.transact({"value": amount * eth_utils.denoms.gwei}) ) def test_deposit_event_log(registration_contract, a0, w3): log_filter = registration_contract.events.DepositEvent.createFilter( fromBlock='latest', ) deposit_amount_list = [randint(MIN_DEPOSIT_AMOUNT, FULL_DEPOSIT_AMOUNT * 2) for _ in range(3)] for i in range(3): deposit_input = ( SAMPLE_PUBKEY, SAMPLE_WITHDRAWAL_CREDENTIALS, SAMPLE_VALID_SIGNATURE, hash_tree_root( DepositData( pubkey=SAMPLE_PUBKEY, withdrawal_credentials=SAMPLE_WITHDRAWAL_CREDENTIALS, amount=deposit_amount_list[i], signature=SAMPLE_VALID_SIGNATURE, ), ) ) registration_contract.functions.deposit( *deposit_input, ).transact({"value": deposit_amount_list[i] * eth_utils.denoms.gwei}) logs = log_filter.get_new_entries() assert len(logs) == 1 log = logs[0]['args'] assert log['pubkey'] == deposit_input[0] assert log['withdrawal_credentials'] == deposit_input[1] assert log['amount'] == deposit_amount_list[i].to_bytes(8, 'little') assert log['signature'] == deposit_input[2] assert log['index'] == i.to_bytes(8, 'little') def test_deposit_tree(registration_contract, w3, assert_tx_failed): log_filter = registration_contract.events.DepositEvent.createFilter( fromBlock='latest', ) deposit_amount_list = [randint(MIN_DEPOSIT_AMOUNT, FULL_DEPOSIT_AMOUNT * 2) for _ in range(10)] deposit_data_list = [] for i in range(0, 10): deposit_data = DepositData( pubkey=SAMPLE_PUBKEY, withdrawal_credentials=SAMPLE_WITHDRAWAL_CREDENTIALS, 
amount=deposit_amount_list[i], signature=SAMPLE_VALID_SIGNATURE, ) deposit_input = ( SAMPLE_PUBKEY, SAMPLE_WITHDRAWAL_CREDENTIALS, SAMPLE_VALID_SIGNATURE, hash_tree_root(deposit_data), ) deposit_data_list.append(deposit_data) tx_hash = registration_contract.functions.deposit( *deposit_input, ).transact({"value": deposit_amount_list[i] * eth_utils.denoms.gwei}) receipt = w3.eth.getTransactionReceipt(tx_hash) print("deposit transaction consumes %d gas" % receipt['gasUsed']) logs = log_filter.get_new_entries() assert len(logs) == 1 log = logs[0]['args'] assert log["index"] == i.to_bytes(8, 'little') # Check deposit count and root count = len(deposit_data_list).to_bytes(8, 'little') assert count == registration_contract.functions.get_deposit_count().call() root = hash_tree_root(List[DepositData, 2**32](*deposit_data_list)) assert root == registration_contract.functions.get_deposit_root().call()
6,357
31.605128
99
py
consensus-specs
consensus-specs-master/solidity_deposit_contract/web3_tester/tests/__init__.py
0
0
0
py
drone-net
drone-net-master/split.py
import glob import os import Tkinter import Tkconstants import tkFileDialog while True: print("Please select your image directory.") current_dir = tkFileDialog.askdirectory() if current_dir == None or current_dir == "": print("You must select a directory.") continue break # Percentage of images to be used for the test set percentage_test = 10 # Create and/or truncate train.txt and test.txt file_train = open('train.txt', 'w') file_test = open('test.txt', 'w') # Populate train.txt and test.txt counter = 1 index_test = round(100 / percentage_test) for pathAndFilename in glob.iglob(os.path.join(current_dir, "*.jpg")): title, ext = os.path.splitext(os.path.basename(pathAndFilename)) if counter == index_test: counter = 1 file_test.write(current_dir + "/" + title + '.jpg' + "\n") else: file_train.write(current_dir + "/" + title + '.jpg' + "\n") counter = counter + 1
948
31.724138
70
py
drone-net
drone-net-master/normalize_labels.py
from __future__ import division from PIL import Image import os img_dir = 'images' lab_dir = 'labels' out_dir = 'normalized-labels' def normalize_label(label, width, height): cl, cx, cy, wx, wy = label return int(cl), cx/width, cy/height, wx/width, wy/height if not os.path.exists(out_dir): os.makedirs(out_dir) for filename in os.listdir(img_dir): if filename.endswith(".jpg"): im_path = os.path.join(img_dir, filename) filename_txt = filename.split('.')[0] + '.txt' lab_path = os.path.join(lab_dir, filename_txt) norm_lab_path = os.path.join(out_dir, filename_txt) # Open image im = Image.open(im_path) width, height = im.size # Open labels with open(lab_path, "r") as f: # there may be multiple bboxes, hence multiple lines split_lines = [l.strip().split() for l in f.readlines()] labels = [map(float, l) for l in split_lines]) # Create normalized label norm_labels = [normalize_label(label, width, height) for label in labels] # Save new label with open(norm_lab_path, "w") as f: for norm_label in norm_labels: c, x, y, w, h = norm_label print(f"{int(c)} {x} {y} {w} {h}", file=f) # print(str_norm_label) # print(im_path, width, height, label, norm_label)
1,393
28.041667
81
py
wav2letter
wav2letter-main/recipes/seq2seq_tds/librispeech/prepare.py
""" Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. This source code is licensed under the MIT-style license found in the LICENSE file in the root directory of this source tree. ---------- Script to prepare recipe to train/eval model on Librispeech in wav2letter++ pipelines Command : python3 prepare.py --data_dst [...] --model_dst [...] Replace [...] with appropriate paths """ from __future__ import absolute_import, division, print_function, unicode_literals import argparse import os from collections import defaultdict import sentencepiece as spm if __name__ == "__main__": parser = argparse.ArgumentParser(description="Librispeech Dataset creation.") parser.add_argument( "--data_dst", help="data destination directory", default="./librispeech" ) parser.add_argument( "--model_dst", help="model auxilary files destination directory", default="./seq2seq_tds_librispeech", ) parser.add_argument( "-p", "--process", help="# of process for Multiprocessing", default=8, type=int ) args = parser.parse_args() os.system( "python3 {}/../../../data/librispeech/prepare.py --dst {} -p {}".format( os.path.dirname(os.path.abspath(__file__)), args.data_dst, args.process ) ) subpaths = { "train": ["train-clean-100", "train-clean-360", "train-other-500"], "dev": ["dev-clean", "dev-other"], "test": ["test-clean", "test-other"], } lists_path = os.path.join(args.data_dst, "lists") am_path = os.path.join(args.model_dst, "am") decoder_path = os.path.join(args.model_dst, "decoder") os.makedirs(am_path, exist_ok=True) os.makedirs(decoder_path, exist_ok=True) # Generating am/* num_wordpieces = 10000 nbest = 10 train_all_text = os.path.join(am_path, "train.txt") prefix = "librispeech-train-all-unigram-{}".format(num_wordpieces) prefix = os.path.join(am_path, prefix) vocab_name = prefix + ".vocab" model_name = prefix + ".model" # prepare data print("Preparing tokens and lexicon for acoustic model...\n", flush=True) word_dict = defaultdict(set) with open(train_all_text, "w") as 
ftext: for key, names in subpaths.items(): for name in names: with open(os.path.join(lists_path, name + ".lst"), "r") as flist: for line in flist: transcription = line.strip().split(" ")[3:] if key == "train": ftext.write(" ".join(transcription) + "\n") word_dict[key].update(transcription) lexicon_words = sorted(word_dict["train"] | word_dict["dev"]) # train print("Computing word pieces...\n", flush=True) train_cmd = ( "--input={input} --model_prefix={prefix} --vocab_size={sz}" " --character_coverage=1.0 --model_type=unigram" " --split_by_unicode_script=false".format( input=train_all_text, prefix=prefix, sz=num_wordpieces ) ) spm.SentencePieceTrainer.Train(train_cmd) # word piece dictionary print("Creating word piece list...\n", flush=True) exclude_list = {"<unk>", "<s>", "</s>"} with open(vocab_name.replace(".vocab", ".tokens"), "w") as fvocab_filt: with open(vocab_name, "r", encoding="utf-8") as fvocab: for line in fvocab: val, _ = line.strip().split("\t", 1) if val not in exclude_list: fvocab_filt.write(val.replace("\u2581", "_") + "\n") # word -> word piece lexicon for loading targets print("Creating word -> word pieces lexicon...\n", flush=True) sp = spm.SentencePieceProcessor() sp.Load(model_name) lexicon_name = "librispeech-train+dev-unigram-{sz}-nbest{n}.lexicon".format( sz=num_wordpieces, n=nbest ) with open(os.path.join(am_path, lexicon_name), "w") as f_lexicon: for word in lexicon_words: wps = sp.NBestEncodeAsPieces(word, nbest) for wp in wps: # the order matters for our training f_lexicon.write( word + "\t" + " ".join([w.replace("\u2581", "_") for w in wp]) + "\n" ) print("Done!", flush=True)
4,355
34.704918
87
py
wav2letter
wav2letter-main/recipes/conv_glu/wsj/prepare.py
""" Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. This source code is licensed under the MIT-style license found in the LICENSE file in the root directory of this source tree. ---------- Script to prepare recipe to train/eval model on Librispeech in wav2letter++ pipelines Please install `sph2pipe` on your own - see https://www.ldc.upenn.edu/language-resources/tools/sphere-conversion-tools \ with commands : wget https://www.ldc.upenn.edu/sites/www.ldc.upenn.edu/files/ctools/sph2pipe_v2.5.tar.gz tar -xzf sph2pipe_v2.5.tar.gz && cd sph2pipe_v2.5 gcc -o sph2pipe *.c -lm Command : python3 prepare_data.py --wsj0 [...]/WSJ0/media \ --wsj1 [...]/WSJ1/media --data_dst [...] --model_dst [...] --sph2pipe [...]/sph2pipe_v2.5/sph2pipe --kenlm [...] Replace [...] with appropriate paths """ from __future__ import absolute_import, division, print_function, unicode_literals import argparse import os import re from collections import defaultdict import numpy def get_spelling(word): spelling = re.sub(r"\(\S+\)", "", word) # not pronounced spelling = re.sub(r'[,\.:\-/&\?\!\(\)";\{\}\_#]+', "", spelling) if word == "'single-quote": spelling = spelling.replace("'", "") return spelling if __name__ == "__main__": parser = argparse.ArgumentParser(description="Librispeech Dataset creation.") parser.add_argument("--wsj0", help="top level directory containing all WSJ0 discs") parser.add_argument("--wsj1", help="top level directory containing all WSJ1 discs") parser.add_argument( "--data_dst", help="data destination directory", default="./wsj" ) parser.add_argument( "--model_dst", help="model auxilary files destination directory", default="./conv_glu_librispeech_char", ) parser.add_argument( "--wsj1_type", help="if you are using larger corpus LDC94S13A, set parameter to `LDC94S13A`", default="LDC94S13B", ) parser.add_argument( "--sph2pipe", help="path to sph2pipe executable", default="./sph2pipe_v2.5/sph2pipe", ) parser.add_argument("--kenlm", help="location to 
installed kenlm directory") parser.add_argument( "-p", "--process", help="# of process for Multiprocessing", default=8, type=int ) args = parser.parse_args() os.system( "python3 {}/../../../data/wsj/prepare.py " "--wsj0 {} --wsj1 {} --sph2pipe {} --wsj1_type {} --dst {} -p {}".format( os.path.dirname(os.path.abspath(__file__)), args.wsj0, args.wsj1, args.sph2pipe, args.wsj1_type, args.data_dst, args.process, ) ) lists_path = os.path.join(args.data_dst, "lists") am_path = os.path.join(args.model_dst, "am") lm_data_path = os.path.join(args.data_dst, "text/lm.txt") decoder_path = os.path.join(args.model_dst, "decoder") os.makedirs(am_path, exist_ok=True) os.makedirs(decoder_path, exist_ok=True) # Generating am/* print("Generating tokens.txt for acoustic model training", flush=True) with open(os.path.join(am_path, "tokens.txt"), "w") as f_tokens: f_tokens.write("|\n") f_tokens.write("'\n") for alphabet in range(ord("a"), ord("z") + 1): f_tokens.write(chr(alphabet) + "\n") print( "Generating lexicon.txt (word -> tokens) for acoustic model training", flush=True, ) # words used in training/eval to prepare spelling words_set = set() # words from lm data and train transcription for decoder lexicon_dict = defaultdict(int) for name in ["si284", "nov93dev"]: with open(os.path.join(lists_path, name + ".lst"), "r") as flist: for line in flist: transcription = line.strip().split(" ")[3:] words_set.update(transcription) if name == "si284": for word in transcription: lexicon_dict[word] += 1 print( "Writing lexicon file - {}...".format( os.path.join(am_path, "lexicon_si284+nov93dev.txt") ), flush=True, ) with open(os.path.join(am_path, "lexicon_si284+nov93dev.txt"), "w") as f: for word in words_set: spelling = get_spelling(word) assert re.match( r"[a-z']+", spelling ), "invalid spelling for word '{}'".format(word) f.write( "{word}\t{tokens} |\n".format( word=word, tokens=" ".join(list(spelling)) ) ) # Generating decoder/* # prepare lexicon word -> tokens spelling # write words to 
lexicon.txt file print("Generating lexicon.txt (word -> tokens) for decoding", flush=True) lex_file = os.path.join(decoder_path, "lexicon.txt") print("Writing lexicon file - {}...".format(lex_file), flush=True) with open(lex_file, "w") as f, open(lm_data_path, "r") as f_lm: for line in f_lm: for word in line.strip().split(" "): lexicon_dict[word] += 1 sorted_indices = numpy.argsort(list(lexicon_dict.values()))[::-1] words = list(lexicon_dict.keys()) for index in sorted_indices: spelling = get_spelling(words[index]) if re.match("^[a-z']+$", spelling): f.write("{w}\t{s} |\n".format(w=words[index], s=" ".join(spelling))) else: print('Ignore word "{}" in lexicon'.format(words[index])) # Train 4-gram language model train_data = os.path.join(decoder_path, "lm+si284.txt") os.system( "cp {lm_data} {dst} && cat {trans} >> {dst}".format( lm_data=lm_data_path, dst=train_data, trans=os.path.join(args.data_dst, "text/si284.txt"), ) ) lmplz = os.path.join(args.kenlm, "build", "bin", "lmplz") binary = os.path.join(args.kenlm, "build", "bin", "build_binary") lm_file = os.path.join(decoder_path, "lm-4g") cmd = "{bin} -T /tmp -S 10G --discount_fallback -o 4 --text {file} > {lm_file}.arpa" os.system(cmd.format(bin=lmplz, lm_file=lm_file, file=train_data)) os.system("{bin} {lm_file}.arpa {lm_file}.bin".format(bin=binary, lm_file=lm_file)) print("Done!", flush=True)
6,352
35.096591
90
py
wav2letter
wav2letter-main/recipes/conv_glu/librispeech/prepare.py
""" Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. This source code is licensed under the MIT-style license found in the LICENSE file in the root directory of this source tree. ---------- Script to prepare recipe to train/eval model on Librispeech in wav2letter++ pipelines Please install `kenlm` on your own - https://github.com/kpu/kenlm Command : python3 prepare.py --data_dst [...] --model_dst [...] --kenlm [...]/kenlm/ Replace [...] with appropriate paths """ from __future__ import absolute_import, division, print_function, unicode_literals import argparse import os from collections import defaultdict if __name__ == "__main__": parser = argparse.ArgumentParser(description="Librispeech Dataset creation.") parser.add_argument( "--data_dst", help="data destination directory", default="./librispeech" ) parser.add_argument( "--model_dst", help="model auxilary files destination directory", default="./conv_glu_librispeech_char", ) parser.add_argument("--kenlm", help="location to installed kenlm directory") parser.add_argument( "-p", "--process", help="# of process for Multiprocessing", default=8, type=int ) args = parser.parse_args() os.system( "python3 {}/../../../data/librispeech/prepare.py --dst {} -p {}".format( os.path.dirname(os.path.abspath(__file__)), args.data_dst, args.process ) ) subpaths = { "train": ["train-clean-100", "train-clean-360", "train-other-500"], "dev": ["dev-clean", "dev-other"], "test": ["test-clean", "test-other"], } lists_path = os.path.join(args.data_dst, "lists") am_path = os.path.join(args.model_dst, "am") decoder_path = os.path.join(args.model_dst, "decoder") os.makedirs(am_path, exist_ok=True) os.makedirs(decoder_path, exist_ok=True) # Generating am/* print("Generating tokens.txt for acoustic model training", flush=True) with open(os.path.join(am_path, "tokens.txt"), "w") as fout: fout.write("|\n") fout.write("'\n") for alphabet in range(ord("a"), ord("z") + 1): fout.write(chr(alphabet) + "\n") print( "Generating 
lexicon.txt (word -> tokens) for acoustic model training", flush=True, ) word_dict = defaultdict(set) for key, names in subpaths.items(): for name in names: with open(os.path.join(lists_path, name + ".lst"), "r") as flist: for line in flist: transcription = line.strip().split(" ")[3:] word_dict[key].update(transcription) lexicon_words = sorted(word_dict["train"] | word_dict["dev"]) with open(os.path.join(am_path, "lexicon_train+dev.txt"), "w") as f: for word in lexicon_words: f.write( "{word}\t{tokens} |\n".format(word=word, tokens=" ".join(list(word))) ) # Generating decoder/* cmd = [ "python3 {}/../../utilities/prepare_librispeech_official_lm.py", "--dst {}", "--kenlm {}", ] os.system( " ".join(cmd).format( os.path.dirname(os.path.abspath(__file__)), decoder_path, args.kenlm ) ) print("Done!", flush=True)
3,286
32.20202
87
py
wav2letter
wav2letter-main/recipes/learnable_frontend/prepare.py
""" Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. This source code is licensed under the MIT-style license found in the LICENSE file in the root directory of this source tree. ---------- Script to prepare recipe to train/eval model on Librispeech in wav2letter++ pipelines Please install `sph2pipe` on your own - see https://www.ldc.upenn.edu/language-resources/tools/sphere-conversion-tools \ with commands : wget https://www.ldc.upenn.edu/sites/www.ldc.upenn.edu/files/ctools/sph2pipe_v2.5.tar.gz tar -xzf sph2pipe_v2.5.tar.gz && cd sph2pipe_v2.5 gcc -o sph2pipe *.c -lm Command : python3 prepare.py \ --src [...]/timit --data_dst [...] --model_dst [...] --sph2pipe [...]/sph2pipe_v2.5/sph2pipe Replace [...] with appropriate paths """ from __future__ import absolute_import, division, print_function, unicode_literals import argparse import os if __name__ == "__main__": parser = argparse.ArgumentParser(description="Librispeech Dataset creation.") parser.add_argument( "--src", help="Source directory with downloaded and unzipped TIMIT data" ) parser.add_argument( "--data_dst", help="data destination directory", default="./wsj" ) parser.add_argument( "--model_dst", help="model auxilary files destination directory", default="./" ) parser.add_argument( "--sph2pipe", help="path to sph2pipe executable", default="./sph2pipe_v2.5/sph2pipe", ) parser.add_argument( "-p", "--process", help="# of process for Multiprocessing", default=8, type=int ) args = parser.parse_args() os.system( "python3 {}/../../data/timit/prepare.py " "--src {} --sph2pipe {} --dst {} -p {}".format( os.path.dirname(os.path.abspath(__file__)), args.src, args.sph2pipe, args.data_dst, args.process, ) ) am_path = os.path.join(args.model_dst, "am") os.makedirs(am_path, exist_ok=True) with open( os.path.join( os.path.dirname(os.path.abspath(__file__)), "../../data/timit/phones.txt" ), "r", ) as fin, open(os.path.join(am_path, "tokens.txt"), "w") as fout, open( os.path.join(am_path, 
"lexicon.txt"), "w" ) as fout_lexicon: for line in fin: if line.strip() == "": continue fout.write(line) for token in line.strip().split(" "): fout_lexicon.write("{}\t{}\n".format(token, token)) print("Done!", flush=True)
2,576
30.814815
90
py
wav2letter
wav2letter-main/recipes/local_prior_match/librispeech/prepare_unpaired.py
""" Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. This source code is licensed under the MIT-style license found in the LICENSE file in the root directory of this source tree. ---------- Script to prepare unpaired data for training a model with local prior matching Command : python3 prepare_unpaired.py --data_dst [...] --model_dst [...] Replace [...] with appropriate paths """ from __future__ import absolute_import, division, print_function, unicode_literals import argparse import os if __name__ == "__main__": parser = argparse.ArgumentParser(description="Librispeech Dataset creation.") parser.add_argument( "--data_dst", help="data destination directory", default="./librispeech" ) parser.add_argument( "--model_dst", help="model auxilary files destination directory", default="./lpm_librispeech", ) args = parser.parse_args() subpaths = { "unpaired": ["train-clean-360", "train-other-500"], } lists_path = os.path.join(args.data_dst, "lists") am_path = os.path.join(args.model_dst, "am") unpaired_lists_path = os.path.join(args.model_dst, "lpm_data") reflen_dict = set() for name in subpaths["unpaired"]: unpaired_data = {} with open(os.path.join(lists_path, name + ".lst"), "r") as flist: for line in flist: file_tag, audio_path, audio_length, _ = line.strip().split(" ", 3) unpaired_data[file_tag] = (audio_path, audio_length) with open( os.path.join(unpaired_lists_path, name + "-viterbi.out"), "r" ) as fdata: with open( os.path.join(unpaired_lists_path, name + "-lpm.lst"), "w" ) as fout: for line in fdata: file_tag, reflen = line.strip().split(" ", 1) fout.write( "%s %s %s %s\n" % ( file_tag, unpaired_data[file_tag][0], unpaired_data[file_tag][1], reflen ) ) reflen_dict.add(reflen) # append reflen* to the new lexicon orig_lexicon = "librispeech-paired-train+dev-unigram-5000-nbest10.lexicon" lpm_lexicon = \ "librispeech-paired-train-unpaired-viterbi+dev-unigram-5000-nbest10.lexicon" with open(os.path.join(am_path, lpm_lexicon), "w") as fout: with 
open(os.path.join(am_path, orig_lexicon), "r") as fin: for line in fin: fout.write(line) for r in reflen_dict: # r's format is "reflen1", "reflen2", ... "reflen100", etc. fout.write(r + "\t" + " ".join(["a"] * int(r[6:])) + "\n") print("Done!", flush=True)
2,841
32.435294
84
py
wav2letter
wav2letter-main/recipes/local_prior_match/librispeech/prepare.py
""" Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. This source code is licensed under the MIT-style license found in the LICENSE file in the root directory of this source tree. ---------- Script to prepare token set and lexicon to train/eval model on Librispeech in wav2letter++ pipelines Command : python3 prepare.py --data_dst [...] --model_dst [...] Replace [...] with appropriate paths """ from __future__ import absolute_import, division, print_function, unicode_literals import argparse import os import sentencepiece as spm if __name__ == "__main__": parser = argparse.ArgumentParser(description="Librispeech Dataset creation.") parser.add_argument( "--data_dst", help="data destination directory", default="./librispeech" ) parser.add_argument( "--model_dst", help="model auxilary files destination directory", default="./lpm_librispeech", ) parser.add_argument( "-p", "--process", help="# of process for Multiprocessing", default=8, type=int ) args = parser.parse_args() os.system( "python3 {}/../../../data/librispeech/prepare.py --dst {} -p {}".format( os.path.dirname(os.path.abspath(__file__)), args.data_dst, args.process ) ) subpaths = { "paired": ["train-clean-100"], "unpaired": ["train-clean-360", "train-other-500"], "dev": ["dev-clean", "dev-other"], } lists_path = os.path.join(args.data_dst, "lists") am_path = os.path.join(args.model_dst, "am") lm_path = os.path.join(args.model_dst, "lm") unpaired_lists_path = os.path.join(args.model_dst, "lpm_data") os.makedirs(am_path, exist_ok=True) os.makedirs(lm_path, exist_ok=True) os.makedirs(unpaired_lists_path, exist_ok=True) # Generating am/* num_wordpieces = 5000 nbest = 10 train_all_text = os.path.join(am_path, "train.txt") prefix = "librispeech-paired-train-unigram-{}".format(num_wordpieces) prefix = os.path.join(am_path, prefix) vocab_name = prefix + ".vocab" model_name = prefix + ".model" # prepare paired data from train-clean-100 print("Preparing tokens and lexicon from paired data...\n", 
flush=True) word_dict = set() with open(train_all_text, "w") as ftext: for name in subpaths["paired"]: with open(os.path.join(lists_path, name + ".lst"), "r") as flist: for line in flist: transcription = line.strip().split(" ")[3:] ftext.write(" ".join(transcription) + "\n") word_dict.update(transcription) for name in subpaths["dev"]: with open(os.path.join(lists_path, name + ".lst"), "r") as flist: for line in flist: transcription = line.strip().split(" ")[3:] word_dict.update(transcription) lexicon_words = sorted(word_dict) # prepare dummy file lists and lexicon for unpaired data for name in subpaths["unpaired"]: with open(os.path.join(lists_path, name + ".lst"), "r") as flist: with open( os.path.join(unpaired_lists_path, name + "-dummy.lst"), "w" ) as fout: for line in flist: file_tag, audio_path, audio_length, _ = \ line.strip().split(" ", 3) # use a random word from the lexicon as the transcription fout.write("%s %s %s %s\n" % (file_tag, audio_path, audio_length, lexicon_words[0])) # train print("Computing word pieces...\n", flush=True) train_cmd = ( "--input={input} --model_prefix={prefix} --vocab_size={sz}" " --character_coverage=1.0 --model_type=unigram" " --split_by_unicode_script=false".format( input=train_all_text, prefix=prefix, sz=num_wordpieces ) ) spm.SentencePieceTrainer.Train(train_cmd) # word piece dictionary print("Creating word piece list...\n", flush=True) exclude_list = {"<unk>", "<s>", "</s>"} with open(vocab_name.replace(".vocab", ".tokens"), "w") as fvocab_filt: with open(vocab_name, "r", encoding="utf-8") as fvocab: for line in fvocab: val, _ = line.strip().split("\t", 1) if val not in exclude_list: fvocab_filt.write(val.replace("\u2581", "_") + "\n") # word -> word piece lexicon for loading targets print("Creating word -> word pieces lexicon...\n", flush=True) sp = spm.SentencePieceProcessor() sp.Load(model_name) lexicon_name = "librispeech-paired-train+dev-unigram-{sz}-nbest{n}.lexicon".format( sz=num_wordpieces, n=nbest ) with 
open(os.path.join(am_path, lexicon_name), "w") as f_lexicon: for word in lexicon_words: wps = sp.NBestEncodeAsPieces(word, nbest) for wp in wps: # the order matters for our training f_lexicon.write( word + "\t" + " ".join([w.replace("\u2581", "_") for w in wp]) + "\n" ) print("Done!", flush=True)
5,194
36.107143
100
py