repo (stringlengths 2-99) | file (stringlengths 13-225) | code (stringlengths 0-18.3M) | file_length (int64 0-18.3M) | avg_line_length (float64 0-1.36M) | max_line_length (int64 0-4.26M) | extension_type (stringclasses 1: py) |
---|---|---|---|---|---|---|
FATE
|
FATE-master/python/federatedml/framework/hetero/sync/paillier_keygen_sync.py
|
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.secureprotol.encrypt import PaillierEncrypt, IpclPaillierEncrypt
from federatedml.util import consts
class Arbiter(object):
# noinspection PyAttributeOutsideInit
def _register_paillier_keygen(self, pubkey_transfer):
self._pubkey_transfer = pubkey_transfer
def paillier_keygen(self, method, key_length, suffix=tuple()):
if method == consts.PAILLIER:
cipher = PaillierEncrypt()
elif method == consts.PAILLIER_IPCL:
cipher = IpclPaillierEncrypt()
else:
raise ValueError(f"Unsupported encryption method: {method}")
cipher.generate_key(key_length)
pub_key = cipher.get_public_key()
self._pubkey_transfer.remote(obj=pub_key, role=consts.HOST, idx=-1, suffix=suffix)
self._pubkey_transfer.remote(obj=pub_key, role=consts.GUEST, idx=-1, suffix=suffix)
return cipher
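# The arbiter alone holds the private key; the clients below only ever
# receive the public key, so encryption happens on host/guest while
# decryption stays with the arbiter.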
class _Client(object):
# noinspection PyAttributeOutsideInit
def _register_paillier_keygen(self, pubkey_transfer):
self._pubkey_transfer = pubkey_transfer
def gen_paillier_cipher_operator(self, suffix=tuple(), method=consts.PAILLIER):
pubkey = self._pubkey_transfer.get(idx=0, suffix=suffix)
if method == consts.PAILLIER:
cipher = PaillierEncrypt()
elif method == consts.PAILLIER_IPCL:
cipher = IpclPaillierEncrypt()
else:
raise ValueError(f"Unsupported encryption method: {method}")
cipher.set_public_key(pubkey)
return cipher
Host = _Client
Guest = _Client
| 2,185 | 34.258065 | 91 |
py
|
FATE
|
FATE-master/python/federatedml/framework/hetero/sync/gradient_sync.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
class Guest(object):
@abc.abstractmethod
def compute_intermediate(self, *args, **kwargs):
raise NotImplementedError("This method should be be called here")
@abc.abstractmethod
def aggregate_host_result(self, *args, **kwargs):
raise NotImplementedError("This method should be be called here")
def compute_gradient_procedure(self, *args, **kwargs):
raise NotImplementedError("This method should be be called here")
class Host(object):
@abc.abstractmethod
def compute_intermediate(self, *args, **kwargs):
raise NotImplementedError("This method should be be called here")
def compute_gradient_procedure(self, *args, **kwargs):
raise NotImplementedError("This method should be be called here")
class Arbiter(object):
def compute_gradient_procedure(self, *args, **kwargs):
raise NotImplementedError("This method should be be called here")
| 1,599 | 33.042553 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/framework/hetero/sync/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 661 | 35.777778 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/framework/hetero/sync/loss_sync.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from federatedml.util import consts
class Arbiter(object):
def _register_loss_sync(self, loss_transfer):
self.loss_transfer = loss_transfer
def sync_loss_info(self, suffix=tuple()):
loss = self.loss_transfer.get(idx=0, suffix=suffix)
return loss
class Guest(object):
def _register_loss_sync(self, host_loss_regular_transfer, loss_transfer, loss_intermediate_transfer):
self.host_loss_regular_transfer = host_loss_regular_transfer
self.loss_transfer = loss_transfer
self.loss_intermediate_transfer = loss_intermediate_transfer
def sync_loss_info(self, loss, suffix=tuple()):
self.loss_transfer.remote(loss, role=consts.ARBITER, idx=0, suffix=suffix)
def get_host_loss_intermediate(self, suffix=tuple()):
loss_intermediate = self.loss_intermediate_transfer.get(idx=-1, suffix=suffix)
return loss_intermediate
def get_host_loss_regular(self, suffix=tuple()):
losses = self.host_loss_regular_transfer.get(idx=-1, suffix=suffix)
return losses
class Host(object):
def _register_loss_sync(self, host_loss_regular_transfer, loss_transfer, loss_intermediate_transfer):
self.host_loss_regular_transfer = host_loss_regular_transfer
self.loss_transfer = loss_transfer
self.loss_intermediate_transfer = loss_intermediate_transfer
def remote_loss_intermediate(self, loss_intermediate, suffix=tuple()):
self.loss_intermediate_transfer.remote(obj=loss_intermediate, role=consts.GUEST, idx=0, suffix=suffix)
def remote_loss_regular(self, loss_regular, suffix=tuple()):
self.host_loss_regular_transfer.remote(obj=loss_regular, role=consts.GUEST, idx=0, suffix=suffix)
| 2,388 | 38.816667 | 110 |
py
|
FATE
|
FATE-master/rust/fate_crypto/fate_crypto/__init__.py
|
from .fate_crypto import hash, psi
__all__ = ["hash", "psi"]
| 62 | 14.75 | 34 |
py
|
FATE
|
FATE-master/rust/fate_crypto/fate_crypto/hash/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/rust/fate_crypto/fate_crypto/psi/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/rust/fate_crypto/scripts/bump_version.py
|
import toml
import pathlib
import argparse
root_path = pathlib.Path(__file__).parent.parent.resolve()
def update_version(version):
with open(root_path.joinpath("Cargo.toml")) as f:
cargo = toml.load(f)
old_version = cargo["package"]["version"]
print(f"bump fate_crypto version from `{old_version}` to `{version}`")
cargo["package"]["version"] = version
with open(root_path.joinpath("Cargo.toml"), "w") as f:
toml.dump(cargo, f)
if __name__ == "__main__":
parse = argparse.ArgumentParser("bump version")
parse.add_argument("version", type=str)
args = parse.parse_args()
update_version(args.version)
| 655 | 27.521739 | 74 |
py
|
FATE
|
FATE-master/rust/fate_crypto/tests/test_psi_bench.py
|
import random
import hashlib
from fate_crypto.psi import Curve25519
def ecdh(k, m):
return k.encrypt(m)
def dh(k, e):
return k.diffie_hellman(e)
def sha256(value):
return hashlib.sha256(bytes(value, encoding="utf-8")).digest()
def test_ecdh_encrypt_bench(benchmark):
k = Curve25519()
m = random.SystemRandom().getrandbits(256).to_bytes(32, "little")
result = benchmark(ecdh, k, m)
def test_ecdh_dh_bench(benchmark):
k = Curve25519()
m = random.SystemRandom().getrandbits(256).to_bytes(32, "little")
e = k.encrypt(m)
result = benchmark(dh, k, e)
def test_sha256_bench(benchmark):
m = "1000000000"
result = benchmark(sha256, m)
| 686 | 19.205882 | 69 |
py
|
FATE
|
FATE-master/rust/fate_crypto/tests/test_psi_ecdh.py
|
from fate_crypto.psi import Curve25519
import pickle
import unittest
import random
class TestStringMethods(unittest.TestCase):
def test_ecdh(self):
k1 = Curve25519()
k2 = Curve25519()
m = random.SystemRandom().getrandbits(33 * 8).to_bytes(33, "little")
self.assertEqual(
k2.diffie_hellman(k1.encrypt(m)), k1.diffie_hellman(k2.encrypt(m))
)
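        # the commutativity k2(k1(m)) == k1(k2(m)) checked above is what lets
        # two parties match doubly-encrypted elements in ECDH-style PSI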
def test_pickle(self):
k1 = Curve25519()
m = random.SystemRandom().getrandbits(33 * 8).to_bytes(33, "little")
pickled = pickle.dumps(k1)
k2 = pickle.loads(pickled)
self.assertEqual(k1.encrypt(m), k2.encrypt(m))
if __name__ == "__main__":
unittest.main()
| 703 | 26.076923 | 78 |
py
|
FATE
|
FATE-master/rust/fate_crypto/tests/test_sm3_hash.py
|
import unittest
from fate_crypto.hash import sm3_hash
class TestCorrect(unittest.TestCase):
def test_hash_1(self):
data = b"abc"
expected = "66c7f0f462eeedd9d1f2d46bdc10e4e24167c4875cf2f7a2297da02b8f4ba8e0"
self.assertEqual(sm3_hash(data).hex(), expected)
def test_hash_2(self):
data = b"abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
expected = "debe9ff92275b8a138604889c18e5a4d6fdb70e5387e5765293dcba39c0c5732"
self.assertEqual(sm3_hash(data).hex(), expected)
if __name__ == "__main__":
unittest.main()
| 589 | 30.052632 | 85 |
py
|
FATE
|
FATE-master/doc/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/doc/mkdocs/hook.py
|
import os
import re
import glob
import git
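# The two regexes below match mkdocs include directives of the form
# {% include-examples "name" %} and {% include-example "path" %}, capturing
# the includer's indentation so the generated markdown nests correctly.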
_INCLUDE_EXAMPLES_REGEX = re.compile(
r"""(?P<_includer_indent>[^\S\r\n]*){\s*%\s*include-examples\s*"(?P<example_name>[^")]+)"\s*%\s*}\s*""",
flags=re.VERBOSE | re.DOTALL,
)
_INCLUDE_EXAMPLE_REGEX = re.compile(
r"""(?P<_includer_indent>[^\S\r\n]*){\s*%\s*include-example\s*"(?P<example_path>[^")]+)"\s*%\s*}\s*""",
flags=re.VERBOSE | re.DOTALL,
)
_LINT_MAP = {
".py": "python",
".json": "json",
".yaml": "yaml",
".yml": "yaml",
".sh": "sh",
".md": "md",
}
_REPO_BASE = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)
)
_EXAMPLES_BASE = os.path.abspath(os.path.join(_REPO_BASE, "examples"))
def sub_include_examples(match):
example_name = match.group("example_name")
indents_level0 = match.group("_includer_indent")
lines = []
lines.append(f"{indents_level0}???+ Example\n")
lines.append(f"{indents_level0}\n")
indents_level1 = indents_level0 + " "
for example_type, pretty_name in [("pipeline", "Pipeline"), ("dsl/v2", "DSL")]:
include_path = os.path.join(_EXAMPLES_BASE, example_type, example_name, "*.*")
lines.append(f'{indents_level1}=== "{pretty_name}"\n\n')
indents_level2 = f"{indents_level1} "
for name in glob.glob(include_path):
if name.endswith("README.md") or name.endswith("readme.md"):
lines.append(f"{indents_level2}```markdown\n")
with open(name) as f:
for line in f.readlines():
lines.append(f"{indents_level2}{line}")
lines.append(f"{indents_level2}\n")
lines.append(f"{indents_level2}```\n")
lines.append(f"{indents_level2}\n")
for file_name in glob.glob(include_path):
if file_name.endswith("README.md") or file_name.endswith("readme.md"):
continue
_, file_extension = os.path.splitext(file_name)
lint = _LINT_MAP.get(file_extension, "")
lines.append(
f'{indents_level2}??? Example "{os.path.basename(file_name)}"\n'
)
lines.append(f"{indents_level2} ```{lint}\n")
head = True
with open(file_name) as f:
for line in f.readlines():
# skip license
if head:
if line.strip() == "" or line.lstrip().startswith("#"):
continue
head = False
lines.append(f"{indents_level2} {line}")
lines.append(f"{indents_level2} \n")
lines.append(f"{indents_level2} ```\n")
lines.append(f"{indents_level2} \n")
return "".join(lines)
def sub_include_example(src_file_path):
def sub(match):
example_path = match.group("example_path")
indents_level0 = match.group("_includer_indent")
lines = []
lines.append(f"{indents_level0}\n")
lines.append(f'{indents_level0}??? Example "{example_path}"\n')
lines.append(f"{indents_level0}\n")
indents_level1 = indents_level0 + " "
abs_file_path = os.path.abspath(
os.path.join(src_file_path, os.pardir, example_path)
)
if os.path.exists(abs_file_path):
with open(abs_file_path) as f:
_, file_extension = os.path.splitext(abs_file_path)
lint = _LINT_MAP.get(file_extension, "")
lines.append(f"{indents_level1}```{lint}\n")
head = True
for line in f.readlines():
# skip license
if head:
if line.strip() == "" or line.lstrip().startswith("#"):
continue
head = False
lines.append(f"{indents_level1} {line}")
lines.append(f"{indents_level1}\n")
lines.append(f"{indents_level1}```\n")
lines.append(f"{indents_level1}\n")
return "".join(lines)
return sub
try:
repo = git.Repo(search_parent_directories=True)
sha = repo.head.object.hexsha
url = repo.remote().url
if url.endswith(".git"):
url = url[:-4]
GITHUB_REPO = f"{url}/tree/{sha}"
except BaseException:
GITHUB_REPO = "https://github.com/FederatedAI/FATE/tree/master"
_DIR_URL_REGEX = re.compile(
r"""(?P<text>\[\s*:file_folder:[^\(]*\])\((?P<url>[^\)]+)\)""",
flags=re.VERBOSE | re.DOTALL,
)
def _fix_dir_url(src_path):
def _replace(match):
text = match.group("text")
url = match.group("url")
if not url.startswith("http"):
url_rel_to_repo_base = os.path.relpath(
os.path.abspath(os.path.join(src_path, os.path.pardir, url)), _REPO_BASE
)
url = f"{GITHUB_REPO}/{url_rel_to_repo_base}"
return f"{text}({url})"
return _replace
_COMMENT_REGEX = re.compile(
r"""[^\S\r\n]*<!--\s*mkdocs\s*\n(?P<_content>.*?)-->""",
flags=re.VERBOSE | re.DOTALL,
)
def _remove_comment(match):
content = match.group("_content")
return content
def on_page_markdown(markdown, page, **kwargs):
markdown = re.sub(_DIR_URL_REGEX, _fix_dir_url(page.file.abs_src_path), markdown)
    # unwrap mkdocs-specific comments
markdown = re.sub(_COMMENT_REGEX, _remove_comment, markdown)
markdown = re.sub(
_INCLUDE_EXAMPLES_REGEX,
sub_include_examples,
markdown,
)
markdown = re.sub(
_INCLUDE_EXAMPLE_REGEX,
sub_include_example(page.file.abs_src_path),
markdown,
)
return markdown
| 5,721 | 31.146067 | 108 |
py
|
FATE
|
FATE-master/doc/mkdocs/gen_params_doc.py
|
"""Generate parms pages."""
import os
import mkdocs_gen_files
repo_base = os.path.abspath(
os.path.join(
os.path.abspath(__file__), os.path.pardir, os.path.pardir, os.path.pardir
)
)
params_source = os.path.join(repo_base, "python", "federatedml", "param")
params_doc_target = os.path.join(repo_base, "doc", "federatedml_component", "params")
md_template = """\
# {name}
::: federatedml.param.{name}
options:
heading_level: 2
show_source: true
show_root_heading: true
show_root_toc_entry: false
show_root_full_path: false
"""
def create_params_doc():
os.makedirs(params_doc_target, exist_ok=True)
for file_name in os.listdir(params_source):
if file_name.endswith(".py") and file_name != "__init__.py":
name = file_name[:-3]
full_doc_path = os.path.join(params_doc_target, f"{name}.md")
with mkdocs_gen_files.open(full_doc_path, "w") as fd:
print(md_template.format(name=name), file=fd)
mkdocs_gen_files.set_edit_path(full_doc_path, os.path.join(params_source, file_name))
| 1,105 | 32.515152 | 97 |
py
|
CRL
|
CRL-main/run_continual.py
|
import torch
from config import Param
from methods.utils import setup_seed
from methods.manager import Manager
def run(args):
setup_seed(args.seed)
print("hyper-parameter configurations:")
print(str(args.__dict__))
manager = Manager(args)
manager.train(args)
if __name__ == '__main__':
    param = Param()  # detailed hyper-parameter configuration (see config.py)
args = param.args
torch.cuda.set_device(args.gpu)
args.device = torch.device(args.device)
args.n_gpu = torch.cuda.device_count()
args.task_name = args.dataname
args.rel_per_task = 8 if args.dataname == 'FewRel' else 4
run(args)
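    # illustrative invocation (all flags are defined in config.py):
    #   python run_continual.py --dataname TACRED --gpu 0 --seed 2021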
| 653 | 24.153846 | 72 |
py
|
CRL
|
CRL-main/config.py
|
import argparse
import os
"""
Detailed hyper-parameter configurations.
"""
class Param:
def __init__(self):
parser = argparse.ArgumentParser()
parser = self.all_param(parser)
all_args, unknown = parser.parse_known_args()
self.args = all_args
def all_param(self, parser):
##################################common parameters####################################
parser.add_argument("--gpu", default=0, type=int)
parser.add_argument("--dataname", default='FewRel', type=str, help="Use TACRED or FewRel datasets.")
parser.add_argument("--task_name", default='FewRel', type=str)
parser.add_argument("--max_length", default=256, type=int)
parser.add_argument("--this_name", default="continual", type=str)
parser.add_argument("--device", default="cuda", type=str)
############################### training ################################################
parser.add_argument("--batch_size", default=16, type=int)
parser.add_argument("--learning_rate", default=5e-6, type=float)
parser.add_argument("--total_round", default=5, type=int)
parser.add_argument("--rel_per_task", default=4)
parser.add_argument("--pattern", default="entity_marker")
parser.add_argument("--encoder_output_size", default=768, type=int)
parser.add_argument("--vocab_size", default=30522, type =int)
parser.add_argument("--marker_size", default=4, type=int)
# Temperature parameter in CL and CR
parser.add_argument("--temp", default=0.1, type=float)
# The projection head outputs dimensions
parser.add_argument("--feat_dim", default=64, type=int)
# Temperature parameter in KL
parser.add_argument("--kl_temp", default=10, type=float)
parser.add_argument("--num_workers", default=0, type=int)
# epoch1
parser.add_argument("--step1_epochs", default=10, type=int)
# epoch2
parser.add_argument("--step2_epochs", default=10, type=int)
parser.add_argument("--seed", default=2021, type=int)
parser.add_argument("--max_grad_norm", default=10, type=float)
# Memory size
parser.add_argument("--num_protos", default=20, type=int)
parser.add_argument("--optim", default='adam', type=str)
# dataset path
parser.add_argument("--data_path", default='datasets/', type=str)
# bert-base-uncased weights path
parser.add_argument("--bert_path", default="datasets/bert-base-uncased", type=str)
return parser
| 2,647 | 32.518987 | 108 |
py
|
CRL
|
CRL-main/methods/utils.py
|
from dataloaders.data_loader import get_data_loader
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from tqdm import tqdm, trange
import random
class Moment:
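    """Feature bank for contrastive replay: caches L2-normalized projections
    (and, for memory data, hidden states) of samples so the contrastive loss
    can compare each batch against a pool wider than the batch itself."""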
def __init__(self, args) -> None:
self.labels = None
self.mem_labels = None
self.memlen = 0
self.sample_k = 500
        self.temperature = args.temp
def get_mem_proto(self):
c = self._compute_centroids_ind()
return c
def _compute_centroids_ind(self):
cinds = []
for x in self.mem_labels:
if x.item() not in cinds:
cinds.append(x.item())
num = len(cinds)
feats = self.mem_features
centroids = torch.zeros((num, feats.size(1)), dtype=torch.float32, device=feats.device)
for i, c in enumerate(cinds):
ind = np.where(self.mem_labels.cpu().numpy() == c)[0]
centroids[i, :] = F.normalize(feats[ind, :].mean(dim=0), p=2, dim=0)
return centroids
def update(self, ind, feature, init=False):
self.features[ind] = feature
def update_mem(self, ind, feature, hidden=None):
self.mem_features[ind] = feature
if hidden is not None:
self.hidden_features[ind] = hidden
@torch.no_grad()
def init_moment(self, args, encoder, datasets, is_memory=False):
encoder.eval()
datalen = len(datasets)
if not is_memory:
self.features = torch.zeros(datalen, args.feat_dim).cuda()
data_loader = get_data_loader(args, datasets)
td = tqdm(data_loader)
lbs = []
for step, batch_data in enumerate(td):
labels, tokens, ind = batch_data
tokens = torch.stack([x.to(args.device) for x in tokens], dim=0)
_, reps = encoder.bert_forward(tokens)
self.update(ind, reps.detach())
lbs.append(labels)
lbs = torch.cat(lbs)
self.labels = lbs.to(args.device)
else:
self.memlen = datalen
self.mem_features = torch.zeros(datalen, args.feat_dim).cuda()
self.hidden_features = torch.zeros(datalen, args.encoder_output_size).cuda()
lbs = []
data_loader = get_data_loader(args, datasets)
td = tqdm(data_loader)
for step, batch_data in enumerate(td):
labels, tokens, ind = batch_data
tokens = torch.stack([x.to(args.device) for x in tokens], dim=0)
hidden, reps = encoder.bert_forward(tokens)
self.update_mem(ind, reps.detach(), hidden.detach())
lbs.append(labels)
lbs = torch.cat(lbs)
self.mem_labels = lbs.to(args.device)
def loss(self, x, labels, is_mem=False, mapping=None):
if is_mem:
ct_x = self.mem_features
ct_y = self.mem_labels
else:
if self.sample_k is not None:
# sample some instances
idx = list(range(len(self.features)))
if len(idx) > self.sample_k:
sample_id = random.sample(idx, self.sample_k)
else:
sample_id = idx
ct_x = self.features[sample_id]
ct_y = self.labels[sample_id]
else:
ct_x = self.features
ct_y = self.labels
device = torch.device("cuda") if x.is_cuda else torch.device("cpu")
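        # supervised contrastive loss: for each anchor, average the negative
        # log-softmax of temperature-scaled similarities over every cached
        # sample that shares its label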
dot_product_tempered = torch.mm(x, ct_x.T) / self.temperature # n * m
# Minus max for numerical stability with exponential. Same done in cross entropy. Epsilon added to avoid log(0)
exp_dot_tempered = (
torch.exp(dot_product_tempered - torch.max(dot_product_tempered, dim=1, keepdim=True)[0].detach()) + 1e-5
)
mask_combined = (labels.unsqueeze(1).repeat(1, ct_y.shape[0]) == ct_y).to(device) # n*m
cardinality_per_samples = torch.sum(mask_combined, dim=1)
log_prob = -torch.log(exp_dot_tempered / (torch.sum(exp_dot_tempered, dim=1, keepdim=True)))
supervised_contrastive_loss_per_sample = torch.sum(log_prob * mask_combined, dim=1) / cardinality_per_samples
supervised_contrastive_loss = torch.mean(supervised_contrastive_loss_per_sample)
return supervised_contrastive_loss
def dot_dist(x1, x2):
return torch.matmul(x1, x2.t())
def osdist(x, c):
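    # pairwise squared Euclidean distances via the expansion
    # ||x - c||^2 = ||x||^2 + ||c||^2 - 2 * x.c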
pairwise_distances_squared = torch.sum(x ** 2, dim=1, keepdim=True) + \
torch.sum(c.t() ** 2, dim=0, keepdim=True) - \
2.0 * torch.matmul(x, c.t())
error_mask = pairwise_distances_squared <= 0.0
pairwise_distances = pairwise_distances_squared.clamp(min=1e-16)#.sqrt()
pairwise_distances = torch.mul(pairwise_distances, ~error_mask)
return pairwise_distances
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
| 5,091 | 38.78125 | 119 |
py
|
CRL
|
CRL-main/methods/model.py
|
import torch.nn as nn
import torch
import torch.nn.functional as F
from .backbone import Bert_Encoder
class Encoder(nn.Module):
def __init__(self, args):
super().__init__()
self.encoder = Bert_Encoder(args)
self.output_size = self.encoder.out_dim
dim_in = self.output_size
self.head = nn.Sequential(
nn.Linear(dim_in, dim_in),
nn.ReLU(inplace=True),
nn.Linear(dim_in, args.feat_dim)
)
def bert_forward(self, x):
out = self.encoder(x)
xx = self.head(out)
xx = F.normalize(xx, p=2, dim=1)
return out, xx
| 644 | 27.043478 | 48 |
py
|
CRL
|
CRL-main/methods/backbone.py
|
import torch.nn as nn
import torch
import numpy as np
from transformers import BertModel, BertConfig
class Bert_Encoder(nn.Module):
def __init__(self, config, out_token=False):
super(Bert_Encoder, self).__init__()
# load model
self.encoder = BertModel.from_pretrained(config.bert_path).cuda()
self.bert_config = BertConfig.from_pretrained(config.bert_path)
# the dimension for the final outputs
self.output_size = config.encoder_output_size
self.out_dim = self.output_size
# find which encoding is used
if config.pattern in ['standard', 'entity_marker']:
self.pattern = config.pattern
else:
raise Exception('Wrong encoding.')
if self.pattern == 'entity_marker':
self.encoder.resize_token_embeddings(config.vocab_size + config.marker_size)
self.linear_transform = nn.Linear(self.bert_config.hidden_size*2, self.output_size, bias=True)
else:
self.linear_transform = nn.Linear(self.bert_config.hidden_size, self.output_size, bias=True)
self.layer_normalization = nn.LayerNorm([self.output_size])
def get_output_size(self):
return self.output_size
def forward(self, inputs):
# generate representation under a certain encoding strategy
if self.pattern == 'standard':
# in the standard mode, the representation is generated according to
# the representation of[CLS] mark.
output = self.encoder(inputs)[1]
else:
# in the entity_marker mode, the representation is generated from the representations of
# marks [E11] and [E21] of the head and tail entities.
e11 = []
e21 = []
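            # with the default vocab_size of 30522, the four added marker
            # tokens take ids 30522-30525, so [E11] is 30522 and [E21] is 30524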
# for each sample in the batch, acquire the positions of its [E11] and [E21]
for i in range(inputs.size()[0]):
tokens = inputs[i].cpu().numpy()
e11.append(np.argwhere(tokens == 30522)[0][0])
e21.append(np.argwhere(tokens == 30524)[0][0])
# input the sample to BERT
tokens_output = self.encoder(inputs)[0] # [B,N] --> [B,N,H]
output = []
# for each sample in the batch, acquire its representations for [E11] and [E21]
for i in range(len(e11)):
if inputs.device.type in ['cuda']:
instance_output = torch.index_select(tokens_output, 0, torch.tensor(i).cuda())
instance_output = torch.index_select(instance_output, 1, torch.tensor([e11[i], e21[i]]).cuda())
else:
instance_output = torch.index_select(tokens_output, 0, torch.tensor(i))
instance_output = torch.index_select(instance_output, 1, torch.tensor([e11[i], e21[i]]))
output.append(instance_output) # [B,N] --> [B,2,H]
# for each sample in the batch, concatenate the representations of [E11] and [E21], and reshape
output = torch.cat(output, dim=0)
output = output.view(output.size()[0], -1) # [B,N] --> [B,H*2]
output = self.linear_transform(output)
return output
| 3,220 | 42.527027 | 115 |
py
|
CRL
|
CRL-main/methods/manager.py
|
from dataloaders.sampler import data_sampler
from dataloaders.data_loader import get_data_loader
from .model import Encoder
from .utils import Moment, dot_dist
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import random
from tqdm import tqdm, trange
from sklearn.cluster import KMeans
from .utils import osdist
class Manager(object):
def __init__(self, args):
super().__init__()
self.id2rel = None
self.rel2id = None
def get_proto(self, args, encoder, mem_set):
# aggregate the prototype set for further use.
data_loader = get_data_loader(args, mem_set, False, False, 1)
features = []
encoder.eval()
for step, batch_data in enumerate(data_loader):
labels, tokens, ind = batch_data
tokens = torch.stack([x.to(args.device) for x in tokens], dim=0)
with torch.no_grad():
                feature, rep = encoder.bert_forward(tokens)
features.append(feature)
self.lbs.append(labels.item())
features = torch.cat(features, dim=0)
proto = torch.mean(features, dim=0, keepdim=True)
return proto, features
    # Use K-Means to select which samples to save, similar to at_least = 0
def select_data(self, args, encoder, sample_set):
data_loader = get_data_loader(args, sample_set, shuffle=False, drop_last=False, batch_size=1)
features = []
encoder.eval()
for step, batch_data in enumerate(data_loader):
labels, tokens, ind = batch_data
            tokens = torch.stack([x.to(args.device) for x in tokens], dim=0)
with torch.no_grad():
feature, rp = encoder.bert_forward(tokens)
features.append(feature.detach().cpu())
features = np.concatenate(features)
num_clusters = min(args.num_protos, len(sample_set))
distances = KMeans(n_clusters=num_clusters, random_state=0).fit_transform(features)
mem_set = []
current_feat = []
for k in range(num_clusters):
sel_index = np.argmin(distances[:, k])
instance = sample_set[sel_index]
mem_set.append(instance)
current_feat.append(features[sel_index])
current_feat = np.stack(current_feat, axis=0)
current_feat = torch.from_numpy(current_feat)
return mem_set, current_feat, current_feat.mean(0)
def get_optimizer(self, args, encoder):
print('Use {} optim!'.format(args.optim))
def set_param(module, lr, decay=0):
parameters_to_optimize = list(module.named_parameters())
no_decay = ['undecay']
parameters_to_optimize = [
{'params': [p for n, p in parameters_to_optimize
if not any(nd in n for nd in no_decay)], 'weight_decay': 0.0, 'lr': lr},
{'params': [p for n, p in parameters_to_optimize
if any(nd in n for nd in no_decay)], 'weight_decay': 0.0, 'lr': lr}
]
return parameters_to_optimize
params = set_param(encoder, args.learning_rate)
if args.optim == 'adam':
pytorch_optim = optim.Adam
else:
raise NotImplementedError
optimizer = pytorch_optim(
params
)
return optimizer
def train_simple_model(self, args, encoder, training_data, epochs):
data_loader = get_data_loader(args, training_data, shuffle=True)
encoder.train()
optimizer = self.get_optimizer(args, encoder)
def train_data(data_loader_, name = "", is_mem = False):
losses = []
td = tqdm(data_loader_, desc=name)
for step, batch_data in enumerate(td):
optimizer.zero_grad()
labels, tokens, ind = batch_data
labels = labels.to(args.device)
tokens = torch.stack([x.to(args.device) for x in tokens], dim=0)
hidden, reps = encoder.bert_forward(tokens)
loss = self.moment.loss(reps, labels)
losses.append(loss.item())
                td.set_postfix(loss=np.array(losses).mean())
loss.backward()
torch.nn.utils.clip_grad_norm_(encoder.parameters(), args.max_grad_norm)
optimizer.step()
                # update moment
if is_mem:
self.moment.update_mem(ind, reps.detach())
else:
self.moment.update(ind, reps.detach())
print(f"{name} loss is {np.array(losses).mean()}")
for epoch_i in range(epochs):
train_data(data_loader, "init_train_{}".format(epoch_i), is_mem=False)
def train_mem_model(self, args, encoder, mem_data, proto_mem, epochs, seen_relations):
history_nums = len(seen_relations) - args.rel_per_task
if len(proto_mem)>0:
            proto_mem = F.normalize(proto_mem, p=2, dim=1)
dist = dot_dist(proto_mem, proto_mem)
dist = dist.to(args.device)
mem_loader = get_data_loader(args, mem_data, shuffle=True)
encoder.train()
temp_rel2id = [self.rel2id[x] for x in seen_relations]
        map_relid2tempid = {k: v for v, k in enumerate(temp_rel2id)}
        map_tempid2relid = {v: k for k, v in map_relid2tempid.items()}
optimizer = self.get_optimizer(args, encoder)
def train_data(data_loader_, name = "", is_mem = False):
losses = []
kl_losses = []
td = tqdm(data_loader_, desc=name)
for step, batch_data in enumerate(td):
optimizer.zero_grad()
labels, tokens, ind = batch_data
labels = labels.to(args.device)
tokens = torch.stack([x.to(args.device) for x in tokens], dim=0)
zz, reps = encoder.bert_forward(tokens)
hidden = reps
need_ratio_compute = ind < history_nums * args.num_protos
total_need = need_ratio_compute.sum()
if total_need >0 :
# Knowledge Distillation for Relieve Forgetting
need_ind = ind[need_ratio_compute]
need_labels = labels[need_ratio_compute]
temp_labels = [map_relid2tempid[x.item()] for x in need_labels]
gold_dist = dist[temp_labels]
current_proto = self.moment.get_mem_proto()[:history_nums]
this_dist = dot_dist(hidden[need_ratio_compute], current_proto.to(args.device))
loss1 = self.kl_div_loss(gold_dist, this_dist, t=args.kl_temp)
loss1.backward(retain_graph=True)
else:
loss1 = 0.0
# Contrastive Replay
cl_loss = self.moment.loss(reps, labels, is_mem=True, mapping=map_relid2tempid)
if isinstance(loss1, float):
kl_losses.append(loss1)
else:
kl_losses.append(loss1.item())
loss = cl_loss
if isinstance(loss, float):
losses.append(loss)
                    td.set_postfix(loss=np.array(losses).mean(), kl_loss=np.array(kl_losses).mean())
                    # update moment
if is_mem:
self.moment.update_mem(ind, reps.detach(), hidden.detach())
else:
self.moment.update(ind, reps.detach())
continue
losses.append(loss.item())
                td.set_postfix(loss=np.array(losses).mean(), kl_loss=np.array(kl_losses).mean())
loss.backward()
torch.nn.utils.clip_grad_norm_(encoder.parameters(), args.max_grad_norm)
optimizer.step()
                # update moment
if is_mem:
self.moment.update_mem(ind, reps.detach())
else:
self.moment.update(ind, reps.detach())
print(f"{name} loss is {np.array(losses).mean()}")
for epoch_i in range(epochs):
train_data(mem_loader, "memory_train_{}".format(epoch_i), is_mem=True)
def kl_div_loss(self, x1, x2, t=10):
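        # distillation term: sharpen both distance rows with temperature t,
        # then take KL(target || student), where x1 holds the gold (memorized)
        # prototype distances and x2 the current ones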
batch_dist = F.softmax(t * x1, dim=1)
temp_dist = F.log_softmax(t * x2, dim=1)
loss = F.kl_div(temp_dist, batch_dist, reduction="batchmean")
return loss
@torch.no_grad()
def evaluate_strict_model(self, args, encoder, test_data, protos4eval, featrues4eval, seen_relations):
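        # nearest-prototype evaluation: a sample is counted correct when the
        # score (negative distance) of its gold relation is at least the best
        # score over all seen relations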
data_loader = get_data_loader(args, test_data, batch_size=1)
encoder.eval()
n = len(test_data)
temp_rel2id = [self.rel2id[x] for x in seen_relations]
        map_relid2tempid = {k: v for v, k in enumerate(temp_rel2id)}
        map_tempid2relid = {v: k for k, v in map_relid2tempid.items()}
correct = 0
for step, batch_data in enumerate(data_loader):
labels, tokens, ind = batch_data
labels = labels.to(args.device)
tokens = torch.stack([x.to(args.device) for x in tokens], dim=0)
hidden, reps = encoder.bert_forward(tokens)
labels = [map_relid2tempid[x.item()] for x in labels]
logits = -osdist(hidden, protos4eval)
seen_relation_ids = [self.rel2id[relation] for relation in seen_relations]
seen_relation_ids = [map_relid2tempid[x] for x in seen_relation_ids]
            seen_sim = logits[:, seen_relation_ids]
            seen_sim = seen_sim.cpu().data.numpy()
            max_smi = np.max(seen_sim, axis=1)
            label_smi = logits[:, labels].cpu().data.numpy()
if label_smi >= max_smi:
correct += 1
return correct/n
def train(self, args):
# set training batch
for i in range(args.total_round):
test_cur = []
test_total = []
# set random seed
random.seed(args.seed+i*100)
# sampler setup
sampler = data_sampler(args=args, seed=args.seed+i*100)
self.id2rel = sampler.id2rel
self.rel2id = sampler.rel2id
# encoder setup
encoder = Encoder(args=args).to(args.device)
# initialize memory and prototypes
num_class = len(sampler.id2rel)
memorized_samples = {}
# load data and start computation
history_relation = []
proto4repaly = []
for steps, (training_data, valid_data, test_data, current_relations, historic_test_data, seen_relations) in enumerate(sampler):
print(current_relations)
            # initial training data for the current task
train_data_for_initial = []
for relation in current_relations:
history_relation.append(relation)
train_data_for_initial += training_data[relation]
# train model
            # no memory yet: first train on the current task
self.moment = Moment(args)
self.moment.init_moment(args, encoder, train_data_for_initial, is_memory=False)
self.train_simple_model(args, encoder, train_data_for_initial, args.step1_epochs)
            # replay
if len(memorized_samples)>0:
# select current task sample
for relation in current_relations:
memorized_samples[relation], _, _ = self.select_data(args, encoder, training_data[relation])
train_data_for_memory = []
for relation in history_relation:
train_data_for_memory += memorized_samples[relation]
self.moment.init_moment(args, encoder, train_data_for_memory, is_memory=True)
self.train_mem_model(args, encoder, train_data_for_memory, proto4repaly, args.step2_epochs, seen_relations)
feat_mem = []
proto_mem = []
for relation in current_relations:
memorized_samples[relation], feat, temp_proto = self.select_data(args, encoder, training_data[relation])
feat_mem.append(feat)
proto_mem.append(temp_proto)
feat_mem = torch.cat(feat_mem, dim=0)
temp_proto = torch.stack(proto_mem, dim=0)
protos4eval = []
featrues4eval = []
self.lbs = []
for relation in history_relation:
if relation not in current_relations:
protos, featrues = self.get_proto(args, encoder, memorized_samples[relation])
protos4eval.append(protos)
featrues4eval.append(featrues)
if protos4eval:
protos4eval = torch.cat(protos4eval, dim=0).detach()
protos4eval = torch.cat([protos4eval, temp_proto.to(args.device)], dim=0)
else:
protos4eval = temp_proto.to(args.device)
proto4repaly = protos4eval.clone()
test_data_1 = []
for relation in current_relations:
test_data_1 += test_data[relation]
test_data_2 = []
for relation in seen_relations:
test_data_2 += historic_test_data[relation]
cur_acc = self.evaluate_strict_model(args, encoder, test_data_1, protos4eval, featrues4eval,seen_relations)
total_acc = self.evaluate_strict_model(args, encoder, test_data_2, protos4eval, featrues4eval,seen_relations)
print(f'Restart Num {i+1}')
print(f'task--{steps + 1}:')
print(f'current test acc:{cur_acc}')
print(f'history test acc:{total_acc}')
test_cur.append(cur_acc)
test_total.append(total_acc)
print(test_cur)
print(test_total)
del self.moment
| 14,339 | 42.98773 | 139 |
py
|
CRL
|
CRL-main/dataloaders/sampler.py
|
import pickle
import random
import json, os
from transformers import BertTokenizer
import numpy as np
def get_tokenizer(args):
tokenizer = BertTokenizer.from_pretrained(args.bert_path, additional_special_tokens=["[E11]", "[E12]", "[E21]", "[E22]"])
return tokenizer
class data_sampler(object):
def __init__(self, args, seed=None):
self.set_path(args)
self.args = args
temp_name = [args.dataname, args.seed]
file_name = "{}.pkl".format(
"-".join([str(x) for x in temp_name])
)
mid_dir = "datasets/"
if not os.path.exists(mid_dir):
os.mkdir(mid_dir)
for temp_p in ["_process_path"]:
mid_dir = os.path.join(mid_dir, temp_p)
if not os.path.exists(mid_dir):
os.mkdir(mid_dir)
self.save_data_path = os.path.join(mid_dir, file_name)
self.tokenizer = get_tokenizer(args)
# read relation data
self.id2rel, self.rel2id = self._read_relations(args.relation_file)
# random sampling
self.seed = seed
if self.seed is not None:
random.seed(self.seed)
self.shuffle_index = list(range(len(self.id2rel)))
random.shuffle(self.shuffle_index)
self.shuffle_index = np.argsort(self.shuffle_index)
# regenerate data
self.training_dataset, self.valid_dataset, self.test_dataset = self._read_data(self.args.data_file)
# generate the task number
self.batch = 0
self.task_length = len(self.id2rel) // self.args.rel_per_task
# record relations
self.seen_relations = []
self.history_test_data = {}
def set_path(self, args):
use_marker = ""
if args.dataname in ['FewRel']:
args.data_file = os.path.join(args.data_path,"data_with{}_marker.json".format(use_marker))
args.relation_file = os.path.join(args.data_path, "id2rel.json")
args.num_of_relation = 80
args.num_of_train = 420
args.num_of_val = 140
args.num_of_test = 140
elif args.dataname in ['TACRED']:
args.data_file = os.path.join(args.data_path,"data_with{}_marker_tacred.json".format(use_marker))
args.relation_file = os.path.join(args.data_path, "id2rel_tacred.json")
args.num_of_relation = 40
args.num_of_train = 420
args.num_of_val = 140
args.num_of_test = 140
def set_seed(self, seed):
self.seed = seed
        if self.seed is not None:
random.seed(self.seed)
self.shuffle_index = list(range(len(self.id2rel)))
random.shuffle(self.shuffle_index)
self.shuffle_index = np.argsort(self.shuffle_index)
def __iter__(self):
return self
def __next__(self):
if self.batch == self.task_length:
raise StopIteration()
indexs = self.shuffle_index[self.args.rel_per_task*self.batch: self.args.rel_per_task*(self.batch+1)]
self.batch += 1
current_relations = []
cur_training_data = {}
cur_valid_data = {}
cur_test_data = {}
for index in indexs:
current_relations.append(self.id2rel[index])
self.seen_relations.append(self.id2rel[index])
cur_training_data[self.id2rel[index]] = self.training_dataset[index]
cur_valid_data[self.id2rel[index]] = self.valid_dataset[index]
cur_test_data[self.id2rel[index]] = self.test_dataset[index]
self.history_test_data[self.id2rel[index]] = self.test_dataset[index]
return cur_training_data, cur_valid_data, cur_test_data, current_relations, self.history_test_data, self.seen_relations
def _read_data(self, file):
if os.path.isfile(self.save_data_path):
with open(self.save_data_path, 'rb') as f:
datas = pickle.load(f)
train_dataset, val_dataset, test_dataset = datas
return train_dataset, val_dataset, test_dataset
else:
data = json.load(open(file, 'r', encoding='utf-8'))
train_dataset = [[] for i in range(self.args.num_of_relation)]
val_dataset = [[] for i in range(self.args.num_of_relation)]
test_dataset = [[] for i in range(self.args.num_of_relation)]
for relation in data.keys():
rel_samples = data[relation]
                if self.seed is not None:
random.seed(self.seed)
random.shuffle(rel_samples)
count = 0
count1 = 0
for i, sample in enumerate(rel_samples):
tokenized_sample = {}
tokenized_sample['relation'] = self.rel2id[sample['relation']]
tokenized_sample['tokens'] = self.tokenizer.encode(' '.join(sample['tokens']),
padding='max_length',
truncation=True,
max_length=self.args.max_length)
if self.args.task_name == 'FewRel':
if i < self.args.num_of_train:
train_dataset[self.rel2id[relation]].append(tokenized_sample)
elif i < self.args.num_of_train + self.args.num_of_val:
val_dataset[self.rel2id[relation]].append(tokenized_sample)
else:
test_dataset[self.rel2id[relation]].append(tokenized_sample)
else:
if i < len(rel_samples) // 5 and count <= 40:
count += 1
test_dataset[self.rel2id[relation]].append(tokenized_sample)
else:
count1 += 1
train_dataset[self.rel2id[relation]].append(tokenized_sample)
if count1 >= 320:
break
with open(self.save_data_path, 'wb') as f:
pickle.dump((train_dataset, val_dataset, test_dataset), f)
return train_dataset, val_dataset, test_dataset
def _read_relations(self, file):
id2rel = json.load(open(file, 'r', encoding='utf-8'))
rel2id = {}
for i, x in enumerate(id2rel):
rel2id[x] = i
return id2rel, rel2id
| 6,542 | 41.764706 | 127 |
py
|
CRL
|
CRL-main/dataloaders/data_loader.py
|
import torch
from torch.utils.data import Dataset, DataLoader
class data_set(Dataset):
    def __init__(self, data, config=None):
self.data = data
self.config = config
self.bert = True
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return (self.data[idx], idx)
def collate_fn(self, data):
label = torch.tensor([item[0]['relation'] for item in data])
tokens = [torch.tensor(item[0]['tokens']) for item in data]
ind = torch.tensor([item[1] for item in data])
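        # tokens are returned as a list of per-sample tensors rather than a
        # stacked batch; callers stack them onto the target device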
return (
label,
tokens,
ind
)
def get_data_loader(config, data, shuffle=False, drop_last=False, batch_size=None):
dataset = data_set(data, config)
    if batch_size is None:
batch_size = min(config.batch_size, len(data))
else:
batch_size = min(batch_size, len(data))
data_loader = DataLoader(
dataset=dataset,
batch_size=batch_size,
shuffle=shuffle,
pin_memory=True,
num_workers=config.num_workers,
collate_fn=dataset.collate_fn,
drop_last=drop_last)
return data_loader
| 1,180 | 24.673913 | 89 |
py
|
consensus-specs
|
consensus-specs-master/setup.py
|
from setuptools import setup, find_packages, Command
from setuptools.command.build_py import build_py
from distutils import dir_util
from distutils.util import convert_path
from pathlib import Path
import os
import re
import string
import textwrap
from typing import Dict, NamedTuple, List, Sequence, Optional, TypeVar, Tuple
from abc import ABC, abstractmethod
import ast
import subprocess
import sys
import copy
from collections import OrderedDict
import json
# NOTE: have to programmatically include third-party dependencies in `setup.py`.
def installPackage(package: str):
subprocess.check_call([sys.executable, '-m', 'pip', 'install', package])
RUAMEL_YAML_VERSION = "ruamel.yaml==0.17.21"
try:
import ruamel.yaml
except ImportError:
installPackage(RUAMEL_YAML_VERSION)
from ruamel.yaml import YAML
MARKO_VERSION = "marko==1.0.2"
try:
import marko
except ImportError:
installPackage(MARKO_VERSION)
from marko.block import Heading, FencedCode, LinkRefDef, BlankLine
from marko.inline import CodeSpan
from marko.ext.gfm import gfm
from marko.ext.gfm.elements import Table
# Definitions in context.py
PHASE0 = 'phase0'
ALTAIR = 'altair'
BELLATRIX = 'bellatrix'
CAPELLA = 'capella'
DENEB = 'deneb'
EIP6110 = 'eip6110'
WHISK = 'whisk'
PREVIOUS_FORK_OF = {
PHASE0: None,
ALTAIR: PHASE0,
BELLATRIX: ALTAIR,
CAPELLA: BELLATRIX,
DENEB: CAPELLA,
EIP6110: DENEB,
WHISK: CAPELLA,
}
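# Each fork points to its parent, forming a tree rooted at phase0; note that
# WHISK branches from CAPELLA rather than extending DENEB.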
ALL_FORKS = list(PREVIOUS_FORK_OF.keys())
IGNORE_SPEC_FILES = [
"specs/phase0/deposit-contract.md"
]
EXTRA_SPEC_FILES = {
BELLATRIX: "sync/optimistic.md"
}
# The helper functions that are used when defining constants
CONSTANT_DEP_SUNDRY_CONSTANTS_FUNCTIONS = '''
def ceillog2(x: int) -> uint64:
if x < 1:
raise ValueError(f"ceillog2 accepts only positive values, x={x}")
return uint64((x - 1).bit_length())
def floorlog2(x: int) -> uint64:
if x < 1:
raise ValueError(f"floorlog2 accepts only positive values, x={x}")
return uint64(x.bit_length() - 1)
'''
OPTIMIZED_BLS_AGGREGATE_PUBKEYS = '''
def eth_aggregate_pubkeys(pubkeys: Sequence[BLSPubkey]) -> BLSPubkey:
return bls.AggregatePKs(pubkeys)
'''
class ProtocolDefinition(NamedTuple):
# just function definitions currently. May expand with configuration vars in future.
functions: Dict[str, str]
class VariableDefinition(NamedTuple):
type_name: Optional[str]
value: str
comment: Optional[str] # e.g. "noqa: E501"
type_hint: Optional[str] # e.g., "Final"
class SpecObject(NamedTuple):
functions: Dict[str, str]
protocols: Dict[str, ProtocolDefinition]
custom_types: Dict[str, str]
constant_vars: Dict[str, VariableDefinition]
preset_vars: Dict[str, VariableDefinition]
config_vars: Dict[str, VariableDefinition]
ssz_dep_constants: Dict[str, str] # the constants that depend on ssz_objects
ssz_objects: Dict[str, str]
dataclasses: Dict[str, str]
def is_post_fork(a, b) -> bool:
"""
Returns true if fork a is after b, or if a == b
"""
if a == b:
return True
prev_fork = PREVIOUS_FORK_OF[a]
if prev_fork == b:
return True
    elif prev_fork is None:
return False
else:
return is_post_fork(prev_fork, b)
def get_fork_directory(fork):
dir1 = f'specs/{fork}'
if os.path.exists(dir1):
return dir1
dir2 = f'specs/_features/{fork}'
if os.path.exists(dir2):
return dir2
raise FileNotFoundError(f"No directory found for fork: {fork}")
def _get_name_from_heading(heading: Heading) -> Optional[str]:
last_child = heading.children[-1]
if isinstance(last_child, CodeSpan):
return last_child.children
return None
def _get_source_from_code_block(block: FencedCode) -> str:
return block.children[0].children.strip()
def _get_function_name_from_source(source: str) -> str:
fn = ast.parse(source).body[0]
return fn.name
def _get_self_type_from_source(source: str) -> Optional[str]:
fn = ast.parse(source).body[0]
args = fn.args.args
if len(args) == 0:
return None
if args[0].arg != 'self':
return None
if args[0].annotation is None:
return None
return args[0].annotation.id
def _get_class_info_from_source(source: str) -> Tuple[str, Optional[str]]:
class_def = ast.parse(source).body[0]
base = class_def.bases[0]
if isinstance(base, ast.Name):
parent_class = base.id
else:
# NOTE: SSZ definition derives from earlier phase...
# e.g. `phase0.SignedBeaconBlock`
# TODO: check for consistency with other phases
parent_class = None
return class_def.name, parent_class
def _is_constant_id(name: str) -> bool:
if name[0] not in string.ascii_uppercase + '_':
return False
return all(map(lambda c: c in string.ascii_uppercase + '_' + string.digits, name[1:]))
def _load_kzg_trusted_setups(preset_name):
"""
[TODO] it's not the final mainnet trusted setup.
We will update it after the KZG ceremony.
"""
file_path = str(Path(__file__).parent) + '/presets/' + preset_name + '/trusted_setups/testing_trusted_setups.json'
with open(file_path, 'r') as f:
json_data = json.load(f)
trusted_setup_G1 = json_data['setup_G1']
trusted_setup_G2 = json_data['setup_G2']
trusted_setup_G1_lagrange = json_data['setup_G1_lagrange']
roots_of_unity = json_data['roots_of_unity']
return trusted_setup_G1, trusted_setup_G2, trusted_setup_G1_lagrange, roots_of_unity
ALL_KZG_SETUPS = {
'minimal': _load_kzg_trusted_setups('minimal'),
'mainnet': _load_kzg_trusted_setups('mainnet')
}
ETH2_SPEC_COMMENT_PREFIX = "eth2spec:"
def _get_eth2_spec_comment(child: LinkRefDef) -> Optional[str]:
_, _, title = child._parse_info
if not (title[0] == "(" and title[len(title)-1] == ")"):
return None
title = title[1:len(title)-1]
if not title.startswith(ETH2_SPEC_COMMENT_PREFIX):
return None
return title[len(ETH2_SPEC_COMMENT_PREFIX):].strip()
def _parse_value(name: str, typed_value: str, type_hint: Optional[str]=None) -> VariableDefinition:
comment = None
if name == "BLS12_381_Q":
comment = "noqa: E501"
typed_value = typed_value.strip()
if '(' not in typed_value:
return VariableDefinition(type_name=None, value=typed_value, comment=comment, type_hint=type_hint)
i = typed_value.index('(')
type_name = typed_value[:i]
return VariableDefinition(type_name=type_name, value=typed_value[i+1:-1], comment=comment, type_hint=type_hint)
def _update_constant_vars_with_kzg_setups(constant_vars, preset_name):
comment = "noqa: E501"
kzg_setups = ALL_KZG_SETUPS[preset_name]
constant_vars['KZG_SETUP_G1'] = VariableDefinition(constant_vars['KZG_SETUP_G1'].value, str(kzg_setups[0]), comment, None)
constant_vars['KZG_SETUP_G2'] = VariableDefinition(constant_vars['KZG_SETUP_G2'].value, str(kzg_setups[1]), comment, None)
constant_vars['KZG_SETUP_LAGRANGE'] = VariableDefinition(constant_vars['KZG_SETUP_LAGRANGE'].value, str(kzg_setups[2]), comment, None)
constant_vars['ROOTS_OF_UNITY'] = VariableDefinition(constant_vars['ROOTS_OF_UNITY'].value, str(kzg_setups[3]), comment, None)
def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str], preset_name: str) -> SpecObject:
functions: Dict[str, str] = {}
protocols: Dict[str, ProtocolDefinition] = {}
constant_vars: Dict[str, VariableDefinition] = {}
preset_vars: Dict[str, VariableDefinition] = {}
config_vars: Dict[str, VariableDefinition] = {}
ssz_dep_constants: Dict[str, str] = {}
ssz_objects: Dict[str, str] = {}
dataclasses: Dict[str, str] = {}
custom_types: Dict[str, str] = {}
with open(file_name) as source_file:
document = gfm.parse(source_file.read())
current_name = None
should_skip = False
for child in document.children:
if isinstance(child, BlankLine):
continue
if should_skip:
should_skip = False
continue
if isinstance(child, Heading):
current_name = _get_name_from_heading(child)
elif isinstance(child, FencedCode):
if child.lang != "python":
continue
source = _get_source_from_code_block(child)
if source.startswith("def"):
current_name = _get_function_name_from_source(source)
self_type_name = _get_self_type_from_source(source)
function_def = "\n".join(line.rstrip() for line in source.splitlines())
if self_type_name is None:
functions[current_name] = function_def
else:
if self_type_name not in protocols:
protocols[self_type_name] = ProtocolDefinition(functions={})
protocols[self_type_name].functions[current_name] = function_def
elif source.startswith("@dataclass"):
dataclasses[current_name] = "\n".join(line.rstrip() for line in source.splitlines())
elif source.startswith("class"):
class_name, parent_class = _get_class_info_from_source(source)
# check consistency with spec
assert class_name == current_name
if parent_class:
assert parent_class == "Container"
# NOTE: trim whitespace from spec
ssz_objects[current_name] = "\n".join(line.rstrip() for line in source.splitlines())
else:
raise Exception("unrecognized python code element: " + source)
elif isinstance(child, Table):
for row in child.children:
cells = row.children
if len(cells) >= 2:
name_cell = cells[0]
name = name_cell.children[0].children
value_cell = cells[1]
value = value_cell.children[0].children
if isinstance(value, list):
                            # marko parses `**X**` as a list containing an X
value = value[0].children
if not _is_constant_id(name):
# Check for short type declarations
if value.startswith(("uint", "Bytes", "ByteList", "Union", "Vector", "List", "ByteVector")):
custom_types[name] = value
continue
if value.startswith("get_generalized_index"):
ssz_dep_constants[name] = value
continue
value_def = _parse_value(name, value)
if name in preset:
preset_vars[name] = VariableDefinition(value_def.type_name, preset[name], value_def.comment, None)
elif name in config:
config_vars[name] = VariableDefinition(value_def.type_name, config[name], value_def.comment, None)
else:
if name in ('ENDIANNESS', 'KZG_ENDIANNESS'):
# Deal with mypy Literal typing check
value_def = _parse_value(name, value, type_hint='Final')
constant_vars[name] = value_def
elif isinstance(child, LinkRefDef):
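                # link reference definitions may carry an eth2spec directive in
                # their title; "(eth2spec:skip)" drops the next element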
comment = _get_eth2_spec_comment(child)
if comment == "skip":
should_skip = True
# Load KZG trusted setup from files
if any('KZG_SETUP' in name for name in constant_vars):
_update_constant_vars_with_kzg_setups(constant_vars, preset_name)
return SpecObject(
functions=functions,
protocols=protocols,
custom_types=custom_types,
constant_vars=constant_vars,
preset_vars=preset_vars,
config_vars=config_vars,
ssz_dep_constants=ssz_dep_constants,
ssz_objects=ssz_objects,
dataclasses=dataclasses,
)
class SpecBuilder(ABC):
@property
@abstractmethod
def fork(self) -> str:
raise NotImplementedError()
@classmethod
@abstractmethod
def imports(cls, preset_name: str) -> str:
"""
Import objects from other libraries.
"""
raise NotImplementedError()
@classmethod
@abstractmethod
def preparations(cls) -> str:
"""
Define special types/constants for building pyspec or call functions.
"""
raise NotImplementedError()
@classmethod
@abstractmethod
def sundry_functions(cls) -> str:
"""
The functions that are (1) defined abstractly in specs or (2) adjusted for getting better performance.
"""
raise NotImplementedError()
@classmethod
def execution_engine_cls(cls) -> str:
raise NotImplementedError()
@classmethod
@abstractmethod
def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]:
"""
The constants that are required for SSZ objects.
"""
raise NotImplementedError()
@classmethod
@abstractmethod
def hardcoded_custom_type_dep_constants(cls, spec_object) -> Dict[str, str]: # TODO
"""
The constants that are required for custom types.
"""
raise NotImplementedError()
@classmethod
@abstractmethod
def implement_optimizations(cls, functions: Dict[str, str]) -> Dict[str, str]:
raise NotImplementedError()
@classmethod
@abstractmethod
def build_spec(cls, preset_name: str,
source_files: List[Path], preset_files: Sequence[Path], config_file: Path) -> str:
raise NotImplementedError()
#
# Phase0SpecBuilder
#
class Phase0SpecBuilder(SpecBuilder):
fork: str = PHASE0
@classmethod
def imports(cls, preset_name: str) -> str:
return '''from lru import LRU
from dataclasses import (
dataclass,
field,
)
from typing import (
Any, Callable, Dict, Set, Sequence, Tuple, Optional, TypeVar, NamedTuple, Final
)
from eth2spec.utils.ssz.ssz_impl import hash_tree_root, copy, uint_to_bytes
from eth2spec.utils.ssz.ssz_typing import (
View, boolean, Container, List, Vector, uint8, uint32, uint64, uint256,
Bytes1, Bytes4, Bytes32, Bytes48, Bytes96, Bitlist)
from eth2spec.utils.ssz.ssz_typing import Bitvector # noqa: F401
from eth2spec.utils import bls
from eth2spec.utils.hash_function import hash
'''
@classmethod
def preparations(cls) -> str:
return '''
SSZObject = TypeVar('SSZObject', bound=View)
'''
@classmethod
def sundry_functions(cls) -> str:
return '''
def get_eth1_data(block: Eth1Block) -> Eth1Data:
"""
    A stub function returning mock Eth1Data.
"""
return Eth1Data(
deposit_root=block.deposit_root,
deposit_count=block.deposit_count,
block_hash=hash_tree_root(block))
def cache_this(key_fn, value_fn, lru_size): # type: ignore
cache_dict = LRU(size=lru_size)
def wrapper(*args, **kw): # type: ignore
key = key_fn(*args, **kw)
nonlocal cache_dict
if key not in cache_dict:
cache_dict[key] = value_fn(*args, **kw)
return cache_dict[key]
return wrapper
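# The memoized wrappers below key on hash_tree_root of the relevant state
# fields, so results are shared across structurally identical states while
# the LRU size bound keeps memory use fixed.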
_compute_shuffled_index = compute_shuffled_index
compute_shuffled_index = cache_this(
lambda index, index_count, seed: (index, index_count, seed),
_compute_shuffled_index, lru_size=SLOTS_PER_EPOCH * 3)
_get_total_active_balance = get_total_active_balance
get_total_active_balance = cache_this(
lambda state: (state.validators.hash_tree_root(), compute_epoch_at_slot(state.slot)),
_get_total_active_balance, lru_size=10)
_get_base_reward = get_base_reward
get_base_reward = cache_this(
lambda state, index: (state.validators.hash_tree_root(), state.slot, index),
_get_base_reward, lru_size=2048)
_get_committee_count_per_slot = get_committee_count_per_slot
get_committee_count_per_slot = cache_this(
lambda state, epoch: (state.validators.hash_tree_root(), epoch),
_get_committee_count_per_slot, lru_size=SLOTS_PER_EPOCH * 3)
_get_active_validator_indices = get_active_validator_indices
get_active_validator_indices = cache_this(
lambda state, epoch: (state.validators.hash_tree_root(), epoch),
_get_active_validator_indices, lru_size=3)
_get_beacon_committee = get_beacon_committee
get_beacon_committee = cache_this(
lambda state, slot, index: (state.validators.hash_tree_root(), state.randao_mixes.hash_tree_root(), slot, index),
_get_beacon_committee, lru_size=SLOTS_PER_EPOCH * MAX_COMMITTEES_PER_SLOT * 3)
_get_matching_target_attestations = get_matching_target_attestations
get_matching_target_attestations = cache_this(
lambda state, epoch: (state.hash_tree_root(), epoch),
_get_matching_target_attestations, lru_size=10)
_get_matching_head_attestations = get_matching_head_attestations
get_matching_head_attestations = cache_this(
lambda state, epoch: (state.hash_tree_root(), epoch),
_get_matching_head_attestations, lru_size=10)
_get_attesting_indices = get_attesting_indices
get_attesting_indices = cache_this(
lambda state, data, bits: (
state.randao_mixes.hash_tree_root(),
state.validators.hash_tree_root(), data.hash_tree_root(), bits.hash_tree_root()
),
_get_attesting_indices, lru_size=SLOTS_PER_EPOCH * MAX_COMMITTEES_PER_SLOT * 3)'''
@classmethod
def execution_engine_cls(cls) -> str:
return ""
@classmethod
def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]:
return {}
@classmethod
def hardcoded_custom_type_dep_constants(cls, spec_object) -> Dict[str, str]:
return {}
@classmethod
def implement_optimizations(cls, functions: Dict[str, str]) -> Dict[str, str]:
return functions
@classmethod
def build_spec(cls, preset_name: str,
source_files: Sequence[Path], preset_files: Sequence[Path], config_file: Path) -> str:
return _build_spec(preset_name, cls.fork, source_files, preset_files, config_file)
#
# AltairSpecBuilder
#
class AltairSpecBuilder(Phase0SpecBuilder):
fork: str = ALTAIR
@classmethod
def imports(cls, preset_name: str) -> str:
return super().imports(preset_name) + '\n' + f'''
from typing import NewType, Union as PyUnion
from eth2spec.phase0 import {preset_name} as phase0
from eth2spec.test.helpers.merkle import build_proof
from eth2spec.utils.ssz.ssz_typing import Path
'''
@classmethod
def preparations(cls):
return super().preparations() + '\n' + '''
SSZVariableName = str
GeneralizedIndex = NewType('GeneralizedIndex', int)
'''
@classmethod
def sundry_functions(cls) -> str:
return super().sundry_functions() + '\n\n' + '''
def get_generalized_index(ssz_class: Any, *path: Sequence[PyUnion[int, SSZVariableName]]) -> GeneralizedIndex:
ssz_path = Path(ssz_class)
for item in path:
ssz_path = ssz_path / item
return GeneralizedIndex(ssz_path.gindex())
def compute_merkle_proof_for_state(state: BeaconState,
index: GeneralizedIndex) -> Sequence[Bytes32]:
return build_proof(state.get_backing(), index)'''
@classmethod
def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]:
constants = {
'FINALIZED_ROOT_INDEX': 'GeneralizedIndex(105)',
'CURRENT_SYNC_COMMITTEE_INDEX': 'GeneralizedIndex(54)',
'NEXT_SYNC_COMMITTEE_INDEX': 'GeneralizedIndex(55)',
}
return {**super().hardcoded_ssz_dep_constants(), **constants}
@classmethod
def implement_optimizations(cls, functions: Dict[str, str]) -> Dict[str, str]:
if "eth_aggregate_pubkeys" in functions:
functions["eth_aggregate_pubkeys"] = OPTIMIZED_BLS_AGGREGATE_PUBKEYS.strip()
return super().implement_optimizations(functions)
#
# BellatrixSpecBuilder
#
class BellatrixSpecBuilder(AltairSpecBuilder):
fork: str = BELLATRIX
@classmethod
def imports(cls, preset_name: str):
return super().imports(preset_name) + f'''
from typing import Protocol
from eth2spec.altair import {preset_name} as altair
from eth2spec.utils.ssz.ssz_typing import Bytes8, Bytes20, ByteList, ByteVector
'''
@classmethod
def preparations(cls):
return super().preparations()
@classmethod
def sundry_functions(cls) -> str:
return super().sundry_functions() + '\n\n' + """
ExecutionState = Any
def get_pow_block(hash: Bytes32) -> Optional[PowBlock]:
return PowBlock(block_hash=hash, parent_hash=Bytes32(), total_difficulty=uint256(0))
def get_execution_state(_execution_state_root: Bytes32) -> ExecutionState:
pass
def get_pow_chain_head() -> PowBlock:
pass"""
@classmethod
def execution_engine_cls(cls) -> str:
return "\n\n" + """
class NoopExecutionEngine(ExecutionEngine):
def notify_new_payload(self: ExecutionEngine, execution_payload: ExecutionPayload) -> bool:
return True
def notify_forkchoice_updated(self: ExecutionEngine,
head_block_hash: Hash32,
safe_block_hash: Hash32,
finalized_block_hash: Hash32,
payload_attributes: Optional[PayloadAttributes]) -> Optional[PayloadId]:
pass
def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> GetPayloadResponse:
# pylint: disable=unused-argument
raise NotImplementedError("no default block production")
def is_valid_block_hash(self: ExecutionEngine, execution_payload: ExecutionPayload) -> bool:
return True
def verify_and_notify_new_payload(self: ExecutionEngine,
new_payload_request: NewPayloadRequest) -> bool:
return True
EXECUTION_ENGINE = NoopExecutionEngine()"""
@classmethod
    def hardcoded_custom_type_dep_constants(cls, spec_object) -> Dict[str, str]:
constants = {
'MAX_BYTES_PER_TRANSACTION': spec_object.preset_vars['MAX_BYTES_PER_TRANSACTION'].value,
}
return {**super().hardcoded_custom_type_dep_constants(spec_object), **constants}
#
# CapellaSpecBuilder
#
class CapellaSpecBuilder(BellatrixSpecBuilder):
fork: str = CAPELLA
@classmethod
def imports(cls, preset_name: str):
return super().imports(preset_name) + f'''
from eth2spec.bellatrix import {preset_name} as bellatrix
'''
@classmethod
def sundry_functions(cls) -> str:
return super().sundry_functions() + '\n\n' + '''
def compute_merkle_proof_for_block_body(body: BeaconBlockBody,
index: GeneralizedIndex) -> Sequence[Bytes32]:
return build_proof(body.get_backing(), index)'''
@classmethod
def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]:
constants = {
'EXECUTION_PAYLOAD_INDEX': 'GeneralizedIndex(25)',
}
return {**super().hardcoded_ssz_dep_constants(), **constants}
#
# DenebSpecBuilder
#
class DenebSpecBuilder(CapellaSpecBuilder):
fork: str = DENEB
@classmethod
def imports(cls, preset_name: str):
return super().imports(preset_name) + f'''
from eth2spec.capella import {preset_name} as capella
'''
@classmethod
def preparations(cls):
return super().preparations() + '\n' + '''
T = TypeVar('T') # For generic function
'''
@classmethod
def sundry_functions(cls) -> str:
return super().sundry_functions() + '\n\n' + '''
def retrieve_blobs_and_proofs(beacon_block_root: Root) -> PyUnion[Tuple[Blob, KZGProof], Tuple[str, str]]:
# pylint: disable=unused-argument
return ("TEST", "TEST")'''
@classmethod
def execution_engine_cls(cls) -> str:
return "\n\n" + """
class NoopExecutionEngine(ExecutionEngine):
def notify_new_payload(self: ExecutionEngine,
execution_payload: ExecutionPayload,
parent_beacon_block_root: Root) -> bool:
return True
def notify_forkchoice_updated(self: ExecutionEngine,
head_block_hash: Hash32,
safe_block_hash: Hash32,
finalized_block_hash: Hash32,
payload_attributes: Optional[PayloadAttributes]) -> Optional[PayloadId]:
pass
def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> GetPayloadResponse:
# pylint: disable=unused-argument
raise NotImplementedError("no default block production")
def is_valid_block_hash(self: ExecutionEngine,
execution_payload: ExecutionPayload,
parent_beacon_block_root: Root) -> bool:
return True
def is_valid_versioned_hashes(self: ExecutionEngine, new_payload_request: NewPayloadRequest) -> bool:
return True
def verify_and_notify_new_payload(self: ExecutionEngine,
new_payload_request: NewPayloadRequest) -> bool:
return True
EXECUTION_ENGINE = NoopExecutionEngine()"""
@classmethod
    def hardcoded_custom_type_dep_constants(cls, spec_object) -> Dict[str, str]:
constants = {
'BYTES_PER_FIELD_ELEMENT': spec_object.constant_vars['BYTES_PER_FIELD_ELEMENT'].value,
'FIELD_ELEMENTS_PER_BLOB': spec_object.preset_vars['FIELD_ELEMENTS_PER_BLOB'].value,
'MAX_BLOBS_PER_BLOCK': spec_object.preset_vars['MAX_BLOBS_PER_BLOCK'].value,
}
return {**super().hardcoded_custom_type_dep_constants(spec_object), **constants}
#
# EIP6110SpecBuilder
#
class EIP6110SpecBuilder(DenebSpecBuilder):
fork: str = EIP6110
@classmethod
def imports(cls, preset_name: str):
return super().imports(preset_name) + f'''
from eth2spec.deneb import {preset_name} as deneb
'''
#
# WhiskSpecBuilder
#
class WhiskSpecBuilder(CapellaSpecBuilder):
fork: str = WHISK
@classmethod
def imports(cls, preset_name: str):
return super().imports(preset_name) + f'''
from eth2spec.capella import {preset_name} as capella
'''
@classmethod
    def hardcoded_custom_type_dep_constants(cls, spec_object) -> Dict[str, str]:
# Necessary for custom types `WhiskShuffleProof` and `WhiskTrackerProof`
constants = {
'WHISK_MAX_SHUFFLE_PROOF_SIZE': spec_object.preset_vars['WHISK_MAX_SHUFFLE_PROOF_SIZE'].value,
'WHISK_MAX_OPENING_PROOF_SIZE': spec_object.preset_vars['WHISK_MAX_OPENING_PROOF_SIZE'].value,
}
return {**super().hardcoded_custom_type_dep_constants(spec_object), **constants}
spec_builders = {
builder.fork: builder
for builder in (
Phase0SpecBuilder, AltairSpecBuilder, BellatrixSpecBuilder, CapellaSpecBuilder, DenebSpecBuilder,
EIP6110SpecBuilder, WhiskSpecBuilder,
)
}
def is_byte_vector(value: str) -> bool:
    return value.startswith('ByteVector')
def make_function_abstract(protocol_def: ProtocolDefinition, key: str):
function = protocol_def.functions[key].split('"""')
protocol_def.functions[key] = function[0] + "..."
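# Illustrative sketch (not part of the build): splitting on '"""' keeps only the
# `def` line and indentation preceding the docstring, so
#     def verify_and_notify_new_payload(self, ...) -> bool:
#         """docstring"""
#         return ...
# becomes the Protocol-style stub
#     def verify_and_notify_new_payload(self, ...) -> bool:
#         ...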
def objects_to_spec(preset_name: str,
spec_object: SpecObject,
builder: SpecBuilder,
ordered_class_objects: Dict[str, str]) -> str:
"""
Given all the objects that constitute a spec, combine them into a single pyfile.
"""
new_type_definitions = (
'\n\n'.join(
[
f"class {key}({value}):\n pass\n" if not is_byte_vector(value) else f"class {key}({value}): # type: ignore\n pass\n"
for key, value in spec_object.custom_types.items()
]
)
)
def format_protocol(protocol_name: str, protocol_def: ProtocolDefinition) -> str:
abstract_functions = ["verify_and_notify_new_payload"]
for key in protocol_def.functions.keys():
if key in abstract_functions:
make_function_abstract(protocol_def, key)
protocol = f"class {protocol_name}(Protocol):"
for fn_source in protocol_def.functions.values():
fn_source = fn_source.replace("self: "+protocol_name, "self")
protocol += "\n\n" + textwrap.indent(fn_source, " ")
return protocol
protocols_spec = '\n\n\n'.join(format_protocol(k, v) for k, v in spec_object.protocols.items())
for k in list(spec_object.functions):
if k in [
"ceillog2",
"floorlog2",
"compute_merkle_proof_for_block_body",
"compute_merkle_proof_for_state",
]:
del spec_object.functions[k]
functions = builder.implement_optimizations(spec_object.functions)
functions_spec = '\n\n\n'.join(functions.values())
# Access global dict of config vars for runtime configurables
for name in spec_object.config_vars.keys():
functions_spec = re.sub(r"\b%s\b" % name, 'config.' + name, functions_spec)
def format_config_var(name: str, vardef: VariableDefinition) -> str:
if vardef.type_name is None:
out = f'{name}={vardef.value},'
else:
out = f'{name}={vardef.type_name}({vardef.value}),'
if vardef.comment is not None:
out += f' # {vardef.comment}'
return out
config_spec = 'class Configuration(NamedTuple):\n'
config_spec += ' PRESET_BASE: str\n'
config_spec += '\n'.join(f' {k}: {v.type_name if v.type_name is not None else "int"}'
for k, v in spec_object.config_vars.items())
config_spec += '\n\n\nconfig = Configuration(\n'
config_spec += f' PRESET_BASE="{preset_name}",\n'
config_spec += '\n'.join(' ' + format_config_var(k, v) for k, v in spec_object.config_vars.items())
config_spec += '\n)\n'
def format_constant(name: str, vardef: VariableDefinition) -> str:
if vardef.type_name is None:
if vardef.type_hint is None:
out = f'{name} = {vardef.value}'
else:
out = f'{name}: {vardef.type_hint} = {vardef.value}'
else:
out = f'{name} = {vardef.type_name}({vardef.value})'
if vardef.comment is not None:
out += f' # {vardef.comment}'
return out
constant_vars_spec = '# Constant vars\n' + '\n'.join(format_constant(k, v) for k, v in spec_object.constant_vars.items())
preset_vars_spec = '# Preset vars\n' + '\n'.join(format_constant(k, v) for k, v in spec_object.preset_vars.items())
ordered_class_objects_spec = '\n\n\n'.join(ordered_class_objects.values())
ssz_dep_constants = '\n'.join(map(lambda x: '%s = %s' % (x, builder.hardcoded_ssz_dep_constants()[x]), builder.hardcoded_ssz_dep_constants()))
ssz_dep_constants_verification = '\n'.join(map(lambda x: 'assert %s == %s' % (x, spec_object.ssz_dep_constants[x]), builder.hardcoded_ssz_dep_constants()))
custom_type_dep_constants = '\n'.join(map(lambda x: '%s = %s' % (x, builder.hardcoded_custom_type_dep_constants(spec_object)[x]), builder.hardcoded_custom_type_dep_constants(spec_object)))
spec = (
builder.imports(preset_name)
+ builder.preparations()
+ '\n\n' + f"fork = \'{builder.fork}\'\n"
# The constants that some SSZ containers require. Need to be defined before `new_type_definitions`
+ ('\n\n' + custom_type_dep_constants + '\n' if custom_type_dep_constants != '' else '')
+ '\n\n' + new_type_definitions
+ '\n' + CONSTANT_DEP_SUNDRY_CONSTANTS_FUNCTIONS
# The constants that some SSZ containers require. Need to be defined before `constants_spec`
+ ('\n\n' + ssz_dep_constants if ssz_dep_constants != '' else '')
+ '\n\n' + constant_vars_spec
+ '\n\n' + preset_vars_spec
+ '\n\n\n' + config_spec
+ '\n\n' + ordered_class_objects_spec
+ ('\n\n\n' + protocols_spec if protocols_spec != '' else '')
+ '\n\n\n' + functions_spec
+ '\n\n' + builder.sundry_functions()
+ builder.execution_engine_cls()
# Since some constants are hardcoded in setup.py, the following assertions verify that the hardcoded constants are
        # the same as the spec definitions.
+ ('\n\n\n' + ssz_dep_constants_verification if ssz_dep_constants_verification != '' else '')
+ '\n'
)
return spec
def combine_protocols(old_protocols: Dict[str, ProtocolDefinition],
new_protocols: Dict[str, ProtocolDefinition]) -> Dict[str, ProtocolDefinition]:
for key, value in new_protocols.items():
if key not in old_protocols:
old_protocols[key] = value
else:
functions = combine_dicts(old_protocols[key].functions, value.functions)
old_protocols[key] = ProtocolDefinition(functions=functions)
return old_protocols
T = TypeVar('T')
def combine_dicts(old_dict: Dict[str, T], new_dict: Dict[str, T]) -> Dict[str, T]:
return {**old_dict, **new_dict}
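# Note: the newer dict wins on key conflicts, e.g.
#   combine_dicts({'a': 1}, {'a': 2, 'b': 3}) == {'a': 2, 'b': 3}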
ignored_dependencies = [
'bit', 'boolean', 'Vector', 'List', 'Container', 'BLSPubkey', 'BLSSignature',
'Bytes1', 'Bytes4', 'Bytes8', 'Bytes20', 'Bytes32', 'Bytes48', 'Bytes96', 'Bitlist', 'Bitvector',
'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256',
'bytes', 'byte', 'ByteList', 'ByteVector',
'Dict', 'dict', 'field', 'ceillog2', 'floorlog2', 'Set',
'Optional', 'Sequence',
]
def dependency_order_class_objects(objects: Dict[str, str], custom_types: Dict[str, str]) -> None:
"""
    Determines which SSZ objects depend on which others and orders them appropriately.
"""
items = list(objects.items())
for key, value in items:
dependencies = []
for line in value.split('\n'):
if not re.match(r'\s+\w+: .+', line):
continue # skip whitespace etc.
            line = line[line.index(':') + 1:]  # strip off the field name
            if '#' in line:
                line = line[:line.index('#')]  # strip off the comment
            dependencies.extend(re.findall(r'(\w+)', line))  # catch all eligible words, potential dependencies
dependencies = filter(lambda x: '_' not in x and x.upper() != x, dependencies) # filter out constants
dependencies = filter(lambda x: x not in ignored_dependencies, dependencies)
dependencies = filter(lambda x: x not in custom_types, dependencies)
for dep in dependencies:
key_list = list(objects.keys())
for item in [dep, key] + key_list[key_list.index(dep)+1:]:
objects[item] = objects.pop(item)
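# Illustrative: each discovered dependency and everything after it is re-appended
# in insertion order, so a dependency always ends up before its dependents; e.g. an
# input ordering {'B': '    a: A', 'A': ...} is rotated to place 'A' before 'B'.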
def combine_ssz_objects(old_objects: Dict[str, str], new_objects: Dict[str, str], custom_types) -> Dict[str, str]:
"""
Takes in old spec and new spec ssz objects, combines them,
and returns the newer versions of the objects in dependency order.
"""
for key, value in new_objects.items():
old_objects[key] = value
return old_objects
def combine_spec_objects(spec0: SpecObject, spec1: SpecObject) -> SpecObject:
"""
Takes in two spec variants (as tuples of their objects) and combines them using the appropriate combiner function.
"""
protocols = combine_protocols(spec0.protocols, spec1.protocols)
functions = combine_dicts(spec0.functions, spec1.functions)
custom_types = combine_dicts(spec0.custom_types, spec1.custom_types)
constant_vars = combine_dicts(spec0.constant_vars, spec1.constant_vars)
preset_vars = combine_dicts(spec0.preset_vars, spec1.preset_vars)
config_vars = combine_dicts(spec0.config_vars, spec1.config_vars)
ssz_dep_constants = combine_dicts(spec0.ssz_dep_constants, spec1.ssz_dep_constants)
ssz_objects = combine_ssz_objects(spec0.ssz_objects, spec1.ssz_objects, custom_types)
dataclasses = combine_dicts(spec0.dataclasses, spec1.dataclasses)
return SpecObject(
functions=functions,
protocols=protocols,
custom_types=custom_types,
constant_vars=constant_vars,
preset_vars=preset_vars,
config_vars=config_vars,
ssz_dep_constants=ssz_dep_constants,
ssz_objects=ssz_objects,
dataclasses=dataclasses,
)
def parse_config_vars(conf: Dict[str, str]) -> Dict[str, str]:
"""
Parses a dict of basic str/int/list types into a dict for insertion into the spec code.
"""
out: Dict[str, str] = dict()
for k, v in conf.items():
if isinstance(v, str) and (v.startswith("0x") or k == 'PRESET_BASE' or k == 'CONFIG_NAME'):
# Represent byte data with string, to avoid misinterpretation as big-endian int.
# Everything except PRESET_BASE and CONFIG_NAME is either byte data or an integer.
out[k] = f"'{v}'"
else:
out[k] = str(int(v))
return out
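# Illustrative (hypothetical input): hex values and the two name-like vars keep
# quotes, everything else becomes a decimal literal, e.g.
#   parse_config_vars({'PRESET_BASE': 'mainnet', 'SLOTS_PER_EPOCH': '32'})
#   == {'PRESET_BASE': "'mainnet'", 'SLOTS_PER_EPOCH': '32'}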
def load_preset(preset_files: Sequence[Path]) -> Dict[str, str]:
"""
    Loads a directory of preset files and merges the result into one preset.
"""
preset = {}
for fork_file in preset_files:
yaml = YAML(typ='base')
fork_preset: dict = yaml.load(fork_file)
if fork_preset is None: # for empty YAML files
continue
if not set(fork_preset.keys()).isdisjoint(preset.keys()):
duplicates = set(fork_preset.keys()).intersection(set(preset.keys()))
raise Exception(f"duplicate config var(s) in preset files: {', '.join(duplicates)}")
preset.update(fork_preset)
assert preset != {}
return parse_config_vars(preset)
def load_config(config_path: Path) -> Dict[str, str]:
"""
Loads the given configuration file.
"""
yaml = YAML(typ='base')
config_data = yaml.load(config_path)
return parse_config_vars(config_data)
def _build_spec(preset_name: str, fork: str,
source_files: Sequence[Path], preset_files: Sequence[Path], config_file: Path) -> str:
preset = load_preset(preset_files)
config = load_config(config_file)
all_specs = [get_spec(spec, preset, config, preset_name) for spec in source_files]
spec_object = all_specs[0]
for value in all_specs[1:]:
spec_object = combine_spec_objects(spec_object, value)
class_objects = {**spec_object.ssz_objects, **spec_object.dataclasses}
# Ensure it's ordered after multiple forks
new_objects = {}
while OrderedDict(new_objects) != OrderedDict(class_objects):
new_objects = copy.deepcopy(class_objects)
dependency_order_class_objects(class_objects, spec_object.custom_types)
return objects_to_spec(preset_name, spec_object, spec_builders[fork], class_objects)
class BuildTarget(NamedTuple):
name: str
preset_paths: List[Path]
config_path: Path
class PySpecCommand(Command):
"""Convert spec markdown files to a spec python file"""
description = "Convert spec markdown files to a spec python file"
spec_fork: str
md_doc_paths: str
parsed_md_doc_paths: List[str]
build_targets: str
parsed_build_targets: List[BuildTarget]
out_dir: str
# The format is (long option, short option, description).
user_options = [
('spec-fork=', None, "Spec fork to tag build with. Used to select md-docs defaults."),
('md-doc-paths=', None, "List of paths of markdown files to build spec with"),
('build-targets=', None, "Names, directory paths of compile-time presets, and default config paths."),
('out-dir=', None, "Output directory to write spec package to")
]
def initialize_options(self):
"""Set default values for options."""
# Each user option must be listed here with their default value.
self.spec_fork = PHASE0
self.md_doc_paths = ''
self.out_dir = 'pyspec_output'
self.build_targets = """
minimal:presets/minimal:configs/minimal.yaml
mainnet:presets/mainnet:configs/mainnet.yaml
"""
def finalize_options(self):
"""Post-process options."""
if len(self.md_doc_paths) == 0:
print("no paths were specified, using default markdown file paths for pyspec"
" build (spec fork: %s)" % self.spec_fork)
self.md_doc_paths = ""
for fork in ALL_FORKS:
if is_post_fork(self.spec_fork, fork):
# Append all files in fork directory recursively
for root, dirs, files in os.walk(get_fork_directory(fork)):
for filename in files:
filepath = os.path.join(root, filename)
if filepath.endswith('.md') and filepath not in IGNORE_SPEC_FILES:
self.md_doc_paths += filepath + "\n"
# Append extra files if any
if fork in EXTRA_SPEC_FILES:
self.md_doc_paths += EXTRA_SPEC_FILES[fork] + "\n"
if len(self.md_doc_paths) == 0:
            raise Exception('no markdown files specified, and spec fork "%s" is unknown' % self.spec_fork)
self.parsed_md_doc_paths = self.md_doc_paths.split()
for filename in self.parsed_md_doc_paths:
if not os.path.exists(filename):
raise Exception('Pyspec markdown input file "%s" does not exist.' % filename)
self.parsed_build_targets = []
for target in self.build_targets.split():
target = target.strip()
data = target.split(':')
if len(data) != 3:
raise Exception('invalid target, expected "name:preset_dir:config_file" format, but got: %s' % target)
name, preset_dir_path, config_path = data
if any((c not in string.digits + string.ascii_letters) for c in name):
raise Exception('invalid target name: "%s"' % name)
if not os.path.exists(preset_dir_path):
raise Exception('Preset dir "%s" does not exist' % preset_dir_path)
_, _, preset_file_names = next(os.walk(preset_dir_path))
preset_paths = [(Path(preset_dir_path) / name) for name in preset_file_names]
if not os.path.exists(config_path):
raise Exception('Config file "%s" does not exist' % config_path)
self.parsed_build_targets.append(BuildTarget(name, preset_paths, Path(config_path)))
def run(self):
if not self.dry_run:
dir_util.mkpath(self.out_dir)
for (name, preset_paths, config_path) in self.parsed_build_targets:
spec_str = spec_builders[self.spec_fork].build_spec(
name, self.parsed_md_doc_paths, preset_paths, config_path)
if self.dry_run:
self.announce('dry run successfully prepared contents for spec.'
f' out dir: "{self.out_dir}", spec fork: "{self.spec_fork}", build target: "{name}"')
self.debug_print(spec_str)
else:
with open(os.path.join(self.out_dir, name+'.py'), 'w') as out:
out.write(spec_str)
if not self.dry_run:
with open(os.path.join(self.out_dir, '__init__.py'), 'w') as out:
# `mainnet` is the default spec.
out.write("from . import mainnet as spec # noqa:F401\n")
class BuildPyCommand(build_py):
"""Customize the build command to run the spec-builder on setup.py build"""
def initialize_options(self):
super(BuildPyCommand, self).initialize_options()
def run_pyspec_cmd(self, spec_fork: str, **opts):
cmd_obj: PySpecCommand = self.distribution.reinitialize_command("pyspec")
cmd_obj.spec_fork = spec_fork
cmd_obj.out_dir = os.path.join(self.build_lib, 'eth2spec', spec_fork)
for k, v in opts.items():
setattr(cmd_obj, k, v)
self.run_command('pyspec')
def run(self):
for spec_fork in spec_builders:
self.run_pyspec_cmd(spec_fork=spec_fork)
super(BuildPyCommand, self).run()
class PyspecDevCommand(Command):
"""Build the markdown files in-place to their source location for testing."""
description = "Build the markdown files in-place to their source location for testing."
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run_pyspec_cmd(self, spec_fork: str, **opts):
cmd_obj: PySpecCommand = self.distribution.reinitialize_command("pyspec")
cmd_obj.spec_fork = spec_fork
eth2spec_dir = convert_path(self.distribution.package_dir['eth2spec'])
cmd_obj.out_dir = os.path.join(eth2spec_dir, spec_fork)
for k, v in opts.items():
setattr(cmd_obj, k, v)
self.run_command('pyspec')
def run(self):
print("running build_py command")
for spec_fork in spec_builders:
self.run_pyspec_cmd(spec_fork=spec_fork)
commands = {
'pyspec': PySpecCommand,
'build_py': BuildPyCommand,
'pyspecdev': PyspecDevCommand,
}
with open("README.md", "rt", encoding="utf8") as f:
readme = f.read()
# How to use "VERSION.txt" file:
# - dev branch contains "X.Y.Z.dev", where "X.Y.Z" is the target version to release dev into.
# -> Changed as part of 'master' backport to 'dev'
# - master branch contains "X.Y.Z", where "X.Y.Z" is the current version.
# -> Changed as part of 'dev' release (or other branch) into 'master'
# -> In case of a commit on master without git tag, target the next version
# with ".postN" (release candidate, numbered) suffixed.
# See https://www.python.org/dev/peps/pep-0440/#public-version-identifiers
with open(os.path.join('tests', 'core', 'pyspec', 'eth2spec', 'VERSION.txt')) as f:
spec_version = f.read().strip()
setup(
name='eth2spec',
version=spec_version,
description="Eth2 spec, provided as Python package for tooling and testing",
long_description=readme,
long_description_content_type="text/markdown",
author="ethereum",
url="https://github.com/ethereum/eth2.0-specs",
include_package_data=False,
package_data={'configs': ['*.yaml'],
'presets': ['*.yaml'],
'specs': ['**/*.md'],
'eth2spec': ['VERSION.txt']},
package_dir={
"eth2spec": "tests/core/pyspec/eth2spec",
"configs": "configs",
"presets": "presets",
"specs": "specs",
},
packages=find_packages(where='tests/core/pyspec') + ['configs', 'specs'],
py_modules=["eth2spec"],
cmdclass=commands,
python_requires=">=3.9, <4",
extras_require={
"test": ["pytest>=4.4", "pytest-cov", "pytest-xdist"],
"lint": ["flake8==5.0.4", "mypy==0.981", "pylint==2.15.3"],
"generator": ["python-snappy==0.6.1", "filelock", "pathos==0.3.0"],
"docs": ["mkdocs==1.4.2", "mkdocs-material==9.1.5", "mdx-truly-sane-lists==1.3", "mkdocs-awesome-pages-plugin==2.8.0"]
},
install_requires=[
"eth-utils>=2.0.0,<3",
"eth-typing>=3.2.0,<4.0.0",
"pycryptodome==3.15.0",
"py_ecc==6.0.0",
"milagro_bls_binding==1.9.0",
"remerkleable==0.1.27",
"trie==2.0.2",
RUAMEL_YAML_VERSION,
"lru-dict==1.2.0",
MARKO_VERSION,
"py_arkworks_bls12381==0.3.4",
"curdleproofs @ git+https://github.com/nalinbhardwaj/curdleproofs.pie@805d06785b6ff35fde7148762277dd1ae678beeb#egg=curdleproofs&subdirectory=curdleproofs",
]
)
| 47,658 | 35.887771 | 192 |
py
|
consensus-specs
|
consensus-specs-master/scripts/gen_kzg_trusted_setups.py
|
import os
from pathlib import Path
from eth2spec.utils.kzg import (
dump_kzg_trusted_setup_files,
)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--secret",
dest="secret",
type=int,
required=True,
help='the secret of trusted setup',
)
parser.add_argument(
"--g1-length",
dest="g1_length",
type=int,
required=True,
help='the length of G1 trusted setup',
)
parser.add_argument(
"--g2-length",
dest="g2_length",
type=int,
required=True,
help='the length of G2 trusted setup',
)
parser.add_argument(
"-o",
"--output-dir",
dest="output_dir",
required=True,
help='the output directory',
)
args = parser.parse_args()
dump_kzg_trusted_setup_files(args.secret, args.g1_length, args.g2_length, args.output_dir)
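# Example invocation (illustrative values, not a mandated setup):
#   python gen_kzg_trusted_setups.py --secret 1337 --g1-length 4096 --g2-length 65 -o output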
| 972 | 21.113636 | 94 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/__init__.py
|
# See setup.py about usage of VERSION.txt
import os
with open(os.path.join(os.path.dirname(__file__), 'VERSION.txt')) as f:
__version__ = f.read().strip()
| 159 | 31 | 71 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/debug/decode.py
|
from typing import Any
from eth2spec.utils.ssz.ssz_impl import hash_tree_root
from eth2spec.utils.ssz.ssz_typing import (
uint, Container, List, boolean,
Vector, ByteVector, ByteList, Union, View
)
def decode(data: Any, typ):
if issubclass(typ, (uint, boolean)):
return typ(data)
elif issubclass(typ, (List, Vector)):
return typ(decode(element, typ.element_cls()) for element in data)
elif issubclass(typ, ByteVector):
return typ(bytes.fromhex(data[2:]))
elif issubclass(typ, ByteList):
return typ(bytes.fromhex(data[2:]))
elif issubclass(typ, Container):
temp = {}
for field_name, field_type in typ.fields().items():
temp[field_name] = decode(data[field_name], field_type)
if field_name + "_hash_tree_root" in data:
assert (data[field_name + "_hash_tree_root"][2:] ==
hash_tree_root(temp[field_name]).hex())
ret = typ(**temp)
if "hash_tree_root" in data:
assert (data["hash_tree_root"][2:] ==
hash_tree_root(ret).hex())
return ret
elif issubclass(typ, Union):
selector = int(data["selector"])
options = typ.options()
value_typ = options[selector]
value: View
if value_typ is None: # handle the "nil" type case
assert data["value"] is None
value = None
else:
value = decode(data["value"], value_typ)
return typ(selector=selector, value=value)
else:
raise Exception(f"Type not recognized: data={data}, typ={typ}")
| 1,620 | 36.697674 | 74 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/debug/random_value.py
|
from random import Random
from enum import Enum
from typing import Type
from eth2spec.utils.ssz.ssz_typing import (
View, BasicView, uint, Container, List, boolean,
Vector, ByteVector, ByteList, Bitlist, Bitvector, Union
)
# in bytes
UINT_BYTE_SIZES = (1, 2, 4, 8, 16, 32)
random_mode_names = ("random", "zero", "max", "nil", "one", "lengthy")
class RandomizationMode(Enum):
# random content / length
mode_random = 0
# Zero-value
mode_zero = 1
# Maximum value, limited to count 1 however
mode_max = 2
# Return 0 values, i.e. empty
mode_nil_count = 3
# Return 1 value, random content
mode_one_count = 4
# Return max amount of values, random content
mode_max_count = 5
def to_name(self):
return random_mode_names[self.value]
def is_changing(self):
return self.value in [0, 4, 5]
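# Illustrative: RandomizationMode.mode_max.to_name() == 'max', and only the
# random-content modes (mode_random, mode_one_count, mode_max_count) report
# is_changing() == True.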
def get_random_ssz_object(rng: Random,
typ: Type[View],
max_bytes_length: int,
max_list_length: int,
mode: RandomizationMode,
chaos: bool) -> View:
"""
Create an object for a given type, filled with random data.
:param rng: The random number generator to use.
:param typ: The type to instantiate
:param max_bytes_length: the max. length for a random bytes array
:param max_list_length: the max. length for a random list
:param mode: how to randomize
:param chaos: if true, the randomization-mode will be randomly changed
:return: the random object instance, of the given type.
"""
if chaos:
mode = rng.choice(list(RandomizationMode))
if issubclass(typ, ByteList):
# ByteList array
if mode == RandomizationMode.mode_nil_count:
return typ(b'')
elif mode == RandomizationMode.mode_max_count:
return typ(get_random_bytes_list(rng, min(max_bytes_length, typ.limit())))
elif mode == RandomizationMode.mode_one_count:
return typ(get_random_bytes_list(rng, min(1, typ.limit())))
elif mode == RandomizationMode.mode_zero:
return typ(b'\x00' * min(1, typ.limit()))
elif mode == RandomizationMode.mode_max:
return typ(b'\xff' * min(1, typ.limit()))
else:
return typ(get_random_bytes_list(rng, rng.randint(0, min(max_bytes_length, typ.limit()))))
if issubclass(typ, ByteVector):
# Random byte vectors can be bigger than max bytes size, e.g. custody chunk data.
# No max-bytes-length limitation here.
if mode == RandomizationMode.mode_zero:
return typ(b'\x00' * typ.type_byte_length())
elif mode == RandomizationMode.mode_max:
return typ(b'\xff' * typ.type_byte_length())
else:
return typ(get_random_bytes_list(rng, typ.type_byte_length()))
elif issubclass(typ, (boolean, uint)):
# Basic types
if mode == RandomizationMode.mode_zero:
return get_min_basic_value(typ)
elif mode == RandomizationMode.mode_max:
return get_max_basic_value(typ)
else:
return get_random_basic_value(rng, typ)
elif issubclass(typ, (Vector, Bitvector)):
elem_type = typ.element_cls() if issubclass(typ, Vector) else boolean
return typ(
get_random_ssz_object(rng, elem_type, max_bytes_length, max_list_length, mode, chaos)
for _ in range(typ.vector_length())
)
elif issubclass(typ, List) or issubclass(typ, Bitlist):
length = rng.randint(0, min(typ.limit(), max_list_length))
if mode == RandomizationMode.mode_one_count:
length = 1
elif mode == RandomizationMode.mode_max_count:
length = max_list_length
elif mode == RandomizationMode.mode_nil_count:
length = 0
if typ.limit() < length: # SSZ imposes a hard limit on lists, we can't put in more than that
length = typ.limit()
elem_type = typ.element_cls() if issubclass(typ, List) else boolean
return typ(
get_random_ssz_object(rng, elem_type, max_bytes_length, max_list_length, mode, chaos)
for _ in range(length)
)
elif issubclass(typ, Container):
fields = typ.fields()
# Container
return typ(**{
field_name:
get_random_ssz_object(rng, field_type, max_bytes_length, max_list_length, mode, chaos)
for field_name, field_type in fields.items()
})
elif issubclass(typ, Union):
options = typ.options()
selector: int
if mode == RandomizationMode.mode_zero:
selector = 0
elif mode == RandomizationMode.mode_max:
selector = len(options) - 1
else:
selector = rng.randrange(0, len(options))
elem_type = options[selector]
elem: View
if elem_type is None:
elem = None
else:
elem = get_random_ssz_object(rng, elem_type, max_bytes_length, max_list_length, mode, chaos)
return typ(selector=selector, value=elem)
else:
raise Exception(f"Type not recognized: typ={typ}")
def get_random_bytes_list(rng: Random, length: int) -> bytes:
return bytes(rng.getrandbits(8) for _ in range(length))
def get_random_basic_value(rng: Random, typ) -> BasicView:
if issubclass(typ, boolean):
return typ(rng.choice((True, False)))
elif issubclass(typ, uint):
assert typ.type_byte_length() in UINT_BYTE_SIZES
return typ(rng.randint(0, 256 ** typ.type_byte_length() - 1))
else:
raise ValueError(f"Not a basic type: typ={typ}")
def get_min_basic_value(typ) -> BasicView:
if issubclass(typ, boolean):
return typ(False)
elif issubclass(typ, uint):
assert typ.type_byte_length() in UINT_BYTE_SIZES
return typ(0)
else:
raise ValueError(f"Not a basic type: typ={typ}")
def get_max_basic_value(typ) -> BasicView:
if issubclass(typ, boolean):
return typ(True)
elif issubclass(typ, uint):
assert typ.type_byte_length() in UINT_BYTE_SIZES
return typ(256 ** typ.type_byte_length() - 1)
else:
raise ValueError(f"Not a basic type: typ={typ}")
| 6,311 | 36.129412 | 104 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/debug/__init__.py
| 0 | 0 | 0 |
py
|
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/debug/encode.py
|
from eth2spec.utils.ssz.ssz_impl import hash_tree_root, serialize
from eth2spec.utils.ssz.ssz_typing import (
uint, boolean,
Bitlist, Bitvector, Container, Vector, List, Union
)
def encode(value, include_hash_tree_roots=False):
if isinstance(value, uint):
# Larger uints are boxed and the class declares their byte length
if value.__class__.type_byte_length() > 8:
return str(int(value))
return int(value)
elif isinstance(value, boolean):
return value == 1
elif isinstance(value, (Bitlist, Bitvector)):
return '0x' + serialize(value).hex()
elif isinstance(value, list): # normal python lists
return [encode(element, include_hash_tree_roots) for element in value]
elif isinstance(value, (List, Vector)):
return [encode(element, include_hash_tree_roots) for element in value]
elif isinstance(value, bytes): # bytes, ByteList, ByteVector
return '0x' + value.hex()
elif isinstance(value, Container):
ret = {}
for field_name in value.fields().keys():
field_value = getattr(value, field_name)
ret[field_name] = encode(field_value, include_hash_tree_roots)
if include_hash_tree_roots:
ret[field_name + "_hash_tree_root"] = '0x' + hash_tree_root(field_value).hex()
if include_hash_tree_roots:
ret["hash_tree_root"] = '0x' + hash_tree_root(value).hex()
return ret
elif isinstance(value, Union):
inner_value = value.value()
return {
'selector': int(value.selector()),
'value': None if inner_value is None else encode(inner_value, include_hash_tree_roots)
}
else:
raise Exception(f"Type not recognized: value={value}, typ={type(value)}")
| 1,803 | 41.952381 | 98 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/config/config_util.py
|
from pathlib import Path
from typing import Dict, Iterable, Union, BinaryIO, TextIO, Any
from ruamel.yaml import YAML
def parse_config_vars(conf: Dict[str, Any]) -> Dict[str, Any]:
"""
Parses a dict of basic str/int/list types into more detailed python types
"""
out: Dict[str, Any] = dict()
for k, v in conf.items():
if isinstance(v, list):
# Clean up integer values. YAML parser renders lists of ints as list of str
out[k] = [int(item) if item.isdigit() else item for item in v]
elif isinstance(v, str) and v.startswith("0x"):
out[k] = bytes.fromhex(v[2:])
elif k != 'PRESET_BASE' and k != 'CONFIG_NAME':
out[k] = int(v)
else:
out[k] = v
return out
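# Illustrative example (hypothetical input):
#   parse_config_vars({'GENESIS_FORK_VERSION': '0x00000000', 'SLOTS_PER_EPOCH': '32'})
#   == {'GENESIS_FORK_VERSION': b'\x00\x00\x00\x00', 'SLOTS_PER_EPOCH': 32}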
def load_preset(preset_files: Iterable[Union[Path, BinaryIO, TextIO]]) -> Dict[str, Any]:
"""
    Loads a directory of preset files and merges the result into one preset.
"""
preset = {}
for fork_file in preset_files:
yaml = YAML(typ='base')
fork_preset: dict = yaml.load(fork_file)
if fork_preset is None: # for empty YAML files
continue
if not set(fork_preset.keys()).isdisjoint(preset.keys()):
duplicates = set(fork_preset.keys()).intersection(set(preset.keys()))
raise Exception(f"duplicate config var(s) in preset files: {', '.join(duplicates)}")
preset.update(fork_preset)
assert preset != {}
return parse_config_vars(preset)
def load_config_file(config_path: Union[Path, BinaryIO, TextIO]) -> Dict[str, Any]:
"""
Loads the given configuration file.
"""
yaml = YAML(typ='base')
config_data = yaml.load(config_path)
return parse_config_vars(config_data)
mainnet_config_data: Dict[str, Any]
minimal_config_data: Dict[str, Any]
loaded_defaults = False
def load_defaults(spec_configs_path: Path) -> None:
global mainnet_config_data, minimal_config_data
mainnet_config_data = load_config_file(spec_configs_path / 'mainnet.yaml')
minimal_config_data = load_config_file(spec_configs_path / 'minimal.yaml')
global loaded_defaults
loaded_defaults = True
| 2,176 | 33.015625 | 96 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/config/__init__.py
| 0 | 0 | 0 |
py
|
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/gen_helpers/__init__.py
| 0 | 0 | 0 |
py
|
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/gen_helpers/gen_base/settings.py
|
import multiprocessing
# Generator mode setting
MODE_SINGLE_PROCESS = 'MODE_SINGLE_PROCESS'
MODE_MULTIPROCESSING = 'MODE_MULTIPROCESSING'
# Test generator mode
GENERATOR_MODE = MODE_SINGLE_PROCESS
# Number of subprocesses when using MODE_MULTIPROCESSING
NUM_PROCESS = multiprocessing.cpu_count() // 2 - 1
# Diagnostics
TIME_THRESHOLD_TO_PRINT = 1.0 # seconds
| 363 | 25 | 56 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/gen_helpers/gen_base/gen_runner.py
|
from dataclasses import (
dataclass,
field,
)
import os
import time
import shutil
import argparse
from pathlib import Path
import sys
import json
from typing import Iterable, AnyStr, Any, Callable
import traceback
from collections import namedtuple
from ruamel.yaml import (
YAML,
)
from filelock import FileLock
from snappy import compress
from pathos.multiprocessing import ProcessingPool as Pool
from eth_utils import encode_hex
from eth2spec.test import context
from eth2spec.test.exceptions import SkippedTest
from .gen_typing import TestProvider
from .settings import (
GENERATOR_MODE,
MODE_MULTIPROCESSING,
MODE_SINGLE_PROCESS,
NUM_PROCESS,
TIME_THRESHOLD_TO_PRINT,
)
# Flag that the runner does NOT run test via pytest
context.is_pytest = False
@dataclass
class Diagnostics(object):
collected_test_count: int = 0
generated_test_count: int = 0
skipped_test_count: int = 0
test_identifiers: list = field(default_factory=list)
TestCaseParams = namedtuple(
'TestCaseParams', [
'test_case', 'case_dir', 'log_file', 'file_mode',
])
def worker_function(item):
return generate_test_vector(*item)
def get_default_yaml():
yaml = YAML(pure=True)
yaml.default_flow_style = None
def _represent_none(self, _):
return self.represent_scalar('tag:yaml.org,2002:null', 'null')
yaml.representer.add_representer(type(None), _represent_none)
return yaml
def get_cfg_yaml():
# Spec config is using a YAML subset
cfg_yaml = YAML(pure=True)
cfg_yaml.default_flow_style = False # Emit separate line for each key
def cfg_represent_bytes(self, data):
return self.represent_int(encode_hex(data))
cfg_yaml.representer.add_representer(bytes, cfg_represent_bytes)
def cfg_represent_quoted_str(self, data):
return self.represent_scalar(u'tag:yaml.org,2002:str', data, style="'")
cfg_yaml.representer.add_representer(context.quoted_str, cfg_represent_quoted_str)
return cfg_yaml
def validate_output_dir(path_str):
path = Path(path_str)
if not path.exists():
raise argparse.ArgumentTypeError("Output directory must exist")
if not path.is_dir():
raise argparse.ArgumentTypeError("Output path must lead to a directory")
return path
def get_test_case_dir(test_case, output_dir):
return (
Path(output_dir) / Path(test_case.preset_name) / Path(test_case.fork_name)
/ Path(test_case.runner_name) / Path(test_case.handler_name)
/ Path(test_case.suite_name) / Path(test_case.case_name)
)
def get_test_identifier(test_case):
return "::".join([
test_case.preset_name,
test_case.fork_name,
test_case.runner_name,
test_case.handler_name,
test_case.suite_name,
test_case.case_name
])
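# Illustrative (hypothetical names): identifiers look like
#   'mainnet::phase0::sanity::blocks::pyspec_tests::empty_block_transition'
# mirroring the case-directory layout above.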
def get_incomplete_tag_file(case_dir):
return case_dir / "INCOMPLETE"
def should_skip_case_dir(case_dir, is_force, diagnostics_obj):
is_skip = False
incomplete_tag_file = get_incomplete_tag_file(case_dir)
if case_dir.exists():
if not is_force and not incomplete_tag_file.exists():
diagnostics_obj.skipped_test_count += 1
print(f'Skipping already existing test: {case_dir}')
is_skip = True
else:
            print(f'Warning: output directory {case_dir} already exists;'
                  ' old files will be deleted and test vector files will be regenerated with the latest version')
# Clear the existing case_dir folder
shutil.rmtree(case_dir)
return is_skip, diagnostics_obj
def run_generator(generator_name, test_providers: Iterable[TestProvider]):
"""
Implementation for a general test generator.
:param generator_name: The name of the generator. (lowercase snake_case)
    :param test_providers: A list of test providers,
each of these returns a callable that returns an iterable of test cases.
The call to get the iterable may set global configuration,
and the iterable should not be resumed after a pause with a change of that configuration.
:return:
"""
parser = argparse.ArgumentParser(
prog="gen-" + generator_name,
description=f"Generate YAML test suite files for {generator_name}",
)
parser.add_argument(
"-o",
"--output-dir",
dest="output_dir",
required=True,
type=validate_output_dir,
help="directory into which the generated YAML files will be dumped"
)
parser.add_argument(
"-f",
"--force",
action="store_true",
default=False,
help="if set re-generate and overwrite test files if they already exist",
)
parser.add_argument(
"-l",
"--preset-list",
dest="preset_list",
nargs='*',
type=str,
required=False,
help="specify presets to run with. Allows all if no preset names are specified.",
)
parser.add_argument(
"-c",
"--collect-only",
action="store_true",
default=False,
help="if set only print tests to generate, do not actually run the test and dump the target data",
)
args = parser.parse_args()
output_dir = args.output_dir
if not args.force:
file_mode = "x"
else:
file_mode = "w"
log_file = Path(output_dir) / 'testgen_error_log.txt'
print(f"Generating tests into {output_dir}")
print(f'Error log file: {log_file}')
presets = args.preset_list
if presets is None:
presets = []
if len(presets) != 0:
print(f"Filtering test-generator runs to only include presets: {', '.join(presets)}")
collect_only = args.collect_only
diagnostics_obj = Diagnostics()
provider_start = time.time()
if GENERATOR_MODE == MODE_MULTIPROCESSING:
all_test_case_params = []
for tprov in test_providers:
if not collect_only:
# runs anything that we don't want to repeat for every test case.
tprov.prepare()
for test_case in tprov.make_cases():
# If preset list is assigned, filter by presets.
if len(presets) != 0 and test_case.preset_name not in presets:
continue
case_dir = get_test_case_dir(test_case, output_dir)
print(f"Collected test at: {case_dir}")
diagnostics_obj.collected_test_count += 1
is_skip, diagnostics_obj = should_skip_case_dir(case_dir, args.force, diagnostics_obj)
if is_skip:
continue
if GENERATOR_MODE == MODE_SINGLE_PROCESS:
result = generate_test_vector(test_case, case_dir, log_file, file_mode)
write_result_into_diagnostics_obj(result, diagnostics_obj)
elif GENERATOR_MODE == MODE_MULTIPROCESSING:
item = TestCaseParams(test_case, case_dir, log_file, file_mode)
all_test_case_params.append(item)
if GENERATOR_MODE == MODE_MULTIPROCESSING:
with Pool(processes=NUM_PROCESS) as pool:
results = pool.map(worker_function, iter(all_test_case_params))
for result in results:
write_result_into_diagnostics_obj(result, diagnostics_obj)
provider_end = time.time()
span = round(provider_end - provider_start, 2)
if collect_only:
print(f"Collected {diagnostics_obj.collected_test_count} tests in total")
else:
summary_message = f"completed generation of {generator_name} with {diagnostics_obj.generated_test_count} tests"
summary_message += f" ({diagnostics_obj.skipped_test_count} skipped tests)"
if span > TIME_THRESHOLD_TO_PRINT:
summary_message += f" in {span} seconds"
print(summary_message)
diagnostics_output = {
"collected_test_count": diagnostics_obj.collected_test_count,
"generated_test_count": diagnostics_obj.generated_test_count,
"skipped_test_count": diagnostics_obj.skipped_test_count,
"test_identifiers": diagnostics_obj.test_identifiers,
"durations": [f"{span} seconds"],
}
diagnostics_path = Path(os.path.join(output_dir, "diagnostics_obj.json"))
diagnostics_lock = FileLock(os.path.join(output_dir, "diagnostics_obj.json.lock"))
with diagnostics_lock:
diagnostics_path.touch(exist_ok=True)
if os.path.getsize(diagnostics_path) == 0:
with open(diagnostics_path, "w+") as f:
json.dump(diagnostics_output, f)
else:
with open(diagnostics_path, "r+") as f:
existing_diagnostics = json.load(f)
for k, v in diagnostics_output.items():
existing_diagnostics[k] += v
with open(diagnostics_path, "w+") as f:
json.dump(existing_diagnostics, f)
print(f"wrote diagnostics_obj to {diagnostics_path}")
def generate_test_vector(test_case, case_dir, log_file, file_mode):
cfg_yaml = get_cfg_yaml()
yaml = get_default_yaml()
written_part = False
print(f'Generating test: {case_dir}')
test_start = time.time()
# Add `INCOMPLETE` tag file to indicate that the test generation has not completed.
incomplete_tag_file = get_incomplete_tag_file(case_dir)
case_dir.mkdir(parents=True, exist_ok=True)
with incomplete_tag_file.open("w") as f:
f.write("\n")
result = None
try:
meta = dict()
try:
written_part, meta = execute_test(test_case, case_dir, meta, log_file, file_mode, cfg_yaml, yaml)
except SkippedTest as e:
result = 0 # 0 means skipped
print(e)
shutil.rmtree(case_dir)
return result
# Once all meta data is collected (if any), write it to a meta data file.
if len(meta) != 0:
written_part = True
output_part(case_dir, log_file, "data", "meta", dump_yaml_fn(meta, "meta", file_mode, yaml))
except Exception as e:
result = -1 # -1 means error
error_message = f"[ERROR] failed to generate vector(s) for test {case_dir}: {e}"
# Write to error log file
with log_file.open("a+") as f:
f.write(error_message)
traceback.print_exc(file=f)
f.write('\n')
print(error_message)
traceback.print_exc()
else:
# If no written_part, the only file was incomplete_tag_file. Clear the existing case_dir folder.
if not written_part:
print(f"[Error] test case {case_dir} did not produce any written_part")
shutil.rmtree(case_dir)
result = -1
else:
result = get_test_identifier(test_case)
# Only remove `INCOMPLETE` tag file
os.remove(incomplete_tag_file)
test_end = time.time()
span = round(test_end - test_start, 2)
if span > TIME_THRESHOLD_TO_PRINT:
print(f' - generated in {span} seconds')
return result
def write_result_into_diagnostics_obj(result, diagnostics_obj):
if result == -1: # error
pass
elif result == 0:
diagnostics_obj.skipped_test_count += 1
elif result is not None:
diagnostics_obj.generated_test_count += 1
diagnostics_obj.test_identifiers.append(result)
else:
raise Exception(f"Unexpected result: {result}")
def dump_yaml_fn(data: Any, name: str, file_mode: str, yaml_encoder: YAML):
def dump(case_path: Path):
out_path = case_path / Path(name + '.yaml')
with out_path.open(file_mode) as f:
yaml_encoder.dump(data, f)
f.close()
return dump
def output_part(case_dir, log_file, out_kind: str, name: str, fn: Callable[[Path, ], None]):
# make sure the test case directory is created before any test part is written.
case_dir.mkdir(parents=True, exist_ok=True)
try:
fn(case_dir)
except (IOError, ValueError) as e:
error_message = f'[Error] error when dumping test "{case_dir}", part "{name}", kind "{out_kind}": {e}'
# Write to error log file
with log_file.open("a+") as f:
f.write(error_message)
traceback.print_exc(file=f)
f.write('\n')
print(error_message)
sys.exit(error_message)
def execute_test(test_case, case_dir, meta, log_file, file_mode, cfg_yaml, yaml):
result = test_case.case_fn()
written_part = False
for (name, out_kind, data) in result:
written_part = True
if out_kind == "meta":
meta[name] = data
elif out_kind == "cfg":
output_part(case_dir, log_file, out_kind, name, dump_yaml_fn(data, name, file_mode, cfg_yaml))
elif out_kind == "data":
output_part(case_dir, log_file, out_kind, name, dump_yaml_fn(data, name, file_mode, yaml))
elif out_kind == "ssz":
output_part(case_dir, log_file, out_kind, name, dump_ssz_fn(data, name, file_mode))
else:
raise ValueError("Unknown out_kind %s" % out_kind)
return written_part, meta
def dump_ssz_fn(data: AnyStr, name: str, file_mode: str):
def dump(case_path: Path):
out_path = case_path / Path(name + '.ssz_snappy')
compressed = compress(data)
with out_path.open(file_mode + 'b') as f: # write in raw binary mode
f.write(compressed)
return dump
| 13,418 | 31.970516 | 119 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/gen_helpers/gen_base/__init__.py
| 0 | 0 | 0 |
py
|
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/gen_helpers/gen_base/gen_typing.py
|
from typing import (
Any,
Callable,
Iterable,
NewType,
Tuple,
)
from dataclasses import dataclass
# Elements: name, out_kind, data
#
# out_kind is the type of data:
# - "meta" for generic data to collect into a meta data dict
# - "cfg" for a spec config dictionary
# - "data" for generic
# - "ssz" for SSZ encoded bytes
TestCasePart = NewType("TestCasePart", Tuple[str, str, Any])
@dataclass
class TestCase(object):
fork_name: str
preset_name: str
runner_name: str
handler_name: str
suite_name: str
case_name: str
case_fn: Callable[[], Iterable[TestCasePart]]
@dataclass
class TestProvider(object):
# Prepares the context for the provider as a whole, as opposed to per-test-case changes.
prepare: Callable[[], None]
# Retrieves an iterable of cases, called after prepare()
make_cases: Callable[[], Iterable[TestCase]]
| 891 | 23.108108 | 92 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/gen_helpers/gen_from_tests/gen.py
|
from importlib import import_module
from inspect import getmembers, isfunction
from typing import Any, Callable, Dict, Iterable, Optional, List, Union
from eth2spec.utils import bls
from eth2spec.test.helpers.constants import ALL_PRESETS, TESTGEN_FORKS
from eth2spec.test.helpers.typing import SpecForkName, PresetBaseName
from eth2spec.gen_helpers.gen_base import gen_runner
from eth2spec.gen_helpers.gen_base.gen_typing import TestCase, TestProvider
def generate_from_tests(runner_name: str, handler_name: str, src: Any,
fork_name: SpecForkName, preset_name: PresetBaseName,
bls_active: bool = True,
                        phase: Optional[str] = None) -> Iterable[TestCase]:
"""
Generate a list of test cases by running tests from the given src in generator-mode.
:param runner_name: to categorize the test in general as.
:param handler_name: to categorize the test specialization as.
:param src: to retrieve tests from (discovered using inspect.getmembers).
:param fork_name: the folder name for these tests.
(if multiple forks are applicable, indicate the last fork)
:param preset_name: to select a preset. Tests that do not support the preset will be skipped.
:param bls_active: optional, to override BLS switch preference. Defaults to True.
    :param phase: optional, to run tests against a particular spec version. Defaults to the `fork_name` value.
Set to the pre-fork (w.r.t. fork_name) in multi-fork tests.
:return: an iterable of test cases.
"""
fn_names = [
name for (name, _) in getmembers(src, isfunction)
if name.startswith('test_')
]
if phase is None:
phase = fork_name
print("generating test vectors from tests source: %s" % src.__name__)
for name in fn_names:
tfn = getattr(src, name)
# strip off the `test_`
case_name = name
if case_name.startswith('test_'):
case_name = case_name[5:]
yield TestCase(
fork_name=fork_name,
preset_name=preset_name,
runner_name=runner_name,
handler_name=handler_name,
suite_name=getattr(tfn, 'suite_name', 'pyspec_tests'),
case_name=case_name,
# TODO: with_all_phases and other per-phase tooling, should be replaced with per-fork equivalent.
case_fn=lambda: tfn(generator_mode=True, phase=phase, preset=preset_name, bls_active=bls_active)
)
def get_provider(create_provider_fn: Callable[[SpecForkName, PresetBaseName, str, str], TestProvider],
fork_name: SpecForkName,
preset_name: PresetBaseName,
all_mods: Dict[str, Dict[str, Union[List[str], str]]]) -> Iterable[TestProvider]:
for key, mod_name in all_mods[fork_name].items():
if not isinstance(mod_name, List):
mod_name = [mod_name]
yield create_provider_fn(
fork_name=fork_name,
preset_name=preset_name,
handler_name=key,
tests_src_mod_name=mod_name,
)
def get_create_provider_fn(runner_name: str) -> Callable[[SpecForkName, str, str, PresetBaseName], TestProvider]:
def prepare_fn() -> None:
bls.use_milagro()
return
def create_provider(fork_name: SpecForkName, preset_name: PresetBaseName,
handler_name: str, tests_src_mod_name: List[str]) -> TestProvider:
def cases_fn() -> Iterable[TestCase]:
for mod_name in tests_src_mod_name:
tests_src = import_module(mod_name)
yield from generate_from_tests(
runner_name=runner_name,
handler_name=handler_name,
src=tests_src,
fork_name=fork_name,
preset_name=preset_name,
)
return TestProvider(prepare=prepare_fn, make_cases=cases_fn)
return create_provider
def run_state_test_generators(runner_name: str,
all_mods: Dict[str, Dict[str, str]],
presets: Iterable[PresetBaseName] = ALL_PRESETS,
forks: Iterable[SpecForkName] = TESTGEN_FORKS) -> None:
"""
Generate all available state tests of `TESTGEN_FORKS` forks of `ALL_PRESETS` presets of the given runner.
"""
for preset_name in presets:
for fork_name in forks:
if fork_name in all_mods:
gen_runner.run_generator(runner_name, get_provider(
create_provider_fn=get_create_provider_fn(runner_name),
fork_name=fork_name,
preset_name=preset_name,
all_mods=all_mods,
))
def combine_mods(dict_1, dict_2):
"""
    Return the merged dict, where values for keys present in both dicts are combined into a list.
    """
    # Keys duplicated between the two dicts are overwritten by dict_2 here,
    # then merged back into lists below.
dict_3 = {**dict_1, **dict_2}
intersection = dict_1.keys() & dict_2.keys()
for key in intersection:
# To list
if not isinstance(dict_3[key], List):
dict_3[key] = [dict_3[key]]
# Append dict_1 value to list
if isinstance(dict_1[key], List):
dict_3[key] += dict_1[key]
else:
dict_3[key].append(dict_1[key])
return dict_3
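# Illustrative:
#   combine_mods({'a': 'x'}, {'a': 'y', 'b': 'z'}) == {'a': ['y', 'x'], 'b': 'z'}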
| 5,408 | 39.669173 | 113 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/gen_helpers/gen_from_tests/__init__.py
| 0 | 0 | 0 |
py
|
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/context.py
|
import pytest
from copy import deepcopy
from dataclasses import dataclass
import importlib
from eth2spec.phase0 import mainnet as spec_phase0_mainnet, minimal as spec_phase0_minimal
from eth2spec.altair import mainnet as spec_altair_mainnet, minimal as spec_altair_minimal
from eth2spec.bellatrix import mainnet as spec_bellatrix_mainnet, minimal as spec_bellatrix_minimal
from eth2spec.capella import mainnet as spec_capella_mainnet, minimal as spec_capella_minimal
from eth2spec.deneb import mainnet as spec_deneb_mainnet, minimal as spec_deneb_minimal
from eth2spec.eip6110 import mainnet as spec_eip6110_mainnet, minimal as spec_eip6110_minimal
from eth2spec.utils import bls
from .exceptions import SkippedTest
from .helpers.constants import (
PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB,
EIP6110,
MINIMAL, MAINNET,
ALL_PHASES,
ALL_FORK_UPGRADES,
LIGHT_CLIENT_TESTING_FORKS,
)
from .helpers.forks import is_post_fork
from .helpers.typing import SpecForkName, PresetBaseName
from .helpers.genesis import create_genesis_state
from .utils import (
vector_test,
with_meta_tags,
)
from random import Random
from typing import Any, Callable, Sequence, TypedDict, Protocol, Dict
from lru import LRU
# Without pytest CLI arg or pyspec-test-generator 'preset' argument, this will be the config to apply.
DEFAULT_TEST_PRESET = MINIMAL
# Without pytest CLI arg or pyspec-test-generator 'run-phase' argument, this will be the config to apply.
DEFAULT_PYTEST_FORKS = ALL_PHASES
# TODO: currently phases are defined as python modules.
# It would be better if they would be more well-defined interfaces for stronger typing.
class Configuration(Protocol):
PRESET_BASE: str
class Spec(Protocol):
fork: str
config: Configuration
class SpecPhase0(Spec):
...
class SpecAltair(Spec):
...
class SpecBellatrix(Spec):
...
class SpecCapella(Spec):
...
@dataclass(frozen=True)
class ForkMeta:
pre_fork_name: str
post_fork_name: str
fork_epoch: int
spec_targets: Dict[PresetBaseName, Dict[SpecForkName, Spec]] = {
MINIMAL: {
PHASE0: spec_phase0_minimal,
ALTAIR: spec_altair_minimal,
BELLATRIX: spec_bellatrix_minimal,
CAPELLA: spec_capella_minimal,
DENEB: spec_deneb_minimal,
EIP6110: spec_eip6110_minimal,
},
MAINNET: {
PHASE0: spec_phase0_mainnet,
ALTAIR: spec_altair_mainnet,
BELLATRIX: spec_bellatrix_mainnet,
CAPELLA: spec_capella_mainnet,
DENEB: spec_deneb_mainnet,
EIP6110: spec_eip6110_mainnet,
},
}
class SpecForks(TypedDict, total=False):
PHASE0: SpecPhase0
ALTAIR: SpecAltair
BELLATRIX: SpecBellatrix
CAPELLA: SpecCapella
def _prepare_state(balances_fn: Callable[[Any], Sequence[int]], threshold_fn: Callable[[Any], int],
spec: Spec, phases: SpecForks):
balances = balances_fn(spec)
activation_threshold = threshold_fn(spec)
state = create_genesis_state(spec=spec, validator_balances=balances,
activation_threshold=activation_threshold)
return state
_custom_state_cache_dict = LRU(size=10)
def with_custom_state(balances_fn: Callable[[Any], Sequence[int]],
threshold_fn: Callable[[Any], int]):
def deco(fn):
def entry(*args, spec: Spec, phases: SpecForks, **kw):
# make a key for the state, unique to the fork + config (incl preset choice) and balances/activations
key = (spec.fork, spec.config.__hash__(), spec.__file__, balances_fn, threshold_fn)
global _custom_state_cache_dict
if key not in _custom_state_cache_dict:
state = _prepare_state(balances_fn, threshold_fn, spec, phases)
_custom_state_cache_dict[key] = state.get_backing()
# Take an entry out of the LRU.
# No copy is necessary, as we wrap the immutable backing with a new view.
state = spec.BeaconState(backing=_custom_state_cache_dict[key])
kw['state'] = state
return fn(*args, spec=spec, phases=phases, **kw)
return entry
return deco
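# Hedged usage sketch (hypothetical test name, commented out so pytest would
# not collect it): the balances/threshold helpers defined below are passed in,
# and the prepared genesis state is cached per (fork, config, balances) key.
#
# @with_all_phases
# @spec_test
# @with_custom_state(balances_fn=low_balances, threshold_fn=zero_activation_threshold)
# @single_phase
# def test_with_low_balances(spec, state):
#     yield 'pre', state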
def default_activation_threshold(spec: Spec):
"""
Helper method to use the default balance activation threshold for state creation for tests.
Usage: `@with_custom_state(threshold_fn=default_activation_threshold, ...)`
"""
return spec.MAX_EFFECTIVE_BALANCE
def zero_activation_threshold(spec: Spec):
"""
Helper method to use 0 gwei as the activation threshold for state creation for tests.
Usage: `@with_custom_state(threshold_fn=zero_activation_threshold, ...)`
"""
return 0
def default_balances(spec: Spec):
"""
Helper method to create a series of default balances.
Usage: `@with_custom_state(balances_fn=default_balances, ...)`
"""
num_validators = spec.SLOTS_PER_EPOCH * 8
return [spec.MAX_EFFECTIVE_BALANCE] * num_validators
def scaled_churn_balances(spec: Spec):
"""
Helper method to create enough validators to scale the churn limit.
(This is *firmly* over the churn limit -- thus the +2 instead of just +1)
See the second argument of ``max`` in ``get_validator_churn_limit``.
Usage: `@with_custom_state(balances_fn=scaled_churn_balances, ...)`
"""
num_validators = spec.config.CHURN_LIMIT_QUOTIENT * (2 + spec.config.MIN_PER_EPOCH_CHURN_LIMIT)
return [spec.MAX_EFFECTIVE_BALANCE] * num_validators
with_state = with_custom_state(default_balances, default_activation_threshold)
def low_balances(spec: Spec):
"""
Helper method to create a series of low balances.
Usage: `@with_custom_state(balances_fn=low_balances, ...)`
"""
num_validators = spec.SLOTS_PER_EPOCH * 8
# Technically the balances cannot be this low starting from genesis, but it is useful for testing
low_balance = 18 * 10 ** 9
return [low_balance] * num_validators
def misc_balances(spec: Spec):
"""
Helper method to create a series of balances that includes some misc. balances.
Usage: `@with_custom_state(balances_fn=misc_balances, ...)`
"""
num_validators = spec.SLOTS_PER_EPOCH * 8
balances = [spec.MAX_EFFECTIVE_BALANCE * 2 * i // num_validators for i in range(num_validators)]
rng = Random(1234)
rng.shuffle(balances)
return balances
def misc_balances_in_default_range_with_many_validators(spec: Spec):
"""
Helper method to create a series of balances that includes some misc. balances but
none that are below the ``EJECTION_BALANCE``.
"""
# Double validators to facilitate randomized testing
num_validators = spec.SLOTS_PER_EPOCH * 8 * 2
floor = spec.config.EJECTION_BALANCE + spec.EFFECTIVE_BALANCE_INCREMENT
balances = [
max(spec.MAX_EFFECTIVE_BALANCE * 2 * i // num_validators, floor) for i in range(num_validators)
]
rng = Random(1234)
rng.shuffle(balances)
return balances
def low_single_balance(spec: Spec):
"""
    Helper method to create a single balance of 1 Gwei.
Usage: `@with_custom_state(balances_fn=low_single_balance, ...)`
"""
return [1]
def large_validator_set(spec: Spec):
"""
Helper method to create a large series of default balances.
    Usage: `@with_custom_state(balances_fn=large_validator_set, ...)`
"""
num_validators = 2 * spec.SLOTS_PER_EPOCH * spec.MAX_COMMITTEES_PER_SLOT * spec.TARGET_COMMITTEE_SIZE
return [spec.MAX_EFFECTIVE_BALANCE] * num_validators
def single_phase(fn):
"""
Decorator that filters out the phases data.
    Most state tests only focus on the behavior of a single phase (the "spec").
This decorator is applied as part of spec_state_test(fn).
"""
def entry(*args, **kw):
if 'phases' in kw:
kw.pop('phases')
return fn(*args, **kw)
return entry
# BLS is turned on by default, it can be disabled in tests by overriding this, or using `--disable-bls`.
# *This is for performance purposes during TESTING, DO NOT DISABLE IN PRODUCTION*.
# The runner of the test can indicate the preferred setting (test generators prefer BLS to be ON).
# - Some tests are marked as BLS-requiring, and ignore this setting.
# (tests that express differences caused by BLS, e.g. invalid signatures being rejected)
# - Some other tests are marked as BLS-ignoring, and ignore this setting.
# (tests that are heavily performance impacted / require unsigned state transitions)
# - Most tests respect the BLS setting.
DEFAULT_BLS_ACTIVE = True
is_pytest = True
def dump_skipping_message(reason: str) -> None:
message = f"[Skipped test] {reason}"
if is_pytest:
pytest.skip(message)
else:
raise SkippedTest(message)
def description(case_description: str):
def entry(fn):
return with_meta_tags({'description': case_description})(fn)
return entry
def spec_test(fn):
# Bls switch must be wrapped by vector_test,
# to fully go through the yielded bls switch data, before setting back the BLS setting.
# A test may apply BLS overrides such as @always_bls,
# but if it yields data (n.b. @always_bls yields the bls setting), it should be wrapped by this decorator.
    # This is why @always_bls has its own bls switch, since the override is beyond the reach of the outer switch.
return vector_test()(bls_switch(fn))
# shorthand for decorating @spec_test @with_state @single_phase
def spec_state_test(fn):
return spec_test(with_state(single_phase(fn)))
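# Hedged equivalence sketch (hypothetical test, commented out): combined with a
# phase decorator such as @with_all_phases, the shorthand above behaves the
# same as stacking the three decorators it composes.
#
# @spec_state_test    # i.e. @spec_test, @with_state, @single_phase
# def test_example(spec, state):
#     yield 'pre', state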
def spec_configured_state_test(conf):
overrides = with_config_overrides(conf)
def decorator(fn):
return spec_test(overrides(with_state(single_phase(fn))))
return decorator
def _check_current_version(spec, state, version_name):
fork_version_field = version_name.upper() + '_FORK_VERSION'
try:
fork_version = getattr(spec.config, fork_version_field)
except Exception:
return False
else:
return state.fork.current_version == fork_version
def config_fork_epoch_overrides(spec, state):
if state.fork.current_version == spec.config.GENESIS_FORK_VERSION:
return {}
for fork in ALL_PHASES:
if fork != PHASE0 and _check_current_version(spec, state, fork):
overrides = {}
for f in ALL_PHASES:
if f != PHASE0 and is_post_fork(fork, f):
overrides[f.upper() + '_FORK_EPOCH'] = spec.GENESIS_EPOCH
return overrides
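# Hedged worked example of the override computation above: if the state's
# current fork version equals, say, BELLATRIX_FORK_VERSION, then every
# post-phase0 fork up to and including bellatrix gets its *_FORK_EPOCH
# overridden to GENESIS_EPOCH, matching a state that forked at genesis.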
def with_matching_spec_config(emitted_fork=None):
def decorator(fn):
def wrapper(*args, spec: Spec, **kw):
overrides = config_fork_epoch_overrides(spec, kw['state'])
deco = with_config_overrides(overrides, emitted_fork)
return deco(fn)(*args, spec=spec, **kw)
return wrapper
return decorator
def spec_state_test_with_matching_config(fn):
return spec_test(with_state(with_matching_spec_config()(single_phase(fn))))
def expect_assertion_error(fn):
bad = False
try:
fn()
bad = True
except AssertionError:
pass
except IndexError:
        # Index errors are special; the spec is not explicit on bounds checking, so an IndexError is treated like a failed assert.
pass
if bad:
raise AssertionError('expected an assertion error, but got none.')
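def _example_expect_assertion_error():
    # Minimal runnable sketch: this passes silently because `failing` raises
    # AssertionError, and would itself raise if `failing` completed normally.
    def failing():
        assert False
    expect_assertion_error(failing)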
def never_bls(fn):
"""
Decorator to apply on ``bls_switch`` decorator to force BLS de-activation. Useful to mark tests as BLS-ignorant.
This decorator may only be applied to yielding spec test functions, and should be wrapped by vector_test,
as the yielding needs to complete before setting back the BLS setting.
"""
def entry(*args, **kw):
# override bls setting
kw['bls_active'] = False
return bls_switch(fn)(*args, **kw)
return with_meta_tags({'bls_setting': 2})(entry)
def always_bls(fn):
"""
Decorator to apply on ``bls_switch`` decorator to force BLS activation. Useful to mark tests as BLS-dependent.
This decorator may only be applied to yielding spec test functions, and should be wrapped by vector_test,
as the yielding needs to complete before setting back the BLS setting.
"""
def entry(*args, **kw):
# override bls setting
kw['bls_active'] = True
return bls_switch(fn)(*args, **kw)
return with_meta_tags({'bls_setting': 1})(entry)
def bls_switch(fn):
"""
Decorator to make a function execute with BLS ON, or BLS off.
Based on an optional bool argument ``bls_active``, passed to the function at runtime.
This decorator may only be applied to yielding spec test functions, and should be wrapped by vector_test,
as the yielding needs to complete before setting back the BLS setting.
"""
def entry(*args, **kw):
old_state = bls.bls_active
bls.bls_active = kw.pop('bls_active', DEFAULT_BLS_ACTIVE)
res = fn(*args, **kw)
if res is not None:
yield from res
bls.bls_active = old_state
return entry
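# Hedged sketch of the runtime switch (hypothetical wrapped generator
# `my_yielding_test`): callers control BLS per invocation via the `bls_active`
# keyword, which is popped before the wrapped function runs and restored after.
#
# for part in my_yielding_test(spec=spec, state=state, bls_active=False):
#     ...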
def disable_process_reveal_deadlines(fn):
"""
Decorator to make a function execute with `process_reveal_deadlines` OFF.
This is for testing long-range epochs transition without considering the reveal-deadline slashing effect.
"""
def entry(*args, spec: Spec, **kw):
if hasattr(spec, 'process_reveal_deadlines'):
old_state = spec.process_reveal_deadlines
spec.process_reveal_deadlines = lambda state: None
yield from fn(*args, spec=spec, **kw)
if hasattr(spec, 'process_reveal_deadlines'):
spec.process_reveal_deadlines = old_state
return with_meta_tags({'reveal_deadlines_setting': 1})(entry)
def with_all_phases(fn):
"""
A decorator for running a test with every phase
"""
return with_phases(ALL_PHASES)(fn)
def with_all_phases_from(earliest_phase):
"""
    A decorator factory for running a test with every phase from the given earliest phase onward
"""
def decorator(fn):
return with_phases([phase for phase in ALL_PHASES if is_post_fork(phase, earliest_phase)])(fn)
return decorator
def with_all_phases_except(exclusion_phases):
"""
    A decorator factory for running a test with every phase except the ones listed
"""
def decorator(fn):
return with_phases([phase for phase in ALL_PHASES if phase not in exclusion_phases])(fn)
return decorator
def _get_preset_targets(kw):
preset_name = DEFAULT_TEST_PRESET
if 'preset' in kw:
preset_name = kw.pop('preset')
return spec_targets[preset_name]
def _get_run_phases(phases, kw):
"""
Return the fork names for the base `spec` in test cases
"""
if 'phase' in kw:
# Limit phases if one explicitly specified
phase = kw.pop('phase')
if phase not in phases:
dump_skipping_message(f"doesn't support this fork: {phase}")
return None
run_phases = [phase]
else:
# If pytest `--fork` flag is set, filter out the rest of the forks
run_phases = set(phases).intersection(DEFAULT_PYTEST_FORKS)
return run_phases
def _get_available_phases(run_phases, other_phases):
"""
Return the available fork names for multi-phase tests
"""
available_phases = set(run_phases)
if other_phases is not None:
available_phases |= set(other_phases)
return available_phases
def _run_test_case_with_phases(fn, phases, other_phases, kw, args, is_fork_transition=False):
run_phases = _get_run_phases(phases, kw)
if len(run_phases) == 0:
if not is_fork_transition:
dump_skipping_message("none of the recognized phases are executable, skipping test.")
return None
available_phases = _get_available_phases(run_phases, other_phases)
targets = _get_preset_targets(kw)
# Populate all phases for multi-phase tests
phase_dir = {}
for phase in available_phases:
phase_dir[phase] = targets[phase]
    # Return is ignored whenever multiple phases are run.
# This return is for test generators to emit python generators (yielding test vector outputs)
for phase in run_phases:
ret = fn(spec=targets[phase], phases=phase_dir, *args, **kw)
return ret
def with_phases(phases, other_phases=None):
"""
Decorator factory that returns a decorator that runs a test for the appropriate phases.
Additional phases that do not initially run, but are made available through the test, are optional.
"""
def decorator(fn):
def wrapper(*args, **kw):
if 'fork_metas' in kw:
fork_metas = kw.pop('fork_metas')
if 'phase' in kw:
# When running test generator, it sets specific `phase`
phase = kw['phase']
_phases = [phase]
_other_phases = [ALL_FORK_UPGRADES[phase]]
ret = _run_test_case_with_phases(fn, _phases, _other_phases, kw, args, is_fork_transition=True)
else:
# When running pytest, go through `fork_metas` instead of using `phases`
for fork_meta in fork_metas:
_phases = [fork_meta.pre_fork_name]
_other_phases = [fork_meta.post_fork_name]
ret = _run_test_case_with_phases(fn, _phases, _other_phases, kw, args, is_fork_transition=True)
else:
ret = _run_test_case_with_phases(fn, phases, other_phases, kw, args)
return ret
return wrapper
return decorator
def with_presets(preset_bases, reason=None):
available_presets = set(preset_bases)
def decorator(fn):
def wrapper(*args, spec: Spec, **kw):
if spec.config.PRESET_BASE not in available_presets:
message = f"doesn't support this preset base: {spec.config.PRESET_BASE}."
if reason is not None:
message = f"{message} Reason: {reason}"
dump_skipping_message(message)
return None
return fn(*args, spec=spec, **kw)
return wrapper
return decorator
with_light_client = with_phases(LIGHT_CLIENT_TESTING_FORKS)
with_altair_and_later = with_all_phases_from(ALTAIR)
with_bellatrix_and_later = with_all_phases_from(BELLATRIX)
with_capella_and_later = with_all_phases_from(CAPELLA)
with_deneb_and_later = with_all_phases_from(DENEB)
with_eip6110_and_later = with_all_phases_from(EIP6110)
class quoted_str(str):
pass
def _get_basic_dict(ssz_dict: Dict[str, Any]) -> Dict[str, Any]:
"""
Get dict of basic types from a dict of SSZ objects.
"""
result = {}
for k, v in ssz_dict.items():
if isinstance(v, int):
value = int(v)
elif isinstance(v, bytes):
value = bytes(bytearray(v))
else:
value = quoted_str(v)
result[k] = value
return result
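def _example_get_basic_dict():
    # Minimal runnable sketch with built-ins standing in for SSZ values:
    # ints and bytes pass through, anything else becomes a quoted string.
    out = _get_basic_dict({'EPOCHS': 4, 'ROOT': b'\x00', 'VERSION': 'v1'})
    # out == {'EPOCHS': 4, 'ROOT': b'\x00', 'VERSION': quoted_str('v1')}
    return out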
def get_copy_of_spec(spec):
fork = spec.fork
preset = spec.config.PRESET_BASE
module_path = f"eth2spec.{fork}.{preset}"
module_spec = importlib.util.find_spec(module_path)
module = importlib.util.module_from_spec(module_spec)
module_spec.loader.exec_module(module)
# Preserve existing config overrides
module.config = deepcopy(spec.config)
return module
def spec_with_config_overrides(spec, config_overrides):
# apply our overrides to a copy of it, and apply it to the spec
config = spec.config._asdict()
config.update((k, config_overrides[k]) for k in config.keys() & config_overrides.keys())
config_types = spec.Configuration.__annotations__
modified_config = {k: config_types[k](v) for k, v in config.items()}
spec.config = spec.Configuration(**modified_config)
# To output the changed config in a format compatible with yaml test vectors,
# the dict SSZ objects have to be converted into Python built-in types.
output_config = _get_basic_dict(modified_config)
return spec, output_config
def with_config_overrides(config_overrides, emitted_fork=None, emit=True):
"""
WARNING: the spec_test decorator must wrap this, to ensure the decorated test actually runs.
    This decorator forces the test to yield, but pytest does not run generator tests; it silently passes them instead.
Use 'spec_configured_state_test' instead of 'spec_state_test' if you are unsure.
This is a decorator that applies a dict of config value overrides to the spec during execution.
"""
def decorator(fn):
def wrapper(*args, spec: Spec, **kw):
# Apply config overrides to spec
spec, output_config = spec_with_config_overrides(get_copy_of_spec(spec), config_overrides)
# Apply config overrides to additional phases, if present
if 'phases' in kw:
phases = {}
for fork in kw['phases']:
phases[fork], output = spec_with_config_overrides(
get_copy_of_spec(kw['phases'][fork]), config_overrides)
if emitted_fork == fork:
output_config = output
kw['phases'] = phases
# Emit requested spec (with overrides)
if emit:
yield 'config', 'cfg', output_config
# Run the function
out = fn(*args, spec=spec, **kw)
# If it's not returning None like a normal test function,
# it's generating things, and we need to complete it before setting back the config.
if out is not None:
yield from out
return wrapper
return decorator
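# Hedged usage sketch (hypothetical override value, commented out to avoid
# pytest collection): overrides are applied to a fresh copy of the spec, and
# the resulting config is emitted as a 'config' part of the test vector.
#
# @with_all_phases
# @spec_configured_state_test({'MIN_GENESIS_ACTIVE_VALIDATOR_COUNT': 16})
# def test_with_small_genesis_count(spec, state):
#     yield 'pre', state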
def only_generator(reason):
def _decorator(inner):
def _wrapper(*args, **kwargs):
if is_pytest:
dump_skipping_message(reason)
return None
return inner(*args, **kwargs)
return _wrapper
return _decorator
def with_test_suite_name(suite_name: str):
def _decorator(inner):
inner.suite_name = suite_name
return inner
return _decorator
#
# Fork transition state tests
#
def set_fork_metas(fork_metas: Sequence[ForkMeta]):
def decorator(fn):
def wrapper(*args, **kwargs):
return fn(*args, fork_metas=fork_metas, **kwargs)
return wrapper
return decorator
def with_fork_metas(fork_metas: Sequence[ForkMeta]):
"""
A decorator to construct a "transition" test from one fork to another.
Decorator takes a list of `ForkMeta` and each item defines `pre_fork_name`,
`post_fork_name`, and `fork_epoch`.
Decorator assumes a transition from the `pre_fork_name` fork to the
    `post_fork_name` fork. The user can supply a `fork_epoch` at which the
    fork occurs, or compute one during the test (yielding it to the generator)
    if more custom behavior is desired.
A test using this decorator should expect to receive as parameters:
`state`: the default state constructed for the `pre_fork_name` fork
according to the `with_state` decorator.
`fork_epoch`: the `fork_epoch` provided to this decorator, if given.
`spec`: the version of the eth2 spec corresponding to `pre_fork_name`.
`post_spec`: the version of the eth2 spec corresponding to `post_fork_name`.
`pre_tag`: a function to tag data as belonging to `pre_fork_name` fork.
Used to discriminate data during consumption of the generated spec tests.
`post_tag`: a function to tag data as belonging to `post_fork_name` fork.
Used to discriminate data during consumption of the generated spec tests.
"""
run_yield_fork_meta = yield_fork_meta(fork_metas)
run_with_phases = with_phases(ALL_PHASES)
run_set_fork_metas = set_fork_metas(fork_metas)
def decorator(fn):
return run_set_fork_metas(run_with_phases(spec_test(with_state(run_yield_fork_meta(fn)))))
return decorator
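# Hedged usage sketch (hypothetical fork epoch, commented out to avoid pytest
# collection) of the parameters described in the docstring above:
#
# @with_fork_metas([ForkMeta(pre_fork_name=PHASE0, post_fork_name=ALTAIR, fork_epoch=2)])
# def test_transition_example(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
#     ...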
def yield_fork_meta(fork_metas: Sequence[ForkMeta]):
"""
Yield meta fields to `meta.yaml` and pass post spec and meta fields to `fn`.
"""
def decorator(fn):
def wrapper(*args, **kw):
phases = kw.pop('phases')
spec = kw["spec"]
try:
fork_meta = next(filter(lambda m: m.pre_fork_name == spec.fork, fork_metas))
except StopIteration:
dump_skipping_message(f"doesn't support this fork: {spec.fork}")
post_spec = phases[fork_meta.post_fork_name]
# Reset counter
pre_fork_counter = 0
def pre_tag(obj):
nonlocal pre_fork_counter
pre_fork_counter += 1
return obj
def post_tag(obj):
return obj
yield "post_fork", "meta", fork_meta.post_fork_name
has_fork_epoch = False
if fork_meta.fork_epoch:
kw["fork_epoch"] = fork_meta.fork_epoch
has_fork_epoch = True
yield "fork_epoch", "meta", fork_meta.fork_epoch
result = fn(
*args,
post_spec=post_spec,
pre_tag=pre_tag,
post_tag=post_tag,
**kw,
)
if result is not None:
for part in result:
if part[0] == "fork_epoch":
has_fork_epoch = True
yield part
assert has_fork_epoch
if pre_fork_counter > 0:
yield "fork_block", "meta", pre_fork_counter - 1
return wrapper
return decorator
| 25,667 | 33.224 | 119 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/exceptions.py
|
class SkippedTest(Exception):
...
class BlockNotFoundException(Exception):
...
| 89 | 11.857143 | 40 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/conftest.py
|
from eth2spec.test import context
from eth2spec.test.helpers.constants import (
ALL_PHASES,
)
from eth2spec.utils import bls as bls_utils
# We import pytest only when it's present, i.e. when we are running tests.
# The test-cases themselves can be generated without installing pytest.
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
def fixture(*args, **kwargs):
if module_exists("pytest"):
import pytest
return pytest.fixture(*args, **kwargs)
else:
def ignore():
pass
return ignore
def pytest_addoption(parser):
parser.addoption(
"--preset", action="store", type=str, default="minimal",
help="preset: make the pyspec use the specified preset"
)
parser.addoption(
"--fork", action="append", type=str,
help=(
"fork: make the pyspec only run with the specified phase."
" To run multiple phases, e.g., --fork=phase0 --fork=altair"
)
)
parser.addoption(
"--disable-bls", action="store_true", default=False,
help="bls-default: make tests that are not dependent on BLS run without BLS"
)
parser.addoption(
"--bls-type", action="store", type=str, default="py_ecc", choices=["py_ecc", "milagro", "arkworks", "fastest"],
help=(
"bls-type: use specified BLS implementation;"
"fastest: use milagro for signatures and arkworks for everything else (e.g. KZG)"
)
)
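# Hedged invocation sketch: the options above are typically combined on the
# pytest command line, e.g.
#   pytest --preset=mainnet --fork=altair --fork=bellatrix --bls-type=fastest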
def _validate_fork_name(forks):
for fork in forks:
if fork not in set(ALL_PHASES):
raise ValueError(
f'The given --fork argument "{fork}" is not an available fork.'
f' The available forks: {ALL_PHASES}'
)
@fixture(autouse=True)
def preset(request):
context.DEFAULT_TEST_PRESET = request.config.getoption("--preset")
@fixture(autouse=True)
def run_phases(request):
forks = request.config.getoption("--fork", default=None)
if forks:
forks = [fork.lower() for fork in forks]
_validate_fork_name(forks)
context.DEFAULT_PYTEST_FORKS = set(forks)
else:
context.DEFAULT_PYTEST_FORKS = ALL_PHASES
@fixture(autouse=True)
def bls_default(request):
disable_bls = request.config.getoption("--disable-bls")
if disable_bls:
context.DEFAULT_BLS_ACTIVE = False
@fixture(autouse=True)
def bls_type(request):
bls_type = request.config.getoption("--bls-type")
if bls_type == "py_ecc":
bls_utils.use_py_ecc()
elif bls_type == "milagro":
bls_utils.use_milagro()
elif bls_type == "arkworks":
bls_utils.use_arkworks()
elif bls_type == "fastest":
bls_utils.use_fastest()
else:
raise Exception(f"unrecognized bls type: {bls_type}")
| 2,887 | 27.88 | 119 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/__init__.py
| 0 | 0 | 0 |
py
|
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/eip6110/__init__.py
| 0 | 0 | 0 |
py
|
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/eip6110/block_processing/__init__.py
| 0 | 0 | 0 |
py
|
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/eip6110/block_processing/test_process_deposit_receipt.py
|
from eth2spec.test.context import spec_state_test, always_bls, with_eip6110_and_later
from eth2spec.test.helpers.deposits import (
prepare_deposit_receipt,
run_deposit_receipt_processing,
run_deposit_receipt_processing_with_specific_fork_version
)
from eth2spec.test.helpers.state import next_epoch_via_block
from eth2spec.test.helpers.withdrawals import set_validator_fully_withdrawable
@with_eip6110_and_later
@spec_state_test
def test_new_deposit_under_max(spec, state):
# fresh deposit = next validator index = validator appended to registry
validator_index = len(state.validators)
# effective balance will be 1 EFFECTIVE_BALANCE_INCREMENT smaller because of this small decrement.
amount = spec.MAX_EFFECTIVE_BALANCE - 1
deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, signed=True)
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
@with_eip6110_and_later
@spec_state_test
def test_new_deposit_max(spec, state):
# fresh deposit = next validator index = validator appended to registry
validator_index = len(state.validators)
# effective balance will be exactly the same as balance.
amount = spec.MAX_EFFECTIVE_BALANCE
deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, signed=True)
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
@with_eip6110_and_later
@spec_state_test
def test_new_deposit_over_max(spec, state):
# fresh deposit = next validator index = validator appended to registry
validator_index = len(state.validators)
# just 1 over the limit, effective balance should be set MAX_EFFECTIVE_BALANCE during processing
amount = spec.MAX_EFFECTIVE_BALANCE + 1
deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, signed=True)
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
@with_eip6110_and_later
@spec_state_test
def test_new_deposit_eth1_withdrawal_credentials(spec, state):
# fresh deposit = next validator index = validator appended to registry
validator_index = len(state.validators)
withdrawal_credentials = (
spec.ETH1_ADDRESS_WITHDRAWAL_PREFIX
+ b'\x00' * 11 # specified 0s
+ b'\x59' * 20 # a 20-byte eth1 address
)
amount = spec.MAX_EFFECTIVE_BALANCE
deposit_receipt = prepare_deposit_receipt(
spec,
validator_index,
amount,
withdrawal_credentials=withdrawal_credentials,
signed=True,
)
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
@with_eip6110_and_later
@spec_state_test
def test_new_deposit_non_versioned_withdrawal_credentials(spec, state):
# fresh deposit = next validator index = validator appended to registry
validator_index = len(state.validators)
withdrawal_credentials = (
b'\xFF' # Non specified withdrawal credentials version
        + b'\x02' * 31  # Garbage bytes
)
amount = spec.MAX_EFFECTIVE_BALANCE
deposit_receipt = prepare_deposit_receipt(
spec,
validator_index,
amount,
withdrawal_credentials=withdrawal_credentials,
signed=True,
)
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
@with_eip6110_and_later
@spec_state_test
@always_bls
def test_correct_sig_but_forked_state(spec, state):
validator_index = len(state.validators)
amount = spec.MAX_EFFECTIVE_BALANCE
# deposits will always be valid, regardless of the current fork
state.fork.current_version = spec.Version('0x1234abcd')
deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, signed=True)
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
@with_eip6110_and_later
@spec_state_test
@always_bls
def test_incorrect_sig_new_deposit(spec, state):
# fresh deposit = next validator index = validator appended to registry
validator_index = len(state.validators)
amount = spec.MAX_EFFECTIVE_BALANCE
deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount)
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index, effective=False)
@with_eip6110_and_later
@spec_state_test
def test_top_up__max_effective_balance(spec, state):
validator_index = 0
amount = spec.MAX_EFFECTIVE_BALANCE // 4
deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, signed=True)
state.balances[validator_index] = spec.MAX_EFFECTIVE_BALANCE
state.validators[validator_index].effective_balance = spec.MAX_EFFECTIVE_BALANCE
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
assert state.balances[validator_index] == spec.MAX_EFFECTIVE_BALANCE + amount
assert state.validators[validator_index].effective_balance == spec.MAX_EFFECTIVE_BALANCE
@with_eip6110_and_later
@spec_state_test
def test_top_up__less_effective_balance(spec, state):
validator_index = 0
amount = spec.MAX_EFFECTIVE_BALANCE // 4
deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, signed=True)
initial_balance = spec.MAX_EFFECTIVE_BALANCE - 1000
initial_effective_balance = spec.MAX_EFFECTIVE_BALANCE - spec.EFFECTIVE_BALANCE_INCREMENT
state.balances[validator_index] = initial_balance
state.validators[validator_index].effective_balance = initial_effective_balance
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
assert state.balances[validator_index] == initial_balance + amount
# unchanged effective balance
assert state.validators[validator_index].effective_balance == initial_effective_balance
@with_eip6110_and_later
@spec_state_test
def test_top_up__zero_balance(spec, state):
validator_index = 0
amount = spec.MAX_EFFECTIVE_BALANCE // 4
deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, signed=True)
initial_balance = 0
initial_effective_balance = 0
state.balances[validator_index] = initial_balance
state.validators[validator_index].effective_balance = initial_effective_balance
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
assert state.balances[validator_index] == initial_balance + amount
# unchanged effective balance
assert state.validators[validator_index].effective_balance == initial_effective_balance
@with_eip6110_and_later
@spec_state_test
@always_bls
def test_incorrect_sig_top_up(spec, state):
validator_index = 0
amount = spec.MAX_EFFECTIVE_BALANCE // 4
deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount)
# invalid signatures, in top-ups, are allowed!
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
@with_eip6110_and_later
@spec_state_test
def test_incorrect_withdrawal_credentials_top_up(spec, state):
validator_index = 0
amount = spec.MAX_EFFECTIVE_BALANCE // 4
withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(b"junk")[1:]
deposit_receipt = prepare_deposit_receipt(
spec,
validator_index,
amount,
withdrawal_credentials=withdrawal_credentials
)
# inconsistent withdrawal credentials, in top-ups, are allowed!
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
@with_eip6110_and_later
@spec_state_test
def test_key_validate_invalid_subgroup(spec, state):
validator_index = len(state.validators)
amount = spec.MAX_EFFECTIVE_BALANCE
# All-zero pubkey would not pass `bls.KeyValidate`, but `process_deposit` would not throw exception.
pubkey = b'\x00' * 48
deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, pubkey=pubkey, signed=True)
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
@with_eip6110_and_later
@spec_state_test
def test_key_validate_invalid_decompression(spec, state):
validator_index = len(state.validators)
amount = spec.MAX_EFFECTIVE_BALANCE
# `deserialization_fails_infinity_with_true_b_flag` BLS G1 deserialization test case.
# This pubkey would not pass `bls.KeyValidate`, but `process_deposit` would not throw exception.
pubkey_hex = 'c01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
pubkey = bytes.fromhex(pubkey_hex)
deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, pubkey=pubkey, signed=True)
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
@with_eip6110_and_later
@spec_state_test
@always_bls
def test_ineffective_deposit_with_previous_fork_version(spec, state):
# Since deposits are valid across forks, the domain is always set with `GENESIS_FORK_VERSION`.
# It's an ineffective deposit because it fails at BLS sig verification.
# NOTE: it was effective in Altair.
assert state.fork.previous_version != state.fork.current_version
yield from run_deposit_receipt_processing_with_specific_fork_version(
spec,
state,
fork_version=state.fork.previous_version,
effective=False,
)
@with_eip6110_and_later
@spec_state_test
@always_bls
def test_effective_deposit_with_genesis_fork_version(spec, state):
assert spec.config.GENESIS_FORK_VERSION not in (state.fork.previous_version, state.fork.current_version)
yield from run_deposit_receipt_processing_with_specific_fork_version(
spec,
state,
fork_version=spec.config.GENESIS_FORK_VERSION,
)
@with_eip6110_and_later
@spec_state_test
def test_success_top_up_to_withdrawn_validator(spec, state):
validator_index = 0
# Fully withdraw validator
set_validator_fully_withdrawable(spec, state, validator_index)
assert state.balances[validator_index] > 0
next_epoch_via_block(spec, state)
assert state.balances[validator_index] == 0
assert state.validators[validator_index].effective_balance > 0
next_epoch_via_block(spec, state)
assert state.validators[validator_index].effective_balance == 0
# Make a top-up balance to validator
amount = spec.MAX_EFFECTIVE_BALANCE // 4
deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, len(state.validators), signed=True)
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
assert state.balances[validator_index] == amount
assert state.validators[validator_index].effective_balance == 0
validator = state.validators[validator_index]
balance = state.balances[validator_index]
current_epoch = spec.get_current_epoch(state)
assert spec.is_fully_withdrawable_validator(validator, balance, current_epoch)
| 10,954 | 37.710247 | 115 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/eip6110/sanity/__init__.py
| 0 | 0 | 0 |
py
|
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/eip6110/sanity/blocks/__init__.py
|
from .test_deposit_transition import * # noqa: F401 F403
| 58 | 28.5 | 57 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/eip6110/sanity/blocks/test_deposit_transition.py
|
from eth2spec.test.helpers.block import (
build_empty_block_for_next_slot,
)
from eth2spec.test.context import (
spec_state_test,
with_phases,
EIP6110,
)
from eth2spec.test.helpers.deposits import (
build_deposit_data,
deposit_from_context,
prepare_deposit_receipt,
)
from eth2spec.test.helpers.execution_payload import (
compute_el_block_hash,
)
from eth2spec.test.helpers.keys import privkeys, pubkeys
from eth2spec.test.helpers.state import (
state_transition_and_sign_block
)
def run_deposit_transition_block(spec, state, block, top_up_keys=[], valid=True):
"""
Run ``process_block``, yielding:
- pre-state ('pre')
    - signed block ('blocks')
- post-state ('post').
If ``valid == False``, run expecting ``AssertionError``
"""
yield 'pre', state
signed_block = state_transition_and_sign_block(spec, state, block, not valid)
yield 'blocks', [signed_block]
yield 'post', state if valid else None
# Check that deposits are applied
if valid:
expected_pubkeys = [d.data.pubkey for d in block.body.deposits]
deposit_receipts = block.body.execution_payload.deposit_receipts
expected_pubkeys = expected_pubkeys + [d.pubkey for d in deposit_receipts if (d.pubkey not in top_up_keys)]
actual_pubkeys = [v.pubkey for v in state.validators[len(state.validators) - len(expected_pubkeys):]]
assert actual_pubkeys == expected_pubkeys
def prepare_state_and_block(spec,
state,
deposit_cnt,
deposit_receipt_cnt,
first_deposit_receipt_index=0,
deposit_receipts_start_index=None,
eth1_data_deposit_count=None):
deposits = []
deposit_receipts = []
keypair_index = len(state.validators)
# Prepare deposits
deposit_data_list = []
for index in range(deposit_cnt):
deposit_data = build_deposit_data(spec,
pubkeys[keypair_index],
privkeys[keypair_index],
# use max effective balance
spec.MAX_EFFECTIVE_BALANCE,
# insecurely use pubkey as withdrawal key
spec.BLS_WITHDRAWAL_PREFIX + spec.hash(pubkeys[keypair_index])[1:],
signed=True)
deposit_data_list.append(deposit_data)
keypair_index += 1
deposit_root = None
for index in range(deposit_cnt):
deposit, deposit_root, _ = deposit_from_context(spec, deposit_data_list, index)
deposits.append(deposit)
if deposit_root:
state.eth1_deposit_index = 0
if not eth1_data_deposit_count:
eth1_data_deposit_count = deposit_cnt
state.eth1_data = spec.Eth1Data(deposit_root=deposit_root,
deposit_count=eth1_data_deposit_count,
block_hash=state.eth1_data.block_hash)
# Prepare deposit receipts
for offset in range(deposit_receipt_cnt):
deposit_receipt = prepare_deposit_receipt(spec,
keypair_index,
# use max effective balance
spec.MAX_EFFECTIVE_BALANCE,
first_deposit_receipt_index + offset,
signed=True)
deposit_receipts.append(deposit_receipt)
keypair_index += 1
# Set start index if defined
if deposit_receipts_start_index:
state.deposit_receipts_start_index = deposit_receipts_start_index
block = build_empty_block_for_next_slot(spec, state)
# Assign deposits and deposit receipts
block.body.deposits = deposits
block.body.execution_payload.deposit_receipts = deposit_receipts
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
return state, block
@with_phases([EIP6110])
@spec_state_test
def test_deposit_transition__start_index_is_set(spec, state):
# 0 deposits, 2 deposit receipts, unset deposit_receipts_start_index
state, block = prepare_state_and_block(spec, state,
deposit_cnt=0,
deposit_receipt_cnt=2,
first_deposit_receipt_index=state.eth1_data.deposit_count + 11)
yield from run_deposit_transition_block(spec, state, block)
# deposit_receipts_start_index must be set to the index of the first receipt
assert state.deposit_receipts_start_index == block.body.execution_payload.deposit_receipts[0].index
@with_phases([EIP6110])
@spec_state_test
def test_deposit_transition__process_eth1_deposits(spec, state):
# 3 deposits, 1 deposit receipt, state.eth1_data.deposit_count < state.deposit_receipts_start_index
state, block = prepare_state_and_block(spec, state,
deposit_cnt=3,
deposit_receipt_cnt=1,
first_deposit_receipt_index=11,
deposit_receipts_start_index=7)
yield from run_deposit_transition_block(spec, state, block)
@with_phases([EIP6110])
@spec_state_test
def test_deposit_transition__process_max_eth1_deposits(spec, state):
# spec.MAX_DEPOSITS deposits, 1 deposit receipt, state.eth1_data.deposit_count > state.deposit_receipts_start_index
# state.deposit_receipts_start_index == spec.MAX_DEPOSITS
state, block = prepare_state_and_block(spec, state,
deposit_cnt=spec.MAX_DEPOSITS,
deposit_receipt_cnt=1,
first_deposit_receipt_index=spec.MAX_DEPOSITS + 1,
deposit_receipts_start_index=spec.MAX_DEPOSITS,
eth1_data_deposit_count=23)
yield from run_deposit_transition_block(spec, state, block)
@with_phases([EIP6110])
@spec_state_test
def test_deposit_transition__process_eth1_deposits_up_to_start_index(spec, state):
# 3 deposits, 1 deposit receipt, state.eth1_data.deposit_count == state.deposit_receipts_start_index
state, block = prepare_state_and_block(spec, state,
deposit_cnt=3,
deposit_receipt_cnt=1,
first_deposit_receipt_index=7,
deposit_receipts_start_index=3)
yield from run_deposit_transition_block(spec, state, block)
@with_phases([EIP6110])
@spec_state_test
def test_deposit_transition__invalid_not_enough_eth1_deposits(spec, state):
# 3 deposits, 1 deposit receipt, state.eth1_data.deposit_count < state.deposit_receipts_start_index
state, block = prepare_state_and_block(spec, state,
deposit_cnt=3,
deposit_receipt_cnt=1,
first_deposit_receipt_index=29,
deposit_receipts_start_index=23,
eth1_data_deposit_count=17)
yield from run_deposit_transition_block(spec, state, block, valid=False)
@with_phases([EIP6110])
@spec_state_test
def test_deposit_transition__invalid_too_many_eth1_deposits(spec, state):
# 3 deposits, 1 deposit receipt, state.eth1_data.deposit_count < state.eth1_data_index
state, block = prepare_state_and_block(spec, state,
deposit_cnt=3,
deposit_receipt_cnt=1,
first_deposit_receipt_index=11,
deposit_receipts_start_index=7,
eth1_data_deposit_count=2)
yield from run_deposit_transition_block(spec, state, block, valid=False)
@with_phases([EIP6110])
@spec_state_test
def test_deposit_transition__invalid_eth1_deposits_overlap_in_protocol_deposits(spec, state):
# spec.MAX_DEPOSITS deposits, 1 deposit receipt, state.eth1_data.deposit_count > state.deposit_receipts_start_index
# state.deposit_receipts_start_index == spec.MAX_DEPOSITS - 1
state, block = prepare_state_and_block(spec, state,
deposit_cnt=spec.MAX_DEPOSITS,
deposit_receipt_cnt=1,
first_deposit_receipt_index=spec.MAX_DEPOSITS,
deposit_receipts_start_index=spec.MAX_DEPOSITS - 1,
eth1_data_deposit_count=23)
yield from run_deposit_transition_block(spec, state, block, valid=False)
@with_phases([EIP6110])
@spec_state_test
def test_deposit_transition__deposit_and_top_up_same_block(spec, state):
# 1 deposit, 1 deposit receipt that top ups deposited validator
state, block = prepare_state_and_block(spec, state,
deposit_cnt=1,
deposit_receipt_cnt=1,
first_deposit_receipt_index=11,
deposit_receipts_start_index=7)
# Artificially assign deposit's pubkey to a deposit receipt of the same block
top_up_keys = [block.body.deposits[0].data.pubkey]
block.body.execution_payload.deposit_receipts[0].pubkey = top_up_keys[0]
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
yield from run_deposit_transition_block(spec, state, block, top_up_keys=top_up_keys)
# Check the top up
expected_balance = block.body.deposits[0].data.amount + block.body.execution_payload.deposit_receipts[0].amount
assert state.balances[len(state.balances) - 1] == expected_balance
| 10,366 | 44.073913 | 119 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/shard_block.py
|
from eth2spec.test.helpers.block import get_state_and_beacon_parent_root_at_slot
from eth2spec.test.helpers.keys import privkeys
from eth2spec.utils import bls
from eth2spec.utils.bls import only_with_bls
@only_with_bls()
def sign_shard_block(spec, beacon_state, shard, block, proposer_index=None):
slot = block.message.slot
if proposer_index is None:
proposer_index = spec.get_shard_proposer_index(beacon_state, slot, shard)
privkey = privkeys[proposer_index]
domain = spec.get_domain(beacon_state, spec.DOMAIN_SHARD_PROPOSAL, spec.compute_epoch_at_slot(slot))
signing_root = spec.compute_signing_root(block.message, domain)
block.signature = bls.Sign(privkey, signing_root)
def build_shard_block(spec,
beacon_state,
shard,
slot=None,
body=None,
shard_parent_state=None,
signed=False):
if shard_parent_state is None:
shard_parent_state = beacon_state.shard_states[shard]
if slot is None:
slot = shard_parent_state.slot + 1
if body is None:
body = get_sample_shard_block_body(spec)
beacon_state, beacon_parent_root = get_state_and_beacon_parent_root_at_slot(spec, beacon_state, slot)
proposer_index = spec.get_shard_proposer_index(beacon_state, slot, shard)
block = spec.ShardBlock(
shard_parent_root=shard_parent_state.latest_block_root,
beacon_parent_root=beacon_parent_root,
slot=slot,
shard=shard,
proposer_index=proposer_index,
body=body,
)
signed_block = spec.SignedShardBlock(
message=block,
)
if signed:
sign_shard_block(spec, beacon_state, shard, signed_block, proposer_index=proposer_index)
return signed_block
def get_shard_transitions(spec, parent_beacon_state, shard_block_dict):
shard_transitions = [spec.ShardTransition()] * spec.MAX_SHARDS
on_time_slot = parent_beacon_state.slot + 1
for shard, blocks in shard_block_dict.items():
shard_transition = spec.get_shard_transition(parent_beacon_state, shard, blocks)
offset_slots = spec.compute_offset_slots(
spec.get_latest_slot_for_shard(parent_beacon_state, shard),
on_time_slot,
)
        len_offset_slots = len(offset_slots)
if len(blocks) > 0:
shard_block_root = blocks[-1].message.hash_tree_root()
assert shard_transition.shard_states[len_offset_slots - 1].latest_block_root == shard_block_root
assert shard_transition.shard_states[len_offset_slots - 1].slot == offset_slots[-1]
shard_transitions[shard] = shard_transition
return shard_transitions
def get_committee_index_of_shard(spec, state, slot, shard): # Optional[CommitteeIndex]
active_shard_count = spec.get_active_shard_count(state)
committee_count = spec.get_committee_count_per_slot(state, spec.compute_epoch_at_slot(slot))
start_shard = spec.get_start_shard(state, slot)
for committee_index in range(committee_count):
if (start_shard + committee_index) % active_shard_count == shard:
return committee_index
return None
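# Hedged worked example of the mapping above: with active_shard_count == 64,
# start_shard == 60 and committee_count == 4, the slot's committees cover
# shards 60..63, so shard 62 resolves to committee_index 2 and shard 0 (not
# covered by any committee that slot) returns None.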
def get_sample_shard_block_body(spec, is_max=False):
size = spec.MAX_SHARD_BLOCK_SIZE if is_max else 128
return b'\x56' * size
| 3,443 | 37.696629 | 108 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/attestations.py
|
from lru import LRU
from typing import List
from eth2spec.test.context import expect_assertion_error
from eth2spec.test.helpers.state import state_transition_and_sign_block, next_epoch, next_slot
from eth2spec.test.helpers.block import build_empty_block_for_next_slot
from eth2spec.test.helpers.forks import is_post_altair, is_post_deneb
from eth2spec.test.helpers.keys import privkeys
from eth2spec.utils import bls
from eth2spec.utils.ssz.ssz_typing import Bitlist
def run_attestation_processing(spec, state, attestation, valid=True):
"""
Run ``process_attestation``, yielding:
- pre-state ('pre')
- attestation ('attestation')
- post-state ('post').
If ``valid == False``, run expecting ``AssertionError``
"""
# yield pre-state
yield 'pre', state
yield 'attestation', attestation
# If the attestation is invalid, processing is aborted, and there is no post-state.
if not valid:
expect_assertion_error(lambda: spec.process_attestation(state, attestation))
yield 'post', None
return
if not is_post_altair(spec):
current_epoch_count = len(state.current_epoch_attestations)
previous_epoch_count = len(state.previous_epoch_attestations)
# process attestation
spec.process_attestation(state, attestation)
# Make sure the attestation has been processed
if not is_post_altair(spec):
if attestation.data.target.epoch == spec.get_current_epoch(state):
assert len(state.current_epoch_attestations) == current_epoch_count + 1
else:
assert len(state.previous_epoch_attestations) == previous_epoch_count + 1
else:
# After accounting reform, there are cases when processing an attestation does not result in any flag updates
pass
# yield post-state
yield 'post', state
def build_attestation_data(spec, state, slot, index, shard=None):
assert state.slot >= slot
if slot == state.slot:
block_root = build_empty_block_for_next_slot(spec, state).parent_root
else:
block_root = spec.get_block_root_at_slot(state, slot)
current_epoch_start_slot = spec.compute_start_slot_at_epoch(spec.get_current_epoch(state))
if slot < current_epoch_start_slot:
epoch_boundary_root = spec.get_block_root(state, spec.get_previous_epoch(state))
elif slot == current_epoch_start_slot:
epoch_boundary_root = block_root
else:
epoch_boundary_root = spec.get_block_root(state, spec.get_current_epoch(state))
if slot < current_epoch_start_slot:
source_epoch = state.previous_justified_checkpoint.epoch
source_root = state.previous_justified_checkpoint.root
else:
source_epoch = state.current_justified_checkpoint.epoch
source_root = state.current_justified_checkpoint.root
data = spec.AttestationData(
slot=slot,
index=index,
beacon_block_root=block_root,
source=spec.Checkpoint(epoch=source_epoch, root=source_root),
target=spec.Checkpoint(epoch=spec.compute_epoch_at_slot(slot), root=epoch_boundary_root),
)
# if spec.fork == SHARDING # TODO: add extra data for shard voting
return data
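# Hedged worked example of the source selection above (SLOTS_PER_EPOCH == 8):
# attesting to slot 5 while state.slot == 12 means slot < current_epoch_start_slot
# (slot 8), so the previous justified checkpoint becomes the source and the
# epoch boundary root is taken from the previous epoch.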
def get_valid_attestation(spec,
state,
slot=None,
index=None,
filter_participant_set=None,
signed=False):
# If filter_participant_set filters everything, the attestation has 0 participants, and cannot be signed.
    # It is thus, strictly speaking, invalid unless a participant is added later.
if slot is None:
slot = state.slot
if index is None:
index = 0
attestation_data = build_attestation_data(
spec, state, slot=slot, index=index
)
beacon_committee = spec.get_beacon_committee(
state,
attestation_data.slot,
attestation_data.index,
)
committee_size = len(beacon_committee)
aggregation_bits = Bitlist[spec.MAX_VALIDATORS_PER_COMMITTEE](*([0] * committee_size))
attestation = spec.Attestation(
aggregation_bits=aggregation_bits,
data=attestation_data,
)
# fill the attestation with (optionally filtered) participants, and optionally sign it
fill_aggregate_attestation(spec, state, attestation, signed=signed, filter_participant_set=filter_participant_set)
return attestation
def sign_aggregate_attestation(spec, state, attestation_data, participants: List[int]):
signatures = []
for validator_index in participants:
privkey = privkeys[validator_index]
signatures.append(
get_attestation_signature(
spec,
state,
attestation_data,
privkey
)
)
return bls.Aggregate(signatures)
def sign_indexed_attestation(spec, state, indexed_attestation):
participants = indexed_attestation.attesting_indices
data = indexed_attestation.data
indexed_attestation.signature = sign_aggregate_attestation(spec, state, data, participants)
def sign_attestation(spec, state, attestation):
participants = spec.get_attesting_indices(
state,
attestation.data,
attestation.aggregation_bits,
)
attestation.signature = sign_aggregate_attestation(spec, state, attestation.data, participants)
def get_attestation_signature(spec, state, attestation_data, privkey):
domain = spec.get_domain(state, spec.DOMAIN_BEACON_ATTESTER, attestation_data.target.epoch)
signing_root = spec.compute_signing_root(attestation_data, domain)
return bls.Sign(privkey, signing_root)
def compute_max_inclusion_slot(spec, attestation):
if is_post_deneb(spec):
next_epoch = spec.compute_epoch_at_slot(attestation.data.slot) + 1
end_of_next_epoch = spec.compute_start_slot_at_epoch(next_epoch + 1) - 1
return end_of_next_epoch
return attestation.data.slot + spec.SLOTS_PER_EPOCH
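# Hedged worked example (minimal preset, SLOTS_PER_EPOCH == 8): an attestation
# at slot 5 (epoch 0) is includable through slot 13 pre-Deneb (slot plus one
# epoch), and through slot 15 post-Deneb (the last slot of the next epoch).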
def fill_aggregate_attestation(spec, state, attestation, signed=False, filter_participant_set=None):
"""
`signed`: Signing is optional.
`filter_participant_set`: Optional, filters the full committee indices set (default) to a subset that participates
"""
beacon_committee = spec.get_beacon_committee(
state,
attestation.data.slot,
attestation.data.index,
)
# By default, have everyone participate
participants = set(beacon_committee)
# But optionally filter the participants to a smaller amount
if filter_participant_set is not None:
participants = filter_participant_set(participants)
for i in range(len(beacon_committee)):
attestation.aggregation_bits[i] = beacon_committee[i] in participants
if signed and len(participants) > 0:
sign_attestation(spec, state, attestation)
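def _example_half_committee_filter(participants):
    # Minimal runnable sketch of a `filter_participant_set` argument: keep a
    # deterministic half of the committee so the attestation stays signable.
    return set(sorted(participants)[:max(1, len(participants) // 2)])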
def add_attestations_to_state(spec, state, attestations, slot):
if state.slot < slot:
spec.process_slots(state, slot)
for attestation in attestations:
spec.process_attestation(state, attestation)
def get_valid_attestation_at_slot(state, spec, slot_to_attest, participation_fn=None):
committees_per_slot = spec.get_committee_count_per_slot(state, spec.compute_epoch_at_slot(slot_to_attest))
for index in range(committees_per_slot):
def participants_filter(comm):
if participation_fn is None:
return comm
else:
return participation_fn(state.slot, index, comm)
# if spec.fork == SHARDING: TODO: add shard data to attestation, include shard headers in block
yield get_valid_attestation(
spec,
state,
slot_to_attest,
index=index,
signed=True,
filter_participant_set=participants_filter
)
def next_slots_with_attestations(spec,
state,
slot_count,
fill_cur_epoch,
fill_prev_epoch,
participation_fn=None):
"""
participation_fn: (slot, committee_index, committee_indices_set) -> participants_indices_set
"""
post_state = state.copy()
signed_blocks = []
for _ in range(slot_count):
signed_block = state_transition_with_full_block(
spec,
post_state,
fill_cur_epoch,
fill_prev_epoch,
participation_fn,
)
signed_blocks.append(signed_block)
return state, signed_blocks, post_state
def next_epoch_with_attestations(spec,
state,
fill_cur_epoch,
fill_prev_epoch,
participation_fn=None):
assert state.slot % spec.SLOTS_PER_EPOCH == 0
return next_slots_with_attestations(
spec,
state,
spec.SLOTS_PER_EPOCH,
fill_cur_epoch,
fill_prev_epoch,
participation_fn,
)
def state_transition_with_full_block(spec,
state,
fill_cur_epoch,
fill_prev_epoch,
participation_fn=None,
sync_aggregate=None,
block=None):
"""
Build and apply a block with attestations at the calculated `slot_to_attest` of current epoch and/or previous epoch.
"""
if block is None:
block = build_empty_block_for_next_slot(spec, state)
if fill_cur_epoch and state.slot >= spec.MIN_ATTESTATION_INCLUSION_DELAY:
slot_to_attest = state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1
if slot_to_attest >= spec.compute_start_slot_at_epoch(spec.get_current_epoch(state)):
attestations = get_valid_attestation_at_slot(
state,
spec,
slot_to_attest,
participation_fn=participation_fn
)
for attestation in attestations:
block.body.attestations.append(attestation)
if fill_prev_epoch:
slot_to_attest = state.slot - spec.SLOTS_PER_EPOCH + 1
attestations = get_valid_attestation_at_slot(
state,
spec,
slot_to_attest,
participation_fn=participation_fn
)
for attestation in attestations:
block.body.attestations.append(attestation)
if sync_aggregate is not None:
block.body.sync_aggregate = sync_aggregate
signed_block = state_transition_and_sign_block(spec, state, block)
return signed_block
def state_transition_with_full_attestations_block(spec, state, fill_cur_epoch, fill_prev_epoch):
"""
Build and apply a block with attestions at all valid slots of current epoch and/or previous epoch.
"""
# Build a block with previous attestations
block = build_empty_block_for_next_slot(spec, state)
attestations = []
if fill_cur_epoch:
# current epoch
slots = state.slot % spec.SLOTS_PER_EPOCH
for slot_offset in range(slots):
target_slot = state.slot - slot_offset
attestations += get_valid_attestation_at_slot(
state,
spec,
target_slot,
)
if fill_prev_epoch:
# attest previous epoch
slots = spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH
for slot_offset in range(1, slots):
target_slot = state.slot - (state.slot % spec.SLOTS_PER_EPOCH) - slot_offset
attestations += get_valid_attestation_at_slot(
state,
spec,
target_slot,
)
block.body.attestations = attestations
signed_block = state_transition_and_sign_block(spec, state, block)
return signed_block
def prepare_state_with_attestations(spec, state, participation_fn=None):
"""
Prepare state with attestations according to the ``participation_fn``.
If no ``participation_fn``, default to "full" -- max committee participation at each slot.
participation_fn: (slot, committee_index, committee_indices_set) -> participants_indices_set
"""
# Go to start of next epoch to ensure can have full participation
next_epoch(spec, state)
start_slot = state.slot
start_epoch = spec.get_current_epoch(state)
next_epoch_start_slot = spec.compute_start_slot_at_epoch(start_epoch + 1)
attestations = []
for _ in range(spec.SLOTS_PER_EPOCH + spec.MIN_ATTESTATION_INCLUSION_DELAY):
# create an attestation for each index in each slot in epoch
if state.slot < next_epoch_start_slot:
for committee_index in range(spec.get_committee_count_per_slot(state, spec.get_current_epoch(state))):
def temp_participants_filter(comm):
if participation_fn is None:
return comm
else:
return participation_fn(state.slot, committee_index, comm)
attestation = get_valid_attestation(spec, state, index=committee_index,
filter_participant_set=temp_participants_filter, signed=True)
if any(attestation.aggregation_bits): # Only if there is at least 1 participant.
attestations.append(attestation)
# fill each created slot in state after inclusion delay
if state.slot >= start_slot + spec.MIN_ATTESTATION_INCLUSION_DELAY:
inclusion_slot = state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY
include_attestations = [att for att in attestations if att.data.slot == inclusion_slot]
add_attestations_to_state(spec, state, include_attestations, state.slot)
next_slot(spec, state)
assert state.slot == next_epoch_start_slot + spec.MIN_ATTESTATION_INCLUSION_DELAY
if not is_post_altair(spec):
assert len(state.previous_epoch_attestations) == len(attestations)
return attestations
_prep_state_cache_dict = LRU(size=10)
def cached_prepare_state_with_attestations(spec, state):
"""
Cached version of prepare_state_with_attestations,
but does not return anything, and does not support a participation fn argument
"""
# If the pre-state is not already known in the LRU, then take it,
# prepare it with attestations, and put it in the LRU.
# The input state is likely already cached, so the hash-tree-root does not affect speed.
key = (spec.fork, state.hash_tree_root())
global _prep_state_cache_dict
if key not in _prep_state_cache_dict:
prepare_state_with_attestations(spec, state)
_prep_state_cache_dict[key] = state.get_backing() # cache the tree structure, not the view wrapping it.
# Put the LRU cache result into the state view, as if we transitioned the original view
state.set_backing(_prep_state_cache_dict[key])
| 15,116 | 37.465649 | 120 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/constants.py
|
from .typing import SpecForkName, PresetBaseName
#
# SpecForkName
#
# Some of the Spec module functionality is exposed here to deal with phase-specific changes.
PHASE0 = SpecForkName('phase0')
ALTAIR = SpecForkName('altair')
BELLATRIX = SpecForkName('bellatrix')
CAPELLA = SpecForkName('capella')
DENEB = SpecForkName('deneb')
# Experimental phases (not included in default "ALL_PHASES"):
SHARDING = SpecForkName('sharding')
CUSTODY_GAME = SpecForkName('custody_game')
DAS = SpecForkName('das')
EIP6110 = SpecForkName('eip6110')
#
# SpecFork settings
#
# The forks that are deployed on Mainnet
MAINNET_FORKS = (PHASE0, ALTAIR, BELLATRIX, CAPELLA)
LATEST_FORK = MAINNET_FORKS[-1]
# The forks that pytest can run with.
ALL_PHASES = (
# Formal forks
*MAINNET_FORKS,
DENEB,
# Experimental patches
EIP6110,
)
# The forks that have light client specs
LIGHT_CLIENT_TESTING_FORKS = (*[item for item in MAINNET_FORKS if item != PHASE0], DENEB)
# The forks that output to the test vectors.
TESTGEN_FORKS = (*MAINNET_FORKS, DENEB, EIP6110)
ALL_FORK_UPGRADES = {
# pre_fork_name: post_fork_name
PHASE0: ALTAIR,
ALTAIR: BELLATRIX,
BELLATRIX: CAPELLA,
CAPELLA: DENEB,
DENEB: EIP6110,
}
ALL_PRE_POST_FORKS = ALL_FORK_UPGRADES.items()
AFTER_BELLATRIX_UPGRADES = {key: value for key, value in ALL_FORK_UPGRADES.items() if key != PHASE0}
AFTER_BELLATRIX_PRE_POST_FORKS = AFTER_BELLATRIX_UPGRADES.items()
AFTER_CAPELLA_UPGRADES = {key: value for key, value in ALL_FORK_UPGRADES.items()
if key not in [PHASE0, ALTAIR]}
AFTER_CAPELLA_PRE_POST_FORKS = AFTER_CAPELLA_UPGRADES.items()
AFTER_DENEB_UPGRADES = {key: value for key, value in ALL_FORK_UPGRADES.items()
if key not in [PHASE0, ALTAIR, BELLATRIX]}
AFTER_DENEB_PRE_POST_FORKS = AFTER_DENEB_UPGRADES.items()
#
# Config and Preset
#
MAINNET = PresetBaseName('mainnet')
MINIMAL = PresetBaseName('minimal')
ALL_PRESETS = (MINIMAL, MAINNET)
#
# Number
#
MAX_UINT_64 = 2**64 - 1
| 2,012 | 26.958333 | 100 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/block_processing.py
|
def for_ops(state, operations, fn) -> None:
for operation in operations:
fn(state, operation)
def get_process_calls(spec):
return {
# PHASE0
'process_block_header':
lambda state, block: spec.process_block_header(state, block),
'process_randao':
lambda state, block: spec.process_randao(state, block.body),
'process_eth1_data':
lambda state, block: spec.process_eth1_data(state, block.body),
'process_proposer_slashing':
lambda state, block: for_ops(state, block.body.proposer_slashings, spec.process_proposer_slashing),
'process_attester_slashing':
lambda state, block: for_ops(state, block.body.attester_slashings, spec.process_attester_slashing),
'process_shard_header':
lambda state, block: for_ops(state, block.body.shard_headers, spec.process_shard_header),
'process_attestation':
lambda state, block: for_ops(state, block.body.attestations, spec.process_attestation),
'process_deposit':
lambda state, block: for_ops(state, block.body.deposits, spec.process_deposit),
'process_voluntary_exit':
lambda state, block: for_ops(state, block.body.voluntary_exits, spec.process_voluntary_exit),
# Altair
'process_sync_aggregate':
lambda state, block: spec.process_sync_aggregate(state, block.body.sync_aggregate),
# Bellatrix
'process_application_payload':
lambda state, block: spec.process_application_payload(state, block.body),
# TODO: add sharding processing functions when spec stabilizes.
# Custody Game
'process_custody_game_operations':
lambda state, block: spec.process_custody_game_operations(state, block.body),
}
def run_block_processing_to(spec, state, block, process_name: str):
"""
Processes to the block transition, up to, but not including, the sub-transition named ``process_name``.
Returns a Callable[[state, block], None] for the remaining ``process_name`` transition.
Tests should create full blocks to ensure a valid state transition, even if the operation itself is isolated.
(e.g. latest_header in the beacon state is up-to-date in a sync-committee test).
    A test prepares a pre-state by calling this function and outputs the pre-state;
    it can then run the returned callable and output a post-state.
"""
# transition state to slot before block state transition
if state.slot < block.slot:
spec.process_slots(state, block.slot)
# process components of block transition
for name, call in get_process_calls(spec).items():
if name == process_name:
return call
# only run when present. Later phases introduce more to the block-processing.
if hasattr(spec, name):
call(state, block)
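# A hedged usage sketch (illustrative, not part of the original file): run every
# sub-transition before ``process_sync_aggregate``, then apply that one step in
# isolation on the prepared pre-state.
def _example_isolated_sync_aggregate(spec, state, block):
    remaining = run_block_processing_to(spec, state, block, 'process_sync_aggregate')
    # ``state`` is now the pre-state for the isolated sub-transition.
    remaining(state, block)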
| 2,922 | 46.145161 | 113 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/keys.py
|
from py_ecc.bls import G2ProofOfPossession as bls
# Enough keys for 256 validators per slot in worst-case epoch length
privkeys = [i + 1 for i in range(32 * 256)]
pubkeys = [bls.SkToPk(privkey) for privkey in privkeys]
pubkey_to_privkey = {pubkey: privkey for privkey, pubkey in zip(privkeys, pubkeys)}
| 304 | 42.571429 | 83 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/withdrawals.py
|
import random
def set_validator_fully_withdrawable(spec, state, index, withdrawable_epoch=None):
if withdrawable_epoch is None:
withdrawable_epoch = spec.get_current_epoch(state)
validator = state.validators[index]
validator.withdrawable_epoch = withdrawable_epoch
# set exit epoch as well to avoid interactions with other epoch process, e.g. forced ejections
if validator.exit_epoch > withdrawable_epoch:
validator.exit_epoch = withdrawable_epoch
if validator.withdrawal_credentials[0:1] == spec.BLS_WITHDRAWAL_PREFIX:
validator.withdrawal_credentials = spec.ETH1_ADDRESS_WITHDRAWAL_PREFIX + validator.withdrawal_credentials[1:]
if state.balances[index] == 0:
state.balances[index] = 10000000000
assert spec.is_fully_withdrawable_validator(validator, state.balances[index], withdrawable_epoch)
def set_eth1_withdrawal_credential_with_balance(spec, state, index, balance):
validator = state.validators[index]
validator.withdrawal_credentials = spec.ETH1_ADDRESS_WITHDRAWAL_PREFIX + validator.withdrawal_credentials[1:]
validator.effective_balance = min(balance, spec.MAX_EFFECTIVE_BALANCE)
state.balances[index] = balance
def set_validator_partially_withdrawable(spec, state, index, excess_balance=1000000000):
set_eth1_withdrawal_credential_with_balance(spec, state, index, spec.MAX_EFFECTIVE_BALANCE + excess_balance)
validator = state.validators[index]
assert spec.is_partially_withdrawable_validator(validator, state.balances[index])
def prepare_expected_withdrawals(spec, state,
num_full_withdrawals=0, num_partial_withdrawals=0, rng=random.Random(5566)):
bound = min(len(state.validators), spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)
assert num_full_withdrawals + num_partial_withdrawals <= bound
eligible_validator_indices = list(range(bound))
sampled_indices = rng.sample(eligible_validator_indices, num_full_withdrawals + num_partial_withdrawals)
fully_withdrawable_indices = rng.sample(sampled_indices, num_full_withdrawals)
partial_withdrawals_indices = list(set(sampled_indices).difference(set(fully_withdrawable_indices)))
for index in fully_withdrawable_indices:
set_validator_fully_withdrawable(spec, state, index)
for index in partial_withdrawals_indices:
set_validator_partially_withdrawable(spec, state, index)
return fully_withdrawable_indices, partial_withdrawals_indices
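# Usage sketch (hedged): mark two validators fully withdrawable and one partially
# withdrawable, then inspect which indices the withdrawals sweep should pick up.
def _example_expected_withdrawals(spec, state):
    full_indices, partial_indices = prepare_expected_withdrawals(
        spec, state, num_full_withdrawals=2, num_partial_withdrawals=1)
    # The sweep (bounded by MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP) should cover both lists.
    return full_indices, partial_indices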
| 2,483 | 46.769231 | 117 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/block_header.py
|
from eth2spec.utils import bls
def sign_block_header(spec, state, header, privkey):
domain = spec.get_domain(
state=state,
domain_type=spec.DOMAIN_BEACON_PROPOSER,
)
signing_root = spec.compute_signing_root(header, domain)
signature = bls.Sign(privkey, signing_root)
return spec.SignedBeaconBlockHeader(message=header, signature=signature)
| 378 | 30.583333 | 76 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/voluntary_exits.py
|
from random import Random
from eth2spec.utils import bls
from eth2spec.test.context import expect_assertion_error
from eth2spec.test.helpers.forks import is_post_deneb
from eth2spec.test.helpers.keys import privkeys
def prepare_signed_exits(spec, state, indices, fork_version=None):
def create_signed_exit(index):
voluntary_exit = spec.VoluntaryExit(
epoch=spec.get_current_epoch(state),
validator_index=index,
)
return sign_voluntary_exit(spec, state, voluntary_exit, privkeys[index], fork_version=fork_version)
return [create_signed_exit(index) for index in indices]
def sign_voluntary_exit(spec, state, voluntary_exit, privkey, fork_version=None):
if fork_version is None:
if is_post_deneb(spec):
domain = spec.compute_domain(
spec.DOMAIN_VOLUNTARY_EXIT,
spec.config.CAPELLA_FORK_VERSION,
state.genesis_validators_root,
)
else:
domain = spec.get_domain(state, spec.DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch)
else:
domain = spec.compute_domain(spec.DOMAIN_VOLUNTARY_EXIT, fork_version, state.genesis_validators_root)
signing_root = spec.compute_signing_root(voluntary_exit, domain)
return spec.SignedVoluntaryExit(
message=voluntary_exit,
signature=bls.Sign(privkey, signing_root)
)
#
# Helpers for applying effects of a voluntary exit
#
def get_exited_validators(spec, state):
current_epoch = spec.get_current_epoch(state)
return [index for (index, validator) in enumerate(state.validators) if validator.exit_epoch <= current_epoch]
def get_unslashed_exited_validators(spec, state):
return [
index for index in get_exited_validators(spec, state)
if not state.validators[index].slashed
]
def exit_validators(spec, state, validator_count, rng=None):
if rng is None:
rng = Random(1337)
indices = rng.sample(range(len(state.validators)), validator_count)
for index in indices:
spec.initiate_validator_exit(state, index)
return indices
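# Hedged sketch: build and process a signed exit for validator 0 (assumes the
# validator is active and past the shard committee period, as the spec requires).
def _example_process_single_exit(spec, state):
    signed_exit = prepare_signed_exits(spec, state, indices=[0])[0]
    spec.process_voluntary_exit(state, signed_exit)
    assert state.validators[0].exit_epoch < spec.FAR_FUTURE_EPOCH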
#
# Run processing
#
def run_voluntary_exit_processing(spec, state, signed_voluntary_exit, valid=True):
"""
Run ``process_voluntary_exit``, yielding:
- pre-state ('pre')
- voluntary_exit ('voluntary_exit')
- post-state ('post').
If ``valid == False``, run expecting ``AssertionError``
"""
validator_index = signed_voluntary_exit.message.validator_index
yield 'pre', state
yield 'voluntary_exit', signed_voluntary_exit
if not valid:
expect_assertion_error(lambda: spec.process_voluntary_exit(state, signed_voluntary_exit))
yield 'post', None
return
pre_exit_epoch = state.validators[validator_index].exit_epoch
spec.process_voluntary_exit(state, signed_voluntary_exit)
yield 'post', state
assert pre_exit_epoch == spec.FAR_FUTURE_EPOCH
assert state.validators[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
| 3,028 | 30.884211 | 113 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/deposits.py
|
from random import Random
from eth2spec.test.context import expect_assertion_error
from eth2spec.test.helpers.forks import is_post_altair
from eth2spec.test.helpers.keys import pubkeys, privkeys
from eth2spec.test.helpers.state import get_balance
from eth2spec.utils import bls
from eth2spec.utils.merkle_minimal import calc_merkle_tree_from_leaves, get_merkle_proof
from eth2spec.utils.ssz.ssz_impl import hash_tree_root
from eth2spec.utils.ssz.ssz_typing import List
def mock_deposit(spec, state, index):
"""
Mock validator at ``index`` as having just made a deposit
"""
assert spec.is_active_validator(state.validators[index], spec.get_current_epoch(state))
state.validators[index].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
state.validators[index].activation_epoch = spec.FAR_FUTURE_EPOCH
state.validators[index].effective_balance = spec.MAX_EFFECTIVE_BALANCE
if is_post_altair(spec):
state.inactivity_scores[index] = 0
assert not spec.is_active_validator(state.validators[index], spec.get_current_epoch(state))
def build_deposit_data(spec, pubkey, privkey, amount, withdrawal_credentials, signed=False):
deposit_data = spec.DepositData(
pubkey=pubkey,
withdrawal_credentials=withdrawal_credentials,
amount=amount,
)
if signed:
sign_deposit_data(spec, deposit_data, privkey)
return deposit_data
def sign_deposit_data(spec, deposit_data, privkey):
deposit_message = spec.DepositMessage(
pubkey=deposit_data.pubkey,
withdrawal_credentials=deposit_data.withdrawal_credentials,
amount=deposit_data.amount)
domain = spec.compute_domain(spec.DOMAIN_DEPOSIT)
signing_root = spec.compute_signing_root(deposit_message, domain)
deposit_data.signature = bls.Sign(privkey, signing_root)
def build_deposit(spec,
deposit_data_list,
pubkey,
privkey,
amount,
withdrawal_credentials,
signed):
deposit_data = build_deposit_data(spec, pubkey, privkey, amount, withdrawal_credentials, signed=signed)
index = len(deposit_data_list)
deposit_data_list.append(deposit_data)
return deposit_from_context(spec, deposit_data_list, index)
def deposit_from_context(spec, deposit_data_list, index):
deposit_data = deposit_data_list[index]
root = hash_tree_root(List[spec.DepositData, 2**spec.DEPOSIT_CONTRACT_TREE_DEPTH](*deposit_data_list))
tree = calc_merkle_tree_from_leaves(tuple([d.hash_tree_root() for d in deposit_data_list]))
proof = (
list(get_merkle_proof(tree, item_index=index, tree_len=32))
+ [len(deposit_data_list).to_bytes(32, 'little')]
)
leaf = deposit_data.hash_tree_root()
assert spec.is_valid_merkle_branch(leaf, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH + 1, index, root)
deposit = spec.Deposit(proof=proof, data=deposit_data)
return deposit, root, deposit_data_list
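# Layout note, as a hedged sketch mirroring ``deposit_from_context`` above: the
# proof is the DEPOSIT_CONTRACT_TREE_DEPTH-deep Merkle branch plus one extra
# 32-byte leaf that mixes in the deposit count (little-endian), hence the
# ``+ 1`` depth used in ``is_valid_merkle_branch``.
def _example_deposit_proof_shape(spec, deposit):
    assert len(deposit.proof) == spec.DEPOSIT_CONTRACT_TREE_DEPTH + 1
    mixed_in_count = int.from_bytes(bytes(deposit.proof[-1])[:8], 'little')
    return mixed_in_count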
def prepare_full_genesis_deposits(spec,
amount,
deposit_count,
min_pubkey_index=0,
signed=False,
deposit_data_list=None):
if deposit_data_list is None:
deposit_data_list = []
genesis_deposits = []
for pubkey_index in range(min_pubkey_index, min_pubkey_index + deposit_count):
pubkey = pubkeys[pubkey_index]
privkey = privkeys[pubkey_index]
# insecurely use pubkey as withdrawal key if no credentials provided
withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(pubkey)[1:]
deposit, root, deposit_data_list = build_deposit(
spec,
deposit_data_list=deposit_data_list,
pubkey=pubkey,
privkey=privkey,
amount=amount,
withdrawal_credentials=withdrawal_credentials,
signed=signed,
)
genesis_deposits.append(deposit)
return genesis_deposits, root, deposit_data_list
def prepare_random_genesis_deposits(spec,
deposit_count,
max_pubkey_index,
min_pubkey_index=0,
max_amount=None,
min_amount=None,
deposit_data_list=None,
rng=Random(3131)):
if max_amount is None:
max_amount = spec.MAX_EFFECTIVE_BALANCE
if min_amount is None:
min_amount = spec.MIN_DEPOSIT_AMOUNT
if deposit_data_list is None:
deposit_data_list = []
deposits = []
for _ in range(deposit_count):
pubkey_index = rng.randint(min_pubkey_index, max_pubkey_index)
pubkey = pubkeys[pubkey_index]
privkey = privkeys[pubkey_index]
amount = rng.randint(min_amount, max_amount)
random_byte = bytes([rng.randint(0, 255)])
withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(random_byte)[1:]
deposit, root, deposit_data_list = build_deposit(
spec,
deposit_data_list=deposit_data_list,
pubkey=pubkey,
privkey=privkey,
amount=amount,
withdrawal_credentials=withdrawal_credentials,
signed=True,
)
deposits.append(deposit)
return deposits, root, deposit_data_list
def prepare_state_and_deposit(spec, state, validator_index, amount,
pubkey=None,
privkey=None,
withdrawal_credentials=None,
signed=False):
"""
Prepare the state for the deposit, and create a deposit for the given validator, depositing the given amount.
"""
deposit_data_list = []
if pubkey is None:
pubkey = pubkeys[validator_index]
if privkey is None:
privkey = privkeys[validator_index]
# insecurely use pubkey as withdrawal key if no credentials provided
if withdrawal_credentials is None:
withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(pubkey)[1:]
deposit, root, deposit_data_list = build_deposit(
spec,
deposit_data_list,
pubkey,
privkey,
amount,
withdrawal_credentials,
signed,
)
state.eth1_deposit_index = 0
state.eth1_data.deposit_root = root
state.eth1_data.deposit_count = len(deposit_data_list)
return deposit
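# A hedged end-to-end sketch (illustrative): deposit for a brand-new validator
# and process it, growing the registry by one.
def _example_new_validator_deposit(spec, state):
    validator_index = len(state.validators)
    deposit = prepare_state_and_deposit(
        spec, state, validator_index, spec.MAX_EFFECTIVE_BALANCE, signed=True)
    spec.process_deposit(state, deposit)
    assert len(state.validators) == validator_index + 1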
def build_deposit_receipt(spec,
index,
pubkey,
privkey,
amount,
withdrawal_credentials,
signed):
deposit_data = build_deposit_data(spec, pubkey, privkey, amount, withdrawal_credentials, signed=signed)
return spec.DepositReceipt(
pubkey=deposit_data.pubkey,
withdrawal_credentials=deposit_data.withdrawal_credentials,
amount=deposit_data.amount,
signature=deposit_data.signature,
index=index)
def prepare_deposit_receipt(spec, validator_index, amount,
index=None,
pubkey=None,
privkey=None,
withdrawal_credentials=None,
signed=False):
"""
Create a deposit receipt for the given validator, depositing the given amount.
"""
if index is None:
index = validator_index
if pubkey is None:
pubkey = pubkeys[validator_index]
if privkey is None:
privkey = privkeys[validator_index]
# insecurely use pubkey as withdrawal key if no credentials provided
if withdrawal_credentials is None:
withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(pubkey)[1:]
return build_deposit_receipt(
spec,
index,
pubkey,
privkey,
amount,
withdrawal_credentials,
signed,
)
#
# Run processing
#
def run_deposit_processing(spec, state, deposit, validator_index, valid=True, effective=True):
"""
Run ``process_deposit``, yielding:
- pre-state ('pre')
- deposit ('deposit')
- post-state ('post').
If ``valid == False``, run expecting ``AssertionError``
"""
pre_validator_count = len(state.validators)
pre_balance = 0
is_top_up = False
# is a top-up
if validator_index < pre_validator_count:
is_top_up = True
pre_balance = get_balance(state, validator_index)
pre_effective_balance = state.validators[validator_index].effective_balance
yield 'pre', state
yield 'deposit', deposit
if not valid:
expect_assertion_error(lambda: spec.process_deposit(state, deposit))
yield 'post', None
return
spec.process_deposit(state, deposit)
yield 'post', state
if not effective or not bls.KeyValidate(deposit.data.pubkey):
assert len(state.validators) == pre_validator_count
assert len(state.balances) == pre_validator_count
if is_top_up:
assert get_balance(state, validator_index) == pre_balance
else:
if is_top_up:
# Top-ups do not change effective balance
assert state.validators[validator_index].effective_balance == pre_effective_balance
assert len(state.validators) == pre_validator_count
assert len(state.balances) == pre_validator_count
else:
# new validator
assert len(state.validators) == pre_validator_count + 1
assert len(state.balances) == pre_validator_count + 1
effective_balance = min(spec.MAX_EFFECTIVE_BALANCE, deposit.data.amount)
effective_balance -= effective_balance % spec.EFFECTIVE_BALANCE_INCREMENT
assert state.validators[validator_index].effective_balance == effective_balance
assert get_balance(state, validator_index) == pre_balance + deposit.data.amount
assert state.eth1_deposit_index == state.eth1_data.deposit_count
def run_deposit_processing_with_specific_fork_version(
spec,
state,
fork_version,
valid=True,
effective=True):
validator_index = len(state.validators)
amount = spec.MAX_EFFECTIVE_BALANCE
pubkey = pubkeys[validator_index]
privkey = privkeys[validator_index]
withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(pubkey)[1:]
deposit_message = spec.DepositMessage(pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount)
domain = spec.compute_domain(domain_type=spec.DOMAIN_DEPOSIT, fork_version=fork_version)
deposit_data = spec.DepositData(
pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount,
signature=bls.Sign(privkey, spec.compute_signing_root(deposit_message, domain))
)
deposit, root, _ = deposit_from_context(spec, [deposit_data], 0)
state.eth1_deposit_index = 0
state.eth1_data.deposit_root = root
state.eth1_data.deposit_count = 1
yield from run_deposit_processing(spec, state, deposit, validator_index, valid=valid, effective=effective)
def run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index, valid=True, effective=True):
"""
Run ``process_deposit_receipt``, yielding:
- pre-state ('pre')
- deposit_receipt ('deposit_receipt')
- post-state ('post').
If ``valid == False``, run expecting ``AssertionError``
"""
pre_validator_count = len(state.validators)
pre_balance = 0
is_top_up = False
# is a top-up
if validator_index < pre_validator_count:
is_top_up = True
pre_balance = get_balance(state, validator_index)
pre_effective_balance = state.validators[validator_index].effective_balance
yield 'pre', state
yield 'deposit_receipt', deposit_receipt
if not valid:
expect_assertion_error(lambda: spec.process_deposit_receipt(state, deposit_receipt))
yield 'post', None
return
spec.process_deposit_receipt(state, deposit_receipt)
yield 'post', state
if not effective or not bls.KeyValidate(deposit_receipt.pubkey):
assert len(state.validators) == pre_validator_count
assert len(state.balances) == pre_validator_count
if is_top_up:
assert get_balance(state, validator_index) == pre_balance
else:
if is_top_up:
# Top-ups do not change effective balance
assert state.validators[validator_index].effective_balance == pre_effective_balance
assert len(state.validators) == pre_validator_count
assert len(state.balances) == pre_validator_count
else:
# new validator
assert len(state.validators) == pre_validator_count + 1
assert len(state.balances) == pre_validator_count + 1
effective_balance = min(spec.MAX_EFFECTIVE_BALANCE, deposit_receipt.amount)
effective_balance -= effective_balance % spec.EFFECTIVE_BALANCE_INCREMENT
assert state.validators[validator_index].effective_balance == effective_balance
assert get_balance(state, validator_index) == pre_balance + deposit_receipt.amount
def run_deposit_receipt_processing_with_specific_fork_version(
spec,
state,
fork_version,
valid=True,
effective=True):
validator_index = len(state.validators)
amount = spec.MAX_EFFECTIVE_BALANCE
pubkey = pubkeys[validator_index]
privkey = privkeys[validator_index]
withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(pubkey)[1:]
deposit_message = spec.DepositMessage(pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount)
domain = spec.compute_domain(domain_type=spec.DOMAIN_DEPOSIT, fork_version=fork_version)
deposit_data = spec.DepositData(
pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount,
signature=bls.Sign(privkey, spec.compute_signing_root(deposit_message, domain))
)
deposit_receipt = spec.DepositReceipt(
pubkey=deposit_data.pubkey,
withdrawal_credentials=deposit_data.withdrawal_credentials,
amount=deposit_data.amount,
signature=deposit_data.signature,
index=validator_index)
yield from run_deposit_receipt_processing(
spec,
state,
deposit_receipt,
validator_index,
valid=valid,
effective=effective
)
| 14,700 | 36.407125 | 118 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/genesis.py
|
from eth2spec.test.helpers.constants import (
ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110,
)
from eth2spec.test.helpers.execution_payload import (
compute_el_header_block_hash,
)
from eth2spec.test.helpers.forks import (
is_post_altair, is_post_bellatrix, is_post_capella, is_post_eip6110,
)
from eth2spec.test.helpers.keys import pubkeys
def build_mock_validator(spec, i: int, balance: int):
active_pubkey = pubkeys[i]
withdrawal_pubkey = pubkeys[-1 - i]
# insecurely use pubkey as withdrawal key as well
withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(withdrawal_pubkey)[1:]
validator = spec.Validator(
pubkey=active_pubkey,
withdrawal_credentials=withdrawal_credentials,
activation_eligibility_epoch=spec.FAR_FUTURE_EPOCH,
activation_epoch=spec.FAR_FUTURE_EPOCH,
exit_epoch=spec.FAR_FUTURE_EPOCH,
withdrawable_epoch=spec.FAR_FUTURE_EPOCH,
effective_balance=min(balance - balance % spec.EFFECTIVE_BALANCE_INCREMENT, spec.MAX_EFFECTIVE_BALANCE)
)
return validator
def get_sample_genesis_execution_payload_header(spec,
eth1_block_hash=None):
if eth1_block_hash is None:
eth1_block_hash = b'\x55' * 32
payload_header = spec.ExecutionPayloadHeader(
parent_hash=b'\x30' * 32,
fee_recipient=b'\x42' * 20,
state_root=b'\x20' * 32,
receipts_root=b'\x20' * 32,
logs_bloom=b'\x35' * spec.BYTES_PER_LOGS_BLOOM,
prev_randao=eth1_block_hash,
block_number=0,
gas_limit=30000000,
base_fee_per_gas=1000000000,
block_hash=eth1_block_hash,
transactions_root=spec.Root(b'\x56' * 32),
)
transactions_trie_root = bytes.fromhex("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
withdrawals_trie_root = None
deposit_receipts_trie_root = None
if is_post_capella(spec):
withdrawals_trie_root = bytes.fromhex("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
if is_post_eip6110(spec):
deposit_receipts_trie_root = bytes.fromhex("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
payload_header.block_hash = compute_el_header_block_hash(
spec,
payload_header,
transactions_trie_root,
withdrawals_trie_root,
deposit_receipts_trie_root,
)
return payload_header
def create_genesis_state(spec, validator_balances, activation_threshold):
deposit_root = b'\x42' * 32
eth1_block_hash = b'\xda' * 32
previous_version = spec.config.GENESIS_FORK_VERSION
current_version = spec.config.GENESIS_FORK_VERSION
if spec.fork == ALTAIR:
current_version = spec.config.ALTAIR_FORK_VERSION
elif spec.fork == BELLATRIX:
previous_version = spec.config.ALTAIR_FORK_VERSION
current_version = spec.config.BELLATRIX_FORK_VERSION
elif spec.fork == CAPELLA:
previous_version = spec.config.BELLATRIX_FORK_VERSION
current_version = spec.config.CAPELLA_FORK_VERSION
elif spec.fork == DENEB:
previous_version = spec.config.CAPELLA_FORK_VERSION
current_version = spec.config.DENEB_FORK_VERSION
elif spec.fork == EIP6110:
previous_version = spec.config.DENEB_FORK_VERSION
current_version = spec.config.EIP6110_FORK_VERSION
state = spec.BeaconState(
genesis_time=0,
eth1_deposit_index=len(validator_balances),
eth1_data=spec.Eth1Data(
deposit_root=deposit_root,
deposit_count=len(validator_balances),
block_hash=eth1_block_hash,
),
fork=spec.Fork(
previous_version=previous_version,
current_version=current_version,
epoch=spec.GENESIS_EPOCH,
),
latest_block_header=spec.BeaconBlockHeader(body_root=spec.hash_tree_root(spec.BeaconBlockBody())),
randao_mixes=[eth1_block_hash] * spec.EPOCHS_PER_HISTORICAL_VECTOR,
)
# We "hack" in the initial validators,
# as it is much faster than creating and processing genesis deposits for every single test case.
state.balances = validator_balances
state.validators = [build_mock_validator(spec, i, state.balances[i]) for i in range(len(validator_balances))]
# Process genesis activations
for validator in state.validators:
if validator.effective_balance >= activation_threshold:
validator.activation_eligibility_epoch = spec.GENESIS_EPOCH
validator.activation_epoch = spec.GENESIS_EPOCH
if is_post_altair(spec):
state.previous_epoch_participation.append(spec.ParticipationFlags(0b0000_0000))
state.current_epoch_participation.append(spec.ParticipationFlags(0b0000_0000))
state.inactivity_scores.append(spec.uint64(0))
# Set genesis validators root for domain separation and chain versioning
state.genesis_validators_root = spec.hash_tree_root(state.validators)
if is_post_altair(spec):
# Fill in sync committees
# Note: A duplicate committee is assigned for the current and next committee at genesis
state.current_sync_committee = spec.get_next_sync_committee(state)
state.next_sync_committee = spec.get_next_sync_committee(state)
if is_post_bellatrix(spec):
# Initialize the execution payload header (with block number and genesis time set to 0)
state.latest_execution_payload_header = get_sample_genesis_execution_payload_header(
spec,
eth1_block_hash=eth1_block_hash,
)
if is_post_eip6110(spec):
state.deposit_receipts_start_index = spec.UNSET_DEPOSIT_RECEIPTS_START_INDEX
return state
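# Minimal usage sketch (hypothetical parameters): a genesis state where every
# validator deposits the max effective balance and is therefore active at genesis.
def _example_genesis_state(spec, validator_count=64):
    balances = [spec.MAX_EFFECTIVE_BALANCE] * validator_count
    return create_genesis_state(spec, balances, activation_threshold=spec.MAX_EFFECTIVE_BALANCE)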
| 5,759 | 39.56338 | 118 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/attester_slashings.py
|
from eth2spec.test.helpers.attestations import get_valid_attestation, sign_attestation, sign_indexed_attestation
def get_valid_attester_slashing(spec, state, slot=None, signed_1=False, signed_2=False, filter_participant_set=None):
attestation_1 = get_valid_attestation(
spec, state,
slot=slot, signed=signed_1, filter_participant_set=filter_participant_set
)
attestation_2 = attestation_1.copy()
attestation_2.data.target.root = b'\x01' * 32
if signed_2:
sign_attestation(spec, state, attestation_2)
return spec.AttesterSlashing(
attestation_1=spec.get_indexed_attestation(state, attestation_1),
attestation_2=spec.get_indexed_attestation(state, attestation_2),
)
def get_valid_attester_slashing_by_indices(spec, state,
indices_1, indices_2=None,
slot=None,
signed_1=False, signed_2=False):
if indices_2 is None:
indices_2 = indices_1
assert indices_1 == sorted(indices_1)
assert indices_2 == sorted(indices_2)
attester_slashing = get_valid_attester_slashing(spec, state, slot=slot)
attester_slashing.attestation_1.attesting_indices = indices_1
attester_slashing.attestation_2.attesting_indices = indices_2
if signed_1:
sign_indexed_attestation(spec, state, attester_slashing.attestation_1)
if signed_2:
sign_indexed_attestation(spec, state, attester_slashing.attestation_2)
return attester_slashing
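# Hedged sketch: a fully signed double-vote slashing for two chosen validators
# (indices must be sorted, matching the assertions above).
def _example_double_vote_slashing(spec, state):
    return get_valid_attester_slashing_by_indices(
        spec, state, indices_1=[0, 1], signed_1=True, signed_2=True)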
def get_indexed_attestation_participants(spec, indexed_att):
"""
    Wrapper around an indexed attestation to return the list of participant indices, regardless of spec phase.
"""
return list(indexed_att.attesting_indices)
def set_indexed_attestation_participants(spec, indexed_att, participants):
"""
    Wrapper around an indexed attestation to set the list of participant indices, regardless of spec phase.
"""
indexed_att.attesting_indices = participants
def get_attestation_1_data(spec, att_slashing):
return att_slashing.attestation_1.data
def get_attestation_2_data(spec, att_slashing):
return att_slashing.attestation_2.data
| 2,235 | 33.4 | 117 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/state.py
|
from eth2spec.test.context import expect_assertion_error
from eth2spec.test.helpers.block import apply_empty_block, sign_block, transition_unsigned_block
from eth2spec.test.helpers.forks import is_post_altair
from eth2spec.test.helpers.voluntary_exits import get_unslashed_exited_validators
def get_balance(state, index):
return state.balances[index]
def next_slot(spec, state):
"""
Transition to the next slot.
"""
spec.process_slots(state, state.slot + 1)
def next_slots(spec, state, slots):
"""
Transition given slots forward.
"""
if slots > 0:
spec.process_slots(state, state.slot + slots)
def transition_to(spec, state, slot):
"""
Transition to ``slot``.
"""
assert state.slot <= slot
for _ in range(slot - state.slot):
next_slot(spec, state)
assert state.slot == slot
def transition_to_slot_via_block(spec, state, slot):
"""
Transition to ``slot`` via an empty block transition
"""
assert state.slot < slot
apply_empty_block(spec, state, slot)
assert state.slot == slot
def transition_to_valid_shard_slot(spec, state):
    """
    Transition to slot `compute_start_slot_at_epoch(spec.config.SHARDING_FORK_EPOCH) + 1`
    and fork at `compute_start_slot_at_epoch(spec.config.SHARDING_FORK_EPOCH)`.
    """
    transition_to(spec, state, spec.compute_start_slot_at_epoch(spec.config.SHARDING_FORK_EPOCH))
    next_slot(spec, state)
def next_epoch(spec, state):
"""
Transition to the start slot of the next epoch
"""
slot = state.slot + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH)
if slot > state.slot:
spec.process_slots(state, slot)
def next_epoch_via_block(spec, state, insert_state_root=False):
"""
Transition to the start slot of the next epoch via a full block transition
"""
block = apply_empty_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH)
if insert_state_root:
block.state_root = state.hash_tree_root()
return block
def next_epoch_via_signed_block(spec, state):
block = next_epoch_via_block(spec, state, insert_state_root=True)
return sign_block(spec, state, block)
def get_state_root(spec, state, slot) -> bytes:
"""
Return the state root at a recent ``slot``.
"""
assert slot < state.slot <= slot + spec.SLOTS_PER_HISTORICAL_ROOT
return state.state_roots[slot % spec.SLOTS_PER_HISTORICAL_ROOT]
def state_transition_and_sign_block(spec, state, block, expect_fail=False):
"""
State transition via the provided ``block``
then package the block with the correct state root and signature.
"""
if expect_fail:
expect_assertion_error(lambda: transition_unsigned_block(spec, state, block))
else:
transition_unsigned_block(spec, state, block)
block.state_root = state.hash_tree_root()
return sign_block(spec, state, block)
#
# WARNING: The following functions can only be used post-altair due to the manipulation of participation flags directly
#
def _set_full_participation(spec, state, current=True, previous=True):
assert is_post_altair(spec)
full_flags = spec.ParticipationFlags(0)
for flag_index in range(len(spec.PARTICIPATION_FLAG_WEIGHTS)):
full_flags = spec.add_flag(full_flags, flag_index)
for index in range(len(state.validators)):
if current:
state.current_epoch_participation[index] = full_flags.copy()
if previous:
state.previous_epoch_participation[index] = full_flags.copy()
def set_full_participation(spec, state, rng=None):
_set_full_participation(spec, state)
def set_full_participation_previous_epoch(spec, state, rng=None):
_set_full_participation(spec, state, current=False, previous=True)
def _set_empty_participation(spec, state, current=True, previous=True):
assert is_post_altair(spec)
for index in range(len(state.validators)):
if current:
state.current_epoch_participation[index] = spec.ParticipationFlags(0)
if previous:
state.previous_epoch_participation[index] = spec.ParticipationFlags(0)
def set_empty_participation(spec, state, rng=None):
_set_empty_participation(spec, state)
def ensure_state_has_validators_across_lifecycle(spec, state):
"""
Scan the validator registry to ensure there is at least 1 validator
for each of the following lifecycle states:
1. Pending / deposited
2. Active
3. Exited (but not slashed)
4. Slashed
"""
has_pending = any(filter(spec.is_eligible_for_activation_queue, state.validators))
current_epoch = spec.get_current_epoch(state)
has_active = any(filter(lambda v: spec.is_active_validator(v, current_epoch), state.validators))
has_exited = any(get_unslashed_exited_validators(spec, state))
has_slashed = any(filter(lambda v: v.slashed, state.validators))
return has_pending and has_active and has_exited and has_slashed
def has_active_balance_differential(spec, state):
"""
Ensure there is a difference between the total balance of
all _active_ validators and _all_ validators.
"""
active_balance = spec.get_total_active_balance(state)
total_balance = spec.get_total_balance(state, set(range(len(state.validators))))
return active_balance // spec.EFFECTIVE_BALANCE_INCREMENT != total_balance // spec.EFFECTIVE_BALANCE_INCREMENT
| 5,444 | 31.218935 | 119 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/rewards.py
|
from random import Random
from lru import LRU
from eth2spec.phase0.mainnet import VALIDATOR_REGISTRY_LIMIT # equal everywhere, fine to import
from eth2spec.test.helpers.forks import is_post_altair, is_post_bellatrix
from eth2spec.test.helpers.state import (
next_epoch,
)
from eth2spec.test.helpers.random import (
set_some_new_deposits, exit_random_validators, slash_random_validators,
randomize_state,
)
from eth2spec.test.helpers.attestations import (
cached_prepare_state_with_attestations,
)
from eth2spec.utils.ssz.ssz_typing import Container, uint64, List
class Deltas(Container):
rewards: List[uint64, VALIDATOR_REGISTRY_LIMIT]
penalties: List[uint64, VALIDATOR_REGISTRY_LIMIT]
def get_inactivity_penalty_quotient(spec):
if is_post_bellatrix(spec):
return spec.INACTIVITY_PENALTY_QUOTIENT_BELLATRIX
elif is_post_altair(spec):
return spec.INACTIVITY_PENALTY_QUOTIENT_ALTAIR
else:
return spec.INACTIVITY_PENALTY_QUOTIENT
def has_enough_for_reward(spec, state, index):
"""
Check if base_reward will be non-zero.
    At very low balances, it is possible for a validator to have a positive effective_balance
    but a zero base reward.
"""
return (
state.validators[index].effective_balance * spec.BASE_REWARD_FACTOR
> spec.integer_squareroot(spec.get_total_active_balance(state)) // spec.BASE_REWARDS_PER_EPOCH
)
def has_enough_for_leak_penalty(spec, state, index):
"""
Check if effective_balance and state of leak is high enough for a leak penalty.
    At very low balances / leak values, it is possible for a validator to have a positive effective_balance
    and be in a leak, but have zero leak penalty.
"""
if is_post_altair(spec):
return (
state.validators[index].effective_balance * state.inactivity_scores[index]
> spec.config.INACTIVITY_SCORE_BIAS * get_inactivity_penalty_quotient(spec)
)
else:
return (
state.validators[index].effective_balance * spec.get_finality_delay(state)
> spec.INACTIVITY_PENALTY_QUOTIENT
)
def run_deltas(spec, state):
"""
Run all deltas functions yielding:
- pre-state ('pre')
- source deltas ('source_deltas')
- target deltas ('target_deltas')
- head deltas ('head_deltas')
    - inclusion delay deltas ('inclusion_delay_deltas'), only when not is_post_altair(spec)
- inactivity penalty deltas ('inactivity_penalty_deltas')
"""
yield 'pre', state
if is_post_altair(spec):
def get_source_deltas(state):
return spec.get_flag_index_deltas(state, spec.TIMELY_SOURCE_FLAG_INDEX)
def get_head_deltas(state):
return spec.get_flag_index_deltas(state, spec.TIMELY_HEAD_FLAG_INDEX)
def get_target_deltas(state):
return spec.get_flag_index_deltas(state, spec.TIMELY_TARGET_FLAG_INDEX)
yield from run_attestation_component_deltas(
spec,
state,
spec.get_source_deltas if not is_post_altair(spec) else get_source_deltas,
spec.get_matching_source_attestations,
'source_deltas',
)
yield from run_attestation_component_deltas(
spec,
state,
spec.get_target_deltas if not is_post_altair(spec) else get_target_deltas,
spec.get_matching_target_attestations,
'target_deltas',
)
yield from run_attestation_component_deltas(
spec,
state,
spec.get_head_deltas if not is_post_altair(spec) else get_head_deltas,
spec.get_matching_head_attestations,
'head_deltas',
)
if not is_post_altair(spec):
yield from run_get_inclusion_delay_deltas(spec, state)
yield from run_get_inactivity_penalty_deltas(spec, state)
def deltas_name_to_flag_index(spec, deltas_name):
if 'source' in deltas_name:
return spec.TIMELY_SOURCE_FLAG_INDEX
elif 'head' in deltas_name:
return spec.TIMELY_HEAD_FLAG_INDEX
elif 'target' in deltas_name:
return spec.TIMELY_TARGET_FLAG_INDEX
raise ValueError("Wrong deltas_name %s" % deltas_name)
def run_attestation_component_deltas(spec, state, component_delta_fn, matching_att_fn, deltas_name):
"""
Run ``component_delta_fn``, yielding:
- deltas ('{``deltas_name``}')
"""
rewards, penalties = component_delta_fn(state)
yield deltas_name, Deltas(rewards=rewards, penalties=penalties)
if not is_post_altair(spec):
matching_attestations = matching_att_fn(state, spec.get_previous_epoch(state))
matching_indices = spec.get_unslashed_attesting_indices(state, matching_attestations)
else:
matching_indices = spec.get_unslashed_participating_indices(
state, deltas_name_to_flag_index(spec, deltas_name), spec.get_previous_epoch(state)
)
eligible_indices = spec.get_eligible_validator_indices(state)
for index in range(len(state.validators)):
if index not in eligible_indices:
assert rewards[index] == 0
assert penalties[index] == 0
continue
validator = state.validators[index]
enough_for_reward = has_enough_for_reward(spec, state, index)
if index in matching_indices and not validator.slashed:
if is_post_altair(spec):
if not spec.is_in_inactivity_leak(state) and enough_for_reward:
assert rewards[index] > 0
else:
assert rewards[index] == 0
else:
if enough_for_reward:
assert rewards[index] > 0
else:
assert rewards[index] == 0
assert penalties[index] == 0
else:
assert rewards[index] == 0
if is_post_altair(spec) and 'head' in deltas_name:
assert penalties[index] == 0
elif enough_for_reward:
assert penalties[index] > 0
else:
assert penalties[index] == 0
def run_get_inclusion_delay_deltas(spec, state):
"""
Run ``get_inclusion_delay_deltas``, yielding:
- inclusion delay deltas ('inclusion_delay_deltas')
"""
if is_post_altair(spec):
# No inclusion_delay_deltas
yield 'inclusion_delay_deltas', Deltas(rewards=[0] * len(state.validators),
penalties=[0] * len(state.validators))
return
rewards, penalties = spec.get_inclusion_delay_deltas(state)
yield 'inclusion_delay_deltas', Deltas(rewards=rewards, penalties=penalties)
eligible_attestations = spec.get_matching_source_attestations(state, spec.get_previous_epoch(state))
attesting_indices = spec.get_unslashed_attesting_indices(state, eligible_attestations)
rewarded_indices = set()
rewarded_proposer_indices = set()
# Ensure attesters with enough balance are rewarded for attestations
# Track those that are rewarded and track proposers that should be rewarded
for index in range(len(state.validators)):
if index in attesting_indices and has_enough_for_reward(spec, state, index):
assert rewards[index] > 0
rewarded_indices.add(index)
# Track proposer of earliest included attestation for the validator defined by index
earliest_attestation = min([
a for a in eligible_attestations
if index in spec.get_attesting_indices(state, a.data, a.aggregation_bits)
], key=lambda a: a.inclusion_delay)
rewarded_proposer_indices.add(earliest_attestation.proposer_index)
# Ensure all expected proposers have been rewarded
# Track reward indices
proposing_indices = [a.proposer_index for a in eligible_attestations]
for index in proposing_indices:
if index in rewarded_proposer_indices:
assert rewards[index] > 0
rewarded_indices.add(index)
# Ensure all expected non-rewarded indices received no reward
for index in range(len(state.validators)):
assert penalties[index] == 0
if index not in rewarded_indices:
assert rewards[index] == 0
def run_get_inactivity_penalty_deltas(spec, state):
"""
Run ``get_inactivity_penalty_deltas``, yielding:
- inactivity penalty deltas ('inactivity_penalty_deltas')
"""
rewards, penalties = spec.get_inactivity_penalty_deltas(state)
yield 'inactivity_penalty_deltas', Deltas(rewards=rewards, penalties=penalties)
if not is_post_altair(spec):
matching_attestations = spec.get_matching_target_attestations(state, spec.get_previous_epoch(state))
matching_attesting_indices = spec.get_unslashed_attesting_indices(state, matching_attestations)
else:
matching_attesting_indices = spec.get_unslashed_participating_indices(
state, spec.TIMELY_TARGET_FLAG_INDEX, spec.get_previous_epoch(state)
)
eligible_indices = spec.get_eligible_validator_indices(state)
for index in range(len(state.validators)):
assert rewards[index] == 0
if index not in eligible_indices:
assert penalties[index] == 0
continue
if spec.is_in_inactivity_leak(state):
# Compute base_penalty
base_reward = spec.get_base_reward(state, index)
if not is_post_altair(spec):
cancel_base_rewards_per_epoch = spec.BASE_REWARDS_PER_EPOCH
base_penalty = cancel_base_rewards_per_epoch * base_reward - spec.get_proposer_reward(state, index)
if not has_enough_for_reward(spec, state, index):
assert penalties[index] == 0
elif index in matching_attesting_indices or not has_enough_for_leak_penalty(spec, state, index):
if is_post_altair(spec):
assert penalties[index] == 0
else:
assert penalties[index] == base_penalty
else:
if is_post_altair(spec):
assert penalties[index] > 0
else:
assert penalties[index] > base_penalty
else:
if not is_post_altair(spec):
assert penalties[index] == 0
continue
else:
                # post-altair, this penalty is derived from the inactivity score,
                # regardless of whether the state is leaking
if index in matching_attesting_indices:
assert penalties[index] == 0
else:
# copied from spec:
penalty_numerator = state.validators[index].effective_balance * state.inactivity_scores[index]
penalty_denominator = spec.config.INACTIVITY_SCORE_BIAS * get_inactivity_penalty_quotient(spec)
assert penalties[index] == penalty_numerator // penalty_denominator
def transition_state_to_leak(spec, state, epochs=None):
if epochs is None:
# +2 because finality delay is based on previous_epoch and must be more than `MIN_EPOCHS_TO_INACTIVITY_PENALTY`
epochs = spec.MIN_EPOCHS_TO_INACTIVITY_PENALTY + 2
assert epochs > spec.MIN_EPOCHS_TO_INACTIVITY_PENALTY
for _ in range(epochs):
next_epoch(spec, state)
assert spec.is_in_inactivity_leak(state)
_cache_dict = LRU(size=10)
def leaking(epochs=None):
def deco(fn):
def entry(*args, spec, state, **kw):
# If the pre-state is not already known in the LRU, then take it,
# transition it to leak, and put it in the LRU.
# The input state is likely already cached, so the hash-tree-root does not affect speed.
key = (state.hash_tree_root(), spec.MIN_EPOCHS_TO_INACTIVITY_PENALTY, spec.SLOTS_PER_EPOCH, epochs)
global _cache_dict
if key not in _cache_dict:
transition_state_to_leak(spec, state, epochs=epochs)
_cache_dict[key] = state.get_backing() # cache the tree structure, not the view wrapping it.
# Take an entry out of the LRU.
# No copy is necessary, as we wrap the immutable backing with a new view.
state = spec.BeaconState(backing=_cache_dict[key])
return fn(*args, spec=spec, state=state, **kw)
return entry
return deco
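# Usage sketch (hedged): ``leaking`` wraps a rewards test so its pre-state is
# already in an inactivity leak; the expensive transition is memoized above.
def _example_leaking_usage():
    @leaking()
    def _leaky_test(spec, state):
        assert spec.is_in_inactivity_leak(state)
        yield from run_deltas(spec, state)
    return _leaky_test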
def run_test_empty(spec, state):
# Do not add any attestations to state
yield from run_deltas(spec, state)
def run_test_full_all_correct(spec, state):
cached_prepare_state_with_attestations(spec, state)
yield from run_deltas(spec, state)
def run_test_full_but_partial_participation(spec, state, rng=Random(5522)):
cached_prepare_state_with_attestations(spec, state)
if not is_post_altair(spec):
for a in state.previous_epoch_attestations:
a.aggregation_bits = [rng.choice([True, False]) for _ in a.aggregation_bits]
else:
for index in range(len(state.validators)):
if rng.choice([True, False]):
state.previous_epoch_participation[index] = spec.ParticipationFlags(0b0000_0000)
yield from run_deltas(spec, state)
def run_test_partial(spec, state, fraction_filled):
cached_prepare_state_with_attestations(spec, state)
# Remove portion of attestations
if not is_post_altair(spec):
num_attestations = int(len(state.previous_epoch_attestations) * fraction_filled)
state.previous_epoch_attestations = state.previous_epoch_attestations[:num_attestations]
else:
for index in range(int(len(state.validators) * fraction_filled)):
state.previous_epoch_participation[index] = spec.ParticipationFlags(0b0000_0000)
yield from run_deltas(spec, state)
def run_test_half_full(spec, state):
yield from run_test_partial(spec, state, 0.5)
def run_test_one_attestation_one_correct(spec, state):
cached_prepare_state_with_attestations(spec, state)
# Remove all attestations except for the first one
state.previous_epoch_attestations = state.previous_epoch_attestations[:1]
yield from run_deltas(spec, state)
def run_test_with_not_yet_activated_validators(spec, state, rng=Random(5555)):
set_some_new_deposits(spec, state, rng)
cached_prepare_state_with_attestations(spec, state)
yield from run_deltas(spec, state)
def run_test_with_exited_validators(spec, state, rng=Random(1337)):
exit_random_validators(spec, state, rng)
cached_prepare_state_with_attestations(spec, state)
yield from run_deltas(spec, state)
def run_test_with_slashed_validators(spec, state, rng=Random(3322)):
exit_random_validators(spec, state, rng)
slash_random_validators(spec, state, rng)
cached_prepare_state_with_attestations(spec, state)
yield from run_deltas(spec, state)
def run_test_some_very_low_effective_balances_that_attested(spec, state):
cached_prepare_state_with_attestations(spec, state)
# Set some balances to be very low (including 0)
assert len(state.validators) >= 5
for i, index in enumerate(range(5)):
state.validators[index].effective_balance = i
yield from run_deltas(spec, state)
def run_test_some_very_low_effective_balances_that_did_not_attest(spec, state):
cached_prepare_state_with_attestations(spec, state)
if not is_post_altair(spec):
# Remove attestation
attestation = state.previous_epoch_attestations[0]
state.previous_epoch_attestations = state.previous_epoch_attestations[1:]
# Set removed indices effective balance to very low amount
indices = spec.get_unslashed_attesting_indices(state, [attestation])
for i, index in enumerate(indices):
state.validators[index].effective_balance = i
else:
index = 0
state.validators[index].effective_balance = 1
state.previous_epoch_participation[index] = spec.ParticipationFlags(0b0000_0000)
yield from run_deltas(spec, state)
def run_test_full_fraction_incorrect(spec, state, correct_target, correct_head, fraction_incorrect):
cached_prepare_state_with_attestations(spec, state)
# Make fraction_incorrect of pending attestations have bad target/head as specified
num_incorrect = int(fraction_incorrect * len(state.previous_epoch_attestations))
for pending_attestation in state.previous_epoch_attestations[:num_incorrect]:
if not correct_target:
pending_attestation.data.target.root = b'\x55' * 32
if not correct_head:
pending_attestation.data.beacon_block_root = b'\x66' * 32
yield from run_deltas(spec, state)
def run_test_full_delay_one_slot(spec, state):
cached_prepare_state_with_attestations(spec, state)
for a in state.previous_epoch_attestations:
a.inclusion_delay += 1
yield from run_deltas(spec, state)
def run_test_full_delay_max_slots(spec, state):
cached_prepare_state_with_attestations(spec, state)
for a in state.previous_epoch_attestations:
a.inclusion_delay += spec.SLOTS_PER_EPOCH
yield from run_deltas(spec, state)
def run_test_full_mixed_delay(spec, state, rng=Random(1234)):
cached_prepare_state_with_attestations(spec, state)
for a in state.previous_epoch_attestations:
a.inclusion_delay = rng.randint(1, spec.SLOTS_PER_EPOCH)
yield from run_deltas(spec, state)
def run_test_proposer_not_in_attestations(spec, state):
cached_prepare_state_with_attestations(spec, state)
# Get an attestation where the proposer is not in the committee
non_proposer_attestations = []
for a in state.previous_epoch_attestations:
if a.proposer_index not in spec.get_unslashed_attesting_indices(state, [a]):
non_proposer_attestations.append(a)
assert any(non_proposer_attestations)
state.previous_epoch_attestations = non_proposer_attestations
yield from run_deltas(spec, state)
def run_test_duplicate_attestations_at_later_slots(spec, state):
cached_prepare_state_with_attestations(spec, state)
# Remove 2/3 of attestations to make it more interesting
num_attestations = int(len(state.previous_epoch_attestations) * 0.33)
state.previous_epoch_attestations = state.previous_epoch_attestations[:num_attestations]
# Get map of the proposer at each slot to make valid-looking duplicate attestations
per_slot_proposers = {
(a.data.slot + a.inclusion_delay): a.proposer_index
for a in state.previous_epoch_attestations
}
max_slot = max([a.data.slot + a.inclusion_delay for a in state.previous_epoch_attestations])
later_attestations = []
for a in state.previous_epoch_attestations:
# Only have proposers for previous epoch so do not create later
# duplicate if slot exceeds the max slot in previous_epoch_attestations
if a.data.slot + a.inclusion_delay >= max_slot:
continue
later_a = a.copy()
later_a.inclusion_delay += 1
later_a.proposer_index = per_slot_proposers[later_a.data.slot + later_a.inclusion_delay]
later_attestations.append(later_a)
assert any(later_attestations)
state.previous_epoch_attestations = sorted(
state.previous_epoch_attestations + later_attestations,
key=lambda a: a.data.slot + a.inclusion_delay
)
yield from run_deltas(spec, state)
def run_test_all_balances_too_low_for_reward(spec, state):
cached_prepare_state_with_attestations(spec, state)
for index in range(len(state.validators)):
state.validators[index].effective_balance = 10
yield from run_deltas(spec, state)
def run_test_full_random(spec, state, rng=Random(8020)):
randomize_state(spec, state, rng)
yield from run_deltas(spec, state)
| 19,779 | 36.965451 | 119 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/block.py
|
from eth2spec.test.helpers.execution_payload import build_empty_execution_payload
from eth2spec.test.helpers.forks import is_post_altair, is_post_bellatrix
from eth2spec.test.helpers.keys import privkeys
from eth2spec.utils import bls
from eth2spec.utils.bls import only_with_bls
from eth2spec.utils.ssz.ssz_impl import hash_tree_root
def get_proposer_index_maybe(spec, state, slot, proposer_index=None):
if proposer_index is None:
assert state.slot <= slot
if slot == state.slot:
proposer_index = spec.get_beacon_proposer_index(state)
else:
if spec.compute_epoch_at_slot(state.slot) + 1 > spec.compute_epoch_at_slot(slot):
print("warning: block slot far away, and no proposer index manually given."
" Signing block is slow due to transition for proposer index calculation.")
# use stub state to get proposer index of future slot
stub_state = state.copy()
if stub_state.slot < slot:
spec.process_slots(stub_state, slot)
proposer_index = spec.get_beacon_proposer_index(stub_state)
return proposer_index
@only_with_bls()
def apply_randao_reveal(spec, state, block, proposer_index=None):
assert state.slot <= block.slot
proposer_index = get_proposer_index_maybe(spec, state, block.slot, proposer_index)
privkey = privkeys[proposer_index]
domain = spec.get_domain(state, spec.DOMAIN_RANDAO, spec.compute_epoch_at_slot(block.slot))
signing_root = spec.compute_signing_root(spec.compute_epoch_at_slot(block.slot), domain)
block.body.randao_reveal = bls.Sign(privkey, signing_root)
# Fully ignore the function if BLS is off, since beacon-proposer index calculation is slow.
@only_with_bls()
def apply_sig(spec, state, signed_block, proposer_index=None):
block = signed_block.message
proposer_index = get_proposer_index_maybe(spec, state, block.slot, proposer_index)
privkey = privkeys[proposer_index]
domain = spec.get_domain(state, spec.DOMAIN_BEACON_PROPOSER, spec.compute_epoch_at_slot(block.slot))
signing_root = spec.compute_signing_root(block, domain)
signed_block.signature = bls.Sign(privkey, signing_root)
def sign_block(spec, state, block, proposer_index=None):
signed_block = spec.SignedBeaconBlock(message=block)
apply_sig(spec, state, signed_block, proposer_index)
return signed_block
def transition_unsigned_block(spec, state, block):
assert state.slot < block.slot # Preserve assertion from state transition to avoid strange pre-states from testing
if state.slot < block.slot:
spec.process_slots(state, block.slot)
assert state.latest_block_header.slot < block.slot # There may not already be a block in this slot or past it.
assert state.slot == block.slot # The block must be for this slot
spec.process_block(state, block)
return block
def apply_empty_block(spec, state, slot=None):
"""
Transition via an empty block (on current slot, assuming no block has been applied yet).
"""
block = build_empty_block(spec, state, slot)
return transition_unsigned_block(spec, state, block)
def build_empty_block(spec, state, slot=None):
"""
Build empty block for ``slot``, built upon the latest block header seen by ``state``.
Slot must be greater than or equal to the current slot in ``state``.
"""
if slot is None:
slot = state.slot
if slot < state.slot:
raise Exception("build_empty_block cannot build blocks for past slots")
if state.slot < slot:
# transition forward in copied state to grab relevant data from state
state = state.copy()
spec.process_slots(state, slot)
state, parent_block_root = get_state_and_beacon_parent_root_at_slot(spec, state, slot)
empty_block = spec.BeaconBlock()
empty_block.slot = slot
empty_block.proposer_index = spec.get_beacon_proposer_index(state)
empty_block.body.eth1_data.deposit_count = state.eth1_deposit_index
empty_block.parent_root = parent_block_root
apply_randao_reveal(spec, state, empty_block)
if is_post_altair(spec):
empty_block.body.sync_aggregate.sync_committee_signature = spec.G2_POINT_AT_INFINITY
if is_post_bellatrix(spec):
empty_block.body.execution_payload = build_empty_execution_payload(spec, state)
return empty_block
def build_empty_block_for_next_slot(spec, state):
return build_empty_block(spec, state, state.slot + 1)
def get_state_and_beacon_parent_root_at_slot(spec, state, slot):
if slot < state.slot:
raise Exception("Cannot build blocks for past slots")
if slot > state.slot:
# transition forward in copied state to grab relevant data from state
state = state.copy()
spec.process_slots(state, slot)
previous_block_header = state.latest_block_header.copy()
if previous_block_header.state_root == spec.Root():
previous_block_header.state_root = hash_tree_root(state)
beacon_parent_root = hash_tree_root(previous_block_header)
return state, beacon_parent_root
| 5,113 | 40.241935 | 119 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/merkle.py
|
from remerkleable.tree import gindex_bit_iter
def build_proof(anchor, leaf_index):
if leaf_index <= 1:
return [] # Nothing to prove / invalid index
node = anchor
proof = []
# Walk down, top to bottom to the leaf
bit_iter, _ = gindex_bit_iter(leaf_index)
for bit in bit_iter:
# Always take the opposite hand for the proof.
# 1 = right as leaf, thus get left
if bit:
proof.append(node.get_left().merkle_root())
node = node.get_right()
else:
proof.append(node.get_right().merkle_root())
node = node.get_left()
return list(reversed(proof))
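# --- Illustrative sketch (not part of the original helper) ---
# How a proof produced by `build_proof` would be consumed: the helper returns
# sibling roots bottom-up (note the final `reversed`), so verification hashes
# the leaf with each sibling while halving the generalized index. Assumes
# SSZ-style SHA-256 merkleization; `verify_built_proof` is a hypothetical
# helper, not a remerkleable/eth2spec API.
import hashlib

def verify_built_proof(leaf_root, proof, leaf_index, anchor_root):
    node = leaf_root
    index = leaf_index
    for sibling in proof:
        if index % 2 == 1:  # node is a right child: sibling hashes on the left
            node = hashlib.sha256(sibling + node).digest()
        else:               # node is a left child: sibling hashes on the right
            node = hashlib.sha256(node + sibling).digest()
        index //= 2
    return node == anchor_root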
| 656 | 28.863636 | 56 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py
|
from eth_utils import encode_hex
from eth2spec.test.exceptions import BlockNotFoundException
from eth2spec.test.helpers.attestations import (
next_epoch_with_attestations,
next_slots_with_attestations,
state_transition_with_full_block,
)
def get_anchor_root(spec, state):
anchor_block_header = state.latest_block_header.copy()
if anchor_block_header.state_root == spec.Bytes32():
anchor_block_header.state_root = spec.hash_tree_root(state)
return spec.hash_tree_root(anchor_block_header)
def tick_and_add_block(spec, store, signed_block, test_steps, valid=True,
merge_block=False, block_not_found=False, is_optimistic=False):
pre_state = store.block_states[signed_block.message.parent_root]
if merge_block:
assert spec.is_merge_transition_block(pre_state, signed_block.message.body)
block_time = pre_state.genesis_time + signed_block.message.slot * spec.config.SECONDS_PER_SLOT
while store.time < block_time:
time = pre_state.genesis_time + (spec.get_current_slot(store) + 1) * spec.config.SECONDS_PER_SLOT
on_tick_and_append_step(spec, store, time, test_steps)
post_state = yield from add_block(
spec, store, signed_block, test_steps,
valid=valid,
block_not_found=block_not_found,
is_optimistic=is_optimistic,
)
return post_state
def add_attestation(spec, store, attestation, test_steps, is_from_block=False):
spec.on_attestation(store, attestation, is_from_block=is_from_block)
yield get_attestation_file_name(attestation), attestation
test_steps.append({'attestation': get_attestation_file_name(attestation)})
def add_attestations(spec, store, attestations, test_steps, is_from_block=False):
for attestation in attestations:
yield from add_attestation(spec, store, attestation, test_steps, is_from_block=is_from_block)
def tick_and_run_on_attestation(spec, store, attestation, test_steps, is_from_block=False):
parent_block = store.blocks[attestation.data.beacon_block_root]
pre_state = store.block_states[spec.hash_tree_root(parent_block)]
block_time = pre_state.genesis_time + parent_block.slot * spec.config.SECONDS_PER_SLOT
next_epoch_time = block_time + spec.SLOTS_PER_EPOCH * spec.config.SECONDS_PER_SLOT
if store.time < next_epoch_time:
spec.on_tick(store, next_epoch_time)
test_steps.append({'tick': int(next_epoch_time)})
yield from add_attestation(spec, store, attestation, test_steps, is_from_block)
def run_on_attestation(spec, store, attestation, is_from_block=False, valid=True):
if not valid:
try:
spec.on_attestation(store, attestation, is_from_block=is_from_block)
except AssertionError:
return
else:
assert False
spec.on_attestation(store, attestation, is_from_block=is_from_block)
def get_genesis_forkchoice_store(spec, genesis_state):
store, _ = get_genesis_forkchoice_store_and_block(spec, genesis_state)
return store
def get_genesis_forkchoice_store_and_block(spec, genesis_state):
assert genesis_state.slot == spec.GENESIS_SLOT
genesis_block = spec.BeaconBlock(state_root=genesis_state.hash_tree_root())
return spec.get_forkchoice_store(genesis_state, genesis_block), genesis_block
def get_block_file_name(block):
return f"block_{encode_hex(block.hash_tree_root())}"
def get_attestation_file_name(attestation):
return f"attestation_{encode_hex(attestation.hash_tree_root())}"
def get_attester_slashing_file_name(attester_slashing):
return f"attester_slashing_{encode_hex(attester_slashing.hash_tree_root())}"
def on_tick_and_append_step(spec, store, time, test_steps):
spec.on_tick(store, time)
test_steps.append({'tick': int(time)})
output_store_checks(spec, store, test_steps)
def run_on_block(spec, store, signed_block, valid=True):
if not valid:
try:
spec.on_block(store, signed_block)
except AssertionError:
return
else:
assert False
spec.on_block(store, signed_block)
assert store.blocks[signed_block.message.hash_tree_root()] == signed_block.message
def add_block(spec,
store,
signed_block,
test_steps,
valid=True,
block_not_found=False,
is_optimistic=False):
"""
    Run on_block, plus the on_attestation and on_attester_slashing calls implied by the block's contents
"""
yield get_block_file_name(signed_block), signed_block
if not valid:
if is_optimistic:
run_on_block(spec, store, signed_block, valid=True)
test_steps.append({
'block': get_block_file_name(signed_block),
'valid': False,
})
else:
try:
run_on_block(spec, store, signed_block, valid=True)
except (AssertionError, BlockNotFoundException) as e:
if isinstance(e, BlockNotFoundException) and not block_not_found:
assert False
test_steps.append({
'block': get_block_file_name(signed_block),
'valid': False,
})
return
else:
assert False
else:
run_on_block(spec, store, signed_block, valid=True)
test_steps.append({'block': get_block_file_name(signed_block)})
    # An on_block step implies receiving the block's attestations
for attestation in signed_block.message.body.attestations:
run_on_attestation(spec, store, attestation, is_from_block=True, valid=True)
    # An on_block step implies receiving the block's attester slashings
for attester_slashing in signed_block.message.body.attester_slashings:
run_on_attester_slashing(spec, store, attester_slashing, valid=True)
block_root = signed_block.message.hash_tree_root()
assert store.blocks[block_root] == signed_block.message
assert store.block_states[block_root].hash_tree_root() == signed_block.message.state_root
if not is_optimistic:
output_store_checks(spec, store, test_steps)
return store.block_states[signed_block.message.hash_tree_root()]
def run_on_attester_slashing(spec, store, attester_slashing, valid=True):
if not valid:
try:
spec.on_attester_slashing(store, attester_slashing)
except AssertionError:
return
else:
assert False
spec.on_attester_slashing(store, attester_slashing)
def add_attester_slashing(spec, store, attester_slashing, test_steps, valid=True):
slashing_file_name = get_attester_slashing_file_name(attester_slashing)
yield get_attester_slashing_file_name(attester_slashing), attester_slashing
if not valid:
try:
run_on_attester_slashing(spec, store, attester_slashing)
except AssertionError:
test_steps.append({
'attester_slashing': slashing_file_name,
'valid': False,
})
return
else:
assert False
run_on_attester_slashing(spec, store, attester_slashing)
test_steps.append({'attester_slashing': slashing_file_name})
def get_formatted_head_output(spec, store):
head = spec.get_head(store)
slot = store.blocks[head].slot
return {
'slot': int(slot),
'root': encode_hex(head),
}
def output_head_check(spec, store, test_steps):
test_steps.append({
'checks': {
'head': get_formatted_head_output(spec, store),
}
})
def output_store_checks(spec, store, test_steps):
test_steps.append({
'checks': {
'time': int(store.time),
'head': get_formatted_head_output(spec, store),
'justified_checkpoint': {
'epoch': int(store.justified_checkpoint.epoch),
'root': encode_hex(store.justified_checkpoint.root),
},
'finalized_checkpoint': {
'epoch': int(store.finalized_checkpoint.epoch),
'root': encode_hex(store.finalized_checkpoint.root),
},
'proposer_boost_root': encode_hex(store.proposer_boost_root),
}
})
def apply_next_epoch_with_attestations(spec,
state,
store,
fill_cur_epoch,
fill_prev_epoch,
participation_fn=None,
test_steps=None):
if test_steps is None:
test_steps = []
_, new_signed_blocks, post_state = next_epoch_with_attestations(
spec, state, fill_cur_epoch, fill_prev_epoch, participation_fn=participation_fn)
for signed_block in new_signed_blocks:
block = signed_block.message
yield from tick_and_add_block(spec, store, signed_block, test_steps)
block_root = block.hash_tree_root()
assert store.blocks[block_root] == block
last_signed_block = signed_block
assert store.block_states[block_root].hash_tree_root() == post_state.hash_tree_root()
return post_state, store, last_signed_block
def apply_next_slots_with_attestations(spec,
state,
store,
slots,
fill_cur_epoch,
fill_prev_epoch,
test_steps,
participation_fn=None):
_, new_signed_blocks, post_state = next_slots_with_attestations(
spec, state, slots, fill_cur_epoch, fill_prev_epoch, participation_fn=participation_fn)
for signed_block in new_signed_blocks:
block = signed_block.message
yield from tick_and_add_block(spec, store, signed_block, test_steps)
block_root = block.hash_tree_root()
assert store.blocks[block_root] == block
last_signed_block = signed_block
assert store.block_states[block_root].hash_tree_root() == post_state.hash_tree_root()
return post_state, store, last_signed_block
def is_ready_to_justify(spec, state):
"""
Check if the given ``state`` will trigger justification updates at epoch boundary.
"""
temp_state = state.copy()
spec.process_justification_and_finalization(temp_state)
return temp_state.current_justified_checkpoint.epoch > state.current_justified_checkpoint.epoch
def find_next_justifying_slot(spec,
state,
fill_cur_epoch,
fill_prev_epoch,
participation_fn=None):
temp_state = state.copy()
signed_blocks = []
justifying_slot = None
while justifying_slot is None:
signed_block = state_transition_with_full_block(
spec,
temp_state,
fill_cur_epoch,
fill_prev_epoch,
participation_fn,
)
signed_blocks.append(signed_block)
if is_ready_to_justify(spec, temp_state):
justifying_slot = temp_state.slot
return signed_blocks, justifying_slot
def get_pow_block_file_name(pow_block):
return f"pow_block_{encode_hex(pow_block.block_hash)}"
def add_pow_block(spec, store, pow_block, test_steps):
yield get_pow_block_file_name(pow_block), pow_block
test_steps.append({'pow_block': get_pow_block_file_name(pow_block)})
| 11,578 | 34.959627 | 105 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/sync_committee.py
|
from collections import Counter
from eth2spec.test.context import (
expect_assertion_error,
)
from eth2spec.test.helpers.keys import privkeys
from eth2spec.test.helpers.block import (
build_empty_block_for_next_slot,
)
from eth2spec.test.helpers.block_processing import run_block_processing_to
from eth2spec.utils import bls
def compute_sync_committee_signature(spec, state, slot, privkey, block_root=None, domain_type=None):
if not domain_type:
domain_type = spec.DOMAIN_SYNC_COMMITTEE
domain = spec.get_domain(state, domain_type, spec.compute_epoch_at_slot(slot))
if block_root is None:
if slot == state.slot:
block_root = build_empty_block_for_next_slot(spec, state).parent_root
else:
block_root = spec.get_block_root_at_slot(state, slot)
signing_root = spec.compute_signing_root(block_root, domain)
return bls.Sign(privkey, signing_root)
def compute_aggregate_sync_committee_signature(spec, state, slot, participants, block_root=None, domain_type=None):
if len(participants) == 0:
return spec.G2_POINT_AT_INFINITY
signatures = []
for validator_index in participants:
privkey = privkeys[validator_index]
signatures.append(
compute_sync_committee_signature(
spec,
state,
slot,
privkey,
block_root=block_root,
domain_type=domain_type,
)
)
return bls.Aggregate(signatures)
def compute_sync_committee_inclusion_reward(spec, state):
total_active_increments = spec.get_total_active_balance(state) // spec.EFFECTIVE_BALANCE_INCREMENT
total_base_rewards = spec.get_base_reward_per_increment(state) * total_active_increments
max_participant_rewards = (total_base_rewards * spec.SYNC_REWARD_WEIGHT
// spec.WEIGHT_DENOMINATOR // spec.SLOTS_PER_EPOCH)
return max_participant_rewards // spec.SYNC_COMMITTEE_SIZE
def compute_sync_committee_participant_reward_and_penalty(
spec, state, participant_index, committee_indices, committee_bits):
inclusion_reward = compute_sync_committee_inclusion_reward(spec, state)
included_indices = [index for index, bit in zip(committee_indices, committee_bits) if bit]
not_included_indices = [index for index, bit in zip(committee_indices, committee_bits) if not bit]
included_multiplicities = Counter(included_indices)
not_included_multiplicities = Counter(not_included_indices)
return (
spec.Gwei(inclusion_reward * included_multiplicities[participant_index]),
spec.Gwei(inclusion_reward * not_included_multiplicities[participant_index])
)
def compute_sync_committee_proposer_reward(spec, state, committee_indices, committee_bits):
proposer_reward_denominator = spec.WEIGHT_DENOMINATOR - spec.PROPOSER_WEIGHT
inclusion_reward = compute_sync_committee_inclusion_reward(spec, state)
participant_number = committee_bits.count(True)
participant_reward = inclusion_reward * spec.PROPOSER_WEIGHT // proposer_reward_denominator
return spec.Gwei(participant_reward * participant_number)
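# --- Illustrative sketch (not part of the original helpers) ---
# Tracing the two reward formulas above with mainnet-like Altair constants,
# treated here as standalone assumptions rather than read from `spec`:
#   SYNC_REWARD_WEIGHT=2, PROPOSER_WEIGHT=8, WEIGHT_DENOMINATOR=64,
#   SLOTS_PER_EPOCH=32, SYNC_COMMITTEE_SIZE=512.
def _demo_sync_committee_rewards(total_base_rewards=10_000_000_000):
    SYNC_REWARD_WEIGHT, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR = 2, 8, 64
    SLOTS_PER_EPOCH, SYNC_COMMITTEE_SIZE = 32, 512
    max_participant_rewards = (total_base_rewards * SYNC_REWARD_WEIGHT
                               // WEIGHT_DENOMINATOR // SLOTS_PER_EPOCH)
    inclusion_reward = max_participant_rewards // SYNC_COMMITTEE_SIZE
    # The proposer earns PROPOSER_WEIGHT / (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT)
    # of each included participant's inclusion reward:
    proposer_reward_per_participant = (
        inclusion_reward * PROPOSER_WEIGHT // (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT)
    )
    return inclusion_reward, proposer_reward_per_participant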
def compute_committee_indices(state, committee=None):
"""
Given a ``committee``, calculate and return the related indices
"""
if committee is None:
committee = state.current_sync_committee
all_pubkeys = [v.pubkey for v in state.validators]
return [all_pubkeys.index(pubkey) for pubkey in committee.pubkeys]
def validate_sync_committee_rewards(spec, pre_state, post_state, committee_indices, committee_bits, proposer_index):
for index in range(len(post_state.validators)):
reward = 0
penalty = 0
if index in committee_indices:
_reward, _penalty = compute_sync_committee_participant_reward_and_penalty(
spec,
pre_state,
index,
committee_indices,
committee_bits,
)
reward += _reward
penalty += _penalty
if proposer_index == index:
reward += compute_sync_committee_proposer_reward(
spec,
pre_state,
committee_indices,
committee_bits,
)
balance = pre_state.balances[index] + reward
assert post_state.balances[index] == (0 if balance < penalty else balance - penalty)
def run_sync_committee_processing(spec, state, block, expect_exception=False, skip_reward_validation=False):
"""
Processes everything up to the sync committee work, then runs the sync committee work in isolation, and
produces a pre-state and post-state (None if exception) specifically for sync-committee processing changes.
"""
pre_state = state.copy()
# process up to the sync committee work
call = run_block_processing_to(spec, state, block, 'process_sync_aggregate')
yield 'pre', state
yield 'sync_aggregate', block.body.sync_aggregate
if expect_exception:
expect_assertion_error(lambda: call(state, block))
yield 'post', None
else:
call(state, block)
yield 'post', state
if expect_exception:
assert pre_state.balances == state.balances
else:
committee_indices = compute_committee_indices(state, state.current_sync_committee)
committee_bits = block.body.sync_aggregate.sync_committee_bits
if not skip_reward_validation:
validate_sync_committee_rewards(
spec,
pre_state,
state,
committee_indices,
committee_bits,
block.proposer_index
)
def _build_block_for_next_slot_with_sync_participation(spec, state, committee_indices, committee_bits):
block = build_empty_block_for_next_slot(spec, state)
block.body.sync_aggregate = spec.SyncAggregate(
sync_committee_bits=committee_bits,
sync_committee_signature=compute_aggregate_sync_committee_signature(
spec,
state,
block.slot - 1,
[index for index, bit in zip(committee_indices, committee_bits) if bit],
block_root=block.parent_root,
)
)
return block
def run_successful_sync_committee_test(spec, state, committee_indices, committee_bits, skip_reward_validation=False):
block = _build_block_for_next_slot_with_sync_participation(spec, state, committee_indices, committee_bits)
yield from run_sync_committee_processing(spec, state, block, skip_reward_validation=skip_reward_validation)
| 6,629 | 39.426829 | 117 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/typing.py
|
from typing import NewType
SpecForkName = NewType("SpecForkName", str)
PresetBaseName = NewType("PresetBaseName", str)
| 120 | 23.2 | 47 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py
|
from eth_hash.auto import keccak
from trie import HexaryTrie
from rlp import encode
from rlp.sedes import big_endian_int, Binary, List
from eth2spec.debug.random_value import get_random_bytes_list
from eth2spec.test.helpers.forks import (
is_post_capella,
is_post_deneb,
is_post_eip6110,
)
def get_execution_payload_header(spec, execution_payload):
payload_header = spec.ExecutionPayloadHeader(
parent_hash=execution_payload.parent_hash,
fee_recipient=execution_payload.fee_recipient,
state_root=execution_payload.state_root,
receipts_root=execution_payload.receipts_root,
logs_bloom=execution_payload.logs_bloom,
prev_randao=execution_payload.prev_randao,
block_number=execution_payload.block_number,
gas_limit=execution_payload.gas_limit,
gas_used=execution_payload.gas_used,
timestamp=execution_payload.timestamp,
extra_data=execution_payload.extra_data,
base_fee_per_gas=execution_payload.base_fee_per_gas,
block_hash=execution_payload.block_hash,
transactions_root=spec.hash_tree_root(execution_payload.transactions)
)
if is_post_capella(spec):
payload_header.withdrawals_root = spec.hash_tree_root(execution_payload.withdrawals)
if is_post_deneb(spec):
payload_header.data_gas_used = execution_payload.data_gas_used
payload_header.excess_data_gas = execution_payload.excess_data_gas
if is_post_eip6110(spec):
payload_header.deposit_receipts_root = spec.hash_tree_root(execution_payload.deposit_receipts)
return payload_header
# https://eips.ethereum.org/EIPS/eip-2718
def compute_trie_root_from_indexed_data(data):
"""
Computes the root hash of `patriciaTrie(rlp(Index) => Data)` for a data array.
"""
t = HexaryTrie(db={})
for i, obj in enumerate(data):
k = encode(i, big_endian_int)
t.set(k, obj)
return t.root_hash
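# --- Illustrative sketch (not part of the original helper) ---
# Minimal usage of the helper above: the execution-block transactions root is
# the root of a trie keyed by the RLP-encoded list index. The payloads here
# are hypothetical opaque (EIP-2718 typed) transaction bytes.
def _demo_transactions_root():
    example_txs = [b"\x02" + b"\x01" * 16, b"\x02" + b"\x02" * 24]
    return compute_trie_root_from_indexed_data(example_txs)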
# https://eips.ethereum.org/EIPS/eip-4895
# https://eips.ethereum.org/EIPS/eip-4844
def compute_el_header_block_hash(spec,
payload_header,
transactions_trie_root,
withdrawals_trie_root=None,
deposit_receipts_trie_root=None):
"""
Computes the RLP execution block hash described by an `ExecutionPayloadHeader`.
"""
execution_payload_header_rlp = [
# parent_hash
(Binary(32, 32), payload_header.parent_hash),
# ommers_hash
(Binary(32, 32), bytes.fromhex("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347")),
# coinbase
(Binary(20, 20), payload_header.fee_recipient),
# state_root
(Binary(32, 32), payload_header.state_root),
# txs_root
(Binary(32, 32), transactions_trie_root),
# receipts_root
(Binary(32, 32), payload_header.receipts_root),
# logs_bloom
(Binary(256, 256), payload_header.logs_bloom),
# difficulty
(big_endian_int, 0),
# number
(big_endian_int, payload_header.block_number),
# gas_limit
(big_endian_int, payload_header.gas_limit),
# gas_used
(big_endian_int, payload_header.gas_used),
# timestamp
(big_endian_int, payload_header.timestamp),
# extradata
(Binary(0, 32), payload_header.extra_data),
# prev_randao
(Binary(32, 32), payload_header.prev_randao),
# nonce
(Binary(8, 8), bytes.fromhex("0000000000000000")),
# base_fee_per_gas
(big_endian_int, payload_header.base_fee_per_gas),
]
if is_post_capella(spec):
# withdrawals_root
execution_payload_header_rlp.append((Binary(32, 32), withdrawals_trie_root))
if is_post_deneb(spec):
        # data_gas_used and excess_data_gas
execution_payload_header_rlp.append((big_endian_int, payload_header.data_gas_used))
execution_payload_header_rlp.append((big_endian_int, payload_header.excess_data_gas))
if is_post_eip6110(spec):
# deposit_receipts_root
assert deposit_receipts_trie_root is not None
execution_payload_header_rlp.append((Binary(32, 32), deposit_receipts_trie_root))
sedes = List([schema for schema, _ in execution_payload_header_rlp])
values = [value for _, value in execution_payload_header_rlp]
encoded = encode(values, sedes)
return spec.Hash32(keccak(encoded))
# https://eips.ethereum.org/EIPS/eip-4895
def get_withdrawal_rlp(spec, withdrawal):
withdrawal_rlp = [
# index
(big_endian_int, withdrawal.index),
# validator_index
(big_endian_int, withdrawal.validator_index),
# address
(Binary(20, 20), withdrawal.address),
# amount
(big_endian_int, withdrawal.amount),
]
sedes = List([schema for schema, _ in withdrawal_rlp])
values = [value for _, value in withdrawal_rlp]
return encode(values, sedes)
def get_deposit_receipt_rlp(spec, deposit_receipt):
deposit_receipt_rlp = [
# pubkey
(Binary(48, 48), deposit_receipt.pubkey),
# withdrawal_credentials
(Binary(32, 32), deposit_receipt.withdrawal_credentials),
# amount
(big_endian_int, deposit_receipt.amount),
# pubkey
(Binary(96, 96), deposit_receipt.signature),
# index
(big_endian_int, deposit_receipt.index),
]
sedes = List([schema for schema, _ in deposit_receipt_rlp])
values = [value for _, value in deposit_receipt_rlp]
return encode(values, sedes)
def compute_el_block_hash(spec, payload):
transactions_trie_root = compute_trie_root_from_indexed_data(payload.transactions)
withdrawals_trie_root = None
deposit_receipts_trie_root = None
if is_post_capella(spec):
withdrawals_encoded = [get_withdrawal_rlp(spec, withdrawal) for withdrawal in payload.withdrawals]
withdrawals_trie_root = compute_trie_root_from_indexed_data(withdrawals_encoded)
if is_post_eip6110(spec):
deposit_receipts_encoded = [get_deposit_receipt_rlp(spec, receipt) for receipt in payload.deposit_receipts]
deposit_receipts_trie_root = compute_trie_root_from_indexed_data(deposit_receipts_encoded)
payload_header = get_execution_payload_header(spec, payload)
return compute_el_header_block_hash(
spec,
payload_header,
transactions_trie_root,
withdrawals_trie_root,
deposit_receipts_trie_root,
)
def build_empty_execution_payload(spec, state, randao_mix=None):
"""
Assuming a pre-state of the same slot, build a valid ExecutionPayload without any transactions.
"""
latest = state.latest_execution_payload_header
timestamp = spec.compute_timestamp_at_slot(state, state.slot)
empty_txs = spec.List[spec.Transaction, spec.MAX_TRANSACTIONS_PER_PAYLOAD]()
if randao_mix is None:
randao_mix = spec.get_randao_mix(state, spec.get_current_epoch(state))
payload = spec.ExecutionPayload(
parent_hash=latest.block_hash,
fee_recipient=spec.ExecutionAddress(),
state_root=latest.state_root, # no changes to the state
receipts_root=spec.Bytes32(bytes.fromhex("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347")),
logs_bloom=spec.ByteVector[spec.BYTES_PER_LOGS_BLOOM](), # TODO: zeroed logs bloom for empty logs ok?
block_number=latest.block_number + 1,
prev_randao=randao_mix,
gas_limit=latest.gas_limit, # retain same limit
gas_used=0, # empty block, 0 gas
timestamp=timestamp,
extra_data=spec.ByteList[spec.MAX_EXTRA_DATA_BYTES](),
base_fee_per_gas=latest.base_fee_per_gas, # retain same base_fee
transactions=empty_txs,
)
if is_post_capella(spec):
payload.withdrawals = spec.get_expected_withdrawals(state)
if is_post_deneb(spec):
payload.data_gas_used = 0
payload.excess_data_gas = 0
if is_post_eip6110(spec):
# just to be clear
payload.deposit_receipts = []
payload.block_hash = compute_el_block_hash(spec, payload)
return payload
def build_randomized_execution_payload(spec, state, rng):
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.fee_recipient = spec.ExecutionAddress(get_random_bytes_list(rng, 20))
execution_payload.state_root = spec.Bytes32(get_random_bytes_list(rng, 32))
execution_payload.receipts_root = spec.Bytes32(get_random_bytes_list(rng, 32))
execution_payload.logs_bloom = spec.ByteVector[spec.BYTES_PER_LOGS_BLOOM](
get_random_bytes_list(rng, spec.BYTES_PER_LOGS_BLOOM)
)
    execution_payload.block_number = rng.randint(0, int(10e10))
    execution_payload.gas_limit = rng.randint(0, int(10e10))
    execution_payload.gas_used = rng.randint(0, int(10e10))
extra_data_length = rng.randint(0, spec.MAX_EXTRA_DATA_BYTES)
execution_payload.extra_data = spec.ByteList[spec.MAX_EXTRA_DATA_BYTES](
get_random_bytes_list(rng, extra_data_length)
)
execution_payload.base_fee_per_gas = rng.randint(0, 2**256 - 1)
num_transactions = rng.randint(0, 100)
execution_payload.transactions = [
spec.Transaction(get_random_bytes_list(rng, rng.randint(0, 1000)))
for _ in range(num_transactions)
]
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
return execution_payload
def build_state_with_incomplete_transition(spec, state):
state = build_state_with_execution_payload_header(spec, state, spec.ExecutionPayloadHeader())
assert not spec.is_merge_transition_complete(state)
return state
def build_state_with_complete_transition(spec, state):
pre_state_payload = build_empty_execution_payload(spec, state)
payload_header = get_execution_payload_header(spec, pre_state_payload)
state = build_state_with_execution_payload_header(spec, state, payload_header)
assert spec.is_merge_transition_complete(state)
return state
def build_state_with_execution_payload_header(spec, state, execution_payload_header):
pre_state = state.copy()
pre_state.latest_execution_payload_header = execution_payload_header
return pre_state
| 10,290 | 37.543071 | 118 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/bls_to_execution_changes.py
|
from eth2spec.utils import bls
from eth2spec.test.helpers.keys import pubkeys, privkeys, pubkey_to_privkey
def get_signed_address_change(spec, state,
validator_index=None,
withdrawal_pubkey=None,
to_execution_address=None,
fork_version=None,
genesis_validators_root=None):
if validator_index is None:
validator_index = 0
if withdrawal_pubkey is None:
key_index = -1 - validator_index
withdrawal_pubkey = pubkeys[key_index]
withdrawal_privkey = privkeys[key_index]
else:
withdrawal_privkey = pubkey_to_privkey[withdrawal_pubkey]
if to_execution_address is None:
to_execution_address = b'\x42' * 20
if genesis_validators_root is None:
genesis_validators_root = state.genesis_validators_root
domain = spec.compute_domain(
spec.DOMAIN_BLS_TO_EXECUTION_CHANGE,
fork_version=fork_version,
genesis_validators_root=genesis_validators_root,
)
address_change = spec.BLSToExecutionChange(
validator_index=validator_index,
from_bls_pubkey=withdrawal_pubkey,
to_execution_address=to_execution_address,
)
signing_root = spec.compute_signing_root(address_change, domain)
return spec.SignedBLSToExecutionChange(
message=address_change,
signature=bls.Sign(withdrawal_privkey, signing_root),
)
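# --- Illustrative sketch (not part of the original helper) ---
# Typical usage in a Capella block test (hypothetical test body):
# address_change = get_signed_address_change(spec, state, validator_index=7)
# block.body.bls_to_execution_changes.append(address_change)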
| 1,502 | 33.159091 | 75 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/epoch_processing.py
|
from eth2spec.test.helpers.forks import (
is_post_altair,
is_post_capella,
)
def get_process_calls(spec):
    # Unrecognized processing functions will be ignored.
    # This list aggregates the processing functions of all phases.
# Note: make sure to explicitly remove/override a processing function in later phases,
# or the old function will stick around.
return [
'process_justification_and_finalization',
'process_inactivity_updates', # altair
'process_rewards_and_penalties',
'process_registry_updates',
'process_reveal_deadlines', # custody game
'process_challenge_deadlines', # custody game
'process_slashings',
        'process_pending_headers',  # sharding
'charge_confirmed_header_fees', # sharding
'reset_pending_headers', # sharding
'process_eth1_data_reset',
'process_effective_balance_updates',
'process_slashings_reset',
'process_randao_mixes_reset',
# Capella replaced `process_historical_roots_update` with `process_historical_summaries_update`
'process_historical_summaries_update' if is_post_capella(spec) else (
'process_historical_roots_update'
),
# Altair replaced `process_participation_record_updates` with `process_participation_flag_updates`
'process_participation_flag_updates' if is_post_altair(spec) else (
'process_participation_record_updates'
),
'process_sync_committee_updates', # altair
'process_full_withdrawals', # capella
'process_partial_withdrawals', # capella
# TODO: add sharding processing functions when spec stabilizes.
]
def run_epoch_processing_to(spec, state, process_name: str):
"""
Processes to the next epoch transition, up to, but not including, the sub-transition named ``process_name``
"""
slot = state.slot + (spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH)
# transition state to slot before epoch state transition
if state.slot < slot - 1:
spec.process_slots(state, slot - 1)
# start transitioning, do one slot update before the epoch itself.
spec.process_slot(state)
# process components of epoch transition before final-updates
for name in get_process_calls(spec):
if name == process_name:
break
# only run when present. Later phases introduce more to the epoch-processing.
if hasattr(spec, name):
getattr(spec, name)(state)
def run_epoch_processing_with(spec, state, process_name: str):
"""
Processes to the next epoch transition, up to and including the sub-transition named ``process_name``
- pre-state ('pre'), state before calling ``process_name``
- post-state ('post'), state after calling ``process_name``
"""
run_epoch_processing_to(spec, state, process_name)
yield 'pre', state
getattr(spec, process_name)(state)
yield 'post', state
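# --- Illustrative sketch (not part of the original helpers) ---
# Typical shape of a spec test built on the helper above; the test name and
# sub-transition choice are hypothetical:
# def test_eth1_data_reset(spec, state):
#     yield from run_epoch_processing_with(spec, state, 'process_eth1_data_reset')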
| 3,000 | 39.013333 | 111 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/multi_operations.py
|
from random import Random
from eth2spec.test.helpers.keys import privkeys, pubkeys
from eth2spec.test.helpers.state import (
state_transition_and_sign_block,
)
from eth2spec.test.helpers.block import (
build_empty_block_for_next_slot,
)
from eth2spec.test.helpers.sync_committee import (
compute_committee_indices,
compute_aggregate_sync_committee_signature,
)
from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing
from eth2spec.test.helpers.attester_slashings import get_valid_attester_slashing_by_indices
from eth2spec.test.helpers.attestations import get_valid_attestation
from eth2spec.test.helpers.deposits import build_deposit, deposit_from_context
from eth2spec.test.helpers.voluntary_exits import prepare_signed_exits
from eth2spec.test.helpers.bls_to_execution_changes import get_signed_address_change
def run_slash_and_exit(spec, state, slash_index, exit_index, valid=True):
"""
Helper function to run a test that slashes and exits two validators
"""
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
yield 'pre', state
block = build_empty_block_for_next_slot(spec, state)
proposer_slashing = get_valid_proposer_slashing(
spec, state, slashed_index=slash_index, signed_1=True, signed_2=True)
signed_exit = prepare_signed_exits(spec, state, [exit_index])[0]
block.body.proposer_slashings.append(proposer_slashing)
block.body.voluntary_exits.append(signed_exit)
signed_block = state_transition_and_sign_block(spec, state, block, expect_fail=(not valid))
yield 'blocks', [signed_block]
if not valid:
yield 'post', None
return
yield 'post', state
def get_random_proposer_slashings(spec, state, rng):
num_slashings = rng.randrange(1, spec.MAX_PROPOSER_SLASHINGS)
active_indices = spec.get_active_validator_indices(state, spec.get_current_epoch(state)).copy()
indices = [
index for index in active_indices
if not state.validators[index].slashed
]
slashings = [
get_valid_proposer_slashing(
spec, state,
slashed_index=indices.pop(rng.randrange(len(indices))), signed_1=True, signed_2=True,
)
for _ in range(num_slashings)
]
return slashings
def get_random_attester_slashings(spec, state, rng, slashed_indices=[]):
"""
Caller can supply ``slashed_indices`` if they are aware of other indices
that will be slashed by other operations in the same block as the one that
contains the output of this function.
"""
    # ensure at least one attester slashing; the max count
    # is small, so there is not much room for random inclusion
num_slashings = rng.randrange(1, spec.MAX_ATTESTER_SLASHINGS)
active_indices = spec.get_active_validator_indices(state, spec.get_current_epoch(state)).copy()
indices = [
index for index in active_indices
if (
not state.validators[index].slashed
and index not in slashed_indices
)
]
sample_upper_bound = 4
max_slashed_count = num_slashings * sample_upper_bound - 1
if len(indices) < max_slashed_count:
return []
slot_range = list(range(state.slot - spec.SLOTS_PER_HISTORICAL_ROOT + 1, state.slot))
slashings = [
get_valid_attester_slashing_by_indices(
spec, state,
sorted([indices.pop(rng.randrange(len(indices))) for _ in range(rng.randrange(1, sample_upper_bound))]),
slot=slot_range.pop(rng.randrange(len(slot_range))),
signed_1=True, signed_2=True,
)
for _ in range(num_slashings)
]
return slashings
def get_random_attestations(spec, state, rng):
num_attestations = rng.randrange(1, spec.MAX_ATTESTATIONS)
attestations = [
get_valid_attestation(
spec, state,
slot=rng.randrange(state.slot - spec.SLOTS_PER_EPOCH + 1, state.slot),
signed=True,
)
for _ in range(num_attestations)
]
return attestations
def get_random_deposits(spec, state, rng, num_deposits=None):
if not num_deposits:
num_deposits = rng.randrange(1, spec.MAX_DEPOSITS)
if num_deposits == 0:
return [], b"\x00" * 32
deposit_data_leaves = [spec.DepositData() for _ in range(len(state.validators))]
deposits = []
# First build deposit data leaves
for i in range(num_deposits):
index = len(state.validators) + i
withdrawal_pubkey = pubkeys[-1 - index]
withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(withdrawal_pubkey)[1:]
_, root, deposit_data_leaves = build_deposit(
spec,
deposit_data_leaves,
pubkeys[index],
privkeys[index],
spec.MAX_EFFECTIVE_BALANCE,
withdrawal_credentials=withdrawal_credentials,
signed=True,
)
# Then for that context, build deposits/proofs
for i in range(num_deposits):
index = len(state.validators) + i
deposit, _, _ = deposit_from_context(spec, deposit_data_leaves, index)
deposits.append(deposit)
return deposits, root
def prepare_state_and_get_random_deposits(spec, state, rng, num_deposits=None):
deposits, root = get_random_deposits(spec, state, rng, num_deposits=num_deposits)
state.eth1_data.deposit_root = root
state.eth1_data.deposit_count += len(deposits)
return deposits
def _eligible_for_exit(spec, state, index):
validator = state.validators[index]
not_slashed = not validator.slashed
current_epoch = spec.get_current_epoch(state)
activation_epoch = validator.activation_epoch
active_for_long_enough = current_epoch >= activation_epoch + spec.config.SHARD_COMMITTEE_PERIOD
not_exited = validator.exit_epoch == spec.FAR_FUTURE_EPOCH
return not_slashed and active_for_long_enough and not_exited
def get_random_voluntary_exits(spec, state, to_be_slashed_indices, rng):
num_exits = rng.randrange(1, spec.MAX_VOLUNTARY_EXITS)
active_indices = set(spec.get_active_validator_indices(state, spec.get_current_epoch(state)).copy())
indices = set(
index for index in active_indices
if _eligible_for_exit(spec, state, index)
)
eligible_indices = indices - to_be_slashed_indices
indices_count = min(num_exits, len(eligible_indices))
exit_indices = [eligible_indices.pop() for _ in range(indices_count)]
return prepare_signed_exits(spec, state, exit_indices)
def get_random_sync_aggregate(spec, state, slot, block_root=None, fraction_participated=1.0, rng=Random(2099)):
committee_indices = compute_committee_indices(state, state.current_sync_committee)
participant_count = int(len(committee_indices) * fraction_participated)
participant_indices = rng.sample(range(len(committee_indices)), participant_count)
participants = [
committee_indices[index]
for index in participant_indices
]
signature = compute_aggregate_sync_committee_signature(
spec,
state,
slot,
participants,
block_root=block_root,
)
return spec.SyncAggregate(
sync_committee_bits=[index in participant_indices for index in range(len(committee_indices))],
sync_committee_signature=signature,
)
def get_random_bls_to_execution_changes(spec, state, rng=Random(2188), num_address_changes=0):
bls_indices = [
index
for index, validator in enumerate(state.validators)
if validator.withdrawal_credentials[:1] == spec.BLS_WITHDRAWAL_PREFIX
]
assert len(bls_indices) > 0
return [
get_signed_address_change(spec, state, validator_index=validator_index)
for validator_index in rng.sample(bls_indices, min(num_address_changes, len(bls_indices)))
]
def build_random_block_from_state_for_next_slot(spec, state, rng=Random(2188), deposits=None):
block = build_empty_block_for_next_slot(spec, state)
proposer_slashings = get_random_proposer_slashings(spec, state, rng)
block.body.proposer_slashings = proposer_slashings
slashed_indices = [
slashing.signed_header_1.message.proposer_index
for slashing in proposer_slashings
]
block.body.attester_slashings = get_random_attester_slashings(spec, state, rng, slashed_indices)
block.body.attestations = get_random_attestations(spec, state, rng)
if deposits:
block.body.deposits = deposits
    # to-be-slashed indices cannot also be included as voluntary exits
slashed_indices = set([
slashing.signed_header_1.message.proposer_index
for slashing in block.body.proposer_slashings
])
for attester_slashing in block.body.attester_slashings:
slashed_indices = slashed_indices.union(attester_slashing.attestation_1.attesting_indices)
slashed_indices = slashed_indices.union(attester_slashing.attestation_2.attesting_indices)
block.body.voluntary_exits = get_random_voluntary_exits(spec, state, slashed_indices, rng)
return block
def run_test_full_random_operations(spec, state, rng=Random(2080)):
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
# prepare state for deposits before building block
deposits = prepare_state_and_get_random_deposits(spec, state, rng)
block = build_random_block_from_state_for_next_slot(spec, state, rng, deposits=deposits)
yield 'pre', state
signed_block = state_transition_and_sign_block(spec, state, block)
yield 'blocks', [signed_block]
yield 'post', state
| 9,715 | 36.369231 | 116 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/sharding.py
|
import random
from eth2spec.utils.ssz.ssz_typing import (
Container,
Bytes20, Bytes32,
ByteList,
List,
Union,
boolean,
uint256, uint64,
)
from eth2spec.utils.ssz.ssz_impl import serialize
#
# Containers from EIP-4844
#
MAX_CALLDATA_SIZE = 2**24
MAX_VERSIONED_HASHES_LIST_SIZE = 2**24
MAX_ACCESS_LIST_STORAGE_KEYS = 2**24
MAX_ACCESS_LIST_SIZE = 2**24
class AccessTuple(Container):
address: Bytes20 # Address = Bytes20
storage_keys: List[Bytes32, MAX_ACCESS_LIST_STORAGE_KEYS]
class ECDSASignature(Container):
y_parity: boolean
r: uint256
s: uint256
class BlobTransaction(Container):
chain_id: uint256
nonce: uint64
max_priority_fee_per_gas: uint256
max_fee_per_gas: uint256
gas: uint64
to: Union[None, Bytes20] # Address = Bytes20
value: uint256
data: ByteList[MAX_CALLDATA_SIZE]
access_list: List[AccessTuple, MAX_ACCESS_LIST_SIZE]
max_fee_per_data_gas: uint256
blob_versioned_hashes: List[Bytes32, MAX_VERSIONED_HASHES_LIST_SIZE]
class SignedBlobTransaction(Container):
message: BlobTransaction
signature: ECDSASignature
def get_sample_blob(spec, rng=random.Random(5566), is_valid_blob=True):
values = [
rng.randint(0, spec.BLS_MODULUS - 1) if is_valid_blob else spec.BLS_MODULUS
for _ in range(spec.FIELD_ELEMENTS_PER_BLOB)
]
b = bytes()
for v in values:
b += v.to_bytes(32, spec.KZG_ENDIANNESS)
return spec.Blob(b)
def eval_poly_in_coeff_form(spec, coeffs, x):
"""
Evaluate a polynomial in coefficient form at 'x' using Horner's rule
"""
total = 0
for a in reversed(coeffs):
total = (total * x + a) % spec.BLS_MODULUS
return total % spec.BLS_MODULUS
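# --- Illustrative sketch (not part of the original helper) ---
# Horner's rule worked example for the helper above, with a tiny prime
# modulus standing in for spec.BLS_MODULUS (an assumption for readability):
class _TinyModulusSpec:
    BLS_MODULUS = 101

# p(x) = 3 + 2x + x^2 evaluated at x = 5: 3 + 10 + 25 = 38 (mod 101)
assert eval_poly_in_coeff_form(_TinyModulusSpec, [3, 2, 1], 5) == 38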
def get_poly_in_both_forms(spec, rng=None):
"""
Generate and return a random polynomial in both coefficient form and evaluation form
"""
if rng is None:
rng = random.Random(5566)
roots_of_unity_brp = spec.bit_reversal_permutation(spec.ROOTS_OF_UNITY)
coeffs = [
rng.randint(0, spec.BLS_MODULUS - 1)
for _ in range(spec.FIELD_ELEMENTS_PER_BLOB)
]
evals = [
eval_poly_in_coeff_form(spec, coeffs, int(z))
for z in roots_of_unity_brp
]
return coeffs, evals
def get_sample_opaque_tx(spec, blob_count=1, rng=random.Random(5566), is_valid_blob=True):
blobs = []
blob_kzg_commitments = []
blob_kzg_proofs = []
blob_versioned_hashes = []
for _ in range(blob_count):
blob = get_sample_blob(spec, rng, is_valid_blob=is_valid_blob)
if is_valid_blob:
blob_commitment = spec.KZGCommitment(spec.blob_to_kzg_commitment(blob))
blob_kzg_proof = spec.compute_blob_kzg_proof(blob, blob_commitment)
else:
blob_commitment = spec.KZGCommitment()
blob_kzg_proof = spec.KZGProof()
blob_versioned_hash = spec.kzg_commitment_to_versioned_hash(blob_commitment)
blobs.append(blob)
blob_kzg_commitments.append(blob_commitment)
blob_kzg_proofs.append(blob_kzg_proof)
blob_versioned_hashes.append(blob_versioned_hash)
signed_blob_tx = SignedBlobTransaction(
message=BlobTransaction(
blob_versioned_hashes=blob_versioned_hashes,
)
)
serialized_tx = serialize(signed_blob_tx)
opaque_tx = spec.uint_to_bytes(spec.BLOB_TX_TYPE) + serialized_tx
return opaque_tx, blobs, blob_kzg_commitments, blob_kzg_proofs
| 3,487 | 26.904 | 90 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/forks.py
|
from .constants import (
PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB,
EIP6110,
)
def is_post_fork(a, b):
if a == EIP6110:
return b in [PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110]
if a == DENEB:
return b in [PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB]
if a == CAPELLA:
return b in [PHASE0, ALTAIR, BELLATRIX, CAPELLA]
if a == BELLATRIX:
return b in [PHASE0, ALTAIR, BELLATRIX]
if a == ALTAIR:
return b in [PHASE0, ALTAIR]
if a == PHASE0:
return b in [PHASE0]
raise ValueError("Unknown fork name %s" % a)
def is_post_altair(spec):
return is_post_fork(spec.fork, ALTAIR)
def is_post_bellatrix(spec):
return is_post_fork(spec.fork, BELLATRIX)
def is_post_capella(spec):
return is_post_fork(spec.fork, CAPELLA)
def is_post_deneb(spec):
return is_post_fork(spec.fork, DENEB)
def is_post_eip6110(spec):
return is_post_fork(spec.fork, EIP6110)
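# --- Illustrative sketch (not part of the original helpers) ---
# `is_post_fork(a, b)` reads as "fork `a` is at or after fork `b`" in the
# linear fork ordering, e.g.:
assert is_post_fork(CAPELLA, ALTAIR)      # Capella includes Altair
assert not is_post_fork(ALTAIR, CAPELLA)  # but not the other way around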
| 957 | 22.365854 | 72 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py
|
from enum import Enum, auto
from eth2spec.test.helpers.attester_slashings import (
get_valid_attester_slashing_by_indices,
)
from eth2spec.test.helpers.attestations import next_slots_with_attestations
from eth2spec.test.helpers.block import (
build_empty_block_for_next_slot,
build_empty_block,
sign_block,
)
from eth2spec.test.helpers.bls_to_execution_changes import get_signed_address_change
from eth2spec.test.helpers.constants import (
ALTAIR,
BELLATRIX,
CAPELLA,
DENEB,
EIP6110,
)
from eth2spec.test.helpers.deposits import (
prepare_state_and_deposit,
)
from eth2spec.test.helpers.proposer_slashings import (
get_valid_proposer_slashing,
)
from eth2spec.test.helpers.state import (
next_slot,
state_transition_and_sign_block,
transition_to,
)
from eth2spec.test.helpers.voluntary_exits import (
prepare_signed_exits,
)
class OperationType(Enum):
PROPOSER_SLASHING = auto()
ATTESTER_SLASHING = auto()
DEPOSIT = auto()
VOLUNTARY_EXIT = auto()
BLS_TO_EXECUTION_CHANGE = auto()
def _set_operations_by_dict(block, operation_dict):
for key, value in operation_dict.items():
setattr(block.body, key, value)
def _state_transition_and_sign_block_at_slot(spec,
state,
sync_aggregate=None,
operation_dict=None):
"""
Cribbed from ``transition_unsigned_block`` helper
where the early parts of the state transition have already
been applied to ``state``.
Used to produce a block during an irregular state transition.
The optional `operation_dict` is a dict of {'<BeaconBlockBody field>': <value>}.
This is used for assigning the block operations.
    p.s. we can't just pass `body` and assign it, because `randao_reveal` and `eth1_data` were already set in `build_empty_block`;
    thus we use a dict to pass the operations.
"""
block = build_empty_block(spec, state)
if sync_aggregate is not None:
block.body.sync_aggregate = sync_aggregate
if operation_dict:
_set_operations_by_dict(block, operation_dict)
assert state.latest_block_header.slot < block.slot
assert state.slot == block.slot
spec.process_block(state, block)
block.state_root = state.hash_tree_root()
return sign_block(spec, state, block)
def _all_blocks(_):
return True
def skip_slots(*slots):
"""
Skip making a block if its slot is
passed as an argument to this filter
"""
def f(state_at_prior_slot):
return state_at_prior_slot.slot + 1 not in slots
return f
def no_blocks(_):
return False
def only_at(slot):
"""
Only produce a block if its slot is ``slot``.
"""
def f(state_at_prior_slot):
return state_at_prior_slot.slot + 1 == slot
return f
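# --- Illustrative sketch (not part of the original helpers) ---
# The filters above are predicates over the state *before* the next block's
# slot; e.g. for a (hypothetical) state at slot 9:
#   skip_slots(10)(state) -> False  (suppress the block at slot 10)
#   only_at(10)(state)    -> True   (produce only the block at slot 10)
#   no_blocks(state)      -> False  (advance with empty slots only)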
def state_transition_across_slots(spec, state, to_slot, block_filter=_all_blocks):
assert state.slot < to_slot
while state.slot < to_slot:
should_make_block = block_filter(state)
if should_make_block:
block = build_empty_block_for_next_slot(spec, state)
signed_block = state_transition_and_sign_block(spec, state, block)
yield signed_block
else:
next_slot(spec, state)
def state_transition_across_slots_with_ignoring_proposers(spec,
state,
to_slot,
ignoring_proposers,
only_last_block=False):
"""
    Slashed validators can't be block proposers. Here we skip slots whose proposer is in the given
    `ignoring_proposers` set and ensure that the resulting state was computed with a block at slot >= `to_slot`.
"""
assert state.slot < to_slot
found_valid = False
while state.slot < to_slot or not found_valid:
if state.slot + 1 < to_slot and only_last_block:
next_slot(spec, state)
continue
future_state = state.copy()
next_slot(spec, future_state)
proposer_index = spec.get_beacon_proposer_index(future_state)
if proposer_index not in ignoring_proposers:
block = build_empty_block_for_next_slot(spec, state)
signed_block = state_transition_and_sign_block(spec, state, block)
yield signed_block
if state.slot >= to_slot:
found_valid = True
else:
next_slot(spec, state)
def do_fork(state, spec, post_spec, fork_epoch, with_block=True, sync_aggregate=None, operation_dict=None):
spec.process_slots(state, state.slot + 1)
assert state.slot % spec.SLOTS_PER_EPOCH == 0
assert spec.get_current_epoch(state) == fork_epoch
if post_spec.fork == ALTAIR:
state = post_spec.upgrade_to_altair(state)
elif post_spec.fork == BELLATRIX:
state = post_spec.upgrade_to_bellatrix(state)
elif post_spec.fork == CAPELLA:
state = post_spec.upgrade_to_capella(state)
elif post_spec.fork == DENEB:
state = post_spec.upgrade_to_deneb(state)
elif post_spec.fork == EIP6110:
state = post_spec.upgrade_to_eip6110(state)
assert state.fork.epoch == fork_epoch
if post_spec.fork == ALTAIR:
assert state.fork.previous_version == post_spec.config.GENESIS_FORK_VERSION
assert state.fork.current_version == post_spec.config.ALTAIR_FORK_VERSION
elif post_spec.fork == BELLATRIX:
assert state.fork.previous_version == post_spec.config.ALTAIR_FORK_VERSION
assert state.fork.current_version == post_spec.config.BELLATRIX_FORK_VERSION
elif post_spec.fork == CAPELLA:
assert state.fork.previous_version == post_spec.config.BELLATRIX_FORK_VERSION
assert state.fork.current_version == post_spec.config.CAPELLA_FORK_VERSION
elif post_spec.fork == DENEB:
assert state.fork.previous_version == post_spec.config.CAPELLA_FORK_VERSION
assert state.fork.current_version == post_spec.config.DENEB_FORK_VERSION
elif post_spec.fork == EIP6110:
assert state.fork.previous_version == post_spec.config.DENEB_FORK_VERSION
assert state.fork.current_version == post_spec.config.EIP6110_FORK_VERSION
if with_block:
return state, _state_transition_and_sign_block_at_slot(
post_spec,
state,
sync_aggregate=sync_aggregate,
operation_dict=operation_dict,
)
else:
return state, None
def transition_until_fork(spec, state, fork_epoch):
to_slot = fork_epoch * spec.SLOTS_PER_EPOCH - 1
transition_to(spec, state, to_slot)
def _transition_until_fork_minus_one(spec, state, fork_epoch):
to_slot = fork_epoch * spec.SLOTS_PER_EPOCH - 2
transition_to(spec, state, to_slot)
def transition_to_next_epoch_and_append_blocks(spec,
state,
post_tag,
blocks,
only_last_block=False,
ignoring_proposers=None):
to_slot = spec.SLOTS_PER_EPOCH + state.slot
if only_last_block:
block_filter = only_at(to_slot)
else:
block_filter = _all_blocks
if ignoring_proposers is None:
result_blocks = state_transition_across_slots(spec, state, to_slot, block_filter=block_filter)
else:
result_blocks = state_transition_across_slots_with_ignoring_proposers(
spec,
state,
to_slot,
ignoring_proposers,
only_last_block=only_last_block,
)
blocks.extend([
post_tag(block) for block in
result_blocks
])
def run_transition_with_operation(state,
fork_epoch,
spec,
post_spec,
pre_tag,
post_tag,
operation_type,
operation_at_slot):
"""
    Generate an `operation_type` operation with the pre-fork spec.
    The operation is included in the block at `operation_at_slot`.
"""
is_at_fork = operation_at_slot == fork_epoch * spec.SLOTS_PER_EPOCH
is_right_before_fork = operation_at_slot == fork_epoch * spec.SLOTS_PER_EPOCH - 1
assert is_at_fork or is_right_before_fork
if is_at_fork:
transition_until_fork(spec, state, fork_epoch)
elif is_right_before_fork:
_transition_until_fork_minus_one(spec, state, fork_epoch)
is_slashing_operation = operation_type in (OperationType.PROPOSER_SLASHING, OperationType.ATTESTER_SLASHING)
# prepare operation
selected_validator_index = None
if is_slashing_operation:
# avoid slashing the next proposer
future_state = state.copy()
next_slot(spec, future_state)
proposer_index = spec.get_beacon_proposer_index(future_state)
selected_validator_index = (proposer_index + 1) % len(state.validators)
if operation_type == OperationType.PROPOSER_SLASHING:
proposer_slashing = get_valid_proposer_slashing(
spec, state, slashed_index=selected_validator_index, signed_1=True, signed_2=True)
operation_dict = {'proposer_slashings': [proposer_slashing]}
else:
# operation_type == OperationType.ATTESTER_SLASHING:
attester_slashing = get_valid_attester_slashing_by_indices(
spec, state,
[selected_validator_index],
signed_1=True, signed_2=True,
)
operation_dict = {'attester_slashings': [attester_slashing]}
elif operation_type == OperationType.DEPOSIT:
# create a new deposit
selected_validator_index = len(state.validators)
amount = spec.MAX_EFFECTIVE_BALANCE
deposit = prepare_state_and_deposit(spec, state, selected_validator_index, amount, signed=True)
operation_dict = {'deposits': [deposit]}
elif operation_type == OperationType.VOLUNTARY_EXIT:
selected_validator_index = 0
signed_exits = prepare_signed_exits(spec, state, [selected_validator_index])
operation_dict = {'voluntary_exits': signed_exits}
elif operation_type == OperationType.BLS_TO_EXECUTION_CHANGE:
selected_validator_index = 0
bls_to_execution_changes = [get_signed_address_change(spec, state, selected_validator_index)]
operation_dict = {'bls_to_execution_changes': bls_to_execution_changes}
def _check_state():
if operation_type == OperationType.PROPOSER_SLASHING:
slashed_proposer = state.validators[proposer_slashing.signed_header_1.message.proposer_index]
assert slashed_proposer.slashed
elif operation_type == OperationType.ATTESTER_SLASHING:
indices = set(attester_slashing.attestation_1.attesting_indices).intersection(
attester_slashing.attestation_2.attesting_indices
)
assert selected_validator_index in indices
assert len(indices) > 0
for validator_index in indices:
assert state.validators[validator_index].slashed
elif operation_type == OperationType.DEPOSIT:
assert not post_spec.is_active_validator(
state.validators[selected_validator_index],
post_spec.get_current_epoch(state)
)
elif operation_type == OperationType.VOLUNTARY_EXIT:
validator = state.validators[selected_validator_index]
assert validator.exit_epoch < post_spec.FAR_FUTURE_EPOCH
elif operation_type == OperationType.BLS_TO_EXECUTION_CHANGE:
validator = state.validators[selected_validator_index]
assert validator.withdrawal_credentials[:1] == spec.ETH1_ADDRESS_WITHDRAWAL_PREFIX
yield "pre", state
blocks = []
if is_right_before_fork:
# add a block with operation.
block = build_empty_block_for_next_slot(spec, state)
_set_operations_by_dict(block, operation_dict)
signed_block = state_transition_and_sign_block(spec, state, block)
blocks.append(pre_tag(signed_block))
_check_state()
# irregular state transition to handle fork:
_operation_at_slot = operation_dict if is_at_fork else None
state, block = do_fork(state, spec, post_spec, fork_epoch, operation_dict=_operation_at_slot)
blocks.append(post_tag(block))
if is_at_fork:
_check_state()
# after the fork
if operation_type == OperationType.DEPOSIT:
state = _transition_until_active(post_spec, state, post_tag, blocks, selected_validator_index)
else:
# avoid using the slashed validators as block proposers
ignoring_proposers = [selected_validator_index] if is_slashing_operation else None
# continue regular state transition with new spec into next epoch
transition_to_next_epoch_and_append_blocks(
post_spec,
state,
post_tag,
blocks,
only_last_block=True,
ignoring_proposers=ignoring_proposers,
)
yield "blocks", blocks
yield "post", state
def _transition_until_active(post_spec, state, post_tag, blocks, validator_index):
# continue regular state transition with new spec into next epoch
transition_to_next_epoch_and_append_blocks(post_spec, state, post_tag, blocks)
# finalize activation_eligibility_epoch
_, blocks_in_epoch, state = next_slots_with_attestations(
post_spec,
state,
post_spec.SLOTS_PER_EPOCH * 2,
fill_cur_epoch=True,
fill_prev_epoch=True,
)
blocks.extend([post_tag(block) for block in blocks_in_epoch])
assert state.finalized_checkpoint.epoch >= state.validators[validator_index].activation_eligibility_epoch
# continue regular state transition with new spec into next epoch
transition_to_next_epoch_and_append_blocks(post_spec, state, post_tag, blocks, only_last_block=True)
assert state.validators[validator_index].activation_epoch < post_spec.FAR_FUTURE_EPOCH
to_slot = state.validators[validator_index].activation_epoch * post_spec.SLOTS_PER_EPOCH
blocks.extend([
post_tag(block) for block in
state_transition_across_slots(post_spec, state, to_slot, block_filter=only_at(to_slot))
])
assert post_spec.is_active_validator(state.validators[validator_index], post_spec.get_current_epoch(state))
return state
| 14,768 | 37.460938 | 115 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/random.py
|
from random import Random
from eth2spec.test.helpers.attestations import cached_prepare_state_with_attestations
from eth2spec.test.helpers.deposits import mock_deposit
from eth2spec.test.helpers.forks import is_post_altair
from eth2spec.test.helpers.state import next_epoch
def set_some_activations(spec, state, rng, activation_epoch=None):
if activation_epoch is None:
activation_epoch = spec.get_current_epoch(state)
num_validators = len(state.validators)
selected_indices = []
for index in range(num_validators):
# If is slashed or exiting, skip
if state.validators[index].slashed or state.validators[index].exit_epoch != spec.FAR_FUTURE_EPOCH:
continue
# Set ~1/10 validators' activation_eligibility_epoch and activation_epoch
if rng.randrange(num_validators) < num_validators // 10:
state.validators[index].activation_eligibility_epoch = max(
int(activation_epoch) - int(spec.MAX_SEED_LOOKAHEAD) - 1,
spec.GENESIS_EPOCH,
)
state.validators[index].activation_epoch = activation_epoch
selected_indices.append(index)
return selected_indices
def set_some_new_deposits(spec, state, rng):
deposited_indices = []
num_validators = len(state.validators)
# Set ~1/10 to just recently deposited
for index in range(num_validators):
# If not already active, skip
if not spec.is_active_validator(state.validators[index], spec.get_current_epoch(state)):
continue
if rng.randrange(num_validators) < num_validators // 10:
mock_deposit(spec, state, index)
if rng.choice([True, False]):
# Set ~half of selected to eligible for activation
state.validators[index].activation_eligibility_epoch = spec.get_current_epoch(state)
else:
# The validators that just made a deposit
deposited_indices.append(index)
return deposited_indices
def exit_random_validators(spec, state, rng, fraction=0.5, exit_epoch=None, withdrawable_epoch=None, from_epoch=None):
"""
Set some validators' exit_epoch and withdrawable_epoch.
If exit_epoch is configured, use the given exit_epoch. Otherwise, randomly set exit_epoch and withdrawable_epoch.
"""
if from_epoch is None:
from_epoch = spec.MAX_SEED_LOOKAHEAD + 1
epoch_diff = int(from_epoch) - int(spec.get_current_epoch(state))
for _ in range(epoch_diff):
# NOTE: if `epoch_diff` is negative, then this loop body does not execute.
next_epoch(spec, state)
current_epoch = spec.get_current_epoch(state)
exited_indices = []
for index in spec.get_active_validator_indices(state, current_epoch):
sampled = rng.random() < fraction
if not sampled:
continue
exited_indices.append(index)
validator = state.validators[index]
if exit_epoch is None:
assert withdrawable_epoch is None
validator.exit_epoch = rng.choice([current_epoch, current_epoch - 1, current_epoch - 2, current_epoch - 3])
# ~1/2 are withdrawable (note, unnatural span between exit epoch and withdrawable epoch)
if rng.choice([True, False]):
validator.withdrawable_epoch = current_epoch
else:
validator.withdrawable_epoch = current_epoch + 1
else:
validator.exit_epoch = exit_epoch
if withdrawable_epoch is None:
validator.withdrawable_epoch = validator.exit_epoch + spec.config.MIN_VALIDATOR_WITHDRAWABILITY_DELAY
else:
validator.withdrawable_epoch = withdrawable_epoch
return exited_indices
def slash_random_validators(spec, state, rng, fraction=0.5):
slashed_indices = []
for index in range(len(state.validators)):
# slash at least one validator
sampled = rng.random() < fraction
if index == 0 or sampled:
spec.slash_validator(state, index)
slashed_indices.append(index)
return slashed_indices
def randomize_epoch_participation(spec, state, epoch, rng):
assert epoch in (spec.get_current_epoch(state), spec.get_previous_epoch(state))
if not is_post_altair(spec):
if epoch == spec.get_current_epoch(state):
pending_attestations = state.current_epoch_attestations
else:
pending_attestations = state.previous_epoch_attestations
for pending_attestation in pending_attestations:
# ~1/3 have bad target
if rng.randint(0, 2) == 0:
pending_attestation.data.target.root = b'\x55' * 32
# ~1/3 have bad head
if rng.randint(0, 2) == 0:
pending_attestation.data.beacon_block_root = b'\x66' * 32
# ~50% participation
pending_attestation.aggregation_bits = [rng.choice([True, False])
for _ in pending_attestation.aggregation_bits]
# Random inclusion delay
pending_attestation.inclusion_delay = rng.randint(1, spec.SLOTS_PER_EPOCH)
else:
if epoch == spec.get_current_epoch(state):
epoch_participation = state.current_epoch_participation
else:
epoch_participation = state.previous_epoch_participation
for index in range(len(state.validators)):
# ~1/3 have bad head or bad target or not timely enough
is_timely_correct_head = rng.randint(0, 2) != 0
flags = epoch_participation[index]
            def set_flag(flag_index, value):
                nonlocal flags
                flag = spec.ParticipationFlags(2**flag_index)
if value:
flags |= flag
else:
flags &= 0xff ^ flag
set_flag(spec.TIMELY_HEAD_FLAG_INDEX, is_timely_correct_head)
if is_timely_correct_head:
# If timely head, then must be timely target
set_flag(spec.TIMELY_TARGET_FLAG_INDEX, True)
# If timely head, then must be timely source
set_flag(spec.TIMELY_SOURCE_FLAG_INDEX, True)
else:
# ~50% of remaining have bad target or not timely enough
set_flag(spec.TIMELY_TARGET_FLAG_INDEX, rng.choice([True, False]))
# ~50% of remaining have bad source or not timely enough
set_flag(spec.TIMELY_SOURCE_FLAG_INDEX, rng.choice([True, False]))
epoch_participation[index] = flags
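# --- Illustrative sketch (editor's addition, not part of the original helpers). ---
# Self-contained demo of the participation-flag bit twiddling above, using a
# plain int in place of ``spec.ParticipationFlags`` (assumed to be a uint8).
def _demo_participation_flags():
    TIMELY_SOURCE, TIMELY_TARGET, TIMELY_HEAD = 0, 1, 2  # flag bit positions
    flags = 0
    def set_flag(flag_index, value):
        nonlocal flags
        flag = 2**flag_index
        if value:
            flags |= flag
        else:
            flags &= 0xff ^ flag
    set_flag(TIMELY_HEAD, True)
    set_flag(TIMELY_TARGET, True)
    set_flag(TIMELY_HEAD, False)
    assert flags == 2**TIMELY_TARGET  # only the target bit remains set
    return flags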
def randomize_previous_epoch_participation(spec, state, rng=Random(8020)):
cached_prepare_state_with_attestations(spec, state)
randomize_epoch_participation(spec, state, spec.get_previous_epoch(state), rng)
if not is_post_altair(spec):
state.current_epoch_attestations = []
else:
state.current_epoch_participation = [spec.ParticipationFlags(0b0000_0000) for _ in range(len(state.validators))]
def randomize_attestation_participation(spec, state, rng=Random(8020)):
cached_prepare_state_with_attestations(spec, state)
randomize_epoch_participation(spec, state, spec.get_previous_epoch(state), rng)
randomize_epoch_participation(spec, state, spec.get_current_epoch(state), rng)
def randomize_state(spec, state, rng=Random(8020), exit_fraction=0.5, slash_fraction=0.5):
set_some_new_deposits(spec, state, rng)
exit_random_validators(spec, state, rng, fraction=exit_fraction)
slash_random_validators(spec, state, rng, fraction=slash_fraction)
randomize_attestation_participation(spec, state, rng)
def patch_state_to_non_leaking(spec, state):
"""
This function performs an irregular state transition so that:
1. the current justified checkpoint references the previous epoch
2. the previous justified checkpoint references the epoch before previous
3. the finalized checkpoint matches the previous justified checkpoint
The effects of this function are intended to offset randomization side effects
performed by other functionality in this module so that if the ``state`` was leaking,
then the ``state`` is not leaking after.
"""
state.justification_bits[0] = True
state.justification_bits[1] = True
previous_epoch = spec.get_previous_epoch(state)
previous_root = spec.get_block_root(state, previous_epoch)
previous_previous_epoch = max(spec.GENESIS_EPOCH, spec.Epoch(previous_epoch - 1))
previous_previous_root = spec.get_block_root(state, previous_previous_epoch)
state.previous_justified_checkpoint = spec.Checkpoint(
epoch=previous_previous_epoch,
root=previous_previous_root,
)
state.current_justified_checkpoint = spec.Checkpoint(
epoch=previous_epoch,
root=previous_root,
)
state.finalized_checkpoint = spec.Checkpoint(
epoch=previous_previous_epoch,
root=previous_previous_root,
)
| 9,007 | 43.81592 | 120 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/optimistic_sync.py
|
from dataclasses import dataclass
from enum import Enum
from typing import (
Dict,
Optional,
)
from eth_utils import encode_hex
from eth2spec.utils.ssz.ssz_typing import Bytes32
from eth2spec.test.helpers.fork_choice import (
add_block,
)
class PayloadStatusV1StatusAlias(Enum):
NOT_VALIDATED = "NOT_VALIDATED"
INVALIDATED = "INVALIDATED"
class PayloadStatusV1Status(Enum):
VALID = "VALID"
INVALID = "INVALID"
SYNCING = "SYNCING"
ACCEPTED = "ACCEPTED"
INVALID_BLOCK_HASH = "INVALID_BLOCK_HASH"
@property
def alias(self) -> PayloadStatusV1StatusAlias:
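        # Note: ``VALID`` has no alias; this property implicitly returns None for it.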
if self.value in (self.SYNCING.value, self.ACCEPTED.value):
return PayloadStatusV1StatusAlias.NOT_VALIDATED
elif self.value in (self.INVALID.value, self.INVALID_BLOCK_HASH.value):
return PayloadStatusV1StatusAlias.INVALIDATED
@dataclass
class PayloadStatusV1:
status: PayloadStatusV1Status = PayloadStatusV1Status.VALID
latest_valid_hash: Optional[Bytes32] = None
validation_error: Optional[str] = None
@property
def formatted_output(self):
return {
'status': str(self.status.value),
'latest_valid_hash': encode_hex(self.latest_valid_hash) if self.latest_valid_hash is not None else None,
'validation_error': str(self.validation_error) if self.validation_error is not None else None
}
class MegaStore(object):
    spec = None
    fc_store = None
    opt_store = None
    block_payload_statuses: Dict[Bytes32, PayloadStatusV1]
    def __init__(self, spec, fc_store, opt_store):
        self.spec = spec
        self.fc_store = fc_store
        self.opt_store = opt_store
        # Initialize per instance: a class-level ``dict()`` default would be
        # shared (and mutated) across every MegaStore in the test process.
        self.block_payload_statuses = dict()
def get_optimistic_store(spec, anchor_state, anchor_block):
assert anchor_block.state_root == anchor_state.hash_tree_root()
opt_store = spec.OptimisticStore(
optimistic_roots=set(),
head_block_root=anchor_block.hash_tree_root(),
)
anchor_block_root = anchor_block.hash_tree_root()
opt_store.blocks[anchor_block_root] = anchor_block.copy()
opt_store.block_states[anchor_block_root] = anchor_state.copy()
return opt_store
def get_valid_flag_value(status: PayloadStatusV1Status) -> bool:
if status == PayloadStatusV1Status.VALID:
return True
elif status.alias == PayloadStatusV1StatusAlias.NOT_VALIDATED:
return True
else:
# status.alias == PayloadStatusV1StatusAlias.INVALIDATED or other cases
return False
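# --- Illustrative sketch (editor's addition, not part of the original helpers). ---
# The mapping realized by ``get_valid_flag_value``: only an INVALIDATED alias
# (INVALID / INVALID_BLOCK_HASH) yields ``valid=False``.
def _demo_valid_flag_table():
    return {status: get_valid_flag_value(status) for status in PayloadStatusV1Status}
    # {VALID: True, INVALID: False, SYNCING: True, ACCEPTED: True, INVALID_BLOCK_HASH: False}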
def add_optimistic_block(spec, mega_store, signed_block, test_steps,
payload_status=None, status=PayloadStatusV1Status.SYNCING):
"""
Add a block with optimistic sync logic
``valid`` indicates if the given ``signed_block.message.body.execution_payload`` is valid/invalid
from ``verify_and_notify_new_payload`` method response.
"""
block = signed_block.message
block_root = block.hash_tree_root()
el_block_hash = block.body.execution_payload.block_hash
if payload_status is None:
payload_status = PayloadStatusV1(status=status)
if payload_status.status == PayloadStatusV1Status.VALID:
payload_status.latest_valid_hash = el_block_hash
mega_store.block_payload_statuses[block_root] = payload_status
test_steps.append({
'block_hash': encode_hex(el_block_hash),
'payload_status': payload_status.formatted_output,
})
# Set `valid` flag
valid = get_valid_flag_value(payload_status.status)
# Optimistic sync
# Case: INVALID
if payload_status.status == PayloadStatusV1Status.INVALID:
# Update parent status to INVALID
assert payload_status.latest_valid_hash is not None
current_block = block
while el_block_hash != payload_status.latest_valid_hash and el_block_hash != spec.Bytes32():
current_block_root = current_block.hash_tree_root()
assert current_block_root in mega_store.block_payload_statuses
mega_store.block_payload_statuses[current_block_root].status = PayloadStatusV1Status.INVALID
# Get parent
current_block = mega_store.fc_store.blocks[current_block.parent_root]
el_block_hash = current_block.body.execution_payload.block_hash
yield from add_block(spec, mega_store.fc_store, signed_block,
valid=valid,
test_steps=test_steps,
is_optimistic=True)
# Update stores
is_optimistic_candidate = spec.is_optimistic_candidate_block(
mega_store.opt_store,
current_slot=spec.get_current_slot(mega_store.fc_store),
block=signed_block.message,
)
if is_optimistic_candidate:
mega_store.opt_store.optimistic_roots.add(block_root)
mega_store.opt_store.blocks[block_root] = signed_block.message.copy()
if not is_invalidated(mega_store, block_root):
mega_store.opt_store.block_states[block_root] = mega_store.fc_store.block_states[block_root].copy()
# Clean up the invalidated blocks
clean_up_store(mega_store)
# Update head
mega_store.opt_store.head_block_root = get_opt_head_block_root(spec, mega_store)
test_steps.append({
'checks': {
'head': get_formatted_optimistic_head_output(mega_store),
}
})
def get_opt_head_block_root(spec, mega_store):
"""
Copied and modified from fork-choice spec `get_head` function.
"""
store = mega_store.fc_store
# Get filtered block tree that only includes viable branches
blocks = spec.get_filtered_block_tree(store)
# Execute the LMD-GHOST fork choice
head = store.justified_checkpoint.root
while True:
children = [
root for root in blocks.keys()
if (
blocks[root].parent_root == head
and not is_invalidated(mega_store, root) # For optimistic sync
)
]
if len(children) == 0:
return head
# Sort by latest attesting balance with ties broken lexicographically
# Ties broken by favoring block with lexicographically higher root
head = max(children, key=lambda root: (spec.get_weight(store, root), root))
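# --- Illustrative sketch (editor's addition, not part of the original helpers). ---
# The tie-break rule above in isolation: candidates are ranked by (weight, root),
# so equal weights fall back to the lexicographically greatest root.
def _demo_ghost_tie_break():
    weights = {b'\x01' * 32: 10, b'\x02' * 32: 10, b'\x03' * 32: 7}
    children = list(weights)
    head = max(children, key=lambda root: (weights[root], root))
    assert head == b'\x02' * 32  # heaviest; tie broken by the higher root
    return head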
def is_invalidated(mega_store, block_root):
if block_root in mega_store.block_payload_statuses:
return mega_store.block_payload_statuses[block_root].status.alias == PayloadStatusV1StatusAlias.INVALIDATED
else:
return False
def get_formatted_optimistic_head_output(mega_store):
head = mega_store.opt_store.head_block_root
slot = mega_store.fc_store.blocks[head].slot
return {
'slot': int(slot),
'root': encode_hex(head),
}
def clean_up_store(mega_store):
"""
Remove invalidated blocks
"""
# TODO
...
| 6,833 | 32.336585 | 116 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/__init__.py
| 0 | 0 | 0 |
py
|
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/custody.py
|
from eth2spec.test.helpers.keys import privkeys
from eth2spec.test.helpers.merkle import build_proof
from eth2spec.utils import bls
from eth2spec.utils.ssz.ssz_typing import Bitlist, ByteVector, ByteList
BYTES_PER_CHUNK = 32
def get_valid_early_derived_secret_reveal(spec, state, epoch=None):
current_epoch = spec.get_current_epoch(state)
revealed_index = spec.get_active_validator_indices(state, current_epoch)[-1]
masker_index = spec.get_active_validator_indices(state, current_epoch)[0]
if epoch is None:
epoch = current_epoch + spec.CUSTODY_PERIOD_TO_RANDAO_PADDING
# Generate the secret that is being revealed
domain = spec.get_domain(state, spec.DOMAIN_RANDAO, epoch)
signing_root = spec.compute_signing_root(spec.Epoch(epoch), domain)
reveal = bls.Sign(privkeys[revealed_index], signing_root)
# Generate the mask (any random 32 bytes that don't reveal the masker's secret will do)
mask = spec.hash(reveal)
# Generate masker's signature on the mask
signing_root = spec.compute_signing_root(mask, domain)
masker_signature = bls.Sign(privkeys[masker_index], signing_root)
masked_reveal = bls.Aggregate([reveal, masker_signature])
return spec.EarlyDerivedSecretReveal(
revealed_index=revealed_index,
epoch=epoch,
reveal=masked_reveal,
masker_index=masker_index,
mask=mask,
)
def get_valid_custody_key_reveal(spec, state, period=None, validator_index=None):
current_epoch = spec.get_current_epoch(state)
revealer_index = (spec.get_active_validator_indices(state, current_epoch)[0]
if validator_index is None else validator_index)
revealer = state.validators[revealer_index]
if period is None:
period = revealer.next_custody_secret_to_reveal
epoch_to_sign = spec.get_randao_epoch_for_custody_period(period, revealer_index)
# Generate the secret that is being revealed
domain = spec.get_domain(state, spec.DOMAIN_RANDAO, epoch_to_sign)
signing_root = spec.compute_signing_root(spec.Epoch(epoch_to_sign), domain)
reveal = bls.Sign(privkeys[revealer_index], signing_root)
return spec.CustodyKeyReveal(
revealer_index=revealer_index,
reveal=reveal,
)
def bitlist_from_int(max_len, num_bits, n):
return Bitlist[max_len](*[(n >> i) & 0b1 for i in range(num_bits)])
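# --- Illustrative sketch (editor's addition, not part of the original helpers). ---
# The little-endian bit unpacking used by ``bitlist_from_int``, on plain ints:
def _demo_bits_from_int(n=0b1011, num_bits=4):
    bits = [(n >> i) & 0b1 for i in range(num_bits)]
    assert bits == [1, 1, 0, 1]  # least-significant bit first
    return bits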
def get_valid_custody_slashing(spec, state, attestation, shard_transition, custody_secret, data, data_index=0):
beacon_committee = spec.get_beacon_committee(
state,
attestation.data.slot,
attestation.data.index,
)
malefactor_index = beacon_committee[0]
whistleblower_index = beacon_committee[-1]
slashing = spec.CustodySlashing(
data_index=data_index,
malefactor_index=malefactor_index,
malefactor_secret=custody_secret,
whistleblower_index=whistleblower_index,
shard_transition=shard_transition,
attestation=attestation,
data=data,
)
slashing_domain = spec.get_domain(state, spec.DOMAIN_CUSTODY_BIT_SLASHING)
slashing_root = spec.compute_signing_root(slashing, slashing_domain)
signed_slashing = spec.SignedCustodySlashing(
message=slashing,
signature=bls.Sign(privkeys[whistleblower_index], slashing_root)
)
return signed_slashing
def get_valid_chunk_challenge(spec, state, attestation, shard_transition, data_index=None, chunk_index=None):
crosslink_committee = spec.get_beacon_committee(
state,
attestation.data.slot,
attestation.data.index
)
responder_index = crosslink_committee[0]
    # ``is None`` check so an explicit ``data_index=0`` is not overridden by the default
    data_index = len(shard_transition.shard_block_lengths) - 1 if data_index is None else data_index
chunk_count = (shard_transition.shard_block_lengths[data_index]
+ spec.BYTES_PER_CUSTODY_CHUNK - 1) // spec.BYTES_PER_CUSTODY_CHUNK
    chunk_index = chunk_count - 1 if chunk_index is None else chunk_index
return spec.CustodyChunkChallenge(
responder_index=responder_index,
attestation=attestation,
chunk_index=chunk_index,
data_index=data_index,
shard_transition=shard_transition,
)
def custody_chunkify(spec, x):
chunks = [bytes(x[i:i + spec.BYTES_PER_CUSTODY_CHUNK]) for i in range(0, len(x), spec.BYTES_PER_CUSTODY_CHUNK)]
chunks[-1] = chunks[-1].ljust(spec.BYTES_PER_CUSTODY_CHUNK, b"\0")
return [ByteVector[spec.BYTES_PER_CUSTODY_CHUNK](c) for c in chunks]
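# --- Illustrative sketch (editor's addition, not part of the original helpers). ---
# Chunk-and-pad on plain bytes, mirroring ``custody_chunkify`` with a toy
# 4-byte chunk size instead of ``spec.BYTES_PER_CUSTODY_CHUNK``:
def _demo_chunkify(data=b'abcdefghij', chunk_size=4):
    chunks = [data[i:i + chunk_size] for i in range(0, len(data), chunk_size)]
    chunks[-1] = chunks[-1].ljust(chunk_size, b'\x00')
    assert chunks == [b'abcd', b'efgh', b'ij\x00\x00']
    return chunks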
def get_valid_custody_chunk_response(spec, state, chunk_challenge, challenge_index,
block_length_or_custody_data,
invalid_chunk_data=False):
if isinstance(block_length_or_custody_data, int):
custody_data = get_custody_test_vector(block_length_or_custody_data)
else:
custody_data = block_length_or_custody_data
custody_data_block = ByteList[spec.MAX_SHARD_BLOCK_SIZE](custody_data)
chunks = custody_chunkify(spec, custody_data_block)
chunk_index = chunk_challenge.chunk_index
leaf_index = chunk_index + 2**spec.CUSTODY_RESPONSE_DEPTH
serialized_length = len(custody_data_block).to_bytes(32, 'little')
data_branch = build_proof(custody_data_block.get_backing().get_left(), leaf_index) + [serialized_length]
return spec.CustodyChunkResponse(
challenge_index=challenge_index,
chunk_index=chunk_index,
chunk=chunks[chunk_index],
branch=data_branch,
)
def get_custody_test_vector(bytelength, offset=0):
ints = bytelength // 4 + 1
return (b"".join((i + offset).to_bytes(4, "little") for i in range(ints)))[:bytelength]
def get_sample_shard_transition(spec, start_slot, block_lengths):
b = [spec.hash_tree_root(ByteList[spec.MAX_SHARD_BLOCK_SIZE](get_custody_test_vector(x)))
for x in block_lengths]
shard_transition = spec.ShardTransition(
start_slot=start_slot,
shard_block_lengths=block_lengths,
shard_data_roots=b,
shard_states=[spec.ShardState() for x in block_lengths],
proposer_signature_aggregate=spec.BLSSignature(),
)
return shard_transition
def get_custody_slashable_test_vector(spec, custody_secret, length, slashable=True):
test_vector = get_custody_test_vector(length)
offset = 0
while spec.compute_custody_bit(custody_secret, test_vector) != slashable:
offset += 1
test_vector = get_custody_test_vector(length, offset)
return test_vector
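# --- Illustrative sketch (editor's addition, not part of the original helpers). ---
# The search-by-offset loop above, with a toy predicate standing in for
# ``spec.compute_custody_bit``: bump the offset until the vector's sha256
# has the desired first-byte parity.
def _demo_search_for_bit(length=16, want_bit=True):
    import hashlib
    offset = 0
    vector = get_custody_test_vector(length, offset)
    while ((hashlib.sha256(vector).digest()[0] & 1) == 1) != want_bit:
        offset += 1
        vector = get_custody_test_vector(length, offset)
    return offset, vector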
def get_custody_slashable_shard_transition(spec, start_slot, block_lengths, custody_secret, slashable=True):
shard_transition = get_sample_shard_transition(spec, start_slot, block_lengths)
slashable_test_vector = get_custody_slashable_test_vector(spec, custody_secret,
block_lengths[0], slashable=slashable)
block_data = ByteList[spec.MAX_SHARD_BLOCK_SIZE](slashable_test_vector)
shard_transition.shard_data_roots[0] = spec.hash_tree_root(block_data)
return shard_transition, slashable_test_vector
| 7,099 | 38.88764 | 115 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/pow_block.py
|
from random import Random
from eth2spec.utils.ssz.ssz_typing import uint256
class PowChain:
    def __init__(self, blocks):
        self.blocks = blocks
def __iter__(self):
return iter(self.blocks)
def head(self, offset=0):
assert offset <= 0
return self.blocks[offset - 1]
def to_dict(self):
return {
block.block_hash: block
for block in self.blocks
}
def prepare_random_pow_block(spec, rng=Random(3131)):
return spec.PowBlock(
block_hash=spec.Hash32(spec.hash(bytearray(rng.getrandbits(8) for _ in range(32)))),
parent_hash=spec.Hash32(spec.hash(bytearray(rng.getrandbits(8) for _ in range(32)))),
total_difficulty=uint256(0),
)
def prepare_random_pow_chain(spec, length, rng=Random(3131)) -> PowChain:
assert length > 0
chain = [prepare_random_pow_block(spec, rng)]
for i in range(1, length):
chain.append(prepare_random_pow_block(spec, rng))
chain[i].parent_hash = chain[i - 1].block_hash
return PowChain(chain)
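# --- Illustrative sketch (editor's addition, not part of the original helpers). ---
# The parent-linking invariant established above, checked on a toy chain where
# each "block" is just a (block_hash, parent_hash) pair:
def _demo_chain_linkage():
    import hashlib
    hashes = [hashlib.sha256(bytes([i])).digest() for i in range(4)]
    chain = [(hashes[i], hashes[i - 1] if i > 0 else b'\x00' * 32) for i in range(4)]
    for child, parent in zip(chain[1:], chain[:-1]):
        assert child[1] == parent[0]  # each block points at its predecessor
    return chain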
| 1,082 | 26.075 | 93 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/inactivity_scores.py
|
from random import Random
def randomize_inactivity_scores(spec, state, minimum=0, maximum=50000, rng=Random(4242)):
state.inactivity_scores = [rng.randint(minimum, maximum) for _ in range(len(state.validators))]
def zero_inactivity_scores(spec, state, rng=None):
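    # ``rng`` is unused here; it is accepted only for interface parity with
    # ``randomize_inactivity_scores``.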
state.inactivity_scores = [0] * len(state.validators)
| 329 | 32 | 99 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/light_client.py
|
from eth2spec.test.helpers.state import (
transition_to,
)
from eth2spec.test.helpers.sync_committee import (
compute_aggregate_sync_committee_signature,
compute_committee_indices,
)
from math import floor
def get_sync_aggregate(spec, state, num_participants=None, signature_slot=None):
# By default, the sync committee signs the previous slot
if signature_slot is None:
signature_slot = state.slot + 1
# Ensure correct sync committee and fork version are selected
signature_state = state.copy()
transition_to(spec, signature_state, signature_slot)
# Fetch sync committee
committee_indices = compute_committee_indices(signature_state)
committee_size = len(committee_indices)
# By default, use full participation
if num_participants is None:
num_participants = committee_size
assert committee_size >= num_participants >= 0
# Compute sync aggregate
sync_committee_bits = [True] * num_participants + [False] * (committee_size - num_participants)
sync_committee_signature = compute_aggregate_sync_committee_signature(
spec,
signature_state,
max(signature_slot, 1) - 1,
committee_indices[:num_participants],
)
sync_aggregate = spec.SyncAggregate(
sync_committee_bits=sync_committee_bits,
sync_committee_signature=sync_committee_signature,
)
return sync_aggregate, signature_slot
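# --- Illustrative sketch (editor's addition, not part of the original helpers). ---
# The participation layout used above: the first ``num_participants`` committee
# slots sign, the remainder abstain.
def _demo_participation_bits(committee_size=8, num_participants=5):
    bits = [True] * num_participants + [False] * (committee_size - num_participants)
    assert sum(bits) == num_participants and len(bits) == committee_size
    return bits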
def create_update(spec,
attested_state,
attested_block,
finalized_block,
with_next,
with_finality,
participation_rate):
num_participants = floor(spec.SYNC_COMMITTEE_SIZE * participation_rate)
update = spec.LightClientUpdate()
update.attested_header = spec.block_to_light_client_header(attested_block)
if with_next:
update.next_sync_committee = attested_state.next_sync_committee
update.next_sync_committee_branch = spec.compute_merkle_proof_for_state(
attested_state, spec.NEXT_SYNC_COMMITTEE_INDEX)
if with_finality:
update.finalized_header = spec.block_to_light_client_header(finalized_block)
update.finality_branch = spec.compute_merkle_proof_for_state(
attested_state, spec.FINALIZED_ROOT_INDEX)
update.sync_aggregate, update.signature_slot = get_sync_aggregate(
spec, attested_state, num_participants)
return update
| 2,457 | 33.619718 | 99 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/proposer_slashings.py
|
from eth2spec.test.helpers.block_header import sign_block_header
from eth2spec.test.helpers.forks import is_post_altair, is_post_bellatrix
from eth2spec.test.helpers.keys import pubkey_to_privkey
from eth2spec.test.helpers.state import get_balance
from eth2spec.test.helpers.sync_committee import (
compute_committee_indices,
compute_sync_committee_participant_reward_and_penalty,
)
def get_min_slashing_penalty_quotient(spec):
if is_post_bellatrix(spec):
return spec.MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX
elif is_post_altair(spec):
return spec.MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR
else:
return spec.MIN_SLASHING_PENALTY_QUOTIENT
def check_proposer_slashing_effect(spec, pre_state, state, slashed_index, block=None):
slashed_validator = state.validators[slashed_index]
assert slashed_validator.slashed
assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH
assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH
proposer_index = spec.get_beacon_proposer_index(state)
slash_penalty = state.validators[slashed_index].effective_balance // get_min_slashing_penalty_quotient(spec)
whistleblower_reward = state.validators[slashed_index].effective_balance // spec.WHISTLEBLOWER_REWARD_QUOTIENT
# Altair introduces sync committee (SC) reward and penalty
sc_reward_for_slashed = sc_penalty_for_slashed = sc_reward_for_proposer = sc_penalty_for_proposer = 0
if is_post_altair(spec) and block is not None:
committee_indices = compute_committee_indices(state, state.current_sync_committee)
committee_bits = block.body.sync_aggregate.sync_committee_bits
sc_reward_for_slashed, sc_penalty_for_slashed = compute_sync_committee_participant_reward_and_penalty(
spec,
pre_state,
slashed_index,
committee_indices,
committee_bits,
)
sc_reward_for_proposer, sc_penalty_for_proposer = compute_sync_committee_participant_reward_and_penalty(
spec,
pre_state,
proposer_index,
committee_indices,
committee_bits,
)
if proposer_index != slashed_index:
# slashed validator lost initial slash penalty
assert (
get_balance(state, slashed_index)
== get_balance(pre_state, slashed_index) - slash_penalty + sc_reward_for_slashed - sc_penalty_for_slashed
)
# block proposer gained whistleblower reward
        # >= because the proposer could have included multiple proposer slashings
assert (
get_balance(state, proposer_index)
>= (
get_balance(pre_state, proposer_index) + whistleblower_reward
+ sc_reward_for_proposer - sc_penalty_for_proposer
)
)
else:
        # the proposer reported themselves, so they receive both the penalty and the reward
        # >= because the proposer could have included multiple proposer slashings
assert (
get_balance(state, slashed_index)
>= (
get_balance(pre_state, slashed_index) - slash_penalty + whistleblower_reward
+ sc_reward_for_slashed - sc_penalty_for_slashed
)
)
def get_valid_proposer_slashing(spec, state, random_root=b'\x99' * 32,
slashed_index=None, slot=None, signed_1=False, signed_2=False):
if slashed_index is None:
current_epoch = spec.get_current_epoch(state)
slashed_index = spec.get_active_validator_indices(state, current_epoch)[-1]
privkey = pubkey_to_privkey[state.validators[slashed_index].pubkey]
if slot is None:
slot = state.slot
header_1 = spec.BeaconBlockHeader(
slot=slot,
proposer_index=slashed_index,
parent_root=b'\x33' * 32,
state_root=b'\x44' * 32,
body_root=b'\x55' * 32,
)
header_2 = header_1.copy()
header_2.parent_root = random_root
if signed_1:
signed_header_1 = sign_block_header(spec, state, header_1, privkey)
else:
signed_header_1 = spec.SignedBeaconBlockHeader(message=header_1)
if signed_2:
signed_header_2 = sign_block_header(spec, state, header_2, privkey)
else:
signed_header_2 = spec.SignedBeaconBlockHeader(message=header_2)
return spec.ProposerSlashing(
signed_header_1=signed_header_1,
signed_header_2=signed_header_2,
)
| 4,413 | 39.495413 | 117 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/bellatrix/fork.py
|
BELLATRIX_FORK_TEST_META_TAGS = {
'fork': 'bellatrix',
}
def run_fork_test(post_spec, pre_state):
yield 'pre', pre_state
post_state = post_spec.upgrade_to_bellatrix(pre_state)
# Stable fields
stable_fields = [
'genesis_time', 'genesis_validators_root', 'slot',
# History
'latest_block_header', 'block_roots', 'state_roots', 'historical_roots',
# Eth1
'eth1_data', 'eth1_data_votes', 'eth1_deposit_index',
# Registry
'validators', 'balances',
# Randomness
'randao_mixes',
# Slashings
'slashings',
# Participation
'previous_epoch_participation', 'current_epoch_participation',
# Finality
'justification_bits', 'previous_justified_checkpoint', 'current_justified_checkpoint', 'finalized_checkpoint',
# Inactivity
'inactivity_scores',
# Sync
'current_sync_committee', 'next_sync_committee'
]
for field in stable_fields:
assert getattr(pre_state, field) == getattr(post_state, field)
# Modified fields
modified_fields = ['fork']
for field in modified_fields:
assert getattr(pre_state, field) != getattr(post_state, field)
assert pre_state.fork.current_version == post_state.fork.previous_version
assert post_state.fork.current_version == post_spec.config.BELLATRIX_FORK_VERSION
assert post_state.fork.epoch == post_spec.get_current_epoch(post_state)
assert post_state.latest_execution_payload_header == post_spec.ExecutionPayloadHeader()
yield 'post', post_state
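# --- Illustrative sketch (editor's addition, not part of the original helpers). ---
# The ``getattr``-based field comparison used above, shown on toy objects:
def _demo_stable_field_check():
    from types import SimpleNamespace
    pre = SimpleNamespace(slot=5, fork='a')
    post = SimpleNamespace(slot=5, fork='b')
    for field in ['slot']:  # stable fields must be untouched by the upgrade
        assert getattr(pre, field) == getattr(post, field)
    for field in ['fork']:  # modified fields must differ
        assert getattr(pre, field) != getattr(post, field)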
| 1,593 | 32.914894 | 118 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/bellatrix/__init__.py
| 0 | 0 | 0 |
py
|
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/altair/fork.py
|
ALTAIR_FORK_TEST_META_TAGS = {
'fork': 'altair',
}
def run_fork_test(post_spec, pre_state):
# Clean up state to be more realistic
pre_state.current_epoch_attestations = []
yield 'pre', pre_state
post_state = post_spec.upgrade_to_altair(pre_state)
# Stable fields
stable_fields = [
'genesis_time', 'genesis_validators_root', 'slot',
# History
'latest_block_header', 'block_roots', 'state_roots', 'historical_roots',
# Eth1
'eth1_data', 'eth1_data_votes', 'eth1_deposit_index',
# Registry
'validators', 'balances',
# Randomness
'randao_mixes',
# Slashings
'slashings',
# Finality
'justification_bits', 'previous_justified_checkpoint', 'current_justified_checkpoint', 'finalized_checkpoint',
]
for field in stable_fields:
assert getattr(pre_state, field) == getattr(post_state, field)
# Modified fields
modified_fields = ['fork']
for field in modified_fields:
assert getattr(pre_state, field) != getattr(post_state, field)
assert pre_state.fork.current_version == post_state.fork.previous_version
assert post_state.fork.current_version == post_spec.config.ALTAIR_FORK_VERSION
assert post_state.fork.epoch == post_spec.get_current_epoch(post_state)
yield 'post', post_state
| 1,362 | 30.697674 | 118 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/altair/__init__.py
| 0 | 0 | 0 |
py
|
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/deneb/fork.py
|
from eth2spec.test.helpers.constants import (
DENEB,
)
DENEB_FORK_TEST_META_TAGS = {
'fork': DENEB,
}
def run_fork_test(post_spec, pre_state):
yield 'pre', pre_state
post_state = post_spec.upgrade_to_deneb(pre_state)
# Stable fields
stable_fields = [
'genesis_time', 'genesis_validators_root', 'slot',
# History
'latest_block_header', 'block_roots', 'state_roots', 'historical_roots',
# Eth1
'eth1_data', 'eth1_data_votes', 'eth1_deposit_index',
# Registry
'validators', 'balances',
# Randomness
'randao_mixes',
# Slashings
'slashings',
# Participation
'previous_epoch_participation', 'current_epoch_participation',
# Finality
'justification_bits', 'previous_justified_checkpoint', 'current_justified_checkpoint', 'finalized_checkpoint',
# Inactivity
'inactivity_scores',
# Sync
'current_sync_committee', 'next_sync_committee',
# Withdrawals
'next_withdrawal_index', 'next_withdrawal_validator_index',
]
for field in stable_fields:
assert getattr(pre_state, field) == getattr(post_state, field)
# Modified fields
modified_fields = ['fork', 'latest_execution_payload_header']
for field in modified_fields:
assert getattr(pre_state, field) != getattr(post_state, field)
assert len(pre_state.validators) == len(post_state.validators)
for pre_validator, post_validator in zip(pre_state.validators, post_state.validators):
stable_validator_fields = [
'pubkey', 'withdrawal_credentials',
'effective_balance',
'slashed',
'activation_eligibility_epoch', 'activation_epoch', 'exit_epoch', 'withdrawable_epoch',
]
for field in stable_validator_fields:
assert getattr(pre_validator, field) == getattr(post_validator, field)
assert pre_state.fork.current_version == post_state.fork.previous_version
assert post_state.fork.current_version == post_spec.config.DENEB_FORK_VERSION
assert post_state.fork.epoch == post_spec.get_current_epoch(post_state)
yield 'post', post_state
| 2,208 | 33.515625 | 118 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/deneb/__init__.py
| 0 | 0 | 0 |
py
|
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/capella/fork.py
|
CAPELLA_FORK_TEST_META_TAGS = {
'fork': 'capella',
}
def run_fork_test(post_spec, pre_state):
yield 'pre', pre_state
post_state = post_spec.upgrade_to_capella(pre_state)
# Stable fields
stable_fields = [
'genesis_time', 'genesis_validators_root', 'slot',
# History
'latest_block_header', 'block_roots', 'state_roots', 'historical_roots',
# Eth1
'eth1_data', 'eth1_data_votes', 'eth1_deposit_index',
# Registry
'validators', 'balances',
# Randomness
'randao_mixes',
# Slashings
'slashings',
# Participation
'previous_epoch_participation', 'current_epoch_participation',
# Finality
'justification_bits', 'previous_justified_checkpoint', 'current_justified_checkpoint', 'finalized_checkpoint',
# Inactivity
'inactivity_scores',
# Sync
'current_sync_committee', 'next_sync_committee',
]
for field in stable_fields:
assert getattr(pre_state, field) == getattr(post_state, field)
# Modified fields
modified_fields = ['fork', 'latest_execution_payload_header']
for field in modified_fields:
assert getattr(pre_state, field) != getattr(post_state, field)
assert len(pre_state.validators) == len(post_state.validators)
for pre_validator, post_validator in zip(pre_state.validators, post_state.validators):
stable_validator_fields = [
'pubkey', 'withdrawal_credentials',
'effective_balance',
'slashed',
'activation_eligibility_epoch', 'activation_epoch', 'exit_epoch', 'withdrawable_epoch',
]
for field in stable_validator_fields:
assert getattr(pre_validator, field) == getattr(post_validator, field)
assert pre_state.fork.current_version == post_state.fork.previous_version
assert post_state.fork.current_version == post_spec.config.CAPELLA_FORK_VERSION
assert post_state.fork.epoch == post_spec.get_current_epoch(post_state)
yield 'post', post_state
| 2,067 | 35.280702 | 118 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/helpers/capella/__init__.py
| 0 | 0 | 0 |
py
|
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/bellatrix/__init__.py
| 0 | 0 | 0 |
py
|
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/bellatrix/random/test_random.py
|
"""
This module is generated from the ``random`` test generator.
Please do not edit this file manually.
See the README for that generator for more information.
"""
from eth2spec.test.helpers.constants import BELLATRIX
from eth2spec.test.context import (
misc_balances_in_default_range_with_many_validators,
with_phases,
zero_activation_threshold,
only_generator,
)
from eth2spec.test.context import (
always_bls,
spec_test,
with_custom_state,
single_phase,
)
from eth2spec.test.utils.randomized_block_tests import (
run_generated_randomized_test,
)
@only_generator("randomized test for broad coverage, not point-to-point CI")
@with_phases([BELLATRIX])
@with_custom_state(
balances_fn=misc_balances_in_default_range_with_many_validators,
threshold_fn=zero_activation_threshold
)
@spec_test
@single_phase
@always_bls
def test_randomized_0(spec, state):
# scenario as high-level, informal text:
# epochs:0,slots:0,with-block:no_block
# epochs:0,slots:0,with-block:no_block
# epochs:0,slots:0,with-block:no_block
# epochs:0,slots:0,with-block:random_block_bellatrix
# epochs:1,slots:0,with-block:no_block
# epochs:0,slots:random_slot_in_epoch,with-block:no_block
# epochs:0,slots:0,with-block:random_block_bellatrix
scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_bellatrix', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_bellatrix', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_bellatrix'} # noqa: E501
yield from run_generated_randomized_test(
spec,
state,
scenario,
)
@only_generator("randomized test for broad coverage, not point-to-point CI")
@with_phases([BELLATRIX])
@with_custom_state(
balances_fn=misc_balances_in_default_range_with_many_validators,
threshold_fn=zero_activation_threshold
)
@spec_test
@single_phase
@always_bls
def test_randomized_1(spec, state):
# scenario as high-level, informal text:
# epochs:0,slots:0,with-block:no_block
# epochs:1,slots:0,with-block:no_block
# epochs:0,slots:random_slot_in_epoch,with-block:no_block
# epochs:0,slots:0,with-block:random_block_bellatrix
# epochs:0,slots:0,with-block:no_block
# epochs:0,slots:0,with-block:no_block
# epochs:0,slots:0,with-block:random_block_bellatrix
scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_bellatrix', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_bellatrix', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_bellatrix'} # noqa: E501
yield from run_generated_randomized_test(
spec,
state,
scenario,
)
@only_generator("randomized test for broad coverage, not point-to-point CI")
@with_phases([BELLATRIX])
@with_custom_state(
balances_fn=misc_balances_in_default_range_with_many_validators,
threshold_fn=zero_activation_threshold
)
@spec_test
@single_phase
@always_bls
def test_randomized_2(spec, state):
# scenario as high-level, informal text:
# epochs:0,slots:0,with-block:no_block
# epochs:0,slots:0,with-block:no_block
# epochs:0,slots:penultimate_slot_in_epoch,with-block:no_block
# epochs:0,slots:0,with-block:random_block_bellatrix
# epochs:0,slots:0,with-block:no_block
# epochs:0,slots:last_slot_in_epoch,with-block:no_block
# epochs:0,slots:0,with-block:random_block_bellatrix
scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_bellatrix', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_bellatrix', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_bellatrix'} # noqa: E501
yield from run_generated_randomized_test(
spec,
state,
scenario,
)
@only_generator("randomized test for broad coverage, not point-to-point CI")
@with_phases([BELLATRIX])
@with_custom_state(
balances_fn=misc_balances_in_default_range_with_many_validators,
threshold_fn=zero_activation_threshold
)
@spec_test
@single_phase
@always_bls
def test_randomized_3(spec, state):
# scenario as high-level, informal text:
# epochs:0,slots:0,with-block:no_block
# epochs:0,slots:0,with-block:no_block
# epochs:0,slots:last_slot_in_epoch,with-block:no_block
# epochs:0,slots:0,with-block:random_block_bellatrix
# epochs:1,slots:0,with-block:no_block
# epochs:0,slots:last_slot_in_epoch,with-block:no_block
# epochs:0,slots:0,with-block:random_block_bellatrix
scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_bellatrix', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_bellatrix', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_bellatrix'} # noqa: E501
yield from run_generated_randomized_test(
spec,
state,
scenario,
)
@only_generator("randomized test for broad coverage, not point-to-point CI")
@with_phases([BELLATRIX])
@with_custom_state(
balances_fn=misc_balances_in_default_range_with_many_validators,
threshold_fn=zero_activation_threshold
)
@spec_test
@single_phase
@always_bls
def test_randomized_4(spec, state):
# scenario as high-level, informal text:
# epochs:0,slots:0,with-block:no_block
# epochs:1,slots:0,with-block:no_block
# epochs:0,slots:last_slot_in_epoch,with-block:no_block
# epochs:0,slots:0,with-block:random_block_bellatrix
# epochs:1,slots:0,with-block:no_block
# epochs:0,slots:penultimate_slot_in_epoch,with-block:no_block
# epochs:0,slots:0,with-block:random_block_bellatrix
scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_bellatrix', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_bellatrix', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_bellatrix'} # noqa: E501
yield from run_generated_randomized_test(
spec,
state,
scenario,
)
@only_generator("randomized test for broad coverage, not point-to-point CI")
@with_phases([BELLATRIX])
@with_custom_state(
balances_fn=misc_balances_in_default_range_with_many_validators,
threshold_fn=zero_activation_threshold
)
@spec_test
@single_phase
@always_bls
def test_randomized_5(spec, state):
# scenario as high-level, informal text:
# epochs:0,slots:0,with-block:no_block
# epochs:0,slots:0,with-block:no_block
# epochs:0,slots:random_slot_in_epoch,with-block:no_block
# epochs:0,slots:0,with-block:random_block_bellatrix
# epochs:0,slots:0,with-block:no_block
# epochs:0,slots:random_slot_in_epoch,with-block:no_block
# epochs:0,slots:0,with-block:random_block_bellatrix
scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_bellatrix', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_bellatrix', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_bellatrix'} # noqa: E501
yield from run_generated_randomized_test(
spec,
state,
scenario,
)
@only_generator("randomized test for broad coverage, not point-to-point CI")
@with_phases([BELLATRIX])
@with_custom_state(
balances_fn=misc_balances_in_default_range_with_many_validators,
threshold_fn=zero_activation_threshold
)
@spec_test
@single_phase
@always_bls
def test_randomized_6(spec, state):
# scenario as high-level, informal text:
# epochs:0,slots:0,with-block:no_block
# epochs:1,slots:0,with-block:no_block
# epochs:0,slots:penultimate_slot_in_epoch,with-block:no_block
# epochs:0,slots:0,with-block:random_block_bellatrix
# epochs:0,slots:0,with-block:no_block
# epochs:0,slots:penultimate_slot_in_epoch,with-block:no_block
# epochs:0,slots:0,with-block:random_block_bellatrix
scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_bellatrix', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_bellatrix', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_bellatrix'} # noqa: E501
yield from run_generated_randomized_test(
spec,
state,
scenario,
)
@only_generator("randomized test for broad coverage, not point-to-point CI")
@with_phases([BELLATRIX])
@with_custom_state(
balances_fn=misc_balances_in_default_range_with_many_validators,
threshold_fn=zero_activation_threshold
)
@spec_test
@single_phase
@always_bls
def test_randomized_7(spec, state):
# scenario as high-level, informal text:
# epochs:0,slots:0,with-block:no_block
# epochs:1,slots:0,with-block:no_block
# epochs:0,slots:0,with-block:no_block
# epochs:0,slots:0,with-block:random_block_bellatrix
# epochs:1,slots:0,with-block:no_block
# epochs:0,slots:0,with-block:no_block
# epochs:0,slots:0,with-block:random_block_bellatrix
scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_bellatrix', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_bellatrix', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_bellatrix'} # noqa: E501
yield from run_generated_randomized_test(
spec,
state,
scenario,
)
@only_generator("randomized test for broad coverage, not point-to-point CI")
@with_phases([BELLATRIX])
@with_custom_state(
balances_fn=misc_balances_in_default_range_with_many_validators,
threshold_fn=zero_activation_threshold
)
@spec_test
@single_phase
@always_bls
def test_randomized_8(spec, state):
# scenario as high-level, informal text:
# epochs:epochs_until_leak,slots:0,with-block:no_block
# epochs:0,slots:0,with-block:no_block
# epochs:0,slots:0,with-block:no_block
# epochs:0,slots:0,with-block:random_block_bellatrix
# epochs:1,slots:0,with-block:no_block
# epochs:0,slots:random_slot_in_epoch,with-block:no_block
# epochs:0,slots:0,with-block:random_block_bellatrix
scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_bellatrix', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_bellatrix', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_bellatrix'} # noqa: E501
yield from run_generated_randomized_test(
spec,
state,
scenario,
)
@only_generator("randomized test for broad coverage, not point-to-point CI")
@with_phases([BELLATRIX])
@with_custom_state(
balances_fn=misc_balances_in_default_range_with_many_validators,
threshold_fn=zero_activation_threshold
)
@spec_test
@single_phase
@always_bls
def test_randomized_9(spec, state):
# scenario as high-level, informal text:
# epochs:epochs_until_leak,slots:0,with-block:no_block
# epochs:1,slots:0,with-block:no_block
# epochs:0,slots:random_slot_in_epoch,with-block:no_block
# epochs:0,slots:0,with-block:random_block_bellatrix
# epochs:0,slots:0,with-block:no_block
# epochs:0,slots:0,with-block:no_block
# epochs:0,slots:0,with-block:random_block_bellatrix
scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_bellatrix', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_bellatrix', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_bellatrix'} # noqa: E501
yield from run_generated_randomized_test(
spec,
state,
scenario,
)
@only_generator("randomized test for broad coverage, not point-to-point CI")
@with_phases([BELLATRIX])
@with_custom_state(
balances_fn=misc_balances_in_default_range_with_many_validators,
threshold_fn=zero_activation_threshold
)
@spec_test
@single_phase
@always_bls
def test_randomized_10(spec, state):
# scenario as high-level, informal text:
# epochs:epochs_until_leak,slots:0,with-block:no_block
# epochs:0,slots:0,with-block:no_block
# epochs:0,slots:penultimate_slot_in_epoch,with-block:no_block
# epochs:0,slots:0,with-block:random_block_bellatrix
# epochs:0,slots:0,with-block:no_block
# epochs:0,slots:last_slot_in_epoch,with-block:no_block
# epochs:0,slots:0,with-block:random_block_bellatrix
scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_bellatrix', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_bellatrix', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_bellatrix'} # noqa: E501
yield from run_generated_randomized_test(
spec,
state,
scenario,
)
@only_generator("randomized test for broad coverage, not point-to-point CI")
@with_phases([BELLATRIX])
@with_custom_state(
balances_fn=misc_balances_in_default_range_with_many_validators,
threshold_fn=zero_activation_threshold
)
@spec_test
@single_phase
@always_bls
def test_randomized_11(spec, state):
# scenario as high-level, informal text:
# epochs:epochs_until_leak,slots:0,with-block:no_block
# epochs:0,slots:0,with-block:no_block
# epochs:0,slots:last_slot_in_epoch,with-block:no_block
# epochs:0,slots:0,with-block:random_block_bellatrix
# epochs:1,slots:0,with-block:no_block
# epochs:0,slots:last_slot_in_epoch,with-block:no_block
# epochs:0,slots:0,with-block:random_block_bellatrix
scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_bellatrix', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_bellatrix', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_bellatrix'} # noqa: E501
yield from run_generated_randomized_test(
spec,
state,
scenario,
)
@only_generator("randomized test for broad coverage, not point-to-point CI")
@with_phases([BELLATRIX])
@with_custom_state(
balances_fn=misc_balances_in_default_range_with_many_validators,
threshold_fn=zero_activation_threshold
)
@spec_test
@single_phase
@always_bls
def test_randomized_12(spec, state):
# scenario as high-level, informal text:
# epochs:epochs_until_leak,slots:0,with-block:no_block
# epochs:1,slots:0,with-block:no_block
# epochs:0,slots:last_slot_in_epoch,with-block:no_block
# epochs:0,slots:0,with-block:random_block_bellatrix
# epochs:1,slots:0,with-block:no_block
# epochs:0,slots:penultimate_slot_in_epoch,with-block:no_block
# epochs:0,slots:0,with-block:random_block_bellatrix
scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_bellatrix', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_bellatrix', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_bellatrix'} # noqa: E501
yield from run_generated_randomized_test(
spec,
state,
scenario,
)
@only_generator("randomized test for broad coverage, not point-to-point CI")
@with_phases([BELLATRIX])
@with_custom_state(
balances_fn=misc_balances_in_default_range_with_many_validators,
threshold_fn=zero_activation_threshold
)
@spec_test
@single_phase
@always_bls
def test_randomized_13(spec, state):
# scenario as high-level, informal text:
# epochs:epochs_until_leak,slots:0,with-block:no_block
# epochs:0,slots:0,with-block:no_block
# epochs:0,slots:random_slot_in_epoch,with-block:no_block
# epochs:0,slots:0,with-block:random_block_bellatrix
# epochs:0,slots:0,with-block:no_block
# epochs:0,slots:random_slot_in_epoch,with-block:no_block
# epochs:0,slots:0,with-block:random_block_bellatrix
scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_bellatrix', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_bellatrix', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_bellatrix'} # noqa: E501
yield from run_generated_randomized_test(
spec,
state,
scenario,
)
@only_generator("randomized test for broad coverage, not point-to-point CI")
@with_phases([BELLATRIX])
@with_custom_state(
balances_fn=misc_balances_in_default_range_with_many_validators,
threshold_fn=zero_activation_threshold
)
@spec_test
@single_phase
@always_bls
def test_randomized_14(spec, state):
# scenario as high-level, informal text:
# epochs:epochs_until_leak,slots:0,with-block:no_block
# epochs:1,slots:0,with-block:no_block
# epochs:0,slots:penultimate_slot_in_epoch,with-block:no_block
# epochs:0,slots:0,with-block:random_block_bellatrix
# epochs:0,slots:0,with-block:no_block
# epochs:0,slots:penultimate_slot_in_epoch,with-block:no_block
# epochs:0,slots:0,with-block:random_block_bellatrix
scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_bellatrix', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_bellatrix', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_bellatrix'} # noqa: E501
yield from run_generated_randomized_test(
spec,
state,
scenario,
)
@only_generator("randomized test for broad coverage, not point-to-point CI")
@with_phases([BELLATRIX])
@with_custom_state(
balances_fn=misc_balances_in_default_range_with_many_validators,
threshold_fn=zero_activation_threshold
)
@spec_test
@single_phase
@always_bls
def test_randomized_15(spec, state):
# scenario as high-level, informal text:
# epochs:epochs_until_leak,slots:0,with-block:no_block
# epochs:1,slots:0,with-block:no_block
# epochs:0,slots:0,with-block:no_block
# epochs:0,slots:0,with-block:random_block_bellatrix
# epochs:1,slots:0,with-block:no_block
# epochs:0,slots:0,with-block:no_block
# epochs:0,slots:0,with-block:random_block_bellatrix
scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_bellatrix', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_bellatrix', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_bellatrix'} # noqa: E501
yield from run_generated_randomized_test(
spec,
state,
scenario,
)
| 28,480 | 63.876993 | 945 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/bellatrix/random/__init__.py
| 0 | 0 | 0 |
py
|
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/bellatrix/block_processing/test_process_deposit.py
|
from eth2spec.test.context import (
spec_state_test,
always_bls,
with_bellatrix_and_later,
)
from eth2spec.test.helpers.deposits import (
run_deposit_processing_with_specific_fork_version,
)
@with_bellatrix_and_later
@spec_state_test
@always_bls
def test_ineffective_deposit_with_previous_fork_version(spec, state):
# Since deposits are valid across forks, the domain is always set with `GENESIS_FORK_VERSION`.
# It's an ineffective deposit because it fails at BLS sig verification.
# NOTE: it was effective in Altair.
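    # Sketch of the failure mode (assumes the spec's `compute_domain` helper):
    # deposits are verified against compute_domain(DOMAIN_DEPOSIT), which defaults
    # to GENESIS_FORK_VERSION, so a signature made over `previous_version` cannot
    # verify once the fork versions have diverged from genesis.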
assert state.fork.previous_version != state.fork.current_version
yield from run_deposit_processing_with_specific_fork_version(
spec,
state,
fork_version=state.fork.previous_version,
effective=False,
)
@with_bellatrix_and_later
@spec_state_test
@always_bls
def test_effective_deposit_with_genesis_fork_version(spec, state):
assert spec.config.GENESIS_FORK_VERSION not in (state.fork.previous_version, state.fork.current_version)
yield from run_deposit_processing_with_specific_fork_version(
spec,
state,
fork_version=spec.config.GENESIS_FORK_VERSION,
)
| 1,185 | 29.410256 | 108 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/bellatrix/block_processing/test_process_voluntary_exit.py
|
from eth2spec.test.context import (
spec_state_test,
always_bls,
with_bellatrix_and_later,
with_phases,
)
from eth2spec.test.helpers.constants import (
BELLATRIX,
CAPELLA,
)
from eth2spec.test.helpers.keys import pubkey_to_privkey
from eth2spec.test.helpers.state import (
next_epoch,
)
from eth2spec.test.helpers.voluntary_exits import (
run_voluntary_exit_processing,
sign_voluntary_exit,
)
BELLATRIX_AND_CAPELLA = [BELLATRIX, CAPELLA]
def run_voluntary_exit_processing_test(
spec,
state,
fork_version,
is_before_fork_epoch,
valid=True):
# create a fork
next_epoch(spec, state)
state.fork.epoch = spec.get_current_epoch(state)
voluntary_exit_epoch = 0 if is_before_fork_epoch else state.fork.epoch
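    # The exit's epoch selects the signing domain on the verifier side (via
    # `get_domain`): epochs before `state.fork.epoch` resolve to `previous_version`,
    # later ones to `current_version`, which is what `is_before_fork_epoch` probes.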
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
current_epoch = spec.get_current_epoch(state)
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
privkey = pubkey_to_privkey[state.validators[validator_index].pubkey]
voluntary_exit = spec.VoluntaryExit(
epoch=voluntary_exit_epoch,
validator_index=validator_index,
)
signed_voluntary_exit = sign_voluntary_exit(
spec,
state,
voluntary_exit,
privkey,
fork_version=fork_version,
)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, valid=valid)
@with_bellatrix_and_later
@spec_state_test
@always_bls
def test_invalid_voluntary_exit_with_current_fork_version_is_before_fork_epoch(spec, state):
yield from run_voluntary_exit_processing_test(
spec,
state,
fork_version=state.fork.current_version,
is_before_fork_epoch=True,
valid=False,
)
@with_phases(BELLATRIX_AND_CAPELLA)
@spec_state_test
@always_bls
def test_voluntary_exit_with_current_fork_version_not_is_before_fork_epoch(spec, state):
yield from run_voluntary_exit_processing_test(
spec,
state,
fork_version=state.fork.current_version,
is_before_fork_epoch=False,
)
@with_phases(BELLATRIX_AND_CAPELLA)

@spec_state_test
@always_bls
def test_voluntary_exit_with_previous_fork_version_is_before_fork_epoch(spec, state):
assert state.fork.previous_version != state.fork.current_version
yield from run_voluntary_exit_processing_test(
spec,
state,
fork_version=state.fork.previous_version,
is_before_fork_epoch=True,
)
@with_phases(BELLATRIX_AND_CAPELLA)
@spec_state_test
@always_bls
def test_invalid_voluntary_exit_with_previous_fork_version_not_is_before_fork_epoch(spec, state):
assert state.fork.previous_version != state.fork.current_version
yield from run_voluntary_exit_processing_test(
spec,
state,
fork_version=state.fork.previous_version,
is_before_fork_epoch=False,
valid=False,
)
@with_bellatrix_and_later
@spec_state_test
@always_bls
def test_invalid_voluntary_exit_with_genesis_fork_version_is_before_fork_epoch(spec, state):
assert spec.config.GENESIS_FORK_VERSION not in (state.fork.previous_version, state.fork.current_version)
yield from run_voluntary_exit_processing_test(
spec,
state,
fork_version=spec.config.GENESIS_FORK_VERSION,
is_before_fork_epoch=True,
valid=False,
)
@with_bellatrix_and_later
@spec_state_test
@always_bls
def test_invalid_voluntary_exit_with_genesis_fork_version_not_is_before_fork_epoch(spec, state):
assert spec.config.GENESIS_FORK_VERSION not in (state.fork.previous_version, state.fork.current_version)
yield from run_voluntary_exit_processing_test(
spec,
state,
fork_version=spec.config.GENESIS_FORK_VERSION,
is_before_fork_epoch=False,
valid=False,
)
| 3,940 | 27.352518 | 108 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/bellatrix/block_processing/test_process_execution_payload.py
|
from random import Random
from eth2spec.test.helpers.execution_payload import (
build_empty_execution_payload,
build_randomized_execution_payload,
compute_el_block_hash,
get_execution_payload_header,
build_state_with_incomplete_transition,
build_state_with_complete_transition,
)
from eth2spec.test.context import (
BELLATRIX,
expect_assertion_error,
spec_state_test,
with_bellatrix_and_later,
with_phases,
)
from eth2spec.test.helpers.state import next_slot
def run_execution_payload_processing(spec, state, execution_payload, valid=True, execution_valid=True):
"""
Run ``process_execution_payload``, yielding:
- pre-state ('pre')
- execution payload ('execution_payload')
- execution details, to mock EVM execution ('execution.yml', a dict with 'execution_valid' key and boolean value)
- post-state ('post').
If ``valid == False``, run expecting ``AssertionError``
"""
# Before Deneb, only `body.execution_payload` matters. `BeaconBlockBody` is just a wrapper.
body = spec.BeaconBlockBody(execution_payload=execution_payload)
yield 'pre', state
yield 'execution', {'execution_valid': execution_valid}
yield 'body', body
called_new_block = False
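    # Mock engine: stands in for a real execution client. It records that the
    # engine API was invoked and returns the scripted `execution_valid` verdict.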
class TestEngine(spec.NoopExecutionEngine):
def verify_and_notify_new_payload(self, new_payload_request) -> bool:
nonlocal called_new_block, execution_valid
called_new_block = True
assert new_payload_request.execution_payload == body.execution_payload
return execution_valid
if not valid:
expect_assertion_error(lambda: spec.process_execution_payload(state, body, TestEngine()))
yield 'post', None
return
spec.process_execution_payload(state, body, TestEngine())
# Make sure we called the engine
assert called_new_block
yield 'post', state
assert state.latest_execution_payload_header == get_execution_payload_header(spec, body.execution_payload)
def run_success_test(spec, state):
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
yield from run_execution_payload_processing(spec, state, execution_payload)
@with_bellatrix_and_later
@spec_state_test
def test_success_first_payload(spec, state):
state = build_state_with_incomplete_transition(spec, state)
yield from run_success_test(spec, state)
@with_bellatrix_and_later
@spec_state_test
def test_success_regular_payload(spec, state):
state = build_state_with_complete_transition(spec, state)
yield from run_success_test(spec, state)
def run_gap_slot_test(spec, state):
next_slot(spec, state)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
yield from run_execution_payload_processing(spec, state, execution_payload)
@with_bellatrix_and_later
@spec_state_test
def test_success_first_payload_with_gap_slot(spec, state):
state = build_state_with_incomplete_transition(spec, state)
yield from run_gap_slot_test(spec, state)
@with_bellatrix_and_later
@spec_state_test
def test_success_regular_payload_with_gap_slot(spec, state):
state = build_state_with_complete_transition(spec, state)
yield from run_gap_slot_test(spec, state)
def run_bad_execution_test(spec, state):
# completely valid payload, but execution itself fails (e.g. block exceeds gas limit)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
yield from run_execution_payload_processing(spec, state, execution_payload, valid=False, execution_valid=False)
@with_bellatrix_and_later
@spec_state_test
def test_invalid_bad_execution_first_payload(spec, state):
state = build_state_with_incomplete_transition(spec, state)
yield from run_bad_execution_test(spec, state)
@with_bellatrix_and_later
@spec_state_test
def test_invalid_bad_execution_regular_payload(spec, state):
state = build_state_with_complete_transition(spec, state)
yield from run_bad_execution_test(spec, state)
@with_phases([BELLATRIX])
@spec_state_test
def test_bad_parent_hash_first_payload(spec, state):
state = build_state_with_incomplete_transition(spec, state)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.parent_hash = b'\x55' * 32
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_execution_payload_processing(spec, state, execution_payload)
@with_bellatrix_and_later
@spec_state_test
def test_invalid_bad_parent_hash_regular_payload(spec, state):
state = build_state_with_complete_transition(spec, state)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.parent_hash = spec.Hash32()
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)
def run_bad_prev_randao_test(spec, state):
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.prev_randao = b'\x42' * 32
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)
@with_bellatrix_and_later
@spec_state_test
def test_invalid_bad_prev_randao_first_payload(spec, state):
state = build_state_with_incomplete_transition(spec, state)
yield from run_bad_prev_randao_test(spec, state)
@with_bellatrix_and_later
@spec_state_test
def test_invalid_bad_prev_randao_regular_payload(spec, state):
state = build_state_with_complete_transition(spec, state)
yield from run_bad_prev_randao_test(spec, state)
def run_bad_everything_test(spec, state):
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.parent_hash = spec.Hash32()
execution_payload.prev_randao = spec.Bytes32()
execution_payload.timestamp = 0
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)
@with_bellatrix_and_later
@spec_state_test
def test_invalid_bad_everything_first_payload(spec, state):
state = build_state_with_incomplete_transition(spec, state)
yield from run_bad_everything_test(spec, state)
@with_bellatrix_and_later
@spec_state_test
def test_invalid_bad_everything_regular_payload(spec, state):
state = build_state_with_complete_transition(spec, state)
yield from run_bad_everything_test(spec, state)
def run_bad_timestamp_test(spec, state, is_future):
next_slot(spec, state)
# execution payload
execution_payload = build_empty_execution_payload(spec, state)
if is_future:
timestamp = execution_payload.timestamp + 1
else:
timestamp = execution_payload.timestamp - 1
execution_payload.timestamp = timestamp
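    # The spec pins payload.timestamp to compute_timestamp_at_slot(state, state.slot),
    # so shifting it by one second in either direction must fail processing.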
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)
@with_bellatrix_and_later
@spec_state_test
def test_invalid_future_timestamp_first_payload(spec, state):
state = build_state_with_incomplete_transition(spec, state)
yield from run_bad_timestamp_test(spec, state, is_future=True)
@with_bellatrix_and_later
@spec_state_test
def test_invalid_future_timestamp_regular_payload(spec, state):
state = build_state_with_complete_transition(spec, state)
yield from run_bad_timestamp_test(spec, state, is_future=True)
@with_bellatrix_and_later
@spec_state_test
def test_invalid_past_timestamp_first_payload(spec, state):
state = build_state_with_incomplete_transition(spec, state)
yield from run_bad_timestamp_test(spec, state, is_future=False)
@with_bellatrix_and_later
@spec_state_test
def test_invalid_past_timestamp_regular_payload(spec, state):
state = build_state_with_complete_transition(spec, state)
yield from run_bad_timestamp_test(spec, state, is_future=False)
def run_non_empty_extra_data_test(spec, state):
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.extra_data = b'\x45' * 12
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_execution_payload_processing(spec, state, execution_payload)
assert state.latest_execution_payload_header.extra_data == execution_payload.extra_data
@with_bellatrix_and_later
@spec_state_test
def test_non_empty_extra_data_first_payload(spec, state):
state = build_state_with_incomplete_transition(spec, state)
yield from run_non_empty_extra_data_test(spec, state)
@with_bellatrix_and_later
@spec_state_test
def test_non_empty_extra_data_regular_payload(spec, state):
state = build_state_with_complete_transition(spec, state)
yield from run_non_empty_extra_data_test(spec, state)
def run_non_empty_transactions_test(spec, state):
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
num_transactions = 2
execution_payload.transactions = [
spec.Transaction(b'\x99' * 128)
for _ in range(num_transactions)
]
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_execution_payload_processing(spec, state, execution_payload)
assert state.latest_execution_payload_header.transactions_root == execution_payload.transactions.hash_tree_root()
@with_bellatrix_and_later
@spec_state_test
def test_non_empty_transactions_first_payload(spec, state):
state = build_state_with_incomplete_transition(spec, state)
    yield from run_non_empty_transactions_test(spec, state)
@with_bellatrix_and_later
@spec_state_test
def test_non_empty_transactions_regular_payload(spec, state):
state = build_state_with_complete_transition(spec, state)
    yield from run_non_empty_transactions_test(spec, state)
def run_zero_length_transaction_test(spec, state):
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.transactions = [spec.Transaction(b'')]
assert len(execution_payload.transactions[0]) == 0
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_execution_payload_processing(spec, state, execution_payload)
assert state.latest_execution_payload_header.transactions_root == execution_payload.transactions.hash_tree_root()
@with_bellatrix_and_later
@spec_state_test
def test_zero_length_transaction_first_payload(spec, state):
state = build_state_with_incomplete_transition(spec, state)
yield from run_zero_length_transaction_test(spec, state)
@with_bellatrix_and_later
@spec_state_test
def test_zero_length_transaction_regular_payload(spec, state):
state = build_state_with_complete_transition(spec, state)
yield from run_zero_length_transaction_test(spec, state)
def run_randomized_non_validated_execution_fields_test(spec, state, execution_valid=True, rng=Random(5555)):
next_slot(spec, state)
execution_payload = build_randomized_execution_payload(spec, state, rng)
yield from run_execution_payload_processing(
spec, state,
execution_payload,
valid=execution_valid, execution_valid=execution_valid
)
@with_bellatrix_and_later
@spec_state_test
def test_randomized_non_validated_execution_fields_first_payload__execution_valid(spec, state):
state = build_state_with_incomplete_transition(spec, state)
yield from run_randomized_non_validated_execution_fields_test(spec, state)
@with_bellatrix_and_later
@spec_state_test
def test_randomized_non_validated_execution_fields_regular_payload__execution_valid(spec, state):
state = build_state_with_complete_transition(spec, state)
yield from run_randomized_non_validated_execution_fields_test(spec, state)
@with_bellatrix_and_later
@spec_state_test
def test_invalid_randomized_non_validated_execution_fields_first_payload__execution_invalid(spec, state):
state = build_state_with_incomplete_transition(spec, state)
yield from run_randomized_non_validated_execution_fields_test(spec, state, execution_valid=False)
@with_bellatrix_and_later
@spec_state_test
def test_invalid_randomized_non_validated_execution_fields_regular_payload__execution_invalid(spec, state):
state = build_state_with_complete_transition(spec, state)
yield from run_randomized_non_validated_execution_fields_test(spec, state, execution_valid=False)
| 12,744 | 34.013736 | 119 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/bellatrix/block_processing/__init__.py
| 0 | 0 | 0 |
py
|
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/bellatrix/fork_choice/test_on_merge_block.py
|
from eth2spec.utils.ssz.ssz_typing import uint256
from eth2spec.test.exceptions import BlockNotFoundException
from eth2spec.test.context import spec_state_test, with_phases, BELLATRIX
from eth2spec.test.helpers.block import (
build_empty_block_for_next_slot,
)
from eth2spec.test.helpers.execution_payload import (
compute_el_block_hash,
)
from eth2spec.test.helpers.fork_choice import (
get_genesis_forkchoice_store_and_block,
on_tick_and_append_step,
tick_and_add_block,
)
from eth2spec.test.helpers.state import (
state_transition_and_sign_block,
)
from eth2spec.test.helpers.fork_choice import (
add_pow_block,
)
from eth2spec.test.helpers.pow_block import (
prepare_random_pow_block,
)
from eth2spec.test.helpers.execution_payload import (
build_state_with_incomplete_transition,
)
def with_pow_block_patch(spec, blocks, func):
def get_pow_block(hash: spec.Bytes32) -> spec.PowBlock:
for block in blocks:
if block.block_hash == hash:
return block
raise BlockNotFoundException()
get_pow_block_backup = spec.get_pow_block
spec.get_pow_block = get_pow_block
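    # `func` is a generator, so the patch must stay installed until the caller has
    # fully consumed it; the flag below proves the wrapped body actually ran before
    # the original `get_pow_block` is restored in the `finally` block.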
    class AtomicBoolean:
value = False
is_called = AtomicBoolean()
def wrap(flag: AtomicBoolean):
yield from func()
flag.value = True
try:
yield from wrap(is_called)
finally:
spec.get_pow_block = get_pow_block_backup
assert is_called.value
@with_phases([BELLATRIX])
@spec_state_test
def test_all_valid(spec, state):
test_steps = []
# Initialization
state = build_state_with_incomplete_transition(spec, state)
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
yield 'anchor_state', state
yield 'anchor_block', anchor_block
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
on_tick_and_append_step(spec, store, current_time, test_steps)
assert store.time == current_time
pow_block_parent = prepare_random_pow_block(spec)
pow_block_parent.total_difficulty = spec.config.TERMINAL_TOTAL_DIFFICULTY - uint256(1)
pow_block = prepare_random_pow_block(spec)
pow_block.parent_hash = pow_block_parent.block_hash
pow_block.total_difficulty = spec.config.TERMINAL_TOTAL_DIFFICULTY
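    # parent < TTD <= block: `pow_block` satisfies the terminal-PoW-block rule
    # (first block at or above TERMINAL_TOTAL_DIFFICULTY), so the merge block
    # referencing it should be accepted.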
pow_blocks = [pow_block, pow_block_parent]
for pb in pow_blocks:
yield from add_pow_block(spec, store, pb, test_steps)
def run_func():
block = build_empty_block_for_next_slot(spec, state)
block.body.execution_payload.parent_hash = pow_block.block_hash
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
signed_block = state_transition_and_sign_block(spec, state, block)
yield from tick_and_add_block(spec, store, signed_block, test_steps, merge_block=True)
# valid
assert spec.get_head(store) == signed_block.message.hash_tree_root()
yield from with_pow_block_patch(spec, pow_blocks, run_func)
yield 'steps', test_steps
@with_phases([BELLATRIX])
@spec_state_test
def test_block_lookup_failed(spec, state):
test_steps = []
# Initialization
state = build_state_with_incomplete_transition(spec, state)
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
yield 'anchor_state', state
yield 'anchor_block', anchor_block
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
on_tick_and_append_step(spec, store, current_time, test_steps)
assert store.time == current_time
pow_block = prepare_random_pow_block(spec)
pow_block.total_difficulty = spec.config.TERMINAL_TOTAL_DIFFICULTY - uint256(1)
pow_blocks = [pow_block]
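    # Only `pow_block` itself is registered; its PoW parent is unknown, so
    # merge-block validation fails when it tries to look the parent up.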
for pb in pow_blocks:
yield from add_pow_block(spec, store, pb, test_steps)
def run_func():
block = build_empty_block_for_next_slot(spec, state)
block.body.execution_payload.parent_hash = pow_block.block_hash
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
signed_block = state_transition_and_sign_block(spec, state, block)
yield from tick_and_add_block(spec, store, signed_block, test_steps, valid=False, merge_block=True,
block_not_found=True)
yield from with_pow_block_patch(spec, pow_blocks, run_func)
yield 'steps', test_steps
@with_phases([BELLATRIX])
@spec_state_test
def test_too_early_for_merge(spec, state):
test_steps = []
# Initialization
state = build_state_with_incomplete_transition(spec, state)
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
yield 'anchor_state', state
yield 'anchor_block', anchor_block
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
on_tick_and_append_step(spec, store, current_time, test_steps)
assert store.time == current_time
pow_block_parent = prepare_random_pow_block(spec)
pow_block_parent.total_difficulty = spec.config.TERMINAL_TOTAL_DIFFICULTY - uint256(2)
pow_block = prepare_random_pow_block(spec)
pow_block.parent_hash = pow_block_parent.block_hash
pow_block.total_difficulty = spec.config.TERMINAL_TOTAL_DIFFICULTY - uint256(1)
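    # block < TTD: the terminal total difficulty has not been reached yet, so the
    # merge block must be rejected.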
pow_blocks = [pow_block, pow_block_parent]
for pb in pow_blocks:
yield from add_pow_block(spec, store, pb, test_steps)
def run_func():
block = build_empty_block_for_next_slot(spec, state)
block.body.execution_payload.parent_hash = pow_block.block_hash
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
signed_block = state_transition_and_sign_block(spec, state, block)
yield from tick_and_add_block(spec, store, signed_block, test_steps, valid=False, merge_block=True)
yield from with_pow_block_patch(spec, pow_blocks, run_func)
yield 'steps', test_steps
@with_phases([BELLATRIX])
@spec_state_test
def test_too_late_for_merge(spec, state):
test_steps = []
# Initialization
state = build_state_with_incomplete_transition(spec, state)
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
yield 'anchor_state', state
yield 'anchor_block', anchor_block
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
on_tick_and_append_step(spec, store, current_time, test_steps)
assert store.time == current_time
pow_block_parent = prepare_random_pow_block(spec)
pow_block_parent.total_difficulty = spec.config.TERMINAL_TOTAL_DIFFICULTY
pow_block = prepare_random_pow_block(spec)
pow_block.parent_hash = pow_block_parent.block_hash
pow_block.total_difficulty = spec.config.TERMINAL_TOTAL_DIFFICULTY + uint256(1)
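    # parent >= TTD: `pow_block` is not the *first* block to cross the terminal
    # total difficulty, so the merge block must be rejected.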
pow_blocks = [pow_block, pow_block_parent]
for pb in pow_blocks:
yield from add_pow_block(spec, store, pb, test_steps)
def run_func():
block = build_empty_block_for_next_slot(spec, state)
block.body.execution_payload.parent_hash = pow_block.block_hash
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
signed_block = state_transition_and_sign_block(spec, state, block)
yield from tick_and_add_block(spec, store, signed_block, test_steps, valid=False, merge_block=True)
yield from with_pow_block_patch(spec, pow_blocks, run_func)
yield 'steps', test_steps
| 7,491 | 39.939891 | 107 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/bellatrix/fork_choice/__init__.py
| 0 | 0 | 0 |
py
|
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/bellatrix/sanity/test_blocks.py
|
from random import Random
from eth2spec.test.helpers.state import (
state_transition_and_sign_block,
next_slot,
)
from eth2spec.test.helpers.block import (
build_empty_block_for_next_slot
)
from eth2spec.test.helpers.execution_payload import (
build_randomized_execution_payload
)
from eth2spec.test.context import (
BELLATRIX,
with_bellatrix_and_later,
with_phases,
spec_state_test,
)
@with_bellatrix_and_later
@spec_state_test
def test_empty_block_transition_no_tx(spec, state):
yield 'pre', state
block = build_empty_block_for_next_slot(spec, state)
assert len(block.body.execution_payload.transactions) == 0
signed_block = state_transition_and_sign_block(spec, state, block)
yield 'blocks', [signed_block]
yield 'post', state
@with_bellatrix_and_later
@spec_state_test
def test_empty_block_transition_randomized_payload(spec, state):
yield 'pre', state
block = build_empty_block_for_next_slot(spec, state)
next_slot_state = state.copy()
next_slot(spec, next_slot_state)
block.body.execution_payload = build_randomized_execution_payload(spec, next_slot_state, rng=Random(34433))
signed_block = state_transition_and_sign_block(spec, state, block)
yield 'blocks', [signed_block]
yield 'post', state
@with_phases([BELLATRIX])
@spec_state_test
def test_is_execution_enabled_false(spec, state):
# Set `latest_execution_payload_header` to empty
state.latest_execution_payload_header = spec.ExecutionPayloadHeader()
yield 'pre', state
block = build_empty_block_for_next_slot(spec, state)
# Set `execution_payload` to empty
block.body.execution_payload = spec.ExecutionPayload()
assert len(block.body.execution_payload.transactions) == 0
signed_block = state_transition_and_sign_block(spec, state, block)
yield 'blocks', [signed_block]
yield 'post', state
| 1,895 | 27.298507 | 111 |
py
|
consensus-specs
|
consensus-specs-master/tests/core/pyspec/eth2spec/test/bellatrix/sanity/__init__.py
| 0 | 0 | 0 |
py
|