import json
import mlcroissant as mlc
from mlcroissant import FileProperty
from mlcroissant.scripts import validate  # only needed for the optional validation call at the bottom


# 1. Base repo: FileObject pointing at the Hugging Face dataset repository.
#    sha256 holds the branch name ("main") as a placeholder rather than a
#    real checksum.
repo = mlc.FileObject(
    id="huggingface-repo",
    name="huggingface-repo",
    description="ProofWalaDataset Hugging Face repo",
    content_url="https://huggingface.co/datasets/amitayusht/ProofWalaDataset",
    encoding_formats=["git+https"],
    sha256="main",
)

# 2. Single FileSet that includes all .json files recursively
file_set = mlc.FileSet(
    id="all-json",
    name="all-json",
    description="All .json files across datasets and splits.",
    contained_in=["huggingface-repo"],
    includes="**/*.json",  # recursive glob
    encoding_formats=["application/json"],
)

# 3. Helper to build a text Field extracted from the JSON files via a JSONPath.
def make_field(field_id, name, description, json_path):
    return mlc.Field(
        id=field_id,
        name=name,
        description=description,
        data_types=[mlc.DataType.TEXT],
        source=mlc.Source(
            file_set="all-json",
            extract=mlc.Extract(json_path=json_path),
        ),
    )

# 4. Standard per-record fields read from each file's training_data list.
fields = [
    make_field("record/proof_id", "proof_id", "Proof ID", "training_data[*].proof_id"),
    make_field("record/goal_description", "goal_description", "Goal description", "training_data[*].goal_description"),
    make_field("record/proof_steps", "proof_steps", "Tactic steps", "training_data[*].proof_steps"),
    make_field("record/theorem_name", "theorem_name", "Theorem name", "training_data[*].theorem_name"),
    make_field("record/project_id", "project_id", "Project ID", "training_data[*].project_id"),
    make_field("record/file_path", "file_path", "File path", "training_data[*].file_path"),
]

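# Optional nested-goal fields (kept commented out): these would pull each
# record's main goal and hypothesis lists out of start_goals / end_goals.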
# fields += [
#     make_field(
#         "record/start_goal",
#         "start_goal",
#         "Main goal from start_goals list",
#         "training_data[*].start_goals[*].goal",
#     ),
#     make_field(
#         "record/start_hypotheses",
#         "start_hypotheses",
#         "List of hypotheses from start_goals",
#         "training_data[*].start_goals[*].hypotheses",
#     ),
#     make_field(
#         "record/end_goal",
#         "end_goal",
#         "Main goal from end_goals list",
#         "training_data[*].end_goals[*].goal",
#     ),
#     make_field(
#         "record/end_hypotheses",
#         "end_hypotheses",
#         "List of hypotheses from end_goals",
#         "training_data[*].end_goals[*].hypotheses",
#     ),
# ]
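
# For reference, the JSONPath expressions above assume each .json file has
# roughly this shape (a hypothetical sketch inferred from the paths used,
# not taken from the dataset itself):
# {
#   "training_data": [
#     {
#       "proof_id": "...",
#       "goal_description": "...",
#       "proof_steps": ["..."],
#       "theorem_name": "...",
#       "project_id": "...",
#       "file_path": "...",
#       "start_goals": [{"goal": "...", "hypotheses": ["..."]}],
#       "end_goals": [{"goal": "...", "hypotheses": ["..."]}]
#     }
#   ]
# }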


# 5. Derived fields: dataset family and split, parsed from the file path.
#    The regexes capture the first and second path components respectively,
#    assuming paths of the form <dataset>/<split>/<file>.json
#    (e.g. "lean/train/data.json" -> source_dataset="lean", split="train").
fields += [
    mlc.Field(
        id="record/source_dataset",
        name="source_dataset",
        description="Dataset family (e.g. lean, coq).",
        data_types=[mlc.DataType.TEXT],
        source=mlc.Source(
            file_set="all-json",
            extract=mlc.Extract(file_property=FileProperty.filepath),
            transforms=[mlc.Transform(regex="^([^/]+)/.*")],
        ),
    ),
    mlc.Field(
        id="record/split",
        name="split",
        description="Split name (train/test/eval).",
        data_types=[mlc.DataType.TEXT],
        source=mlc.Source(
            file_set="all-json",
            extract=mlc.Extract(file_property=FileProperty.filepath),
            transforms=[mlc.Transform(regex="^[^/]+/([^/]+)/.*")],
        ),
    ),
]

# 6. RecordSet
record_set = mlc.RecordSet(
    id="proof-records",
    name="proof-records",
    description="All proof records across dataset families and splits.",
    fields=fields,
)

# 7. Top-level dataset metadata tying the distribution and record set together.
metadata = mlc.Metadata(
    name="ProofWalaDataset",
    description="A dataset of formal theorem-proving steps extracted from Lean, Coq, GeoCoq, MathComp, and more.",
    license="https://opensource.org/licenses/MIT",
    url="https://huggingface.co/datasets/amitayusht/ProofWalaDataset",
    version="1.0.0",
    distribution=[repo, file_set],
    record_sets=[record_set],
)


# 8. Serialize the Croissant JSON-LD to disk.
json_dict = metadata.to_json()
with open("croissant.json", "w") as f:
    json.dump(json_dict, f, indent=2)
# validate.main("croissant.json")
# (or validate from the command line: `mlcroissant validate --jsonld croissant.json`)
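
# Optional sanity check (a minimal sketch, not part of the generation step):
# load the emitted croissant.json back with mlcroissant and print a few
# records from the "proof-records" RecordSet defined above. Depending on the
# mlcroissant version, record keys may be exposed by field id or by field name.
#
# dataset = mlc.Dataset(jsonld="croissant.json")
# for i, record in enumerate(dataset.records(record_set="proof-records")):
#     print(record)
#     if i >= 2:
#         break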