"""Build a Croissant metadata file for the ProofWalaDataset.

Emits ``croissant.json`` describing a Hugging Face repo of theorem-proving
JSON records (Lean, Coq, GeoCoq, MathComp, ...), with one RecordSet whose
fields are extracted from every ``*.json`` file via JSONPath, plus two
fields derived from each file's path (dataset family and split).
"""
import json

import mlcroissant as mlc
from mlcroissant._src.structure_graph.nodes.source import FileProperty
from mlcroissant.scripts import validate  # kept for the optional validation call below

# Shared identifiers, hoisted so the FileObject/FileSet and every Field
# source stay in sync (they were previously repeated string literals).
REPO_ID = "huggingface-repo"
FILE_SET_ID = "all-json"
DATASET_URL = "https://huggingface.co/datasets/amitayusht/ProofWalaDataset"

# 1. Base repo
repo = mlc.FileObject(
    id=REPO_ID,
    name=REPO_ID,
    description="ProofWalaDataset GitHub repo",
    content_url=DATASET_URL,
    encoding_formats=["git+https"],
    # NOTE(review): mlcroissant's git convention stores the branch/revision
    # in the sha256 slot — "main" here is a ref, not a real digest.
    sha256="main",
)

# 2. Single FileSet that includes all .json files recursively
file_set = mlc.FileSet(
    id=FILE_SET_ID,
    name=FILE_SET_ID,
    description="All .json files across datasets and splits.",
    contained_in=[REPO_ID],
    includes="**/*.json",  # recursive glob
    encoding_formats=["application/json"],
)


# 3. Helper to make fields
def make_field(field_id: str, name: str, description: str, json_path: str) -> mlc.Field:
    """Return a TEXT Field extracted from the shared JSON FileSet.

    Args:
        field_id: Croissant id of the field (e.g. ``"record/proof_id"``).
        name: Human-readable field name.
        description: One-line field description.
        json_path: JSONPath applied to each JSON file to pull the value.
    """
    return mlc.Field(
        id=field_id,
        name=name,
        description=description,
        data_types=[mlc.DataType.TEXT],
        source=mlc.Source(
            file_set=FILE_SET_ID,
            extract=mlc.Extract(json_path=json_path),
        ),
    )


# 4. Standard fields, all drawn from the top-level training_data array.
fields = [
    make_field("record/proof_id", "proof_id", "Proof ID", "training_data[*].proof_id"),
    make_field("record/goal_description", "goal_description", "Goal description", "training_data[*].goal_description"),
    make_field("record/proof_steps", "proof_steps", "Tactic steps", "training_data[*].proof_steps"),
    make_field("record/theorem_name", "theorem_name", "Theorem name", "training_data[*].theorem_name"),
    make_field("record/project_id", "project_id", "Project ID", "training_data[*].project_id"),
    make_field("record/file_path", "file_path", "File path", "training_data[*].file_path"),
]

# TODO: nested goal/hypothesis fields are currently disabled. If re-enabled,
# they map to these JSONPaths:
#   record/start_goal        -> training_data[*].start_goals[*].goal
#   record/start_hypotheses  -> training_data[*].start_goals[*].hypotheses
#   record/end_goal          -> training_data[*].end_goals[*].goal
#   record/end_hypotheses    -> training_data[*].end_goals[*].hypotheses

# 5. Derived fields: dataset family and split parsed from each file's path.
# Assumed repo layout: <dataset>/<split>/<file>.json — TODO confirm.
fields += [
    mlc.Field(
        id="record/source_dataset",
        name="source_dataset",
        description="Dataset family (e.g. lean, coq).",
        data_types=[mlc.DataType.TEXT],
        source=mlc.Source(
            file_set=FILE_SET_ID,
            extract=mlc.Extract(file_property=FileProperty.filepath),
            # First path component = dataset family.
            transforms=[mlc.Transform(regex="^([^/]+)/.*")],
        ),
    ),
    mlc.Field(
        id="record/split",
        name="split",
        description="Split name (train/test/eval).",
        data_types=[mlc.DataType.TEXT],
        source=mlc.Source(
            file_set=FILE_SET_ID,
            extract=mlc.Extract(file_property=FileProperty.filepath),
            # Second path component = split name.
            transforms=[mlc.Transform(regex="^[^/]+/([^/]+)/.*")],
        ),
    ),
]

# 6. RecordSet tying every field together.
record_set = mlc.RecordSet(
    id="proof-records",
    name="proof-records",
    description="All proof records across dataset families and splits.",
    fields=fields,
)

metadata = mlc.Metadata(
    name="ProofWalaDataset",
    description=(
        "A dataset of formal theorem-proving steps extracted from Lean, Coq, "
        "GeoCoq, MathComp, and more."
    ),
    license="https://opensource.org/licenses/MIT",
    url=DATASET_URL,
    version="1.0.0",
    distribution=[repo, file_set],
    record_sets=[record_set],
)

# Serialize and write the Croissant JSON-LD next to this script.
json_dict = metadata.to_json()
with open("croissant.json", "w", encoding="utf-8") as f:
    json.dump(json_dict, f, indent=2)

# Optional: sanity-check the emitted file with mlcroissant's validator.
# validate.main("croissant.json")