Tasks: Text Generation
Sub-tasks: language-modeling
Formats: parquet
Languages: Danish
Size: 10M - 100M

from datetime import date
from pathlib import Path
from typing import Any, Literal

import pytest
from datasets import load_dataset
from pydantic import AfterValidator, BaseModel, BeforeValidator
from typing_extensions import Annotated

from .conftest import DATASET_NAMES
from .readme_parsing import get_tag_idx, read_frontmatter_and_body

def ensure_tuple(created: str | tuple) -> tuple:
    if isinstance(created, str):
        return tuple(created.split(", "))
    return created
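
# For example (purely illustrative, not a value from the datasets):
# ensure_tuple("2020-01-01, 2024-01-01") returns ("2020-01-01", "2024-01-01"),
# and pydantic then coerces each element to a `date` when validating the
# `created` field declared below.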

def validate_sample_metadata(metadata: dict[str, Any]) -> dict[str, Any]:
    if "source-pretty" not in metadata:
        raise ValueError("'source-pretty' should be in metadata dict.")
    return metadata

class SampleSchema(BaseModel):
    text: str
    source: str
    id: str
    added: date  # ISO date string, parsed via date.fromisoformat
    created: Annotated[tuple[date, date], BeforeValidator(ensure_tuple)]
    license: str  # TODO: should probably be a literal
    domain: str  # TODO: convert to literal
    metadata: Annotated[dict[str, Any], AfterValidator(validate_sample_metadata)]
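
# Illustrative only -- the record below is made up, not taken from any dataset,
# but a sample shaped like this is expected to pass validation: `created` is a
# "start, end" string that ensure_tuple splits before date parsing, and
# `metadata` must contain the "source-pretty" key.
_EXAMPLE_SAMPLE = {
    "text": "Dette er et eksempel.",
    "source": "example-source",
    "id": "example-source_0001",
    "added": "2024-01-01",
    "created": "2000-01-01, 2024-01-01",
    "license": "cc0-1.0",
    "domain": "Other",
    "metadata": {"source-pretty": "Example Source"},
}
# SampleSchema(**_EXAMPLE_SAMPLE)  # would raise a ValidationError if a field were malformed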

@pytest.mark.parametrize("dataset_name", DATASET_NAMES)
def test_sample_schema(repo_path: Path, dataset_name: str):
    """Ensure that the dataset samples follow the correct schema."""
    ds = load_dataset(
        str(repo_path.resolve()), dataset_name, split="train", streaming=True
    )
    sample = next(iter(ds))
    SampleSchema(**sample)  # raises a ValidationError if the sample does not match

class FrontmatterSchema(BaseModel):
    pretty_name: str
    language: list[Literal["da"]]
    license: Literal["cc0-1.0", "other", "cc-by-sa-4.0"]
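
# Illustrative only -- a minimal frontmatter dict (values are made up) that this
# schema accepts; extra keys in the card's YAML header are ignored under pydantic's
# default model config.
_EXAMPLE_FRONTMATTER = {
    "pretty_name": "Example Dataset",
    "language": ["da"],
    "license": "cc0-1.0",
}
# FrontmatterSchema(**_EXAMPLE_FRONTMATTER)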

@pytest.mark.parametrize("dataset_name", DATASET_NAMES)
def test_dataset_readme(repo_path: Path, dataset_name: str):
    """Test that the dataset frontmatter and markdown body follow the correct format."""
    readme = repo_path / "data" / dataset_name / f"{dataset_name}.md"

    frontmatter, body = read_frontmatter_and_body(readme)
    frontmatter_validated = FrontmatterSchema(**frontmatter)

    # ensure that the required tags are present in the body
    tags = ["SHORT DESCRIPTION", "DESC-STATS", "DATASET PLOTS", "SAMPLE"]
    for tag in tags:
        get_tag_idx(body, tag)

    h2_headings = {line for line in body.splitlines() if line.startswith("## ")}

    if frontmatter_validated.license == "other":
        # underspecified licenses must be described in the readme
        assert "## License Information" in h2_headings

    # required headings
    req_h2_headings = ["## Dataset Description", "## Additional Information"]
    for req_h2 in req_h2_headings:
        assert req_h2 in h2_headings

@pytest.mark.parametrize("dataset_name", DATASET_NAMES)
def test_dataset_folder_structure(repo_path: Path, dataset_name: str):
    """Test that the dataset folder structure is as follows.

    dataset_name
    |- dataset_name.md
    |- dataset_name.parquet

    If there is a Python file, there should at least be one called `create.py`,
    but there can be additional ones.
    """
    path = repo_path / "data" / dataset_name

    assert (path / f"{path.name}.parquet").exists()
    assert (path / f"{path.name}.md").exists()

    if any(p.name.endswith(".py") for p in path.glob("*")):
        assert (path / "create.py").exists()