""" A simple CLI to updates descriptive statistics on all datasets. Example use: python update_descriptive_statistics.py --dataset wikisource """ import argparse import json import logging from dataclasses import dataclass from pathlib import Path from textwrap import dedent from typing import Self, cast from datasets import Dataset, load_dataset from git_utilities import check_is_ancestor, get_current_revision, get_latest_revision logger = logging.getLogger(__name__) repo_path = Path(__file__).parent.parent def human_readable_large_int(value: int) -> str: thresholds = [ (1_000_000_000, "B"), (1_000_000, "M"), (1_000, "K"), ] for threshold, label in thresholds: if value > threshold: return f"{value/threshold:.2f}{label}" return str(value) @dataclass() class DescriptiveStatsOverview: number_of_samples: int average_document_length: float language: str = "dan, dansk, Danish" @classmethod def from_dataset(cls, dataset: Dataset) -> Self: return cls( number_of_samples=len(dataset), average_document_length=cls.calculate_average_document_length(dataset), ) @staticmethod def calculate_average_document_length(dataset: Dataset) -> float: texts = sum(len(t) for t in dataset["text"]) return texts / len(dataset) def to_markdown(self) -> str: format = dedent(f""" - **Language**: {self.language} - **Number of samples**: {human_readable_large_int(self.number_of_samples)} - **Average document length (characters)**: {self.average_document_length:.2f} """) return format def add_to_markdown(self, markdown: str) -> str: start_identifier = "" end_identifier = "" if markdown.count(start_identifier) != 1 or markdown.count(end_identifier) != 1: raise ValueError("Markers should appear exactly once in the markdown.") start_md, _, remainder = markdown.partition(start_identifier) _, _, end_md = remainder.partition(end_identifier) stats = self.to_markdown() return f"{start_md}{start_identifier}{stats}{end_identifier}{end_md}" def to_disk(self, path: Path): data = self.__dict__ data["revision"] = get_current_revision() with path.with_suffix(".json").open("w") as f: json.dump(self.__dict__, f) def update_statitics( dataset_path: Path, name: str, readme_name: None | str = None, force: bool = False, ) -> None: rev = get_latest_revision(dataset_path) desc_stats_path = dataset_path / "descriptive_stats.json" if desc_stats_path.exists() and force is False: with desc_stats_path.open("r") as f: last_update = json.load(f).get("revision", None) if last_update is None: logging.warning(f"revision is not defined in {desc_stats_path}.") elif check_is_ancestor(ancestor_rev=last_update, rev=rev): logging.info( f"descriptive statistics for '{name}' is already up to date, skipping." ) return logger.info(f"Updating statistics for {name}") ds = load_dataset(str(repo_path), name, split="train") ds = cast(Dataset, ds) desc_stats = DescriptiveStatsOverview.from_dataset(ds) readme_name = f"{name}.md" if readme_name is None else readme_name markdown_path = dataset_path / readme_name with markdown_path.open("r") as f: new_markdown = desc_stats.add_to_markdown(f.read()) with markdown_path.open("w") as f: f.write(new_markdown) desc_stats.to_disk(desc_stats_path) def create_parser(): parser = argparse.ArgumentParser( description="Calculated descriptive statistics of the datasets in tha data folder" ) parser.add_argument( "--dataset", default=None, type=str, help="Use to specify if you only want to compute the statistics from a singular dataset.", ) parser.add_argument( "--logging_level", default=20, type=int, help="Sets the logging level. Default to 20 (INFO), other reasonable levels are 10 (DEBUG) and 30 (WARNING).", ) parser.add_argument( "--force", type=bool, default=False, action=argparse.BooleanOptionalAction, help="Should the statistics be forcefully recomputed. By default it checks the difference in commit ids.", ) parser.add_argument( "--repo_path", default=str(repo_path), type=str, help="The repository where to calculate the descriptive statistics from", ) return parser def main( dataset: str | None = None, logging_level: int = 20, force: bool = False, repo_path: Path = repo_path, ): logging.basicConfig(level=logging_level) if dataset: dataset_path = repo_path / "data" / dataset update_statitics(repo_path, dataset_path.name, force=force) return datasets = (repo_path / "data").glob("*") for dataset_path in datasets: update_statitics(dataset_path, dataset_path.name, force=force) update_statitics(repo_path, "default", "README.md", force=force) if __name__ == "__main__": parser = create_parser() args = parser.parse_args() main( args.dataset, logging_level=args.logging_level, force=args.force, repo_path=Path(args.repo_path), )