"""Module for testing streaming dataset sequence packing"""

import functools
import unittest
import torch
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from axolotl.utils.data import get_dataset_wrapper, wrap_pretraining_dataset
from axolotl.utils.dict import DictDefault


class TestPretrainingPacking(unittest.TestCase):
    """
    Test class for packing streaming dataset sequences
    """
    def setUp(self) -> None:
        # pylint: disable=duplicate-code
        self.tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-7b")
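        # the llama tokenizer has no pad token, so reuse the EOS token for padding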
        self.tokenizer.pad_token = "</s>"

    def test_packing_stream_dataset(self):
        # pylint: disable=duplicate-code
        dataset = load_dataset(
            "c4",
            "en",
            streaming=True,
        )["train"]
        cfg = DictDefault(
            {
                "pretraining_dataset": [
                    {
                        "path": "c4",
                        "name": "en",
                        "type": "pretrain",
                    }
                ],
                "sample_packing": True,
                "pad_to_sequence_len": True,
                "sequence_len": 2048,
                "micro_batch_size": 2,
            }
        )
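        # bind the dataset config, tokenizer, training cfg and dataset type so the
        # wrapper can be applied to the streamed samples later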
        ds_wrapper_partial = functools.partial(
            get_dataset_wrapper,
            cfg.pretraining_dataset[0],
            self.tokenizer,
            cfg,
            cfg.pretraining_dataset[0]["type"] or "pretrain",
        )
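        # keep the configured micro batch size for the shape checks below, then wrap
        # the streaming dataset so samples are packed into fixed-length sequences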
        original_bsz = cfg.micro_batch_size
        train_dataset = wrap_pretraining_dataset(
            dataset,
            self.tokenizer,
            cfg,
            ds_wrapper_partial,
            max_tokens=cfg.sequence_len,
            batch_size=cfg.micro_batch_size,
            seed=cfg.seed or 42,
        )
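        # the wrapped dataset already yields packed, pre-collated batches, so the
        # DataLoader uses batch_size=1 and no extra collate_fn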
        trainer_loader = DataLoader(
            train_dataset,
            batch_size=1,
            collate_fn=None,
            drop_last=True,
        )
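        # spot-check the first few batches: every tensor should be a single row of
        # micro_batch_size * sequence_len packed tokens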
        idx = 0
        for data in trainer_loader:
            if idx > 10:
                break
            assert data["input_ids"].shape == torch.Size(
                [1, original_bsz * cfg.sequence_len]
            )
            assert data["position_ids"].shape == torch.Size(
                [1, original_bsz * cfg.sequence_len]
            )
            assert data["labels"].shape == torch.Size(
                [1, original_bsz * cfg.sequence_len]
            )
            assert data["attention_mask"].shape == torch.Size(
                [1, original_bsz * cfg.sequence_len]
            )
            idx += 1


if __name__ == "__main__":
    unittest.main()