Set seed to use cfg.seed, falling back to 42 for backward compatibility
Browse files- src/axolotl/utils/data.py +10 -3
- src/axolotl/utils/trainer.py +4 -0
src/axolotl/utils/data.py
CHANGED
|
@@ -78,6 +78,13 @@ def load_tokenized_prepared_datasets(
|
|
| 78 |
else:
|
| 79 |
logging.info(f"Unable to find prepared dataset in {prepared_ds_path}")
|
| 80 |
logging.info("Loading raw datasets...")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 81 |
datasets = []
|
| 82 |
# pylint: disable=invalid-name
|
| 83 |
for d in cfg.datasets:
|
|
@@ -127,11 +134,11 @@ def load_tokenized_prepared_datasets(
|
|
| 127 |
# support for using a subset of the data
|
| 128 |
if d.shards:
|
| 129 |
if "train" in ds:
|
| 130 |
-            ds = ds.shuffle(seed=42)["train"].shard(
|
| 131 |
num_shards=d.shards, index=0
|
| 132 |
)
|
| 133 |
else:
|
| 134 |
-            ds = ds.shuffle(seed=42).shard(num_shards=d.shards, index=0)
|
| 135 |
d_type = d.type
|
| 136 |
d_type_split = d_type.split(":")
|
| 137 |
d_base_type = d_type_split[0]
|
|
@@ -239,7 +246,7 @@ def load_tokenized_prepared_datasets(
|
|
| 239 |
samples: List[int] = []
|
| 240 |
for d in datasets:
|
| 241 |
samples = samples + list(d)
|
| 242 |
-    dataset = Dataset.from_list(samples).shuffle(seed=42)
|
| 243 |
if cfg.local_rank == 0:
|
| 244 |
logging.info(
|
| 245 |
f"Saving merged prepared dataset to disk... {prepared_ds_path}"
|
|
|
|
| 78 |
else:
|
| 79 |
logging.info(f"Unable to find prepared dataset in {prepared_ds_path}")
|
| 80 |
logging.info("Loading raw datasets...")
|
| 81 |
+
|
| 82 |
+
if cfg.seed:
|
| 83 |
+
seed = cfg.seed
|
| 84 |
+
else:
|
| 85 |
+
logging.info("No seed provided, using default seed of 42")
|
| 86 |
+
seed = 42
|
| 87 |
+
|
| 88 |
datasets = []
|
| 89 |
# pylint: disable=invalid-name
|
| 90 |
for d in cfg.datasets:
|
|
|
|
| 134 |
# support for using a subset of the data
|
| 135 |
if d.shards:
|
| 136 |
if "train" in ds:
|
| 137 |
+
ds = ds.shuffle(seed=seed)["train"].shard(
|
| 138 |
num_shards=d.shards, index=0
|
| 139 |
)
|
| 140 |
else:
|
| 141 |
+
ds = ds.shuffle(seed=seed).shard(num_shards=d.shards, index=0)
|
| 142 |
d_type = d.type
|
| 143 |
d_type_split = d_type.split(":")
|
| 144 |
d_base_type = d_type_split[0]
|
|
|
|
| 246 |
samples: List[int] = []
|
| 247 |
for d in datasets:
|
| 248 |
samples = samples + list(d)
|
| 249 |
+
dataset = Dataset.from_list(samples).shuffle(seed=seed)
|
| 250 |
if cfg.local_rank == 0:
|
| 251 |
logging.info(
|
| 252 |
f"Saving merged prepared dataset to disk... {prepared_ds_path}"
|
src/axolotl/utils/trainer.py
CHANGED
|
@@ -74,6 +74,10 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
|
|
| 74 |
training_arguments_kwargs["tf32"] = cfg.tf32
|
| 75 |
training_arguments_kwargs["warmup_steps"] = warmup_steps
|
| 76 |
training_arguments_kwargs["logging_steps"] = logging_steps
|
|
|
|
|
|
|
|
|
|
|
|
|
| 77 |
if cfg.gradient_checkpointing:
|
| 78 |
if cfg.gptq:
|
| 79 |
from alpaca_lora_4bit.gradient_checkpointing import (
|
|
|
|
| 74 |
training_arguments_kwargs["tf32"] = cfg.tf32
|
| 75 |
training_arguments_kwargs["warmup_steps"] = warmup_steps
|
| 76 |
training_arguments_kwargs["logging_steps"] = logging_steps
|
| 77 |
+
|
| 78 |
+
if cfg.seed:
|
| 79 |
+
training_arguments_kwargs["seed"] = cfg.seed
|
| 80 |
+
|
| 81 |
if cfg.gradient_checkpointing:
|
| 82 |
if cfg.gptq:
|
| 83 |
from alpaca_lora_4bit.gradient_checkpointing import (
|