Update train.py
train.py CHANGED

@@ -9,12 +9,12 @@ from tokenizers import ByteLevelBPETokenizer
 
 MAX_SEQ_LENGTH = 128
 BATCH_SIZE = 128
-EPOCHS =
+EPOCHS = 3
 LEARNING_RATE = 2.5e-5
 FACTOR = 256
 VOCAB_SIZE = 32000
 INPUT_DATASET = "HuggingFaceTB/smollm-corpus"
-INSTRUCT_DATASET = "nroggendorff/
+INSTRUCT_DATASET = "nroggendorff/elephant"
 OUTPUT_REPO = "smallama"
 FP16 = True
 WARMUP_STEPS = 0
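The rest of train.py is not shown in this commit, but constant names like EPOCHS, LEARNING_RATE, FP16, WARMUP_STEPS, and PUSH_TO_HUB suggest they feed a transformers.TrainingArguments object. A hypothetical sketch of that mapping, assumed for illustration rather than taken from the actual script:

from transformers import TrainingArguments

# Hypothetical sketch: how the constants in this diff might map onto
# TrainingArguments. The real train.py may wire them up differently.
EPOCHS = 3
BATCH_SIZE = 128
LEARNING_RATE = 2.5e-5
FP16 = True
WARMUP_STEPS = 0
OUTPUT_REPO = "smallama"
PUSH_TO_HUB = True

args = TrainingArguments(
    output_dir=OUTPUT_REPO,
    num_train_epochs=EPOCHS,                 # 3 after this commit
    per_device_train_batch_size=BATCH_SIZE,
    learning_rate=LEARNING_RATE,
    fp16=FP16,
    warmup_steps=WARMUP_STEPS,
    push_to_hub=PUSH_TO_HUB,
)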
@@ -25,9 +25,9 @@ PUSH_TO_HUB = True
 
 def load_data():
     pretrain = load_dataset(INPUT_DATASET, "cosmopedia-v2", split="train", streaming=True)
-    pretrain = Dataset.from_generator(lambda: pretrain.take(int(2.5e+
+    pretrain = Dataset.from_generator(lambda: pretrain.take(int(2.5e+6)))
     instruct = load_dataset(INSTRUCT_DATASET, split="train", streaming=True)
-    instruct = Dataset.from_generator(lambda: instruct.take(int(
+    instruct = Dataset.from_generator(lambda: instruct.take(int(3e+6)))
     dataset_dict = DatasetDict({
         'pretrain': pretrain,
         'instruct': instruct
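For context on this second hunk: streaming=True makes load_dataset return an IterableDataset, .take(n) limits the stream to its first n examples (int(2.5e+6) is 2,500,000 pretraining rows, int(3e+6) is 3,000,000 instruction rows), and Dataset.from_generator consumes the stream into a regular materialized Dataset. A standalone sketch of the same pattern, with the counts shrunk so it runs quickly:

from datasets import Dataset, DatasetDict, load_dataset

# Stream each corpus, take a fixed-size prefix, and materialize it.
# Counts reduced here purely for illustration.
pretrain_stream = load_dataset("HuggingFaceTB/smollm-corpus", "cosmopedia-v2",
                               split="train", streaming=True)
instruct_stream = load_dataset("nroggendorff/elephant", split="train", streaming=True)

# .take(n) lazily yields the first n examples; Dataset.from_generator
# iterates that stream once and writes out a real Dataset.
pretrain = Dataset.from_generator(lambda: pretrain_stream.take(1_000))
instruct = Dataset.from_generator(lambda: instruct_stream.take(1_000))

dataset_dict = DatasetDict({"pretrain": pretrain, "instruct": instruct})
print(dataset_dict)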