Update train.py
train.py (CHANGED)
@@ -7,26 +7,26 @@ from transformers import AutoTokenizer, LlamaConfig, LlamaForCausalLM, TrainingA
 from datasets import load_dataset, DatasetDict, Dataset
 from tokenizers import ByteLevelBPETokenizer
 
-MAX_SEQ_LENGTH =
-BATCH_SIZE =
+MAX_SEQ_LENGTH = 128
+BATCH_SIZE = 256
 EPOCHS = 2
-LEARNING_RATE = 2e-
+LEARNING_RATE = 2e-3
 FACTOR = 256
 VOCAB_SIZE = 32000
 INPUT_DATASET = "HuggingFaceTB/smollm-corpus"
 INSTRUCT_DATASET = "nroggendorff/openhermes"
 OUTPUT_REPO = "smallama"
-FP16 =
+FP16 = False
 WARMUP_STEPS = 20
-DECAY =
+DECAY = 1e-3
 GRADIENT_ACCUMULATION_STEPS = 1
 CLIPPING = 1.0
 PUSH_TO_HUB = True
 
 def load_data():
     pretrain = load_dataset(INPUT_DATASET, "cosmopedia-v2", split="train", streaming=True)
-    pretrain = Dataset.from_generator(lambda: pretrain.take(
-    instruct = load_dataset(INSTRUCT_DATASET, split="train").select(range(
+    pretrain = Dataset.from_generator(lambda: pretrain.take(int(2e+6)))
+    instruct = load_dataset(INSTRUCT_DATASET, split="train").select(range(int(2e+5)))
     dataset_dict = DatasetDict({
         'pretrain': pretrain,
         'instruct': instruct
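
The notable pattern in the new load_data is how it turns a streamed corpus into a regular map-style Dataset: IterableDataset.take(n) caps the stream at its first n examples, and Dataset.from_generator iterates that stream once and caches the rows to disk. A minimal standalone sketch of the same pattern (the small count and the print are illustrative only; train.py uses int(2e+6)):

from datasets import load_dataset, Dataset

# Stream the corpus so nothing is downloaded up front...
stream = load_dataset("HuggingFaceTB/smollm-corpus", "cosmopedia-v2",
                      split="train", streaming=True)

# ...then materialize only the first N streamed examples into a
# map-style Dataset. from_generator consumes the iterable returned by
# the lambda and writes the rows to an Arrow cache file.
subset = Dataset.from_generator(lambda: stream.take(1_000))  # illustrative N

print(len(subset))  # 1000 -- the subset now supports len() and random access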
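For context on what the changed constants control: the hunk header shows a TrainingArguments import, so the values presumably flow into the trainer configuration. A hedged sketch of that mapping, written as if inside train.py where the constants are defined; the exact wiring is an assumption, since this hunk does not show the trainer setup:

from transformers import TrainingArguments

# Assumed mapping of the module constants onto TrainingArguments.
args = TrainingArguments(
    output_dir=OUTPUT_REPO,                   # "smallama" (assumed destination)
    per_device_train_batch_size=BATCH_SIZE,   # 256
    num_train_epochs=EPOCHS,                  # 2
    learning_rate=LEARNING_RATE,              # 2e-3
    warmup_steps=WARMUP_STEPS,                # 20
    weight_decay=DECAY,                       # 1e-3
    gradient_accumulation_steps=GRADIENT_ACCUMULATION_STEPS,  # 1
    max_grad_norm=CLIPPING,                   # 1.0
    fp16=FP16,                                # False after this change
    push_to_hub=PUSH_TO_HUB,                  # True
)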