Spaces:
Runtime error
Runtime error
Update train.py
Browse files
train.py
CHANGED
@@ -7,18 +7,18 @@ from transformers import AutoTokenizer, LlamaConfig, AutoModelForCausalLM, Llama
  7   from datasets import load_dataset, Dataset
  8   from tokenizers import ByteLevelBPETokenizer
  9
 10 - BATCH_SIZE =
 11   EPOCHS = 1
 12   LEARNING_RATE = 2e-4
 13 - FACTOR = 22 *
 14 - MAX_SEQ_LENGTH =
 15   VOCAB_SIZE = 32000
 16   INPUT_DATASET = "HuggingFaceTB/smollm-corpus"
 17   INSTRUCT_DATASET = "nroggendorff/elephant"
 18   OUTPUT_REPO = "nroggendorff/smallama"
 19 - INSTRUCT_FINETUNE_BOOL =
 20 - INIT =
 21 - SHARD_SIZE = int(
 22   FP16 = True
 23   WARMUP_STEPS = 0
 24   DECAY = 0
@@ -179,8 +179,8 @@ def train_model(model, tokenizer, dataset, push, isinst):
179   def main(push_to_hub=True, is_inst_finetune=False):
180       dataset = load_data()
181       if not is_inst_finetune:
182 -         training_corpus = get_training_corpus(dataset)
183           if INIT == 0:
184               tokenizer = create_tokenizer(training_corpus)
185           else:
186               tokenizer = load_tokenizer()
|
|
  7   from datasets import load_dataset, Dataset
  8   from tokenizers import ByteLevelBPETokenizer
  9
 10 + BATCH_SIZE = 16
 11   EPOCHS = 1
 12   LEARNING_RATE = 2e-4
 13 + FACTOR = 22 * 20
 14 + MAX_SEQ_LENGTH = 512
 15   VOCAB_SIZE = 32000
 16   INPUT_DATASET = "HuggingFaceTB/smollm-corpus"
 17   INSTRUCT_DATASET = "nroggendorff/elephant"
 18   OUTPUT_REPO = "nroggendorff/smallama"
 19 + INSTRUCT_FINETUNE_BOOL = False
 20 + INIT = 0#/2
 21 + SHARD_SIZE = int(5e+6)
 22   FP16 = True
 23   WARMUP_STEPS = 0
 24   DECAY = 0
|
|
179   def main(push_to_hub=True, is_inst_finetune=False):
180       dataset = load_data()
181       if not is_inst_finetune:
182           if INIT == 0:
183 +             training_corpus = get_training_corpus(dataset)
184               tokenizer = create_tokenizer(training_corpus)
185           else:
186               tokenizer = load_tokenizer()