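# Pretrains a small Llama-style model on HuggingFaceTB/smollm-corpus, or
# instruction-tunes an existing checkpoint on nroggendorff/elephant, and
# pushes the result to the Hugging Face Hub.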
import os
import torch
import trl
from transformers import AutoTokenizer, LlamaConfig, AutoModelForCausalLM, LlamaForCausalLM, TrainingArguments, PreTrainedTokenizerFast, get_cosine_schedule_with_warmup
from datasets import load_dataset, Dataset
from tokenizers import ByteLevelBPETokenizer
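# Run configuration: model size is derived from FACTOR, and INSTRUCT_FINETUNE_BOOL
# switches between pretraining and instruction finetuning.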
BATCH_SIZE = 128
EPOCHS = 1
LEARNING_RATE = 2e-4
FACTOR = 22 * 30
MAX_SEQ_LENGTH = 128
VOCAB_SIZE = 32000
INPUT_DATASET = "HuggingFaceTB/smollm-corpus"
INSTRUCT_DATASET = "nroggendorff/elephant"
OUTPUT_REPO = "nroggendorff/smallama"
INSTRUCT_FINETUNE_BOOL = False
INIT = 0  # /13 -- shard/run index: 0 builds the tokenizer and model from scratch, later runs load them from OUTPUT_REPO
SHARD_SIZE = int(3e+6)
FP16 = True
WARMUP_STEPS = 0
DECAY = 0
GRADIENT_ACCUMULATION_STEPS = 1
PUSH_TO_HUB = True
def load_data():
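    # Pretraining: stream the cosmopedia-v2 subset and materialize the first 15M rows.
    # Instruction tuning: load the full instruct dataset into memory.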
if not INSTRUCT_FINETUNE_BOOL:
dataset = load_dataset(INPUT_DATASET, "cosmopedia-v2", split="train", streaming=True)
dataset = Dataset.from_generator(lambda: dataset.take(int(15e+6)))
# dataset = dataset.shard(num_shards=len(dataset) // SHARD_SIZE, index=INIT)
else:
dataset = load_dataset(INSTRUCT_DATASET, split="train")#, streaming=True)
# dataset = Dataset.from_generator(lambda: dataset.take(int(5e+6)))
return dataset
def create_tokenizer(training_corpus):
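    # Train a byte-level BPE tokenizer from scratch on the corpus, then wrap it so it
    # can be used (and pushed) like any Hugging Face tokenizer.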
tokenizer = ByteLevelBPETokenizer()
special_tokens = ["<s>", "<pad>", "</s>", "<unk>", "<mask>"]
if INSTRUCT_FINETUNE_BOOL:
        special_tokens.extend(["<|user|>", "<|bot|>", "<|end|>"])
tokenizer.train_from_iterator(
training_corpus,
vocab_size=VOCAB_SIZE,
min_frequency=2,
special_tokens=special_tokens
)
fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer._tokenizer)
return fast_tokenizer
def load_tokenizer():
tokenizer = AutoTokenizer.from_pretrained(OUTPUT_REPO)
return tokenizer
def get_training_corpus(dataset):
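    # Yield the text column in chunks of 1,000 examples; train_from_iterator
    # consumes these batches when building the vocabulary.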
texts = []
#for field in ['pretrain', 'instruct']:
# texts.extend(dataset[field]['text'])
texts.extend(dataset['text'])
for i in range(0, len(texts), 1000):
yield texts[i : i + 1000]
def format_prompts(examples, tokenizer, isinst):
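    # Instruct mode: split each example on <|end|>, rebuild the user/assistant turns,
    # and render them with the chat template. Pretrain mode: wrap the raw text in
    # BOS/EOS tokens.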
texts = []
for text in examples['text']:
if isinst:
conversation = []
parts = text.split('<|end|>')
for i in range(0, len(parts) - 1, 2):
prompt = parts[i].replace("<|user|>", "")
response = parts[i + 1].replace("<|bot|>", "")
conversation.append({"role": "user", "content": prompt})
conversation.append({"role": "assistant", "content": response})
formatted_conversation = tokenizer.apply_chat_template(conversation, tokenize=False)
texts.append(formatted_conversation)
else:
texts.append(tokenizer.bos_token + text + tokenizer.eos_token)
return {"text": texts}
def create_model(tokenizer):
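    # Build a fresh Llama config whose width, depth, and head count all scale with
    # FACTOR, sized for MAX_SEQ_LENGTH positions.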
config = LlamaConfig(
vocab_size=tokenizer.vocab_size,
hidden_size=FACTOR,
intermediate_size=FACTOR * 4,
num_hidden_layers=max(1, FACTOR // 32),
num_attention_heads=max(1, FACTOR // 64),
max_position_embeddings=MAX_SEQ_LENGTH,
rms_norm_eps=1e-6,
initializer_range=0.02,
use_cache=True,
pad_token_id=tokenizer.pad_token_id,
bos_token_id=tokenizer.bos_token_id,
eos_token_id=tokenizer.eos_token_id,
tie_word_embeddings=False,
)
model = LlamaForCausalLM(config)
return model
def load_model():
model = AutoModelForCausalLM.from_pretrained(OUTPUT_REPO)
return model
def configure_tokenizer(tokenizer):
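    # Register the special tokens the model expects; in instruct mode also add the
    # role markers and a chat template that enforces alternating turns.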
special_tokens = {
"bos_token": "<s>",
"eos_token": "</s>",
"unk_token": "<unk>",
"pad_token": "<pad>",
"mask_token": "<mask>"
}
if INSTRUCT_FINETUNE_BOOL:
special_tokens["additional_special_tokens"] = ["<|user|>", "<|bot|>", "<|end|>"]
tokenizer.add_special_tokens(special_tokens)
if INSTRUCT_FINETUNE_BOOL:
tokenizer.user_token_id = tokenizer.convert_tokens_to_ids("<|user|>")
tokenizer.assistant_token_id = tokenizer.convert_tokens_to_ids("<|bot|>")
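    # Jinja chat template: raises if roles do not strictly alternate user/assistant,
    # wraps each turn in its role marker, and appends EOS after assistant turns.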
chat_template = "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '<|user|>\n' + message['content'] + '<|end|>\n' }}{% elif message['role'] == 'assistant' %}{{ '<|bot|>\n' + message['content'] + '<|end|>\n' + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}"
tokenizer.chat_template = chat_template
def train_model(model, tokenizer, dataset, push, isinst):
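    # Configure TrainingArguments plus a manual AdamW/cosine schedule, format the
    # dataset, and fine-tune with trl's SFTTrainer. Note: passing dataset_text_field
    # and max_seq_length directly to SFTTrainer assumes an older trl release; newer
    # versions expect them on an SFTConfig instead.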
args = TrainingArguments(
output_dir="model",
num_train_epochs=EPOCHS,
per_device_train_batch_size=BATCH_SIZE,
learning_rate=LEARNING_RATE,
optim="adamw_torch",
warmup_steps=WARMUP_STEPS,
weight_decay=DECAY,
gradient_accumulation_steps=GRADIENT_ACCUMULATION_STEPS,
fp16=FP16,
save_steps=int(1e+10),
logging_steps=10
)
dataset = dataset.shard(num_shards=len(dataset) // SHARD_SIZE, index=INIT)
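    # Hand-built optimizer and LR schedule; the step count assumes a single device
    # and gradient accumulation of 1, matching the settings above.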
    optimizer = torch.optim.AdamW(model.parameters(), lr=args.learning_rate)
scheduler = get_cosine_schedule_with_warmup(
optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=(len(dataset) // args.per_device_train_batch_size) * args.num_train_epochs
)
dataset = dataset.map(lambda examples: format_prompts(examples, tokenizer, isinst), batched=True, remove_columns=dataset.column_names)
trainer = trl.SFTTrainer(
model=model,
tokenizer=tokenizer,
args=args,
train_dataset=dataset,
dataset_text_field='text',
max_seq_length=MAX_SEQ_LENGTH,
optimizers=(optimizer, scheduler)
)
train = trainer.train()
trained_model = trainer.model
trained_tokenizer = trainer.tokenizer
if push:
if INSTRUCT_FINETUNE_BOOL:
repo_id = OUTPUT_REPO + "-it"
else:
repo_id = OUTPUT_REPO
        msg = f"Training loss: {train.training_loss}"
trained_model.push_to_hub(repo_id, commit_message=msg, force=True)
trained_tokenizer.push_to_hub(repo_id, commit_message=msg, force=True)
else:
trained_model.save_pretrained("model")
trained_tokenizer.save_pretrained("tokenizer")
def main(push_to_hub=True, is_inst_finetune=False):
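    # Orchestration: build or load the tokenizer and model depending on whether this
    # is the first pretraining run (INIT == 0) or an instruction-finetuning pass,
    # then train and optionally push to the Hub.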
dataset = load_data()
if not is_inst_finetune:
training_corpus = get_training_corpus(dataset)
if INIT == 0:
tokenizer = create_tokenizer(training_corpus)
else:
tokenizer = load_tokenizer()
else:
tokenizer = load_tokenizer()
configure_tokenizer(tokenizer)
if is_inst_finetune:
model = load_model()
model.resize_token_embeddings(len(tokenizer))
train_model(model, tokenizer, dataset, push_to_hub, True)
else:
if INIT == 0:
model = create_model(tokenizer)
else:
model = load_model()
train_model(model, tokenizer, dataset, push_to_hub, False)
if __name__ == "__main__":
main(PUSH_TO_HUB, INSTRUCT_FINETUNE_BOOL)
raise RuntimeError("The script is finished.") |