import os
import torch
import trl
from torch.optim import AdamW  # recent transformers versions no longer export AdamW
from transformers import AutoTokenizer, LlamaConfig, LlamaForCausalLM, TrainingArguments, PreTrainedTokenizerFast, get_linear_schedule_with_warmup
from datasets import load_dataset
from tokenizers import ByteLevelBPETokenizer
MAX_SEQ_LENGTH = 512
BATCH_SIZE = 64
EPOCHS = 3
LEARNING_RATE = 2e-2
FACTOR = 256
VOCAB_SIZE = 32000
INPUT_DATASET = "nroggendorff/openhermes"
OUTPUT_REPO = "smallama"
FP16 = True
WARMUP_STEPS = 200
DECAY = 0.01
GRADIENT_ACCUMULATION_STEPS = 1
CLIPPING = 1.0
PUSH_TO_HUB = True

def load_data():
    dataset = load_dataset(INPUT_DATASET, split="train").select(range(int(2e5)))
    return dataset
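
# Only the first 200k rows are selected, which keeps tokenizer training and
# the SFT run small; widen or drop the slice to train on the full dataset.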

def create_tokenizer(training_corpus):
    tokenizer = ByteLevelBPETokenizer()
    tokenizer.train_from_iterator(
        training_corpus,
        vocab_size=VOCAB_SIZE,
        min_frequency=2,
        special_tokens=["<s>", "<pad>", "</s>", "<unk>", "<mask>", "<|user|>", "<|bot|>", "<|end|>"]
    )
    fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer._tokenizer)
    return fast_tokenizer
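
# Wrapping the trained tokenizers object in PreTrainedTokenizerFast carries
# over the vocab and merges but not the special-token roles (bos/eos/pad),
# which is why configure_tokenizer() has to assign them explicitly below.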

def get_training_corpus(dataset):
    for i in range(0, len(dataset), 1000):
        yield dataset[i : i + 1000]["text"]
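
# Each yielded item is a list of up to 1000 raw strings, so the BPE trainer
# streams over the corpus without materializing the whole text column at once.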

def format_prompts(examples, tokenizer):
    texts = []
    for text in examples['text']:
        conversation = []
        parts = text.split('<|end|>')
        for i in range(0, len(parts) - 1, 2):
            prompt = parts[i].replace("<|user|>", "")
            response = parts[i + 1].replace("<|bot|>", "")
            conversation.append({"role": "user", "content": prompt})
            conversation.append({"role": "assistant", "content": response})
        formatted_conversation = tokenizer.apply_chat_template(conversation, tokenize=False)
        texts.append(formatted_conversation)
    return {"text": texts}

def create_model(tokenizer):
    config = LlamaConfig(
        vocab_size=len(tokenizer),  # full size, including any added special tokens
        hidden_size=FACTOR,
        intermediate_size=FACTOR * 4,
        num_hidden_layers=max(1, FACTOR // 32),
        num_attention_heads=max(1, FACTOR // 64),
        max_position_embeddings=MAX_SEQ_LENGTH,
        rms_norm_eps=1e-6,
        initializer_range=0.02,
        use_cache=True,
        pad_token_id=tokenizer.pad_token_id,
        bos_token_id=tokenizer.bos_token_id,
        eos_token_id=tokenizer.eos_token_id,
        tie_word_embeddings=False,
    )
    model = LlamaForCausalLM(config)
    return model
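
# Rough size estimate with FACTOR = 256 (hidden 256, intermediate 1024,
# 8 layers, 4 heads): the untied input/output embeddings contribute about
# 2 x 32000 x 256 ~= 16.4M parameters, and each decoder layer roughly 1M
# (4h^2 attention + 3 x h x 4h MLP), i.e. a model on the order of 25M.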

def configure_tokenizer(tokenizer):
    special_tokens = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "unk_token": "<unk>",
        "pad_token": "<pad>",
        "mask_token": "<mask>",
        "additional_special_tokens": ["<|user|>", "<|bot|>", "<|end|>"]
    }
    tokenizer.add_special_tokens(special_tokens)
    tokenizer.user_token_id = tokenizer.convert_tokens_to_ids("<|user|>")
    tokenizer.assistant_token_id = tokenizer.convert_tokens_to_ids("<|bot|>")
    chat_template = "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '<|user|>\n' + message['content'] + '<|end|>\n' }}{% elif message['role'] == 'assistant' %}{{ '<|bot|>\n' + message['content'] + '<|end|>\n' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}{{ eos_token }}"
    tokenizer.chat_template = chat_template
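
# For reference, a single exchange rendered through this template looks like:
#   <s><|user|>
#   Hi there<|end|>
#   <|bot|>
#   Hello!<|end|>
#   </s>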

def train_model(model, tokenizer, dataset, push):
    args = TrainingArguments(
        output_dir="model",
        num_train_epochs=EPOCHS,
        per_device_train_batch_size=BATCH_SIZE,
        learning_rate=LEARNING_RATE,
        optim="adamw_torch",  # overridden by the optimizers tuple passed to the trainer below
        warmup_steps=WARMUP_STEPS,
        weight_decay=DECAY,
        gradient_accumulation_steps=GRADIENT_ACCUMULATION_STEPS,
        fp16=FP16,
        max_grad_norm=CLIPPING
    )
    # args.weight_decay is ignored when a custom optimizer is supplied,
    # so pass DECAY to AdamW directly.
    optimizer = AdamW(model.parameters(), lr=args.learning_rate, weight_decay=DECAY)
    # Optimizer steps per run: examples / (batch size * accumulation), times epochs.
    total_steps = (
        len(dataset) * args.num_train_epochs
        // (args.per_device_train_batch_size * args.gradient_accumulation_steps)
    )
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=args.warmup_steps,
        num_training_steps=total_steps
    )
    dataset = dataset.map(lambda examples: format_prompts(examples, tokenizer), batched=True)
    trainer = trl.SFTTrainer(
        model=model,
        tokenizer=tokenizer,
        args=args,
        train_dataset=dataset,
        dataset_text_field='text',
        max_seq_length=MAX_SEQ_LENGTH,
        optimizers=(optimizer, scheduler)
    )
    trainer.train()
    trained_model = trainer.model
    trained_tokenizer = trainer.tokenizer
    if push:
        repo_id = OUTPUT_REPO
        trained_model.push_to_hub(repo_id)
        trained_tokenizer.push_to_hub(repo_id)
    else:
        trained_model.save_pretrained("model")
        trained_tokenizer.save_pretrained("tokenizer")
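
# A minimal sketch of loading the pushed artifacts afterwards (the repo id
# resolves under your Hub namespace, e.g. "<username>/smallama"):
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#   model = AutoModelForCausalLM.from_pretrained("<username>/smallama")
#   tokenizer = AutoTokenizer.from_pretrained("<username>/smallama")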

def main(push_to_hub=True):
    dataset = load_data()
    training_corpus = get_training_corpus(dataset)
    tokenizer = create_tokenizer(training_corpus)
    configure_tokenizer(tokenizer)
    model = create_model(tokenizer)
    train_model(model, tokenizer, dataset, push_to_hub)

if __name__ == "__main__":
    main(PUSH_TO_HUB)
    # Deliberate: raising makes the hosting Space run exit with an error
    # once training completes instead of idling.
    raise RuntimeError("The script is finished.")