import os
import torch
import trl
from transformers import AutoTokenizer, LlamaConfig, LlamaForCausalLM, TrainingArguments, PreTrainedTokenizerFast
from datasets import load_dataset
from tokenizers import ByteLevelBPETokenizer
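
# Training hyperparameters. FACTOR sets the model's hidden size; the layer and
# attention-head counts in create_model are derived from it, so scaling FACTOR
# scales the whole architecture.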
MAX_SEQ_LENGTH = 128
BATCH_SIZE = 256
EPOCHS = 20
LEARNING_RATE = 1e-5
FACTOR = 128
VOCAB_SIZE = 32000
INPUT_DATASET = "nroggendorff/oak"
OUTPUT_REPO = "smallama"
PUSH_TO_HUB = True
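
# Pipeline overview: load the raw text dataset, train a byte-level BPE tokenizer
# on it, build a small Llama model from scratch, reformat the data with a chat
# template, and train with trl's SFTTrainer, optionally pushing the result to the Hub.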

def load_data():
    dataset = load_dataset(INPUT_DATASET, split="train")
    return dataset

def create_tokenizer(training_corpus):
    # Train a byte-level BPE tokenizer from scratch on the raw text corpus.
    tokenizer = ByteLevelBPETokenizer()
    tokenizer.train_from_iterator(
        training_corpus,
        vocab_size=VOCAB_SIZE,
        min_frequency=2,
        special_tokens=["<s>", "<pad>", "</s>", "<unk>", "<mask>", "<|user|>", "<|bot|>", "<|end|>"]
    )
    # Wrap the raw tokenizers object so it can be used with transformers/trl.
    fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer._tokenizer)
    return fast_tokenizer

def get_training_corpus(dataset):
    # Yield the text column in chunks of 1,000 rows for tokenizer training.
    for i in range(0, len(dataset), 1000):
        yield dataset[i : i + 1000]["text"]

def format_prompts(examples, tokenizer):
    texts = []
    for text in examples['text']:
        conversation = []
        # Each raw sample looks like "<|user|>...<|end|><|bot|>...<|end|>...",
        # so splitting on <|end|> yields alternating user/assistant turns.
        parts = text.split('<|end|>')
        for i in range(0, len(parts) - 1, 2):
            prompt = parts[i].replace("<|user|>", "").strip()
            response = parts[i + 1].replace("<|bot|>", "").strip()
            conversation.append({"role": "user", "content": prompt})
            conversation.append({"role": "assistant", "content": response})
        # Re-render the conversation with the chat template set in configure_tokenizer.
        formatted_conversation = tokenizer.apply_chat_template(conversation, tokenize=False)
        texts.append(formatted_conversation)
    return {"text": texts}

def create_model(tokenizer):
    # A deliberately tiny Llama architecture; width, depth, and head count
    # are all derived from FACTOR.
    config = LlamaConfig(
        vocab_size=len(tokenizer),  # full vocabulary, including special tokens
        hidden_size=FACTOR,
        intermediate_size=FACTOR * 4,
        num_hidden_layers=max(1, FACTOR // 32),
        num_attention_heads=max(1, FACTOR // 64),
        max_position_embeddings=MAX_SEQ_LENGTH,
        rms_norm_eps=1e-6,
        initializer_range=0.02,
        use_cache=True,
        pad_token_id=tokenizer.pad_token_id,
        bos_token_id=tokenizer.bos_token_id,
        eos_token_id=tokenizer.eos_token_id,
        tie_word_embeddings=False,
    )
    model = LlamaForCausalLM(config)
    return model

def configure_tokenizer(tokenizer):
    # Register the special tokens that were reserved during BPE training.
    special_tokens = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "unk_token": "<unk>",
        "pad_token": "<pad>",
        "mask_token": "<mask>",
        "additional_special_tokens": ["<|user|>", "<|bot|>", "<|end|>"]
    }
    tokenizer.add_special_tokens(special_tokens)
    tokenizer.user_token_id = tokenizer.convert_tokens_to_ids("<|user|>")
    tokenizer.assistant_token_id = tokenizer.convert_tokens_to_ids("<|bot|>")

    # Jinja chat template: enforce alternating user/assistant turns and wrap
    # each message in its role marker followed by <|end|>.
    chat_template = "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '<|user|>\n' + message['content'] + '<|end|>\n' }}{% elif message['role'] == 'assistant' %}{{ '<|bot|>\n' + message['content'] + '<|end|>\n' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}{{ eos_token }}"
    tokenizer.chat_template = chat_template

def train_model(model, tokenizer, dataset, push):
    args = TrainingArguments(
        output_dir="model",
        num_train_epochs=EPOCHS,
        per_device_train_batch_size=BATCH_SIZE,
        learning_rate=LEARNING_RATE,
        optim="sgd"
    )
    # Rewrite each raw sample into the chat-template format before training.
    dataset = dataset.map(lambda examples: format_prompts(examples, tokenizer), batched=True)
    # Note: passing dataset_text_field and max_seq_length directly matches older
    # trl releases; newer releases expect them on an SFTConfig instead.
    trainer = trl.SFTTrainer(
        model=model,
        tokenizer=tokenizer,
        args=args,
        train_dataset=dataset,
        dataset_text_field='text',
        max_seq_length=MAX_SEQ_LENGTH
    )
    trainer.train()

    trained_model = trainer.model
    trained_tokenizer = trainer.tokenizer
    if push:
        repo_id = OUTPUT_REPO
        trained_model.push_to_hub(repo_id)
        trained_tokenizer.push_to_hub(repo_id)
    else:
        trained_tokenizer.save_pretrained("tokenizer")

def main(push_to_hub=True):
    dataset = load_data()
    training_corpus = get_training_corpus(dataset)
    tokenizer = create_tokenizer(training_corpus)
    configure_tokenizer(tokenizer)
    model = create_model(tokenizer)
    train_model(model, tokenizer, dataset, push_to_hub)

if __name__ == "__main__":
    main(PUSH_TO_HUB)
    # Raised on purpose so the run halts visibly once training has finished.
    raise RuntimeError("The script is finished.")