Canstralian committed
Commit a1190a7 · verified · 1 Parent(s): e01a4a0

Create fine_tune_model.py

Files changed (1)
  1. fine_tune_model.py +58 -0
fine_tune_model.py ADDED
@@ -0,0 +1,58 @@
+ from datasets import load_dataset
+ from transformers import AutoTokenizer, AutoModelForCausalLM, TrainingArguments, Trainer, DataCollatorForLanguageModeling
+
+ # Load custom dataset
+ dataset = load_dataset('json', data_files='path_to_your/shell_commands_mock_data.json')
+
+ # Load tokenizer and model for Repl.it LLM
+ # (model ID as given; swap in an accessible causal-LM checkpoint if this repo is unavailable)
+ model_name = "Repl.it/llama-2-13b"
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModelForCausalLM.from_pretrained(model_name)
+
+ # Llama-style tokenizers ship without a pad token; reuse EOS so padding works
+ if tokenizer.pad_token is None:
+     tokenizer.pad_token = tokenizer.eos_token
+
+ # Tokenization function (cap max_length so "max_length" padding stays bounded)
+ def tokenize_function(examples):
+     return tokenizer(examples['prompt'], padding="max_length", truncation=True, max_length=512)
+
+ tokenized_datasets = dataset.map(tokenize_function, batched=True)
+
+ # Causal-LM collator: copies input_ids into labels so the Trainer can compute a loss
+ data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
+
+ # Training arguments
+ training_args = TrainingArguments(
+     output_dir="./results",
+     evaluation_strategy="epoch" if 'test' in tokenized_datasets else "no",
+     learning_rate=2e-5,
+     per_device_train_batch_size=1,
+     per_device_eval_batch_size=1,
+     num_train_epochs=3,
+     weight_decay=0.01,
+     logging_dir="./logs",
+     logging_steps=10,
+     save_steps=100,
+ )
+
+ # Trainer setup
+ trainer = Trainer(
+     model=model,
+     args=training_args,
+     train_dataset=tokenized_datasets['train'],
+     eval_dataset=tokenized_datasets['test'] if 'test' in tokenized_datasets else None,
+     data_collator=data_collator,
+ )
+
+ # Start training
+ trainer.train()
+
+ # Save fine-tuned model and tokenizer so the checkpoint loads standalone
+ trainer.save_model("./fine_tuned_model")
+ tokenizer.save_pretrained("./fine_tuned_model")
+
+ # Evaluate the model (only when an eval split exists)
+ if 'test' in tokenized_datasets:
+     trainer.evaluate()
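
The script loads a single JSON file and tokenizes its 'prompt' field, so each record needs at least that key. A minimal sketch of producing a compatible mock file follows; the path matches the placeholder above, but the shell-command strings are illustrative and not from the original commit.

import json

# Hypothetical records: tokenize_function above reads only the 'prompt' field
records = [
    {"prompt": "List all files, including hidden ones: ls -la"},
    {"prompt": "Show disk usage of the current directory: du -sh ."},
]

# One JSON object per line, a format load_dataset('json', ...) accepts
with open("path_to_your/shell_commands_mock_data.json", "w") as f:
    for record in records:
        f.write(json.dumps(record) + "\n")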
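Once training finishes, the checkpoint saved to ./fine_tuned_model can be reloaded like any Hugging Face model. A minimal sketch, with an illustrative prompt:

from transformers import AutoTokenizer, AutoModelForCausalLM

# Reload the checkpoint written by trainer.save_model above
tokenizer = AutoTokenizer.from_pretrained("./fine_tuned_model")
model = AutoModelForCausalLM.from_pretrained("./fine_tuned_model")

# Generate a completion for an example prompt (illustrative)
inputs = tokenizer("List all files, including hidden ones:", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))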