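"""Gradio Space for fine-tuning a Hugging Face seq2seq model.

Takes a base model, a dataset with 'text'/'target' columns and
'train'/'test' splits, and training hyperparameters from the UI,
then trains on GPU and pushes the result to the Hub.
"""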
import os
import traceback

import spaces
import gradio as gr
from transformers import Trainer, TrainingArguments, AutoTokenizer, AutoModelForSeq2SeqLM
from transformers import DataCollatorForSeq2Seq
from datasets import load_dataset
from huggingface_hub import login
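
# The Trainer below selects the best checkpoint by "accuracy", so it needs a
# compute_metrics function that reports one. A minimal sketch: token-level
# accuracy over non-padding positions; a task metric (ROUGE, BLEU, exact
# match) may suit your data better. For large eval sets, consider the
# Trainer's preprocess_logits_for_metrics to avoid holding all logits.
import numpy as np

def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    # Trainer can pass predictions as a tuple (logits, ...); take the logits
    if isinstance(predictions, tuple):
        predictions = predictions[0]
    # Logits have shape (batch, seq_len, vocab); argmax over the vocab axis
    if predictions.ndim == 3:
        predictions = predictions.argmax(axis=-1)
    # Positions labelled -100 are padding and are excluded from the score
    mask = labels != -100
    accuracy = (predictions == labels)[mask].mean()
    return {"accuracy": float(accuracy)}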

@spaces.GPU
def fine_tune_model(model_name, dataset_name, hub_id, api_key, num_epochs, batch_size, lr, grad):
    try:
        # Authenticate so trainer.push_to_hub() can upload the trained model
        login(api_key.strip())
        # Load the base seq2seq model
        model = AutoModelForSeq2SeqLM.from_pretrained(model_name.strip())

        # Set training arguments
        training_args = TrainingArguments(
            output_dir='/data/results',
            eval_strategy="steps",
            save_strategy="steps",
            learning_rate=lr * 1e-5,  # the slider value is in units of 1e-5
            per_device_train_batch_size=int(batch_size),
            per_device_eval_batch_size=int(batch_size),
            num_train_epochs=int(num_epochs),
            weight_decay=0.01,
            gradient_accumulation_steps=int(grad),
            max_grad_norm=1.0,
            load_best_model_at_end=True,
            metric_for_best_model="accuracy",
            greater_is_better=True,
            logging_dir='/data/logs',
            logging_steps=10,
            hub_model_id=hub_id.strip(),
            fp16=True,
            eval_steps=100,  # evaluate on the same cadence as checkpoints
            save_steps=100,  # save a checkpoint every 100 steps
            save_total_limit=3,
        )
        max_length = 128
        # Load the dataset and tokenizer
        dataset = load_dataset(dataset_name.strip())
        tokenizer = AutoTokenizer.from_pretrained(model_name.strip())
        # Tokenize the dataset
        def tokenize_function(examples):
            # 'text' is the input column; 'target' is the expected output.
            # No padding here: DataCollatorForSeq2Seq pads dynamically per batch.
            model_inputs = tokenizer(
                examples['text'],
                max_length=max_length,
                truncation=True,
            )

            # Tokenize the targets; text_target= applies target-side tokenization
            labels = tokenizer(
                text_target=examples['target'],
                max_length=max_length,
                truncation=True,
            )

            # Attach the target token IDs as the labels the model trains on
            model_inputs["labels"] = labels["input_ids"]
            return model_inputs
        tokenized_datasets = dataset.map(tokenize_function, batched=True)
        
        tokenized_datasets['train'].save_to_disk(f'/data/{hub_id.strip()}_train_dataset')
        tokenized_datasets['test'].save_to_disk(f'/data/{hub_id.strip()}_test_dataset')
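        # These cached splits aren't reloaded anywhere yet; a later run could
        # call datasets.load_from_disk on the same paths to skip re-tokenization.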
    
        # Create the Trainer; DataCollatorForSeq2Seq pads inputs dynamically
        # per batch and pads labels with -100 so padding is ignored by the loss
        data_collator = DataCollatorForSeq2Seq(tokenizer, model=model)
        trainer = Trainer(
            model=model,
            args=training_args,
            train_dataset=tokenized_datasets['train'],
            eval_dataset=tokenized_datasets['test'],
            data_collator=data_collator,
            compute_metrics=compute_metrics,
        )
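        # Realizes the "check if a checkpoint exists and load it" note above:
        # a minimal sketch that picks up the latest checkpoint under
        # output_dir, or None to start fresh
        from transformers.trainer_utils import get_last_checkpoint
        last_checkpoint = get_last_checkpoint('/data/results') if os.path.isdir('/data/results') else None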

        # Fine-tune the model, resuming from the latest checkpoint if present
        trainer.train(resume_from_checkpoint=last_checkpoint)
        trainer.push_to_hub(commit_message="Training complete!")
    except Exception as e:
        return f"An error occurred: {str(e)}, TB: {traceback.format_exc()}"
    return 'DONE!'
'''
# Disabled inference interface: generate a prediction for a query. A sketch
# that assumes model_name is defined at module scope before this is enabled.
def predict(text):
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name.strip())
    tokenizer = AutoTokenizer.from_pretrained(model_name.strip())
    inputs = tokenizer(text, return_tensors="pt", truncation=True)
    # Seq2seq inference goes through generate(), not a bare forward pass
    output_ids = model.generate(**inputs, max_new_tokens=128)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)
'''
# Create Gradio interface
try:
    iface = gr.Interface(
        fn=fine_tune_model,
        inputs=[
            gr.Textbox(label="Model Name (e.g., 'google/t5-efficient-tiny-nh8')"),
            gr.Textbox(label="Dataset Name (e.g., 'imdb')"),
            gr.Textbox(label="HF hub to push to after training"),
            gr.Textbox(label="HF API token"),
            gr.Slider(minimum=1, maximum=10, value=3, label="Number of Epochs", step=1),
            gr.Slider(minimum=1, maximum=2000, value=1, label="Batch Size", step=1),
            gr.Slider(minimum=1, maximum=1000, value=1, label="Learning Rate (e-5)", step=1),
            gr.Slider(minimum=1, maximum=100, value=1, label="Gradient accumulation", step=1), 
        ],
        outputs="text",
        title="Fine-Tune Hugging Face Model",
        description="This interface allows you to fine-tune a Hugging Face model on a specified dataset."
    )
    '''
    iface = gr.Interface(
        fn=predict,
        inputs=[
            gr.Textbox(label="Query"),
        ],
        outputs="text",
        title="Fine-Tune Hugging Face Model",
        description="This interface allows you to test a fine-tune Hugging Face model."
    )
    '''
    # Launch the interface
    iface.launch()    
except Exception as e:
    print(f"An error occurred: {str(e)}, TB: {traceback.format_exc()}")