import gradio as gr
from transformers import Trainer, TrainingArguments, AutoTokenizer, AutoModelForSeq2SeqLM
from datasets import load_dataset

def fine_tune_model(model_name, dataset_name, hub_id, num_epochs, batch_size, lr, grad):
    # Load the dataset
    dataset = load_dataset(dataset_name)

    # Load the model and tokenizer (num_labels is a classification-head argument
    # and does not apply to a seq2seq model, so it is not passed here)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)

    # Tokenize the dataset. A seq2seq model needs text targets to compute a loss,
    # so the integer label is mapped to "positive"/"negative" here (this assumes a
    # binary sentiment dataset such as the 'imdb' example).
    def tokenize_function(examples):
        model_inputs = tokenizer(examples['text'], padding="max_length", truncation=True)
        targets = ["positive" if label == 1 else "negative" for label in examples['label']]
        model_inputs['labels'] = tokenizer(text_target=targets, padding="max_length", truncation=True)['input_ids']
        return model_inputs

    tokenized_datasets = dataset.map(tokenize_function, batched=True)

    # Set training arguments
    training_args = TrainingArguments(
        output_dir='./results',
        evaluation_strategy="epoch",
        save_strategy="epoch",  # must match evaluation_strategy when load_best_model_at_end=True
        learning_rate=lr * 1e-6,  # the slider passes the learning rate in units of 1e-6
        per_device_train_batch_size=int(batch_size),
        per_device_eval_batch_size=int(batch_size),
        num_train_epochs=num_epochs,
        weight_decay=0.01,
        gradient_accumulation_steps=int(grad),
        load_best_model_at_end=True,
        metric_for_best_model="eval_loss",  # no compute_metrics is defined, so select on loss
        greater_is_better=False,
        logging_dir='./logs',
        logging_steps=10,
        push_to_hub=True,  # requires a valid Hugging Face token (e.g. via huggingface-cli login)
        hub_model_id=hub_id,
    )

    # Create Trainer (the example 'imdb' dataset has no 'validation' split, so fall back to 'test')
    eval_split = 'validation' if 'validation' in tokenized_datasets else 'test'
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets['train'],
        eval_dataset=tokenized_datasets[eval_split],
    )

    # Fine-tune the model
    trainer.train()
    trainer.push_to_hub(commit_message="Training complete!")
    return 'DONE!'
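
# Example of calling fine_tune_model directly (hypothetical repo id; pushing to the
# Hub requires being logged in with a Hugging Face token):
# fine_tune_model("google/t5-efficient-tiny-nh8", "imdb", "your-username/t5-tiny-imdb",
#                 num_epochs=1, batch_size=4, lr=50, grad=1)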

# Standalone inference helper (not wired into the Gradio interface below); it assumes
# a fine-tuned seq2seq model and tokenizer are loaded at module level.
def predict(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
    outputs = model.generate(**inputs, max_new_tokens=5)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Create Gradio interface (one input per fine_tune_model parameter)
iface = gr.Interface(
    fn=fine_tune_model,
    inputs=[
        gr.Textbox(label="Model Name (e.g., 'google/t5-efficient-tiny-nh8')"),
        gr.Textbox(label="Dataset Name (e.g., 'imdb')"),
        gr.Textbox(label="Hub model ID to push to after training"),
        gr.Slider(minimum=1, maximum=10, value=3, step=1, label="Number of Epochs"),
        gr.Slider(minimum=1, maximum=16, value=4, step=1, label="Batch Size"),
        gr.Slider(minimum=1, maximum=1000, value=50, step=1, label="Learning Rate (x 1e-6)"),
        gr.Slider(minimum=1, maximum=100, value=1, step=1, label="Gradient Accumulation Steps"),
    ],
    outputs="text",
    title="Fine-Tune Hugging Face Model",
    description="This interface allows you to fine-tune a Hugging Face model on a specified dataset."
)

# Launch the interface
iface.launch()
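
# To expose a temporary public URL instead (useful when running outside Hugging Face
# Spaces), launch can be called as:
# iface.launch(share=True)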