import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
from huggingface_hub import login
import os

# Load a causal LM and its tokenizer, falling back to the slow tokenizer
# when the checkpoint has no fast implementation
def load_model(model_name):
    try:
        # Try the fast (Rust-backed) tokenizer first
        tokenizer = AutoTokenizer.from_pretrained(model_name)
    except Exception as e:
        print(f"Fast tokenizer not available for {model_name}; falling back to the slow tokenizer. Error: {e}")
        tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)

    model = AutoModelForCausalLM.from_pretrained(model_name)

    # GPT-2-style checkpoints ship without a pad token; reuse EOS so padding works
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    if model.config.pad_token_id is None:
        model.config.pad_token_id = tokenizer.pad_token_id

    return tokenizer, model
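
# Optional sketch: memoize load_model so repeated comparisons of the same
# checkpoints don't reload weights on every call (maxsize of 4 is an
# arbitrary choice, not something this app requires):
#
#   from functools import lru_cache
#   load_model = lru_cache(maxsize=4)(load_model)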

# Load Hugging Face token
hf_token = os.getenv('HF_API_TOKEN')
if not hf_token:
    raise ValueError("Hugging Face token not found. Set the HF_API_TOKEN environment variable.")

# Login to Hugging Face Hub
login(hf_token)
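
# Example shell setup before launching (the variable name matches the
# os.getenv lookup above):
#   export HF_API_TOKEN=<your-token>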

# Generate a completion from a single tokenizer/model pair
def generate_text(tokenizer, model, prompt):
    inputs = tokenizer(prompt, return_tensors="pt", padding=True)
    with torch.no_grad():
        generated_ids = model.generate(
            input_ids=inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            max_length=100,  # counts prompt tokens too; long prompts leave fewer new tokens
            pad_token_id=tokenizer.pad_token_id
        )
    return tokenizer.decode(
        generated_ids[0],
        skip_special_tokens=True,
        clean_up_tokenization_spaces=True
    )

# Compare text generation from the original and fine-tuned models
def compare_models(prompt, original_model_name, fine_tuned_model_name):
    # Load both models based on user input
    original_tokenizer, original_model = load_model(original_model_name)
    fine_tuned_tokenizer, fine_tuned_model = load_model(fine_tuned_model_name)

    # Ensure the models are in evaluation mode
    original_model.eval()
    fine_tuned_model.eval()

    # Return the generated text from both models for comparison
    return {
        "Original Model Output": generate_text(original_tokenizer, original_model, prompt),
        "Fine-Tuned Model Output": generate_text(fine_tuned_tokenizer, fine_tuned_model, prompt)
    }
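
# Quick sanity check without the UI (the checkpoint names are placeholders;
# any two causal-LM checkpoints on the Hub would do):
#   print(compare_models("Once upon a time", "gpt2", "distilgpt2"))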

# Gradio Interface
iface = gr.Interface(
    fn=compare_models,
    inputs=[
        gr.Textbox(lines=5, placeholder="Enter text here...", label="Input Text"),
        gr.Textbox(lines=1, placeholder="e.g., gpt2-medium", label="Original Model Name"),
        gr.Textbox(lines=1, placeholder="e.g., your-username/gpt2-medium-finetuned", label="Fine-Tuned Model Name")
    ],
    outputs=gr.JSON(label="Generated Texts"),
    title="Compare Text Generation from Original and Fine-Tuned Models",
    description="Enter a prompt and model names to generate text from the original and fine-tuned models."
)

iface.launch()
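
# launch() serves the app locally and blocks; Gradio's share=True option
# would additionally create a temporary public link:
#   iface.launch(share=True)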