Update app.py
app.py CHANGED
@@ -4,7 +4,7 @@ import torch
 from huggingface_hub import login
 import os
 
-# Load
+# Load text generation model
 def load_model(model_name):
     tokenizer = AutoTokenizer.from_pretrained(model_name)
     model = AutoModelForCausalLM.from_pretrained(model_name)
@@ -12,9 +12,9 @@ def load_model(model_name):
 
 # Models to compare
 original_model_name = "Vishwas1/hummingbird-base-marathi" # Replace with your original model
-fine_tuned_model_name = "Vishwas1/hummingbird-finetuned-marathi" # Replace with your fine-tuned model
+fine_tuned_model_name = "Vishwas1/hummingbird-finetuned-marathi" # Replace with your fine-tuned model
 
-# Load
+# Load Hugging Face token
 hf_token = os.getenv('HF_API_TOKEN')
 if not hf_token:
     raise ValueError("Error: Hugging Face token not found. Please set it as an environment variable.")
@@ -30,32 +30,24 @@ fine_tuned_tokenizer, fine_tuned_model = load_model(fine_tuned_model_name)
 original_model.eval()
 fine_tuned_model.eval()
 
-# Function to compare models
-def compare_models(text):
-    #
-    inputs_orig = original_tokenizer(text, return_tensors="pt")
+# Function to compare text generation from both models
+def compare_models(prompt):
+    # Generate text with the original model
+    inputs_orig = original_tokenizer(prompt, return_tensors="pt")
     with torch.no_grad():
-        outputs_orig = original_model(**inputs_orig)
-        logits_orig = outputs_orig.logits
-    probs_orig = torch.softmax(logits_orig, dim=1)
-    pred_orig = torch.argmax(probs_orig, dim=1).item()
-    confidence_orig = probs_orig[0][pred_orig].item()
+        generated_ids_orig = original_model.generate(inputs_orig["input_ids"], max_length=100)
+    generated_text_orig = original_tokenizer.decode(generated_ids_orig[0], skip_special_tokens=True)
 
-    #
-    inputs_fine = fine_tuned_tokenizer(text, return_tensors="pt")
+    # Generate text with the fine-tuned model
+    inputs_fine = fine_tuned_tokenizer(prompt, return_tensors="pt")
     with torch.no_grad():
-        outputs_fine = fine_tuned_model(**inputs_fine)
-        logits_fine = outputs_fine.logits
-    probs_fine = torch.softmax(logits_fine, dim=1)
-    pred_fine = torch.argmax(probs_fine, dim=1).item()
-    confidence_fine = probs_fine[0][pred_fine].item()
-
-    # Map predictions to labels (adjust based on your model's labels)
-    labels = {0: "Negative", 1: "Positive"}
 
+    # Return the generated text from both models for comparison
     result = {
-        "Original Model
-        "Fine-Tuned Model
+        "Original Model Output": generated_text_orig,
+        "Fine-Tuned Model Output": generated_text_fine
     }
     return result
 
@@ -63,11 +55,12 @@ def compare_models(text):
 iface = gr.Interface(
     fn=compare_models,
     inputs=gr.Textbox(lines=5, placeholder="Enter text here...", label="Input Text"),
-    outputs=gr.JSON(label="
-    title="Compare Original and Fine-Tuned Models",
-    description="Enter
+    outputs=gr.JSON(label="Generated Texts"),
+    title="Compare Text Generation from Original and Fine-Tuned Models",
+    description="Enter a prompt to generate text from the original and fine-tuned models."
 )
 
 iface.launch()
 
 
+
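The hunks above show HF_API_TOKEN being read and validated, but the call that actually uses the token falls outside the visible context. Since app.py imports login from huggingface_hub, a typical wiring would look like the sketch below; this is an assumption about the unseen part of the file rather than something shown in this diff, and passing token=hf_token directly to from_pretrained would be an equally common alternative for private or gated checkpoints.

import os

from huggingface_hub import login

# Read the token the same way app.py does.
hf_token = os.getenv("HF_API_TOKEN")
if not hf_token:
    raise ValueError("Error: Hugging Face token not found. Please set it as an environment variable.")

# Assumed usage (not visible in the diff): authenticate the process so that
# AutoTokenizer.from_pretrained / AutoModelForCausalLM.from_pretrained can
# access private or gated repositories.
login(token=hf_token)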
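For a quick check of the new generation path outside the Space, the comparison can be reproduced with a short standalone script. The sketch below is a minimal example under the same assumptions as app.py (both checkpoints load with AutoModelForCausalLM); it additionally passes attention_mask and an explicit pad_token_id, which the diffed code omits but which generate() generally expects for clean, warning-free runs.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer


def generate_text(model_name, prompt, max_length=100):
    # Same loading pattern as load_model() in app.py.
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    model.eval()

    inputs = tokenizer(prompt, return_tensors="pt")
    # Fall back to the EOS token when the tokenizer defines no pad token.
    pad_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
    with torch.no_grad():
        generated_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            max_length=max_length,
            pad_token_id=pad_id,
        )
    return tokenizer.decode(generated_ids[0], skip_special_tokens=True)


if __name__ == "__main__":
    prompt = "माझे नाव"  # any short Marathi prompt will do
    result = {
        "Original Model Output": generate_text("Vishwas1/hummingbird-base-marathi", prompt),
        "Fine-Tuned Model Output": generate_text("Vishwas1/hummingbird-finetuned-marathi", prompt),
    }
    print(result)

Keeping max_length=100 matches the diff; switching to max_new_tokens would bound only the generated continuation rather than the prompt plus continuation.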