Vishwas1 committed
Commit 0bf414e · verified · 1 Parent(s): 136c43d

Create app.py

Files changed (1)
  1. app.py +60 -0
app.py ADDED
@@ -0,0 +1,60 @@
+ import gradio as gr
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
+ import torch
+
+ # Load the original pre-trained model
+ def load_model(model_name):
+     tokenizer = AutoTokenizer.from_pretrained(model_name)
+     model = AutoModelForSequenceClassification.from_pretrained(model_name)
+     return tokenizer, model
+
+ # Models to compare
+ original_model_name = "bert-base-uncased"  # Replace with your original model
+ fine_tuned_model_name = "Vishwas1/bert-base-imdb"  # Replace with your fine-tuned model's repo ID
+
+ # Load models
+ original_tokenizer, original_model = load_model(original_model_name)
+ fine_tuned_tokenizer, fine_tuned_model = load_model(fine_tuned_model_name)
+
+ # Ensure models are in evaluation mode
+ original_model.eval()
+ fine_tuned_model.eval()
+
+ def compare_models(text):
+     # Original model prediction
+     inputs_orig = original_tokenizer(text, return_tensors='pt', truncation=True, padding=True)
+     with torch.no_grad():
+         outputs_orig = original_model(**inputs_orig)
+     logits_orig = outputs_orig.logits
+     probs_orig = torch.softmax(logits_orig, dim=1)
+     pred_orig = torch.argmax(probs_orig, dim=1).item()
+     confidence_orig = probs_orig[0][pred_orig].item()
+
+     # Fine-tuned model prediction
+     inputs_fine = fine_tuned_tokenizer(text, return_tensors='pt', truncation=True, padding=True)
+     with torch.no_grad():
+         outputs_fine = fine_tuned_model(**inputs_fine)
+     logits_fine = outputs_fine.logits
+     probs_fine = torch.softmax(logits_fine, dim=1)
+     pred_fine = torch.argmax(probs_fine, dim=1).item()
+     confidence_fine = probs_fine[0][pred_fine].item()
+
+     # Map predictions to labels (adjust based on your model's labels)
+     labels = {0: "Negative", 1: "Positive"}
+
+     result = {
+         "Original Model Prediction": f"{labels[pred_orig]} ({confidence_orig:.2f})",
+         "Fine-Tuned Model Prediction": f"{labels[pred_fine]} ({confidence_fine:.2f})"
+     }
+     return result
+
+ # Gradio Interface
+ iface = gr.Interface(
+     fn=compare_models,
+     inputs=gr.Textbox(lines=5, placeholder="Enter text here...", label="Input Text"),
+     outputs=gr.JSON(label="Model Predictions"),
+     title="Compare Original and Fine-Tuned Models",
+     description="Enter text to see predictions from the original and fine-tuned models."
+ )
+
+ iface.launch()
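
A possible refinement, not part of this commit: instead of hard-coding the {0: "Negative", 1: "Positive"} mapping that the in-code comment says to adjust, the label names could be read from the fine-tuned checkpoint's config via its id2label field. The sketch below is only illustrative and assumes the "Vishwas1/bert-base-imdb" repo is reachable and its config defines meaningful label names; if it only carries the default LABEL_0/LABEL_1 entries, the hard-coded mapping in app.py remains the practical choice.

    from transformers import AutoConfig

    # Sketch: derive the id -> label mapping from the fine-tuned checkpoint's config.
    # Assumes the repo "Vishwas1/bert-base-imdb" is reachable and its config sets id2label.
    config = AutoConfig.from_pretrained("Vishwas1/bert-base-imdb")
    labels = {int(i): name for i, name in config.id2label.items()}  # int() in case keys load as strings
    print(labels)

Note also that loading bert-base-uncased with AutoModelForSequenceClassification attaches a freshly initialized classification head, so the "Original Model Prediction" output is effectively an untrained baseline rather than a meaningful sentiment prediction, which is what makes the side-by-side comparison with the fine-tuned model informative.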