Vishwas1 committed on
Commit 3bc0a38 · verified · 1 Parent(s): c5dedae

Update app.py

Files changed (1)
  1. app.py +11 -8
app.py CHANGED
@@ -1,5 +1,5 @@
 import gradio as gr
-from transformers import AutoTokenizer, AutoModelForSequenceClassification
+from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 from huggingface_hub import login
 import os
@@ -16,20 +16,21 @@ fine_tuned_model_name = "Vishwas1/hummingbird-finetuned-marathi" # Replace with
 
 # Load models
 hf_token = os.getenv('HUGGINGFACE_TOKEN')
-if not hf_token:
-    return "Error: Hugging Face token not found. Please set it as a secret."
-
+if not hf_token:
+    raise ValueError("Error: Hugging Face token not found. Please set it as an environment variable.")
+
 # Login to Hugging Face Hub
 login(hf_token)
-original_tokenizer = AutoTokenizer.from_pretrained(original_model_name)
-original_model = AutoModelForSequenceClassification.from_pretrained(original_model_name)
-fine_tuned_tokenizer = AutoTokenizer.from_pretrained(fine_tuned_model_name)
-fine_tuned_model = AutoModelForSequenceClassification.from_pretrained(fine_tuned_model_name)
+
+# Load the original and fine-tuned models
+original_tokenizer, original_model = load_model(original_model_name)
+fine_tuned_tokenizer, fine_tuned_model = load_model(fine_tuned_model_name)
 
 # Ensure models are in evaluation mode
 original_model.eval()
 fine_tuned_model.eval()
 
+# Function to compare models
 def compare_models(text):
     # Original model prediction
     inputs_orig = original_tokenizer(text, return_tensors='pt', truncation=True, padding=True)
@@ -68,3 +69,5 @@ iface = gr.Interface(
 )
 
 iface.launch()
+
+
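Note: the updated module-level code calls a load_model() helper that does not appear in this diff, so it is presumably defined in an unchanged part of app.py. A minimal sketch of what such a helper could look like, assuming it simply wraps the from_pretrained calls for the causal-LM checkpoints and returns a (tokenizer, model) pair; the helper's exact signature and behavior are assumptions, not shown in this commit:

def load_model(model_name):
    # Hypothetical helper (not part of this diff): load the tokenizer and
    # causal LM for a given checkpoint and return them as a pair.
    # Assumes login(hf_token) has already authenticated access to the repos.
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    return tokenizer, model

With this shape, the two calls in the diff, load_model(original_model_name) and load_model(fine_tuned_model_name), would return the tokenizer/model pairs that the rest of the script already expects.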