Vishwas1 committed on
Commit 3a2eb19 · verified · 1 Parent(s): 8993d3b

Update app.py

Files changed (1):
  1. app.py +10 -4
app.py CHANGED
@@ -1,12 +1,19 @@
 import gradio as gr
-from transformers import AutoTokenizer, AutoModelForCausalLM
+from transformers import AutoTokenizer, AutoModelForCausalLM, GPT2Tokenizer
 import torch
 from huggingface_hub import login
 import os
 
-# Load text generation model
+# Load text generation model with fallback for tokenizer
 def load_model(model_name):
-    tokenizer = AutoTokenizer.from_pretrained(model_name)
+    try:
+        # Try loading the fast tokenizer first
+        tokenizer = AutoTokenizer.from_pretrained(model_name)
+    except Exception as e:
+        print(f"Fast tokenizer not available for {model_name}. Falling back to regular tokenizer. Error: {e}")
+        # If the fast tokenizer is not available, fall back to the regular tokenizer
+        tokenizer = GPT2Tokenizer.from_pretrained(model_name)
+
     model = AutoModelForCausalLM.from_pretrained(model_name)
     return tokenizer, model
 
@@ -61,4 +68,3 @@ iface = gr.Interface(
 )
 
 iface.launch()
-
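
For context, a minimal standalone sketch of how the updated load_model could be exercised outside the Gradio interface. The model name "gpt2", the prompt, and the generation settings below are illustrative assumptions, not part of this commit; app.py itself wires load_model into the gr.Interface shown at the end of the diff.

# Sketch only: assumes "gpt2" as the model name and greedy decoding with a short output.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, GPT2Tokenizer

def load_model(model_name):
    try:
        # Prefer the fast tokenizer when the model repository provides one
        tokenizer = AutoTokenizer.from_pretrained(model_name)
    except Exception as e:
        print(f"Fast tokenizer not available for {model_name}. Falling back to regular tokenizer. Error: {e}")
        # Slow Python tokenizer; only appropriate for GPT-2-style vocabularies
        tokenizer = GPT2Tokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    return tokenizer, model

if __name__ == "__main__":
    tokenizer, model = load_model("gpt2")  # assumed model name
    inputs = tokenizer("Once upon a time", return_tensors="pt")
    with torch.no_grad():
        output_ids = model.generate(**inputs, max_new_tokens=30)
    print(tokenizer.decode(output_ids[0], skip_special_tokens=True))

Note that GPT2Tokenizer is a reasonable fallback only when the target checkpoint actually uses a GPT-2 byte-level BPE vocabulary; for other model families the except branch would need a matching slow tokenizer class.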