tejash300 committed on
Commit
3128c1f
·
verified ·
1 Parent(s): b6de26f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -193,11 +193,12 @@ try:
193
  nlp = spacy.load("en_core_web_sm")
194
  print("✅ Loading NLP models...")
195
 
196
- # Initialize summarizer with a slow tokenizer
 
197
  summarizer = pipeline(
198
  "summarization",
199
  model="nsi319/legal-pegasus",
200
- tokenizer=AutoTokenizer.from_pretrained("nsi319/legal-pegasus", use_fast=False),
201
  device=0 if torch.cuda.is_available() else -1
202
  )
203
 
@@ -206,7 +207,6 @@ try:
206
  speech_to_text = pipeline("automatic-speech-recognition", model="openai/whisper-medium", chunk_length_s=30,
207
  device_map="auto" if torch.cuda.is_available() else "cpu")
208
 
209
- # Load or fine tune CUAD QA model
210
  if os.path.exists("fine_tuned_legal_qa"):
211
  print("✅ Loading fine-tuned CUAD QA model from fine_tuned_legal_qa...")
212
  cuad_tokenizer = AutoTokenizer.from_pretrained("fine_tuned_legal_qa")
 
193
  nlp = spacy.load("en_core_web_sm")
194
  print("✅ Loading NLP models...")
195
 
196
+ # Use the slow PegasusTokenizer explicitly
197
+ from transformers import PegasusTokenizer
198
  summarizer = pipeline(
199
  "summarization",
200
  model="nsi319/legal-pegasus",
201
+ tokenizer=PegasusTokenizer.from_pretrained("nsi319/legal-pegasus", use_fast=False),
202
  device=0 if torch.cuda.is_available() else -1
203
  )
204
 
 
207
  speech_to_text = pipeline("automatic-speech-recognition", model="openai/whisper-medium", chunk_length_s=30,
208
  device_map="auto" if torch.cuda.is_available() else "cpu")
209
 
 
210
  if os.path.exists("fine_tuned_legal_qa"):
211
  print("✅ Loading fine-tuned CUAD QA model from fine_tuned_legal_qa...")
212
  cuad_tokenizer = AutoTokenizer.from_pretrained("fine_tuned_legal_qa")