Update app.py
app.py CHANGED
@@ -3,9 +3,8 @@ import io
 import torch
 import uvicorn
 import spacy
-import subprocess #
+import subprocess # For running ffmpeg commands
 import pdfplumber
-# Removed: import moviepy.editor as mp
 import librosa
 import soundfile as sf
 import matplotlib.pyplot as plt
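The completed comment makes the intent of the subprocess import explicit: audio is pulled out of uploaded media by shelling out to ffmpeg rather than going through moviepy. A minimal sketch of that approach, assuming an extract_audio helper and a 16 kHz mono WAV target (the helper name and ffmpeg flags are illustrative, not taken from app.py):

import subprocess

def extract_audio(video_path: str, wav_path: str) -> str:
    # Assumed helper: ffmpeg drops the video stream (-vn) and writes
    # 16 kHz mono PCM audio, which librosa/soundfile can read directly.
    subprocess.run(
        ["ffmpeg", "-y", "-i", video_path, "-vn",
         "-acodec", "pcm_s16le", "-ar", "16000", "-ac", "1", wav_path],
        check=True,
        capture_output=True,
    )
    return wav_path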
@@ -165,6 +164,7 @@ def fine_tune_cuad_model():
     )

     print("✅ Starting fine tuning on CUAD QA dataset...")
+    from transformers import Trainer # Ensure Trainer is imported here
     trainer = Trainer(
         model=model,
         args=training_args,
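Importing Trainer immediately before it is used guarantees the name is bound at this point in the function. For context, a minimal sketch of how the surrounding fine-tuning call typically fits together, assuming the tokenized CUAD splits are named train_dataset/eval_dataset and the output directory matches the fine_tuned_legal_qa path checked later (all assumptions, not taken from app.py):

from transformers import Trainer, TrainingArguments

training_args = TrainingArguments(
    output_dir="fine_tuned_legal_qa",   # assumed to match the path loaded later
    num_train_epochs=1,
    per_device_train_batch_size=8,
)
trainer = Trainer(
    model=model,                  # QA model prepared earlier in fine_tune_cuad_model()
    args=training_args,
    train_dataset=train_dataset,  # assumed: tokenized CUAD QA train split
    eval_dataset=eval_dataset,    # assumed: tokenized CUAD QA eval split
)
trainer.train()
trainer.save_model("fine_tuned_legal_qa")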
@@ -185,9 +185,6 @@ def fine_tune_cuad_model():
 # Load NLP Models #
 #############################

-# ... (other imports and code remain unchanged)
-
-# Load NLP models
 try:
     try:
         nlp = spacy.load("en_core_web_sm")
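The placeholder comments are dropped, leaving the nested try that loads the spaCy model. A sketch of what that pattern usually looks like, assuming the inner fallback downloads en_core_web_sm when it is missing (the actual fallback code in app.py is not shown in this diff):

import subprocess
import sys
import spacy

try:
    try:
        nlp = spacy.load("en_core_web_sm")
    except OSError:
        # Assumed fallback: fetch the model, then retry the load.
        subprocess.run([sys.executable, "-m", "spacy", "download", "en_core_web_sm"], check=True)
        nlp = spacy.load("en_core_web_sm")
except Exception as e:
    print(f"⚠️ Could not load spaCy model: {e}")
    nlp = None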
@@ -211,9 +208,6 @@ try:
     chunk_length_s=30,
     device_map="auto" if torch.cuda.is_available() else "cpu")

-# ... (rest of your model loading code remains unchanged)
-
-
 # Load or Fine Tune CUAD QA Model
 if os.path.exists("fine_tuned_legal_qa"):
     print("✅ Loading fine-tuned CUAD QA model from fine_tuned_legal_qa...")
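With the placeholder comments removed, the file goes straight from loading the speech pipeline into the CUAD QA branch. A sketch of how a load-or-fine-tune branch like this is commonly wired with the standard Transformers QA classes, assuming fine_tuned_legal_qa holds a saved checkpoint and that fine_tune_cuad_model() returns the trained model and tokenizer (variable names here are illustrative):

import os
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline

if os.path.exists("fine_tuned_legal_qa"):
    print("✅ Loading fine-tuned CUAD QA model from fine_tuned_legal_qa...")
    qa_tokenizer = AutoTokenizer.from_pretrained("fine_tuned_legal_qa")
    qa_model = AutoModelForQuestionAnswering.from_pretrained("fine_tuned_legal_qa")
else:
    # Assumed: fall back to fine-tuning when no saved checkpoint exists.
    qa_model, qa_tokenizer = fine_tune_cuad_model()

qa_pipeline = pipeline("question-answering", model=qa_model, tokenizer=qa_tokenizer)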
@@ -747,3 +741,4 @@ if __name__ == "__main__":
     else:
         print("\n⚠️ Ngrok setup failed. API will only be available locally.\n")
         run()
+
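The only change at the bottom of the file is a trailing newline. For orientation, a sketch of the shape of the __main__ block these context lines come from, assuming a pyngrok-based helper, the usual Spaces port 7860, and a FastAPI instance named app defined earlier (all assumptions, not taken from app.py):

import uvicorn
from pyngrok import ngrok

def setup_ngrok(port: int = 7860):
    # Assumed helper: returns a public tunnel on success, None on failure.
    try:
        return ngrok.connect(port)
    except Exception:
        return None

def run():
    uvicorn.run(app, host="0.0.0.0", port=7860)  # app: the FastAPI instance defined earlier

if __name__ == "__main__":
    public_url = setup_ngrok()
    if public_url:
        print(f"\n🌐 Public URL: {public_url}\n")
        run()
    else:
        print("\n⚠️ Ngrok setup failed. API will only be available locally.\n")
        run()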