Update app.py
app.py CHANGED

@@ -7,6 +7,8 @@ from PIL import Image
 from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
 import re
 
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
 # --------------------------- Configuration & Session State ---------------------------
 # Define maximum dimensions for the fortune image (in pixels)
 MAX_SIZE = (400, 400)
@@ -56,7 +58,7 @@ def load_finetuned_classifier_model(question):
 # Function to generate a detailed answer by combining the user's question and the fortune detail
 def generate_answer(question, fortune):
     tokenizer = AutoTokenizer.from_pretrained("tonyhui2234/finetuned_model_text_gen")
-    model = AutoModelForSeq2SeqLM.from_pretrained("tonyhui2234/finetuned_model_text_gen")
+    model = AutoModelForSeq2SeqLM.from_pretrained("tonyhui2234/finetuned_model_text_gen", device_map="auto")
     input_text = "Question: " + question + " Fortune: " + fortune
     inputs = tokenizer(input_text, return_tensors="pt", truncation=True)
     outputs = model.generate(
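The commit loads the seq2seq model with device_map="auto", but in the hunks shown the tokenized inputs are still created on the CPU and the added device variable is not used. Below is a minimal sketch of how generate_answer could move the inputs to the model's device before generation; the device handling, generation arguments, and decoding step are illustrative assumptions, not part of the diff.

# Sketch only: mirrors the diff's generate_answer, with assumed device handling added.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

MODEL_ID = "tonyhui2234/finetuned_model_text_gen"  # model id taken from the diff

def generate_answer(question, fortune):
    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
    # device_map="auto" lets accelerate place the model on a GPU when one is available
    model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_ID, device_map="auto")

    input_text = "Question: " + question + " Fortune: " + fortune
    inputs = tokenizer(input_text, return_tensors="pt", truncation=True)
    # Assumption: move the tokenized tensors onto the model's device before generating
    inputs = {k: v.to(model.device) for k, v in inputs.items()}

    # Generation arguments are placeholders; the diff truncates the original call
    outputs = model.generate(**inputs, max_new_tokens=100)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

Note that device_map="auto" requires the accelerate package; without it, an explicit model.to(device) using the newly added device variable would achieve the same single-device placement.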