Spaces:
Paused
Paused
Update app.py
Browse files
app.py
CHANGED
@@ -52,13 +52,18 @@ def load_finetuned_classifier_model(question):
|
|
52 |
|
53 |
# Define your inference function
|
54 |
def generate_answer(question, fortune):
|
55 |
-
|
56 |
-
|
57 |
-
model = AutoModelForSeq2SeqLM.from_pretrained("/home/user/app/my_finetuned_model_2/")
|
58 |
-
|
59 |
input_text = "Question: " + question + " Fortune: " + fortune
|
60 |
inputs = tokenizer(input_text, return_tensors="pt", truncation=True)
|
61 |
-
outputs = model.generate(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
62 |
answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
|
63 |
return answer
|
64 |
|
|
|
# Hub ID of the fine-tuned seq2seq model used for answer generation.
_GEN_MODEL_ID = "tonyhui2234/finetuned_model_text_gen"

# Lazily-initialized (tokenizer, model) pair. Loading from the Hub is slow
# (and may hit the network), so it must happen once, not on every request.
_GEN_COMPONENTS = None


def _load_generation_components():
    """Return the cached (tokenizer, model) pair, loading them on first use."""
    global _GEN_COMPONENTS
    if _GEN_COMPONENTS is None:
        tokenizer = AutoTokenizer.from_pretrained(_GEN_MODEL_ID)
        model = AutoModelForSeq2SeqLM.from_pretrained(_GEN_MODEL_ID)
        _GEN_COMPONENTS = (tokenizer, model)
    return _GEN_COMPONENTS


# Define your inference function
def generate_answer(question, fortune):
    """Generate an answer for *question* conditioned on *fortune*.

    The two inputs are concatenated into a single prompt, tokenized, and run
    through the fine-tuned seq2seq model with beam search. Repetition is
    discouraged via ``repetition_penalty`` and ``no_repeat_ngram_size``.

    Args:
        question: The user's question text.
        fortune: The fortune text to condition the answer on.

    Returns:
        The decoded answer string (special tokens stripped).
    """
    tokenizer, model = _load_generation_components()
    input_text = "Question: " + question + " Fortune: " + fortune
    inputs = tokenizer(input_text, return_tensors="pt", truncation=True)
    outputs = model.generate(
        **inputs,
        max_length=256,
        num_beams=4,
        early_stopping=True,
        repetition_penalty=2.0,
        no_repeat_ngram_size=3,
    )
    answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return answer