Update app.py
app.py CHANGED

@@ -50,14 +50,10 @@ def load_finetuned_classifier_model(question):
     print(predicted_label)
     return predicted_label
 
-
-def load_model_and_tokenizer():
+# Define your inference function
+def generate_answer(question, fortune):
     tokenizer = AutoTokenizer.from_pretrained("tonyhui2234/finetuned_model_text_gen")
     model = AutoModelForSeq2SeqLM.from_pretrained("tonyhui2234/finetuned_model_text_gen")
-    return tokenizer, model
-
-def generate_answer(question, fortune):
-    tokenizer, model = load_model_and_tokenizer()
     input_text = "Question: " + question + " Fortune: " + fortune
     inputs = tokenizer(input_text, return_tensors="pt", truncation=True)
     outputs = model.generate(

@@ -238,4 +234,4 @@ with right_col:
     st.markdown(summary, unsafe_allow_html=True)
 
     st.text_area("Description", value=description_text, height=150, disabled=True)
-    st.text_area("Detail", value=detail_text, height=150, disabled=True)
+    st.text_area("Detail", value=detail_text, height=150, disabled=True)
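The first hunk folds model loading into the inference function itself, and the diff is truncated at model.generate(, so the generation arguments and the decoding step are not visible in the commit. A minimal sketch of how the consolidated generate_answer plausibly reads after this change, with the generation kwargs and the final decode step as assumptions rather than lines taken from the commit:

# Sketch of the consolidated inference function after this commit.
# The generation kwargs and the decode step are assumed, not shown in the diff.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

def generate_answer(question, fortune):
    # Tokenizer and model are now loaded inside the function on every call,
    # since the load_model_and_tokenizer() helper was removed.
    tokenizer = AutoTokenizer.from_pretrained("tonyhui2234/finetuned_model_text_gen")
    model = AutoModelForSeq2SeqLM.from_pretrained("tonyhui2234/finetuned_model_text_gen")

    input_text = "Question: " + question + " Fortune: " + fortune
    inputs = tokenizer(input_text, return_tensors="pt", truncation=True)
    outputs = model.generate(
        **inputs,
        max_length=128,     # assumed value; not visible in the diff
        num_beams=4,        # assumed value; not visible in the diff
        early_stopping=True,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

Because the load_model_and_tokenizer() helper was removed, the checkpoint is now fetched and loaded on every call; in a Streamlit app, a cached loader (for example one wrapped in st.cache_resource) is a common way to avoid that cost, though this commit does not add one.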