tonyhui2234 committed
Commit 71b65a7 · verified · 1 Parent(s): 3f9e16d

Update app.py

Files changed (1)
  1. app.py +3 -7
app.py CHANGED
@@ -50,14 +50,10 @@ def load_finetuned_classifier_model(question):
     print(predicted_label)
     return predicted_label
 
-@st.cache_resource
-def load_model_and_tokenizer():
+# Define your inference function
+def generate_answer(question, fortune):
     tokenizer = AutoTokenizer.from_pretrained("tonyhui2234/finetuned_model_text_gen")
     model = AutoModelForSeq2SeqLM.from_pretrained("tonyhui2234/finetuned_model_text_gen")
-    return tokenizer, model
-
-def generate_answer(question, fortune):
-    tokenizer, model = load_model_and_tokenizer()
     input_text = "Question: " + question + " Fortune: " + fortune
     inputs = tokenizer(input_text, return_tensors="pt", truncation=True)
     outputs = model.generate(
@@ -238,4 +234,4 @@ with right_col:
     st.markdown(summary, unsafe_allow_html=True)
 
     st.text_area("Description", value=description_text, height=150, disabled=True)
-    st.text_area("Detail", value=detail_text, height=150, disabled=True)
+    st.text_area("Detail", value=detail_text, height=150, disabled=True)
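
For context, a minimal sketch of how generate_answer reads after this commit. The hunk truncates at the model.generate( call, so the generation keyword arguments and the final decode step below are illustrative assumptions, not part of the committed file; note that with the @st.cache_resource loader removed, the tokenizer and model are loaded inside the function on every call.

# Hypothetical reconstruction of generate_answer() after this commit; the
# generation kwargs and the decode step are assumptions for illustration.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

def generate_answer(question, fortune):
    # Without the removed @st.cache_resource loader, the weights are
    # (re)loaded each time this function is called.
    tokenizer = AutoTokenizer.from_pretrained("tonyhui2234/finetuned_model_text_gen")
    model = AutoModelForSeq2SeqLM.from_pretrained("tonyhui2234/finetuned_model_text_gen")

    input_text = "Question: " + question + " Fortune: " + fortune
    inputs = tokenizer(input_text, return_tensors="pt", truncation=True)
    outputs = model.generate(**inputs, max_length=64)  # assumed kwargs
    return tokenizer.decode(outputs[0], skip_special_tokens=True)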