tonyhui2234 committed on
Commit
6bbfa6b
·
verified ·
1 Parent(s): 71bde16

Create app.py

Files changed (1)
  1. app.py +19 -0
app.py ADDED
@@ -0,0 +1,19 @@
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+
+ # Load the saved model and tokenizer
+ tokenizer = AutoTokenizer.from_pretrained("/home/user/app/my_finetuned_model_2/")
+ model = AutoModelForSeq2SeqLM.from_pretrained("/home/user/app/my_finetuned_model_2/")
+
+ # Define your inference function
+ def generate_answer(question, fortune):
+     input_text = "Question: " + question + " Fortune: " + fortune
+     inputs = tokenizer(input_text, return_tensors="pt", truncation=True)
+     outputs = model.generate(**inputs, max_length=256, num_beams=4, early_stopping=True, repetition_penalty=2.0, no_repeat_ngram_size=3)
+     answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     return answer
+
+ # Test the model with a sample input
+ sample_question = "Should I start my own business now?"
+ sample_fortune = "absence of rain causes worry."
+ print("Generated Answer:")
+ print(generate_answer(sample_question, sample_fortune))
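As committed, app.py only runs one hard-coded test and prints the result. If the Space is meant to answer questions interactively, generate_answer could be wrapped in a small UI. The sketch below is one way to do that with Gradio; it is not part of this commit, it assumes the gradio package is installed in the Space, and the textbox labels and __main__ guard are illustrative choices.

import gradio as gr

# Sketch only (not in the committed file): expose generate_answer through a Gradio UI.
demo = gr.Interface(
    fn=generate_answer,                            # reuses the function defined in app.py
    inputs=[
        gr.Textbox(label="Question"),              # illustrative label
        gr.Textbox(label="Fortune"),               # illustrative label
    ],
    outputs=gr.Textbox(label="Generated Answer"),
)

if __name__ == "__main__":
    demo.launch()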