File size: 929 Bytes
6bbfa6b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Restore the fine-tuned seq2seq checkpoint and its matching tokenizer
# from the local directory where training saved them.
MODEL_DIR = "/home/user/app/my_finetuned_model_2/"
tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_DIR)

# Define your inference function
def generate_answer(question: str, fortune: str) -> str:
    """Generate an answer for *question*, conditioned on *fortune*.

    The two strings are joined into the prompt format the model was
    fine-tuned on ("Question: ... Fortune: ..."), run through beam-search
    generation, and decoded back to plain text.

    Args:
        question: The user's question.
        fortune: The fortune text used as additional context.

    Returns:
        The decoded model output with special tokens stripped.
    """
    input_text = f"Question: {question} Fortune: {fortune}"
    # Truncate so overly long prompts don't exceed the model's max input length.
    inputs = tokenizer(input_text, return_tensors="pt", truncation=True)
    # Inference only — disable gradient tracking to save memory and time.
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_length=256,
            num_beams=4,
            early_stopping=True,
            repetition_penalty=2.0,  # discourage the model from looping on phrases
            no_repeat_ngram_size=3,
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Smoke-test the pipeline with one hard-coded question/fortune pair.
demo_question = "Should I start my own business now?"
demo_fortune = "absence of rain causes worry."
print("Generated Answer:")
print(generate_answer(demo_question, demo_fortune))