# response_handler.py
def generate_response(prompt, tokenizer, model):
    # Tokenize the prompt into PyTorch tensors the model can consume.
    inputs = tokenizer(prompt, return_tensors="pt")
    # Generate up to 200 tokens total (prompt + continuation).
    outputs = model.generate(**inputs, max_length=200)
    # Decode the first generated sequence back to text, dropping special tokens.
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
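

# A minimal usage sketch, not part of the original file: it assumes the
# Hugging Face `transformers` library is installed and uses "gpt2" purely as a
# stand-in model name; any causal LM checkpoint should work the same way.
if __name__ == "__main__":
    from transformers import AutoModelForCausalLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    print(generate_response("Hello, how are you?", tokenizer, model))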