# backend/llm_utils.py
# Uploaded by redfernstech ("Upload 5 files", commit 6537124, 246 bytes).
# NOTE(review): the lines above were Hugging Face Hub page chrome scraped
# along with the file; converted to a comment so the module parses.
from transformers import pipeline
# Load the Hugging Face text-generation pipeline once at import time.
# Use max_new_tokens (bounds only the generated continuation) instead of the
# deprecated max_length, which also counted the prompt's tokens — with
# max_length=100 a long prompt could leave no room for any generation.
llm = pipeline("text-generation", model="gpt2", max_new_tokens=100)
def get_llm_response(prompt: str) -> str:
    """Generate and return the model's continuation of *prompt*.

    By default the text-generation pipeline echoes the prompt at the start
    of ``generated_text``; ``return_full_text=False`` makes it return only
    the newly generated text, which is what a "response" should be.

    Args:
        prompt: The input text to continue.

    Returns:
        The generated continuation (without the prompt prefix).
    """
    response = llm(prompt, return_full_text=False)
    return response[0]["generated_text"]