from fastapi import FastAPI
from pydantic import BaseModel
from huggingface_hub import InferenceClient
import uvicorn
import asyncio

app = FastAPI()
client = InferenceClient(model="Qwen/Qwen2.5-7B-Instruct-GGUF")


class Item(BaseModel):
    prompt: str
    history: list  # list of (user_prompt, bot_response) pairs
    system_prompt: str
    temperature: float = 0.0
    max_new_tokens: int = 1048
    top_p: float = 0.15
    repetition_penalty: float = 1.0


def format_prompt(message, history):
    """Flatten the chat history and the current message into a single
    [INST]-tagged prompt string (Mistral/Llama-style chat template)."""
    prompt = ""
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response} "
    prompt += f"[INST] {message} [/INST]"
    return prompt


def generate(item: Item):
    # text_generation rejects temperature == 0, so clamp to a small positive value.
    temperature = max(float(item.temperature), 1e-2)
    top_p = float(item.top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=item.max_new_tokens,
        top_p=top_p,
        repetition_penalty=item.repetition_penalty,
        do_sample=True,
        seed=42,
    )

    formatted_prompt = format_prompt(f"{item.system_prompt}, {item.prompt}", item.history)
    stream = client.text_generation(
        formatted_prompt,
        **generate_kwargs,
        stream=True,
        details=True,
        return_full_text=False,
    )

    # Accumulate the streamed tokens into the final response string.
    output = ""
    for response in stream:
        output += response.token.text
    return output


@app.post("/generate/")
def generate_text(item: Item):
    # Declared as a plain (sync) endpoint so FastAPI runs the blocking
    # inference call in its thread pool instead of stalling the event loop.
    return {"response": generate(item)}


@app.on_event("startup")
async def preload_model():
    # Check whether the model is already loaded on the inference backend.
    status = client.get_model_status()
    if not status.loaded:
        # Trigger model loading by making a dummy request.
        dummy_prompt = "This is a dummy prompt to load the model."
        client.text_generation(dummy_prompt, max_new_tokens=1)
        # Poll until the model reports as loaded.
        while not client.get_model_status().loaded:
            await asyncio.sleep(5)  # Wait 5 seconds before checking again.


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
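# Usage sketch (an illustrative assumption, not part of the original server
# code): one way a client could call the /generate/ endpoint, assuming the
# server above is running on localhost:8000 and the `requests` package is
# installed. The prompts and parameter values below are placeholders.
#
#   import requests
#
#   payload = {
#       "prompt": "Explain what top_p sampling does.",
#       "history": [],  # list of [user_prompt, bot_response] pairs
#       "system_prompt": "You are a helpful assistant.",
#       "temperature": 0.7,
#   }
#   resp = requests.post("http://localhost:8000/generate/", json=payload)
#   resp.raise_for_status()
#   print(resp.json()["response"])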