from fastapi import FastAPI
from pydantic import BaseModel
from huggingface_hub import InferenceClient
import uvicorn
import asyncio
app = FastAPI()
# Serverless Inference API client; the model runs remotely and is loaded on demand.
client = InferenceClient(model="Qwen/Qwen2.5-7B")

class Item(BaseModel):
    prompt: str
    history: list  # list of (user_prompt, bot_response) pairs
    system_prompt: str
    temperature: float = 0.0
    max_new_tokens: int = 1048
    top_p: float = 0.15
    repetition_penalty: float = 1.0

def format_prompt(message, history):
    # Mistral-style [INST] chat template; note that Qwen models normally
    # expect ChatML, so adjust this if the prompt format matters for quality.
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
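
# Worked example: for history=[("Hi", "Hello!")] and message="How are you?",
# format_prompt returns:
#   "<s>[INST] Hi [/INST] Hello!</s> [INST] How are you? [/INST]"
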
def generate(item: Item):
    # Sampling needs a strictly positive temperature; clamp to a small floor.
    temperature = max(float(item.temperature), 1e-2)
    top_p = float(item.top_p)
    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=item.max_new_tokens,
        top_p=top_p,
        repetition_penalty=item.repetition_penalty,
        do_sample=True,
        seed=42,
    )
    formatted_prompt = format_prompt(f"{item.system_prompt}, {item.prompt}", item.history)
    stream = client.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
    )
    # Accumulate the streamed tokens into a single response string.
    output = ""
    for response in stream:
        output += response.token.text
    return output

@app.post("/generate/")
def generate_text(item: Item):
    # Declared as a plain def so FastAPI runs the blocking generate() call
    # in its threadpool instead of stalling the event loop.
    return {"response": generate(item)}
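
# Example request (hypothetical local run; field names mirror the Item model):
#   curl -X POST http://localhost:7860/generate/ \
#     -H "Content-Type: application/json" \
#     -d '{"prompt": "Hello", "history": [], "system_prompt": "You are a helpful assistant."}'
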
@app.on_event("startup")  # deprecated in newer FastAPI in favor of lifespan handlers
async def preload_model():
    # Check if the model is already loaded on the Inference API.
    status = client.get_model_status()
    if not status.loaded:
        # Trigger model loading by making a dummy request.
        dummy_prompt = "This is a dummy prompt to load the model."
        client.text_generation(dummy_prompt, max_new_tokens=1)
        # Optionally, wait until the model is loaded.
        while not client.get_model_status().loaded:
            await asyncio.sleep(5)  # wait 5 seconds before checking again
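
# uvicorn is imported above but never used; a typical entry point would be the
# sketch below (port 7860 is the convention for Docker-based Hugging Face
# Spaces; an assumption here, so adjust to your deployment).
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)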