from fastapi import FastAPI, Request
from fastapi.responses import HTMLResponse, JSONResponse
import uvicorn
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
from threading import Thread
app = FastAPI()
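
# Minimal FastAPI chat server: serves a static index.html at "/" and exposes a
# /chat endpoint that generates replies with Mistral-7B-Instruct, streaming
# tokens internally and returning the completed response as JSON.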
# Load the model only if CUDA is available (e.g. when running on GPU hardware).
if torch.cuda.is_available():
    model_id = "mistralai/Mistral-7B-Instruct-v0.3"
    model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")
    tokenizer = AutoTokenizer.from_pretrained(model_id)
else:
    model = None
    tokenizer = None
MAX_INPUT_TOKEN_LENGTH = 4096  # hard cap on prompt tokens; older turns are truncated away
def generate_response(message: str, history: list) -> str:
    # Guard against the CPU-only case where no model was loaded at startup.
    if model is None or tokenizer is None:
        return "Model unavailable: this app requires a CUDA-capable GPU."
    # Build the full conversation and format it with the model's chat template.
    conversation = history + [{"role": "user", "content": message}]
    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
    # Keep only the most recent tokens if the prompt exceeds the context budget.
    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
    input_ids = input_ids.to(model.device)
    # Stream decoded tokens as they are produced; generation runs in a background thread.
    streamer = TextIteratorStreamer(tokenizer, timeout=20.0, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = {
        "input_ids": input_ids,
        "streamer": streamer,
        "max_new_tokens": 1024,
        "do_sample": True,
        "top_p": 0.9,
        "top_k": 50,
        "temperature": 0.6,
        "num_beams": 1,
        "repetition_penalty": 1.2,
    }
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()
    # Collect the streamed chunks into a single response string.
    response_text = ""
    for text in streamer:
        response_text += text
    t.join()
    return response_text
@app.post("/chat")
async def chat_endpoint(request: Request):
data = await request.json()
message = data.get("message", "")
# Utilisation d'un historique vide pour simplifier
response_text = generate_response(message, history=[])
return JSONResponse({"response": response_text})
@app.get("/", response_class=HTMLResponse)
async def root():
with open("index.html", "r", encoding="utf-8") as f:
html_content = f.read()
return HTMLResponse(content=html_content, status_code=200)
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
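
# Example request once the server is running (hypothetical local test; adjust
# the host/port if you change uvicorn.run above):
#   curl -X POST http://localhost:8000/chat \
#        -H "Content-Type: application/json" \
#        -d '{"message": "Hello, who are you?"}'
# The endpoint responds with JSON of the form: {"response": "..."}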