import gradio as gr
import requests
import json
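# Gradio chat UI that streams completions from a local Ollama server.
# Assumed setup (not guaranteed by this file alone): Ollama is listening on
# http://localhost:11434 and the model "hf.co/ibrahimBlyc/LA_Llama:latest"
# has already been pulled.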
def respond(message, history, system_message, max_tokens, temperature, top_p):
    """Stream a reply from the local Ollama server, token by token."""
    messages = [{"role": "system", "content": system_message}]
    # history is a list of (user, assistant) pairs from gr.ChatInterface
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})
    response = ""
    data = {
        "model": "hf.co/ibrahimBlyc/LA_Llama:latest",
        "prompt": "",  # The full prompt is built below
        "stream": True,  # ask Ollama for a streamed, line-by-line JSON response
        # Ollama expects sampling parameters inside "options";
        # "num_predict" is its name for the maximum number of generated tokens.
        "options": {
            "num_predict": max_tokens,
            "temperature": temperature,
            "top_p": top_p,
            # other Ollama options if needed
        },
    }

    # Build the full prompt from the messages
    prompt = ""
    for msg in messages:
        prompt += f"{msg['role']}: {msg['content']}\n"
    data["prompt"] = prompt
    url = "http://localhost:11434/api/generate"
    try:
        stream_response = requests.post(url, json=data, stream=True)
        stream_response.raise_for_status()  # raises if the status code is not 2xx
        # With streaming enabled, Ollama sends one JSON object per line
        for chunk in stream_response.iter_lines():
            if chunk:
                decoded_chunk = chunk.decode()
                try:
                    response_json = json.loads(decoded_chunk)
                    token = response_json.get("response", "")
                    if token:
                        response += token
                        yield response
                except json.JSONDecodeError as e:
                    print(f"Error while decoding chunk: {e}. Chunk: {decoded_chunk}")
                    yield "Error: unable to decode the server response."
                    return
    except requests.exceptions.RequestException as e:
        print(f"Request error: {e}")
        yield "Error: unable to communicate with the Ollama server."
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)
if __name__ == "__main__":
    demo.launch(share=True)
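# To try this locally (assumed setup, not part of the original Space):
#   1. Start the server:        ollama serve
#   2. Pull the model from HF:  ollama pull hf.co/ibrahimBlyc/LA_Llama
#   3. Run the app:             python app.py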