import gradio as gr
import requests
import json


def respond(message, history, system_message, max_tokens, temperature, top_p):
    # Rebuild the conversation as a list of role/content messages.
    messages = [{"role": "system", "content": system_message}]
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})
    messages.append({"role": "user", "content": message})

    # Flatten the messages into a single prompt string, since Ollama's
    # /api/generate endpoint takes a raw prompt rather than a message list.
    prompt = ""
    for msg in messages:
        prompt += f"{msg['role']}: {msg['content']}\n"

    data = {
        "model": "hf.co/ibrahimBlyc/LA_Llama:latest",
        "prompt": prompt,
        "stream": True,
        # Sampling parameters must go under "options"; num_predict is
        # Ollama's equivalent of max_tokens. Other Ollama options can be
        # added here if needed.
        "options": {
            "num_predict": max_tokens,
            "temperature": temperature,
            "top_p": top_p,
        },
    }

    url = "http://localhost:11434/api/generate"
    response = ""
    try:
        stream_response = requests.post(url, json=data, stream=True)
        stream_response.raise_for_status()  # Raise if the status code is not 2xx.

        # Each line of the streamed response is a standalone JSON object;
        # parse it with json.loads() rather than eval(), which would execute
        # arbitrary expressions from the server.
        for chunk in stream_response.iter_lines():
            if not chunk:
                continue
            decoded_chunk = chunk.decode()
            try:
                response_json = json.loads(decoded_chunk)
                token = response_json.get("response", "")
                if token:
                    response += token
                    yield response
            except json.JSONDecodeError as e:
                print(f"Error decoding chunk: {e}. Chunk: {decoded_chunk}")
                yield "Error: could not decode the server response."
                return

    except requests.exceptions.RequestException as e:
        print(f"Request error: {e}")
        yield "Error: could not reach the Ollama server."


demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)

if __name__ == "__main__":
    demo.launch(share=True)
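
# Usage sketch (assumptions: Ollama is installed and running locally on its
# default port, the model tag below has been pulled, and this file is saved
# as app.py -- the filename is hypothetical, adjust to your setup):
#
#   ollama pull hf.co/ibrahimBlyc/LA_Llama:latest
#   python app.py   # then open the Gradio URL printed in the terminal
#
# The script talks to Ollama's default endpoint at http://localhost:11434;
# change the `url` variable if your server listens elsewhere.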