Update app.py
app.py CHANGED
@@ -1,42 +1,44 @@
 import gradio as gr
 import requests
 import os
+import spaces
 
-
+
+API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct"
 api_token = os.environ.get("TOKEN")
 headers = {"Authorization": f"Bearer {api_token}"}
-
+@spaces.GPU
 def query(payload):
     response = requests.post(API_URL, headers=headers, json=payload)
     return response.json()
 
-def
-    # Create a prompt that forces the model to choose between Oui and Non
-    prompt = {question}
-
-    # Configure the parameters to force a short answer
+def generate_response(prompt):
     payload = {
         "inputs": prompt,
         "parameters": {
-            "max_new_tokens":
-            "
-            "
-            "
-            "temperature": 0.7
+            "max_new_tokens": 100,
+            "temperature": 0.7,
+            "top_p": 0.95,
+            "do_sample": True
         }
     }
 
     response = query(payload)
 
-
+    if isinstance(response, list) and len(response) > 0:
+        return response[0].get('generated_text', '')
+    elif isinstance(response, dict) and 'generated_text' in response:
+        return response['generated_text']
+    return "Sorry, I could not generate a response."
+
 def chatbot(message, history):
-    response =
+    response = generate_response(message)
     return response
 
 iface = gr.ChatInterface(
     fn=chatbot,
-    title="Chatbot
-    description="
+    title="Chatbot Meta-Llama-3-8B-Instruct",
+    description="Interact with the Meta-Llama-3-8B-Instruct model."
 )
 
 iface.launch()
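For reference, the request/response round trip that the updated query() and generate_response() perform can be tried outside of Gradio. The snippet below is only an illustrative sketch, not part of the commit: the prompt string is made up, and it assumes the Hugging Face Inference API's usual text-generation response shape (a list of dicts carrying a "generated_text" key on success, a plain dict such as an error object otherwise), which is what the new parsing branches on.

# Illustrative sketch only: same Inference API round trip as app.py, run standalone.
import os
import requests

API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct"
headers = {"Authorization": f"Bearer {os.environ.get('TOKEN')}"}

payload = {
    "inputs": "Hello, how are you?",  # hypothetical prompt
    "parameters": {"max_new_tokens": 100, "temperature": 0.7, "top_p": 0.95, "do_sample": True},
}
response = requests.post(API_URL, headers=headers, json=payload).json()

# Success usually looks like [{"generated_text": "..."}]; errors come back as a dict,
# which is why generate_response() checks both shapes before returning a fallback message.
if isinstance(response, list) and response:
    print(response[0].get("generated_text", ""))
else:
    print(response)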