Spaces:
Running
Running
baptiste.bernard
committed on
Commit
·
f27ada3
1
Parent(s):
b2b0700
Bot fonctionnelle sans les questions
Browse files
app.py
CHANGED
@@ -10,11 +10,12 @@ from langchain.schema import Document
|
|
10 |
|
11 |
load_dotenv()
|
12 |
hftoken = os.getenv("HFTOKEN")
|
|
|
13 |
prompt_systeme = os.getenv("PROMPT_SYSTEM")
|
14 |
-
questions = os.getenv("QUESTIONS").split(",")
|
15 |
|
16 |
|
17 |
login(token=hftoken)
|
|
|
18 |
client = InferenceClient(model="meta-llama/Llama-3.3-70B-Instruct", token=hftoken)
|
19 |
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
|
20 |
|
@@ -111,22 +112,13 @@ def chatbot_response(message, history, system_message, max_tokens, temperature,
|
|
111 |
print(f"Erreur réponse : {e}")
|
112 |
yield "❌ Une erreur est survenue lors de la génération de la réponse."
|
113 |
|
114 |
-
def handle_question(selected_question):
|
115 |
-
"""Gère la sélection d'une question pré-remplie."""
|
116 |
-
return selected_question.strip()
|
117 |
-
|
118 |
-
|
119 |
-
|
120 |
with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
121 |
with gr.Row():
|
122 |
gr.Image("logo-gaia.png", width=300, height=300, show_label=False, show_download_button=False)
|
123 |
|
124 |
with gr.Row():
|
125 |
gr.Markdown("<h1 style='text-align: center;'>📚 Chatbot GAIA</h1>")
|
126 |
-
|
127 |
-
with gr.Row():
|
128 |
-
gr.Markdown("<p style='text-align: center;'>Exemple : Allez sur <a href='https://www.societe.ninja/' target='_blank'>société.ninja</a> avec votre numéro de SIREN et utilisez un PDF.</p>")
|
129 |
-
|
130 |
with gr.Row():
|
131 |
with gr.Column():
|
132 |
gr.Markdown("## ⚙️ Paramètres")
|
@@ -135,20 +127,11 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
|
135 |
max_tokens = gr.Slider(1, 2048, value=800, step=1, label="Max tokens")
|
136 |
temperature = gr.Slider(0.1, 4.0, value=0.3, step=0.1, label="Température")
|
137 |
top_p = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p")
|
138 |
-
|
|
|
139 |
gr.Markdown("## 📂 Télécharger un fichier")
|
140 |
file_upload = gr.File(label="Téléchargez un PDF ou TXT", file_types=[".pdf", ".txt"], type="filepath")
|
141 |
file_upload.change(embed_documents, inputs=file_upload, outputs=[])
|
142 |
-
gr.Markdown("## ❓ Questions pré-remplies")
|
143 |
-
question_buttons = []
|
144 |
-
for question in questions:
|
145 |
-
button = gr.Button(value=question.strip())
|
146 |
-
question_buttons.append(button)
|
147 |
-
|
148 |
-
selected_question_output = gr.Textbox(label="Question sélectionnée", interactive=False)
|
149 |
-
|
150 |
-
for button in question_buttons:
|
151 |
-
button.click(handle_question, inputs=button, outputs=selected_question_output)
|
152 |
|
153 |
with gr.Column():
|
154 |
gr.Markdown("## 💬 Chat")
|
|
|
10 |
|
11 |
load_dotenv()
|
12 |
hftoken = os.getenv("HFTOKEN")
|
13 |
+
|
14 |
prompt_systeme = os.getenv("PROMPT_SYSTEM")
|
|
|
15 |
|
16 |
|
17 |
login(token=hftoken)
|
18 |
+
|
19 |
client = InferenceClient(model="meta-llama/Llama-3.3-70B-Instruct", token=hftoken)
|
20 |
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
|
21 |
|
|
|
112 |
print(f"Erreur réponse : {e}")
|
113 |
yield "❌ Une erreur est survenue lors de la génération de la réponse."
|
114 |
|
|
|
|
|
|
|
|
|
|
|
|
|
115 |
with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
116 |
with gr.Row():
|
117 |
gr.Image("logo-gaia.png", width=300, height=300, show_label=False, show_download_button=False)
|
118 |
|
119 |
with gr.Row():
|
120 |
gr.Markdown("<h1 style='text-align: center;'>📚 Chatbot GAIA</h1>")
|
121 |
+
|
|
|
|
|
|
|
122 |
with gr.Row():
|
123 |
with gr.Column():
|
124 |
gr.Markdown("## ⚙️ Paramètres")
|
|
|
127 |
max_tokens = gr.Slider(1, 2048, value=800, step=1, label="Max tokens")
|
128 |
temperature = gr.Slider(0.1, 4.0, value=0.3, step=0.1, label="Température")
|
129 |
top_p = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p")
|
130 |
+
|
131 |
+
|
132 |
gr.Markdown("## 📂 Télécharger un fichier")
|
133 |
file_upload = gr.File(label="Téléchargez un PDF ou TXT", file_types=[".pdf", ".txt"], type="filepath")
|
134 |
file_upload.change(embed_documents, inputs=file_upload, outputs=[])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
135 |
|
136 |
with gr.Column():
|
137 |
gr.Markdown("## 💬 Chat")
|