Update app.py
app.py CHANGED
@@ -1,79 +1,26 @@
-import os
-import time
 import gradio as gr
-import google.generativeai as genai
-
-# Load the API key from the environment
-GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
-
-def transform_history(history):
-    ...
-    return new_history
-
-def bot(files, model_choice, system_instruction, history):
-    chat_history = transform_history(history)
-
-    if system_instruction:
-        chat_history.insert(0, {"role": "system", "content": system_instruction})
-
-    # Flash 1.5 model configuration
-    generation_config = genai.types.GenerationConfig(
-        temperature=0.7,
-        max_output_tokens=8192,
-        top_k=10,
-        top_p=0.9
-    )
-
-    # If files are included, process that multimodal input
-    if files:
-        for file_path in files:
-            with open(file_path, "r") as file:
-                file_content = file.read()
-            chat_history.append({"role": "user", "content": f"Uploaded file: {file_content}"})
-
-    response = genai.ChatCompletion.create(
-        model=model_choice,
-        messages=chat_history,
-        generation_config=generation_config
-    )
-
-    reply = response['candidates'][0]['content']
-    for i in range(len(reply)):
-        time.sleep(0.05)
-        yield history + [[None, reply[:i + 1]]]  # Append the reply to the history progressively
-
-# Gradio interface
-with gr.Blocks() as demo:
-    chatbot = gr.Chatbot(elem_id="chatbot", bubble_full_width=False, type="messages")
-
-    chat_input = gr.Textbox(
-        placeholder="Type a message...",
-        show_label=False
-    )
-
-    submit_btn = gr.Button("Send")
-    system_input = gr.Textbox(placeholder="System instruction (optional)", show_label=True, lines=2)
-    model_choice = gr.Dropdown(choices=["gemini-1.5-flash"], value="gemini-1.5-flash", label="Model")
-    file_input = gr.File(label="Upload file (optional)", file_types=[".txt", ".md", ".json"])
-
-    # Handle message submission
-    submit_btn.click(
-        bot,
-        inputs=[file_input, model_choice, system_input, chatbot],
-        outputs=chatbot
-    )
-
+import random
+import time
+
+with gr.Blocks() as demo:
+    chatbot = gr.Chatbot(type="messages")
+    msg = gr.Textbox()
+    clear = gr.Button("Clear")
+
+    def user(user_message, history: list):
+        return "", history + [{"role": "user", "content": user_message}]
+
+    def bot(history: list):
+        bot_message = random.choice(["How are you?", "I love you", "I'm very hungry"])
+        history.append({"role": "assistant", "content": ""})
+        for character in bot_message:
+            history[-1]['content'] += character
+            time.sleep(0.05)
+            yield history
+
+    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
+        bot, chatbot, chatbot
+    )
+    clear.click(lambda: None, None, chatbot, queue=False)
+
 demo.launch()
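
One note on the removed code: genai.ChatCompletion.create is not part of the google-generativeai SDK (that naming follows the OpenAI client), so the old handler would have failed with an AttributeError. For reference only, a minimal sketch of the same intent against the actual google-generativeai interface; the history mapping is an assumption, since the body of transform_history did not survive in the diff, and the file-upload branch is omitted for brevity:

# Sketch only: approximates the removed handler using the real
# google-generativeai API; the transform_history body is a guess.
import os
import google.generativeai as genai

genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))

def transform_history(history):
    # Assumed: Gradio "messages" history is a list of
    # {"role": ..., "content": ...} dicts; map it to the SDK's format.
    return [{"role": "user" if m["role"] == "user" else "model",
             "parts": [m["content"]]}
            for m in history]

def bot(model_choice, system_instruction, history):
    model = genai.GenerativeModel(
        model_choice,
        system_instruction=system_instruction or None,
    )
    response = model.generate_content(
        transform_history(history),
        generation_config=genai.types.GenerationConfig(
            temperature=0.7, max_output_tokens=8192, top_k=10, top_p=0.9
        ),
    )
    return response.text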
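
The replacement file is essentially the streaming-chatbot demo from the Gradio docs: user returns immediately (clearing the textbox and appending the message), and the generator bot yields the history after each character, so the last assistant message renders progressively. If the extra Blocks layout is not needed, the same behavior fits in a few lines with gr.ChatInterface; a sketch, assuming a recent Gradio release:

import random
import time
import gradio as gr

def respond(message, history):
    # Stream the canned reply one character at a time, as app.py does.
    reply = random.choice(["How are you?", "I love you", "I'm very hungry"])
    for i in range(len(reply)):
        time.sleep(0.05)
        yield reply[: i + 1]

gr.ChatInterface(respond, type="messages").launch()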