Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -3,7 +3,7 @@ SUBTITLE = """<h2 align="center">Play with Gemini Pro and Gemini Pro Vision</h2>
|
|
3 |
|
4 |
import os
|
5 |
import time
|
6 |
-
from typing import Optional
|
7 |
|
8 |
import google.generativeai as genai
|
9 |
import gradio as gr
|
@@ -35,12 +35,13 @@ def bot_response(
|
|
35 |
model_choice: str,
|
36 |
system_instruction: Optional[str],
|
37 |
text_prompt: str,
|
38 |
-
|
|
|
39 |
"""
|
40 |
-
Envía el mensaje al modelo y obtiene la respuesta.
|
41 |
"""
|
42 |
if not text_prompt.strip():
|
43 |
-
return
|
44 |
|
45 |
model = genai.GenerativeModel(
|
46 |
model_name=model_choice,
|
@@ -49,14 +50,20 @@ def bot_response(
|
|
49 |
)
|
50 |
|
51 |
response = model.generate_content([text_prompt], stream=True, generation_config=generation_config)
|
52 |
-
generated_text = ""
|
53 |
|
|
|
|
|
54 |
for chunk in response:
|
55 |
for i in range(0, len(chunk.text), 10): # Mostrar texto en partes
|
56 |
section = chunk.text[i:i + 10]
|
57 |
generated_text += section
|
58 |
time.sleep(0.01)
|
59 |
-
|
|
|
|
|
|
|
|
|
|
|
60 |
|
61 |
# Componentes de la interfaz
|
62 |
chatbot_component = gr.Chatbot(label="Gemini", scale=2, height=300)
|
@@ -91,14 +98,14 @@ with gr.Blocks() as demo:
|
|
91 |
# Configurar eventos
|
92 |
run_button_component.click(
|
93 |
fn=bot_response,
|
94 |
-
inputs=[model_dropdown_component, system_instruction_component, text_input_component],
|
95 |
-
outputs=[chatbot_component],
|
96 |
)
|
97 |
|
98 |
text_input_component.submit(
|
99 |
fn=bot_response,
|
100 |
-
inputs=[model_dropdown_component, system_instruction_component, text_input_component],
|
101 |
-
outputs=[chatbot_component],
|
102 |
)
|
103 |
|
104 |
# Lanzar la aplicación
|
|
|
3 |
|
4 |
import os
|
5 |
import time
|
6 |
+
from typing import Optional, Tuple
|
7 |
|
8 |
import google.generativeai as genai
|
9 |
import gradio as gr
|
|
|
35 |
model_choice: str,
|
36 |
system_instruction: Optional[str],
|
37 |
text_prompt: str,
|
38 |
+
chatbot: list,
|
39 |
+
) -> Tuple[list, str]:
|
40 |
"""
|
41 |
+
Envía el mensaje al modelo y obtiene la respuesta, actualizando el historial del chatbot.
|
42 |
"""
|
43 |
if not text_prompt.strip():
|
44 |
+
return chatbot, "Por favor, escribe un mensaje válido."
|
45 |
|
46 |
model = genai.GenerativeModel(
|
47 |
model_name=model_choice,
|
|
|
50 |
)
|
51 |
|
52 |
response = model.generate_content([text_prompt], stream=True, generation_config=generation_config)
|
|
|
53 |
|
54 |
+
# Preparar el texto generado
|
55 |
+
generated_text = ""
|
56 |
for chunk in response:
|
57 |
for i in range(0, len(chunk.text), 10): # Mostrar texto en partes
|
58 |
section = chunk.text[i:i + 10]
|
59 |
generated_text += section
|
60 |
time.sleep(0.01)
|
61 |
+
chatbot.append((text_prompt, generated_text))
|
62 |
+
yield chatbot, ""
|
63 |
+
|
64 |
+
# Devolver el historial actualizado
|
65 |
+
chatbot.append((text_prompt, generated_text))
|
66 |
+
return chatbot, ""
|
67 |
|
68 |
# Componentes de la interfaz
|
69 |
chatbot_component = gr.Chatbot(label="Gemini", scale=2, height=300)
|
|
|
98 |
# Configurar eventos
|
99 |
run_button_component.click(
|
100 |
fn=bot_response,
|
101 |
+
inputs=[model_dropdown_component, system_instruction_component, text_input_component, chatbot_component],
|
102 |
+
outputs=[chatbot_component, text_input_component],
|
103 |
)
|
104 |
|
105 |
text_input_component.submit(
|
106 |
fn=bot_response,
|
107 |
+
inputs=[model_dropdown_component, system_instruction_component, text_input_component, chatbot_component],
|
108 |
+
outputs=[chatbot_component, text_input_component],
|
109 |
)
|
110 |
|
111 |
# Lanzar la aplicación
|