Update app.py
app.py (CHANGED)
@@ -1,28 +1,26 @@
- TITLE = """<h1 align="center">Gemini Playground ✨</h1>"""
- SUBTITLE = """<h2 align="center">Play with Gemini Pro and Gemini Pro Vision</h2>"""
-
  import os
  import time
  import uuid
  from typing import List, Tuple, Optional, Union
  from PIL import Image
  import google.generativeai as genai
  import gradio as gr
- from dotenv import load_dotenv

  # Load environment variables from the .env file
  load_dotenv()

- # Get the API key from the environment variables
  GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
- # Check that the API key is configured
  if not GOOGLE_API_KEY:
      raise ValueError("GOOGLE_API_KEY is not set in environment variables.")

- # Configuration
  generation_config = {
      "temperature": 1,
      "top_p": 0.95,
@@ -31,98 +29,120 @@ generation_config = {
      "response_mime_type": "text/plain",
  }

- model = genai.GenerativeModel(
-     model_name="gemini-1.5-flash",
-     generation_config=generation_config
- )
-
- # Initialize the chat session
- chat = model.start_chat(history=[])
-
- # Helper to transform the Gradio history into Gemini's format
- def transform_history(history):
-     new_history = []
-     for chat_entry in history:
-         new_history.append({"parts": [{"text": chat_entry[0]}], "role": "user"})
-         new_history.append({"parts": [{"text": chat_entry[1]}], "role": "model"})
-     return new_history
-
- # Response function that manages the history
- def bot_response(
-     model_choice: str,
-     system_instruction: str,
-     text_prompt: str,
-     chatbot: list,
- ) -> Tuple[list, str]:
-     """
-     Send the message to the model, get the response, and update the history.
-     """
-     if not text_prompt.strip():
-         return chatbot, "Por favor, escribe un mensaje válido."

      response = chat.send_message(text_prompt)
      response.resolve()
-
-     # Get the text generated by the model
-     generated_text = response.text
-
-     # Update the history with the question and the answer
-     chatbot.append((text_prompt, generated_text))
-
      return chatbot, ""

- #
-     )

  with gr.Blocks() as demo:
      gr.HTML(TITLE)
      gr.HTML(SUBTITLE)
-     with gr.
-     )

  if __name__ == "__main__":
-     demo.
The resulting app.py:

import os
import time
import uuid
from typing import List, Tuple, Optional, Union

from dotenv import load_dotenv
from PIL import Image
import numpy as np
import google.generativeai as genai
import gradio as gr

# Load environment variables from the .env file
load_dotenv()

# Key configuration
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
if not GOOGLE_API_KEY:
    raise ValueError("GOOGLE_API_KEY is not set in environment variables.")
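
For reference, load_dotenv() expects a .env file alongside app.py. A minimal sketch, with a placeholder value:

    GOOGLE_API_KEY=your-api-key-here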

# Gemini configuration
genai.configure(api_key=GOOGLE_API_KEY)

# Common parameters for the model
generation_config = {
    "temperature": 1,
    "top_p": 0.95,
    # ... (two unchanged entries elided in the diff view)
    "response_mime_type": "text/plain",
}

# Model initialization
model = genai.GenerativeModel(model_name="gemini-1.5-flash", generation_config=generation_config)
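
As a quick sanity check that the key and model are wired up, a one-off call can be made at this point. A minimal sketch; it makes a billed API request, so it is left commented out:

# Smoke test (assumes a valid GOOGLE_API_KEY; uncomment to run):
# print(model.generate_content("Say hello in one word.").text)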

# Title and subtitle
TITLE = """<h1 align="center">Gemini Playground ✨</h1>"""
SUBTITLE = """<h2 align="center">Play with Gemini Pro and Gemini Pro Vision</h2>"""

# Transform the Gradio chat history into Gemini's format
def transform_history(history: List[Tuple[str, str]]) -> list:
    # Each Gradio entry is a (user, model) pair, so each entry yields two
    # Gemini messages; non-text parts (e.g. image tuples) are skipped.
    new_history = []
    for user_msg, model_msg in history:
        if isinstance(user_msg, str):
            new_history.append({"parts": [{"text": user_msg}], "role": "user"})
        if isinstance(model_msg, str):
            new_history.append({"parts": [{"text": model_msg}], "role": "model"})
    return new_history
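
To illustrate the mapping with made-up values, one Gradio pair becomes two Gemini messages:

assert transform_history([("Hola", "¡Hola!")]) == [
    {"parts": [{"text": "Hola"}], "role": "user"},
    {"parts": [{"text": "¡Hola!"}], "role": "model"},
]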

# Code for tab 1: basic chat with Gemini
def bot_response(model_choice: str, system_instruction: str, text_prompt: str, chatbot: list) -> Tuple[list, str]:
    if not text_prompt.strip():
        return chatbot, "Por favor, escribe un mensaje válido."
    # Build the model per request so the selected model and the system
    # instruction take effect: a ChatSession has no system_instruction
    # attribute; it must be passed to GenerativeModel (supported in recent
    # google-generativeai versions).
    chat_model = genai.GenerativeModel(
        model_name=model_choice,
        generation_config=generation_config,
        system_instruction=system_instruction or None,
    )
    chat = chat_model.start_chat(history=transform_history(chatbot))
    response = chat.send_message(text_prompt)
    response.resolve()
    chatbot.append((text_prompt, response.text))
    return chatbot, ""
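
A hedged example of calling the handler directly, outside the UI; it requires a valid key and network access, and the prompt text is made up:

state: list = []
state, _ = bot_response("gemini-1.5-flash", "You are an assistant.", "¿Qué es Gradio?", state)
print(state[-1][1])  # the model's reply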

# Code for tab 2: chat with images and advanced configuration
IMAGE_CACHE_DIRECTORY = "/tmp"
IMAGE_WIDTH = 512

def preprocess_image(image: Image.Image) -> Optional[Image.Image]:
    # Downscale to a fixed width, preserving the aspect ratio.
    if image:
        image_height = int(image.height * IMAGE_WIDTH / image.width)
        return image.resize((IMAGE_WIDTH, image_height))

def cache_pil_image(image: Image.Image) -> str:
    # Save the image to the cache directory under a unique name.
    image_filename = f"{uuid.uuid4()}.jpeg"
    os.makedirs(IMAGE_CACHE_DIRECTORY, exist_ok=True)
    image_path = os.path.join(IMAGE_CACHE_DIRECTORY, image_filename)
    image.save(image_path, "JPEG")
    return image_path
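
The resize arithmetic, on a made-up size: a 1024×768 input keeps its 4:3 ratio at the fixed 512-pixel width:

img = Image.new("RGB", (1024, 768))
assert preprocess_image(img).size == (512, 384)  # 768 * 512 / 1024 == 384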

def upload(files: Optional[List[str]], chatbot: list) -> list:
    # Cache each uploaded image and append it to the history as a list
    # (not a tuple) so advanced_response can fill in the reply in place.
    for file in files:
        image = Image.open(file).convert("RGB")
        image = preprocess_image(image) or image
        image_path = cache_pil_image(image)
        chatbot.append([(image_path,), None])
    return chatbot

def advanced_response(
    files: Optional[List[str]], model_choice: str, system_instruction: str, chatbot: list
):
    if not files:
        yield chatbot
        return
    chat_model = genai.GenerativeModel(
        model_name=model_choice,
        generation_config=generation_config,
        system_instruction=system_instruction or None,
    )
    # Pass the PIL images themselves; plain file paths would be sent as text.
    images = [preprocess_image(Image.open(file).convert("RGB")) for file in files]
    response = chat_model.generate_content(images, stream=True)
    chatbot[-1][1] = ""
    for chunk in response:
        chatbot[-1][1] += chunk.text
        yield chatbot
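
A local exercise of the upload path with a generated image; no API call is involved, and the file name is made up:

os.makedirs(IMAGE_CACHE_DIRECTORY, exist_ok=True)
demo_path = os.path.join(IMAGE_CACHE_DIRECTORY, "demo-input.jpeg")
Image.new("RGB", (800, 600), "white").save(demo_path, "JPEG")
print(upload([demo_path], []))  # [[('/tmp/<uuid>.jpeg',), None]]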

# Interface construction
with gr.Blocks() as demo:
    gr.HTML(TITLE)
    gr.HTML(SUBTITLE)
    with gr.Tab("Chat Básico"):
        gr.Markdown("### Chat con Gemini Pro")
        model_dropdown = gr.Dropdown(
            choices=["gemini-1.5-flash", "gemini-2.0-flash-exp", "gemini-1.5-pro"],
            value="gemini-1.5-flash",
            label="Selecciona el modelo"
        )
        chatbot_basic = gr.Chatbot(label="Chat Básico", height=300)
        text_input = gr.Textbox(placeholder="Escribe tu mensaje...", show_label=False)
        system_instruction = gr.Textbox(
            placeholder="Instrucción del sistema...",
            label="Instrucción del sistema",
            value="You are an assistant."
        )
        send_button = gr.Button("Enviar")
        send_button.click(
            bot_response,
            inputs=[model_dropdown, system_instruction, text_input, chatbot_basic],
            outputs=[chatbot_basic, text_input]
        )
    with gr.Tab("Chat Avanzado con Imágenes"):
        gr.Markdown("### Interacción con imágenes y Gemini Pro Vision")
        model_dropdown_adv = gr.Dropdown(
            choices=["gemini-1.5-flash", "gemini-2.0-flash-exp", "gemini-1.5-pro"],
            value="gemini-1.5-flash",
            label="Selecciona el modelo"
        )
        chatbot_advanced = gr.Chatbot(label="Chat Avanzado", height=300)
        text_input_adv = gr.Textbox(placeholder="Mensaje o descripción...")
        system_instruction_adv = gr.Textbox(
            placeholder="Instrucción del sistema...",
            label="Instrucción del sistema"
        )
        upload_button = gr.UploadButton(label="Subir Imágenes", file_types=["image"])
        process_button = gr.Button("Procesar")
        process_button.click(
            advanced_response,
            inputs=[upload_button, model_dropdown_adv, system_instruction_adv, chatbot_advanced],
            outputs=[chatbot_advanced]
        )
        upload_button.upload(
            upload,
            inputs=[upload_button, chatbot_advanced],
            outputs=[chatbot_advanced]
        )

if __name__ == "__main__":
    demo.launch(debug=True)
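
One deployment note: advanced_response is a generator, so its streamed updates go through Gradio's queue. The queue is on by default in Gradio 4.x; on 3.x it must be enabled explicitly. A hedged variant of the entry point:

if __name__ == "__main__":
    demo.queue().launch(debug=True)  # queue() is required for streaming on Gradio 3.x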