import os
import uuid
from typing import List, Optional, Tuple
from PIL import Image
import google.generativeai as genai
import gradio as gr
from dotenv import load_dotenv
# Load environment variables from the .env file
load_dotenv()
# API key configuration
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
if not GOOGLE_API_KEY:
    raise ValueError("GOOGLE_API_KEY is not set in environment variables.")
genai.configure(api_key=GOOGLE_API_KEY)
# General configuration
TITLE = """<h1 align="center">Gemini Playground ✨</h1>"""
SUBTITLE = """<h2 align="center">Play with Gemini Pro and Gemini Pro Vision</h2>"""
IMAGE_CACHE_DIRECTORY = "/tmp"
IMAGE_WIDTH = 512
# Resize an image to a fixed width, preserving its aspect ratio
def preprocess_image(image: Image.Image) -> Optional[Image.Image]:
    if image:
        image_height = int(image.height * IMAGE_WIDTH / image.width)
        return image.resize((IMAGE_WIDTH, image_height))
    return None
# Save a PIL image to the cache directory and return its file path
def cache_pil_image(image: Image.Image) -> str:
    image_filename = f"{uuid.uuid4()}.jpeg"
    os.makedirs(IMAGE_CACHE_DIRECTORY, exist_ok=True)
    image_path = os.path.join(IMAGE_CACHE_DIRECTORY, image_filename)
    image.save(image_path, "JPEG")
    return image_path
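# Helper: Gradio's Chatbot stores history as (user, bot) tuples, while
# google-generativeai's start_chat expects a list of {"role", "parts"} dicts.
# A minimal conversion sketch, assuming that history format:
def to_genai_history(chatbot: List[Tuple[str, str]]) -> List[dict]:
    history = []
    for user_msg, bot_msg in chatbot:
        # Skip empty turns (e.g. placeholder entries) to avoid empty parts.
        if user_msg:
            history.append({"role": "user", "parts": [user_msg]})
        if bot_msg:
            history.append({"role": "model", "parts": [bot_msg]})
    return history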
# Tab 1: Text-only chatbot with history
def bot_response(
    model_choice: str,
    system_instruction: str,
    text_prompt: str,
    chatbot: List[Tuple[str, str]],
) -> Tuple[List[Tuple[str, str]], str]:
    if not text_prompt.strip():
        return chatbot, "Please enter a valid message."
    # The system instruction must be set on the model itself; assigning it to
    # the ChatSession afterwards has no effect.
    model = genai.GenerativeModel(
        model_name=model_choice,
        system_instruction=system_instruction or None,
        generation_config={
            "temperature": 1,
            "top_p": 0.95,
            "top_k": 40,
            "max_output_tokens": 8192,
        },
    )
    chat = model.start_chat(history=to_genai_history(chatbot))
    response = chat.send_message(text_prompt)
    response.resolve()
    chatbot.append((text_prompt, response.text))
    return chatbot, ""
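# Example usage (hypothetical prompt; requires a valid GOOGLE_API_KEY):
#   history, cleared_input = bot_response(
#       "gemini-1.5-flash", "You are an assistant.", "Hello!", []
#   )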
# Tab 2: Advanced chatbot with image support
def upload(files: Optional[List[gr.File]], chatbot: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
    if files:
        for file in files:
            image = Image.open(file.name).convert("RGB")
            image_preview = preprocess_image(image)
            if image_preview:
                image_path = cache_pil_image(image_preview)
                chatbot.append((f"Uploaded image: {image_path}", "Image uploaded successfully."))
    return chatbot
def advanced_response(
    text_prompt: str,
    files: Optional[List[gr.File]],
    model_choice: str,
    system_instruction: str,
    chatbot: List[Tuple[str, str]],
):
    if not text_prompt.strip() and not files:
        chatbot.append(("", "Please provide a message or upload an image."))
        yield chatbot
        return
    model = genai.GenerativeModel(
        model_name=model_choice,
        system_instruction=system_instruction or None,
        generation_config={
            "temperature": 0.7,
            "max_output_tokens": 8192,
            "top_k": 10,
            "top_p": 0.9,
        },
    )
    chat = model.start_chat(history=to_genai_history(chatbot))
    # Build a multimodal message: the text prompt plus any uploaded images.
    # Images are converted to RGB so they can be cached as JPEG.
    parts = []
    if text_prompt:
        parts.append(text_prompt)
    image_paths = []
    if files:
        for file in files:
            image = preprocess_image(Image.open(file.name).convert("RGB"))
            image_paths.append(cache_pil_image(image))
            parts.append(image)
    # Show the user's turn in the chat window, then fill in the model's reply.
    user_label = text_prompt or f"Uploaded images: {', '.join(image_paths)}"
    chatbot.append((user_label, ""))
    response = chat.send_message(parts)
    response.resolve()
    chatbot[-1] = (chatbot[-1][0], response.text)
    yield chatbot
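# Example usage outside Gradio (hypothetical image path; requires a valid
# GOOGLE_API_KEY; any object with a .name attribute works as a "file"):
#   from types import SimpleNamespace
#   turns = list(advanced_response(
#       "Describe this picture", [SimpleNamespace(name="/tmp/example.jpg")],
#       "gemini-1.5-flash", "You are an assistant.", []
#   ))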
# Build the Gradio interface
def build_interface():
    with gr.Blocks() as demo:
        gr.HTML(TITLE)
        gr.HTML(SUBTITLE)
        with gr.Tab("Tab 1: Text Chatbot"):
            model_dropdown_1 = gr.Dropdown(
                choices=["gemini-1.5-flash", "gemini-2.0-flash-exp", "gemini-1.5-pro"],
                value="gemini-1.5-flash",
                label="Select the model",
            )
            chatbot_1 = gr.Chatbot(label="Gemini", height=300)
            system_instruction_1 = gr.Textbox(
                placeholder="Write a system instruction...",
                label="System instruction",
                value="You are an assistant.",
            )
            with gr.Row():
                text_input_1 = gr.Textbox(placeholder="Write a message...", show_label=False)
                run_button_1 = gr.Button(value="Send", variant="primary")
            run_button_1.click(
                fn=bot_response,
                inputs=[model_dropdown_1, system_instruction_1, text_input_1, chatbot_1],
                outputs=[chatbot_1, text_input_1],
            )
        with gr.Tab("Tab 2: Chatbot with Images"):
            model_dropdown_2 = gr.Dropdown(
                choices=["gemini-1.5-flash", "gemini-2.0-flash-exp", "gemini-1.5-pro"],
                value="gemini-1.5-flash",
                label="Select the model",
            )
            chatbot_2 = gr.Chatbot(label="Gemini", height=300)
            system_instruction_2 = gr.Textbox(
                placeholder="Write a system instruction...",
                label="System instruction",
            )
            with gr.Row():
                text_input_2 = gr.Textbox(placeholder="Message or description...", show_label=False)
                upload_button = gr.UploadButton(label="Upload Images", file_count="multiple", file_types=["image"])
                run_button_2 = gr.Button(value="Run", variant="primary")
            run_button_2.click(
                fn=advanced_response,
                inputs=[text_input_2, upload_button, model_dropdown_2, system_instruction_2, chatbot_2],
                outputs=[chatbot_2],
            )
            upload_button.upload(
                fn=upload,
                inputs=[upload_button, chatbot_2],
                outputs=[chatbot_2],
            )
    return demo
if __name__ == "__main__":
    demo = build_interface()
    demo.launch(debug=True)