TITLE = """<h1 align="center">Gemini Playground ✨</h1>""" | |
SUBTITLE = """<h2 align="center">Play with Gemini Pro and Gemini Pro Vision</h2>""" | |
import os | |
import time | |
from typing import List, Tuple, Optional, Union | |
import google.generativeai as genai | |
import gradio as gr | |
from dotenv import load_dotenv | |
# Cargar las variables de entorno desde el archivo .env | |
load_dotenv() | |
# Obtener la clave de la API de las variables de entorno | |
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY") | |
if not GOOGLE_API_KEY: | |
raise ValueError("GOOGLE_API_KEY is not set in environment variables.") | |
# Configuración global | |
genai.configure(api_key=GOOGLE_API_KEY) | |
IMAGE_WIDTH = 512 | |
CHAT_HISTORY = List[Tuple[Optional[str], Optional[str]]] | |
# Initialize the default model and chat session
model_name_default = "gemini-1.5-flash"
generation_config = genai.types.GenerationConfig(
    temperature=0.7,
    max_output_tokens=8192,
    top_k=10,
    top_p=0.9,
)
model = genai.GenerativeModel(model_name=model_name_default, generation_config=generation_config)
chat = model.start_chat(history=[])
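# Note: this module-level session is only a default; bot_response_handler
# rebuilds the chat session with the model and system instruction chosen in the UI.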
def transform_history(history: CHAT_HISTORY):
    """
    Transform the history from Gradio's tuple format to the format expected by Gemini.
    """
    new_history = []
    for user_input, model_response in history:
        if user_input:
            new_history.append({"parts": [{"text": user_input}], "role": "user"})
        if model_response:
            new_history.append({"parts": [{"text": model_response}], "role": "model"})
    return new_history
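# For illustration, [("Hi", "Hello!")] becomes:
# [{"parts": [{"text": "Hi"}], "role": "user"},
#  {"parts": [{"text": "Hello!"}], "role": "model"}]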
def user_input_handler(text_prompt: str, chatbot: CHAT_HISTORY):
    """
    Append the user's input to the history and return the updated interface.
    """
    if text_prompt.strip():
        chatbot.append((text_prompt, None))
    return "", chatbot
def bot_response_handler(
    model_choice: str,
    system_instruction: Optional[str],
    chatbot: CHAT_HISTORY,
):
    """
    Generate the model's response from the chat history and yield the updated interface.
    """
    global chat
    if not GOOGLE_API_KEY:
        raise ValueError("GOOGLE_API_KEY is not set.")

    # Nothing to answer if there is no pending user message
    if not chatbot or not chatbot[-1][0]:
        yield chatbot
        return

    # Configure the selected model and system instruction
    model = genai.GenerativeModel(
        model_name=model_choice,
        generation_config=generation_config,
        system_instruction=system_instruction or None,
    )

    # Rebuild the chat session from the history, excluding the pending user turn
    chat = model.start_chat(history=transform_history(chatbot[:-1]))

    # Send the most recent user message and wait for the full response
    user_message = chatbot[-1][0]
    response = chat.send_message(user_message)
    response.resolve()

    # Reveal the response incrementally to simulate typing
    full_text = response.text
    for i in range(len(full_text)):
        chatbot[-1] = (user_message, full_text[: i + 1])
        time.sleep(0.01)
        yield chatbot
# Interface components
chatbot_component = gr.Chatbot(label="Gemini Chat", height=400)
text_input_component = gr.Textbox(placeholder="Type your message here...", show_label=False)
model_dropdown_component = gr.Dropdown(
    choices=["gemini-1.5-flash", "gemini-2.0-flash-exp", "gemini-1.5-pro"],
    value=model_name_default,
    label="Select the model",
)
system_instruction_component = gr.Textbox(
    placeholder="Instruction for the model...",
    label="System Instruction",
)
run_button_component = gr.Button("Send")
# Interface layout
with gr.Blocks() as demo:
    gr.HTML(TITLE)
    gr.HTML(SUBTITLE)
    with gr.Column():
        model_dropdown_component.render()
        chatbot_component.render()
        with gr.Row():
            text_input_component.render()
            run_button_component.render()
        with gr.Accordion("System Instruction", open=False):
            system_instruction_component.render()
    # Event wiring
    run_button_component.click(
        user_input_handler,
        inputs=[text_input_component, chatbot_component],
        outputs=[text_input_component, chatbot_component],
        queue=False,
    ).then(
        bot_response_handler,
        inputs=[model_dropdown_component, system_instruction_component, chatbot_component],
        outputs=[chatbot_component],
    )
    text_input_component.submit(
        user_input_handler,
        inputs=[text_input_component, chatbot_component],
        outputs=[text_input_component, chatbot_component],
        queue=False,
    ).then(
        bot_response_handler,
        inputs=[model_dropdown_component, system_instruction_component, chatbot_component],
        outputs=[chatbot_component],
    )
# Launch the application
demo.queue(max_size=99).launch(debug=True, show_error=True)
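# Usage note (assuming this file is saved as app.py, the conventional entry point for a
# Hugging Face Space): create a .env file containing GOOGLE_API_KEY=<your key> and run
# `python app.py`; on Spaces, the key can instead be provided as a repository secret.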