TITLE = """<h1 align="center">Gemini Playground ✨</h1>""" | |
SUBTITLE = """<h2 align="center">Play with Gemini Pro and Gemini Pro Vision</h2>""" | |
import os | |
import time | |
from typing import List, Tuple, Optional, Union | |
import google.generativeai as genai | |
import gradio as gr | |
from dotenv import load_dotenv | |
# Cargar las variables de entorno desde el archivo .env | |
load_dotenv() | |
# Obtener la clave de la API de las variables de entorno | |
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY") | |
# Verificar que la clave de la API esté configurada | |
if not GOOGLE_API_KEY: | |
raise ValueError("GOOGLE_API_KEY is not set in environment variables.") | |
# Configurar la API | |
genai.configure(api_key=GOOGLE_API_KEY) | |
# Constantes | |
IMAGE_WIDTH = 512 | |
CHAT_HISTORY = List[Tuple[Optional[Union[Tuple[str], str]], Optional[str]]] | |
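# Illustrative only: a CHAT_HISTORY value in Gradio's tuple-based Chatbot format
# looks like [("Hello", "Hi! How can I help?"), ("Summarize this text", None)],
# where the second element is None while the bot reply is still pending.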
def user(text_prompt: str, chatbot: CHAT_HISTORY):
    """
    Handle user input in the chatbot.
    """
    if text_prompt:
        chatbot.append((text_prompt, None))
    return "", chatbot
def bot(
    model_choice: str,
    system_instruction: Optional[str],
    chatbot: CHAT_HISTORY
):
    """
    Handle the generative model's responses.
    """
    generation_config = genai.types.GenerationConfig(
        temperature=0.7,
        max_output_tokens=8192,
        top_k=10,
        top_p=0.9
    )
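    # Descriptive note: temperature controls randomness, max_output_tokens caps
    # the reply length, and top_k / top_p restrict the candidate tokens sampled
    # at each step. The values above are this app's defaults, not requirements
    # of the Gemini API.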
    # Fall back to a default if system_instruction is empty
    if not system_instruction:
        system_instruction = "You are a helpful assistant."

    # Nothing to generate if the history is empty
    if not chatbot:
        yield chatbot
        return

    # Take the most recent user prompt
    text_prompt = [chatbot[-1][0]] if chatbot[-1][0] else []

    # Create and configure the generative model
    model = genai.GenerativeModel(
        model_name=model_choice,
        generation_config=generation_config,
        system_instruction=system_instruction,
    )

    # Generate content using streaming
    response = model.generate_content(text_prompt, stream=True)

    # Accumulate the streamed chunks into the last chat entry
    chatbot[-1] = (chatbot[-1][0], "")
    for chunk in response:
        chatbot[-1] = (chatbot[-1][0], chatbot[-1][1] + chunk.text)
        yield chatbot
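# For reference only (sketch, not used by this app): the same call without
# streaming would be
#     response = model.generate_content(text_prompt)
#     chatbot[-1] = (chatbot[-1][0], response.text)
# but streaming lets the UI update as chunks arrive.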
# User interface components
system_instruction_component = gr.Textbox(
    placeholder="Enter system instruction...",
    label="System Instruction",
    lines=2
)
chatbot_component = gr.Chatbot(label='Gemini', bubble_full_width=False, height=300)
text_prompt_component = gr.Textbox(placeholder="Message...", show_label=False, autofocus=True)
run_button_component = gr.Button(value="Run", variant="primary")
model_choice_component = gr.Dropdown(
    choices=["gemini-1.5-flash", "gemini-2.0-flash-exp", "gemini-1.5-pro"],
    value="gemini-1.5-flash",
    label="Select Model"
)

user_inputs = [text_prompt_component, chatbot_component]
bot_inputs = [model_choice_component, system_instruction_component, chatbot_component]
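# Note: the order of bot_inputs mirrors the signature of
# bot(model_choice, system_instruction, chatbot), which is how Gradio maps
# input components to handler arguments.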
# Define the user interface
with gr.Blocks() as demo:
    gr.HTML(TITLE)
    gr.HTML(SUBTITLE)
    with gr.Column():
        # Model selector at the top
        model_choice_component.render()
        chatbot_component.render()
        with gr.Row():
            text_prompt_component.render()
            run_button_component.render()
        # Accordion for the system instruction at the bottom
        with gr.Accordion("System Instruction", open=False):
            system_instruction_component.render()

    run_button_component.click(
        fn=user,
        inputs=user_inputs,
        outputs=[text_prompt_component, chatbot_component],
        queue=False
    ).then(
        fn=bot, inputs=bot_inputs, outputs=[chatbot_component],
    )

    text_prompt_component.submit(
        fn=user,
        inputs=user_inputs,
        outputs=[text_prompt_component, chatbot_component],
        queue=False
    ).then(
        fn=bot, inputs=bot_inputs, outputs=[chatbot_component],
    )

# Launch the application
demo.queue(max_size=99).launch(debug=False, show_error=True)