Spaces:
Sleeping
Sleeping
# Static HTML headers rendered at the top of the Gradio page.
TITLE = """<h1 align="center">Gemini Playground ✨</h1>"""
SUBTITLE = """<h2 align="center">Play with Gemini Pro and Gemini Pro Vision</h2>"""

import os
import time
import uuid
from typing import List, Tuple, Optional, Union
from PIL import Image
import google.generativeai as genai
import gradio as gr
from dotenv import load_dotenv

# Load environment variables from the .env file
load_dotenv()

print("google-generativeai:", genai.__version__)

# Read the API key from the environment
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")

# Fail fast at import time if the key is missing, rather than on first request
if not GOOGLE_API_KEY:
    raise ValueError("GOOGLE_API_KEY is not set in environment variables.")

# Generation settings shared by every Gemini request
generation_config = {
    "temperature": 1,
    "top_p": 0.95,
    "top_k": 40,
    "max_output_tokens": 8192,
    "response_mime_type": "text/plain",
}

genai.configure(api_key=GOOGLE_API_KEY)

# Default model used for the module-level chat session
model = genai.GenerativeModel(
    model_name="gemini-1.5-flash",
    generation_config=generation_config
)

# Module-level chat session holding the conversation state
chat = model.start_chat(history=[])
| # Función para transformar el historial de Gradio al formato de Gemini | |
def transform_history(history):
    """Convert Gradio chat history to the Gemini chat-history format.

    Args:
        history: List of ``(user_message, model_message)`` tuples as kept by
            a ``gr.Chatbot`` component.

    Returns:
        A list of ``{"parts": [{"text": ...}], "role": ...}`` dicts
        alternating ``user`` / ``model`` roles.

    Note:
        A pending entry whose model reply is ``None`` (e.g. a message that
        has not been answered yet) is emitted only as the user turn; the
        original code produced ``{"text": None}``, which the Gemini API
        rejects.
    """
    new_history = []
    for user_msg, model_msg in history:
        new_history.append({"parts": [{"text": user_msg}], "role": "user"})
        if model_msg is not None:
            new_history.append({"parts": [{"text": model_msg}], "role": "model"})
    return new_history
| # Función de respuesta que maneja el historial | |
def bot_response(
    model_choice: str,
    system_instruction: str,
    text_prompt: str,
    chatbot: list,
) -> Tuple[list, str]:
    """Send the user's message to Gemini and update the chat history.

    Args:
        model_choice: Model name selected in the UI dropdown.
        system_instruction: System prompt entered in the accordion textbox.
        text_prompt: The user's message.
        chatbot: Gradio chat history as a list of ``(user, model)`` tuples.

    Returns:
        Tuple of (updated chat history, text to put back in the input box —
        empty on success, an error hint when the prompt is blank).
    """
    if not text_prompt.strip():
        return chatbot, "Por favor, escribe un mensaje válido."

    # BUG FIX: the original ignored both model_choice and system_instruction
    # and always reused the module-level "gemini-1.5-flash" session, so the
    # dropdown and system-prompt controls had no effect. Build a session for
    # the selected model, seeded with the current history.
    session_model = genai.GenerativeModel(
        model_name=model_choice,
        generation_config=generation_config,
        # An empty instruction is passed as None so the API default applies.
        system_instruction=system_instruction or None,
    )
    session = session_model.start_chat(history=transform_history(chatbot))

    # Send the message and wait for the full response to be available.
    response = session.send_message(text_prompt)
    response.resolve()
    generated_text = response.text

    # Record the new (question, answer) pair and clear the input box.
    chatbot.append((text_prompt, generated_text))
    return chatbot, ""
# UI components, created at module level and rendered inside the Blocks
# layout below via .render().
chatbot_component = gr.Chatbot(label="Gemini", scale=2, height=300)
text_input_component = gr.Textbox(placeholder="Escribe un mensaje...", show_label=False, scale=8)
run_button_component = gr.Button(value="Enviar", variant="primary", scale=1)
# Model picker; its value is passed to bot_response as model_choice.
model_dropdown_component = gr.Dropdown(
    choices=["gemini-1.5-flash", "gemini-2.0-flash-exp", "gemini-1.5-pro"],
    value="gemini-1.5-flash",
    label="Selecciona el modelo",
    scale=2
)
# System prompt; its value is passed to bot_response as system_instruction.
system_instruction_component = gr.Textbox(
    placeholder="Escribe una instrucción para el sistema...",
    label="Instrucción del sistema",
    scale=8,
    value="You are an assistant."
)
| # Definir la interfaz | |
# Application layout: title/subtitle, model picker, chat window, input row,
# and a collapsible system-instruction panel. Clicking the button and
# pressing Enter in the textbox both invoke the same handler.
with gr.Blocks() as demo:
    gr.HTML(TITLE)
    gr.HTML(SUBTITLE)
    with gr.Column():
        model_dropdown_component.render()
        chatbot_component.render()
        with gr.Row():
            text_input_component.render()
            run_button_component.render()
        with gr.Accordion("Instrucción del sistema", open=False):
            system_instruction_component.render()

    # Both triggers share identical inputs/outputs, so wire them in a loop.
    _handler_inputs = [
        model_dropdown_component,
        system_instruction_component,
        text_input_component,
        chatbot_component,
    ]
    _handler_outputs = [chatbot_component, text_input_component]
    for _register in (run_button_component.click, text_input_component.submit):
        _register(fn=bot_response, inputs=_handler_inputs, outputs=_handler_outputs)
# Launch the app when executed as a script; queue caps concurrent requests.
if __name__ == "__main__":
    demo.queue(max_size=99).launch(debug=True, show_error=True)