# Gemini Playground — Gradio chat app for Google Gemini (text + vision tabs).
import os
import uuid
from typing import List, Optional, Union
from PIL import Image
import google.generativeai as genai
import gradio as gr
from dotenv import load_dotenv
# Load environment variables from the .env file
load_dotenv()

# API-key configuration: fail fast at import time if the key is missing,
# since nothing in the app can work without it.
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
if not GOOGLE_API_KEY:
    raise ValueError("GOOGLE_API_KEY is not set in environment variables.")
genai.configure(api_key=GOOGLE_API_KEY)

# Static HTML headers rendered at the top of the Gradio page.
TITLE = """<h1 align="center">Gemini Playground ✨</h1>"""
SUBTITLE = """<h2 align="center">Play with Gemini Pro and Gemini Pro Vision</h2>"""

# Tab 1 code
def bot_response(
    model_choice: str,
    system_instruction: str,
    text_prompt: str,
    chatbot: List[dict],
) -> tuple:
    """Send *text_prompt* to the chosen Gemini model and append the exchange.

    Args:
        model_choice: Gemini model name selected in the dropdown.
        system_instruction: System prompt applied to the conversation.
        text_prompt: The user's message.
        chatbot: Gradio "messages"-format history: dicts with "role"/"content".

    Returns:
        (updated chatbot history, textbox value) — the textbox is cleared on
        success, or filled with a validation message for empty input.
    """
    # Reject empty/whitespace-only prompts before touching the API.
    if not text_prompt.strip():
        return chatbot, "Por favor, escribe un mensaje válido."

    model = genai.GenerativeModel(
        model_name=model_choice,
        # BUG FIX: system_instruction must be given to the model constructor.
        # Assigning `chat.system_instruction` after start_chat() is silently
        # ignored by the google-generativeai SDK.
        system_instruction=system_instruction or None,
        generation_config={
            "temperature": 1,
            "top_p": 0.95,
            "top_k": 40,
            "max_output_tokens": 8192,
        },
    )

    # BUG FIX: Gradio "messages" history uses {"role", "content"} with role
    # "assistant"; the genai SDK expects {"role", "parts"} with role "model".
    history = [
        {
            "role": "model" if message["role"] == "assistant" else message["role"],
            "parts": [message["content"]],
        }
        for message in chatbot
    ]
    chat = model.start_chat(history=history)
    # Non-streaming send: the response text is available directly, so the
    # original `response.resolve()` (stream-only) is unnecessary.
    response = chat.send_message(text_prompt)

    chatbot.append({"role": "user", "content": text_prompt})
    chatbot.append({"role": "assistant", "content": response.text})
    return chatbot, ""
# Tab 2 code
# Directory where uploaded images are cached before being sent to the model.
IMAGE_CACHE_DIRECTORY = "/tmp"
# Uploaded images are downscaled to this width (aspect ratio preserved).
IMAGE_WIDTH = 512
def preprocess_image(image: Image.Image) -> Optional[Image.Image]:
    """Downscale *image* to IMAGE_WIDTH pixels wide, preserving aspect ratio.

    Returns None when no image is supplied.
    """
    if not image:
        return None
    scaled_height = int(image.height * IMAGE_WIDTH / image.width)
    return image.resize((IMAGE_WIDTH, scaled_height))
def cache_pil_image(image: Image.Image) -> str:
    """Persist *image* as a uniquely named JPEG in IMAGE_CACHE_DIRECTORY.

    Args:
        image: The PIL image to cache.

    Returns:
        Absolute path of the cached JPEG file.
    """
    image_filename = f"{uuid.uuid4()}.jpeg"
    os.makedirs(IMAGE_CACHE_DIRECTORY, exist_ok=True)
    image_path = os.path.join(IMAGE_CACHE_DIRECTORY, image_filename)
    # BUG FIX: JPEG has no alpha channel. Saving an RGBA/P/LA image (e.g. a
    # transparent PNG upload) raised OSError; convert to RGB first.
    if image.mode != "RGB":
        image = image.convert("RGB")
    image.save(image_path, "JPEG")
    return image_path
def upload(files: Optional[List[str]], chatbot: List[dict]) -> List[dict]:
    """Cache uploaded image files and record them in the chat history.

    Args:
        files: Paths of the uploaded image files (may be None or empty).
        chatbot: Gradio "messages"-format history, mutated in place.

    Returns:
        The updated chat history.
    """
    # BUG FIX: `files` is Optional but the original iterated it directly,
    # crashing with TypeError when Gradio fired the event with no files.
    if not files:
        return chatbot
    for file_path in files:
        # Convert to RGB up front so the JPEG cache save cannot fail on
        # images with an alpha channel.
        image = Image.open(file_path).convert("RGB")
        image_preview = preprocess_image(image)
        if image_preview:
            # NOTE(review): rendering a component inside an event handler is
            # unusual for Gradio — presumably a preview side effect; verify.
            gr.Image(image_preview).render()
        image_path = cache_pil_image(image)
        chatbot.append({"role": "user", "content": f"Uploaded image: {image_path}"})
    return chatbot
def advanced_response(
    files: Optional[List[str]],
    model_choice: str,
    system_instruction: str,
    chatbot: List[dict],
):
    """Stream a Gemini vision response about the uploaded images.

    Generator: yields the chat history after each streamed chunk so the
    Gradio Chatbot updates incrementally.

    Args:
        files: Paths of the uploaded image files (may be None or empty).
        model_choice: Gemini model name selected in the dropdown.
        system_instruction: System prompt applied to the conversation.
        chatbot: Gradio "messages"-format history, mutated in place.
    """
    if not files:
        # BUG FIX: a generator handler that returns without yielding leaves
        # the Chatbot output untouched; yield the unchanged history instead.
        yield chatbot
        return

    model = genai.GenerativeModel(
        model_name=model_choice,
        # BUG FIX: system_instruction is a constructor argument; assigning it
        # on the ChatSession after start_chat() is silently ignored.
        system_instruction=system_instruction or None,
        generation_config={
            "temperature": 0.7,
            "max_output_tokens": 8192,
            "top_k": 10,
            "top_p": 0.9,
        },
    )

    # BUG FIX: convert the Gradio {"role", "content"} history to the genai
    # {"role", "parts"} format (role "assistant" -> "model").
    history = [
        {
            "role": "model" if message["role"] == "assistant" else message["role"],
            "parts": [message["content"]],
        }
        for message in chatbot
    ]
    chat = model.start_chat(history=history)

    # BUG FIX: the original sent cached *file paths* (strings) via a
    # non-existent ChatSession.generate_content(); send the PIL images
    # themselves through send_message(). Convert to RGB for safety.
    images = [preprocess_image(Image.open(f).convert("RGB")) for f in files]
    response = chat.send_message(images, stream=True)

    chatbot.append({"role": "assistant", "content": ""})
    for chunk in response:
        chatbot[-1]["content"] += chunk.text
        yield chatbot
# Build the interface with the two original tabs
with gr.Blocks() as demo:
    gr.HTML(TITLE)
    gr.HTML(SUBTITLE)

    # Tab 1: text-only chat with a Gemini model.
    with gr.Tab("Pestaña 1"):
        model_dropdown_1 = gr.Dropdown(
            choices=["gemini-1.5-flash", "gemini-2.0-flash-exp", "gemini-1.5-pro"],
            value="gemini-1.5-flash",
            label="Selecciona el modelo",
        )
        # type="messages" -> history is a list of {"role", "content"} dicts.
        chatbot_1 = gr.Chatbot(label="Gemini", scale=2, height=300, type="messages")
        system_instruction_1 = gr.Textbox(
            placeholder="Escribe una instrucción para el sistema...",
            label="Instrucción del sistema",
            scale=8,
            value="You are an assistant.",
        )
        with gr.Row():
            text_input_1 = gr.Textbox(placeholder="Escribe un mensaje...", show_label=False, scale=8)
            run_button_1 = gr.Button(value="Enviar", variant="primary", scale=1)
        # bot_response returns (history, textbox value) — it clears the
        # textbox on success or shows a validation message.
        run_button_1.click(
            fn=bot_response,
            inputs=[model_dropdown_1, system_instruction_1, text_input_1, chatbot_1],
            outputs=[chatbot_1, text_input_1],
        )

    # Tab 2: image upload + streamed vision response.
    with gr.Tab("Pestaña 2"):
        model_dropdown_2 = gr.Dropdown(
            choices=["gemini-1.5-flash", "gemini-2.0-flash-exp", "gemini-1.5-pro"],
            value="gemini-1.5-flash",
            label="Select Model",
        )
        chatbot_2 = gr.Chatbot(label="Gemini", height=300, type="messages")
        system_instruction_2 = gr.Textbox(
            placeholder="Enter system instruction...",
            label="System Instruction",
            scale=8,
        )
        with gr.Row():
            text_input_2 = gr.Textbox(placeholder="Message or description...", show_label=False, scale=8)
            upload_button = gr.UploadButton(label="Upload Images", file_count="multiple", file_types=["image"])
            run_button_2 = gr.Button(value="Run", variant="primary", scale=1)
        # advanced_response is a generator, so the chatbot streams chunk by
        # chunk; the UploadButton doubles as the files input.
        run_button_2.click(
            fn=advanced_response,
            inputs=[upload_button, model_dropdown_2, system_instruction_2, chatbot_2],
            outputs=[chatbot_2],
        )
        # Register uploads immediately so the images appear in the history.
        upload_button.upload(
            fn=upload,
            inputs=[upload_button, chatbot_2],
            outputs=[chatbot_2],
        )

if __name__ == "__main__":
    demo.launch(debug=True)