TITLE = """<h1 align="center">Gemini Playground ✨</h1>"""
SUBTITLE = """<h2 align="center">Play with Gemini Pro and Gemini Pro Vision</h2>"""

import os
import time
import uuid
from typing import List, Tuple, Optional, Union

import google.generativeai as genai
import gradio as gr
from PIL import Image
from dotenv import load_dotenv

# Cargar las variables de entorno desde el archivo .env
load_dotenv()

print("google-generativeai:", genai.__version__)

# Read the API key from the environment
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")

# Fail fast if the API key is not configured
if not GOOGLE_API_KEY:
    raise ValueError("GOOGLE_API_KEY is not set in environment variables.")

IMAGE_CACHE_DIRECTORY = "/tmp"
IMAGE_WIDTH = 512
CHAT_HISTORY = List[Tuple[Optional[Union[Tuple[str], str]], Optional[str]]]
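# Each CHAT_HISTORY entry is a (user, bot) pair: the user part is either a
# text prompt or a one-element tuple holding a cached image path; the bot
# part is the model response, or None while it is still pending.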

def preprocess_image(image: Image.Image) -> Optional[Image.Image]:
    # Resize to a fixed width while preserving the aspect ratio.
    if image:
        image_height = int(image.height * IMAGE_WIDTH / image.width)
        return image.resize((IMAGE_WIDTH, image_height))
    return None

def cache_pil_image(image: Image.Image) -> str:
    image_filename = f"{uuid.uuid4()}.jpeg"
    os.makedirs(IMAGE_CACHE_DIRECTORY, exist_ok=True)
    image_path = os.path.join(IMAGE_CACHE_DIRECTORY, image_filename)
    image.save(image_path, "JPEG")
    return image_path

def upload(files: Optional[List[str]], chatbot: CHAT_HISTORY) -> CHAT_HISTORY:
    # Cache each uploaded image and append it to the history so the Chatbot
    # component can display it from its file path.
    for file in files or []:
        image = Image.open(file).convert('RGB')
        image_path = cache_pil_image(image)
        chatbot.append(((image_path,), None))
    return chatbot

def user(text_prompt: str, chatbot: CHAT_HISTORY):
    if text_prompt:
        chatbot.append((text_prompt, None))
    return "", chatbot

def bot(
    files: Optional[List[str]],
    model_choice: str,
    system_instruction: Optional[str],  # optional system instruction
    chatbot: CHAT_HISTORY
):
    if not GOOGLE_API_KEY:
        raise ValueError("GOOGLE_API_KEY is not set.")
    if not chatbot:
        return

    genai.configure(api_key=GOOGLE_API_KEY)
    generation_config = genai.types.GenerationConfig(
        temperature=0.7,
        max_output_tokens=8192,
        top_k=10,
        top_p=0.9
    )

    # Fall back to no system instruction when the textbox is left empty
    if not system_instruction:
        system_instruction = None

    text_prompt = [chatbot[-1][0]] if chatbot[-1][0] and isinstance(chatbot[-1][0], str) else []
    image_prompt = [preprocess_image(Image.open(file).convert('RGB')) for file in files] if files else []

    model = genai.GenerativeModel(
        model_name=model_choice,
        generation_config=generation_config,
        system_instruction=system_instruction
    )

    response = model.generate_content(text_prompt + image_prompt, stream=True)

    # Stream the reply into the last history entry in small slices so the UI
    # updates progressively.
    output = ""
    for chunk in response:
        for i in range(0, len(chunk.text), 10):
            output += chunk.text[i:i + 10]
            chatbot[-1] = (chatbot[-1][0], output)
            time.sleep(0.01)
            yield chatbot

# Textbox for the optional system instruction (rendered inside the accordion below)
system_instruction_component = gr.Textbox(
    placeholder="Enter system instruction...",
    show_label=True,
    scale=8
)

# Define the input and output components
chatbot_component = gr.Chatbot(label='Gemini', bubble_full_width=False, scale=2, height=300)
text_prompt_component = gr.Textbox(placeholder="Message...", show_label=False, autofocus=True, scale=8)
upload_button_component = gr.UploadButton(label="Upload Images", file_count="multiple", file_types=["image"], scale=1)
run_button_component = gr.Button(value="Run", variant="primary", scale=1)
model_choice_component = gr.Dropdown(
    choices=["gemini-1.5-flash", "gemini-2.0-flash-exp", "gemini-1.5-pro"],
    value="gemini-1.5-flash",
    label="Select Model",
    scale=2
)

user_inputs = [text_prompt_component, chatbot_component]
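# The order here must match the bot() signature:
# files, model_choice, system_instruction, chatbot.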
bot_inputs = [upload_button_component, model_choice_component, system_instruction_component, chatbot_component]

# Define the user interface
with gr.Blocks() as demo:
    gr.HTML(TITLE)
    gr.HTML(SUBTITLE)
    with gr.Column():
        # Model selector at the top
        model_choice_component.render()
        chatbot_component.render()
        with gr.Row():
            text_prompt_component.render()
            upload_button_component.render()
            run_button_component.render()

        # System instruction accordion at the bottom
        with gr.Accordion("System Instruction", open=False):  # collapsed by default
            system_instruction_component.render()

    run_button_component.click(
        fn=user,
        inputs=user_inputs,
        outputs=[text_prompt_component, chatbot_component],
        queue=False
    ).then(
        fn=bot, inputs=bot_inputs, outputs=[chatbot_component],
    )

    text_prompt_component.submit(
        fn=user,
        inputs=user_inputs,
        outputs=[text_prompt_component, chatbot_component],
        queue=False
    ).then(
        fn=bot, inputs=bot_inputs, outputs=[chatbot_component],
    )

    upload_button_component.upload(
        fn=upload,
        inputs=[upload_button_component, chatbot_component],
        outputs=[chatbot_component],
        queue=False
    )

# Launch the app
demo.queue(max_size=99).launch(debug=False, show_error=True)
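
# To run locally (assumptions: this file is saved as app.py and the
# dependencies google-generativeai, gradio, pillow and python-dotenv are
# installed):
#   python app.py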