JeCabrera committed on
Commit
446d2fd
·
verified ·
1 Parent(s): de7f30c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +240 -32
app.py CHANGED
@@ -1,32 +1,240 @@
1
- runtime error
2
- Exit code: 1. Reason: google-generativeai: 0.8.3
3
- /usr/local/lib/python3.10/site-packages/gradio/components/chatbot.py:273: UserWarning: You have not specified a value for the `type` parameter. Defaulting to the 'tuples' format for chatbot messages, but this is deprecated and will be removed in a future version of Gradio. Please set type='messages' instead, which uses openai-style dictionaries with 'role' and 'content' keys.
4
- warnings.warn(
5
- Traceback (most recent call last):
6
- File "/home/user/app/app.py", line 223, in <module>
7
- text_prompt_component.render()
8
- File "/usr/local/lib/python3.10/site-packages/gradio/blocks.py", line 186, in render
9
- raise DuplicateBlockError(
10
- gradio.exceptions.DuplicateBlockError: A block with id: 2 has already been rendered in the current Blocks.
11
- Container logs:
12
-
13
- ===== Application Startup at 2025-01-18 21:07:48 =====
14
-
15
- google-generativeai: 0.8.3
16
- /usr/local/lib/python3.10/site-packages/gradio/components/chatbot.py:273: UserWarning: You have not specified a value for the `type` parameter. Defaulting to the 'tuples' format for chatbot messages, but this is deprecated and will be removed in a future version of Gradio. Please set type='messages' instead, which uses openai-style dictionaries with 'role' and 'content' keys.
17
- warnings.warn(
18
- Traceback (most recent call last):
19
- File "/home/user/app/app.py", line 223, in <module>
20
- text_prompt_component.render()
21
- File "/usr/local/lib/python3.10/site-packages/gradio/blocks.py", line 186, in render
22
- raise DuplicateBlockError(
23
- gradio.exceptions.DuplicateBlockError: A block with id: 2 has already been rendered in the current Blocks.
24
- google-generativeai: 0.8.3
25
- /usr/local/lib/python3.10/site-packages/gradio/components/chatbot.py:273: UserWarning: You have not specified a value for the `type` parameter. Defaulting to the 'tuples' format for chatbot messages, but this is deprecated and will be removed in a future version of Gradio. Please set type='messages' instead, which uses openai-style dictionaries with 'role' and 'content' keys.
26
- warnings.warn(
27
- Traceback (most recent call last):
28
- File "/home/user/app/app.py", line 223, in <module>
29
- text_prompt_component.render()
30
- File "/usr/local/lib/python3.10/site-packages/gradio/blocks.py", line 186, in render
31
- raise DuplicateBlockError(
32
- gradio.exceptions.DuplicateBlockError: A block with id: 2 has already been rendered in the current Blocks.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import time
3
+ import uuid
4
+ from typing import List, Tuple, Optional, Union
5
+ from PIL import Image
6
+ import google.generativeai as genai
7
+ import gradio as gr
8
+ from dotenv import load_dotenv
9
+
10
# Load environment variables from a local .env file (provides GOOGLE_API_KEY).
load_dotenv()

# Log the installed SDK version at startup.
print("google-generativeai:", genai.__version__)

# Read the API key from the environment.
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")

# Fail fast at import time if the key is missing.
if not GOOGLE_API_KEY:
    raise ValueError("GOOGLE_API_KEY is not set in environment variables.")

# Default Gemini generation settings; this module-level dict is reused by the
# text-only chat model below.
generation_config = {
    "temperature": 1,
    "top_p": 0.95,
    "top_k": 40,
    "max_output_tokens": 8192,
    "response_mime_type": "text/plain",
}

genai.configure(api_key=GOOGLE_API_KEY)
32
+
33
# Initialize one model per tab; both currently use gemini-1.5-flash with the
# shared generation_config. (The image tab's bot() handler rebuilds its own
# model per call, so model_with_images here is effectively a default.)
model_with_images = genai.GenerativeModel(
    model_name="gemini-1.5-flash",
    generation_config=generation_config
)

model_text_only = genai.GenerativeModel(
    model_name="gemini-1.5-flash",
    generation_config=generation_config
)

# Shared chat session for the text-only tab; its history is overwritten from
# the Gradio history on every bot_response() call.
chat_text_only = model_text_only.start_chat(history=[])
46
+
47
# Convert Gradio's (user, model) tuple history into Gemini's message format.
def transform_history(history):
    """Return *history* as a flat list of Gemini-style message dicts.

    Each ``(user_text, model_text)`` pair expands into two entries: a
    ``"user"`` message followed by a ``"model"`` message, each wrapping
    its text in the ``{"parts": [{"text": ...}]}`` shape Gemini expects.
    """
    return [
        {"parts": [{"text": text}], "role": role}
        for user_text, model_text in history
        for role, text in (("user", user_text), ("model", model_text))
    ]
54
+
55
# Response handler for the text-only chatbot tab.
def bot_response(
    model_choice: str,
    system_instruction: str,
    text_prompt: str,
    chatbot: list,
) -> Tuple[list, str]:
    """Send the user's message to Gemini and append the reply to the history.

    Fix: ``model_choice`` and ``system_instruction`` were previously
    accepted from the UI but silently ignored — every turn went through
    the default shared session. The chat is now rebuilt from the Gradio
    history with the selected model and instruction, so the dropdown and
    the accordion actually take effect.

    Returns the updated (user, model) tuple history and the new textbox
    value ("" on success, a validation message otherwise).
    """
    if not text_prompt.strip():
        return chatbot, "Por favor, escribe un mensaje válido."

    # Honor the user's selections; an empty instruction field means "none".
    model = genai.GenerativeModel(
        model_name=model_choice,
        generation_config=generation_config,
        system_instruction=system_instruction or None,
    )

    # Recreate the chat session from the visible history so the model sees
    # the full conversation in Gemini's message format.
    chat = model.start_chat(history=transform_history(chatbot))

    # Send the message and block until the response is fully resolved.
    response = chat.send_message(text_prompt)
    response.resolve()

    # Record the turn and clear the textbox.
    chatbot.append((text_prompt, response.text))
    return chatbot, ""
85
+
86
# --- Helpers for the image-enabled chatbot tab ---
def preprocess_image(image: "Image.Image") -> "Optional[Image.Image]":
    """Downscale *image* to a 512-pixel width, keeping the aspect ratio.

    Returns None when *image* is None. Fixes: the original truthiness
    check (``if image:``) is replaced with an explicit None test, and the
    computed height is clamped to at least 1 pixel so an extremely wide
    image cannot produce an invalid zero-height resize.
    """
    if image is None:
        return None
    # Scale height proportionally to the new 512px width (min 1px).
    scaled_height = max(1, int(image.height * 512 / image.width))
    return image.resize((512, scaled_height))
91
+
92
def cache_pil_image(image: Image.Image) -> str:
    """Write *image* to /tmp as a uniquely-named JPEG and return its path."""
    os.makedirs("/tmp", exist_ok=True)
    destination = os.path.join("/tmp", f"{uuid.uuid4()}.jpeg")
    image.save(destination, "JPEG")
    return destination
98
+
99
def upload(files: Optional[List[str]], chatbot: list) -> list:
    """Add each uploaded image to the chat history as an image-only turn.

    Fixes: the original instantiated ``gr.Image(...)`` and called
    ``.render()`` inside this event callback — Gradio components cannot
    be created/rendered from within an event handler. The unused preview
    component is dropped; gr.Chatbot renders the cached file path itself.
    Also tolerates *files* being None.
    """
    for file in files or []:
        image = Image.open(file).convert('RGB')
        # Cache a JPEG copy under /tmp; the (path,) tuple marks an image entry.
        chatbot.append(((cache_pil_image(image),), None))
    return chatbot
108
+
109
def user(text_prompt: str, chatbot: list):
    """Echo the user's message into the history and clear the input box."""
    if not text_prompt:
        # Nothing typed: leave the history untouched.
        return "", chatbot
    chatbot.append((text_prompt, None))
    return "", chatbot
113
+
114
def bot(
    files: Optional[List[str]],
    model_choice: str,
    system_instruction: Optional[str],
    chatbot: list
):
    """Stream a Gemini reply for the last user turn (text and/or images).

    Yields the updated history repeatedly as the reply arrives, 10
    characters at a time, for a typewriter effect.

    Fixes: the original did ``chatbot[-1][1] = ""`` / ``+=``, which
    raises TypeError when history entries are tuples (as appended by
    ``user()``); the last entry is now replaced wholesale. The local
    generation config no longer shadows the module-level
    ``generation_config`` dict, and it is passed only once (via the
    model) instead of redundantly again to ``generate_content``.
    """
    if not GOOGLE_API_KEY:
        raise ValueError("GOOGLE_API_KEY is not set.")

    genai.configure(api_key=GOOGLE_API_KEY)
    gen_config = genai.types.GenerationConfig(
        temperature=0.7,
        max_output_tokens=8192,
        top_k=10,
        top_p=0.9
    )

    # Preserve the original fallback: a minimal stub instruction when none
    # is provided. TODO(review): replace "1" with a meaningful default.
    if not system_instruction:
        system_instruction = "1"

    # The last user turn supplies the text prompt; image-only entries hold
    # a (path,) tuple there, so only plain strings qualify.
    text_prompt = (
        [chatbot[-1][0]]
        if chatbot and chatbot[-1][0] and isinstance(chatbot[-1][0], str)
        else []
    )
    image_prompt = (
        [preprocess_image(Image.open(file).convert('RGB')) for file in files]
        if files
        else []
    )

    model = genai.GenerativeModel(
        model_name=model_choice,
        generation_config=gen_config,
        system_instruction=system_instruction
    )

    response = model.generate_content(text_prompt + image_prompt, stream=True)

    reply = ""
    for chunk in response:
        text = chunk.text
        for i in range(0, len(text), 10):
            reply += text[i:i + 10]
            # Replace the entry wholesale: tuples don't support item
            # assignment.
            chatbot[-1] = (chatbot[-1][0], reply)
            time.sleep(0.01)
            yield chatbot
152
+
153
# Interfaces
TITLE = """<h1 align="center">Gemini Playground ✨</h1>"""
SUBTITLE = """<h2 align="center">Play with Gemini Pro and Gemini Pro Vision</h2>"""

# Models offered in both tabs' dropdowns.
MODEL_CHOICES = ["gemini-1.5-flash", "gemini-2.0-flash-exp", "gemini-1.5-pro"]

with gr.Blocks() as demo:
    gr.HTML(TITLE)
    gr.HTML(SUBTITLE)
    with gr.Tabs():
        with gr.TabItem("Chatbot with Images"):
            # Fix: each tab gets its OWN component instances. A Gradio
            # component may be rendered only once per Blocks; sharing one
            # Textbox/Button/Accordion between both tabs is what raised
            # "DuplicateBlockError: A block with id: 2 has already been
            # rendered" at startup.
            with gr.Column():
                model_choice_component_with_images = gr.Dropdown(
                    choices=MODEL_CHOICES,
                    value="gemini-1.5-flash",
                    label="Select Model",
                    scale=2,
                )
                # type="tuples" is explicit: the handlers exchange
                # (user, bot) tuples, and being explicit silences the
                # deprecation warning logged at startup.
                chatbot_component_with_images = gr.Chatbot(
                    label='Gemini with Images', scale=2, height=300,
                    type="tuples",
                )
                with gr.Row():
                    text_prompt_component_with_images = gr.Textbox(
                        placeholder="Message...", show_label=False,
                        autofocus=True, scale=8,
                    )
                    upload_button_component = gr.UploadButton(
                        label="Upload Images", file_count="multiple",
                        file_types=["image"], scale=1,
                    )
                    run_button_component_with_images = gr.Button(
                        value="Run", variant="primary", scale=1,
                    )
                with gr.Accordion("System Instruction", open=False):
                    system_instruction_component_with_images = gr.Textbox(
                        placeholder="Enter system instruction...",
                        show_label=True, scale=8,
                    )

            # Stage the user's message first (queue=False so the echo is
            # immediate), then stream the model's reply.
            run_button_component_with_images.click(
                fn=user,
                inputs=[text_prompt_component_with_images,
                        chatbot_component_with_images],
                outputs=[text_prompt_component_with_images,
                         chatbot_component_with_images],
                queue=False,
            ).then(
                fn=bot,
                inputs=[
                    upload_button_component,
                    model_choice_component_with_images,
                    system_instruction_component_with_images,
                    chatbot_component_with_images,
                ],
                outputs=[chatbot_component_with_images],
            )

            upload_button_component.upload(
                fn=upload,
                inputs=[upload_button_component, chatbot_component_with_images],
                outputs=[chatbot_component_with_images],
                queue=False,
            )

        with gr.TabItem("Chatbot Text Only"):
            with gr.Column():
                model_choice_component_text_only = gr.Dropdown(
                    choices=MODEL_CHOICES,
                    value="gemini-1.5-flash",
                    label="Select Model",
                    scale=2,
                )
                chatbot_component_text_only = gr.Chatbot(
                    label='Gemini Text Only', scale=2, height=300,
                    type="tuples",
                )
                with gr.Row():
                    text_prompt_component_text_only = gr.Textbox(
                        placeholder="Message...", show_label=False,
                        autofocus=True, scale=8,
                    )
                    run_button_component_text_only = gr.Button(
                        value="Run", variant="primary", scale=1,
                    )
                with gr.Accordion("System Instruction", open=False):
                    system_instruction_component_text_only = gr.Textbox(
                        placeholder="Enter system instruction...",
                        show_label=True, scale=8,
                    )

            # Button click and textbox Enter both submit the same handler.
            run_button_component_text_only.click(
                fn=bot_response,
                inputs=[model_choice_component_text_only,
                        system_instruction_component_text_only,
                        text_prompt_component_text_only,
                        chatbot_component_text_only],
                outputs=[chatbot_component_text_only,
                         text_prompt_component_text_only],
            )

            text_prompt_component_text_only.submit(
                fn=bot_response,
                inputs=[model_choice_component_text_only,
                        system_instruction_component_text_only,
                        text_prompt_component_text_only,
                        chatbot_component_text_only],
                outputs=[chatbot_component_text_only,
                         text_prompt_component_text_only],
            )

demo.queue(max_size=99).launch(debug=True, show_error=True)