JeCabrera committed on
Commit 0d6d18a · verified · 1 Parent(s): 61fa289

Update app.py

Files changed (1)
  1. app.py +108 -93
app.py CHANGED
@@ -1,6 +1,6 @@
 import os
 import uuid
-from typing import List, Optional, Union
+from typing import List, Tuple, Optional, Union
 from PIL import Image
 import google.generativeai as genai
 import gradio as gr
@@ -16,16 +16,34 @@ if not GOOGLE_API_KEY:
 
 genai.configure(api_key=GOOGLE_API_KEY)
 
+# Configuración general
 TITLE = """<h1 align="center">Gemini Playground ✨</h1>"""
 SUBTITLE = """<h2 align="center">Play with Gemini Pro and Gemini Pro Vision</h2>"""
 
-# Código de la pestaña 1
+IMAGE_CACHE_DIRECTORY = "/tmp"
+IMAGE_WIDTH = 512
+
+# Función para preprocesar imágenes
+def preprocess_image(image: Image.Image) -> Optional[Image.Image]:
+    if image:
+        image_height = int(image.height * IMAGE_WIDTH / image.width)
+        return image.resize((IMAGE_WIDTH, image_height))
+
+# Función para guardar imágenes en caché
+def cache_pil_image(image: Image.Image) -> str:
+    image_filename = f"{uuid.uuid4()}.jpeg"
+    os.makedirs(IMAGE_CACHE_DIRECTORY, exist_ok=True)
+    image_path = os.path.join(IMAGE_CACHE_DIRECTORY, image_filename)
+    image.save(image_path, "JPEG")
+    return image_path
+
+# Pestaña 1: Chatbot de solo texto con historial
 def bot_response(
     model_choice: str,
     system_instruction: str,
     text_prompt: str,
-    chatbot: List[dict],
-) -> tuple:
+    chatbot: List[Tuple[str, str]],
+) -> Tuple[List[Tuple[str, str]], str]:
     if not text_prompt.strip():
         return chatbot, "Por favor, escribe un mensaje válido."
 
@@ -44,44 +62,31 @@ def bot_response(
     response = chat.send_message(text_prompt)
     response.resolve()
 
-    chatbot.append({"role": "user", "content": text_prompt})
-    chatbot.append({"role": "assistant", "content": response.text})
+    chatbot.append((text_prompt, response.text))
     return chatbot, ""
 
-# Código de la pestaña 2
-IMAGE_CACHE_DIRECTORY = "/tmp"
-IMAGE_WIDTH = 512
-
-def preprocess_image(image: Image.Image) -> Optional[Image.Image]:
-    if image:
-        image_height = int(image.height * IMAGE_WIDTH / image.width)
-        return image.resize((IMAGE_WIDTH, image_height))
-
-def cache_pil_image(image: Image.Image) -> str:
-    image_filename = f"{uuid.uuid4()}.jpeg"
-    os.makedirs(IMAGE_CACHE_DIRECTORY, exist_ok=True)
-    image_path = os.path.join(IMAGE_CACHE_DIRECTORY, image_filename)
-    image.save(image_path, "JPEG")
-    return image_path
-
-def upload(files: Optional[List[str]], chatbot: List[dict]) -> List[dict]:
-    for file in files:
-        image = Image.open(file).convert("RGB")
-        image_preview = preprocess_image(image)
-        if image_preview:
-            gr.Image(image_preview).render()
-        image_path = cache_pil_image(image)
-        chatbot.append({"role": "user", "content": f"Uploaded image: {image_path}"})
+# Pestaña 2: Chatbot avanzado con imágenes
+def upload(files: Optional[List[gr.File]], chatbot: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
+    if files:
+        for file in files:
+            image = Image.open(file.name).convert("RGB")
+            image_preview = preprocess_image(image)
+            if image_preview:
+                image_path = cache_pil_image(image_preview)
+                chatbot.append((f"Uploaded image: {image_path}", "Imagen cargada correctamente."))
     return chatbot
 
 def advanced_response(
-    files: Optional[List[str]],
+    text_prompt: str,
+    files: Optional[List[gr.File]],
     model_choice: str,
     system_instruction: str,
-    chatbot: List[dict],
+    chatbot: List[Tuple[str, str]],
 ):
-    if not files:
-        return chatbot
+    if not text_prompt.strip() and not files:
+        chatbot.append(("", "Por favor, proporciona un mensaje o sube una imagen."))
+        yield chatbot
+        return
 
     model = genai.GenerativeModel(
         model_name=model_choice,
@@ -95,67 +100,77 @@ def advanced_response(
     chat = model.start_chat(history=chatbot)
     chat.system_instruction = system_instruction
 
-    images = [cache_pil_image(preprocess_image(Image.open(file))) for file in files]
-    response = chat.generate_content(images, stream=True)
+    if text_prompt:
+        chatbot.append((text_prompt, ""))
+    if files:
+        images = [cache_pil_image(preprocess_image(Image.open(file.name))) for file in files]
+        chatbot.append((f"Uploaded images: {', '.join(images)}", ""))
 
-    chatbot.append({"role": "assistant", "content": ""})
-    for chunk in response:
-        chatbot[-1]["content"] += chunk.text
-        yield chatbot
+    response = chat.send_message(text_prompt)
+    response.resolve()
 
-# Construcción de la interfaz con las dos pestañas originales
-with gr.Blocks() as demo:
-    gr.HTML(TITLE)
-    gr.HTML(SUBTITLE)
-
-    with gr.Tab("Pestaña 1"):
-        model_dropdown_1 = gr.Dropdown(
-            choices=["gemini-1.5-flash", "gemini-2.0-flash-exp", "gemini-1.5-pro"],
-            value="gemini-1.5-flash",
-            label="Selecciona el modelo",
-        )
-        chatbot_1 = gr.Chatbot(label="Gemini", scale=2, height=300, type="messages")
-        text_input_1 = gr.Textbox(placeholder="Escribe un mensaje...", show_label=False, scale=8)
-        run_button_1 = gr.Button(value="Enviar", variant="primary", scale=1)
-        system_instruction_1 = gr.Textbox(
-            placeholder="Escribe una instrucción para el sistema...",
-            label="Instrucción del sistema",
-            scale=8,
-            value="You are an assistant.",
-        )
-
-        run_button_1.click(
-            fn=bot_response,
-            inputs=[model_dropdown_1, system_instruction_1, text_input_1, chatbot_1],
-            outputs=[chatbot_1, text_input_1],
-        )
-
-    with gr.Tab("Pestaña 2"):
-        model_dropdown_2 = gr.Dropdown(
-            choices=["gemini-1.5-flash", "gemini-2.0-flash-exp", "gemini-1.5-pro"],
-            value="gemini-1.5-flash",
-            label="Select Model",
-        )
-        chatbot_2 = gr.Chatbot(label="Gemini", height=300, type="messages")
-        text_input_2 = gr.Textbox(placeholder="Message or description...", show_label=False, scale=8)
-        upload_button = gr.UploadButton(label="Upload Images", file_count="multiple", file_types=["image"])
-        run_button_2 = gr.Button(value="Run", variant="primary", scale=1)
-        system_instruction_2 = gr.Textbox(
-            placeholder="Enter system instruction...",
-            label="System Instruction",
-            scale=8,
-        )
-
-        run_button_2.click(
-            fn=advanced_response,
-            inputs=[upload_button, model_dropdown_2, system_instruction_2, chatbot_2],
-            outputs=[chatbot_2],
-        )
-        upload_button.upload(
-            fn=upload,
-            inputs=[upload_button, chatbot_2],
-            outputs=[chatbot_2],
-        )
+    chatbot[-1] = (chatbot[-1][0], response.text)
+    yield chatbot
+
+# Construcción de la interfaz
+def build_interface():
+    with gr.Blocks() as demo:
+        gr.HTML(TITLE)
+        gr.HTML(SUBTITLE)
+
+        with gr.Tab("Pestaña 1: Chatbot Texto"):
+            model_dropdown_1 = gr.Dropdown(
+                choices=["gemini-1.5-flash", "gemini-2.0-flash-exp", "gemini-1.5-pro"],
+                value="gemini-1.5-flash",
+                label="Selecciona el modelo",
+            )
+            chatbot_1 = gr.Chatbot(label="Gemini", height=300)
+            system_instruction_1 = gr.Textbox(
+                placeholder="Escribe una instrucción para el sistema...",
+                label="Instrucción del sistema",
+                value="You are an assistant.",
+            )
+
+            with gr.Row():
+                text_input_1 = gr.Textbox(placeholder="Escribe un mensaje...", show_label=False)
+                run_button_1 = gr.Button(value="Enviar", variant="primary")
+
+            run_button_1.click(
+                fn=bot_response,
+                inputs=[model_dropdown_1, system_instruction_1, text_input_1, chatbot_1],
+                outputs=[chatbot_1, text_input_1],
+            )
+
+        with gr.Tab("Pestaña 2: Chatbot con Imágenes"):
+            model_dropdown_2 = gr.Dropdown(
+                choices=["gemini-1.5-flash", "gemini-2.0-flash-exp", "gemini-1.5-pro"],
+                value="gemini-1.5-flash",
+                label="Selecciona el modelo",
+            )
+            chatbot_2 = gr.Chatbot(label="Gemini", height=300)
+            system_instruction_2 = gr.Textbox(
+                placeholder="Escribe una instrucción para el sistema...",
+                label="Instrucción del sistema",
+            )
+
+            with gr.Row():
+                text_input_2 = gr.Textbox(placeholder="Mensaje o descripción...", show_label=False)
+                upload_button = gr.UploadButton(label="Subir Imágenes", file_count="multiple", file_types=["image"])
+                run_button_2 = gr.Button(value="Ejecutar", variant="primary")
+
+            run_button_2.click(
+                fn=advanced_response,
+                inputs=[text_input_2, upload_button, model_dropdown_2, system_instruction_2, chatbot_2],
+                outputs=[chatbot_2],
+            )
+            upload_button.upload(
+                fn=upload,
+                inputs=[upload_button, chatbot_2],
+                outputs=[chatbot_2],
+            )
+
+    return demo
 
 if __name__ == "__main__":
+    demo = build_interface()
    demo.launch(debug=True)
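
The change above moves the chat history from role/content dictionaries to (user, assistant) tuples, the pair format a gr.Chatbot uses by default when type="messages" is not set, and it is the shape both tabs now append to and yield back to their Chatbot components. As a rough illustration only, not part of the commit, the sketch below drives the updated bot_response directly with that history shape; the "app" import path and the pre-set GOOGLE_API_KEY environment variable are assumptions.

# Hypothetical usage sketch, not from the commit: assumes the new file is saved
# as app.py and GOOGLE_API_KEY is exported before importing, since app.py calls
# genai.configure() at import time.
from app import bot_response

history = []  # new format: list of (user_message, assistant_message) tuples
history, error = bot_response(
    model_choice="gemini-1.5-flash",
    system_instruction="You are an assistant.",
    text_prompt="Hola, ¿qué modelos soportas?",
    chatbot=history,
)
print(error or history[-1][1])  # empty error string on success; the reply is the tuple's second element

advanced_response yields the same tuple format into chatbot_2, so both tabs share one history representation.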