JeCabrera committed
Commit c163d83 · verified
1 Parent(s): 446d2fd

Update app.py

Files changed (1)
  1. app.py +46 -158
app.py CHANGED
@@ -1,3 +1,6 @@
+TITLE = """<h1 align="center">Gemini Playground ✨</h1>"""
+SUBTITLE = """<h2 align="center">Play with Gemini Pro and Gemini Pro Vision</h2>"""
+
 import os
 import time
 import uuid
@@ -30,19 +33,13 @@ generation_config = {
 
 genai.configure(api_key=GOOGLE_API_KEY)
 
-# Initialize the models for both tabs
-model_with_images = genai.GenerativeModel(
-    model_name="gemini-1.5-flash",
-    generation_config=generation_config
-)
-
-model_text_only = genai.GenerativeModel(
-    model_name="gemini-1.5-flash",
+model = genai.GenerativeModel(
+    model_name="gemini-1.5-flash",
     generation_config=generation_config
 )
 
-# Initialize the chat session for the chatbot without images
-chat_text_only = model_text_only.start_chat(history=[])
+# Initialize the chat session
+chat = model.start_chat(history=[])
 
 # Function to transform the Gradio history into the Gemini format
 def transform_history(history):
@@ -52,7 +49,7 @@ def transform_history(history):
         new_history.append({"parts": [{"text": chat_entry[1]}], "role": "model"})
     return new_history
 
-# Response function that handles the history for the chatbot without images
+# Response function that handles the history
 def bot_response(
     model_choice: str,
     system_instruction: str,
@@ -69,10 +66,10 @@ def bot_response(
     transformed_history = transform_history(chatbot)
 
     # Configure the model
-    chat_text_only.history = transformed_history
+    chat.history = transformed_history
 
     # Send the message and get the response
-    response = chat_text_only.send_message(text_prompt)
+    response = chat.send_message(text_prompt)
     response.resolve()
 
     # Get the text generated by the model
@@ -83,158 +80,49 @@ def bot_response(
 
     return chatbot, ""
 
-# Functions to handle the chatbot with images
-def preprocess_image(image: Image.Image) -> Optional[Image.Image]:
-    if image:
-        image_height = int(image.height * 512 / image.width)
-        return image.resize((512, image_height))
-
-def cache_pil_image(image: Image.Image) -> str:
-    image_filename = f"{uuid.uuid4()}.jpeg"
-    os.makedirs("/tmp", exist_ok=True)
-    image_path = os.path.join("/tmp", image_filename)
-    image.save(image_path, "JPEG")
-    return image_path
-
-def upload(files: Optional[List[str]], chatbot: list) -> list:
-    for file in files:
-        image = Image.open(file).convert('RGB')
-        image_preview = preprocess_image(image)
-        if image_preview:
-            gr.Image(image_preview).render()
-        image_path = cache_pil_image(image)
-        chatbot.append(((image_path,), None))
-    return chatbot
-
-def user(text_prompt: str, chatbot: list):
-    if text_prompt:
-        chatbot.append((text_prompt, None))
-    return "", chatbot
-
-def bot(
-    files: Optional[List[str]],
-    model_choice: str,
-    system_instruction: Optional[str],
-    chatbot: list
-):
-    if not GOOGLE_API_KEY:
-        raise ValueError("GOOGLE_API_KEY is not set.")
-
-    genai.configure(api_key=GOOGLE_API_KEY)
-    generation_config = genai.types.GenerationConfig(
-        temperature=0.7,
-        max_output_tokens=8192,
-        top_k=10,
-        top_p=0.9
-    )
-
-    if not system_instruction:
-        system_instruction = "1"
-
-    text_prompt = [chatbot[-1][0]] if chatbot and chatbot[-1][0] and isinstance(chatbot[-1][0], str) else []
-    image_prompt = [preprocess_image(Image.open(file).convert('RGB')) for file in files] if files else []
-
-    model_with_images = genai.GenerativeModel(
-        model_name=model_choice,
-        generation_config=generation_config,
-        system_instruction=system_instruction
-    )
-
-    response = model_with_images.generate_content(text_prompt + image_prompt, stream=True, generation_config=generation_config)
-
-    chatbot[-1][1] = ""
-    for chunk in response:
-        for i in range(0, len(chunk.text), 10):
-            section = chunk.text[i:i + 10]
-            chatbot[-1][1] += section
-            time.sleep(0.01)
-            yield chatbot
-
-# Interfaces
-TITLE = """<h1 align="center">Gemini Playground ✨</h1>"""
-SUBTITLE = """<h2 align="center">Play with Gemini Pro and Gemini Pro Vision</h2>"""
-
-# Shared components
-chatbot_component_with_images = gr.Chatbot(label='Gemini with Images', scale=2, height=300)
-chatbot_component_text_only = gr.Chatbot(label='Gemini Text Only', scale=2, height=300)
-text_prompt_component = gr.Textbox(placeholder="Message...", show_label=False, autofocus=True, scale=8)
-run_button_component = gr.Button(value="Run", variant="primary", scale=1)
-upload_button_component = gr.UploadButton(label="Upload Images", file_count="multiple", file_types=["image"], scale=1)
-
-# Separate components for each tab
-model_choice_component_text_only = gr.Dropdown(
-    choices=["gemini-1.5-flash", "gemini-2.0-flash-exp", "gemini-1.5-pro"],
-    value="gemini-1.5-flash",
-    label="Select Model",
-    scale=2
-)
-
-model_choice_component_with_images = gr.Dropdown(
+# Interface components
+chatbot_component = gr.Chatbot(label="Gemini", scale=2, height=300)
+text_input_component = gr.Textbox(placeholder="Escribe un mensaje...", show_label=False, scale=8)
+run_button_component = gr.Button(value="Enviar", variant="primary", scale=1)
+model_dropdown_component = gr.Dropdown(
     choices=["gemini-1.5-flash", "gemini-2.0-flash-exp", "gemini-1.5-pro"],
     value="gemini-1.5-flash",
-    label="Select Model",
+    label="Selecciona el modelo",
     scale=2
 )
-
 system_instruction_component = gr.Textbox(
-    placeholder="Enter system instruction...",
-    show_label=True,
-    scale=8
+    placeholder="Escribe una instrucción para el sistema...",
+    label="Instrucción del sistema",
+    scale=8,
+    value="You are an assistant."
 )
 
+# Define the interface
 with gr.Blocks() as demo:
     gr.HTML(TITLE)
     gr.HTML(SUBTITLE)
-    with gr.Tabs():
-        with gr.TabItem("Chatbot with Images"):
-            with gr.Column():
-                model_choice_component_with_images
-                chatbot_component_with_images
-                with gr.Row():
-                    text_prompt_component
-                    upload_button_component
-                    run_button_component
-                with gr.Accordion("System Instruction", open=False):
-                    system_instruction_component
-
-            run_button_component.click(
-                fn=user,
-                inputs=[text_prompt_component, chatbot_component_with_images],
-                outputs=[text_prompt_component, chatbot_component_with_images],
-                queue=False
-            ).then(
-                fn=bot,
-                inputs=[upload_button_component, model_choice_component_with_images, system_instruction_component, chatbot_component_with_images],
-                outputs=[chatbot_component_with_images],
-            )
-
-            upload_button_component.upload(
-                fn=upload,
-                inputs=[upload_button_component, chatbot_component_with_images],
-                outputs=[chatbot_component_with_images],
-                queue=False
-            )
-
-        with gr.TabItem("Chatbot Text Only"):
-            with gr.Column():
-                model_choice_component_text_only
-                chatbot_component_text_only
-                with gr.Row():
-                    text_prompt_component
-                    run_button_component
-                with gr.Accordion("System Instruction", open=False):
-                    system_instruction_component
-
-            run_button_component.click(
-                fn=bot_response,
-                inputs=[model_choice_component_text_only, system_instruction_component, text_prompt_component, chatbot_component_text_only],
-                outputs=[chatbot_component_text_only, text_prompt_component],
-            )
-
-            text_prompt_component.submit(
-                fn=bot_response,
-                inputs=[model_choice_component_text_only, system_instruction_component, text_prompt_component, chatbot_component_text_only],
-                outputs=[chatbot_component_text_only, text_prompt_component],
-            )
-
-demo.queue(max_size=99).launch(debug=True, show_error=True)
+    with gr.Column():
+        model_dropdown_component.render()
+        chatbot_component.render()
+        with gr.Row():
+            text_input_component.render()
+            run_button_component.render()
+        with gr.Accordion("Instrucción del sistema", open=False):
+            system_instruction_component.render()
+
+    # Set up events
+    run_button_component.click(
+        fn=bot_response,
+        inputs=[model_dropdown_component, system_instruction_component, text_input_component, chatbot_component],
+        outputs=[chatbot_component, text_input_component],
+    )
+
+    text_input_component.submit(
+        fn=bot_response,
+        inputs=[model_dropdown_component, system_instruction_component, text_input_component, chatbot_component],
+        outputs=[chatbot_component, text_input_component],
+    )
+
+# Launch the application
+if __name__ == "__main__":
+    demo.queue(max_size=99).launch(debug=True, show_error=True)
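For context, here is a minimal sketch of the text-only flow this revision keeps, using the names visible in the hunks (model, chat, transform_history, bot_response, GOOGLE_API_KEY). The hunks only show fragments of transform_history and bot_response, so the loop over the history tuples and the final chatbot.append are assumptions about the unshown code, not a verbatim copy of app.py.

import os
import google.generativeai as genai

# Names mirror the diff; bodies not shown in the hunks are assumed.
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))

model = genai.GenerativeModel(model_name="gemini-1.5-flash")
chat = model.start_chat(history=[])

def transform_history(history):
    # Gradio's Chatbot keeps (user, model) tuples; Gemini expects
    # {"role": ..., "parts": [{"text": ...}]} entries, as seen in the hunks.
    new_history = []
    for user_msg, model_msg in history:
        if user_msg:
            new_history.append({"parts": [{"text": user_msg}], "role": "user"})
        if model_msg:
            new_history.append({"parts": [{"text": model_msg}], "role": "model"})
    return new_history

def bot_response(model_choice, system_instruction, text_prompt, chatbot):
    # The dropdown and system instruction arrive as inputs, but the
    # pre-built chat session (bound to gemini-1.5-flash) answers.
    chat.history = transform_history(chatbot)     # rebuild Gemini-side history, as the diff does
    response = chat.send_message(text_prompt)
    response.resolve()
    chatbot.append((text_prompt, response.text))  # assumed: append the new exchange
    return chatbot, ""                            # clears the textbox, per the diff

Under these assumptions, changing the model dropdown or the system-instruction box feeds new input values into bot_response but does not rebuild the chat session, which stays on gemini-1.5-flash as created at module load.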