JeCabrera committed
Commit a22e988 · verified · 1 Parent(s): 5306417

Update app.py

Files changed (1):
  1. app.py +14 -21
app.py CHANGED
@@ -1,7 +1,6 @@
  import os
- import time
  import uuid
- from typing import List, Tuple, Optional, Union
+ from typing import List, Optional, Union
  from PIL import Image
  import google.generativeai as genai
  import gradio as gr
@@ -21,19 +20,12 @@ TITLE = """<h1 align="center">Gemini Playground ✨</h1>"""
  SUBTITLE = """<h2 align="center">Play with Gemini Pro and Gemini Pro Vision</h2>"""

  # Código de la pestaña 1
- def transform_history(history):
-     new_history = []
-     for chat_entry in history:
-         new_history.append({"parts": [{"text": chat_entry[0]}], "role": "user"})
-         new_history.append({"parts": [{"text": chat_entry[1]}], "role": "model"})
-     return new_history
-
  def bot_response(
      model_choice: str,
      system_instruction: str,
      text_prompt: str,
-     chatbot: list,
- ) -> Tuple[list, str]:
+     chatbot: List[dict],
+ ) -> tuple:
      if not text_prompt.strip():
          return chatbot, "Por favor, escribe un mensaje válido."

@@ -46,13 +38,14 @@ def bot_response(
              "max_output_tokens": 8192,
          },
      )
-     chat = model.start_chat(history=transform_history(chatbot))
+     chat = model.start_chat(history=chatbot)
      chat.system_instruction = system_instruction

      response = chat.send_message(text_prompt)
      response.resolve()

-     chatbot.append((text_prompt, response.text))
+     chatbot.append({"role": "user", "content": text_prompt})
+     chatbot.append({"role": "assistant", "content": response.text})
      return chatbot, ""

  # Código de la pestaña 2
@@ -71,21 +64,21 @@ def cache_pil_image(image: Image.Image) -> str:
      image.save(image_path, "JPEG")
      return image_path

- def upload(files: Optional[List[str]], chatbot: list) -> list:
+ def upload(files: Optional[List[str]], chatbot: List[dict]) -> List[dict]:
      for file in files:
          image = Image.open(file).convert("RGB")
          image_preview = preprocess_image(image)
          if image_preview:
              gr.Image(image_preview).render()
          image_path = cache_pil_image(image)
-         chatbot.append(((image_path,), None))
+         chatbot.append({"role": "user", "content": f"Uploaded image: {image_path}"})
      return chatbot

  def advanced_response(
      files: Optional[List[str]],
      model_choice: str,
      system_instruction: str,
-     chatbot: list,
+     chatbot: List[dict],
  ):
      if not files:
          return chatbot
@@ -105,9 +98,9 @@ def advanced_response(
      images = [cache_pil_image(preprocess_image(Image.open(file))) for file in files]
      response = chat.generate_content(images, stream=True)

-     chatbot[-1][1] = ""
+     chatbot.append({"role": "assistant", "content": ""})
      for chunk in response:
-         chatbot[-1][1] += chunk.text
+         chatbot[-1]["content"] += chunk.text
          yield chatbot

  # Construcción de la interfaz con las dos pestañas originales
@@ -121,7 +114,7 @@ with gr.Blocks() as demo:
              value="gemini-1.5-flash",
              label="Selecciona el modelo",
          )
-         chatbot_1 = gr.Chatbot(label="Gemini", scale=2, height=300)
+         chatbot_1 = gr.Chatbot(label="Gemini", scale=2, height=300, type="messages")
          text_input_1 = gr.Textbox(placeholder="Escribe un mensaje...", show_label=False, scale=8)
          run_button_1 = gr.Button(value="Enviar", variant="primary", scale=1)
          system_instruction_1 = gr.Textbox(
@@ -143,7 +136,7 @@ with gr.Blocks() as demo:
              value="gemini-1.5-flash",
              label="Select Model",
          )
-         chatbot_2 = gr.Chatbot(label="Gemini", height=300)
+         chatbot_2 = gr.Chatbot(label="Gemini", height=300, type="messages")
          text_input_2 = gr.Textbox(placeholder="Message or description...", show_label=False, scale=8)
          upload_button = gr.UploadButton(label="Upload Images", file_count="multiple", file_types=["image"])
          run_button_2 = gr.Button(value="Run", variant="primary", scale=1)
@@ -165,4 +158,4 @@ with gr.Blocks() as demo:
      )

  if __name__ == "__main__":
-     demo.launch(debug=True, queue=True)
+     demo.launch(debug=True)
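
For reference, a minimal sketch of the two history formats this change touches: gr.Chatbot(type="messages") stores turns as {"role": "user"|"assistant", "content": str} dicts, whereas google.generativeai's start_chat(history=...) accepts entries shaped like the ones the removed transform_history built ({"role": "user"|"model", "parts": [{"text": ...}]}). The helper name to_genai_history below is illustrative only, not part of the commit:

from typing import List

def to_genai_history(messages: List[dict]) -> List[dict]:
    # Illustrative sketch: map Gradio messages-format history entries to the
    # role/parts entries accepted by genai's start_chat(history=...).
    history = []
    for msg in messages:
        role = "model" if msg["role"] == "assistant" else "user"
        history.append({"role": role, "parts": [{"text": msg["content"]}]})
    return history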